qemu: blockjob: Synchronously update backing chain in XML on ABORT/PIVOT

When the synchronous pivot option is selected, libvirt would not update
the backing chain until the job was exited. Some applications then
received invalid data as their job serialized first.

This patch removes polling to wait for the ABORT/PIVOT job completion
and replaces it with a condition. If a synchronous operation is
requested the update of the XML is executed in the job of the caller of
the synchronous request. Otherwise the monitor event callback uses a
separate worker to update the backing chain with a new job.

This is a regression since commit 1a92c71910.

When the ABORT job is finished synchronously you get the following call
stack:
 #0  qemuBlockJobEventProcess
 #1  qemuDomainBlockJobImpl
 #2  qemuDomainBlockJobAbort
 #3  virDomainBlockJobAbort

While previously or while using the _ASYNC flag you'd get:
 #0  qemuBlockJobEventProcess
 #1  processBlockJobEvent
 #2  qemuProcessEventHandler
 #3  virThreadPoolWorker
This commit is contained in:
Peter Krempa 2015-03-30 11:26:20 +02:00 committed by Daniel Veillard
parent 0c4474df4e
commit 630ee5ac6c
4 changed files with 65 additions and 40 deletions

View File

@ -1289,9 +1289,22 @@ virDomainDiskDefNew(void)
if (VIR_ALLOC(ret) < 0)
return NULL;
if (VIR_ALLOC(ret->src) < 0)
VIR_FREE(ret);
goto error;
if (virCondInit(&ret->blockJobSyncCond) < 0) {
virReportSystemError(errno, "%s", _("Failed to initialize condition"));
goto error;
}
return ret;
error:
virStorageSourceFree(ret->src);
VIR_FREE(ret);
return NULL;
}
@ -1310,6 +1323,7 @@ virDomainDiskDefFree(virDomainDiskDefPtr def)
VIR_FREE(def->product);
VIR_FREE(def->domain_name);
virDomainDeviceInfoClear(&def->info);
virCondDestroy(&def->blockJobSyncCond);
VIR_FREE(def);
}

View File

@ -685,6 +685,12 @@ struct _virDomainDiskDef {
int mirrorState; /* enum virDomainDiskMirrorState */
int mirrorJob; /* virDomainBlockJobType */
/* for some synchronous block jobs, we need to notify the owner */
virCond blockJobSyncCond;
int blockJobType; /* type of the block job from the event */
int blockJobStatus; /* status of the finished block job */
bool blockJobSync; /* the block job needs synchronized termination */
struct {
unsigned int cylinders;
unsigned int heads;

View File

@ -16276,6 +16276,12 @@ qemuDomainBlockJobImpl(virDomainObjPtr vm,
goto endjob;
if (mode == BLOCK_JOB_ABORT) {
if (async && !(flags & VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC)) {
/* prepare state for event delivery */
disk->blockJobStatus = -1;
disk->blockJobSync = true;
}
if ((flags & VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT) &&
!(async && disk->mirror)) {
virReportError(VIR_ERR_OPERATION_INVALID,
@ -16389,37 +16395,24 @@ qemuDomainBlockJobImpl(virDomainObjPtr vm,
status);
event2 = virDomainEventBlockJob2NewFromObj(vm, disk->dst, type,
status);
} else if (!(flags & VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC)) {
} else if (disk->blockJobSync) {
/* XXX If the event reports failure, we should reflect
* that back into the return status of this API call. */
while (1) {
/* Poll every 50ms */
static struct timespec ts = {
.tv_sec = 0,
.tv_nsec = 50 * 1000 * 1000ull };
virDomainBlockJobInfo dummy;
qemuDomainObjEnterMonitor(driver, vm);
ret = qemuMonitorBlockJobInfo(priv->mon, device, &dummy, NULL);
if (qemuDomainObjExitMonitor(driver, vm) < 0)
ret = -1;
if (ret <= 0)
break;
virObjectUnlock(vm);
nanosleep(&ts, NULL);
virObjectLock(vm);
if (!virDomainObjIsActive(vm)) {
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
_("domain is not running"));
ret = -1;
break;
while (disk->blockJobStatus == -1 && disk->blockJobSync) {
if (virCondWait(&disk->blockJobSyncCond, &vm->parent.lock) < 0) {
virReportSystemError(errno, "%s",
_("Unable to wait on block job sync "
"condition"));
disk->blockJobSync = false;
goto endjob;
}
}
qemuBlockJobEventProcess(driver, vm, disk,
disk->blockJobType,
disk->blockJobStatus);
disk->blockJobSync = false;
}
}

View File

@ -1020,28 +1020,40 @@ qemuProcessHandleBlockJob(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
{
virQEMUDriverPtr driver = opaque;
struct qemuProcessEvent *processEvent = NULL;
char *data;
virDomainDiskDefPtr disk;
char *data = NULL;
virObjectLock(vm);
VIR_DEBUG("Block job for device %s (domain: %p,%s) type %d status %d",
diskAlias, vm, vm->def->name, type, status);
if (VIR_ALLOC(processEvent) < 0)
if (!(disk = qemuProcessFindDomainDiskByAlias(vm, diskAlias)))
goto error;
processEvent->eventType = QEMU_PROCESS_EVENT_BLOCK_JOB;
if (VIR_STRDUP(data, diskAlias) < 0)
goto error;
processEvent->data = data;
processEvent->vm = vm;
processEvent->action = type;
processEvent->status = status;
if (disk->blockJobSync) {
disk->blockJobType = type;
disk->blockJobStatus = status;
/* We have an SYNC API waiting for this event, dispatch it back */
virCondSignal(&disk->blockJobSyncCond);
} else {
/* there is no waiting SYNC API, dispatch the update to a thread */
if (VIR_ALLOC(processEvent) < 0)
goto error;
virObjectRef(vm);
if (virThreadPoolSendJob(driver->workerPool, 0, processEvent) < 0) {
ignore_value(virObjectUnref(vm));
goto error;
processEvent->eventType = QEMU_PROCESS_EVENT_BLOCK_JOB;
if (VIR_STRDUP(data, diskAlias) < 0)
goto error;
processEvent->data = data;
processEvent->vm = vm;
processEvent->action = type;
processEvent->status = status;
virObjectRef(vm);
if (virThreadPoolSendJob(driver->workerPool, 0, processEvent) < 0) {
ignore_value(virObjectUnref(vm));
goto error;
}
}
cleanup: