qemu: Check for domain being active on successful job acquire

Although some functions check whether the domain is active before
obtaining a job, we need to check again after the job is acquired:
obtaining a job unlocks the domain object while we wait, and the
state of the domain can change in the meantime.
Michal Privoznik 2011-10-11 10:40:36 +02:00
parent d81eee40c2
commit 9bc9999b6e
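
The race is easiest to see as a pattern: the qemuDomainObjBeginJob*() helpers can block waiting for another job to finish, and they drop the domain object lock while waiting, so the guest may exit (or be destroyed) before the job is actually acquired. Below is a minimal sketch of the resulting "check after acquire" shape, assuming the driver-internal helpers of this era (qemuDomainObjBeginJobWithDriver, virDomainObjIsActive, qemuDomainObjEndJob, virDomainObjUnlock); the entry point qemuDomainDoExample() is hypothetical and not part of the commit:

    /* Hypothetical driver function showing the check-after-acquire pattern. */
    static int
    qemuDomainDoExample(struct qemud_driver *driver, virDomainObjPtr vm)
    {
        int ret = -1;

        /* May block until another job finishes; the vm lock is dropped
         * while waiting, so the guest may be gone when we come back. */
        if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
            goto cleanup;

        /* Hence the liveness re-check *after* the job is ours. */
        if (!virDomainObjIsActive(vm)) {
            qemuReportError(VIR_ERR_OPERATION_INVALID,
                            "%s", _("domain is not running"));
            goto endjob;
        }

        /* ... enter the monitor and do the real work here ... */
        ret = 0;

    endjob:
        if (qemuDomainObjEndJob(driver, vm) == 0)
            vm = NULL;  /* domain object was disposed while we held the job */

    cleanup:
        if (vm)
            virDomainObjUnlock(vm);
        return ret;
    }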

src/qemu/qemu_driver.c
@@ -1948,9 +1948,18 @@ static int qemuDomainInjectNMI(virDomainPtr domain, unsigned int flags)
     if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
         goto cleanup;
 
+    if (!virDomainObjIsActive(vm)) {
+        qemuReportError(VIR_ERR_OPERATION_INVALID,
+                        "%s", _("domain is not running"));
+        goto endjob;
+    }
+
     qemuDomainObjEnterMonitorWithDriver(driver, vm);
     ret = qemuMonitorInjectNMI(priv->mon);
     qemuDomainObjExitMonitorWithDriver(driver, vm);
+
+endjob:
     if (qemuDomainObjEndJob(driver, vm) == 0) {
         vm = NULL;
         goto cleanup;
@@ -4397,7 +4406,7 @@ static char *qemuDomainGetXMLDesc(virDomainPtr dom,
     virDomainObjPtr vm;
     char *ret = NULL;
     unsigned long balloon;
-    int err;
+    int err = 0;
 
     /* Flags checked by virDomainDefFormat */
@@ -4423,9 +4432,17 @@ static char *qemuDomainGetXMLDesc(virDomainPtr dom,
         if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_QUERY) < 0)
             goto cleanup;
 
+        if (!virDomainObjIsActive(vm)) {
+            qemuReportError(VIR_ERR_OPERATION_INVALID,
+                            "%s", _("domain is not running"));
+            goto endjob;
+        }
+
         qemuDomainObjEnterMonitorWithDriver(driver, vm);
         err = qemuMonitorGetBalloonInfo(priv->mon, &balloon);
         qemuDomainObjExitMonitorWithDriver(driver, vm);
+
+endjob:
         if (qemuDomainObjEndJob(driver, vm) == 0) {
             vm = NULL;
             goto cleanup;
@@ -7163,6 +7180,12 @@ qemudDomainBlockStatsFlags (virDomainPtr dom,
     if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
         goto cleanup;
 
+    if (!virDomainObjIsActive(vm)) {
+        qemuReportError(VIR_ERR_OPERATION_INVALID,
+                        "%s", _("domain is not running"));
+        goto endjob;
+    }
+
     qemuDomainObjEnterMonitor(driver, vm);
     tmp = *nparams;
     ret = qemuMonitorGetBlockStatsParamsNumber(priv->mon, nparams);
@@ -8682,6 +8705,12 @@ qemuDomainMigrateSetMaxSpeed(virDomainPtr dom,
         if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MIGRATION_OP) < 0)
             goto cleanup;
 
+        if (!virDomainObjIsActive(vm)) {
+            qemuReportError(VIR_ERR_OPERATION_INVALID,
+                            "%s", _("domain is not running"));
+            goto endjob;
+        }
+
         VIR_DEBUG("Setting migration bandwidth to %luMbs", bandwidth);
         qemuDomainObjEnterMonitor(driver, vm);
         ret = qemuMonitorSetMigrationSpeed(priv->mon, bandwidth);
@@ -8690,6 +8719,7 @@ qemuDomainMigrateSetMaxSpeed(virDomainPtr dom,
         if (ret == 0)
            priv->migMaxBandwidth = bandwidth;
 
+endjob:
         if (qemuDomainObjEndJob(driver, vm) == 0)
            vm = NULL;
     } else {
@@ -8784,6 +8814,12 @@ qemuDomainSnapshotCreateActive(virConnectPtr conn,
     if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
         return -1;
 
+    if (!virDomainObjIsActive(vm)) {
+        qemuReportError(VIR_ERR_OPERATION_INVALID,
+                        "%s", _("domain is not running"));
+        goto endjob;
+    }
+
     if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
         /* savevm monitor command pauses the domain emitting an event which
          * confuses libvirt since it's not notified when qemu resumes the
@@ -8833,6 +8869,7 @@ cleanup:
                         _("resuming after snapshot failed"));
     }
 
+endjob:
     if (vm && qemuDomainObjEndJob(driver, vm) == 0) {
         /* Only possible if a transient vm quit while our locks were down,
          * in which case we don't want to save snapshot metadata. */
@@ -9055,6 +9092,13 @@ qemuDomainSnapshotCreateDiskActive(virConnectPtr conn,
     if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
         return -1;
+
+    if (!virDomainObjIsActive(vm)) {
+        qemuReportError(VIR_ERR_OPERATION_INVALID,
+                        "%s", _("domain is not running"));
+        goto endjob;
+    }
+
     if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
         /* In qemu, snapshot_blkdev on a single disk will pause cpus,
          * but this confuses libvirt since notifications are not given
@@ -9137,12 +9181,14 @@ cleanup:
         (persist &&
          virDomainSaveConfig(driver->configDir, vm->newDef) < 0))
            ret = -1;
-    if (qemuDomainObjEndJob(driver, vm) == 0) {
+    }
+
+endjob:
+    if (vm && (qemuDomainObjEndJob(driver, vm) == 0)) {
         /* Only possible if a transient vm quit while our locks were down,
          * in which case we don't want to save snapshot metadata. */
         *vmptr = NULL;
         ret = -1;
     }
-    }
 
     return ret;
@@ -10286,20 +10332,28 @@ static int qemuDomainMonitorCommand(virDomainPtr domain, const char *cmd,
         goto cleanup;
     }
 
+    if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
+        goto cleanup;
+
+    if (!virDomainObjIsActive(vm)) {
+        qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
+                        _("domain is not running"));
+        goto endjob;
+    }
+
     priv = vm->privateData;
 
     qemuDomainObjTaint(driver, vm, VIR_DOMAIN_TAINT_CUSTOM_MONITOR, -1);
 
     hmp = !!(flags & VIR_DOMAIN_QEMU_MONITOR_COMMAND_HMP);
 
-    if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
-        goto cleanup;
     qemuDomainObjEnterMonitorWithDriver(driver, vm);
     ret = qemuMonitorArbitraryCommand(priv->mon, cmd, result, hmp);
     qemuDomainObjExitMonitorWithDriver(driver, vm);
+
+endjob:
     if (qemuDomainObjEndJob(driver, vm) == 0) {
         vm = NULL;
         goto cleanup;
     }
 
 cleanup:
@@ -10536,10 +10590,19 @@ qemuDomainBlockJobImpl(virDomainPtr dom, const char *path,
     if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
         goto cleanup;
+
+    if (!virDomainObjIsActive(vm)) {
+        qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
+                        _("domain is not running"));
+        goto endjob;
+    }
+
     qemuDomainObjEnterMonitorWithDriver(driver, vm);
     priv = vm->privateData;
     ret = qemuMonitorBlockJob(priv->mon, device, bandwidth, info, mode);
     qemuDomainObjExitMonitorWithDriver(driver, vm);
+
+endjob:
     if (qemuDomainObjEndJob(driver, vm) == 0) {
         vm = NULL;
         goto cleanup;