1
0
mirror of https://gitlab.com/libvirt/libvirt.git synced 2025-03-07 17:28:15 +00:00

qemu: fix crash when mixing sync and async monitor jobs

Currently, we attempt to run a sync job and an async job at the same
time. This means that the monitor commands for the two jobs can be run
in any order.

In the function qemuDomainObjEnterMonitorInternal():
    if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) {
        if (qemuDomainObjBeginNestedJob(driver, obj) < 0)
We check whether the caller is an async job by priv->job.active and
priv->job.asyncJob. But when an async job is running, and a sync job is
also running at the time of the check, then priv->job.active is not
QEMU_JOB_NONE. So we cannot check whether the caller is an async job
in the function qemuDomainObjEnterMonitorInternal(), and must instead
put the burden on the caller to tell us when an async command wants
to do a nested job.

Once the burden is on the caller, then only async monitor enters need
to worry about whether the VM is still running; for sync monitor enter,
the internal return is always 0, so lots of ignore_value can be dropped.

* src/qemu/THREADS.txt: Reflect new rules.
* src/qemu/qemu_domain.h (qemuDomainObjEnterMonitorAsync): New
prototype.
* src/qemu/qemu_process.h (qemuProcessStartCPUs)
(qemuProcessStopCPUs): Add parameter.
* src/qemu/qemu_migration.h (qemuMigrationToFile): Likewise.
(qemuMigrationWaitForCompletion): Make static.
* src/qemu/qemu_domain.c (qemuDomainObjEnterMonitorInternal): Add
parameter.
(qemuDomainObjEnterMonitorAsync): New function.
(qemuDomainObjEnterMonitor, qemuDomainObjEnterMonitorWithDriver):
Update callers.
* src/qemu/qemu_driver.c (qemuDomainSaveInternal)
(qemudDomainCoreDump, doCoreDump, processWatchdogEvent)
(qemudDomainSuspend, qemudDomainResume, qemuDomainSaveImageStartVM)
(qemuDomainSnapshotCreateActive, qemuDomainRevertToSnapshot):
Likewise.
* src/qemu/qemu_process.c (qemuProcessStopCPUs)
(qemuProcessFakeReboot, qemuProcessRecoverMigration)
(qemuProcessRecoverJob, qemuProcessStart): Likewise.
* src/qemu/qemu_migration.c (qemuMigrationToFile)
(qemuMigrationWaitForCompletion, qemuMigrationUpdateJobStatus)
(qemuMigrationJobStart, qemuDomainMigrateGraphicsRelocate)
(doNativeMigrate, doTunnelMigrate, qemuMigrationPerformJob)
(qemuMigrationPerformPhase, qemuMigrationFinish)
(qemuMigrationConfirm): Likewise.
* src/qemu/qemu_hotplug.c: Drop unneeded ignore_value.
This commit is contained in:
Eric Blake 2011-07-28 17:18:24 -06:00
parent c03f7f1358
commit 193cd0f3c8
9 changed files with 248 additions and 152 deletions

View File

@ -217,10 +217,7 @@ To acquire the QEMU monitor lock
NB: caller must take care to drop the driver lock if necessary NB: caller must take care to drop the driver lock if necessary
These functions automatically begin/end nested job if called inside an These functions must not be used by an asynchronous job.
asynchronous job. The caller must then check the return value of
qemuDomainObjEnterMonitor to detect if domain died while waiting on
the nested job.
To acquire the QEMU monitor lock with the driver lock held To acquire the QEMU monitor lock with the driver lock held
@ -237,10 +234,30 @@ To acquire the QEMU monitor lock with the driver lock held
NB: caller must take care to drop the driver lock if necessary NB: caller must take care to drop the driver lock if necessary
These functions automatically begin/end nested job if called inside an These functions must not be used inside an asynchronous job.
asynchronous job. The caller must then check the return value of
qemuDomainObjEnterMonitorWithDriver to detect if domain died while
waiting on the nested job. To acquire the QEMU monitor lock with the driver lock held and as part
of an asynchronous job
qemuDomainObjEnterMonitorAsync()
- Validates that the right async job is still running
- Acquires the qemuMonitorObjPtr lock
- Releases the virDomainObjPtr lock
- Releases the driver lock
- Validates that the VM is still active
qemuDomainObjExitMonitorWithDriver()
- Releases the qemuMonitorObjPtr lock
- Acquires the driver lock
- Acquires the virDomainObjPtr lock
NB: caller must take care to drop the driver lock if necessary
These functions are for use inside an asynchronous job; the caller
must check for a return of -1 (VM not running, so nothing to exit).
Helper functions may also call this with QEMU_ASYNC_JOB_NONE when
used from a sync job (such as when first starting a domain).
To keep a domain alive while waiting on a remote command, starting To keep a domain alive while waiting on a remote command, starting
@ -333,8 +350,7 @@ Design patterns
...do prep work... ...do prep work...
if (virDomainObjIsActive(vm)) { if (virDomainObjIsActive(vm)) {
/* using ignore_value is safe since vm is active */ qemuDomainObjEnterMonitor(obj);
ignore_value(qemuDomainObjEnterMonitor(obj));
qemuMonitorXXXX(priv->mon); qemuMonitorXXXX(priv->mon);
qemuDomainObjExitMonitor(obj); qemuDomainObjExitMonitor(obj);
} }
@ -361,8 +377,7 @@ Design patterns
...do prep work... ...do prep work...
if (virDomainObjIsActive(vm)) { if (virDomainObjIsActive(vm)) {
/* using ignore_value is safe since vm is active */ qemuDomainObjEnterMonitorWithDriver(driver, obj);
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, obj));
qemuMonitorXXXX(priv->mon); qemuMonitorXXXX(priv->mon);
qemuDomainObjExitMonitorWithDriver(driver, obj); qemuDomainObjExitMonitorWithDriver(driver, obj);
} }
@ -374,7 +389,7 @@ Design patterns
qemuDriverUnlock(driver); qemuDriverUnlock(driver);
* Running asynchronous job * Running asynchronous job with driver lock held
virDomainObjPtr obj; virDomainObjPtr obj;
qemuDomainObjPrivatePtr priv; qemuDomainObjPrivatePtr priv;
@ -387,7 +402,8 @@ Design patterns
...do prep work... ...do prep work...
if (qemuDomainObjEnterMonitorWithDriver(driver, obj) < 0) { if (qemuDomainObjEnterMonitorAsync(driver, obj,
QEMU_ASYNC_JOB_TYPE) < 0) {
/* domain died in the meantime */ /* domain died in the meantime */
goto error; goto error;
} }
@ -395,7 +411,8 @@ Design patterns
qemuDomainObjExitMonitorWithDriver(driver, obj); qemuDomainObjExitMonitorWithDriver(driver, obj);
while (!finished) { while (!finished) {
if (qemuDomainObjEnterMonitorWithDriver(driver, obj) < 0) { if (qemuDomainObjEnterMonitorAsync(driver, obj,
QEMU_ASYNC_JOB_TYPE) < 0) {
/* domain died in the meantime */ /* domain died in the meantime */
goto error; goto error;
} }

View File

@ -863,14 +863,20 @@ qemuDomainObjEndAsyncJob(struct qemud_driver *driver, virDomainObjPtr obj)
return virDomainObjUnref(obj); return virDomainObjUnref(obj);
} }
static int ATTRIBUTE_NONNULL(1) static int
qemuDomainObjEnterMonitorInternal(struct qemud_driver *driver, qemuDomainObjEnterMonitorInternal(struct qemud_driver *driver,
bool driver_locked, bool driver_locked,
virDomainObjPtr obj) virDomainObjPtr obj,
enum qemuDomainAsyncJob asyncJob)
{ {
qemuDomainObjPrivatePtr priv = obj->privateData; qemuDomainObjPrivatePtr priv = obj->privateData;
if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) { if (asyncJob != QEMU_ASYNC_JOB_NONE) {
if (asyncJob != priv->job.asyncJob) {
qemuReportError(VIR_ERR_INTERNAL_ERROR,
_("unepxected async job %d"), asyncJob);
return -1;
}
if (qemuDomainObjBeginJobInternal(driver, driver_locked, obj, if (qemuDomainObjBeginJobInternal(driver, driver_locked, obj,
QEMU_JOB_ASYNC_NESTED, QEMU_JOB_ASYNC_NESTED,
QEMU_ASYNC_JOB_NONE) < 0) QEMU_ASYNC_JOB_NONE) < 0)
@ -878,6 +884,8 @@ qemuDomainObjEnterMonitorInternal(struct qemud_driver *driver,
if (!virDomainObjIsActive(obj)) { if (!virDomainObjIsActive(obj)) {
qemuReportError(VIR_ERR_OPERATION_FAILED, "%s", qemuReportError(VIR_ERR_OPERATION_FAILED, "%s",
_("domain is no longer running")); _("domain is no longer running"));
/* Still referenced by the containing async job. */
ignore_value(qemuDomainObjEndJob(driver, obj));
return -1; return -1;
} }
} }
@ -930,15 +938,15 @@ qemuDomainObjExitMonitorInternal(struct qemud_driver *driver,
* *
* To be called immediately before any QEMU monitor API call * To be called immediately before any QEMU monitor API call
* Must have already either called qemuDomainObjBeginJob() and checked * Must have already either called qemuDomainObjBeginJob() and checked
* that the VM is still active or called qemuDomainObjBeginAsyncJob, in which * that the VM is still active; may not be used for nested async jobs.
* case this will start a nested job.
* *
* To be followed with qemuDomainObjExitMonitor() once complete * To be followed with qemuDomainObjExitMonitor() once complete
*/ */
int qemuDomainObjEnterMonitor(struct qemud_driver *driver, void qemuDomainObjEnterMonitor(struct qemud_driver *driver,
virDomainObjPtr obj) virDomainObjPtr obj)
{ {
return qemuDomainObjEnterMonitorInternal(driver, false, obj); ignore_value(qemuDomainObjEnterMonitorInternal(driver, false, obj,
QEMU_ASYNC_JOB_NONE));
} }
/* obj must NOT be locked before calling, qemud_driver must be unlocked /* obj must NOT be locked before calling, qemud_driver must be unlocked
@ -956,15 +964,36 @@ void qemuDomainObjExitMonitor(struct qemud_driver *driver,
* *
* To be called immediately before any QEMU monitor API call * To be called immediately before any QEMU monitor API call
* Must have already either called qemuDomainObjBeginJobWithDriver() and * Must have already either called qemuDomainObjBeginJobWithDriver() and
* checked that the VM is still active or called qemuDomainObjBeginAsyncJob, * checked that the VM is still active; may not be used for nested async jobs.
* in which case this will start a nested job.
* *
* To be followed with qemuDomainObjExitMonitorWithDriver() once complete * To be followed with qemuDomainObjExitMonitorWithDriver() once complete
*/ */
int qemuDomainObjEnterMonitorWithDriver(struct qemud_driver *driver, void qemuDomainObjEnterMonitorWithDriver(struct qemud_driver *driver,
virDomainObjPtr obj) virDomainObjPtr obj)
{ {
return qemuDomainObjEnterMonitorInternal(driver, true, obj); ignore_value(qemuDomainObjEnterMonitorInternal(driver, true, obj,
QEMU_ASYNC_JOB_NONE));
}
/*
* obj and qemud_driver must be locked before calling
*
* To be called immediately before any QEMU monitor API call.
* Must have already either called qemuDomainObjBeginJobWithDriver()
* and checked that the VM is still active, with asyncJob of
* QEMU_ASYNC_JOB_NONE; or already called qemuDomainObjBeginAsyncJob,
* with the same asyncJob.
*
* Returns 0 if job was started, in which case this must be followed with
* qemuDomainObjExitMonitorWithDriver(); or -1 if the job could not be
* started (probably because the vm exited in the meantime).
*/
int
qemuDomainObjEnterMonitorAsync(struct qemud_driver *driver,
virDomainObjPtr obj,
enum qemuDomainAsyncJob asyncJob)
{
return qemuDomainObjEnterMonitorInternal(driver, true, obj, asyncJob);
} }
/* obj must NOT be locked before calling, qemud_driver must be unlocked, /* obj must NOT be locked before calling, qemud_driver must be unlocked,

View File

@ -168,20 +168,28 @@ void qemuDomainObjRestoreJob(virDomainObjPtr obj,
void qemuDomainObjDiscardAsyncJob(struct qemud_driver *driver, void qemuDomainObjDiscardAsyncJob(struct qemud_driver *driver,
virDomainObjPtr obj); virDomainObjPtr obj);
int qemuDomainObjEnterMonitor(struct qemud_driver *driver, void qemuDomainObjEnterMonitor(struct qemud_driver *driver,
virDomainObjPtr obj) virDomainObjPtr obj)
ATTRIBUTE_RETURN_CHECK; ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
void qemuDomainObjExitMonitor(struct qemud_driver *driver, void qemuDomainObjExitMonitor(struct qemud_driver *driver,
virDomainObjPtr obj); virDomainObjPtr obj)
int qemuDomainObjEnterMonitorWithDriver(struct qemud_driver *driver, ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
virDomainObjPtr obj) void qemuDomainObjEnterMonitorWithDriver(struct qemud_driver *driver,
ATTRIBUTE_RETURN_CHECK; virDomainObjPtr obj)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
int qemuDomainObjEnterMonitorAsync(struct qemud_driver *driver,
virDomainObjPtr obj,
enum qemuDomainAsyncJob asyncJob)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) ATTRIBUTE_RETURN_CHECK;
void qemuDomainObjExitMonitorWithDriver(struct qemud_driver *driver, void qemuDomainObjExitMonitorWithDriver(struct qemud_driver *driver,
virDomainObjPtr obj); virDomainObjPtr obj)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
void qemuDomainObjEnterRemoteWithDriver(struct qemud_driver *driver, void qemuDomainObjEnterRemoteWithDriver(struct qemud_driver *driver,
virDomainObjPtr obj); virDomainObjPtr obj)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
void qemuDomainObjExitRemoteWithDriver(struct qemud_driver *driver, void qemuDomainObjExitRemoteWithDriver(struct qemud_driver *driver,
virDomainObjPtr obj); virDomainObjPtr obj)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
char *qemuDomainDefFormatXML(struct qemud_driver *driver, char *qemuDomainDefFormatXML(struct qemud_driver *driver,
virDomainDefPtr vm, virDomainDefPtr vm,

View File

@ -1375,7 +1375,7 @@ static int qemudDomainSuspend(virDomainPtr dom) {
goto endjob; goto endjob;
} }
if (virDomainObjGetState(vm, NULL) != VIR_DOMAIN_PAUSED) { if (virDomainObjGetState(vm, NULL) != VIR_DOMAIN_PAUSED) {
if (qemuProcessStopCPUs(driver, vm, reason) < 0) { if (qemuProcessStopCPUs(driver, vm, reason, QEMU_ASYNC_JOB_NONE) < 0) {
goto endjob; goto endjob;
} }
event = virDomainEventNewFromObj(vm, event = virDomainEventNewFromObj(vm,
@ -1428,7 +1428,8 @@ static int qemudDomainResume(virDomainPtr dom) {
} }
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) { if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
if (qemuProcessStartCPUs(driver, vm, dom->conn, if (qemuProcessStartCPUs(driver, vm, dom->conn,
VIR_DOMAIN_RUNNING_UNPAUSED) < 0) { VIR_DOMAIN_RUNNING_UNPAUSED,
QEMU_ASYNC_JOB_NONE) < 0) {
if (virGetLastError() == NULL) if (virGetLastError() == NULL)
qemuReportError(VIR_ERR_OPERATION_FAILED, qemuReportError(VIR_ERR_OPERATION_FAILED,
"%s", _("resume operation failed")); "%s", _("resume operation failed"));
@ -1484,7 +1485,7 @@ static int qemuDomainShutdown(virDomainPtr dom) {
} }
priv = vm->privateData; priv = vm->privateData;
ignore_value(qemuDomainObjEnterMonitor(driver, vm)); qemuDomainObjEnterMonitor(driver, vm);
ret = qemuMonitorSystemPowerdown(priv->mon); ret = qemuMonitorSystemPowerdown(priv->mon);
qemuDomainObjExitMonitor(driver, vm); qemuDomainObjExitMonitor(driver, vm);
@ -1536,7 +1537,7 @@ static int qemuDomainReboot(virDomainPtr dom, unsigned int flags) {
goto endjob; goto endjob;
} }
ignore_value(qemuDomainObjEnterMonitor(driver, vm)); qemuDomainObjEnterMonitor(driver, vm);
ret = qemuMonitorSystemPowerdown(priv->mon); ret = qemuMonitorSystemPowerdown(priv->mon);
qemuDomainObjExitMonitor(driver, vm); qemuDomainObjExitMonitor(driver, vm);
@ -1775,7 +1776,7 @@ static int qemudDomainSetMemoryFlags(virDomainPtr dom, unsigned long newmem,
if (flags & VIR_DOMAIN_AFFECT_LIVE) { if (flags & VIR_DOMAIN_AFFECT_LIVE) {
priv = vm->privateData; priv = vm->privateData;
ignore_value(qemuDomainObjEnterMonitor(driver, vm)); qemuDomainObjEnterMonitor(driver, vm);
r = qemuMonitorSetBalloon(priv->mon, newmem); r = qemuMonitorSetBalloon(priv->mon, newmem);
qemuDomainObjExitMonitor(driver, vm); qemuDomainObjExitMonitor(driver, vm);
virDomainAuditMemory(vm, vm->def->mem.cur_balloon, newmem, "update", virDomainAuditMemory(vm, vm->def->mem.cur_balloon, newmem, "update",
@ -1849,7 +1850,7 @@ static int qemuDomainInjectNMI(virDomainPtr domain, unsigned int flags)
if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0) if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
goto cleanup; goto cleanup;
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
ret = qemuMonitorInjectNMI(priv->mon); ret = qemuMonitorInjectNMI(priv->mon);
qemuDomainObjExitMonitorWithDriver(driver, vm); qemuDomainObjExitMonitorWithDriver(driver, vm);
if (qemuDomainObjEndJob(driver, vm) == 0) { if (qemuDomainObjEndJob(driver, vm) == 0) {
@ -1918,7 +1919,7 @@ static int qemuDomainSendKey(virDomainPtr domain,
goto cleanup; goto cleanup;
} }
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
ret = qemuMonitorSendKey(priv->mon, holdtime, keycodes, nkeycodes); ret = qemuMonitorSendKey(priv->mon, holdtime, keycodes, nkeycodes);
qemuDomainObjExitMonitorWithDriver(driver, vm); qemuDomainObjExitMonitorWithDriver(driver, vm);
if (qemuDomainObjEndJob(driver, vm) == 0) { if (qemuDomainObjEndJob(driver, vm) == 0) {
@ -1979,7 +1980,7 @@ static int qemudDomainGetInfo(virDomainPtr dom,
if (!virDomainObjIsActive(vm)) if (!virDomainObjIsActive(vm))
err = 0; err = 0;
else { else {
ignore_value(qemuDomainObjEnterMonitor(driver, vm)); qemuDomainObjEnterMonitor(driver, vm);
err = qemuMonitorGetBalloonInfo(priv->mon, &balloon); err = qemuMonitorGetBalloonInfo(priv->mon, &balloon);
qemuDomainObjExitMonitor(driver, vm); qemuDomainObjExitMonitor(driver, vm);
} }
@ -2232,7 +2233,8 @@ qemuDomainSaveInternal(struct qemud_driver *driver, virDomainPtr dom,
/* Pause */ /* Pause */
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) { if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
header.was_running = 1; header.was_running = 1;
if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_SAVE) < 0) if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_SAVE,
QEMU_ASYNC_JOB_SAVE) < 0)
goto endjob; goto endjob;
if (!virDomainObjIsActive(vm)) { if (!virDomainObjIsActive(vm)) {
@ -2404,7 +2406,8 @@ qemuDomainSaveInternal(struct qemud_driver *driver, virDomainPtr dom,
/* Perform the migration */ /* Perform the migration */
if (qemuMigrationToFile(driver, vm, fd, offset, path, if (qemuMigrationToFile(driver, vm, fd, offset, path,
qemuCompressProgramName(compressed), qemuCompressProgramName(compressed),
is_reg, bypassSecurityDriver) < 0) is_reg, bypassSecurityDriver,
QEMU_ASYNC_JOB_SAVE) < 0)
goto endjob; goto endjob;
if (VIR_CLOSE(fd) < 0) { if (VIR_CLOSE(fd) < 0) {
virReportSystemError(errno, _("unable to close %s"), path); virReportSystemError(errno, _("unable to close %s"), path);
@ -2433,7 +2436,8 @@ endjob:
if (ret != 0) { if (ret != 0) {
if (header.was_running && virDomainObjIsActive(vm)) { if (header.was_running && virDomainObjIsActive(vm)) {
rc = qemuProcessStartCPUs(driver, vm, dom->conn, rc = qemuProcessStartCPUs(driver, vm, dom->conn,
VIR_DOMAIN_RUNNING_SAVE_CANCELED); VIR_DOMAIN_RUNNING_SAVE_CANCELED,
QEMU_ASYNC_JOB_SAVE);
if (rc < 0) if (rc < 0)
VIR_WARN("Unable to resume guest CPUs after save failure"); VIR_WARN("Unable to resume guest CPUs after save failure");
} }
@ -2696,7 +2700,8 @@ doCoreDump(struct qemud_driver *driver,
goto cleanup; goto cleanup;
if (qemuMigrationToFile(driver, vm, fd, 0, path, if (qemuMigrationToFile(driver, vm, fd, 0, path,
qemuCompressProgramName(compress), true, false) < 0) qemuCompressProgramName(compress), true, false,
QEMU_ASYNC_JOB_DUMP) < 0)
goto cleanup; goto cleanup;
if (VIR_CLOSE(fd) < 0) { if (VIR_CLOSE(fd) < 0) {
@ -2787,7 +2792,8 @@ static int qemudDomainCoreDump(virDomainPtr dom,
/* Pause domain for non-live dump */ /* Pause domain for non-live dump */
if (!(flags & VIR_DUMP_LIVE) && if (!(flags & VIR_DUMP_LIVE) &&
virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) { virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_DUMP) < 0) if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_DUMP,
QEMU_ASYNC_JOB_DUMP) < 0)
goto endjob; goto endjob;
paused = 1; paused = 1;
@ -2819,7 +2825,8 @@ endjob:
the migration is complete. */ the migration is complete. */
else if (resume && paused && virDomainObjIsActive(vm)) { else if (resume && paused && virDomainObjIsActive(vm)) {
if (qemuProcessStartCPUs(driver, vm, dom->conn, if (qemuProcessStartCPUs(driver, vm, dom->conn,
VIR_DOMAIN_RUNNING_UNPAUSED) < 0) { VIR_DOMAIN_RUNNING_UNPAUSED,
QEMU_ASYNC_JOB_DUMP) < 0) {
if (virGetLastError() == NULL) if (virGetLastError() == NULL)
qemuReportError(VIR_ERR_OPERATION_FAILED, qemuReportError(VIR_ERR_OPERATION_FAILED,
"%s", _("resuming after dump failed")); "%s", _("resuming after dump failed"));
@ -2902,7 +2909,7 @@ qemuDomainScreenshot(virDomainPtr dom,
virSecurityManagerSetSavedStateLabel(qemu_driver->securityManager, vm, tmp); virSecurityManagerSetSavedStateLabel(qemu_driver->securityManager, vm, tmp);
ignore_value(qemuDomainObjEnterMonitor(driver, vm)); qemuDomainObjEnterMonitor(driver, vm);
if (qemuMonitorScreendump(priv->mon, tmp) < 0) { if (qemuMonitorScreendump(priv->mon, tmp) < 0) {
qemuDomainObjExitMonitor(driver, vm); qemuDomainObjExitMonitor(driver, vm);
goto endjob; goto endjob;
@ -2978,7 +2985,8 @@ static void processWatchdogEvent(void *data, void *opaque)
"%s", _("Dump failed")); "%s", _("Dump failed"));
ret = qemuProcessStartCPUs(driver, wdEvent->vm, NULL, ret = qemuProcessStartCPUs(driver, wdEvent->vm, NULL,
VIR_DOMAIN_RUNNING_UNPAUSED); VIR_DOMAIN_RUNNING_UNPAUSED,
QEMU_ASYNC_JOB_DUMP);
if (ret < 0) if (ret < 0)
qemuReportError(VIR_ERR_OPERATION_FAILED, qemuReportError(VIR_ERR_OPERATION_FAILED,
@ -3014,7 +3022,7 @@ static int qemudDomainHotplugVcpus(struct qemud_driver *driver,
int oldvcpus = vm->def->vcpus; int oldvcpus = vm->def->vcpus;
int vcpus = oldvcpus; int vcpus = oldvcpus;
ignore_value(qemuDomainObjEnterMonitor(driver, vm)); qemuDomainObjEnterMonitor(driver, vm);
/* We need different branches here, because we want to offline /* We need different branches here, because we want to offline
* in reverse order to onlining, so any partial fail leaves us in a * in reverse order to onlining, so any partial fail leaves us in a
@ -3934,7 +3942,8 @@ qemuDomainSaveImageStartVM(virConnectPtr conn,
/* If it was running before, resume it now. */ /* If it was running before, resume it now. */
if (header->was_running) { if (header->was_running) {
if (qemuProcessStartCPUs(driver, vm, conn, if (qemuProcessStartCPUs(driver, vm, conn,
VIR_DOMAIN_RUNNING_RESTORED) < 0) { VIR_DOMAIN_RUNNING_RESTORED,
QEMU_ASYNC_JOB_NONE) < 0) {
if (virGetLastError() == NULL) if (virGetLastError() == NULL)
qemuReportError(VIR_ERR_OPERATION_FAILED, qemuReportError(VIR_ERR_OPERATION_FAILED,
"%s", _("failed to resume domain")); "%s", _("failed to resume domain"));
@ -4195,7 +4204,7 @@ static char *qemuDomainGetXMLDesc(virDomainPtr dom,
if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_QUERY) < 0) if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_QUERY) < 0)
goto cleanup; goto cleanup;
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
err = qemuMonitorGetBalloonInfo(priv->mon, &balloon); err = qemuMonitorGetBalloonInfo(priv->mon, &balloon);
qemuDomainObjExitMonitorWithDriver(driver, vm); qemuDomainObjExitMonitorWithDriver(driver, vm);
if (qemuDomainObjEndJob(driver, vm) == 0) { if (qemuDomainObjEndJob(driver, vm) == 0) {
@ -6777,7 +6786,7 @@ qemudDomainBlockStats (virDomainPtr dom,
goto endjob; goto endjob;
} }
ignore_value(qemuDomainObjEnterMonitor(driver, vm)); qemuDomainObjEnterMonitor(driver, vm);
ret = qemuMonitorGetBlockStatsInfo(priv->mon, ret = qemuMonitorGetBlockStatsInfo(priv->mon,
disk->info.alias, disk->info.alias,
&stats->rd_req, &stats->rd_req,
@ -6887,7 +6896,7 @@ qemudDomainMemoryStats (virDomainPtr dom,
if (virDomainObjIsActive(vm)) { if (virDomainObjIsActive(vm)) {
qemuDomainObjPrivatePtr priv = vm->privateData; qemuDomainObjPrivatePtr priv = vm->privateData;
ignore_value(qemuDomainObjEnterMonitor(driver, vm)); qemuDomainObjEnterMonitor(driver, vm);
ret = qemuMonitorGetMemoryStats(priv->mon, stats, nr_stats); ret = qemuMonitorGetMemoryStats(priv->mon, stats, nr_stats);
qemuDomainObjExitMonitor(driver, vm); qemuDomainObjExitMonitor(driver, vm);
} else { } else {
@ -7034,7 +7043,7 @@ qemudDomainMemoryPeek (virDomainPtr dom,
virSecurityManagerSetSavedStateLabel(qemu_driver->securityManager, vm, tmp); virSecurityManagerSetSavedStateLabel(qemu_driver->securityManager, vm, tmp);
priv = vm->privateData; priv = vm->privateData;
ignore_value(qemuDomainObjEnterMonitor(driver, vm)); qemuDomainObjEnterMonitor(driver, vm);
if (flags == VIR_MEMORY_VIRTUAL) { if (flags == VIR_MEMORY_VIRTUAL) {
if (qemuMonitorSaveVirtualMemory(priv->mon, offset, size, tmp) < 0) { if (qemuMonitorSaveVirtualMemory(priv->mon, offset, size, tmp) < 0) {
qemuDomainObjExitMonitor(driver, vm); qemuDomainObjExitMonitor(driver, vm);
@ -7213,7 +7222,7 @@ static int qemuDomainGetBlockInfo(virDomainPtr dom,
goto cleanup; goto cleanup;
if (virDomainObjIsActive(vm)) { if (virDomainObjIsActive(vm)) {
ignore_value(qemuDomainObjEnterMonitor(driver, vm)); qemuDomainObjEnterMonitor(driver, vm);
ret = qemuMonitorGetBlockExtent(priv->mon, ret = qemuMonitorGetBlockExtent(priv->mon,
disk->info.alias, disk->info.alias,
&info->allocation); &info->allocation);
@ -8088,7 +8097,7 @@ static int qemuDomainAbortJob(virDomainPtr dom) {
} }
VIR_DEBUG("Cancelling job at client request"); VIR_DEBUG("Cancelling job at client request");
ignore_value(qemuDomainObjEnterMonitor(driver, vm)); qemuDomainObjEnterMonitor(driver, vm);
ret = qemuMonitorMigrateCancel(priv->mon); ret = qemuMonitorMigrateCancel(priv->mon);
qemuDomainObjExitMonitor(driver, vm); qemuDomainObjExitMonitor(driver, vm);
@ -8145,7 +8154,7 @@ qemuDomainMigrateSetMaxDowntime(virDomainPtr dom,
} }
VIR_DEBUG("Setting migration downtime to %llums", downtime); VIR_DEBUG("Setting migration downtime to %llums", downtime);
ignore_value(qemuDomainObjEnterMonitor(driver, vm)); qemuDomainObjEnterMonitor(driver, vm);
ret = qemuMonitorSetMigrationDowntime(priv->mon, downtime); ret = qemuMonitorSetMigrationDowntime(priv->mon, downtime);
qemuDomainObjExitMonitor(driver, vm); qemuDomainObjExitMonitor(driver, vm);
@ -8201,7 +8210,7 @@ qemuDomainMigrateSetMaxSpeed(virDomainPtr dom,
} }
VIR_DEBUG("Setting migration bandwidth to %luMbs", bandwidth); VIR_DEBUG("Setting migration bandwidth to %luMbs", bandwidth);
ignore_value(qemuDomainObjEnterMonitor(driver, vm)); qemuDomainObjEnterMonitor(driver, vm);
ret = qemuMonitorSetMigrationSpeed(priv->mon, bandwidth); ret = qemuMonitorSetMigrationSpeed(priv->mon, bandwidth);
qemuDomainObjExitMonitor(driver, vm); qemuDomainObjExitMonitor(driver, vm);
@ -8398,7 +8407,8 @@ qemuDomainSnapshotCreateActive(virConnectPtr conn,
* confuses libvirt since it's not notified when qemu resumes the * confuses libvirt since it's not notified when qemu resumes the
* domain. Thus we stop and start CPUs ourselves. * domain. Thus we stop and start CPUs ourselves.
*/ */
if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_SAVE) < 0) if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_SAVE,
QEMU_ASYNC_JOB_NONE) < 0)
goto cleanup; goto cleanup;
resume = true; resume = true;
@ -8409,14 +8419,15 @@ qemuDomainSnapshotCreateActive(virConnectPtr conn,
} }
} }
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
ret = qemuMonitorCreateSnapshot(priv->mon, snap->def->name); ret = qemuMonitorCreateSnapshot(priv->mon, snap->def->name);
qemuDomainObjExitMonitorWithDriver(driver, vm); qemuDomainObjExitMonitorWithDriver(driver, vm);
cleanup: cleanup:
if (resume && virDomainObjIsActive(vm) && if (resume && virDomainObjIsActive(vm) &&
qemuProcessStartCPUs(driver, vm, conn, qemuProcessStartCPUs(driver, vm, conn,
VIR_DOMAIN_RUNNING_UNPAUSED) < 0 && VIR_DOMAIN_RUNNING_UNPAUSED,
QEMU_ASYNC_JOB_NONE) < 0 &&
virGetLastError() == NULL) { virGetLastError() == NULL) {
qemuReportError(VIR_ERR_OPERATION_FAILED, "%s", qemuReportError(VIR_ERR_OPERATION_FAILED, "%s",
_("resuming after snapshot failed")); _("resuming after snapshot failed"));
@ -8743,7 +8754,7 @@ static int qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
if (virDomainObjIsActive(vm)) { if (virDomainObjIsActive(vm)) {
priv = vm->privateData; priv = vm->privateData;
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
rc = qemuMonitorLoadSnapshot(priv->mon, snap->def->name); rc = qemuMonitorLoadSnapshot(priv->mon, snap->def->name);
qemuDomainObjExitMonitorWithDriver(driver, vm); qemuDomainObjExitMonitorWithDriver(driver, vm);
if (rc < 0) if (rc < 0)
@ -8765,9 +8776,11 @@ static int qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
if (snap->def->state == VIR_DOMAIN_PAUSED) { if (snap->def->state == VIR_DOMAIN_PAUSED) {
/* qemu unconditionally starts the domain running again after /* qemu unconditionally starts the domain running again after
* loadvm, so let's pause it to keep consistency * loadvm, so let's pause it to keep consistency
* XXX we should have used qemuProcessStart's start_paused instead
*/ */
rc = qemuProcessStopCPUs(driver, vm, rc = qemuProcessStopCPUs(driver, vm,
VIR_DOMAIN_PAUSED_FROM_SNAPSHOT); VIR_DOMAIN_PAUSED_FROM_SNAPSHOT,
QEMU_ASYNC_JOB_NONE);
if (rc < 0) if (rc < 0)
goto endjob; goto endjob;
} else { } else {
@ -8867,7 +8880,7 @@ static int qemuDomainSnapshotDiscard(struct qemud_driver *driver,
} }
else { else {
priv = vm->privateData; priv = vm->privateData;
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
/* we continue on even in the face of error */ /* we continue on even in the face of error */
qemuMonitorDeleteSnapshot(priv->mon, snap->def->name); qemuMonitorDeleteSnapshot(priv->mon, snap->def->name);
qemuDomainObjExitMonitorWithDriver(driver, vm); qemuDomainObjExitMonitorWithDriver(driver, vm);
@ -9077,7 +9090,7 @@ static int qemuDomainMonitorCommand(virDomainPtr domain, const char *cmd,
if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0) if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
goto cleanup; goto cleanup;
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
ret = qemuMonitorArbitraryCommand(priv->mon, cmd, result, hmp); ret = qemuMonitorArbitraryCommand(priv->mon, cmd, result, hmp);
qemuDomainObjExitMonitorWithDriver(driver, vm); qemuDomainObjExitMonitorWithDriver(driver, vm);
if (qemuDomainObjEndJob(driver, vm) == 0) { if (qemuDomainObjEndJob(driver, vm) == 0) {
@ -9316,7 +9329,7 @@ qemuDomainBlockJobImpl(virDomainPtr dom, const char *path,
if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0) if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
goto cleanup; goto cleanup;
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
priv = vm->privateData; priv = vm->privateData;
ret = qemuMonitorBlockJob(priv->mon, device, bandwidth, info, mode); ret = qemuMonitorBlockJob(priv->mon, device, bandwidth, info, mode);
qemuDomainObjExitMonitorWithDriver(driver, vm); qemuDomainObjExitMonitorWithDriver(driver, vm);

View File

@ -97,7 +97,7 @@ int qemuDomainChangeEjectableMedia(struct qemud_driver *driver,
if (!(driveAlias = qemuDeviceDriveHostAlias(origdisk, priv->qemuCaps))) if (!(driveAlias = qemuDeviceDriveHostAlias(origdisk, priv->qemuCaps)))
goto error; goto error;
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (disk->src) { if (disk->src) {
const char *format = NULL; const char *format = NULL;
if (disk->type != VIR_DOMAIN_DISK_TYPE_DIR) { if (disk->type != VIR_DOMAIN_DISK_TYPE_DIR) {
@ -199,7 +199,7 @@ int qemuDomainAttachPciDiskDevice(struct qemud_driver *driver,
goto error; goto error;
} }
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) { if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
ret = qemuMonitorAddDrive(priv->mon, drivestr); ret = qemuMonitorAddDrive(priv->mon, drivestr);
if (ret == 0) { if (ret == 0) {
@ -296,7 +296,7 @@ int qemuDomainAttachPciControllerDevice(struct qemud_driver *driver,
goto cleanup; goto cleanup;
} }
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) { if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
ret = qemuMonitorAddDevice(priv->mon, devstr); ret = qemuMonitorAddDevice(priv->mon, devstr);
} else { } else {
@ -441,7 +441,7 @@ int qemuDomainAttachSCSIDisk(struct qemud_driver *driver,
goto error; goto error;
} }
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) { if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
ret = qemuMonitorAddDrive(priv->mon, drivestr); ret = qemuMonitorAddDrive(priv->mon, drivestr);
if (ret == 0) { if (ret == 0) {
@ -543,7 +543,7 @@ int qemuDomainAttachUsbMassstorageDevice(struct qemud_driver *driver,
goto error; goto error;
} }
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) { if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
ret = qemuMonitorAddDrive(priv->mon, drivestr); ret = qemuMonitorAddDrive(priv->mon, drivestr);
if (ret == 0) { if (ret == 0) {
@ -688,7 +688,7 @@ int qemuDomainAttachNetDevice(virConnectPtr conn,
goto cleanup; goto cleanup;
} }
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_NETDEV) && if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_NETDEV) &&
qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) { qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
if (qemuMonitorAddNetdev(priv->mon, netstr, tapfd, tapfd_name, if (qemuMonitorAddNetdev(priv->mon, netstr, tapfd, tapfd_name,
@ -724,7 +724,7 @@ int qemuDomainAttachNetDevice(virConnectPtr conn,
goto try_remove; goto try_remove;
} }
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) { if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
if (qemuMonitorAddDevice(priv->mon, nicstr) < 0) { if (qemuMonitorAddDevice(priv->mon, nicstr) < 0) {
qemuDomainObjExitMonitorWithDriver(driver, vm); qemuDomainObjExitMonitorWithDriver(driver, vm);
@ -783,7 +783,7 @@ try_remove:
char *netdev_name; char *netdev_name;
if (virAsprintf(&netdev_name, "host%s", net->info.alias) < 0) if (virAsprintf(&netdev_name, "host%s", net->info.alias) < 0)
goto no_memory; goto no_memory;
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (qemuMonitorRemoveNetdev(priv->mon, netdev_name) < 0) if (qemuMonitorRemoveNetdev(priv->mon, netdev_name) < 0)
VIR_WARN("Failed to remove network backend for netdev %s", VIR_WARN("Failed to remove network backend for netdev %s",
netdev_name); netdev_name);
@ -796,7 +796,7 @@ try_remove:
char *hostnet_name; char *hostnet_name;
if (virAsprintf(&hostnet_name, "host%s", net->info.alias) < 0) if (virAsprintf(&hostnet_name, "host%s", net->info.alias) < 0)
goto no_memory; goto no_memory;
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (qemuMonitorRemoveHostNetwork(priv->mon, vlan, hostnet_name) < 0) if (qemuMonitorRemoveHostNetwork(priv->mon, vlan, hostnet_name) < 0)
VIR_WARN("Failed to remove network backend for vlan %d, net %s", VIR_WARN("Failed to remove network backend for vlan %d, net %s",
vlan, hostnet_name); vlan, hostnet_name);
@ -857,14 +857,14 @@ int qemuDomainAttachHostPciDevice(struct qemud_driver *driver,
priv->qemuCaps))) priv->qemuCaps)))
goto error; goto error;
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
ret = qemuMonitorAddDeviceWithFd(priv->mon, devstr, ret = qemuMonitorAddDeviceWithFd(priv->mon, devstr,
configfd, configfd_name); configfd, configfd_name);
qemuDomainObjExitMonitorWithDriver(driver, vm); qemuDomainObjExitMonitorWithDriver(driver, vm);
} else { } else {
virDomainDevicePCIAddress guestAddr; virDomainDevicePCIAddress guestAddr;
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
ret = qemuMonitorAddPCIHostDevice(priv->mon, ret = qemuMonitorAddPCIHostDevice(priv->mon,
&hostdev->source.subsys.u.pci, &hostdev->source.subsys.u.pci,
&guestAddr); &guestAddr);
@ -945,7 +945,7 @@ int qemuDomainAttachHostUsbDevice(struct qemud_driver *driver,
goto error; goto error;
} }
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE))
ret = qemuMonitorAddDevice(priv->mon, devstr); ret = qemuMonitorAddDevice(priv->mon, devstr);
else else
@ -1273,7 +1273,7 @@ int qemuDomainDetachPciDiskDevice(struct qemud_driver *driver,
goto cleanup; goto cleanup;
} }
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) { if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
if (qemuMonitorDelDevice(priv->mon, detach->info.alias) < 0) { if (qemuMonitorDelDevice(priv->mon, detach->info.alias) < 0) {
qemuDomainObjExitMonitorWithDriver(driver, vm); qemuDomainObjExitMonitorWithDriver(driver, vm);
@ -1369,7 +1369,7 @@ int qemuDomainDetachDiskDevice(struct qemud_driver *driver,
goto cleanup; goto cleanup;
} }
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (qemuMonitorDelDevice(priv->mon, detach->info.alias) < 0) { if (qemuMonitorDelDevice(priv->mon, detach->info.alias) < 0) {
qemuDomainObjExitMonitorWithDriver(driver, vm); qemuDomainObjExitMonitorWithDriver(driver, vm);
virDomainAuditDisk(vm, detach, NULL, "detach", false); virDomainAuditDisk(vm, detach, NULL, "detach", false);
@ -1507,7 +1507,7 @@ int qemuDomainDetachPciControllerDevice(struct qemud_driver *driver,
goto cleanup; goto cleanup;
} }
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) { if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
if (qemuMonitorDelDevice(priv->mon, detach->info.alias)) { if (qemuMonitorDelDevice(priv->mon, detach->info.alias)) {
qemuDomainObjExitMonitorWithDriver(driver, vm); qemuDomainObjExitMonitorWithDriver(driver, vm);
@ -1602,7 +1602,7 @@ int qemuDomainDetachNetDevice(struct qemud_driver *driver,
goto cleanup; goto cleanup;
} }
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) { if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
if (qemuMonitorDelDevice(priv->mon, detach->info.alias) < 0) { if (qemuMonitorDelDevice(priv->mon, detach->info.alias) < 0) {
qemuDomainObjExitMonitorWithDriver(driver, vm); qemuDomainObjExitMonitorWithDriver(driver, vm);
@ -1739,7 +1739,7 @@ int qemuDomainDetachHostPciDevice(struct qemud_driver *driver,
return -1; return -1;
} }
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) { if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
ret = qemuMonitorDelDevice(priv->mon, detach->info.alias); ret = qemuMonitorDelDevice(priv->mon, detach->info.alias);
} else { } else {
@ -1842,7 +1842,7 @@ int qemuDomainDetachHostUsbDevice(struct qemud_driver *driver,
return -1; return -1;
} }
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
ret = qemuMonitorDelDevice(priv->mon, detach->info.alias); ret = qemuMonitorDelDevice(priv->mon, detach->info.alias);
qemuDomainObjExitMonitorWithDriver(driver, vm); qemuDomainObjExitMonitorWithDriver(driver, vm);
virDomainAuditHostdev(vm, detach, "detach", ret == 0); virDomainAuditHostdev(vm, detach, "detach", ret == 0);
@ -1921,7 +1921,7 @@ qemuDomainChangeGraphicsPasswords(struct qemud_driver *driver,
if (auth->connected) if (auth->connected)
connected = virDomainGraphicsAuthConnectedTypeToString(auth->connected); connected = virDomainGraphicsAuthConnectedTypeToString(auth->connected);
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
ret = qemuMonitorSetPassword(priv->mon, ret = qemuMonitorSetPassword(priv->mon,
type, type,
auth->passwd ? auth->passwd : defaultPasswd, auth->passwd ? auth->passwd : defaultPasswd,

View File

@ -727,7 +727,8 @@ qemuMigrationSetOffline(struct qemud_driver *driver,
{ {
int ret; int ret;
VIR_DEBUG("driver=%p vm=%p", driver, vm); VIR_DEBUG("driver=%p vm=%p", driver, vm);
ret = qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_MIGRATION); ret = qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_MIGRATION,
QEMU_ASYNC_JOB_MIGRATION_OUT);
if (ret == 0) { if (ret == 0) {
virDomainEventPtr event; virDomainEventPtr event;
@ -745,7 +746,8 @@ qemuMigrationSetOffline(struct qemud_driver *driver,
static int static int
qemuMigrationUpdateJobStatus(struct qemud_driver *driver, qemuMigrationUpdateJobStatus(struct qemud_driver *driver,
virDomainObjPtr vm, virDomainObjPtr vm,
const char *job) const char *job,
enum qemuDomainAsyncJob asyncJob)
{ {
qemuDomainObjPrivatePtr priv = vm->privateData; qemuDomainObjPrivatePtr priv = vm->privateData;
int ret = -1; int ret = -1;
@ -754,21 +756,17 @@ qemuMigrationUpdateJobStatus(struct qemud_driver *driver,
unsigned long long memRemaining; unsigned long long memRemaining;
unsigned long long memTotal; unsigned long long memTotal;
if (!virDomainObjIsActive(vm)) { ret = qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob);
qemuReportError(VIR_ERR_INTERNAL_ERROR, _("%s: %s"), if (ret < 0) {
job, _("guest unexpectedly quit")); /* Guest already exited; nothing further to update. */
return -1; return -1;
} }
ret = qemuMonitorGetMigrationStatus(priv->mon,
ret = qemuDomainObjEnterMonitorWithDriver(driver, vm); &status,
if (ret == 0) { &memProcessed,
ret = qemuMonitorGetMigrationStatus(priv->mon, &memRemaining,
&status, &memTotal);
&memProcessed, qemuDomainObjExitMonitorWithDriver(driver, vm);
&memRemaining,
&memTotal);
qemuDomainObjExitMonitorWithDriver(driver, vm);
}
if (ret < 0 || virTimeMs(&priv->job.info.timeElapsed) < 0) { if (ret < 0 || virTimeMs(&priv->job.info.timeElapsed) < 0) {
priv->job.info.type = VIR_DOMAIN_JOB_FAILED; priv->job.info.type = VIR_DOMAIN_JOB_FAILED;
@ -817,8 +815,9 @@ qemuMigrationUpdateJobStatus(struct qemud_driver *driver,
} }
int static int
qemuMigrationWaitForCompletion(struct qemud_driver *driver, virDomainObjPtr vm) qemuMigrationWaitForCompletion(struct qemud_driver *driver, virDomainObjPtr vm,
enum qemuDomainAsyncJob asyncJob)
{ {
qemuDomainObjPrivatePtr priv = vm->privateData; qemuDomainObjPrivatePtr priv = vm->privateData;
const char *job; const char *job;
@ -843,7 +842,7 @@ qemuMigrationWaitForCompletion(struct qemud_driver *driver, virDomainObjPtr vm)
/* Poll every 50ms for progress & to allow cancellation */ /* Poll every 50ms for progress & to allow cancellation */
struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000ull }; struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000ull };
if (qemuMigrationUpdateJobStatus(driver, vm, job) < 0) if (qemuMigrationUpdateJobStatus(driver, vm, job, asyncJob) < 0)
goto cleanup; goto cleanup;
virDomainObjUnlock(vm); virDomainObjUnlock(vm);
@ -883,7 +882,8 @@ qemuDomainMigrateGraphicsRelocate(struct qemud_driver *driver,
if (cookie->graphics->type != VIR_DOMAIN_GRAPHICS_TYPE_SPICE) if (cookie->graphics->type != VIR_DOMAIN_GRAPHICS_TYPE_SPICE)
return 0; return 0;
ret = qemuDomainObjEnterMonitorWithDriver(driver, vm); ret = qemuDomainObjEnterMonitorAsync(driver, vm,
QEMU_ASYNC_JOB_MIGRATION_OUT);
if (ret == 0) { if (ret == 0) {
ret = qemuMonitorGraphicsRelocate(priv->mon, ret = qemuMonitorGraphicsRelocate(priv->mon,
cookie->graphics->type, cookie->graphics->type,
@ -1330,7 +1330,8 @@ static int doNativeMigrate(struct qemud_driver *driver,
goto cleanup; goto cleanup;
} }
if (qemuDomainObjEnterMonitorWithDriver(driver, vm) < 0) if (qemuDomainObjEnterMonitorAsync(driver, vm,
QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
goto cleanup; goto cleanup;
if (resource > 0 && if (resource > 0 &&
@ -1352,7 +1353,8 @@ static int doNativeMigrate(struct qemud_driver *driver,
} }
qemuDomainObjExitMonitorWithDriver(driver, vm); qemuDomainObjExitMonitorWithDriver(driver, vm);
if (qemuMigrationWaitForCompletion(driver, vm) < 0) if (qemuMigrationWaitForCompletion(driver, vm,
QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
goto cleanup; goto cleanup;
/* When migration completed, QEMU will have paused the /* When migration completed, QEMU will have paused the
@ -1591,7 +1593,8 @@ static int doTunnelMigrate(struct qemud_driver *driver,
goto cleanup; goto cleanup;
} }
if (qemuDomainObjEnterMonitorWithDriver(driver, vm) < 0) if (qemuDomainObjEnterMonitorAsync(driver, vm,
QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
goto cleanup; goto cleanup;
if (resource > 0 && if (resource > 0 &&
@ -1634,7 +1637,8 @@ static int doTunnelMigrate(struct qemud_driver *driver,
/* it is also possible that the migrate didn't fail initially, but /* it is also possible that the migrate didn't fail initially, but
* rather failed later on. Check the output of "info migrate" * rather failed later on. Check the output of "info migrate"
*/ */
if (qemuDomainObjEnterMonitorWithDriver(driver, vm) < 0) if (qemuDomainObjEnterMonitorAsync(driver, vm,
QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
goto cancel; goto cancel;
if (qemuMonitorGetMigrationStatus(priv->mon, if (qemuMonitorGetMigrationStatus(priv->mon,
&status, &status,
@ -1664,7 +1668,8 @@ static int doTunnelMigrate(struct qemud_driver *driver,
if (!(iothread = qemuMigrationStartTunnel(st, client_sock))) if (!(iothread = qemuMigrationStartTunnel(st, client_sock)))
goto cancel; goto cancel;
ret = qemuMigrationWaitForCompletion(driver, vm); ret = qemuMigrationWaitForCompletion(driver, vm,
QEMU_ASYNC_JOB_MIGRATION_OUT);
/* When migration completed, QEMU will have paused the /* When migration completed, QEMU will have paused the
* CPUs for us, but unless we're using the JSON monitor * CPUs for us, but unless we're using the JSON monitor
@ -1693,7 +1698,8 @@ cancel:
if (ret != 0 && virDomainObjIsActive(vm)) { if (ret != 0 && virDomainObjIsActive(vm)) {
VIR_FORCE_CLOSE(client_sock); VIR_FORCE_CLOSE(client_sock);
VIR_FORCE_CLOSE(qemu_sock); VIR_FORCE_CLOSE(qemu_sock);
if (qemuDomainObjEnterMonitorWithDriver(driver, vm) == 0) { if (qemuDomainObjEnterMonitorAsync(driver, vm,
QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) {
qemuMonitorMigrateCancel(priv->mon); qemuMonitorMigrateCancel(priv->mon);
qemuDomainObjExitMonitorWithDriver(driver, vm); qemuDomainObjExitMonitorWithDriver(driver, vm);
} }
@ -2201,7 +2207,8 @@ endjob:
if (resume && virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) { if (resume && virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
/* we got here through some sort of failure; start the domain again */ /* we got here through some sort of failure; start the domain again */
if (qemuProcessStartCPUs(driver, vm, conn, if (qemuProcessStartCPUs(driver, vm, conn,
VIR_DOMAIN_RUNNING_MIGRATION_CANCELED) < 0) { VIR_DOMAIN_RUNNING_MIGRATION_CANCELED,
QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) {
/* Hm, we already know we are in error here. We don't want to /* Hm, we already know we are in error here. We don't want to
* overwrite the previous error, though, so we just throw something * overwrite the previous error, though, so we just throw something
* to the logs and hope for the best * to the logs and hope for the best
@ -2274,7 +2281,8 @@ qemuMigrationPerformPhase(struct qemud_driver *driver,
virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) { virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
/* we got here through some sort of failure; start the domain again */ /* we got here through some sort of failure; start the domain again */
if (qemuProcessStartCPUs(driver, vm, conn, if (qemuProcessStartCPUs(driver, vm, conn,
VIR_DOMAIN_RUNNING_MIGRATION_CANCELED) < 0) { VIR_DOMAIN_RUNNING_MIGRATION_CANCELED,
QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) {
/* Hm, we already know we are in error here. We don't want to /* Hm, we already know we are in error here. We don't want to
* overwrite the previous error, though, so we just throw something * overwrite the previous error, though, so we just throw something
* to the logs and hope for the best * to the logs and hope for the best
@ -2500,7 +2508,8 @@ qemuMigrationFinish(struct qemud_driver *driver,
* older qemu's, but it also doesn't hurt anything there * older qemu's, but it also doesn't hurt anything there
*/ */
if (qemuProcessStartCPUs(driver, vm, dconn, if (qemuProcessStartCPUs(driver, vm, dconn,
VIR_DOMAIN_RUNNING_MIGRATED) < 0) { VIR_DOMAIN_RUNNING_MIGRATED,
QEMU_ASYNC_JOB_MIGRATION_IN) < 0) {
if (virGetLastError() == NULL) if (virGetLastError() == NULL)
qemuReportError(VIR_ERR_INTERNAL_ERROR, qemuReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("resume operation failed")); "%s", _("resume operation failed"));
@ -2626,7 +2635,8 @@ int qemuMigrationConfirm(struct qemud_driver *driver,
* older qemu's, but it also doesn't hurt anything there * older qemu's, but it also doesn't hurt anything there
*/ */
if (qemuProcessStartCPUs(driver, vm, conn, if (qemuProcessStartCPUs(driver, vm, conn,
VIR_DOMAIN_RUNNING_MIGRATED) < 0) { VIR_DOMAIN_RUNNING_MIGRATED,
QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) {
if (virGetLastError() == NULL) if (virGetLastError() == NULL)
qemuReportError(VIR_ERR_INTERNAL_ERROR, qemuReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("resume operation failed")); "%s", _("resume operation failed"));
@ -2657,7 +2667,8 @@ int
qemuMigrationToFile(struct qemud_driver *driver, virDomainObjPtr vm, qemuMigrationToFile(struct qemud_driver *driver, virDomainObjPtr vm,
int fd, off_t offset, const char *path, int fd, off_t offset, const char *path,
const char *compressor, const char *compressor,
bool is_reg, bool bypassSecurityDriver) bool is_reg, bool bypassSecurityDriver,
enum qemuDomainAsyncJob asyncJob)
{ {
qemuDomainObjPrivatePtr priv = vm->privateData; qemuDomainObjPrivatePtr priv = vm->privateData;
virCgroupPtr cgroup = NULL; virCgroupPtr cgroup = NULL;
@ -2709,7 +2720,7 @@ qemuMigrationToFile(struct qemud_driver *driver, virDomainObjPtr vm,
restoreLabel = true; restoreLabel = true;
} }
if (qemuDomainObjEnterMonitorWithDriver(driver, vm) < 0) if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
goto cleanup; goto cleanup;
if (!compressor) { if (!compressor) {
@ -2763,7 +2774,7 @@ qemuMigrationToFile(struct qemud_driver *driver, virDomainObjPtr vm,
if (rc < 0) if (rc < 0)
goto cleanup; goto cleanup;
rc = qemuMigrationWaitForCompletion(driver, vm); rc = qemuMigrationWaitForCompletion(driver, vm, asyncJob);
if (rc < 0) if (rc < 0)
goto cleanup; goto cleanup;

View File

@ -78,8 +78,6 @@ bool qemuMigrationIsAllowed(virDomainDefPtr def)
int qemuMigrationSetOffline(struct qemud_driver *driver, int qemuMigrationSetOffline(struct qemud_driver *driver,
virDomainObjPtr vm); virDomainObjPtr vm);
int qemuMigrationWaitForCompletion(struct qemud_driver *driver, virDomainObjPtr vm);
char *qemuMigrationBegin(struct qemud_driver *driver, char *qemuMigrationBegin(struct qemud_driver *driver,
virDomainObjPtr vm, virDomainObjPtr vm,
const char *xmlin, const char *xmlin,
@ -145,7 +143,8 @@ int qemuMigrationConfirm(struct qemud_driver *driver,
int qemuMigrationToFile(struct qemud_driver *driver, virDomainObjPtr vm, int qemuMigrationToFile(struct qemud_driver *driver, virDomainObjPtr vm,
int fd, off_t offset, const char *path, int fd, off_t offset, const char *path,
const char *compressor, const char *compressor,
bool is_reg, bool bypassSecurityDriver) bool is_reg, bool bypassSecurityDriver,
enum qemuDomainAsyncJob asyncJob)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) ATTRIBUTE_NONNULL(5) ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) ATTRIBUTE_NONNULL(5)
ATTRIBUTE_RETURN_CHECK; ATTRIBUTE_RETURN_CHECK;

View File

@ -388,7 +388,7 @@ qemuProcessFakeReboot(void *opaque)
goto endjob; goto endjob;
} }
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (qemuMonitorSystemReset(priv->mon) < 0) { if (qemuMonitorSystemReset(priv->mon) < 0) {
qemuDomainObjExitMonitorWithDriver(driver, vm); qemuDomainObjExitMonitorWithDriver(driver, vm);
goto endjob; goto endjob;
@ -402,7 +402,8 @@ qemuProcessFakeReboot(void *opaque)
} }
if (qemuProcessStartCPUs(driver, vm, NULL, if (qemuProcessStartCPUs(driver, vm, NULL,
VIR_DOMAIN_RUNNING_BOOTED) < 0) { VIR_DOMAIN_RUNNING_BOOTED,
QEMU_ASYNC_JOB_NONE) < 0) {
if (virGetLastError() == NULL) if (virGetLastError() == NULL)
qemuReportError(VIR_ERR_INTERNAL_ERROR, qemuReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("resume operation failed")); "%s", _("resume operation failed"));
@ -850,7 +851,7 @@ qemuConnectMonitor(struct qemud_driver *driver, virDomainObjPtr vm)
} }
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
ret = qemuMonitorSetCapabilities(priv->mon); ret = qemuMonitorSetCapabilities(priv->mon);
qemuDomainObjExitMonitorWithDriver(driver, vm); qemuDomainObjExitMonitorWithDriver(driver, vm);
@ -1202,7 +1203,7 @@ qemuProcessWaitForMonitor(struct qemud_driver* driver,
goto cleanup; goto cleanup;
priv = vm->privateData; priv = vm->privateData;
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
ret = qemuMonitorGetPtyPaths(priv->mon, paths); ret = qemuMonitorGetPtyPaths(priv->mon, paths);
qemuDomainObjExitMonitorWithDriver(driver, vm); qemuDomainObjExitMonitorWithDriver(driver, vm);
@ -1257,7 +1258,7 @@ qemuProcessDetectVcpuPIDs(struct qemud_driver *driver,
/* What follows is now all KVM specific */ /* What follows is now all KVM specific */
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
if ((ncpupids = qemuMonitorGetCPUInfo(priv->mon, &cpupids)) < 0) { if ((ncpupids = qemuMonitorGetCPUInfo(priv->mon, &cpupids)) < 0) {
qemuDomainObjExitMonitorWithDriver(driver, vm); qemuDomainObjExitMonitorWithDriver(driver, vm);
return -1; return -1;
@ -1551,7 +1552,7 @@ qemuProcessInitPasswords(virConnectPtr conn,
goto cleanup; goto cleanup;
alias = vm->def->disks[i]->info.alias; alias = vm->def->disks[i]->info.alias;
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
ret = qemuMonitorSetDrivePassphrase(priv->mon, alias, secret); ret = qemuMonitorSetDrivePassphrase(priv->mon, alias, secret);
VIR_FREE(secret); VIR_FREE(secret);
qemuDomainObjExitMonitorWithDriver(driver, vm); qemuDomainObjExitMonitorWithDriver(driver, vm);
@ -1942,7 +1943,7 @@ qemuProcessInitPCIAddresses(struct qemud_driver *driver,
int ret; int ret;
qemuMonitorPCIAddress *addrs = NULL; qemuMonitorPCIAddress *addrs = NULL;
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
naddrs = qemuMonitorGetAllPCIAddresses(priv->mon, naddrs = qemuMonitorGetAllPCIAddresses(priv->mon,
&addrs); &addrs);
qemuDomainObjExitMonitorWithDriver(driver, vm); qemuDomainObjExitMonitorWithDriver(driver, vm);
@ -2148,7 +2149,8 @@ qemuProcessPrepareMonitorChr(struct qemud_driver *driver,
*/ */
int int
qemuProcessStartCPUs(struct qemud_driver *driver, virDomainObjPtr vm, qemuProcessStartCPUs(struct qemud_driver *driver, virDomainObjPtr vm,
virConnectPtr conn, virDomainRunningReason reason) virConnectPtr conn, virDomainRunningReason reason,
enum qemuDomainAsyncJob asyncJob)
{ {
int ret; int ret;
qemuDomainObjPrivatePtr priv = vm->privateData; qemuDomainObjPrivatePtr priv = vm->privateData;
@ -2163,9 +2165,11 @@ qemuProcessStartCPUs(struct qemud_driver *driver, virDomainObjPtr vm,
} }
VIR_FREE(priv->lockState); VIR_FREE(priv->lockState);
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); ret = qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob);
ret = qemuMonitorStartCPUs(priv->mon, conn); if (ret == 0) {
qemuDomainObjExitMonitorWithDriver(driver, vm); ret = qemuMonitorStartCPUs(priv->mon, conn);
qemuDomainObjExitMonitorWithDriver(driver, vm);
}
if (ret == 0) { if (ret == 0) {
virDomainObjSetState(vm, VIR_DOMAIN_RUNNING, reason); virDomainObjSetState(vm, VIR_DOMAIN_RUNNING, reason);
@ -2180,7 +2184,8 @@ qemuProcessStartCPUs(struct qemud_driver *driver, virDomainObjPtr vm,
int qemuProcessStopCPUs(struct qemud_driver *driver, virDomainObjPtr vm, int qemuProcessStopCPUs(struct qemud_driver *driver, virDomainObjPtr vm,
virDomainPausedReason reason) virDomainPausedReason reason,
enum qemuDomainAsyncJob asyncJob)
{ {
int ret; int ret;
int oldState; int oldState;
@ -2191,7 +2196,7 @@ int qemuProcessStopCPUs(struct qemud_driver *driver, virDomainObjPtr vm,
oldState = virDomainObjGetState(vm, &oldReason); oldState = virDomainObjGetState(vm, &oldReason);
virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, reason); virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, reason);
ret = qemuDomainObjEnterMonitorWithDriver(driver, vm); ret = qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob);
if (ret == 0) { if (ret == 0) {
ret = qemuMonitorStopCPUs(priv->mon); ret = qemuMonitorStopCPUs(priv->mon);
qemuDomainObjExitMonitorWithDriver(driver, vm); qemuDomainObjExitMonitorWithDriver(driver, vm);
@ -2254,7 +2259,7 @@ qemuProcessUpdateState(struct qemud_driver *driver, virDomainObjPtr vm)
bool running; bool running;
int ret; int ret;
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
ret = qemuMonitorGetStatus(priv->mon, &running); ret = qemuMonitorGetStatus(priv->mon, &running);
qemuDomainObjExitMonitorWithDriver(driver, vm); qemuDomainObjExitMonitorWithDriver(driver, vm);
@ -2315,7 +2320,8 @@ qemuProcessRecoverMigration(struct qemud_driver *driver,
VIR_DEBUG("Incoming migration finished, resuming domain %s", VIR_DEBUG("Incoming migration finished, resuming domain %s",
vm->def->name); vm->def->name);
if (qemuProcessStartCPUs(driver, vm, conn, if (qemuProcessStartCPUs(driver, vm, conn,
VIR_DOMAIN_RUNNING_UNPAUSED) < 0) { VIR_DOMAIN_RUNNING_UNPAUSED,
QEMU_ASYNC_JOB_NONE) < 0) {
VIR_WARN("Could not resume domain %s", vm->def->name); VIR_WARN("Could not resume domain %s", vm->def->name);
} }
break; break;
@ -2346,7 +2352,7 @@ qemuProcessRecoverMigration(struct qemud_driver *driver,
* domain */ * domain */
VIR_DEBUG("Canceling unfinished outgoing migration of domain %s", VIR_DEBUG("Canceling unfinished outgoing migration of domain %s",
vm->def->name); vm->def->name);
ignore_value(qemuDomainObjEnterMonitor(driver, vm)); qemuDomainObjEnterMonitor(driver, vm);
ignore_value(qemuMonitorMigrateCancel(priv->mon)); ignore_value(qemuMonitorMigrateCancel(priv->mon));
qemuDomainObjExitMonitor(driver, vm); qemuDomainObjExitMonitor(driver, vm);
/* resume the domain but only if it was paused as a result of /* resume the domain but only if it was paused as a result of
@ -2355,7 +2361,8 @@ qemuProcessRecoverMigration(struct qemud_driver *driver,
(reason == VIR_DOMAIN_PAUSED_MIGRATION || (reason == VIR_DOMAIN_PAUSED_MIGRATION ||
reason == VIR_DOMAIN_PAUSED_UNKNOWN)) { reason == VIR_DOMAIN_PAUSED_UNKNOWN)) {
if (qemuProcessStartCPUs(driver, vm, conn, if (qemuProcessStartCPUs(driver, vm, conn,
VIR_DOMAIN_RUNNING_UNPAUSED) < 0) { VIR_DOMAIN_RUNNING_UNPAUSED,
QEMU_ASYNC_JOB_NONE) < 0) {
VIR_WARN("Could not resume domain %s", vm->def->name); VIR_WARN("Could not resume domain %s", vm->def->name);
} }
} }
@ -2375,7 +2382,8 @@ qemuProcessRecoverMigration(struct qemud_driver *driver,
(reason == VIR_DOMAIN_PAUSED_MIGRATION || (reason == VIR_DOMAIN_PAUSED_MIGRATION ||
reason == VIR_DOMAIN_PAUSED_UNKNOWN)) { reason == VIR_DOMAIN_PAUSED_UNKNOWN)) {
if (qemuProcessStartCPUs(driver, vm, conn, if (qemuProcessStartCPUs(driver, vm, conn,
VIR_DOMAIN_RUNNING_UNPAUSED) < 0) { VIR_DOMAIN_RUNNING_UNPAUSED,
QEMU_ASYNC_JOB_NONE) < 0) {
VIR_WARN("Could not resume domain %s", vm->def->name); VIR_WARN("Could not resume domain %s", vm->def->name);
} }
} }
@ -2412,11 +2420,13 @@ qemuProcessRecoverJob(struct qemud_driver *driver,
case QEMU_ASYNC_JOB_SAVE: case QEMU_ASYNC_JOB_SAVE:
case QEMU_ASYNC_JOB_DUMP: case QEMU_ASYNC_JOB_DUMP:
ignore_value(qemuDomainObjEnterMonitor(driver, vm)); qemuDomainObjEnterMonitor(driver, vm);
ignore_value(qemuMonitorMigrateCancel(priv->mon)); ignore_value(qemuMonitorMigrateCancel(priv->mon));
qemuDomainObjExitMonitor(driver, vm); qemuDomainObjExitMonitor(driver, vm);
/* resume the domain but only if it was paused as a result of /* resume the domain but only if it was paused as a result of
* running save/dump operation */ * running save/dump operation. Although we are recovering an
* async job, this function is run at startup and must resume
* things using sync monitor connections. */
if (state == VIR_DOMAIN_PAUSED && if (state == VIR_DOMAIN_PAUSED &&
((job->asyncJob == QEMU_ASYNC_JOB_DUMP && ((job->asyncJob == QEMU_ASYNC_JOB_DUMP &&
reason == VIR_DOMAIN_PAUSED_DUMP) || reason == VIR_DOMAIN_PAUSED_DUMP) ||
@ -2424,7 +2434,8 @@ qemuProcessRecoverJob(struct qemud_driver *driver,
reason == VIR_DOMAIN_PAUSED_SAVE) || reason == VIR_DOMAIN_PAUSED_SAVE) ||
reason == VIR_DOMAIN_PAUSED_UNKNOWN)) { reason == VIR_DOMAIN_PAUSED_UNKNOWN)) {
if (qemuProcessStartCPUs(driver, vm, conn, if (qemuProcessStartCPUs(driver, vm, conn,
VIR_DOMAIN_RUNNING_UNPAUSED) < 0) { VIR_DOMAIN_RUNNING_UNPAUSED,
QEMU_ASYNC_JOB_NONE) < 0) {
VIR_WARN("Could not resume domain %s after", vm->def->name); VIR_WARN("Could not resume domain %s after", vm->def->name);
} }
} }
@ -2974,9 +2985,13 @@ int qemuProcessStart(virConnectPtr conn,
goto cleanup; goto cleanup;
} }
/* Technically, qemuProcessStart can be called from inside
* QEMU_ASYNC_JOB_MIGRATION_IN, but we are okay treating this like
* a sync job since no other job can call into the domain until
* migration completes. */
VIR_DEBUG("Setting initial memory amount"); VIR_DEBUG("Setting initial memory amount");
cur_balloon = vm->def->mem.cur_balloon; cur_balloon = vm->def->mem.cur_balloon;
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (qemuMonitorSetBalloon(priv->mon, cur_balloon) < 0) { if (qemuMonitorSetBalloon(priv->mon, cur_balloon) < 0) {
qemuDomainObjExitMonitorWithDriver(driver, vm); qemuDomainObjExitMonitorWithDriver(driver, vm);
goto cleanup; goto cleanup;
@ -2987,7 +3002,8 @@ int qemuProcessStart(virConnectPtr conn,
VIR_DEBUG("Starting domain CPUs"); VIR_DEBUG("Starting domain CPUs");
/* Allow the CPUS to start executing */ /* Allow the CPUS to start executing */
if (qemuProcessStartCPUs(driver, vm, conn, if (qemuProcessStartCPUs(driver, vm, conn,
VIR_DOMAIN_RUNNING_BOOTED) < 0) { VIR_DOMAIN_RUNNING_BOOTED,
QEMU_ASYNC_JOB_NONE) < 0) {
if (virGetLastError() == NULL) if (virGetLastError() == NULL)
qemuReportError(VIR_ERR_INTERNAL_ERROR, qemuReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("resume operation failed")); "%s", _("resume operation failed"));
@ -3396,7 +3412,7 @@ int qemuProcessAttach(virConnectPtr conn ATTRIBUTE_UNUSED,
} }
VIR_DEBUG("Getting initial memory amount"); VIR_DEBUG("Getting initial memory amount");
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm)); qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (qemuMonitorGetBalloonInfo(priv->mon, &vm->def->mem.cur_balloon) < 0) { if (qemuMonitorGetBalloonInfo(priv->mon, &vm->def->mem.cur_balloon) < 0) {
qemuDomainObjExitMonitorWithDriver(driver, vm); qemuDomainObjExitMonitorWithDriver(driver, vm);
goto cleanup; goto cleanup;

View File

@ -23,6 +23,7 @@
# define __QEMU_PROCESS_H__ # define __QEMU_PROCESS_H__
# include "qemu_conf.h" # include "qemu_conf.h"
# include "qemu_domain.h"
int qemuProcessPrepareMonitorChr(struct qemud_driver *driver, int qemuProcessPrepareMonitorChr(struct qemud_driver *driver,
virDomainChrSourceDefPtr monConfig, virDomainChrSourceDefPtr monConfig,
@ -31,10 +32,12 @@ int qemuProcessPrepareMonitorChr(struct qemud_driver *driver,
int qemuProcessStartCPUs(struct qemud_driver *driver, int qemuProcessStartCPUs(struct qemud_driver *driver,
virDomainObjPtr vm, virDomainObjPtr vm,
virConnectPtr conn, virConnectPtr conn,
virDomainRunningReason reason); virDomainRunningReason reason,
enum qemuDomainAsyncJob asyncJob);
int qemuProcessStopCPUs(struct qemud_driver *driver, int qemuProcessStopCPUs(struct qemud_driver *driver,
virDomainObjPtr vm, virDomainObjPtr vm,
virDomainPausedReason reason); virDomainPausedReason reason,
enum qemuDomainAsyncJob asyncJob);
void qemuProcessAutostartAll(struct qemud_driver *driver); void qemuProcessAutostartAll(struct qemud_driver *driver);
void qemuProcessReconnectAll(virConnectPtr conn, struct qemud_driver *driver); void qemuProcessReconnectAll(virConnectPtr conn, struct qemud_driver *driver);