qemu: Don't explicitly stop CPUs after migration

With a very old QEMU which doesn't support events, we need to explicitly
call qemuMigrationSetOffline at the end of migration to update our
internal state. On the other hand, if we talk to QEMU using QMP, we
should just wait for the STOP event and let the event handler update the
state and trigger a libvirt event.

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
Author: Jiri Denemark <jdenemar@redhat.com>
Date:   2016-01-05 22:19:28 +01:00
Commit: 315808e99e (parent 5d01e8666b)

3 changed files with 33 additions and 16 deletions
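
The fix leans on the standard condition-variable handshake: the thread that needs to observe the STOP raises a flag and sleeps on the domain condition, and the event handler broadcasts that condition once it has updated the state. Below is a minimal, self-contained sketch of that handshake in plain pthreads; all names are hypothetical and none of this is libvirt code (compile with cc -pthread):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    struct domain {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        bool signal_stop;   /* a waiter wants to hear about STOP */
        bool running;       /* simplified stand-in for the domain state */
    };

    /* Migration thread: block until the STOP handler has updated the state. */
    static void wait_for_stop(struct domain *dom)
    {
        pthread_mutex_lock(&dom->lock);
        while (dom->running) {      /* re-check; wakeups may have other causes */
            dom->signal_stop = true;
            pthread_cond_wait(&dom->cond, &dom->lock); /* drops lock while asleep */
            dom->signal_stop = false;
        }
        pthread_mutex_unlock(&dom->lock);
    }

    /* Event thread: STOP arrived; update state, then wake the waiter if any. */
    static void handle_stop(struct domain *dom)
    {
        pthread_mutex_lock(&dom->lock);
        dom->running = false;
        if (dom->signal_stop)
            pthread_cond_broadcast(&dom->cond);
        pthread_mutex_unlock(&dom->lock);
    }

    static void *event_thread(void *opaque)
    {
        sleep(1);                   /* pretend QEMU sends STOP a bit later */
        handle_stop(opaque);
        return NULL;
    }

    int main(void)
    {
        struct domain dom = {
            .lock = PTHREAD_MUTEX_INITIALIZER,
            .cond = PTHREAD_COND_INITIALIZER,
            .running = true,
        };
        pthread_t tid;

        pthread_create(&tid, NULL, event_thread, &dom);
        wait_for_stop(&dom);        /* returns once handle_stop() has run */
        printf("domain paused\n");
        pthread_join(tid, NULL);
        return 0;
    }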

src/qemu/qemu_domain.h

@@ -205,6 +205,8 @@ struct _qemuDomainObjPrivate {
     bool signalIOError; /* true if the domain condition should be signalled on
                            I/O error */
+    bool signalStop; /* true if the domain condition should be signalled on
+                        QMP STOP event */
 
     char *machineName;
     char *libDir;            /* base path for per-domain files */
     char *channelTargetDir;  /* base path for per-domain channel targets */
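
A note on the new field: as far as I can tell it needs no atomic operations, because both sides touch signalStop only while holding the domain object lock (the STOP handler locks vm first, and virDomainObjWait reacquires the lock before returning), so a plain bool is enough.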

src/qemu/qemu_migration.c

@@ -4491,19 +4491,24 @@ qemuMigrationRun(virQEMUDriverPtr driver,
     else if (rc == -1)
         goto cleanup;
 
-    /* When migration completed, QEMU will have paused the
-     * CPUs for us, but unless we're using the JSON monitor
-     * we won't have been notified of this, so might still
-     * think we're running. For v2 protocol this doesn't
-     * matter because we'll kill the VM soon, but for v3
-     * this is important because we stay paused until the
-     * confirm3 step, but need to release the lock state
+    /* When migration completed, QEMU will have paused the CPUs for us.
+     * Wait for the STOP event to be processed or explicitly stop CPUs
+     * (for old QEMU which does not send events) to release the lock state.
      */
-    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
-        if (qemuMigrationSetOffline(driver, vm) < 0) {
-            priv->job.current->type = VIR_DOMAIN_JOB_FAILED;
-            goto cleanup;
-        }
-    }
+    if (priv->monJSON) {
+        while (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
+            priv->signalStop = true;
+            rc = virDomainObjWait(vm);
+            priv->signalStop = false;
+            if (rc < 0) {
+                priv->job.current->type = VIR_DOMAIN_JOB_FAILED;
+                goto cleanup;
+            }
+        }
+    } else if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING &&
+               qemuMigrationSetOffline(driver, vm) < 0) {
+        priv->job.current->type = VIR_DOMAIN_JOB_FAILED;
+        goto cleanup;
+    }
 
     ret = 0;
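
Two details of the wait loop are worth calling out. The state is re-checked on every wakeup because the per-domain condition is presumably broadcast for other reasons too (I/O errors use the same mechanism via signalIOError), so one wakeup does not necessarily mean the STOP event was processed. And signalStop is raised only for the duration of the wait, so the handler does not broadcast when nobody is listening.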

src/qemu/qemu_process.c

@@ -697,6 +697,8 @@ qemuProcessHandleStop(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
 {
     virQEMUDriverPtr driver = opaque;
     virObjectEventPtr event = NULL;
+    virDomainPausedReason reason = VIR_DOMAIN_PAUSED_UNKNOWN;
+    virDomainEventSuspendedDetailType detail = VIR_DOMAIN_EVENT_SUSPENDED_PAUSED;
     virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
 
     virObjectLock(vm);
@@ -708,16 +710,24 @@ qemuProcessHandleStop(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
         goto unlock;
     }
 
-    VIR_DEBUG("Transitioned guest %s to paused state",
-              vm->def->name);
+    if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT) {
+        reason = VIR_DOMAIN_PAUSED_MIGRATION;
+        detail = VIR_DOMAIN_EVENT_SUSPENDED_MIGRATED;
+    }
+
+    VIR_DEBUG("Transitioned guest %s to paused state, reason %s",
+              vm->def->name, virDomainPausedReasonTypeToString(reason));
 
     if (priv->job.current)
         ignore_value(virTimeMillisNow(&priv->job.current->stopped));
 
-    virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_UNKNOWN);
+    if (priv->signalStop)
+        virDomainObjBroadcast(vm);
+
+    virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, reason);
     event = virDomainEventLifecycleNewFromObj(vm,
-                                              VIR_DOMAIN_EVENT_SUSPENDED,
-                                              VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
+                                              VIR_DOMAIN_EVENT_SUSPENDED,
+                                              detail);
 
     VIR_FREE(priv->lockState);
     if (virDomainLockProcessPause(driver->lockManager, vm, &priv->lockState) < 0)
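
For reference, the event this handler reacts to is QEMU's QMP STOP notification, which on the wire looks roughly like {"event": "STOP", "timestamp": {"seconds": ..., "microseconds": ...}} (illustrative, not captured from a real session). With this change, a STOP that arrives while an outgoing migration job is active is reported as VIR_DOMAIN_PAUSED_MIGRATION with the VIR_DOMAIN_EVENT_SUSPENDED_MIGRATED detail rather than the generic paused/unknown pair, and the broadcast wakes the migration thread blocked in virDomainObjWait.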