qemu: Avoid duplicate resume events and state changes
The only place where VIR_DOMAIN_EVENT_RESUMED should be generated is the RESUME event handler, which makes sure we don't generate duplicate events or state changes. In the worst case the duplication can revert or mask changes done by other event handlers. For example, after QEMU sent RESUME, BLOCK_IO_ERROR, and STOP events we could happily mark the domain as running and report VIR_DOMAIN_EVENT_RESUMED to registered clients.

https://bugzilla.redhat.com/show_bug.cgi?id=1612943

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
Reviewed-by: John Ferlan <jferlan@redhat.com>
parent 8ae9b49f5a
commit e6d77a75c4
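To see why the spurious events matter, consider a management client that tracks domain state purely from lifecycle events. Below is a minimal sketch of such a client against the public libvirt C API; the callback name, the qemu:///system URI, and the bare event loop are illustrative assumptions, not part of this commit. With the bug described above, such a client could receive VIR_DOMAIN_EVENT_RESUMED after a BLOCK_IO_ERROR/STOP sequence and wrongly conclude the domain is running.

#include <stdio.h>
#include <libvirt/libvirt.h>

/* Print every lifecycle event as it arrives. */
static int
lifecycleCallback(virConnectPtr conn,
                  virDomainPtr dom,
                  int event,
                  int detail,
                  void *opaque)
{
    printf("domain %s: lifecycle event %d, detail %d\n",
           virDomainGetName(dom), event, detail);
    return 0;
}

int
main(void)
{
    virConnectPtr conn;

    /* An event loop implementation must be registered before opening
     * the connection that will deliver events. */
    if (virEventRegisterDefaultImpl() < 0)
        return 1;

    if (!(conn = virConnectOpenReadOnly("qemu:///system")))
        return 1;

    if (virConnectDomainEventRegisterAny(conn, NULL,
                                         VIR_DOMAIN_EVENT_ID_LIFECYCLE,
                                         VIR_DOMAIN_EVENT_CALLBACK(lifecycleCallback),
                                         NULL, NULL) < 0) {
        virConnectClose(conn);
        return 1;
    }

    /* Dispatch events until the loop fails (e.g. the connection closes). */
    while (virEventRunDefaultImpl() == 0)
        ;

    virConnectClose(conn);
    return 0;
}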
src/qemu/qemu_driver.c
@@ -1861,7 +1861,6 @@ static int qemuDomainResume(virDomainPtr dom)
     virQEMUDriverPtr driver = dom->conn->privateData;
     virDomainObjPtr vm;
     int ret = -1;
-    virObjectEventPtr event = NULL;
     int state;
     int reason;
     virQEMUDriverConfigPtr cfg = NULL;
@@ -1900,9 +1899,6 @@ static int qemuDomainResume(virDomainPtr dom)
                                "%s", _("resume operation failed"));
                 goto endjob;
             }
-            event = virDomainEventLifecycleNewFromObj(vm,
-                                                      VIR_DOMAIN_EVENT_RESUMED,
-                                                      VIR_DOMAIN_EVENT_RESUMED_UNPAUSED);
         }
     if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0)
         goto endjob;
@@ -1913,7 +1909,6 @@ static int qemuDomainResume(virDomainPtr dom)

 cleanup:
     virDomainObjEndAPI(&vm);
-    virObjectEventStateQueue(driver->domainEventState, event);
     virObjectUnref(cfg);
     return ret;
 }
@@ -15983,7 +15978,6 @@ qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
     virDomainDefPtr config = NULL;
     virQEMUDriverConfigPtr cfg = NULL;
     virCapsPtr caps = NULL;
-    bool was_running = false;
     bool was_stopped = false;
     qemuDomainSaveCookiePtr cookie;
     virCPUDefPtr origCPU = NULL;
@@ -16174,7 +16168,6 @@ qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
             priv = vm->privateData;
             if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
                 /* Transitions 5, 6 */
-                was_running = true;
                 if (qemuProcessStopCPUs(driver, vm,
                                         VIR_DOMAIN_PAUSED_FROM_SNAPSHOT,
                                         QEMU_ASYNC_JOB_START) < 0)
@@ -16271,12 +16264,6 @@ qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
                     event = virDomainEventLifecycleNewFromObj(vm,
                                                               VIR_DOMAIN_EVENT_STARTED,
                                                               detail);
-                } else if (!was_running) {
-                    /* Transition 8 */
-                    detail = VIR_DOMAIN_EVENT_RESUMED_FROM_SNAPSHOT;
-                    event = virDomainEventLifecycleNewFromObj(vm,
-                                                              VIR_DOMAIN_EVENT_RESUMED,
-                                                              detail);
                 }
             }
             break;
src/qemu/qemu_migration.c
@@ -2982,14 +2982,10 @@ qemuMigrationSrcConfirmPhase(virQEMUDriverPtr driver,
         virFreeError(orig_err);

         if (virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED &&
-            reason == VIR_DOMAIN_PAUSED_POSTCOPY) {
+            reason == VIR_DOMAIN_PAUSED_POSTCOPY)
             qemuMigrationAnyPostcopyFailed(driver, vm);
-        } else if (qemuMigrationSrcRestoreDomainState(driver, vm)) {
-            event = virDomainEventLifecycleNewFromObj(vm,
-                                                      VIR_DOMAIN_EVENT_RESUMED,
-                                                      VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
-            virObjectEventStateQueue(driver->domainEventState, event);
-        }
+        else
+            qemuMigrationSrcRestoreDomainState(driver, vm);

         qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                  priv->job.migParams, priv->job.apiFlags);
@@ -4624,11 +4620,7 @@ qemuMigrationSrcPerformJob(virQEMUDriverPtr driver,
     qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                              priv->job.migParams, priv->job.apiFlags);

-    if (qemuMigrationSrcRestoreDomainState(driver, vm)) {
-        event = virDomainEventLifecycleNewFromObj(vm,
-                                                  VIR_DOMAIN_EVENT_RESUMED,
-                                                  VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
-    }
+    qemuMigrationSrcRestoreDomainState(driver, vm);

     qemuMigrationJobFinish(driver, vm);
     if (!virDomainObjIsActive(vm) && ret == 0) {
@@ -4672,7 +4664,6 @@ qemuMigrationSrcPerformPhase(virQEMUDriverPtr driver,
                              unsigned long resource)
 {
     qemuDomainObjPrivatePtr priv = vm->privateData;
-    virObjectEventPtr event = NULL;
     int ret = -1;

     /* If we didn't start the job in the begin phase, start it now. */
@@ -4694,11 +4685,7 @@ qemuMigrationSrcPerformPhase(virQEMUDriverPtr driver,
                                      nmigrate_disks, migrate_disks, migParams);

     if (ret < 0) {
-        if (qemuMigrationSrcRestoreDomainState(driver, vm)) {
-            event = virDomainEventLifecycleNewFromObj(vm,
-                                                      VIR_DOMAIN_EVENT_RESUMED,
-                                                      VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
-        }
+        qemuMigrationSrcRestoreDomainState(driver, vm);
         goto endjob;
     }

@@ -4722,7 +4709,6 @@ qemuMigrationSrcPerformPhase(virQEMUDriverPtr driver,

 cleanup:
     virDomainObjEndAPI(&vm);
-    virObjectEventStateQueue(driver->domainEventState, event);
     return ret;
 }

@@ -5074,13 +5060,8 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver,
                 goto endjob;
             }

-            if (inPostCopy) {
+            if (inPostCopy)
                 doKill = false;
-                event = virDomainEventLifecycleNewFromObj(vm,
-                                                          VIR_DOMAIN_EVENT_RESUMED,
-                                                          VIR_DOMAIN_EVENT_RESUMED_POSTCOPY);
-                virObjectEventStateQueue(driver->domainEventState, event);
-            }
         }

         if (mig->jobInfo) {
@@ -5111,10 +5092,20 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver,

     dom = virGetDomain(dconn, vm->def->name, vm->def->uuid, vm->def->id);

-    event = virDomainEventLifecycleNewFromObj(vm,
-                                              VIR_DOMAIN_EVENT_RESUMED,
-                                              VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
-    virObjectEventStateQueue(driver->domainEventState, event);
+    if (inPostCopy) {
+        /* The only RESUME event during post-copy migration is triggered by
+         * QEMU when the running domain moves from the source to the
+         * destination host, but then the migration keeps running until all
+         * modified memory is transferred from the source host. This will
+         * result in VIR_DOMAIN_EVENT_RESUMED with RESUMED_POSTCOPY detail.
+         * However, our API documentation says we need to fire another RESUMED
+         * event at the very end of migration with RESUMED_MIGRATED detail.
+         */
+        event = virDomainEventLifecycleNewFromObj(vm,
+                                                  VIR_DOMAIN_EVENT_RESUMED,
+                                                  VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
+        virObjectEventStateQueue(driver->domainEventState, event);
+    }

     if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
         virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_USER);
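The comment introduced in this hunk implies that, for a post-copy migration, a client will now see two RESUMED events on the destination: one with RESUMED_POSTCOPY detail when the guest starts running there, and a final one with RESUMED_MIGRATED detail once migration completes. A lifecycle callback can tell them apart by the detail code; the fragment below is only an illustration using the public constants and is meant to sit inside a callback like the one sketched earlier, not code from this commit.

/* event and detail are the arguments passed to a lifecycle callback. */
if (event == VIR_DOMAIN_EVENT_RESUMED) {
    switch (detail) {
    case VIR_DOMAIN_EVENT_RESUMED_POSTCOPY:
        /* Guest already runs on the destination; memory is still
         * being transferred from the source. */
        break;
    case VIR_DOMAIN_EVENT_RESUMED_MIGRATED:
        /* Migration has fully finished. */
        break;
    case VIR_DOMAIN_EVENT_RESUMED_UNPAUSED:
    case VIR_DOMAIN_EVENT_RESUMED_FROM_SNAPSHOT:
    default:
        /* Resumed for reasons unrelated to migration. */
        break;
    }
}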
src/qemu/qemu_process.c
@@ -456,7 +456,6 @@ qemuProcessFakeReboot(void *opaque)
     virDomainObjPtr vm = opaque;
     qemuDomainObjPrivatePtr priv = vm->privateData;
     virQEMUDriverPtr driver = priv->driver;
-    virObjectEventPtr event = NULL;
     virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
     virDomainRunningReason reason = VIR_DOMAIN_RUNNING_BOOTED;
     int ret = -1, rc;
@@ -493,9 +492,6 @@ qemuProcessFakeReboot(void *opaque)
         goto endjob;
     }
     priv->gotShutdown = false;
-    event = virDomainEventLifecycleNewFromObj(vm,
-                                              VIR_DOMAIN_EVENT_RESUMED,
-                                              VIR_DOMAIN_EVENT_RESUMED_UNPAUSED);

     if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0) {
         VIR_WARN("Unable to save status on vm %s after state change",
@@ -511,7 +507,6 @@ qemuProcessFakeReboot(void *opaque)
     if (ret == -1)
        ignore_value(qemuProcessKill(vm, VIR_QEMU_PROCESS_KILL_FORCE));
     virDomainObjEndAPI(&vm);
-    virObjectEventStateQueue(driver->domainEventState, event);
     virObjectUnref(cfg);
 }

@@ -3109,7 +3104,10 @@ qemuProcessStartCPUs(virQEMUDriverPtr driver, virDomainObjPtr vm,
     if (ret < 0)
         goto release;

-    virDomainObjSetState(vm, VIR_DOMAIN_RUNNING, reason);
+    /* The RESUME event handler will change the domain state with the reason
+     * saved in priv->runningReason and it will also emit corresponding domain
+     * lifecycle event.
+     */

 cleanup:
     virObjectUnref(cfg);
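For reference, the comment added above describes the pattern the series converges on: the caller records the intended running reason (in priv->runningReason) before asking QEMU to resume vCPUs, and the RESUME event handler becomes the single place that flips the domain to VIR_DOMAIN_RUNNING and queues VIR_DOMAIN_EVENT_RESUMED. The sketch below shows that shape only; it is not the verbatim libvirt handler, and the qemuDomainRunningReasonToResumeEvent() helper used to map the reason to an event detail is an assumption of this sketch.

/* Simplified sketch of a RESUME event handler following the pattern
 * described in the comment above (not verbatim libvirt code). */
static int
qemuProcessHandleResume(qemuMonitorPtr mon,
                        virDomainObjPtr vm,
                        void *opaque)
{
    virQEMUDriverPtr driver = opaque;
    virObjectEventPtr event = NULL;

    virObjectLock(vm);

    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
        qemuDomainObjPrivatePtr priv = vm->privateData;
        virDomainRunningReason reason = priv->runningReason;

        /* Consume the reason stashed by the caller (e.g. qemuProcessStartCPUs)
         * so a later, unrelated RESUME does not reuse it. */
        priv->runningReason = VIR_DOMAIN_RUNNING_UNKNOWN;

        virDomainObjSetState(vm, VIR_DOMAIN_RUNNING, reason);
        event = virDomainEventLifecycleNewFromObj(
                    vm, VIR_DOMAIN_EVENT_RESUMED,
                    qemuDomainRunningReasonToResumeEvent(reason));
    }

    virObjectUnlock(vm);
    virObjectEventStateQueue(driver->domainEventState, event);
    return 0;
}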