Ignore qemu STOP event when stopping CPUs

With the JSON qemu monitor, we get a STOP event from qemu whenever qemu
stops the guest's CPUs. The downside is that vm->state is changed to
PAUSED and a generic paused event is sent to applications. However, when
we ask qemu to stop the CPUs ourselves, we are not really interested in
the qemu event and we usually want to issue a more specific event instead.

By setting vm->state to PAUSED before actually sending the request to
qemu (and resetting it back if the request fails) we can ignore the
event, since the event handler does nothing when the guest is already
paused. This solution is quite hacky, but unfortunately it's the best
solution I was able to come up with, and it does not introduce a race
condition.
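
For reference, the reason presetting vm->state works is the shape of the
driver's STOP event callback: it only records the transition and emits the
generic SUSPENDED event when the domain is not already marked paused. The
sketch below is purely illustrative (the handler name is hypothetical and
locking/error handling are omitted); it is not a copy of the qemu driver's
actual callback.

/* Hypothetical sketch of the qemu driver's STOP event callback.
 * Because callers now set vm->state = VIR_DOMAIN_PAUSED before issuing
 * the "stop" command, the expected STOP event hits the first branch and
 * no generic event is emitted. */
static void
exampleHandleDomainStop(struct qemud_driver *driver, virDomainObjPtr vm)
{
    virDomainEventPtr event;

    if (vm->state == VIR_DOMAIN_PAUSED)
        return; /* we initiated the stop ourselves; nothing to do */

    /* an unexpected stop, e.g. triggered directly via the monitor */
    vm->state = VIR_DOMAIN_PAUSED;
    event = virDomainEventNewFromObj(vm,
                                     VIR_DOMAIN_EVENT_SUSPENDED,
                                     VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
    if (event)
        qemuDomainEventQueue(driver, event);
}

Resetting vm->state in each caller when the monitor command fails keeps
this shortcut from masking a real state change if qemu rejects the request.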
commit 35b6137696
parent 29bca037aa
Author: Jiri Denemark
Date:   2010-04-27 10:10:31 +02:00

@@ -4117,13 +4117,16 @@ static int qemudDomainSuspend(virDomainPtr dom) {
     }
     if (vm->state != VIR_DOMAIN_PAUSED) {
         int rc;
+        int state = vm->state;

+        vm->state = VIR_DOMAIN_PAUSED;
         qemuDomainObjEnterMonitorWithDriver(driver, vm);
         rc = qemuMonitorStopCPUs(priv->mon);
         qemuDomainObjExitMonitorWithDriver(driver, vm);
-        if (rc < 0)
+        if (rc < 0) {
+            vm->state = state;
             goto endjob;
-        vm->state = VIR_DOMAIN_PAUSED;
+        }
         event = virDomainEventNewFromObj(vm,
                                          VIR_DOMAIN_EVENT_SUSPENDED,
                                          VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
@@ -4491,8 +4494,10 @@ qemuDomainMigrateOffline(struct qemud_driver *driver,
                          virDomainObjPtr vm)
 {
     qemuDomainObjPrivatePtr priv = vm->privateData;
+    int state = vm->state;
     int ret;

+    vm->state = VIR_DOMAIN_PAUSED;
     qemuDomainObjEnterMonitorWithDriver(driver, vm);
     ret = qemuMonitorStopCPUs(priv->mon);
     qemuDomainObjExitMonitorWithDriver(driver, vm);
@@ -4500,13 +4505,13 @@ qemuDomainMigrateOffline(struct qemud_driver *driver,
     if (ret == 0) {
         virDomainEventPtr event;

-        vm->state = VIR_DOMAIN_PAUSED;
         event = virDomainEventNewFromObj(vm,
                                          VIR_DOMAIN_EVENT_SUSPENDED,
                                          VIR_DOMAIN_EVENT_SUSPENDED_MIGRATED);
         if (event)
             qemuDomainEventQueue(driver, event);
-    }
+    } else
+        vm->state = state;

     return ret;
 }
@@ -4743,13 +4748,14 @@ static int qemudDomainSaveFlag(virDomainPtr dom, const char *path,
     /* Pause */
     if (vm->state == VIR_DOMAIN_RUNNING) {
         header.was_running = 1;
+        vm->state = VIR_DOMAIN_PAUSED;
         qemuDomainObjEnterMonitorWithDriver(driver, vm);
         if (qemuMonitorStopCPUs(priv->mon) < 0) {
             qemuDomainObjExitMonitorWithDriver(driver, vm);
+            vm->state = VIR_DOMAIN_RUNNING;
             goto endjob;
         }
         qemuDomainObjExitMonitorWithDriver(driver, vm);
-        vm->state = VIR_DOMAIN_PAUSED;
     }

     /* Get XML for the domain */
@@ -5167,9 +5173,11 @@ static int qemudDomainCoreDump(virDomainPtr dom,
     /* Pause domain for non-live dump */
     if (!(flags & VIR_DUMP_LIVE) && vm->state == VIR_DOMAIN_RUNNING) {
+        vm->state = VIR_DOMAIN_PAUSED;
         qemuDomainObjEnterMonitorWithDriver(driver, vm);
         if (qemuMonitorStopCPUs(priv->mon) < 0) {
             qemuDomainObjExitMonitorWithDriver(driver, vm);
+            vm->state = VIR_DOMAIN_RUNNING;
             goto endjob;
         }
         qemuDomainObjExitMonitorWithDriver(driver, vm);
@@ -5214,6 +5222,7 @@ endjob:
                             "%s", _("resuming after dump failed"));
         }
         qemuDomainObjExitMonitorWithDriver(driver, vm);
+        vm->state = VIR_DOMAIN_RUNNING;
     }

     if (qemuDomainObjEndJob(vm) == 0)
@@ -11056,13 +11065,17 @@ static int qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
             /* qemu unconditionally starts the domain running again after
              * loadvm, so let's pause it to keep consistency
              */
+            int state = vm->state;
             priv = vm->privateData;
+            vm->state = VIR_DOMAIN_PAUSED;
             qemuDomainObjEnterMonitorWithDriver(driver, vm);
             rc = qemuMonitorStopCPUs(priv->mon);
             qemuDomainObjExitMonitorWithDriver(driver, vm);
-            if (rc < 0)
+            if (rc < 0) {
+                vm->state = state;
                 goto cleanup;
+            }
         }

         event = virDomainEventNewFromObj(vm,
                                          VIR_DOMAIN_EVENT_STARTED,