qemuDomainPMSuspendForDuration: check for wake-up support

If the current QEMU guest can't wake up from suspend properly,
and we are able to determine that, avoid suspending the guest
at all. To be able to determine this support, QEMU needs to
implement the 'query-current-machine' QMP call. This is reflected
by the QEMU_CAPS_QUERY_CURRENT_MACHINE cap.
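
For reference, the wire-level exchange behind this probe looks
roughly like the following, in QMP's usual documentation notation
(illustrative values, not verbatim output):

    -> { "execute": "query-current-machine" }
    <- { "return": { "wakeup-suspend-support": false } }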

If the cap is enabled, a new function qemuDomainProbeQMPCurrentMachine
is called. This is a wrapper for qemuMonitorGetCurrentMachineInfo,
which retrieves the 'wakeup-suspend-support' flag from
'query-current-machine'. If wakeupSuspendSupport is true, proceed
with the regular flow of qemuDomainPMSuspendForDuration.
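
The monitor plumbing itself lands in a separate patch; for context,
the JSON-monitor side of qemuMonitorGetCurrentMachineInfo can be
sketched roughly as below. This is a simplified sketch, not the
verbatim implementation, and the qemuMonitorJSONGetCurrentMachineInfo
name is assumed from the usual qemuMonitor/qemuMonitorJSON naming
pattern:

    static int
    qemuMonitorJSONGetCurrentMachineInfo(qemuMonitorPtr mon,
                                         qemuMonitorCurrentMachineInfoPtr info)
    {
        virJSONValuePtr cmd = NULL;
        virJSONValuePtr reply = NULL;
        virJSONValuePtr data;
        int ret = -1;

        /* issue 'query-current-machine' and wait for the reply */
        if (!(cmd = qemuMonitorJSONMakeCommand("query-current-machine", NULL)))
            return -1;

        if (qemuMonitorJSONCommand(mon, cmd, &reply) < 0)
            goto cleanup;

        /* guarantees a 'return' object is present in the reply */
        if (qemuMonitorJSONCheckReply(reply, VIR_JSON_TYPE_OBJECT) < 0)
            goto cleanup;

        /* pull the boolean flag out of the 'return' object */
        data = virJSONValueObjectGetObject(reply, "return");
        if (virJSONValueObjectGetBoolean(data, "wakeup-suspend-support",
                                         &info->wakeupSuspendSupport) < 0)
            goto cleanup;

        ret = 0;
     cleanup:
        virJSONValueFree(cmd);
        virJSONValueFree(reply);
        return ret;
    }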

The absence of QEMU_CAPS_QUERY_CURRENT_MACHINE indicates that
we're dealing with a QEMU version older than 4.0, the first
version to implement the required QMP command. In this case,
proceed as usual with the suspend logic of
qemuDomainPMSuspendForDuration, since we can't tell whether the
guest supports wake-up or not.
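
With the check in place, attempting to suspend an affected guest
now fails up front instead of leaving it wedged; for example
(hypothetical domain name):

    $ virsh dompmsuspend wedge-prone-guest --target mem
    error: Domain does not have suspend support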

Fixes: https://bugs.launchpad.net/ubuntu/+source/qemu/+bug/1759509
Reported-by: Balamuruhan S <bala24@linux.vnet.ibm.com>
Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -19145,6 +19145,27 @@ qemuDomainGetCPUStats(virDomainPtr domain,
     return ret;
 }
 
+
+static int
+qemuDomainProbeQMPCurrentMachine(virQEMUDriverPtr driver,
+                                 virDomainObjPtr vm,
+                                 bool *wakeupSupported)
+{
+    qemuDomainObjPrivatePtr priv = vm->privateData;
+    qemuMonitorCurrentMachineInfo info = { 0 };
+    int rv;
+
+    qemuDomainObjEnterMonitor(driver, vm);
+    rv = qemuMonitorGetCurrentMachineInfo(priv->mon, &info);
+    if (qemuDomainObjExitMonitor(driver, vm) < 0 ||
+        rv < 0)
+        return -1;
+
+    *wakeupSupported = info.wakeupSuspendSupport;
+    return 0;
+}
+
+
 static int
 qemuDomainPMSuspendForDuration(virDomainPtr dom,
                                unsigned int target,
@@ -19152,8 +19173,10 @@ qemuDomainPMSuspendForDuration(virDomainPtr dom,
                                unsigned int flags)
 {
     virQEMUDriverPtr driver = dom->conn->privateData;
+    qemuDomainObjPrivatePtr priv;
     virDomainObjPtr vm;
     qemuAgentPtr agent;
+    qemuDomainJob job = QEMU_JOB_NONE;
     int ret = -1;
 
     virCheckFlags(0, -1);
@@ -19179,12 +19202,37 @@ qemuDomainPMSuspendForDuration(virDomainPtr dom,
     if (virDomainPMSuspendForDurationEnsureACL(dom->conn, vm->def) < 0)
         goto cleanup;
 
-    if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0)
+    priv = vm->privateData;
+
+    if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_QUERY_CURRENT_MACHINE))
+        job = QEMU_JOB_MODIFY;
+
+    if (qemuDomainObjBeginJobWithAgent(driver, vm, job, QEMU_AGENT_JOB_MODIFY) < 0)
         goto cleanup;
 
     if (virDomainObjCheckActive(vm) < 0)
         goto endjob;
 
+    /*
+     * The case we want to handle here is when QEMU has the API (i.e.
+     * QEMU_CAPS_QUERY_CURRENT_MACHINE is set). Otherwise, do not interfere
+     * with the suspend process. This means that existing running domains,
+     * that don't know about this cap, will keep their old behavior of
+     * suspending 'in the dark'.
+     */
+    if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_QUERY_CURRENT_MACHINE)) {
+        bool wakeupSupported;
+
+        if (qemuDomainProbeQMPCurrentMachine(driver, vm, &wakeupSupported) < 0)
+            goto endjob;
+
+        if (!wakeupSupported) {
+            virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
+                           _("Domain does not have suspend support"));
+            goto endjob;
+        }
+    }
+
     if (vm->def->pm.s3 || vm->def->pm.s4) {
         if (vm->def->pm.s3 == VIR_TRISTATE_BOOL_NO &&
             (target == VIR_NODE_SUSPEND_TARGET_MEM ||
@@ -19210,7 +19258,10 @@ qemuDomainPMSuspendForDuration(virDomainPtr dom,
         qemuDomainObjExitAgent(vm, agent);
 
  endjob:
-    qemuDomainObjEndAgentJob(vm);
+    if (job)
+        qemuDomainObjEndJobWithAgent(driver, vm);
+    else
+        qemuDomainObjEndAgentJob(vm);
 
  cleanup:
     virDomainObjEndAPI(&vm);
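
For completeness, a caller reaches this path through the public
virDomainPMSuspendForDuration() API; a minimal client sketch follows
(the connection URI and domain name are placeholders):

    #include <stdio.h>
    #include <libvirt/libvirt.h>
    #include <libvirt/virterror.h>

    int main(void)
    {
        virConnectPtr conn = NULL;
        virDomainPtr dom = NULL;
        int rc = 1;

        if (!(conn = virConnectOpen("qemu:///system")))
            return 1;

        if (!(dom = virDomainLookupByName(conn, "mydomain")))
            goto cleanup;

        /* S3-suspend with no wake-up timer (duration == 0); with this
         * patch the call fails with VIR_ERR_OPERATION_UNSUPPORTED when
         * QEMU reports no wake-up support */
        if (virDomainPMSuspendForDuration(dom, VIR_NODE_SUSPEND_TARGET_MEM,
                                          0, 0) < 0) {
            virErrorPtr err = virGetLastError();
            fprintf(stderr, "suspend failed: %s\n",
                    err && err->message ? err->message : "unknown error");
            goto cleanup;
        }

        rc = 0;
     cleanup:
        if (dom)
            virDomainFree(dom);
        virConnectClose(conn);
        return rc;
    }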