qemu: Save job type in domain status XML

If libvirtd is restarted while a job is running, the new libvirtd process
needs to know about it so that it can recover and roll back the
operation.
Author: Jiri Denemark
Date:   2011-06-06 10:28:38 +02:00
parent 361842881e
commit ff340a84b8
6 changed files with 214 additions and 127 deletions
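
For illustration, a domain status file written while an outgoing migration is in progress would now carry the new element. This is only a rough sketch: the enclosing <domstatus> markup is abbreviated and approximate, the pid value is invented, and only the <job/> element and its strings are taken from the code below.

    <domstatus state='running' pid='3803'>
      ...
      <job type='none' async='migration out'/>
      ...
      <domain type='kvm'>
        ...
      </domain>
    </domstatus>

On restart, qemuDomainObjPrivateXMLParse() reads the type and async attributes back with qemuDomainJobTypeFromString() and qemuDomainAsyncJobTypeFromString(), so the reconnecting daemon can tell that a job was interrupted; a nested monitor call in progress would instead be recorded as type='async nested'.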

src/qemu/qemu_domain.c

@ -44,6 +44,26 @@
#define QEMU_NAMESPACE_HREF "http://libvirt.org/schemas/domain/qemu/1.0"
VIR_ENUM_DECL(qemuDomainJob)
VIR_ENUM_IMPL(qemuDomainJob, QEMU_JOB_LAST,
"none",
"query",
"destroy",
"suspend",
"modify",
"none", /* async job is never stored in job.active */
"async nested",
);
VIR_ENUM_DECL(qemuDomainAsyncJob)
VIR_ENUM_IMPL(qemuDomainAsyncJob, QEMU_ASYNC_JOB_LAST,
"none",
"migration out",
"migration in",
"save",
"dump",
);
static void qemuDomainEventDispatchFunc(virConnectPtr conn,
virDomainEventPtr event,
@ -214,6 +234,12 @@ static int qemuDomainObjPrivateXMLFormat(virBufferPtr buf, void *data)
if (priv->lockState)
virBufferAsprintf(buf, " <lockstate>%s</lockstate>\n", priv->lockState);
if (priv->job.active || priv->job.asyncJob) {
virBufferAsprintf(buf, " <job type='%s' async='%s'/>\n",
qemuDomainJobTypeToString(priv->job.active),
qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
}
return 0;
}
@ -320,6 +346,32 @@ static int qemuDomainObjPrivateXMLParse(xmlXPathContextPtr ctxt, void *data)
priv->lockState = virXPathString("string(./lockstate)", ctxt);
if ((tmp = virXPathString("string(./job[1]/@type)", ctxt))) {
int type;
if ((type = qemuDomainJobTypeFromString(tmp)) < 0) {
qemuReportError(VIR_ERR_INTERNAL_ERROR,
_("Unknown job type %s"), tmp);
VIR_FREE(tmp);
goto error;
}
VIR_FREE(tmp);
priv->job.active = type;
}
if ((tmp = virXPathString("string(./job[1]/@async)", ctxt))) {
int async;
if ((async = qemuDomainAsyncJobTypeFromString(tmp)) < 0) {
qemuReportError(VIR_ERR_INTERNAL_ERROR,
_("Unknown async job type %s"), tmp);
VIR_FREE(tmp);
goto error;
}
VIR_FREE(tmp);
priv->job.asyncJob = async;
}
return 0;
error:
@ -516,12 +568,16 @@ void qemuDomainSetNamespaceHooks(virCapsPtr caps)
}
void
qemuDomainObjSetJob(virDomainObjPtr obj,
enum qemuDomainJob job)
qemuDomainObjSaveJob(struct qemud_driver *driver, virDomainObjPtr obj)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
if (!virDomainObjIsActive(obj)) {
/* don't write the state file yet, it will be written once the domain
* gets activated */
return;
}
priv->job.active = job;
if (virDomainSaveStatus(driver->caps, driver->stateDir, obj) < 0)
VIR_WARN("Failed to save status on vm %s", obj->def->name);
}
void
@ -537,13 +593,14 @@ qemuDomainObjSetAsyncJobMask(virDomainObjPtr obj,
}
void
qemuDomainObjDiscardAsyncJob(virDomainObjPtr obj)
qemuDomainObjDiscardAsyncJob(struct qemud_driver *driver, virDomainObjPtr obj)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
if (priv->job.active == QEMU_JOB_ASYNC_NESTED)
qemuDomainObjResetJob(priv);
qemuDomainObjResetAsyncJob(priv);
qemuDomainObjSaveJob(driver, obj);
}
static bool
@ -559,7 +616,7 @@ qemuDomainJobAllowed(qemuDomainObjPrivatePtr priv, enum qemuDomainJob job)
* obj must be locked before calling; driver_locked says if qemu_driver is
* locked or not.
*/
static int
static int ATTRIBUTE_NONNULL(1)
qemuDomainObjBeginJobInternal(struct qemud_driver *driver,
bool driver_locked,
virDomainObjPtr obj,
@ -611,6 +668,8 @@ retry:
virDomainObjLock(obj);
}
qemuDomainObjSaveJob(driver, obj);
return 0;
error:
@ -639,16 +698,19 @@ error:
* Upon successful return, the object will have its ref count increased,
* successful calls must be followed by EndJob eventually
*/
int qemuDomainObjBeginJob(virDomainObjPtr obj, enum qemuDomainJob job)
int qemuDomainObjBeginJob(struct qemud_driver *driver,
virDomainObjPtr obj,
enum qemuDomainJob job)
{
return qemuDomainObjBeginJobInternal(NULL, false, obj, job,
return qemuDomainObjBeginJobInternal(driver, false, obj, job,
QEMU_ASYNC_JOB_NONE);
}
int qemuDomainObjBeginAsyncJob(virDomainObjPtr obj,
int qemuDomainObjBeginAsyncJob(struct qemud_driver *driver,
virDomainObjPtr obj,
enum qemuDomainAsyncJob asyncJob)
{
return qemuDomainObjBeginJobInternal(NULL, false, obj, QEMU_JOB_ASYNC,
return qemuDomainObjBeginJobInternal(driver, false, obj, QEMU_JOB_ASYNC,
asyncJob);
}
@ -692,9 +754,10 @@ int qemuDomainObjBeginAsyncJobWithDriver(struct qemud_driver *driver,
* qemuDomainObjBeginJob{,WithDriver} instead.
*/
int
qemuDomainObjBeginNestedJob(virDomainObjPtr obj)
qemuDomainObjBeginNestedJob(struct qemud_driver *driver,
virDomainObjPtr obj)
{
return qemuDomainObjBeginJobInternal(NULL, false, obj,
return qemuDomainObjBeginJobInternal(driver, false, obj,
QEMU_JOB_ASYNC_NESTED,
QEMU_ASYNC_JOB_NONE);
}
@ -717,33 +780,36 @@ qemuDomainObjBeginNestedJobWithDriver(struct qemud_driver *driver,
* Returns remaining refcount on 'obj', maybe 0 to indicate it
* was deleted
*/
int qemuDomainObjEndJob(virDomainObjPtr obj)
int qemuDomainObjEndJob(struct qemud_driver *driver, virDomainObjPtr obj)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
qemuDomainObjResetJob(priv);
qemuDomainObjSaveJob(driver, obj);
virCondSignal(&priv->job.cond);
return virDomainObjUnref(obj);
}
int
qemuDomainObjEndAsyncJob(virDomainObjPtr obj)
qemuDomainObjEndAsyncJob(struct qemud_driver *driver, virDomainObjPtr obj)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
qemuDomainObjResetAsyncJob(priv);
qemuDomainObjSaveJob(driver, obj);
virCondBroadcast(&priv->job.asyncCond);
return virDomainObjUnref(obj);
}
void
qemuDomainObjEndNestedJob(virDomainObjPtr obj)
qemuDomainObjEndNestedJob(struct qemud_driver *driver, virDomainObjPtr obj)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
qemuDomainObjResetJob(priv);
qemuDomainObjSaveJob(driver, obj);
virCondSignal(&priv->job.cond);
/* safe to ignore since the surrounding async job increased the reference
@ -752,14 +818,15 @@ qemuDomainObjEndNestedJob(virDomainObjPtr obj)
}
static int
static int ATTRIBUTE_NONNULL(1)
qemuDomainObjEnterMonitorInternal(struct qemud_driver *driver,
bool driver_locked,
virDomainObjPtr obj)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) {
if (qemuDomainObjBeginNestedJob(obj) < 0)
if (qemuDomainObjBeginNestedJob(driver, obj) < 0)
return -1;
if (!virDomainObjIsActive(obj)) {
qemuReportError(VIR_ERR_OPERATION_FAILED, "%s",
@ -772,14 +839,15 @@ qemuDomainObjEnterMonitorInternal(struct qemud_driver *driver,
qemuMonitorRef(priv->mon);
ignore_value(virTimeMs(&priv->monStart));
virDomainObjUnlock(obj);
if (driver)
if (driver_locked)
qemuDriverUnlock(driver);
return 0;
}
static void
static void ATTRIBUTE_NONNULL(1)
qemuDomainObjExitMonitorInternal(struct qemud_driver *driver,
bool driver_locked,
virDomainObjPtr obj)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
@ -790,7 +858,7 @@ qemuDomainObjExitMonitorInternal(struct qemud_driver *driver,
if (refs > 0)
qemuMonitorUnlock(priv->mon);
if (driver)
if (driver_locked)
qemuDriverLock(driver);
virDomainObjLock(obj);
@ -800,7 +868,7 @@ qemuDomainObjExitMonitorInternal(struct qemud_driver *driver,
}
if (priv->job.active == QEMU_JOB_ASYNC_NESTED)
qemuDomainObjEndNestedJob(obj);
qemuDomainObjEndNestedJob(driver, obj);
}
/*
@ -813,18 +881,20 @@ qemuDomainObjExitMonitorInternal(struct qemud_driver *driver,
*
* To be followed with qemuDomainObjExitMonitor() once complete
*/
int qemuDomainObjEnterMonitor(virDomainObjPtr obj)
int qemuDomainObjEnterMonitor(struct qemud_driver *driver,
virDomainObjPtr obj)
{
return qemuDomainObjEnterMonitorInternal(NULL, obj);
return qemuDomainObjEnterMonitorInternal(driver, false, obj);
}
/* obj must NOT be locked before calling, qemud_driver must be unlocked
*
* Should be paired with an earlier qemuDomainObjEnterMonitor() call
*/
void qemuDomainObjExitMonitor(virDomainObjPtr obj)
void qemuDomainObjExitMonitor(struct qemud_driver *driver,
virDomainObjPtr obj)
{
qemuDomainObjExitMonitorInternal(NULL, obj);
qemuDomainObjExitMonitorInternal(driver, false, obj);
}
/*
@ -840,7 +910,7 @@ void qemuDomainObjExitMonitor(virDomainObjPtr obj)
int qemuDomainObjEnterMonitorWithDriver(struct qemud_driver *driver,
virDomainObjPtr obj)
{
return qemuDomainObjEnterMonitorInternal(driver, obj);
return qemuDomainObjEnterMonitorInternal(driver, true, obj);
}
/* obj must NOT be locked before calling, qemud_driver must be unlocked,
@ -851,7 +921,7 @@ int qemuDomainObjEnterMonitorWithDriver(struct qemud_driver *driver,
void qemuDomainObjExitMonitorWithDriver(struct qemud_driver *driver,
virDomainObjPtr obj)
{
qemuDomainObjExitMonitorInternal(driver, obj);
qemuDomainObjExitMonitorInternal(driver, true, obj);
}
void qemuDomainObjEnterRemoteWithDriver(struct qemud_driver *driver,

src/qemu/qemu_domain.h

@ -50,9 +50,11 @@ enum qemuDomainJob {
QEMU_JOB_SUSPEND, /* Suspends (stops vCPUs) the domain */
QEMU_JOB_MODIFY, /* May change state */
/* The following two items must always be the last items */
/* The following two items must always be the last items before JOB_LAST */
QEMU_JOB_ASYNC, /* Asynchronous job */
QEMU_JOB_ASYNC_NESTED, /* Normal job within an async job */
QEMU_JOB_LAST
};
/* Async job consists of a series of jobs that may change state. Independent
@ -65,6 +67,8 @@ enum qemuDomainAsyncJob {
QEMU_ASYNC_JOB_MIGRATION_IN,
QEMU_ASYNC_JOB_SAVE,
QEMU_ASYNC_JOB_DUMP,
QEMU_ASYNC_JOB_LAST
};
enum qemuDomainJobSignals {
@ -145,13 +149,16 @@ void qemuDomainEventQueue(struct qemud_driver *driver,
void qemuDomainSetPrivateDataHooks(virCapsPtr caps);
void qemuDomainSetNamespaceHooks(virCapsPtr caps);
int qemuDomainObjBeginJob(virDomainObjPtr obj,
int qemuDomainObjBeginJob(struct qemud_driver *driver,
virDomainObjPtr obj,
enum qemuDomainJob job)
ATTRIBUTE_RETURN_CHECK;
int qemuDomainObjBeginAsyncJob(virDomainObjPtr obj,
int qemuDomainObjBeginAsyncJob(struct qemud_driver *driver,
virDomainObjPtr obj,
enum qemuDomainAsyncJob asyncJob)
ATTRIBUTE_RETURN_CHECK;
int qemuDomainObjBeginNestedJob(virDomainObjPtr obj)
int qemuDomainObjBeginNestedJob(struct qemud_driver *driver,
virDomainObjPtr obj)
ATTRIBUTE_RETURN_CHECK;
int qemuDomainObjBeginJobWithDriver(struct qemud_driver *driver,
virDomainObjPtr obj,
@ -165,20 +172,26 @@ int qemuDomainObjBeginNestedJobWithDriver(struct qemud_driver *driver,
virDomainObjPtr obj)
ATTRIBUTE_RETURN_CHECK;
int qemuDomainObjEndJob(virDomainObjPtr obj)
int qemuDomainObjEndJob(struct qemud_driver *driver,
virDomainObjPtr obj)
ATTRIBUTE_RETURN_CHECK;
int qemuDomainObjEndAsyncJob(virDomainObjPtr obj)
int qemuDomainObjEndAsyncJob(struct qemud_driver *driver,
virDomainObjPtr obj)
ATTRIBUTE_RETURN_CHECK;
void qemuDomainObjEndNestedJob(virDomainObjPtr obj);
void qemuDomainObjEndNestedJob(struct qemud_driver *driver,
virDomainObjPtr obj);
void qemuDomainObjSetJob(virDomainObjPtr obj, enum qemuDomainJob job);
void qemuDomainObjSaveJob(struct qemud_driver *driver, virDomainObjPtr obj);
void qemuDomainObjSetAsyncJobMask(virDomainObjPtr obj,
unsigned long long allowedJobs);
void qemuDomainObjDiscardAsyncJob(virDomainObjPtr obj);
void qemuDomainObjDiscardAsyncJob(struct qemud_driver *driver,
virDomainObjPtr obj);
int qemuDomainObjEnterMonitor(virDomainObjPtr obj)
int qemuDomainObjEnterMonitor(struct qemud_driver *driver,
virDomainObjPtr obj)
ATTRIBUTE_RETURN_CHECK;
void qemuDomainObjExitMonitor(virDomainObjPtr obj);
void qemuDomainObjExitMonitor(struct qemud_driver *driver,
virDomainObjPtr obj);
int qemuDomainObjEnterMonitorWithDriver(struct qemud_driver *driver,
virDomainObjPtr obj)
ATTRIBUTE_RETURN_CHECK;

src/qemu/qemu_driver.c

@ -157,7 +157,7 @@ qemuAutostartDomain(void *payload, const void *name ATTRIBUTE_UNUSED, void *opaq
err ? err->message : _("unknown error"));
}
if (qemuDomainObjEndJob(vm) == 0)
if (qemuDomainObjEndJob(data->driver, vm) == 0)
vm = NULL;
}
@ -1288,7 +1288,7 @@ static virDomainPtr qemudDomainCreate(virConnectPtr conn, const char *xml,
(flags & VIR_DOMAIN_START_AUTODESTROY) != 0,
-1, NULL, VIR_VM_OP_CREATE) < 0) {
virDomainAuditStart(vm, "booted", false);
if (qemuDomainObjEndJob(vm) > 0)
if (qemuDomainObjEndJob(driver, vm) > 0)
virDomainRemoveInactive(&driver->domains,
vm);
vm = NULL;
@ -1304,7 +1304,7 @@ static virDomainPtr qemudDomainCreate(virConnectPtr conn, const char *xml,
if (dom) dom->id = vm->def->id;
if (vm &&
qemuDomainObjEndJob(vm) == 0)
qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
cleanup:
@ -1380,7 +1380,7 @@ static int qemudDomainSuspend(virDomainPtr dom) {
}
endjob:
if (qemuDomainObjEndJob(vm) == 0)
if (qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
cleanup:
@ -1436,7 +1436,7 @@ static int qemudDomainResume(virDomainPtr dom) {
ret = 0;
endjob:
if (qemuDomainObjEndJob(vm) == 0)
if (qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
cleanup:
@ -1467,7 +1467,7 @@ static int qemuDomainShutdown(virDomainPtr dom) {
goto cleanup;
}
if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0)
if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
@ -1477,14 +1477,14 @@ static int qemuDomainShutdown(virDomainPtr dom) {
}
priv = vm->privateData;
ignore_value(qemuDomainObjEnterMonitor(vm));
ignore_value(qemuDomainObjEnterMonitor(driver, vm));
ret = qemuMonitorSystemPowerdown(priv->mon);
qemuDomainObjExitMonitor(vm);
qemuDomainObjExitMonitor(driver, vm);
priv->fakeReboot = false;
endjob:
if (qemuDomainObjEndJob(vm) == 0)
if (qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
cleanup:
@ -1517,7 +1517,7 @@ static int qemuDomainReboot(virDomainPtr dom, unsigned int flags) {
#if HAVE_YAJL
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MONITOR_JSON)) {
if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0)
if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
@ -1526,14 +1526,14 @@ static int qemuDomainReboot(virDomainPtr dom, unsigned int flags) {
goto endjob;
}
ignore_value(qemuDomainObjEnterMonitor(vm));
ignore_value(qemuDomainObjEnterMonitor(driver, vm));
ret = qemuMonitorSystemPowerdown(priv->mon);
qemuDomainObjExitMonitor(vm);
qemuDomainObjExitMonitor(driver, vm);
priv->fakeReboot = true;
endjob:
if (qemuDomainObjEndJob(vm) == 0)
if (qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
} else {
#endif
@ -1593,7 +1593,7 @@ static int qemudDomainDestroy(virDomainPtr dom) {
virDomainAuditStop(vm, "destroyed");
if (!vm->persistent) {
if (qemuDomainObjEndJob(vm) > 0)
if (qemuDomainObjEndJob(driver, vm) > 0)
virDomainRemoveInactive(&driver->domains,
vm);
vm = NULL;
@ -1602,7 +1602,7 @@ static int qemudDomainDestroy(virDomainPtr dom) {
endjob:
if (vm &&
qemuDomainObjEndJob(vm) == 0)
qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
cleanup:
@ -1690,7 +1690,7 @@ static int qemudDomainSetMemoryFlags(virDomainPtr dom, unsigned long newmem,
goto cleanup;
}
if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0)
if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
goto cleanup;
isActive = virDomainObjIsActive(vm);
@ -1755,9 +1755,9 @@ static int qemudDomainSetMemoryFlags(virDomainPtr dom, unsigned long newmem,
if (flags & VIR_DOMAIN_AFFECT_LIVE) {
priv = vm->privateData;
ignore_value(qemuDomainObjEnterMonitor(vm));
ignore_value(qemuDomainObjEnterMonitor(driver, vm));
r = qemuMonitorSetBalloon(priv->mon, newmem);
qemuDomainObjExitMonitor(vm);
qemuDomainObjExitMonitor(driver, vm);
virDomainAuditMemory(vm, vm->def->mem.cur_balloon, newmem, "update",
r == 1);
if (r < 0)
@ -1781,7 +1781,7 @@ static int qemudDomainSetMemoryFlags(virDomainPtr dom, unsigned long newmem,
ret = 0;
endjob:
if (qemuDomainObjEndJob(vm) == 0)
if (qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
cleanup:
@ -1832,7 +1832,7 @@ static int qemuDomainInjectNMI(virDomainPtr domain, unsigned int flags)
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
ret = qemuMonitorInjectNMI(priv->mon);
qemuDomainObjExitMonitorWithDriver(driver, vm);
if (qemuDomainObjEndJob(vm) == 0) {
if (qemuDomainObjEndJob(driver, vm) == 0) {
vm = NULL;
goto cleanup;
}
@ -1885,16 +1885,16 @@ static int qemudDomainGetInfo(virDomainPtr dom,
(vm->def->memballoon->model == VIR_DOMAIN_MEMBALLOON_MODEL_NONE)) {
info->memory = vm->def->mem.max_balloon;
} else if (!priv->job.active) {
if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0)
if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm))
err = 0;
else {
ignore_value(qemuDomainObjEnterMonitor(vm));
ignore_value(qemuDomainObjEnterMonitor(driver, vm));
err = qemuMonitorGetBalloonInfo(priv->mon, &balloon);
qemuDomainObjExitMonitor(vm);
qemuDomainObjExitMonitor(driver, vm);
}
if (qemuDomainObjEndJob(vm) == 0) {
if (qemuDomainObjEndJob(driver, vm) == 0) {
vm = NULL;
goto cleanup;
}
@ -2298,7 +2298,7 @@ static int qemudDomainSaveFlag(struct qemud_driver *driver, virDomainPtr dom,
VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_SAVED);
if (!vm->persistent) {
if (qemuDomainObjEndAsyncJob(vm) > 0)
if (qemuDomainObjEndAsyncJob(driver, vm) > 0)
virDomainRemoveInactive(&driver->domains,
vm);
vm = NULL;
@ -2314,7 +2314,7 @@ endjob:
VIR_WARN("Unable to resume guest CPUs after save failure");
}
}
if (qemuDomainObjEndAsyncJob(vm) == 0)
if (qemuDomainObjEndAsyncJob(driver, vm) == 0)
vm = NULL;
}
@ -2669,7 +2669,7 @@ endjob:
}
}
if (qemuDomainObjEndAsyncJob(vm) == 0)
if (qemuDomainObjEndAsyncJob(driver, vm) == 0)
vm = NULL;
else if ((ret == 0) && (flags & VIR_DUMP_CRASH) && !vm->persistent) {
virDomainRemoveInactive(&driver->domains,
@ -2713,7 +2713,7 @@ qemuDomainScreenshot(virDomainPtr dom,
priv = vm->privateData;
if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0)
if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
@ -2743,12 +2743,12 @@ qemuDomainScreenshot(virDomainPtr dom,
virSecurityManagerSetSavedStateLabel(qemu_driver->securityManager, vm, tmp);
ignore_value(qemuDomainObjEnterMonitor(vm));
ignore_value(qemuDomainObjEnterMonitor(driver, vm));
if (qemuMonitorScreendump(priv->mon, tmp) < 0) {
qemuDomainObjExitMonitor(vm);
qemuDomainObjExitMonitor(driver, vm);
goto endjob;
}
qemuDomainObjExitMonitor(vm);
qemuDomainObjExitMonitor(driver, vm);
if (VIR_CLOSE(tmp_fd) < 0) {
virReportSystemError(errno, _("unable to close %s"), tmp);
@ -2767,7 +2767,7 @@ endjob:
VIR_FORCE_CLOSE(tmp_fd);
VIR_FREE(tmp);
if (qemuDomainObjEndJob(vm) == 0)
if (qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
cleanup:
@ -2837,7 +2837,7 @@ endjob:
/* Safe to ignore value since ref count was incremented in
* qemuProcessHandleWatchdog().
*/
ignore_value(qemuDomainObjEndAsyncJob(wdEvent->vm));
ignore_value(qemuDomainObjEndAsyncJob(driver, wdEvent->vm));
unlock:
if (virDomainObjUnref(wdEvent->vm) > 0)
@ -2846,7 +2846,9 @@ unlock:
VIR_FREE(wdEvent);
}
static int qemudDomainHotplugVcpus(virDomainObjPtr vm, unsigned int nvcpus)
static int qemudDomainHotplugVcpus(struct qemud_driver *driver,
virDomainObjPtr vm,
unsigned int nvcpus)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
int i, rc = 1;
@ -2854,7 +2856,7 @@ static int qemudDomainHotplugVcpus(virDomainObjPtr vm, unsigned int nvcpus)
int oldvcpus = vm->def->vcpus;
int vcpus = oldvcpus;
ignore_value(qemuDomainObjEnterMonitor(vm));
ignore_value(qemuDomainObjEnterMonitor(driver, vm));
/* We need different branches here, because we want to offline
* in reverse order to onlining, so any partial fail leaves us in a
@ -2886,7 +2888,7 @@ static int qemudDomainHotplugVcpus(virDomainObjPtr vm, unsigned int nvcpus)
ret = 0;
cleanup:
qemuDomainObjExitMonitor(vm);
qemuDomainObjExitMonitor(driver, vm);
vm->def->vcpus = vcpus;
virDomainAuditVcpu(vm, oldvcpus, nvcpus, "update", rc == 1);
return ret;
@ -2940,7 +2942,7 @@ qemudDomainSetVcpusFlags(virDomainPtr dom, unsigned int nvcpus,
goto cleanup;
}
if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0)
if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm) && (flags & VIR_DOMAIN_AFFECT_LIVE)) {
@ -2996,11 +2998,11 @@ qemudDomainSetVcpusFlags(virDomainPtr dom, unsigned int nvcpus,
break;
case VIR_DOMAIN_AFFECT_LIVE:
ret = qemudDomainHotplugVcpus(vm, nvcpus);
ret = qemudDomainHotplugVcpus(driver, vm, nvcpus);
break;
case VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG:
ret = qemudDomainHotplugVcpus(vm, nvcpus);
ret = qemudDomainHotplugVcpus(driver, vm, nvcpus);
if (ret == 0) {
persistentDef->vcpus = nvcpus;
}
@ -3012,7 +3014,7 @@ qemudDomainSetVcpusFlags(virDomainPtr dom, unsigned int nvcpus,
ret = virDomainSaveConfig(driver->configDir, persistentDef);
endjob:
if (qemuDomainObjEndJob(vm) == 0)
if (qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
cleanup:
@ -3767,7 +3769,7 @@ qemuDomainRestore(virConnectPtr conn,
ret = qemuDomainSaveImageStartVM(conn, driver, vm, &fd, &header, path);
if (qemuDomainObjEndJob(vm) == 0)
if (qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
else if (ret < 0 && !vm->persistent) {
virDomainRemoveInactive(&driver->domains, vm);
@ -3862,7 +3864,7 @@ static char *qemuDomainGetXMLDesc(virDomainPtr dom,
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
err = qemuMonitorGetBalloonInfo(priv->mon, &balloon);
qemuDomainObjExitMonitorWithDriver(driver, vm);
if (qemuDomainObjEndJob(vm) == 0) {
if (qemuDomainObjEndJob(driver, vm) == 0) {
vm = NULL;
goto cleanup;
}
@ -4111,7 +4113,7 @@ qemudDomainStartWithFlags(virDomainPtr dom, unsigned int flags)
ret = 0;
endjob:
if (qemuDomainObjEndJob(vm) == 0)
if (qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
cleanup:
@ -4934,7 +4936,7 @@ qemuDomainModifyDeviceFlags(virDomainPtr dom, const char *xml,
}
endjob:
if (qemuDomainObjEndJob(vm) == 0)
if (qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
cleanup:
@ -6040,7 +6042,7 @@ qemudDomainBlockStats (virDomainPtr dom,
if (virDomainObjUnref(vm) == 0)
vm = NULL;
} else {
if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0)
if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
@ -6049,7 +6051,7 @@ qemudDomainBlockStats (virDomainPtr dom,
goto endjob;
}
ignore_value(qemuDomainObjEnterMonitor(vm));
ignore_value(qemuDomainObjEnterMonitor(driver, vm));
ret = qemuMonitorGetBlockStatsInfo(priv->mon,
disk->info.alias,
&stats->rd_req,
@ -6057,10 +6059,10 @@ qemudDomainBlockStats (virDomainPtr dom,
&stats->wr_req,
&stats->wr_bytes,
&stats->errs);
qemuDomainObjExitMonitor(vm);
qemuDomainObjExitMonitor(driver, vm);
endjob:
if (qemuDomainObjEndJob(vm) == 0)
if (qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
}
@ -6152,20 +6154,20 @@ qemudDomainMemoryStats (virDomainPtr dom,
goto cleanup;
}
if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0)
if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjIsActive(vm)) {
qemuDomainObjPrivatePtr priv = vm->privateData;
ignore_value(qemuDomainObjEnterMonitor(vm));
ignore_value(qemuDomainObjEnterMonitor(driver, vm));
ret = qemuMonitorGetMemoryStats(priv->mon, stats, nr_stats);
qemuDomainObjExitMonitor(vm);
qemuDomainObjExitMonitor(driver, vm);
} else {
qemuReportError(VIR_ERR_OPERATION_INVALID,
"%s", _("domain is not running"));
}
if (qemuDomainObjEndJob(vm) == 0)
if (qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
cleanup:
@ -6276,7 +6278,7 @@ qemudDomainMemoryPeek (virDomainPtr dom,
goto cleanup;
}
if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0)
if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
@ -6300,19 +6302,19 @@ qemudDomainMemoryPeek (virDomainPtr dom,
virSecurityManagerSetSavedStateLabel(qemu_driver->securityManager, vm, tmp);
priv = vm->privateData;
ignore_value(qemuDomainObjEnterMonitor(vm));
ignore_value(qemuDomainObjEnterMonitor(driver, vm));
if (flags == VIR_MEMORY_VIRTUAL) {
if (qemuMonitorSaveVirtualMemory(priv->mon, offset, size, tmp) < 0) {
qemuDomainObjExitMonitor(vm);
qemuDomainObjExitMonitor(driver, vm);
goto endjob;
}
} else {
if (qemuMonitorSavePhysicalMemory(priv->mon, offset, size, tmp) < 0) {
qemuDomainObjExitMonitor(vm);
qemuDomainObjExitMonitor(driver, vm);
goto endjob;
}
}
qemuDomainObjExitMonitor(vm);
qemuDomainObjExitMonitor(driver, vm);
/* Read the memory file into buffer. */
if (saferead (fd, buffer, size) == (ssize_t) -1) {
@ -6325,7 +6327,7 @@ qemudDomainMemoryPeek (virDomainPtr dom,
ret = 0;
endjob:
if (qemuDomainObjEndJob(vm) == 0)
if (qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
cleanup:
@ -6487,20 +6489,20 @@ static int qemuDomainGetBlockInfo(virDomainPtr dom,
if (virDomainObjUnref(vm) == 0)
vm = NULL;
} else {
if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0)
if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjIsActive(vm)) {
ignore_value(qemuDomainObjEnterMonitor(vm));
ignore_value(qemuDomainObjEnterMonitor(driver, vm));
ret = qemuMonitorGetBlockExtent(priv->mon,
disk->info.alias,
&info->allocation);
qemuDomainObjExitMonitor(vm);
qemuDomainObjExitMonitor(driver, vm);
} else {
ret = 0;
}
if (qemuDomainObjEndJob(vm) == 0)
if (qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
}
} else {
@ -7107,7 +7109,7 @@ qemuDomainMigrateConfirm3(virDomainPtr domain,
cookiein, cookieinlen,
flags, cancelled);
if (qemuDomainObjEndJob(vm) == 0) {
if (qemuDomainObjEndJob(driver, vm) == 0) {
vm = NULL;
} else if (!virDomainObjIsActive(vm) &&
(!vm->persistent || (flags & VIR_MIGRATE_UNDEFINE_SOURCE))) {
@ -7688,7 +7690,7 @@ cleanup:
_("resuming after snapshot failed"));
}
if (qemuDomainObjEndJob(vm) == 0)
if (qemuDomainObjEndJob(driver, vm) == 0)
*vmptr = NULL;
return ret;
@ -8063,7 +8065,7 @@ static int qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT);
if (!vm->persistent) {
if (qemuDomainObjEndJob(vm) > 0)
if (qemuDomainObjEndJob(driver, vm) > 0)
virDomainRemoveInactive(&driver->domains, vm);
vm = NULL;
goto cleanup;
@ -8077,7 +8079,7 @@ static int qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
ret = 0;
endjob:
if (vm && qemuDomainObjEndJob(vm) == 0)
if (vm && qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
cleanup:
@ -8298,7 +8300,7 @@ static int qemuDomainSnapshotDelete(virDomainSnapshotPtr snapshot,
ret = qemuDomainSnapshotDiscard(driver, vm, snap);
endjob:
if (qemuDomainObjEndJob(vm) == 0)
if (qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
cleanup:
@ -8346,7 +8348,7 @@ static int qemuDomainMonitorCommand(virDomainPtr domain, const char *cmd,
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
ret = qemuMonitorArbitraryCommand(priv->mon, cmd, result, hmp);
qemuDomainObjExitMonitorWithDriver(driver, vm);
if (qemuDomainObjEndJob(vm) == 0) {
if (qemuDomainObjEndJob(driver, vm) == 0) {
vm = NULL;
goto cleanup;
}
@ -8429,7 +8431,7 @@ static virDomainPtr qemuDomainAttach(virConnectPtr conn,
if (dom) dom->id = vm->def->id;
endjob:
if (qemuDomainObjEndJob(vm) == 0) {
if (qemuDomainObjEndJob(driver, vm) == 0) {
vm = NULL;
goto cleanup;
}

src/qemu/qemu_hotplug.c

@ -1245,14 +1245,14 @@ int qemuDomainDetachPciDiskDevice(struct qemud_driver *driver,
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
if (qemuMonitorDelDevice(priv->mon, detach->info.alias) < 0) {
qemuDomainObjExitMonitor(vm);
qemuDomainObjExitMonitor(driver, vm);
virDomainAuditDisk(vm, detach, NULL, "detach", false);
goto cleanup;
}
} else {
if (qemuMonitorRemovePCIDevice(priv->mon,
&detach->info.addr.pci) < 0) {
qemuDomainObjExitMonitor(vm);
qemuDomainObjExitMonitor(driver, vm);
virDomainAuditDisk(vm, detach, NULL, "detach", false);
goto cleanup;
}
@ -1340,7 +1340,7 @@ int qemuDomainDetachDiskDevice(struct qemud_driver *driver,
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
if (qemuMonitorDelDevice(priv->mon, detach->info.alias) < 0) {
qemuDomainObjExitMonitor(vm);
qemuDomainObjExitMonitor(driver, vm);
virDomainAuditDisk(vm, detach, NULL, "detach", false);
goto cleanup;
}
@ -1479,13 +1479,13 @@ int qemuDomainDetachPciControllerDevice(struct qemud_driver *driver,
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
if (qemuMonitorDelDevice(priv->mon, detach->info.alias)) {
qemuDomainObjExitMonitor(vm);
qemuDomainObjExitMonitor(driver, vm);
goto cleanup;
}
} else {
if (qemuMonitorRemovePCIDevice(priv->mon,
&detach->info.addr.pci) < 0) {
qemuDomainObjExitMonitor(vm);
qemuDomainObjExitMonitor(driver, vm);
goto cleanup;
}
}
@ -1574,7 +1574,7 @@ int qemuDomainDetachNetDevice(struct qemud_driver *driver,
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
if (qemuMonitorDelDevice(priv->mon, detach->info.alias) < 0) {
qemuDomainObjExitMonitor(vm);
qemuDomainObjExitMonitor(driver, vm);
virDomainAuditNet(vm, detach, NULL, "detach", false);
goto cleanup;
}

src/qemu/qemu_migration.c

@ -1160,7 +1160,7 @@ qemuMigrationPrepareTunnel(struct qemud_driver *driver,
virDomainAuditStart(vm, "migrated", false);
qemuProcessStop(driver, vm, 0, VIR_DOMAIN_SHUTOFF_FAILED);
if (!vm->persistent) {
if (qemuDomainObjEndAsyncJob(vm) > 0)
if (qemuDomainObjEndAsyncJob(driver, vm) > 0)
virDomainRemoveInactive(&driver->domains, vm);
vm = NULL;
}
@ -1189,7 +1189,7 @@ qemuMigrationPrepareTunnel(struct qemud_driver *driver,
endjob:
if (vm &&
qemuDomainObjEndAsyncJob(vm) == 0)
qemuDomainObjEndAsyncJob(driver, vm) == 0)
vm = NULL;
/* We set a fake job active which is held across
@ -1200,6 +1200,7 @@ endjob:
if (vm &&
virDomainObjIsActive(vm)) {
priv->job.asyncJob = QEMU_ASYNC_JOB_MIGRATION_IN;
qemuDomainObjSaveJob(driver, vm);
priv->job.info.type = VIR_DOMAIN_JOB_UNBOUNDED;
priv->job.start = now;
}
@ -1378,7 +1379,7 @@ qemuMigrationPrepareDirect(struct qemud_driver *driver,
* should have already done that.
*/
if (!vm->persistent) {
if (qemuDomainObjEndAsyncJob(vm) > 0)
if (qemuDomainObjEndAsyncJob(driver, vm) > 0)
virDomainRemoveInactive(&driver->domains, vm);
vm = NULL;
}
@ -1411,7 +1412,7 @@ qemuMigrationPrepareDirect(struct qemud_driver *driver,
endjob:
if (vm &&
qemuDomainObjEndAsyncJob(vm) == 0)
qemuDomainObjEndAsyncJob(driver, vm) == 0)
vm = NULL;
/* We set a fake job active which is held across
@ -1422,6 +1423,7 @@ endjob:
if (vm &&
virDomainObjIsActive(vm)) {
priv->job.asyncJob = QEMU_ASYNC_JOB_MIGRATION_IN;
qemuDomainObjSaveJob(driver, vm);
priv->job.info.type = VIR_DOMAIN_JOB_UNBOUNDED;
priv->job.start = now;
}
@ -2388,7 +2390,7 @@ endjob:
VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
}
if (vm) {
if (qemuDomainObjEndAsyncJob(vm) == 0) {
if (qemuDomainObjEndAsyncJob(driver, vm) == 0) {
vm = NULL;
} else if (!virDomainObjIsActive(vm) &&
(!vm->persistent || (flags & VIR_MIGRATE_UNDEFINE_SOURCE))) {
@ -2478,7 +2480,7 @@ qemuMigrationFinish(struct qemud_driver *driver,
_("domain '%s' is not processing incoming migration"), vm->def->name);
goto cleanup;
}
qemuDomainObjDiscardAsyncJob(vm);
qemuDomainObjDiscardAsyncJob(driver, vm);
if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, 0)))
goto cleanup;
@ -2526,7 +2528,7 @@ qemuMigrationFinish(struct qemud_driver *driver,
qemuProcessStop(driver, vm, 1, VIR_DOMAIN_SHUTOFF_FAILED);
virDomainAuditStop(vm, "failed");
if (newVM) {
if (qemuDomainObjEndJob(vm) > 0)
if (qemuDomainObjEndJob(driver, vm) > 0)
virDomainRemoveInactive(&driver->domains, vm);
vm = NULL;
}
@ -2575,7 +2577,7 @@ qemuMigrationFinish(struct qemud_driver *driver,
VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_FAILED);
if (!vm->persistent) {
if (qemuDomainObjEndJob(vm) > 0)
if (qemuDomainObjEndJob(driver, vm) > 0)
virDomainRemoveInactive(&driver->domains, vm);
vm = NULL;
}
@ -2611,7 +2613,7 @@ qemuMigrationFinish(struct qemud_driver *driver,
VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_FAILED);
if (!vm->persistent) {
if (qemuDomainObjEndJob(vm) > 0)
if (qemuDomainObjEndJob(driver, vm) > 0)
virDomainRemoveInactive(&driver->domains, vm);
vm = NULL;
}
@ -2622,7 +2624,7 @@ qemuMigrationFinish(struct qemud_driver *driver,
endjob:
if (vm &&
qemuDomainObjEndJob(vm) == 0)
qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
cleanup:

src/qemu/qemu_process.c

@ -377,7 +377,7 @@ qemuProcessFakeReboot(void *opaque)
VIR_DEBUG("vm=%p", vm);
qemuDriverLock(driver);
virDomainObjLock(vm);
if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0)
if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
@ -413,7 +413,7 @@ qemuProcessFakeReboot(void *opaque)
ret = 0;
endjob:
if (qemuDomainObjEndJob(vm) == 0)
if (qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
cleanup:
@ -3215,7 +3215,7 @@ static void qemuProcessAutoDestroyDom(void *payload,
if (priv->job.asyncJob) {
VIR_DEBUG("vm=%s has long-term job active, cancelling",
dom->def->name);
qemuDomainObjDiscardAsyncJob(dom);
qemuDomainObjDiscardAsyncJob(data->driver, dom);
}
if (qemuDomainObjBeginJobWithDriver(data->driver, dom,
@ -3228,7 +3228,7 @@ static void qemuProcessAutoDestroyDom(void *payload,
event = virDomainEventNewFromObj(dom,
VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_DESTROYED);
if (qemuDomainObjEndJob(dom) == 0)
if (qemuDomainObjEndJob(data->driver, dom) == 0)
dom = NULL;
if (dom && !dom->persistent)
virDomainRemoveInactive(&data->driver->domains, dom);