qemu: completely rework reference counting

There is one problem that causes various errors in the daemon.  When a
domain is waiting for a job, it is unlocked while waiting on the
condition.  However, if that domain is, for example, transient and is
being removed by another API (e.g. one cancelling an incoming
migration), it gets unref'd.  If the first call, the one that was
waiting, then fails to get the job, it unrefs the domain object, and
because that was the last reference, the whole domain object is
disposed of.  When finishing the call, however, the domain must be
unlocked, but there is no way for the API to know whether the object
was already freed or not (unless we add some ugly temporary variable,
but let's scratch that).

The root cause is that our APIs don't take their own reference on the
objects they are using; they all rely on the implicit reference the
object holds while it is in the domain list.  That reference can be
removed while the API is waiting for a job.  And because the APIs don't
do their own ref'ing, we end up with the ugly checking of the return
value of virObjectUnref() that we have everywhere.
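
For illustration, the pattern being removed looked roughly like this
(a simplified sketch, not any one caller verbatim; see the
qemuProcessFakeReboot hunks below for a real instance):

    /* Old pattern: the API borrows the reference held by the domain
     * list, so once the lock is dropped while waiting for a job the
     * object may be disposed of at any time. */
    vm = virDomainFindByUUID(driver->domains, dom->uuid);

    ...do work...

    if (!qemuDomainObjEndJob(driver, vm))
        vm = NULL;              /* last reference gone, vm was freed */

    if (vm)
        virObjectUnlock(vm);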

This patch changes qemuDomObjFromDomain() to ref the domain (using
virDomainObjListFindByUUIDRef()) and adds qemuDomObjEndAPI(), which
should be the only function in which the return value of
virObjectUnref() is checked.  This makes all reference counting
deterministic and makes the code a bit clearer.
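
With this change every API holds its own reference for the whole call
(again a simplified sketch, mirroring the design patterns in the
THREADS.txt hunks below):

    /* New pattern: qemuDomObjFromDomain() returns the domain locked
     * and with one extra reference, so it cannot disappear while the
     * lock is temporarily dropped.  qemuDomObjEndAPI() unlocks it,
     * drops that reference and clears the pointer. */
    if (!(vm = qemuDomObjFromDomain(dom)))
        return -1;

    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
        goto cleanup;

    ...do work...

    qemuDomainObjEndJob(driver, vm);

 cleanup:
    qemuDomObjEndAPI(&vm);      /* vm is NULL from here on */
    return ret;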

Signed-off-by: Martin Kletzander <mkletzan@redhat.com>
Author: Martin Kletzander  2014-12-04 14:41:36 +01:00
parent 3b0f05573f
commit 540c339a25
8 changed files with 375 additions and 638 deletions


@@ -26,12 +26,20 @@ There are a number of locks on various objects
* virDomainObjPtr
Will be locked after calling any of the virDomainFindBy{ID,Name,UUID}
methods.
methods. However, the preferred method is qemuDomObjFromDomain(), which
uses virDomainFindByUUIDRef(); that also increases the reference counter
and finds the domain in the domain list without blocking all other
lookups. When the domain is locked and the reference increased, the
preferred way of decrementing the reference counter and unlocking the
domain is the qemuDomObjEndAPI() function.
Lock must be held when changing/reading any variable in the virDomainObjPtr
If the lock needs to be dropped & then re-acquired for a short period of
time, the reference count must be incremented first using virDomainObjRef().
There is no need to increase the reference count if qemuDomObjFromDomain()
was used for looking up the domain. In this case there is one reference
already added by that function.
This lock must not be held for anything which sleeps/waits (i.e. monitor
commands).
@@ -109,7 +117,6 @@ To lock the virDomainObjPtr
To acquire the normal job condition
qemuDomainObjBeginJob()
- Increments ref count on virDomainObjPtr
- Waits until the job is compatible with current async job or no
async job is running
- Waits for job.cond condition 'job.active != 0' using virDomainObjPtr
@@ -122,14 +129,12 @@ To acquire the normal job condition
qemuDomainObjEndJob()
- Sets job.active to 0
- Signals on job.cond condition
- Decrements ref count on virDomainObjPtr
To acquire the asynchronous job condition
qemuDomainObjBeginAsyncJob()
- Increments ref count on virDomainObjPtr
- Waits until no async job is running
- Waits for job.cond condition 'job.active != 0' using virDomainObjPtr
mutex
@@ -141,7 +146,6 @@ To acquire the asynchronous job condition
qemuDomainObjEndAsyncJob()
- Sets job.asyncJob to 0
- Broadcasts on job.asyncCond condition
- Decrements ref count on virDomainObjPtr
@@ -179,12 +183,10 @@ To acquire the QEMU monitor lock as part of an asynchronous job
To keep a domain alive while waiting on a remote command
qemuDomainObjEnterRemote()
- Increments ref count on virDomainObjPtr
- Releases the virDomainObjPtr lock
qemuDomainObjExitRemote()
- Acquires the virDomainObjPtr lock
- Decrements ref count on virDomainObjPtr
Design patterns
@@ -195,18 +197,18 @@ Design patterns
virDomainObjPtr obj;
obj = virDomainFindByUUID(driver->domains, dom->uuid);
obj = qemuDomObjFromDomain(dom);
...do work...
virDomainObjUnlock(obj);
qemuDomObjEndAPI(&obj);
* Updating something directly to do with a virDomainObjPtr
virDomainObjPtr obj;
obj = virDomainFindByUUID(driver->domains, dom->uuid);
obj = qemuDomObjFromDomain(dom);
qemuDomainObjBeginJob(obj, QEMU_JOB_TYPE);
@@ -214,18 +216,15 @@ Design patterns
qemuDomainObjEndJob(obj);
virDomainObjUnlock(obj);
qemuDomObjEndAPI(&obj);
* Invoking a monitor command on a virDomainObjPtr
virDomainObjPtr obj;
qemuDomainObjPrivatePtr priv;
obj = virDomainFindByUUID(driver->domains, dom->uuid);
obj = qemuDomObjFromDomain(dom);
qemuDomainObjBeginJob(obj, QEMU_JOB_TYPE);
@@ -240,8 +239,7 @@ Design patterns
...do final work...
qemuDomainObjEndJob(obj);
virDomainObjUnlock(obj);
qemuDomObjEndAPI(&obj);
* Running asynchronous job
@@ -249,7 +247,7 @@ Design patterns
virDomainObjPtr obj;
qemuDomainObjPrivatePtr priv;
obj = virDomainFindByUUID(driver->domains, dom->uuid);
obj = qemuDomObjFromDomain(dom);
qemuDomainObjBeginAsyncJob(obj, QEMU_ASYNC_JOB_TYPE);
qemuDomainObjSetAsyncJobMask(obj, allowedJobs);
@@ -281,7 +279,7 @@ Design patterns
...do final work...
qemuDomainObjEndAsyncJob(obj);
virDomainObjUnlock(obj);
qemuDomObjEndAPI(&obj);
* Coordinating with a remote server for migration
@@ -289,7 +287,7 @@ Design patterns
virDomainObjPtr obj;
qemuDomainObjPrivatePtr priv;
obj = virDomainFindByUUID(driver->domains, dom->uuid);
obj = qemuDomObjFromDomain(dom);
qemuDomainObjBeginAsyncJob(obj, QEMU_ASYNC_JOB_TYPE);
@@ -309,4 +307,4 @@ Design patterns
...do final work...
qemuDomainObjEndAsyncJob(obj);
virDomainObjUnlock(obj);
qemuDomObjEndAPI(&obj);


@@ -1319,8 +1319,6 @@ qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver,
priv->jobs_queued++;
then = now + QEMU_JOB_WAIT_TIME;
virObjectRef(obj);
retry:
if (cfg->maxQueuedJobs &&
priv->jobs_queued > cfg->maxQueuedJobs) {
@@ -1399,7 +1397,6 @@ qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver,
cleanup:
priv->jobs_queued--;
virObjectUnref(obj);
virObjectUnref(cfg);
return ret;
}
@@ -1410,8 +1407,7 @@ qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver,
* This must be called by anything that will change the VM state
* in any way, or anything that will use the QEMU monitor.
*
* Upon successful return, the object will have its ref count increased,
* successful calls must be followed by EndJob eventually
* Successful calls must be followed by EndJob eventually
*/
int qemuDomainObjBeginJob(virQEMUDriverPtr driver,
virDomainObjPtr obj,
@@ -1460,15 +1456,13 @@ qemuDomainObjBeginNestedJob(virQEMUDriverPtr driver,
/*
* obj must be locked before calling
* obj must be locked and have a reference before calling
*
* To be called after completing the work associated with the
* earlier qemuDomainBeginJob() call
*
* Returns true if @obj was still referenced, false if it was
* disposed of.
*/
bool qemuDomainObjEndJob(virQEMUDriverPtr driver, virDomainObjPtr obj)
void
qemuDomainObjEndJob(virQEMUDriverPtr driver, virDomainObjPtr obj)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
qemuDomainJob job = priv->job.active;
@@ -1484,11 +1478,9 @@ bool qemuDomainObjEndJob(virQEMUDriverPtr driver, virDomainObjPtr obj)
if (qemuDomainTrackJob(job))
qemuDomainObjSaveJob(driver, obj);
virCondSignal(&priv->job.cond);
return virObjectUnref(obj);
}
bool
void
qemuDomainObjEndAsyncJob(virQEMUDriverPtr driver, virDomainObjPtr obj)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
@@ -1502,8 +1494,6 @@ qemuDomainObjEndAsyncJob(virQEMUDriverPtr driver, virDomainObjPtr obj)
qemuDomainObjResetAsyncJob(priv);
qemuDomainObjSaveJob(driver, obj);
virCondBroadcast(&priv->job.asyncCond);
return virObjectUnref(obj);
}
void
@@ -1541,8 +1531,7 @@ qemuDomainObjEnterMonitorInternal(virQEMUDriverPtr driver,
if (!virDomainObjIsActive(obj)) {
virReportError(VIR_ERR_OPERATION_FAILED, "%s",
_("domain is no longer running"));
/* Still referenced by the containing async job. */
ignore_value(qemuDomainObjEndJob(driver, obj));
qemuDomainObjEndJob(driver, obj);
return -1;
}
} else if (priv->job.asyncOwner == virThreadSelfID()) {
@@ -1681,7 +1670,6 @@ void qemuDomainObjEnterRemote(virDomainObjPtr obj)
{
VIR_DEBUG("Entering remote (vm=%p name=%s)",
obj, obj->def->name);
virObjectRef(obj);
virObjectUnlock(obj);
}
@@ -1690,7 +1678,6 @@ void qemuDomainObjExitRemote(virDomainObjPtr obj)
virObjectLock(obj);
VIR_DEBUG("Exited remote (vm=%p name=%s)",
obj, obj->def->name);
virObjectUnref(obj);
}
@@ -2391,8 +2378,7 @@ qemuDomainSnapshotDiscardAllMetadata(virQEMUDriverPtr driver,
}
/*
* The caller must hold a lock on the vm and there must
* be no remaining references to vm.
* The caller must hold a lock on the vm.
*/
void
qemuDomainRemoveInactive(virQEMUDriverPtr driver,
@@ -2423,7 +2409,7 @@ qemuDomainRemoveInactive(virQEMUDriverPtr driver,
virObjectUnref(cfg);
if (haveJob)
ignore_value(qemuDomainObjEndJob(driver, vm));
qemuDomainObjEndJob(driver, vm);
}
void
@@ -2812,3 +2798,22 @@ qemuDomainAgentAvailable(qemuDomainObjPrivatePtr priv,
}
return true;
}
/*
* Finish working with a domain object in an API. This function
* clears whatever was left of a domain that was gathered using
* qemuDomObjFromDomain(). Currently that means only unlocking and
* decrementing the reference counter of that domain. And in order to
* make sure the caller does not access the domain, the pointer is
* cleared.
*/
void
qemuDomObjEndAPI(virDomainObjPtr *vm)
{
if (!*vm)
return;
virObjectUnlock(*vm);
virObjectUnref(*vm);
*vm = NULL;
}


@@ -226,12 +226,10 @@ int qemuDomainObjBeginAsyncJob(virQEMUDriverPtr driver,
qemuDomainAsyncJob asyncJob)
ATTRIBUTE_RETURN_CHECK;
bool qemuDomainObjEndJob(virQEMUDriverPtr driver,
virDomainObjPtr obj)
ATTRIBUTE_RETURN_CHECK;
bool qemuDomainObjEndAsyncJob(virQEMUDriverPtr driver,
virDomainObjPtr obj)
ATTRIBUTE_RETURN_CHECK;
void qemuDomainObjEndJob(virQEMUDriverPtr driver,
virDomainObjPtr obj);
void qemuDomainObjEndAsyncJob(virQEMUDriverPtr driver,
virDomainObjPtr obj);
void qemuDomainObjAbortAsyncJob(virDomainObjPtr obj);
void qemuDomainObjSetJobPhase(virQEMUDriverPtr driver,
virDomainObjPtr obj,
@@ -413,4 +411,6 @@ int qemuDomainJobInfoToParams(qemuDomainJobInfoPtr jobInfo,
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2)
ATTRIBUTE_NONNULL(3) ATTRIBUTE_NONNULL(4);
void qemuDomObjEndAPI(virDomainObjPtr *vm);
#endif /* __QEMU_DOMAIN_H__ */

File diff suppressed because it is too large


@@ -2737,31 +2737,20 @@ qemuMigrationBegin(virConnectPtr conn,
if (virCloseCallbacksSet(driver->closeCallbacks, vm, conn,
qemuMigrationCleanup) < 0)
goto endjob;
if (qemuMigrationJobContinue(vm) == 0) {
vm = NULL;
virReportError(VIR_ERR_OPERATION_FAILED,
"%s", _("domain disappeared"));
VIR_FREE(xml);
if (cookieout)
VIR_FREE(*cookieout);
}
qemuMigrationJobContinue(vm);
} else {
goto endjob;
}
cleanup:
if (vm)
virObjectUnlock(vm);
qemuDomObjEndAPI(&vm);
return xml;
endjob:
if ((flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
if (qemuMigrationJobFinish(driver, vm) == 0)
vm = NULL;
} else {
if (!qemuDomainObjEndJob(driver, vm))
vm = NULL;
}
if (flags & VIR_MIGRATE_CHANGE_PROTECTION)
qemuMigrationJobFinish(driver, vm);
else
qemuDomainObjEndJob(driver, vm);
goto cleanup;
}
@@ -2974,6 +2963,7 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
NULL)))
goto cleanup;
virObjectRef(vm);
*def = NULL;
priv = vm->privateData;
if (VIR_STRDUP(priv->origname, origname) < 0)
@@ -3100,12 +3090,7 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
* This prevents any other APIs being invoked while incoming
* migration is taking place.
*/
if (!qemuMigrationJobContinue(vm)) {
vm = NULL;
virReportError(VIR_ERR_OPERATION_FAILED,
"%s", _("domain disappeared"));
goto cleanup;
}
qemuMigrationJobContinue(vm);
if (autoPort)
priv->migrationPort = port;
@@ -3116,16 +3101,12 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
VIR_FREE(xmlout);
VIR_FORCE_CLOSE(dataFD[0]);
VIR_FORCE_CLOSE(dataFD[1]);
if (vm) {
if (ret < 0) {
virPortAllocatorRelease(driver->migrationPorts, priv->nbdPort);
priv->nbdPort = 0;
}
if (ret >= 0 || vm->persistent)
virObjectUnlock(vm);
else
qemuDomainRemoveInactive(driver, vm);
if (ret < 0) {
virPortAllocatorRelease(driver->migrationPorts, priv->nbdPort);
priv->nbdPort = 0;
qemuDomainRemoveInactive(driver, vm);
}
qemuDomObjEndAPI(&vm);
if (event)
qemuDomainEventQueue(driver, event);
qemuMigrationCookieFree(mig);
@@ -3138,8 +3119,7 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, 0);
endjob:
if (!qemuMigrationJobFinish(driver, vm))
vm = NULL;
qemuMigrationJobFinish(driver, vm);
goto cleanup;
}
@@ -3508,19 +3488,16 @@ qemuMigrationConfirm(virConnectPtr conn,
cookiein, cookieinlen,
flags, cancelled);
if (qemuMigrationJobFinish(driver, vm) == 0) {
vm = NULL;
} else if (!virDomainObjIsActive(vm) &&
(!vm->persistent || (flags & VIR_MIGRATE_UNDEFINE_SOURCE))) {
qemuMigrationJobFinish(driver, vm);
if (!virDomainObjIsActive(vm) &&
(!vm->persistent || (flags & VIR_MIGRATE_UNDEFINE_SOURCE))) {
if (flags & VIR_MIGRATE_UNDEFINE_SOURCE)
virDomainDeleteConfig(cfg->configDir, cfg->autostartDir, vm);
qemuDomainRemoveInactive(driver, vm);
vm = NULL;
}
cleanup:
if (vm)
virObjectUnlock(vm);
qemuDomObjEndAPI(&vm);
virObjectUnref(cfg);
return ret;
}
@@ -4878,15 +4855,13 @@ qemuMigrationPerformJob(virQEMUDriverPtr driver,
VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
}
if (!qemuMigrationJobFinish(driver, vm)) {
vm = NULL;
} else if (!virDomainObjIsActive(vm) &&
(!vm->persistent ||
(ret == 0 && (flags & VIR_MIGRATE_UNDEFINE_SOURCE)))) {
qemuMigrationJobFinish(driver, vm);
if (!virDomainObjIsActive(vm) &&
(!vm->persistent ||
(ret == 0 && (flags & VIR_MIGRATE_UNDEFINE_SOURCE)))) {
if (flags & VIR_MIGRATE_UNDEFINE_SOURCE)
virDomainDeleteConfig(cfg->configDir, cfg->autostartDir, vm);
qemuDomainRemoveInactive(driver, vm);
vm = NULL;
}
if (orig_err) {
@@ -4895,8 +4870,7 @@ qemuMigrationPerformJob(virQEMUDriverPtr driver,
}
cleanup:
if (vm)
virObjectUnlock(vm);
qemuDomObjEndAPI(&vm);
if (event)
qemuDomainEventQueue(driver, event);
virObjectUnref(cfg);
@@ -4921,7 +4895,6 @@ qemuMigrationPerformPhase(virQEMUDriverPtr driver,
{
virObjectEventPtr event = NULL;
int ret = -1;
bool hasrefs;
/* If we didn't start the job in the begin phase, start it now. */
if (!(flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
@@ -4956,19 +4929,14 @@ qemuMigrationPerformPhase(virQEMUDriverPtr driver,
endjob:
if (ret < 0)
hasrefs = qemuMigrationJobFinish(driver, vm);
qemuMigrationJobFinish(driver, vm);
else
hasrefs = qemuMigrationJobContinue(vm);
if (!hasrefs) {
vm = NULL;
} else if (!virDomainObjIsActive(vm) && !vm->persistent) {
qemuMigrationJobContinue(vm);
if (!virDomainObjIsActive(vm) && !vm->persistent)
qemuDomainRemoveInactive(driver, vm);
vm = NULL;
}
cleanup:
if (vm)
virObjectUnlock(vm);
qemuDomObjEndAPI(&vm);
if (event)
qemuDomainEventQueue(driver, event);
return ret;
@@ -5302,21 +5270,16 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
VIR_WARN("Unable to encode migration cookie");
endjob:
if (qemuMigrationJobFinish(driver, vm) == 0) {
vm = NULL;
} else if (!vm->persistent && !virDomainObjIsActive(vm)) {
qemuMigrationJobFinish(driver, vm);
if (!vm->persistent && !virDomainObjIsActive(vm))
qemuDomainRemoveInactive(driver, vm);
vm = NULL;
}
cleanup:
virPortAllocatorRelease(driver->migrationPorts, port);
if (vm) {
if (priv->mon)
qemuMonitorSetDomainLog(priv->mon, -1);
VIR_FREE(priv->origname);
virObjectUnlock(vm);
}
if (priv->mon)
qemuMonitorSetDomainLog(priv->mon, -1);
VIR_FREE(priv->origname);
qemuDomObjEndAPI(&vm);
if (event)
qemuDomainEventQueue(driver, event);
qemuMigrationCookieFree(mig);
@@ -5564,15 +5527,13 @@ qemuMigrationJobStartPhase(virQEMUDriverPtr driver,
virDomainObjPtr vm,
qemuMigrationJobPhase phase)
{
virObjectRef(vm);
qemuMigrationJobSetPhase(driver, vm, phase);
}
bool
void
qemuMigrationJobContinue(virDomainObjPtr vm)
{
qemuDomainObjReleaseAsyncJob(vm);
return virObjectUnref(vm);
}
bool
@@ -5595,8 +5556,8 @@ qemuMigrationJobIsActive(virDomainObjPtr vm,
return true;
}
bool
void
qemuMigrationJobFinish(virQEMUDriverPtr driver, virDomainObjPtr vm)
{
return qemuDomainObjEndAsyncJob(driver, vm);
qemuDomainObjEndAsyncJob(driver, vm);
}


@@ -1,7 +1,7 @@
/*
* qemu_migration.h: QEMU migration handling
*
* Copyright (C) 2006-2011 Red Hat, Inc.
* Copyright (C) 2006-2011, 2014 Red Hat, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -82,13 +82,13 @@ void qemuMigrationJobStartPhase(virQEMUDriverPtr driver,
virDomainObjPtr vm,
qemuMigrationJobPhase phase)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
bool qemuMigrationJobContinue(virDomainObjPtr obj)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_RETURN_CHECK;
void qemuMigrationJobContinue(virDomainObjPtr obj)
ATTRIBUTE_NONNULL(1);
bool qemuMigrationJobIsActive(virDomainObjPtr vm,
qemuDomainAsyncJob job)
ATTRIBUTE_NONNULL(1);
bool qemuMigrationJobFinish(virQEMUDriverPtr driver, virDomainObjPtr obj)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) ATTRIBUTE_RETURN_CHECK;
void qemuMigrationJobFinish(virQEMUDriverPtr driver, virDomainObjPtr obj)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
int qemuMigrationSetOffline(virQEMUDriverPtr driver,
virDomainObjPtr vm);


@@ -572,6 +572,7 @@ qemuProcessFakeReboot(void *opaque)
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
virDomainRunningReason reason = VIR_DOMAIN_RUNNING_BOOTED;
int ret = -1;
VIR_DEBUG("vm=%p", vm);
virObjectLock(vm);
if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
@@ -620,16 +621,12 @@ qemuProcessFakeReboot(void *opaque)
ret = 0;
endjob:
if (!qemuDomainObjEndJob(driver, vm))
vm = NULL;
qemuDomainObjEndJob(driver, vm);
cleanup:
if (vm) {
if (ret == -1)
ignore_value(qemuProcessKill(vm, VIR_QEMU_PROCESS_KILL_FORCE));
if (virObjectUnref(vm))
virObjectUnlock(vm);
}
if (ret == -1)
ignore_value(qemuProcessKill(vm, VIR_QEMU_PROCESS_KILL_FORCE));
qemuDomObjEndAPI(&vm);
if (event)
qemuDomainEventQueue(driver, event);
virObjectUnref(cfg);
@@ -1447,7 +1444,7 @@ qemuProcessHandleGuestPanic(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
cleanup:
if (vm)
virObjectUnlock(vm);
virObjectUnlock(vm);
return 0;
}
@@ -3575,7 +3572,7 @@ struct qemuProcessReconnectData {
* this thread function has increased the reference counter to it
* so that we now have to close it.
*
* This function also inherits a locked domain object.
* This function also inherits a locked and ref'd domain object.
*
* This function needs to:
* 1. Enter job
@@ -3608,10 +3605,6 @@ qemuProcessReconnect(void *opaque)
qemuDomainObjRestoreJob(obj, &oldjob);
/* Hold an extra reference because we can't allow 'vm' to be
* deleted if qemuConnectMonitor() failed */
virObjectRef(obj);
cfg = virQEMUDriverGetConfig(driver);
priv = obj->privateData;
@@ -3700,7 +3693,8 @@ qemuProcessReconnect(void *opaque)
VIR_DEBUG("Finishing shutdown sequence for domain %s",
obj->def->name);
qemuProcessShutdownOrReboot(driver, obj);
goto endjob;
qemuDomainObjEndJob(driver, obj);
goto cleanup;
}
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE))
@@ -3752,23 +3746,11 @@ qemuProcessReconnect(void *opaque)
if (virAtomicIntInc(&driver->nactive) == 1 && driver->inhibitCallback)
driver->inhibitCallback(true, driver->inhibitOpaque);
endjob:
/* we hold an extra reference, so this will never fail */
ignore_value(qemuDomainObjEndJob(driver, obj));
if (virObjectUnref(obj))
virObjectUnlock(obj);
virObjectUnref(conn);
virObjectUnref(cfg);
virNWFilterUnlockFilterUpdates();
return;
qemuDomainObjEndJob(driver, obj);
goto cleanup;
error:
/* we hold an extra reference, so this will never fail */
ignore_value(qemuDomainObjEndJob(driver, obj));
qemuDomainObjEndJob(driver, obj);
killvm:
if (virDomainObjIsActive(obj)) {
/* We can't get the monitor back, so must kill the VM
@@ -3788,13 +3770,11 @@ qemuProcessReconnect(void *opaque)
qemuProcessStop(driver, obj, state, 0);
}
if (virObjectUnref(obj)) {
if (!obj->persistent)
qemuDomainRemoveInactive(driver, obj);
else
virObjectUnlock(obj);
}
if (!obj->persistent)
qemuDomainRemoveInactive(driver, obj);
cleanup:
qemuDomObjEndAPI(&obj);
virObjectUnref(conn);
virObjectUnref(cfg);
virNWFilterUnlockFilterUpdates();
@@ -3818,9 +3798,10 @@ qemuProcessReconnectHelper(virDomainObjPtr obj,
memcpy(data, src, sizeof(*data));
data->obj = obj;
/* this lock will be eventually transferred to the thread that handles the
* reconnect */
/* this lock and reference will be eventually transferred to the thread
* that handles the reconnect */
virObjectLock(obj);
virObjectRef(obj);
/* Since we close the connection later on, we have to make sure that the
* threads we start see a valid connection throughout their lifetime. We
@@ -3836,9 +3817,8 @@ qemuProcessReconnectHelper(virDomainObjPtr obj,
qemuProcessStop(src->driver, obj, VIR_DOMAIN_SHUTOFF_FAILED, 0);
if (!obj->persistent)
qemuDomainRemoveInactive(src->driver, obj);
else
virObjectUnlock(obj);
qemuDomObjEndAPI(&obj);
virObjectUnref(data->conn);
VIR_FREE(data);
return -1;
@@ -5505,12 +5485,11 @@ qemuProcessAutoDestroy(virDomainObjPtr dom,
VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_DESTROYED);
if (!qemuDomainObjEndJob(driver, dom))
dom = NULL;
if (dom && !dom->persistent) {
qemuDomainObjEndJob(driver, dom);
if (!dom->persistent)
qemuDomainRemoveInactive(driver, dom);
dom = NULL;
}
if (event)
qemuDomainEventQueue(driver, event);
@@ -5530,9 +5509,11 @@ int qemuProcessAutoDestroyAdd(virQEMUDriverPtr driver,
int qemuProcessAutoDestroyRemove(virQEMUDriverPtr driver,
virDomainObjPtr vm)
{
int ret;
VIR_DEBUG("vm=%s", vm->def->name);
return virCloseCallbacksUnset(driver->closeCallbacks, vm,
qemuProcessAutoDestroy);
ret = virCloseCallbacksUnset(driver->closeCallbacks, vm,
qemuProcessAutoDestroy);
return ret;
}
bool qemuProcessAutoDestroyActive(virQEMUDriverPtr driver,


@@ -138,6 +138,7 @@ virCloseCallbacksSet(virCloseCallbacksPtr closeCallbacks,
VIR_FREE(closeDef);
goto cleanup;
}
virObjectRef(vm);
}
ret = 0;
@@ -172,7 +173,11 @@ virCloseCallbacksUnset(virCloseCallbacksPtr closeCallbacks,
goto cleanup;
}
ret = virHashRemoveEntry(closeCallbacks->list, uuidstr);
if (virHashRemoveEntry(closeCallbacks->list, uuidstr) < 0)
goto cleanup;
virObjectUnref(vm);
ret = 0;
cleanup:
virObjectUnlock(closeCallbacks);
return ret;
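
Taken together, the last two hunks move one reference into the close
callback registration itself (a sketch of the resulting contract, as
far as it can be read from the hunks above):

    /* virCloseCallbacksSet(cbs, vm, conn, cb)  - on success takes a new
     *     reference on vm, so a registered callback can never run on a
     *     freed domain.
     * virCloseCallbacksUnset(cbs, vm, cb)      - on success removes the
     *     entry and drops that reference again. */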