qemu: don't leak vm on failure

Failure to attach to a domain during 'virsh qemu-attach' left
the list of domains in an odd state:

$ virsh qemu-attach 4176
error: An error occurred, but the cause is unknown

$ virsh list --all
 Id    Name                           State
----------------------------------------------------
 2     foo                            shut off

$ virsh qemu-attach 4176
error: Requested operation is not valid: domain is already active as 'foo'

$ virsh undefine foo
error: Failed to undefine domain foo
error: Requested operation is not valid: cannot undefine transient domain

$ virsh shutdown foo
error: Failed to shutdown domain foo
error: invalid argument: monitor must not be NULL

It all stems from leaving the list of domains unmodified on
the initial failure; we should follow the lead of createXML
which removes vm on failure (the actual initial failure still
needs to be fixed in a later patch, but at least this patch
gets us to the point where we aren't getting stuck with an
unremovable "shut off" transient domain).
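
For reference, the error path that both functions gain from this patch has
the following shape (this is just an annotated excerpt of the diff below,
not additional code):

    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) {
        /* unregister the transient vm we just added, so a failed
         * attach/create no longer leaves an unremovable "shut off"
         * domain behind in the list */
        qemuDomainRemoveInactive(driver, vm);
        vm = NULL;
        goto cleanup;
    }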

While investigating, I also found a leak in qemuDomainCreateXML;
the two functions should behave similarly.  Note that there are
still two unusual paths: if dom is not allocated, the user will
see an OOM error even though the vm remains registered (but oom
errors already indicate tricky cleanup); and if the vm starts
and then quits again all before the job ends, it is possible
to return a non-NULL dom even though the dom will no longer be
useful for anything (but this at least lets the user know their
short-lived vm ran).
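
The second case comes from the post-patch tail of both functions, roughly
(again an annotated excerpt of the diff below; the comment paraphrases the
reasoning above):

    dom = virGetDomain(conn, vm->def->name, vm->def->uuid);
    if (dom)
        dom->id = vm->def->id;

    /* if the guest already quit, ending the job drops the last
     * reference, so vm is cleared before cleanup; the dom handle
     * created above is still handed back to the caller */
    if (qemuDomainObjEndJob(driver, vm) == 0)
        vm = NULL;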

* src/qemu/qemu_driver.c (qemuDomainCreateXML): Don't leak vm on
failure to obtain job.
(qemuDomainQemuAttach): Match cleanup of qemuDomainCreateXML.

Signed-off-by: Eric Blake <eblake@redhat.com>
commit d047b2d983
parent ea3534fc54
Author: Eric Blake <eblake@redhat.com>  2013-08-28 14:27:44 -06:00

src/qemu/qemu_driver.c

@@ -1602,8 +1602,11 @@ static virDomainPtr qemuDomainCreateXML(virConnectPtr conn,
 
     def = NULL;
 
-    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
-        goto cleanup; /* XXXX free the 'vm' we created ? */
+    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) {
+        qemuDomainRemoveInactive(driver, vm);
+        vm = NULL;
+        goto cleanup;
+    }
 
     if (qemuProcessStart(conn, driver, vm, NULL, -1, NULL, NULL,
                          VIR_NETDEV_VPORT_PROFILE_OP_CREATE,
@@ -1631,10 +1634,10 @@ static virDomainPtr qemuDomainCreateXML(virConnectPtr conn,
     virDomainAuditStart(vm, "booted", true);
 
     dom = virGetDomain(conn, vm->def->name, vm->def->uuid);
-    if (dom) dom->id = vm->def->id;
+    if (dom)
+        dom->id = vm->def->id;
 
-    if (vm &&
-        qemuDomainObjEndJob(driver, vm) == 0)
+    if (qemuDomainObjEndJob(driver, vm) == 0)
         vm = NULL;
 
 cleanup:
@@ -13630,34 +13633,38 @@ static virDomainPtr qemuDomainQemuAttach(virConnectPtr conn,
 
     def = NULL;
 
-    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) {
+        qemuDomainRemoveInactive(driver, vm);
+        vm = NULL;
         goto cleanup;
+    }
 
     if (qemuProcessAttach(conn, driver, vm, pid,
                           pidfile, monConfig, monJSON) < 0) {
+        if (qemuDomainObjEndJob(driver, vm) > 0)
+            qemuDomainRemoveInactive(driver, vm);
+        vm = NULL;
         monConfig = NULL;
-        goto endjob;
+        goto cleanup;
     }
 
     monConfig = NULL;
 
     dom = virGetDomain(conn, vm->def->name, vm->def->uuid);
-    if (dom) dom->id = vm->def->id;
+    if (dom)
+        dom->id = vm->def->id;
 
-endjob:
-    if (qemuDomainObjEndJob(driver, vm) == 0) {
+    if (qemuDomainObjEndJob(driver, vm) == 0)
         vm = NULL;
-        goto cleanup;
-    }
 
 cleanup:
     virDomainDefFree(def);
-    virObjectUnref(qemuCaps);
     virDomainChrSourceDefFree(monConfig);
     if (vm)
         virObjectUnlock(vm);
     VIR_FREE(pidfile);
     virObjectUnref(caps);
+    virObjectUnref(qemuCaps);
     return dom;
 }
 