Mirror of https://gitlab.com/libvirt/libvirt.git (synced 2024-12-22 05:35:25 +00:00)
qemu: Resolve Coverity RESOURCE_LEAK
This seemed to be more of a false positive: for some reason Coverity was missing the "ret < 0" goto error condition and believed that event could be overwritten. At first I thought it was just the ret != 0 condition difference, but it wasn't. In any case, make use of the recent change to qemuDomainEventQueue to check event == NULL and just pass the event as a parameter directly in the error path. That avoids the error.

Signed-off-by: John Ferlan <jferlan@redhat.com>
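For context, a minimal standalone sketch of the pattern this change relies on (the DemoEvent type and demo* helpers below are stand-ins, not libvirt code): when the queue function itself tolerates a NULL event, the caller can construct and queue the event in a single expression on the error path, so there is no local event variable left for a RESOURCE_LEAK checker to worry about.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for virObjectEventPtr. */
typedef struct { const char *detail; } DemoEvent;

/* Stand-in constructor: may return NULL on allocation failure. */
static DemoEvent *demoEventNew(const char *detail)
{
    DemoEvent *ev = malloc(sizeof(*ev));
    if (ev)
        ev->detail = detail;
    return ev;
}

/* Stand-in for qemuDomainEventQueue after the "check event == NULL"
 * change: a NULL event is silently ignored, so callers need no guard. */
static void demoEventQueue(DemoEvent *event)
{
    if (!event)
        return;
    printf("queued event: %s\n", event->detail);
    free(event);  /* the queue takes ownership of the event */
}

int main(void)
{
    /* Old pattern: build the event into a local variable and queue it
     * later; any early "goto error" between those two steps leaks it,
     * which is the path a RESOURCE_LEAK checker flags.
     *
     * New pattern: construct and queue in one expression right where
     * the error is handled; no local is left to leak or overwrite. */
    demoEventQueue(demoEventNew("SUSPENDED_API_ERROR"));
    return 0;
}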
parent 83cbbbef45
commit a73c67b6cc
@@ -3175,7 +3175,6 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver, virDomainPtr dom,
     char *xml = NULL;
     bool was_running = false;
     int ret = -1;
     int rc;
-    virObjectEventPtr event = NULL;
     qemuDomainObjPrivatePtr priv = vm->privateData;
     virCapsPtr caps;
@@ -3256,14 +3255,14 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver, virDomainPtr dom,
     if (ret < 0) {
         if (was_running && virDomainObjIsActive(vm)) {
             virErrorPtr save_err = virSaveLastError();
-            rc = qemuProcessStartCPUs(driver, vm, dom->conn,
-                                      VIR_DOMAIN_RUNNING_SAVE_CANCELED,
-                                      QEMU_ASYNC_JOB_SAVE);
-            if (rc < 0) {
+            if (qemuProcessStartCPUs(driver, vm, dom->conn,
+                                     VIR_DOMAIN_RUNNING_SAVE_CANCELED,
+                                     QEMU_ASYNC_JOB_SAVE) < 0) {
                 VIR_WARN("Unable to resume guest CPUs after save failure");
-                event = virDomainEventLifecycleNewFromObj(vm,
-                                                          VIR_DOMAIN_EVENT_SUSPENDED,
-                                                          VIR_DOMAIN_EVENT_SUSPENDED_API_ERROR);
+                qemuDomainEventQueue(driver,
+                                     virDomainEventLifecycleNewFromObj(vm,
+                                              VIR_DOMAIN_EVENT_SUSPENDED,
+                                              VIR_DOMAIN_EVENT_SUSPENDED_API_ERROR));
             }
             virSetError(save_err);
             virFreeError(save_err);