Mirror of https://gitlab.com/libvirt/libvirt.git (synced 2024-12-22 05:35:25 +00:00)
qemu: Pass / fill niothreads for qemuMonitorGetIOThreads
Let's pass along / fill @niothreads rather than trying to make dual use of the return value as both an error indicator and a thread count. This resolves a Coverity issue detected in qemuDomainGetIOThreadsMon, where a failing qemuDomainObjExitMonitor returned -1 and overwrote @niothreads, causing a memory leak.

Signed-off-by: John Ferlan <jferlan@redhat.com>
Reviewed-by: Michal Privoznik <mprivozn@redhat.com>
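The bug class the message describes deserves spelling out: when a single int doubles as both the error code and the element count, a failure that happens after the array has been populated overwrites the count with -1, so the caller's per-element cleanup loop runs zero times and the allocations leak. Below is a minimal standalone sketch of the old and new shapes; all names here (exit_monitor, fill, get_items_old, get_items_new) are hypothetical illustrations, not libvirt code.

#include <stdlib.h>

/* Hypothetical stand-in for qemuDomainObjExitMonitor() failing late. */
static int exit_monitor(void) { return -1; }

/* Allocate an array of n heap-allocated ints, standing in for the
 * IOThreadInfoPtr array that qemuMonitorGetIOThreads hands back. */
static int fill(int ***items, int n)
{
    int i;
    if (!(*items = calloc(n, sizeof(int *))))
        return -1;
    for (i = 0; i < n; i++)
        (*items)[i] = calloc(1, sizeof(int));
    return 0;
}

/* Old shape: the return value is both count and error code. The array
 * is already populated when exit_monitor() fails, but the caller sees
 * only -1 and cannot run its per-element frees: the elements leak. */
static int get_items_old(int ***items)
{
    int n = 3;
    if (fill(items, n) < 0)
        return -1;
    if (exit_monitor() < 0)
        return -1;              /* the count n is lost here */
    return n;
}

/* New shape: status in the return value, count in an out parameter
 * that is filled before the step that can fail. */
static int get_items_new(int ***items, int *nitems)
{
    if (fill(items, 3) < 0)
        return -1;
    *nitems = 3;
    if (exit_monitor() < 0)
        return -1;              /* caller still knows how many to free */
    return 0;
}

int main(void)
{
    int **items = NULL;
    int nitems = 0;
    int i;

    (void)get_items_old;        /* shown for contrast only */

    /* Even on failure, nitems survived, so cleanup can be complete. */
    if (get_items_new(&items, &nitems) < 0 && items) {
        for (i = 0; i < nitems; i++)
            free(items[i]);
        free(items);
    }
    return 0;
}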
This commit is contained in:
parent 9801f91a8e
commit 148cfcf051
src/qemu/qemu_driver.c
@@ -4972,17 +4972,18 @@ qemuDomainGetMaxVcpus(virDomainPtr dom)
 static int
 qemuDomainGetIOThreadsMon(virQEMUDriverPtr driver,
                           virDomainObjPtr vm,
-                          qemuMonitorIOThreadInfoPtr **iothreads)
+                          qemuMonitorIOThreadInfoPtr **iothreads,
+                          int *niothreads)
 {
     qemuDomainObjPrivatePtr priv = vm->privateData;
-    int niothreads = 0;
+    int ret = -1;

     qemuDomainObjEnterMonitor(driver, vm);
-    niothreads = qemuMonitorGetIOThreads(priv->mon, iothreads);
-    if (qemuDomainObjExitMonitor(driver, vm) < 0 || niothreads < 0)
+    ret = qemuMonitorGetIOThreads(priv->mon, iothreads, niothreads);
+    if (qemuDomainObjExitMonitor(driver, vm) < 0)
         return -1;

-    return niothreads;
+    return ret;
 }
@@ -5014,7 +5015,7 @@ qemuDomainGetIOThreadsLive(virQEMUDriverPtr driver,
         goto endjob;
     }

-    if ((niothreads = qemuDomainGetIOThreadsMon(driver, vm, &iothreads)) < 0)
+    if ((ret = qemuDomainGetIOThreadsMon(driver, vm, &iothreads, &niothreads)) < 0)
         goto endjob;

     /* Nothing to do */
@@ -5314,8 +5315,7 @@ qemuDomainHotplugAddIOThread(virQEMUDriverPtr driver,
      * IOThreads thread_id's, adjust the cgroups, thread affinity,
      * and add the thread_id to the vm->def->iothreadids list.
      */
-    if ((new_niothreads = qemuMonitorGetIOThreads(priv->mon,
-                                                  &new_iothreads)) < 0)
+    if (qemuMonitorGetIOThreads(priv->mon, &new_iothreads, &new_niothreads) < 0)
         goto exit_monitor;

     if (qemuDomainObjExitMonitor(driver, vm) < 0)
@@ -5425,8 +5425,7 @@ qemuDomainHotplugDelIOThread(virQEMUDriverPtr driver,
     if (rc < 0)
         goto exit_monitor;

-    if ((new_niothreads = qemuMonitorGetIOThreads(priv->mon,
-                                                  &new_iothreads)) < 0)
+    if (qemuMonitorGetIOThreads(priv->mon, &new_iothreads, &new_niothreads) < 0)
         goto exit_monitor;

     if (qemuDomainObjExitMonitor(driver, vm) < 0)
@@ -18507,7 +18506,7 @@ qemuDomainGetStatsIOThread(virQEMUDriverPtr driver,
     qemuDomainObjPrivatePtr priv = dom->privateData;
     size_t i;
     qemuMonitorIOThreadInfoPtr *iothreads = NULL;
-    int niothreads;
+    int niothreads = 0;
     int ret = -1;

     if (!HAVE_JOB(privflags) || !virDomainObjIsActive(dom))
@@ -18516,7 +18515,7 @@ qemuDomainGetStatsIOThread(virQEMUDriverPtr driver,
     if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_OBJECT_IOTHREAD))
         return 0;

-    if ((niothreads = qemuDomainGetIOThreadsMon(driver, dom, &iothreads)) < 0)
+    if (qemuDomainGetIOThreadsMon(driver, dom, &iothreads, &niothreads) < 0)
         return -1;

     /* qemuDomainGetIOThreadsMon returns a NULL-terminated list, so we must free
src/qemu/qemu_monitor.c
@@ -4211,22 +4211,24 @@ qemuMonitorRTCResetReinjection(qemuMonitorPtr mon)
  * qemuMonitorGetIOThreads:
  * @mon: Pointer to the monitor
  * @iothreads: Location to return array of IOThreadInfo data
+ * @niothreads: Count of the number of IOThreads in the array
  *
  * Issue query-iothreads command.
  * Retrieve the list of iothreads defined/running for the machine
  *
- * Returns count of IOThreadInfo structures on success
+ * Returns 0 on success
  *        -1 on error.
  */
 int
 qemuMonitorGetIOThreads(qemuMonitorPtr mon,
-                        qemuMonitorIOThreadInfoPtr **iothreads)
+                        qemuMonitorIOThreadInfoPtr **iothreads,
+                        int *niothreads)
 {
     VIR_DEBUG("iothreads=%p", iothreads);

     QEMU_CHECK_MONITOR(mon);

-    return qemuMonitorJSONGetIOThreads(mon, iothreads);
+    return qemuMonitorJSONGetIOThreads(mon, iothreads, niothreads);
 }
src/qemu/qemu_monitor.h
@@ -1365,7 +1365,8 @@ struct _qemuMonitorIOThreadInfo {
     bool set_poll_shrink;
 };
 int qemuMonitorGetIOThreads(qemuMonitorPtr mon,
-                            qemuMonitorIOThreadInfoPtr **iothreads);
+                            qemuMonitorIOThreadInfoPtr **iothreads,
+                            int *niothreads);
 int qemuMonitorSetIOThread(qemuMonitorPtr mon,
                            qemuMonitorIOThreadInfoPtr iothreadInfo);
src/qemu/qemu_monitor_json.c
@@ -8083,7 +8083,8 @@ qemuMonitorJSONRTCResetReinjection(qemuMonitorPtr mon)
  */
 int
 qemuMonitorJSONGetIOThreads(qemuMonitorPtr mon,
-                            qemuMonitorIOThreadInfoPtr **iothreads)
+                            qemuMonitorIOThreadInfoPtr **iothreads,
+                            int *niothreads)
 {
     int ret = -1;
     virJSONValuePtr cmd;
@@ -8160,9 +8161,10 @@ qemuMonitorJSONGetIOThreads(qemuMonitorPtr mon,
         info->poll_valid = true;
     }

-    ret = n;
+    *niothreads = n;
     *iothreads = infolist;
     infolist = NULL;
+    ret = 0;

  cleanup:
     if (infolist) {
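The success path in the hunk above uses a common ownership-transfer idiom: fill the out parameters, NULL the local pointer so the shared cleanup path cannot free what the caller now owns, and only then set ret to 0. A standalone sketch of that shape, with a hypothetical name (get_strings) rather than the libvirt code itself:

#include <stdlib.h>
#include <string.h>

static int
get_strings(char ***out, int *nout)
{
    int ret = -1;
    int n = 2;
    int i;
    char **list = calloc(n + 1, sizeof(char *)); /* NULL-terminated */

    if (!list)
        return -1;

    for (i = 0; i < n; i++) {
        if (!(list[i] = strdup("iothread")))
            goto cleanup;       /* partially built list freed below */
    }

    *nout = n;                  /* count goes out via the parameter */
    *out = list;                /* ownership transfers to the caller... */
    list = NULL;                /* ...so cleanup must not touch it */
    ret = 0;

 cleanup:
    if (list) {                 /* only non-NULL on the error path */
        for (i = 0; i < n; i++)
            free(list[i]);      /* free(NULL) is safe for unset slots */
        free(list);
    }
    return ret;
}

Because ret starts at -1 and only becomes 0 after the outputs are handed over, every early exit funnels through cleanup with the correct status, and the count no longer shares storage with that status.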
src/qemu/qemu_monitor_json.h
@@ -550,7 +550,8 @@ int qemuMonitorJSONGetGuestCPU(qemuMonitorPtr mon,
 int qemuMonitorJSONRTCResetReinjection(qemuMonitorPtr mon);

 int qemuMonitorJSONGetIOThreads(qemuMonitorPtr mon,
-                                qemuMonitorIOThreadInfoPtr **iothreads)
+                                qemuMonitorIOThreadInfoPtr **iothreads,
+                                int *niothreads)
     ATTRIBUTE_NONNULL(2);

 int qemuMonitorJSONSetIOThread(qemuMonitorPtr mon,
src/qemu/qemu_process.c
@@ -2498,10 +2498,10 @@ qemuProcessDetectIOThreadPIDs(virQEMUDriverPtr driver,
     /* Get the list of IOThreads from qemu */
     if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
         goto cleanup;
-    niothreads = qemuMonitorGetIOThreads(priv->mon, &iothreads);
+    ret = qemuMonitorGetIOThreads(priv->mon, &iothreads, &niothreads);
     if (qemuDomainObjExitMonitor(driver, vm) < 0)
         goto cleanup;
-    if (niothreads < 0)
+    if (ret < 0)
         goto cleanup;

     if (niothreads != vm->def->niothreadids) {
tests/qemumonitorjsontest.c
@@ -2377,8 +2377,8 @@ testQemuMonitorJSONGetIOThreads(const void *opaque)
                                "}") < 0)
         goto cleanup;

-    if ((ninfo = qemuMonitorGetIOThreads(qemuMonitorTestGetMonitor(test),
-                                         &info)) < 0)
+    if (qemuMonitorGetIOThreads(qemuMonitorTestGetMonitor(test),
+                                &info, &ninfo) < 0)
         goto cleanup;

     if (ninfo != 2) {