qemu: Implement the qemu driver fetch for IOThreads

Depending on the flags passed, either attempt to return the active/live
IOThread data for the domain or the config data.

The active/live path will call into the Monitor in order to get the
IOThread data and then correlate the thread_ids returned from the
monitor to the currently running system threads in order to ascertain
the affinity for each iothread_id.

The config path will map each of the configured IOThreads and return
any configured iothreadspin data.
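
For context, a minimal caller sketch, assuming the public
virDomainGetIOThreadsInfo()/virDomainIOThreadsInfoFree() entry points
from earlier in this series (showIOThreads is a hypothetical helper;
error handling trimmed):

    #include <stdio.h>
    #include <stdlib.h>
    #include <libvirt/libvirt.h>

    static void
    showIOThreads(virDomainPtr dom)
    {
        virDomainIOThreadInfoPtr *info = NULL;
        int niothreads;
        size_t i;

        /* VIR_DOMAIN_AFFECT_LIVE takes the monitor-backed path;
         * VIR_DOMAIN_AFFECT_CONFIG returns the persistent config data */
        if ((niothreads = virDomainGetIOThreadsInfo(dom, &info,
                                                    VIR_DOMAIN_AFFECT_LIVE)) < 0)
            return;

        for (i = 0; i < niothreads; i++) {
            /* cpumap/cpumaplen describe the host CPUs this IOThread may use */
            printf("iothread %u: cpumaplen %d\n",
                   info[i]->iothread_id, info[i]->cpumaplen);
            virDomainIOThreadsInfoFree(info[i]);
        }
        free(info);
    }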

Signed-off-by: John Ferlan <jferlan@redhat.com>
Author: John Ferlan <jferlan@redhat.com>
Date: 2015-02-11 12:45:58 -05:00
parent 1e5a8ddc81
commit 82649eb7f1


@@ -5557,6 +5557,229 @@ qemuDomainGetMaxVcpus(virDomainPtr dom)
                                         VIR_DOMAIN_VCPU_MAXIMUM));
}

static int
qemuDomainGetIOThreadsLive(virQEMUDriverPtr driver,
                           virDomainObjPtr vm,
                           virDomainIOThreadInfoPtr **info)
{
    qemuDomainObjPrivatePtr priv;
    qemuMonitorIOThreadsInfoPtr *iothreads = NULL;
    virDomainIOThreadInfoPtr *info_ret = NULL;
    int niothreads = 0;
    int maxcpu, hostcpus, maplen;
    size_t i;
    int ret = -1;

    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
        goto cleanup;

    if (!virDomainObjIsActive(vm)) {
        virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                       _("cannot list IOThreads for an inactive domain"));
        goto endjob;
    }

    priv = vm->privateData;
    if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_OBJECT_IOTHREAD)) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("IOThreads not supported with this binary"));
        goto endjob;
    }
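
    /* Fetch the live IOThread list (object name + host thread_id)
     * from the QEMU monitor */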
    qemuDomainObjEnterMonitor(driver, vm);
    niothreads = qemuMonitorGetIOThreads(priv->mon, &iothreads);
    if (qemuDomainObjExitMonitor(driver, vm) < 0)
        goto endjob;
    if (niothreads < 0)
        goto endjob;

    /* Nothing to do */
    if (niothreads == 0) {
        ret = 0;
        goto endjob;
    }
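
    /* Size the affinity maps from the host CPU count */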
    if ((hostcpus = nodeGetCPUCount()) < 0)
        goto endjob;
    maplen = VIR_CPU_MAPLEN(hostcpus);
    maxcpu = maplen * 8;
    if (maxcpu > hostcpus)
        maxcpu = hostcpus;

    if (VIR_ALLOC_N(info_ret, niothreads) < 0)
        goto endjob;
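
    /* Correlate each thread_id returned by the monitor with the
     * current host CPU affinity of that thread */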
    for (i = 0; i < niothreads; i++) {
        virBitmapPtr map = NULL;
        unsigned char *tmpmap = NULL;
        int tmpmaplen = 0;

        if (VIR_ALLOC(info_ret[i]) < 0)
            goto endjob;

        if (virStrToLong_ui(iothreads[i]->name + strlen("iothread"), NULL, 10,
                            &info_ret[i]->iothread_id) < 0)
            goto endjob;

        if (VIR_ALLOC_N(info_ret[i]->cpumap, maplen) < 0)
            goto endjob;

        if (virProcessGetAffinity(iothreads[i]->thread_id, &map, maxcpu) < 0)
            goto endjob;

        virBitmapToData(map, &tmpmap, &tmpmaplen);
        if (tmpmaplen > maplen)
            tmpmaplen = maplen;
        memcpy(info_ret[i]->cpumap, tmpmap, tmpmaplen);
        info_ret[i]->cpumaplen = tmpmaplen;

        VIR_FREE(tmpmap);
        virBitmapFree(map);
    }

    *info = info_ret;
    info_ret = NULL;
    ret = niothreads;

 endjob:
    qemuDomainObjEndJob(driver, vm);

 cleanup:
    if (info_ret) {
        for (i = 0; i < niothreads; i++)
            virDomainIOThreadsInfoFree(info_ret[i]);
        VIR_FREE(info_ret);
    }
    if (iothreads) {
        for (i = 0; i < niothreads; i++)
            qemuMonitorIOThreadsInfoFree(iothreads[i]);
        VIR_FREE(iothreads);
    }

    return ret;
}

static int
qemuDomainGetIOThreadsConfig(virDomainDefPtr targetDef,
                             virDomainIOThreadInfoPtr **info)
{
    virDomainIOThreadInfoPtr *info_ret = NULL;
    virDomainVcpuPinDefPtr *iothreadspin_list;
    virBitmapPtr cpumask = NULL;
    unsigned char *cpumap;
    int maxcpu, hostcpus, maplen;
    size_t i, pcpu;
    bool pinned;
    int ret = -1;

    if (targetDef->iothreads == 0)
        return 0;

    if ((hostcpus = nodeGetCPUCount()) < 0)
        goto cleanup;
    maplen = VIR_CPU_MAPLEN(hostcpus);
    maxcpu = maplen * 8;
    if (maxcpu > hostcpus)
        maxcpu = hostcpus;

    if (VIR_ALLOC_N(info_ret, targetDef->iothreads) < 0)
        goto cleanup;
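
    /* Start each IOThread allowed on all host CPUs; any iothreadspin
     * data narrows the map below */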
    for (i = 0; i < targetDef->iothreads; i++) {
        if (VIR_ALLOC(info_ret[i]) < 0)
            goto cleanup;

        /* IOThreads begin counting at 1 */
        info_ret[i]->iothread_id = i + 1;

        if (VIR_ALLOC_N(info_ret[i]->cpumap, maplen) < 0)
            goto cleanup;

        /* Initialize the cpumap */
        info_ret[i]->cpumaplen = maplen;
        memset(info_ret[i]->cpumap, 0xff, maplen);
        if (maxcpu % 8)
            info_ret[i]->cpumap[maplen - 1] &= (1 << maxcpu % 8) - 1;
    }

    /* If iothreadspin settings exist, some physical CPUs are unused */
    iothreadspin_list = targetDef->cputune.iothreadspin;
    for (i = 0; i < targetDef->cputune.niothreadspin; i++) {
        /* vcpuid is the iothread_id...
         * iothread_id is the index into info_ret + 1, so we can
         * assume that the info_ret index we want is vcpuid - 1
         */
        cpumap = info_ret[iothreadspin_list[i]->vcpuid - 1]->cpumap;
        cpumask = iothreadspin_list[i]->cpumask;
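
        /* Clear every host CPU the pin mask does not allow */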
        for (pcpu = 0; pcpu < maxcpu; pcpu++) {
            if (virBitmapGetBit(cpumask, pcpu, &pinned) < 0)
                goto cleanup;
            if (!pinned)
                VIR_UNUSE_CPU(cpumap, pcpu);
        }
    }

    *info = info_ret;
    info_ret = NULL;
    ret = targetDef->iothreads;

 cleanup:
    if (info_ret) {
        for (i = 0; i < targetDef->iothreads; i++)
            virDomainIOThreadsInfoFree(info_ret[i]);
        VIR_FREE(info_ret);
    }

    return ret;
}

static int
qemuDomainGetIOThreadsInfo(virDomainPtr dom,
                           virDomainIOThreadInfoPtr **info,
                           unsigned int flags)
{
    virQEMUDriverPtr driver = dom->conn->privateData;
    virDomainObjPtr vm;
    virCapsPtr caps = NULL;
    virDomainDefPtr targetDef = NULL;
    int ret = -1;

    virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
                  VIR_DOMAIN_AFFECT_CONFIG, -1);

    if (!(vm = qemuDomObjFromDomain(dom)))
        goto cleanup;

    if (virDomainGetIOThreadsInfoEnsureACL(dom->conn, vm->def) < 0)
        goto cleanup;

    if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
        goto cleanup;

    if (virDomainLiveConfigHelperMethod(caps, driver->xmlopt, vm, &flags,
                                        &targetDef) < 0)
        goto cleanup;

    if (flags & VIR_DOMAIN_AFFECT_LIVE)
        targetDef = vm->def;

    /* Coverity didn't realize that targetDef must be set if we got here. */
    sa_assert(targetDef);

    if (flags & VIR_DOMAIN_AFFECT_LIVE)
        ret = qemuDomainGetIOThreadsLive(driver, vm, info);
    else
        ret = qemuDomainGetIOThreadsConfig(targetDef, info);

 cleanup:
    qemuDomObjEndAPI(&vm);
    virObjectUnref(caps);
    return ret;
}

static int qemuDomainGetSecurityLabel(virDomainPtr dom, virSecurityLabelPtr seclabel)
{
    virQEMUDriverPtr driver = dom->conn->privateData;
@@ -19104,6 +19327,7 @@ static virHypervisorDriver qemuHypervisorDriver = {
    .domainGetEmulatorPinInfo = qemuDomainGetEmulatorPinInfo, /* 0.10.0 */
    .domainGetVcpus = qemuDomainGetVcpus, /* 0.4.4 */
    .domainGetMaxVcpus = qemuDomainGetMaxVcpus, /* 0.4.4 */
    .domainGetIOThreadsInfo = qemuDomainGetIOThreadsInfo, /* 1.2.14 */
    .domainGetSecurityLabel = qemuDomainGetSecurityLabel, /* 0.6.1 */
    .domainGetSecurityLabelList = qemuDomainGetSecurityLabelList, /* 0.10.0 */
    .nodeGetSecurityModel = qemuNodeGetSecurityModel, /* 0.6.1 */