qemu: Replace checking for vcpu<->pid mapping availability with a helper

Add qemuDomainHasVcpuPids to do the checking and replace the in-place checks
with it.

We no longer need to check whether the vcpupids array contains fake data
(vcpupids[0] == vm->pid), as that case was removed in commits
b07f3d821dfb11a118ee75ea275fd6ab737d9500 and 65686e5a81d654d834d338fceeaf0229b2ca4f0d.
This commit is contained in:
Peter Krempa 2015-11-11 14:20:04 +01:00
parent e4bf9a3bcc
commit 220a2d51de
5 changed files with 36 additions and 24 deletions

View File

@ -1025,12 +1025,9 @@ qemuSetupCgroupForVcpu(virDomainObjPtr vm)
!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET))
return 0;
if (priv->nvcpupids == 0 || priv->vcpupids[0] == vm->pid) {
/* If we don't know VCPU<->PID mapping or all vcpu runs in the same
* thread, we cannot control each vcpu.
*/
/* If vCPU<->pid mapping is missing we can't do vCPU pinning */
if (!qemuDomainHasVcpuPids(vm))
return 0;
}
if (virDomainNumatuneGetMode(vm->def->numa, -1, &mem_mode) == 0 &&
mem_mode == VIR_DOMAIN_NUMATUNE_MEM_STRICT &&

View File

@ -4116,3 +4116,18 @@ qemuDomainRequiresMlock(virDomainDefPtr def)
return false;
}
/**
* qemuDomainHasVcpuPids:
* @vm: Domain object
*
* Returns true if we were able to successfully detect vCPU pids for the VM.
*/
bool
qemuDomainHasVcpuPids(virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    /* A positive count means vCPU thread pids were detected at startup. */
    return priv->nvcpupids > 0;
}

View File

@ -505,4 +505,6 @@ int qemuDomainDefValidateMemoryHotplug(const virDomainDef *def,
virQEMUCapsPtr qemuCaps,
const virDomainMemoryDef *mem);
bool qemuDomainHasVcpuPids(virDomainObjPtr vm);
#endif /* __QEMU_DOMAIN_H__ */

View File

@ -1428,7 +1428,7 @@ qemuDomainHelperGetVcpus(virDomainObjPtr vm, virVcpuInfoPtr info, int maxinfo,
size_t i, v;
qemuDomainObjPrivatePtr priv = vm->privateData;
if (priv->vcpupids == NULL) {
if (!qemuDomainHasVcpuPids(vm)) {
virReportError(VIR_ERR_OPERATION_INVALID,
"%s", _("cpu affinity is not supported"));
return -1;
@ -5118,7 +5118,7 @@ qemuDomainPinVcpuFlags(virDomainPtr dom,
}
if (def) {
if (priv->vcpupids == NULL) {
if (!qemuDomainHasVcpuPids(vm)) {
virReportError(VIR_ERR_OPERATION_INVALID,
"%s", _("cpu affinity is not supported"));
goto endjob;
@ -10287,21 +10287,18 @@ qemuSetVcpusBWLive(virDomainObjPtr vm, virCgroupPtr cgroup,
if (period == 0 && quota == 0)
return 0;
/* If we does not know VCPU<->PID mapping or all vcpu runs in the same
* thread, we cannot control each vcpu. So we only modify cpu bandwidth
* when each vcpu has a separated thread.
*/
if (priv->nvcpupids != 0 && priv->vcpupids[0] != vm->pid) {
for (i = 0; i < priv->nvcpupids; i++) {
if (virCgroupNewThread(cgroup, VIR_CGROUP_THREAD_VCPU, i,
false, &cgroup_vcpu) < 0)
goto cleanup;
if (!qemuDomainHasVcpuPids(vm))
return 0;
if (qemuSetupCgroupVcpuBW(cgroup_vcpu, period, quota) < 0)
goto cleanup;
for (i = 0; i < priv->nvcpupids; i++) {
if (virCgroupNewThread(cgroup, VIR_CGROUP_THREAD_VCPU, i,
false, &cgroup_vcpu) < 0)
goto cleanup;
virCgroupFree(&cgroup_vcpu);
}
if (qemuSetupCgroupVcpuBW(cgroup_vcpu, period, quota) < 0)
goto cleanup;
virCgroupFree(&cgroup_vcpu);
}
return 0;
@ -10604,7 +10601,7 @@ qemuGetVcpusBWLive(virDomainObjPtr vm,
int ret = -1;
priv = vm->privateData;
if (priv->nvcpupids == 0 || priv->vcpupids[0] == vm->pid) {
if (!qemuDomainHasVcpuPids(vm)) {
/* We do not create sub dir for each vcpu */
rc = qemuGetVcpuBWLive(priv->cgroup, period, quota);
if (rc < 0)

View File

@ -2239,12 +2239,13 @@ qemuProcessSetVcpuAffinities(virDomainObjPtr vm)
virDomainPinDefPtr pininfo;
int n;
int ret = -1;
VIR_DEBUG("Setting affinity on CPUs nvcpupin=%zu nvcpus=%d nvcpupids=%d",
def->cputune.nvcpupin, virDomainDefGetVcpus(def), priv->nvcpupids);
VIR_DEBUG("Setting affinity on CPUs nvcpupin=%zu nvcpus=%d hasVcpupids=%d",
def->cputune.nvcpupin, virDomainDefGetVcpus(def),
qemuDomainHasVcpuPids(vm));
if (!def->cputune.nvcpupin)
return 0;
if (priv->vcpupids == NULL) {
if (!qemuDomainHasVcpuPids(vm)) {
/* If any CPU has custom affinity that differs from the
* VM default affinity, we must reject it
*/