qemu: vcpu: Aggregate code to set vCPU tuning

Rather than iterating three times over the vCPU list for the individual
settings, this function aggregates all of the code into a single place.
Another advantage is that it can then be reused for properly setting
vCPU info on hotplug.

With this approach autoCpuset is also used when setting the process
affinity, rather than only via cgroups.
Peter Krempa 2016-01-13 16:36:52 +01:00
parent d9c57ca9f9
commit 56971667ee
4 changed files with 149 additions and 164 deletions
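
As a rough illustration of the hotplug reuse mentioned in the commit message, a hotplug code path could call the new helper once a vCPU thread is online and its PID has been detected. This is only a sketch under that assumption; exampleHotplugTuneVcpu is a hypothetical caller and is not part of this commit:

/* Hypothetical caller (illustration only; assumes it lives inside the qemu
 * driver with its usual includes): reuse the aggregated helper from a
 * hotplug path instead of duplicating cgroup/affinity/scheduler code. */
static int
exampleHotplugTuneVcpu(virDomainObjPtr vm,
                       unsigned int vcpuid)
{
    virDomainVcpuInfoPtr vcpu = virDomainDefGetVcpu(vm->def, vcpuid);

    /* qemuProcessSetupVcpu() expects an online vCPU whose PID was detected */
    if (!vcpu || !vcpu->online)
        return -1;

    return qemuProcessSetupVcpu(vm, vcpuid);
}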

src/qemu/qemu_cgroup.c

@@ -1018,101 +1018,6 @@ qemuSetupCgroupCpusetCpus(virCgroupPtr cgroup,
    return ret;
}

int
qemuSetupCgroupForVcpu(virDomainObjPtr vm)
{
    virCgroupPtr cgroup_vcpu = NULL;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virDomainDefPtr def = vm->def;
    size_t i;
    unsigned long long period = vm->def->cputune.period;
    long long quota = vm->def->cputune.quota;
    char *mem_mask = NULL;
    virDomainNumatuneMemMode mem_mode;

    if ((period || quota) &&
        !virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU)) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("cgroup cpu is required for scheduler tuning"));
        return -1;
    }

    /*
     * If CPU cgroup controller is not initialized here, then we need
     * neither period nor quota settings. And if CPUSET controller is
     * not initialized either, then there's nothing to do anyway. CPU pinning
     * will be set via virProcessSetAffinity.
     */
    if (!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU) &&
        !virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET))
        return 0;

    /* If vCPU<->pid mapping is missing we can't do vCPU pinning */
    if (!qemuDomainHasVcpuPids(vm))
        return 0;

    if (virDomainNumatuneGetMode(vm->def->numa, -1, &mem_mode) == 0 &&
        mem_mode == VIR_DOMAIN_NUMATUNE_MEM_STRICT &&
        virDomainNumatuneMaybeFormatNodeset(vm->def->numa,
                                            priv->autoNodeset,
                                            &mem_mask, -1) < 0)
        goto cleanup;

    for (i = 0; i < virDomainDefGetVcpusMax(def); i++) {
        virDomainVcpuInfoPtr vcpu = virDomainDefGetVcpu(def, i);

        if (!vcpu->online)
            continue;

        virCgroupFree(&cgroup_vcpu);
        if (virCgroupNewThread(priv->cgroup, VIR_CGROUP_THREAD_VCPU, i,
                               true, &cgroup_vcpu) < 0)
            goto cleanup;

        if (period || quota) {
            if (qemuSetupCgroupVcpuBW(cgroup_vcpu, period, quota) < 0)
                goto cleanup;
        }

        /* Set vcpupin in cgroup if vcpupin xml is provided */
        if (virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET)) {
            virBitmapPtr cpumap = NULL;

            if (mem_mask &&
                virCgroupSetCpusetMems(cgroup_vcpu, mem_mask) < 0)
                goto cleanup;

            if (vcpu->cpumask)
                cpumap = vcpu->cpumask;
            else if (vm->def->placement_mode == VIR_DOMAIN_CPU_PLACEMENT_MODE_AUTO)
                cpumap = priv->autoCpuset;
            else
                cpumap = vm->def->cpumask;

            if (cpumap && qemuSetupCgroupCpusetCpus(cgroup_vcpu, cpumap) < 0)
                goto cleanup;
        }

        /* move the thread for vcpu to sub dir */
        if (virCgroupAddTask(cgroup_vcpu,
                             qemuDomainGetVcpuPid(vm, i)) < 0)
            goto cleanup;
    }
    virCgroupFree(&cgroup_vcpu);
    VIR_FREE(mem_mask);
    return 0;

 cleanup:
    if (cgroup_vcpu) {
        virCgroupRemove(cgroup_vcpu);
        virCgroupFree(&cgroup_vcpu);
    }
    VIR_FREE(mem_mask);
    return -1;
}

int
qemuSetupCgroupForEmulator(virDomainObjPtr vm)

src/qemu/qemu_cgroup.h

@@ -53,7 +53,6 @@ int qemuSetupCgroupVcpuBW(virCgroupPtr cgroup,
                          unsigned long long period,
                          long long quota);
int qemuSetupCgroupCpusetCpus(virCgroupPtr cgroup, virBitmapPtr cpumask);
int qemuSetupCgroupForVcpu(virDomainObjPtr vm);
int qemuSetupCgroupForIOThreads(virDomainObjPtr vm);
int qemuSetupCgroupForEmulator(virDomainObjPtr vm);
int qemuRemoveCgroup(virDomainObjPtr vm);

src/qemu/qemu_process.c

@@ -2188,56 +2188,6 @@ qemuProcessSetLinkStates(virQEMUDriverPtr driver,
    return ret;
}

/* Set CPU affinities for vcpus if vcpupin xml provided. */
static int
qemuProcessSetVcpuAffinities(virDomainObjPtr vm)
{
    virDomainDefPtr def = vm->def;
    virDomainVcpuInfoPtr vcpu;
    size_t i;
    int ret = -1;

    VIR_DEBUG("Setting affinity on CPUs");

    if (!qemuDomainHasVcpuPids(vm)) {
        /* If any CPU has custom affinity that differs from the
         * VM default affinity, we must reject it
         */
        for (i = 0; i < virDomainDefGetVcpusMax(def); i++) {
            vcpu = virDomainDefGetVcpu(def, i);

            if (!vcpu->online)
                continue;

            if (vcpu->cpumask &&
                !virBitmapEqual(def->cpumask, vcpu->cpumask)) {
                virReportError(VIR_ERR_OPERATION_INVALID,
                               "%s", _("cpu affinity is not supported"));
                return -1;
            }
        }

        return 0;
    }

    for (i = 0; i < virDomainDefGetVcpusMax(def); i++) {
        virBitmapPtr bitmap;

        vcpu = virDomainDefGetVcpu(def, i);

        if (!vcpu->online)
            continue;

        if (!(bitmap = vcpu->cpumask) &&
            !(bitmap = def->cpumask))
            continue;

        if (virProcessSetAffinity(qemuDomainGetVcpuPid(vm, i), bitmap) < 0)
            goto cleanup;
    }
    ret = 0;

 cleanup:
    return ret;
}

/* Set CPU affinities for emulator threads. */
static int
@@ -2286,18 +2236,6 @@ qemuProcessSetSchedulers(virDomainObjPtr vm)
{
    size_t i = 0;

    for (i = 0; i < virDomainDefGetVcpusMax(vm->def); i++) {
        virDomainVcpuInfoPtr vcpu = virDomainDefGetVcpu(vm->def, i);

        if (!vcpu->online ||
            vcpu->sched.policy == VIR_PROC_POLICY_NONE)
            continue;

        if (virProcessSetScheduler(qemuDomainGetVcpuPid(vm, i),
                                   vcpu->sched.policy, vcpu->sched.priority) < 0)
            return -1;
    }

    for (i = 0; i < vm->def->niothreadids; i++) {
        virDomainIOThreadIDDefPtr info = vm->def->iothreadids[i];
@@ -4494,6 +4432,149 @@ qemuProcessInit(virQEMUDriverPtr driver,
}


/**
 * qemuProcessSetupVcpu:
 * @vm: domain object
 * @vcpuid: id of VCPU to set defaults
 *
 * This function sets resource properties (cgroups, affinity, scheduler) for a
 * vCPU. This function expects that the vCPU is online and the vCPU pids were
 * correctly detected at the point when it's called.
 *
 * Returns 0 on success, -1 on error.
 */
int
qemuProcessSetupVcpu(virDomainObjPtr vm,
                     unsigned int vcpuid)
{
    pid_t vcpupid = qemuDomainGetVcpuPid(vm, vcpuid);
    virDomainVcpuInfoPtr vcpu = virDomainDefGetVcpu(vm->def, vcpuid);
    qemuDomainObjPrivatePtr priv = vm->privateData;
    char *mem_mask = NULL;
    virDomainNumatuneMemMode mem_mode;
    unsigned long long period = vm->def->cputune.period;
    long long quota = vm->def->cputune.quota;
    virCgroupPtr cgroup_vcpu = NULL;
    virBitmapPtr cpumask;
    int ret = -1;

    if (virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU) ||
        virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET)) {
        if (virDomainNumatuneGetMode(vm->def->numa, -1, &mem_mode) == 0 &&
            mem_mode == VIR_DOMAIN_NUMATUNE_MEM_STRICT &&
            virDomainNumatuneMaybeFormatNodeset(vm->def->numa,
                                                priv->autoNodeset,
                                                &mem_mask, -1) < 0)
            goto cleanup;

        if (virCgroupNewThread(priv->cgroup, VIR_CGROUP_THREAD_VCPU, vcpuid,
                               true, &cgroup_vcpu) < 0)
            goto cleanup;

        if (period || quota) {
            if (qemuSetupCgroupVcpuBW(cgroup_vcpu, period, quota) < 0)
                goto cleanup;
        }
    }

    /* infer which cpumask shall be used */
    if (vcpu->cpumask)
        cpumask = vcpu->cpumask;
    else if (vm->def->placement_mode == VIR_DOMAIN_CPU_PLACEMENT_MODE_AUTO)
        cpumask = priv->autoCpuset;
    else
        cpumask = vm->def->cpumask;

    /* setup cgroups */
    if (cgroup_vcpu) {
        if (virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET)) {
            if (mem_mask && virCgroupSetCpusetMems(cgroup_vcpu, mem_mask) < 0)
                goto cleanup;

            if (cpumask && qemuSetupCgroupCpusetCpus(cgroup_vcpu, cpumask) < 0)
                goto cleanup;
        }

        /* move the thread for vcpu to sub dir */
        if (virCgroupAddTask(cgroup_vcpu, vcpupid) < 0)
            goto cleanup;
    }

    /* setup legacy affinity */
    if (cpumask && virProcessSetAffinity(vcpupid, cpumask) < 0)
        goto cleanup;

    /* set scheduler type and priority */
    if (vcpu->sched.policy != VIR_PROC_POLICY_NONE) {
        if (virProcessSetScheduler(vcpupid, vcpu->sched.policy,
                                   vcpu->sched.priority) < 0)
            goto cleanup;
    }

    ret = 0;

 cleanup:
    VIR_FREE(mem_mask);
    if (cgroup_vcpu) {
        if (ret < 0)
            virCgroupRemove(cgroup_vcpu);
        virCgroupFree(&cgroup_vcpu);
    }

    return ret;
}


static int
qemuProcessSetupVcpus(virDomainObjPtr vm)
{
    virDomainVcpuInfoPtr vcpu;
    unsigned int maxvcpus = virDomainDefGetVcpusMax(vm->def);
    size_t i;

    if ((vm->def->cputune.period || vm->def->cputune.quota) &&
        !virCgroupHasController(((qemuDomainObjPrivatePtr) vm->privateData)->cgroup,
                                VIR_CGROUP_CONTROLLER_CPU)) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("cgroup cpu is required for scheduler tuning"));
        return -1;
    }

    if (!qemuDomainHasVcpuPids(vm)) {
        /* If any CPU has custom affinity that differs from the
         * VM default affinity, we must reject it */
        for (i = 0; i < maxvcpus; i++) {
            vcpu = virDomainDefGetVcpu(vm->def, i);

            if (!vcpu->online)
                continue;

            if (vcpu->cpumask &&
                !virBitmapEqual(vm->def->cpumask, vcpu->cpumask)) {
                virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                               _("cpu affinity is not supported"));
                return -1;
            }
        }

        return 0;
    }

    for (i = 0; i < maxvcpus; i++) {
        vcpu = virDomainDefGetVcpu(vm->def, i);

        if (!vcpu->online)
            continue;

        if (qemuProcessSetupVcpu(vm, i) < 0)
            return -1;
    }

    return 0;
}


/**
 * qemuProcessLaunch:
 *
@@ -4958,18 +5039,14 @@ qemuProcessLaunch(virConnectPtr conn,
     if (qemuProcessDetectIOThreadPIDs(driver, vm, asyncJob) < 0)
         goto cleanup;

-    VIR_DEBUG("Setting cgroup for each VCPU (if required)");
-    if (qemuSetupCgroupForVcpu(vm) < 0)
+    VIR_DEBUG("Setting vCPU tuning/settings");
+    if (qemuProcessSetupVcpus(vm) < 0)
         goto cleanup;

     VIR_DEBUG("Setting cgroup for each IOThread (if required)");
     if (qemuSetupCgroupForIOThreads(vm) < 0)
         goto cleanup;

-    VIR_DEBUG("Setting VCPU affinities");
-    if (qemuProcessSetVcpuAffinities(vm) < 0)
-        goto cleanup;
-
     VIR_DEBUG("Setting affinity of IOThread threads");
     if (qemuProcessSetIOThreadsAffinity(vm) < 0)
         goto cleanup;

src/qemu/qemu_process.h

@@ -160,4 +160,8 @@ virDomainDiskDefPtr qemuProcessFindDomainDiskByAlias(virDomainObjPtr vm,
int qemuConnectAgent(virQEMUDriverPtr driver, virDomainObjPtr vm);

int qemuProcessSetupVcpu(virDomainObjPtr vm,
                         unsigned int vcpuid);

#endif /* __QEMU_PROCESS_H__ */