mirror of
https://gitlab.com/libvirt/libvirt.git
synced 2025-01-22 12:35:17 +00:00
numad: Convert node list to cpumap before setting affinity
Instead of returning a CPU list, numad returns a NUMA node list; this patch converts the node list to a cpumap before setting affinity. Otherwise, the domain processes will be pinned only to CPU[$numa_cell_num], which will cause significant performance losses. Also, because numad balances the affinity dynamically, reflecting the cpuset from numad back doesn't make much sense, and it could just confuse users. Thus the better way is not to reflect it back to the XML. And in this case, it's better to ignore the cpuset when parsing the XML. The code to update the cpuset is removed in this patch incidentally; a follow-up patch will ignore the manually specified "cpuset" if "placement" is "auto", and the documentation will be updated too.
This commit is contained in:
parent
360a88c317
commit
ccf80e3630
@ -1820,38 +1820,45 @@ qemuProcessInitCpuAffinity(struct qemud_driver *driver,
|
||||
}
|
||||
|
||||
if (vm->def->placement_mode == VIR_DOMAIN_CPU_PLACEMENT_MODE_AUTO) {
|
||||
char *tmp_cpumask = NULL;
|
||||
char *nodeset = NULL;
|
||||
char *nodemask = NULL;
|
||||
|
||||
nodeset = qemuGetNumadAdvice(vm->def);
|
||||
if (!nodeset)
|
||||
goto cleanup;
|
||||
|
||||
if (VIR_ALLOC_N(tmp_cpumask, VIR_DOMAIN_CPUMASK_LEN) < 0) {
|
||||
if (VIR_ALLOC_N(nodemask, VIR_DOMAIN_CPUMASK_LEN) < 0) {
|
||||
virReportOOMError();
|
||||
VIR_FREE(nodeset);
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
if (virDomainCpuSetParse(nodeset, 0, tmp_cpumask,
|
||||
if (virDomainCpuSetParse(nodeset, 0, nodemask,
|
||||
VIR_DOMAIN_CPUMASK_LEN) < 0) {
|
||||
VIR_FREE(tmp_cpumask);
|
||||
VIR_FREE(nodemask);
|
||||
VIR_FREE(nodeset);
|
||||
goto cleanup;
|
||||
}
|
||||
VIR_FREE(nodeset);
|
||||
|
||||
for (i = 0; i < maxcpu && i < VIR_DOMAIN_CPUMASK_LEN; i++) {
|
||||
if (tmp_cpumask[i])
|
||||
VIR_USE_CPU(cpumap, i);
|
||||
/* numad returns the NUMA node list, convert it to cpumap */
|
||||
int prev_total_ncpus = 0;
|
||||
for (i = 0; i < driver->caps->host.nnumaCell; i++) {
|
||||
int j;
|
||||
int cur_ncpus = driver->caps->host.numaCell[i]->ncpus;
|
||||
if (nodemask[i]) {
|
||||
for (j = prev_total_ncpus;
|
||||
j < cur_ncpus + prev_total_ncpus &&
|
||||
j < maxcpu &&
|
||||
j < VIR_DOMAIN_CPUMASK_LEN;
|
||||
j++) {
|
||||
VIR_USE_CPU(cpumap, j);
|
||||
}
|
||||
}
|
||||
prev_total_ncpus += cur_ncpus;
|
||||
}
|
||||
|
||||
VIR_FREE(vm->def->cpumask);
|
||||
vm->def->cpumask = tmp_cpumask;
|
||||
if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
|
||||
VIR_WARN("Unable to save status on vm %s after state change",
|
||||
vm->def->name);
|
||||
}
|
||||
VIR_FREE(nodemask);
|
||||
} else {
|
||||
if (vm->def->cpumask) {
|
||||
/* XXX why don't we keep 'cpumask' in the libvirt cpumap
|
||||
|
Loading…
x
Reference in New Issue
Block a user