mirror of
https://gitlab.com/libvirt/libvirt.git
synced 2024-12-23 06:05:27 +00:00
conf: Don't store vcpusched orthogonally to other vcpu info
Due to bad design, the vcpu sched element was stored orthogonally to the objects the data belongs to. Now that vcpus are a struct that can store other info too, convert the data to the saner structure. The helpers for the conversion are made generic so that they can be reused for iothreads too. This patch also resolves https://bugzilla.redhat.com/show_bug.cgi?id=1235180 since with the correct storage approach you can't have dangling data.
This commit is contained in:
parent
e1fa2571c5
commit
99c5fe0e7c
@ -1416,6 +1416,19 @@ virDomainDefGetVcpu(virDomainDefPtr def,
|
||||
}
|
||||
|
||||
|
||||
static virDomainThreadSchedParamPtr
|
||||
virDomainDefGetVcpuSched(virDomainDefPtr def,
|
||||
unsigned int vcpu)
|
||||
{
|
||||
virDomainVcpuInfoPtr vcpuinfo;
|
||||
|
||||
if (!(vcpuinfo = virDomainDefGetVcpu(def, vcpu)))
|
||||
return NULL;
|
||||
|
||||
return &vcpuinfo->sched;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* virDomainDefHasVcpuPin:
|
||||
* @def: domain definition
|
||||
@ -2547,10 +2560,6 @@ void virDomainDefFree(virDomainDefPtr def)
|
||||
|
||||
virBitmapFree(def->cputune.emulatorpin);
|
||||
|
||||
for (i = 0; i < def->cputune.nvcpusched; i++)
|
||||
virBitmapFree(def->cputune.vcpusched[i].ids);
|
||||
VIR_FREE(def->cputune.vcpusched);
|
||||
|
||||
for (i = 0; i < def->cputune.niothreadsched; i++)
|
||||
virBitmapFree(def->cputune.iothreadsched[i].ids);
|
||||
VIR_FREE(def->cputune.iothreadsched);
|
||||
@ -14565,6 +14574,55 @@ virDomainSchedulerParse(xmlNodePtr node,
|
||||
}
|
||||
|
||||
|
||||
static int
|
||||
virDomainThreadSchedParseHelper(xmlNodePtr node,
|
||||
const char *name,
|
||||
virDomainThreadSchedParamPtr (*func)(virDomainDefPtr, unsigned int),
|
||||
virDomainDefPtr def)
|
||||
{
|
||||
ssize_t next = -1;
|
||||
virBitmapPtr map = NULL;
|
||||
virDomainThreadSchedParamPtr sched;
|
||||
virProcessSchedPolicy policy;
|
||||
int priority;
|
||||
int ret = -1;
|
||||
|
||||
if (!(map = virDomainSchedulerParse(node, name, &policy, &priority)))
|
||||
goto cleanup;
|
||||
|
||||
while ((next = virBitmapNextSetBit(map, next)) > -1) {
|
||||
if (!(sched = func(def, next)))
|
||||
goto cleanup;
|
||||
|
||||
if (sched->policy != VIR_PROC_POLICY_NONE) {
|
||||
virReportError(VIR_ERR_XML_DETAIL,
|
||||
_("%ssched attributes 'vcpus' must not overlap"),
|
||||
name);
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
sched->policy = policy;
|
||||
sched->priority = priority;
|
||||
}
|
||||
|
||||
ret = 0;
|
||||
|
||||
cleanup:
|
||||
virBitmapFree(map);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
static int
|
||||
virDomainVcpuThreadSchedParse(xmlNodePtr node,
|
||||
virDomainDefPtr def)
|
||||
{
|
||||
return virDomainThreadSchedParseHelper(node, "vcpus",
|
||||
virDomainDefGetVcpuSched,
|
||||
def);
|
||||
}
|
||||
|
||||
|
||||
static int
|
||||
virDomainThreadSchedParse(xmlNodePtr node,
|
||||
unsigned int minid,
|
||||
@ -15120,29 +15178,10 @@ virDomainDefParseXML(xmlDocPtr xml,
|
||||
_("cannot extract vcpusched nodes"));
|
||||
goto error;
|
||||
}
|
||||
if (n) {
|
||||
if (VIR_ALLOC_N(def->cputune.vcpusched, n) < 0)
|
||||
|
||||
for (i = 0; i < n; i++) {
|
||||
if (virDomainVcpuThreadSchedParse(nodes[i], def) < 0)
|
||||
goto error;
|
||||
def->cputune.nvcpusched = n;
|
||||
|
||||
for (i = 0; i < def->cputune.nvcpusched; i++) {
|
||||
if (virDomainThreadSchedParse(nodes[i],
|
||||
0,
|
||||
virDomainDefGetVcpusMax(def) - 1,
|
||||
"vcpus",
|
||||
&def->cputune.vcpusched[i]) < 0)
|
||||
goto error;
|
||||
|
||||
for (j = 0; j < i; j++) {
|
||||
if (virBitmapOverlaps(def->cputune.vcpusched[i].ids,
|
||||
def->cputune.vcpusched[j].ids)) {
|
||||
virReportError(VIR_ERR_XML_DETAIL, "%s",
|
||||
_("vcpusched attributes 'vcpus' "
|
||||
"must not overlap"));
|
||||
goto error;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
VIR_FREE(nodes);
|
||||
|
||||
@ -21443,6 +21482,143 @@ virDomainDefHasCapabilitiesFeatures(virDomainDefPtr def)
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* virDomainFormatSchedDef:
|
||||
* @def: domain definiton
|
||||
* @buf: target XML buffer
|
||||
* @name: name of the target XML element
|
||||
* @func: function that returns the thread scheduler parameter struct for an object
|
||||
* @resourceMap: bitmap of indexes of objects that shall be formatted (used with @func)
|
||||
*
|
||||
* Formats one of the two scheduler tuning elements to the XML. This function
|
||||
* transforms the internal representation where the scheduler info is stored
|
||||
* per-object to the XML representation where the info is stored per group of
|
||||
* objects. This function autogroups all the relevant scheduler configs.
|
||||
*
|
||||
* Returns 0 on success -1 on error.
|
||||
*/
|
||||
static int
virDomainFormatSchedDef(virDomainDefPtr def,
                        virBufferPtr buf,
                        const char *name,
                        virDomainThreadSchedParamPtr (*func)(virDomainDefPtr, unsigned int),
                        virBitmapPtr resourceMap)
{
    /* working set of object ids that use the policy currently processed */
    virBitmapPtr schedMap = NULL;
    /* subset of schedMap sharing one priority (FIFO/RR only) */
    virBitmapPtr prioMap = NULL;
    virDomainThreadSchedParamPtr sched;
    char *tmp = NULL;
    ssize_t next;
    size_t i;
    int ret = -1;

    if (!(schedMap = virBitmapNew(VIR_DOMAIN_CPUMASK_LEN)) ||
        !(prioMap = virBitmapNew(VIR_DOMAIN_CPUMASK_LEN)))
        goto cleanup;

    /* iterate over all real policies; POLICY_NONE means "not set" and is
     * never formatted */
    for (i = VIR_PROC_POLICY_NONE + 1; i < VIR_PROC_POLICY_LAST; i++) {
        virBitmapClearAll(schedMap);

        /* find vcpus using a particular scheduler */
        next = -1;
        while ((next = virBitmapNextSetBit(resourceMap, next)) > -1) {
            sched = func(def, next);

            if (sched->policy == i)
                ignore_value(virBitmapSetBit(schedMap, next));
        }

        /* it's necessary to discriminate priority levels for schedulers that
         * have them */
        while (!virBitmapIsAllClear(schedMap)) {
            virBitmapPtr currentMap = NULL;
            ssize_t nextprio;
            bool hasPriority = false;
            int priority;

            switch ((virProcessSchedPolicy) i) {
            case VIR_PROC_POLICY_NONE:
            case VIR_PROC_POLICY_BATCH:
            case VIR_PROC_POLICY_IDLE:
            case VIR_PROC_POLICY_LAST:
                /* no priority: all objects with this policy form one group */
                currentMap = schedMap;
                break;

            case VIR_PROC_POLICY_FIFO:
            case VIR_PROC_POLICY_RR:
                virBitmapClearAll(prioMap);
                hasPriority = true;

                /* we need to find a subset of vCPUs with the given scheduler
                 * that share the priority */
                nextprio = virBitmapNextSetBit(schedMap, -1);
                sched = func(def, nextprio);
                priority = sched->priority;

                ignore_value(virBitmapSetBit(prioMap, nextprio));

                while ((nextprio = virBitmapNextSetBit(schedMap, nextprio)) > -1) {
                    sched = func(def, nextprio);
                    if (sched->priority == priority)
                        ignore_value(virBitmapSetBit(prioMap, nextprio));
                }

                currentMap = prioMap;
                break;
            }

            /* now we have the complete group */
            if (!(tmp = virBitmapFormat(currentMap)))
                goto cleanup;

            /* "<%sched": @name ("vcpus"/"iothreads") already ends in 's',
             * so this yields e.g. "<vcpusched vcpus='...'" */
            virBufferAsprintf(buf,
                              "<%sched %s='%s' scheduler='%s'",
                              name, name, tmp,
                              virProcessSchedPolicyTypeToString(i));
            VIR_FREE(tmp);

            if (hasPriority)
                virBufferAsprintf(buf, " priority='%d'", priority);

            virBufferAddLit(buf, "/>\n");

            /* subtract all vCPUs that were already found */
            virBitmapSubtract(schedMap, currentMap);
        }
    }

    ret = 0;

 cleanup:
    virBitmapFree(schedMap);
    virBitmapFree(prioMap);
    return ret;
}
|
||||
|
||||
|
||||
static int
|
||||
virDomainFormatVcpuSchedDef(virDomainDefPtr def,
|
||||
virBufferPtr buf)
|
||||
{
|
||||
virBitmapPtr allcpumap;
|
||||
int ret;
|
||||
|
||||
if (virDomainDefGetVcpusMax(def) == 0)
|
||||
return 0;
|
||||
|
||||
if (!(allcpumap = virBitmapNew(virDomainDefGetVcpusMax(def))))
|
||||
return -1;
|
||||
|
||||
virBitmapSetAll(allcpumap);
|
||||
|
||||
ret = virDomainFormatSchedDef(def, buf, "vcpus", virDomainDefGetVcpuSched,
|
||||
allcpumap);
|
||||
|
||||
virBitmapFree(allcpumap);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
static int
|
||||
virDomainCputuneDefFormat(virBufferPtr buf,
|
||||
virDomainDefPtr def)
|
||||
@ -21517,22 +21693,8 @@ virDomainCputuneDefFormat(virBufferPtr buf,
|
||||
VIR_FREE(cpumask);
|
||||
}
|
||||
|
||||
for (i = 0; i < def->cputune.nvcpusched; i++) {
|
||||
virDomainThreadSchedParamPtr sp = &def->cputune.vcpusched[i];
|
||||
char *ids = NULL;
|
||||
|
||||
if (!(ids = virBitmapFormat(sp->ids)))
|
||||
goto cleanup;
|
||||
|
||||
virBufferAsprintf(&childrenBuf, "<vcpusched vcpus='%s' scheduler='%s'",
|
||||
ids, virProcessSchedPolicyTypeToString(sp->policy));
|
||||
VIR_FREE(ids);
|
||||
|
||||
if (sp->policy == VIR_PROC_POLICY_FIFO ||
|
||||
sp->policy == VIR_PROC_POLICY_RR)
|
||||
virBufferAsprintf(&childrenBuf, " priority='%d'", sp->priority);
|
||||
virBufferAddLit(&childrenBuf, "/>\n");
|
||||
}
|
||||
if (virDomainFormatVcpuSchedDef(def, &childrenBuf) < 0)
|
||||
goto cleanup;
|
||||
|
||||
for (i = 0; i < def->cputune.niothreadsched; i++) {
|
||||
virDomainThreadSchedParamPtr sp = &def->cputune.iothreadsched[i];
|
||||
|
@ -2112,8 +2112,6 @@ struct _virDomainCputune {
|
||||
long long emulator_quota;
|
||||
virBitmapPtr emulatorpin;
|
||||
|
||||
size_t nvcpusched;
|
||||
virDomainThreadSchedParamPtr vcpusched;
|
||||
size_t niothreadsched;
|
||||
virDomainThreadSchedParamPtr iothreadsched;
|
||||
};
|
||||
@ -2125,6 +2123,9 @@ typedef virDomainVcpuInfo *virDomainVcpuInfoPtr;
|
||||
struct _virDomainVcpuInfo {
|
||||
bool online;
|
||||
virBitmapPtr cpumask;
|
||||
|
||||
/* note: the sched.ids bitmap is unused so it doesn't have to be cleared */
|
||||
virDomainThreadSchedParam sched;
|
||||
};
|
||||
|
||||
typedef struct _virDomainBlkiotune virDomainBlkiotune;
|
||||
|
@ -4804,9 +4804,9 @@ qemuDomainHotplugAddVcpu(virQEMUDriverPtr driver,
|
||||
}
|
||||
}
|
||||
|
||||
if (qemuProcessSetSchedParams(vcpu, vcpupid,
|
||||
vm->def->cputune.nvcpusched,
|
||||
vm->def->cputune.vcpusched) < 0)
|
||||
if (vcpuinfo->sched.policy != VIR_PROC_POLICY_NONE &&
|
||||
virProcessSetScheduler(vcpupid, vcpuinfo->sched.policy,
|
||||
vcpuinfo->sched.priority) < 0)
|
||||
goto cleanup;
|
||||
|
||||
ret = 0;
|
||||
|
@ -2317,12 +2317,12 @@ qemuProcessSetSchedulers(virDomainObjPtr vm)
|
||||
for (i = 0; i < virDomainDefGetVcpusMax(vm->def); i++) {
|
||||
virDomainVcpuInfoPtr vcpu = virDomainDefGetVcpu(vm->def, i);
|
||||
|
||||
if (!vcpu->online)
|
||||
if (!vcpu->online ||
|
||||
vcpu->sched.policy == VIR_PROC_POLICY_NONE)
|
||||
continue;
|
||||
|
||||
if (qemuProcessSetSchedParams(i, qemuDomainGetVcpuPid(vm, i),
|
||||
vm->def->cputune.nvcpusched,
|
||||
vm->def->cputune.vcpusched) < 0)
|
||||
if (virProcessSetScheduler(qemuDomainGetVcpuPid(vm, i),
|
||||
vcpu->sched.policy, vcpu->sched.priority) < 0)
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user