Mirror of https://gitlab.com/libvirt/libvirt.git
conf: move NUMA capabilities into self contained object
The NUMA cells are stored directly in the virCapsHostPtr struct. This moves them into their own struct, allowing them to be stored independently of the rest of the host capabilities. The change is used as an excuse to switch the representation to use a GPtrArray too.

Reviewed-by: Michal Privoznik <mprivozn@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Parent commit: bc1676fc2f
This commit:   6cc992bd1a
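The commit message above describes the new data structure but not how a caller uses it, so here is a minimal, hedged sketch of the lifecycle the hunks below establish: allocate the refcounted object, register a cell, attach it to the host capabilities, and let the dispose path drop the reference. The helper name exampleHostNUMAUsage and the cell values (node 0, 8 GiB, no CPU/sibling/page data) are invented for illustration; the sketch assumes it is built inside the libvirt tree with the internal "capabilities.h" header and GLib available.

#include "capabilities.h"   /* virCapsHostNUMA* APIs introduced by this commit */

static void
exampleHostNUMAUsage(virCapsPtr caps)
{
    /* Allocate the self-contained NUMA object: refs starts at 1 and
     * cells is a GPtrArray owning the virCapsHostNUMACell entries. */
    virCapsHostNUMAPtr numa = virCapabilitiesHostNUMANew();

    /* Register a single cell: node 0 with 8 GiB of memory (KiB units)
     * and no CPU, sibling or page info (hypothetical values). */
    virCapabilitiesHostNUMAAddCell(numa, 0, 8 * 1024 * 1024,
                                   0, NULL,    /* ncpus, cpus */
                                   0, NULL,    /* nsiblings, siblings */
                                   0, NULL);   /* npageinfo, pageinfo */

    /* Hand the object to the host capabilities; virCapsDispose()
     * later drops this reference via virCapabilitiesHostNUMAUnref(). */
    caps->host.numa = numa;
}

Drivers that want the real host topology instead call virCapabilitiesHostNUMANewHost(), which falls back to a faked single-cell topology when probing the host fails, as the driver hunks later in this diff show.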
@@ -182,14 +182,19 @@ virCapabilitiesFreeStoragePool(virCapsStoragePoolPtr pool)

 void
-virCapabilitiesFreeNUMAInfo(virCapsPtr caps)
+virCapabilitiesHostNUMAUnref(virCapsHostNUMAPtr caps)
 {
-    size_t i;
+    if (g_atomic_int_dec_and_test(&caps->refs)) {
+        g_ptr_array_unref(caps->cells);

-    for (i = 0; i < caps->host.nnumaCell; i++)
-        virCapabilitiesFreeHostNUMACell(caps->host.numaCell[i]);
-    VIR_FREE(caps->host.numaCell);
-    caps->host.nnumaCell = 0;
+        VIR_FREE(caps);
+    }
+}
+
+void
+virCapabilitiesHostNUMARef(virCapsHostNUMAPtr caps)
+{
+    g_atomic_int_inc(&caps->refs);
 }

 static void
@@ -234,7 +239,8 @@ virCapsDispose(void *object)
         VIR_FREE(caps->host.features[i]);
     VIR_FREE(caps->host.features);

-    virCapabilitiesFreeNUMAInfo(caps);
+    if (caps->host.numa)
+        virCapabilitiesHostNUMAUnref(caps->host.numa);

     for (i = 0; i < caps->host.nmigrateTrans; i++)
         VIR_FREE(caps->host.migrateTrans[i]);
@@ -320,7 +326,7 @@ virCapabilitiesSetNetPrefix(virCapsPtr caps,


 /**
- * virCapabilitiesAddHostNUMACell:
+ * virCapabilitiesHostNUMAAddCell:
  * @caps: capabilities to extend
  * @num: ID number of NUMA cell
  * @mem: Total size of memory in the NUMA node (in KiB)
@@ -334,8 +340,8 @@ virCapabilitiesSetNetPrefix(virCapsPtr caps,
  * Registers a new NUMA cell for a host, passing in a
  * array of CPU IDs belonging to the cell
  */
-int
-virCapabilitiesAddHostNUMACell(virCapsPtr caps,
+void
+virCapabilitiesHostNUMAAddCell(virCapsHostNUMAPtr caps,
                                int num,
                                unsigned long long mem,
                                int ncpus,
@@ -345,14 +351,7 @@ virCapabilitiesAddHostNUMACell(virCapsPtr caps,
                                int npageinfo,
                                virCapsHostNUMACellPageInfoPtr pageinfo)
 {
-    virCapsHostNUMACellPtr cell;
-
-    if (VIR_RESIZE_N(caps->host.numaCell, caps->host.nnumaCell_max,
-                     caps->host.nnumaCell, 1) < 0)
-        return -1;
-
-    if (VIR_ALLOC(cell) < 0)
-        return -1;
+    virCapsHostNUMACellPtr cell = g_new0(virCapsHostNUMACell, 1);

     cell->num = num;
     cell->mem = mem;
@@ -363,9 +362,7 @@ virCapabilitiesAddHostNUMACell(virCapsPtr caps,
     cell->npageinfo = npageinfo;
     cell->pageinfo = pageinfo;

-    caps->host.numaCell[caps->host.nnumaCell++] = cell;
-
-    return 0;
+    g_ptr_array_add(caps->cells, cell);
 }


@@ -857,9 +854,8 @@ virCapabilitiesAddStoragePool(virCapsPtr caps,


 static int
-virCapabilitiesFormatNUMATopology(virBufferPtr buf,
-                                  size_t ncells,
-                                  virCapsHostNUMACellPtr *cells)
+virCapabilitiesHostNUMAFormat(virCapsHostNUMAPtr caps,
+                              virBufferPtr buf)
 {
     size_t i;
     size_t j;
@@ -867,48 +863,49 @@ virCapabilitiesFormatNUMATopology(virBufferPtr buf,

     virBufferAddLit(buf, "<topology>\n");
     virBufferAdjustIndent(buf, 2);
-    virBufferAsprintf(buf, "<cells num='%zu'>\n", ncells);
+    virBufferAsprintf(buf, "<cells num='%d'>\n", caps->cells->len);
     virBufferAdjustIndent(buf, 2);
-    for (i = 0; i < ncells; i++) {
-        virBufferAsprintf(buf, "<cell id='%d'>\n", cells[i]->num);
+    for (i = 0; i < caps->cells->len; i++) {
+        virCapsHostNUMACellPtr cell = g_ptr_array_index(caps->cells, i);
+        virBufferAsprintf(buf, "<cell id='%d'>\n", cell->num);
         virBufferAdjustIndent(buf, 2);

         /* Print out the numacell memory total if it is available */
-        if (cells[i]->mem)
+        if (cell->mem)
             virBufferAsprintf(buf, "<memory unit='KiB'>%llu</memory>\n",
-                              cells[i]->mem);
+                              cell->mem);

-        for (j = 0; j < cells[i]->npageinfo; j++) {
+        for (j = 0; j < cell->npageinfo; j++) {
             virBufferAsprintf(buf, "<pages unit='KiB' size='%u'>%llu</pages>\n",
-                              cells[i]->pageinfo[j].size,
-                              cells[i]->pageinfo[j].avail);
+                              cell->pageinfo[j].size,
+                              cell->pageinfo[j].avail);
         }

-        if (cells[i]->nsiblings) {
+        if (cell->nsiblings) {
            virBufferAddLit(buf, "<distances>\n");
            virBufferAdjustIndent(buf, 2);
-            for (j = 0; j < cells[i]->nsiblings; j++) {
+            for (j = 0; j < cell->nsiblings; j++) {
                virBufferAsprintf(buf, "<sibling id='%d' value='%d'/>\n",
-                                  cells[i]->siblings[j].node,
-                                  cells[i]->siblings[j].distance);
+                                  cell->siblings[j].node,
+                                  cell->siblings[j].distance);
            }
            virBufferAdjustIndent(buf, -2);
            virBufferAddLit(buf, "</distances>\n");
        }

-        virBufferAsprintf(buf, "<cpus num='%d'>\n", cells[i]->ncpus);
+        virBufferAsprintf(buf, "<cpus num='%d'>\n", cell->ncpus);
         virBufferAdjustIndent(buf, 2);
-        for (j = 0; j < cells[i]->ncpus; j++) {
-            virBufferAsprintf(buf, "<cpu id='%d'", cells[i]->cpus[j].id);
+        for (j = 0; j < cell->ncpus; j++) {
+            virBufferAsprintf(buf, "<cpu id='%d'", cell->cpus[j].id);

-            if (cells[i]->cpus[j].siblings) {
-                if (!(siblings = virBitmapFormat(cells[i]->cpus[j].siblings)))
+            if (cell->cpus[j].siblings) {
+                if (!(siblings = virBitmapFormat(cell->cpus[j].siblings)))
                     return -1;

                 virBufferAsprintf(buf,
                                   " socket_id='%d' core_id='%d' siblings='%s'",
-                                  cells[i]->cpus[j].socket_id,
-                                  cells[i]->cpus[j].core_id,
+                                  cell->cpus[j].socket_id,
+                                  cell->cpus[j].core_id,
                                   siblings);
                 VIR_FREE(siblings);
             }
@@ -1187,9 +1184,8 @@ virCapabilitiesFormatHostXML(virCapsHostPtr host,
         virBufferAsprintf(buf, "<netprefix>%s</netprefix>\n",
                           host->netprefix);

-    if (host->nnumaCell &&
-        virCapabilitiesFormatNUMATopology(buf, host->nnumaCell,
-                                          host->numaCell) < 0)
+    if (host->numa &&
+        virCapabilitiesHostNUMAFormat(host->numa, buf) < 0)
         goto error;

     if (virCapabilitiesFormatCaches(buf, &host->cache) < 0)
@@ -1394,14 +1390,15 @@ virCapabilitiesFormatXML(virCapsPtr caps)

 /* get the maximum ID of cpus in the host */
 static unsigned int
-virCapabilitiesGetHostMaxcpu(virCapsPtr caps)
+virCapabilitiesHostNUMAGetMaxcpu(virCapsHostNUMAPtr caps)
 {
     unsigned int maxcpu = 0;
     size_t node;
     size_t cpu;

-    for (node = 0; node < caps->host.nnumaCell; node++) {
-        virCapsHostNUMACellPtr cell = caps->host.numaCell[node];
+    for (node = 0; node < caps->cells->len; node++) {
+        virCapsHostNUMACellPtr cell =
+            g_ptr_array_index(caps->cells, node);

         for (cpu = 0; cpu < cell->ncpus; cpu++) {
             if (cell->cpus[cpu].id > maxcpu)
@@ -1414,7 +1411,7 @@ virCapabilitiesGetHostMaxcpu(virCapsPtr caps)

 /* set cpus of a numa node in the bitmask */
 static int
-virCapabilitiesGetCpusForNode(virCapsPtr caps,
+virCapabilitiesHostNUMAGetCellCpus(virCapsHostNUMAPtr caps,
                               size_t node,
                               virBitmapPtr cpumask)
 {
@@ -1422,11 +1419,11 @@ virCapabilitiesGetCpusForNode(virCapsPtr caps,
     size_t cpu;
     size_t i;
     /* The numa node numbers can be non-contiguous. Ex: 0,1,16,17. */
-    for (i = 0; i < caps->host.nnumaCell; i++) {
-        if (caps->host.numaCell[i]->num == node) {
-            cell = caps->host.numaCell[i];
+    for (i = 0; i < caps->cells->len; i++) {
+        cell = g_ptr_array_index(caps->cells, i);
+        if (cell->num == node)
             break;
-        }
+        cell = NULL;
     }

     for (cpu = 0; cell && cpu < cell->ncpus; cpu++) {
@@ -1443,11 +1440,11 @@ virCapabilitiesGetCpusForNode(virCapsPtr caps,
 }

 virBitmapPtr
-virCapabilitiesGetCpusForNodemask(virCapsPtr caps,
+virCapabilitiesHostNUMAGetCpus(virCapsHostNUMAPtr caps,
                                   virBitmapPtr nodemask)
 {
     virBitmapPtr ret = NULL;
-    unsigned int maxcpu = virCapabilitiesGetHostMaxcpu(caps);
+    unsigned int maxcpu = virCapabilitiesHostNUMAGetMaxcpu(caps);
     ssize_t node = -1;

     if (!(ret = virBitmapNew(maxcpu + 1)))
@@ -1455,7 +1452,7 @@ virCapabilitiesGetCpusForNodemask(virCapsPtr caps,


     while ((node = virBitmapNextSetBit(nodemask, node)) >= 0) {
-        if (virCapabilitiesGetCpusForNode(caps, node, ret) < 0) {
+        if (virCapabilitiesHostNUMAGetCellCpus(caps, node, ret) < 0) {
             virBitmapFree(ret);
             return NULL;
         }
@@ -1591,7 +1588,7 @@ virCapabilitiesGetNUMAPagesInfo(int node,


 static int
-virCapabilitiesInitNUMAFake(virCapsPtr caps)
+virCapabilitiesHostNUMAInitFake(virCapsHostNUMAPtr caps)
 {
     virNodeInfo nodeinfo;
     virCapsHostNUMACellCPUPtr cpus;
@@ -1631,7 +1628,10 @@ virCapabilitiesInitNUMAFake(virCapsPtr caps)
         }
     }

-    if (virCapabilitiesAddHostNUMACell(caps, 0,
+    caps = g_new0(virCapsHostNUMA, 1);
+    caps->cells = g_ptr_array_new_with_free_func(
+        (GDestroyNotify)virCapabilitiesFreeHostNUMACell);
+    virCapabilitiesHostNUMAAddCell(caps, 0,
                                    nodeinfo.memory,
 #ifdef __linux__
                                    onlinecpus, cpus,
@@ -1639,8 +1639,7 @@ virCapabilitiesInitNUMAFake(virCapsPtr caps)
                                    ncpus, cpus,
 #endif
                                    0, NULL,
-                                   0, NULL) < 0)
-        goto error;
+                                   0, NULL);

     return 0;

@@ -1651,8 +1650,9 @@ virCapabilitiesInitNUMAFake(virCapsPtr caps)
     return -1;
 }

-int
-virCapabilitiesInitNUMA(virCapsPtr caps)
+
+static int
+virCapabilitiesHostNUMAInitReal(virCapsHostNUMAPtr caps)
 {
     int n;
     unsigned long long memory;
@@ -1665,12 +1665,8 @@ virCapabilitiesInitNUMA(virCapsPtr caps)
     int ret = -1;
     int ncpus = 0;
     int cpu;
-    bool topology_failed = false;
     int max_node;

-    if (!virNumaIsAvailable())
-        return virCapabilitiesInitNUMAFake(caps);
-
     if ((max_node = virNumaGetMaxNode()) < 0)
         goto cleanup;

@@ -1690,10 +1686,8 @@ virCapabilitiesInitNUMA(virCapsPtr caps)

         for (i = 0; i < virBitmapSize(cpumap); i++) {
             if (virBitmapIsBitSet(cpumap, i)) {
-                if (virCapabilitiesFillCPUInfo(i, cpus + cpu++) < 0) {
-                    topology_failed = true;
-                    virResetLastError();
-                }
+                if (virCapabilitiesFillCPUInfo(i, cpus + cpu++) < 0)
+                    goto cleanup;
             }
         }

@@ -1707,11 +1701,10 @@ virCapabilitiesInitNUMA(virCapsPtr caps)
         virNumaGetNodeMemory(n, &memory, NULL);
         memory >>= 10;

-        if (virCapabilitiesAddHostNUMACell(caps, n, memory,
+        virCapabilitiesHostNUMAAddCell(caps, n, memory,
                                        ncpus, cpus,
                                        nsiblings, siblings,
-                                       npageinfo, pageinfo) < 0)
-            goto cleanup;
+                                       npageinfo, pageinfo);

         cpus = NULL;
         siblings = NULL;
@@ -1723,9 +1716,6 @@ virCapabilitiesInitNUMA(virCapsPtr caps)
     ret = 0;

 cleanup:
-    if ((topology_failed || ret < 0) && cpus)
-        virCapabilitiesClearHostNUMACellCPUTopology(cpus, ncpus);
-
     virBitmapFree(cpumap);
     VIR_FREE(cpus);
     VIR_FREE(siblings);
@@ -1733,6 +1723,44 @@ virCapabilitiesInitNUMA(virCapsPtr caps)
     return ret;
 }


+virCapsHostNUMAPtr
+virCapabilitiesHostNUMANew(void)
+{
+    virCapsHostNUMAPtr caps = NULL;
+
+    caps = g_new0(virCapsHostNUMA, 1);
+    caps->refs = 1;
+    caps->cells = g_ptr_array_new_with_free_func(
+        (GDestroyNotify)virCapabilitiesFreeHostNUMACell);
+
+    return caps;
+}
+
+
+virCapsHostNUMAPtr
+virCapabilitiesHostNUMANewHost(void)
+{
+    virCapsHostNUMAPtr caps = virCapabilitiesHostNUMANew();
+
+    if (virNumaIsAvailable()) {
+        if (virCapabilitiesHostNUMAInitReal(caps) == 0)
+            return caps;
+
+        virCapabilitiesHostNUMAUnref(caps);
+        caps = virCapabilitiesHostNUMANew();
+        VIR_WARN("Failed to query host NUMA topology, faking single NUMA node");
+    }
+
+    if (virCapabilitiesHostNUMAInitFake(caps) < 0) {
+        virCapabilitiesHostNUMAUnref(caps);
+        return NULL;
+    }
+
+    return caps;
+}
+
+
 int
 virCapabilitiesInitPages(virCapsPtr caps)
 {
@@ -113,6 +113,11 @@ struct _virCapsHostNUMACell {
     virCapsHostNUMACellPageInfoPtr pageinfo;
 };

+struct _virCapsHostNUMA {
+    gint refs;
+    GPtrArray *cells;
+};
+
 struct _virCapsHostSecModelLabel {
     char *type;
     char *label;
@@ -168,9 +173,8 @@ struct _virCapsHost {
     size_t nmigrateTrans;
     size_t nmigrateTrans_max;
     char **migrateTrans;
-    size_t nnumaCell;
-    size_t nnumaCell_max;
-    virCapsHostNUMACellPtr *numaCell;
+
+    virCapsHostNUMAPtr numa;

     virResctrlInfoPtr resctrl;

@@ -225,7 +229,11 @@ virCapabilitiesNew(virArch hostarch,
                    bool liveMigrate);

 void
-virCapabilitiesFreeNUMAInfo(virCapsPtr caps);
+virCapabilitiesHostNUMAUnref(virCapsHostNUMAPtr caps);
+void
+virCapabilitiesHostNUMARef(virCapsHostNUMAPtr caps);
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(virCapsHostNUMA, virCapabilitiesHostNUMAUnref);

 int
 virCapabilitiesAddHostFeature(virCapsPtr caps,
@@ -239,8 +247,8 @@ int
 virCapabilitiesSetNetPrefix(virCapsPtr caps,
                             const char *prefix);

-int
-virCapabilitiesAddHostNUMACell(virCapsPtr caps,
+void
+virCapabilitiesHostNUMAAddCell(virCapsHostNUMAPtr caps,
                                int num,
                                unsigned long long mem,
                                int ncpus,
@@ -323,14 +331,15 @@ virCapabilitiesClearHostNUMACellCPUTopology(virCapsHostNUMACellCPUPtr cpu,
 char *
 virCapabilitiesFormatXML(virCapsPtr caps);

-virBitmapPtr virCapabilitiesGetCpusForNodemask(virCapsPtr caps,
+virBitmapPtr virCapabilitiesHostNUMAGetCpus(virCapsHostNUMAPtr caps,
                                             virBitmapPtr nodemask);

 int virCapabilitiesGetNodeInfo(virNodeInfoPtr nodeinfo);

 int virCapabilitiesInitPages(virCapsPtr caps);

-int virCapabilitiesInitNUMA(virCapsPtr caps);
+virCapsHostNUMAPtr virCapabilitiesHostNUMANew(void);
+virCapsHostNUMAPtr virCapabilitiesHostNUMANewHost(void);

 bool virCapsHostCacheBankEquals(virCapsHostCacheBankPtr a,
                                 virCapsHostCacheBankPtr b);
@@ -66,6 +66,9 @@ typedef virCapsHostMemBW *virCapsHostMemBWPtr;
 typedef struct _virCapsHostMemBWNode virCapsHostMemBWNode;
 typedef virCapsHostMemBWNode *virCapsHostMemBWNodePtr;

+typedef struct _virCapsHostNUMA virCapsHostNUMA;
+typedef virCapsHostNUMA *virCapsHostNUMAPtr;
+
 typedef struct _virCapsHostNUMACell virCapsHostNUMACell;
 typedef virCapsHostNUMACell *virCapsHostNUMACellPtr;

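The header changes above also declare G_DEFINE_AUTOPTR_CLEANUP_FUNC for the new type, so a caller can tie the reference to a scope instead of unrefing by hand, as the updated test at the end of this diff does. A small hedged sketch of that pattern (the function name exampleScopedNUMAQuery is invented; same in-tree build assumptions as the earlier example):

#include "capabilities.h"

static int
exampleScopedNUMAQuery(virBitmapPtr nodemask, virBitmapPtr *cpumap)
{
    /* Unref'd automatically via virCapabilitiesHostNUMAUnref() when the
     * variable goes out of scope, thanks to the autoptr declaration. */
    g_autoptr(virCapsHostNUMA) numa = virCapabilitiesHostNUMANewHost();

    if (!numa)
        return -1;

    /* Collect the CPUs belonging to the requested NUMA nodes. */
    if (!(*cpumap = virCapabilitiesHostNUMAGetCpus(numa, nodemask)))
        return -1;

    return 0;
}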
@@ -49,7 +49,6 @@ virCapabilitiesAddGuestFeature;
 virCapabilitiesAddGuestFeatureWithToggle;
 virCapabilitiesAddHostFeature;
 virCapabilitiesAddHostMigrateTransport;
-virCapabilitiesAddHostNUMACell;
 virCapabilitiesAddStoragePool;
 virCapabilitiesAllocMachines;
 virCapabilitiesClearHostNUMACellCPUTopology;
@@ -58,13 +57,16 @@ virCapabilitiesDomainSupported;
 virCapabilitiesFormatXML;
 virCapabilitiesFreeGuest;
 virCapabilitiesFreeMachines;
-virCapabilitiesFreeNUMAInfo;
-virCapabilitiesGetCpusForNodemask;
 virCapabilitiesGetNodeInfo;
 virCapabilitiesHostInitIOMMU;
+virCapabilitiesHostNUMAAddCell;
+virCapabilitiesHostNUMAGetCpus;
+virCapabilitiesHostNUMANew;
+virCapabilitiesHostNUMANewHost;
+virCapabilitiesHostNUMARef;
+virCapabilitiesHostNUMAUnref;
 virCapabilitiesHostSecModelAddBaseLabel;
 virCapabilitiesInitCaches;
-virCapabilitiesInitNUMA;
 virCapabilitiesInitPages;
 virCapabilitiesNew;
 virCapabilitiesSetHostCPU;
@@ -320,6 +320,7 @@ libxlCapsInitNuma(libxl_ctx *ctx, virCapsPtr caps)
         }
     }

+    caps->host.numa = virCapabilitiesHostNUMANew();
     for (i = 0; i < nr_nodes; i++) {
         if (numa_info[i].size == LIBXL_NUMAINFO_INVALID_ENTRY)
             continue;
@@ -337,15 +338,11 @@ libxlCapsInitNuma(libxl_ctx *ctx, virCapsPtr caps)
             }
         }

-        if (virCapabilitiesAddHostNUMACell(caps, i,
+        virCapabilitiesHostNUMAAddCell(caps->host.numa, i,
                                        numa_info[i].size / 1024,
                                        nr_cpus_node[i], cpus[i],
                                        nr_siblings, siblings,
-                                       0, NULL) < 0) {
-            virCapabilitiesClearHostNUMACellCPUTopology(cpus[i],
-                                                        nr_cpus_node[i]);
-            goto cleanup;
-        }
+                                       0, NULL);

         /* This is safe, as the CPU list is now stored in the NUMA cell */
         cpus[i] = NULL;
@@ -357,7 +354,10 @@ libxlCapsInitNuma(libxl_ctx *ctx, virCapsPtr caps)
     if (ret != 0) {
         for (i = 0; cpus && i < nr_nodes; i++)
             VIR_FREE(cpus[i]);
-        virCapabilitiesFreeNUMAInfo(caps);
+        if (caps->host.numa) {
+            virCapabilitiesHostNUMAUnref(caps->host.numa);
+            caps->host.numa = NULL;
+        }
         VIR_FREE(siblings);
     }

@@ -70,10 +70,8 @@ virCapsPtr virLXCDriverCapsInit(virLXCDriverPtr driver)
      * unexpected failures. We don't want to break the lxc
      * driver in this scenario, so log errors & carry on
      */
-    if (virCapabilitiesInitNUMA(caps) < 0) {
-        virCapabilitiesFreeNUMAInfo(caps);
-        VIR_WARN("Failed to query host NUMA topology, disabling NUMA capabilities");
-    }
+    if (!(caps->host.numa = virCapabilitiesHostNUMANewHost()))
+        goto error;

     if (virCapabilitiesInitCaches(caps) < 0)
         VIR_WARN("Failed to get host CPU cache info");
@@ -155,7 +155,7 @@ virCapsPtr openvzCapsInit(void)
                                    false, false)) == NULL)
         goto no_memory;

-    if (virCapabilitiesInitNUMA(caps) < 0)
+    if (!(caps->host.numa = virCapabilitiesHostNUMANewHost()))
         goto no_memory;

     if (virCapabilitiesInitCaches(caps) < 0)
@@ -322,11 +322,8 @@ phypCapsInit(void)
      * unexpected failures. We don't want to break the QEMU
      * driver in this scenario, so log errors & carry on
      */
-    if (virCapabilitiesInitNUMA(caps) < 0) {
-        virCapabilitiesFreeNUMAInfo(caps);
-        VIR_WARN
-            ("Failed to query host NUMA topology, disabling NUMA capabilities");
-    }
+    if (!(caps->host.numa = virCapabilitiesHostNUMANewHost()))
+        goto no_memory;

     if (virCapabilitiesInitCaches(caps) < 0)
         VIR_WARN("Failed to get host CPU cache info");
@@ -1036,10 +1036,8 @@ virQEMUCapsInit(virFileCachePtr cache)
      * unexpected failures. We don't want to break the QEMU
      * driver in this scenario, so log errors & carry on
      */
-    if (virCapabilitiesInitNUMA(caps) < 0) {
-        virCapabilitiesFreeNUMAInfo(caps);
-        VIR_WARN("Failed to query host NUMA topology, disabling NUMA capabilities");
-    }
+    if (!(caps->host.numa = virCapabilitiesHostNUMANewHost()))
+        goto error;

     if (virCapabilitiesInitCaches(caps) < 0)
         VIR_WARN("Failed to get host CPU cache info");
@@ -2941,8 +2941,11 @@ qemuDomainObjPrivateXMLParseAutomaticPlacement(xmlXPathContextPtr ctxt,
     /* Figure out how big the nodeset bitmap needs to be.
      * This is necessary because NUMA node IDs are not guaranteed to
      * start from 0 or be densely allocated */
-    for (i = 0; i < caps->host.nnumaCell; i++)
-        nodesetSize = MAX(nodesetSize, caps->host.numaCell[i]->num + 1);
+    for (i = 0; i < caps->host.numa->cells->len; i++) {
+        virCapsHostNUMACellPtr cell =
+            g_ptr_array_index(caps->host.numa->cells, i);
+        nodesetSize = MAX(nodesetSize, cell->num + 1);
+    }

     if (nodeset &&
         virBitmapParse(nodeset, &priv->autoNodeset, nodesetSize) < 0)
@@ -2954,7 +2957,7 @@ qemuDomainObjPrivateXMLParseAutomaticPlacement(xmlXPathContextPtr ctxt,
     } else {
         /* autoNodeset is present in this case, since otherwise we wouldn't
          * reach this code */
-        if (!(priv->autoCpuset = virCapabilitiesGetCpusForNodemask(caps,
+        if (!(priv->autoCpuset = virCapabilitiesHostNUMAGetCpus(caps->host.numa,
                                                                 priv->autoNodeset)))
             goto cleanup;
     }
@@ -4838,16 +4838,12 @@ qemuMigrationDstPersist(virQEMUDriverPtr driver,
 {
     virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
     qemuDomainObjPrivatePtr priv = vm->privateData;
-    virCapsPtr caps = NULL;
     virDomainDefPtr vmdef;
     virDomainDefPtr oldDef = NULL;
     unsigned int oldPersist = vm->persistent;
     virObjectEventPtr event;
     int ret = -1;

-    if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
-        goto cleanup;
-
     vm->persistent = 1;
     oldDef = vm->newDef;
     vm->newDef = qemuMigrationCookieGetPersistent(mig);
@@ -4871,7 +4867,6 @@ qemuMigrationDstPersist(virQEMUDriverPtr driver,

 cleanup:
     virDomainDefFree(oldDef);
-    virObjectUnref(caps);
     virObjectUnref(cfg);
     return ret;

@@ -6161,7 +6161,7 @@ qemuProcessPrepareDomainNUMAPlacement(virDomainObjPtr vm,
     /* numad may return a nodeset that only contains cpus but cgroups don't play
      * well with that. Set the autoCpuset from all cpus from that nodeset, but
      * assign autoNodeset only with nodes containing memory. */
-    if (!(priv->autoCpuset = virCapabilitiesGetCpusForNodemask(caps, numadNodeset)))
+    if (!(priv->autoCpuset = virCapabilitiesHostNUMAGetCpus(caps->host.numa, numadNodeset)))
         goto cleanup;

     virBitmapIntersect(numadNodeset, hostMemoryNodeset);
@@ -302,6 +302,7 @@ testBuildCapabilities(virConnectPtr conn)
     caps->host.pagesSize[caps->host.nPagesSize++] = 2048;
     caps->host.pagesSize[caps->host.nPagesSize++] = 1024 * 1024;

+    caps->host.numa = virCapabilitiesHostNUMANew();
     for (i = 0; i < privconn->numCells; i++) {
         virCapsHostNUMACellCPUPtr cpu_cells;
         virCapsHostNUMACellPageInfoPtr pages;
@@ -326,10 +327,10 @@ testBuildCapabilities(virConnectPtr conn)

         pages[0].avail = privconn->cells[i].mem / pages[0].size;

-        if (virCapabilitiesAddHostNUMACell(caps, i, privconn->cells[i].mem,
+        virCapabilitiesHostNUMAAddCell(caps->host.numa,
+                                       i, privconn->cells[i].mem,
                                        privconn->cells[i].numCpus,
-                                       cpu_cells, 0, NULL, nPages, pages) < 0)
-            goto error;
+                                       cpu_cells, 0, NULL, nPages, pages);
     }

     for (i = 0; i < G_N_ELEMENTS(guest_types); i++) {
@@ -76,7 +76,7 @@ vboxCapsInit(void)
                                    false, false)) == NULL)
         goto no_memory;

-    if (virCapabilitiesInitNUMA(caps) < 0)
+    if (!(caps->host.numa = virCapabilitiesHostNUMANewHost()))
         goto no_memory;

     if (virCapabilitiesInitCaches(caps) < 0)
@@ -71,7 +71,7 @@ vmwareCapsInit(void)
                                    false, false)) == NULL)
         goto error;

-    if (virCapabilitiesInitNUMA(caps) < 0)
+    if (!(caps->host.numa = virCapabilitiesHostNUMANewHost()))
         goto error;

     if (virCapabilitiesInitCaches(caps) < 0)
@@ -116,7 +116,7 @@ vzBuildCapabilities(void)
                                    false, false)) == NULL)
         return NULL;

-    if (virCapabilitiesInitNUMA(caps) < 0)
+    if (!(caps->host.numa = virCapabilitiesHostNUMANewHost()))
         goto error;

     if (virCapabilitiesInitCaches(caps) < 0)
@@ -1053,10 +1053,10 @@ virCapsPtr virTestGenericCapsInit(void)
  * Build NUMA topology with cell id starting from (0 + seq)
  * for testing
  */
-int
-virTestCapsBuildNUMATopology(virCapsPtr caps,
-                             int seq)
+virCapsHostNUMAPtr
+virTestCapsBuildNUMATopology(int seq)
 {
+    virCapsHostNUMAPtr caps = virCapabilitiesHostNUMANew();
     virCapsHostNUMACellCPUPtr cell_cpus = NULL;
     int core_id, cell_id;
     int id;
@@ -1077,22 +1077,21 @@ virTestCapsBuildNUMATopology(virCapsPtr caps,
         }
         id++;

-        if (virCapabilitiesAddHostNUMACell(caps, cell_id + seq,
+        virCapabilitiesHostNUMAAddCell(caps, cell_id + seq,
                                        MAX_MEM_IN_CELL,
                                        MAX_CPUS_IN_CELL, cell_cpus,
                                        VIR_ARCH_NONE, NULL,
-                                       VIR_ARCH_NONE, NULL) < 0)
-            goto error;
+                                       VIR_ARCH_NONE, NULL);

         cell_cpus = NULL;
     }

-    return 0;
+    return caps;

 error:
     virCapabilitiesClearHostNUMACellCPUTopology(cell_cpus, MAX_CPUS_IN_CELL);
+    virCapabilitiesHostNUMAUnref(caps);
     VIR_FREE(cell_cpus);
-    return -1;
+    return NULL;
 }

 static virDomainDefParserConfig virTestGenericDomainDefParserConfig = {
@@ -157,8 +157,7 @@ int virTestMain(int argc,
 #define VIR_TEST_MOCK(mock) (abs_builddir "/.libs/lib" mock "mock" MOCK_EXT)

 virCapsPtr virTestGenericCapsInit(void);
-int virTestCapsBuildNUMATopology(virCapsPtr caps,
-                                 int seq);
+virCapsHostNUMAPtr virTestCapsBuildNUMATopology(int seq);
 virDomainXMLOptionPtr virTestGenericDomainXMLConfInit(void);

 typedef enum {
@@ -205,7 +205,7 @@ virCapsPtr testQemuCapsInit(void)
      * Build a NUMA topology with cell_id (NUMA node id
      * being 3(0 + 3),4(1 + 3), 5 and 6
      */
-    if (virTestCapsBuildNUMATopology(caps, 3) < 0)
+    if (!(caps->host.numa = virTestCapsBuildNUMATopology(3)))
         goto cleanup;

     for (i = 0; i < VIR_ARCH_LAST; i++) {
@@ -58,8 +58,10 @@ test_virCapabilities(const void *opaque)
     if (!caps)
         goto cleanup;

-    if (virCapabilitiesInitNUMA(caps) < 0 ||
-        virCapabilitiesInitCaches(caps) < 0)
+    if (!(caps->host.numa = virCapabilitiesHostNUMANewHost()))
+        goto cleanup;
+
+    if (virCapabilitiesInitCaches(caps) < 0)
         goto cleanup;

     virFileWrapperClearPrefixes();
@@ -35,27 +35,22 @@ test_virCapabilitiesGetCpusForNodemask(const void *data G_GNUC_UNUSED)
     const char *nodestr = "3,4,5,6";
     virBitmapPtr nodemask = NULL;
     virBitmapPtr cpumap = NULL;
-    virCapsPtr caps = NULL;
+    g_autoptr(virCapsHostNUMA) caps = NULL;
     int mask_size = 8;
     int ret = -1;


-    if (!(caps = virCapabilitiesNew(VIR_ARCH_X86_64, false, false)))
-        goto error;
-
-    if (virTestCapsBuildNUMATopology(caps, 3) < 0)
+    if (!(caps = virTestCapsBuildNUMATopology(3)))
         goto error;

     if (virBitmapParse(nodestr, &nodemask, mask_size) < 0)
         goto error;

-    if (!(cpumap = virCapabilitiesGetCpusForNodemask(caps, nodemask)))
+    if (!(cpumap = virCapabilitiesHostNUMAGetCpus(caps, nodemask)))
         goto error;

     ret = 0;

 error:
-    virObjectUnref(caps);
     virBitmapFree(nodemask);
     virBitmapFree(cpumap);
     return ret;