Introduce virNodeAllocPages

A long time ago in a galaxy far, far away it was decided
that libvirt would manage not only domains but the host as
well. And with my recent work on huge page support in the qemu
driver, we are missing the cherry on top: an API to adjust the
huge pages pool at runtime. Currently, users are forced to log
into the host and resize the pool themselves. With this API the
problem is gone: they can size the pool both up and down.

Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
Author: Michal Privoznik <mprivozn@redhat.com>
Date:   2014-09-16 18:17:22 +02:00
parent 4aa8a68faa
commit fa5c5580d6
8 changed files with 223 additions and 1 deletion
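
Before the per-file hunks, a quick illustration of the new API from a
client's point of view (a minimal sketch, not part of this commit; the
connection URI and the 2048 KiB page size are assumptions for the
example):

#include <stdio.h>
#include <libvirt/libvirt.h>

int main(void)
{
    virConnectPtr conn;
    unsigned int pageSizes[] = { 2048 };       /* page size in KiB (2 MiB pages) */
    unsigned long long pageCounts[] = { 128 }; /* pages to add to the pool */
    int ret;

    if (!(conn = virConnectOpen("qemu:///system")))
        return 1;

    /* grow the 2 MiB pool on NUMA node 0 by 128 pages */
    ret = virNodeAllocPages(conn, 1, pageSizes, pageCounts,
                            0, 1, VIR_NODE_ALLOC_PAGES_ADD);
    if (ret < 0)
        fprintf(stderr, "allocating pages failed\n");
    else
        printf("adjusted %d NUMA node(s)\n", ret);

    virConnectClose(conn);
    return ret < 0 ? 1 : 0;
}

The return value is the number of NUMA nodes whose pools were
adjusted, mirroring the documentation added in src/libvirt.c below.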

daemon/remote.c

@@ -6303,6 +6303,43 @@ remoteDispatchConnectGetAllDomainStats(virNetServerPtr server ATTRIBUTE_UNUSED,
}


static int
remoteDispatchNodeAllocPages(virNetServerPtr server ATTRIBUTE_UNUSED,
                             virNetServerClientPtr client,
                             virNetMessagePtr msg ATTRIBUTE_UNUSED,
                             virNetMessageErrorPtr rerr,
                             remote_node_alloc_pages_args *args,
                             remote_node_alloc_pages_ret *ret)
{
    int rv = -1;
    int len;
    struct daemonClientPrivate *priv =
        virNetServerClientGetPrivateData(client);

    if (!priv->conn) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("connection not open"));
        goto cleanup;
    }

    if ((len = virNodeAllocPages(priv->conn,
                                 args->pageSizes.pageSizes_len,
                                 args->pageSizes.pageSizes_val,
                                 (unsigned long long *) args->pageCounts.pageCounts_val,
                                 args->startCell,
                                 args->cellCount,
                                 args->flags)) < 0)
        goto cleanup;

    ret->ret = len;
    rv = 0;

 cleanup:
    if (rv < 0)
        virNetMessageSaveError(rerr);
    return rv;
}


/*----- Helpers. -----*/

/* get_nonnull_domain and get_nonnull_network turn an on-wire

include/libvirt/libvirt.h.in

@@ -5616,6 +5616,22 @@ int virNodeGetFreePages(virConnectPtr conn,
                        unsigned int cellcount,
                        unsigned long long *counts,
                        unsigned int flags);

typedef enum {
    VIR_NODE_ALLOC_PAGES_ADD = 0, /* Add @pageCounts to the pages pool. This
                                     can be used only to size up the pool. */
    VIR_NODE_ALLOC_PAGES_SET = (1 << 0), /* Don't add @pageCounts, instead set
                                            the pool to the passed number of
                                            pages. This can be used to free
                                            allocated pages. */
} virNodeAllocPagesFlags;

int virNodeAllocPages(virConnectPtr conn,
                      unsigned int npages,
                      unsigned int *pageSizes,
                      unsigned long long *pageCounts,
                      int startCell,
                      unsigned int cellCount,
                      unsigned int flags);

/**
 * virSchedParameterType:
 *
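
The difference between the two flags is worth spelling out: with
VIR_NODE_ALLOC_PAGES_ADD the counts are deltas added to the current
pool, while with VIR_NODE_ALLOC_PAGES_SET they are absolute pool
sizes, which is also how pages are freed. A sketch (assuming an open
virConnectPtr conn and a 2048 KiB pool currently holding 512 pages):

unsigned int sz[] = { 2048 };
unsigned long long cnt[] = { 64 };

/* ADD: the count is a delta, the pool grows to 512 + 64 = 576 pages */
virNodeAllocPages(conn, 1, sz, cnt, 0, 1, VIR_NODE_ALLOC_PAGES_ADD);

/* SET: the count is absolute, the pool shrinks to exactly 64 pages */
virNodeAllocPages(conn, 1, sz, cnt, 0, 1, VIR_NODE_ALLOC_PAGES_SET);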

src/driver.h

@@ -1212,6 +1212,15 @@ typedef int
                                     virDomainStatsRecordPtr **retStats,
                                     unsigned int flags);

typedef int
(*virDrvNodeAllocPages)(virConnectPtr conn,
                        unsigned int npages,
                        unsigned int *pageSizes,
                        unsigned long long *pageCounts,
                        int startCell,
                        unsigned int cellCount,
                        unsigned int flags);


typedef struct _virDriver virDriver;
typedef virDriver *virDriverPtr;

@@ -1435,6 +1444,7 @@ struct _virDriver {
    virDrvNodeGetFreePages nodeGetFreePages;
    virDrvConnectGetDomainCapabilities connectGetDomainCapabilities;
    virDrvConnectGetAllDomainStats connectGetAllDomainStats;
    virDrvNodeAllocPages nodeAllocPages;
};

src/libvirt.c

@@ -21841,3 +21841,75 @@ virDomainStatsRecordListFree(virDomainStatsRecordPtr *stats)

    VIR_FREE(stats);
}


/**
 * virNodeAllocPages:
 * @conn: pointer to the hypervisor connection
 * @npages: number of items in the @pageSizes and
 *          @pageCounts arrays
 * @pageSizes: which huge page sizes to allocate
 * @pageCounts: how many pages should be allocated
 * @startCell: index of the first cell to allocate pages on
 * @cellCount: number of consecutive cells to allocate pages on
 * @flags: extra flags; binary-OR of virNodeAllocPagesFlags
 *
 * Sometimes, when trying to start a new domain, it may be
 * necessary to reserve some huge pages in the system pool so
 * that they can then be allocated by the domain. This API serves
 * that purpose. On input, @pageSizes and @pageCounts are arrays
 * of the same cardinality, @npages. @pageSizes contains the page
 * sizes to be allocated in the system (the size unit is
 * kibibytes), and @pageCounts contains the number of pages to
 * reserve for each size. If @flags is 0
 * (VIR_NODE_ALLOC_PAGES_ADD), each pool corresponding to an
 * entry in @pageSizes grows by the number of pages specified in
 * the corresponding entry of @pageCounts. If @flags contains
 * VIR_NODE_ALLOC_PAGES_SET, each pool mentioned is resized to
 * exactly the given number of pages. The pool can be adjusted
 * over several NUMA nodes at once: point @startCell at the first
 * node and use @cellCount to say how many subsequent NUMA nodes
 * should be taken in. As a special case, if @startCell is -1,
 * the kernel is instructed to allocate the pages over all NUMA
 * nodes proportionally.
 *
 * Returns: the number of nodes successfully adjusted or -1 in
 * case of an error.
 */
int
virNodeAllocPages(virConnectPtr conn,
                  unsigned int npages,
                  unsigned int *pageSizes,
                  unsigned long long *pageCounts,
                  int startCell,
                  unsigned int cellCount,
                  unsigned int flags)
{
    VIR_DEBUG("conn=%p npages=%u pageSizes=%p pageCounts=%p "
              "startCell=%d cellCount=%u flags=%x",
              conn, npages, pageSizes, pageCounts, startCell,
              cellCount, flags);

    virResetLastError();

    virCheckConnectReturn(conn, -1);
    virCheckNonZeroArgGoto(npages, error);
    virCheckNonNullArgGoto(pageSizes, error);
    virCheckNonNullArgGoto(pageCounts, error);
    virCheckNonZeroArgGoto(cellCount, error);

    if (conn->driver->nodeAllocPages) {
        int ret;
        ret = conn->driver->nodeAllocPages(conn, npages, pageSizes,
                                           pageCounts, startCell,
                                           cellCount, flags);
        if (ret < 0)
            goto error;
        return ret;
    }

    virReportUnsupportedError();
 error:
    virDispatchError(conn);
    return -1;
}
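
The @startCell == -1 special case documented above can be exercised as
follows (a sketch, assuming an open virConnectPtr conn; note that the
argument checks above still require a non-zero @cellCount, so 1 is
passed even though the pages are spread over all nodes):

/* spread 1024 x 2 MiB pages over all NUMA nodes proportionally */
unsigned int sz[] = { 2048 };
unsigned long long cnt[] = { 1024 };

if (virNodeAllocPages(conn, 1, sz, cnt, -1, 1, VIR_NODE_ALLOC_PAGES_ADD) < 0)
    fprintf(stderr, "%s\n", virGetLastErrorMessage());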

src/libvirt_public.syms

@@ -679,4 +679,9 @@ LIBVIRT_1.2.8 {
        virDomainStatsRecordListFree;
} LIBVIRT_1.2.7;

LIBVIRT_1.2.9 {
    global:
        virNodeAllocPages;
} LIBVIRT_1.2.8;

# .... define new API here using predicted next version number ....

src/remote/remote_driver.c

@@ -7853,6 +7853,52 @@ remoteConnectGetAllDomainStats(virConnectPtr conn,
    return rv;
}


static int
remoteNodeAllocPages(virConnectPtr conn,
                     unsigned int npages,
                     unsigned int *pageSizes,
                     unsigned long long *pageCounts,
                     int startCell,
                     unsigned int cellCount,
                     unsigned int flags)
{
    int rv = -1;
    remote_node_alloc_pages_args args;
    remote_node_alloc_pages_ret ret;
    struct private_data *priv = conn->privateData;

    remoteDriverLock(priv);

    if (npages > REMOTE_NODE_MAX_CELLS) {
        virReportError(VIR_ERR_RPC,
                       _("too many page sizes: %d > %d"),
                       npages, REMOTE_NODE_MAX_CELLS);
        goto done;
    }

    args.pageSizes.pageSizes_val = (u_int *) pageSizes;
    args.pageSizes.pageSizes_len = npages;
    args.pageCounts.pageCounts_val = (uint64_t *) pageCounts;
    args.pageCounts.pageCounts_len = npages;
    args.startCell = startCell;
    args.cellCount = cellCount;
    args.flags = flags;

    memset(&ret, 0, sizeof(ret));
    if (call(conn, priv, 0, REMOTE_PROC_NODE_ALLOC_PAGES,
             (xdrproc_t) xdr_remote_node_alloc_pages_args, (char *) &args,
             (xdrproc_t) xdr_remote_node_alloc_pages_ret, (char *) &ret) == -1)
        goto done;

    rv = ret.ret;

 done:
    remoteDriverUnlock(priv);
    return rv;
}


/* get_nonnull_domain and get_nonnull_network turn an on-wire
 * (name, uuid) pair into virDomainPtr or virNetworkPtr object.
 * These can return NULL if underlying memory allocations fail,

@@ -8194,6 +8240,7 @@ static virDriver remote_driver = {
    .nodeGetFreePages = remoteNodeGetFreePages, /* 1.2.6 */
    .connectGetDomainCapabilities = remoteConnectGetDomainCapabilities, /* 1.2.7 */
    .connectGetAllDomainStats = remoteConnectGetAllDomainStats, /* 1.2.8 */
    .nodeAllocPages = remoteNodeAllocPages, /* 1.2.9 */
};

static virNetworkDriver network_driver = {

src/remote/remote_protocol.x

@@ -3061,6 +3061,18 @@ struct remote_node_get_free_pages_ret {
    unsigned hyper counts<REMOTE_NODE_MAX_CELLS>;
};

struct remote_node_alloc_pages_args {
    unsigned int pageSizes<REMOTE_NODE_MAX_CELLS>;
    unsigned hyper pageCounts<REMOTE_NODE_MAX_CELLS>;
    int startCell;
    unsigned int cellCount;
    unsigned int flags;
};

struct remote_node_alloc_pages_ret {
    int ret;
};

struct remote_network_dhcp_lease {
    remote_nonnull_string iface;
    hyper expirytime;

@@ -5487,5 +5499,11 @@ enum remote_procedure {
     * @generate: both
     * @acl: none
     */
    REMOTE_PROC_DOMAIN_EVENT_CALLBACK_TUNABLE = 346,

    /**
     * @generate: none
     * @acl: connect:write
     */
    REMOTE_PROC_NODE_ALLOC_PAGES = 347
};

src/remote_protocol-structs

@@ -2518,6 +2518,22 @@ struct remote_node_get_free_pages_ret {
        uint64_t * counts_val;
    } counts;
};
struct remote_node_alloc_pages_args {
    struct {
        u_int pageSizes_len;
        u_int * pageSizes_val;
    } pageSizes;
    struct {
        u_int pageCounts_len;
        uint64_t * pageCounts_val;
    } pageCounts;
    int startCell;
    u_int cellCount;
    u_int flags;
};
struct remote_node_alloc_pages_ret {
    int ret;
};
struct remote_network_dhcp_lease {
    remote_nonnull_string iface;
    int64_t expirytime;

@@ -2910,4 +2926,5 @@ enum remote_procedure {
    REMOTE_PROC_CONNECT_GET_ALL_DOMAIN_STATS = 344,
    REMOTE_PROC_DOMAIN_BLOCK_COPY = 345,
    REMOTE_PROC_DOMAIN_EVENT_CALLBACK_TUNABLE = 346,
    REMOTE_PROC_NODE_ALLOC_PAGES = 347,
};