Introduce command 'virsh domstats --memory' for reporting memory BW

Introduce an option '--memory' for showing memory-related
information. The memory bandwidth information is listed as:

Domain: 'libvirt-vm'
 memory.bandwidth.monitor.count=4
 memory.bandwidth.monitor.0.name=vcpus_0-4
 memory.bandwidth.monitor.0.vcpus=0-4
 memory.bandwidth.monitor.0.node.count=2
 memory.bandwidth.monitor.0.node.0.id=0
 memory.bandwidth.monitor.0.node.0.bytes.total=10208067584
 memory.bandwidth.monitor.0.node.0.bytes.local=4807114752
 memory.bandwidth.monitor.0.node.1.id=1
 memory.bandwidth.monitor.0.node.1.bytes.total=8693735424
 memory.bandwidth.monitor.0.node.1.bytes.local=5850161152
 memory.bandwidth.monitor.1.name=vcpus_7
 memory.bandwidth.monitor.1.vcpus=7
 memory.bandwidth.monitor.1.node.count=2
 memory.bandwidth.monitor.1.node.0.id=0
 memory.bandwidth.monitor.1.node.0.bytes.total=853811200
 memory.bandwidth.monitor.1.node.0.bytes.local=290701312
 memory.bandwidth.monitor.1.node.1.id=1
 memory.bandwidth.monitor.1.node.1.bytes.total=406044672
 memory.bandwidth.monitor.1.node.1.bytes.local=229425152
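
The listing above corresponds to an invocation along the lines of
'virsh domstats --memory libvirt-vm' (domain name illustrative); the
bytes.total/bytes.local values are cumulative byte counters.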

Signed-off-by: Wang Huaqiang <huaqiang.wang@intel.com>
Wang Huaqiang 2020-01-02 18:45:05 +08:00 committed by Daniel P. Berrangé
parent 5d876f25bd
commit 65a63d8864
5 changed files with 152 additions and 2 deletions


@ -2186,7 +2186,7 @@ domstats
domstats [--raw] [--enforce] [--backing] [--nowait] [--state]
[--cpu-total] [--balloon] [--vcpu] [--interface]
[--block] [--perf] [--iothread]
[--block] [--perf] [--iothread] [--memory]
[[--list-active] [--list-inactive]
[--list-persistent] [--list-transient] [--list-running]
[--list-paused] [--list-shutoff] [--list-other]] | [domain ...]
@ -2205,7 +2205,7 @@ behavior use the *--raw* flag.
The individual statistics groups are selectable via specific flags. By
default all supported statistics groups are returned. Supported
statistics groups flags are: *--state*, *--cpu-total*, *--balloon*,
*--vcpu*, *--interface*, *--block*, *--perf*, *--iothread*.
*--vcpu*, *--interface*, *--block*, *--perf*, *--iothread*, *--memory*.
Note that - depending on the hypervisor type and version or the domain state
- not all of the following statistics may be returned.
@ -2372,6 +2372,24 @@ not available for statistical purposes.
* ``iothread.<id>.poll-shrink`` - polling time shrink value. A value of 0
(zero) indicates shrink is managed by hypervisor.
*--memory* returns:
* ``memory.bandwidth.monitor.count`` - the number of memory bandwidth
monitors for this domain
* ``memory.bandwidth.monitor.<num>.name`` - the name of monitor <num>
* ``memory.bandwidth.monitor.<num>.vcpus`` - the vcpu list of monitor <num>
* ``memory.bandwidth.monitor.<num>.node.count`` - the number of memory
controllers in monitor <num>
* ``memory.bandwidth.monitor.<num>.node.<index>.id`` - host allocated memory
controller id for controller <index> of monitor <num>
* ``memory.bandwidth.monitor.<num>.node.<index>.bytes.local`` - the cumulative
bytes consumed by @vcpus that pass through the memory controller in the
same processor that the scheduled host CPU belongs to.
* ``memory.bandwidth.monitor.<num>.node.<index>.bytes.total`` - the total
bytes consumed by @vcpus passing through all memory controllers, either
local or remote; see the worked example below.
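These byte counters are cumulative, so a bandwidth figure is obtained by
sampling the same counter twice and dividing the delta by the interval.
For example (second sample hypothetical): if ``bytes.total`` for node 0
reads 10208067584 and one second later reads 10308067584, the delta of
100000000 bytes over 1 second corresponds to roughly 100 MB/s through
that memory controller.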
Selecting a specific statistics group doesn't guarantee that the
daemon supports the selected group of stats. Flag *--enforce*
forces the command to fail if the daemon doesn't support the


@ -2160,6 +2160,7 @@ typedef enum {
    VIR_DOMAIN_STATS_BLOCK = (1 << 5), /* return domain block info */
    VIR_DOMAIN_STATS_PERF = (1 << 6), /* return domain perf event info */
    VIR_DOMAIN_STATS_IOTHREAD = (1 << 7), /* return iothread poll info */
    VIR_DOMAIN_STATS_MEMORY = (1 << 8), /* return domain memory info */
} virDomainStatsTypes;
typedef enum {


@ -11640,6 +11640,27 @@ virConnectGetDomainCapabilities(virConnectPtr conn,
* hypervisor to choose how to shrink the
* polling time.
*
* VIR_DOMAIN_STATS_MEMORY:
* Return memory bandwidth statistics and usage information. The typed
* parameter keys are in this format:
*
* "memory.bandwidth.monitor.count" - the number of memory bandwidth
* monitors for this domain
* "memory.bandwidth.monitor.<num>.name" - the name of monitor <num>
* "memory.bandwidth.monitor.<num>.vcpus" - the vcpu list of monitor <num>
* "memory.bandwidth.monitor.<num>.node.count" - the number of memory
* controllers in monitor <num>
* "memory.bandwidth.monitor.<num>.node.<index>.id" - host allocated memory
* controller id for controller
* <index> of monitor <num>
* "memory.bandwidth.monitor.<num>.node.<index>.bytes.local" - the
* cumulative bytes consumed by @vcpus that pass
* through the memory controller in the same processor
* that the scheduled host CPU belongs to.
* "memory.bandwidth.monitor.<num>.node.<index>.bytes.total" - the total
* bytes consumed by @vcpus passing through all
* memory controllers, either local or remote.
*
* Note that entire stats groups or individual stat fields may be missing from
* the output in case they are not supported by the given hypervisor, are not
* applicable for the current state of the guest domain, or their retrieval
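
As an aside (not part of this patch), a minimal client-side sketch of
consuming the new group through the public API might look as follows; the
connection URI, domain selection and error handling are illustrative only,
and parameter values would be decoded according to their type (e.g. with
virTypedParamsGetULLong()):

#include <stdio.h>
#include <libvirt/libvirt.h>

int main(void)
{
    virConnectPtr conn = virConnectOpenReadOnly("qemu:///system");
    virDomainStatsRecordPtr *records = NULL;
    int nrecords;
    int i, j;

    if (!conn)
        return 1;

    /* VIR_DOMAIN_STATS_MEMORY may be OR-ed with other virDomainStatsTypes
     * bits to request several statistics groups in one call. */
    nrecords = virConnectGetAllDomainStats(conn, VIR_DOMAIN_STATS_MEMORY,
                                           &records, 0);

    for (i = 0; i < nrecords; i++) {
        printf("Domain: '%s'\n", virDomainGetName(records[i]->dom));
        /* Prints keys such as memory.bandwidth.monitor.count,
         * memory.bandwidth.monitor.0.node.0.bytes.total, ... */
        for (j = 0; j < records[i]->nparams; j++)
            printf(" %s\n", records[i]->params[j].field);
    }

    if (records)
        virDomainStatsRecordListFree(records);
    virConnectClose(conn);
    return 0;
}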


@ -20676,6 +20676,9 @@ qemuDomainGetResctrlMonData(virQEMUDriverPtr driver,
        features = caps->host.cache.monitor->features;
        break;
    case VIR_RESCTRL_MONITOR_TYPE_MEMBW:
        if (caps->host.memBW.monitor)
            features = caps->host.memBW.monitor->features;
        break;
    case VIR_RESCTRL_MONITOR_TYPE_UNSUPPORT:
    case VIR_RESCTRL_MONITOR_TYPE_LAST:
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
@ -20728,6 +20731,94 @@ qemuDomainGetResctrlMonData(virQEMUDriverPtr driver,
}

static int
qemuDomainGetStatsMemoryBandwidth(virQEMUDriverPtr driver,
                                  virDomainObjPtr dom,
                                  virTypedParamListPtr params)
{
    virQEMUResctrlMonDataPtr *resdata = NULL;
    char **features = NULL;
    size_t nresdata = 0;
    size_t i = 0;
    size_t j = 0;
    size_t k = 0;
    int ret = -1;

    if (!virDomainObjIsActive(dom))
        return 0;

    if (qemuDomainGetResctrlMonData(driver, dom, &resdata, &nresdata,
                                    VIR_RESCTRL_MONITOR_TYPE_MEMBW) < 0)
        goto cleanup;

    if (nresdata == 0)
        return 0;

    if (virTypedParamListAddUInt(params, nresdata,
                                 "memory.bandwidth.monitor.count") < 0)
        goto cleanup;

    for (i = 0; i < nresdata; i++) {
        if (virTypedParamListAddString(params, resdata[i]->name,
                                       "memory.bandwidth.monitor.%zu.name",
                                       i) < 0)
            goto cleanup;

        if (virTypedParamListAddString(params, resdata[i]->vcpus,
                                       "memory.bandwidth.monitor.%zu.vcpus",
                                       i) < 0)
            goto cleanup;

        if (virTypedParamListAddUInt(params, resdata[i]->nstats,
                                     "memory.bandwidth.monitor.%zu.node.count",
                                     i) < 0)
            goto cleanup;

        for (j = 0; j < resdata[i]->nstats; j++) {
            if (virTypedParamListAddUInt(params, resdata[i]->stats[j]->id,
                                         "memory.bandwidth.monitor.%zu."
                                         "node.%zu.id",
                                         i, j) < 0)
                goto cleanup;

            /* Which counters exist is decided by the monitoring features
             * reported by resctrl (e.g. "mbm_local_bytes",
             * "mbm_total_bytes"). */
            features = resdata[i]->stats[j]->features;
            for (k = 0; features[k]; k++) {
                if (STREQ(features[k], "mbm_local_bytes")) {
                    /* The cumulative data passing through the local memory
                     * controller is recorded with a 64-bit counter. */
                    if (virTypedParamListAddULLong(params,
                                                   resdata[i]->stats[j]->vals[k],
                                                   "memory.bandwidth.monitor."
                                                   "%zu.node.%zu.bytes.local",
                                                   i, j) < 0)
                        goto cleanup;
                }

                if (STREQ(features[k], "mbm_total_bytes")) {
                    /* The cumulative data passing through local and remote
                     * memory controllers is recorded with a 64-bit counter. */
                    if (virTypedParamListAddULLong(params,
                                                   resdata[i]->stats[j]->vals[k],
                                                   "memory.bandwidth.monitor."
                                                   "%zu.node.%zu.bytes.total",
                                                   i, j) < 0)
                        goto cleanup;
                }
            }
        }
    }

    ret = 0;

 cleanup:
    for (i = 0; i < nresdata; i++)
        qemuDomainFreeResctrlMonData(resdata[i]);
    VIR_FREE(resdata);

    return ret;
}

static int
qemuDomainGetStatsCpuCache(virQEMUDriverPtr driver,
                           virDomainObjPtr dom,
@ -20836,6 +20927,17 @@ qemuDomainGetStatsCpu(virQEMUDriverPtr driver,
}

static int
qemuDomainGetStatsMemory(virQEMUDriverPtr driver,
                         virDomainObjPtr dom,
                         virTypedParamListPtr params,
                         unsigned int privflags G_GNUC_UNUSED)
{
    return qemuDomainGetStatsMemoryBandwidth(driver, dom, params);
}

static int
qemuDomainGetStatsBalloon(virQEMUDriverPtr driver,
                          virDomainObjPtr dom,
@ -21505,6 +21607,7 @@ static struct qemuDomainGetStatsWorker qemuDomainGetStatsWorkers[] = {
    { qemuDomainGetStatsBlock, VIR_DOMAIN_STATS_BLOCK, true },
    { qemuDomainGetStatsPerf, VIR_DOMAIN_STATS_PERF, false },
    { qemuDomainGetStatsIOThread, VIR_DOMAIN_STATS_IOTHREAD, true },
    { qemuDomainGetStatsMemory, VIR_DOMAIN_STATS_MEMORY, false },
    { NULL, 0, false }
};


@ -2130,6 +2130,10 @@ static const vshCmdOptDef opts_domstats[] = {
     .type = VSH_OT_BOOL,
     .help = N_("report domain IOThread information"),
    },
    {.name = "memory",
     .type = VSH_OT_BOOL,
     .help = N_("report domain memory usage"),
    },
    {.name = "list-active",
     .type = VSH_OT_BOOL,
     .help = N_("list only active domains"),
@ -2246,6 +2250,9 @@ cmdDomstats(vshControl *ctl, const vshCmd *cmd)
    if (vshCommandOptBool(cmd, "iothread"))
        stats |= VIR_DOMAIN_STATS_IOTHREAD;

    if (vshCommandOptBool(cmd, "memory"))
        stats |= VIR_DOMAIN_STATS_MEMORY;

    if (vshCommandOptBool(cmd, "list-active"))
        flags |= VIR_CONNECT_GET_ALL_DOMAINS_STATS_ACTIVE;