
qemu: bulk stats: extend internal collection API

Future patches that implement more bulk stats groups for QEMU will
need to access the connection object.

To accommodate that, a few changes are needed:

* extend the internal collector prototype to pass the QEMU driver object

* add a per-group flag marking whether a collector needs monitor access

* If at least one collector of the requested stats needs monitor access,
  we must start a query job for each domain. The specific collectors
  then run nested monitor jobs inside it (see the sketch after this list).

* If the job can't be acquired, we pass flags to the collectors so
  that those needing monitor access can be skipped, gathering as much
  data as possible.
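
As an illustration of the extended API, here is a minimal sketch (not
part of this commit) of a collector that needs monitor access; the
stats group VIR_DOMAIN_STATS_EXAMPLE and the collector name are
hypothetical:

static int
qemuDomainGetStatsExample(virQEMUDriverPtr driver,
                          virDomainObjPtr dom,
                          virDomainStatsRecordPtr record,
                          int *maxparams,
                          unsigned int privflags)
{
    unsigned long long value = 0;

    /* No job was acquired (or the domain is inactive): skip this
     * collector instead of failing, so the caller can still gather
     * the groups that don't need the monitor. */
    if (!HAVE_JOB(privflags) || !virDomainObjIsActive(dom))
        return 0;

    qemuDomainObjEnterMonitor(driver, dom);
    /* ... fetch 'value' from the QEMU monitor as a nested job ... */
    qemuDomainObjExitMonitor(driver, dom);

    return virTypedParamsAddULLong(&record->params, &record->nparams,
                                   maxparams, "example.value", value);
}

Registering the collector with .monitor = true is what makes
qemuConnectGetAllDomainStats start a query job for the domain first:

static struct qemuDomainGetStatsWorker qemuDomainGetStatsWorkers[] = {
    { qemuDomainGetStatsState, VIR_DOMAIN_STATS_STATE, false },
    { qemuDomainGetStatsExample, VIR_DOMAIN_STATS_EXAMPLE, true },
    { NULL, 0, false }
};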

Signed-off-by: Francesco Romani <fromani@redhat.com>
Signed-off-by: Peter Krempa <pkrempa@redhat.com>
Author: Francesco Romani 2014-09-15 10:48:04 +02:00 (committed by Peter Krempa)
parent b3f42da6b7
commit 1f4831ee6e

src/qemu/qemu_driver.c

@@ -17373,7 +17373,8 @@ qemuConnectGetDomainCapabilities(virConnectPtr conn,
 static int
-qemuDomainGetStatsState(virDomainObjPtr dom,
+qemuDomainGetStatsState(virQEMUDriverPtr driver ATTRIBUTE_UNUSED,
+                        virDomainObjPtr dom,
                         virDomainStatsRecordPtr record,
                         int *maxparams,
                         unsigned int privflags ATTRIBUTE_UNUSED)
@@ -17396,8 +17397,18 @@ qemuDomainGetStatsState(virDomainObjPtr dom,
 }
 
+typedef enum {
+    QEMU_DOMAIN_STATS_HAVE_JOB = (1 << 0), /* job is entered, monitor can be
+                                              accessed */
+} qemuDomainStatsFlags;
+
+#define HAVE_JOB(flags) ((flags) & QEMU_DOMAIN_STATS_HAVE_JOB)
+
 typedef int
-(*qemuDomainGetStatsFunc)(virDomainObjPtr dom,
+(*qemuDomainGetStatsFunc)(virQEMUDriverPtr driver,
+                          virDomainObjPtr dom,
                           virDomainStatsRecordPtr record,
                           int *maxparams,
                           unsigned int flags);
@@ -17405,11 +17416,12 @@ typedef int
 struct qemuDomainGetStatsWorker {
     qemuDomainGetStatsFunc func;
     unsigned int stats;
+    bool monitor;
 };
 
 static struct qemuDomainGetStatsWorker qemuDomainGetStatsWorkers[] = {
-    { qemuDomainGetStatsState, VIR_DOMAIN_STATS_STATE},
-    { NULL, 0 }
+    { qemuDomainGetStatsState, VIR_DOMAIN_STATS_STATE, false },
+    { NULL, 0, false }
 };
@@ -17441,6 +17453,20 @@ qemuDomainGetStatsCheckSupport(unsigned int *stats,
 }
 
+static bool
+qemuDomainGetStatsNeedMonitor(unsigned int stats)
+{
+    size_t i;
+
+    for (i = 0; qemuDomainGetStatsWorkers[i].func; i++)
+        if (stats & qemuDomainGetStatsWorkers[i].stats &&
+            qemuDomainGetStatsWorkers[i].monitor)
+            return true;
+
+    return false;
+}
+
 static int
 qemuDomainGetStats(virConnectPtr conn,
                    virDomainObjPtr dom,
@@ -17458,8 +17484,8 @@ qemuDomainGetStats(virConnectPtr conn,
     for (i = 0; qemuDomainGetStatsWorkers[i].func; i++) {
         if (stats & qemuDomainGetStatsWorkers[i].stats) {
-            if (qemuDomainGetStatsWorkers[i].func(dom, tmp, &maxparams,
-                                                  flags) < 0)
+            if (qemuDomainGetStatsWorkers[i].func(conn->privateData, dom, tmp,
+                                                  &maxparams, flags) < 0)
                 goto cleanup;
         }
     }
@@ -17498,6 +17524,8 @@ qemuConnectGetAllDomainStats(virConnectPtr conn,
     int nstats = 0;
     size_t i;
     int ret = -1;
+    unsigned int privflags = 0;
+    unsigned int domflags = 0;
 
     if (ndoms)
         virCheckFlags(VIR_CONNECT_GET_ALL_DOMAINS_STATS_ENFORCE_STATS, -1);
@@ -17532,7 +17560,11 @@ qemuConnectGetAllDomainStats(virConnectPtr conn,
     if (VIR_ALLOC_N(tmpstats, ndoms + 1) < 0)
         goto cleanup;
 
+    if (qemuDomainGetStatsNeedMonitor(stats))
+        privflags |= QEMU_DOMAIN_STATS_HAVE_JOB;
+
     for (i = 0; i < ndoms; i++) {
+        domflags = privflags;
         virDomainStatsRecordPtr tmp = NULL;
 
         if (!(dom = qemuDomObjFromDomain(doms[i])))
@@ -17542,12 +17574,22 @@ qemuConnectGetAllDomainStats(virConnectPtr conn,
             !virConnectGetAllDomainStatsCheckACL(conn, dom->def))
             continue;
 
-        if (qemuDomainGetStats(conn, dom, stats, &tmp, flags) < 0)
-            goto cleanup;
+        if (HAVE_JOB(domflags) &&
+            qemuDomainObjBeginJob(driver, dom, QEMU_JOB_QUERY) < 0)
+            /* As it was never requested. Gather as much as possible anyway. */
+            domflags &= ~QEMU_DOMAIN_STATS_HAVE_JOB;
+
+        if (qemuDomainGetStats(conn, dom, stats, &tmp, domflags) < 0)
+            goto endjob;
 
         if (tmp)
             tmpstats[nstats++] = tmp;
 
+        if (HAVE_JOB(domflags) && !qemuDomainObjEndJob(driver, dom)) {
+            dom = NULL;
+            continue;
+        }
+
         virObjectUnlock(dom);
         dom = NULL;
     }
@@ -17557,6 +17599,11 @@ qemuConnectGetAllDomainStats(virConnectPtr conn,
     ret = nstats;
 
+ endjob:
+    if (HAVE_JOB(domflags) && dom)
+        if (!qemuDomainObjEndJob(driver, dom))
+            dom = NULL;
+
 cleanup:
     if (dom)
         virObjectUnlock(dom);
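
For context, a minimal caller-side sketch of the public entry point
these collectors serve (virConnectGetAllDomainStats, available since
libvirt 1.2.8); error handling is condensed:

#include <stdio.h>
#include <libvirt/libvirt.h>

int
collectAllDomainStats(virConnectPtr conn)
{
    virDomainStatsRecordPtr *records = NULL;
    int nrecords;
    int i;

    /* Request the state group for all domains; the QEMU driver starts
     * a per-domain query job only if a requested group's collector is
     * marked as needing monitor access. */
    nrecords = virConnectGetAllDomainStats(conn, VIR_DOMAIN_STATS_STATE,
                                           &records, 0);
    if (nrecords < 0)
        return -1;

    for (i = 0; i < nrecords; i++)
        printf("%s: %d stats\n", virDomainGetName(records[i]->dom),
               records[i]->nparams);

    virDomainStatsRecordListFree(records);
    return 0;
}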