mirror of https://gitlab.com/libvirt/libvirt.git
qemu: monitor: Extract 'write-threshold' automatically for -blockdev
In cases when -blockdev is used we need to use 'query-named-block-nodes'
instead of 'query-block'. This means that we can extract the write-threshold
value right away. To keep compatibility with old VMs, modify the code which
previously extracted the value so that it updates the stats structure, and a
single code path can then be used to extract the data.

Signed-off-by: Peter Krempa <pkrempa@redhat.com>
Reviewed-by: Ján Tomko <jtomko@redhat.com>
parent a656a19c02
commit 196104b91f
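For context, below is a minimal, self-contained sketch (not part of the commit) of how the write_threshold value can be pulled out of one element of the 'query-named-block-nodes' reply. It uses only the virJSONValue helpers that already appear in the diff; the function name exampleNodeWriteThreshold, the includes and the reduced error handling are illustrative assumptions, not libvirt code.

    #include "internal.h"
    #include "virjson.h"

    /* Hypothetical helper: fetch the node name and write threshold from one
     * element of the 'query-named-block-nodes' reply.  QEMU reports 0 when no
     * threshold is configured, which is the "valid only if non-zero" convention
     * that the new struct _qemuBlockStats field below relies on. */
    static int
    exampleNodeWriteThreshold(virJSONValuePtr node,
                              const char **nodename,
                              unsigned long long *threshold)
    {
        if (!(*nodename = virJSONValueObjectGetString(node, "node-name")))
            return -1;

        /* treat a missing field as 'no threshold set' */
        if (virJSONValueObjectGetNumberUlong(node, "write_threshold", threshold) < 0)
            *threshold = 0;

        return 0;
    }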
@@ -20027,29 +20027,39 @@ qemuDomainGetStatsOneBlockFallback(virQEMUDriverPtr driver,
 }


-static int
-qemuDomainGetStatsOneBlockNode(virDomainStatsRecordPtr record,
-                               int *maxparams,
-                               virStorageSourcePtr src,
-                               size_t block_idx,
-                               virHashTablePtr nodedata)
+/**
+ * qemuDomainGetStatsOneBlockRefreshNamed:
+ * @src: disk source structure
+ * @alias: disk alias
+ * @stats: hash table containing stats for all disks
+ * @nodedata: reply containing 'query-named-block-nodes' data
+ *
+ * Refresh disk block stats data (qemuBlockStatsPtr) which are present only
+ * in the reply of 'query-named-block-nodes' in cases when the data was gathered
+ * by using query-block originally.
+ */
+static void
+qemuDomainGetStatsOneBlockRefreshNamed(virStorageSourcePtr src,
+                                       const char *alias,
+                                       virHashTablePtr stats,
+                                       virHashTablePtr nodedata)
 {
+    qemuBlockStatsPtr entry;
+
     virJSONValuePtr data;
     unsigned long long tmp;
-    int ret = -1;

-    if (src->nodestorage &&
-        (data = virHashLookup(nodedata, src->nodestorage))) {
-        if (virJSONValueObjectGetNumberUlong(data, "write_threshold", &tmp) == 0 &&
-            tmp > 0)
-            QEMU_ADD_BLOCK_PARAM_ULL(record, maxparams, block_idx,
-                                     "threshold", tmp);
-    }
+    if (!nodedata || !src->nodestorage)
+        return;

-    ret = 0;
+    if (!(entry = virHashLookup(stats, alias)))
+        return;

- cleanup:
-    return ret;
+    if (!(data = virHashLookup(nodedata, src->nodestorage)))
+        return;
+
+    if (virJSONValueObjectGetNumberUlong(data, "write_threshold", &tmp) == 0)
+        entry->write_threshold = tmp;
 }


@@ -20063,8 +20073,7 @@ qemuDomainGetStatsOneBlock(virQEMUDriverPtr driver,
                            const char *entryname,
                            virStorageSourcePtr src,
                            size_t block_idx,
-                           virHashTablePtr stats,
-                           virHashTablePtr nodedata)
+                           virHashTablePtr stats)
 {
     qemuBlockStats *entry;
     int ret = -1;
@@ -20129,9 +20138,9 @@ qemuDomainGetStatsOneBlock(virQEMUDriverPtr driver,
         }
     }

-    if (qemuDomainGetStatsOneBlockNode(record, maxparams, src, block_idx,
-                                       nodedata) < 0)
-        goto cleanup;
+    if (entry->write_threshold)
+        QEMU_ADD_BLOCK_PARAM_ULL(record, maxparams, block_idx, "threshold",
+                                 entry->write_threshold);

     ret = 0;
  cleanup:
@@ -20202,9 +20211,11 @@ qemuDomainGetStatsBlock(virQEMUDriverPtr driver,
             !(alias = qemuDomainStorageAlias(disk->info.alias, src->id)))
             goto cleanup;

+        qemuDomainGetStatsOneBlockRefreshNamed(src, alias, stats, nodestats);
+
         if (qemuDomainGetStatsOneBlock(driver, cfg, dom, record, maxparams,
                                        disk->dst, alias, src, visited,
-                                       stats, nodestats) < 0)
+                                       stats) < 0)
             goto cleanup;

         VIR_FREE(alias);
@@ -587,6 +587,9 @@ struct _qemuBlockStats {
      * if wr_highest_offset_valid is true */
     unsigned long long wr_highest_offset;
     bool wr_highest_offset_valid;
+
+    /* write_threshold is valid only if it's non-zero, conforming to qemu semantics */
+    unsigned long long write_threshold;
 };

 int qemuMonitorGetAllBlockStatsInfo(qemuMonitorPtr mon,
@@ -2492,7 +2492,8 @@ qemuMonitorJSONGetAllBlockStatsInfo(qemuMonitorPtr mon,
 static int
 qemuMonitorJSONBlockStatsUpdateCapacityData(virJSONValuePtr image,
                                             const char *name,
-                                            virHashTablePtr stats)
+                                            virHashTablePtr stats,
+                                            qemuBlockStatsPtr *entry)
 {
     qemuBlockStatsPtr bstats;

@@ -2506,6 +2507,9 @@ qemuMonitorJSONBlockStatsUpdateCapacityData(virJSONValuePtr image,
         }
     }

+    if (entry)
+        *entry = bstats;
+
     /* failures can be ignored after this point */
     if (virJSONValueObjectGetNumberUlong(image, "virtual-size",
                                          &bstats->capacity) < 0)
@@ -2531,7 +2535,8 @@ qemuMonitorJSONBlockStatsUpdateCapacityOne(virJSONValuePtr image,
     char *entry_name = qemuDomainStorageAlias(dev_name, depth);
     virJSONValuePtr backing;

-    if (qemuMonitorJSONBlockStatsUpdateCapacityData(image, entry_name, stats) < 0)
+    if (qemuMonitorJSONBlockStatsUpdateCapacityData(image, entry_name,
+                                                    stats, NULL) < 0)
         goto cleanup;

     if (backingChain &&
@@ -2601,6 +2606,7 @@ qemuMonitorJSONBlockStatsUpdateCapacityBlockdevWorker(size_t pos ATTRIBUTE_UNUSE
     virHashTablePtr stats = opaque;
     virJSONValuePtr image;
     const char *nodename;
+    qemuBlockStatsPtr entry;

     if (!(nodename = virJSONValueObjectGetString(val, "node-name")) ||
         !(image = virJSONValueObjectGetObject(val, "image"))) {
@@ -2609,9 +2615,13 @@ qemuMonitorJSONBlockStatsUpdateCapacityBlockdevWorker(size_t pos ATTRIBUTE_UNUSE
         return -1;
     }

-    if (qemuMonitorJSONBlockStatsUpdateCapacityData(image, nodename, stats) < 0)
+    if (qemuMonitorJSONBlockStatsUpdateCapacityData(image, nodename, stats, &entry) < 0)
         return -1;

+    if (entry)
+        ignore_value(virJSONValueObjectGetNumberUlong(val, "write_threshold",
+                                                      &entry->write_threshold));
+
     return 1; /* we don't want to steal the value from the JSON array */
 }

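Design note: after this change both monitor code paths leave the threshold in the qemuBlockStats entry, so the reporting side needs only one check, relying on the 'non-zero means a threshold is set' convention documented above. The following stand-alone sketch mirrors that convention; the structure and function names are hypothetical stand-ins, and plain printf replaces the QEMU_ADD_BLOCK_PARAM_ULL macro that the real code uses to emit the block.<index>.threshold parameter.

    #include <stdio.h>
    #include <stddef.h>

    /* Stand-in for the write_threshold part of struct _qemuBlockStats. */
    struct exampleBlockStats {
        /* valid only if non-zero, matching qemu semantics */
        unsigned long long write_threshold;
    };

    /* Report the threshold for one disk only when one is actually set,
     * mirroring the single consumer check in qemuDomainGetStatsOneBlock(). */
    static void
    exampleReportThreshold(size_t block_idx, const struct exampleBlockStats *entry)
    {
        if (entry->write_threshold)
            printf("block.%zu.threshold=%llu\n", block_idx, entry->write_threshold);
    }

    int main(void)
    {
        struct exampleBlockStats set = { .write_threshold = 1073741824ULL };
        struct exampleBlockStats unset = { 0 };

        exampleReportThreshold(0, &set);    /* printed */
        exampleReportThreshold(1, &unset);  /* skipped: no threshold configured */
        return 0;
    }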