qemu: Use 'niothreadids' instead of 'iothreads'

Although in theory both should hold the same value, niothreadids
should be used in preference to iothreads when performing comparisons.
This leaves iothreads as a purely numeric value to be saved in the config
file.  The one exception to the rule is virDomainIOThreadIDDefArrayInit,
where the iothreadids list is generated from the iothreads count, since
iothreadids was added after the initial iothreads support.
commit 4f8e888714
parent 4ceaa7491e
Author: John Ferlan
Date:   2015-10-15 10:26:26 -04:00

5 changed files with 16 additions and 16 deletions
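For context, a self-contained sketch of how the two fields relate (the member
names mirror libvirt's virDomainDef, but the types and the backfill logic are
condensed and hypothetical, not the actual virDomainIOThreadIDDefArrayInit):

    /* Hedged sketch, not libvirt's code: models how an iothreadids list
     * can be synthesized from a bare iothreads count. */
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct {
        unsigned int iothread_id;        /* assumption: 1-based ids */
    } IOThreadIDDef;

    typedef struct {
        unsigned int iothreads;          /* bare count from <iothreads> */
        size_t niothreadids;             /* length of the id list */
        IOThreadIDDef **iothreadids;     /* one entry per IOThread */
    } DomainDef;

    static int iothread_ids_init(DomainDef *def)
    {
        size_t i;

        if (def->niothreadids > 0)
            return 0;                    /* explicit ids already parsed */

        def->iothreadids = calloc(def->iothreads, sizeof(*def->iothreadids));
        if (def->iothreads > 0 && !def->iothreadids)
            return -1;

        for (i = 0; i < def->iothreads; i++) {
            IOThreadIDDef *iid = malloc(sizeof(*iid));

            if (!iid)
                return -1;
            iid->iothread_id = (unsigned int)i + 1;
            def->iothreadids[def->niothreadids++] = iid;
        }
        return 0;
    }

    int main(void)
    {
        DomainDef def = { .iothreads = 4, .niothreadids = 0, .iothreadids = NULL };

        if (iothread_ids_init(&def) < 0)
            return 1;
        printf("iothreads=%u niothreadids=%zu\n", def.iothreads, def.niothreadids);
        return 0;
    }

After the backfill runs, niothreadids == iothreads, which is why the two
counts normally agree and comparisons can safely switch to niothreadids.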

src/conf/domain_audit.c

@@ -886,8 +886,8 @@ virDomainAuditStart(virDomainObjPtr vm, const char *reason, bool success)
     virDomainAuditMemory(vm, 0, virDomainDefGetMemoryActual(vm->def),
                          "start", true);
     virDomainAuditVcpu(vm, 0, vm->def->vcpus, "start", true);
-    if (vm->def->iothreads)
-        virDomainAuditIOThread(vm, 0, vm->def->iothreads, "start", true);
+    if (vm->def->niothreadids)
+        virDomainAuditIOThread(vm, 0, vm->def->niothreadids, "start", true);

     virDomainAuditLifecycle(vm, "start", reason, success);
 }

src/conf/domain_conf.c

@@ -15220,7 +15220,7 @@ virDomainDefParseXML(xmlDocPtr xml,
         goto error;
     }

     if (n) {
-        if (n > def->iothreads) {
+        if (n > def->niothreadids) {
             virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                            _("too many iothreadsched nodes in cputune"));
             goto error;
@@ -21729,7 +21729,7 @@ virDomainDefFormatInternal(virDomainDefPtr def,
         virBufferAsprintf(buf, " current='%u'", def->vcpus);
     virBufferAsprintf(buf, ">%u</vcpu>\n", def->maxvcpus);

-    if (def->iothreads > 0) {
+    if (def->niothreadids > 0) {
         virBufferAsprintf(buf, "<iothreads>%u</iothreads>\n",
                           def->iothreads);
         /* Only print out iothreadids if we read at least one */
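For reference, the XML this formatter emits looks roughly like the fragment
below (illustrative values; the element names are libvirt's, and per the
comment above, the <iothreadids> block appears only when explicit ids were
read from the input XML):

    <iothreads>2</iothreads>            <!-- illustrative values -->
    <iothreadids>
      <iothread id='1'/>
      <iothread id='2'/>
    </iothreadids>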

src/qemu/qemu_command.c

@@ -9462,7 +9462,7 @@ qemuBuildCommandLine(virConnectPtr conn,
     virCommandAddArg(cmd, smp);
     VIR_FREE(smp);

-    if (def->iothreads > 0 &&
+    if (def->niothreadids > 0 &&
         virQEMUCapsGet(qemuCaps, QEMU_CAPS_OBJECT_IOTHREAD)) {
         /* Create iothread objects using the defined iothreadids list
          * and the defined id and name from the list. These may be used
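The iothread objects created here land on the QEMU command line roughly as
follows (an illustrative fragment assuming an iothreadids list of two
entries; the elided arguments depend on the rest of the domain config):

    # illustrative fragment of the generated command line
    qemu-system-x86_64 ... \
        -object iothread,id=iothread1 \
        -object iothread,id=iothread2

Disk definitions can then reference these objects, e.g. via the iothread
attribute on a <driver> element.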

src/qemu/qemu_driver.c

@@ -5671,13 +5671,13 @@ qemuDomainGetIOThreadsConfig(virDomainDefPtr targetDef,
     size_t i;
     int ret = -1;

-    if (targetDef->iothreads == 0)
+    if (targetDef->niothreadids == 0)
         return 0;

     if ((hostcpus = nodeGetCPUCount(NULL)) < 0)
         goto cleanup;

-    if (VIR_ALLOC_N(info_ret, targetDef->iothreads) < 0)
+    if (VIR_ALLOC_N(info_ret, targetDef->niothreadids) < 0)
         goto cleanup;

     for (i = 0; i < targetDef->niothreadids; i++) {
@@ -5707,11 +5707,11 @@ qemuDomainGetIOThreadsConfig(virDomainDefPtr targetDef,
     *info = info_ret;
     info_ret = NULL;
-    ret = targetDef->iothreads;
+    ret = targetDef->niothreadids;

 cleanup:
     if (info_ret) {
-        for (i = 0; i < targetDef->iothreads; i++)
+        for (i = 0; i < targetDef->niothreadids; i++)
             virDomainIOThreadInfoFree(info_ret[i]);
         VIR_FREE(info_ret);
     }
@@ -5910,8 +5910,8 @@ qemuDomainHotplugAddIOThread(virQEMUDriverPtr driver,
     size_t idx;
     int rc = -1;
     int ret = -1;
-    unsigned int orig_niothreads = vm->def->iothreads;
-    unsigned int exp_niothreads = vm->def->iothreads;
+    unsigned int orig_niothreads = vm->def->niothreadids;
+    unsigned int exp_niothreads = vm->def->niothreadids;
     int new_niothreads = 0;
     qemuMonitorIOThreadInfoPtr *new_iothreads = NULL;
     virCgroupPtr cgroup_iothread = NULL;
@@ -6039,8 +6039,8 @@ qemuDomainHotplugDelIOThread(virQEMUDriverPtr driver,
     char *alias = NULL;
     int rc = -1;
     int ret = -1;
-    unsigned int orig_niothreads = vm->def->iothreads;
-    unsigned int exp_niothreads = vm->def->iothreads;
+    unsigned int orig_niothreads = vm->def->niothreadids;
+    unsigned int exp_niothreads = vm->def->niothreadids;
     int new_niothreads = 0;
     qemuMonitorIOThreadInfoPtr *new_iothreads = NULL;
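A self-contained sketch of the expected-count pattern these variables appear
to support (condensed and hypothetical; the real hotplug code creates the
object through the QEMU monitor and re-queries the IOThread list):

    #include <stdio.h>

    /* Sketch: seed the expected count from niothreadids, bump it for the
     * added thread, then verify the count QEMU reports afterwards. */
    static int hotplug_add_iothread(size_t niothreadids, int new_niothreads)
    {
        unsigned int exp_niothreads = (unsigned int)niothreadids;

        exp_niothreads++;                /* one object was just created */
        if (new_niothreads != (int)exp_niothreads) {
            fprintf(stderr, "got %d, wanted %u\n",
                    new_niothreads, exp_niothreads);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        /* 2 existing IOThreads, QEMU now reports 3: success. */
        return hotplug_add_iothread(2, 3) == 0 ? 0 : 1;
    }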

src/qemu/qemu_process.c

@@ -2308,11 +2308,11 @@ qemuProcessDetectIOThreadPIDs(virQEMUDriverPtr driver,
     if (niothreads < 0)
         goto cleanup;

-    if (niothreads != vm->def->iothreads) {
+    if (niothreads != vm->def->niothreadids) {
         virReportError(VIR_ERR_INTERNAL_ERROR,
                        _("got wrong number of IOThread pids from QEMU monitor. "
-                         "got %d, wanted %d"),
-                       niothreads, vm->def->iothreads);
+                         "got %d, wanted %zu"),
+                       niothreads, vm->def->niothreadids);
         goto cleanup;
     }
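Note the format-string change from %d to %zu in this hunk: niothreadids is a
size_t, while the old iothreads counter was an unsigned int, and size_t
requires the z length modifier. A minimal illustration:

    #include <stdio.h>

    int main(void)
    {
        int niothreads = 2;          /* count reported by the monitor */
        size_t niothreadids = 2;     /* count stored in the domain def */

        /* %zu matches size_t; keeping %d here would be undefined
         * behavior on platforms where int and size_t differ in width. */
        printf("got %d, wanted %zu\n", niothreads, niothreadids);
        return 0;
    }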