vcpuinfo: add code to fall back to the new API

The "virsh vcpuinfo" command results in failure when the target domain
is inactive on KVM. This patch improves this behavior by adding the
fallback to invoke virDomainGetVcpupinInfo API in case of
virDomainGetVcpus API returns error and the target domain is inactive.

Signed-off-by: Taku Izumi <izumi.taku@jp.fujitsu.com>
Author:       Taku Izumi <izumi.taku@jp.fujitsu.com>
Date:         2011-06-24 18:01:25 +09:00
Committed by: Eric Blake
Parent:       291162abf4
Commit:       ffb552ebf0

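With this patch, "virsh vcpuinfo" on an inactive domain no longer simply errors
out: the fallback path below reports "N/A" for the runtime fields and still
prints the CPU affinity taken from the persistent configuration. For
illustration only (not captured output; assuming a host with 4 physical CPUs
and a 2-vCPU guest whose persistent configuration pins both vCPUs to host
CPUs 0 and 1), the output would look roughly like:

    VCPU:           0
    CPU:            N/A
    State:          N/A
    CPU time        N/A
    CPU Affinity:   yy--

    VCPU:           1
    CPU:            N/A
    State:          N/A
    CPU time        N/A
    CPU Affinity:   yy--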

@@ -2909,10 +2909,11 @@ cmdVcpuinfo(vshControl *ctl, const vshCmd *cmd)
     virDomainPtr dom;
     virNodeInfo nodeinfo;
     virVcpuInfoPtr cpuinfo;
-    unsigned char *cpumap;
-    int ncpus;
+    unsigned char *cpumaps;
+    int ncpus, maxcpu;
     size_t cpumaplen;
     bool ret = true;
+    int n, m;
 
     if (!vshConnectionUsability(ctl, ctl->conn))
         return false;
@@ -2931,15 +2932,14 @@ cmdVcpuinfo(vshControl *ctl, const vshCmd *cmd)
     }
 
     cpuinfo = vshMalloc(ctl, sizeof(virVcpuInfo)*info.nrVirtCpu);
-    cpumaplen = VIR_CPU_MAPLEN(VIR_NODEINFO_MAXCPUS(nodeinfo));
-    cpumap = vshMalloc(ctl, info.nrVirtCpu * cpumaplen);
+    maxcpu = VIR_NODEINFO_MAXCPUS(nodeinfo);
+    cpumaplen = VIR_CPU_MAPLEN(maxcpu);
+    cpumaps = vshMalloc(ctl, info.nrVirtCpu * cpumaplen);
 
     if ((ncpus = virDomainGetVcpus(dom,
                                    cpuinfo, info.nrVirtCpu,
-                                   cpumap, cpumaplen)) >= 0) {
-        int n;
+                                   cpumaps, cpumaplen)) >= 0) {
         for (n = 0 ; n < ncpus ; n++) {
-            unsigned int m;
             vshPrint(ctl, "%-15s %d\n", _("VCPU:"), n);
             vshPrint(ctl, "%-15s %d\n", _("CPU:"), cpuinfo[n].cpu);
             vshPrint(ctl, "%-15s %s\n", _("State:"),
@@ -2952,8 +2952,8 @@ cmdVcpuinfo(vshControl *ctl, const vshCmd *cmd)
                 vshPrint(ctl, "%-15s %.1lfs\n", _("CPU time:"), cpuUsed);
             }
             vshPrint(ctl, "%-15s ", _("CPU Affinity:"));
-            for (m = 0 ; m < VIR_NODEINFO_MAXCPUS(nodeinfo) ; m++) {
-                vshPrint(ctl, "%c", VIR_CPU_USABLE(cpumap, cpumaplen, n, m) ? 'y' : '-');
+            for (m = 0; m < maxcpu; m++) {
+                vshPrint(ctl, "%c", VIR_CPU_USABLE(cpumaps, cpumaplen, n, m) ? 'y' : '-');
             }
             vshPrint(ctl, "\n");
             if (n < (ncpus - 1)) {
@@ -2961,14 +2961,34 @@ cmdVcpuinfo(vshControl *ctl, const vshCmd *cmd)
             }
         }
     } else {
-        if (info.state == VIR_DOMAIN_SHUTOFF) {
-            vshError(ctl, "%s",
-                     _("Domain shut off, virtual CPUs not present."));
+        if (info.state == VIR_DOMAIN_SHUTOFF &&
+            (ncpus = virDomainGetVcpupinInfo(dom, info.nrVirtCpu,
+                                             cpumaps, cpumaplen,
+                                             VIR_DOMAIN_AFFECT_CONFIG)) >= 0) {
+
+            /* fallback plan to use virDomainGetVcpupinInfo */
+
+            for (n = 0; n < ncpus; n++) {
+                vshPrint(ctl, "%-15s %d\n", _("VCPU:"), n);
+                vshPrint(ctl, "%-15s %s\n", _("CPU:"), _("N/A"));
+                vshPrint(ctl, "%-15s %s\n", _("State:"), _("N/A"));
+                vshPrint(ctl, "%-15s %s\n", _("CPU time"), _("N/A"));
+                vshPrint(ctl, "%-15s ", _("CPU Affinity:"));
+                for (m = 0; m < maxcpu; m++) {
+                    vshPrint(ctl, "%c",
+                             VIR_CPU_USABLE(cpumaps, cpumaplen, n, m) ? 'y' : '-');
+                }
+                vshPrint(ctl, "\n");
+                if (n < (ncpus - 1)) {
+                    vshPrint(ctl, "\n");
+                }
+            }
+        } else {
+            ret = false;
         }
-        ret = false;
     }
 
-    VIR_FREE(cpumap);
+    VIR_FREE(cpumaps);
     VIR_FREE(cpuinfo);
     virDomainFree(dom);
     return ret;
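
For reference, the same persistent-pinning query can be made from a standalone
libvirt client. The sketch below is not part of the patch: it assumes the API
spelling used in this commit (virDomainGetVcpupinInfo; later libvirt releases
expose it as virDomainGetVcpuPinInfo), uses a hypothetical domain named "demo",
and omits most error handling.

    /* Sketch: read a domain's persistent vCPU pinning, which is what the
     * virsh fallback above relies on for shut-off domains. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <libvirt/libvirt.h>

    int main(void)
    {
        virConnectPtr conn = virConnectOpenReadOnly(NULL);
        if (!conn)
            return 1;

        /* "demo" is a placeholder domain name. */
        virDomainPtr dom = virDomainLookupByName(conn, "demo");
        virDomainInfo info;
        virNodeInfo nodeinfo;
        if (!dom || virDomainGetInfo(dom, &info) < 0 ||
            virNodeGetInfo(conn, &nodeinfo) < 0) {
            if (dom)
                virDomainFree(dom);
            virConnectClose(conn);
            return 1;
        }

        int maxcpu = VIR_NODEINFO_MAXCPUS(nodeinfo);
        int cpumaplen = VIR_CPU_MAPLEN(maxcpu);
        unsigned char *cpumaps = calloc(info.nrVirtCpu, cpumaplen);

        /* The call the patch falls back to: pinning from the persistent
         * config, available even while the domain is shut off. */
        int ncpus = virDomainGetVcpupinInfo(dom, info.nrVirtCpu, cpumaps,
                                            cpumaplen, VIR_DOMAIN_AFFECT_CONFIG);

        for (int n = 0; n < ncpus; n++) {
            printf("VCPU %d affinity: ", n);
            for (int m = 0; m < maxcpu; m++)
                putchar(VIR_CPU_USABLE(cpumaps, cpumaplen, n, m) ? 'y' : '-');
            putchar('\n');
        }

        free(cpumaps);
        virDomainFree(dom);
        virConnectClose(conn);
        return 0;
    }

Build it (assuming pkg-config knows about libvirt) with:
gcc vcpupin-demo.c $(pkg-config --cflags --libs libvirt)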