Adds CPU selection infrastructure

Each driver supporting CPU selection must fill in the host CPU
capabilities. When filling them, drivers for hypervisors running on the
same node as libvirtd can use cpuNodeData() to obtain raw CPU data;
other drivers, such as VMware, need to implement their own way of
getting such data. Raw data can be decoded into a virCPUDefPtr using
the cpuDecode() function.

When implementing virConnectCompareCPU(), a hypervisor driver can
simply call the cpuCompareXML() function with the host CPU
capabilities.
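For illustration, a minimal sketch of such a callback. The
example_driver type, its caps field, and the assumption that
caps->host.cpu already holds the decoded host CPU definition are
hypothetical; only cpuCompareXML() comes from this patch:

    struct example_driver {
        virCapsPtr caps;    /* host capabilities, incl. host CPU def */
    };

    static int
    exampleCompareCPU(virConnectPtr conn, const char *xmlDesc)
    {
        struct example_driver *driver = conn->privateData;

        /* caps->host.cpu is expected to be filled in at driver
         * start-up, e.g. from cpuNodeData() + cpuDecode() */
        if (driver->caps->host.cpu == NULL)
            return VIR_CPU_COMPARE_ERROR;

        return cpuCompareXML(driver->caps->host.cpu, xmlDesc);
    }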
For each guest for which a driver supports selecting CPU models, it
must set the appropriate feature in the guest's capabilities:

    virCapabilitiesAddGuestFeature(guest, "cpuselection", 1, 0)

The actions needed when a domain is being created depend on whether the
hypervisor understands raw CPU data (currently CPUID for the i686 and
x86_64 architectures) or symbolic model names have to be used.

Typical use by hypervisors which prefer CPUID (such as VMware and Xen),
sketched after this list:
- convert the guest CPU configuration from the domain's XML into a set
  of raw data structures, each representing one of the feature
  policies:
      cpuEncode(conn, architecture, guest_cpu_config,
                &forced_data, &required_data, &optional_data,
                &disabled_data, &forbidden_data)
- create a mask or whatever else the hypervisor expects to see and pass
  it to the hypervisor
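A sketch of that flow, using the cpuEncode() and cpuDataFree()
signatures from the cpu.c listing below (which no longer take a conn
argument) and assuming the domain definition carries the guest CPU
configuration in def->cpu:

    static int
    exampleSetGuestCPUID(virDomainDefPtr def)
    {
        union cpuData *forced = NULL;
        union cpuData *required = NULL;
        union cpuData *optional = NULL;
        union cpuData *disabled = NULL;
        union cpuData *forbidden = NULL;
        int ret = -1;

        if (cpuEncode(def->os.arch, def->cpu,
                      &forced, &required, &optional,
                      &disabled, &forbidden) < 0)
            goto cleanup;

        /* here the per-policy raw data would be folded into a CPUID
         * mask (or whatever else the hypervisor expects) and passed
         * on; that step is entirely hypervisor specific */

        ret = 0;

    cleanup:
        cpuDataFree(def->os.arch, forced);
        cpuDataFree(def->os.arch, required);
        cpuDataFree(def->os.arch, optional);
        cpuDataFree(def->os.arch, disabled);
        cpuDataFree(def->os.arch, forbidden);
        return ret;
    }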
Typical use by hypervisors with symbolic model names (such as QEMU),
again sketched after this list:
- get raw CPU data for the computed guest CPU:
      cpuGuestData(conn, host_cpu, guest_cpu_config, &data)
- decode the raw data into a virCPUDefPtr, with a possible restriction
  on the allowed model names:
      cpuDecode(conn, guest, data, n_allowed_models, allowed_models)
- pass guest->model and guest->features to the hypervisor
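A sketch of the symbolic-name flow, assuming the raw data has already
been computed via cpuGuestData(); cpuDecode() is called with the
argument order from the cpu.c listing below, and the list of models
the hypervisor is assumed to accept is only an example:

    static char *
    exampleGuestCPUModel(union cpuData *data)
    {
        static const char *models[] = { "qemu64", "core2duo", "phenom" };
        virCPUDefPtr guest = NULL;
        char *model = NULL;

        if (VIR_ALLOC(guest) < 0) {
            virReportOOMError();
            goto cleanup;
        }
        guest->type = VIR_CPU_TYPE_GUEST;

        if (cpuDecode(guest, data, models,
                      ARRAY_CARDINALITY(models), NULL) < 0)
            goto cleanup;

        /* guest->model (plus guest->features) is what gets passed to
         * the hypervisor, e.g. as QEMU's -cpu argument */
        if (!(model = strdup(guest->model)))
            virReportOOMError();

    cleanup:
        virCPUDefFree(guest);
        return model;
    }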
* src/cpu/cpu.c src/cpu/cpu.h src/cpu/cpu_generic.c
src/cpu/cpu_generic.h src/cpu/cpu_map.c src/cpu/cpu_map.h
src/cpu/cpu_x86.c src/cpu/cpu_x86.h src/cpu/cpu_x86_data.h
* configure.in: check for CPUID instruction
* src/Makefile.am: glue the new files in
* src/libvirt_private.syms: add new private symbols
* po/POTFILES.in: add new cpu files containing translatable strings

/*
 * cpu.c: internal functions for CPU manipulation
 *
 * Copyright (C) 2009-2010 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Authors:
 *      Jiri Denemark <jdenemar@redhat.com>
 */

#include <config.h>

#include "logging.h"
#include "memory.h"
#include "xml.h"
#include "cpu.h"
#include "cpu_x86.h"
#include "cpu_generic.h"


#define NR_DRIVERS ARRAY_CARDINALITY(drivers)
#define VIR_FROM_THIS VIR_FROM_CPU

static struct cpuArchDriver *drivers[] = {
    &cpuDriverX86,
    /* generic driver must always be the last one */
    &cpuDriverGeneric
};


static struct cpuArchDriver *
cpuGetSubDriver(const char *arch)
{
    unsigned int i;
    unsigned int j;

    if (arch == NULL) {
        virCPUReportError(VIR_ERR_INTERNAL_ERROR,
                          "%s", _("undefined hardware architecture"));
        return NULL;
    }

    for (i = 0; i < NR_DRIVERS - 1; i++) {
        for (j = 0; j < drivers[i]->narch; j++) {
            if (STREQ(arch, drivers[i]->arch[j]))
                return drivers[i];
        }
    }

    /* use generic driver by default */
    return drivers[NR_DRIVERS - 1];
}


virCPUCompareResult
cpuCompareXML(virCPUDefPtr host,
              const char *xml)
{
    xmlDocPtr doc = NULL;
    xmlXPathContextPtr ctxt = NULL;
    virCPUDefPtr cpu = NULL;
    virCPUCompareResult ret = VIR_CPU_COMPARE_ERROR;

    VIR_DEBUG("host=%p, xml=%s", host, NULLSTR(xml));

    if (!(doc = virXMLParseString(xml, "cpu.xml")))
        goto cleanup;

    if ((ctxt = xmlXPathNewContext(doc)) == NULL) {
        virReportOOMError();
        goto cleanup;
    }

    ctxt->node = xmlDocGetRootElement(doc);

    cpu = virCPUDefParseXML(ctxt->node, ctxt, VIR_CPU_TYPE_AUTO);
    if (cpu == NULL)
        goto cleanup;

    ret = cpuCompare(host, cpu);

cleanup:
    virCPUDefFree(cpu);
    xmlXPathFreeContext(ctxt);
    xmlFreeDoc(doc);

    return ret;
}


virCPUCompareResult
cpuCompare(virCPUDefPtr host,
           virCPUDefPtr cpu)
{
    struct cpuArchDriver *driver;

    VIR_DEBUG("host=%p, cpu=%p", host, cpu);

    if ((driver = cpuGetSubDriver(host->arch)) == NULL)
        return VIR_CPU_COMPARE_ERROR;

    if (driver->compare == NULL) {
        virCPUReportError(VIR_ERR_NO_SUPPORT,
                          _("cannot compare CPUs of %s architecture"),
                          host->arch);
        return VIR_CPU_COMPARE_ERROR;
    }

    return driver->compare(host, cpu);
}


int
cpuDecode(virCPUDefPtr cpu,
          const union cpuData *data,
          const char **models,
          unsigned int nmodels,
          const char *preferred)
{
    struct cpuArchDriver *driver;

    VIR_DEBUG("cpu=%p, data=%p, nmodels=%u, preferred=%s",
              cpu, data, nmodels, NULLSTR(preferred));
    if (models) {
        unsigned int i;
        for (i = 0; i < nmodels; i++)
            VIR_DEBUG("models[%u]=%s", i, NULLSTR(models[i]));
    }

    if (models == NULL && nmodels != 0) {
        virCPUReportError(VIR_ERR_INTERNAL_ERROR,
                          "%s", _("nonzero nmodels doesn't match with NULL models"));
        return -1;
    }

    if (cpu == NULL) {
        virCPUReportError(VIR_ERR_INTERNAL_ERROR,
                          "%s", _("invalid CPU definition"));
        return -1;
    }

    if ((driver = cpuGetSubDriver(cpu->arch)) == NULL)
        return -1;

    if (driver->decode == NULL) {
        virCPUReportError(VIR_ERR_NO_SUPPORT,
                          _("cannot decode CPU data for %s architecture"),
                          cpu->arch);
        return -1;
    }

    return driver->decode(cpu, data, models, nmodels, preferred);
}


int
cpuEncode(const char *arch,
          const virCPUDefPtr cpu,
          union cpuData **forced,
          union cpuData **required,
          union cpuData **optional,
          union cpuData **disabled,
          union cpuData **forbidden)
{
    struct cpuArchDriver *driver;

    VIR_DEBUG("arch=%s, cpu=%p, forced=%p, required=%p, "
              "optional=%p, disabled=%p, forbidden=%p",
              NULLSTR(arch), cpu, forced, required,
              optional, disabled, forbidden);

    if ((driver = cpuGetSubDriver(arch)) == NULL)
        return -1;

    if (driver->encode == NULL) {
        virCPUReportError(VIR_ERR_NO_SUPPORT,
                          _("cannot encode CPU data for %s architecture"),
                          arch);
        return -1;
    }

    return driver->encode(cpu, forced, required,
                          optional, disabled, forbidden);
}


void
cpuDataFree(const char *arch,
|
|
|
union cpuData *data)
|
|
|
|
{
|
|
|
|
struct cpuArchDriver *driver;
|
|
|
|
|
2010-03-23 09:53:28 +00:00
|
|
|
VIR_DEBUG("arch=%s, data=%p", NULLSTR(arch), data);
|
|
|
|
|
Adds CPU selection infrastructure
Each driver supporting CPU selection must fill in host CPU capabilities.
When filling them, drivers for hypervisors running on the same node as
libvirtd can use cpuNodeData() to obtain raw CPU data. Other drivers,
such as VMware, need to implement their own way of getting such data.
Raw data can be decoded into virCPUDefPtr using cpuDecode() function.
When implementing virConnectCompareCPU(), a hypervisor driver can just
call cpuCompareXML() function with host CPU capabilities.
For each guest for which a driver supports selecting CPU models, it must
set the appropriate feature in guest's capabilities:
virCapabilitiesAddGuestFeature(guest, "cpuselection", 1, 0)
Actions needed when a domain is being created depend on whether the
hypervisor understands raw CPU data (currently CPUID for i686, x86_64
architectures) or symbolic names has to be used.
Typical use by hypervisors which prefer CPUID (such as VMware and Xen):
- convert guest CPU configuration from domain's XML into a set of raw
data structures each representing one of the feature policies:
cpuEncode(conn, architecture, guest_cpu_config,
&forced_data, &required_data, &optional_data,
&disabled_data, &forbidden_data)
- create a mask or whatever the hypervisor expects to see and pass it
to the hypervisor
Typical use by hypervisors with symbolic model names (such as QEMU):
- get raw CPU data for a computed guest CPU:
cpuGuestData(conn, host_cpu, guest_cpu_config, &data)
- decode raw data into virCPUDefPtr with a possible restriction on
allowed model names:
cpuDecode(conn, guest, data, n_allowed_models, allowed_models)
- pass guest->model and guest->features to the hypervisor
* src/cpu/cpu.c src/cpu/cpu.h src/cpu/cpu_generic.c
src/cpu/cpu_generic.h src/cpu/cpu_map.c src/cpu/cpu_map.h
src/cpu/cpu_x86.c src/cpu/cpu_x86.h src/cpu/cpu_x86_data.h
* configure.in: check for CPUID instruction
* src/Makefile.am: glue the new files in
* src/libvirt_private.syms: add new private symbols
* po/POTFILES.in: add new cpu files containing translatable strings
2009-12-18 15:02:11 +00:00
|
|
|
if (data == NULL)
|
|
|
|
return;
|
|
|
|
|
2010-02-10 12:22:36 +00:00
|
|
|
if ((driver = cpuGetSubDriver(arch)) == NULL)
|
Adds CPU selection infrastructure
Each driver supporting CPU selection must fill in host CPU capabilities.
When filling them, drivers for hypervisors running on the same node as
libvirtd can use cpuNodeData() to obtain raw CPU data. Other drivers,
such as VMware, need to implement their own way of getting such data.
Raw data can be decoded into virCPUDefPtr using cpuDecode() function.
When implementing virConnectCompareCPU(), a hypervisor driver can just
call cpuCompareXML() function with host CPU capabilities.
For each guest for which a driver supports selecting CPU models, it must
set the appropriate feature in guest's capabilities:
virCapabilitiesAddGuestFeature(guest, "cpuselection", 1, 0)
Actions needed when a domain is being created depend on whether the
hypervisor understands raw CPU data (currently CPUID for i686, x86_64
architectures) or symbolic names has to be used.
Typical use by hypervisors which prefer CPUID (such as VMware and Xen):
- convert guest CPU configuration from domain's XML into a set of raw
data structures each representing one of the feature policies:
cpuEncode(conn, architecture, guest_cpu_config,
&forced_data, &required_data, &optional_data,
&disabled_data, &forbidden_data)
- create a mask or whatever the hypervisor expects to see and pass it
to the hypervisor
Typical use by hypervisors with symbolic model names (such as QEMU):
- get raw CPU data for a computed guest CPU:
cpuGuestData(conn, host_cpu, guest_cpu_config, &data)
- decode raw data into virCPUDefPtr with a possible restriction on
allowed model names:
cpuDecode(conn, guest, data, n_allowed_models, allowed_models)
- pass guest->model and guest->features to the hypervisor
* src/cpu/cpu.c src/cpu/cpu.h src/cpu/cpu_generic.c
src/cpu/cpu_generic.h src/cpu/cpu_map.c src/cpu/cpu_map.h
src/cpu/cpu_x86.c src/cpu/cpu_x86.h src/cpu/cpu_x86_data.h
* configure.in: check for CPUID instruction
* src/Makefile.am: glue the new files in
* src/libvirt_private.syms: add new private symbols
* po/POTFILES.in: add new cpu files containing translatable strings
2009-12-18 15:02:11 +00:00
|
|
|
return;
|
|
|
|
|
|
|
|
if (driver->free == NULL) {
|
2010-02-10 12:22:36 +00:00
|
|
|
virCPUReportError(VIR_ERR_NO_SUPPORT,
|
Adds CPU selection infrastructure
Each driver supporting CPU selection must fill in host CPU capabilities.
When filling them, drivers for hypervisors running on the same node as
libvirtd can use cpuNodeData() to obtain raw CPU data. Other drivers,
such as VMware, need to implement their own way of getting such data.
Raw data can be decoded into virCPUDefPtr using cpuDecode() function.
When implementing virConnectCompareCPU(), a hypervisor driver can just
call cpuCompareXML() function with host CPU capabilities.
For each guest for which a driver supports selecting CPU models, it must
set the appropriate feature in guest's capabilities:
virCapabilitiesAddGuestFeature(guest, "cpuselection", 1, 0)
Actions needed when a domain is being created depend on whether the
hypervisor understands raw CPU data (currently CPUID for i686, x86_64
architectures) or symbolic names has to be used.
Typical use by hypervisors which prefer CPUID (such as VMware and Xen):
- convert guest CPU configuration from domain's XML into a set of raw
data structures each representing one of the feature policies:
cpuEncode(conn, architecture, guest_cpu_config,
&forced_data, &required_data, &optional_data,
&disabled_data, &forbidden_data)
- create a mask or whatever the hypervisor expects to see and pass it
to the hypervisor
Typical use by hypervisors with symbolic model names (such as QEMU):
- get raw CPU data for a computed guest CPU:
cpuGuestData(conn, host_cpu, guest_cpu_config, &data)
- decode raw data into virCPUDefPtr with a possible restriction on
allowed model names:
cpuDecode(conn, guest, data, n_allowed_models, allowed_models)
- pass guest->model and guest->features to the hypervisor
* src/cpu/cpu.c src/cpu/cpu.h src/cpu/cpu_generic.c
src/cpu/cpu_generic.h src/cpu/cpu_map.c src/cpu/cpu_map.h
src/cpu/cpu_x86.c src/cpu/cpu_x86.h src/cpu/cpu_x86_data.h
* configure.in: check for CPUID instruction
* src/Makefile.am: glue the new files in
* src/libvirt_private.syms: add new private symbols
* po/POTFILES.in: add new cpu files containing translatable strings
2009-12-18 15:02:11 +00:00
|
|
|
_("cannot free CPU data for %s architecture"),
|
|
|
|
arch);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
driver->free(data);
|
|
|
|
}


union cpuData *
cpuNodeData(const char *arch)
{
    struct cpuArchDriver *driver;

    VIR_DEBUG("arch=%s", NULLSTR(arch));

    if ((driver = cpuGetSubDriver(arch)) == NULL)
        return NULL;

    if (driver->nodeData == NULL) {
        virCPUReportError(VIR_ERR_NO_SUPPORT,
                          _("cannot get node CPU data for %s architecture"),
                          arch);
        return NULL;
    }

    return driver->nodeData();
}
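

/*
 * Illustrative sketch (not part of the original file): how a hypervisor
 * driver running on the same node as libvirtd might use cpuNodeData() and
 * cpuDataFree() when probing the host CPU for its capabilities.  The helper
 * name exampleDriverProbeHostCPU and the hostArch parameter are hypothetical;
 * only the two cpu.c entry points are real.
 */
#if 0
static int
exampleDriverProbeHostCPU(const char *hostArch)
{
    union cpuData *data;

    /* Ask the architecture sub-driver (e.g. the x86 CPUID driver) for raw
     * data describing the CPU libvirtd is running on. */
    if ((data = cpuNodeData(hostArch)) == NULL)
        return -1;

    /* ... decode the raw data into the driver's host capabilities here ... */

    /* Raw CPU data must be released through the matching sub-driver. */
    cpuDataFree(hostArch, data);
    return 0;
}
#endif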


virCPUCompareResult
cpuGuestData(virCPUDefPtr host,
             virCPUDefPtr guest,
             union cpuData **data)
{
    struct cpuArchDriver *driver;

    VIR_DEBUG("host=%p, guest=%p, data=%p", host, guest, data);

    if ((driver = cpuGetSubDriver(host->arch)) == NULL)
        return VIR_CPU_COMPARE_ERROR;

    if (driver->guestData == NULL) {
        virCPUReportError(VIR_ERR_NO_SUPPORT,
                          _("cannot compute guest CPU data for %s architecture"),
                          host->arch);
        return VIR_CPU_COMPARE_ERROR;
    }

    return driver->guestData(host, guest, data);
}
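

/*
 * Illustrative sketch (not part of the original file): the pattern a driver
 * working with symbolic model names (QEMU-style) might use to turn a guest
 * CPU configuration plus the host CPU into raw data via cpuGuestData().
 * exampleBuildGuestCPUData, hostDef and guestDef are hypothetical names;
 * cpuGuestData(), cpuDataFree() and the virCPUCompareResult values are real.
 */
#if 0
static int
exampleBuildGuestCPUData(virCPUDefPtr hostDef, virCPUDefPtr guestDef)
{
    union cpuData *data = NULL;
    int ret = -1;

    switch (cpuGuestData(hostDef, guestDef, &data)) {
    case VIR_CPU_COMPARE_INCOMPATIBLE:
        /* The requested guest CPU cannot be provided on this host. */
        goto cleanup;

    case VIR_CPU_COMPARE_ERROR:
        /* cpuGuestData() has already reported the failure. */
        goto cleanup;

    default:
        break;
    }

    /* ... decode 'data' into a model name and feature list and pass the
     * result to the hypervisor here ... */
    ret = 0;

cleanup:
    cpuDataFree(hostDef->arch, data);
    return ret;
}
#endif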


char *
cpuBaselineXML(const char **xmlCPUs,
               unsigned int ncpus,
               const char **models,
               unsigned int nmodels)
{
    xmlDocPtr doc = NULL;
    xmlXPathContextPtr ctxt = NULL;
    virCPUDefPtr *cpus = NULL;
    virCPUDefPtr cpu = NULL;
    char *cpustr;
    unsigned int i;

    VIR_DEBUG("ncpus=%u, nmodels=%u", ncpus, nmodels);
    if (xmlCPUs) {
        for (i = 0; i < ncpus; i++)
            VIR_DEBUG("xmlCPUs[%u]=%s", i, NULLSTR(xmlCPUs[i]));
    }
    if (models) {
        for (i = 0; i < nmodels; i++)
            VIR_DEBUG("models[%u]=%s", i, NULLSTR(models[i]));
    }

    if (xmlCPUs == NULL && ncpus != 0) {
        virCPUReportError(VIR_ERR_INTERNAL_ERROR,
                          "%s", _("nonzero ncpus doesn't match with NULL xmlCPUs"));
        return NULL;
    }

    if (ncpus < 1) {
        virCPUReportError(VIR_ERR_INVALID_ARG, "%s", _("No CPUs given"));
        return NULL;
    }

    if (VIR_ALLOC_N(cpus, ncpus))
        goto no_memory;

    for (i = 0; i < ncpus; i++) {
        if (!(doc = virXMLParseString(xmlCPUs[i], "cpu.xml")))
            goto error;

        if ((ctxt = xmlXPathNewContext(doc)) == NULL)
            goto no_memory;

        ctxt->node = xmlDocGetRootElement(doc);

        cpus[i] = virCPUDefParseXML(ctxt->node, ctxt, VIR_CPU_TYPE_HOST);
        if (cpus[i] == NULL)
            goto error;

        xmlXPathFreeContext(ctxt);
        xmlFreeDoc(doc);
        ctxt = NULL;
        doc = NULL;
    }

    if (!(cpu = cpuBaseline(cpus, ncpus, models, nmodels)))
        goto error;

    cpustr = virCPUDefFormat(cpu, "", 0);

cleanup:
    if (cpus) {
        for (i = 0; i < ncpus; i++)
            virCPUDefFree(cpus[i]);
        VIR_FREE(cpus);
    }
    virCPUDefFree(cpu);
    xmlXPathFreeContext(ctxt);
    xmlFreeDoc(doc);

    return cpustr;

no_memory:
    virReportOOMError();
error:
    cpustr = NULL;
    goto cleanup;
}
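

/*
 * Illustrative sketch (not part of the original file): computing a baseline
 * CPU from two host CPU XML documents, which is what virConnectBaselineCPU()
 * boils down to.  The XML snippets and exampleBaselineTwoHosts are made up
 * for the example; cpuBaselineXML() and VIR_FREE() are the real interfaces.
 */
#if 0
static void
exampleBaselineTwoHosts(void)
{
    const char *xmlCPUs[] = {
        "<cpu><arch>x86_64</arch><model>core2duo</model></cpu>",
        "<cpu><arch>x86_64</arch><model>qemu64</model></cpu>",
    };
    char *baseline;

    /* Passing NULL/0 for models leaves the candidate model list unrestricted. */
    baseline = cpuBaselineXML(xmlCPUs, 2, NULL, 0);
    if (baseline) {
        /* 'baseline' holds CPU XML describing a CPU both hosts can provide. */
        VIR_FREE(baseline);
    }
}
#endif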


virCPUDefPtr
cpuBaseline(virCPUDefPtr *cpus,
            unsigned int ncpus,
            const char **models,
            unsigned int nmodels)
{
    struct cpuArchDriver *driver;
    unsigned int i;

    VIR_DEBUG("ncpus=%u, nmodels=%u", ncpus, nmodels);
    if (cpus) {
        for (i = 0; i < ncpus; i++)
            VIR_DEBUG("cpus[%u]=%p", i, cpus[i]);
    }
    if (models) {
        for (i = 0; i < nmodels; i++)
            VIR_DEBUG("models[%u]=%s", i, NULLSTR(models[i]));
    }

    if (cpus == NULL && ncpus != 0) {
        virCPUReportError(VIR_ERR_INTERNAL_ERROR,
                          "%s", _("nonzero ncpus doesn't match with NULL cpus"));
        return NULL;
    }

    if (ncpus < 1) {
        virCPUReportError(VIR_ERR_INVALID_ARG, "%s", _("No CPUs given"));
        return NULL;
    }

    if (models == NULL && nmodels != 0) {
        virCPUReportError(VIR_ERR_INTERNAL_ERROR,
                          "%s", _("nonzero nmodels doesn't match with NULL models"));
        return NULL;
    }

    if ((driver = cpuGetSubDriver(cpus[0]->arch)) == NULL)
        return NULL;

    if (driver->baseline == NULL) {
        virCPUReportError(VIR_ERR_NO_SUPPORT,
                          _("cannot compute baseline CPU of %s architecture"),
                          cpus[0]->arch);
        return NULL;
    }

    return driver->baseline(cpus, ncpus, models, nmodels);
}
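

/*
 * Illustrative sketch (not part of the original file): calling cpuBaseline()
 * directly with already-parsed CPU definitions while restricting the result
 * to model names the caller knows how to handle.  The supportedModels list
 * and exampleBaselineWithModels are made up; cpuBaseline() and
 * virCPUDefFree() are the real functions used above.
 */
#if 0
static void
exampleBaselineWithModels(virCPUDefPtr *hostCPUs, unsigned int nhosts)
{
    const char *supportedModels[] = { "qemu64", "core2duo", "Nehalem" };
    virCPUDefPtr baseline;

    baseline = cpuBaseline(hostCPUs, nhosts, supportedModels,
                           sizeof(supportedModels) / sizeof(supportedModels[0]));
    if (baseline) {
        /* ... format or store the computed CPU definition ... */
        virCPUDefFree(baseline);
    }
}
#endif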


int
cpuUpdate(virCPUDefPtr guest,
          const virCPUDefPtr host)
{
    struct cpuArchDriver *driver;

    VIR_DEBUG("guest=%p, host=%p", guest, host);

    if ((driver = cpuGetSubDriver(host->arch)) == NULL)
        return -1;

    if (driver->update == NULL) {
        virCPUReportError(VIR_ERR_NO_SUPPORT,
                          _("cannot update guest CPU data for %s architecture"),
                          host->arch);
        return -1;
    }

    return driver->update(guest, host);
}
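

/*
 * Illustrative sketch (not part of the original file): updating a guest CPU
 * definition against the current host before a domain is started, e.g. to
 * resolve host-relative parts of the configuration.  exampleRefreshGuestCPU
 * is a hypothetical wrapper; cpuUpdate() is the function defined above.
 */
#if 0
static int
exampleRefreshGuestCPU(virCPUDefPtr guestDef, virCPUDefPtr hostDef)
{
    /* cpuUpdate() dispatches on hostDef->arch, modifies guestDef in place
     * and returns -1 (with an error reported) when the architecture has no
     * update support. */
    return cpuUpdate(guestDef, hostDef);
}
#endif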