hypervisor, vmm: move away from CpuId type

CpuId is a type alias for the flexible-array structure over
CpuIdEntry. Both the alias itself and the element type of the array
portion are tied to the underlying hypervisor.

Switch to using CpuIdEntry slice or vector directly. The construction of
CpuId type is left to hypervisors.

This allows us to decouple CpuIdEntry from hypervisors more easily.

No functional change intended.

Signed-off-by: Wei Liu <liuwe@microsoft.com>
This commit is contained in:
Wei Liu 2022-07-15 10:20:07 +00:00 committed by Rob Bradford
parent 9fdfdf25fb
commit 45fbf840db
10 changed files with 113 additions and 110 deletions

View File

@ -15,7 +15,7 @@ pub mod regs;
use crate::GuestMemoryMmap;
use crate::InitramfsConfig;
use crate::RegionType;
use hypervisor::x86_64::{CpuId, CpuIdEntry, CPUID_FLAG_VALID_INDEX};
use hypervisor::x86_64::{CpuIdEntry, CPUID_FLAG_VALID_INDEX};
use hypervisor::HypervisorError;
use linux_loader::loader::bootparam::boot_params;
use linux_loader::loader::elf::start_info::{
@ -223,16 +223,14 @@ pub struct CpuidPatch {
impl CpuidPatch {
pub fn set_cpuid_reg(
cpuid: &mut CpuId,
cpuid: &mut Vec<CpuIdEntry>,
function: u32,
index: Option<u32>,
reg: CpuidReg,
value: u32,
) {
let entries = cpuid.as_mut_slice();
let mut entry_found = false;
for entry in entries.iter_mut() {
for entry in cpuid.iter_mut() {
if entry.function == function && (index == None || index.unwrap() == entry.index) {
entry_found = true;
match reg {
@ -279,16 +277,12 @@ impl CpuidPatch {
}
}
if let Err(e) = cpuid.push(entry) {
error!("Failed adding new CPUID entry: {:?}", e);
}
cpuid.push(entry);
}
}
pub fn patch_cpuid(cpuid: &mut CpuId, patches: Vec<CpuidPatch>) {
let entries = cpuid.as_mut_slice();
for entry in entries.iter_mut() {
pub fn patch_cpuid(cpuid: &mut [CpuIdEntry], patches: Vec<CpuidPatch>) {
for entry in cpuid {
for patch in patches.iter() {
if entry.function == patch.function && entry.index == patch.index {
if let Some(flags_bit) = patch.flags_bit {
@ -312,16 +306,15 @@ impl CpuidPatch {
}
pub fn is_feature_enabled(
cpuid: &CpuId,
cpuid: &[CpuIdEntry],
function: u32,
index: u32,
reg: CpuidReg,
feature_bit: usize,
) -> bool {
let entries = cpuid.as_slice();
let mask = 1 << feature_bit;
for entry in entries.iter() {
for entry in cpuid {
if entry.function == function && entry.index == index {
let reg_val = match reg {
CpuidReg::EAX => entry.eax,
@ -470,12 +463,12 @@ impl CpuidFeatureEntry {
}
fn get_features_from_cpuid(
cpuid: &CpuId,
cpuid: &[CpuIdEntry],
feature_entry_list: &[CpuidFeatureEntry],
) -> Vec<u32> {
let mut features = vec![0; feature_entry_list.len()];
for (i, feature_entry) in feature_entry_list.iter().enumerate() {
for cpuid_entry in cpuid.as_slice().iter() {
for cpuid_entry in cpuid {
if cpuid_entry.function == feature_entry.function
&& cpuid_entry.index == feature_entry.index
{
@ -505,8 +498,8 @@ impl CpuidFeatureEntry {
// The function returns `Error` (a.k.a. "incompatible") when the CPUID features from `src_vm_cpuid`
// are not a subset of those of the `dest_vm_cpuid`.
pub fn check_cpuid_compatibility(
src_vm_cpuid: &CpuId,
dest_vm_cpuid: &CpuId,
src_vm_cpuid: &[CpuIdEntry],
dest_vm_cpuid: &[CpuIdEntry],
) -> Result<(), Error> {
let feature_entry_list = &Self::checked_feature_entry_list();
let src_vm_features = Self::get_features_from_cpuid(src_vm_cpuid, feature_entry_list);
@ -558,7 +551,7 @@ pub fn generate_common_cpuid(
phys_bits: u8,
kvm_hyperv: bool,
#[cfg(feature = "tdx")] tdx_enabled: bool,
) -> super::Result<CpuId> {
) -> super::Result<Vec<CpuIdEntry>> {
let cpuid_patches = vec![
// Patch tsc deadline timer bit
CpuidPatch {
@ -669,16 +662,14 @@ pub fn generate_common_cpuid(
for i in 0x8000_0002..=0x8000_0004 {
cpuid.retain(|c| c.function != i);
let leaf = unsafe { std::arch::x86_64::__cpuid(i) };
cpuid
.push(CpuIdEntry {
function: i,
eax: leaf.eax,
ebx: leaf.ebx,
ecx: leaf.ecx,
edx: leaf.edx,
..Default::default()
})
.map_err(Error::CpuidIdentification)?;
cpuid.push(CpuIdEntry {
function: i,
eax: leaf.eax,
ebx: leaf.ebx,
ecx: leaf.ecx,
edx: leaf.edx,
..Default::default()
});
}
if kvm_hyperv {
@ -687,56 +678,44 @@ pub fn generate_common_cpuid(
cpuid.retain(|c| c.function != 0x4000_0001);
// See "Hypervisor Top Level Functional Specification" for details
// Compliance with "Hv#1" requires leaves up to 0x4000_000a
cpuid
.push(CpuIdEntry {
function: 0x40000000,
eax: 0x4000000a, // Maximum cpuid leaf
ebx: 0x756e694c, // "Linu"
ecx: 0x564b2078, // "x KV"
edx: 0x7648204d, // "M Hv"
..Default::default()
})
.map_err(Error::CpuidKvmHyperV)?;
cpuid
.push(CpuIdEntry {
function: 0x40000001,
eax: 0x31237648, // "Hv#1"
..Default::default()
})
.map_err(Error::CpuidKvmHyperV)?;
cpuid
.push(CpuIdEntry {
function: 0x40000002,
eax: 0x3839, // "Build number"
ebx: 0xa0000, // "Version"
..Default::default()
})
.map_err(Error::CpuidKvmHyperV)?;
cpuid
.push(CpuIdEntry {
function: 0x4000_0003,
eax: 1 << 1 // AccessPartitionReferenceCounter
cpuid.push(CpuIdEntry {
function: 0x40000000,
eax: 0x4000000a, // Maximum cpuid leaf
ebx: 0x756e694c, // "Linu"
ecx: 0x564b2078, // "x KV"
edx: 0x7648204d, // "M Hv"
..Default::default()
});
cpuid.push(CpuIdEntry {
function: 0x40000001,
eax: 0x31237648, // "Hv#1"
..Default::default()
});
cpuid.push(CpuIdEntry {
function: 0x40000002,
eax: 0x3839, // "Build number"
ebx: 0xa0000, // "Version"
..Default::default()
});
cpuid.push(CpuIdEntry {
function: 0x4000_0003,
eax: 1 << 1 // AccessPartitionReferenceCounter
| 1 << 2 // AccessSynicRegs
| 1 << 3 // AccessSyntheticTimerRegs
| 1 << 9, // AccessPartitionReferenceTsc
edx: 1 << 3, // CPU dynamic partitioning
..Default::default()
})
.map_err(Error::CpuidKvmHyperV)?;
cpuid
.push(CpuIdEntry {
function: 0x4000_0004,
eax: 1 << 5, // Recommend relaxed timing
..Default::default()
})
.map_err(Error::CpuidKvmHyperV)?;
edx: 1 << 3, // CPU dynamic partitioning
..Default::default()
});
cpuid.push(CpuIdEntry {
function: 0x4000_0004,
eax: 1 << 5, // Recommend relaxed timing
..Default::default()
});
for i in 0x4000_0005..=0x4000_000a {
cpuid
.push(CpuIdEntry {
function: i,
..Default::default()
})
.map_err(Error::CpuidKvmHyperV)?;
cpuid.push(CpuIdEntry {
function: i,
..Default::default()
});
}
}
@ -748,7 +727,7 @@ pub fn configure_vcpu(
id: u8,
kernel_entry_point: Option<EntryPoint>,
vm_memory: &GuestMemoryAtomic<GuestMemoryMmap>,
cpuid: CpuId,
cpuid: Vec<CpuIdEntry>,
kvm_hyperv: bool,
) -> super::Result<()> {
// Per vCPU CPUID changes; common are handled via generate_common_cpuid()
@ -1056,7 +1035,7 @@ pub fn get_host_cpu_phys_bits() -> u8 {
}
fn update_cpuid_topology(
cpuid: &mut CpuId,
cpuid: &mut Vec<CpuIdEntry>,
threads_per_core: u8,
cores_per_die: u8,
dies_per_package: u8,
@ -1120,7 +1099,10 @@ fn update_cpuid_topology(
// The goal is to update the CPUID sub-leaves to reflect the number of EPC
// sections exposed to the guest.
fn update_cpuid_sgx(cpuid: &mut CpuId, epc_sections: Vec<SgxEpcSection>) -> Result<(), Error> {
fn update_cpuid_sgx(
cpuid: &mut Vec<CpuIdEntry>,
epc_sections: Vec<SgxEpcSection>,
) -> Result<(), Error> {
// Something's wrong if there's no EPC section.
if epc_sections.is_empty() {
return Err(Error::NoSgxEpcSection);

View File

@ -19,7 +19,7 @@ use crate::kvm::{TdxExitDetails, TdxExitStatus};
#[cfg(target_arch = "x86_64")]
use crate::x86_64::Xsave;
#[cfg(target_arch = "x86_64")]
use crate::x86_64::{CpuId, LapicState};
use crate::x86_64::{CpuIdEntry, LapicState};
#[cfg(target_arch = "x86_64")]
use crate::x86_64::{ExtendedControlRegisters, FpuState, MsrEntries, VcpuEvents};
use crate::CpuState;
@ -312,7 +312,7 @@ pub trait Vcpu: Send + Sync {
///
/// X86 specific call to setup the CPUID registers.
///
fn set_cpuid2(&self, cpuid: &CpuId) -> Result<()>;
fn set_cpuid2(&self, cpuid: &[CpuIdEntry]) -> Result<()>;
#[cfg(target_arch = "x86_64")]
///
/// X86 specific call to enable HyperV SynIC
@ -322,7 +322,7 @@ pub trait Vcpu: Send + Sync {
///
/// X86 specific call to retrieve the CPUID registers.
///
fn get_cpuid2(&self, num_entries: usize) -> Result<CpuId>;
fn get_cpuid2(&self, num_entries: usize) -> Result<Vec<CpuIdEntry>>;
#[cfg(target_arch = "x86_64")]
///
/// Returns the state of the LAPIC (Local Advanced Programmable Interrupt Controller).

View File

@ -11,7 +11,7 @@
use crate::kvm::TdxCapabilities;
use crate::vm::Vm;
#[cfg(target_arch = "x86_64")]
use crate::x86_64::CpuId;
use crate::x86_64::CpuIdEntry;
#[cfg(target_arch = "x86_64")]
use crate::x86_64::MsrList;
use std::sync::Arc;
@ -100,7 +100,7 @@ pub trait Hypervisor: Send + Sync {
///
/// Get the supported CpuID
///
fn get_cpuid(&self) -> Result<CpuId>;
fn get_cpuid(&self) -> Result<Vec<CpuIdEntry>>;
///
/// Check particular extensions if any
///

View File

@ -755,7 +755,15 @@ impl vm::Vm for KvmVm {
/// Initialize TDX for this VM
///
#[cfg(feature = "tdx")]
fn tdx_init(&self, cpuid: &CpuId, max_vcpus: u32) -> vm::Result<()> {
fn tdx_init(&self, cpuid: &[CpuIdEntry], max_vcpus: u32) -> vm::Result<()> {
use std::io::{Error, ErrorKind};
let kvm_cpuid = kvm_bindings::CpuId::from_entries(cpuid).map_err(|_| {
vm::HypervisorVmError::InitializeTdx(Error::new(
ErrorKind::Other,
"failed to allocate CpuId",
))
})?;
#[repr(C)]
struct TdxInitVm {
max_vcpus: u32,
@ -771,7 +779,7 @@ impl vm::Vm for KvmVm {
max_vcpus,
tsc_khz: 0,
attributes: 0,
cpuid: cpuid.as_fam_struct_ptr() as u64,
cpuid: kvm_cpuid.as_fam_struct_ptr() as u64,
mrconfigid: [0; 6],
mrowner: [0; 6],
mrownerconfig: [0; 6],
@ -983,10 +991,15 @@ impl hypervisor::Hypervisor for KvmHypervisor {
///
/// X86 specific call to get the system supported CPUID values.
///
fn get_cpuid(&self) -> hypervisor::Result<CpuId> {
self.kvm
fn get_cpuid(&self) -> hypervisor::Result<Vec<CpuIdEntry>> {
let kvm_cpuid = self
.kvm
.get_supported_cpuid(kvm_bindings::KVM_MAX_CPUID_ENTRIES)
.map_err(|e| hypervisor::HypervisorError::GetCpuId(e.into()))
.map_err(|e| hypervisor::HypervisorError::GetCpuId(e.into()))?;
let v = kvm_cpuid.as_slice().to_vec();
Ok(v)
}
#[cfg(target_arch = "x86_64")]
@ -1311,9 +1324,12 @@ impl cpu::Vcpu for KvmVcpu {
///
/// X86 specific call to setup the CPUID registers.
///
fn set_cpuid2(&self, cpuid: &CpuId) -> cpu::Result<()> {
fn set_cpuid2(&self, cpuid: &[CpuIdEntry]) -> cpu::Result<()> {
let kvm_cpuid = CpuId::from_entries(cpuid)
.map_err(|_| cpu::HypervisorCpuError::SetCpuid(anyhow!("failed to create CpuId")))?;
self.fd
.set_cpuid2(cpuid)
.set_cpuid2(&kvm_cpuid)
.map_err(|e| cpu::HypervisorCpuError::SetCpuid(e.into()))
}
#[cfg(target_arch = "x86_64")]
@ -1337,10 +1353,15 @@ impl cpu::Vcpu for KvmVcpu {
/// X86 specific call to retrieve the CPUID registers.
///
#[cfg(target_arch = "x86_64")]
fn get_cpuid2(&self, num_entries: usize) -> cpu::Result<CpuId> {
self.fd
fn get_cpuid2(&self, num_entries: usize) -> cpu::Result<Vec<CpuIdEntry>> {
let kvm_cpuid = self
.fd
.get_cpuid2(num_entries)
.map_err(|e| cpu::HypervisorCpuError::GetCpuid(e.into()))
.map_err(|e| cpu::HypervisorCpuError::GetCpuid(e.into()))?;
let v = kvm_cpuid.as_slice().to_vec();
Ok(v)
}
#[cfg(target_arch = "x86_64")]
///

View File

@ -52,7 +52,7 @@ pub fn check_required_kvm_extensions(kvm: &Kvm) -> KvmResult<()> {
}
#[derive(Clone, Serialize, Deserialize)]
pub struct VcpuKvmState {
pub cpuid: CpuId,
pub cpuid: Vec<CpuIdEntry>,
pub msrs: MsrEntries,
pub vcpu_events: VcpuEvents,
pub regs: kvm_regs,

View File

@ -235,8 +235,8 @@ impl hypervisor::Hypervisor for MshvHypervisor {
///
/// Get the supported CpuID
///
fn get_cpuid(&self) -> hypervisor::Result<CpuId> {
Ok(CpuId::new(1).unwrap())
fn get_cpuid(&self) -> hypervisor::Result<Vec<CpuIdEntry>> {
Ok(Vec::new())
}
#[cfg(target_arch = "x86_64")]
///
@ -254,7 +254,7 @@ impl hypervisor::Hypervisor for MshvHypervisor {
pub struct MshvVcpu {
fd: VcpuFd,
vp_index: u8,
cpuid: CpuId,
cpuid: Vec<CpuIdEntry>,
msrs: MsrEntries,
vm_ops: Option<Arc<dyn vm::VmOps>>,
}
@ -563,14 +563,14 @@ impl cpu::Vcpu for MshvVcpu {
///
/// X86 specific call to setup the CPUID registers.
///
fn set_cpuid2(&self, _cpuid: &CpuId) -> cpu::Result<()> {
fn set_cpuid2(&self, _cpuid: &[CpuIdEntry]) -> cpu::Result<()> {
Ok(())
}
#[cfg(target_arch = "x86_64")]
///
/// X86 specific call to retrieve the CPUID registers.
///
fn get_cpuid2(&self, _num_entries: usize) -> cpu::Result<CpuId> {
fn get_cpuid2(&self, _num_entries: usize) -> cpu::Result<Vec<CpuIdEntry>> {
Ok(self.cpuid.clone())
}
#[cfg(target_arch = "x86_64")]
@ -952,7 +952,7 @@ impl vm::Vm for MshvVm {
let vcpu = MshvVcpu {
fd: vcpu_fd,
vp_index: id,
cpuid: CpuId::new(1).unwrap(),
cpuid: Vec::new(),
msrs: self.msrs.clone(),
vm_ops,
};

View File

@ -15,7 +15,7 @@ use crate::arch::aarch64::gic::Vgic;
use crate::cpu::Vcpu;
use crate::device::Device;
#[cfg(feature = "tdx")]
use crate::x86_64::CpuId;
use crate::x86_64::CpuIdEntry;
#[cfg(target_arch = "x86_64")]
use crate::ClockData;
use crate::CreateDevice;
@ -342,7 +342,7 @@ pub trait Vm: Send + Sync {
fn get_dirty_log(&self, slot: u32, base_gpa: u64, memory_size: u64) -> Result<Vec<u64>>;
#[cfg(feature = "tdx")]
/// Initialize TDX on this VM
fn tdx_init(&self, cpuid: &CpuId, max_vcpus: u32) -> Result<()>;
fn tdx_init(&self, cpuid: &[CpuIdEntry], max_vcpus: u32) -> Result<()>;
#[cfg(feature = "tdx")]
/// Finalize the configuration of TDX on this VM
fn tdx_finalize(&self) -> Result<()>;

View File

@ -43,7 +43,7 @@ use hypervisor::kvm::kvm_bindings;
#[cfg(feature = "tdx")]
use hypervisor::kvm::{TdxExitDetails, TdxExitStatus};
#[cfg(target_arch = "x86_64")]
use hypervisor::x86_64::CpuId;
use hypervisor::x86_64::CpuIdEntry;
#[cfg(feature = "guest_debug")]
use hypervisor::x86_64::{MsrEntries, MsrEntry};
use hypervisor::{CpuState, HypervisorCpuError, VmExit, VmOps};
@ -311,7 +311,7 @@ impl Vcpu {
#[cfg(target_arch = "aarch64")] vm: &Arc<dyn hypervisor::Vm>,
kernel_entry_point: Option<EntryPoint>,
#[cfg(target_arch = "x86_64")] vm_memory: &GuestMemoryAtomic<GuestMemoryMmap>,
#[cfg(target_arch = "x86_64")] cpuid: CpuId,
#[cfg(target_arch = "x86_64")] cpuid: Vec<CpuIdEntry>,
#[cfg(target_arch = "x86_64")] kvm_hyperv: bool,
) -> Result<()> {
#[cfg(target_arch = "aarch64")]
@ -418,7 +418,7 @@ pub struct CpuManager {
#[cfg_attr(target_arch = "aarch64", allow(dead_code))]
vm_memory: GuestMemoryAtomic<GuestMemoryMmap>,
#[cfg(target_arch = "x86_64")]
cpuid: CpuId,
cpuid: Vec<CpuIdEntry>,
#[cfg_attr(target_arch = "aarch64", allow(dead_code))]
vm: Arc<dyn hypervisor::Vm>,
vcpus_kill_signalled: Arc<AtomicBool>,
@ -1217,7 +1217,7 @@ impl CpuManager {
}
#[cfg(target_arch = "x86_64")]
pub fn common_cpuid(&self) -> CpuId {
pub fn common_cpuid(&self) -> Vec<CpuIdEntry> {
self.cpuid.clone()
}

View File

@ -355,7 +355,7 @@ pub fn start_vmm_thread(
struct VmMigrationConfig {
vm_config: Arc<Mutex<VmConfig>>,
#[cfg(all(feature = "kvm", target_arch = "x86_64"))]
common_cpuid: hypervisor::x86_64::CpuId,
common_cpuid: Vec<hypervisor::x86_64::CpuIdEntry>,
memory_manager_data: MemoryManagerSnapshotData,
}
@ -1593,7 +1593,7 @@ impl Vmm {
fn vm_check_cpuid_compatibility(
&self,
src_vm_config: &Arc<Mutex<VmConfig>>,
src_vm_cpuid: &hypervisor::x86_64::CpuId,
src_vm_cpuid: &[hypervisor::x86_64::CpuIdEntry],
) -> result::Result<(), MigratableError> {
// We check the `CPUID` compatibility of between the source vm and destination, which is
// mostly about feature compatibility and "topology/sgx" leaves are not relevant.

View File

@ -2618,7 +2618,7 @@ pub struct VmSnapshot {
#[cfg(all(feature = "kvm", target_arch = "x86_64"))]
pub clock: Option<hypervisor::ClockData>,
#[cfg(all(feature = "kvm", target_arch = "x86_64"))]
pub common_cpuid: hypervisor::x86_64::CpuId,
pub common_cpuid: Vec<hypervisor::x86_64::CpuIdEntry>,
}
pub const VM_SNAPSHOT_ID: &str = "vm";