hypervisor: move away from MsrEntries type

MsrEntries is a flexible-array wrapper type, fallible to construct and clumsy to index. Switch to Vec<MsrEntry> and &[MsrEntry] slices instead.

No functional change intended.

Signed-off-by: Wei Liu <liuwe@microsoft.com>
Author:    Wei Liu <liuwe@microsoft.com>
Date:      2022-07-19 09:51:18 +00:00
Committer: Liu Wei
Parent:    563919fc4a
Commit:    4d2cc3778f

6 changed files with 84 additions and 74 deletions
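
For context: on the KVM side, MsrEntries aliases kvm_bindings::Msrs, a FamStructWrapper<kvm_msrs> (vmm-sys-util's flexible-array-member wrapper), so construction is fallible and element access goes through as_slice()/as_mut_slice(). A minimal before/after sketch of the shape of this change, under those assumptions (not verbatim from the patch):

```rust
use kvm_bindings::{kvm_msr_entry as MsrEntry, Msrs as MsrEntries};

// Before: every caller juggles the fallible FAM-struct API.
fn index_list_fam(indices: &[u32]) -> MsrEntries {
    let mut msrs = MsrEntries::new(indices.len()).unwrap(); // allocation can fail
    for (pos, index) in indices.iter().enumerate() {
        msrs.as_mut_slice()[pos].index = *index; // explicit slice projection
    }
    msrs
}

// After: a plain Vec; the FAM struct is built only at the ioctl boundary.
fn index_list_vec(indices: &[u32]) -> Vec<MsrEntry> {
    indices
        .iter()
        .map(|&index| MsrEntry { index, ..Default::default() })
        .collect()
}
```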

diff --git a/hypervisor/src/cpu.rs b/hypervisor/src/cpu.rs

@@ -17,7 +17,7 @@ use crate::arch::x86::{CpuIdEntry, FpuState, LapicState, SpecialRegisters, Stand
 #[cfg(feature = "tdx")]
 use crate::kvm::{TdxExitDetails, TdxExitStatus};
 #[cfg(target_arch = "x86_64")]
-use crate::x86_64::MsrEntries;
+use crate::x86_64::MsrEntry;
 use crate::CpuState;
 #[cfg(target_arch = "aarch64")]
 use crate::DeviceAttr;
@@ -333,12 +333,12 @@ pub trait Vcpu: Send + Sync {
     ///
     /// Returns the model-specific registers (MSR) for this vCPU.
     ///
-    fn get_msrs(&self, msrs: &mut MsrEntries) -> Result<usize>;
+    fn get_msrs(&self, msrs: &mut Vec<MsrEntry>) -> Result<usize>;
     #[cfg(target_arch = "x86_64")]
     ///
     /// Setup the model-specific registers (MSR) for this vCPU.
     ///
-    fn set_msrs(&self, msrs: &MsrEntries) -> Result<usize>;
+    fn set_msrs(&self, msrs: &[MsrEntry]) -> Result<usize>;
     ///
     /// Returns the vcpu's current "multiprocessing state".
     ///
@@ -442,5 +442,5 @@ pub trait Vcpu: Send + Sync {
     ///
     /// Return the list of initial MSR entries for a VCPU
     ///
-    fn boot_msr_entries(&self) -> MsrEntries;
+    fn boot_msr_entries(&self) -> Vec<MsrEntry>;
 }

diff --git a/hypervisor/src/kvm/mod.rs b/hypervisor/src/kvm/mod.rs

@@ -65,7 +65,7 @@ use kvm_bindings::{
 #[cfg(target_arch = "x86_64")]
 use x86_64::check_required_kvm_extensions;
 #[cfg(target_arch = "x86_64")]
-pub use x86_64::{CpuId, ExtendedControlRegisters, MsrEntries, VcpuKvmState, Xsave};
+pub use x86_64::{CpuId, ExtendedControlRegisters, MsrEntries, MsrEntry, VcpuKvmState, Xsave};
 // aarch64 dependencies
 #[cfg(target_arch = "aarch64")]
 pub mod aarch64;
@@ -309,7 +309,7 @@ struct KvmDirtyLogSlot {
 pub struct KvmVm {
     fd: Arc<VmFd>,
     #[cfg(target_arch = "x86_64")]
-    msrs: MsrEntries,
+    msrs: Vec<MsrEntry>,
     dirty_log_slots: Arc<RwLock<HashMap<u32, KvmDirtyLogSlot>>>,
 }
@@ -950,11 +950,15 @@ impl hypervisor::Hypervisor for KvmHypervisor {
         {
             let msr_list = self.get_msr_list()?;
             let num_msrs = msr_list.as_fam_struct_ref().nmsrs as usize;
-            let mut msrs = MsrEntries::new(num_msrs).unwrap();
+            let mut msrs: Vec<MsrEntry> = vec![
+                MsrEntry {
+                    ..Default::default()
+                };
+                num_msrs
+            ];
             let indices = msr_list.as_slice();
-            let msr_entries = msrs.as_mut_slice();
             for (pos, index) in indices.iter().enumerate() {
-                msr_entries[pos].index = *index;
+                msrs[pos].index = *index;
             }

             Ok(Arc::new(KvmVm {
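
An aside on the replacement expression above: vec![MsrEntry { ..Default::default() }; num_msrs] clones one default entry num_msrs times, i.e. it is equivalent to the terser vec![MsrEntry::default(); num_msrs] (the vec! repeat form relies on kvm_msr_entry deriving Clone and Default).
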
@@ -1049,7 +1053,7 @@
 pub struct KvmVcpu {
     fd: VcpuFd,
     #[cfg(target_arch = "x86_64")]
-    msrs: MsrEntries,
+    msrs: Vec<MsrEntry>,
     vm_ops: Option<Arc<dyn vm::VmOps>>,
     #[cfg(target_arch = "x86_64")]
     hyperv_synic: AtomicBool,
@@ -1398,19 +1402,26 @@ impl cpu::Vcpu for KvmVcpu {
     ///
     /// Returns the model-specific registers (MSR) for this vCPU.
     ///
-    fn get_msrs(&self, msrs: &mut MsrEntries) -> cpu::Result<usize> {
-        self.fd
-            .get_msrs(msrs)
-            .map_err(|e| cpu::HypervisorCpuError::GetMsrEntries(e.into()))
+    fn get_msrs(&self, msrs: &mut Vec<MsrEntry>) -> cpu::Result<usize> {
+        let mut kvm_msrs = MsrEntries::from_entries(msrs).unwrap();
+        let succ = self
+            .fd
+            .get_msrs(&mut kvm_msrs)
+            .map_err(|e| cpu::HypervisorCpuError::GetMsrEntries(e.into()))?;
+
+        msrs[..succ].copy_from_slice(&kvm_msrs.as_slice()[..succ]);
+
+        Ok(succ)
     }
     #[cfg(target_arch = "x86_64")]
     ///
     /// Setup the model-specific registers (MSR) for this vCPU.
     /// Returns the number of MSR entries actually written.
     ///
-    fn set_msrs(&self, msrs: &MsrEntries) -> cpu::Result<usize> {
+    fn set_msrs(&self, msrs: &[MsrEntry]) -> cpu::Result<usize> {
+        let kvm_msrs = MsrEntries::from_entries(msrs).unwrap();
         self.fd
-            .set_msrs(msrs)
+            .set_msrs(&kvm_msrs)
             .map_err(|e| cpu::HypervisorCpuError::SetMsrEntries(e.into()))
     }
     ///
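
This pair of methods is the heart of the patch: callers now pass plain Vec<MsrEntry>/&[MsrEntry], and the FAM struct is materialized only around the ioctl. A standalone sketch of that round trip, assuming the kvm-bindings aliases noted earlier (the helper name is illustrative):

```rust
use kvm_bindings::{kvm_msr_entry as MsrEntry, Msrs as MsrEntries};
use kvm_ioctls::VcpuFd;

fn get_msrs_via_fam(fd: &VcpuFd, msrs: &mut [MsrEntry]) -> std::io::Result<usize> {
    // Copy the plain entries into a kernel-shaped FAM struct...
    let mut kvm_msrs = MsrEntries::from_entries(msrs)
        .map_err(|_| std::io::Error::new(std::io::ErrorKind::InvalidInput, "too many MSRs"))?;
    // ...issue KVM_GET_MSRS, which reports how many leading entries it filled...
    let read = fd
        .get_msrs(&mut kvm_msrs)
        .map_err(|e| std::io::Error::from_raw_os_error(e.errno()))?;
    // ...then copy the kernel-provided values back into the caller's slice.
    msrs[..read].copy_from_slice(&kvm_msrs.as_slice()[..read]);
    Ok(read)
}
```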
@@ -1801,41 +1812,31 @@
                     index,
                     ..Default::default()
                 };
-                msr_entries.push(msr).unwrap();
+                msr_entries.push(msr);
             }
         }

-        let expected_num_msrs = msr_entries.as_fam_struct_ref().nmsrs as usize;
+        let expected_num_msrs = msr_entries.len();
         let num_msrs = self.get_msrs(&mut msr_entries)?;
         let msrs = if num_msrs != expected_num_msrs {
             let mut faulty_msr_index = num_msrs;
-            let mut msr_entries_tmp =
-                MsrEntries::from_entries(&msr_entries.as_slice()[..faulty_msr_index]).unwrap();
+            let mut msr_entries_tmp = msr_entries[..faulty_msr_index].to_vec();

             loop {
                 warn!(
                     "Detected faulty MSR 0x{:x} while getting MSRs",
-                    msr_entries.as_slice()[faulty_msr_index].index
+                    msr_entries[faulty_msr_index].index
                 );

+                // Skip the first bad MSR
                 let start_pos = faulty_msr_index + 1;
-                let mut sub_msr_entries =
-                    MsrEntries::from_entries(&msr_entries.as_slice()[start_pos..]).unwrap();
-                let expected_num_msrs = sub_msr_entries.as_fam_struct_ref().nmsrs as usize;
+
+                let mut sub_msr_entries = msr_entries[start_pos..].to_vec();
                 let num_msrs = self.get_msrs(&mut sub_msr_entries)?;
-                for i in 0..num_msrs {
-                    msr_entries_tmp
-                        .push(sub_msr_entries.as_slice()[i])
-                        .map_err(|e| {
-                            cpu::HypervisorCpuError::GetMsrEntries(anyhow!(
-                                "Failed adding MSR entries: {:?}",
-                                e
-                            ))
-                        })?;
-                }
+
+                msr_entries_tmp.extend(&sub_msr_entries[..num_msrs]);

-                if num_msrs == expected_num_msrs {
+                if num_msrs == sub_msr_entries.len() {
                     break;
                 }
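
This loop leans on the contract that get_msrs returns how many leading entries the hypervisor processed, so the count doubles as the position of the first faulty MSR. A condensed sketch of the same skip-and-retry idea against the Vcpu trait shown earlier (helper name illustrative):

```rust
fn read_all_readable(vcpu: &dyn Vcpu, mut pending: Vec<MsrEntry>) -> Result<Vec<MsrEntry>> {
    let mut collected = Vec::new();
    loop {
        let n = vcpu.get_msrs(&mut pending)?;
        collected.extend_from_slice(&pending[..n]);
        if n == pending.len() {
            return Ok(collected); // everything remaining was readable
        }
        // pending[n] is the first faulty MSR: drop it and retry with the tail.
        pending.drain(..=n);
    }
}
```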
@@ -1934,7 +1935,7 @@
         // expected amount, we fallback onto a slower method by setting MSRs
         // by chunks. This is the only way to make sure we try to set as many
         // MSRs as possible, even if some MSRs are not supported.
-        let expected_num_msrs = state.msrs.as_fam_struct_ref().nmsrs as usize;
+        let expected_num_msrs = state.msrs.len();
         let num_msrs = self.set_msrs(&state.msrs)?;
         if num_msrs != expected_num_msrs {
             let mut faulty_msr_index = num_msrs;
@@ -1942,16 +1943,17 @@
             loop {
                 warn!(
                     "Detected faulty MSR 0x{:x} while setting MSRs",
-                    state.msrs.as_slice()[faulty_msr_index].index
+                    state.msrs[faulty_msr_index].index
                 );

+                // Skip the first bad MSR
                 let start_pos = faulty_msr_index + 1;
-                let sub_msr_entries =
-                    MsrEntries::from_entries(&state.msrs.as_slice()[start_pos..]).unwrap();
-                let expected_num_msrs = sub_msr_entries.as_fam_struct_ref().nmsrs as usize;
+
+                let sub_msr_entries = state.msrs[start_pos..].to_vec();
+
                 let num_msrs = self.set_msrs(&sub_msr_entries)?;
-                if num_msrs == expected_num_msrs {
+                if num_msrs == sub_msr_entries.len() {
                     break;
                 }
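
The restore path deliberately mirrors the save path: set_msrs likewise reports how many leading entries were accepted, so the same skip-one-and-retry walk sets every MSR the hypervisor will take, as the "slower method by setting MSRs by chunks" comment above describes.
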
@@ -2032,11 +2034,10 @@
     ///
     /// Return the list of initial MSR entries for a VCPU
     ///
-    fn boot_msr_entries(&self) -> MsrEntries {
+    fn boot_msr_entries(&self) -> Vec<MsrEntry> {
         use crate::arch::x86::{msr_index, MTRR_ENABLE, MTRR_MEM_TYPE_WB};
-        use kvm_bindings::kvm_msr_entry as MsrEntry;

-        MsrEntries::from_entries(&[
+        [
             msr!(msr_index::MSR_IA32_SYSENTER_CS),
             msr!(msr_index::MSR_IA32_SYSENTER_ESP),
             msr!(msr_index::MSR_IA32_SYSENTER_EIP),
@@ -2051,8 +2052,8 @@
                 msr_index::MSR_IA32_MISC_ENABLE_FAST_STRING as u64
             ),
             msr_data!(msr_index::MSR_MTRRdefType, MTRR_ENABLE | MTRR_MEM_TYPE_WB),
-        ])
-        .unwrap()
+        ]
+        .to_vec()
     }
 }
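
The msr!/msr_data! helpers used in boot_msr_entries are small MsrEntry constructors from the hypervisor crate; a plausible expansion, shown purely for illustration (the real definitions may differ in detail):

```rust
// Illustration only, not the crate's verbatim macros.
macro_rules! msr {
    ($index:expr) => {
        MsrEntry { index: $index, data: 0, ..Default::default() }
    };
}

macro_rules! msr_data {
    ($index:expr, $data:expr) => {
        MsrEntry { index: $index, data: $data, ..Default::default() }
    };
}
```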

diff --git a/hypervisor/src/kvm/x86_64/mod.rs b/hypervisor/src/kvm/x86_64/mod.rs

@@ -55,7 +55,7 @@ pub fn check_required_kvm_extensions(kvm: &Kvm) -> KvmResult<()> {
 #[derive(Clone, Serialize, Deserialize)]
 pub struct VcpuKvmState {
     pub cpuid: Vec<CpuIdEntry>,
-    pub msrs: MsrEntries,
+    pub msrs: Vec<MsrEntry>,
     pub vcpu_events: VcpuEvents,
     pub regs: kvm_regs,
     pub sregs: kvm_sregs,

diff --git a/hypervisor/src/mshv/mod.rs b/hypervisor/src/mshv/mod.rs

@@ -229,11 +229,15 @@ impl hypervisor::Hypervisor for MshvHypervisor {
         let msr_list = self.get_msr_list()?;
         let num_msrs = msr_list.as_fam_struct_ref().nmsrs as usize;
-        let mut msrs = MsrEntries::new(num_msrs).unwrap();
+        let mut msrs: Vec<MsrEntry> = vec![
+            MsrEntry {
+                ..Default::default()
+            };
+            num_msrs
+        ];
         let indices = msr_list.as_slice();
-        let msr_entries = msrs.as_mut_slice();
         for (pos, index) in indices.iter().enumerate() {
-            msr_entries[pos].index = *index;
+            msrs[pos].index = *index;
         }

         let vm_fd = Arc::new(fd);
@@ -258,7 +262,7 @@
 pub struct MshvVcpu {
     fd: VcpuFd,
     vp_index: u8,
     cpuid: Vec<CpuIdEntry>,
-    msrs: MsrEntries,
+    msrs: Vec<MsrEntry>,
     vm_ops: Option<Arc<dyn vm::VmOps>>,
 }
@@ -341,19 +345,26 @@ impl cpu::Vcpu for MshvVcpu {
     ///
     /// Returns the model-specific registers (MSR) for this vCPU.
     ///
-    fn get_msrs(&self, msrs: &mut MsrEntries) -> cpu::Result<usize> {
-        self.fd
-            .get_msrs(msrs)
-            .map_err(|e| cpu::HypervisorCpuError::GetMsrEntries(e.into()))
+    fn get_msrs(&self, msrs: &mut Vec<MsrEntry>) -> cpu::Result<usize> {
+        let mut mshv_msrs = MsrEntries::from_entries(msrs).unwrap();
+        let succ = self
+            .fd
+            .get_msrs(&mut mshv_msrs)
+            .map_err(|e| cpu::HypervisorCpuError::GetMsrEntries(e.into()))?;
+
+        msrs[..succ].copy_from_slice(&mshv_msrs.as_slice()[..succ]);
+
+        Ok(succ)
     }
     #[cfg(target_arch = "x86_64")]
     ///
     /// Setup the model-specific registers (MSR) for this vCPU.
     /// Returns the number of MSR entries actually written.
     ///
-    fn set_msrs(&self, msrs: &MsrEntries) -> cpu::Result<usize> {
+    fn set_msrs(&self, msrs: &[MsrEntry]) -> cpu::Result<usize> {
+        let mshv_msrs = MsrEntries::from_entries(msrs).unwrap();
         self.fd
-            .set_msrs(msrs)
+            .set_msrs(&mshv_msrs)
             .map_err(|e| cpu::HypervisorCpuError::SetMsrEntries(e.into()))
     }
@@ -655,10 +666,10 @@
     ///
     /// Return the list of initial MSR entries for a VCPU
     ///
-    fn boot_msr_entries(&self) -> MsrEntries {
+    fn boot_msr_entries(&self) -> Vec<MsrEntry> {
         use crate::arch::x86::{msr_index, MTRR_ENABLE, MTRR_MEM_TYPE_WB};

-        MsrEntries::from_entries(&[
+        [
             msr!(msr_index::MSR_IA32_SYSENTER_CS),
             msr!(msr_index::MSR_IA32_SYSENTER_ESP),
             msr!(msr_index::MSR_IA32_SYSENTER_EIP),
@@ -669,8 +680,8 @@
             msr!(msr_index::MSR_SYSCALL_MASK),
             msr!(msr_index::MSR_IA32_TSC),
             msr_data!(msr_index::MSR_MTRRdefType, MTRR_ENABLE | MTRR_MEM_TYPE_WB),
-        ])
-        .unwrap()
+        ]
+        .to_vec()
     }
 }
@@ -888,7 +899,7 @@ impl<'a> PlatformEmulator for MshvEmulatorContext<'a> {
 /// Wrapper over Mshv VM ioctls.
 pub struct MshvVm {
     fd: Arc<VmFd>,
-    msrs: MsrEntries,
+    msrs: Vec<MsrEntry>,
     vm_ops: Option<Arc<dyn vm::VmOps>>,
     dirty_log_slots: Arc<RwLock<HashMap<u64, MshvDirtyLogSlot>>>,
 }

diff --git a/hypervisor/src/mshv/x86_64/mod.rs b/hypervisor/src/mshv/x86_64/mod.rs

@@ -32,7 +32,7 @@ pub use {
 #[derive(Clone, Serialize, Deserialize)]
 pub struct VcpuMshvState {
-    pub msrs: MsrEntries,
+    pub msrs: Vec<MsrEntry>,
     pub vcpu_events: VcpuEvents,
     pub regs: MshvStandardRegisters,
     pub sregs: MshvSpecialRegisters,
@@ -46,10 +46,10 @@ pub struct VcpuMshvState {
 impl fmt::Display for VcpuMshvState {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        let expected_num_msrs = self.msrs.as_fam_struct_ref().nmsrs as usize;
+        let expected_num_msrs = self.msrs.len();
         let mut msr_entries = vec![vec![0; 2]; expected_num_msrs];

-        for (i, entry) in self.msrs.as_slice().iter().enumerate() {
+        for (i, entry) in self.msrs.iter().enumerate() {
             msr_entries[i][1] = entry.data;
             msr_entries[i][0] = entry.index as u64;
         }

diff --git a/vmm/src/cpu.rs b/vmm/src/cpu.rs

@@ -45,7 +45,7 @@ use hypervisor::kvm::kvm_bindings;
 #[cfg(feature = "tdx")]
 use hypervisor::kvm::{TdxExitDetails, TdxExitStatus};
 #[cfg(feature = "guest_debug")]
-use hypervisor::x86_64::{MsrEntries, MsrEntry};
+use hypervisor::x86_64::MsrEntry;
 use hypervisor::{CpuState, HypervisorCpuError, VmExit, VmOps};
 use libc::{c_void, siginfo_t};
 #[cfg(feature = "guest_debug")]
@@ -2271,11 +2271,10 @@
             .get_sregs()
             .map_err(|_e| GuestDebuggableError::Coredump(anyhow!("get sregs failed")))?;

-        let mut msrs = MsrEntries::from_entries(&[MsrEntry {
+        let mut msrs = vec![MsrEntry {
             index: msr_index::MSR_KERNEL_GS_BASE,
             ..Default::default()
-        }])
-        .map_err(|_e| GuestDebuggableError::Coredump(anyhow!("get msr failed")))?;
+        }];

         self.vcpus[vcpu_id as usize]
             .lock()
@@ -2283,7 +2282,7 @@
             .vcpu
             .get_msrs(&mut msrs)
             .map_err(|_e| GuestDebuggableError::Coredump(anyhow!("get msr failed")))?;
-        let kernel_gs_base = msrs.as_slice()[0].data;
+        let kernel_gs_base = msrs[0].data;

         let cs = CpuSegment::new(sregs.cs);
         let ds = CpuSegment::new(sregs.ds);
@@ -2389,7 +2388,7 @@
     #[test]
     fn test_setup_msrs() {
         use hypervisor::arch::x86::msr_index;
-        use hypervisor::x86_64::{MsrEntries, MsrEntry};
+        use hypervisor::x86_64::MsrEntry;

         let hv = hypervisor::new().unwrap();
         let vm = hv.create_vm().expect("new VM fd creation failed");
// This test will check against the last MSR entry configured (the tenth one). // This test will check against the last MSR entry configured (the tenth one).
// See create_msr_entries for details. // See create_msr_entries for details.
let mut msrs = MsrEntries::from_entries(&[MsrEntry { let mut msrs = vec![MsrEntry {
index: msr_index::MSR_IA32_MISC_ENABLE, index: msr_index::MSR_IA32_MISC_ENABLE,
..Default::default() ..Default::default()
}]) }];
.unwrap();
// get_msrs returns the number of msrs that it succeed in reading. We only want to read 1 // get_msrs returns the number of msrs that it succeed in reading. We only want to read 1
// in this test case scenario. // in this test case scenario.
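
The diff ends mid-test. Purely as an illustration of how the Vec-based API reads from here (this continuation is assumed, not part of the commit):

```rust
// Hypothetical continuation: read back through the new API and confirm
// that exactly one MSR was read.
let vcpu = vm.create_vcpu(0, None).unwrap();
let num = vcpu.get_msrs(&mut msrs).unwrap();
assert_eq!(num, 1);
assert_eq!(msrs[0].index, msr_index::MSR_IA32_MISC_ENABLE);
```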