hypervisor: kvm: Introduce Mutex around VcpuFd

This is required as the VcpuFd::run and VcpuFd::set_kvm_immediate_exit
methods now take a &mut self. I explored alternative solutions:

1. Using RefCell for runtime interior mutability - the Vcpu trait is
   Sync and it's not possible to use RefCell with types that are Sync.
2. Using UnsafeCell - the mutable reference nature of ::run and
   ::set_kvm_immediate_exit was added for a reason so it is unwise to
   bypass this.
3. Adjusting the trait interface to expose the &mut self - this requires
   an Arc<Mutex<>> around the hypervisor::vcpu::Vcpu object and was very
   intrusive.

Signed-off-by: Rob Bradford <rbradford@rivosinc.com>
This commit is contained in:
Rob Bradford 2024-04-25 16:18:53 +01:00
parent c022063ae8
commit 7be69edf51

View File

@ -36,7 +36,6 @@ use std::os::unix::io::RawFd;
use std::result; use std::result;
#[cfg(target_arch = "x86_64")] #[cfg(target_arch = "x86_64")]
use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::atomic::{AtomicBool, Ordering};
#[cfg(target_arch = "aarch64")]
use std::sync::Mutex; use std::sync::Mutex;
use std::sync::{Arc, RwLock}; use std::sync::{Arc, RwLock};
use vmm_sys_util::eventfd::EventFd; use vmm_sys_util::eventfd::EventFd;
@ -455,12 +454,12 @@ impl vm::Vm for KvmVm {
id: u8, id: u8,
vm_ops: Option<Arc<dyn VmOps>>, vm_ops: Option<Arc<dyn VmOps>>,
) -> vm::Result<Arc<dyn cpu::Vcpu>> { ) -> vm::Result<Arc<dyn cpu::Vcpu>> {
let vc = self let fd = self
.fd .fd
.create_vcpu(id as u64) .create_vcpu(id as u64)
.map_err(|e| vm::HypervisorVmError::CreateVcpu(e.into()))?; .map_err(|e| vm::HypervisorVmError::CreateVcpu(e.into()))?;
let vcpu = KvmVcpu { let vcpu = KvmVcpu {
fd: vc, fd: Arc::new(Mutex::new(fd)),
#[cfg(target_arch = "x86_64")] #[cfg(target_arch = "x86_64")]
msrs: self.msrs.clone(), msrs: self.msrs.clone(),
vm_ops, vm_ops,
@ -1170,7 +1169,7 @@ impl hypervisor::Hypervisor for KvmHypervisor {
/// Vcpu struct for KVM /// Vcpu struct for KVM
pub struct KvmVcpu { pub struct KvmVcpu {
fd: VcpuFd, fd: Arc<Mutex<VcpuFd>>,
#[cfg(target_arch = "x86_64")] #[cfg(target_arch = "x86_64")]
msrs: Vec<MsrEntry>, msrs: Vec<MsrEntry>,
vm_ops: Option<Arc<dyn vm::VmOps>>, vm_ops: Option<Arc<dyn vm::VmOps>>,
@ -1198,6 +1197,8 @@ impl cpu::Vcpu for KvmVcpu {
fn get_regs(&self) -> cpu::Result<StandardRegisters> { fn get_regs(&self) -> cpu::Result<StandardRegisters> {
Ok(self Ok(self
.fd .fd
.lock()
.unwrap()
.get_regs() .get_regs()
.map_err(|e| cpu::HypervisorCpuError::GetStandardRegs(e.into()))? .map_err(|e| cpu::HypervisorCpuError::GetStandardRegs(e.into()))?
.into()) .into())
@ -1219,6 +1220,8 @@ impl cpu::Vcpu for KvmVcpu {
for i in 0..31 { for i in 0..31 {
let mut bytes = [0_u8; 8]; let mut bytes = [0_u8; 8];
self.fd self.fd
.lock()
.unwrap()
.get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U64, off), &mut bytes) .get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U64, off), &mut bytes)
.map_err(|e| cpu::HypervisorCpuError::GetCoreRegister(e.into()))?; .map_err(|e| cpu::HypervisorCpuError::GetCoreRegister(e.into()))?;
state.regs.regs[i] = u64::from_le_bytes(bytes); state.regs.regs[i] = u64::from_le_bytes(bytes);
@ -1230,6 +1233,8 @@ impl cpu::Vcpu for KvmVcpu {
let off = offset_of!(user_pt_regs, sp); let off = offset_of!(user_pt_regs, sp);
let mut bytes = [0_u8; 8]; let mut bytes = [0_u8; 8];
self.fd self.fd
.lock()
.unwrap()
.get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U64, off), &mut bytes) .get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U64, off), &mut bytes)
.map_err(|e| cpu::HypervisorCpuError::GetCoreRegister(e.into()))?; .map_err(|e| cpu::HypervisorCpuError::GetCoreRegister(e.into()))?;
state.regs.sp = u64::from_le_bytes(bytes); state.regs.sp = u64::from_le_bytes(bytes);
@ -1238,6 +1243,8 @@ impl cpu::Vcpu for KvmVcpu {
let off = offset_of!(user_pt_regs, pc); let off = offset_of!(user_pt_regs, pc);
let mut bytes = [0_u8; 8]; let mut bytes = [0_u8; 8];
self.fd self.fd
.lock()
.unwrap()
.get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U64, off), &mut bytes) .get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U64, off), &mut bytes)
.map_err(|e| cpu::HypervisorCpuError::GetCoreRegister(e.into()))?; .map_err(|e| cpu::HypervisorCpuError::GetCoreRegister(e.into()))?;
state.regs.pc = u64::from_le_bytes(bytes); state.regs.pc = u64::from_le_bytes(bytes);
@ -1246,6 +1253,8 @@ impl cpu::Vcpu for KvmVcpu {
let off = offset_of!(user_pt_regs, pstate); let off = offset_of!(user_pt_regs, pstate);
let mut bytes = [0_u8; 8]; let mut bytes = [0_u8; 8];
self.fd self.fd
.lock()
.unwrap()
.get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U64, off), &mut bytes) .get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U64, off), &mut bytes)
.map_err(|e| cpu::HypervisorCpuError::GetCoreRegister(e.into()))?; .map_err(|e| cpu::HypervisorCpuError::GetCoreRegister(e.into()))?;
state.regs.pstate = u64::from_le_bytes(bytes); state.regs.pstate = u64::from_le_bytes(bytes);
@ -1254,6 +1263,8 @@ impl cpu::Vcpu for KvmVcpu {
let off = offset_of!(kvm_regs, sp_el1); let off = offset_of!(kvm_regs, sp_el1);
let mut bytes = [0_u8; 8]; let mut bytes = [0_u8; 8];
self.fd self.fd
.lock()
.unwrap()
.get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U64, off), &mut bytes) .get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U64, off), &mut bytes)
.map_err(|e| cpu::HypervisorCpuError::GetCoreRegister(e.into()))?; .map_err(|e| cpu::HypervisorCpuError::GetCoreRegister(e.into()))?;
state.sp_el1 = u64::from_le_bytes(bytes); state.sp_el1 = u64::from_le_bytes(bytes);
@ -1263,6 +1274,8 @@ impl cpu::Vcpu for KvmVcpu {
let off = offset_of!(kvm_regs, elr_el1); let off = offset_of!(kvm_regs, elr_el1);
let mut bytes = [0_u8; 8]; let mut bytes = [0_u8; 8];
self.fd self.fd
.lock()
.unwrap()
.get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U64, off), &mut bytes) .get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U64, off), &mut bytes)
.map_err(|e| cpu::HypervisorCpuError::GetCoreRegister(e.into()))?; .map_err(|e| cpu::HypervisorCpuError::GetCoreRegister(e.into()))?;
state.elr_el1 = u64::from_le_bytes(bytes); state.elr_el1 = u64::from_le_bytes(bytes);
@ -1272,6 +1285,8 @@ impl cpu::Vcpu for KvmVcpu {
for i in 0..KVM_NR_SPSR as usize { for i in 0..KVM_NR_SPSR as usize {
let mut bytes = [0_u8; 8]; let mut bytes = [0_u8; 8];
self.fd self.fd
.lock()
.unwrap()
.get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U64, off), &mut bytes) .get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U64, off), &mut bytes)
.map_err(|e| cpu::HypervisorCpuError::GetCoreRegister(e.into()))?; .map_err(|e| cpu::HypervisorCpuError::GetCoreRegister(e.into()))?;
state.spsr[i] = u64::from_le_bytes(bytes); state.spsr[i] = u64::from_le_bytes(bytes);
@ -1284,6 +1299,8 @@ impl cpu::Vcpu for KvmVcpu {
for i in 0..32 { for i in 0..32 {
let mut bytes = [0_u8; 16]; let mut bytes = [0_u8; 16];
self.fd self.fd
.lock()
.unwrap()
.get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U128, off), &mut bytes) .get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U128, off), &mut bytes)
.map_err(|e| cpu::HypervisorCpuError::GetCoreRegister(e.into()))?; .map_err(|e| cpu::HypervisorCpuError::GetCoreRegister(e.into()))?;
state.fp_regs.vregs[i] = u128::from_le_bytes(bytes); state.fp_regs.vregs[i] = u128::from_le_bytes(bytes);
@ -1294,6 +1311,8 @@ impl cpu::Vcpu for KvmVcpu {
let off = offset_of!(kvm_regs, fp_regs) + offset_of!(user_fpsimd_state, fpsr); let off = offset_of!(kvm_regs, fp_regs) + offset_of!(user_fpsimd_state, fpsr);
let mut bytes = [0_u8; 4]; let mut bytes = [0_u8; 4];
self.fd self.fd
.lock()
.unwrap()
.get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U32, off), &mut bytes) .get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U32, off), &mut bytes)
.map_err(|e| cpu::HypervisorCpuError::GetCoreRegister(e.into()))?; .map_err(|e| cpu::HypervisorCpuError::GetCoreRegister(e.into()))?;
state.fp_regs.fpsr = u32::from_le_bytes(bytes); state.fp_regs.fpsr = u32::from_le_bytes(bytes);
@ -1302,6 +1321,8 @@ impl cpu::Vcpu for KvmVcpu {
let off = offset_of!(kvm_regs, fp_regs) + offset_of!(user_fpsimd_state, fpcr); let off = offset_of!(kvm_regs, fp_regs) + offset_of!(user_fpsimd_state, fpcr);
let mut bytes = [0_u8; 4]; let mut bytes = [0_u8; 4];
self.fd self.fd
.lock()
.unwrap()
.get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U32, off), &mut bytes) .get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U32, off), &mut bytes)
.map_err(|e| cpu::HypervisorCpuError::GetCoreRegister(e.into()))?; .map_err(|e| cpu::HypervisorCpuError::GetCoreRegister(e.into()))?;
state.fp_regs.fpcr = u32::from_le_bytes(bytes); state.fp_regs.fpcr = u32::from_le_bytes(bytes);
@ -1315,6 +1336,8 @@ impl cpu::Vcpu for KvmVcpu {
fn set_regs(&self, regs: &StandardRegisters) -> cpu::Result<()> { fn set_regs(&self, regs: &StandardRegisters) -> cpu::Result<()> {
let regs = (*regs).into(); let regs = (*regs).into();
self.fd self.fd
.lock()
.unwrap()
.set_regs(&regs) .set_regs(&regs)
.map_err(|e| cpu::HypervisorCpuError::SetStandardRegs(e.into())) .map_err(|e| cpu::HypervisorCpuError::SetStandardRegs(e.into()))
} }
@ -1331,6 +1354,8 @@ impl cpu::Vcpu for KvmVcpu {
let mut off = offset_of!(user_pt_regs, regs); let mut off = offset_of!(user_pt_regs, regs);
for i in 0..31 { for i in 0..31 {
self.fd self.fd
.lock()
.unwrap()
.set_one_reg( .set_one_reg(
arm64_core_reg_id!(KVM_REG_SIZE_U64, off), arm64_core_reg_id!(KVM_REG_SIZE_U64, off),
&state.regs.regs[i].to_le_bytes(), &state.regs.regs[i].to_le_bytes(),
@ -1341,6 +1366,8 @@ impl cpu::Vcpu for KvmVcpu {
let off = offset_of!(user_pt_regs, sp); let off = offset_of!(user_pt_regs, sp);
self.fd self.fd
.lock()
.unwrap()
.set_one_reg( .set_one_reg(
arm64_core_reg_id!(KVM_REG_SIZE_U64, off), arm64_core_reg_id!(KVM_REG_SIZE_U64, off),
&state.regs.sp.to_le_bytes(), &state.regs.sp.to_le_bytes(),
@ -1349,6 +1376,8 @@ impl cpu::Vcpu for KvmVcpu {
let off = offset_of!(user_pt_regs, pc); let off = offset_of!(user_pt_regs, pc);
self.fd self.fd
.lock()
.unwrap()
.set_one_reg( .set_one_reg(
arm64_core_reg_id!(KVM_REG_SIZE_U64, off), arm64_core_reg_id!(KVM_REG_SIZE_U64, off),
&state.regs.pc.to_le_bytes(), &state.regs.pc.to_le_bytes(),
@ -1357,6 +1386,8 @@ impl cpu::Vcpu for KvmVcpu {
let off = offset_of!(user_pt_regs, pstate); let off = offset_of!(user_pt_regs, pstate);
self.fd self.fd
.lock()
.unwrap()
.set_one_reg( .set_one_reg(
arm64_core_reg_id!(KVM_REG_SIZE_U64, off), arm64_core_reg_id!(KVM_REG_SIZE_U64, off),
&state.regs.pstate.to_le_bytes(), &state.regs.pstate.to_le_bytes(),
@ -1365,6 +1396,8 @@ impl cpu::Vcpu for KvmVcpu {
let off = offset_of!(kvm_regs, sp_el1); let off = offset_of!(kvm_regs, sp_el1);
self.fd self.fd
.lock()
.unwrap()
.set_one_reg( .set_one_reg(
arm64_core_reg_id!(KVM_REG_SIZE_U64, off), arm64_core_reg_id!(KVM_REG_SIZE_U64, off),
&state.sp_el1.to_le_bytes(), &state.sp_el1.to_le_bytes(),
@ -1373,6 +1406,8 @@ impl cpu::Vcpu for KvmVcpu {
let off = offset_of!(kvm_regs, elr_el1); let off = offset_of!(kvm_regs, elr_el1);
self.fd self.fd
.lock()
.unwrap()
.set_one_reg( .set_one_reg(
arm64_core_reg_id!(KVM_REG_SIZE_U64, off), arm64_core_reg_id!(KVM_REG_SIZE_U64, off),
&state.elr_el1.to_le_bytes(), &state.elr_el1.to_le_bytes(),
@ -1382,6 +1417,8 @@ impl cpu::Vcpu for KvmVcpu {
let mut off = offset_of!(kvm_regs, spsr); let mut off = offset_of!(kvm_regs, spsr);
for i in 0..KVM_NR_SPSR as usize { for i in 0..KVM_NR_SPSR as usize {
self.fd self.fd
.lock()
.unwrap()
.set_one_reg( .set_one_reg(
arm64_core_reg_id!(KVM_REG_SIZE_U64, off), arm64_core_reg_id!(KVM_REG_SIZE_U64, off),
&state.spsr[i].to_le_bytes(), &state.spsr[i].to_le_bytes(),
@ -1393,6 +1430,8 @@ impl cpu::Vcpu for KvmVcpu {
let mut off = offset_of!(kvm_regs, fp_regs) + offset_of!(user_fpsimd_state, vregs); let mut off = offset_of!(kvm_regs, fp_regs) + offset_of!(user_fpsimd_state, vregs);
for i in 0..32 { for i in 0..32 {
self.fd self.fd
.lock()
.unwrap()
.set_one_reg( .set_one_reg(
arm64_core_reg_id!(KVM_REG_SIZE_U128, off), arm64_core_reg_id!(KVM_REG_SIZE_U128, off),
&state.fp_regs.vregs[i].to_le_bytes(), &state.fp_regs.vregs[i].to_le_bytes(),
@ -1403,6 +1442,8 @@ impl cpu::Vcpu for KvmVcpu {
let off = offset_of!(kvm_regs, fp_regs) + offset_of!(user_fpsimd_state, fpsr); let off = offset_of!(kvm_regs, fp_regs) + offset_of!(user_fpsimd_state, fpsr);
self.fd self.fd
.lock()
.unwrap()
.set_one_reg( .set_one_reg(
arm64_core_reg_id!(KVM_REG_SIZE_U32, off), arm64_core_reg_id!(KVM_REG_SIZE_U32, off),
&state.fp_regs.fpsr.to_le_bytes(), &state.fp_regs.fpsr.to_le_bytes(),
@ -1411,6 +1452,8 @@ impl cpu::Vcpu for KvmVcpu {
let off = offset_of!(kvm_regs, fp_regs) + offset_of!(user_fpsimd_state, fpcr); let off = offset_of!(kvm_regs, fp_regs) + offset_of!(user_fpsimd_state, fpcr);
self.fd self.fd
.lock()
.unwrap()
.set_one_reg( .set_one_reg(
arm64_core_reg_id!(KVM_REG_SIZE_U32, off), arm64_core_reg_id!(KVM_REG_SIZE_U32, off),
&state.fp_regs.fpcr.to_le_bytes(), &state.fp_regs.fpcr.to_le_bytes(),
@ -1426,6 +1469,8 @@ impl cpu::Vcpu for KvmVcpu {
fn get_sregs(&self) -> cpu::Result<SpecialRegisters> { fn get_sregs(&self) -> cpu::Result<SpecialRegisters> {
Ok(self Ok(self
.fd .fd
.lock()
.unwrap()
.get_sregs() .get_sregs()
.map_err(|e| cpu::HypervisorCpuError::GetSpecialRegs(e.into()))? .map_err(|e| cpu::HypervisorCpuError::GetSpecialRegs(e.into()))?
.into()) .into())
@ -1438,6 +1483,8 @@ impl cpu::Vcpu for KvmVcpu {
fn set_sregs(&self, sregs: &SpecialRegisters) -> cpu::Result<()> { fn set_sregs(&self, sregs: &SpecialRegisters) -> cpu::Result<()> {
let sregs = (*sregs).into(); let sregs = (*sregs).into();
self.fd self.fd
.lock()
.unwrap()
.set_sregs(&sregs) .set_sregs(&sregs)
.map_err(|e| cpu::HypervisorCpuError::SetSpecialRegs(e.into())) .map_err(|e| cpu::HypervisorCpuError::SetSpecialRegs(e.into()))
} }
@ -1449,6 +1496,8 @@ impl cpu::Vcpu for KvmVcpu {
fn get_fpu(&self) -> cpu::Result<FpuState> { fn get_fpu(&self) -> cpu::Result<FpuState> {
Ok(self Ok(self
.fd .fd
.lock()
.unwrap()
.get_fpu() .get_fpu()
.map_err(|e| cpu::HypervisorCpuError::GetFloatingPointRegs(e.into()))? .map_err(|e| cpu::HypervisorCpuError::GetFloatingPointRegs(e.into()))?
.into()) .into())
@ -1461,6 +1510,8 @@ impl cpu::Vcpu for KvmVcpu {
fn set_fpu(&self, fpu: &FpuState) -> cpu::Result<()> { fn set_fpu(&self, fpu: &FpuState) -> cpu::Result<()> {
let fpu: kvm_bindings::kvm_fpu = (*fpu).clone().into(); let fpu: kvm_bindings::kvm_fpu = (*fpu).clone().into();
self.fd self.fd
.lock()
.unwrap()
.set_fpu(&fpu) .set_fpu(&fpu)
.map_err(|e| cpu::HypervisorCpuError::SetFloatingPointRegs(e.into())) .map_err(|e| cpu::HypervisorCpuError::SetFloatingPointRegs(e.into()))
} }
@ -1476,6 +1527,8 @@ impl cpu::Vcpu for KvmVcpu {
.map_err(|_| cpu::HypervisorCpuError::SetCpuid(anyhow!("failed to create CpuId")))?; .map_err(|_| cpu::HypervisorCpuError::SetCpuid(anyhow!("failed to create CpuId")))?;
self.fd self.fd
.lock()
.unwrap()
.set_cpuid2(&kvm_cpuid) .set_cpuid2(&kvm_cpuid)
.map_err(|e| cpu::HypervisorCpuError::SetCpuid(e.into())) .map_err(|e| cpu::HypervisorCpuError::SetCpuid(e.into()))
} }
@ -1494,6 +1547,8 @@ impl cpu::Vcpu for KvmVcpu {
..Default::default() ..Default::default()
}; };
self.fd self.fd
.lock()
.unwrap()
.enable_cap(&cap) .enable_cap(&cap)
.map_err(|e| cpu::HypervisorCpuError::EnableHyperVSyncIc(e.into())) .map_err(|e| cpu::HypervisorCpuError::EnableHyperVSyncIc(e.into()))
} }
@ -1505,6 +1560,8 @@ impl cpu::Vcpu for KvmVcpu {
fn get_cpuid2(&self, num_entries: usize) -> cpu::Result<Vec<CpuIdEntry>> { fn get_cpuid2(&self, num_entries: usize) -> cpu::Result<Vec<CpuIdEntry>> {
let kvm_cpuid = self let kvm_cpuid = self
.fd .fd
.lock()
.unwrap()
.get_cpuid2(num_entries) .get_cpuid2(num_entries)
.map_err(|e| cpu::HypervisorCpuError::GetCpuid(e.into()))?; .map_err(|e| cpu::HypervisorCpuError::GetCpuid(e.into()))?;
@ -1520,6 +1577,8 @@ impl cpu::Vcpu for KvmVcpu {
fn get_lapic(&self) -> cpu::Result<LapicState> { fn get_lapic(&self) -> cpu::Result<LapicState> {
Ok(self Ok(self
.fd .fd
.lock()
.unwrap()
.get_lapic() .get_lapic()
.map_err(|e| cpu::HypervisorCpuError::GetlapicState(e.into()))? .map_err(|e| cpu::HypervisorCpuError::GetlapicState(e.into()))?
.into()) .into())
@ -1532,6 +1591,8 @@ impl cpu::Vcpu for KvmVcpu {
fn set_lapic(&self, klapic: &LapicState) -> cpu::Result<()> { fn set_lapic(&self, klapic: &LapicState) -> cpu::Result<()> {
let klapic: kvm_bindings::kvm_lapic_state = (*klapic).clone().into(); let klapic: kvm_bindings::kvm_lapic_state = (*klapic).clone().into();
self.fd self.fd
.lock()
.unwrap()
.set_lapic(&klapic) .set_lapic(&klapic)
.map_err(|e| cpu::HypervisorCpuError::SetLapicState(e.into())) .map_err(|e| cpu::HypervisorCpuError::SetLapicState(e.into()))
} }
@ -1545,6 +1606,8 @@ impl cpu::Vcpu for KvmVcpu {
let mut kvm_msrs = MsrEntries::from_entries(&kvm_msrs).unwrap(); let mut kvm_msrs = MsrEntries::from_entries(&kvm_msrs).unwrap();
let succ = self let succ = self
.fd .fd
.lock()
.unwrap()
.get_msrs(&mut kvm_msrs) .get_msrs(&mut kvm_msrs)
.map_err(|e| cpu::HypervisorCpuError::GetMsrEntries(e.into()))?; .map_err(|e| cpu::HypervisorCpuError::GetMsrEntries(e.into()))?;
@ -1567,6 +1630,8 @@ impl cpu::Vcpu for KvmVcpu {
let kvm_msrs: Vec<kvm_msr_entry> = msrs.iter().map(|e| (*e).into()).collect(); let kvm_msrs: Vec<kvm_msr_entry> = msrs.iter().map(|e| (*e).into()).collect();
let kvm_msrs = MsrEntries::from_entries(&kvm_msrs).unwrap(); let kvm_msrs = MsrEntries::from_entries(&kvm_msrs).unwrap();
self.fd self.fd
.lock()
.unwrap()
.set_msrs(&kvm_msrs) .set_msrs(&kvm_msrs)
.map_err(|e| cpu::HypervisorCpuError::SetMsrEntries(e.into())) .map_err(|e| cpu::HypervisorCpuError::SetMsrEntries(e.into()))
} }
@ -1577,6 +1642,8 @@ impl cpu::Vcpu for KvmVcpu {
fn get_mp_state(&self) -> cpu::Result<MpState> { fn get_mp_state(&self) -> cpu::Result<MpState> {
Ok(self Ok(self
.fd .fd
.lock()
.unwrap()
.get_mp_state() .get_mp_state()
.map_err(|e| cpu::HypervisorCpuError::GetMpState(e.into()))? .map_err(|e| cpu::HypervisorCpuError::GetMpState(e.into()))?
.into()) .into())
@ -1587,6 +1654,8 @@ impl cpu::Vcpu for KvmVcpu {
/// ///
fn set_mp_state(&self, mp_state: MpState) -> cpu::Result<()> { fn set_mp_state(&self, mp_state: MpState) -> cpu::Result<()> {
self.fd self.fd
.lock()
.unwrap()
.set_mp_state(mp_state.into()) .set_mp_state(mp_state.into())
.map_err(|e| cpu::HypervisorCpuError::SetMpState(e.into())) .map_err(|e| cpu::HypervisorCpuError::SetMpState(e.into()))
} }
@ -1598,6 +1667,8 @@ impl cpu::Vcpu for KvmVcpu {
fn translate_gva(&self, gva: u64, _flags: u64) -> cpu::Result<(u64, u32)> { fn translate_gva(&self, gva: u64, _flags: u64) -> cpu::Result<(u64, u32)> {
let tr = self let tr = self
.fd .fd
.lock()
.unwrap()
.translate_gva(gva) .translate_gva(gva)
.map_err(|e| cpu::HypervisorCpuError::TranslateVirtualAddress(e.into()))?; .map_err(|e| cpu::HypervisorCpuError::TranslateVirtualAddress(e.into()))?;
// tr.valid is set if the GVA is mapped to valid GPA. // tr.valid is set if the GVA is mapped to valid GPA.
@ -1614,7 +1685,7 @@ impl cpu::Vcpu for KvmVcpu {
/// Triggers the running of the current virtual CPU returning an exit reason. /// Triggers the running of the current virtual CPU returning an exit reason.
/// ///
fn run(&self) -> std::result::Result<cpu::VmExit, cpu::HypervisorCpuError> { fn run(&self) -> std::result::Result<cpu::VmExit, cpu::HypervisorCpuError> {
match self.fd.run() { match self.fd.lock().unwrap().run() {
Ok(run) => match run { Ok(run) => match run {
#[cfg(target_arch = "x86_64")] #[cfg(target_arch = "x86_64")]
VcpuExit::IoIn(addr, data) => { VcpuExit::IoIn(addr, data) => {
@ -1708,7 +1779,7 @@ impl cpu::Vcpu for KvmVcpu {
/// potential soft lockups when being resumed. /// potential soft lockups when being resumed.
/// ///
fn notify_guest_clock_paused(&self) -> cpu::Result<()> { fn notify_guest_clock_paused(&self) -> cpu::Result<()> {
if let Err(e) = self.fd.kvmclock_ctrl() { if let Err(e) = self.fd.lock().unwrap().kvmclock_ctrl() {
// Linux kernel returns -EINVAL if the PV clock isn't yet initialised // Linux kernel returns -EINVAL if the PV clock isn't yet initialised
// which could be because we're still in firmware or the guest doesn't // which could be because we're still in firmware or the guest doesn't
// use KVM clock. // use KVM clock.
@ -1770,6 +1841,8 @@ impl cpu::Vcpu for KvmVcpu {
} }
} }
self.fd self.fd
.lock()
.unwrap()
.set_guest_debug(&dbg) .set_guest_debug(&dbg)
.map_err(|e| cpu::HypervisorCpuError::SetDebugRegs(e.into())) .map_err(|e| cpu::HypervisorCpuError::SetDebugRegs(e.into()))
} }
@ -1777,6 +1850,8 @@ impl cpu::Vcpu for KvmVcpu {
#[cfg(target_arch = "aarch64")] #[cfg(target_arch = "aarch64")]
fn vcpu_init(&self, kvi: &VcpuInit) -> cpu::Result<()> { fn vcpu_init(&self, kvi: &VcpuInit) -> cpu::Result<()> {
self.fd self.fd
.lock()
.unwrap()
.vcpu_init(kvi) .vcpu_init(kvi)
.map_err(|e| cpu::HypervisorCpuError::VcpuInit(e.into())) .map_err(|e| cpu::HypervisorCpuError::VcpuInit(e.into()))
} }
@ -1788,6 +1863,8 @@ impl cpu::Vcpu for KvmVcpu {
#[cfg(target_arch = "aarch64")] #[cfg(target_arch = "aarch64")]
fn get_reg_list(&self, reg_list: &mut RegList) -> cpu::Result<()> { fn get_reg_list(&self, reg_list: &mut RegList) -> cpu::Result<()> {
self.fd self.fd
.lock()
.unwrap()
.get_reg_list(reg_list) .get_reg_list(reg_list)
.map_err(|e| cpu::HypervisorCpuError::GetRegList(e.into())) .map_err(|e| cpu::HypervisorCpuError::GetRegList(e.into()))
} }
@ -1820,6 +1897,8 @@ impl cpu::Vcpu for KvmVcpu {
| KVM_REG_ARM64_SYSREG_OP2_MASK)) as u64); | KVM_REG_ARM64_SYSREG_OP2_MASK)) as u64);
let mut bytes = [0_u8; 8]; let mut bytes = [0_u8; 8];
self.fd self.fd
.lock()
.unwrap()
.get_one_reg(id, &mut bytes) .get_one_reg(id, &mut bytes)
.map_err(|e| cpu::HypervisorCpuError::GetSysRegister(e.into()))?; .map_err(|e| cpu::HypervisorCpuError::GetSysRegister(e.into()))?;
Ok(u64::from_le_bytes(bytes)) Ok(u64::from_le_bytes(bytes))
@ -1847,6 +1926,8 @@ impl cpu::Vcpu for KvmVcpu {
// Get the register index of the PSTATE (Processor State) register. // Get the register index of the PSTATE (Processor State) register.
let pstate = offset_of!(user_pt_regs, pstate) + kreg_off; let pstate = offset_of!(user_pt_regs, pstate) + kreg_off;
self.fd self.fd
.lock()
.unwrap()
.set_one_reg( .set_one_reg(
arm64_core_reg_id!(KVM_REG_SIZE_U64, pstate), arm64_core_reg_id!(KVM_REG_SIZE_U64, pstate),
&PSTATE_FAULT_BITS_64.to_le_bytes(), &PSTATE_FAULT_BITS_64.to_le_bytes(),
@ -1858,6 +1939,8 @@ impl cpu::Vcpu for KvmVcpu {
// Setting the PC (Processor Counter) to the current program address (kernel address). // Setting the PC (Processor Counter) to the current program address (kernel address).
let pc = offset_of!(user_pt_regs, pc) + kreg_off; let pc = offset_of!(user_pt_regs, pc) + kreg_off;
self.fd self.fd
.lock()
.unwrap()
.set_one_reg( .set_one_reg(
arm64_core_reg_id!(KVM_REG_SIZE_U64, pc), arm64_core_reg_id!(KVM_REG_SIZE_U64, pc),
&boot_ip.to_le_bytes(), &boot_ip.to_le_bytes(),
@ -1870,6 +1953,8 @@ impl cpu::Vcpu for KvmVcpu {
// We are choosing to place it the end of DRAM. See `get_fdt_addr`. // We are choosing to place it the end of DRAM. See `get_fdt_addr`.
let regs0 = offset_of!(user_pt_regs, regs) + kreg_off; let regs0 = offset_of!(user_pt_regs, regs) + kreg_off;
self.fd self.fd
.lock()
.unwrap()
.set_one_reg( .set_one_reg(
arm64_core_reg_id!(KVM_REG_SIZE_U64, regs0), arm64_core_reg_id!(KVM_REG_SIZE_U64, regs0),
&fdt_start.to_le_bytes(), &fdt_start.to_le_bytes(),
@ -2021,6 +2106,8 @@ impl cpu::Vcpu for KvmVcpu {
let mut sys_regs: Vec<Register> = Vec::new(); let mut sys_regs: Vec<Register> = Vec::new();
let mut reg_list = RegList::new(500).unwrap(); let mut reg_list = RegList::new(500).unwrap();
self.fd self.fd
.lock()
.unwrap()
.get_reg_list(&mut reg_list) .get_reg_list(&mut reg_list)
.map_err(|e| cpu::HypervisorCpuError::GetRegList(e.into()))?; .map_err(|e| cpu::HypervisorCpuError::GetRegList(e.into()))?;
@ -2039,6 +2126,8 @@ impl cpu::Vcpu for KvmVcpu {
for index in indices.iter() { for index in indices.iter() {
let mut bytes = [0_u8; 8]; let mut bytes = [0_u8; 8];
self.fd self.fd
.lock()
.unwrap()
.get_one_reg(*index, &mut bytes) .get_one_reg(*index, &mut bytes)
.map_err(|e| cpu::HypervisorCpuError::GetSysRegister(e.into()))?; .map_err(|e| cpu::HypervisorCpuError::GetSysRegister(e.into()))?;
sys_regs.push(kvm_bindings::kvm_one_reg { sys_regs.push(kvm_bindings::kvm_one_reg {
@ -2153,6 +2242,8 @@ impl cpu::Vcpu for KvmVcpu {
// Set system registers // Set system registers
for reg in &state.sys_regs { for reg in &state.sys_regs {
self.fd self.fd
.lock()
.unwrap()
.set_one_reg(reg.id, &reg.addr.to_le_bytes()) .set_one_reg(reg.id, &reg.addr.to_le_bytes())
.map_err(|e| cpu::HypervisorCpuError::SetSysRegister(e.into()))?; .map_err(|e| cpu::HypervisorCpuError::SetSysRegister(e.into()))?;
} }
@ -2167,15 +2258,20 @@ impl cpu::Vcpu for KvmVcpu {
/// ///
#[cfg(feature = "tdx")] #[cfg(feature = "tdx")]
fn tdx_init(&self, hob_address: u64) -> cpu::Result<()> { fn tdx_init(&self, hob_address: u64) -> cpu::Result<()> {
tdx_command(&self.fd.as_raw_fd(), TdxCommand::InitVcpu, 0, hob_address) tdx_command(
.map_err(cpu::HypervisorCpuError::InitializeTdx) &self.fd.lock().unwrap().as_raw_fd(),
TdxCommand::InitVcpu,
0,
hob_address,
)
.map_err(cpu::HypervisorCpuError::InitializeTdx)
} }
/// ///
/// Set the "immediate_exit" state /// Set the "immediate_exit" state
/// ///
fn set_immediate_exit(&self, exit: bool) { fn set_immediate_exit(&self, exit: bool) {
self.fd.set_kvm_immediate_exit(exit.into()); self.fd.lock().unwrap().set_kvm_immediate_exit(exit.into());
} }
/// ///
@ -2183,7 +2279,8 @@ impl cpu::Vcpu for KvmVcpu {
/// ///
#[cfg(feature = "tdx")] #[cfg(feature = "tdx")]
fn get_tdx_exit_details(&mut self) -> cpu::Result<TdxExitDetails> { fn get_tdx_exit_details(&mut self) -> cpu::Result<TdxExitDetails> {
let kvm_run = self.fd.get_kvm_run(); let mut fd = self.fd.as_ref().lock().unwrap();
let kvm_run = fd.get_kvm_run();
// SAFETY: accessing a union field in a valid structure // SAFETY: accessing a union field in a valid structure
let tdx_vmcall = unsafe { let tdx_vmcall = unsafe {
&mut (*((&mut kvm_run.__bindgen_anon_1) as *mut kvm_run__bindgen_ty_1 &mut (*((&mut kvm_run.__bindgen_anon_1) as *mut kvm_run__bindgen_ty_1
@ -2212,7 +2309,8 @@ impl cpu::Vcpu for KvmVcpu {
/// ///
#[cfg(feature = "tdx")] #[cfg(feature = "tdx")]
fn set_tdx_status(&mut self, status: TdxExitStatus) { fn set_tdx_status(&mut self, status: TdxExitStatus) {
let kvm_run = self.fd.get_kvm_run(); let mut fd = self.fd.as_ref().lock().unwrap();
let kvm_run = fd.get_kvm_run();
// SAFETY: accessing a union field in a valid structure // SAFETY: accessing a union field in a valid structure
let tdx_vmcall = unsafe { let tdx_vmcall = unsafe {
&mut (*((&mut kvm_run.__bindgen_anon_1) as *mut kvm_run__bindgen_ty_1 &mut (*((&mut kvm_run.__bindgen_anon_1) as *mut kvm_run__bindgen_ty_1
@ -2261,7 +2359,7 @@ impl cpu::Vcpu for KvmVcpu {
addr: 0x0, addr: 0x0,
flags: 0, flags: 0,
}; };
self.fd.has_device_attr(&cpu_attr).is_ok() self.fd.lock().unwrap().has_device_attr(&cpu_attr).is_ok()
} }
#[cfg(target_arch = "aarch64")] #[cfg(target_arch = "aarch64")]
@ -2279,9 +2377,13 @@ impl cpu::Vcpu for KvmVcpu {
flags: 0, flags: 0,
}; };
self.fd self.fd
.lock()
.unwrap()
.set_device_attr(&cpu_attr_irq) .set_device_attr(&cpu_attr_irq)
.map_err(|_| cpu::HypervisorCpuError::InitializePmu)?; .map_err(|_| cpu::HypervisorCpuError::InitializePmu)?;
self.fd self.fd
.lock()
.unwrap()
.set_device_attr(&cpu_attr) .set_device_attr(&cpu_attr)
.map_err(|_| cpu::HypervisorCpuError::InitializePmu) .map_err(|_| cpu::HypervisorCpuError::InitializePmu)
} }
@ -2291,7 +2393,7 @@ impl cpu::Vcpu for KvmVcpu {
/// Get the frequency of the TSC if available /// Get the frequency of the TSC if available
/// ///
fn tsc_khz(&self) -> cpu::Result<Option<u32>> { fn tsc_khz(&self) -> cpu::Result<Option<u32>> {
match self.fd.get_tsc_khz() { match self.fd.lock().unwrap().get_tsc_khz() {
Err(e) => { Err(e) => {
if e.errno() == libc::EIO { if e.errno() == libc::EIO {
Ok(None) Ok(None)
@ -2308,7 +2410,7 @@ impl cpu::Vcpu for KvmVcpu {
/// Set the frequency of the TSC if available /// Set the frequency of the TSC if available
/// ///
fn set_tsc_khz(&self, freq: u32) -> cpu::Result<()> { fn set_tsc_khz(&self, freq: u32) -> cpu::Result<()> {
match self.fd.set_tsc_khz(freq) { match self.fd.lock().unwrap().set_tsc_khz(freq) {
Err(e) => { Err(e) => {
if e.errno() == libc::EIO { if e.errno() == libc::EIO {
Ok(()) Ok(())
@ -2325,7 +2427,7 @@ impl cpu::Vcpu for KvmVcpu {
/// Trigger NMI interrupt /// Trigger NMI interrupt
/// ///
fn nmi(&self) -> cpu::Result<()> { fn nmi(&self) -> cpu::Result<()> {
match self.fd.nmi() { match self.fd.lock().unwrap().nmi() {
Err(e) => { Err(e) => {
if e.errno() == libc::EIO { if e.errno() == libc::EIO {
Ok(()) Ok(())
@ -2346,6 +2448,8 @@ impl KvmVcpu {
fn get_xsave(&self) -> cpu::Result<XsaveState> { fn get_xsave(&self) -> cpu::Result<XsaveState> {
Ok(self Ok(self
.fd .fd
.lock()
.unwrap()
.get_xsave() .get_xsave()
.map_err(|e| cpu::HypervisorCpuError::GetXsaveState(e.into()))? .map_err(|e| cpu::HypervisorCpuError::GetXsaveState(e.into()))?
.into()) .into())
@ -2358,6 +2462,8 @@ impl KvmVcpu {
fn set_xsave(&self, xsave: &XsaveState) -> cpu::Result<()> { fn set_xsave(&self, xsave: &XsaveState) -> cpu::Result<()> {
let xsave: kvm_bindings::kvm_xsave = (*xsave).clone().into(); let xsave: kvm_bindings::kvm_xsave = (*xsave).clone().into();
self.fd self.fd
.lock()
.unwrap()
.set_xsave(&xsave) .set_xsave(&xsave)
.map_err(|e| cpu::HypervisorCpuError::SetXsaveState(e.into())) .map_err(|e| cpu::HypervisorCpuError::SetXsaveState(e.into()))
} }
@ -2368,6 +2474,8 @@ impl KvmVcpu {
/// ///
fn get_xcrs(&self) -> cpu::Result<ExtendedControlRegisters> { fn get_xcrs(&self) -> cpu::Result<ExtendedControlRegisters> {
self.fd self.fd
.lock()
.unwrap()
.get_xcrs() .get_xcrs()
.map_err(|e| cpu::HypervisorCpuError::GetXcsr(e.into())) .map_err(|e| cpu::HypervisorCpuError::GetXcsr(e.into()))
} }
@ -2378,6 +2486,8 @@ impl KvmVcpu {
/// ///
fn set_xcrs(&self, xcrs: &ExtendedControlRegisters) -> cpu::Result<()> { fn set_xcrs(&self, xcrs: &ExtendedControlRegisters) -> cpu::Result<()> {
self.fd self.fd
.lock()
.unwrap()
.set_xcrs(xcrs) .set_xcrs(xcrs)
.map_err(|e| cpu::HypervisorCpuError::SetXcsr(e.into())) .map_err(|e| cpu::HypervisorCpuError::SetXcsr(e.into()))
} }
@ -2389,6 +2499,8 @@ impl KvmVcpu {
/// ///
fn get_vcpu_events(&self) -> cpu::Result<VcpuEvents> { fn get_vcpu_events(&self) -> cpu::Result<VcpuEvents> {
self.fd self.fd
.lock()
.unwrap()
.get_vcpu_events() .get_vcpu_events()
.map_err(|e| cpu::HypervisorCpuError::GetVcpuEvents(e.into())) .map_err(|e| cpu::HypervisorCpuError::GetVcpuEvents(e.into()))
} }
@ -2400,6 +2512,8 @@ impl KvmVcpu {
/// ///
fn set_vcpu_events(&self, events: &VcpuEvents) -> cpu::Result<()> { fn set_vcpu_events(&self, events: &VcpuEvents) -> cpu::Result<()> {
self.fd self.fd
.lock()
.unwrap()
.set_vcpu_events(events) .set_vcpu_events(events)
.map_err(|e| cpu::HypervisorCpuError::SetVcpuEvents(e.into())) .map_err(|e| cpu::HypervisorCpuError::SetVcpuEvents(e.into()))
} }