diff --git a/hypervisor/src/kvm/mod.rs b/hypervisor/src/kvm/mod.rs
index 658bef4ef..cfde8b803 100644
--- a/hypervisor/src/kvm/mod.rs
+++ b/hypervisor/src/kvm/mod.rs
@@ -17,7 +17,7 @@ use crate::cpu;
 use crate::device;
 use crate::hypervisor;
 use crate::vec_with_array_field;
-use crate::vm::{self, VmmOps};
+use crate::vm::{self, VmOps};
 #[cfg(target_arch = "aarch64")]
 use crate::{arm64_core_reg_id, offset__of};
 use kvm_ioctls::{NoDatamatch, VcpuFd, VmFd};
@@ -234,7 +234,7 @@ impl vm::Vm for KvmVm {
     fn create_vcpu(
         &self,
         id: u8,
-        vmmops: Option<Arc<dyn VmmOps>>,
+        vm_ops: Option<Arc<dyn VmOps>>,
     ) -> vm::Result<Arc<dyn cpu::Vcpu>> {
         let vc = self
             .fd
@@ -244,7 +244,7 @@
             fd: vc,
             #[cfg(target_arch = "x86_64")]
             msrs: self.msrs.clone(),
-            vmmops,
+            vm_ops,
             #[cfg(target_arch = "x86_64")]
             hyperv_synic: AtomicBool::new(false),
         };
@@ -816,7 +816,7 @@ pub struct KvmVcpu {
     fd: VcpuFd,
     #[cfg(target_arch = "x86_64")]
     msrs: MsrEntries,
-    vmmops: Option<Arc<dyn VmmOps>>,
+    vm_ops: Option<Arc<dyn VmOps>>,
     #[cfg(target_arch = "x86_64")]
     hyperv_synic: AtomicBool,
 }
@@ -1056,8 +1056,8 @@
             Ok(run) => match run {
                 #[cfg(target_arch = "x86_64")]
                 VcpuExit::IoIn(addr, data) => {
-                    if let Some(vmmops) = &self.vmmops {
-                        return vmmops
+                    if let Some(vm_ops) = &self.vm_ops {
+                        return vm_ops
                             .pio_read(addr.into(), data)
                             .map(|_| cpu::VmExit::Ignore)
                             .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()));
@@ -1067,8 +1067,8 @@
                 }
                 #[cfg(target_arch = "x86_64")]
                 VcpuExit::IoOut(addr, data) => {
-                    if let Some(vmmops) = &self.vmmops {
-                        return vmmops
+                    if let Some(vm_ops) = &self.vm_ops {
+                        return vm_ops
                             .pio_write(addr.into(), data)
                             .map(|_| cpu::VmExit::Ignore)
                             .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()));
@@ -1100,8 +1100,8 @@
                 }
                 VcpuExit::MmioRead(addr, data) => {
-                    if let Some(vmmops) = &self.vmmops {
-                        return vmmops
+                    if let Some(vm_ops) = &self.vm_ops {
+                        return vm_ops
                             .mmio_read(addr, data)
                             .map(|_| cpu::VmExit::Ignore)
                             .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()));
                     }
@@ -1110,8 +1110,8 @@
                     Ok(cpu::VmExit::MmioRead(addr, data))
                 }
                 VcpuExit::MmioWrite(addr, data) => {
-                    if let Some(vmmops) = &self.vmmops {
-                        return vmmops
+                    if let Some(vm_ops) = &self.vm_ops {
+                        return vm_ops
                             .mmio_write(addr, data)
                             .map(|_| cpu::VmExit::Ignore)
                             .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()));
diff --git a/hypervisor/src/mshv/mod.rs b/hypervisor/src/mshv/mod.rs
index eeba4f9b4..539a54cea 100644
--- a/hypervisor/src/mshv/mod.rs
+++ b/hypervisor/src/mshv/mod.rs
@@ -11,7 +11,7 @@ use crate::cpu;
 use crate::cpu::Vcpu;
 use crate::hypervisor;
 use crate::vec_with_array_field;
-use crate::vm::{self, VmmOps};
+use crate::vm::{self, VmOps};
 pub use mshv_bindings::*;
 pub use mshv_ioctls::IoEventAddress;
 use mshv_ioctls::{set_registers_64, Mshv, NoDatamatch, VcpuFd, VmFd};
@@ -122,7 +122,7 @@ impl hypervisor::Hypervisor for MshvHypervisor {
             fd: vm_fd,
             msrs,
             hv_state: hv_state_init(),
-            vmmops: None,
+            vm_ops: None,
             dirty_log_slots: Arc::new(RwLock::new(HashMap::new())),
         }))
     }
@@ -151,7 +151,7 @@ pub struct MshvVcpu {
     cpuid: CpuId,
     msrs: MsrEntries,
     hv_state: Arc<RwLock<HvState>>, // Mshv State
-    vmmops: Option<Arc<dyn VmmOps>>,
+    vm_ops: Option<Arc<dyn VmOps>>,
 }
 
 /// Implementation of Vcpu trait for Microsoft Hypervisor
@@ -355,14 +355,14 @@
 
                     if is_write {
                         let data = (info.rax as u32).to_le_bytes();
-                        if let Some(vmmops) = &self.vmmops {
-                            vmmops
+                        if let Some(vm_ops) = &self.vm_ops {
+                            vm_ops
                                 .pio_write(port.into(), &data[0..len])
                                 .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;
                         }
                     } else {
-                        if let Some(vmmops) = &self.vmmops {
-                            vmmops
+                        if let Some(vm_ops) = &self.vm_ops {
+                            vm_ops
                                 .pio_read(port.into(), &mut data[0..len])
                                 .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;
                         }
@@ -655,9 +655,9 @@ impl<'a> PlatformEmulator for MshvEmulatorContext<'a> {
             gpa
         );
 
-        if let Some(vmmops) = &self.vcpu.vmmops {
-            if vmmops.guest_mem_read(gpa, data).is_err() {
-                vmmops
+        if let Some(vm_ops) = &self.vcpu.vm_ops {
+            if vm_ops.guest_mem_read(gpa, data).is_err() {
+                vm_ops
                     .mmio_read(gpa, data)
                     .map_err(|e| PlatformError::MemoryReadFailure(e.into()))?;
             }
@@ -675,9 +675,9 @@
             gpa
         );
 
-        if let Some(vmmops) = &self.vcpu.vmmops {
-            if vmmops.guest_mem_write(gpa, data).is_err() {
-                vmmops
+        if let Some(vm_ops) = &self.vcpu.vm_ops {
+            if vm_ops.guest_mem_write(gpa, data).is_err() {
+                vm_ops
                     .mmio_write(gpa, data)
                     .map_err(|e| PlatformError::MemoryWriteFailure(e.into()))?;
             }
@@ -746,7 +746,7 @@ pub struct MshvVm {
     msrs: MsrEntries,
     // Hypervisor State
     hv_state: Arc<RwLock<HvState>>,
-    vmmops: Option<Arc<dyn VmmOps>>,
+    vm_ops: Option<Arc<dyn VmOps>>,
     dirty_log_slots: Arc<RwLock<HashMap<u64, MshvDirtyLogSlot>>>,
 }
 
@@ -816,7 +816,7 @@ impl vm::Vm for MshvVm {
     fn create_vcpu(
         &self,
         id: u8,
-        vmmops: Option<Arc<dyn VmmOps>>,
+        vm_ops: Option<Arc<dyn VmOps>>,
     ) -> vm::Result<Arc<dyn cpu::Vcpu>> {
         let vcpu_fd = self
             .fd
@@ -828,7 +828,7 @@
             cpuid: CpuId::new(1).unwrap(),
             msrs: self.msrs.clone(),
             hv_state: self.hv_state.clone(),
-            vmmops,
+            vm_ops,
         };
         Ok(Arc::new(vcpu))
     }
diff --git a/hypervisor/src/vm.rs b/hypervisor/src/vm.rs
index 6b30a99d1..55682d3a7 100644
--- a/hypervisor/src/vm.rs
+++ b/hypervisor/src/vm.rs
@@ -235,7 +235,7 @@ pub trait Vm: Send + Sync {
     /// Unregister an event that will, when signaled, trigger the `gsi` IRQ.
     fn unregister_irqfd(&self, fd: &EventFd, gsi: u32) -> Result<()>;
     /// Creates a new KVM vCPU file descriptor and maps the memory corresponding
-    fn create_vcpu(&self, id: u8, vmmops: Option<Arc<dyn VmmOps>>) -> Result<Arc<dyn Vcpu>>;
+    fn create_vcpu(&self, id: u8, vm_ops: Option<Arc<dyn VmOps>>) -> Result<Arc<dyn Vcpu>>;
     /// Registers an event to be signaled whenever a certain address is written to.
     fn register_ioevent(
         &self,
@@ -309,7 +309,7 @@
     ) -> Result<()>;
 }
 
-pub trait VmmOps: Send + Sync {
+pub trait VmOps: Send + Sync {
     fn guest_mem_write(&self, gpa: u64, buf: &[u8]) -> Result<usize>;
     fn guest_mem_read(&self, gpa: u64, buf: &mut [u8]) -> Result<usize>;
     fn mmio_read(&self, gpa: u64, data: &mut [u8]) -> Result<()>;
diff --git a/vmm/src/cpu.rs b/vmm/src/cpu.rs
index e82ab2e69..e544db8e1 100644
--- a/vmm/src/cpu.rs
+++ b/vmm/src/cpu.rs
@@ -34,7 +34,7 @@ use hypervisor::kvm::kvm_bindings;
 use hypervisor::x86_64::{SpecialRegisters, StandardRegisters};
 #[cfg(target_arch = "x86_64")]
 use hypervisor::CpuId;
-use hypervisor::{vm::VmmOps, CpuState, HypervisorCpuError, VmExit};
+use hypervisor::{vm::VmOps, CpuState, HypervisorCpuError, VmExit};
 #[cfg(feature = "tdx")]
 use hypervisor::{TdxExitDetails, TdxExitStatus};
 use libc::{c_void, siginfo_t};
@@ -255,14 +255,14 @@
     ///
     /// * `id` - Represents the CPU number between [0, max vcpus).
    /// * `vm` - The virtual machine this vcpu will get attached to.
-    /// * `vmmops` - Optional object for exit handling.
+    /// * `vm_ops` - Optional object for exit handling.
     pub fn new(
         id: u8,
         vm: &Arc<dyn hypervisor::Vm>,
-        vmmops: Option<Arc<dyn VmmOps>>,
+        vm_ops: Option<Arc<dyn VmOps>>,
     ) -> Result<Self> {
         let vcpu = vm
-            .create_vcpu(id, vmmops)
+            .create_vcpu(id, vm_ops)
             .map_err(|e| Error::VcpuCreate(e.into()))?;
         // Initially the cpuid per vCPU is the one supported by this VM.
         Ok(Vcpu {
@@ -412,7 +412,7 @@ pub struct CpuManager {
     selected_cpu: u8,
     vcpus: Vec<Arc<Mutex<Vcpu>>>,
     seccomp_action: SeccompAction,
-    vmmops: Arc<dyn VmmOps>,
+    vm_ops: Arc<dyn VmOps>,
     #[cfg_attr(target_arch = "aarch64", allow(dead_code))]
     acpi_address: Option<GuestAddress>,
     proximity_domain_per_cpu: BTreeMap<u8, u32>,
@@ -562,7 +562,7 @@
        #[cfg(feature = "gdb")] vm_debug_evt: EventFd,
         hypervisor: Arc<dyn hypervisor::Hypervisor>,
         seccomp_action: SeccompAction,
-        vmmops: Arc<dyn VmmOps>,
+        vm_ops: Arc<dyn VmOps>,
         #[cfg(feature = "tdx")] tdx_enabled: bool,
         numa_nodes: &NumaNodes,
     ) -> Result<Arc<Mutex<CpuManager>>> {
@@ -684,7 +684,7 @@
             selected_cpu: 0,
             vcpus: Vec::with_capacity(usize::from(config.max_vcpus)),
             seccomp_action,
-            vmmops,
+            vm_ops,
             acpi_address,
             proximity_domain_per_cpu,
             affinity,
@@ -713,7 +713,7 @@
     ) -> Result<()> {
         info!("Creating vCPU: cpu_id = {}", cpu_id);
 
-        let mut vcpu = Vcpu::new(cpu_id, &self.vm, Some(self.vmmops.clone()))?;
+        let mut vcpu = Vcpu::new(cpu_id, &self.vm, Some(self.vm_ops.clone()))?;
 
         if let Some(snapshot) = snapshot {
             // AArch64 vCPUs should be initialized after created.
diff --git a/vmm/src/vm.rs b/vmm/src/vm.rs
index c8533cf77..775b35d3f 100644
--- a/vmm/src/vm.rs
+++ b/vmm/src/vm.rs
@@ -49,7 +49,7 @@ use devices::interrupt_controller::{self, InterruptController};
 use devices::AcpiNotificationFlags;
 #[cfg(all(target_arch = "x86_64", feature = "gdb"))]
 use gdbstub_arch::x86::reg::X86_64CoreRegs;
-use hypervisor::vm::{HypervisorVmError, VmmOps};
+use hypervisor::vm::{HypervisorVmError, VmOps};
 use linux_loader::cmdline::Cmdline;
 #[cfg(target_arch = "x86_64")]
 use linux_loader::loader::elf::PvhBootCapability::PvhEntryPresent;
@@ -343,7 +343,7 @@ impl VmState {
     }
 }
 
-struct VmOps {
+struct VmOpsHandler {
     memory: GuestMemoryAtomic<GuestMemoryMmap>,
     #[cfg(target_arch = "x86_64")]
     io_bus: Arc<Bus>,
@@ -352,7 +352,7 @@
     pci_config_io: Arc<Mutex<dyn BusDevice>>,
 }
 
-impl VmmOps for VmOps {
+impl VmOps for VmOpsHandler {
     fn guest_mem_write(&self, gpa: u64, buf: &[u8]) -> hypervisor::vm::Result<usize> {
         self.memory
             .memory()
@@ -544,13 +544,11 @@ impl Vm {
         #[cfg(target_arch = "x86_64")]
         let io_bus = Arc::clone(device_manager.lock().unwrap().io_bus());
         let mmio_bus = Arc::clone(device_manager.lock().unwrap().mmio_bus());
-        // Create the VmOps structure, which implements the VmmOps trait.
-        // And send it to the hypervisor.
         #[cfg(target_arch = "x86_64")]
         let pci_config_io =
             device_manager.lock().unwrap().pci_config_io() as Arc<Mutex<dyn BusDevice>>;
-        let vm_ops: Arc<dyn VmmOps> = Arc::new(VmOps {
+        let vm_ops: Arc<dyn VmOps> = Arc::new(VmOpsHandler {
             memory,
             #[cfg(target_arch = "x86_64")]
             io_bus,