vmm, hypervisor: Clean up nomenclature around offloading VM operations

The trait and its functionality are about operations on the VM rather
than the VMM, so they should be named accordingly. The new name clashed
with the existing struct for the concrete implementation, which has
been renamed to VmOpsHandler to match.

Signed-off-by: Rob Bradford <robert.bradford@intel.com>
Rob Bradford 2022-05-04 15:34:56 +01:00
parent f24f35b65d
commit 387d56879b
5 changed files with 42 additions and 44 deletions
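
For orientation, here is a minimal, self-contained sketch of the renamed trait together with a toy implementor, loosely modelled on the VmOpsHandler struct further down. It is a sketch under assumptions, not the real code: the ToyVmOpsHandler type, the Vec-backed guest memory and the simplified Result alias are invented for illustration, and the PIO method signatures are inferred from the call sites visible in the diff rather than copied from hypervisor/src/vm.rs.

use std::sync::{Arc, Mutex};

// Simplified stand-in for hypervisor::vm::Result (illustrative only).
type Result<T> = std::result::Result<T, std::io::Error>;

// The trait after the rename: the operations a VM and its vCPUs offload
// to the VMM -- guest memory access plus PIO/MMIO exit handling.
pub trait VmOps: Send + Sync {
    fn guest_mem_write(&self, gpa: u64, buf: &[u8]) -> Result<usize>;
    fn guest_mem_read(&self, gpa: u64, buf: &mut [u8]) -> Result<usize>;
    fn mmio_read(&self, gpa: u64, data: &mut [u8]) -> Result<()>;
    fn mmio_write(&self, gpa: u64, data: &[u8]) -> Result<()>;
    fn pio_read(&self, port: u64, data: &mut [u8]) -> Result<()>;
    fn pio_write(&self, port: u64, data: &[u8]) -> Result<()>;
}

// Toy implementor: a flat, lock-protected Vec stands in for real guest
// memory; MMIO/PIO reads return zeroes instead of dispatching on a bus.
struct ToyVmOpsHandler {
    memory: Mutex<Vec<u8>>,
}

impl VmOps for ToyVmOpsHandler {
    fn guest_mem_write(&self, gpa: u64, buf: &[u8]) -> Result<usize> {
        let mut mem = self.memory.lock().unwrap();
        let start = gpa as usize;
        mem[start..start + buf.len()].copy_from_slice(buf);
        Ok(buf.len())
    }
    fn guest_mem_read(&self, gpa: u64, buf: &mut [u8]) -> Result<usize> {
        let mem = self.memory.lock().unwrap();
        let start = gpa as usize;
        buf.copy_from_slice(&mem[start..start + buf.len()]);
        Ok(buf.len())
    }
    fn mmio_read(&self, _gpa: u64, data: &mut [u8]) -> Result<()> {
        data.fill(0); // a real handler would dispatch on the MMIO bus
        Ok(())
    }
    fn mmio_write(&self, _gpa: u64, _data: &[u8]) -> Result<()> {
        Ok(())
    }
    fn pio_read(&self, _port: u64, data: &mut [u8]) -> Result<()> {
        data.fill(0); // a real handler would dispatch on the PIO bus
        Ok(())
    }
    fn pio_write(&self, _port: u64, _data: &[u8]) -> Result<()> {
        Ok(())
    }
}

fn main() {
    // The handler is shared with each vCPU as Arc<dyn VmOps>, mirroring
    // the Option<Arc<dyn VmOps>> parameter create_vcpu() takes below.
    let vm_ops: Arc<dyn VmOps> = Arc::new(ToyVmOpsHandler {
        memory: Mutex::new(vec![0u8; 4096]),
    });
    vm_ops.guest_mem_write(0x100, b"hello").unwrap();
    let mut buf = [0u8; 5];
    vm_ops.guest_mem_read(0x100, &mut buf).unwrap();
    assert_eq!(&buf, b"hello");
}

The rationale for the rename is visible in the sketch: the trait describes services offered to the VM (guest memory, PIO/MMIO), so VmOps fits, and the concrete implementation then needs a distinct name.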

hypervisor/src/kvm/mod.rs

@@ -17,7 +17,7 @@ use crate::cpu;
use crate::device;
use crate::hypervisor;
use crate::vec_with_array_field;
-use crate::vm::{self, VmmOps};
+use crate::vm::{self, VmOps};
#[cfg(target_arch = "aarch64")]
use crate::{arm64_core_reg_id, offset__of};
use kvm_ioctls::{NoDatamatch, VcpuFd, VmFd};
@@ -234,7 +234,7 @@ impl vm::Vm for KvmVm {
fn create_vcpu(
&self,
id: u8,
-vmmops: Option<Arc<dyn VmmOps>>,
+vm_ops: Option<Arc<dyn VmOps>>,
) -> vm::Result<Arc<dyn cpu::Vcpu>> {
let vc = self
.fd
@@ -244,7 +244,7 @@ impl vm::Vm for KvmVm {
fd: vc,
#[cfg(target_arch = "x86_64")]
msrs: self.msrs.clone(),
-vmmops,
+vm_ops,
#[cfg(target_arch = "x86_64")]
hyperv_synic: AtomicBool::new(false),
};
@@ -816,7 +816,7 @@ pub struct KvmVcpu {
fd: VcpuFd,
#[cfg(target_arch = "x86_64")]
msrs: MsrEntries,
-vmmops: Option<Arc<dyn vm::VmmOps>>,
+vm_ops: Option<Arc<dyn vm::VmOps>>,
#[cfg(target_arch = "x86_64")]
hyperv_synic: AtomicBool,
}
@@ -1056,8 +1056,8 @@ impl cpu::Vcpu for KvmVcpu {
Ok(run) => match run {
#[cfg(target_arch = "x86_64")]
VcpuExit::IoIn(addr, data) => {
-if let Some(vmmops) = &self.vmmops {
-return vmmops
+if let Some(vm_ops) = &self.vm_ops {
+return vm_ops
.pio_read(addr.into(), data)
.map(|_| cpu::VmExit::Ignore)
.map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()));
@@ -1067,8 +1067,8 @@ impl cpu::Vcpu for KvmVcpu {
}
#[cfg(target_arch = "x86_64")]
VcpuExit::IoOut(addr, data) => {
-if let Some(vmmops) = &self.vmmops {
-return vmmops
+if let Some(vm_ops) = &self.vm_ops {
+return vm_ops
.pio_write(addr.into(), data)
.map(|_| cpu::VmExit::Ignore)
.map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()));
@@ -1100,8 +1100,8 @@ impl cpu::Vcpu for KvmVcpu {
}
VcpuExit::MmioRead(addr, data) => {
-if let Some(vmmops) = &self.vmmops {
-return vmmops
+if let Some(vm_ops) = &self.vm_ops {
+return vm_ops
.mmio_read(addr, data)
.map(|_| cpu::VmExit::Ignore)
.map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()));
@@ -1110,8 +1110,8 @@ impl cpu::Vcpu for KvmVcpu {
Ok(cpu::VmExit::MmioRead(addr, data))
}
VcpuExit::MmioWrite(addr, data) => {
-if let Some(vmmops) = &self.vmmops {
-return vmmops
+if let Some(vm_ops) = &self.vm_ops {
+return vm_ops
.mmio_write(addr, data)
.map(|_| cpu::VmExit::Ignore)
.map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()));

hypervisor/src/mshv/mod.rs

@@ -11,7 +11,7 @@ use crate::cpu;
use crate::cpu::Vcpu;
use crate::hypervisor;
use crate::vec_with_array_field;
-use crate::vm::{self, VmmOps};
+use crate::vm::{self, VmOps};
pub use mshv_bindings::*;
pub use mshv_ioctls::IoEventAddress;
use mshv_ioctls::{set_registers_64, Mshv, NoDatamatch, VcpuFd, VmFd};
@@ -122,7 +122,7 @@ impl hypervisor::Hypervisor for MshvHypervisor {
fd: vm_fd,
msrs,
hv_state: hv_state_init(),
-vmmops: None,
+vm_ops: None,
dirty_log_slots: Arc::new(RwLock::new(HashMap::new())),
}))
}
@@ -151,7 +151,7 @@ pub struct MshvVcpu {
cpuid: CpuId,
msrs: MsrEntries,
hv_state: Arc<RwLock<HvState>>, // Mshv State
-vmmops: Option<Arc<dyn vm::VmmOps>>,
+vm_ops: Option<Arc<dyn vm::VmOps>>,
}
/// Implementation of Vcpu trait for Microsoft Hypervisor
@@ -355,14 +355,14 @@ impl cpu::Vcpu for MshvVcpu {
if is_write {
let data = (info.rax as u32).to_le_bytes();
-if let Some(vmmops) = &self.vmmops {
-vmmops
+if let Some(vm_ops) = &self.vm_ops {
+vm_ops
.pio_write(port.into(), &data[0..len])
.map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;
}
} else {
-if let Some(vmmops) = &self.vmmops {
-vmmops
+if let Some(vm_ops) = &self.vm_ops {
+vm_ops
.pio_read(port.into(), &mut data[0..len])
.map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;
}
@@ -655,9 +655,9 @@ impl<'a> PlatformEmulator for MshvEmulatorContext<'a> {
gpa
);
-if let Some(vmmops) = &self.vcpu.vmmops {
-if vmmops.guest_mem_read(gpa, data).is_err() {
-vmmops
+if let Some(vm_ops) = &self.vcpu.vm_ops {
+if vm_ops.guest_mem_read(gpa, data).is_err() {
+vm_ops
.mmio_read(gpa, data)
.map_err(|e| PlatformError::MemoryReadFailure(e.into()))?;
}
@@ -675,9 +675,9 @@ impl<'a> PlatformEmulator for MshvEmulatorContext<'a> {
gpa
);
-if let Some(vmmops) = &self.vcpu.vmmops {
-if vmmops.guest_mem_write(gpa, data).is_err() {
-vmmops
+if let Some(vm_ops) = &self.vcpu.vm_ops {
+if vm_ops.guest_mem_write(gpa, data).is_err() {
+vm_ops
.mmio_write(gpa, data)
.map_err(|e| PlatformError::MemoryWriteFailure(e.into()))?;
}
@@ -746,7 +746,7 @@ pub struct MshvVm {
msrs: MsrEntries,
// Hypervisor State
hv_state: Arc<RwLock<HvState>>,
-vmmops: Option<Arc<dyn vm::VmmOps>>,
+vm_ops: Option<Arc<dyn vm::VmOps>>,
dirty_log_slots: Arc<RwLock<HashMap<u64, MshvDirtyLogSlot>>>,
}
@@ -816,7 +816,7 @@ impl vm::Vm for MshvVm {
fn create_vcpu(
&self,
id: u8,
-vmmops: Option<Arc<dyn VmmOps>>,
+vm_ops: Option<Arc<dyn VmOps>>,
) -> vm::Result<Arc<dyn cpu::Vcpu>> {
let vcpu_fd = self
.fd
@@ -828,7 +828,7 @@ impl vm::Vm for MshvVm {
cpuid: CpuId::new(1).unwrap(),
msrs: self.msrs.clone(),
hv_state: self.hv_state.clone(),
-vmmops,
+vm_ops,
};
Ok(Arc::new(vcpu))
}

hypervisor/src/vm.rs

@@ -235,7 +235,7 @@ pub trait Vm: Send + Sync {
/// Unregister an event that will, when signaled, trigger the `gsi` IRQ.
fn unregister_irqfd(&self, fd: &EventFd, gsi: u32) -> Result<()>;
/// Creates a new KVM vCPU file descriptor and maps the memory corresponding
-fn create_vcpu(&self, id: u8, vmmops: Option<Arc<dyn VmmOps>>) -> Result<Arc<dyn Vcpu>>;
+fn create_vcpu(&self, id: u8, vm_ops: Option<Arc<dyn VmOps>>) -> Result<Arc<dyn Vcpu>>;
/// Registers an event to be signaled whenever a certain address is written to.
fn register_ioevent(
&self,
@@ -309,7 +309,7 @@ pub trait Vm: Send + Sync {
) -> Result<()>;
}
-pub trait VmmOps: Send + Sync {
+pub trait VmOps: Send + Sync {
fn guest_mem_write(&self, gpa: u64, buf: &[u8]) -> Result<usize>;
fn guest_mem_read(&self, gpa: u64, buf: &mut [u8]) -> Result<usize>;
fn mmio_read(&self, gpa: u64, data: &mut [u8]) -> Result<()>;

vmm/src/cpu.rs

@@ -34,7 +34,7 @@ use hypervisor::kvm::kvm_bindings;
use hypervisor::x86_64::{SpecialRegisters, StandardRegisters};
#[cfg(target_arch = "x86_64")]
use hypervisor::CpuId;
-use hypervisor::{vm::VmmOps, CpuState, HypervisorCpuError, VmExit};
+use hypervisor::{vm::VmOps, CpuState, HypervisorCpuError, VmExit};
#[cfg(feature = "tdx")]
use hypervisor::{TdxExitDetails, TdxExitStatus};
use libc::{c_void, siginfo_t};
@@ -255,14 +255,14 @@ impl Vcpu {
///
/// * `id` - Represents the CPU number between [0, max vcpus).
/// * `vm` - The virtual machine this vcpu will get attached to.
-/// * `vmmops` - Optional object for exit handling.
+/// * `vm_ops` - Optional object for exit handling.
pub fn new(
id: u8,
vm: &Arc<dyn hypervisor::Vm>,
-vmmops: Option<Arc<dyn VmmOps>>,
+vm_ops: Option<Arc<dyn VmOps>>,
) -> Result<Self> {
let vcpu = vm
-.create_vcpu(id, vmmops)
+.create_vcpu(id, vm_ops)
.map_err(|e| Error::VcpuCreate(e.into()))?;
// Initially the cpuid per vCPU is the one supported by this VM.
Ok(Vcpu {
@@ -412,7 +412,7 @@ pub struct CpuManager {
selected_cpu: u8,
vcpus: Vec<Arc<Mutex<Vcpu>>>,
seccomp_action: SeccompAction,
-vmmops: Arc<dyn VmmOps>,
+vm_ops: Arc<dyn VmOps>,
#[cfg_attr(target_arch = "aarch64", allow(dead_code))]
acpi_address: Option<GuestAddress>,
proximity_domain_per_cpu: BTreeMap<u8, u32>,
@@ -562,7 +562,7 @@ impl CpuManager {
#[cfg(feature = "gdb")] vm_debug_evt: EventFd,
hypervisor: Arc<dyn hypervisor::Hypervisor>,
seccomp_action: SeccompAction,
-vmmops: Arc<dyn VmmOps>,
+vm_ops: Arc<dyn VmOps>,
#[cfg(feature = "tdx")] tdx_enabled: bool,
numa_nodes: &NumaNodes,
) -> Result<Arc<Mutex<CpuManager>>> {
@@ -684,7 +684,7 @@ impl CpuManager {
selected_cpu: 0,
vcpus: Vec::with_capacity(usize::from(config.max_vcpus)),
seccomp_action,
-vmmops,
+vm_ops,
acpi_address,
proximity_domain_per_cpu,
affinity,
@@ -713,7 +713,7 @@ impl CpuManager {
) -> Result<()> {
info!("Creating vCPU: cpu_id = {}", cpu_id);
-let mut vcpu = Vcpu::new(cpu_id, &self.vm, Some(self.vmmops.clone()))?;
+let mut vcpu = Vcpu::new(cpu_id, &self.vm, Some(self.vm_ops.clone()))?;
if let Some(snapshot) = snapshot {
// AArch64 vCPUs should be initialized after created.

vmm/src/vm.rs

@@ -49,7 +49,7 @@ use devices::interrupt_controller::{self, InterruptController};
use devices::AcpiNotificationFlags;
#[cfg(all(target_arch = "x86_64", feature = "gdb"))]
use gdbstub_arch::x86::reg::X86_64CoreRegs;
-use hypervisor::vm::{HypervisorVmError, VmmOps};
+use hypervisor::vm::{HypervisorVmError, VmOps};
use linux_loader::cmdline::Cmdline;
#[cfg(target_arch = "x86_64")]
use linux_loader::loader::elf::PvhBootCapability::PvhEntryPresent;
@@ -343,7 +343,7 @@ impl VmState {
}
}
-struct VmOps {
+struct VmOpsHandler {
memory: GuestMemoryAtomic<GuestMemoryMmap>,
#[cfg(target_arch = "x86_64")]
io_bus: Arc<Bus>,
@@ -352,7 +352,7 @@ struct VmOps {
pci_config_io: Arc<Mutex<dyn BusDevice>>,
}
-impl VmmOps for VmOps {
+impl VmOps for VmOpsHandler {
fn guest_mem_write(&self, gpa: u64, buf: &[u8]) -> hypervisor::vm::Result<usize> {
self.memory
.memory()
@@ -544,13 +544,11 @@ impl Vm {
#[cfg(target_arch = "x86_64")]
let io_bus = Arc::clone(device_manager.lock().unwrap().io_bus());
let mmio_bus = Arc::clone(device_manager.lock().unwrap().mmio_bus());
-// Create the VmOps structure, which implements the VmmOps trait.
-// And send it to the hypervisor.
#[cfg(target_arch = "x86_64")]
let pci_config_io =
device_manager.lock().unwrap().pci_config_io() as Arc<Mutex<dyn BusDevice>>;
-let vm_ops: Arc<dyn VmmOps> = Arc::new(VmOps {
+let vm_ops: Arc<dyn VmOps> = Arc::new(VmOpsHandler {
memory,
#[cfg(target_arch = "x86_64")]
io_bus,