From c73c6039c3572edeef2faac801640d7ffa854507 Mon Sep 17 00:00:00 2001
From: Sebastien Boeuf
Date: Fri, 11 Mar 2022 12:31:25 +0100
Subject: [PATCH] vmm: Enable vDPA support

Based on the newly added Vdpa device along with the new vdpa parameter,
this patch enables support for vDPA devices.

It's important to note this is the only virtio device for which we
provide an ExternalDmaMapping instance. This allows the right DMA
ranges to be mapped and unmapped.

Signed-off-by: Sebastien Boeuf
---
 vmm/src/device_manager.rs  | 77 +++++++++++++++++++++++++++++++++++++-
 vmm/src/seccomp_filters.rs | 54 ++++++++++++++++++++++++++
 2 files changed, 129 insertions(+), 2 deletions(-)

diff --git a/vmm/src/device_manager.rs b/vmm/src/device_manager.rs
index cea72956d..9a776d908 100644
--- a/vmm/src/device_manager.rs
+++ b/vmm/src/device_manager.rs
@@ -11,7 +11,7 @@ use crate::config::{
     ConsoleOutputMode, DeviceConfig, DiskConfig, FsConfig, NetConfig, PmemConfig, UserDeviceConfig,
-    VhostMode, VmConfig, VsockConfig,
+    VdpaConfig, VhostMode, VmConfig, VsockConfig,
 };
 use crate::device_tree::{DeviceNode, DeviceTree};
 #[cfg(feature = "kvm")]
@@ -89,7 +89,7 @@ use vfio_ioctls::{VfioContainer, VfioDevice};
 use virtio_devices::transport::VirtioPciDevice;
 use virtio_devices::transport::VirtioTransport;
 use virtio_devices::vhost_user::VhostUserConfig;
-use virtio_devices::{AccessPlatformMapping, VirtioMemMappingSource};
+use virtio_devices::{AccessPlatformMapping, VdpaDmaMapping, VirtioMemMappingSource};
 use virtio_devices::{Endpoint, IommuMapping};
 use virtio_devices::{VirtioSharedMemory, VirtioSharedMemoryList};
 use vm_allocator::{AddressAllocator, SystemAllocator};
@@ -133,6 +133,7 @@ const BALLOON_DEVICE_NAME: &str = "_balloon";
 const NET_DEVICE_NAME_PREFIX: &str = "_net";
 const PMEM_DEVICE_NAME_PREFIX: &str = "_pmem";
 const RNG_DEVICE_NAME: &str = "_rng";
+const VDPA_DEVICE_NAME_PREFIX: &str = "_vdpa";
 const VSOCK_DEVICE_NAME_PREFIX: &str = "_vsock";
 const WATCHDOG_DEVICE_NAME: &str = "_watchdog";
 
@@ -176,9 +177,15 @@ pub enum DeviceManagerError {
     /// Cannot create virtio-pmem device
     CreateVirtioPmem(io::Error),
 
+    /// Cannot create vDPA device
+    CreateVdpa(virtio_devices::vdpa::Error),
+
     /// Cannot create virtio-vsock device
     CreateVirtioVsock(io::Error),
 
+    /// Failed to convert Path to &str for the vDPA device.
+    CreateVdpaConvertPath,
+
     /// Failed to convert Path to &str for the virtio-vsock device.
     CreateVsockConvertPath,
 
@@ -1950,6 +1957,9 @@ impl DeviceManager {
         // Add virtio-watchdog device
         devices.append(&mut self.make_virtio_watchdog_devices()?);
 
+        // Add vDPA devices if required
+        devices.append(&mut self.make_vdpa_devices()?);
+
         Ok(devices)
     }
 
@@ -2893,6 +2903,69 @@ impl DeviceManager {
         Ok(devices)
     }
 
+    fn make_vdpa_device(
+        &mut self,
+        vdpa_cfg: &mut VdpaConfig,
+    ) -> DeviceManagerResult<MetaVirtioDevice> {
+        let id = if let Some(id) = &vdpa_cfg.id {
+            id.clone()
+        } else {
+            let id = self.next_device_name(VDPA_DEVICE_NAME_PREFIX)?;
+            vdpa_cfg.id = Some(id.clone());
+            id
+        };
+
+        info!("Creating vDPA device: {:?}", vdpa_cfg);
+
+        let device_path = vdpa_cfg
+            .path
+            .to_str()
+            .ok_or(DeviceManagerError::CreateVdpaConvertPath)?;
+
+        let vdpa_device = Arc::new(Mutex::new(
+            virtio_devices::Vdpa::new(
+                id.clone(),
+                device_path,
+                self.memory_manager.lock().unwrap().guest_memory(),
+                vdpa_cfg.num_queues as u16,
+            )
+            .map_err(DeviceManagerError::CreateVdpa)?,
+        ));
+
+        // Create the DMA handler that is required by the vDPA device
+        let vdpa_mapping = Arc::new(VdpaDmaMapping::new(
+            Arc::clone(&vdpa_device),
+            Arc::new(self.memory_manager.lock().unwrap().guest_memory()),
+        ));
+
+        self.device_tree
+            .lock()
+            .unwrap()
+            .insert(id.clone(), device_node!(id));
+
+        Ok(MetaVirtioDevice {
+            virtio_device: vdpa_device as Arc<Mutex<dyn virtio_devices::VirtioDevice>>,
+            iommu: false,
+            id,
+            pci_segment: vdpa_cfg.pci_segment,
+            dma_handler: Some(vdpa_mapping),
+        })
+    }
+
+    fn make_vdpa_devices(&mut self) -> DeviceManagerResult<Vec<MetaVirtioDevice>> {
+        let mut devices = Vec::new();
+        // Add vdpa if required
+        let mut vdpa_devices = self.config.lock().unwrap().vdpa.clone();
+        if let Some(vdpa_list_cfg) = &mut vdpa_devices {
+            for vdpa_cfg in vdpa_list_cfg.iter_mut() {
+                devices.push(self.make_vdpa_device(vdpa_cfg)?);
+            }
+        }
+        self.config.lock().unwrap().vdpa = vdpa_devices;
+
+        Ok(devices)
+    }
+
     fn next_device_name(&mut self, prefix: &str) -> DeviceManagerResult<String> {
         let start_id = self.device_id_cnt;
         loop {
diff --git a/vmm/src/seccomp_filters.rs b/vmm/src/seccomp_filters.rs
index f0b354329..3660c08ed 100644
--- a/vmm/src/seccomp_filters.rs
+++ b/vmm/src/seccomp_filters.rs
@@ -86,6 +86,27 @@ const VFIO_IOMMU_MAP_DMA: u64 = 0x3b71;
 const VFIO_IOMMU_UNMAP_DMA: u64 = 0x3b72;
 const VFIO_DEVICE_IOEVENTFD: u64 = 0x3b74;
 
+// See include/uapi/linux/vhost.h in the kernel code
+const VHOST_GET_FEATURES: u64 = 0x8008af00;
+const VHOST_SET_FEATURES: u64 = 0x4008af00;
+const VHOST_SET_OWNER: u64 = 0xaf01;
+const VHOST_SET_VRING_NUM: u64 = 0x4008af10;
+const VHOST_SET_VRING_ADDR: u64 = 0x4028af11;
+const VHOST_SET_VRING_BASE: u64 = 0x4008af12;
+const VHOST_SET_VRING_KICK: u64 = 0x4008af20;
+const VHOST_SET_VRING_CALL: u64 = 0x4008af21;
+const VHOST_SET_BACKEND_FEATURES: u64 = 0x4008af25;
+const VHOST_GET_BACKEND_FEATURES: u64 = 0x8008af26;
+const VHOST_VDPA_GET_DEVICE_ID: u64 = 0x8004af70;
+const VHOST_VDPA_GET_STATUS: u64 = 0x8001af71;
+const VHOST_VDPA_SET_STATUS: u64 = 0x4001af72;
+const VHOST_VDPA_GET_CONFIG: u64 = 0x8008af73;
+const VHOST_VDPA_SET_CONFIG: u64 = 0x4008af74;
+const VHOST_VDPA_SET_VRING_ENABLE: u64 = 0x4008af75;
+const VHOST_VDPA_GET_VRING_NUM: u64 = 0x8002af76;
+const VHOST_VDPA_SET_CONFIG_CALL: u64 = 0x4004af77;
+const VHOST_VDPA_GET_IOVA_RANGE: u64 = 0x8010af78;
+
 // See include/uapi/linux/kvm.h in the kernel code.
#[cfg(feature = "kvm")] mod kvm { @@ -262,6 +283,30 @@ fn create_vmm_ioctl_seccomp_rule_common() -> Result, BackendErr and![Cond::new(1, ArgLen::Dword, Eq, VFIO_IOMMU_MAP_DMA)?], and![Cond::new(1, ArgLen::Dword, Eq, VFIO_IOMMU_UNMAP_DMA)?], and![Cond::new(1, ArgLen::Dword, Eq, VFIO_DEVICE_IOEVENTFD)?], + and![Cond::new(1, ArgLen::Dword, Eq, VHOST_GET_FEATURES)?], + and![Cond::new(1, ArgLen::Dword, Eq, VHOST_SET_FEATURES)?], + and![Cond::new(1, ArgLen::Dword, Eq, VHOST_SET_OWNER)?], + and![Cond::new(1, ArgLen::Dword, Eq, VHOST_SET_VRING_NUM)?], + and![Cond::new(1, ArgLen::Dword, Eq, VHOST_SET_VRING_ADDR)?], + and![Cond::new(1, ArgLen::Dword, Eq, VHOST_SET_VRING_BASE)?], + and![Cond::new(1, ArgLen::Dword, Eq, VHOST_SET_VRING_KICK)?], + and![Cond::new(1, ArgLen::Dword, Eq, VHOST_SET_VRING_CALL)?], + and![Cond::new(1, ArgLen::Dword, Eq, VHOST_SET_BACKEND_FEATURES)?], + and![Cond::new(1, ArgLen::Dword, Eq, VHOST_GET_BACKEND_FEATURES)?], + and![Cond::new(1, ArgLen::Dword, Eq, VHOST_VDPA_GET_DEVICE_ID)?], + and![Cond::new(1, ArgLen::Dword, Eq, VHOST_VDPA_GET_STATUS)?], + and![Cond::new(1, ArgLen::Dword, Eq, VHOST_VDPA_SET_STATUS)?], + and![Cond::new(1, ArgLen::Dword, Eq, VHOST_VDPA_GET_CONFIG)?], + and![Cond::new(1, ArgLen::Dword, Eq, VHOST_VDPA_SET_CONFIG)?], + and![Cond::new( + 1, + ArgLen::Dword, + Eq, + VHOST_VDPA_SET_VRING_ENABLE + )?], + and![Cond::new(1, ArgLen::Dword, Eq, VHOST_VDPA_GET_VRING_NUM)?], + and![Cond::new(1, ArgLen::Dword, Eq, VHOST_VDPA_SET_CONFIG_CALL)?], + and![Cond::new(1, ArgLen::Dword, Eq, VHOST_VDPA_GET_IOVA_RANGE)?], ]; let hypervisor_rules = create_vmm_ioctl_seccomp_rule_hypervisor()?; @@ -583,6 +628,15 @@ fn create_vcpu_ioctl_seccomp_rule() -> Result, BackendError> { and![Cond::new(1, ArgLen::Dword, Eq, VFIO_DEVICE_SET_IRQS)?], and![Cond::new(1, ArgLen::Dword, Eq, VFIO_GROUP_UNSET_CONTAINER)?], and![Cond::new(1, ArgLen::Dword, Eq, VFIO_IOMMU_UNMAP_DMA)?], + and![Cond::new(1, ArgLen::Dword, Eq, VHOST_VDPA_SET_STATUS)?], + and![Cond::new(1, ArgLen::Dword, Eq, VHOST_VDPA_GET_CONFIG)?], + and![Cond::new(1, ArgLen::Dword, Eq, VHOST_VDPA_SET_CONFIG)?], + and![Cond::new( + 1, + ArgLen::Dword, + Eq, + VHOST_VDPA_SET_VRING_ENABLE + )?], ]; let hypervisor_rules = create_vcpu_ioctl_seccomp_rule_hypervisor()?;