Compare commits

..

No commits in common. "3fa02b34ca05894adfd1ce9fd3073a473c9361ed" and "fd854c7339c189bf9e27a898d6aff8bb433a14d1" have entirely different histories.

6 changed files with 100 additions and 209 deletions

View File

@@ -26,7 +26,7 @@ pub use self::device::{
};
pub use self::msi::{msi_num_enabled_vectors, MsiCap, MsiConfig};
pub use self::msix::{MsixCap, MsixConfig, MsixTableEntry, MSIX_CONFIG_ID, MSIX_TABLE_ENTRY_SIZE};
pub use self::vfio::{MmioRegion, VfioDmaMapping, VfioPciDevice, VfioPciError};
pub use self::vfio::{VfioPciDevice, VfioPciError};
pub use self::vfio_user::{VfioUserDmaMapping, VfioUserPciDevice, VfioUserPciDeviceError};
use serde::de::Visitor;
use std::fmt::{self, Display};

View File

@@ -32,12 +32,11 @@ use vm_allocator::page_size::{
align_page_size_down, align_page_size_up, is_4k_aligned, is_4k_multiple, is_page_size_aligned,
};
use vm_allocator::{AddressAllocator, SystemAllocator};
use vm_device::dma_mapping::ExternalDmaMapping;
use vm_device::interrupt::{
InterruptIndex, InterruptManager, InterruptSourceGroup, MsiIrqGroupConfig,
};
use vm_device::{BusDevice, Resource};
use vm_memory::{Address, GuestAddress, GuestAddressSpace, GuestMemory, GuestUsize};
use vm_memory::{Address, GuestAddress, GuestUsize};
use vm_migration::{
Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable, VersionMapped,
};
@@ -275,48 +274,6 @@ pub struct MmioRegion {
pub(crate) index: u32,
pub(crate) user_memory_regions: Vec<UserMemoryRegion>,
}
/// Range-lookup helpers over a collection of device MMIO regions.
trait MmioRegionRange {
    /// Returns true if the range `[guest_addr, guest_addr + size)` lies
    /// entirely within a single MMIO region.
    fn check_range(&self, guest_addr: u64, size: u64) -> bool;
    /// Translates `guest_addr` into the corresponding host user-space address,
    /// or an `io::Error` if no region backs that address.
    fn find_user_address(&self, guest_addr: u64) -> Result<u64, io::Error>;
}
impl MmioRegionRange for Vec<MmioRegion> {
    /// Check if a guest address range is fully contained by one mmio region.
    fn check_range(&self, guest_addr: u64, size: u64) -> bool {
        // The candidate range's end does not depend on the region, so compute
        // it once; a range that overflows u64 can never fit in any region.
        let Some(guest_addr_end) = guest_addr.checked_add(size) else {
            return false;
        };
        self.iter().any(|region| {
            // A region whose end overflows is skipped rather than aborting
            // the whole search: a later, well-formed region may still match.
            region
                .start
                .raw_value()
                .checked_add(region.length)
                .map_or(false, |region_end| {
                    guest_addr >= region.start.raw_value() && guest_addr_end <= region_end
                })
        })
    }

    /// Locate the user region address for a guest address within all mmio
    /// regions, by scanning each region's user memory regions.
    fn find_user_address(&self, guest_addr: u64) -> Result<u64, io::Error> {
        for region in self.iter() {
            for user_region in region.user_memory_regions.iter() {
                if guest_addr >= user_region.start
                    && guest_addr < user_region.start + user_region.size
                {
                    // The offset into the user region maps 1:1 onto the host
                    // mapping starting at host_addr.
                    return Ok(user_region.host_addr + (guest_addr - user_region.start));
                }
            }
        }
        Err(io::Error::new(
            io::ErrorKind::Other,
            format!("unable to find user address: 0x{guest_addr:x}"),
        ))
    }
}
#[derive(Debug, Error)]
pub enum VfioError {
#[error("Kernel VFIO error: {0}")]
@@ -1928,80 +1885,3 @@ impl Snapshottable for VfioPciDevice {
}
impl Transportable for VfioPciDevice {}
impl Migratable for VfioPciDevice {}
/// This structure implements the ExternalDmaMapping trait. It is meant to
/// be used when the caller tries to provide a way to update the mappings
/// associated with a specific VFIO container.
pub struct VfioDmaMapping<M: GuestAddressSpace> {
    // VFIO container whose DMA mappings are updated on map()/unmap().
    container: Arc<VfioContainer>,
    // Guest memory, used to translate guest-physical to host addresses.
    memory: Arc<M>,
    // Device MMIO regions, consulted when an address is not backed by RAM.
    mmio_regions: Arc<Mutex<Vec<MmioRegion>>>,
}
impl<M: GuestAddressSpace> VfioDmaMapping<M> {
    /// Create a DmaMapping object.
    ///
    /// # Parameters
    /// * `container`: VFIO container object.
    /// * `memory`: guest memory to mmap.
    /// * `mmio_regions`: mmio_regions to mmap.
    pub fn new(
        container: Arc<VfioContainer>,
        memory: Arc<M>,
        mmio_regions: Arc<Mutex<Vec<MmioRegion>>>,
    ) -> Self {
        VfioDmaMapping {
            container,
            memory,
            mmio_regions,
        }
    }
}
impl<M: GuestAddressSpace + Sync + Send> ExternalDmaMapping for VfioDmaMapping<M> {
    /// Map `size` bytes at guest physical address `gpa` to IOVA `iova` in the
    /// VFIO container. The host address is resolved from guest RAM first and,
    /// failing that, from the device MMIO regions.
    fn map(&self, iova: u64, gpa: u64, size: u64) -> std::result::Result<(), io::Error> {
        let mem = self.memory.memory();
        let guest_addr = GuestAddress(gpa);
        let user_addr = if mem.check_range(guest_addr, size as usize) {
            match mem.get_host_address(guest_addr) {
                Ok(t) => t as u64,
                Err(e) => {
                    return Err(io::Error::new(
                        io::ErrorKind::Other,
                        format!("unable to retrieve user address for gpa 0x{gpa:x} from guest memory region: {e}")
                    ));
                }
            }
        } else {
            // Take the MMIO regions lock exactly once so the range check and
            // the address lookup see a consistent snapshot of the regions
            // (the previous code locked twice, leaving a window in which the
            // region list could change between the check and the lookup).
            let mmio_regions = self.mmio_regions.lock().unwrap();
            if mmio_regions.check_range(gpa, size) {
                mmio_regions.find_user_address(gpa)?
            } else {
                return Err(io::Error::new(
                    io::ErrorKind::Other,
                    format!("failed to locate guest address 0x{gpa:x} in guest memory"),
                ));
            }
        };
        self.container
            .vfio_dma_map(iova, size, user_addr)
            .map_err(|e| {
                io::Error::new(
                    io::ErrorKind::Other,
                    format!(
                        "failed to map memory for VFIO container, \
                         iova 0x{iova:x}, gpa 0x{gpa:x}, size 0x{size:x}: {e:?}"
                    ),
                )
            })
    }

    /// Remove the DMA mapping for `size` bytes at IOVA `iova` from the
    /// VFIO container.
    fn unmap(&self, iova: u64, size: u64) -> std::result::Result<(), io::Error> {
        self.container.vfio_dma_unmap(iova, size).map_err(|e| {
            io::Error::new(
                io::ErrorKind::Other,
                format!(
                    "failed to unmap memory for VFIO container, \
                     iova 0x{iova:x}, size 0x{size:x}: {e:?}"
                ),
            )
        })
    }
}

View File

@@ -407,15 +407,6 @@ impl Request {
let bypass =
(req.flags & VIRTIO_IOMMU_ATTACH_F_BYPASS) == VIRTIO_IOMMU_ATTACH_F_BYPASS;
let mut old_domain_id = domain_id;
if let Some(&id) = mapping.endpoints.read().unwrap().get(&endpoint) {
old_domain_id = id;
}
if old_domain_id != domain_id {
detach_endpoint_from_domain(endpoint, old_domain_id, mapping, ext_mapping)?;
}
// Add endpoint associated with specific domain
mapping
.endpoints
@@ -423,19 +414,6 @@ impl Request {
.unwrap()
.insert(endpoint, domain_id);
// If any other mappings exist in the domain for other containers,
// make sure to issue these mappings for the new endpoint/container
if let Some(domain_mappings) = &mapping.domains.read().unwrap().get(&domain_id)
{
if let Some(ext_map) = ext_mapping.get(&endpoint) {
for (virt_start, addr_map) in &domain_mappings.mappings {
ext_map
.map(*virt_start, addr_map.gpa, addr_map.size)
.map_err(Error::ExternalUnmapping)?;
}
}
}
// Add new domain with no mapping if the entry didn't exist yet
let mut domains = mapping.domains.write().unwrap();
let domain = Domain {
@@ -461,7 +439,22 @@ impl Request {
let endpoint = req.endpoint;
// Remove endpoint associated with specific domain
detach_endpoint_from_domain(endpoint, domain_id, mapping, ext_mapping)?;
mapping.endpoints.write().unwrap().remove(&endpoint);
// After all endpoints have been successfully detached from a
// domain, the domain can be removed. This means we must remove
// the mappings associated with this domain.
if mapping
.endpoints
.write()
.unwrap()
.iter()
.filter(|(_, &d)| d == domain_id)
.count()
== 0
{
mapping.domains.write().unwrap().remove(&domain_id);
}
}
VIRTIO_IOMMU_T_MAP => {
if desc_size_left != size_of::<VirtioIommuReqMap>() {
@@ -498,9 +491,7 @@ impl Request {
.map(|(&e, _)| e)
.collect();
// For viommu all endpoints receive their own VFIO container, as a result
// Each endpoint within the domain needs to be separately mapped, as the
// mapping is done on a per-container level, not a per-domain level
// Trigger external mapping if necessary.
for endpoint in endpoints {
if let Some(ext_map) = ext_mapping.get(&endpoint) {
let size = req.virt_end - req.virt_start + 1;
@@ -649,41 +640,6 @@ impl Request {
}
}
/// Detach `endpoint` from `domain_id`: drop the endpoint bookkeeping, undo
/// the endpoint's external DMA mappings, and garbage-collect the domain once
/// no endpoint references it anymore.
fn detach_endpoint_from_domain(
    endpoint: u32,
    domain_id: u32,
    mapping: &Arc<IommuMapping>,
    ext_mapping: &BTreeMap<u32, Arc<dyn ExternalDmaMapping>>,
) -> result::Result<(), Error> {
    // Remove endpoint associated with specific domain
    mapping.endpoints.write().unwrap().remove(&endpoint);
    // Trigger external unmapping for the endpoint if necessary.
    // NOTE: the domains read lock is released before the write lock below is
    // taken, so the two sections must not assume an atomic view of `domains`.
    if let Some(domain_mappings) = &mapping.domains.read().unwrap().get(&domain_id) {
        if let Some(ext_map) = ext_mapping.get(&endpoint) {
            for (virt_start, addr_map) in &domain_mappings.mappings {
                ext_map
                    .unmap(*virt_start, addr_map.size)
                    .map_err(Error::ExternalUnmapping)?;
            }
        }
    }
    // If this was the last endpoint attached to the domain, the domain's
    // mappings are no longer reachable and the domain entry can be removed.
    if mapping
        .endpoints
        .write()
        .unwrap()
        .iter()
        .filter(|(_, &d)| d == domain_id)
        .count()
        == 0
    {
        mapping.domains.write().unwrap().remove(&domain_id);
    }
    Ok(())
}
struct IommuEpollHandler {
mem: GuestMemoryAtomic<GuestMemoryMmap>,
request_queue: Queue,

View File

@@ -2,6 +2,8 @@
//
// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
pub mod vfio;
/// Trait meant for triggering the DMA mapping update related to an external
/// device not managed fully through virtio. It is dedicated to virtio-iommu
/// in order to trigger the map update anytime the mapping is updated from the

View File

@@ -0,0 +1,70 @@
// Copyright © 2021 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
use crate::dma_mapping::ExternalDmaMapping;
use std::io;
use std::sync::Arc;
use vfio_ioctls::VfioContainer;
use vm_memory::{GuestAddress, GuestAddressSpace, GuestMemory};
/// This structure implements the ExternalDmaMapping trait. It is meant to
/// be used when the caller tries to provide a way to update the mappings
/// associated with a specific VFIO container.
pub struct VfioDmaMapping<M: GuestAddressSpace> {
    // VFIO container whose DMA mappings are updated on map()/unmap().
    container: Arc<VfioContainer>,
    // Guest memory, used to translate guest-physical to host addresses.
    memory: Arc<M>,
}
impl<M: GuestAddressSpace> VfioDmaMapping<M> {
    /// Create a DmaMapping object.
    ///
    /// # Parameters
    /// * `container`: VFIO container object.
    /// * `memory`: guest memory to mmap.
    pub fn new(container: Arc<VfioContainer>, memory: Arc<M>) -> Self {
        VfioDmaMapping { container, memory }
    }
}
impl<M: GuestAddressSpace + Sync + Send> ExternalDmaMapping for VfioDmaMapping<M> {
    /// Map `size` bytes of guest memory at guest physical address `gpa` into
    /// the VFIO container at IOVA `iova`.
    fn map(&self, iova: u64, gpa: u64, size: u64) -> std::result::Result<(), io::Error> {
        let mem = self.memory.memory();
        let guest_addr = GuestAddress(gpa);
        let user_addr = if mem.check_range(guest_addr, size as usize) {
            // Propagate the translation failure as an error instead of
            // panicking: this path runs at guest runtime in library code
            // that already returns io::Result.
            mem.get_host_address(guest_addr).map_err(|e| {
                io::Error::new(
                    io::ErrorKind::Other,
                    format!(
                        "unable to retrieve user address for gpa 0x{gpa:x} \
                         from guest memory region: {e}"
                    ),
                )
            })? as u64
        } else {
            return Err(io::Error::new(
                io::ErrorKind::Other,
                format!(
                    "failed to convert guest address 0x{gpa:x} into \
                     host user virtual address"
                ),
            ));
        };
        self.container
            .vfio_dma_map(iova, size, user_addr)
            .map_err(|e| {
                io::Error::new(
                    io::ErrorKind::Other,
                    format!(
                        "failed to map memory for VFIO container, \
                         iova 0x{iova:x}, gpa 0x{gpa:x}, size 0x{size:x}: {e:?}"
                    ),
                )
            })
    }

    /// Remove the DMA mapping for `size` bytes at IOVA `iova` from the
    /// VFIO container.
    fn unmap(&self, iova: u64, size: u64) -> std::result::Result<(), io::Error> {
        self.container.vfio_dma_unmap(iova, size).map_err(|e| {
            io::Error::new(
                io::ErrorKind::Other,
                format!(
                    "failed to unmap memory for VFIO container, \
                     iova 0x{iova:x}, size 0x{size:x}: {e:?}"
                ),
            )
        })
    }
}

View File

@@ -58,8 +58,8 @@ use libc::{
O_TMPFILE, PROT_READ, PROT_WRITE, TCSANOW,
};
use pci::{
DeviceRelocation, MmioRegion, PciBarRegionType, PciBdf, PciDevice, VfioDmaMapping,
VfioPciDevice, VfioUserDmaMapping, VfioUserPciDevice, VfioUserPciDeviceError,
DeviceRelocation, PciBarRegionType, PciBdf, PciDevice, VfioPciDevice, VfioUserDmaMapping,
VfioUserPciDevice, VfioUserPciDeviceError,
};
use rate_limiter::group::RateLimiterGroup;
use seccompiler::SeccompAction;
@@ -85,6 +85,7 @@ use virtio_devices::{
};
use virtio_devices::{Endpoint, IommuMapping};
use vm_allocator::{AddressAllocator, SystemAllocator};
use vm_device::dma_mapping::vfio::VfioDmaMapping;
use vm_device::dma_mapping::ExternalDmaMapping;
use vm_device::interrupt::{
InterruptIndex, InterruptManager, LegacyIrqGroupConfig, MsiIrqGroupConfig,
@@ -965,8 +966,6 @@ pub struct DeviceManager {
snapshot: Option<Snapshot>,
rate_limit_groups: HashMap<String, Arc<RateLimiterGroup>>,
mmio_regions: Arc<Mutex<Vec<MmioRegion>>>,
}
impl DeviceManager {
@@ -1197,7 +1196,6 @@ impl DeviceManager {
acpi_platform_addresses: AcpiPlatformAddresses::default(),
snapshot,
rate_limit_groups,
mmio_regions: Arc::new(Mutex::new(Vec::new())),
};
let device_manager = Arc::new(Mutex::new(device_manager));
@@ -3426,7 +3424,6 @@ impl DeviceManager {
let vfio_mapping = Arc::new(VfioDmaMapping::new(
Arc::clone(&vfio_container),
Arc::new(self.memory_manager.lock().unwrap().guest_memory()),
Arc::clone(&self.mmio_regions),
));
if let Some(iommu) = &self.iommu_device {
@@ -3471,7 +3468,6 @@ impl DeviceManager {
let vfio_mapping = Arc::new(VfioDmaMapping::new(
Arc::clone(&vfio_container),
Arc::new(self.memory_manager.lock().unwrap().guest_memory()),
Arc::clone(&self.mmio_regions),
));
for virtio_mem_device in self.virtio_mem_devices.iter() {
@@ -3534,10 +3530,6 @@ impl DeviceManager {
.map_mmio_regions()
.map_err(DeviceManagerError::VfioMapRegion)?;
for mmio_region in vfio_pci_device.lock().unwrap().mmio_regions() {
self.mmio_regions.lock().unwrap().push(mmio_region);
}
let mut node = device_node!(vfio_name, vfio_pci_device);
// Update the device tree with correct resource information.
@@ -4209,21 +4201,12 @@ impl DeviceManager {
let (pci_device, bus_device, virtio_device, remove_dma_handler) = match pci_device_handle {
// No need to remove any virtio-mem mapping here as the container outlives all devices
PciDeviceHandle::Vfio(vfio_pci_device) => {
for mmio_region in vfio_pci_device.lock().unwrap().mmio_regions() {
self.mmio_regions
.lock()
.unwrap()
.retain(|x| x.start != mmio_region.start)
}
(
PciDeviceHandle::Vfio(vfio_pci_device) => (
Arc::clone(&vfio_pci_device) as Arc<Mutex<dyn PciDevice>>,
Arc::clone(&vfio_pci_device) as Arc<Mutex<dyn BusDevice>>,
None as Option<Arc<Mutex<dyn virtio_devices::VirtioDevice>>>,
false,
)
}
),
PciDeviceHandle::Virtio(virtio_pci_device) => {
let dev = virtio_pci_device.lock().unwrap();
let bar_addr = dev.config_bar_addr();