virtio-mem: Support multiple mappings

For vfio-user the mapping handler is per device and needs to be removed
when the device is unplugged.

For VFIO the mapping handler is for the default VFIO container (used
when no vIOMMU is present; using a vIOMMU does not require mappings
with virtio-mem).

To represent these two use cases, use an enum as the key for the
handlers that are stored.

Signed-off-by: Rob Bradford <robert.bradford@intel.com>
Rob Bradford 2021-09-21 10:00:22 +01:00
parent 6fb88c3c5a
commit fd4f32fa69
2 changed files with 53 additions and 10 deletions
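
The core of the change is the container that holds the handlers. As a rough, self-contained sketch of that data structure (the ExternalDmaMapping trait below is a simplified stand-in for the real trait, and map_on_all_handlers is a made-up helper, not cloud-hypervisor code), the single optional handler becomes a map keyed by the source of the mapping, so the default VFIO container entry and per-device vfio-user entries can coexist and be removed independently:

use std::collections::BTreeMap;
use std::sync::{Arc, Mutex};

// Simplified stand-in for the ExternalDmaMapping trait (illustrative only).
pub trait ExternalDmaMapping: Send + Sync {
    fn map(&self, iova: u64, gpa: u64, size: u64) -> std::io::Result<()>;
    fn unmap(&self, iova: u64, size: u64) -> std::io::Result<()>;
}

// Who registered the handler: the default VFIO container, or a specific
// (vfio-user) device identified by an id.
#[derive(PartialEq, Eq, PartialOrd, Ord)]
pub enum VirtioMemMappingSource {
    Container,
    Device(u32),
}

// The former Option<Arc<dyn ExternalDmaMapping>> field becomes a shared map.
pub type DmaMappingHandlers =
    Arc<Mutex<BTreeMap<VirtioMemMappingSource, Arc<dyn ExternalDmaMapping>>>>;

// On a plug request every registered handler is asked to map the new range
// (hypothetical helper, mirroring the loop the commit adds in MemEpollHandler).
pub fn map_on_all_handlers(handlers: &DmaMappingHandlers, gpa: u64, size: u64) {
    for (_, handler) in handlers.lock().unwrap().iter() {
        if let Err(e) = handler.map(gpa, gpa, size) {
            eprintln!("failed DMA mapping addr 0x{:x} size 0x{:x}: {}", gpa, size, e);
        }
    }
}

Note that the derive list on VirtioMemMappingSource (PartialEq, Eq, PartialOrd, Ord) is exactly what a BTreeMap key requires.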

View File

@@ -25,6 +25,7 @@ use crate::{VirtioInterrupt, VirtioInterruptType};
 use anyhow::anyhow;
 use libc::EFD_NONBLOCK;
 use seccompiler::SeccompAction;
+use std::collections::BTreeMap;
 use std::io;
 use std::mem::size_of;
 use std::os::unix::io::{AsRawFd, RawFd};
@@ -427,7 +428,7 @@ struct MemEpollHandler {
     kill_evt: EventFd,
     pause_evt: EventFd,
     hugepages: bool,
-    dma_mapping_handler: Option<Arc<dyn ExternalDmaMapping>>,
+    dma_mapping_handlers: Arc<Mutex<BTreeMap<VirtioMemMappingSource, Arc<dyn ExternalDmaMapping>>>>,
 }
 
 impl MemEpollHandler {
@@ -504,10 +505,11 @@ impl MemEpollHandler {
            .unwrap()
            .set_range(first_block_index, nb_blocks, plug);
 
+        let handlers = self.dma_mapping_handlers.lock().unwrap();
         if plug {
             let mut gpa = addr;
             for _ in 0..nb_blocks {
-                if let Some(handler) = &self.dma_mapping_handler {
+                for (_, handler) in handlers.iter() {
                     if let Err(e) = handler.map(gpa, gpa, config.block_size) {
                         error!(
                             "failed DMA mapping addr 0x{:x} size 0x{:x}: {}",
@@ -522,7 +524,7 @@
 
             config.plugged_size += size;
         } else {
-            if let Some(handler) = &self.dma_mapping_handler {
+            for (_, handler) in handlers.iter() {
                 if let Err(e) = handler.unmap(addr, size) {
                     error!(
                         "failed DMA unmapping addr 0x{:x} size 0x{:x}: {}",
@@ -547,10 +549,11 @@
        // Remaining plugged blocks are unmapped.
        if config.plugged_size > 0 {
+            let handlers = self.dma_mapping_handlers.lock().unwrap();
            for (idx, plugged) in self.blocks_state.lock().unwrap().inner().iter().enumerate() {
                if *plugged {
                    let gpa = config.addr + (idx as u64 * config.block_size);
-                    if let Some(handler) = &self.dma_mapping_handler {
+                    for (_, handler) in handlers.iter() {
                        if let Err(e) = handler.unmap(gpa, config.block_size) {
                            error!(
                                "failed DMA unmapping addr 0x{:x} size 0x{:x}: {}",
@@ -731,7 +734,12 @@ impl EpollHelperHandler for MemEpollHandler {
     }
 }
 
-// Virtio device for exposing entropy to the guest OS through virtio.
+#[derive(PartialEq, Eq, PartialOrd, Ord)]
+pub enum VirtioMemMappingSource {
+    Container,
+    Device(u32),
+}
+
 pub struct Mem {
     common: VirtioCommon,
     id: String,
@@ -741,7 +749,7 @@ pub struct Mem {
     config: Arc<Mutex<VirtioMemConfig>>,
     seccomp_action: SeccompAction,
     hugepages: bool,
-    dma_mapping_handler: Option<Arc<dyn ExternalDmaMapping>>,
+    dma_mapping_handlers: Arc<Mutex<BTreeMap<VirtioMemMappingSource, Arc<dyn ExternalDmaMapping>>>>,
     blocks_state: Arc<Mutex<BlocksState>>,
     exit_evt: EventFd,
 }
@@ -829,7 +837,7 @@ impl Mem {
            config: Arc::new(Mutex::new(config)),
            seccomp_action,
            hugepages,
-            dma_mapping_handler: None,
+            dma_mapping_handlers: Arc::new(Mutex::new(BTreeMap::new())),
            blocks_state: Arc::new(Mutex::new(BlocksState(vec![
                false;
                (config.region_size / config.block_size)
@@ -841,6 +849,7 @@ impl Mem {
     pub fn add_dma_mapping_handler(
         &mut self,
+        source: VirtioMemMappingSource,
         handler: Arc<dyn ExternalDmaMapping>,
     ) -> result::Result<(), Error> {
         let config = self.config.lock().unwrap();
@@ -856,7 +865,37 @@ impl Mem {
            }
        }
 
-        self.dma_mapping_handler = Some(handler);
+        self.dma_mapping_handlers
+            .lock()
+            .unwrap()
+            .insert(source, handler);
+
+        Ok(())
+    }
+
+    pub fn remove_dma_mapping_handler(
+        &mut self,
+        source: VirtioMemMappingSource,
+    ) -> result::Result<(), Error> {
+        let handler = self
+            .dma_mapping_handlers
+            .lock()
+            .unwrap()
+            .remove(&source)
+            .ok_or(Error::InvalidDmaMappingHandler)?;
+
+        let config = self.config.lock().unwrap();
+        if config.plugged_size > 0 {
+            for (idx, plugged) in self.blocks_state.lock().unwrap().inner().iter().enumerate() {
+                if *plugged {
+                    let gpa = config.addr + (idx as u64 * config.block_size);
+                    handler
+                        .unmap(gpa, config.block_size)
+                        .map_err(Error::DmaUnmap)?;
+                }
+            }
+        }
+
         Ok(())
     }
@@ -915,7 +954,7 @@ impl VirtioDevice for Mem {
            kill_evt,
            pause_evt,
            hugepages: self.hugepages,
-            dma_mapping_handler: self.dma_mapping_handler.clone(),
+            dma_mapping_handlers: Arc::clone(&self.dma_mapping_handlers),
        };
 
        handler

View File

@@ -91,6 +91,7 @@ use vfio_ioctls::{VfioContainer, VfioDevice};
 use virtio_devices::transport::VirtioPciDevice;
 use virtio_devices::transport::VirtioTransport;
 use virtio_devices::vhost_user::VhostUserConfig;
+use virtio_devices::VirtioMemMappingSource;
 use virtio_devices::{DmaRemapping, Endpoint, IommuMapping};
 use virtio_devices::{VirtioSharedMemory, VirtioSharedMemoryList};
 use vm_allocator::SystemAllocator;
@@ -2950,7 +2951,10 @@ impl DeviceManager {
                virtio_mem_device
                    .lock()
                    .unwrap()
-                    .add_dma_mapping_handler(vfio_mapping.clone())
+                    .add_dma_mapping_handler(
+                        VirtioMemMappingSource::Container,
+                        vfio_mapping.clone(),
+                    )
                    .map_err(DeviceManagerError::AddDmaMappingHandlerVirtioMem)?;
            }
        }
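
The DeviceManager hunk above only registers the VirtioMemMappingSource::Container handler for the default VFIO container. For the vfio-user case described in the commit message the pattern is the mirror image: insert a handler keyed by the device when it is plugged and remove that same key on unplug, which is what add_dma_mapping_handler and remove_dma_mapping_handler implement on the Mem device. A minimal sketch of that lifecycle, reusing the illustrative DmaMappingHandlers and VirtioMemMappingSource types from the earlier sketch (the device id and function names here are hypothetical, not part of this commit):

// Hypothetical vfio-user hotplug path: one handler per device, keyed by its id.
fn on_vfio_user_hotplug(
    handlers: &DmaMappingHandlers,
    device_id: u32,
    mapping: std::sync::Arc<dyn ExternalDmaMapping>,
) {
    handlers
        .lock()
        .unwrap()
        .insert(VirtioMemMappingSource::Device(device_id), mapping);
}

// Hypothetical unplug path: dropping only this device's entry leaves the
// Container handler (and any other devices' handlers) untouched; the real
// remove_dma_mapping_handler additionally unmaps the currently plugged blocks
// for the handler being removed.
fn on_vfio_user_unplug(handlers: &DmaMappingHandlers, device_id: u32) {
    let _ = handlers
        .lock()
        .unwrap()
        .remove(&VirtioMemMappingSource::Device(device_id));
}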