virtio-devices, vmm: Shutdown VMM on virtio thread panic

Shut down the VMM if the virtio (or VMM side of vhost-user)
thread panics.

See: #3031

Signed-off-by: Rob Bradford <robert.bradford@intel.com>
Author: Rob Bradford <robert.bradford@intel.com>
Date: 2021-09-07 16:10:48 +01:00
Commit: 687d646c60 (parent: 54e523c302)
18 changed files with 147 additions and 14 deletions
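The change in outline: every virtio worker closure now runs under std::panic::catch_unwind, and each worker thread holds a clone of the VMM's exit EventFd which it writes when the closure panics, so the VMM's main event loop wakes up and shuts the VM down instead of limping on without the device. Below is a minimal, self-contained sketch of that pattern; spawn_worker and the blocking read in main are illustrative stand-ins, not the Cloud Hypervisor API.

    use std::panic::AssertUnwindSafe;
    use std::thread;

    use vmm_sys_util::eventfd::EventFd;

    // Hypothetical stand-in for spawn_virtio_thread: run a worker and
    // signal the shared exit eventfd if the worker panics.
    fn spawn_worker<F>(
        name: &str,
        exit_evt: &EventFd,
        f: F,
    ) -> std::io::Result<thread::JoinHandle<()>>
    where
        F: FnOnce() + Send + 'static,
    {
        // Each worker thread owns its own clone of the shared eventfd.
        let thread_exit_evt = exit_evt.try_clone()?;
        let thread_name = name.to_string();
        thread::Builder::new()
            .name(thread_name.clone())
            .spawn(move || {
                // catch_unwind turns a panic in the worker into an Err value.
                if std::panic::catch_unwind(AssertUnwindSafe(f)).is_err() {
                    eprintln!("{} thread panicked", thread_name);
                    // Writing the eventfd wakes whoever polls it.
                    thread_exit_evt.write(1).ok();
                }
            })
    }

    fn main() -> std::io::Result<()> {
        let exit_evt = EventFd::new(0)?; // blocking eventfd, for this demo only
        spawn_worker("virtio-demo", &exit_evt, || panic!("boom"))?;
        // Stand-in for the VMM event loop: block until a worker signals exit.
        exit_evt.read()?;
        println!("exit event received, shutting down");
        Ok(())
    }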

fuzz/fuzz_targets/block.rs

@@ -17,7 +17,7 @@ use std::sync::Arc;
use virtio_devices::{Block, VirtioDevice, VirtioInterrupt, VirtioInterruptType};
use vm_memory::{Bytes, GuestAddress, GuestMemoryAtomic, GuestMemoryMmap};
use vm_virtio::Queue;
use vmm_sys_util::eventfd::EventFd;
use vmm_sys_util::eventfd::{EventFd, EFD_NONBLOCK};
const MEM_SIZE: u64 = 256 * 1024 * 1024;
const DESC_SIZE: u64 = 16; // Bytes in one virtio descriptor.
@@ -96,6 +96,7 @@ fuzz_target!(|bytes| {
256,
SeccompAction::Allow,
None,
EventFd::new(EFD_NONBLOCK).unwrap(),
)
.unwrap();

virtio-devices/src/balloon.rs

@@ -342,6 +342,7 @@ pub struct Balloon {
resize: VirtioBalloonResize,
config: Arc<Mutex<VirtioBalloonConfig>>,
seccomp_action: SeccompAction,
exit_evt: EventFd,
}
impl Balloon {
@@ -351,6 +352,7 @@ impl Balloon {
size: u64,
deflate_on_oom: bool,
seccomp_action: SeccompAction,
exit_evt: EventFd,
) -> io::Result<Self> {
let mut avail_features = 1u64 << VIRTIO_F_VERSION_1;
if deflate_on_oom {
@@ -375,6 +377,7 @@ impl Balloon {
resize: VirtioBalloonResize::new()?,
config: Arc::new(Mutex::new(config)),
seccomp_action,
exit_evt,
})
}
@@ -466,6 +469,7 @@ impl VirtioDevice for Balloon {
&self.seccomp_action,
Thread::VirtioBalloon,
&mut epoll_threads,
&self.exit_evt,
move || {
if let Err(e) = handler.run(paused, paused_sync.unwrap()) {
error!("Error running worker: {:?}", e);

virtio-devices/src/block.rs

@@ -365,6 +365,7 @@ pub struct Block {
counters: BlockCounters,
seccomp_action: SeccompAction,
rate_limiter_config: Option<RateLimiterConfig>,
exit_evt: EventFd,
}
#[derive(Versionize)]
@@ -391,6 +392,7 @@ impl Block {
queue_size: u16,
seccomp_action: SeccompAction,
rate_limiter_config: Option<RateLimiterConfig>,
exit_evt: EventFd,
) -> io::Result<Self> {
let disk_size = disk_image.size().map_err(|e| {
io::Error::new(
@@ -448,6 +450,7 @@ impl Block {
counters: BlockCounters::default(),
seccomp_action,
rate_limiter_config,
exit_evt,
})
}
@@ -593,6 +596,7 @@ impl VirtioDevice for Block {
&self.seccomp_action,
Thread::VirtioBlock,
&mut epoll_threads,
&self.exit_evt,
move || {
if let Err(e) = handler.run(paused, paused_sync.unwrap()) {
error!("Error running worker: {:?}", e);

virtio-devices/src/console.rs

@@ -325,6 +325,7 @@ pub struct Console {
endpoint: Endpoint,
seccomp_action: SeccompAction,
in_buffer: Arc<Mutex<VecDeque<u8>>>,
exit_evt: EventFd,
}
#[derive(Versionize)]
@@ -346,6 +347,7 @@ impl Console {
rows: u16,
iommu: bool,
seccomp_action: SeccompAction,
exit_evt: EventFd,
) -> io::Result<(Console, Arc<ConsoleResizer>)> {
let mut avail_features = 1u64 << VIRTIO_F_VERSION_1 | 1u64 << VIRTIO_CONSOLE_F_SIZE;
@@ -377,6 +379,7 @@ impl Console {
endpoint,
seccomp_action,
in_buffer: Arc::new(Mutex::new(VecDeque::new())),
exit_evt,
},
resizer,
))
@@ -473,6 +476,7 @@ impl VirtioDevice for Console {
&self.seccomp_action,
Thread::VirtioConsole,
&mut epoll_threads,
&self.exit_evt,
move || {
if let Err(e) = handler.run(paused, paused_sync.unwrap()) {
error!("Error running worker: {:?}", e);

virtio-devices/src/iommu.rs

@@ -701,6 +701,7 @@ pub struct Iommu {
mapping: Arc<IommuMapping>,
ext_mapping: BTreeMap<u32, Arc<dyn ExternalDmaMapping>>,
seccomp_action: SeccompAction,
exit_evt: EventFd,
}
#[derive(Versionize)]
@@ -714,7 +715,11 @@ struct IommuState {
impl VersionMapped for IommuState {}
impl Iommu {
pub fn new(id: String, seccomp_action: SeccompAction) -> io::Result<(Self, Arc<IommuMapping>)> {
pub fn new(
id: String,
seccomp_action: SeccompAction,
exit_evt: EventFd,
) -> io::Result<(Self, Arc<IommuMapping>)> {
let config = VirtioIommuConfig {
page_size_mask: VIRTIO_IOMMU_PAGE_SIZE_MASK,
probe_size: PROBE_PROP_SIZE,
@@ -742,6 +747,7 @@ impl Iommu {
mapping: mapping.clone(),
ext_mapping: BTreeMap::new(),
seccomp_action,
exit_evt,
},
mapping,
))
@@ -847,6 +853,7 @@ impl VirtioDevice for Iommu {
&self.seccomp_action,
Thread::VirtioIommu,
&mut epoll_threads,
&self.exit_evt,
move || {
if let Err(e) = handler.run(paused, paused_sync.unwrap()) {
error!("Error running worker: {:?}", e);

virtio-devices/src/lib.rs

@@ -80,8 +80,12 @@ pub enum ActivateError {
BadActivate,
/// Queue number is not correct
BadQueueNum,
/// Failed to clone Kill event
/// Failed to clone Kill event fd
CloneKillEventFd,
/// Failed to clone exit event fd
CloneExitEventFd(std::io::Error),
/// Failed to spawn thread
ThreadSpawn(std::io::Error),
/// Failed to create Vhost-user interrupt eventfd
VhostIrqCreate,
/// Failed to setup vhost-user-fs daemon.
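Both new variants carry the underlying std::io::Error, so the call site can report why activation failed. A trimmed illustration of how they are constructed (the enum here mirrors only the variants this commit touches):

    use std::io;

    // Illustrative subset of ActivateError, not the full enum.
    #[derive(Debug)]
    enum ActivateError {
        CloneExitEventFd(io::Error),
        ThreadSpawn(io::Error),
    }

    fn demo() -> Result<(), ActivateError> {
        // A failed thread spawn maps straight into the new variant,
        // replacing the old catch-all BadActivate.
        std::thread::Builder::new()
            .name("worker".into())
            .spawn(|| {})
            .map(|_| ())
            .map_err(ActivateError::ThreadSpawn)
    }

    fn main() {
        // CloneExitEventFd wraps an EventFd::try_clone failure the same way.
        let _example = ActivateError::CloneExitEventFd(io::Error::from(io::ErrorKind::Other));
        demo().unwrap();
    }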

virtio-devices/src/mem.rs

@@ -746,10 +746,12 @@ pub struct Mem {
hugepages: bool,
dma_mapping_handlers: Arc<Mutex<BTreeMap<u32, Arc<dyn ExternalDmaMapping>>>>,
blocks_state: Arc<Mutex<BlocksState>>,
exit_evt: EventFd,
}
impl Mem {
// Create a new virtio-mem device.
#[allow(clippy::too_many_arguments)]
pub fn new(
id: String,
region: &Arc<GuestRegionMmap>,
@@ -758,6 +760,7 @@ impl Mem {
numa_node_id: Option<u16>,
initial_size: u64,
hugepages: bool,
exit_evt: EventFd,
) -> io::Result<Mem> {
let region_len = region.len();
@@ -835,6 +838,7 @@ impl Mem {
(config.region_size / config.block_size)
as usize
]))),
exit_evt,
})
}
@@ -961,6 +965,7 @@ impl VirtioDevice for Mem {
&self.seccomp_action,
Thread::VirtioMem,
&mut epoll_threads,
&self.exit_evt,
move || {
if let Err(e) = handler.run(paused, paused_sync.unwrap()) {
error!("Error running worker: {:?}", e);

virtio-devices/src/net.rs

@@ -355,6 +355,7 @@ pub struct Net {
counters: NetCounters,
seccomp_action: SeccompAction,
rate_limiter_config: Option<RateLimiterConfig>,
exit_evt: EventFd,
}
#[derive(Versionize)]
@@ -379,6 +380,7 @@ impl Net {
queue_size: u16,
seccomp_action: SeccompAction,
rate_limiter_config: Option<RateLimiterConfig>,
exit_evt: EventFd,
) -> Result<Self> {
let mut avail_features = 1 << VIRTIO_NET_F_CSUM
| 1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
@@ -424,6 +426,7 @@ impl Net {
counters: NetCounters::default(),
seccomp_action,
rate_limiter_config,
exit_evt,
})
}
@@ -442,6 +445,7 @@ impl Net {
queue_size: u16,
seccomp_action: SeccompAction,
rate_limiter_config: Option<RateLimiterConfig>,
exit_evt: EventFd,
) -> Result<Self> {
let taps = open_tap(if_name, ip_addr, netmask, host_mac, num_queues / 2, None)
.map_err(Error::OpenTap)?;
@@ -455,9 +459,11 @@ impl Net {
queue_size,
seccomp_action,
rate_limiter_config,
exit_evt,
)
}
#[allow(clippy::too_many_arguments)]
pub fn from_tap_fds(
id: String,
fds: &[RawFd],
@@ -466,6 +472,7 @@ impl Net {
queue_size: u16,
seccomp_action: SeccompAction,
rate_limiter_config: Option<RateLimiterConfig>,
exit_evt: EventFd,
) -> Result<Self> {
let mut taps: Vec<Tap> = Vec::new();
let num_queue_pairs = fds.len();
@@ -489,6 +496,7 @@ impl Net {
queue_size,
seccomp_action,
rate_limiter_config,
exit_evt,
)
}
@@ -576,6 +584,7 @@ impl VirtioDevice for Net {
&self.seccomp_action,
Thread::VirtioNetCtl,
&mut epoll_threads,
&self.exit_evt,
move || {
if let Err(e) = ctrl_handler.run_ctrl(paused, paused_sync.unwrap()) {
error!("Error running worker: {:?}", e);
@@ -654,6 +663,7 @@ impl VirtioDevice for Net {
&self.seccomp_action,
Thread::VirtioNet,
&mut epoll_threads,
&self.exit_evt,
move || {
if let Err(e) = handler.run(paused, paused_sync.unwrap()) {
error!("Error running worker: {:?}", e);

virtio-devices/src/pmem.rs

@@ -264,6 +264,7 @@ pub struct Pmem {
config: VirtioPmemConfig,
mapping: UserspaceMapping,
seccomp_action: SeccompAction,
exit_evt: EventFd,
// Hold ownership of the memory that is allocated for the device
// which will be automatically dropped when the device is dropped
@@ -280,6 +281,7 @@ pub struct PmemState {
impl VersionMapped for PmemState {}
impl Pmem {
#[allow(clippy::too_many_arguments)]
pub fn new(
id: String,
disk: File,
@@ -288,6 +290,7 @@ impl Pmem {
_region: MmapRegion,
iommu: bool,
seccomp_action: SeccompAction,
exit_evt: EventFd,
) -> io::Result<Pmem> {
let config = VirtioPmemConfig {
start: addr.raw_value().to_le(),
@@ -315,6 +318,7 @@ impl Pmem {
mapping,
seccomp_action,
_region,
exit_evt,
})
}
@@ -396,6 +400,7 @@ impl VirtioDevice for Pmem {
&self.seccomp_action,
Thread::VirtioPmem,
&mut epoll_threads,
&self.exit_evt,
move || {
if let Err(e) = handler.run(paused, paused_sync.unwrap()) {
error!("Error running worker: {:?}", e);

virtio-devices/src/rng.rs

@@ -129,6 +129,7 @@ pub struct Rng {
id: String,
random_file: Option<File>,
seccomp_action: SeccompAction,
exit_evt: EventFd,
}
#[derive(Versionize)]
@@ -146,6 +147,7 @@ impl Rng {
path: &str,
iommu: bool,
seccomp_action: SeccompAction,
exit_evt: EventFd,
) -> io::Result<Rng> {
let random_file = File::open(path)?;
let mut avail_features = 1u64 << VIRTIO_F_VERSION_1;
@@ -166,6 +168,7 @@ impl Rng {
id,
random_file: Some(random_file),
seccomp_action,
exit_evt,
})
}
@@ -241,6 +244,7 @@ impl VirtioDevice for Rng {
&self.seccomp_action,
Thread::VirtioRng,
&mut epoll_threads,
&self.exit_evt,
move || {
if let Err(e) = handler.run(paused, paused_sync.unwrap()) {
error!("Error running worker: {:?}", e);

virtio-devices/src/thread_helper.rs

@@ -8,13 +8,18 @@ use crate::{
ActivateError,
};
use seccompiler::{apply_filter, SeccompAction};
use std::thread::{self, JoinHandle};
use std::{
panic::AssertUnwindSafe,
thread::{self, JoinHandle},
};
use vmm_sys_util::eventfd::EventFd;
pub(crate) fn spawn_virtio_thread<F>(
name: &str,
seccomp_action: &SeccompAction,
thread_type: Thread,
epoll_threads: &mut Vec<JoinHandle<()>>,
exit_evt: &EventFd,
f: F,
) -> Result<(), ActivateError>
where
@@ -24,20 +29,31 @@ where
let seccomp_filter = get_seccomp_filter(seccomp_action, thread_type)
.map_err(ActivateError::CreateSeccompFilter)?;
let thread_exit_evt = exit_evt
.try_clone()
.map_err(ActivateError::CloneExitEventFd)?;
let thread_name = name.to_string();
thread::Builder::new()
.name(name.to_string())
.spawn(move || {
if !seccomp_filter.is_empty() {
if let Err(e) = apply_filter(&seccomp_filter) {
error!("Error applying seccomp filter: {:?}", e);
thread_exit_evt.write(1).ok();
return;
}
}
f()
std::panic::catch_unwind(AssertUnwindSafe(move || f()))
.or_else(|_| {
error!("{} thread panicked", thread_name);
thread_exit_evt.write(1)
})
.ok();
})
.map(|thread| epoll_threads.push(thread))
.map_err(|e| {
error!("Failed to spawn thread for {}: {}", name, e);
ActivateError::BadActivate
ActivateError::ThreadSpawn(e)
})
}
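One subtlety in the hunk above: catch_unwind only accepts UnwindSafe closures, and the device worker closures capture state (fds, queues, interrupt handles) that is not. AssertUnwindSafe is what makes the call compile; the Err arm is then where the exit eventfd gets written. A tiny plain-std illustration of the same control flow:

    use std::panic::{catch_unwind, AssertUnwindSafe};

    fn main() {
        let mut ticks = 0;
        // Without AssertUnwindSafe, the captured &mut reference would make
        // this closure !UnwindSafe and catch_unwind would reject it.
        let result = catch_unwind(AssertUnwindSafe(|| {
            ticks += 1;
            panic!("worker died");
        }));
        // In spawn_virtio_thread this is the branch that writes exit_evt.
        assert!(result.is_err());
        println!("panic caught after {} tick(s)", ticks);
    }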

virtio-devices/src/vhost_user/blk.rs

@@ -59,6 +59,7 @@ pub struct Blk {
guest_memory: Option<GuestMemoryAtomic<GuestMemoryMmap>>,
epoll_thread: Option<thread::JoinHandle<()>>,
seccomp_action: SeccompAction,
exit_evt: EventFd,
}
impl Blk {
@@ -68,6 +69,7 @@ impl Blk {
vu_cfg: VhostUserConfig,
restoring: bool,
seccomp_action: SeccompAction,
exit_evt: EventFd,
) -> Result<Blk> {
let num_queues = vu_cfg.num_queues;
@@ -93,6 +95,7 @@ impl Blk {
guest_memory: None,
epoll_thread: None,
seccomp_action,
exit_evt,
});
}
@@ -180,6 +183,7 @@ impl Blk {
guest_memory: None,
epoll_thread: None,
seccomp_action,
exit_evt,
})
}
@@ -314,6 +318,7 @@ impl VirtioDevice for Blk {
&self.seccomp_action,
Thread::VirtioVhostBlock,
&mut epoll_threads,
&self.exit_evt,
move || {
if let Err(e) = handler.run(paused, paused_sync.unwrap()) {
error!("Error running worker: {:?}", e);

virtio-devices/src/vhost_user/fs.rs

@@ -309,6 +309,7 @@ pub struct Fs {
seccomp_action: SeccompAction,
guest_memory: Option<GuestMemoryAtomic<GuestMemoryMmap>>,
epoll_thread: Option<thread::JoinHandle<()>>,
exit_evt: EventFd,
}
impl Fs {
@@ -323,6 +324,7 @@ impl Fs {
cache: Option<(VirtioSharedMemoryList, MmapRegion)>,
seccomp_action: SeccompAction,
restoring: bool,
exit_evt: EventFd,
) -> Result<Fs> {
let mut slave_req_support = false;
@@ -353,6 +355,7 @@ impl Fs {
seccomp_action,
guest_memory: None,
epoll_thread: None,
exit_evt,
});
}
@@ -429,6 +432,7 @@ impl Fs {
seccomp_action,
guest_memory: None,
epoll_thread: None,
exit_evt,
})
}
@@ -556,6 +560,7 @@ impl VirtioDevice for Fs {
&self.seccomp_action,
Thread::VirtioVhostFs,
&mut epoll_threads,
&self.exit_evt,
move || {
if let Err(e) = handler.run(paused, paused_sync.unwrap()) {
error!("Error running worker: {:?}", e);

virtio-devices/src/vhost_user/net.rs

@@ -113,6 +113,7 @@ pub struct Net {
ctrl_queue_epoll_thread: Option<thread::JoinHandle<()>>,
epoll_thread: Option<thread::JoinHandle<()>>,
seccomp_action: SeccompAction,
exit_evt: EventFd,
}
impl Net {
@@ -124,6 +125,7 @@ impl Net {
server: bool,
seccomp_action: SeccompAction,
restoring: bool,
exit_evt: EventFd,
) -> Result<Net> {
let mut num_queues = vu_cfg.num_queues;
@@ -152,6 +154,7 @@ impl Net {
ctrl_queue_epoll_thread: None,
epoll_thread: None,
seccomp_action,
exit_evt,
});
}
@@ -237,6 +240,7 @@ impl Net {
ctrl_queue_epoll_thread: None,
epoll_thread: None,
seccomp_action,
exit_evt,
})
}
@@ -339,6 +343,7 @@ impl VirtioDevice for Net {
&self.seccomp_action,
Thread::VirtioVhostNetCtl,
&mut epoll_threads,
&self.exit_evt,
move || {
if let Err(e) = ctrl_handler.run_ctrl(paused, paused_sync.unwrap()) {
error!("Error running worker: {:?}", e);
@@ -381,6 +386,7 @@ impl VirtioDevice for Net {
&self.seccomp_action,
Thread::VirtioVhostNet,
&mut epoll_threads,
&self.exit_evt,
move || {
if let Err(e) = handler.run(paused, paused_sync.unwrap()) {
error!("Error running worker: {:?}", e);

virtio-devices/src/vsock/device.rs

@@ -303,6 +303,7 @@ pub struct Vsock<B: VsockBackend> {
backend: Arc<RwLock<B>>,
path: PathBuf,
seccomp_action: SeccompAction,
exit_evt: EventFd,
}
#[derive(Versionize)]
@@ -326,6 +327,7 @@ where
backend: B,
iommu: bool,
seccomp_action: SeccompAction,
exit_evt: EventFd,
) -> io::Result<Vsock<B>> {
let mut avail_features = 1u64 << VIRTIO_F_VERSION_1 | 1u64 << VIRTIO_F_IN_ORDER;
@@ -347,6 +349,7 @@ where
backend: Arc::new(RwLock::new(backend)),
path,
seccomp_action,
exit_evt,
})
}
@@ -439,6 +442,7 @@ where
&self.seccomp_action,
Thread::VirtioVsock,
&mut epoll_threads,
&self.exit_evt,
move || {
if let Err(e) = handler.run(paused, paused_sync.unwrap()) {
error!("Error running worker: {:?}", e);

virtio-devices/src/vsock/mod.rs

@@ -275,6 +275,7 @@ mod tests {
TestBackend::new(),
false,
seccompiler::SeccompAction::Trap,
EventFd::new(EFD_NONBLOCK).unwrap(),
)
.unwrap(),
}

virtio-devices/src/watchdog.rs

@@ -166,6 +166,7 @@ pub struct Watchdog {
reset_evt: EventFd,
last_ping_time: Arc<Mutex<Option<Instant>>>,
timer: File,
exit_evt: EventFd,
}
#[derive(Versionize)]
@@ -183,6 +184,7 @@ impl Watchdog {
id: String,
reset_evt: EventFd,
seccomp_action: SeccompAction,
exit_evt: EventFd,
) -> io::Result<Watchdog> {
let avail_features = 1u64 << VIRTIO_F_VERSION_1;
let timer_fd = timerfd_create().map_err(|e| {
@@ -204,6 +206,7 @@ impl Watchdog {
reset_evt,
last_ping_time: Arc::new(Mutex::new(None)),
timer,
exit_evt,
})
}
@@ -324,6 +327,7 @@ impl VirtioDevice for Watchdog {
&self.seccomp_action,
Thread::VirtioWatchdog,
&mut epoll_threads,
&self.exit_evt,
move || {
if let Err(e) = handler.run(paused, paused_sync.unwrap()) {
error!("Error running worker: {:?}", e);

vmm/src/device_manager.rs

@@ -886,9 +886,7 @@ pub struct DeviceManager {
device_tree: Arc<Mutex<DeviceTree>>,
// Exit event
#[cfg(feature = "acpi")]
exit_evt: EventFd,
reset_evt: EventFd,
#[cfg(target_arch = "aarch64")]
@@ -931,7 +929,7 @@ impl DeviceManager {
vm: Arc<dyn hypervisor::Vm>,
config: Arc<Mutex<VmConfig>>,
memory_manager: Arc<Mutex<MemoryManager>>,
_exit_evt: &EventFd,
exit_evt: &EventFd,
reset_evt: &EventFd,
seccomp_action: SeccompAction,
#[cfg(any(target_arch = "aarch64", feature = "acpi"))] numa_nodes: NumaNodes,
@@ -991,8 +989,7 @@ impl DeviceManager {
pci_devices_down: 0,
pci_irq_slots: [0; 32],
device_tree,
#[cfg(feature = "acpi")]
exit_evt: _exit_evt.try_clone().map_err(DeviceManagerError::EventFd)?,
exit_evt: exit_evt.try_clone().map_err(DeviceManagerError::EventFd)?,
reset_evt: reset_evt.try_clone().map_err(DeviceManagerError::EventFd)?,
#[cfg(target_arch = "aarch64")]
id_to_dev_info: HashMap::new(),
@@ -1170,9 +1167,14 @@ impl DeviceManager {
let iommu_id = String::from(IOMMU_DEVICE_NAME);
let (iommu_device, iommu_mapping) = if self.config.lock().unwrap().iommu {
let (device, mapping) =
virtio_devices::Iommu::new(iommu_id.clone(), self.seccomp_action.clone())
.map_err(DeviceManagerError::CreateVirtioIommu)?;
let (device, mapping) = virtio_devices::Iommu::new(
iommu_id.clone(),
self.seccomp_action.clone(),
self.exit_evt
.try_clone()
.map_err(DeviceManagerError::EventFd)?,
)
.map_err(DeviceManagerError::CreateVirtioIommu)?;
let device = Arc::new(Mutex::new(device));
self.iommu_device = Some(Arc::clone(&device));
@@ -1733,6 +1735,9 @@ impl DeviceManager {
row,
self.force_iommu | console_config.iommu,
self.seccomp_action.clone(),
self.exit_evt
.try_clone()
.map_err(DeviceManagerError::EventFd)?,
)
.map_err(DeviceManagerError::CreateVirtioConsole)?;
let virtio_console_device = Arc::new(Mutex::new(virtio_console_device));
@@ -1867,6 +1872,9 @@ impl DeviceManager {
vu_cfg,
self.restoring,
self.seccomp_action.clone(),
self.exit_evt
.try_clone()
.map_err(DeviceManagerError::EventFd)?,
) {
Ok(vub_device) => vub_device,
Err(e) => {
@@ -1968,6 +1976,9 @@ impl DeviceManager {
disk_cfg.queue_size,
self.seccomp_action.clone(),
disk_cfg.rate_limiter_config,
self.exit_evt
.try_clone()
.map_err(DeviceManagerError::EventFd)?,
)
.map_err(DeviceManagerError::CreateVirtioBlock)?,
));
@@ -2035,6 +2046,9 @@ impl DeviceManager {
server,
self.seccomp_action.clone(),
self.restoring,
self.exit_evt
.try_clone()
.map_err(DeviceManagerError::EventFd)?,
) {
Ok(vun_device) => vun_device,
Err(e) => {
@@ -2071,6 +2085,9 @@ impl DeviceManager {
net_cfg.queue_size,
self.seccomp_action.clone(),
net_cfg.rate_limiter_config,
self.exit_evt
.try_clone()
.map_err(DeviceManagerError::EventFd)?,
)
.map_err(DeviceManagerError::CreateVirtioNet)?,
))
@@ -2084,6 +2101,9 @@ impl DeviceManager {
net_cfg.queue_size,
self.seccomp_action.clone(),
net_cfg.rate_limiter_config,
self.exit_evt
.try_clone()
.map_err(DeviceManagerError::EventFd)?,
)
.map_err(DeviceManagerError::CreateVirtioNet)?,
))
@@ -2101,6 +2121,9 @@ impl DeviceManager {
net_cfg.queue_size,
self.seccomp_action.clone(),
net_cfg.rate_limiter_config,
self.exit_evt
.try_clone()
.map_err(DeviceManagerError::EventFd)?,
)
.map_err(DeviceManagerError::CreateVirtioNet)?,
))
@@ -2155,6 +2178,9 @@ impl DeviceManager {
rng_path,
self.force_iommu | rng_config.iommu,
self.seccomp_action.clone(),
self.exit_evt
.try_clone()
.map_err(DeviceManagerError::EventFd)?,
)
.map_err(DeviceManagerError::CreateVirtioRng)?,
));
@@ -2303,6 +2329,9 @@ impl DeviceManager {
cache,
self.seccomp_action.clone(),
self.restoring,
self.exit_evt
.try_clone()
.map_err(DeviceManagerError::EventFd)?,
)
.map_err(DeviceManagerError::CreateVirtioFs)?,
));
@@ -2485,6 +2514,9 @@ impl DeviceManager {
mmap_region,
self.force_iommu | pmem_cfg.iommu,
self.seccomp_action.clone(),
self.exit_evt
.try_clone()
.map_err(DeviceManagerError::EventFd)?,
)
.map_err(DeviceManagerError::CreateVirtioPmem)?,
));
@@ -2551,6 +2583,9 @@ impl DeviceManager {
backend,
self.force_iommu | vsock_cfg.iommu,
self.seccomp_action.clone(),
self.exit_evt
.try_clone()
.map_err(DeviceManagerError::EventFd)?,
)
.map_err(DeviceManagerError::CreateVirtioVsock)?,
));
@@ -2614,6 +2649,9 @@ impl DeviceManager {
node_id,
virtio_mem_zone.hotplugged_size(),
virtio_mem_zone.hugepages(),
self.exit_evt
.try_clone()
.map_err(DeviceManagerError::EventFd)?,
)
.map_err(DeviceManagerError::CreateVirtioMem)?,
));
@@ -2654,6 +2692,9 @@ impl DeviceManager {
balloon_config.size,
balloon_config.deflate_on_oom,
self.seccomp_action.clone(),
self.exit_evt
.try_clone()
.map_err(DeviceManagerError::EventFd)?,
)
.map_err(DeviceManagerError::CreateVirtioBalloon)?,
));
@@ -2692,6 +2733,9 @@ impl DeviceManager {
id.clone(),
self.reset_evt.try_clone().unwrap(),
self.seccomp_action.clone(),
self.exit_evt
.try_clone()
.map_err(DeviceManagerError::EventFd)?,
)
.map_err(DeviceManagerError::CreateVirtioWatchdog)?,
));
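Every DeviceManager hunk repeats the same idiom: each device construction receives its own try_clone() of the manager's exit eventfd. The clones all refer to one kernel eventfd object, so a write through any device's handle wakes the single reader in the VMM event loop. A toy demonstration of that fan-out (the per_device vector is illustrative):

    use vmm_sys_util::eventfd::EventFd;

    fn main() -> std::io::Result<()> {
        let exit_evt = EventFd::new(0)?;
        // One clone per device, as in the hunks above.
        let per_device: Vec<EventFd> = (0..3)
            .map(|_| exit_evt.try_clone())
            .collect::<Result<_, _>>()?;
        // A write through any clone is visible to a reader of the original.
        per_device[1].write(1)?;
        assert_eq!(exit_evt.read()?, 1);
        Ok(())
    }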