vmm: Convert virtio devices to Arc<Mutex<T>>

Migratable devices can be virtio or legacy devices.
In any case, they can potentially be tracked through one of the IO buses
as an Arc<Mutex<dyn BusDevice>>. In order for the DeviceManager to also
keep track of such devices as Migratable trait objects, they must be
shared as atomically reference-counted, mutable references, i.e. Arc<Mutex<T>>. That forces all
Migratable objects to be tracked as Arc<Mutex<dyn Migratable>>.

Virtio devices are typically migratable, and thus for them to be
referenced by the DeviceManager, they now should be built as
Arc<Mutex<dyn VirtioDevice>>.

Signed-off-by: Samuel Ortiz <sameo@linux.intel.com>
This commit is contained in:
Samuel Ortiz 2019-11-19 00:10:42 +01:00
parent 5450de0f5e
commit 35d7721683
4 changed files with 131 additions and 81 deletions

View File

@ -3,7 +3,7 @@
// found in the LICENSE file.
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, RwLock};
use std::sync::{Arc, Mutex, RwLock};
use byteorder::{ByteOrder, LittleEndian};
use libc::EFD_NONBLOCK;
@ -38,7 +38,7 @@ const MMIO_VERSION: u32 = 2;
/// Typically one page (4096 bytes) of MMIO address space is sufficient to handle this transport
/// and inner virtio device.
pub struct MmioDevice {
device: Box<dyn VirtioDevice>,
device: Arc<Mutex<dyn VirtioDevice>>,
device_activated: bool,
features_select: u32,
@ -57,13 +57,15 @@ impl MmioDevice {
/// Constructs a new MMIO transport for the given virtio device.
pub fn new(
mem: Arc<RwLock<GuestMemoryMmap>>,
device: Box<dyn VirtioDevice>,
device: Arc<Mutex<dyn VirtioDevice>>,
) -> Result<MmioDevice> {
let device_clone = device.clone();
let locked_device = device_clone.lock().unwrap();
let mut queue_evts = Vec::new();
for _ in device.queue_max_sizes().iter() {
for _ in locked_device.queue_max_sizes().iter() {
queue_evts.push(EventFd::new(EFD_NONBLOCK)?)
}
let queues = device
let queues = locked_device
.queue_max_sizes()
.iter()
.map(|&s| Queue::new(s))
@ -158,10 +160,10 @@ impl BusDevice for MmioDevice {
let v = match offset {
0x0 => MMIO_MAGIC_VALUE,
0x04 => MMIO_VERSION,
0x08 => self.device.device_type(),
0x08 => self.device.lock().unwrap().device_type(),
0x0c => VENDOR_ID, // vendor id
0x10 => {
self.device.features(self.features_select)
self.device.lock().unwrap().features(self.features_select)
| if self.features_select == 1 { 0x1 } else { 0x0 }
}
0x34 => self.with_queue(0, |q| u32::from(q.get_max_size())),
@ -176,7 +178,11 @@ impl BusDevice for MmioDevice {
};
LittleEndian::write_u32(data, v);
}
0x100..=0xfff => self.device.read_config(offset - 0x100, data),
0x100..=0xfff => self
.device
.lock()
.unwrap()
.read_config(offset - 0x100, data),
_ => {
warn!(
"invalid virtio mmio read: 0x{:x}:0x{:x}",
@ -202,7 +208,11 @@ impl BusDevice for MmioDevice {
let v = LittleEndian::read_u32(data);
match offset {
0x14 => self.features_select = v,
0x20 => self.device.ack_features(self.acked_features_select, v),
0x20 => self
.device
.lock()
.unwrap()
.ack_features(self.acked_features_select, v),
0x24 => self.acked_features_select = v,
0x30 => self.queue_select = v,
0x38 => mut_q = self.with_queue_mut(|q| q.size = v as u16),
@ -224,7 +234,13 @@ impl BusDevice for MmioDevice {
}
}
}
0x100..=0xfff => return self.device.write_config(offset - 0x100, data),
0x100..=0xfff => {
return self
.device
.lock()
.unwrap()
.write_config(offset - 0x100, data)
}
_ => {
warn!(
"invalid virtio mmio write: 0x{:x}:0x{:x}",
@ -244,6 +260,8 @@ impl BusDevice for MmioDevice {
if self.mem.is_some() {
let mem = self.mem.as_ref().unwrap().clone();
self.device
.lock()
.unwrap()
.activate(
mem,
interrupt_cb,

View File

@ -9,7 +9,7 @@ extern crate byteorder;
use byteorder::{ByteOrder, LittleEndian};
use std::sync::atomic::{AtomicU16, Ordering};
use std::sync::Arc;
use std::sync::{Arc, Mutex};
use vm_memory::GuestAddress;
use crate::{Queue, VirtioDevice};
@ -51,7 +51,7 @@ impl VirtioPciCommonConfig {
offset: u64,
data: &mut [u8],
queues: &mut Vec<Queue>,
device: &mut dyn VirtioDevice,
device: Arc<Mutex<dyn VirtioDevice>>,
) {
assert!(data.len() <= 8);
@ -81,7 +81,7 @@ impl VirtioPciCommonConfig {
offset: u64,
data: &[u8],
queues: &mut Vec<Queue>,
device: &mut dyn VirtioDevice,
device: Arc<Mutex<dyn VirtioDevice>>,
) {
assert!(data.len() <= 8);
@ -156,15 +156,16 @@ impl VirtioPciCommonConfig {
}
}
fn read_common_config_dword(&self, offset: u64, device: &dyn VirtioDevice) -> u32 {
fn read_common_config_dword(&self, offset: u64, device: Arc<Mutex<dyn VirtioDevice>>) -> u32 {
debug!("read_common_config_dword: offset 0x{:x}", offset);
match offset {
0x00 => self.device_feature_select,
0x04 => {
let locked_device = device.lock().unwrap();
// Only 64 bits of features (2 pages) are defined for now, so limit
// device_feature_select to avoid shifting by 64 or more bits.
if self.device_feature_select < 2 {
device.features(self.device_feature_select)
locked_device.features(self.device_feature_select)
} else {
0
}
@ -182,7 +183,7 @@ impl VirtioPciCommonConfig {
offset: u64,
value: u32,
queues: &mut Vec<Queue>,
device: &mut dyn VirtioDevice,
device: Arc<Mutex<dyn VirtioDevice>>,
) {
debug!("write_common_config_dword: offset 0x{:x}", offset);
fn hi(v: &mut GuestAddress, x: u32) {
@ -198,7 +199,8 @@ impl VirtioPciCommonConfig {
0x08 => self.driver_feature_select = value,
0x0c => {
if self.driver_feature_select < 2 {
device.ack_features(self.driver_feature_select, value);
let mut locked_device = device.lock().unwrap();
locked_device.ack_features(self.driver_feature_select, value);
} else {
warn!(
"invalid ack_features (page {}, value 0x{:x})",

View File

@ -225,7 +225,7 @@ pub struct VirtioPciDevice {
msix_num: u16,
// Virtio device reference and status
device: Box<dyn VirtioDevice>,
device: Arc<Mutex<dyn VirtioDevice>>,
device_activated: bool,
// PCI interrupts.
@ -250,15 +250,17 @@ impl VirtioPciDevice {
/// Constructs a new PCI transport for the given virtio device.
pub fn new(
memory: Arc<RwLock<GuestMemoryMmap>>,
device: Box<dyn VirtioDevice>,
device: Arc<Mutex<dyn VirtioDevice>>,
msix_num: u16,
iommu_mapping_cb: Option<Arc<VirtioIommuRemapping>>,
) -> Result<Self> {
let device_clone = device.clone();
let locked_device = device_clone.lock().unwrap();
let mut queue_evts = Vec::new();
for _ in device.queue_max_sizes().iter() {
for _ in locked_device.queue_max_sizes().iter() {
queue_evts.push(EventFd::new(EFD_NONBLOCK)?)
}
let queues = device
let queues = locked_device
.queue_max_sizes()
.iter()
.map(|&s| {
@ -268,7 +270,7 @@ impl VirtioPciDevice {
})
.collect();
let pci_device_id = VIRTIO_PCI_DEVICE_ID_BASE + device.device_type() as u16;
let pci_device_id = VIRTIO_PCI_DEVICE_ID_BASE + locked_device.device_type() as u16;
let (msix_config, msix_config_clone) = if msix_num > 0 {
let msix_config = Arc::new(Mutex::new(MsixConfig::new(msix_num)));
@ -282,7 +284,7 @@ impl VirtioPciDevice {
// The block devices should be given a 32-bit BAR so that they are easily accessible
// to firmware without requiring excessive identity mapping.
let mut use_64bit_bar = true;
let (class, subclass) = match VirtioDeviceType::from(device.device_type()) {
let (class, subclass) = match VirtioDeviceType::from(locked_device.device_type()) {
VirtioDeviceType::TYPE_NET => (
PciClassCode::NetworkController,
&PciNetworkControllerSubclass::EthernetController as &dyn PciSubclass,
@ -556,6 +558,8 @@ impl PciDevice for VirtioPciDevice {
) -> std::result::Result<Vec<(GuestAddress, GuestUsize, PciBarRegionType)>, PciDeviceError>
{
let mut ranges = Vec::new();
let device_clone = self.device.clone();
let device = device_clone.lock().unwrap();
// Allocate the virtio-pci capability BAR.
// See http://docs.oasis-open.org/virtio/virtio/v1.0/cs04/virtio-v1.0-cs04.html#x1-740004
@ -589,7 +593,7 @@ impl PciDevice for VirtioPciDevice {
self.add_pci_capabilities(virtio_pci_bar)?;
// Allocate a dedicated BAR if there are some shared memory regions.
if let Some(shm_list) = self.device.get_shm_regions() {
if let Some(shm_list) = device.get_shm_regions() {
let config = PciBarConfiguration::default()
.set_register_index(2)
.set_address(shm_list.addr.raw_value())
@ -622,7 +626,7 @@ impl PciDevice for VirtioPciDevice {
o - COMMON_CONFIG_BAR_OFFSET,
data,
&mut self.queues,
self.device.as_mut(),
self.device.clone(),
),
o if ISR_CONFIG_BAR_OFFSET <= o && o < ISR_CONFIG_BAR_OFFSET + ISR_CONFIG_SIZE => {
if let Some(v) = data.get_mut(0) {
@ -633,7 +637,8 @@ impl PciDevice for VirtioPciDevice {
o if DEVICE_CONFIG_BAR_OFFSET <= o
&& o < DEVICE_CONFIG_BAR_OFFSET + DEVICE_CONFIG_SIZE =>
{
self.device.read_config(o - DEVICE_CONFIG_BAR_OFFSET, data);
let device = self.device.lock().unwrap();
device.read_config(o - DEVICE_CONFIG_BAR_OFFSET, data);
}
o if NOTIFICATION_BAR_OFFSET <= o
&& o < NOTIFICATION_BAR_OFFSET + NOTIFICATION_SIZE =>
@ -666,7 +671,7 @@ impl PciDevice for VirtioPciDevice {
o - COMMON_CONFIG_BAR_OFFSET,
data,
&mut self.queues,
self.device.as_mut(),
self.device.clone(),
),
o if ISR_CONFIG_BAR_OFFSET <= o && o < ISR_CONFIG_BAR_OFFSET + ISR_CONFIG_SIZE => {
if let Some(v) = data.get(0) {
@ -677,7 +682,8 @@ impl PciDevice for VirtioPciDevice {
o if DEVICE_CONFIG_BAR_OFFSET <= o
&& o < DEVICE_CONFIG_BAR_OFFSET + DEVICE_CONFIG_SIZE =>
{
self.device.write_config(o - DEVICE_CONFIG_BAR_OFFSET, data);
let mut device = self.device.lock().unwrap();
device.write_config(o - DEVICE_CONFIG_BAR_OFFSET, data);
}
o if NOTIFICATION_BAR_OFFSET <= o
&& o < NOTIFICATION_BAR_OFFSET + NOTIFICATION_SIZE =>
@ -707,7 +713,8 @@ impl PciDevice for VirtioPciDevice {
if let Some(interrupt_cb) = self.interrupt_cb.take() {
if self.memory.is_some() {
let mem = self.memory.as_ref().unwrap().clone();
self.device
let mut device = self.device.lock().unwrap();
device
.activate(
mem,
interrupt_cb,
@ -722,7 +729,8 @@ impl PciDevice for VirtioPciDevice {
// Device has been reset by the driver
if self.device_activated && self.is_driver_init() {
if let Some((interrupt_cb, mut queue_evts)) = self.device.reset() {
let mut device = self.device.lock().unwrap();
if let Some((interrupt_cb, mut queue_evts)) = device.reset() {
// Upon reset the device returns its interrupt EventFD and it's queue EventFDs
self.interrupt_cb = Some(interrupt_cb);
self.queue_evts.append(&mut queue_evts);

View File

@ -178,6 +178,8 @@ pub enum DeviceManagerError {
}
pub type DeviceManagerResult<T> = result::Result<T, DeviceManagerError>;
type VirtioDeviceArc = Arc<Mutex<dyn vm_virtio::VirtioDevice>>;
struct InterruptInfo<'a> {
_ioapic: &'a Arc<Mutex<ioapic::Ioapic>>,
}
@ -415,7 +417,7 @@ impl DeviceManager {
let io_bus = devices::Bus::new();
let mmio_bus = devices::Bus::new();
let mut virtio_devices: Vec<(Box<dyn vm_virtio::VirtioDevice>, bool)> = Vec::new();
let mut virtio_devices: Vec<(Arc<Mutex<dyn vm_virtio::VirtioDevice>>, bool)> = Vec::new();
let mut mmap_regions = Vec::new();
#[allow(unused_mut)]
@ -508,7 +510,7 @@ impl DeviceManager {
address_manager: &Arc<AddressManager>,
mem_slots: u32,
virt_iommu: &mut Option<(u32, Vec<u32>)>,
virtio_devices: Vec<(Box<dyn vm_virtio::VirtioDevice>, bool)>,
virtio_devices: Vec<(Arc<Mutex<dyn vm_virtio::VirtioDevice>>, bool)>,
interrupt_info: &InterruptInfo,
) -> DeviceManagerResult<()> {
#[cfg(feature = "pci_support")]
@ -572,7 +574,7 @@ impl DeviceManager {
// add the device to the PCI topology now. Otherwise, the
// b/d/f won't match the virtio-iommu device as expected.
DeviceManager::add_virtio_pci_device(
Box::new(iommu_device),
Arc::new(Mutex::new(iommu_device)),
vm_info.memory,
&address_manager,
vm_info.vm_fd,
@ -607,7 +609,7 @@ impl DeviceManager {
fn add_mmio_devices(
vm_info: &VmInfo,
address_manager: &Arc<AddressManager>,
virtio_devices: Vec<(Box<dyn vm_virtio::VirtioDevice>, bool)>,
virtio_devices: Vec<(Arc<Mutex<dyn vm_virtio::VirtioDevice>>, bool)>,
interrupt_info: &InterruptInfo,
mut cmdline_additions: &mut Vec<String>,
) -> DeviceManagerResult<()> {
@ -744,7 +746,7 @@ impl DeviceManager {
vm_info: &VmInfo,
address_manager: &Arc<AddressManager>,
ioapic: &Arc<Mutex<ioapic::Ioapic>>,
virtio_devices: &mut Vec<(Box<dyn vm_virtio::VirtioDevice>, bool)>,
virtio_devices: &mut Vec<(Arc<Mutex<dyn vm_virtio::VirtioDevice>>, bool)>,
) -> DeviceManagerResult<Arc<Console>> {
let serial_config = vm_info.vm_cfg.lock().unwrap().serial.clone();
let serial_writer: Option<Box<dyn io::Write + Send>> = match serial_config.mode {
@ -809,7 +811,8 @@ impl DeviceManager {
vm_virtio::Console::new(writer, col, row, console_config.iommu)
.map_err(DeviceManagerError::CreateVirtioConsole)?;
virtio_devices.push((
Box::new(virtio_console_device) as Box<dyn vm_virtio::VirtioDevice>,
Arc::new(Mutex::new(virtio_console_device))
as Arc<Mutex<dyn vm_virtio::VirtioDevice>>,
false,
));
Some(console_input)
@ -830,9 +833,9 @@ impl DeviceManager {
address_manager: &Arc<AddressManager>,
mut mem_slots: &mut u32,
mmap_regions: &mut Vec<(*mut libc::c_void, usize)>,
) -> DeviceManagerResult<Vec<(Box<dyn vm_virtio::VirtioDevice>, bool)>> {
) -> DeviceManagerResult<Vec<(VirtioDeviceArc, bool)>> {
let mut allocator = address_manager.allocator.lock().unwrap();
let mut devices: Vec<(Box<dyn vm_virtio::VirtioDevice>, bool)> = Vec::new();
let mut devices: Vec<(Arc<Mutex<dyn vm_virtio::VirtioDevice>>, bool)> = Vec::new();
// Create "standard" virtio devices (net/block/rng)
devices.append(&mut DeviceManager::make_virtio_block_devices(vm_info)?);
@ -873,7 +876,7 @@ impl DeviceManager {
fn make_virtio_block_devices(
vm_info: &VmInfo,
) -> DeviceManagerResult<Vec<(Box<dyn vm_virtio::VirtioDevice>, bool)>> {
) -> DeviceManagerResult<Vec<(VirtioDeviceArc, bool)>> {
let mut devices = Vec::new();
if let Some(disk_list_cfg) = &vm_info.vm_cfg.lock().unwrap().disks {
@ -897,7 +900,8 @@ impl DeviceManager {
disk_cfg.iommu,
)
.map_err(DeviceManagerError::CreateVirtioBlock)?;
Box::new(dev) as Box<dyn vm_virtio::VirtioDevice>
Arc::new(Mutex::new(dev)) as Arc<Mutex<dyn vm_virtio::VirtioDevice>>
}
ImageType::Qcow2 => {
let qcow_img = QcowFile::from(raw_img)
@ -909,11 +913,11 @@ impl DeviceManager {
disk_cfg.iommu,
)
.map_err(DeviceManagerError::CreateVirtioBlock)?;
Box::new(dev) as Box<dyn vm_virtio::VirtioDevice>
Arc::new(Mutex::new(dev)) as Arc<Mutex<dyn vm_virtio::VirtioDevice>>
}
};
devices.push((block, disk_cfg.iommu));
devices.push((Arc::clone(&block), disk_cfg.iommu));
}
}
@ -922,7 +926,7 @@ impl DeviceManager {
fn make_virtio_net_devices(
vm_info: &VmInfo,
) -> DeviceManagerResult<Vec<(Box<dyn vm_virtio::VirtioDevice>, bool)>> {
) -> DeviceManagerResult<Vec<(VirtioDeviceArc, bool)>> {
let mut devices = Vec::new();
// Add virtio-net if required
@ -930,15 +934,24 @@ impl DeviceManager {
for net_cfg in net_list_cfg.iter() {
let virtio_net_device = if let Some(ref tap_if_name) = net_cfg.tap {
let tap = Tap::open_named(tap_if_name).map_err(DeviceManagerError::OpenTap)?;
vm_virtio::Net::new_with_tap(tap, Some(&net_cfg.mac), net_cfg.iommu)
.map_err(DeviceManagerError::CreateVirtioNet)?
Arc::new(Mutex::new(
vm_virtio::Net::new_with_tap(tap, Some(&net_cfg.mac), net_cfg.iommu)
.map_err(DeviceManagerError::CreateVirtioNet)?,
))
} else {
vm_virtio::Net::new(net_cfg.ip, net_cfg.mask, Some(&net_cfg.mac), net_cfg.iommu)
.map_err(DeviceManagerError::CreateVirtioNet)?
Arc::new(Mutex::new(
vm_virtio::Net::new(
net_cfg.ip,
net_cfg.mask,
Some(&net_cfg.mac),
net_cfg.iommu,
)
.map_err(DeviceManagerError::CreateVirtioNet)?,
))
};
devices.push((
Box::new(virtio_net_device) as Box<dyn vm_virtio::VirtioDevice>,
Arc::clone(&virtio_net_device) as Arc<Mutex<dyn vm_virtio::VirtioDevice>>,
net_cfg.iommu,
));
}
@ -949,16 +962,18 @@ impl DeviceManager {
fn make_virtio_rng_devices(
vm_info: &VmInfo,
) -> DeviceManagerResult<Vec<(Box<dyn vm_virtio::VirtioDevice>, bool)>> {
) -> DeviceManagerResult<Vec<(VirtioDeviceArc, bool)>> {
let mut devices = Vec::new();
// Add virtio-rng if required
let rng_config = vm_info.vm_cfg.lock().unwrap().rng.clone();
if let Some(rng_path) = rng_config.src.to_str() {
let virtio_rng_device = vm_virtio::Rng::new(rng_path, rng_config.iommu)
.map_err(DeviceManagerError::CreateVirtioRng)?;
let virtio_rng_device = Arc::new(Mutex::new(
vm_virtio::Rng::new(rng_path, rng_config.iommu)
.map_err(DeviceManagerError::CreateVirtioRng)?,
));
devices.push((
Box::new(virtio_rng_device) as Box<dyn vm_virtio::VirtioDevice>,
Arc::clone(&virtio_rng_device) as Arc<Mutex<dyn vm_virtio::VirtioDevice>>,
false,
));
}
@ -971,7 +986,7 @@ impl DeviceManager {
allocator: &mut SystemAllocator,
mem_slots: &mut u32,
mmap_regions: &mut Vec<(*mut libc::c_void, usize)>,
) -> DeviceManagerResult<Vec<(Box<dyn vm_virtio::VirtioDevice>, bool)>> {
) -> DeviceManagerResult<Vec<(VirtioDeviceArc, bool)>> {
let mut devices = Vec::new();
// Add virtio-fs if required
if let Some(fs_list_cfg) = &vm_info.vm_cfg.lock().unwrap().fs {
@ -1036,17 +1051,19 @@ impl DeviceManager {
None
};
let virtio_fs_device = vm_virtio::vhost_user::Fs::new(
fs_sock,
&fs_cfg.tag,
fs_cfg.num_queues,
fs_cfg.queue_size,
cache,
)
.map_err(DeviceManagerError::CreateVirtioFs)?;
let virtio_fs_device = Arc::new(Mutex::new(
vm_virtio::vhost_user::Fs::new(
fs_sock,
&fs_cfg.tag,
fs_cfg.num_queues,
fs_cfg.queue_size,
cache,
)
.map_err(DeviceManagerError::CreateVirtioFs)?,
));
devices.push((
Box::new(virtio_fs_device) as Box<dyn vm_virtio::VirtioDevice>,
Arc::clone(&virtio_fs_device) as Arc<Mutex<dyn vm_virtio::VirtioDevice>>,
false,
));
}
@ -1061,7 +1078,7 @@ impl DeviceManager {
allocator: &mut SystemAllocator,
mem_slots: &mut u32,
mmap_regions: &mut Vec<(*mut libc::c_void, usize)>,
) -> DeviceManagerResult<Vec<(Box<dyn vm_virtio::VirtioDevice>, bool)>> {
) -> DeviceManagerResult<Vec<(VirtioDeviceArc, bool)>> {
let mut devices = Vec::new();
// Add virtio-pmem if required
if let Some(pmem_list_cfg) = &vm_info.vm_cfg.lock().unwrap().pmem {
@ -1143,12 +1160,13 @@ impl DeviceManager {
// Increment the KVM slot number
*mem_slots += 1;
let virtio_pmem_device =
let virtio_pmem_device = Arc::new(Mutex::new(
vm_virtio::Pmem::new(file, pmem_guest_addr, size as GuestUsize, pmem_cfg.iommu)
.map_err(DeviceManagerError::CreateVirtioPmem)?;
.map_err(DeviceManagerError::CreateVirtioPmem)?,
));
devices.push((
Box::new(virtio_pmem_device) as Box<dyn vm_virtio::VirtioDevice>,
Arc::clone(&virtio_pmem_device) as Arc<Mutex<dyn vm_virtio::VirtioDevice>>,
false,
));
}
@ -1159,7 +1177,7 @@ impl DeviceManager {
fn make_virtio_vhost_user_net_devices(
vm_info: &VmInfo,
) -> DeviceManagerResult<Vec<(Box<dyn vm_virtio::VirtioDevice>, bool)>> {
) -> DeviceManagerResult<Vec<(VirtioDeviceArc, bool)>> {
let mut devices = Vec::new();
// Add vhost-user-net if required
if let Some(vhost_user_net_list_cfg) = &vm_info.vm_cfg.lock().unwrap().vhost_user_net {
@ -1169,12 +1187,13 @@ impl DeviceManager {
num_queues: vhost_user_net_cfg.num_queues,
queue_size: vhost_user_net_cfg.queue_size,
};
let vhost_user_net_device =
let vhost_user_net_device = Arc::new(Mutex::new(
vm_virtio::vhost_user::Net::new(vhost_user_net_cfg.mac, vu_cfg)
.map_err(DeviceManagerError::CreateVhostUserNet)?;
.map_err(DeviceManagerError::CreateVhostUserNet)?,
));
devices.push((
Box::new(vhost_user_net_device) as Box<dyn vm_virtio::VirtioDevice>,
Arc::clone(&vhost_user_net_device) as Arc<Mutex<dyn vm_virtio::VirtioDevice>>,
false,
));
}
@ -1185,7 +1204,7 @@ impl DeviceManager {
fn make_virtio_vhost_user_blk_devices(
vm_info: &VmInfo,
) -> DeviceManagerResult<Vec<(Box<dyn vm_virtio::VirtioDevice>, bool)>> {
) -> DeviceManagerResult<Vec<(VirtioDeviceArc, bool)>> {
let mut devices = Vec::new();
// Add vhost-user-blk if required
if let Some(vhost_user_blk_list_cfg) = &vm_info.vm_cfg.lock().unwrap().vhost_user_blk {
@ -1195,12 +1214,13 @@ impl DeviceManager {
num_queues: vhost_user_blk_cfg.num_queues,
queue_size: vhost_user_blk_cfg.queue_size,
};
let vhost_user_blk_device =
let vhost_user_blk_device = Arc::new(Mutex::new(
vm_virtio::vhost_user::Blk::new(vhost_user_blk_cfg.wce, vu_cfg)
.map_err(DeviceManagerError::CreateVhostUserBlk)?;
.map_err(DeviceManagerError::CreateVhostUserBlk)?,
));
devices.push((
Box::new(vhost_user_blk_device) as Box<dyn vm_virtio::VirtioDevice>,
Arc::clone(&vhost_user_blk_device) as Arc<Mutex<dyn vm_virtio::VirtioDevice>>,
false,
));
}
@ -1211,7 +1231,7 @@ impl DeviceManager {
fn make_virtio_vsock_devices(
vm_info: &VmInfo,
) -> DeviceManagerResult<Vec<(Box<dyn vm_virtio::VirtioDevice>, bool)>> {
) -> DeviceManagerResult<Vec<(VirtioDeviceArc, bool)>> {
let mut devices = Vec::new();
// Add vsock if required
if let Some(vsock_list_cfg) = &vm_info.vm_cfg.lock().unwrap().vsock {
@ -1224,11 +1244,13 @@ impl DeviceManager {
vm_virtio::vsock::VsockUnixBackend::new(vsock_cfg.cid, socket_path.to_string())
.map_err(DeviceManagerError::CreateVsockBackend)?;
let vsock_device = vm_virtio::Vsock::new(vsock_cfg.cid, backend, vsock_cfg.iommu)
.map_err(DeviceManagerError::CreateVirtioVsock)?;
let vsock_device = Arc::new(Mutex::new(
vm_virtio::Vsock::new(vsock_cfg.cid, backend, vsock_cfg.iommu)
.map_err(DeviceManagerError::CreateVirtioVsock)?,
));
devices.push((
Box::new(vsock_device) as Box<dyn vm_virtio::VirtioDevice>,
Arc::clone(&vsock_device) as Arc<Mutex<dyn vm_virtio::VirtioDevice>>,
false,
));
}
@ -1337,7 +1359,7 @@ impl DeviceManager {
#[cfg(feature = "pci_support")]
#[allow(clippy::too_many_arguments)]
fn add_virtio_pci_device(
virtio_device: Box<dyn vm_virtio::VirtioDevice>,
virtio_device: Arc<Mutex<dyn vm_virtio::VirtioDevice>>,
memory: &Arc<RwLock<GuestMemoryMmap>>,
address_manager: &Arc<AddressManager>,
vm_fd: &Arc<VmFd>,
@ -1347,7 +1369,7 @@ impl DeviceManager {
// Allows support for one MSI-X vector per queue. It also adds 1
// as we need to take into account the dedicated vector to notify
// about a virtio config change.
let msix_num = (virtio_device.queue_max_sizes().len() + 1) as u16;
let msix_num = (virtio_device.lock().unwrap().queue_max_sizes().len() + 1) as u16;
// We need to shift the device id since the 3 first bits are dedicated
// to the PCI function, and we know we don't do multifunction.
@ -1453,7 +1475,7 @@ impl DeviceManager {
#[allow(clippy::too_many_arguments)]
#[cfg(feature = "mmio_support")]
fn add_virtio_mmio_device(
virtio_device: Box<dyn vm_virtio::VirtioDevice>,
virtio_device: Arc<Mutex<dyn vm_virtio::VirtioDevice>>,
memory: &Arc<RwLock<GuestMemoryMmap>>,
address_manager: &Arc<AddressManager>,
vm_fd: &Arc<VmFd>,