From aeeae661fc338a422f19316eddfb2d773708425c Mon Sep 17 00:00:00 2001
From: Rob Bradford
Date: Tue, 28 Jan 2020 11:43:15 +0000
Subject: [PATCH] vmm: Support vhost-user-block via "--disks"

Add a socket and vhost_user parameter to this option so that the same
configuration option can be used for both virtio-block and
vhost-user-block. For now it is necessary to specify both vhost_user
and socket parameters as auto activation is not yet implemented. The
wce parameter for supporting "Write Cache Enabling" is also added to
the disk configuration.

The original command line parameter is still supported for now and
will be removed in a future release.

Signed-off-by: Rob Bradford
---
 src/main.rs               |   4 +-
 vmm/src/config.rs         |  44 +++++++++++++
 vmm/src/device_manager.rs | 135 ++++++++++++++++++++++----------------
 3 files changed, 126 insertions(+), 57 deletions(-)

diff --git a/src/main.rs b/src/main.rs
index 0d07350cd..5c5c01e92 100755
--- a/src/main.rs
+++ b/src/main.rs
@@ -124,7 +124,9 @@ fn create_app<'a, 'b>(
                     "Disk parameters \"path=<disk_image_path>,\
                      readonly=on|off,iommu=on|off,\
                      num_queues=<number_of_queues>,\
-                     queue_size=<size_of_each_queue>\"",
+                     queue_size=<size_of_each_queue>,
+                     vhost_user=<vhost_user_enable>,socket=<vhost_user_socket_path>,
+                     wce=<true|false (default true)>\"",
                 )
                 .takes_value(true)
                 .min_values(1)
diff --git a/vmm/src/config.rs b/vmm/src/config.rs
index 7e37562f2..7d1fcb622 100644
--- a/vmm/src/config.rs
+++ b/vmm/src/config.rs
@@ -43,6 +43,12 @@ pub enum Error {
     ParseDiskNumQueuesParam(std::num::ParseIntError),
     /// Failed parsing disk queue size parameter.
     ParseDiskQueueSizeParam(std::num::ParseIntError),
+    /// Failed to parse vhost parameters
+    ParseDiskVhostParam(std::str::ParseBoolError),
+    /// Need a vhost socket
+    ParseDiskVhostSocketRequired,
+    /// Failed parsing disk wce parameter.
+    ParseDiskWceParam(std::str::ParseBoolError),
     /// Failed parsing random number generator parameters.
     ParseRngParams,
     /// Failed parsing network ip parameter.
@@ -356,6 +362,11 @@ pub struct DiskConfig {
     pub num_queues: usize,
     #[serde(default = "default_diskconfig_queue_size")]
     pub queue_size: u16,
+    #[serde(default)]
+    pub vhost_user: bool,
+    pub vhost_socket: Option<String>,
+    #[serde(default = "default_diskconfig_wce")]
+    pub wce: bool,
 }
 
 fn default_diskconfig_num_queues() -> usize {
@@ -366,6 +377,10 @@ fn default_diskconfig_queue_size() -> u16 {
     DEFAULT_QUEUE_SIZE_VUBLK
 }
 
+fn default_diskconfig_wce() -> bool {
+    true
+}
+
 impl DiskConfig {
     pub fn parse(disk: &str) -> Result<Self> {
         // Split the parameters based on the comma delimiter
@@ -377,6 +392,9 @@ impl DiskConfig {
         let mut iommu_str: &str = "";
         let mut num_queues_str: &str = "";
         let mut queue_size_str: &str = "";
+        let mut vhost_socket_str: &str = "";
+        let mut vhost_user_str: &str = "";
+        let mut wce_str: &str = "";
 
         for param in params_list.iter() {
             if param.starts_with("path=") {
@@ -391,11 +409,20 @@ impl DiskConfig {
                 num_queues_str = &param[11..];
             } else if param.starts_with("queue_size=") {
                 queue_size_str = &param[11..];
+            } else if param.starts_with("vhost_user=") {
+                vhost_user_str = &param[11..];
+            } else if param.starts_with("socket=") {
+                vhost_socket_str = &param[7..];
+            } else if param.starts_with("wce=") {
+                wce_str = &param[4..];
             }
         }
 
         let mut num_queues: usize = default_diskconfig_num_queues();
         let mut queue_size: u16 = default_diskconfig_queue_size();
+        let mut vhost_user = false;
+        let mut vhost_socket = None;
+        let mut wce: bool = default_diskconfig_wce();
 
         if !num_queues_str.is_empty() {
             num_queues = num_queues_str
@@ -407,6 +434,20 @@ impl DiskConfig {
                 .parse()
                 .map_err(Error::ParseDiskQueueSizeParam)?;
         }
+        if !vhost_user_str.is_empty() {
+            vhost_user = vhost_user_str.parse().map_err(Error::ParseDiskVhostParam)?;
+        }
+        if !vhost_socket_str.is_empty() {
+            vhost_socket = Some(vhost_socket_str.to_owned());
+        }
+        if !wce_str.is_empty() {
+            wce = wce_str.parse().map_err(Error::ParseDiskWceParam)?;
+        }
+
+        // For now we require a socket if vhost-user is turned on
+        if vhost_user && vhost_socket.is_none() {
+            return Err(Error::ParseDiskVhostSocketRequired);
+        }
 
         Ok(DiskConfig {
             path: PathBuf::from(path_str),
@@ -415,6 +456,9 @@ impl DiskConfig {
             iommu: parse_on_off(iommu_str)?,
             num_queues,
             queue_size,
+            vhost_socket,
+            vhost_user,
+            wce,
         })
     }
 }
diff --git a/vmm/src/device_manager.rs b/vmm/src/device_manager.rs
index 53800677a..494342737 100644
--- a/vmm/src/device_manager.rs
+++ b/vmm/src/device_manager.rs
@@ -969,63 +969,86 @@ impl DeviceManager {
 
         if let Some(disk_list_cfg) = &vm_info.vm_cfg.lock().unwrap().disks {
             for disk_cfg in disk_list_cfg.iter() {
-                let mut options = OpenOptions::new();
-                options.read(true);
-                options.write(!disk_cfg.readonly);
-                if disk_cfg.direct {
-                    options.custom_flags(libc::O_DIRECT);
+                if disk_cfg.vhost_user {
+                    let vu_cfg = VhostUserConfig {
+                        sock: disk_cfg.vhost_socket.clone().unwrap(),
+                        num_queues: disk_cfg.num_queues,
+                        queue_size: disk_cfg.queue_size,
+                    };
+                    let vhost_user_block_device = Arc::new(Mutex::new(
+                        vm_virtio::vhost_user::Blk::new(disk_cfg.wce, vu_cfg)
+                            .map_err(DeviceManagerError::CreateVhostUserBlk)?,
+                    ));
+
+                    devices.push((
+                        Arc::clone(&vhost_user_block_device)
+                            as Arc<Mutex<dyn vm_virtio::VirtioDevice>>,
+                        false,
+                    ));
+
+                    migratable_devices
+                        .push(Arc::clone(&vhost_user_block_device) as Arc<Mutex<dyn Migratable>>);
+                } else {
+                    let mut options = OpenOptions::new();
+                    options.read(true);
+                    options.write(!disk_cfg.readonly);
+                    if disk_cfg.direct {
+                        options.custom_flags(libc::O_DIRECT);
+                    }
+                    // Open block device path
+                    let image: File = options
+                        .open(&disk_cfg.path)
+                        .map_err(DeviceManagerError::Disk)?;
+
+                    let mut raw_img = vm_virtio::RawFile::new(image, disk_cfg.direct);
+
+                    let image_type = qcow::detect_image_type(&mut raw_img)
+                        .map_err(DeviceManagerError::DetectImageType)?;
+                    match image_type {
+                        ImageType::Raw => {
+                            let dev = vm_virtio::Block::new(
+                                raw_img,
+                                disk_cfg.path.clone(),
+                                disk_cfg.readonly,
+                                disk_cfg.iommu,
+                                disk_cfg.num_queues,
+                                disk_cfg.queue_size,
+                            )
+                            .map_err(DeviceManagerError::CreateVirtioBlock)?;
+
+                            let block = Arc::new(Mutex::new(dev));
+
+                            devices.push((
+                                Arc::clone(&block) as Arc<Mutex<dyn vm_virtio::VirtioDevice>>,
+                                disk_cfg.iommu,
+                            ));
+                            migratable_devices
+                                .push(Arc::clone(&block) as Arc<Mutex<dyn Migratable>>);
+                        }
+                        ImageType::Qcow2 => {
+                            let qcow_img = QcowFile::from(raw_img)
+                                .map_err(DeviceManagerError::QcowDeviceCreate)?;
+                            let dev = vm_virtio::Block::new(
+                                qcow_img,
+                                disk_cfg.path.clone(),
+                                disk_cfg.readonly,
+                                disk_cfg.iommu,
+                                disk_cfg.num_queues,
+                                disk_cfg.queue_size,
+                            )
+                            .map_err(DeviceManagerError::CreateVirtioBlock)?;
+
+                            let block = Arc::new(Mutex::new(dev));
+
+                            devices.push((
+                                Arc::clone(&block) as Arc<Mutex<dyn vm_virtio::VirtioDevice>>,
+                                disk_cfg.iommu,
+                            ));
+                            migratable_devices
+                                .push(Arc::clone(&block) as Arc<Mutex<dyn Migratable>>);
+                        }
+                    };
                 }
-                // Open block device path
-                let image: File = options
-                    .open(&disk_cfg.path)
-                    .map_err(DeviceManagerError::Disk)?;
-
-                let mut raw_img = vm_virtio::RawFile::new(image, disk_cfg.direct);
-
-                let image_type = qcow::detect_image_type(&mut raw_img)
-                    .map_err(DeviceManagerError::DetectImageType)?;
-                match image_type {
-                    ImageType::Raw => {
-                        let dev = vm_virtio::Block::new(
-                            raw_img,
-                            disk_cfg.path.clone(),
-                            disk_cfg.readonly,
-                            disk_cfg.iommu,
-                            disk_cfg.num_queues,
-                            disk_cfg.queue_size,
-                        )
-                        .map_err(DeviceManagerError::CreateVirtioBlock)?;
-
-                        let block = Arc::new(Mutex::new(dev));
-
-                        devices.push((
-                            Arc::clone(&block) as Arc<Mutex<dyn vm_virtio::VirtioDevice>>,
-                            disk_cfg.iommu,
-                        ));
-                        migratable_devices.push(Arc::clone(&block) as Arc<Mutex<dyn Migratable>>);
-                    }
-                    ImageType::Qcow2 => {
-                        let qcow_img = QcowFile::from(raw_img)
-                            .map_err(DeviceManagerError::QcowDeviceCreate)?;
-                        let dev = vm_virtio::Block::new(
-                            qcow_img,
-                            disk_cfg.path.clone(),
-                            disk_cfg.readonly,
-                            disk_cfg.iommu,
-                            disk_cfg.num_queues,
-                            disk_cfg.queue_size,
-                        )
-                        .map_err(DeviceManagerError::CreateVirtioBlock)?;
-
-                        let block = Arc::new(Mutex::new(dev));
-
-                        devices.push((
-                            Arc::clone(&block) as Arc<Mutex<dyn vm_virtio::VirtioDevice>>,
-                            disk_cfg.iommu,
-                        ));
-                        migratable_devices.push(Arc::clone(&block) as Arc<Mutex<dyn Migratable>>);
-                    }
-                };
             }
         }
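
Usage sketch (not part of the patch): the snippet below exercises the
DiskConfig::parse() changes above for both the vhost-user and plain
virtio-block cases. The image and socket paths are placeholder values,
and driving vmm::config from a standalone binary is assumed purely for
illustration.

    // Illustrative only: shows how the new disk parameters are parsed.
    // Placeholder paths; requires the vmm crate from this repository.
    use vmm::config::DiskConfig;

    fn main() {
        // vhost-user-block: vhost_user=true currently requires socket=<path>.
        let vu = DiskConfig::parse(
            "path=/tmp/disk.raw,vhost_user=true,socket=/tmp/vublk.sock,wce=false",
        )
        .unwrap();
        assert!(vu.vhost_user);
        assert_eq!(vu.vhost_socket.as_deref(), Some("/tmp/vublk.sock"));
        assert!(!vu.wce);

        // Plain virtio-block: vhost_user defaults to false and wce to true.
        let blk = DiskConfig::parse("path=/tmp/disk.raw,num_queues=2,queue_size=256").unwrap();
        assert!(!blk.vhost_user);
        assert!(blk.wce);
    }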