vmm: Support vhost-user-block via "--disks"

Add a socket and vhost_user parameter to this option so that the same
configuration option can be used for both virtio-block and
vhost-user-block. For now it is necessary to specify both the vhost_user
and socket parameters, as automatic activation is not yet implemented.
The wce parameter, supporting "Write Cache Enable" (WCE), is also added
to the disk configuration.

The original command line parameter is still supported for now and will
be removed in a future release.

Signed-off-by: Rob Bradford <robert.bradford@intel.com>
This commit is contained in:
Rob Bradford 2020-01-28 11:43:15 +00:00
parent 362942fa6e
commit aeeae661fc
3 changed files with 126 additions and 57 deletions

View File

@ -124,7 +124,9 @@ fn create_app<'a, 'b>(
"Disk parameters \"path=<disk_image_path>,\
readonly=on|off,iommu=on|off,\
num_queues=<number_of_queues>,\
queue_size=<size_of_each_queue>\"",
queue_size=<size_of_each_queue>,
vhost_user=<vhost_user_enable>,socket=<vhost_user_socket_path>,
wce=<true|false, default true>\"",
)
.takes_value(true)
.min_values(1)

View File

@ -43,6 +43,12 @@ pub enum Error {
ParseDiskNumQueuesParam(std::num::ParseIntError),
/// Failed parsing disk queue size parameter.
ParseDiskQueueSizeParam(std::num::ParseIntError),
/// Failed to parse vhost parameters
ParseDiskVhostParam(std::str::ParseBoolError),
/// Need a vhost socket
ParseDiskVhostSocketRequired,
/// Failed parsing disk wce parameter.
ParseDiskWceParam(std::str::ParseBoolError),
/// Failed parsing random number generator parameters.
ParseRngParams,
/// Failed parsing network ip parameter.
@ -356,6 +362,11 @@ pub struct DiskConfig {
pub num_queues: usize,
#[serde(default = "default_diskconfig_queue_size")]
pub queue_size: u16,
#[serde(default)]
pub vhost_user: bool,
pub vhost_socket: Option<String>,
#[serde(default = "default_diskconfig_wce")]
pub wce: bool,
}
fn default_diskconfig_num_queues() -> usize {
@ -366,6 +377,10 @@ fn default_diskconfig_queue_size() -> u16 {
DEFAULT_QUEUE_SIZE_VUBLK
}
/// Serde default for the disk `wce` ("Write Cache Enable") flag:
/// the write cache is enabled unless explicitly turned off.
fn default_diskconfig_wce() -> bool {
    true
}
impl DiskConfig {
pub fn parse(disk: &str) -> Result<Self> {
// Split the parameters based on the comma delimiter
@ -377,6 +392,9 @@ impl DiskConfig {
let mut iommu_str: &str = "";
let mut num_queues_str: &str = "";
let mut queue_size_str: &str = "";
let mut vhost_socket_str: &str = "";
let mut vhost_user_str: &str = "";
let mut wce_str: &str = "";
for param in params_list.iter() {
if param.starts_with("path=") {
@ -391,11 +409,20 @@ impl DiskConfig {
num_queues_str = &param[11..];
} else if param.starts_with("queue_size=") {
queue_size_str = &param[11..];
} else if param.starts_with("vhost_user=") {
vhost_user_str = &param[11..];
} else if param.starts_with("socket=") {
vhost_socket_str = &param[7..];
} else if param.starts_with("wce=") {
wce_str = &param[4..];
}
}
let mut num_queues: usize = default_diskconfig_num_queues();
let mut queue_size: u16 = default_diskconfig_queue_size();
let mut vhost_user = false;
let mut vhost_socket = None;
let mut wce: bool = default_diskconfig_wce();
if !num_queues_str.is_empty() {
num_queues = num_queues_str
@ -407,6 +434,20 @@ impl DiskConfig {
.parse()
.map_err(Error::ParseDiskQueueSizeParam)?;
}
if !vhost_user_str.is_empty() {
vhost_user = vhost_user_str.parse().map_err(Error::ParseDiskVhostParam)?;
}
if !vhost_socket_str.is_empty() {
vhost_socket = Some(vhost_socket_str.to_owned());
}
if !wce_str.is_empty() {
wce = wce_str.parse().map_err(Error::ParseDiskWceParam)?;
}
// For now we require a socket if vhost-user is turned on
if vhost_user && vhost_socket.is_none() {
return Err(Error::ParseDiskVhostSocketRequired);
}
Ok(DiskConfig {
path: PathBuf::from(path_str),
@ -415,6 +456,9 @@ impl DiskConfig {
iommu: parse_on_off(iommu_str)?,
num_queues,
queue_size,
vhost_socket,
vhost_user,
wce,
})
}
}

View File

@ -969,63 +969,86 @@ impl DeviceManager {
if let Some(disk_list_cfg) = &vm_info.vm_cfg.lock().unwrap().disks {
for disk_cfg in disk_list_cfg.iter() {
let mut options = OpenOptions::new();
options.read(true);
options.write(!disk_cfg.readonly);
if disk_cfg.direct {
options.custom_flags(libc::O_DIRECT);
if disk_cfg.vhost_user {
let vu_cfg = VhostUserConfig {
sock: disk_cfg.vhost_socket.clone().unwrap(),
num_queues: disk_cfg.num_queues,
queue_size: disk_cfg.queue_size,
};
let vhost_user_block_device = Arc::new(Mutex::new(
vm_virtio::vhost_user::Blk::new(disk_cfg.wce, vu_cfg)
.map_err(DeviceManagerError::CreateVhostUserBlk)?,
));
devices.push((
Arc::clone(&vhost_user_block_device)
as Arc<Mutex<dyn vm_virtio::VirtioDevice>>,
false,
));
migratable_devices
.push(Arc::clone(&vhost_user_block_device) as Arc<Mutex<dyn Migratable>>);
} else {
let mut options = OpenOptions::new();
options.read(true);
options.write(!disk_cfg.readonly);
if disk_cfg.direct {
options.custom_flags(libc::O_DIRECT);
}
// Open block device path
let image: File = options
.open(&disk_cfg.path)
.map_err(DeviceManagerError::Disk)?;
let mut raw_img = vm_virtio::RawFile::new(image, disk_cfg.direct);
let image_type = qcow::detect_image_type(&mut raw_img)
.map_err(DeviceManagerError::DetectImageType)?;
match image_type {
ImageType::Raw => {
let dev = vm_virtio::Block::new(
raw_img,
disk_cfg.path.clone(),
disk_cfg.readonly,
disk_cfg.iommu,
disk_cfg.num_queues,
disk_cfg.queue_size,
)
.map_err(DeviceManagerError::CreateVirtioBlock)?;
let block = Arc::new(Mutex::new(dev));
devices.push((
Arc::clone(&block) as Arc<Mutex<dyn vm_virtio::VirtioDevice>>,
disk_cfg.iommu,
));
migratable_devices
.push(Arc::clone(&block) as Arc<Mutex<dyn Migratable>>);
}
ImageType::Qcow2 => {
let qcow_img = QcowFile::from(raw_img)
.map_err(DeviceManagerError::QcowDeviceCreate)?;
let dev = vm_virtio::Block::new(
qcow_img,
disk_cfg.path.clone(),
disk_cfg.readonly,
disk_cfg.iommu,
disk_cfg.num_queues,
disk_cfg.queue_size,
)
.map_err(DeviceManagerError::CreateVirtioBlock)?;
let block = Arc::new(Mutex::new(dev));
devices.push((
Arc::clone(&block) as Arc<Mutex<dyn vm_virtio::VirtioDevice>>,
disk_cfg.iommu,
));
migratable_devices
.push(Arc::clone(&block) as Arc<Mutex<dyn Migratable>>);
}
};
}
// Open block device path
let image: File = options
.open(&disk_cfg.path)
.map_err(DeviceManagerError::Disk)?;
let mut raw_img = vm_virtio::RawFile::new(image, disk_cfg.direct);
let image_type = qcow::detect_image_type(&mut raw_img)
.map_err(DeviceManagerError::DetectImageType)?;
match image_type {
ImageType::Raw => {
let dev = vm_virtio::Block::new(
raw_img,
disk_cfg.path.clone(),
disk_cfg.readonly,
disk_cfg.iommu,
disk_cfg.num_queues,
disk_cfg.queue_size,
)
.map_err(DeviceManagerError::CreateVirtioBlock)?;
let block = Arc::new(Mutex::new(dev));
devices.push((
Arc::clone(&block) as Arc<Mutex<dyn vm_virtio::VirtioDevice>>,
disk_cfg.iommu,
));
migratable_devices.push(Arc::clone(&block) as Arc<Mutex<dyn Migratable>>);
}
ImageType::Qcow2 => {
let qcow_img = QcowFile::from(raw_img)
.map_err(DeviceManagerError::QcowDeviceCreate)?;
let dev = vm_virtio::Block::new(
qcow_img,
disk_cfg.path.clone(),
disk_cfg.readonly,
disk_cfg.iommu,
disk_cfg.num_queues,
disk_cfg.queue_size,
)
.map_err(DeviceManagerError::CreateVirtioBlock)?;
let block = Arc::new(Mutex::new(dev));
devices.push((
Arc::clone(&block) as Arc<Mutex<dyn vm_virtio::VirtioDevice>>,
disk_cfg.iommu,
));
migratable_devices.push(Arc::clone(&block) as Arc<Mutex<dyn Migratable>>);
}
};
}
}