vmm: Add support for spawning vhost-user-block backend

If no socket is supplied when enabling "vhost_user=true" on "--disk",
follow the "exe" path in the /proc entry for this process and launch the
block backend (via the vmm_path field).

Signed-off-by: Rob Bradford <robert.bradford@intel.com>
Rob Bradford 2020-02-04 16:44:12 +00:00
parent 4d60ef59bc
commit 1f6cbad01a
2 changed files with 40 additions and 10 deletions
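
The "exe" path mentioned in the commit message is the /proc/self/exe symlink, which points back at the running cloud-hypervisor binary. How vmm_path is actually populated is outside this diff, so the helper below is only a hedged sketch of that idea (the function name is hypothetical), reading /proc/self/exe with std::env::current_exe() as a fallback:

use std::{fs, io, path::PathBuf};

// Sketch only: resolve the path of the currently running VMM binary so it
// can later be re-invoked as a vhost-user backend. On Linux, /proc/self/exe
// is a symlink to that binary; std::env::current_exe() resolves the same
// thing portably. The helper name is an assumption, not part of this diff.
fn resolve_vmm_path() -> io::Result<PathBuf> {
    fs::read_link("/proc/self/exe").or_else(|_| std::env::current_exe())
}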


@@ -45,8 +45,6 @@ pub enum Error {
     ParseDiskQueueSizeParam(std::num::ParseIntError),
     /// Failed to parse vhost parameters
     ParseDiskVhostParam(std::str::ParseBoolError),
-    /// Need a vhost socket
-    ParseDiskVhostSocketRequired,
     /// Failed parsing disk wce parameter.
     ParseDiskWceParam(std::str::ParseBoolError),
     /// Failed parsing random number generator parameters.
@@ -447,11 +445,6 @@ impl DiskConfig {
             wce = wce_str.parse().map_err(Error::ParseDiskWceParam)?;
         }

-        // For now we require a socket if vhost-user is turned on
-        if vhost_user && vhost_socket.is_none() {
-            return Err(Error::ParseDiskVhostSocketRequired);
-        }
-
         Ok(DiskConfig {
             path: PathBuf::from(path_str),
             readonly: parse_on_off(readonly_str)?,
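
With the ParseDiskVhostSocketRequired variant and its check removed, a vhost-user disk given without an explicit socket now parses cleanly and simply leaves vhost_socket empty; the DeviceManager fills it in later by spawning a backend itself. A rough illustration, assuming DiskConfig::parse keeps the string-based signature this hunk is editing:

#[test]
fn vhost_user_disk_without_socket_parses() {
    // Illustrative only: the parameter keys follow the "--disk" syntax from
    // the commit message; the parse() name and signature are assumptions.
    let disk = DiskConfig::parse("path=/path/to/disk.img,vhost_user=true").unwrap();
    assert!(disk.vhost_user);
    // No socket requested: it stays None until the DeviceManager spawns a
    // backend and substitutes its own temporary socket path.
    assert!(disk.vhost_socket.is_none());
}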


@@ -12,7 +12,7 @@
 extern crate vm_device;

 use crate::config::ConsoleOutputMode;
-use crate::config::{NetConfig, VmConfig};
+use crate::config::{DiskConfig, NetConfig, VmConfig};
 use crate::interrupt::{
     KvmLegacyUserspaceInterruptManager, KvmMsiInterruptManager, KvmRoutingEntry,
 };
@@ -204,6 +204,9 @@ pub enum DeviceManagerError {
     /// Failed to spawn the network backend
     SpawnNetBackend(io::Error),
+
+    /// Failed to spawn the block backend
+    SpawnBlockBackend(io::Error),
 }

 pub type DeviceManagerResult<T> = result::Result<T, DeviceManagerError>;
@@ -863,14 +866,48 @@ impl DeviceManager {
         Ok(devices)
     }

+    /// Launch block backend
+    fn start_block_backend(&mut self, disk_cfg: &DiskConfig) -> DeviceManagerResult<String> {
+        let _socket_file = NamedTempFile::new().map_err(DeviceManagerError::CreateSocketFile)?;
+        let sock = _socket_file.path().to_str().unwrap().to_owned();
+
+        let child = std::process::Command::new(&self.vmm_path)
+            .args(&[
+                "--block-backend",
+                &format!(
+                    "image={},sock={},num_queues={},queue_size={}",
+                    disk_cfg.path.to_str().unwrap(),
+                    &sock,
+                    disk_cfg.num_queues,
+                    disk_cfg.queue_size
+                ),
+            ])
+            .spawn()
+            .map_err(DeviceManagerError::SpawnBlockBackend)?;
+
+        // The ActivatedBackend::drop() will automatically reap the child
+        self.vhost_user_backends.push(ActivatedBackend {
+            child,
+            _socket_file,
+        });
+
+        Ok(sock)
+    }
+
     fn make_virtio_block_devices(&mut self) -> DeviceManagerResult<Vec<(VirtioDeviceArc, bool)>> {
         let mut devices = Vec::new();

-        if let Some(disk_list_cfg) = &self.config.lock().unwrap().disks {
+        let block_devices = self.config.lock().unwrap().disks.clone();
+        if let Some(disk_list_cfg) = &block_devices {
             for disk_cfg in disk_list_cfg.iter() {
                 if disk_cfg.vhost_user {
+                    let sock = if let Some(sock) = disk_cfg.vhost_socket.clone() {
+                        sock
+                    } else {
+                        self.start_block_backend(disk_cfg)?
+                    };
                     let vu_cfg = VhostUserConfig {
-                        sock: disk_cfg.vhost_socket.clone().unwrap(),
+                        sock,
                         num_queues: disk_cfg.num_queues,
                         queue_size: disk_cfg.queue_size,
                     };
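
The "ActivatedBackend::drop() will automatically reap the child" comment refers to the helper already introduced for the vhost-user-net backend; its definition is not part of this hunk. A rough sketch of such a type, with field names taken from the push() above and a Drop body that is only an assumption based on that comment:

use std::process::Child;
use tempfile::NamedTempFile;

// Sketch only: ties the spawned backend process to the temporary file whose
// path is used as the vhost-user socket. The real definition lives outside
// this hunk; the Drop body below is an assumption based on the comment above.
struct ActivatedBackend {
    child: Child,
    _socket_file: NamedTempFile,
}

impl Drop for ActivatedBackend {
    fn drop(&mut self) {
        // Reap the backend child process so it does not linger as a zombie
        // once the DeviceManager (and its vhost_user_backends list) is gone.
        let _ = self.child.wait();
    }
}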