vmm: Add ability to add virtio-fs device post-boot

Adds a DeviceManager method `make_virtio_fs_device`, which creates a
single virtio-fs device, and modifies `make_virtio_fs_devices` to use it.

Implements the new `vm.add-fs` route.

Signed-off-by: Dean Sheather <dean@coder.com>
Author: Dean Sheather, 2020-04-14 19:21:24 +10:00
Committed by: Sebastien Boeuf
Parent: bb2139a408
Commit: c2abadc293
5 changed files with 213 additions and 94 deletions
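
For reference, the new route is driven with an HTTP PUT carrying a JSON-encoded `FsConfig` body; a missing or malformed body yields 400 Bad Request, an `ApiError` maps to 500 Internal Server Error, and success returns 204 No Content (see the handler below). A minimal sketch of such a body, built with `serde_json` — the field names assume `FsConfig` uses serde's default naming for the fields that `make_virtio_fs_device` reads, and the socket path and sizes are purely illustrative:

```rust
// Sketch only: construct a request body for the vm.add-fs route.
// Field names assume FsConfig derives Deserialize with default serde naming.
use serde_json::json;

fn main() {
    let body = json!({
        "tag": "myfs",                       // mount tag exposed to the guest
        "sock": "/var/run/virtiofsd.sock",   // vhost-user socket of the virtiofsd daemon
        "num_queues": 1,
        "queue_size": 1024,
        "dax": true,                         // enable the shared-memory (DAX) cache window
        "cache_size": 8_589_934_592u64       // 8 GiB cache window, 2 MiB aligned by the VMM
    });
    println!("{}", body);
}
```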

@@ -5,11 +5,11 @@
use crate::api::http::EndpointHandler;
use crate::api::{
vm_add_device, vm_add_disk, vm_add_net, vm_add_pmem, vm_boot, vm_create, vm_delete, vm_info,
vm_pause, vm_reboot, vm_remove_device, vm_resize, vm_restore, vm_resume, vm_shutdown,
vm_add_device, vm_add_disk, vm_add_fs, vm_add_net, vm_add_pmem, vm_boot, vm_create, vm_delete,
vm_info, vm_pause, vm_reboot, vm_remove_device, vm_resize, vm_restore, vm_resume, vm_shutdown,
vm_snapshot, vmm_ping, vmm_shutdown, ApiError, ApiRequest, ApiResult, DeviceConfig, DiskConfig,
NetConfig, PmemConfig, RestoreConfig, VmAction, VmConfig, VmRemoveDeviceData, VmResizeData,
VmSnapshotConfig,
FsConfig, NetConfig, PmemConfig, RestoreConfig, VmAction, VmConfig, VmRemoveDeviceData,
VmResizeData, VmSnapshotConfig,
};
use micro_http::{Body, Method, Request, Response, StatusCode, Version};
use serde_json::Error as SerdeError;
@@ -71,6 +71,9 @@ pub enum HttpError {
/// Could not add a disk to a VM
VmAddDisk(ApiError),
/// Could not add a fs to a VM
VmAddFs(ApiError),
/// Could not add a pmem device to a VM
VmAddPmem(ApiError),
@@ -494,12 +497,33 @@ impl EndpointHandler for VmAddFs {
fn handle_request(
&self,
req: &Request,
_api_notifier: EventFd,
_api_sender: Sender<ApiRequest>,
api_notifier: EventFd,
api_sender: Sender<ApiRequest>,
) -> Response {
match req.method() {
// Not implemented.
Method::Put => Response::new(Version::Http11, StatusCode::NotImplemented),
Method::Put => {
match &req.body {
Some(body) => {
// Deserialize into a FsConfig
let vm_add_fs_data: FsConfig = match serde_json::from_slice(body.raw())
.map_err(HttpError::SerdeJsonDeserialize)
{
Ok(config) => config,
Err(e) => return error_response(e, StatusCode::BadRequest),
};
// Call vm_add_fs()
match vm_add_fs(api_notifier, api_sender, Arc::new(vm_add_fs_data))
.map_err(HttpError::VmAddFs)
{
Ok(_) => Response::new(Version::Http11, StatusCode::NoContent),
Err(e) => error_response(e, StatusCode::InternalServerError),
}
}
None => Response::new(Version::Http11, StatusCode::BadRequest),
}
}
_ => Response::new(Version::Http11, StatusCode::BadRequest),
}
}

@@ -37,7 +37,9 @@ pub use self::http::start_http_thread;
pub mod http;
pub mod http_endpoint;
use crate::config::{DeviceConfig, DiskConfig, NetConfig, PmemConfig, RestoreConfig, VmConfig};
use crate::config::{
DeviceConfig, DiskConfig, FsConfig, NetConfig, PmemConfig, RestoreConfig, VmConfig,
};
use crate::vm::{Error as VmError, VmState};
use std::io;
use std::sync::mpsc::{channel, RecvError, SendError, Sender};
@@ -122,6 +124,9 @@ pub enum ApiError {
/// The disk could not be added to the VM.
VmAddDisk(VmError),
/// The fs could not be added to the VM.
VmAddFs(VmError),
/// The pmem device could not be added to the VM.
VmAddPmem(VmError),
@@ -230,6 +235,9 @@ pub enum ApiRequest {
/// Add a disk to the VM.
VmAddDisk(Arc<DiskConfig>, Sender<ApiResponse>),
/// Add a fs to the VM.
VmAddFs(Arc<FsConfig>, Sender<ApiResponse>),
/// Add a pmem device to the VM.
VmAddPmem(Arc<PmemConfig>, Sender<ApiResponse>),
@@ -484,6 +492,24 @@ pub fn vm_add_disk(
Ok(())
}
pub fn vm_add_fs(
api_evt: EventFd,
api_sender: Sender<ApiRequest>,
data: Arc<FsConfig>,
) -> ApiResult<()> {
let (response_sender, response_receiver) = channel();
// Send the VM add-fs request.
api_sender
.send(ApiRequest::VmAddFs(data, response_sender))
.map_err(ApiError::RequestSend)?;
api_evt.write(1).map_err(ApiError::EventFdWrite)?;
response_receiver.recv().map_err(ApiError::ResponseRecv)??;
Ok(())
}
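
All of the `vm_add_*` helpers, including the new `vm_add_fs`, share the same synchronization pattern: the request is queued on the `ApiRequest` channel, the API `EventFd` is written to wake the VMM's event loop, and the caller blocks on a nested response channel until the loop replies. A standalone sketch of that pattern, using an illustrative request type rather than the crate's own (assumes the `vmm-sys-util` crate):

```rust
// Standalone sketch of the send/kick/wait pattern used by vm_add_fs() and friends.
use std::sync::mpsc::{channel, Sender};
use std::thread;
use vmm_sys_util::eventfd::EventFd;

enum Request {
    AddFs(String, Sender<Result<(), String>>), // payload + nested response channel
}

fn main() {
    let api_evt = EventFd::new(0).unwrap(); // blocking eventfd
    let loop_evt = api_evt.try_clone().unwrap();
    let (req_tx, req_rx) = channel::<Request>();

    // Stand-in for the VMM control loop: wait for the kick, then drain the request.
    let vmm = thread::spawn(move || {
        loop_evt.read().unwrap();
        if let Ok(Request::AddFs(tag, resp_tx)) = req_rx.recv() {
            // ... device hotplug would happen here ...
            resp_tx.send(Ok(())).unwrap();
            println!("added fs with tag {}", tag);
        }
    });

    // Caller side, mirroring vm_add_fs(): send the request, kick the eventfd, wait.
    let (resp_tx, resp_rx) = channel();
    req_tx.send(Request::AddFs("myfs".into(), resp_tx)).unwrap();
    api_evt.write(1).unwrap();
    resp_rx.recv().unwrap().unwrap();
    vmm.join().unwrap();
}
```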
pub fn vm_add_pmem(
api_evt: EventFd,
api_sender: Sender<ApiRequest>,

@@ -14,7 +14,7 @@ extern crate vm_device;
use crate::config::ConsoleOutputMode;
#[cfg(feature = "pci_support")]
use crate::config::DeviceConfig;
use crate::config::{DiskConfig, NetConfig, PmemConfig, VmConfig};
use crate::config::{DiskConfig, FsConfig, NetConfig, PmemConfig, VmConfig};
use crate::interrupt::{
KvmLegacyUserspaceInterruptManager, KvmMsiInterruptManager, KvmRoutingEntry,
};
@@ -104,6 +104,9 @@ pub enum DeviceManagerError {
/// Cannot create virtio-fs device
CreateVirtioFs(vm_virtio::vhost_user::Error),
/// Virtio-fs device was created without a sock.
NoVirtioFsSock,
/// Cannot create vhost-user-blk device
CreateVhostUserBlk(vm_virtio::vhost_user::Error),
@@ -1414,93 +1417,97 @@ impl DeviceManager {
Ok(devices)
}
fn make_virtio_fs_device(
&mut self,
fs_cfg: &FsConfig,
) -> DeviceManagerResult<(VirtioDeviceArc, bool, Option<String>)> {
if let Some(fs_sock) = fs_cfg.sock.to_str() {
let cache = if fs_cfg.dax {
let fs_cache = fs_cfg.cache_size;
// The memory needs to be 2MiB aligned in order to support
// hugepages.
let fs_guest_addr = self
.address_manager
.allocator
.lock()
.unwrap()
.allocate_mmio_addresses(None, fs_cache as GuestUsize, Some(0x0020_0000))
.ok_or(DeviceManagerError::FsRangeAllocation)?;
let mmap_region = MmapRegion::build(
None,
fs_cache as usize,
libc::PROT_NONE,
libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
)
.map_err(DeviceManagerError::NewMmapRegion)?;
let host_addr: u64 = mmap_region.as_ptr() as u64;
let mem_slot = self
.memory_manager
.lock()
.unwrap()
.create_userspace_mapping(
fs_guest_addr.raw_value(),
fs_cache,
host_addr,
false,
false,
)
.map_err(DeviceManagerError::MemoryManager)?;
let mut region_list = Vec::new();
region_list.push(VirtioSharedMemory {
offset: 0,
len: fs_cache,
});
Some((
VirtioSharedMemoryList {
host_addr,
mem_slot,
addr: fs_guest_addr,
len: fs_cache as GuestUsize,
region_list,
},
mmap_region,
))
} else {
None
};
let virtio_fs_device = Arc::new(Mutex::new(
vm_virtio::vhost_user::Fs::new(
fs_sock,
&fs_cfg.tag,
fs_cfg.num_queues,
fs_cfg.queue_size,
cache,
)
.map_err(DeviceManagerError::CreateVirtioFs)?,
));
self.add_migratable_device(Arc::clone(&virtio_fs_device) as Arc<Mutex<dyn Migratable>>);
Ok((
Arc::clone(&virtio_fs_device) as VirtioDeviceArc,
false,
None,
))
} else {
Err(DeviceManagerError::NoVirtioFsSock)
}
}
fn make_virtio_fs_devices(
&mut self,
) -> DeviceManagerResult<Vec<(VirtioDeviceArc, bool, Option<String>)>> {
let mut devices = Vec::new();
// Add virtio-fs if required
if let Some(fs_list_cfg) = &self.config.lock().unwrap().fs {
let fs_devices = self.config.lock().unwrap().fs.clone();
if let Some(fs_list_cfg) = &fs_devices {
for fs_cfg in fs_list_cfg.iter() {
if let Some(fs_sock) = fs_cfg.sock.to_str() {
let cache = if fs_cfg.dax {
let fs_cache = fs_cfg.cache_size;
// The memory needs to be 2MiB aligned in order to support
// hugepages.
let fs_guest_addr = self
.address_manager
.allocator
.lock()
.unwrap()
.allocate_mmio_addresses(
None,
fs_cache as GuestUsize,
Some(0x0020_0000),
)
.ok_or(DeviceManagerError::FsRangeAllocation)?;
let mmap_region = MmapRegion::build(
None,
fs_cache as usize,
libc::PROT_NONE,
libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
)
.map_err(DeviceManagerError::NewMmapRegion)?;
let host_addr: u64 = mmap_region.as_ptr() as u64;
let mem_slot = self
.memory_manager
.lock()
.unwrap()
.create_userspace_mapping(
fs_guest_addr.raw_value(),
fs_cache,
host_addr,
false,
false,
)
.map_err(DeviceManagerError::MemoryManager)?;
let mut region_list = Vec::new();
region_list.push(VirtioSharedMemory {
offset: 0,
len: fs_cache,
});
Some((
VirtioSharedMemoryList {
host_addr,
mem_slot,
addr: fs_guest_addr,
len: fs_cache as GuestUsize,
region_list,
},
mmap_region,
))
} else {
None
};
let virtio_fs_device = Arc::new(Mutex::new(
vm_virtio::vhost_user::Fs::new(
fs_sock,
&fs_cfg.tag,
fs_cfg.num_queues,
fs_cfg.queue_size,
cache,
)
.map_err(DeviceManagerError::CreateVirtioFs)?,
));
devices.push((
Arc::clone(&virtio_fs_device) as VirtioDeviceArc,
false,
None,
));
let migratable = Arc::clone(&virtio_fs_device) as Arc<Mutex<dyn Migratable>>;
let id = migratable.lock().unwrap().id();
self.migratable_devices.push((id, migratable));
}
devices.push(self.make_virtio_fs_device(fs_cfg)?);
}
}
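
As the comment in `make_virtio_fs_device` notes, the DAX cache window is allocated on a 2 MiB boundary (the `Some(0x0020_0000)` alignment passed to `allocate_mmio_addresses`) so the region can be backed by hugepages. A standalone sketch of what that alignment argument guarantees — `align_up` here is an illustrative helper, not crate code:

```rust
// Align a guest-physical base up to a 2 MiB boundary, mirroring the
// Some(0x0020_0000) alignment requested for the virtio-fs DAX cache window.
const ALIGN_2MIB: u64 = 0x0020_0000;

fn align_up(addr: u64, align: u64) -> u64 {
    debug_assert!(align.is_power_of_two());
    (addr + align - 1) & !(align - 1)
}

fn main() {
    assert_eq!(align_up(0x1_0000_1000, ALIGN_2MIB), 0x1_0020_0000);
    assert_eq!(align_up(0x1_0020_0000, ALIGN_2MIB), 0x1_0020_0000); // already aligned
}
```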
@@ -2248,6 +2255,12 @@ impl DeviceManager {
self.hotplug_virtio_pci_device(device, iommu_attached, id)
}
#[cfg(feature = "pci_support")]
pub fn add_fs(&mut self, fs_cfg: &mut FsConfig) -> DeviceManagerResult<()> {
let (device, iommu_attached, id) = self.make_virtio_fs_device(fs_cfg)?;
self.hotplug_virtio_pci_device(device, iommu_attached, id)
}
#[cfg(feature = "pci_support")]
pub fn add_pmem(&mut self, pmem_cfg: &mut PmemConfig) -> DeviceManagerResult<()> {
let (device, iommu_attached, id) = self.make_virtio_pmem_device(pmem_cfg)?;

@@ -18,7 +18,9 @@ extern crate url;
extern crate vmm_sys_util;
use crate::api::{ApiError, ApiRequest, ApiResponse, ApiResponsePayload, VmInfo, VmmPingResponse};
use crate::config::{DeviceConfig, DiskConfig, NetConfig, PmemConfig, RestoreConfig, VmConfig};
use crate::config::{
DeviceConfig, DiskConfig, FsConfig, NetConfig, PmemConfig, RestoreConfig, VmConfig,
};
use crate::migration::{recv_vm_snapshot, vm_config_from_snapshot};
use crate::seccomp_filters::{get_seccomp_filter, Thread};
use crate::vm::{Error as VmError, Vm, VmState};
@@ -483,6 +485,19 @@ impl Vmm {
}
}
fn vm_add_fs(&mut self, fs_cfg: FsConfig) -> result::Result<(), VmError> {
if let Some(ref mut vm) = self.vm {
if let Err(e) = vm.add_fs(fs_cfg) {
error!("Error when adding new fs to the VM: {:?}", e);
Err(e)
} else {
Ok(())
}
} else {
Err(VmError::VmNotRunning)
}
}
fn vm_add_pmem(&mut self, pmem_cfg: PmemConfig) -> result::Result<(), VmError> {
if let Some(ref mut vm) = self.vm {
if let Err(e) = vm.add_pmem(pmem_cfg) {
@@ -701,6 +716,13 @@ impl Vmm {
.map(|_| ApiResponsePayload::Empty);
sender.send(response).map_err(Error::ApiResponseSend)?;
}
ApiRequest::VmAddFs(add_fs_data, sender) => {
let response = self
.vm_add_fs(add_fs_data.as_ref().clone())
.map_err(ApiError::VmAddFs)
.map(|_| ApiResponsePayload::Empty);
sender.send(response).map_err(Error::ApiResponseSend)?;
}
ApiRequest::VmAddPmem(add_pmem_data, sender) => {
let response = self
.vm_add_pmem(add_pmem_data.as_ref().clone())

@@ -26,7 +26,8 @@ extern crate vm_memory;
extern crate vm_virtio;
use crate::config::{
DeviceConfig, DiskConfig, HotplugMethod, NetConfig, PmemConfig, ValidationError, VmConfig,
DeviceConfig, DiskConfig, FsConfig, HotplugMethod, NetConfig, PmemConfig, ValidationError,
VmConfig,
};
use crate::cpu;
use crate::device_manager::{get_win_size, Console, DeviceManager, DeviceManagerError};
@@ -777,6 +778,39 @@ impl Vm {
}
}
pub fn add_fs(&mut self, mut _fs_cfg: FsConfig) -> Result<()> {
if cfg!(feature = "pci_support") {
#[cfg(feature = "pci_support")]
{
self.device_manager
.lock()
.unwrap()
.add_fs(&mut _fs_cfg)
.map_err(Error::DeviceManager)?;
// Update VmConfig by adding the new device. This is important to
// ensure the device would be created in case of a reboot.
{
let mut config = self.config.lock().unwrap();
if let Some(fs_config) = config.fs.as_mut() {
fs_config.push(_fs_cfg);
} else {
config.fs = Some(vec![_fs_cfg]);
}
}
self.device_manager
.lock()
.unwrap()
.notify_hotplug(HotPlugNotificationFlags::PCI_DEVICES_CHANGED)
.map_err(Error::DeviceManager)?;
}
Ok(())
} else {
Err(Error::NoPciSupport)
}
}
pub fn add_pmem(&mut self, mut _pmem_cfg: PmemConfig) -> Result<()> {
if cfg!(feature = "pci_support") {
#[cfg(feature = "pci_support")]