2019-05-21 18:54:53 +00:00
|
|
|
// Copyright 2019 Intel Corporation. All Rights Reserved.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0
|
|
|
|
|
2021-03-05 13:32:57 +00:00
|
|
|
use super::vu_common_ctrl::{
|
2021-06-07 06:27:06 +00:00
|
|
|
add_memory_region, connect_vhost_user, negotiate_features_vhost_user, reset_vhost_user,
|
|
|
|
setup_vhost_user, update_mem_table,
|
2021-03-05 13:32:57 +00:00
|
|
|
};
|
2021-05-21 16:26:00 +00:00
|
|
|
use super::{Error, Result, DEFAULT_VIRTIO_FEATURES};
|
2020-08-18 04:30:53 +00:00
|
|
|
use crate::seccomp_filters::{get_seccomp_filter, Thread};
|
2021-06-09 14:33:34 +00:00
|
|
|
use crate::vhost_user::VhostUserEpollHandler;
|
2019-08-05 20:04:24 +00:00
|
|
|
use crate::{
|
2020-09-03 09:37:36 +00:00
|
|
|
ActivateError, ActivateResult, Queue, UserspaceMapping, VirtioCommon, VirtioDevice,
|
2021-05-21 16:26:00 +00:00
|
|
|
VirtioDeviceType, VirtioInterrupt, VirtioSharedMemoryList,
|
2019-08-05 20:04:24 +00:00
|
|
|
};
|
2021-06-02 19:08:04 +00:00
|
|
|
use crate::{GuestMemoryMmap, GuestRegionMmap, MmapRegion};
|
2020-09-04 08:37:37 +00:00
|
|
|
use libc::{self, c_void, off64_t, pread64, pwrite64};
|
2020-08-18 04:30:53 +00:00
|
|
|
use seccomp::{SeccompAction, SeccompFilter};
|
2019-05-21 18:54:53 +00:00
|
|
|
use std::io;
|
2021-03-05 13:32:57 +00:00
|
|
|
use std::ops::Deref;
|
2020-04-20 15:24:31 +00:00
|
|
|
use std::os::unix::io::{AsRawFd, RawFd};
|
2019-12-02 20:08:53 +00:00
|
|
|
use std::result;
|
2021-06-07 06:27:06 +00:00
|
|
|
use std::sync::{Arc, Barrier, Mutex};
|
2019-05-21 18:54:53 +00:00
|
|
|
use std::thread;
|
2021-02-25 09:46:27 +00:00
|
|
|
use vhost::vhost_user::message::{
|
2020-03-14 05:35:31 +00:00
|
|
|
VhostUserFSSlaveMsg, VhostUserFSSlaveMsgFlags, VhostUserProtocolFeatures,
|
|
|
|
VhostUserVirtioFeatures, VHOST_USER_FS_SLAVE_ENTRIES,
|
2019-08-06 01:28:59 +00:00
|
|
|
};
|
2021-02-25 09:46:27 +00:00
|
|
|
use vhost::vhost_user::{
|
2019-08-06 01:28:59 +00:00
|
|
|
HandlerResult, Master, MasterReqHandler, VhostUserMaster, VhostUserMasterReqHandler,
|
|
|
|
};
|
2020-03-14 05:35:31 +00:00
|
|
|
use vm_memory::{
|
2020-04-22 23:10:31 +00:00
|
|
|
Address, ByteValued, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic,
|
2020-03-14 05:35:31 +00:00
|
|
|
};
|
2019-05-01 16:59:51 +00:00
|
|
|
use vm_migration::{Migratable, MigratableError, Pausable, Snapshottable, Transportable};
|
2019-08-02 14:23:52 +00:00
|
|
|
use vmm_sys_util::eventfd::EventFd;
|
2019-05-21 18:54:53 +00:00
|
|
|
|
|
|
|
// Number of queues in addition to the request queues: the total queue count
// is this offset plus the number of requested queues (see `Fs::new`).
const NUM_QUEUE_OFFSET: usize = 1;

// Queue count assumed when the backend does not advertise the MQ
// vhost-user protocol feature; also used as the minimum queue count.
const DEFAULT_QUEUE_NUMBER: usize = 2;
|
2019-05-21 18:54:53 +00:00
|
|
|
|
2019-08-06 01:28:59 +00:00
|
|
|
// Handler for requests initiated by the vhost-user backend (the "slave"),
// which target the DAX shared memory cache window of the virtio-fs device.
struct SlaveReqHandler {
    // Guest physical address where the cache window starts.
    cache_offset: GuestAddress,
    // Size of the cache window, in bytes.
    cache_size: u64,
    // Host virtual address where the cache window is mapped in the VMM.
    mmap_cache_addr: u64,
    // Guest memory, used to translate guest physical addresses that fall
    // outside the cache window (see `fs_slave_io`).
    mem: GuestMemoryAtomic<GuestMemoryMmap>,
}
|
|
|
|
|
2020-03-27 09:33:27 +00:00
|
|
|
impl SlaveReqHandler {
|
|
|
|
// Make sure request is within cache range
|
|
|
|
fn is_req_valid(&self, offset: u64, len: u64) -> bool {
|
|
|
|
let end = match offset.checked_add(len) {
|
|
|
|
Some(n) => n,
|
|
|
|
None => return false,
|
|
|
|
};
|
|
|
|
|
|
|
|
!(offset >= self.cache_size || end > self.cache_size)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-08-06 01:28:59 +00:00
|
|
|
impl VhostUserMasterReqHandler for SlaveReqHandler {
    /// Called when the backend signals a configuration change. Only logged;
    /// no action is taken here.
    fn handle_config_change(&self) -> HandlerResult<u64> {
        debug!("handle_config_change");
        Ok(0)
    }

    /// Map file ranges (backed by `fd`) into the DAX cache window at the
    /// offsets requested by the backend, using MAP_FIXED over the
    /// pre-reserved cache mapping.
    fn fs_slave_map(&self, fs: &VhostUserFSSlaveMsg, fd: RawFd) -> HandlerResult<u64> {
        debug!("fs_slave_map");

        for i in 0..VHOST_USER_FS_SLAVE_ENTRIES {
            let offset = fs.cache_offset[i];
            let len = fs.len[i];

            // Ignore if the length is 0.
            if len == 0 {
                continue;
            }

            // Reject requests falling outside the cache window.
            if !self.is_req_valid(offset, len) {
                return Err(io::Error::from_raw_os_error(libc::EINVAL));
            }

            let addr = self.mmap_cache_addr + offset;
            let flags = fs.flags[i];
            // SAFETY-relevant: MAP_FIXED overwrites part of the existing
            // cache reservation at `addr`, which is_req_valid has confirmed
            // lies within the window.
            let ret = unsafe {
                libc::mmap(
                    addr as *mut libc::c_void,
                    len as usize,
                    flags.bits() as i32,
                    libc::MAP_SHARED | libc::MAP_FIXED,
                    fd,
                    fs.fd_offset[i] as libc::off_t,
                )
            };
            if ret == libc::MAP_FAILED {
                return Err(io::Error::last_os_error());
            }

            // The received fd is no longer needed once mapped.
            // NOTE(review): this close happens inside the loop, so a message
            // with more than one non-empty entry would close the same fd
            // twice (second close fails with EBADF) — confirm whether
            // multiple entries ever share this single fd.
            let ret = unsafe { libc::close(fd) };
            if ret == -1 {
                return Err(io::Error::last_os_error());
            }
        }

        Ok(0)
    }

    /// Unmap ranges of the DAX cache window by replacing them with a
    /// PROT_NONE anonymous mapping, restoring the reserved state.
    fn fs_slave_unmap(&self, fs: &VhostUserFSSlaveMsg) -> HandlerResult<u64> {
        debug!("fs_slave_unmap");

        for i in 0..VHOST_USER_FS_SLAVE_ENTRIES {
            let offset = fs.cache_offset[i];
            let mut len = fs.len[i];

            // Ignore if the length is 0.
            if len == 0 {
                continue;
            }

            // Need to handle a special case where the slave ask for the unmapping
            // of the entire mapping.
            if len == 0xffff_ffff_ffff_ffff {
                len = self.cache_size;
            }

            if !self.is_req_valid(offset, len) {
                return Err(io::Error::from_raw_os_error(libc::EINVAL));
            }

            let addr = self.mmap_cache_addr + offset;
            // Replace the range with an inaccessible anonymous mapping
            // rather than munmap'ing, so the reserved window stays intact.
            let ret = unsafe {
                libc::mmap(
                    addr as *mut libc::c_void,
                    len as usize,
                    libc::PROT_NONE,
                    libc::MAP_ANONYMOUS | libc::MAP_PRIVATE | libc::MAP_FIXED,
                    -1,
                    0,
                )
            };
            if ret == libc::MAP_FAILED {
                return Err(io::Error::last_os_error());
            }
        }

        Ok(0)
    }

    /// Flush (msync) ranges of the DAX cache window to their backing files.
    fn fs_slave_sync(&self, fs: &VhostUserFSSlaveMsg) -> HandlerResult<u64> {
        debug!("fs_slave_sync");

        for i in 0..VHOST_USER_FS_SLAVE_ENTRIES {
            let offset = fs.cache_offset[i];
            let len = fs.len[i];

            // Ignore if the length is 0.
            if len == 0 {
                continue;
            }

            if !self.is_req_valid(offset, len) {
                return Err(io::Error::from_raw_os_error(libc::EINVAL));
            }

            let addr = self.mmap_cache_addr + offset;
            let ret =
                unsafe { libc::msync(addr as *mut libc::c_void, len as usize, libc::MS_SYNC) };
            if ret == -1 {
                return Err(io::Error::last_os_error());
            }
        }

        Ok(0)
    }

    /// Perform read/write I/O between `fd` and guest memory on behalf of the
    /// backend. Addresses inside the DAX cache window are translated via the
    /// window's host mapping; other guest physical addresses go through the
    /// guest memory map. Returns the total number of bytes transferred.
    fn fs_slave_io(&self, fs: &VhostUserFSSlaveMsg, fd: RawFd) -> HandlerResult<u64> {
        debug!("fs_slave_io");

        let mut done: u64 = 0;
        for i in 0..VHOST_USER_FS_SLAVE_ENTRIES {
            // Ignore if the length is 0.
            if fs.len[i] == 0 {
                continue;
            }

            let mut foffset = fs.fd_offset[i];
            let mut len = fs.len[i] as usize;
            let gpa = fs.cache_offset[i];
            let cache_end = self.cache_offset.raw_value() + self.cache_size;
            let efault = libc::EFAULT;

            // Resolve the guest physical address to a host virtual address.
            let mut ptr = if gpa >= self.cache_offset.raw_value() && gpa < cache_end {
                // Address is inside the DAX cache window.
                let offset = gpa
                    .checked_sub(self.cache_offset.raw_value())
                    .ok_or_else(|| io::Error::from_raw_os_error(efault))?;
                let end = gpa
                    .checked_add(fs.len[i])
                    .ok_or_else(|| io::Error::from_raw_os_error(efault))?;

                // NOTE(review): `>=` also rejects a request ending exactly at
                // the window boundary (end == cache_end) — confirm whether
                // that exclusion is intended.
                if end >= cache_end {
                    return Err(io::Error::from_raw_os_error(efault));
                }

                self.mmap_cache_addr + offset
            } else {
                // Regular guest RAM: look the address up in the memory map.
                self.mem
                    .memory()
                    .get_host_address(GuestAddress(gpa))
                    .map_err(|e| {
                        error!(
                            "Failed to find RAM region associated with guest physical address 0x{:x}: {:?}",
                            gpa, e
                        );
                        io::Error::from_raw_os_error(efault)
                    })? as u64
            };

            // Transfer in a loop since pread64/pwrite64 may do short I/O.
            while len > 0 {
                let ret = if (fs.flags[i] & VhostUserFSSlaveMsgFlags::MAP_W)
                    == VhostUserFSSlaveMsgFlags::MAP_W
                {
                    debug!("write: foffset={}, len={}", foffset, len);
                    unsafe { pwrite64(fd, ptr as *const c_void, len as usize, foffset as off64_t) }
                } else {
                    debug!("read: foffset={}, len={}", foffset, len);
                    unsafe { pread64(fd, ptr as *mut c_void, len as usize, foffset as off64_t) }
                };

                if ret < 0 {
                    return Err(io::Error::last_os_error());
                }

                if ret == 0 {
                    // EOF
                    return Err(io::Error::new(
                        io::ErrorKind::UnexpectedEof,
                        "failed to access whole buffer",
                    ));
                }
                len -= ret as usize;
                foffset += ret as u64;
                ptr += ret as u64;
                done += ret as u64;
            }
        }

        // The fd received from the backend is consumed by this request.
        let ret = unsafe { libc::close(fd) };
        if ret == -1 {
            return Err(io::Error::last_os_error());
        }

        Ok(done)
    }
}
|
|
|
|
|
2020-01-27 18:06:42 +00:00
|
|
|
// Virtio-fs device configuration space layout exposed to the guest.
#[derive(Copy, Clone)]
#[repr(C, packed)]
struct VirtioFsConfig {
    // Filesystem tag identifying the mount; unused trailing bytes stay zero.
    tag: [u8; 36],
    // Number of request queues (the high-priority queue is not counted).
    num_request_queues: u32,
}
|
|
|
|
|
|
|
|
impl Default for VirtioFsConfig {
|
|
|
|
fn default() -> Self {
|
|
|
|
VirtioFsConfig {
|
|
|
|
tag: [0; 36],
|
|
|
|
num_request_queues: 0,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// VirtioFsConfig is #[repr(C, packed)] and built only from plain integer
// fields, so every byte pattern is a valid value — presumably satisfying the
// ByteValued contract; confirm against the vm-memory trait documentation.
unsafe impl ByteValued for VirtioFsConfig {}
|
|
|
|
|
2019-05-21 18:54:53 +00:00
|
|
|
// vhost-user-fs (virtio-fs) device frontend.
pub struct Fs {
    // Common virtio device state (features, queue sizes, pause/kill events).
    common: VirtioCommon,
    // Unique identifier for this device instance.
    id: String,
    // vhost-user master connection, shared with the reconnection thread.
    vu: Arc<Mutex<Master>>,
    // Virtio configuration space exposed to the guest.
    config: VirtioFsConfig,
    // Hold ownership of the memory that is allocated for the device
    // which will be automatically dropped when the device is dropped
    cache: Option<(VirtioSharedMemoryList, MmapRegion)>,
    // True when the backend acknowledged SLAVE_REQ and SLAVE_SEND_FD,
    // enabling DAX slave requests (see SlaveReqHandler).
    slave_req_support: bool,
    // Seccomp action applied inside the spawned worker thread.
    seccomp_action: SeccompAction,
    // Guest memory captured at activation; used to resend the memory table
    // when regions are added without CONFIGURE_MEM_SLOTS support.
    guest_memory: Option<GuestMemoryAtomic<GuestMemoryMmap>>,
    // vhost-user protocol feature bits acknowledged by the backend.
    acked_protocol_features: u64,
    // Path of the backend socket, handed to the reconnection handler.
    socket_path: String,
    // Thread handling backend reconnections and slave requests.
    reconnect_epoll_thread: Option<thread::JoinHandle<()>>,
}
|
|
|
|
|
|
|
|
impl Fs {
|
|
|
|
/// Create a new virtio-fs device.
|
2019-08-05 20:04:24 +00:00
|
|
|
pub fn new(
|
2020-04-27 11:53:45 +00:00
|
|
|
id: String,
|
2019-08-05 20:04:24 +00:00
|
|
|
path: &str,
|
|
|
|
tag: &str,
|
|
|
|
req_num_queues: usize,
|
|
|
|
queue_size: u16,
|
2020-04-17 17:30:33 +00:00
|
|
|
cache: Option<(VirtioSharedMemoryList, MmapRegion)>,
|
2020-08-18 04:30:53 +00:00
|
|
|
seccomp_action: SeccompAction,
|
2019-08-05 20:04:24 +00:00
|
|
|
) -> Result<Fs> {
|
2019-08-06 01:28:59 +00:00
|
|
|
let mut slave_req_support = false;
|
2019-08-30 17:40:33 +00:00
|
|
|
|
2019-05-21 18:54:53 +00:00
|
|
|
// Calculate the actual number of queues needed.
|
|
|
|
let num_queues = NUM_QUEUE_OFFSET + req_num_queues;
|
2019-08-30 17:40:33 +00:00
|
|
|
|
2019-05-21 18:54:53 +00:00
|
|
|
// Connect to the vhost-user socket.
|
2021-06-07 06:27:06 +00:00
|
|
|
let mut vhost_user_fs = connect_vhost_user(false, &path, num_queues as u64, false)?;
|
2019-08-30 17:40:33 +00:00
|
|
|
|
|
|
|
// Filling device and vring features VMM supports.
|
2021-05-21 16:26:00 +00:00
|
|
|
let avail_features = DEFAULT_VIRTIO_FEATURES;
|
2019-08-30 17:40:33 +00:00
|
|
|
|
2021-05-19 21:21:29 +00:00
|
|
|
let mut avail_protocol_features = VhostUserProtocolFeatures::MQ
|
|
|
|
| VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS
|
|
|
|
| VhostUserProtocolFeatures::REPLY_ACK;
|
|
|
|
let slave_protocol_features =
|
|
|
|
VhostUserProtocolFeatures::SLAVE_REQ | VhostUserProtocolFeatures::SLAVE_SEND_FD;
|
|
|
|
if cache.is_some() {
|
|
|
|
avail_protocol_features |= slave_protocol_features;
|
|
|
|
}
|
2019-08-30 17:40:33 +00:00
|
|
|
|
2021-05-19 21:58:02 +00:00
|
|
|
let (acked_features, acked_protocol_features) = negotiate_features_vhost_user(
|
|
|
|
&mut vhost_user_fs,
|
|
|
|
avail_features,
|
|
|
|
avail_protocol_features,
|
|
|
|
)?;
|
2021-03-05 13:32:57 +00:00
|
|
|
|
2021-05-19 21:21:29 +00:00
|
|
|
let backend_num_queues =
|
|
|
|
if acked_protocol_features & VhostUserProtocolFeatures::MQ.bits() != 0 {
|
|
|
|
vhost_user_fs
|
|
|
|
.get_queue_num()
|
|
|
|
.map_err(Error::VhostUserGetQueueMaxNum)? as usize
|
|
|
|
} else {
|
|
|
|
DEFAULT_QUEUE_NUMBER
|
|
|
|
};
|
2019-08-06 01:28:59 +00:00
|
|
|
|
2021-05-19 21:21:29 +00:00
|
|
|
if num_queues > backend_num_queues {
|
|
|
|
error!(
|
|
|
|
"vhost-user-fs requested too many queues ({}) since the backend only supports {}\n",
|
|
|
|
num_queues, backend_num_queues
|
|
|
|
);
|
|
|
|
return Err(Error::BadQueueNum);
|
|
|
|
}
|
2021-03-05 13:32:57 +00:00
|
|
|
|
2021-05-19 21:21:29 +00:00
|
|
|
if acked_protocol_features & slave_protocol_features.bits()
|
|
|
|
== slave_protocol_features.bits()
|
|
|
|
{
|
2019-08-06 01:28:59 +00:00
|
|
|
slave_req_support = true;
|
2019-05-21 18:54:53 +00:00
|
|
|
}
|
2019-08-30 17:40:33 +00:00
|
|
|
|
2020-01-27 18:06:42 +00:00
|
|
|
// Create virtio-fs device configuration.
|
|
|
|
let mut config = VirtioFsConfig::default();
|
|
|
|
let tag_bytes_vec = tag.to_string().into_bytes();
|
|
|
|
config.tag[..tag_bytes_vec.len()].copy_from_slice(tag_bytes_vec.as_slice());
|
|
|
|
config.num_request_queues = req_num_queues as u32;
|
2019-05-21 18:54:53 +00:00
|
|
|
|
|
|
|
Ok(Fs {
|
2020-09-03 09:37:36 +00:00
|
|
|
common: VirtioCommon {
|
2021-03-25 16:54:09 +00:00
|
|
|
device_type: VirtioDeviceType::Fs as u32,
|
2021-05-19 21:21:29 +00:00
|
|
|
avail_features: acked_features,
|
|
|
|
acked_features: 0,
|
2020-09-04 08:37:37 +00:00
|
|
|
queue_sizes: vec![queue_size; num_queues],
|
2021-06-09 14:33:34 +00:00
|
|
|
paused_sync: Some(Arc::new(Barrier::new(2))),
|
2021-05-19 21:21:29 +00:00
|
|
|
min_queues: DEFAULT_QUEUE_NUMBER as u16,
|
2020-09-03 15:56:32 +00:00
|
|
|
..Default::default()
|
2020-09-03 09:37:36 +00:00
|
|
|
},
|
2020-04-27 11:53:45 +00:00
|
|
|
id,
|
2021-06-07 06:27:06 +00:00
|
|
|
vu: Arc::new(Mutex::new(vhost_user_fs)),
|
2020-01-27 18:06:42 +00:00
|
|
|
config,
|
2019-08-06 01:28:59 +00:00
|
|
|
cache,
|
|
|
|
slave_req_support,
|
2020-08-18 04:30:53 +00:00
|
|
|
seccomp_action,
|
2021-03-05 13:32:57 +00:00
|
|
|
guest_memory: None,
|
|
|
|
acked_protocol_features,
|
2021-06-07 06:27:06 +00:00
|
|
|
socket_path: path.to_string(),
|
|
|
|
reconnect_epoll_thread: None,
|
2019-05-21 18:54:53 +00:00
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl Drop for Fs {
    fn drop(&mut self) {
        // Signal the worker thread to stop by writing the kill event.
        if let Some(kill_evt) = self.common.kill_evt.take() {
            // Ignore the result because there is nothing we can do about it.
            let _ = kill_evt.write(1);
        }
    }
}
|
|
|
|
|
|
|
|
impl VirtioDevice for Fs {
    /// Virtio device type identifier.
    fn device_type(&self) -> u32 {
        self.common.device_type
    }

    /// Maximum size of each virtqueue.
    fn queue_max_sizes(&self) -> &[u16] {
        &self.common.queue_sizes
    }

    /// Feature bits offered to the guest.
    fn features(&self) -> u64 {
        self.common.avail_features
    }

    /// Record the feature bits acknowledged by the guest.
    fn ack_features(&mut self, value: u64) {
        self.common.ack_features(value)
    }

    /// Serve reads of the virtio-fs configuration space.
    fn read_config(&self, offset: u64, data: &mut [u8]) {
        self.read_config_from_slice(self.config.as_slice(), offset, data);
    }

    /// Activate the device: set up the vhost-user backend with the
    /// negotiated features, queues and memory, wire up the slave request
    /// handler (when supported and a DAX cache exists), and spawn the
    /// reconnection/slave-request worker thread.
    fn activate(
        &mut self,
        mem: GuestMemoryAtomic<GuestMemoryMmap>,
        interrupt_cb: Arc<dyn VirtioInterrupt>,
        queues: Vec<Queue>,
        queue_evts: Vec<EventFd>,
    ) -> ActivateResult {
        self.common.activate(&queues, &queue_evts, &interrupt_cb)?;
        // Keep the guest memory around for later hotplug updates.
        self.guest_memory = Some(mem.clone());

        // Initialize slave communication.
        let slave_req_handler = if self.slave_req_support {
            if let Some(cache) = self.cache.as_ref() {
                let vu_master_req_handler = Arc::new(SlaveReqHandler {
                    cache_offset: cache.0.addr,
                    cache_size: cache.0.len,
                    mmap_cache_addr: cache.0.host_addr,
                    mem: mem.clone(),
                });

                let mut req_handler =
                    MasterReqHandler::new(vu_master_req_handler).map_err(|e| {
                        ActivateError::VhostUserFsSetup(Error::MasterReqHandlerCreation(e))
                    })?;
                // Request explicit acknowledgements for slave messages.
                req_handler.set_reply_ack_flag(true);
                Some(req_handler)
            } else {
                None
            }
        } else {
            None
        };

        // The backend acknowledged features must contain the protocol feature
        // bit in case it was initially set but lost through the features
        // negotiation with the guest.
        let backend_acked_features = self.common.acked_features
            | (self.common.avail_features & VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits());

        setup_vhost_user(
            &mut self.vu.lock().unwrap(),
            &mem.memory(),
            queues.clone(),
            queue_evts.iter().map(|q| q.try_clone().unwrap()).collect(),
            &interrupt_cb,
            backend_acked_features,
            &slave_req_handler,
        )
        .map_err(ActivateError::VhostUserFsSetup)?;

        // Run a dedicated thread for handling potential reconnections with
        // the backend as well as requests initiated by the backend.
        let (kill_evt, pause_evt) = self.common.dup_eventfds();
        let mut reconnect_handler: VhostUserEpollHandler<SlaveReqHandler> = VhostUserEpollHandler {
            vu: self.vu.clone(),
            mem,
            kill_evt,
            pause_evt,
            queues,
            queue_evts,
            virtio_interrupt: interrupt_cb,
            acked_features: backend_acked_features,
            acked_protocol_features: self.acked_protocol_features,
            socket_path: self.socket_path.clone(),
            server: false,
            slave_req_handler,
        };

        let paused = self.common.paused.clone();
        let paused_sync = self.common.paused_sync.clone();

        let virtio_vhost_fs_seccomp_filter =
            get_seccomp_filter(&self.seccomp_action, Thread::VirtioVhostFs)
                .map_err(ActivateError::CreateSeccompFilter)?;

        thread::Builder::new()
            .name(self.id.to_string())
            .spawn(move || {
                // Apply the seccomp filter first; only run the worker loop
                // if the filter installed successfully.
                if let Err(e) = SeccompFilter::apply(virtio_vhost_fs_seccomp_filter) {
                    error!("Error applying seccomp filter: {:?}", e);
                } else if let Err(e) = reconnect_handler.run(paused, paused_sync.unwrap()) {
                    error!("Error running reconnection worker: {:?}", e);
                }
            })
            .map(|thread| self.reconnect_epoll_thread = Some(thread))
            .map_err(|e| {
                error!("failed to clone queue EventFd: {}", e);
                ActivateError::BadActivate
            })?;

        event!("virtio-device", "activated", "id", &self.id);
        Ok(())
    }

    /// Reset the device: reset the backend, stop the worker thread and
    /// return the interrupt object so the transport can reclaim it.
    fn reset(&mut self) -> Option<Arc<dyn VirtioInterrupt>> {
        // We first must resume the virtio thread if it was paused.
        if self.common.pause_evt.take().is_some() {
            self.common.resume().ok()?;
        }

        if let Err(e) =
            reset_vhost_user(&mut self.vu.lock().unwrap(), self.common.queue_sizes.len())
        {
            error!("Failed to reset vhost-user daemon: {:?}", e);
            return None;
        }

        if let Some(kill_evt) = self.common.kill_evt.take() {
            // Ignore the result because there is nothing we can do about it.
            let _ = kill_evt.write(1);
        }

        event!("virtio-device", "reset", "id", &self.id);

        // Return the interrupt
        Some(self.common.interrupt_cb.take().unwrap())
    }

    /// Close the vhost-user backend socket.
    fn shutdown(&mut self) {
        let _ = unsafe { libc::close(self.vu.lock().unwrap().as_raw_fd()) };
    }

    /// Shared memory (DAX cache) region exposed by this device, if any.
    fn get_shm_regions(&self) -> Option<VirtioSharedMemoryList> {
        self.cache.as_ref().map(|cache| cache.0.clone())
    }

    /// Replace the shared memory region list backing the DAX cache; fails
    /// when no cache was configured for this device.
    fn set_shm_regions(
        &mut self,
        shm_regions: VirtioSharedMemoryList,
    ) -> std::result::Result<(), crate::Error> {
        if let Some(mut cache) = self.cache.as_mut() {
            cache.0 = shm_regions;
            Ok(())
        } else {
            Err(crate::Error::SetShmRegionsNotSupported)
        }
    }

    /// Propagate a newly added guest memory region to the backend, either
    /// incrementally (CONFIGURE_MEM_SLOTS) or by resending the full table.
    fn add_memory_region(
        &mut self,
        region: &Arc<GuestRegionMmap>,
    ) -> std::result::Result<(), crate::Error> {
        if self.acked_protocol_features & VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS.bits() != 0
        {
            add_memory_region(&mut self.vu.lock().unwrap(), region)
                .map_err(crate::Error::VhostUserAddMemoryRegion)
        } else if let Some(guest_memory) = &self.guest_memory {
            update_mem_table(&mut self.vu.lock().unwrap(), guest_memory.memory().deref())
                .map_err(crate::Error::VhostUserUpdateMemory)
        } else {
            Ok(())
        }
    }

    /// Userspace mappings owned by the device: the DAX cache window, when
    /// one is configured.
    fn userspace_mappings(&self) -> Vec<UserspaceMapping> {
        let mut mappings = Vec::new();
        if let Some(cache) = self.cache.as_ref() {
            mappings.push(UserspaceMapping {
                host_addr: cache.0.host_addr,
                mem_slot: cache.0.mem_slot,
                addr: cache.0.addr,
                len: cache.0.len,
                mergeable: false,
            })
        }

        mappings
    }
}
|
2019-12-02 20:08:53 +00:00
|
|
|
|
2020-09-04 08:37:37 +00:00
|
|
|
impl Pausable for Fs {
|
|
|
|
fn pause(&mut self) -> result::Result<(), MigratableError> {
|
|
|
|
self.common.pause()
|
|
|
|
}
|
|
|
|
|
|
|
|
fn resume(&mut self) -> result::Result<(), MigratableError> {
|
2021-06-07 06:27:06 +00:00
|
|
|
self.common.resume()?;
|
|
|
|
|
|
|
|
if let Some(reconnect_epoll_thread) = &self.reconnect_epoll_thread {
|
|
|
|
reconnect_epoll_thread.thread().unpark();
|
|
|
|
}
|
|
|
|
Ok(())
|
2020-09-04 08:37:37 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-27 11:53:45 +00:00
|
|
|
impl Snapshottable for Fs {
    // Identifier keying this device's state in a snapshot.
    fn id(&self) -> String {
        self.id.to_owned()
    }
}
|
2019-05-01 16:59:51 +00:00
|
|
|
// Default (no-op) transport implementation is sufficient for this device.
impl Transportable for Fs {}
|
2019-12-02 20:08:53 +00:00
|
|
|
// Migration support comes entirely from the blanket/default trait methods.
impl Migratable for Fs {}
|