cloud-hypervisor/virtio-devices/src/vhost_user/fs.rs

// Copyright 2019 Intel Corporation. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use super::vu_common_ctrl::{
add_memory_region, connect_vhost_user, negotiate_features_vhost_user, reset_vhost_user,
setup_vhost_user, update_mem_table,
};
use super::{Error, Result, DEFAULT_VIRTIO_FEATURES};
use crate::seccomp_filters::{get_seccomp_filter, Thread};
use crate::vhost_user::{Inflight, VhostUserEpollHandler};
use crate::{
ActivateError, ActivateResult, Queue, UserspaceMapping, VirtioCommon, VirtioDevice,
VirtioDeviceType, VirtioInterrupt, VirtioSharedMemoryList,
};
use crate::{GuestMemoryMmap, GuestRegionMmap, MmapRegion};
use libc::{self, c_void, off64_t, pread64, pwrite64};
use seccomp::{SeccompAction, SeccompFilter};
use std::io;
use std::ops::Deref;
use std::os::unix::io::{AsRawFd, RawFd};
use std::result;
use std::sync::{Arc, Barrier, Mutex};
use std::thread;
use vhost::vhost_user::message::{
VhostUserFSSlaveMsg, VhostUserFSSlaveMsgFlags, VhostUserProtocolFeatures,
VhostUserVirtioFeatures, VHOST_USER_FS_SLAVE_ENTRIES,
};
use vhost::vhost_user::{
HandlerResult, Master, MasterReqHandler, VhostUserMaster, VhostUserMasterReqHandler,
};
use vm_memory::{
Address, ByteValued, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic,
};
use vm_migration::{Migratable, MigratableError, Pausable, Snapshottable, Transportable};
use vmm_sys_util::eventfd::EventFd;
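// virtio-fs exposes one high priority queue in addition to the request
// queues, hence the offset of 1 below. DEFAULT_QUEUE_NUMBER is the fallback
// used when the backend does not advertise the MQ protocol feature.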
const NUM_QUEUE_OFFSET: usize = 1;
const DEFAULT_QUEUE_NUMBER: usize = 2;
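// Handler for the requests initiated by the slave (virtiofsd) over the
// slave channel: mapping, unmapping, syncing and direct I/O against the
// DAX cache window.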
struct SlaveReqHandler {
cache_offset: GuestAddress,
cache_size: u64,
mmap_cache_addr: u64,
mem: GuestMemoryAtomic<GuestMemoryMmap>,
}
impl SlaveReqHandler {
// Make sure the request is within the cache range.
fn is_req_valid(&self, offset: u64, len: u64) -> bool {
let end = match offset.checked_add(len) {
Some(n) => n,
None => return false,
};
!(offset >= self.cache_size || end > self.cache_size)
}
}
impl VhostUserMasterReqHandler for SlaveReqHandler {
fn handle_config_change(&self) -> HandlerResult<u64> {
debug!("handle_config_change");
Ok(0)
}
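// Map ranges of the file descriptor received from the slave into the DAX
// cache window at the requested offsets, replacing whatever was previously
// mapped there (MAP_SHARED | MAP_FIXED).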
fn fs_slave_map(&self, fs: &VhostUserFSSlaveMsg, fd: RawFd) -> HandlerResult<u64> {
debug!("fs_slave_map");
for i in 0..VHOST_USER_FS_SLAVE_ENTRIES {
let offset = fs.cache_offset[i];
let len = fs.len[i];
// Ignore if the length is 0.
if len == 0 {
continue;
}
if !self.is_req_valid(offset, len) {
return Err(io::Error::from_raw_os_error(libc::EINVAL));
}
let addr = self.mmap_cache_addr + offset;
let flags = fs.flags[i];
let ret = unsafe {
libc::mmap(
addr as *mut libc::c_void,
len as usize,
flags.bits() as i32,
libc::MAP_SHARED | libc::MAP_FIXED,
fd,
fs.fd_offset[i] as libc::off_t,
)
};
if ret == libc::MAP_FAILED {
return Err(io::Error::last_os_error());
}
let ret = unsafe { libc::close(fd) };
if ret == -1 {
return Err(io::Error::last_os_error());
}
}
Ok(0)
}
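// Unmap ranges of the DAX cache window by overlaying them with a PROT_NONE
// anonymous mapping, releasing the pages while keeping the window itself
// reserved.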
fn fs_slave_unmap(&self, fs: &VhostUserFSSlaveMsg) -> HandlerResult<u64> {
debug!("fs_slave_unmap");
for i in 0..VHOST_USER_FS_SLAVE_ENTRIES {
let offset = fs.cache_offset[i];
let mut len = fs.len[i];
// Ignore if the length is 0.
if len == 0 {
continue;
}
// Handle the special case where the slave asks for the entire mapping
// to be unmapped.
if len == 0xffff_ffff_ffff_ffff {
len = self.cache_size;
}
if !self.is_req_valid(offset, len) {
return Err(io::Error::from_raw_os_error(libc::EINVAL));
}
let addr = self.mmap_cache_addr + offset;
let ret = unsafe {
libc::mmap(
addr as *mut libc::c_void,
len as usize,
libc::PROT_NONE,
libc::MAP_ANONYMOUS | libc::MAP_PRIVATE | libc::MAP_FIXED,
-1,
0,
)
};
if ret == libc::MAP_FAILED {
return Err(io::Error::last_os_error());
}
}
Ok(0)
}
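// Flush previously mapped ranges of the DAX cache window back to the
// underlying files with msync(MS_SYNC).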
fn fs_slave_sync(&self, fs: &VhostUserFSSlaveMsg) -> HandlerResult<u64> {
debug!("fs_slave_sync");
for i in 0..VHOST_USER_FS_SLAVE_ENTRIES {
let offset = fs.cache_offset[i];
let len = fs.len[i];
// Ignore if the length is 0.
if len == 0 {
continue;
}
if !self.is_req_valid(offset, len) {
return Err(io::Error::from_raw_os_error(libc::EINVAL));
}
let addr = self.mmap_cache_addr + offset;
let ret =
unsafe { libc::msync(addr as *mut libc::c_void, len as usize, libc::MS_SYNC) };
if ret == -1 {
return Err(io::Error::last_os_error());
}
}
Ok(0)
}
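// Perform the I/O on behalf of the slave: read from or write to the file
// descriptor it passed, using either the DAX cache window or regular guest
// RAM as the buffer depending on where the provided address falls.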
fn fs_slave_io(&self, fs: &VhostUserFSSlaveMsg, fd: RawFd) -> HandlerResult<u64> {
debug!("fs_slave_io");
let mut done: u64 = 0;
for i in 0..VHOST_USER_FS_SLAVE_ENTRIES {
// Ignore if the length is 0.
if fs.len[i] == 0 {
continue;
}
let mut foffset = fs.fd_offset[i];
let mut len = fs.len[i] as usize;
let gpa = fs.cache_offset[i];
let cache_end = self.cache_offset.raw_value() + self.cache_size;
let efault = libc::EFAULT;
let mut ptr = if gpa >= self.cache_offset.raw_value() && gpa < cache_end {
let offset = gpa
.checked_sub(self.cache_offset.raw_value())
.ok_or_else(|| io::Error::from_raw_os_error(efault))?;
let end = gpa
.checked_add(fs.len[i])
.ok_or_else(|| io::Error::from_raw_os_error(efault))?;
if end >= cache_end {
return Err(io::Error::from_raw_os_error(efault));
}
self.mmap_cache_addr + offset
} else {
self.mem
.memory()
.get_host_address(GuestAddress(gpa))
.map_err(|e| {
error!(
"Failed to find RAM region associated with guest physical address 0x{:x}: {:?}",
gpa, e
);
io::Error::from_raw_os_error(efault)
})? as u64
};
while len > 0 {
let ret = if (fs.flags[i] & VhostUserFSSlaveMsgFlags::MAP_W)
== VhostUserFSSlaveMsgFlags::MAP_W
{
debug!("write: foffset={}, len={}", foffset, len);
unsafe { pwrite64(fd, ptr as *const c_void, len as usize, foffset as off64_t) }
} else {
debug!("read: foffset={}, len={}", foffset, len);
unsafe { pread64(fd, ptr as *mut c_void, len as usize, foffset as off64_t) }
};
if ret < 0 {
return Err(io::Error::last_os_error());
}
if ret == 0 {
// EOF
return Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
"failed to access whole buffer",
));
}
len -= ret as usize;
foffset += ret as u64;
ptr += ret as u64;
done += ret as u64;
}
}
let ret = unsafe { libc::close(fd) };
if ret == -1 {
return Err(io::Error::last_os_error());
}
Ok(done)
}
}
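// virtio-fs device configuration space, as defined by the virtio
// specification: a 36-byte tag identifying the filesystem and the number
// of request queues.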
#[derive(Copy, Clone)]
#[repr(C, packed)]
struct VirtioFsConfig {
tag: [u8; 36],
num_request_queues: u32,
}
impl Default for VirtioFsConfig {
fn default() -> Self {
VirtioFsConfig {
tag: [0; 36],
num_request_queues: 0,
}
}
}
unsafe impl ByteValued for VirtioFsConfig {}
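// vhost-user-fs device: the frontend half running in the VMM, talking to a
// virtiofsd backend over a vhost-user socket, optionally exposing a DAX
// cache window to the guest.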
pub struct Fs {
common: VirtioCommon,
id: String,
vu: Arc<Mutex<Master>>,
config: VirtioFsConfig,
// Hold ownership of the memory that is allocated for the device,
// which will be automatically dropped when the device is dropped.
cache: Option<(VirtioSharedMemoryList, MmapRegion)>,
slave_req_support: bool,
seccomp_action: SeccompAction,
guest_memory: Option<GuestMemoryAtomic<GuestMemoryMmap>>,
acked_protocol_features: u64,
socket_path: String,
reconnect_epoll_thread: Option<thread::JoinHandle<()>>,
}
impl Fs {
/// Create a new virtio-fs device.
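///
/// A minimal usage sketch (illustrative only, not taken from this crate's
/// callers); the socket path, tag and seccomp action below are assumptions,
/// and a virtiofsd backend is assumed to already be listening on the socket:
///
/// ```ignore
/// use seccomp::SeccompAction;
///
/// let fs = Fs::new(
///     "fs0".to_string(),         // device id
///     "/run/virtiofsd.sock",     // vhost-user socket created by virtiofsd
///     "myfs",                    // tag the guest uses to mount the filesystem
///     1,                         // number of request queues
///     1024,                      // queue size
///     None,                      // no DAX cache window
///     SeccompAction::Allow,
/// )
/// .expect("failed to create vhost-user-fs device");
/// ```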
pub fn new(
id: String,
path: &str,
tag: &str,
req_num_queues: usize,
queue_size: u16,
cache: Option<(VirtioSharedMemoryList, MmapRegion)>,
seccomp_action: SeccompAction,
) -> Result<Fs> {
let mut slave_req_support = false;
// Calculate the actual number of queues needed.
let num_queues = NUM_QUEUE_OFFSET + req_num_queues;
// Connect to the vhost-user socket.
let mut vhost_user_fs = connect_vhost_user(false, &path, num_queues as u64, false)?;
// Fill in the device and vring features supported by the VMM.
let avail_features = DEFAULT_VIRTIO_FEATURES;
let mut avail_protocol_features = VhostUserProtocolFeatures::MQ
| VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS
| VhostUserProtocolFeatures::REPLY_ACK
| VhostUserProtocolFeatures::INFLIGHT_SHMFD;
let slave_protocol_features =
VhostUserProtocolFeatures::SLAVE_REQ | VhostUserProtocolFeatures::SLAVE_SEND_FD;
if cache.is_some() {
avail_protocol_features |= slave_protocol_features;
}
let (acked_features, acked_protocol_features) = negotiate_features_vhost_user(
&mut vhost_user_fs,
avail_features,
avail_protocol_features,
)?;
let backend_num_queues =
if acked_protocol_features & VhostUserProtocolFeatures::MQ.bits() != 0 {
vhost_user_fs
.get_queue_num()
.map_err(Error::VhostUserGetQueueMaxNum)? as usize
} else {
DEFAULT_QUEUE_NUMBER
};
if num_queues > backend_num_queues {
error!(
"vhost-user-fs requested too many queues ({}) since the backend only supports {}\n",
num_queues, backend_num_queues
);
return Err(Error::BadQueueNum);
}
if acked_protocol_features & slave_protocol_features.bits()
== slave_protocol_features.bits()
{
slave_req_support = true;
}
// Create virtio-fs device configuration.
let mut config = VirtioFsConfig::default();
let tag_bytes_vec = tag.to_string().into_bytes();
config.tag[..tag_bytes_vec.len()].copy_from_slice(tag_bytes_vec.as_slice());
config.num_request_queues = req_num_queues as u32;
Ok(Fs {
common: VirtioCommon {
device_type: VirtioDeviceType::Fs as u32,
avail_features: acked_features,
acked_features: 0,
queue_sizes: vec![queue_size; num_queues],
paused_sync: Some(Arc::new(Barrier::new(2))),
min_queues: DEFAULT_QUEUE_NUMBER as u16,
..Default::default()
},
id,
vu: Arc::new(Mutex::new(vhost_user_fs)),
config,
cache,
slave_req_support,
seccomp_action,
guest_memory: None,
acked_protocol_features,
socket_path: path.to_string(),
reconnect_epoll_thread: None,
})
}
}
impl Drop for Fs {
fn drop(&mut self) {
if let Some(kill_evt) = self.common.kill_evt.take() {
// Ignore the result because there is nothing we can do about it.
let _ = kill_evt.write(1);
}
}
}
impl VirtioDevice for Fs {
fn device_type(&self) -> u32 {
self.common.device_type
}
fn queue_max_sizes(&self) -> &[u16] {
&self.common.queue_sizes
}
fn features(&self) -> u64 {
self.common.avail_features
}
fn ack_features(&mut self, value: u64) {
self.common.ack_features(value)
}
fn read_config(&self, offset: u64, data: &mut [u8]) {
self.read_config_from_slice(self.config.as_slice(), offset, data);
}
fn activate(
&mut self,
mem: GuestMemoryAtomic<GuestMemoryMmap>,
interrupt_cb: Arc<dyn VirtioInterrupt>,
queues: Vec<Queue>,
queue_evts: Vec<EventFd>,
) -> ActivateResult {
self.common.activate(&queues, &queue_evts, &interrupt_cb)?;
self.guest_memory = Some(mem.clone());
// Initialize slave communication.
let slave_req_handler = if self.slave_req_support {
if let Some(cache) = self.cache.as_ref() {
let vu_master_req_handler = Arc::new(SlaveReqHandler {
cache_offset: cache.0.addr,
cache_size: cache.0.len,
mmap_cache_addr: cache.0.host_addr,
mem: mem.clone(),
});
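        // Create the handler serving requests initiated by the backend
        // (virtiofsd), such as mapping file descriptors into the DAX shared
        // memory region, and make it acknowledge every request back to the
        // backend.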
let mut req_handler =
MasterReqHandler::new(vu_master_req_handler).map_err(|e| {
ActivateError::VhostUserFsSetup(Error::MasterReqHandlerCreation(e))
})?;
req_handler.set_reply_ack_flag(true);
Some(req_handler)
} else {
None
}
} else {
None
};
// The backend acknowledged features must contain the protocol feature
// bit in case it was initially set but lost through the features
// negotiation with the guest.
let backend_acked_features = self.common.acked_features
| (self.common.avail_features & VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits());
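        // Only track in-flight descriptors if the backend advertised
        // VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD, which allows them to be
        // recovered after a backend reconnection.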
let mut inflight: Option<Inflight> =
if self.acked_protocol_features & VhostUserProtocolFeatures::INFLIGHT_SHMFD.bits() != 0
{
Some(Inflight::default())
} else {
None
};
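        // Set up the vhost-user backend: share the guest memory table,
        // configure every virtqueue along with its eventfds, and apply the
        // features acknowledged by the backend.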
setup_vhost_user(
&mut self.vu.lock().unwrap(),
&mem.memory(),
queues.clone(),
queue_evts.iter().map(|q| q.try_clone().unwrap()).collect(),
&interrupt_cb,
backend_acked_features,
&slave_req_handler,
inflight.as_mut(),
)
.map_err(ActivateError::VhostUserFsSetup)?;
// Run a dedicated thread for handling potential reconnections with
// the backend as well as requests initiated by the backend.
let (kill_evt, pause_evt) = self.common.dup_eventfds();
let mut reconnect_handler: VhostUserEpollHandler<SlaveReqHandler> = VhostUserEpollHandler {
vu: self.vu.clone(),
mem,
kill_evt,
pause_evt,
queues,
queue_evts,
virtio_interrupt: interrupt_cb,
acked_features: backend_acked_features,
acked_protocol_features: self.acked_protocol_features,
socket_path: self.socket_path.clone(),
server: false,
slave_req_handler,
inflight,
};
let paused = self.common.paused.clone();
let paused_sync = self.common.paused_sync.clone();
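        // Retrieve the seccomp filter dedicated to the virtio-vhost-fs worker
        // thread so it can be applied as soon as the thread starts.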
let virtio_vhost_fs_seccomp_filter =
get_seccomp_filter(&self.seccomp_action, Thread::VirtioVhostFs)
.map_err(ActivateError::CreateSeccompFilter)?;
thread::Builder::new()
.name(self.id.to_string())
.spawn(move || {
if let Err(e) = SeccompFilter::apply(virtio_vhost_fs_seccomp_filter) {
error!("Error applying seccomp filter: {:?}", e);
} else if let Err(e) = reconnect_handler.run(paused, paused_sync.unwrap()) {
error!("Error running reconnection worker: {:?}", e);
}
})
.map(|thread| self.reconnect_epoll_thread = Some(thread))
.map_err(|e| {
error!("failed to clone queue EventFd: {}", e);
ActivateError::BadActivate
})?;
event!("virtio-device", "activated", "id", &self.id);
Ok(())
}
fn reset(&mut self) -> Option<Arc<dyn VirtioInterrupt>> {
// We first must resume the virtio thread if it was paused.
if self.common.pause_evt.take().is_some() {
self.common.resume().ok()?;
}
if let Err(e) =
reset_vhost_user(&mut self.vu.lock().unwrap(), self.common.queue_sizes.len())
{
error!("Failed to reset vhost-user daemon: {:?}", e);
return None;
}
if let Some(kill_evt) = self.common.kill_evt.take() {
// Ignore the result because there is nothing we can do about it.
let _ = kill_evt.write(1);
}
event!("virtio-device", "reset", "id", &self.id);
        // Return the interrupt callback to the caller.
Some(self.common.interrupt_cb.take().unwrap())
}
fn shutdown(&mut self) {
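        // Close the vhost-user socket so the backend can detect the
        // disconnection.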
let _ = unsafe { libc::close(self.vu.lock().unwrap().as_raw_fd()) };
}
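    // The DAX cache is exposed to the guest as the device's virtio shared
    // memory region.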
fn get_shm_regions(&self) -> Option<VirtioSharedMemoryList> {
self.cache.as_ref().map(|cache| cache.0.clone())
}
fn set_shm_regions(
&mut self,
shm_regions: VirtioSharedMemoryList,
) -> std::result::Result<(), crate::Error> {
        if let Some(cache) = self.cache.as_mut() {
cache.0 = shm_regions;
Ok(())
} else {
Err(crate::Error::SetShmRegionsNotSupported)
}
}
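    // If the backend supports CONFIGURE_MEM_SLOTS, only the new region needs
    // to be sent; otherwise the whole memory table is resent.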
fn add_memory_region(
&mut self,
region: &Arc<GuestRegionMmap>,
) -> std::result::Result<(), crate::Error> {
if self.acked_protocol_features & VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS.bits() != 0
{
add_memory_region(&mut self.vu.lock().unwrap(), region)
.map_err(crate::Error::VhostUserAddMemoryRegion)
} else if let Some(guest_memory) = &self.guest_memory {
update_mem_table(&mut self.vu.lock().unwrap(), guest_memory.memory().deref())
.map_err(crate::Error::VhostUserUpdateMemory)
} else {
Ok(())
}
}
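    // Report the host userspace mapping backing the DAX cache (if any) so the
    // VMM can keep track of it.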
fn userspace_mappings(&self) -> Vec<UserspaceMapping> {
let mut mappings = Vec::new();
if let Some(cache) = self.cache.as_ref() {
mappings.push(UserspaceMapping {
host_addr: cache.0.host_addr,
mem_slot: cache.0.mem_slot,
addr: cache.0.addr,
len: cache.0.len,
mergeable: false,
})
}
mappings
}
}
impl Pausable for Fs {
fn pause(&mut self) -> result::Result<(), MigratableError> {
self.common.pause()
}
fn resume(&mut self) -> result::Result<(), MigratableError> {
self.common.resume()?;
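        // Wake up the reconnection worker thread, which may be parked while
        // the device is paused.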
if let Some(reconnect_epoll_thread) = &self.reconnect_epoll_thread {
reconnect_epoll_thread.thread().unpark();
}
Ok(())
}
}
impl Snapshottable for Fs {
fn id(&self) -> String {
self.id.clone()
}
}
impl Transportable for Fs {}
impl Migratable for Fs {}