vhost_user_fs: add fs cache request operations

This introduces setupmapping and removemapping methods in server.rs,
passthrough.rs and filesystem.rs in order to support virtio-fs DAX mode
inside the guest.

Since we don't want server.rs to know that it is dealing with vhost-user
specifically, the code is kept generic by adding a new trait,
FsCacheReqHandler, whose map()/unmap() functions correspond to
fs_slave_{map, unmap}; server.rs will accept anything that implements
the trait.

Signed-off-by: Liu Bo <bo.liu@linux.alibaba.com>
Liu Bo, 2020-02-12 15:08:35 -08:00, committed by Rob Bradford
parent 956a84f73a
commit 3f09eff6c5
7 changed files with 227 additions and 1 deletion
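
At a high level, a vhost-user-fs daemon is expected to wire the pieces up
along these lines (hypothetical glue code, not part of this commit; the
module paths assume the crate's Server, Reader and Writer types are public):

    use vhost_rs::vhost_user::SlaveFsCacheReq;
    use vhost_user_fs::descriptor_utils::{Reader, Writer};
    use vhost_user_fs::filesystem::FileSystem;
    use vhost_user_fs::server::Server;

    // Pump one FUSE request from a virtqueue through the server. The slave
    // request channel is only present when the guest negotiated a DAX cache
    // window; SlaveFsCacheReq implements the new FsCacheReqHandler trait, so
    // server.rs never sees any vhost-user type directly.
    fn process_one_request<F: FileSystem + Sync>(
        server: &Server<F>,
        r: Reader,
        w: Writer,
        cache_req: Option<&mut SlaveFsCacheReq>,
    ) {
        if server.handle_message(r, w, cache_req).is_err() {
            // Error handling elided in this sketch.
        }
    }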

Cargo.lock (generated)

@@ -1103,6 +1103,7 @@ dependencies = [
 "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
 "log 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)",
+"vhost_rs 0.1.0",
 "vm-memory 0.1.0 (git+https://github.com/rust-vmm/vm-memory)",
 "vm-virtio 0.1.0",
 ]

vhost_user_fs/Cargo.toml

@@ -10,3 +10,7 @@ libc = "0.2.65"
 log = "0.4.8"
 vm-memory = { git = "https://github.com/rust-vmm/vm-memory" }
 vm-virtio = { path = "../vm-virtio" }
+
+[dependencies.vhost_rs]
+path = "../vhost_rs"
+features = ["vhost-user-slave"]

vhost_user_fs/src/filesystem.rs

@@ -13,8 +13,10 @@ use libc;
 use crate::fuse;
+use super::fs_cache_req_handler::FsCacheReqHandler;
 pub use fuse::FsOptions;
 pub use fuse::OpenOptions;
+pub use fuse::RemovemappingOne;
 pub use fuse::SetattrValid;
 pub use fuse::ROOT_ID;
@@ -1043,6 +1045,31 @@ pub trait FileSystem {
         Err(io::Error::from_raw_os_error(libc::ENOSYS))
     }

+    /// Set up a mapping so that the guest can access files in DAX style.
+    #[allow(clippy::too_many_arguments)]
+    fn setupmapping<T: FsCacheReqHandler>(
+        &self,
+        _ctx: Context,
+        inode: Self::Inode,
+        handle: Self::Handle,
+        foffset: u64,
+        len: u64,
+        flags: u64,
+        moffset: u64,
+        vu_req: &mut T,
+    ) -> io::Result<()> {
+        Err(io::Error::from_raw_os_error(libc::ENOSYS))
+    }
+
+    fn removemapping<T: FsCacheReqHandler>(
+        &self,
+        _ctx: Context,
+        requests: Vec<RemovemappingOne>,
+        vu_req: &mut T,
+    ) -> io::Result<()> {
+        Err(io::Error::from_raw_os_error(libc::ENOSYS))
+    }
+
     /// Check file access permissions.
     ///
     /// This method is called when a userspace process in the client makes an `access()` or

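The ENOSYS defaults above keep every existing FileSystem implementation
compiling, and a filesystem that never overrides them simply tells the guest
that DAX mappings are unsupported. A minimal sketch (NullFs is made up for
illustration and assumes the trait's remaining methods also have default
implementations, like the ones shown here):

    struct NullFs;

    impl FileSystem for NullFs {
        type Inode = u64;
        type Handle = u64;
    }

    fn dax_is_unsupported<T: FsCacheReqHandler>(ctx: Context, req: &mut T) {
        let fs = NullFs;
        // Arguments: inode 1, handle 0, foffset 0, len 4096, flags 0, moffset 0.
        let err = fs.setupmapping(ctx, 1, 0, 0, 4096, 0, 0, req).unwrap_err();
        // The guest sees ENOSYS and falls back to regular, non-DAX I/O.
        assert_eq!(err.raw_os_error(), Some(libc::ENOSYS));
    }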
vhost_user_fs/src/fs_cache_req_handler.rs (new file)

@@ -0,0 +1,61 @@
+use crate::fuse;
+
+use std::io;
+use std::os::unix::io::RawFd;
+
+use vhost_rs::vhost_user::message::{
+    VhostUserFSSlaveMsg, VhostUserFSSlaveMsgFlags, VHOST_USER_FS_SLAVE_ENTRIES,
+};
+use vhost_rs::vhost_user::{SlaveFsCacheReq, VhostUserMasterReqHandler};
+
+/// Trait for virtio-fs cache request operations. This is mainly used to hide
+/// vhost-user details from virtio-fs's fuse part.
+pub trait FsCacheReqHandler: Send + Sync + 'static {
+    /// Set up a dedicated mapping so that the guest can access file data in DAX style.
+    fn map(
+        &mut self,
+        foffset: u64,
+        moffset: u64,
+        len: u64,
+        flags: u64,
+        fd: RawFd,
+    ) -> io::Result<()>;
+
+    /// Remove mappings that provide access to file data.
+    fn unmap(&mut self, requests: Vec<fuse::RemovemappingOne>) -> io::Result<()>;
+}
+
+impl FsCacheReqHandler for SlaveFsCacheReq {
+    fn map(
+        &mut self,
+        foffset: u64,
+        moffset: u64,
+        len: u64,
+        flags: u64,
+        fd: RawFd,
+    ) -> io::Result<()> {
+        let mut msg: VhostUserFSSlaveMsg = Default::default();
+        msg.fd_offset[0] = foffset;
+        msg.cache_offset[0] = moffset;
+        msg.len[0] = len;
+        msg.flags[0] = if (flags & fuse::SetupmappingFlags::WRITE.bits()) != 0 {
+            VhostUserFSSlaveMsgFlags::MAP_W | VhostUserFSSlaveMsgFlags::MAP_R
+        } else {
+            VhostUserFSSlaveMsgFlags::MAP_R
+        };
+
+        self.fs_slave_map(&msg, fd)
+    }
+
+    fn unmap(&mut self, requests: Vec<fuse::RemovemappingOne>) -> io::Result<()> {
+        for chunk in requests.chunks(VHOST_USER_FS_SLAVE_ENTRIES) {
+            let mut msg: VhostUserFSSlaveMsg = Default::default();
+            for (ind, req) in chunk.iter().enumerate() {
+                msg.len[ind] = req.len;
+                msg.cache_offset[ind] = req.moffset;
+            }
+            self.fs_slave_unmap(&msg)?;
+        }
+        Ok(())
+    }
+}

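Because server.rs and passthrough.rs only ever see this trait, a test double
can stand in for the vhost-user slave channel, e.g. in a unit test inside
this crate (RecordingHandler is hypothetical, not part of this commit):

    use std::io;
    use std::os::unix::io::RawFd;

    use crate::fs_cache_req_handler::FsCacheReqHandler;
    use crate::fuse;

    /// Records map/unmap calls instead of talking to a VMM.
    #[derive(Default)]
    struct RecordingHandler {
        maps: Vec<(u64, u64, u64)>, // (foffset, moffset, len)
        unmaps: Vec<(u64, u64)>,    // (moffset, len)
    }

    impl FsCacheReqHandler for RecordingHandler {
        fn map(
            &mut self,
            foffset: u64,
            moffset: u64,
            len: u64,
            _flags: u64,
            _fd: RawFd,
        ) -> io::Result<()> {
            self.maps.push((foffset, moffset, len));
            Ok(())
        }

        fn unmap(&mut self, requests: Vec<fuse::RemovemappingOne>) -> io::Result<()> {
            for req in requests {
                self.unmaps.push((req.moffset, req.len));
            }
            Ok(())
        }
    }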
vhost_user_fs/src/lib.rs

@@ -8,6 +8,7 @@ extern crate log;
 pub mod descriptor_utils;
 pub mod file_traits;
 pub mod filesystem;
+pub mod fs_cache_req_handler;
 pub mod fuse;
 pub mod multikey;
 pub mod passthrough;

vhost_user_fs/src/passthrough.rs

@@ -17,6 +17,7 @@ use std::time::Duration;
 use libc;
 use vm_memory::ByteValued;

+use super::fs_cache_req_handler::FsCacheReqHandler;
 use crate::filesystem::{
     Context, DirEntry, Entry, FileSystem, FsOptions, GetxattrReply, ListxattrReply, OpenOptions,
     SetattrValid, ZeroCopyReader, ZeroCopyWriter,
@@ -911,6 +912,41 @@ impl FileSystem for PassthroughFs {
         self.do_unlink(parent, name, 0)
     }

+    fn setupmapping<T: FsCacheReqHandler>(
+        &self,
+        _ctx: Context,
+        inode: Inode,
+        _handle: Handle,
+        foffset: u64,
+        len: u64,
+        flags: u64,
+        moffset: u64,
+        vu_req: &mut T,
+    ) -> io::Result<()> {
+        debug!(
+            "setupmapping: ino {:?} foffset {} len {} flags {} moffset {}",
+            inode, foffset, len, flags, moffset
+        );
+
+        let open_flags = if (flags & fuse::SetupmappingFlags::WRITE.bits()) != 0 {
+            libc::O_RDWR
+        } else {
+            libc::O_RDONLY
+        };
+
+        let file = self.open_inode(inode, open_flags as i32)?;
+        (*vu_req).map(foffset, moffset, len, flags, file.as_raw_fd())
+    }
+
+    fn removemapping<T: FsCacheReqHandler>(
+        &self,
+        _ctx: Context,
+        requests: Vec<fuse::RemovemappingOne>,
+        vu_req: &mut T,
+    ) -> io::Result<()> {
+        (*vu_req).unmap(requests)
+    }
+
     fn read<W: io::Write + ZeroCopyWriter>(
         &self,
         _ctx: Context,

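For illustration, the new PassthroughFs methods can be driven directly, e.g.
with the RecordingHandler sketched earlier (hypothetical usage; real traffic
arrives via Server::handle_message, and this assumes Context is Copy, that
Inode and Handle are the crate's u64 aliases, and that RemovemappingOne
carries just moffset and len):

    fn map_then_unmap(
        fs: &PassthroughFs,
        ctx: Context,
        inode: Inode,
        handler: &mut RecordingHandler,
    ) -> io::Result<()> {
        // Map 2 MiB of the file at offset 0 of the DAX window. flags is 0,
        // so open_inode uses O_RDONLY and FsCacheReqHandler::map() will set
        // MAP_R only.
        fs.setupmapping(ctx, inode, 0, 0, 2 << 20, 0, 0, handler)?;

        // Later, tear the same range down again.
        fs.removemapping(
            ctx,
            vec![fuse::RemovemappingOne {
                moffset: 0,
                len: 2 << 20,
            }],
            handler,
        )
    }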
vhost_user_fs/src/server.rs

@@ -10,6 +10,7 @@ use std::mem::size_of;
 use libc;
 use vm_memory::ByteValued;

+use super::fs_cache_req_handler::FsCacheReqHandler;
 use crate::descriptor_utils::{Reader, Writer};
 use crate::filesystem::{
     Context, DirEntry, Entry, FileSystem, GetxattrReply, ListxattrReply, ZeroCopyReader,
@@ -63,7 +64,12 @@ impl<F: FileSystem + Sync> Server<F> {
     }

     #[allow(clippy::cognitive_complexity)]
-    pub fn handle_message(&self, mut r: Reader, w: Writer) -> Result<usize> {
+    pub fn handle_message<T: FsCacheReqHandler>(
+        &self,
+        mut r: Reader,
+        w: Writer,
+        vu_req: Option<&mut T>,
+    ) -> Result<usize> {
         let in_header: InHeader = r.read_obj().map_err(Error::DecodeMessage)?;

         if in_header.len > MAX_BUFFER_SIZE {
@@ -118,6 +124,8 @@
             x if x == Opcode::Readdirplus as u32 => self.readdirplus(in_header, r, w),
             x if x == Opcode::Rename2 as u32 => self.rename2(in_header, r, w),
             x if x == Opcode::Lseek as u32 => self.lseek(in_header, r, w),
+            x if x == Opcode::SetupMapping as u32 => self.setupmapping(in_header, r, w, vu_req),
+            x if x == Opcode::RemoveMapping as u32 => self.removemapping(in_header, r, w, vu_req),
             _ => reply_error(
                 io::Error::from_raw_os_error(libc::ENOSYS),
                 in_header.unique,
@@ -126,6 +134,94 @@
         }
     }

+    fn setupmapping<T: FsCacheReqHandler>(
+        &self,
+        in_header: InHeader,
+        mut r: Reader,
+        w: Writer,
+        vu_req: Option<&mut T>,
+    ) -> Result<usize> {
+        if let Some(req) = vu_req {
+            let SetupmappingIn {
+                fh,
+                foffset,
+                len,
+                flags,
+                moffset,
+            } = r.read_obj().map_err(Error::DecodeMessage)?;
+
+            match self.fs.setupmapping(
+                Context::from(in_header),
+                in_header.nodeid.into(),
+                fh.into(),
+                foffset,
+                len,
+                flags,
+                moffset,
+                req,
+            ) {
+                Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w),
+                Err(e) => reply_error(e, in_header.unique, w),
+            }
+        } else {
+            reply_error(
+                io::Error::from_raw_os_error(libc::EINVAL),
+                in_header.unique,
+                w,
+            )
+        }
+    }
+
+    fn removemapping<T: FsCacheReqHandler>(
+        &self,
+        in_header: InHeader,
+        mut r: Reader,
+        w: Writer,
+        vu_req: Option<&mut T>,
+    ) -> Result<usize> {
+        if let Some(req) = vu_req {
+            let RemovemappingIn { count } = r.read_obj().map_err(Error::DecodeMessage)?;
+
+            if let Some(size) = (count as usize).checked_mul(size_of::<RemovemappingOne>()) {
+                if size > MAX_BUFFER_SIZE as usize {
+                    return reply_error(
+                        io::Error::from_raw_os_error(libc::ENOMEM),
+                        in_header.unique,
+                        w,
+                    );
+                }
+            } else {
+                return reply_error(
+                    io::Error::from_raw_os_error(libc::EOVERFLOW),
+                    in_header.unique,
+                    w,
+                );
+            }
+
+            let mut requests = Vec::with_capacity(count as usize);
+            for _ in 0..count {
+                requests.push(
+                    r.read_obj::<RemovemappingOne>()
+                        .map_err(Error::DecodeMessage)?,
+                );
+            }
+
+            match self
+                .fs
+                .removemapping(Context::from(in_header), requests, req)
+            {
+                Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w),
+                Err(e) => reply_error(e, in_header.unique, w),
+            }
+        } else {
+            reply_error(
+                io::Error::from_raw_os_error(libc::EINVAL),
+                in_header.unique,
+                w,
+            )
+        }
+    }
+
     fn lookup(&self, in_header: InHeader, mut r: Reader, w: Writer) -> Result<usize> {
         let namelen = (in_header.len as usize)
             .checked_sub(size_of::<InHeader>())
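
One detail worth noting in removemapping() above: count is guest controlled,
so the allocation size is validated with checked_mul before
Vec::with_capacity ever runs. A worked example of that guard (it assumes
RemovemappingOne is two u64 fields, i.e. 16 bytes):

    // For count = u32::MAX the product is just under 64 GiB, far above
    // MAX_BUFFER_SIZE, so the request is rejected before any allocation.
    fn removemapping_guard(count: u32) -> Result<usize, &'static str> {
        match (count as usize).checked_mul(size_of::<RemovemappingOne>()) {
            Some(size) if size > MAX_BUFFER_SIZE as usize => Err("ENOMEM"),
            Some(size) => Ok(size), // safe to read `count` entries
            None => Err("EOVERFLOW"), // only reachable on 32-bit usize
        }
    }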