vhost_user_block: Rely on upstream vhost-user-backend crate

Instead of relying on the local copy of vhost-user-backend, this patch
switches the block backend implementation over to the upstream version
of the crate from rust-vmm.

Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
Author: Sebastien Boeuf <sebastien.boeuf@intel.com>
Date:   2022-02-01 13:21:27 +01:00
Parent: e9b8126cf7
Commit: ac00838f34

4 changed files with 71 additions and 35 deletions

Cargo.lock (generated)

@@ -1107,6 +1107,21 @@ dependencies = [
  "vmm-sys-util",
 ]

+[[package]]
+name = "vhost-user-backend"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a8db00e93514caa8987bb8b536fe962c9b66b4068583abc4c531eb97988477cd"
+dependencies = [
+ "libc",
+ "log",
+ "vhost",
+ "virtio-bindings",
+ "virtio-queue",
+ "vm-memory",
+ "vmm-sys-util",
+]
+
 [[package]]
 name = "vhost_user_backend"
 version = "0.1.0"

@@ -1135,7 +1150,7 @@ dependencies = [
  "option_parser",
  "qcow",
  "vhost",
- "vhost_user_backend",
+ "vhost-user-backend",
  "virtio-bindings",
  "vm-memory",
  "vmm-sys-util",

Cargo.toml

@@ -38,6 +38,7 @@ clap = { version = "3.0.14", features = ["wrap_help"] }
 kvm-bindings = { git = "https://github.com/cloud-hypervisor/kvm-bindings", branch = "ch-v0.5.0", features = ["with-serde", "fam-wrappers"] }
 kvm-ioctls = { git = "https://github.com/rust-vmm/kvm-ioctls", branch = "main" }
 versionize_derive = { git = "https://github.com/cloud-hypervisor/versionize_derive", branch = "ch" }
+virtio-queue = { git = "https://github.com/rust-vmm/vm-virtio", branch = "main" }

 [dev-dependencies]
 dirs = "4.0.0"

vhost_user_block/Cargo.toml

@@ -13,8 +13,8 @@ libc = "0.2.116"
 log = "0.4.14"
 option_parser = { path = "../option_parser" }
 qcow = { path = "../qcow" }
-vhost_user_backend = { path = "../vhost_user_backend" }
 vhost = { version = "0.3.0", features = ["vhost-user-slave"] }
+vhost-user-backend = "0.1.0"
 virtio-bindings = "0.1.0"
 vm-memory = "0.7.0"
 vmm-sys-util = "0.9.0"

vhost_user_block/src/lib.rs

@@ -23,18 +23,19 @@ use std::path::PathBuf;
 use std::process;
 use std::result;
 use std::sync::atomic::{AtomicBool, Ordering};
-use std::sync::{Arc, Mutex, RwLock};
+use std::sync::{Arc, Mutex, RwLock, RwLockWriteGuard};
 use std::time::Instant;
 use std::vec::Vec;
 use std::{convert, error, fmt, io};
 use vhost::vhost_user::message::*;
 use vhost::vhost_user::Listener;
-use vhost_user_backend::{VhostUserBackend, VhostUserDaemon, Vring};
+use vhost_user_backend::{VhostUserBackendMut, VhostUserDaemon, VringRwLock, VringState, VringT};
 use virtio_bindings::bindings::virtio_blk::*;
 use virtio_bindings::bindings::virtio_ring::VIRTIO_RING_F_EVENT_IDX;
-use vm_memory::ByteValued;
-use vm_memory::Bytes;
-use vmm_sys_util::eventfd::EventFd;
+use vm_memory::{bitmap::AtomicBitmap, ByteValued, Bytes, GuestMemoryAtomic};
+use vmm_sys_util::{epoll::EventSet, eventfd::EventFd};
+
+type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>;

 const SECTOR_SHIFT: u8 = 9;
 const SECTOR_SIZE: u64 = 0x01 << SECTOR_SHIFT;
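
A note on the new imports: the upstream crate is generic over a dirty-page bitmap, so the backend pins GuestMemoryMmap to vm_memory::GuestMemoryMmap<AtomicBitmap> and works with a GuestMemoryAtomic wrapper, allowing the whole mapping to be replaced when the frontend updates its memory table. A minimal sketch of how these types compose, assuming the vm-memory 0.7 API used by this patch (build_memory and the 64 MiB region are illustrative, not from the patch):

use vm_memory::{bitmap::AtomicBitmap, GuestAddress, GuestMemoryAtomic};

// Same alias as in the patch: mmap-backed guest memory that records
// dirtied pages in an atomic bitmap.
type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>;

// Hypothetical helper: one 64 MiB region at guest physical address 0,
// wrapped so the mapping can later be swapped atomically.
fn build_memory() -> GuestMemoryAtomic<GuestMemoryMmap> {
    let mem = GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 64 << 20)])
        .expect("failed to map guest memory");
    GuestMemoryAtomic::new(mem)
}

fn main() {
    let _mem = build_memory();
}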
@@ -111,10 +112,13 @@ impl VhostUserBlkThread {
         })
     }

-    fn process_queue(&mut self, vring: &mut Vring) -> bool {
-        let mut used_any = false;
+    fn process_queue(
+        &mut self,
+        vring: &mut RwLockWriteGuard<VringState<GuestMemoryAtomic<GuestMemoryMmap>>>,
+    ) -> bool {
+        let mut used_desc_heads = Vec::new();

-        while let Some(mut desc_chain) = vring.mut_queue().iter().unwrap().next() {
+        for mut desc_chain in vring.get_queue_mut().iter().unwrap() {
             debug!("got an element in the queue");
             let len;
             match Request::parse(&mut desc_chain, None) {
@@ -147,29 +151,33 @@
                 }
             }

+            used_desc_heads.push((desc_chain.head_index(), len));
+        }
+
+        let mut needs_signalling = false;
+        for (desc_head, len) in used_desc_heads.iter() {
             if self.event_idx {
-                let queue = vring.mut_queue();
-                if queue.add_used(desc_chain.head_index(), len).is_ok() {
+                let queue = vring.get_queue_mut();
+                if queue.add_used(*desc_head, *len).is_ok() {
                     if queue.needs_notification().unwrap() {
                         debug!("signalling queue");
-                        vring.signal_used_queue().unwrap();
+                        needs_signalling = true;
                     } else {
                         debug!("omitting signal (event_idx)");
                     }
-                    used_any = true;
                 }
             } else {
                 debug!("signalling queue");
-                vring
-                    .mut_queue()
-                    .add_used(desc_chain.head_index(), len)
-                    .unwrap();
-                vring.signal_used_queue().unwrap();
-                used_any = true;
+                vring.get_queue_mut().add_used(*desc_head, *len).unwrap();
+                needs_signalling = true;
             }
         }

-        used_any
+        if needs_signalling {
+            vring.signal_used_queue().unwrap();
+        }
+
+        !used_desc_heads.is_empty()
     }
 }
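
A note on the rewritten process_queue(): virtio-queue's iter() borrows the queue, so used-ring updates can no longer happen inside the iteration; completions are recorded first and published afterwards, and the guest is signalled at most once per batch. A self-contained toy sketch of that two-phase shape (all types below are stand-ins, not the crate's API):

// Toy stand-ins for the vring/queue types; only the control flow mirrors
// the patched process_queue().
struct ToyVring {
    available: Vec<u16>,   // descriptor head indices waiting to be processed
    used: Vec<(u16, u32)>, // (head index, bytes used) completions
}

impl ToyVring {
    fn add_used(&mut self, head: u16, len: u32) {
        self.used.push((head, len));
    }
    fn signal_used_queue(&self) {
        println!("notify guest: {} completions", self.used.len());
    }
}

fn process_queue(vring: &mut ToyVring) -> bool {
    // Phase 1: drain the available ring, recording completions locally.
    let mut used_desc_heads = Vec::new();
    for head in vring.available.drain(..) {
        let len = 512; // pretend every request moved one sector
        used_desc_heads.push((head, len));
    }

    // Phase 2: publish completions, then signal once for the whole batch.
    for (head, len) in used_desc_heads.iter() {
        vring.add_used(*head, *len);
    }
    if !used_desc_heads.is_empty() {
        vring.signal_used_queue();
    }
    !used_desc_heads.is_empty()
}

fn main() {
    let mut vring = ToyVring { available: vec![0, 2, 5], used: Vec::new() };
    assert!(process_queue(&mut vring));
}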
@@ -272,7 +280,9 @@ impl VhostUserBlkBackend {
     }
 }

-impl VhostUserBackend for VhostUserBlkBackend {
+impl VhostUserBackendMut<VringRwLock<GuestMemoryAtomic<GuestMemoryMmap>>, AtomicBitmap>
+    for VhostUserBlkBackend
+{
     fn num_queues(&self) -> usize {
         self.config.num_queues as usize
     }
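
The switch from VhostUserBackend to VhostUserBackendMut also changes the callbacks to take &mut self: mutability now comes from the lock the daemon holds around the shared backend rather than from interior mutability inside it. A toy illustration of that sharing pattern (trait and types invented for the example):

use std::sync::{Arc, Mutex};
use std::thread;

// Hypothetical stand-in for the *Mut trait flavor: methods take &mut self.
trait BackendMut {
    fn handle_event(&mut self, device_event: u16);
}

struct Blk {
    events_handled: u64,
}

impl BackendMut for Blk {
    fn handle_event(&mut self, _device_event: u16) {
        self.events_handled += 1;
    }
}

fn main() {
    // Worker threads share one backend and lock it before each callback.
    let backend = Arc::new(Mutex::new(Blk { events_handled: 0 }));
    let workers: Vec<_> = (0..4)
        .map(|_| {
            let b = backend.clone();
            thread::spawn(move || b.lock().unwrap().handle_event(0))
        })
        .collect();
    for w in workers {
        w.join().unwrap();
    }
    assert_eq!(backend.lock().unwrap().events_handled, 4);
}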
@@ -316,13 +326,13 @@ impl VhostUserBackend for VhostUserBlkBackend {
     }

     fn handle_event(
-        &self,
+        &mut self,
         device_event: u16,
-        evset: epoll::Events,
-        vrings: &[Arc<RwLock<Vring>>],
+        evset: EventSet,
+        vrings: &[VringRwLock<GuestMemoryAtomic<GuestMemoryMmap>>],
         thread_id: usize,
     ) -> VhostUserBackendResult<bool> {
-        if evset != epoll::Events::EPOLLIN {
+        if evset != EventSet::IN {
             return Err(Error::HandleEventNotEpollIn.into());
         }
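
handle_event() likewise stops comparing raw epoll::Events flags; readiness now arrives as vmm-sys-util's EventSet bitflags. A small runnable check of the comparison semantics assumed above:

use vmm_sys_util::epoll::EventSet;

fn main() {
    // Exact comparison, as in handle_event() above.
    assert!(EventSet::IN == EventSet::IN);

    // EventSet is a bitflags type, so combined readiness can also be probed.
    let evset = EventSet::IN | EventSet::HANG_UP;
    assert!(evset.contains(EventSet::IN));
    assert!(evset != EventSet::IN); // strict equality would reject this
}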
@@ -331,7 +341,7 @@ impl VhostUserBackend for VhostUserBlkBackend {
         let mut thread = self.threads[thread_id].lock().unwrap();
         match device_event {
             0 => {
-                let mut vring = vrings[0].write().unwrap();
+                let mut vring = vrings[0].get_mut();

                 if self.poll_queue {
                     // Actively poll the queue until POLL_QUEUE_US has passed
@@ -352,7 +362,7 @@ impl VhostUserBackend for VhostUserBlkBackend {
                     // calling process_queue() until it stops finding new
                     // requests on the queue.
                     loop {
-                        vring.mut_queue().enable_notification().unwrap();
+                        vring.get_queue_mut().enable_notification().unwrap();
                         if !thread.process_queue(&mut vring) {
                             break;
                         }
@@ -386,22 +396,27 @@
         Ok(())
     }

-    fn exit_event(&self, thread_index: usize) -> Option<(EventFd, Option<u16>)> {
-        // The exit event is placed after the queue, which is event index 1.
-        Some((
+    fn exit_event(&self, thread_index: usize) -> Option<EventFd> {
+        Some(
             self.threads[thread_index]
                 .lock()
                 .unwrap()
                 .kill_evt
                 .try_clone()
                 .unwrap(),
-            Some(1),
-        ))
+        )
     }

     fn queues_per_thread(&self) -> Vec<u64> {
         self.queues_per_thread.clone()
     }
+
+    fn update_memory(
+        &mut self,
+        _mem: GuestMemoryAtomic<GuestMemoryMmap>,
+    ) -> VhostUserBackendResult<()> {
+        Ok(())
+    }
 }

 struct VhostUserBlkBackendConfig {
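
Two trait changes land in this hunk: exit_event() now returns a bare EventFd, since the upstream crate picks the poll slot itself and the explicit event index 1 disappears, and update_memory() is a new callback invoked when the frontend sends a memory table; this backend keeps no derived state, hence the empty body. The exit path relies on EventFd::try_clone() duplicating the underlying descriptor, as in this runnable sketch:

use vmm_sys_util::eventfd::{EventFd, EFD_NONBLOCK};

fn main() -> std::io::Result<()> {
    let kill_evt = EventFd::new(EFD_NONBLOCK)?; // kept by the worker thread
    let exit_evt = kill_evt.try_clone()?;       // handed to the daemon's poller
    kill_evt.write(1)?;                         // worker asks to shut down...
    assert_eq!(exit_evt.read()?, 1);            // ...and the poller observes it
    Ok(())
}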
@@ -491,11 +506,16 @@ pub fn start_block_backend(backend_command: &str) {
     let listener = Listener::new(&backend_config.socket, true).unwrap();

     let name = "vhost-user-blk-backend";
-    let mut blk_daemon = VhostUserDaemon::new(name.to_string(), blk_backend.clone()).unwrap();
+    let mut blk_daemon = VhostUserDaemon::new(
+        name.to_string(),
+        blk_backend.clone(),
+        GuestMemoryAtomic::new(GuestMemoryMmap::new()),
+    )
+    .unwrap();
     debug!("blk_daemon is created!\n");

-    if let Err(e) = blk_daemon.start_server(listener) {
+    if let Err(e) = blk_daemon.start(listener) {
         error!(
             "Failed to start daemon for vhost-user-block with error: {:?}\n",
             e
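
For context on the hunk above: VhostUserDaemon::new() now takes the guest memory object up front, starting from an empty mapping that gains regions only once the frontend transmits its memory table (the moment the new update_memory() callback fires). A minimal sketch of that initial state, assuming only the vm-memory API visible in this diff:

use vm_memory::{bitmap::AtomicBitmap, GuestAddressSpace, GuestMemory, GuestMemoryAtomic};

type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>;

fn main() {
    // What the patch passes to VhostUserDaemon::new(): an atomic wrapper
    // around a guest memory that has no regions yet.
    let mem = GuestMemoryAtomic::new(GuestMemoryMmap::new());
    assert_eq!(mem.memory().num_regions(), 0);
}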