vhost_user_net: Rely on upstream vhost-user-backend crate

Instead of relying on the local version of vhost-user-backend, this
patch allows the net backend implementation to rely on the upstream
version of the crate from rust-vmm.

Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
Author: Sebastien Boeuf
Date:   2022-02-02 11:36:47 +01:00
Parent: 41f2f41ed2
Commit: 71148e1fa5
3 changed files with 60 additions and 35 deletions
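
For orientation before the diffs: the upstream crate exposes the backend trait as VhostUserBackendMut, and the sketch below condenses the trait surface as the net backend now uses it. It is an illustrative example assembled from the signatures visible in the hunks that follow, not the crate's full definition; the DummyBackend type and its trivial method bodies are invented for illustration, and max_queue_size() and protocol_features() are assumed to be the remaining required methods at the pinned revision (they do not appear in this diff).

    use std::io;

    use vhost::vhost_user::message::VhostUserProtocolFeatures;
    use vhost_user_backend::{VhostUserBackendMut, VringRwLock};
    use vm_memory::{bitmap::AtomicBitmap, GuestMemoryAtomic};
    use vmm_sys_util::{epoll::EventSet, eventfd::EventFd};

    type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>;

    // Hypothetical no-op backend, used only to show the trait surface.
    struct DummyBackend;

    impl VhostUserBackendMut<VringRwLock<GuestMemoryAtomic<GuestMemoryMmap>>, AtomicBitmap>
        for DummyBackend
    {
        fn num_queues(&self) -> usize {
            2
        }

        // Assumed: required by the upstream trait, not shown in this diff.
        fn max_queue_size(&self) -> usize {
            256
        }

        fn features(&self) -> u64 {
            0
        }

        // Assumed: required by the upstream trait, not shown in this diff.
        fn protocol_features(&self) -> VhostUserProtocolFeatures {
            VhostUserProtocolFeatures::empty()
        }

        fn set_event_idx(&mut self, _enabled: bool) {}

        // New in the upstream API: the daemon hands the guest memory to the backend.
        fn update_memory(&mut self, _mem: GuestMemoryAtomic<GuestMemoryMmap>) -> io::Result<()> {
            Ok(())
        }

        // Takes &mut self, an EventSet and VringRwLock handles instead of
        // epoll::Events and Arc<RwLock<Vring>>.
        fn handle_event(
            &mut self,
            _device_event: u16,
            _evset: EventSet,
            _vrings: &[VringRwLock<GuestMemoryAtomic<GuestMemoryMmap>>],
            _thread_id: usize,
        ) -> io::Result<bool> {
            Ok(false)
        }

        // The custom exit event index is gone; only the EventFd is returned.
        fn exit_event(&self, _thread_index: usize) -> Option<EventFd> {
            None
        }

        fn queues_per_thread(&self) -> Vec<u64> {
            vec![1]
        }
    }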

Cargo.lock (generated)

@@ -1122,6 +1122,20 @@ dependencies = [
  "vmm-sys-util",
 ]
 
+[[package]]
+name = "vhost-user-backend"
+version = "0.1.0"
+source = "git+https://github.com/rust-vmm/vhost-user-backend?branch=main#bbc892ba4526bdf8101252f7aa51832d1f2eeabd"
+dependencies = [
+ "libc",
+ "log",
+ "vhost",
+ "virtio-bindings",
+ "virtio-queue",
+ "vm-memory",
+ "vmm-sys-util",
+]
+
 [[package]]
 name = "vhost_user_backend"
 version = "0.1.0"
@@ -1150,7 +1164,7 @@ dependencies = [
  "option_parser",
  "qcow",
  "vhost",
- "vhost-user-backend",
+ "vhost-user-backend 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "virtio-bindings",
  "vm-memory",
  "vmm-sys-util",
@@ -1168,7 +1182,7 @@ dependencies = [
  "net_util",
  "option_parser",
  "vhost",
- "vhost_user_backend",
+ "vhost-user-backend 0.1.0 (git+https://github.com/rust-vmm/vhost-user-backend?branch=main)",
  "virtio-bindings",
  "vm-memory",
  "vmm-sys-util",

vhost_user_net/Cargo.toml

@@ -12,8 +12,8 @@ libc = "0.2.117"
 log = "0.4.14"
 net_util = { path = "../net_util" }
 option_parser = { path = "../option_parser" }
-vhost_user_backend = { path = "../vhost_user_backend" }
 vhost = { version = "0.3.0", features = ["vhost-user-slave"] }
+vhost-user-backend = { git = "https://github.com/rust-vmm/vhost-user-backend", branch = "main" }
 virtio-bindings = "0.1.0"
 vm-memory = "0.7.0"
 vmm-sys-util = "0.9.0"

vhost_user_net/src/lib.rs

@@ -16,15 +16,18 @@ use option_parser::{OptionParser, OptionParserError};
 use std::fmt;
 use std::io::{self};
 use std::net::Ipv4Addr;
-use std::os::unix::io::AsRawFd;
+use std::os::unix::io::{AsRawFd, RawFd};
 use std::process;
 use std::sync::{Arc, Mutex, RwLock};
 use std::vec::Vec;
 use vhost::vhost_user::message::*;
 use vhost::vhost_user::Listener;
-use vhost_user_backend::{VhostUserBackend, VhostUserDaemon, Vring, VringWorker};
+use vhost_user_backend::{VhostUserBackendMut, VhostUserDaemon, VringRwLock, VringT};
 use virtio_bindings::bindings::virtio_net::*;
-use vmm_sys_util::eventfd::EventFd;
+use vm_memory::{bitmap::AtomicBitmap, GuestMemoryAtomic};
+use vmm_sys_util::{epoll::EventSet, eventfd::EventFd};
+
+type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>;
 
 pub type Result<T> = std::result::Result<T, Error>;
 type VhostUserBackendResult<T> = std::result::Result<T, std::io::Error>;
@@ -88,8 +91,8 @@ impl VhostUserNetThread {
             tx_tap_listening: false,
             epoll_fd: None,
             counters: NetCounters::default(),
-            tap_rx_event_id: 2,
-            tap_tx_event_id: 3,
+            tap_rx_event_id: 3,
+            tap_tx_event_id: 4,
             rx_desc_avail: false,
             rx_rate_limiter: None,
             tx_rate_limiter: None,
@@ -98,8 +101,8 @@ impl VhostUserNetThread {
         })
     }
 
-    pub fn set_vring_worker(&mut self, vring_worker: Option<Arc<VringWorker>>) {
-        self.net.epoll_fd = Some(vring_worker.as_ref().unwrap().as_raw_fd());
+    pub fn set_epoll_fd(&mut self, fd: RawFd) {
+        self.net.epoll_fd = Some(fd);
     }
 }
 
@@ -146,7 +149,9 @@
     }
 }
 
-impl VhostUserBackend for VhostUserNetBackend {
+impl VhostUserBackendMut<VringRwLock<GuestMemoryAtomic<GuestMemoryMmap>>, AtomicBitmap>
+    for VhostUserNetBackend
+{
     fn num_queues(&self) -> usize {
         self.num_queues
     }
@@ -166,7 +171,6 @@
             | 1 << VIRTIO_NET_F_HOST_TSO6
             | 1 << VIRTIO_NET_F_HOST_ECN
             | 1 << VIRTIO_NET_F_HOST_UFO
-            | 1 << VIRTIO_NET_F_MRG_RXBUF
             | 1 << VIRTIO_NET_F_CTRL_VQ
             | 1 << VIRTIO_NET_F_MQ
             | 1 << VIRTIO_NET_F_MAC
@@ -184,10 +188,10 @@
     fn set_event_idx(&mut self, _enabled: bool) {}
 
     fn handle_event(
-        &self,
+        &mut self,
         device_event: u16,
-        _evset: epoll::Events,
-        vrings: &[Arc<RwLock<Vring>>],
+        _evset: EventSet,
+        vrings: &[VringRwLock<GuestMemoryAtomic<GuestMemoryMmap>>],
         thread_id: usize,
     ) -> VhostUserBackendResult<bool> {
         let mut thread = self.threads[thread_id].lock().unwrap();
@@ -204,11 +208,11 @@
                     thread.net.rx_tap_listening = true;
                 }
             }
-            1 | 3 => {
-                let mut vring = vrings[1].write().unwrap();
+            1 | 4 => {
+                let mut vring = vrings[1].get_mut();
                 if thread
                     .net
-                    .process_tx(vring.mut_queue())
+                    .process_tx(vring.get_queue_mut())
                     .map_err(Error::NetQueuePair)?
                 {
                     vring
@@ -216,11 +220,11 @@
                         .map_err(Error::FailedSignalingUsedQueue)?
                 }
             }
-            2 => {
-                let mut vring = vrings[0].write().unwrap();
+            3 => {
+                let mut vring = vrings[0].get_mut();
                 if thread
                     .net
-                    .process_rx(vring.mut_queue())
+                    .process_rx(vring.get_queue_mut())
                     .map_err(Error::NetQueuePair)?
                 {
                     vring
@@ -234,23 +238,27 @@
         Ok(false)
     }
 
-    fn exit_event(&self, thread_index: usize) -> Option<(EventFd, Option<u16>)> {
-        // The exit event is placed after the queues and the tap event, which
-        // is event index 3.
-        Some((
+    fn exit_event(&self, thread_index: usize) -> Option<EventFd> {
+        Some(
             self.threads[thread_index]
                 .lock()
                 .unwrap()
                 .kill_evt
                 .try_clone()
                 .unwrap(),
-            Some(3),
-        ))
+        )
     }
 
     fn queues_per_thread(&self) -> Vec<u64> {
         self.queues_per_thread.clone()
     }
+
+    fn update_memory(
+        &mut self,
+        _mem: GuestMemoryAtomic<GuestMemoryMmap>,
+    ) -> VhostUserBackendResult<()> {
+        Ok(())
+    }
 }
 
 pub struct VhostUserNetBackendConfig {
@@ -344,27 +352,30 @@ pub fn start_net_backend(backend_command: &str) {
         .unwrap(),
     ));
 
-    let mut net_daemon =
-        VhostUserDaemon::new("vhost-user-net-backend".to_string(), net_backend.clone()).unwrap();
+    let mut net_daemon = VhostUserDaemon::new(
+        "vhost-user-net-backend".to_string(),
+        net_backend.clone(),
+        GuestMemoryAtomic::new(GuestMemoryMmap::new()),
+    )
+    .unwrap();
 
-    let mut vring_workers = net_daemon.get_vring_workers();
-
-    if vring_workers.len() != net_backend.read().unwrap().threads.len() {
+    let epoll_handlers = net_daemon.get_epoll_handlers();
+    if epoll_handlers.len() != net_backend.read().unwrap().threads.len() {
         error!("Number of vring workers must be identical to the number of backend threads");
         process::exit(1);
     }
 
-    for thread in net_backend.read().unwrap().threads.iter() {
+    for (index, thread) in net_backend.read().unwrap().threads.iter().enumerate() {
         thread
             .lock()
             .unwrap()
-            .set_vring_worker(Some(vring_workers.remove(0)));
+            .set_epoll_fd(epoll_handlers[index].as_raw_fd());
     }
 
     if let Err(e) = if backend_config.client {
         net_daemon.start_client(&backend_config.socket)
     } else {
-        net_daemon.start_server(Listener::new(&backend_config.socket, true).unwrap())
+        net_daemon.start(Listener::new(&backend_config.socket, true).unwrap())
     } {
         error!(
             "failed to start daemon for vhost-user-net with error: {:?}",