virtio-devices: vhost-user: Don't run threads for net and block

Some refactoring is performed so that the irqfd is always expected to
be provided through the VirtioInterrupt trait. If no irqfd is
available, we simply fail to initialize the vhost-user device. This
allows for further simplification, since we can now assume the
interrupt will always be triggered directly by the vhost-user backend
rather than being proxied through the VMM, which in turn allows the
dedicated threads for both block and net to be removed entirely.
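
The core of the change is visible in the vu_common_ctrl.rs hunks at the
bottom. As a minimal sketch of the new per-queue wiring (the notifier()
method on the VirtioInterrupt trait, the helper name and its exact
signature are assumptions for illustration, not part of this diff):

    // Hypothetical helper mirroring the new setup_vhost_user_vring()
    // logic: the irqfd comes straight from the VirtioInterrupt trait
    // and is handed to the vhost-user backend, so no VMM thread has
    // to proxy interrupt notifications anymore.
    fn wire_vring_call(
        vu: &mut Master,
        queue_index: usize,
        queue: &Queue,
        virtio_interrupt: &Arc<dyn VirtioInterrupt>,
    ) -> Result<()> {
        if let Some(eventfd) =
            virtio_interrupt.notifier(&VirtioInterruptType::Queue, Some(queue))
        {
            // The backend signals this irqfd directly on used buffers.
            vu.set_vring_call(queue_index, &eventfd)
                .map_err(Error::VhostUserSetVringCall)
        } else {
            // No irqfd available: refuse to initialize the device.
            Err(Error::MissingIrqFd)
        }
    }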

vhost-user-fs is a bit more complex, as it requires the slave request
protocol feature in order to support DAX. That is why the VMM still
needs to intervene, and a dedicated thread is therefore kept for it.
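
With the queue irqfds out of the picture, the epoll handler that
virtio-fs keeps is reduced to watching the slave request channel. A
sketch of the remaining run() body, pieced together from the handler.rs
hunks below (the EpollHelper::run() call and the exact return type are
assumptions):

    // The loop no longer registers any queue eventfds; the only event
    // source left is the slave request fd used by the backend for DAX
    // map/unmap requests.
    pub fn run(
        &mut self,
        paused: Arc<AtomicBool>,
        paused_sync: Arc<Barrier>,
    ) -> std::result::Result<(), EpollHelperError> {
        let mut helper =
            EpollHelper::new(&self.vu_epoll_cfg.kill_evt, &self.vu_epoll_cfg.pause_evt)?;

        if let Some(slave_req_handler) = &self.vu_epoll_cfg.slave_req_handler {
            helper.add_event(slave_req_handler.as_raw_fd(), self.slave_evt_idx)?;
        }

        helper.run(paused, paused_sync, self)?;

        Ok(())
    }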

Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
Author: Sebastien Boeuf, 2021-05-12 17:13:08 +02:00
Committed by: Rob Bradford
parent 496ceed1d0
commit 355e7468e4
6 changed files with 15 additions and 179 deletions

virtio-devices/src/vhost_user/blk.rs

@@ -4,19 +4,16 @@
 use super::super::{
     ActivateError, ActivateResult, Queue, VirtioCommon, VirtioDevice, VirtioDeviceType,
 };
-use super::handler::*;
 use super::vu_common_ctrl::*;
 use super::{Error, Result};
-use crate::seccomp_filters::{get_seccomp_filter, Thread};
 use crate::VirtioInterrupt;
 use block_util::VirtioBlockConfig;
-use seccomp::{SeccompAction, SeccompFilter};
+use seccomp::SeccompAction;
 use std::mem;
 use std::ops::Deref;
 use std::os::unix::io::AsRawFd;
 use std::result;
 use std::sync::{Arc, Barrier};
-use std::thread;
 use std::vec::Vec;
 use vhost::vhost_user::message::VhostUserConfigFlags;
 use vhost::vhost_user::message::VHOST_USER_CONFIG_OFFSET;
@@ -39,7 +36,7 @@ pub struct Blk {
     id: String,
     vhost_user_blk: Master,
     config: VirtioBlockConfig,
-    seccomp_action: SeccompAction,
+    _seccomp_action: SeccompAction,
     guest_memory: Option<GuestMemoryAtomic<GuestMemoryMmap>>,
     acked_protocol_features: u64,
 }
@@ -150,7 +147,7 @@ impl Blk {
             id,
             vhost_user_blk,
             config,
-            seccomp_action,
+            _seccomp_action: seccomp_action,
             guest_memory: None,
             acked_protocol_features,
         })
@@ -219,7 +216,7 @@ impl VirtioDevice for Blk {
         self.guest_memory = Some(mem.clone());
-        let mut vu_interrupt_list = setup_vhost_user(
+        setup_vhost_user(
             &mut self.vhost_user_blk,
             &mem.memory(),
             queues,
@@ -229,62 +226,6 @@ impl VirtioDevice for Blk {
         )
         .map_err(ActivateError::VhostUserBlkSetup)?;
-        let mut epoll_threads = Vec::new();
-        for i in 0..vu_interrupt_list.len() {
-            let interrupt_list_sub = vec![vu_interrupt_list.remove(0)];
-            let kill_evt = self
-                .common
-                .kill_evt
-                .as_ref()
-                .unwrap()
-                .try_clone()
-                .map_err(|e| {
-                    error!("failed to clone kill_evt eventfd: {}", e);
-                    ActivateError::BadActivate
-                })?;
-            let pause_evt = self
-                .common
-                .pause_evt
-                .as_ref()
-                .unwrap()
-                .try_clone()
-                .map_err(|e| {
-                    error!("failed to clone pause_evt eventfd: {}", e);
-                    ActivateError::BadActivate
-                })?;
-            let mut handler = VhostUserEpollHandler::<SlaveReqHandler>::new(VhostUserEpollConfig {
-                interrupt_cb: interrupt_cb.clone(),
-                kill_evt,
-                pause_evt,
-                vu_interrupt_list: interrupt_list_sub,
-                slave_req_handler: None,
-            });
-            let paused = self.common.paused.clone();
-            let paused_sync = self.common.paused_sync.clone();
-            let virtio_vhost_blk_seccomp_filter =
-                get_seccomp_filter(&self.seccomp_action, Thread::VirtioVhostBlk)
-                    .map_err(ActivateError::CreateSeccompFilter)?;
-            thread::Builder::new()
-                .name(format!("{}_q{}", self.id.clone(), i))
-                .spawn(move || {
-                    if let Err(e) = SeccompFilter::apply(virtio_vhost_blk_seccomp_filter) {
-                        error!("Error applying seccomp filter: {:?}", e);
-                    } else if let Err(e) = handler.run(paused, paused_sync.unwrap()) {
-                        error!("Error running worker: {:?}", e);
-                    }
-                })
-                .map(|thread| epoll_threads.push(thread))
-                .map_err(|e| {
-                    error!("failed to clone virtio epoll thread: {}", e);
-                    ActivateError::BadActivate
-                })?;
-        }
-        self.common.epoll_threads = Some(epoll_threads);
         event!("virtio-device", "activated", "id", &self.id);
         Ok(())
     }

virtio-devices/src/vhost_user/fs.rs

@@ -439,7 +439,7 @@ impl VirtioDevice for Fs {
                 ActivateError::BadActivate
             })?;
-        let vu_call_evt_queue_list = setup_vhost_user(
+        setup_vhost_user(
             &mut self.vu,
             &mem.memory(),
             queues,
@@ -478,8 +478,6 @@ impl VirtioDevice for Fs {
         };
         let mut handler = VhostUserEpollHandler::new(VhostUserEpollConfig {
-            vu_interrupt_list: vu_call_evt_queue_list,
-            interrupt_cb,
             kill_evt,
             pause_evt,
             slave_req_handler,

virtio-devices/src/vhost_user/handler.rs

@@ -7,18 +7,12 @@
 //
 // SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
-use super::super::{
-    EpollHelper, EpollHelperError, EpollHelperHandler, Queue, VirtioInterruptType,
-    EPOLL_HELPER_EVENT_LAST,
-};
-use super::{Error, Result};
-use vmm_sys_util::eventfd::EventFd;
-use crate::VirtioInterrupt;
+use super::super::{EpollHelper, EpollHelperError, EpollHelperHandler, EPOLL_HELPER_EVENT_LAST};
 use std::os::unix::io::AsRawFd;
 use std::sync::atomic::AtomicBool;
 use std::sync::{Arc, Barrier};
 use vhost::vhost_user::{MasterReqHandler, VhostUserMasterReqHandler};
+use vmm_sys_util::eventfd::EventFd;
 /// Collection of common parameters required by vhost-user devices while
 /// call Epoll handler.
@@ -28,16 +22,13 @@ use vhost::vhost_user::{MasterReqHandler, VhostUserMasterReqHandler};
 /// * `kill_evt` - EventFd used to kill the vhost-user device.
 /// * `vu_interrupt_list` - virtqueue and EventFd to signal when buffer used.
 pub struct VhostUserEpollConfig<S: VhostUserMasterReqHandler> {
-    pub interrupt_cb: Arc<dyn VirtioInterrupt>,
     pub kill_evt: EventFd,
     pub pause_evt: EventFd,
-    pub vu_interrupt_list: Vec<(Option<EventFd>, Queue)>,
     pub slave_req_handler: Option<MasterReqHandler<S>>,
 }
 pub struct VhostUserEpollHandler<S: VhostUserMasterReqHandler> {
     vu_epoll_cfg: VhostUserEpollConfig<S>,
-    queue_evt_start_idx: u16,
     slave_evt_idx: u16,
 }
@@ -50,23 +41,12 @@ impl<S: VhostUserMasterReqHandler> VhostUserEpollHandler<S> {
     /// # Return
     /// * `VhostUserEpollHandler` - epoll handler for vhost-user based devices
     pub fn new(vu_epoll_cfg: VhostUserEpollConfig<S>) -> VhostUserEpollHandler<S> {
-        let queue_evt_start_idx = EPOLL_HELPER_EVENT_LAST + 1;
-        let slave_evt_idx = queue_evt_start_idx + vu_epoll_cfg.vu_interrupt_list.len() as u16;
         VhostUserEpollHandler {
             vu_epoll_cfg,
-            queue_evt_start_idx,
-            slave_evt_idx,
+            slave_evt_idx: EPOLL_HELPER_EVENT_LAST + 1,
         }
     }
-    fn signal_used_queue(&self, queue: &Queue) -> Result<()> {
-        self.vu_epoll_cfg
-            .interrupt_cb
-            .trigger(&VirtioInterruptType::Queue, Some(queue))
-            .map_err(Error::FailedSignalingUsedQueue)
-    }
     pub fn run(
         &mut self,
         paused: Arc<AtomicBool>,
@@ -75,12 +55,6 @@ impl<S: VhostUserMasterReqHandler> VhostUserEpollHandler<S> {
         let mut helper =
             EpollHelper::new(&self.vu_epoll_cfg.kill_evt, &self.vu_epoll_cfg.pause_evt)?;
-        for (i, vhost_user_interrupt) in self.vu_epoll_cfg.vu_interrupt_list.iter().enumerate() {
-            if let Some(eventfd) = &vhost_user_interrupt.0 {
-                helper.add_event(eventfd.as_raw_fd(), self.queue_evt_start_idx + i as u16)?;
-            }
-        }
         if let Some(self_req_handler) = &self.vu_epoll_cfg.slave_req_handler {
             helper.add_event(self_req_handler.as_raw_fd(), self.slave_evt_idx)?;
         }
@@ -95,21 +69,6 @@ impl<S: VhostUserMasterReqHandler> EpollHelperHandler for VhostUserEpollHandler<
     fn handle_event(&mut self, _helper: &mut EpollHelper, event: &epoll::Event) -> bool {
         let ev_type = event.data as u16;
         match ev_type {
-            x if (x >= self.queue_evt_start_idx && x < self.slave_evt_idx) => {
-                let idx = (x - self.queue_evt_start_idx) as usize;
-                if let Some(eventfd) = &self.vu_epoll_cfg.vu_interrupt_list[idx].0 {
-                    if let Err(e) = eventfd.read() {
-                        error!("Failed to read queue: {:?}", e);
-                        return true;
-                    }
-                    if let Err(e) =
-                        self.signal_used_queue(&self.vu_epoll_cfg.vu_interrupt_list[idx].1)
-                    {
-                        error!("Failed to signal used queue: {:?}", e);
-                        return true;
-                    }
-                }
-            }
             x if x == self.slave_evt_idx => {
                 if let Some(slave_req_handler) = self.vu_epoll_cfg.slave_req_handler.as_mut() {
                     if let Err(e) = slave_req_handler.handle_request() {

virtio-devices/src/vhost_user/mod.rs

@@ -90,5 +90,7 @@ pub enum Error {
     InvalidFeatures,
     /// Missing file descriptor for the region.
     MissingRegionFd,
+    /// Missing IrqFd
+    MissingIrqFd,
 }
 type Result<T> = std::result::Result<T, Error>;

virtio-devices/src/vhost_user/net.rs

@@ -7,7 +7,6 @@ use super::super::net_util::{
 use super::super::{
     ActivateError, ActivateResult, Queue, VirtioCommon, VirtioDevice, VirtioDeviceType,
 };
-use super::handler::*;
 use super::vu_common_ctrl::*;
 use super::{Error, Result};
 use crate::seccomp_filters::{get_seccomp_filter, Thread};
@@ -291,7 +290,7 @@ impl VirtioDevice for Net {
             })?;
         }
-        let mut vu_interrupt_list = setup_vhost_user(
+        setup_vhost_user(
             &mut self.vhost_user_net,
             &mem.memory(),
             queues,
@@ -301,62 +300,6 @@ impl VirtioDevice for Net {
         )
         .map_err(ActivateError::VhostUserNetSetup)?;
-        let mut epoll_threads = Vec::new();
-        for i in 0..vu_interrupt_list.len() / 2 {
-            let interrupt_list_sub = vec![vu_interrupt_list.remove(0), vu_interrupt_list.remove(0)];
-            let kill_evt = self
-                .common
-                .kill_evt
-                .as_ref()
-                .unwrap()
-                .try_clone()
-                .map_err(|e| {
-                    error!("failed to clone kill_evt eventfd: {}", e);
-                    ActivateError::BadActivate
-                })?;
-            let pause_evt = self
-                .common
-                .pause_evt
-                .as_ref()
-                .unwrap()
-                .try_clone()
-                .map_err(|e| {
-                    error!("failed to clone pause_evt eventfd: {}", e);
-                    ActivateError::BadActivate
-                })?;
-            let mut handler = VhostUserEpollHandler::<SlaveReqHandler>::new(VhostUserEpollConfig {
-                interrupt_cb: interrupt_cb.clone(),
-                kill_evt,
-                pause_evt,
-                vu_interrupt_list: interrupt_list_sub,
-                slave_req_handler: None,
-            });
-            let paused = self.common.paused.clone();
-            let paused_sync = self.common.paused_sync.clone();
-            let virtio_vhost_net_seccomp_filter =
-                get_seccomp_filter(&self.seccomp_action, Thread::VirtioVhostNet)
-                    .map_err(ActivateError::CreateSeccompFilter)?;
-            thread::Builder::new()
-                .name(format!("{}_qp{}", self.id.clone(), i))
-                .spawn(move || {
-                    if let Err(e) = SeccompFilter::apply(virtio_vhost_net_seccomp_filter) {
-                        error!("Error applying seccomp filter: {:?}", e);
-                    } else if let Err(e) = handler.run(paused, paused_sync.unwrap()) {
-                        error!("Error running worker: {:?}", e);
-                    }
-                })
-                .map(|thread| epoll_threads.push(thread))
-                .map_err(|e| {
-                    error!("failed to clone queue EventFd: {}", e);
-                    ActivateError::BadActivate
-                })?;
-        }
-        self.common.epoll_threads = Some(epoll_threads);
         Ok(())
     }

virtio-devices/src/vhost_user/vu_common_ctrl.rs

@@ -4,7 +4,6 @@
 use super::super::{Descriptor, Queue};
 use super::{Error, Result};
 use crate::{get_host_address_range, VirtioInterrupt, VirtioInterruptType};
-use libc::EFD_NONBLOCK;
 use std::convert::TryInto;
 use std::os::unix::io::AsRawFd;
 use std::sync::Arc;
@@ -75,12 +74,10 @@ pub fn setup_vhost_user_vring(
     queues: Vec<Queue>,
     queue_evts: Vec<EventFd>,
     virtio_interrupt: &Arc<dyn VirtioInterrupt>,
-) -> Result<Vec<(Option<EventFd>, Queue)>> {
+) -> Result<()> {
     // Let's first provide the memory table to the backend.
     update_mem_table(vu, mem)?;
-    let mut vu_interrupt_list = Vec::new();
     for (queue_index, queue) in queues.into_iter().enumerate() {
         let actual_size: usize = queue.actual_size().try_into().unwrap();
@@ -117,12 +114,8 @@ pub fn setup_vhost_user_vring(
         {
             vu.set_vring_call(queue_index, &eventfd)
                 .map_err(Error::VhostUserSetVringCall)?;
-            vu_interrupt_list.push((None, queue));
         } else {
-            let eventfd = EventFd::new(EFD_NONBLOCK).map_err(Error::VhostIrqCreate)?;
-            vu.set_vring_call(queue_index, &eventfd)
-                .map_err(Error::VhostUserSetVringCall)?;
-            vu_interrupt_list.push((Some(eventfd), queue));
+            return Err(Error::MissingIrqFd);
         }
         vu.set_vring_kick(queue_index, &queue_evts[queue_index])
@@ -132,7 +125,7 @@ pub fn setup_vhost_user_vring(
             .map_err(Error::VhostUserSetVringEnable)?;
     }
-    Ok(vu_interrupt_list)
+    Ok(())
 }
 pub fn setup_vhost_user(
@@ -142,7 +135,7 @@ pub fn setup_vhost_user(
     queue_evts: Vec<EventFd>,
     virtio_interrupt: &Arc<dyn VirtioInterrupt>,
     acked_features: u64,
-) -> Result<Vec<(Option<EventFd>, Queue)>> {
+) -> Result<()> {
     // Set features based on the acked features from the guest driver.
     vu.set_features(acked_features)
         .map_err(Error::VhostUserSetFeatures)?;