vm-virtio: Optimize vhost-user interrupt notification

Thanks to the recently introduced function notifier() in the
VirtioInterrupt trait, all vhost-user devices can now bypass
listening on an intermediate event fd as they can provide the
actual fd responsible for triggering the interrupt directly to
the vhost-user backend.

In case the notifier does not provide the event fd, the code falls
back to creating an intermediate event fd that it listens to, so
that it can trigger the interrupt on behalf of the backend.

Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
This commit is contained in:
Sebastien Boeuf 2020-01-14 08:18:35 +01:00 committed by Samuel Ortiz
parent 1f029dd2dc
commit be421dccea
5 changed files with 41 additions and 26 deletions

View File

@ -260,6 +260,7 @@ impl VirtioDevice for Blk {
mem.load().as_ref(), mem.load().as_ref(),
queues, queues,
queue_evts, queue_evts,
&interrupt_cb,
self.acked_features, self.acked_features,
) )
.map_err(ActivateError::VhostUserBlkSetup)?; .map_err(ActivateError::VhostUserBlkSetup)?;

View File

@ -382,6 +382,7 @@ impl VirtioDevice for Fs {
mem.load().as_ref(), mem.load().as_ref(),
queues, queues,
queue_evts, queue_evts,
&interrupt_cb,
self.acked_features, self.acked_features,
) )
.map_err(ActivateError::VhostUserSetup)?; .map_err(ActivateError::VhostUserSetup)?;

View File

@ -31,7 +31,7 @@ pub struct VhostUserEpollConfig<S: VhostUserMasterReqHandler> {
pub interrupt_cb: Arc<dyn VirtioInterrupt>, pub interrupt_cb: Arc<dyn VirtioInterrupt>,
pub kill_evt: EventFd, pub kill_evt: EventFd,
pub pause_evt: EventFd, pub pause_evt: EventFd,
pub vu_interrupt_list: Vec<(EventFd, Queue)>, pub vu_interrupt_list: Vec<(Option<EventFd>, Queue)>,
pub slave_req_handler: Option<MasterReqHandler<S>>, pub slave_req_handler: Option<MasterReqHandler<S>>,
} }
@ -64,14 +64,16 @@ impl<S: VhostUserMasterReqHandler> VhostUserEpollHandler<S> {
for (index, vhost_user_interrupt) in self.vu_epoll_cfg.vu_interrupt_list.iter().enumerate() for (index, vhost_user_interrupt) in self.vu_epoll_cfg.vu_interrupt_list.iter().enumerate()
{ {
// Add events if let Some(eventfd) = &vhost_user_interrupt.0 {
epoll::ctl( // Add events
epoll_fd, epoll::ctl(
epoll::ControlOptions::EPOLL_CTL_ADD, epoll_fd,
vhost_user_interrupt.0.as_raw_fd(), epoll::ControlOptions::EPOLL_CTL_ADD,
epoll::Event::new(epoll::Events::EPOLLIN, index as u64), eventfd.as_raw_fd(),
) epoll::Event::new(epoll::Events::EPOLLIN, index as u64),
.map_err(Error::EpollCtl)?; )
.map_err(Error::EpollCtl)?;
}
} }
let kill_evt_index = self.vu_epoll_cfg.vu_interrupt_list.len(); let kill_evt_index = self.vu_epoll_cfg.vu_interrupt_list.len();
@ -136,15 +138,14 @@ impl<S: VhostUserMasterReqHandler> VhostUserEpollHandler<S> {
match ev_type { match ev_type {
x if x < kill_evt_index => { x if x < kill_evt_index => {
self.vu_epoll_cfg.vu_interrupt_list[x] if let Some(eventfd) = &self.vu_epoll_cfg.vu_interrupt_list[x].0 {
.0 eventfd.read().map_err(Error::FailedReadingQueue)?;
.read() if let Err(e) =
.map_err(Error::FailedReadingQueue)?; self.signal_used_queue(&self.vu_epoll_cfg.vu_interrupt_list[x].1)
if let Err(e) = {
self.signal_used_queue(&self.vu_epoll_cfg.vu_interrupt_list[x].1) error!("Failed to signal used queue: {:?}", e);
{ break 'poll;
error!("Failed to signal used queue: {:?}", e); }
break 'poll;
} }
} }
x if kill_evt_index == x => { x if kill_evt_index == x => {

View File

@ -302,13 +302,14 @@ impl VirtioDevice for Net {
mem.load().as_ref(), mem.load().as_ref(),
queues, queues,
queue_evts, queue_evts,
&interrupt_cb,
self.acked_features & self.backend_features, self.acked_features & self.backend_features,
) )
.map_err(ActivateError::VhostUserNetSetup)?; .map_err(ActivateError::VhostUserNetSetup)?;
let mut epoll_thread = Vec::new(); let mut epoll_thread = Vec::new();
for _ in 0..vu_interrupt_list.len() / 2 { for _ in 0..vu_interrupt_list.len() / 2 {
let mut interrupt_list_sub: Vec<(EventFd, Queue)> = Vec::with_capacity(2); let mut interrupt_list_sub: Vec<(Option<EventFd>, Queue)> = Vec::with_capacity(2);
interrupt_list_sub.push(vu_interrupt_list.remove(0)); interrupt_list_sub.push(vu_interrupt_list.remove(0));
interrupt_list_sub.push(vu_interrupt_list.remove(0)); interrupt_list_sub.push(vu_interrupt_list.remove(0));

View File

@ -5,9 +5,11 @@ use libc;
use libc::EFD_NONBLOCK; use libc::EFD_NONBLOCK;
use std::convert::TryInto; use std::convert::TryInto;
use std::os::unix::io::AsRawFd; use std::os::unix::io::AsRawFd;
use std::sync::Arc;
use std::vec::Vec; use std::vec::Vec;
use crate::queue::Descriptor; use crate::queue::Descriptor;
use crate::{VirtioInterrupt, VirtioInterruptType};
use vm_device::get_host_address_range; use vm_device::get_host_address_range;
use vm_memory::{Address, Error as MmapError, GuestMemory, GuestMemoryMmap, GuestMemoryRegion}; use vm_memory::{Address, Error as MmapError, GuestMemory, GuestMemoryMmap, GuestMemoryRegion};
@ -30,7 +32,8 @@ pub fn setup_vhost_user_vring(
mem: &GuestMemoryMmap, mem: &GuestMemoryMmap,
queues: Vec<Queue>, queues: Vec<Queue>,
queue_evts: Vec<EventFd>, queue_evts: Vec<EventFd>,
) -> Result<Vec<(EventFd, Queue)>> { virtio_interrupt: &Arc<dyn VirtioInterrupt>,
) -> Result<Vec<(Option<EventFd>, Queue)>> {
let mut regions: Vec<VhostUserMemoryRegionInfo> = Vec::new(); let mut regions: Vec<VhostUserMemoryRegionInfo> = Vec::new();
mem.with_regions_mut(|_, region| { mem.with_regions_mut(|_, region| {
let (mmap_handle, mmap_offset) = match region.file_offset() { let (mmap_handle, mmap_offset) = match region.file_offset() {
@ -89,10 +92,17 @@ pub fn setup_vhost_user_vring(
vu.set_vring_base(queue_index, 0u16) vu.set_vring_base(queue_index, 0u16)
.map_err(Error::VhostUserSetVringBase)?; .map_err(Error::VhostUserSetVringBase)?;
let vhost_user_interrupt = EventFd::new(EFD_NONBLOCK).map_err(Error::VhostIrqCreate)?; if let Some(eventfd) = virtio_interrupt.notifier(&VirtioInterruptType::Queue, Some(&queue))
vu.set_vring_call(queue_index, &vhost_user_interrupt) {
.map_err(Error::VhostUserSetVringCall)?; vu.set_vring_call(queue_index, &eventfd)
vu_interrupt_list.push((vhost_user_interrupt, queue)); .map_err(Error::VhostUserSetVringCall)?;
vu_interrupt_list.push((None, queue));
} else {
let eventfd = EventFd::new(EFD_NONBLOCK).map_err(Error::VhostIrqCreate)?;
vu.set_vring_call(queue_index, &eventfd)
.map_err(Error::VhostUserSetVringCall)?;
vu_interrupt_list.push((Some(eventfd), queue));
}
vu.set_vring_kick(queue_index, &queue_evts[queue_index]) vu.set_vring_kick(queue_index, &queue_evts[queue_index])
.map_err(Error::VhostUserSetVringKick)?; .map_err(Error::VhostUserSetVringKick)?;
@ -109,13 +119,14 @@ pub fn setup_vhost_user(
mem: &GuestMemoryMmap, mem: &GuestMemoryMmap,
queues: Vec<Queue>, queues: Vec<Queue>,
queue_evts: Vec<EventFd>, queue_evts: Vec<EventFd>,
virtio_interrupt: &Arc<dyn VirtioInterrupt>,
acked_features: u64, acked_features: u64,
) -> Result<Vec<(EventFd, Queue)>> { ) -> Result<Vec<(Option<EventFd>, Queue)>> {
// Set features based on the acked features from the guest driver. // Set features based on the acked features from the guest driver.
vu.set_features(acked_features) vu.set_features(acked_features)
.map_err(Error::VhostUserSetFeatures)?; .map_err(Error::VhostUserSetFeatures)?;
setup_vhost_user_vring(vu, mem, queues, queue_evts) setup_vhost_user_vring(vu, mem, queues, queue_evts, virtio_interrupt)
} }
pub fn reset_vhost_user(vu: &mut Master, num_queues: usize) -> Result<()> { pub fn reset_vhost_user(vu: &mut Master, num_queues: usize) -> Result<()> {