vhost_user_block: Make use of the EVENT_IDX feature

Now that vhost_user_backend and vm-virtio do support EVENT_IDX, use it
in vhost_user_block to reduce the number of notifications sent between
the driver and the device.

This is especially useful when using active polling on the virtqueue,
as will be implemented by a future patch.

This is a snapshot of kvm_stat while generating ~60K IOPS with fio on
the guest without EVENT_IDX:

 Event                                         Total %Total CurAvg/s
 kvm_entry                                    393454   20.3    62494
 kvm_exit                                     393446   20.3    62494
 kvm_apic_accept_irq                          378146   19.5    60268
 kvm_msi_set_irq                              369720   19.0    58881
 kvm_fast_mmio                                370497   19.1    58817
 kvm_hv_timer_state                            10197    0.5     1715
 kvm_msr                                        8770    0.5     1443
 kvm_wait_lapic_expire                          7018    0.4     1118
 kvm_apic                                       2768    0.1      538
 kvm_pv_tlb_flush                               2028    0.1      360
 kvm_vcpu_wakeup                                1453    0.1      278
 kvm_apic_ipi                                   1384    0.1      269
 kvm_fpu                                        1148    0.1      164
 kvm_pio                                         574    0.0       82
 kvm_userspace_exit                              574    0.0       82
 kvm_halt_poll_ns                                 24    0.0        3

And this is the snapshot while doing the same thing with EVENT_IDX:

 Event                                         Total %Total CurAvg/s
 kvm_entry                                     35506   26.0     3873
 kvm_exit                                      35499   26.0     3873
 kvm_hv_timer_state                            14740   10.8     1672
 kvm_apic_accept_irq                           13017    9.5     1438
 kvm_msr                                       12845    9.4     1421
 kvm_wait_lapic_expire                         10422    7.6     1118
 kvm_apic                                       3788    2.8      502
 kvm_pv_tlb_flush                               2708    2.0      340
 kvm_vcpu_wakeup                                1992    1.5      258
 kvm_apic_ipi                                   1894    1.4      251
 kvm_fpu                                        1476    1.1      164
 kvm_pio                                         738    0.5       82
 kvm_userspace_exit                              738    0.5       82
 kvm_msi_set_irq                                 701    0.5       69
 kvm_fast_mmio                                   238    0.2        4
 kvm_halt_poll_ns                                 50    0.0        1
 kvm_ple_window_update                            28    0.0        0
 kvm_page_fault                                    4    0.0        0

The number of VM exits per second — especially those caused by
notifications (kvm_fast_mmio and kvm_msi_set_irq) — is clearly
drastically lower with EVENT_IDX enabled.

Signed-off-by: Sergio Lopez <slp@redhat.com>
This commit is contained in:
Sergio Lopez 2020-02-14 07:24:34 -05:00 committed by Rob Bradford
parent 1ef6996207
commit 0e4e27ea9d

View File

@ -22,6 +22,7 @@ use std::fs::OpenOptions;
use std::io::Read;
use std::io::{Seek, SeekFrom, Write};
use std::mem;
use std::num::Wrapping;
use std::os::unix::fs::OpenOptionsExt;
use std::path::PathBuf;
use std::process;
@ -32,6 +33,7 @@ use std::{convert, error, fmt, io};
use vhost_rs::vhost_user::message::*;
use vhost_user_backend::{VhostUserBackend, VhostUserDaemon, Vring, VringWorker};
use virtio_bindings::bindings::virtio_blk::*;
use virtio_bindings::bindings::virtio_ring::VIRTIO_RING_F_EVENT_IDX;
use vm_memory::{Bytes, GuestMemoryError, GuestMemoryMmap};
use vm_virtio::block::{build_disk_image_id, Request};
use vmm_sys_util::eventfd::EventFd;
@ -95,6 +97,7 @@ pub struct VhostUserBlkBackend {
disk_nsectors: u64,
config: virtio_blk_config,
rdonly: bool,
event_idx: bool,
kill_evt: EventFd,
}
@ -136,6 +139,7 @@ impl VhostUserBlkBackend {
disk_nsectors: nsectors,
config,
rdonly,
event_idx: false,
kill_evt: EventFd::new(EFD_NONBLOCK).map_err(Error::CreateKillEventFd)?,
})
}
@ -175,8 +179,15 @@ impl VhostUserBlkBackend {
len = 0;
}
}
vring.mut_queue().add_used(mem, head.index, len);
used_any = true;
if let Some(used_idx) = vring.mut_queue().add_used(mem, head.index, len) {
let used_event = vring.mut_queue().get_used_event(mem);
if vring.needs_notification(Wrapping(used_idx), used_event) {
debug!("signalling queue");
vring.signal_used_queue().unwrap();
}
used_any = true;
}
}
used_any
@ -199,6 +210,7 @@ impl VhostUserBackend for VhostUserBlkBackend {
fn features(&self) -> u64 {
let mut avail_features = 1 << VIRTIO_BLK_F_MQ
| 1 << VIRTIO_BLK_F_CONFIG_WCE
| 1 << VIRTIO_RING_F_EVENT_IDX
| 1 << VIRTIO_F_VERSION_1
| VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits();
@ -212,7 +224,9 @@ impl VhostUserBackend for VhostUserBlkBackend {
VhostUserProtocolFeatures::CONFIG
}
fn set_event_idx(&mut self, _enabled: bool) {}
fn set_event_idx(&mut self, enabled: bool) {
self.event_idx = enabled;
}
fn update_memory(&mut self, mem: GuestMemoryMmap) -> VhostUserBackendResult<()> {
self.mem = Some(mem);
@ -234,9 +248,12 @@ impl VhostUserBackend for VhostUserBlkBackend {
match device_event {
q if device_event < self.config.num_queues => {
let mut vring = vrings[q as usize].write().unwrap();
if self.process_queue(&mut vring) {
debug!("signalling queue");
vring.signal_used_queue().unwrap();
if self.process_queue(&mut vring) && self.event_idx {
if let Some(mem) = self.mem.as_ref() {
vring.mut_queue().update_avail_event(mem);
// Check the queue again to ensure there are no pending request
self.process_queue(&mut vring);
}
}
Ok(false)
}