vm-virtio: net: Implement VIRTIO_RING_F_EVENT_IDX

If VIRTIO_RING_F_EVENT_IDX is negotiated, only generate interrupts when the
guest has asked us to do so, and suppress them otherwise.

Fixes: #788

Signed-off-by: Rob Bradford <robert.bradford@intel.com>
Author: Rob Bradford
Date: 2020-06-01 13:08:53 +01:00
Committed by: Sebastien Boeuf
Parent: f06970730b
Commit: a4d377a066
2 changed files with 22 additions and 3 deletions
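
With VIRTIO_RING_F_EVENT_IDX the guest publishes a "used_event" index at the tail of the avail ring, and the device is expected to inject an interrupt only once its used index moves past that value; this is the condition Queue::needs_notification() evaluates in the hunks below. A minimal sketch of that comparison, assuming the spec's wrapping u16 arithmetic (the function names here are illustrative, not the vm-virtio API):

    // Sketch of the spec's vring_need_event() rule. All arithmetic wraps at 2^16.
    fn needs_interrupt(used_event: u16, new_used_idx: u16, old_used_idx: u16) -> bool {
        // Interrupt only if the used index has just moved past `used_event`.
        new_used_idx.wrapping_sub(used_event).wrapping_sub(1)
            < new_used_idx.wrapping_sub(old_used_idx)
    }

    fn main() {
        // Guest asked to be notified once the entry at index 5 has been used.
        assert!(needs_interrupt(5, 6, 4));   // crossed 5 -> interrupt
        assert!(!needs_interrupt(10, 6, 4)); // not yet at 10 -> suppress
    }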


@@ -31,6 +31,7 @@ use std::sync::Arc;
 use std::thread;
 use std::vec::Vec;
 use virtio_bindings::bindings::virtio_net::*;
+use virtio_bindings::bindings::virtio_ring::VIRTIO_RING_F_EVENT_IDX;
 use vm_memory::{ByteValued, GuestAddressSpace, GuestMemoryAtomic, GuestMemoryMmap};
 use vm_migration::{
     Migratable, MigratableError, Pausable, Snapshot, SnapshotDataSection, Snapshottable,
@@ -113,7 +114,12 @@ impl NetQueuePair {
         }
         if self.rx.deferred_irqs {
             self.rx.deferred_irqs = false;
-            Ok(true)
+            let mem = self
+                .mem
+                .as_ref()
+                .ok_or(DeviceError::NoMemoryConfigured)
+                .map(|m| m.memory())?;
+            Ok(queue.needs_notification(&mem, queue.next_used))
         } else {
             Ok(false)
         }
@@ -139,7 +145,12 @@ impl NetQueuePair {
             self.process_rx(queue)
         } else if self.rx.deferred_irqs {
             self.rx.deferred_irqs = false;
-            Ok(true)
+            let mem = self
+                .mem
+                .as_ref()
+                .ok_or(DeviceError::NoMemoryConfigured)
+                .map(|m| m.memory())?;
+            Ok(queue.needs_notification(&mem, queue.next_used))
         } else {
             Ok(false)
         }
@@ -155,7 +166,7 @@ impl NetQueuePair {
             .ok_or(DeviceError::NoMemoryConfigured)
             .map(|m| m.memory())?;
         self.tx.process_desc_chain(&mem, &mut self.tap, &mut queue);
-        Ok(true)
+        Ok(queue.needs_notification(&mem, queue.next_used))
     }

     pub fn process_rx_tap(&mut self, mut queue: &mut Queue) -> result::Result<bool, DeviceError> {
@@ -411,6 +422,7 @@ impl Net {
             | 1 << VIRTIO_NET_F_GUEST_UFO
             | 1 << VIRTIO_NET_F_HOST_TSO4
             | 1 << VIRTIO_NET_F_HOST_UFO
+            | 1 << VIRTIO_RING_F_EVENT_IDX
             | 1 << VIRTIO_F_VERSION_1;

         if iommu {
@@ -615,6 +627,8 @@ impl VirtioDevice for Net {
             })?;
         }

+        let event_idx = self.acked_features & 1 << VIRTIO_RING_F_EVENT_IDX != 0;
+
         let mut epoll_threads = Vec::new();
         for _ in 0..taps.len() {
             let rx = RxVirtio::new();
@@ -624,6 +638,8 @@ impl VirtioDevice for Net {
             let mut queue_pair = Vec::new();
             queue_pair.push(queues.remove(0));
             queue_pair.push(queues.remove(0));
+            queue_pair[0].set_event_idx(event_idx);
+            queue_pair[1].set_event_idx(event_idx);

             let mut queue_evt_pair = Vec::new();
             queue_evt_pair.push(queue_evts.remove(0));
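
The hunks above, all in the first of the two changed files, advertise the feature, check at activation time whether the guest actually acked it, and push that decision down to both queues of each pair; the hunks below, from the second file, keep the device's "avail event" up to date as buffers are consumed. For reference, a sketch of the feature-bit test, using the spec-defined bit number 29 for VIRTIO_RING_F_EVENT_IDX (the real code imports the constant from virtio_bindings and the helper name is illustrative):

    // Sketch of the activation-time check; the constant value follows the virtio spec.
    const VIRTIO_RING_F_EVENT_IDX: u64 = 29;

    fn event_idx_negotiated(acked_features: u64) -> bool {
        acked_features & (1u64 << VIRTIO_RING_F_EVENT_IDX) != 0
    }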


@@ -202,6 +202,7 @@ impl CtrlVirtio {
         }
         for &(desc_index, len) in &used_desc_heads[..used_count] {
             self.queue.add_used(&mem, desc_index, len);
+            self.queue.update_avail_event(&mem);
         }

         Ok(())
@@ -389,6 +390,7 @@ impl TxVirtio {
                 }
             };
             queue.add_used(&mem, head_index, 0);
+            queue.update_avail_event(&mem);
         }
     }
 }
@@ -460,6 +462,7 @@ impl RxVirtio {
             }
             queue.add_used(&mem, head_index, write_count as u32);
+            queue.update_avail_event(&mem);

             // Mark that we have at least one pending packet and we need to interrupt the guest.
             self.deferred_irqs = true;
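
The update_avail_event() calls added above write the device's current position into the "avail_event" slot at the tail of the used ring, which tells the guest how far it may publish new buffers before it has to kick the device again. A rough layout sketch of the two suppression fields in a split virtqueue, assuming the virtio 1.x layout with no extra alignment padding (helper names are illustrative, not the crate's API):

    // Byte offsets of the event-suppression fields inside a split virtqueue.
    // used_event:  after avail.flags (2) + avail.idx (2) + size 2-byte ring entries.
    // avail_event: after used.flags (2) + used.idx (2) + size 8-byte ring entries.
    fn used_event_offset(queue_size: u64) -> u64 {
        4 + 2 * queue_size // guest -> device: "interrupt me once past this used index"
    }

    fn avail_event_offset(queue_size: u64) -> u64 {
        4 + 8 * queue_size // device -> guest: "kick me once past this avail index"
    }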