cloud-hypervisor/net_util/src/ctrl_queue.rs
Sebastien Boeuf a4859ffe85 virtio-devices: Optimize add_used() usage
Now that we rely on pop_descriptor_chain() rather than iter() to iterate
over a queue, there is no longer a borrow on the queue itself, meaning we
can invoke add_used() directly from within the iteration loop. This
simplifies the processing of the queues for each virtio device, and brings
a possible performance improvement given we no longer have to iterate
twice over the list of descriptors to invoke add_used().

Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
2022-07-29 17:41:32 +01:00
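
The change boils down to the following pattern (a minimal sketch, not code
from the tree: handle_chain() is a hypothetical helper standing in for the
device-specific processing and returning the used length, while
pop_descriptor_chain(), head_index() and add_used() are the virtio-queue
calls used in the file below):

// Before: iter() mutably borrows the queue, so completed chains had to be
// recorded and add_used() invoked in a second pass once the borrow ended.
let mut used = Vec::new();
for mut chain in queue.iter(mem)? {
    let head = chain.head_index();
    used.push((head, handle_chain(&mut chain)?));
}
for (head, len) in used {
    queue.add_used(mem, head, len)?;
}

// After: pop_descriptor_chain() returns an owned chain, so the used element
// can be added from within the same loop.
while let Some(mut chain) = queue.pop_descriptor_chain(mem) {
    let head = chain.head_index();
    let len = handle_chain(&mut chain)?;
    queue.add_used(mem, head, len)?;
}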


// Copyright (c) 2021 Intel Corporation. All rights reserved.
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
use crate::GuestMemoryMmap;
use crate::Tap;
use libc::c_uint;
use std::sync::Arc;
use virtio_bindings::bindings::virtio_net::{
    VIRTIO_NET_CTRL_GUEST_OFFLOADS, VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, VIRTIO_NET_CTRL_MQ,
    VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX, VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN,
    VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, VIRTIO_NET_ERR, VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_UFO, VIRTIO_NET_OK,
};
use virtio_queue::{Queue, QueueT};
use vm_memory::{ByteValued, Bytes, GuestMemoryError};
use vm_virtio::{AccessPlatform, Translatable};

#[derive(Debug)]
pub enum Error {
    /// Read queue failed.
    GuestMemory(GuestMemoryError),
    /// No control header descriptor
    NoControlHeaderDescriptor,
    /// Missing the data descriptor in the chain.
    NoDataDescriptor,
    /// No status descriptor
    NoStatusDescriptor,
    /// Failed adding used index
    QueueAddUsed(virtio_queue::Error),
    /// Failed creating an iterator over the queue
    QueueIterator(virtio_queue::Error),
    /// Failed enabling notification for the queue
    QueueEnableNotification(virtio_queue::Error),
}
type Result<T> = std::result::Result<T, Error>;

#[repr(C, packed)]
#[derive(Debug, Clone, Copy, Default)]
pub struct ControlHeader {
    pub class: u8,
    pub cmd: u8,
}

// SAFETY: ControlHeader only contains a series of integers
unsafe impl ByteValued for ControlHeader {}
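
/// Handles commands received on the virtio-net control queue and applies them
/// to the tap devices it owns.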
pub struct CtrlQueue {
    pub taps: Vec<Tap>,
}

impl CtrlQueue {
    pub fn new(taps: Vec<Tap>) -> Self {
        CtrlQueue { taps }
    }

    pub fn process(
        &mut self,
        mem: &GuestMemoryMmap,
        queue: &mut Queue,
        access_platform: Option<&Arc<dyn AccessPlatform>>,
    ) -> Result<()> {
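        // Each control command arrives as a chain of three descriptors: the
        // ControlHeader, a data descriptor carrying the command payload, and a
        // status descriptor the device fills with VIRTIO_NET_OK or VIRTIO_NET_ERR.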
        while let Some(mut desc_chain) = queue.pop_descriptor_chain(mem) {
            let ctrl_desc = desc_chain.next().ok_or(Error::NoControlHeaderDescriptor)?;

            let ctrl_hdr: ControlHeader = desc_chain
                .memory()
                .read_obj(
                    ctrl_desc
                        .addr()
                        .translate_gva(access_platform, ctrl_desc.len() as usize),
                )
                .map_err(Error::GuestMemory)?;

            let data_desc = desc_chain.next().ok_or(Error::NoDataDescriptor)?;
            let data_desc_addr = data_desc
                .addr()
                .translate_gva(access_platform, data_desc.len() as usize);

            let status_desc = desc_chain.next().ok_or(Error::NoStatusDescriptor)?;
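
            // Dispatch on the control class; only the multiqueue (MQ) and guest
            // offload commands are currently handled.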
            let ok = match u32::from(ctrl_hdr.class) {
                VIRTIO_NET_CTRL_MQ => {
                    let queue_pairs = desc_chain
                        .memory()
                        .read_obj::<u16>(data_desc_addr)
                        .map_err(Error::GuestMemory)?;
                    if u32::from(ctrl_hdr.cmd) != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET {
                        warn!("Unsupported command: {}", ctrl_hdr.cmd);
                        false
                    } else if (queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN as u16)
                        || (queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX as u16)
                    {
                        warn!("Number of MQ pairs out of range: {}", queue_pairs);
                        false
                    } else {
                        info!("Number of MQ pairs requested: {}", queue_pairs);
                        true
                    }
                }
                VIRTIO_NET_CTRL_GUEST_OFFLOADS => {
                    let features = desc_chain
                        .memory()
                        .read_obj::<u64>(data_desc_addr)
                        .map_err(Error::GuestMemory)?;
                    if u32::from(ctrl_hdr.cmd) != VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET {
                        warn!("Unsupported command: {}", ctrl_hdr.cmd);
                        false
                    } else {
                        let mut ok = true;
                        for tap in self.taps.iter_mut() {
                            info!("Reprogramming tap offload with features: {}", features);
                            tap.set_offload(virtio_features_to_tap_offload(features))
                                .map_err(|e| {
                                    error!("Error programming tap offload: {:?}", e);
                                    ok = false
                                })
                                .ok();
                        }
                        ok
                    }
                }
                _ => {
                    warn!("Unsupported command {:?}", ctrl_hdr);
                    false
                }
            };
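
            // Report the command result back to the guest through the status
            // descriptor.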
            desc_chain
                .memory()
                .write_obj(
                    if ok { VIRTIO_NET_OK } else { VIRTIO_NET_ERR } as u8,
                    status_desc
                        .addr()
                        .translate_gva(access_platform, status_desc.len() as usize),
                )
                .map_err(Error::GuestMemory)?;

            let len = ctrl_desc.len() + data_desc.len() + status_desc.len();
            queue
                .add_used(desc_chain.memory(), desc_chain.head_index(), len)
                .map_err(Error::QueueAddUsed)?;
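
            // Stop processing once re-enabling notifications reports that no
            // further descriptors are pending; the guest will notify us again.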
            if !queue
                .enable_notification(mem)
                .map_err(Error::QueueEnableNotification)?
            {
                break;
            }
        }

        Ok(())
    }
}
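
// Translate the guest offload feature bits (VIRTIO_NET_F_GUEST_*) into the
// corresponding TUN_F_* offload flags understood by the tap device.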
pub fn virtio_features_to_tap_offload(features: u64) -> c_uint {
    let mut tap_offloads: c_uint = 0;

    if features & (1 << VIRTIO_NET_F_GUEST_CSUM) != 0 {
        tap_offloads |= net_gen::TUN_F_CSUM;
    }
    if features & (1 << VIRTIO_NET_F_GUEST_TSO4) != 0 {
        tap_offloads |= net_gen::TUN_F_TSO4;
    }
    if features & (1 << VIRTIO_NET_F_GUEST_TSO6) != 0 {
        tap_offloads |= net_gen::TUN_F_TSO6;
    }
    if features & (1 << VIRTIO_NET_F_GUEST_ECN) != 0 {
        tap_offloads |= net_gen::TUN_F_TSO_ECN;
    }
    if features & (1 << VIRTIO_NET_F_GUEST_UFO) != 0 {
        tap_offloads |= net_gen::TUN_F_UFO;
    }
    tap_offloads
}