2020-07-07 14:02:18 +00:00
|
|
|
// Copyright (c) 2020 Intel Corporation. All rights reserved.
|
|
|
|
//
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
|
|
|
|
|
2021-06-28 14:28:26 +00:00
|
|
|
use super::{register_listener, unregister_listener, vnet_hdr_len, Tap};
|
2021-03-17 22:41:52 +00:00
|
|
|
use rate_limiter::{RateLimiter, TokenType};
|
2020-07-07 15:50:13 +00:00
|
|
|
use std::io;
|
2020-07-07 14:02:18 +00:00
|
|
|
use std::num::Wrapping;
|
2020-07-07 15:50:13 +00:00
|
|
|
use std::os::unix::io::{AsRawFd, RawFd};
|
|
|
|
use std::sync::atomic::{AtomicU64, Ordering};
|
|
|
|
use std::sync::Arc;
|
2022-08-10 21:58:36 +00:00
|
|
|
use thiserror::Error;
|
2022-07-06 14:08:08 +00:00
|
|
|
use virtio_queue::{Queue, QueueOwnedT, QueueT};
|
2024-05-01 08:24:48 +00:00
|
|
|
use vm_memory::bitmap::Bitmap;
|
2022-07-06 14:08:08 +00:00
|
|
|
use vm_memory::{Bytes, GuestMemory};
|
2022-01-26 22:44:31 +00:00
|
|
|
use vm_virtio::{AccessPlatform, Translatable};
|
2020-07-07 14:02:18 +00:00
|
|
|
|
|
|
|
/// Transmit-side state: statistics accumulated while draining the TX queue.
///
/// The counters are incremented by `process_desc_chain` for every frame
/// successfully written to the TAP device, and are periodically flushed into
/// the shared atomic `NetCounters` (and reset) by `NetQueuePair::process_tx`.
#[derive(Clone)]
pub struct TxVirtio {
    // Payload bytes written to the TAP, excluding the virtio-net header.
    pub counter_bytes: Wrapping<u64>,
    // Number of frames written to the TAP.
    pub counter_frames: Wrapping<u64>,
}
|
|
|
|
|
|
|
|
impl Default for TxVirtio {
|
|
|
|
fn default() -> Self {
|
|
|
|
Self::new()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl TxVirtio {
    /// Create a TX handler with zeroed statistics counters.
    pub fn new() -> Self {
        TxVirtio {
            counter_bytes: Wrapping(0),
            counter_frames: Wrapping(0),
        }
    }

    /// Drain the TX queue: pop available descriptor chains and write each
    /// chain's buffers to the TAP device with a single vectored `writev(2)`.
    ///
    /// Returns `Ok(true)` when the TAP returned EAGAIN (the caller should
    /// retry once the TAP becomes writable again), `Ok(false)` otherwise.
    ///
    /// Rate limiting: for simplicity the 'last' descriptor chain is always
    /// let through even if it exceeded the budget; processing simply stops
    /// before consuming the next available chain.
    pub fn process_desc_chain<B: Bitmap + 'static>(
        &mut self,
        mem: &vm_memory::GuestMemoryMmap<B>,
        tap: &Tap,
        queue: &mut Queue,
        rate_limiter: &mut Option<RateLimiter>,
        access_platform: Option<&Arc<dyn AccessPlatform>>,
    ) -> Result<bool, NetQueuePairError> {
        let mut retry_write = false;
        let mut rate_limit_reached = false;

        while let Some(mut desc_chain) = queue.pop_descriptor_chain(mem) {
            // The previous iteration exhausted the rate-limit budget: put the
            // chain we just popped back and stop processing.
            if rate_limit_reached {
                queue.go_to_previous_position();
                break;
            }

            let mut next_desc = desc_chain.next();

            // Gather every readable descriptor of the chain into an iovec
            // array so the whole frame goes out in one writev() call.
            let mut iovecs = Vec::new();
            while let Some(desc) = next_desc {
                // Translate the guest address in case an IOMMU-like access
                // platform is interposed.
                let desc_addr = desc
                    .addr()
                    .translate_gva(access_platform, desc.len() as usize);
                // TX descriptors must be readable (driver-written) and
                // non-empty; anything else is a malformed chain.
                if !desc.is_write_only() && desc.len() > 0 {
                    let buf = desc_chain
                        .memory()
                        .get_slice(desc_addr, desc.len() as usize)
                        .map_err(NetQueuePairError::GuestMemory)?
                        .ptr_guard_mut();
                    let iovec = libc::iovec {
                        iov_base: buf.as_ptr() as *mut libc::c_void,
                        iov_len: desc.len() as libc::size_t,
                    };
                    iovecs.push(iovec);
                } else {
                    error!(
                        "Invalid descriptor chain: address = 0x{:x} length = {} write_only = {}",
                        desc_addr.0,
                        desc.len(),
                        desc.is_write_only()
                    );
                    return Err(NetQueuePairError::DescriptorChainInvalid);
                }
                next_desc = desc_chain.next();
            }

            let len = if !iovecs.is_empty() {
                // SAFETY: FFI call with correct arguments
                let result = unsafe {
                    libc::writev(
                        tap.as_raw_fd() as libc::c_int,
                        iovecs.as_ptr(),
                        iovecs.len() as libc::c_int,
                    )
                };

                if result < 0 {
                    let e = std::io::Error::last_os_error();

                    /* EAGAIN */
                    if e.kind() == std::io::ErrorKind::WouldBlock {
                        // TAP not writable: rewind so the chain is retried
                        // later and report the retry condition to the caller.
                        queue.go_to_previous_position();
                        retry_write = true;
                        break;
                    }
                    error!("net: tx: failed writing to tap: {}", e);
                    return Err(NetQueuePairError::WriteTap(e));
                }

                // A valid frame must at least contain the virtio-net header.
                if (result as usize) < vnet_hdr_len() {
                    return Err(NetQueuePairError::InvalidVirtioNetHeader);
                }

                // Account payload bytes only (header excluded) plus one frame.
                self.counter_bytes += Wrapping(result as u64 - vnet_hdr_len() as u64);
                self.counter_frames += Wrapping(1);

                result as u32
            } else {
                0
            };

            // For the sake of simplicity (similar to the RX rate limiting), we always
            // let the 'last' descriptor chain go-through even if it was over the rate
            // limit, and simply stop processing oncoming `avail_desc` if any.
            if let Some(rate_limiter) = rate_limiter {
                rate_limit_reached = !rate_limiter.consume(1, TokenType::Ops)
                    || !rate_limiter.consume(len as u64, TokenType::Bytes);
            }

            queue
                .add_used(desc_chain.memory(), desc_chain.head_index(), len)
                .map_err(NetQueuePairError::QueueAddUsed)?;

            // If notifications are suppressed (event-idx), no more avail
            // descriptors are coming for now: stop draining.
            if !queue
                .enable_notification(mem)
                .map_err(NetQueuePairError::QueueEnableNotification)?
            {
                break;
            }
        }

        Ok(retry_write)
    }
}
|
|
|
|
|
|
|
|
/// Receive-side state: statistics accumulated while filling the RX queue.
///
/// The counters are incremented by `process_desc_chain` for every frame read
/// from the TAP device, and are periodically flushed into the shared atomic
/// `NetCounters` (and reset) by `NetQueuePair::process_rx`.
#[derive(Clone)]
pub struct RxVirtio {
    // Payload bytes read from the TAP, excluding the virtio-net header.
    pub counter_bytes: Wrapping<u64>,
    // Number of frames read from the TAP.
    pub counter_frames: Wrapping<u64>,
}
|
|
|
|
|
|
|
|
impl Default for RxVirtio {
|
|
|
|
fn default() -> Self {
|
|
|
|
Self::new()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl RxVirtio {
    /// Create an RX handler with zeroed statistics counters.
    pub fn new() -> Self {
        RxVirtio {
            counter_bytes: Wrapping(0),
            counter_frames: Wrapping(0),
        }
    }

    /// Fill the RX queue: pop available descriptor chains and read one frame
    /// from the TAP device into each chain with a single vectored `readv(2)`.
    ///
    /// Returns `Ok(true)` when every available descriptor was consumed
    /// (`exhausted_descs`), `Ok(false)` when processing stopped early because
    /// of rate limiting or an EAGAIN from the TAP.
    pub fn process_desc_chain<B: Bitmap + 'static>(
        &mut self,
        mem: &vm_memory::GuestMemoryMmap<B>,
        tap: &Tap,
        queue: &mut Queue,
        rate_limiter: &mut Option<RateLimiter>,
        access_platform: Option<&Arc<dyn AccessPlatform>>,
    ) -> Result<bool, NetQueuePairError> {
        let mut exhausted_descs = true;
        let mut rate_limit_reached = false;

        while let Some(mut desc_chain) = queue.pop_descriptor_chain(mem) {
            // The previous iteration exhausted the rate-limit budget: put the
            // chain we just popped back and stop processing.
            if rate_limit_reached {
                exhausted_descs = false;
                queue.go_to_previous_position();
                break;
            }

            let desc = desc_chain
                .next()
                .ok_or(NetQueuePairError::DescriptorChainTooShort)?;

            // Address of the num_buffers field inside the virtio-net header
            // held by the first descriptor (offset 10 into the header); it is
            // patched after a successful read. The checked_offset also
            // validates the header address range.
            let num_buffers_addr = desc_chain
                .memory()
                .checked_offset(
                    desc.addr()
                        .translate_gva(access_platform, desc.len() as usize),
                    10,
                )
                .ok_or(NetQueuePairError::DescriptorInvalidHeader)?;
            let mut next_desc = Some(desc);

            // Gather every writable descriptor of the chain into an iovec
            // array so the whole frame is read in one readv() call.
            let mut iovecs = Vec::new();
            while let Some(desc) = next_desc {
                // Translate the guest address in case an IOMMU-like access
                // platform is interposed.
                let desc_addr = desc
                    .addr()
                    .translate_gva(access_platform, desc.len() as usize);
                // RX descriptors must be writable (device fills them) and
                // non-empty; anything else is a malformed chain.
                if desc.is_write_only() && desc.len() > 0 {
                    let buf = desc_chain
                        .memory()
                        .get_slice(desc_addr, desc.len() as usize)
                        .map_err(NetQueuePairError::GuestMemory)?
                        .ptr_guard_mut();
                    let iovec = libc::iovec {
                        iov_base: buf.as_ptr() as *mut libc::c_void,
                        iov_len: desc.len() as libc::size_t,
                    };
                    iovecs.push(iovec);
                } else {
                    error!(
                        "Invalid descriptor chain: address = 0x{:x} length = {} write_only = {}",
                        desc_addr.0,
                        desc.len(),
                        desc.is_write_only()
                    );
                    return Err(NetQueuePairError::DescriptorChainInvalid);
                }
                next_desc = desc_chain.next();
            }

            let len = if !iovecs.is_empty() {
                // SAFETY: FFI call with correct arguments
                let result = unsafe {
                    libc::readv(
                        tap.as_raw_fd() as libc::c_int,
                        iovecs.as_ptr(),
                        iovecs.len() as libc::c_int,
                    )
                };
                if result < 0 {
                    let e = std::io::Error::last_os_error();
                    // Nothing was read: hand the chain back to the queue.
                    exhausted_descs = false;
                    queue.go_to_previous_position();

                    /* EAGAIN */
                    if e.kind() == std::io::ErrorKind::WouldBlock {
                        // No data available on the TAP right now; not an error.
                        break;
                    }

                    error!("net: rx: failed reading from tap: {}", e);
                    return Err(NetQueuePairError::ReadTap(e));
                }

                // A valid frame must at least contain the virtio-net header.
                if (result as usize) < vnet_hdr_len() {
                    return Err(NetQueuePairError::InvalidVirtioNetHeader);
                }

                // Write num_buffers to guest memory. We simply write 1 as we
                // never spread the frame over more than one descriptor chain.
                desc_chain
                    .memory()
                    .write_obj(1u16, num_buffers_addr)
                    .map_err(NetQueuePairError::GuestMemory)?;

                // Account payload bytes only (header excluded) plus one frame.
                self.counter_bytes += Wrapping(result as u64 - vnet_hdr_len() as u64);
                self.counter_frames += Wrapping(1);

                result as u32
            } else {
                0
            };

            // For the sake of simplicity (keeping the handling of RX_QUEUE_EVENT and
            // RX_TAP_EVENT totally asynchronous), we always let the 'last' descriptor
            // chain go-through even if it was over the rate limit, and simply stop
            // processing oncoming `avail_desc` if any.
            if let Some(rate_limiter) = rate_limiter {
                rate_limit_reached = !rate_limiter.consume(1, TokenType::Ops)
                    || !rate_limiter.consume(len as u64, TokenType::Bytes);
            }

            queue
                .add_used(desc_chain.memory(), desc_chain.head_index(), len)
                .map_err(NetQueuePairError::QueueAddUsed)?;

            // If notifications are suppressed (event-idx), no more avail
            // descriptors are coming for now: stop filling.
            if !queue
                .enable_notification(mem)
                .map_err(NetQueuePairError::QueueEnableNotification)?
            {
                break;
            }
        }

        Ok(exhausted_descs)
    }
}
|
2020-07-07 15:50:13 +00:00
|
|
|
|
|
|
|
/// Aggregate network statistics shared across threads.
///
/// Each counter is an `Arc<AtomicU64>` so clones of this struct observe and
/// update the same totals; `NetQueuePair::process_tx`/`process_rx` fold their
/// per-call `Wrapping` counters into these atomics.
#[derive(Default, Clone)]
pub struct NetCounters {
    pub tx_bytes: Arc<AtomicU64>,
    pub tx_frames: Arc<AtomicU64>,
    pub rx_bytes: Arc<AtomicU64>,
    pub rx_frames: Arc<AtomicU64>,
}
|
|
|
|
|
2022-08-10 21:58:36 +00:00
|
|
|
/// Errors that can occur while a `NetQueuePair` processes its TX/RX queues.
#[derive(Error, Debug)]
pub enum NetQueuePairError {
    #[error("No memory configured")]
    NoMemoryConfigured,
    #[error("Error registering listener: {0}")]
    RegisterListener(io::Error),
    #[error("Error unregistering listener: {0}")]
    UnregisterListener(io::Error),
    #[error("Error writing to the TAP device: {0}")]
    WriteTap(io::Error),
    #[error("Error reading from the TAP device: {0}")]
    ReadTap(io::Error),
    #[error("Error related to guest memory: {0}")]
    GuestMemory(vm_memory::GuestMemoryError),
    #[error("Returned an error while iterating through the queue: {0}")]
    QueueIteratorFailed(virtio_queue::Error),
    #[error("Descriptor chain is too short")]
    DescriptorChainTooShort,
    #[error("Descriptor chain does not contain valid descriptors")]
    DescriptorChainInvalid,
    #[error("Failed to determine if queue needed notification: {0}")]
    QueueNeedsNotification(virtio_queue::Error),
    #[error("Failed to enable notification on the queue: {0}")]
    QueueEnableNotification(virtio_queue::Error),
    #[error("Failed to add used index to the queue: {0}")]
    QueueAddUsed(virtio_queue::Error),
    #[error("Descriptor with invalid virtio-net header")]
    DescriptorInvalidHeader,
    #[error("Invalid virtio-net header")]
    InvalidVirtioNetHeader,
}
|
|
|
|
|
|
|
|
/// A TX/RX virtqueue pair backed by a single TAP device, together with the
/// epoll bookkeeping and rate limiters used while servicing both directions.
pub struct NetQueuePair {
    // TAP device used for both reading (RX) and writing (TX) frames.
    pub tap: Tap,
    // With epoll each FD must be unique. So in order to filter the
    // events we need to get a second FD responding to the original
    // device so that we can send EPOLLOUT and EPOLLIN to separate
    // events.
    pub tap_for_write_epoll: Tap,
    pub rx: RxVirtio,
    pub tx: TxVirtio,
    // Epoll instance used for (un)registering the TAP fds; must be set
    // before process_tx/process_rx are called (they unwrap it).
    pub epoll_fd: Option<RawFd>,
    // Whether the RX TAP fd is currently registered for EPOLLIN.
    pub rx_tap_listening: bool,
    // Whether the write-side TAP fd is currently registered for EPOLLOUT.
    pub tx_tap_listening: bool,
    pub counters: NetCounters,
    // Epoll event ids used to distinguish RX/TAP-readable vs TX/TAP-writable.
    pub tap_rx_event_id: u16,
    pub tap_tx_event_id: u16,
    // Whether the guest still has RX descriptors available for the device.
    pub rx_desc_avail: bool,
    pub rx_rate_limiter: Option<RateLimiter>,
    pub tx_rate_limiter: Option<RateLimiter>,
    // Optional address translation (e.g. vIOMMU) applied to descriptors.
    pub access_platform: Option<Arc<dyn AccessPlatform>>,
}
|
|
|
|
|
|
|
|
impl NetQueuePair {
|
2024-05-01 08:24:48 +00:00
|
|
|
pub fn process_tx<B: Bitmap + 'static>(
|
2021-10-21 10:41:16 +00:00
|
|
|
&mut self,
|
2024-05-01 08:24:48 +00:00
|
|
|
mem: &vm_memory::GuestMemoryMmap<B>,
|
2022-07-06 14:08:08 +00:00
|
|
|
queue: &mut Queue,
|
2021-10-21 10:41:16 +00:00
|
|
|
) -> Result<bool, NetQueuePairError> {
|
2022-01-26 16:12:15 +00:00
|
|
|
let tx_tap_retry = self.tx.process_desc_chain(
|
2022-07-06 14:08:08 +00:00
|
|
|
mem,
|
2023-08-22 09:44:09 +00:00
|
|
|
&self.tap,
|
2022-01-26 16:12:15 +00:00
|
|
|
queue,
|
|
|
|
&mut self.tx_rate_limiter,
|
|
|
|
self.access_platform.as_ref(),
|
|
|
|
)?;
|
2021-06-28 14:28:26 +00:00
|
|
|
|
|
|
|
// We got told to try again when writing to the tap. Wait for the TAP to be writable
|
|
|
|
if tx_tap_retry && !self.tx_tap_listening {
|
|
|
|
register_listener(
|
|
|
|
self.epoll_fd.unwrap(),
|
|
|
|
self.tap_for_write_epoll.as_raw_fd(),
|
|
|
|
epoll::Events::EPOLLOUT,
|
|
|
|
u64::from(self.tap_tx_event_id),
|
|
|
|
)
|
|
|
|
.map_err(NetQueuePairError::RegisterListener)?;
|
|
|
|
self.tx_tap_listening = true;
|
|
|
|
info!("Writing to TAP returned EAGAIN. Listening for TAP to become writable.");
|
|
|
|
} else if !tx_tap_retry && self.tx_tap_listening {
|
|
|
|
unregister_listener(
|
|
|
|
self.epoll_fd.unwrap(),
|
|
|
|
self.tap_for_write_epoll.as_raw_fd(),
|
|
|
|
epoll::Events::EPOLLOUT,
|
|
|
|
u64::from(self.tap_tx_event_id),
|
|
|
|
)
|
|
|
|
.map_err(NetQueuePairError::UnregisterListener)?;
|
|
|
|
self.tx_tap_listening = false;
|
|
|
|
info!("Writing to TAP succeeded. No longer listening for TAP to become writable.");
|
|
|
|
}
|
2020-07-07 15:50:13 +00:00
|
|
|
|
|
|
|
self.counters
|
2021-02-16 21:17:37 +00:00
|
|
|
.tx_bytes
|
|
|
|
.fetch_add(self.tx.counter_bytes.0, Ordering::AcqRel);
|
2020-07-07 15:50:13 +00:00
|
|
|
self.counters
|
2021-02-16 21:17:37 +00:00
|
|
|
.tx_frames
|
|
|
|
.fetch_add(self.tx.counter_frames.0, Ordering::AcqRel);
|
|
|
|
self.tx.counter_bytes = Wrapping(0);
|
|
|
|
self.tx.counter_frames = Wrapping(0);
|
2020-07-07 15:50:13 +00:00
|
|
|
|
2021-10-21 10:41:16 +00:00
|
|
|
queue
|
2022-07-06 14:08:08 +00:00
|
|
|
.needs_notification(mem)
|
2021-10-21 10:41:16 +00:00
|
|
|
.map_err(NetQueuePairError::QueueNeedsNotification)
|
2020-07-07 15:50:13 +00:00
|
|
|
}
|
|
|
|
|
2024-05-01 08:24:48 +00:00
|
|
|
pub fn process_rx<B: Bitmap + 'static>(
|
2021-10-21 10:41:16 +00:00
|
|
|
&mut self,
|
2024-05-01 08:24:48 +00:00
|
|
|
mem: &vm_memory::GuestMemoryMmap<B>,
|
2022-07-06 14:08:08 +00:00
|
|
|
queue: &mut Queue,
|
2021-10-21 10:41:16 +00:00
|
|
|
) -> Result<bool, NetQueuePairError> {
|
2022-01-26 16:12:15 +00:00
|
|
|
self.rx_desc_avail = !self.rx.process_desc_chain(
|
2022-07-06 14:08:08 +00:00
|
|
|
mem,
|
2023-08-22 09:44:09 +00:00
|
|
|
&self.tap,
|
2022-01-26 16:12:15 +00:00
|
|
|
queue,
|
|
|
|
&mut self.rx_rate_limiter,
|
|
|
|
self.access_platform.as_ref(),
|
|
|
|
)?;
|
2021-03-25 21:17:05 +00:00
|
|
|
let rate_limit_reached = self
|
|
|
|
.rx_rate_limiter
|
|
|
|
.as_ref()
|
|
|
|
.map_or(false, |r| r.is_blocked());
|
|
|
|
|
|
|
|
// Stop listening on the `RX_TAP_EVENT` when:
|
|
|
|
// 1) there is no available describles, or
|
|
|
|
// 2) the RX rate limit is reached.
|
|
|
|
if self.rx_tap_listening && (!self.rx_desc_avail || rate_limit_reached) {
|
2021-02-16 21:17:37 +00:00
|
|
|
unregister_listener(
|
2020-07-07 15:50:13 +00:00
|
|
|
self.epoll_fd.unwrap(),
|
|
|
|
self.tap.as_raw_fd(),
|
|
|
|
epoll::Events::EPOLLIN,
|
2021-06-28 13:51:34 +00:00
|
|
|
u64::from(self.tap_rx_event_id),
|
2020-07-07 15:50:13 +00:00
|
|
|
)
|
2021-02-16 21:17:37 +00:00
|
|
|
.map_err(NetQueuePairError::UnregisterListener)?;
|
|
|
|
self.rx_tap_listening = false;
|
2020-07-07 15:50:13 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
self.counters
|
2021-02-16 21:17:37 +00:00
|
|
|
.rx_bytes
|
|
|
|
.fetch_add(self.rx.counter_bytes.0, Ordering::AcqRel);
|
2020-07-07 15:50:13 +00:00
|
|
|
self.counters
|
2021-02-16 21:17:37 +00:00
|
|
|
.rx_frames
|
|
|
|
.fetch_add(self.rx.counter_frames.0, Ordering::AcqRel);
|
|
|
|
self.rx.counter_bytes = Wrapping(0);
|
|
|
|
self.rx.counter_frames = Wrapping(0);
|
2020-07-07 15:50:13 +00:00
|
|
|
|
2021-10-21 10:41:16 +00:00
|
|
|
queue
|
2022-07-06 14:08:08 +00:00
|
|
|
.needs_notification(mem)
|
2021-10-21 10:41:16 +00:00
|
|
|
.map_err(NetQueuePairError::QueueNeedsNotification)
|
2020-07-07 15:50:13 +00:00
|
|
|
}
|
|
|
|
}
|