vm-virtio: net: Implement counters for network traffic

Add counters for RX/TX bytes and RX/TX frames. These are collected on a
per-queue basis and then accumulated into atomic values shared across
the device's worker threads, giving totals for the device as a whole.
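
In rough outline, the pattern is the one sketched below: each queue
keeps plain Wrapping<u64> tallies on its hot path and periodically
folds them into shared Arc<AtomicU64> totals, so the fast path never
touches an atomic per frame. This is a minimal standalone sketch with
simplified names (Counters, QueueCounters), not the actual types from
the diff:

    use std::num::Wrapping;
    use std::sync::atomic::{AtomicU64, Ordering};
    use std::sync::Arc;

    // Device-wide totals. Every clone refers to the same underlying
    // atomics, so each queue thread can hold its own handle.
    #[derive(Default, Clone)]
    struct Counters {
        rx_bytes: Arc<AtomicU64>,
        rx_frames: Arc<AtomicU64>,
    }

    // Per-queue tallies: non-atomic, updated once per processed frame.
    #[derive(Default)]
    struct QueueCounters {
        bytes: Wrapping<u64>,
        frames: Wrapping<u64>,
    }

    impl QueueCounters {
        // Fold the local tallies into the shared totals and reset them,
        // so every byte/frame is counted exactly once across all queues.
        fn flush(&mut self, totals: &Counters) {
            totals.rx_bytes.fetch_add(self.bytes.0, Ordering::AcqRel);
            totals.rx_frames.fetch_add(self.frames.0, Ordering::AcqRel);
            self.bytes = Wrapping(0);
            self.frames = Wrapping(0);
        }
    }

    fn main() {
        let totals = Counters::default();
        let mut queue = QueueCounters::default();

        // Pretend this queue just received one 1500-byte frame ...
        queue.bytes += Wrapping(1500);
        queue.frames += Wrapping(1);

        // ... then fold it into the device-wide totals.
        queue.flush(&totals);
        assert_eq!(totals.rx_bytes.load(Ordering::Acquire), 1500);
    }

Wrapping<u64> is used for the per-queue tallies so that overflow wraps
quietly instead of panicking in a debug build.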

Collecting and accumulating these counters makes minimal difference in
the iperf results; any difference seen is within the natural run-to-run
variation observed in this test.

e.g.

With counter updates:

$ iperf3 -c 192.168.249.2
Connecting to host 192.168.249.2, port 5201
[  5] local 192.168.249.1 port 52706 connected to 192.168.249.2 port 5201
[ ID] Interval           Transfer     Bitrate         Retr  Cwnd
[  5]   0.00-1.00   sec  6.19 GBytes  53.2 Gbits/sec    0   3.01 MBytes
[  5]   1.00-2.00   sec  6.31 GBytes  54.2 Gbits/sec    0   3.01 MBytes
[  5]   2.00-3.00   sec  6.29 GBytes  54.0 Gbits/sec    0   3.01 MBytes
[  5]   3.00-4.00   sec  6.22 GBytes  53.4 Gbits/sec    0   3.01 MBytes
[  5]   4.00-5.00   sec  6.14 GBytes  52.8 Gbits/sec    0   3.01 MBytes
[  5]   5.00-6.00   sec  6.13 GBytes  52.7 Gbits/sec    0   3.01 MBytes
[  5]   6.00-7.00   sec  6.20 GBytes  53.3 Gbits/sec    0   3.01 MBytes
[  5]   7.00-8.00   sec  6.16 GBytes  52.9 Gbits/sec    0   3.01 MBytes
[  5]   8.00-9.00   sec  6.13 GBytes  52.6 Gbits/sec    0   3.01 MBytes
[  5]   9.00-10.00  sec  6.15 GBytes  52.8 Gbits/sec    0   3.01 MBytes
- - - - - - - - - - - - - - - - - - - - - - - - -
[ ID] Interval           Transfer     Bitrate         Retr
[  5]   0.00-10.00  sec  61.9 GBytes  53.2 Gbits/sec    0             sender
[  5]   0.00-10.00  sec  61.9 GBytes  53.2 Gbits/sec                  receiver

iperf Done.

Without counter updates:

$ iperf3 -c 192.168.249.2
Connecting to host 192.168.249.2, port 5201
[  5] local 192.168.249.1 port 52716 connected to 192.168.249.2 port 5201
[ ID] Interval           Transfer     Bitrate         Retr  Cwnd
[  5]   0.00-1.00   sec  6.29 GBytes  54.1 Gbits/sec    0   3.03 MBytes
[  5]   1.00-2.00   sec  6.18 GBytes  53.1 Gbits/sec    0   3.03 MBytes
[  5]   2.00-3.00   sec  6.26 GBytes  53.8 Gbits/sec    0   3.03 MBytes
[  5]   3.00-4.00   sec  6.24 GBytes  53.6 Gbits/sec    0   3.03 MBytes
[  5]   4.00-5.00   sec  6.27 GBytes  53.9 Gbits/sec    1   3.03 MBytes
[  5]   5.00-6.00   sec  6.31 GBytes  54.2 Gbits/sec    0   3.03 MBytes
[  5]   6.00-7.00   sec  6.29 GBytes  54.1 Gbits/sec    0   3.03 MBytes
[  5]   7.00-8.00   sec  6.16 GBytes  52.9 Gbits/sec    0   3.03 MBytes
[  5]   8.00-9.00   sec  6.13 GBytes  52.6 Gbits/sec    0   3.03 MBytes
[  5]   9.00-10.00  sec  6.25 GBytes  53.7 Gbits/sec    0   3.03 MBytes
- - - - - - - - - - - - - - - - - - - - - - - - -
[ ID] Interval           Transfer     Bitrate         Retr
[  5]   0.00-10.00  sec  62.4 GBytes  53.6 Gbits/sec    1             sender
[  5]   0.00-10.00  sec  62.4 GBytes  53.6 Gbits/sec                  receiver

iperf Done.

Signed-off-by: Rob Bradford <robert.bradford@intel.com>
---
Committed by Samuel Ortiz, 2020-06-23 16:28:41 +01:00
(commit 2b4a0358de, parent 0583ce921b)

 3 files changed, 55 insertions(+), 2 deletions(-)


@@ -30,7 +30,7 @@ use virtio_bindings::bindings::virtio_net::*;
 use virtio_bindings::bindings::virtio_ring::VIRTIO_RING_F_EVENT_IDX;
 use vm_memory::{GuestMemoryAtomic, GuestMemoryMmap};
 use vm_virtio::net_util::{open_tap, RxVirtio, TxVirtio};
-use vm_virtio::NetQueuePair;
+use vm_virtio::{NetCounters, NetQueuePair};
 use vmm::config::{OptionParser, OptionParserError};
 use vmm_sys_util::eventfd::EventFd;
@@ -111,6 +111,7 @@ impl VhostUserNetThread {
                 tx: TxVirtio::new(),
                 rx_tap_listening: false,
                 epoll_fd: None,
+                counters: NetCounters::default(),
             },
         })
     }
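
The NetCounters type referenced here is defined further down in the
diff. The detail that makes this work is that it derives Clone while
holding Arc<AtomicU64> fields, so a clone is a new handle onto the
same counters rather than an independent copy. A minimal illustration
(standalone sketch, not the actual types from this patch):

    use std::sync::atomic::{AtomicU64, Ordering};
    use std::sync::Arc;

    #[derive(Default, Clone)]
    struct Counters {
        tx_bytes: Arc<AtomicU64>,
    }

    fn main() {
        let device = Counters::default();
        // Each queue thread receives its own clone ...
        let per_thread = device.clone();
        per_thread.tx_bytes.fetch_add(42, Ordering::AcqRel);
        // ... yet updates land in the shared device-wide total.
        assert_eq!(device.tx_bytes.load(Ordering::Acquire), 42);
    }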


@@ -24,9 +24,10 @@ use std::fs::File;
 use std::io::Read;
 use std::io::{self, Write};
 use std::net::Ipv4Addr;
+use std::num::Wrapping;
 use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
 use std::result;
-use std::sync::atomic::{AtomicBool, Ordering};
+use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
 use std::sync::Arc;
 use std::thread;
 use std::vec::Vec;
@@ -54,6 +55,7 @@ pub struct NetQueuePair {
     pub tx: TxVirtio,
     pub epoll_fd: Option<RawFd>,
     pub rx_tap_listening: bool,
+    pub counters: NetCounters,
 }

 impl NetQueuePair {
@@ -112,6 +114,19 @@ impl NetQueuePair {
                 }
             }
         }
+
+        // Consume the counters from the Rx/Tx queues and accumulate into
+        // the counters for the device as a whole. This consumption is
+        // needed to handle MQ.
+        self.counters
+            .rx_bytes
+            .fetch_add(self.rx.counter_bytes.0, Ordering::AcqRel);
+        self.counters
+            .rx_frames
+            .fetch_add(self.rx.counter_frames.0, Ordering::AcqRel);
+        self.rx.counter_bytes = Wrapping(0);
+        self.rx.counter_frames = Wrapping(0);
+
         if self.rx.deferred_irqs {
             self.rx.deferred_irqs = false;
             let mem = self
@@ -166,6 +181,16 @@ impl NetQueuePair {
             .ok_or(DeviceError::NoMemoryConfigured)
             .map(|m| m.memory())?;
         self.tx.process_desc_chain(&mem, &mut self.tap, &mut queue);
+
+        self.counters
+            .tx_bytes
+            .fetch_add(self.tx.counter_bytes.0, Ordering::AcqRel);
+        self.counters
+            .tx_frames
+            .fetch_add(self.tx.counter_frames.0, Ordering::AcqRel);
+        self.tx.counter_bytes = Wrapping(0);
+        self.tx.counter_frames = Wrapping(0);
+
         Ok(queue.needs_notification(&mem, queue.next_used))
     }
@@ -390,6 +415,14 @@ impl NetEpollHandler {
     }
 }

+#[derive(Default, Clone)]
+pub struct NetCounters {
+    tx_bytes: Arc<AtomicU64>,
+    tx_frames: Arc<AtomicU64>,
+    rx_bytes: Arc<AtomicU64>,
+    rx_frames: Arc<AtomicU64>,
+}
+
 pub struct Net {
     id: String,
     kill_evt: Option<EventFd>,
@@ -404,6 +437,7 @@ pub struct Net {
     ctrl_queue_epoll_thread: Option<thread::JoinHandle<result::Result<(), DeviceError>>>,
     paused: Arc<AtomicBool>,
     queue_size: Vec<u16>,
+    counters: NetCounters,
 }

 #[derive(Serialize, Deserialize)]
@@ -461,6 +495,7 @@ impl Net {
             ctrl_queue_epoll_thread: None,
             paused: Arc::new(AtomicBool::new(false)),
             queue_size: vec![queue_size; queue_num],
+            counters: NetCounters::default(),
         })
     }
@@ -661,6 +696,7 @@ impl VirtioDevice for Net {
                 tx,
                 epoll_fd: None,
                 rx_tap_listening,
+                counters: self.counters.clone(),
             },
             interrupt_cb: interrupt_cb.clone(),
             kill_evt: kill_evt.try_clone().unwrap(),
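
The "consumption" called out in the in-code comment above ("needed to
handle MQ") is what makes the scheme safe for multi-queue devices:
each queue pair folds its private tally into the shared totals and
then resets it, so no frame is counted twice and no queue's traffic is
lost. A minimal sketch of two queues feeding one total (illustrative
names, not the code from this patch):

    use std::num::Wrapping;
    use std::sync::atomic::{AtomicU64, Ordering};
    use std::sync::Arc;
    use std::thread;

    fn main() {
        let total = Arc::new(AtomicU64::new(0));

        // Two queue threads, each with a private tally that is consumed
        // (folded into the total, then reset) after a processing pass.
        let handles: Vec<_> = (0..2)
            .map(|_| {
                let total = total.clone();
                thread::spawn(move || {
                    let mut tally = Wrapping(0u64);
                    for _ in 0..1000 {
                        tally += Wrapping(1); // one frame processed
                    }
                    // Consume the tally: take it, leaving zero behind,
                    // and add it to the shared total.
                    total.fetch_add(std::mem::take(&mut tally).0, Ordering::AcqRel);
                })
            })
            .collect();

        for handle in handles {
            handle.join().unwrap();
        }
        assert_eq!(total.load(Ordering::Acquire), 2000);
    }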


@@ -12,6 +12,7 @@ use std::fs::File;
 use std::io::{self, Write};
 use std::mem;
 use std::net::Ipv4Addr;
+use std::num::Wrapping;
 use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
 use std::path::Path;
 use std::sync::atomic::{AtomicBool, Ordering};
@@ -340,6 +341,8 @@ impl NetCtrlEpollHandler {
 pub struct TxVirtio {
     pub iovec: Vec<(GuestAddress, usize)>,
     pub frame_buf: [u8; MAX_BUFFER_SIZE],
+    pub counter_bytes: Wrapping<u64>,
+    pub counter_frames: Wrapping<u64>,
 }

 impl Default for TxVirtio {
@@ -353,6 +356,8 @@ impl TxVirtio {
         TxVirtio {
             iovec: Vec::new(),
             frame_buf: [0u8; MAX_BUFFER_SIZE],
+            counter_bytes: Wrapping(0),
+            counter_frames: Wrapping(0),
         }
     }
@@ -400,6 +405,10 @@ impl TxVirtio {
                     println!("net: tx: error failed to write to tap: {}", e);
                 }
             };
+
+            self.counter_bytes += Wrapping((read_count - vnet_hdr_len()) as u64);
+            self.counter_frames += Wrapping(1);
+
             queue.add_used(&mem, head_index, 0);
             queue.update_avail_event(&mem);
         }
@@ -412,6 +421,8 @@ pub struct RxVirtio {
     pub deferred_irqs: bool,
     pub bytes_read: usize,
     pub frame_buf: [u8; MAX_BUFFER_SIZE],
+    pub counter_bytes: Wrapping<u64>,
+    pub counter_frames: Wrapping<u64>,
 }

 impl Default for RxVirtio {
@@ -427,6 +438,8 @@ impl RxVirtio {
             deferred_irqs: false,
             bytes_read: 0,
             frame_buf: [0u8; MAX_BUFFER_SIZE],
+            counter_bytes: Wrapping(0),
+            counter_frames: Wrapping(0),
         }
     }
@@ -472,6 +485,9 @@ impl RxVirtio {
             }
         }

+        self.counter_bytes += Wrapping((write_count - vnet_hdr_len()) as u64);
+        self.counter_frames += Wrapping(1);
+
         queue.add_used(&mem, head_index, write_count as u32);
         queue.update_avail_event(&mem);
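
One accounting detail in the two hunks above: read_count and
write_count include the virtio-net header that precedes every frame on
the ring, so vnet_hdr_len() is subtracted to charge only the bytes of
the Ethernet frame itself to the counters. A rough illustration; the
12-byte figure assumes the mergeable-buffers header layout
(virtio_net_hdr_mrg_rxbuf):

    // Standalone sketch of the byte accounting, not the code above.
    const VNET_HDR_LEN: usize = 12; // assumed size of virtio_net_hdr_mrg_rxbuf

    fn frame_bytes(buffer_len: usize) -> u64 {
        // The buffer carries [virtio-net header | Ethernet frame]; only
        // the frame itself is charged to the traffic counters.
        (buffer_len - VNET_HDR_LEN) as u64
    }

    fn main() {
        // A full-sized Ethernet frame (1514 bytes) plus the header:
        assert_eq!(frame_bytes(1514 + VNET_HDR_LEN), 1514);
    }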