Move Cloud Hypervisor to virtio-queue crate

Relying on the vm-virtio/virtio-queue crate from rust-vmm, which has been
copied into the Cloud Hypervisor tree, the entire codebase is moved to
the new definition of the Queue and its related structures.

The reason for this move is to follow upstream until we reach agreement
on the patches we need on top of it to make it work properly with
Cloud Hypervisor.

Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
Author: Sebastien Boeuf
Date:   2021-10-21 12:41:16 +02:00
Parent: 7c19ae92b8
Commit: 0249e8641a

44 changed files with 936 additions and 1652 deletions
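
The change is mechanical but pervasive; two API shifts account for most of the hunks below. The first is queue construction and state: a minimal sketch, assuming the forked virtio-queue 0.1.0 vendored by this commit (the size values and the make_queue name are illustrative, not part of the change). The second shift, descriptor iteration, is sketched after the block_util hunks.

use virtio_queue::Queue;
use vm_memory::{GuestMemoryAtomic, GuestMemoryMmap};

// The queue now owns a guest-memory handle, and the ring configuration
// moved behind the nested `state` field.
fn make_queue(
    mem: GuestMemoryAtomic<GuestMemoryMmap>,
) -> Queue<GuestMemoryAtomic<GuestMemoryMmap>> {
    let mut q = Queue::new(mem, 256); // old: vm_virtio::Queue::new(256)
    q.state.ready = true;             // old: q.ready = true
    q.state.size = 128;               // old: q.size = 128
    q
}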

Cargo.lock (generated)

@@ -144,6 +144,7 @@ dependencies = [
  "versionize_derive",
  "vhdx",
  "virtio-bindings",
+ "virtio-queue",
  "vm-memory",
  "vm-virtio",
  "vmm-sys-util",
@@ -607,6 +608,7 @@ dependencies = [
  "versionize",
  "versionize_derive",
  "virtio-bindings",
+ "virtio-queue",
  "vm-memory",
  "vm-virtio",
  "vmm-sys-util",
@@ -1212,6 +1214,7 @@ dependencies = [
  "log",
  "vhost",
  "virtio-bindings",
+ "virtio-queue",
  "vm-memory",
  "vm-virtio",
  "vmm-sys-util",
@@ -1285,6 +1288,7 @@ dependencies = [
  "versionize_derive",
  "vhost",
  "virtio-bindings",
+ "virtio-queue",
  "vm-allocator",
  "vm-device",
  "vm-memory",
@@ -1293,6 +1297,15 @@ dependencies = [
  "vmm-sys-util",
 ]
 
+[[package]]
+name = "virtio-queue"
+version = "0.1.0"
+dependencies = [
+ "log",
+ "vm-memory",
+ "vmm-sys-util",
+]
+
 [[package]]
 name = "vm-allocator"
 version = "0.1.0"
@@ -1352,6 +1365,7 @@ version = "0.1.0"
 dependencies = [
  "log",
  "virtio-bindings",
+ "virtio-queue",
  "vm-memory",
 ]
 
@@ -1393,6 +1407,7 @@ dependencies = [
  "vfio_user",
  "vhdx",
  "virtio-devices",
+ "virtio-queue",
  "vm-allocator",
  "vm-device",
  "vm-memory",


@@ -83,6 +83,7 @@ members = [
     "vhost_user_block",
     "vhost_user_net",
     "virtio-devices",
+    "virtio-queue",
     "vmm",
     "vm-allocator",
     "vm-device",


@@ -18,6 +18,7 @@ versionize = "0.1.6"
 versionize_derive = "0.1.4"
 vhdx = { path = "../vhdx" }
 virtio-bindings = { version = "0.1.0", features = ["virtio-v5_0_0"] }
+virtio-queue = { path = "../virtio-queue" }
 vm-memory = { version = "0.6.0", features = ["backend-mmap", "backend-atomic", "backend-bitmap"] }
 vm-virtio = { path = "../vm-virtio" }
 vmm-sys-util = "0.9.0"


@@ -36,11 +36,11 @@ use std::sync::{Arc, Mutex};
 use versionize::{VersionMap, Versionize, VersionizeResult};
 use versionize_derive::Versionize;
 use virtio_bindings::bindings::virtio_blk::*;
+use virtio_queue::DescriptorChain;
 use vm_memory::{
     bitmap::AtomicBitmap, bitmap::Bitmap, ByteValued, Bytes, GuestAddress, GuestMemory,
-    GuestMemoryError,
+    GuestMemoryAtomic, GuestMemoryError,
 };
-use vm_virtio::DescriptorChain;
 use vmm_sys_util::eventfd::EventFd;
 
 type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>;
@@ -179,25 +179,32 @@ pub struct Request {
 impl Request {
     pub fn parse(
-        avail_desc: &DescriptorChain,
-        mem: &GuestMemoryMmap,
+        desc_chain: &mut DescriptorChain<GuestMemoryAtomic<GuestMemoryMmap>>,
     ) -> result::Result<Request, Error> {
+        let hdr_desc = desc_chain
+            .next()
+            .ok_or(Error::DescriptorChainTooShort)
+            .map_err(|e| {
+                error!("Missing head descriptor");
+                e
+            })?;
+
         // The head contains the request type which MUST be readable.
-        if avail_desc.is_write_only() {
+        if hdr_desc.is_write_only() {
             return Err(Error::UnexpectedWriteOnlyDescriptor);
         }
 
         let mut req = Request {
-            request_type: request_type(mem, avail_desc.addr)?,
-            sector: sector(mem, avail_desc.addr)?,
+            request_type: request_type(desc_chain.memory(), hdr_desc.addr())?,
+            sector: sector(desc_chain.memory(), hdr_desc.addr())?,
             data_descriptors: Vec::new(),
             status_addr: GuestAddress(0),
             writeback: true,
         };
 
         let status_desc;
-        let mut desc = avail_desc
-            .next_descriptor()
+        let mut desc = desc_chain
+            .next()
             .ok_or(Error::DescriptorChainTooShort)
             .map_err(|e| {
                 error!("Only head descriptor present: request = {:?}", req);
@@ -222,9 +229,9 @@ impl Request {
             if !desc.is_write_only() && req.request_type == RequestType::GetDeviceId {
                 return Err(Error::UnexpectedReadOnlyDescriptor);
             }
-            req.data_descriptors.push((desc.addr, desc.len));
-            desc = desc
-                .next_descriptor()
+            req.data_descriptors.push((desc.addr(), desc.len()));
+            desc = desc_chain
+                .next()
                 .ok_or(Error::DescriptorChainTooShort)
                 .map_err(|e| {
                     error!("DescriptorChain corrupted: request = {:?}", req);
@@ -239,11 +246,11 @@ impl Request {
             return Err(Error::UnexpectedReadOnlyDescriptor);
         }
 
-        if status_desc.len < 1 {
+        if status_desc.len() < 1 {
             return Err(Error::DescriptorLengthTooSmall);
         }
 
-        req.status_addr = status_desc.addr;
+        req.status_addr = status_desc.addr();
 
         Ok(req)
     }

fuzz/Cargo.lock (generated)

@@ -105,6 +105,7 @@ dependencies = [
  "versionize_derive",
  "vhdx",
  "virtio-bindings",
+ "virtio-queue",
  "vm-memory",
  "vm-virtio",
  "vmm-sys-util",
@@ -178,6 +179,7 @@ dependencies = [
  "seccompiler",
  "vhdx",
  "virtio-devices",
+ "virtio-queue",
  "vm-memory",
  "vm-virtio",
  "vmm-sys-util",
@@ -401,6 +403,7 @@ dependencies = [
  "versionize",
  "versionize_derive",
  "virtio-bindings",
+ "virtio-queue",
  "vm-memory",
  "vm-virtio",
  "vmm-sys-util",
@@ -786,6 +789,7 @@ dependencies = [
  "versionize_derive",
  "vhost",
  "virtio-bindings",
+ "virtio-queue",
  "vm-allocator",
  "vm-device",
  "vm-memory",
@@ -794,6 +798,16 @@ dependencies = [
  "vmm-sys-util",
 ]
 
+[[package]]
+name = "virtio-queue"
+version = "0.1.0"
+source = "git+https://github.com/sboeuf/vm-virtio?branch=fork_vm_virtio#223246e132e49bba58adb3d4156e25ff977c0eb1"
+dependencies = [
+ "log",
+ "vm-memory",
+ "vmm-sys-util",
+]
+
 [[package]]
 name = "vm-allocator"
 version = "0.1.0"
@@ -853,6 +867,7 @@ version = "0.1.0"
 dependencies = [
  "log",
  "virtio-bindings",
+ "virtio-queue",
  "vm-memory",
 ]
 
@@ -893,6 +908,7 @@ dependencies = [
  "vfio_user",
  "vhdx",
  "virtio-devices",
+ "virtio-queue",
  "vm-allocator",
  "vm-device",
  "vm-memory",


@@ -16,6 +16,7 @@ qcow = { path = "../qcow" }
 seccompiler = "0.2.0"
 vhdx = { path = "../vhdx" }
 virtio-devices = { path = "../virtio-devices" }
+virtio-queue = { path = "../virtio-queue" }
 vmm-sys-util = "0.9.0"
 vm-virtio = { path = "../vm-virtio" }
 vm-memory = "0.6.0"


@@ -15,10 +15,12 @@ use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
 use std::path::PathBuf;
 use std::sync::Arc;
 use virtio_devices::{Block, VirtioDevice, VirtioInterrupt, VirtioInterruptType};
-use vm_memory::{Bytes, GuestAddress, GuestMemoryAtomic, GuestMemoryMmap};
-use vm_virtio::Queue;
+use virtio_queue::{Queue, QueueState};
+use vm_memory::{bitmap::AtomicBitmap, Bytes, GuestAddress, GuestMemoryAtomic};
 use vmm_sys_util::eventfd::{EventFd, EFD_NONBLOCK};
 
+type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>;
+
 const MEM_SIZE: u64 = 256 * 1024 * 1024;
 const DESC_SIZE: u64 = 16; // Bytes in one virtio descriptor.
 const QUEUE_SIZE: u16 = 16; // Max entries in the queue.
@@ -73,10 +75,14 @@ fuzz_target!(|bytes| {
         return;
     }
 
-    let mut q = Queue::new(QUEUE_SIZE);
-    q.ready = true;
-    q.size = QUEUE_SIZE / 2;
-    q.max_size = QUEUE_SIZE;
+    let guest_memory = GuestMemoryAtomic::new(mem);
+
+    let mut q = Queue::<
+        GuestMemoryAtomic<GuestMemoryMmap>,
+        QueueState<GuestMemoryAtomic<GuestMemoryMmap>>,
+    >::new(guest_memory.clone(), QUEUE_SIZE);
+    q.state.ready = true;
+    q.state.size = QUEUE_SIZE / 2;
 
     let queue_evts: Vec<EventFd> = vec![EventFd::new(0).unwrap()];
     let queue_fd = queue_evts[0].as_raw_fd();
@@ -102,7 +108,7 @@ fuzz_target!(|bytes| {
     block
         .activate(
-            GuestMemoryAtomic::new(mem),
+            guest_memory,
             Arc::new(NoopVirtioInterrupt {}),
             vec![q],
             queue_evts,
@@ -134,7 +140,7 @@ impl VirtioInterrupt for NoopVirtioInterrupt {
     fn trigger(
         &self,
         _int_type: &VirtioInterruptType,
-        _queue: Option<&Queue>,
+        _queue: Option<&Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
     ) -> std::result::Result<(), std::io::Error> {
         Ok(())
     }


@@ -15,6 +15,7 @@ serde = "1.0.130"
 versionize = "0.1.6"
 versionize_derive = "0.1.4"
 virtio-bindings = "0.1.0"
+virtio-queue = { path = "../virtio-queue" }
 vm-memory = { version = "0.6.0", features = ["backend-mmap", "backend-atomic", "backend-bitmap"] }
 vm-virtio = { path = "../vm-virtio" }
 vmm-sys-util = "0.9.0"


@@ -12,17 +12,25 @@ use virtio_bindings::bindings::virtio_net::{
     VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
     VIRTIO_NET_F_GUEST_UFO, VIRTIO_NET_OK,
 };
-use vm_memory::{ByteValued, Bytes, GuestMemoryError};
-use vm_virtio::Queue;
+use virtio_queue::Queue;
+use vm_memory::{ByteValued, Bytes, GuestMemoryAtomic, GuestMemoryError};
 
 #[derive(Debug)]
 pub enum Error {
     /// Read queue failed.
     GuestMemory(GuestMemoryError),
+    /// No control header descriptor
+    NoControlHeaderDescriptor,
     /// No queue pairs number.
     NoQueuePairsDescriptor,
     /// No status descriptor
     NoStatusDescriptor,
+    /// Failed adding used index
+    QueueAddUsed(virtio_queue::Error),
+    /// Failed creating an iterator over the queue
+    QueueIterator(virtio_queue::Error),
+    /// Failed enabling notification for the queue
+    QueueEnableNotification(virtio_queue::Error),
 }
 
 type Result<T> = std::result::Result<T, Error>;
@@ -45,22 +53,26 @@ impl CtrlQueue {
         CtrlQueue { taps }
     }
 
-    pub fn process(&mut self, mem: &GuestMemoryMmap, queue: &mut Queue) -> Result<bool> {
+    pub fn process(
+        &mut self,
+        queue: &mut Queue<GuestMemoryAtomic<GuestMemoryMmap>>,
+    ) -> Result<bool> {
         let mut used_desc_heads = Vec::new();
-        for avail_desc in queue.iter(mem) {
-            let ctrl_hdr: ControlHeader =
-                mem.read_obj(avail_desc.addr).map_err(Error::GuestMemory)?;
-            let data_desc = avail_desc
-                .next_descriptor()
-                .ok_or(Error::NoQueuePairsDescriptor)?;
-            let status_desc = data_desc
-                .next_descriptor()
-                .ok_or(Error::NoStatusDescriptor)?;
+        for mut desc_chain in queue.iter().map_err(Error::QueueIterator)? {
+            let ctrl_desc = desc_chain.next().ok_or(Error::NoControlHeaderDescriptor)?;
+
+            let ctrl_hdr: ControlHeader = desc_chain
+                .memory()
+                .read_obj(ctrl_desc.addr())
+                .map_err(Error::GuestMemory)?;
+
+            let data_desc = desc_chain.next().ok_or(Error::NoQueuePairsDescriptor)?;
+            let status_desc = desc_chain.next().ok_or(Error::NoStatusDescriptor)?;
 
             let ok = match u32::from(ctrl_hdr.class) {
                 VIRTIO_NET_CTRL_MQ => {
-                    let queue_pairs = mem
-                        .read_obj::<u16>(data_desc.addr)
+                    let queue_pairs = desc_chain
+                        .memory()
+                        .read_obj::<u16>(data_desc.addr())
                         .map_err(Error::GuestMemory)?;
                     if u32::from(ctrl_hdr.cmd) != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET {
                         warn!("Unsupported command: {}", ctrl_hdr.cmd);
@@ -76,8 +88,9 @@ impl CtrlQueue {
                     }
                 }
                 VIRTIO_NET_CTRL_GUEST_OFFLOADS => {
-                    let features = mem
-                        .read_obj::<u64>(data_desc.addr)
+                    let features = desc_chain
+                        .memory()
+                        .read_obj::<u64>(data_desc.addr())
                        .map_err(Error::GuestMemory)?;
                     if u32::from(ctrl_hdr.cmd) != VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET {
                         warn!("Unsupported command: {}", ctrl_hdr.cmd);
@@ -102,17 +115,24 @@ impl CtrlQueue {
                 }
             };
 
-            mem.write_obj(
-                if ok { VIRTIO_NET_OK } else { VIRTIO_NET_ERR } as u8,
-                status_desc.addr,
-            )
-            .map_err(Error::GuestMemory)?;
+            desc_chain
+                .memory()
+                .write_obj(
+                    if ok { VIRTIO_NET_OK } else { VIRTIO_NET_ERR } as u8,
+                    status_desc.addr(),
+                )
+                .map_err(Error::GuestMemory)?;
 
-            used_desc_heads.push((avail_desc.index, avail_desc.len));
+            let len = ctrl_desc.len() + data_desc.len() + status_desc.len();
+            used_desc_heads.push((desc_chain.head_index(), len));
         }
 
         for (desc_index, len) in used_desc_heads.iter() {
-            queue.add_used(mem, *desc_index, *len);
-            queue.update_avail_event(mem);
+            queue
+                .add_used(*desc_index, *len)
+                .map_err(Error::QueueAddUsed)?;
+            queue
+                .enable_notification()
+                .map_err(Error::QueueEnableNotification)?;
         }
 
         Ok(!used_desc_heads.is_empty())


@@ -10,8 +10,8 @@ use std::num::Wrapping;
 use std::os::unix::io::{AsRawFd, RawFd};
 use std::sync::atomic::{AtomicU64, Ordering};
 use std::sync::Arc;
-use vm_memory::{Bytes, GuestAddressSpace, GuestMemory, GuestMemoryAtomic};
-use vm_virtio::Queue;
+use virtio_queue::Queue;
+use vm_memory::{Bytes, GuestMemory, GuestMemoryAtomic};
 
 #[derive(Clone)]
 pub struct TxVirtio {
@@ -35,36 +35,42 @@ impl TxVirtio {
     pub fn process_desc_chain(
         &mut self,
-        mem: &GuestMemoryMmap,
         tap: &mut Tap,
-        queue: &mut Queue,
+        queue: &mut Queue<GuestMemoryAtomic<GuestMemoryMmap>>,
         rate_limiter: &mut Option<RateLimiter>,
     ) -> Result<bool, NetQueuePairError> {
         let mut retry_write = false;
         let mut rate_limit_reached = false;
-        while let Some(avail_desc) = queue.iter(mem).next() {
-            if rate_limit_reached {
-                queue.go_to_previous_position();
-                break;
-            }
+        loop {
+            let used_desc_head: (u16, u32);
+            let mut avail_iter = queue
+                .iter()
+                .map_err(NetQueuePairError::QueueIteratorFailed)?;
+            if let Some(mut desc_chain) = avail_iter.next() {
+                if rate_limit_reached {
+                    avail_iter.go_to_previous_position();
+                    break;
+                }
 
-            let head_index = avail_desc.index;
-            let mut next_desc = Some(avail_desc);
+                let mut next_desc = desc_chain.next();
 
-            let mut iovecs = Vec::new();
-            while let Some(desc) = next_desc {
-                if !desc.is_write_only() && desc.len > 0 {
-                    let buf = mem
-                        .get_slice(desc.addr, desc.len as usize)
-                        .map_err(NetQueuePairError::GuestMemory)?
-                        .as_ptr();
-                    let iovec = libc::iovec {
-                        iov_base: buf as *mut libc::c_void,
-                        iov_len: desc.len as libc::size_t,
-                    };
-                    iovecs.push(iovec);
-                }
-                next_desc = desc.next_descriptor();
-            }
+                let mut iovecs = Vec::new();
+                while let Some(desc) = next_desc {
+                    if !desc.is_write_only() && desc.len() > 0 {
+                        let buf = desc_chain
+                            .memory()
+                            .get_slice(desc.addr(), desc.len() as usize)
+                            .map_err(NetQueuePairError::GuestMemory)?
+                            .as_ptr();
+                        let iovec = libc::iovec {
+                            iov_base: buf as *mut libc::c_void,
+                            iov_len: desc.len() as libc::size_t,
+                        };
+                        iovecs.push(iovec);
+                    }
+                    next_desc = desc_chain.next();
+                }
 
         let len = if !iovecs.is_empty() {
@@ -81,7 +87,7 @@ impl TxVirtio {
                     /* EAGAIN */
                     if e.kind() == std::io::ErrorKind::WouldBlock {
-                        queue.go_to_previous_position();
+                        avail_iter.go_to_previous_position();
                         retry_write = true;
                         break;
                     }
@@ -97,8 +103,7 @@ impl TxVirtio {
                     0
                 };
 
-            queue.add_used(mem, head_index, 0);
-            queue.update_avail_event(mem);
+                used_desc_head = (desc_chain.head_index(), len);
 
                 // For the sake of simplicity (similar to the RX rate limiting), we always
                 // let the 'last' descriptor chain go-through even if it was over the rate
@@ -107,6 +112,16 @@ impl TxVirtio {
                     rate_limit_reached = !rate_limiter.consume(1, TokenType::Ops)
                         || !rate_limiter.consume(len as u64, TokenType::Bytes);
                 }
+            } else {
+                break;
+            }
+
+            queue
+                .add_used(used_desc_head.0, used_desc_head.1)
+                .map_err(NetQueuePairError::QueueAddUsed)?;
+            queue
+                .enable_notification()
+                .map_err(NetQueuePairError::QueueEnableNotification)?;
         }
 
         Ok(retry_write)
@@ -135,39 +150,47 @@ impl RxVirtio {
     pub fn process_desc_chain(
         &mut self,
-        mem: &GuestMemoryMmap,
         tap: &mut Tap,
-        queue: &mut Queue,
+        queue: &mut Queue<GuestMemoryAtomic<GuestMemoryMmap>>,
         rate_limiter: &mut Option<RateLimiter>,
     ) -> Result<bool, NetQueuePairError> {
         let mut exhausted_descs = true;
         let mut rate_limit_reached = false;
-        while let Some(avail_desc) = queue.iter(mem).next() {
-            if rate_limit_reached {
-                exhausted_descs = false;
-                queue.go_to_previous_position();
-                break;
-            }
+        loop {
+            let used_desc_head: (u16, u32);
+            let mut avail_iter = queue
+                .iter()
+                .map_err(NetQueuePairError::QueueIteratorFailed)?;
+            if let Some(mut desc_chain) = avail_iter.next() {
+                if rate_limit_reached {
+                    exhausted_descs = false;
+                    avail_iter.go_to_previous_position();
+                    break;
+                }
 
-            let head_index = avail_desc.index;
-            let num_buffers_addr = mem.checked_offset(avail_desc.addr, 10).unwrap();
-            let mut next_desc = Some(avail_desc);
+                let desc = desc_chain
+                    .next()
+                    .ok_or(NetQueuePairError::DescriptorChainTooShort)?;
+                let num_buffers_addr = desc_chain.memory().checked_offset(desc.addr(), 10).unwrap();
+                let mut next_desc = Some(desc);
 
-            let mut iovecs = Vec::new();
-            while let Some(desc) = next_desc {
-                if desc.is_write_only() && desc.len > 0 {
-                    let buf = mem
-                        .get_slice(desc.addr, desc.len as usize)
-                        .map_err(NetQueuePairError::GuestMemory)?
-                        .as_ptr();
-                    let iovec = libc::iovec {
-                        iov_base: buf as *mut libc::c_void,
-                        iov_len: desc.len as libc::size_t,
-                    };
-                    iovecs.push(iovec);
-                }
-                next_desc = desc.next_descriptor();
-            }
+                let mut iovecs = Vec::new();
+                while let Some(desc) = next_desc {
+                    if desc.is_write_only() && desc.len() > 0 {
+                        let buf = desc_chain
+                            .memory()
+                            .get_slice(desc.addr(), desc.len() as usize)
+                            .map_err(NetQueuePairError::GuestMemory)?
+                            .as_ptr();
+                        let iovec = libc::iovec {
+                            iov_base: buf as *mut libc::c_void,
+                            iov_len: desc.len() as libc::size_t,
+                        };
+                        iovecs.push(iovec);
+                    }
+                    next_desc = desc_chain.next();
+                }
 
         let len = if !iovecs.is_empty() {
@@ -181,7 +204,7 @@ impl RxVirtio {
                 if result < 0 {
                     let e = std::io::Error::last_os_error();
                     exhausted_descs = false;
-                    queue.go_to_previous_position();
+                    avail_iter.go_to_previous_position();
 
                     /* EAGAIN */
                     if e.kind() == std::io::ErrorKind::WouldBlock {
@@ -194,7 +217,9 @@ impl RxVirtio {
                 // Write num_buffers to guest memory. We simply write 1 as we
                 // never spread the frame over more than one descriptor chain.
-                mem.write_obj(1u16, num_buffers_addr)
+                desc_chain
+                    .memory()
+                    .write_obj(1u16, num_buffers_addr)
                     .map_err(NetQueuePairError::GuestMemory)?;
 
                 self.counter_bytes += Wrapping(result as u64 - vnet_hdr_len() as u64);
@@ -205,8 +230,7 @@ impl RxVirtio {
                     0
                 };
 
-            queue.add_used(mem, head_index, len);
-            queue.update_avail_event(mem);
+                used_desc_head = (desc_chain.head_index(), len);
 
                 // For the sake of simplicity (keeping the handling of RX_QUEUE_EVENT and
                 // RX_TAP_EVENT totally asynchronous), we always let the 'last' descriptor
@@ -216,6 +240,16 @@ impl RxVirtio {
                     rate_limit_reached = !rate_limiter.consume(1, TokenType::Ops)
                         || !rate_limiter.consume(len as u64, TokenType::Bytes);
                 }
+            } else {
+                break;
+            }
+
+            queue
+                .add_used(used_desc_head.0, used_desc_head.1)
+                .map_err(NetQueuePairError::QueueAddUsed)?;
+            queue
+                .enable_notification()
+                .map_err(NetQueuePairError::QueueEnableNotification)?;
         }
 
         Ok(exhausted_descs)
@@ -244,10 +278,19 @@ pub enum NetQueuePairError {
     ReadTap(io::Error),
     /// Error related to guest memory
     GuestMemory(vm_memory::GuestMemoryError),
+    /// Returned an error while iterating through the queue
+    QueueIteratorFailed(virtio_queue::Error),
+    /// Descriptor chain is too short
+    DescriptorChainTooShort,
+    /// Failed to determine if queue needed notification
+    QueueNeedsNotification(virtio_queue::Error),
+    /// Failed to enable notification on the queue
+    QueueEnableNotification(virtio_queue::Error),
+    /// Failed to add used index to the queue
+    QueueAddUsed(virtio_queue::Error),
 }
 
 pub struct NetQueuePair {
-    pub mem: Option<GuestMemoryAtomic<GuestMemoryMmap>>,
     pub tap: Tap,
     // With epoll each FD must be unique. So in order to filter the
     // events we need to get a second FD responding to the original
@@ -268,16 +311,13 @@ pub struct NetQueuePair {
 }
 
 impl NetQueuePair {
-    pub fn process_tx(&mut self, queue: &mut Queue) -> Result<bool, NetQueuePairError> {
-        let mem = self
-            .mem
-            .as_ref()
-            .ok_or(NetQueuePairError::NoMemoryConfigured)
-            .map(|m| m.memory())?;
+    pub fn process_tx(
+        &mut self,
+        queue: &mut Queue<GuestMemoryAtomic<GuestMemoryMmap>>,
+    ) -> Result<bool, NetQueuePairError> {
         let tx_tap_retry =
             self.tx
-                .process_desc_chain(&mem, &mut self.tap, queue, &mut self.tx_rate_limiter)?;
+                .process_desc_chain(&mut self.tap, queue, &mut self.tx_rate_limiter)?;
 
         // We got told to try again when writing to the tap. Wait for the TAP to be writable
         if tx_tap_retry && !self.tx_tap_listening {
@@ -311,20 +351,19 @@ impl NetQueuePair {
         self.tx.counter_bytes = Wrapping(0);
         self.tx.counter_frames = Wrapping(0);
 
-        Ok(queue.needs_notification(&mem, queue.next_used))
+        queue
+            .needs_notification()
+            .map_err(NetQueuePairError::QueueNeedsNotification)
     }
 
-    pub fn process_rx(&mut self, queue: &mut Queue) -> Result<bool, NetQueuePairError> {
-        let mem = self
-            .mem
-            .as_ref()
-            .ok_or(NetQueuePairError::NoMemoryConfigured)
-            .map(|m| m.memory())?;
+    pub fn process_rx(
+        &mut self,
+        queue: &mut Queue<GuestMemoryAtomic<GuestMemoryMmap>>,
+    ) -> Result<bool, NetQueuePairError> {
         self.rx_desc_avail =
             !self
                 .rx
-                .process_desc_chain(&mem, &mut self.tap, queue, &mut self.rx_rate_limiter)?;
+                .process_desc_chain(&mut self.tap, queue, &mut self.rx_rate_limiter)?;
         let rate_limit_reached = self
             .rx_rate_limiter
             .as_ref()
@@ -353,6 +392,8 @@ impl NetQueuePair {
         self.rx.counter_bytes = Wrapping(0);
         self.rx.counter_frames = Wrapping(0);
 
-        Ok(queue.needs_notification(&mem, queue.next_used))
+        queue
+            .needs_notification()
+            .map_err(NetQueuePairError::QueueNeedsNotification)
     }
 }
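
The TX/RX loops above also settle on one completion pattern: record the head index and length while iterating, then call add_used() and enable_notification() once the chain is done, and needs_notification() to decide whether to kick the guest. A condensed sketch, assuming the same vendored API (complete_chain is our name, not a function in the tree):

use virtio_queue::Queue;
use vm_memory::{bitmap::AtomicBitmap, GuestMemoryAtomic};

type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>;

// Old API: queue.add_used(mem, head, len) + queue.update_avail_event(mem)
// + queue.needs_notification(mem, queue.next_used). The new calls take no
// memory argument and return Results, which the callers above map into
// NetQueuePairError variants.
fn complete_chain(
    queue: &mut Queue<GuestMemoryAtomic<GuestMemoryMmap>>,
    head_index: u16,
    len: u32,
) -> Result<bool, virtio_queue::Error> {
    queue.add_used(head_index, len)?; // publish the used-ring entry
    queue.enable_notification()?;     // re-arm available-ring notifications
    queue.needs_notification()        // true if the guest still needs a kick
}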


@@ -12,6 +12,7 @@ epoll = "4.3.1"
 libc = "0.2.104"
 log = "0.4.14"
 virtio-bindings = "0.1.0"
+virtio-queue = { path = "../virtio-queue" }
 vm-memory = { version = "0.6.0", features = ["backend-bitmap"] }
 vm-virtio = { path = "../vm-virtio" }
 vmm-sys-util = "0.9.0"


@@ -10,7 +10,6 @@ extern crate log;
 use std::error;
 use std::fs::File;
 use std::io;
-use std::num::Wrapping;
 use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
 use std::result;
 use std::sync::{Arc, Mutex, RwLock};
@@ -26,9 +25,10 @@ use vhost::vhost_user::{
     VhostUserSlaveReqHandlerMut,
 };
 use virtio_bindings::bindings::virtio_ring::VIRTIO_RING_F_EVENT_IDX;
+use virtio_queue::Queue;
 use vm_memory::guest_memory::FileOffset;
-use vm_memory::{bitmap::AtomicBitmap, GuestAddress, MmapRegion};
-use vm_virtio::Queue;
+use vm_memory::GuestAddressSpace;
+use vm_memory::{bitmap::AtomicBitmap, GuestAddress, GuestMemoryAtomic, MmapRegion};
 use vmm_sys_util::eventfd::EventFd;
 
 pub type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>;
@@ -83,9 +83,6 @@ pub trait VhostUserBackend: Send + Sync + 'static {
     /// Tell the backend if EVENT_IDX has been negotiated.
     fn set_event_idx(&mut self, enabled: bool);
 
-    /// Update guest memory regions.
-    fn update_memory(&mut self, mem: GuestMemoryMmap) -> result::Result<(), io::Error>;
-
     /// This function gets called if the backend registered some additional
     /// listeners onto specific file descriptors. The library can handle
     /// virtqueues on its own, but does not know what to do with events
@@ -232,7 +229,7 @@ struct AddrMapping {
 }
 
 pub struct Vring {
-    queue: Queue,
+    queue: Queue<GuestMemoryAtomic<GuestMemoryMmap>>,
     kick: Option<EventFd>,
     call: Option<EventFd>,
     #[allow(dead_code)]
@@ -241,9 +238,9 @@ pub struct Vring {
 }
 
 impl Vring {
-    fn new(max_queue_size: u16) -> Self {
+    fn new(mem: GuestMemoryAtomic<GuestMemoryMmap>, max_queue_size: u16) -> Self {
         Vring {
-            queue: Queue::new(max_queue_size),
+            queue: Queue::new(mem, max_queue_size),
             kick: None,
             call: None,
             err: None,
@@ -251,7 +248,7 @@ impl Vring {
         }
     }
 
-    pub fn mut_queue(&mut self) -> &mut Queue {
+    pub fn mut_queue(&mut self) -> &mut Queue<GuestMemoryAtomic<GuestMemoryMmap>> {
         &mut self.queue
     }
@@ -468,7 +465,7 @@ struct VhostUserHandler<S: VhostUserBackend> {
     max_queue_size: usize,
     queues_per_thread: Vec<u64>,
     mappings: Vec<AddrMapping>,
-    guest_memory: Option<GuestMemoryMmap>,
+    guest_memory: GuestMemoryAtomic<GuestMemoryMmap>,
     vrings: Vec<Arc<RwLock<Vring>>>,
     worker_threads: Vec<thread::JoinHandle<VringWorkerResult<()>>>,
 }
@@ -478,10 +475,14 @@ impl<S: VhostUserBackend> VhostUserHandler<S> {
         let num_queues = backend.read().unwrap().num_queues();
         let max_queue_size = backend.read().unwrap().max_queue_size();
         let queues_per_thread = backend.read().unwrap().queues_per_thread();
+        let guest_memory = GuestMemoryAtomic::new(GuestMemoryMmap::new());
 
         let mut vrings: Vec<Arc<RwLock<Vring>>> = Vec::new();
         for _ in 0..num_queues {
-            let vring = Arc::new(RwLock::new(Vring::new(max_queue_size as u16)));
+            let vring = Arc::new(RwLock::new(Vring::new(
+                guest_memory.clone(),
+                max_queue_size as u16,
+            )));
             vrings.push(vring);
         }
@@ -544,7 +545,7 @@ impl<S: VhostUserBackend> VhostUserHandler<S> {
             max_queue_size,
             queues_per_thread,
             mappings: Vec::new(),
-            guest_memory: None,
+            guest_memory,
             vrings,
             worker_threads,
         })
@@ -649,15 +650,7 @@ impl<S: VhostUserBackend> VhostUserSlaveReqHandlerMut for VhostUserHandler<S> {
         let mem = GuestMemoryMmap::from_ranges_with_files(regions).map_err(|e| {
             VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))
         })?;
-        self.backend
-            .write()
-            .unwrap()
-            .update_memory(mem.clone())
-            .map_err(|e| {
-                VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))
-            })?;
-
-        self.guest_memory = Some(mem);
+        self.guest_memory.lock().unwrap().replace(mem);
         self.mappings = mappings;
 
         Ok(())
@@ -671,7 +664,12 @@ impl<S: VhostUserBackend> VhostUserSlaveReqHandlerMut for VhostUserHandler<S> {
         if index as usize >= self.num_queues || num == 0 || num as usize > self.max_queue_size {
             return Err(VhostUserError::InvalidParam);
         }
-        self.vrings[index as usize].write().unwrap().queue.size = num as u16;
+        self.vrings[index as usize]
+            .write()
+            .unwrap()
+            .queue
+            .state
+            .size = num as u16;
         Ok(())
     }
@@ -702,13 +700,20 @@ impl<S: VhostUserBackend> VhostUserSlaveReqHandlerMut for VhostUserHandler<S> {
                 .write()
                 .unwrap()
                 .queue
+                .state
                 .desc_table = GuestAddress(desc_table);
             self.vrings[index as usize]
                 .write()
                 .unwrap()
                 .queue
+                .state
                 .avail_ring = GuestAddress(avail_ring);
-            self.vrings[index as usize].write().unwrap().queue.used_ring = GuestAddress(used_ring);
+            self.vrings[index as usize]
+                .write()
+                .unwrap()
+                .queue
+                .state
+                .used_ring = GuestAddress(used_ring);
             Ok(())
         } else {
             Err(VhostUserError::InvalidParam)
@@ -720,8 +725,7 @@ impl<S: VhostUserBackend> VhostUserSlaveReqHandlerMut for VhostUserHandler<S> {
             .write()
             .unwrap()
             .queue
-            .next_avail = Wrapping(base as u16);
-        self.vrings[index as usize].write().unwrap().queue.next_used = Wrapping(base as u16);
+            .set_next_avail(base as u16);
 
         let event_idx: bool = (self.acked_features & (1 << VIRTIO_RING_F_EVENT_IDX)) != 0;
         self.vrings[index as usize]
@@ -742,7 +746,12 @@ impl<S: VhostUserBackend> VhostUserSlaveReqHandlerMut for VhostUserHandler<S> {
         // that file descriptor is readable) on the descriptor specified by
        // VHOST_USER_SET_VRING_KICK, and stop ring upon receiving
        // VHOST_USER_GET_VRING_BASE.
-        self.vrings[index as usize].write().unwrap().queue.ready = false;
+        self.vrings[index as usize]
+            .write()
+            .unwrap()
+            .queue
+            .state
+            .ready = false;
         if let Some(fd) = self.vrings[index as usize].read().unwrap().kick.as_ref() {
             for (thread_index, queues_mask) in self.queues_per_thread.iter().enumerate() {
                 let shifted_queues_mask = queues_mask >> index;
@@ -764,8 +773,7 @@ impl<S: VhostUserBackend> VhostUserSlaveReqHandlerMut for VhostUserHandler<S> {
            .read()
            .unwrap()
            .queue
-            .next_avail
-            .0 as u16;
+            .next_avail();
 
         Ok(VhostUserVringState::new(index, u32::from(next_avail)))
     }
@@ -783,7 +791,12 @@ impl<S: VhostUserBackend> VhostUserSlaveReqHandlerMut for VhostUserHandler<S> {
        // that file descriptor is readable) on the descriptor specified by
        // VHOST_USER_SET_VRING_KICK, and stop ring upon receiving
        // VHOST_USER_GET_VRING_BASE.
-        self.vrings[index as usize].write().unwrap().queue.ready = true;
+        self.vrings[index as usize]
+            .write()
+            .unwrap()
+            .queue
+            .state
+            .ready = true;
         if let Some(fd) = self.vrings[index as usize].read().unwrap().kick.as_ref() {
             for (thread_index, queues_mask) in self.queues_per_thread.iter().enumerate() {
                 let shifted_queues_mask = queues_mask >> index;
@@ -890,25 +903,14 @@ impl<S: VhostUserBackend> VhostUserSlaveReqHandlerMut for VhostUserHandler<S> {
             )?,
         );
 
-        let guest_memory = if let Some(guest_memory) = &self.guest_memory {
-            guest_memory.insert_region(guest_region).map_err(|e| {
-                VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))
-            })?
-        } else {
-            GuestMemoryMmap::from_arc_regions(vec![guest_region]).map_err(|e| {
-                VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))
-            })?
-        };
-
-        self.backend
-            .write()
-            .unwrap()
-            .update_memory(guest_memory.clone())
+        let guest_memory = self
+            .guest_memory
+            .memory()
+            .insert_region(guest_region)
             .map_err(|e| {
                 VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))
             })?;
 
-        self.guest_memory = Some(guest_memory);
+        self.guest_memory.lock().unwrap().replace(guest_memory);
 
         self.mappings.push(AddrMapping {
             vmm_addr: region.user_addr,
@@ -920,26 +922,14 @@ impl<S: VhostUserBackend> VhostUserSlaveReqHandlerMut for VhostUserHandler<S> {
     }
 
     fn remove_mem_region(&mut self, region: &VhostUserSingleMemoryRegion) -> VhostUserResult<()> {
-        let guest_memory = if let Some(guest_memory) = &self.guest_memory {
-            let (updated_guest_memory, _) = guest_memory
-                .remove_region(GuestAddress(region.guest_phys_addr), region.memory_size)
-                .map_err(|e| {
-                    VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))
-                })?;
-            updated_guest_memory
-        } else {
-            return Err(VhostUserError::InvalidOperation);
-        };
-
-        self.backend
-            .write()
-            .unwrap()
-            .update_memory(guest_memory.clone())
+        let (guest_memory, _) = self
+            .guest_memory
+            .memory()
+            .remove_region(GuestAddress(region.guest_phys_addr), region.memory_size)
            .map_err(|e| {
                VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))
            })?;
-
-        self.guest_memory = Some(guest_memory);
+        self.guest_memory.lock().unwrap().replace(guest_memory);
 
         self.mappings
             .retain(|mapping| mapping.gpa_base != region.guest_phys_addr);
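
One design note on the handler change above: guest memory becomes a single GuestMemoryAtomic shared by every Vring at construction time, so a memory-table update is an in-place swap instead of an update_memory() callback into each backend. A sketch of that swap, per vm-memory's atomic handle (swap_memory is our illustrative name):

use vm_memory::{GuestMemoryAtomic, GuestMemoryMmap};

// Readers take consistent snapshots via shared.memory(); replace() makes
// the new map visible to all subsequent snapshots without touching the
// vrings that hold clones of the handle.
fn swap_memory(shared: &GuestMemoryAtomic<GuestMemoryMmap>, new_map: GuestMemoryMmap) {
    shared.lock().unwrap().replace(new_map);
}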


@@ -17,7 +17,6 @@ use std::fs::File;
 use std::fs::OpenOptions;
 use std::io::Read;
 use std::io::{Seek, SeekFrom, Write};
-use std::num::Wrapping;
 use std::ops::DerefMut;
 use std::os::unix::fs::OpenOptionsExt;
 use std::path::PathBuf;
@@ -30,7 +29,7 @@ use std::vec::Vec;
 use std::{convert, error, fmt, io};
 use vhost::vhost_user::message::*;
 use vhost::vhost_user::Listener;
-use vhost_user_backend::{GuestMemoryMmap, VhostUserBackend, VhostUserDaemon, Vring};
+use vhost_user_backend::{VhostUserBackend, VhostUserDaemon, Vring};
 use virtio_bindings::bindings::virtio_blk::*;
 use virtio_bindings::bindings::virtio_ring::VIRTIO_RING_F_EVENT_IDX;
 use vm_memory::ByteValued;
@@ -87,7 +86,6 @@ impl convert::From<Error> for io::Error {
 }
 
 struct VhostUserBlkThread {
-    mem: Option<GuestMemoryMmap>,
     disk_image: Arc<Mutex<dyn DiskFile>>,
     disk_image_id: Vec<u8>,
     disk_nsectors: u64,
@@ -104,7 +102,6 @@ impl VhostUserBlkThread {
         writeback: Arc<AtomicBool>,
     ) -> Result<Self> {
         Ok(VhostUserBlkThread {
-            mem: None,
             disk_image,
             disk_image_id,
             disk_nsectors,
@@ -116,22 +113,18 @@ impl VhostUserBlkThread {
     fn process_queue(&mut self, vring: &mut Vring) -> bool {
         let mut used_any = false;
-        let mem = match self.mem.as_ref() {
-            Some(m) => m,
-            None => return false,
-        };
 
-        while let Some(head) = vring.mut_queue().iter(mem).next() {
+        while let Some(mut desc_chain) = vring.mut_queue().iter().unwrap().next() {
             debug!("got an element in the queue");
             let len;
-            match Request::parse(&head, mem) {
+            match Request::parse(&mut desc_chain) {
                 Ok(mut request) => {
                     debug!("element is a valid request");
                     request.set_writeback(self.writeback.load(Ordering::Acquire));
                     let status = match request.execute(
                         &mut self.disk_image.lock().unwrap().deref_mut(),
                         self.disk_nsectors,
-                        mem,
+                        desc_chain.memory(),
                         &self.disk_image_id,
                     ) {
                         Ok(l) => {
@@ -143,7 +136,10 @@ impl VhostUserBlkThread {
                             e.status()
                         }
                     };
-                    mem.write_obj(status, request.status_addr).unwrap();
+                    desc_chain
+                        .memory()
+                        .write_obj(status, request.status_addr)
+                        .unwrap();
                 }
                 Err(err) => {
                     error!("failed to parse available descriptor chain: {:?}", err);
@@ -153,8 +149,8 @@ impl VhostUserBlkThread {
             if self.event_idx {
                 let queue = vring.mut_queue();
-                if let Some(used_idx) = queue.add_used(mem, head.index, len) {
-                    if queue.needs_notification(mem, Wrapping(used_idx)) {
+                if queue.add_used(desc_chain.head_index(), len).is_ok() {
+                    if queue.needs_notification().unwrap() {
                         debug!("signalling queue");
                         vring.signal_used_queue().unwrap();
                     } else {
@@ -164,7 +160,10 @@ impl VhostUserBlkThread {
                 }
             } else {
                 debug!("signalling queue");
-                vring.mut_queue().add_used(mem, head.index, len);
+                vring
+                    .mut_queue()
+                    .add_used(desc_chain.head_index(), len)
+                    .unwrap();
                 vring.signal_used_queue().unwrap();
                 used_any = true;
             }
@@ -316,13 +315,6 @@ impl VhostUserBackend for VhostUserBlkBackend {
         }
     }
 
-    fn update_memory(&mut self, mem: GuestMemoryMmap) -> VhostUserBackendResult<()> {
-        for thread in self.threads.iter() {
-            thread.lock().unwrap().mem = Some(mem.clone());
-        }
-        Ok(())
-    }
-
     fn handle_event(
         &self,
         device_event: u16,
@@ -360,9 +352,7 @@ impl VhostUserBackend for VhostUserBlkBackend {
                     // calling process_queue() until it stops finding new
                     // requests on the queue.
                     loop {
-                        vring
-                            .mut_queue()
-                            .update_avail_event(thread.mem.as_ref().unwrap());
+                        vring.mut_queue().enable_notification().unwrap();
                         if !thread.process_queue(&mut vring) {
                             break;
                         }


@@ -22,9 +22,8 @@ use std::sync::{Arc, Mutex, RwLock};
 use std::vec::Vec;
 use vhost::vhost_user::message::*;
 use vhost::vhost_user::Listener;
-use vhost_user_backend::{GuestMemoryMmap, VhostUserBackend, VhostUserDaemon, Vring, VringWorker};
+use vhost_user_backend::{VhostUserBackend, VhostUserDaemon, Vring, VringWorker};
 use virtio_bindings::bindings::virtio_net::*;
-use vm_memory::GuestMemoryAtomic;
 use vmm_sys_util::eventfd::EventFd;
 
 pub type Result<T> = std::result::Result<T, Error>;
@@ -81,7 +80,6 @@ impl VhostUserNetThread {
         Ok(VhostUserNetThread {
             kill_evt: EventFd::new(EFD_NONBLOCK).map_err(Error::CreateKillEventFd)?,
             net: NetQueuePair {
-                mem: None,
                 tap_for_write_epoll: tap.clone(),
                 tap,
                 rx: RxVirtio::new(),
@@ -184,13 +182,6 @@
     fn set_event_idx(&mut self, _enabled: bool) {}
 
-    fn update_memory(&mut self, mem: GuestMemoryMmap) -> VhostUserBackendResult<()> {
-        for thread in self.threads.iter() {
-            thread.lock().unwrap().net.mem = Some(GuestMemoryAtomic::new(mem.clone()));
-        }
-        Ok(())
-    }
-
     fn handle_event(
         &self,
         device_event: u16,


@@ -31,6 +31,7 @@ versionize = "0.1.6"
 versionize_derive = "0.1.4"
 vhost = { version = "0.2.0", features = ["vhost-user-master", "vhost-user-slave", "vhost-kern"] }
 virtio-bindings = { version = "0.1.0", features = ["virtio-v5_0_0"] }
+virtio-queue = { path = "../virtio-queue" }
 vm-allocator = { path = "../vm-allocator" }
 vm-device = { path = "../vm-device" }
 vm-memory = { version = "0.6.0", features = ["backend-mmap", "backend-atomic", "backend-bitmap"] }


@@ -13,8 +13,8 @@
 // limitations under the License.
 
 use super::{
-    ActivateError, ActivateResult, EpollHelper, EpollHelperError, EpollHelperHandler, Queue,
-    VirtioCommon, VirtioDevice, VirtioDeviceType, EPOLL_HELPER_EVENT_LAST, VIRTIO_F_VERSION_1,
+    ActivateError, ActivateResult, EpollHelper, EpollHelperError, EpollHelperHandler, VirtioCommon,
+    VirtioDevice, VirtioDeviceType, EPOLL_HELPER_EVENT_LAST, VIRTIO_F_VERSION_1,
 };
 use crate::seccomp_filters::Thread;
 use crate::thread_helper::spawn_virtio_thread;
@@ -31,11 +31,9 @@ use std::sync::mpsc;
 use std::sync::{Arc, Barrier, Mutex};
 use versionize::{VersionMap, Versionize, VersionizeResult};
 use versionize_derive::Versionize;
+use virtio_queue::Queue;
 use vm_memory::GuestMemory;
-use vm_memory::{
-    Address, ByteValued, Bytes, GuestAddress, GuestAddressSpace, GuestMemoryAtomic,
-    GuestMemoryError,
-};
+use vm_memory::{Address, ByteValued, Bytes, GuestAddress, GuestMemoryAtomic, GuestMemoryError};
 use vm_migration::VersionMapped;
 use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable};
 use vmm_sys_util::eventfd::EventFd;
@@ -81,6 +79,12 @@ pub enum Error {
     ProcessQueueWrongEvType(u16),
     // Fail tp signal
     FailedSignal(io::Error),
+    /// Descriptor chain is too short
+    DescriptorChainTooShort,
+    /// Failed adding used index
+    QueueAddUsed(virtio_queue::Error),
+    /// Failed creating an iterator over the queue
+    QueueIterator(virtio_queue::Error),
 }
 
 // Got from include/uapi/linux/virtio_balloon.h
@@ -152,8 +156,7 @@ impl VirtioBalloonResize {
 struct BalloonEpollHandler {
     config: Arc<Mutex<VirtioBalloonConfig>>,
     resize_receiver: VirtioBalloonResizeReceiver,
-    queues: Vec<Queue>,
-    mem: GuestMemoryAtomic<GuestMemoryMmap>,
+    queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
     interrupt_cb: Arc<dyn VirtioInterrupt>,
     inflate_queue_evt: EventFd,
     deflate_queue_evt: EventFd,
@@ -165,7 +168,7 @@ impl BalloonEpollHandler {
     fn signal(
         &self,
         int_type: &VirtioInterruptType,
-        queue: Option<&Queue>,
+        queue: Option<&Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
     ) -> result::Result<(), Error> {
         self.interrupt_cb.trigger(int_type, queue).map_err(|e| {
             error!("Failed to signal used queue: {:?}", e);
@@ -182,38 +185,45 @@
         let mut used_desc_heads = [0; QUEUE_SIZE as usize];
         let mut used_count = 0;
-        let mem = self.mem.memory();
-        for avail_desc in self.queues[queue_index].iter(&mem) {
-            used_desc_heads[used_count] = avail_desc.index;
+        for mut desc_chain in self.queues[queue_index]
+            .iter()
+            .map_err(Error::QueueIterator)?
+        {
+            let desc = desc_chain.next().ok_or(Error::DescriptorChainTooShort)?;
+
+            used_desc_heads[used_count] = desc_chain.head_index();
             used_count += 1;
 
             let data_chunk_size = size_of::<u32>();
 
             // The head contains the request type which MUST be readable.
-            if avail_desc.is_write_only() {
+            if desc.is_write_only() {
                 error!("The head contains the request type is not right");
                 return Err(Error::UnexpectedWriteOnlyDescriptor);
             }
-            if avail_desc.len as usize % data_chunk_size != 0 {
-                error!("the request size {} is not right", avail_desc.len);
+            if desc.len() as usize % data_chunk_size != 0 {
+                error!("the request size {} is not right", desc.len());
                 return Err(Error::InvalidRequest);
            }
 
             let mut offset = 0u64;
-            while offset < avail_desc.len as u64 {
-                let addr = avail_desc.addr.checked_add(offset).unwrap();
-                let pfn: u32 = mem.read_obj(addr).map_err(Error::GuestMemory)?;
+            while offset < desc.len() as u64 {
+                let addr = desc.addr().checked_add(offset).unwrap();
+                let pfn: u32 = desc_chain
+                    .memory()
+                    .read_obj(addr)
+                    .map_err(Error::GuestMemory)?;
                 offset += data_chunk_size as u64;
 
                 let gpa = (pfn as u64) << VIRTIO_BALLOON_PFN_SHIFT;
-                if let Ok(hva) = mem.get_host_address(GuestAddress(gpa)) {
+                if let Ok(hva) = desc_chain.memory().get_host_address(GuestAddress(gpa)) {
                     let advice = match ev_type {
                         INFLATE_QUEUE_EVENT => {
-                            let region =
-                                mem.find_region(GuestAddress(gpa))
-                                    .ok_or(Error::GuestMemory(
-                                        GuestMemoryError::InvalidGuestAddress(GuestAddress(gpa)),
-                                    ))?;
+                            let region = desc_chain.memory().find_region(GuestAddress(gpa)).ok_or(
+                                Error::GuestMemory(GuestMemoryError::InvalidGuestAddress(
+                                    GuestAddress(gpa),
+                                )),
+                            )?;
                             if let Some(f_off) = region.file_offset() {
                                 let offset = hva as usize - region.as_ptr() as usize;
                                 let res = unsafe {
@@ -253,7 +263,9 @@ impl BalloonEpollHandler {
         }
 
         for &desc_index in &used_desc_heads[..used_count] {
-            self.queues[queue_index].add_used(&mem, desc_index, 0);
+            self.queues[queue_index]
+                .add_used(desc_index, 0)
+                .map_err(Error::QueueAddUsed)?;
         }
         if used_count > 0 {
             self.signal(&VirtioInterruptType::Queue, Some(&self.queues[queue_index]))?;
@@ -463,9 +475,9 @@ impl VirtioDevice for Balloon {
     fn activate(
         &mut self,
-        mem: GuestMemoryAtomic<GuestMemoryMmap>,
+        _mem: GuestMemoryAtomic<GuestMemoryMmap>,
         interrupt_cb: Arc<dyn VirtioInterrupt>,
-        queues: Vec<Queue>,
+        queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
         mut queue_evts: Vec<EventFd>,
     ) -> ActivateResult {
         self.common.activate(&queues, &queue_evts, &interrupt_cb)?;
@@ -478,7 +490,6 @@
                 ActivateError::BadActivate
             })?,
             queues,
-            mem,
             interrupt_cb,
             inflate_queue_evt: queue_evts.remove(0),
             deflate_queue_evt: queue_evts.remove(0),


@ -10,7 +10,7 @@
use super::Error as DeviceError; use super::Error as DeviceError;
use super::{ use super::{
ActivateError, ActivateResult, EpollHelper, EpollHelperError, EpollHelperHandler, Queue, ActivateError, ActivateResult, EpollHelper, EpollHelperError, EpollHelperHandler,
RateLimiterConfig, VirtioCommon, VirtioDevice, VirtioDeviceType, VirtioInterruptType, RateLimiterConfig, VirtioCommon, VirtioDevice, VirtioDeviceType, VirtioInterruptType,
EPOLL_HELPER_EVENT_LAST, EPOLL_HELPER_EVENT_LAST,
}; };
@ -35,6 +35,7 @@ use std::{collections::HashMap, convert::TryInto};
use versionize::{VersionMap, Versionize, VersionizeResult}; use versionize::{VersionMap, Versionize, VersionizeResult};
use versionize_derive::Versionize; use versionize_derive::Versionize;
use virtio_bindings::bindings::virtio_blk::*; use virtio_bindings::bindings::virtio_blk::*;
use virtio_queue::Queue;
use vm_memory::{ByteValued, Bytes, GuestAddressSpace, GuestMemoryAtomic}; use vm_memory::{ByteValued, Bytes, GuestAddressSpace, GuestMemoryAtomic};
use vm_migration::VersionMapped; use vm_migration::VersionMapped;
use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable}; use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable};
@ -62,6 +63,10 @@ pub enum Error {
AsyncRequestFailure, AsyncRequestFailure,
/// Failed synchronizing the file /// Failed synchronizing the file
Fsync(AsyncIoError), Fsync(AsyncIoError),
/// Failed adding used index
QueueAddUsed(virtio_queue::Error),
/// Failed creating an iterator over the queue
QueueIterator(virtio_queue::Error),
} }
pub type Result<T> = result::Result<T, Error>; pub type Result<T> = result::Result<T, Error>;
@ -75,7 +80,7 @@ pub struct BlockCounters {
} }
struct BlockEpollHandler { struct BlockEpollHandler {
queue: Queue, queue: Queue<GuestMemoryAtomic<GuestMemoryMmap>>,
mem: GuestMemoryAtomic<GuestMemoryMmap>, mem: GuestMemoryAtomic<GuestMemoryMmap>,
disk_image: Box<dyn AsyncIo>, disk_image: Box<dyn AsyncIo>,
disk_nsectors: u64, disk_nsectors: u64,
@ -93,13 +98,13 @@ struct BlockEpollHandler {
impl BlockEpollHandler { impl BlockEpollHandler {
fn process_queue_submit(&mut self) -> Result<bool> { fn process_queue_submit(&mut self) -> Result<bool> {
let queue = &mut self.queue; let queue = &mut self.queue;
let mem = self.mem.memory();
let mut used_desc_heads = Vec::new(); let mut used_desc_heads = Vec::new();
let mut used_count = 0; let mut used_count = 0;
for avail_desc in queue.iter(&mem) { let mut avail_iter = queue.iter().map_err(Error::QueueIterator)?;
let mut request = Request::parse(&avail_desc, &mem).map_err(Error::RequestParsing)?; for mut desc_chain in &mut avail_iter {
let mut request = Request::parse(&mut desc_chain).map_err(Error::RequestParsing)?;
if let Some(rate_limiter) = &mut self.rate_limiter { if let Some(rate_limiter) = &mut self.rate_limiter {
// If limiter.consume() fails it means there is no more TokenType::Ops // If limiter.consume() fails it means there is no more TokenType::Ops
@ -107,7 +112,7 @@ impl BlockEpollHandler {
if !rate_limiter.consume(1, TokenType::Ops) { if !rate_limiter.consume(1, TokenType::Ops) {
// Stop processing the queue and return this descriptor chain to the // Stop processing the queue and return this descriptor chain to the
// avail ring, for later processing. // avail ring, for later processing.
queue.go_to_previous_position(); avail_iter.go_to_previous_position();
break; break;
} }
// Exercise the rate limiter only if this request is of data transfer type. // Exercise the rate limiter only if this request is of data transfer type.
@ -126,7 +131,7 @@ impl BlockEpollHandler {
rate_limiter.manual_replenish(1, TokenType::Ops); rate_limiter.manual_replenish(1, TokenType::Ops);
// Stop processing the queue and return this descriptor chain to the // Stop processing the queue and return this descriptor chain to the
// avail ring, for later processing. // avail ring, for later processing.
queue.go_to_previous_position(); avail_iter.go_to_previous_position();
break; break;
} }
}; };
@ -136,29 +141,34 @@ impl BlockEpollHandler {
if request if request
.execute_async( .execute_async(
&mem, desc_chain.memory(),
self.disk_nsectors, self.disk_nsectors,
self.disk_image.as_mut(), self.disk_image.as_mut(),
&self.disk_image_id, &self.disk_image_id,
avail_desc.index as u64, desc_chain.head_index() as u64,
) )
.map_err(Error::RequestExecuting)? .map_err(Error::RequestExecuting)?
{ {
self.request_list.insert(avail_desc.index, request); self.request_list.insert(desc_chain.head_index(), request);
} else { } else {
// We use unwrap because the request parsing process already // We use unwrap because the request parsing process already
// checked that the status_addr was valid. // checked that the status_addr was valid.
mem.write_obj(VIRTIO_BLK_S_OK, request.status_addr).unwrap(); desc_chain
.memory()
.write_obj(VIRTIO_BLK_S_OK, request.status_addr)
.unwrap();
// If no asynchronous operation has been submitted, we can // If no asynchronous operation has been submitted, we can
// simply return the used descriptor. // simply return the used descriptor.
used_desc_heads.push((avail_desc.index, 0)); used_desc_heads.push((desc_chain.head_index(), 0));
used_count += 1; used_count += 1;
} }
} }
for &(desc_index, len) in used_desc_heads.iter() { for &(desc_index, len) in used_desc_heads.iter() {
queue.add_used(&mem, desc_index, len); queue
.add_used(desc_index, len)
.map_err(Error::QueueAddUsed)?;
} }
Ok(used_count > 0) Ok(used_count > 0)
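
To make the shape of this change easier to follow, here is a minimal sketch of the new consumption loop against the in-tree virtio-queue 0.1.0 API. `drain_queue` and `handle` are hypothetical stand-ins for the device-specific logic, and the plain `vm_memory::GuestMemoryMmap` stands in for Cloud Hypervisor's bitmap-carrying alias:

```rust
use virtio_queue::{DescriptorChain, Queue};
use vm_memory::{GuestMemoryAtomic, GuestMemoryMmap};

type Mem = GuestMemoryAtomic<GuestMemoryMmap>;

enum Error {
    QueueIterator(virtio_queue::Error),
    QueueAddUsed(virtio_queue::Error),
}

// Drain every available chain, then hand the heads back on the used ring.
fn drain_queue(
    queue: &mut Queue<Mem>,
    mut handle: impl FnMut(&mut DescriptorChain<Mem>) -> u32,
) -> Result<bool, Error> {
    let mut used = Vec::new();
    // iter() is now fallible and borrows the queue mutably.
    let mut avail_iter = queue.iter().map_err(Error::QueueIterator)?;
    for mut desc_chain in &mut avail_iter {
        // The chain carries its own memory handle and head index.
        let len = handle(&mut desc_chain);
        used.push((desc_chain.head_index(), len));
    }
    let any = !used.is_empty();
    for (head_index, len) in used {
        // add_used() lost its memory argument and now returns a Result.
        queue.add_used(head_index, len).map_err(Error::QueueAddUsed)?;
    }
    Ok(any)
}
```

Note that `avail_iter` borrows the queue mutably; the later `add_used()` calls compile because the iterator's last use is the first loop.
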
@ -221,7 +231,9 @@ impl BlockEpollHandler {
} }
for &(desc_index, len) in used_desc_heads.iter() { for &(desc_index, len) in used_desc_heads.iter() {
queue.add_used(&mem, desc_index, len); queue
.add_used(desc_index, len)
.map_err(Error::QueueAddUsed)?;
} }
self.counters self.counters
@ -545,7 +557,7 @@ impl VirtioDevice for Block {
&mut self, &mut self,
mem: GuestMemoryAtomic<GuestMemoryMmap>, mem: GuestMemoryAtomic<GuestMemoryMmap>,
interrupt_cb: Arc<dyn VirtioInterrupt>, interrupt_cb: Arc<dyn VirtioInterrupt>,
mut queues: Vec<Queue>, mut queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
mut queue_evts: Vec<EventFd>, mut queue_evts: Vec<EventFd>,
) -> ActivateResult { ) -> ActivateResult {
self.common.activate(&queues, &queue_evts, &interrupt_cb)?; self.common.activate(&queues, &queue_evts, &interrupt_cb)?;
@ -557,7 +569,7 @@ impl VirtioDevice for Block {
for i in 0..queues.len() { for i in 0..queues.len() {
let queue_evt = queue_evts.remove(0); let queue_evt = queue_evts.remove(0);
let queue = queues.remove(0); let queue = queues.remove(0);
let queue_size = queue.size; let queue_size = queue.state.size;
let (kill_evt, pause_evt) = self.common.dup_eventfds(); let (kill_evt, pause_evt) = self.common.dup_eventfds();
let rate_limiter: Option<RateLimiter> = self let rate_limiter: Option<RateLimiter> = self



@ -3,9 +3,9 @@
use super::Error as DeviceError; use super::Error as DeviceError;
use super::{ use super::{
ActivateResult, EpollHelper, EpollHelperError, EpollHelperHandler, Queue, VirtioCommon, ActivateResult, EpollHelper, EpollHelperError, EpollHelperHandler, VirtioCommon, VirtioDevice,
VirtioDevice, VirtioDeviceType, VirtioInterruptType, EPOLL_HELPER_EVENT_LAST, VirtioDeviceType, VirtioInterruptType, EPOLL_HELPER_EVENT_LAST, VIRTIO_F_IOMMU_PLATFORM,
VIRTIO_F_IOMMU_PLATFORM, VIRTIO_F_VERSION_1, VIRTIO_F_VERSION_1,
}; };
use crate::seccomp_filters::Thread; use crate::seccomp_filters::Thread;
use crate::thread_helper::spawn_virtio_thread; use crate::thread_helper::spawn_virtio_thread;
@ -24,7 +24,8 @@ use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::sync::{Arc, Barrier, Mutex}; use std::sync::{Arc, Barrier, Mutex};
use versionize::{VersionMap, Versionize, VersionizeResult}; use versionize::{VersionMap, Versionize, VersionizeResult};
use versionize_derive::Versionize; use versionize_derive::Versionize;
use vm_memory::{ByteValued, Bytes, GuestAddressSpace, GuestMemoryAtomic}; use virtio_queue::Queue;
use vm_memory::{ByteValued, Bytes, GuestMemoryAtomic};
use vm_migration::VersionMapped; use vm_migration::VersionMapped;
use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable}; use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable};
use vmm_sys_util::eventfd::EventFd; use vmm_sys_util::eventfd::EventFd;
@ -72,8 +73,7 @@ impl Default for VirtioConsoleConfig {
unsafe impl ByteValued for VirtioConsoleConfig {} unsafe impl ByteValued for VirtioConsoleConfig {}
struct ConsoleEpollHandler { struct ConsoleEpollHandler {
queues: Vec<Queue>, queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
mem: GuestMemoryAtomic<GuestMemoryMmap>,
interrupt_cb: Arc<dyn VirtioInterrupt>, interrupt_cb: Arc<dyn VirtioInterrupt>,
in_buffer: Arc<Mutex<VecDeque<u8>>>, in_buffer: Arc<Mutex<VecDeque<u8>>>,
resizer: Arc<ConsoleResizer>, resizer: Arc<ConsoleResizer>,
@ -140,17 +140,21 @@ impl ConsoleEpollHandler {
return false; return false;
} }
let mem = self.mem.memory(); let mut avail_iter = recv_queue.iter().unwrap();
for avail_desc in recv_queue.iter(&mem) { for mut desc_chain in &mut avail_iter {
let len = cmp::min(avail_desc.len as u32, in_buffer.len() as u32); let desc = desc_chain.next().unwrap();
let len = cmp::min(desc.len() as u32, in_buffer.len() as u32);
let source_slice = in_buffer.drain(..len as usize).collect::<Vec<u8>>(); let source_slice = in_buffer.drain(..len as usize).collect::<Vec<u8>>();
if let Err(e) = mem.write_slice(&source_slice[..], avail_desc.addr) { if let Err(e) = desc_chain
.memory()
.write_slice(&source_slice[..], desc.addr())
{
error!("Failed to write slice: {:?}", e); error!("Failed to write slice: {:?}", e);
recv_queue.go_to_previous_position(); avail_iter.go_to_previous_position();
break; break;
} }
used_desc_heads[used_count] = (avail_desc.index, len); used_desc_heads[used_count] = (desc_chain.head_index(), len);
used_count += 1; used_count += 1;
if in_buffer.is_empty() { if in_buffer.is_empty() {
@ -159,7 +163,7 @@ impl ConsoleEpollHandler {
} }
for &(desc_index, len) in &used_desc_heads[..used_count] { for &(desc_index, len) in &used_desc_heads[..used_count] {
recv_queue.add_used(&mem, desc_index, len); recv_queue.add_used(desc_index, len).unwrap();
} }
used_count > 0 used_count > 0
@ -177,20 +181,20 @@ impl ConsoleEpollHandler {
let mut used_desc_heads = [(0, 0); QUEUE_SIZE as usize]; let mut used_desc_heads = [(0, 0); QUEUE_SIZE as usize];
let mut used_count = 0; let mut used_count = 0;
let mem = self.mem.memory(); for mut desc_chain in trans_queue.iter().unwrap() {
for avail_desc in trans_queue.iter(&mem) { let desc = desc_chain.next().unwrap();
let len;
if let Some(ref mut out) = self.endpoint.out_file() { if let Some(ref mut out) = self.endpoint.out_file() {
let _ = mem.write_to(avail_desc.addr, out, avail_desc.len as usize); let _ = desc_chain
.memory()
.write_to(desc.addr(), out, desc.len() as usize);
let _ = out.flush(); let _ = out.flush();
} }
len = avail_desc.len; used_desc_heads[used_count] = (desc_chain.head_index(), desc.len());
used_desc_heads[used_count] = (avail_desc.index, len);
used_count += 1; used_count += 1;
} }
for &(desc_index, len) in &used_desc_heads[..used_count] { for &(desc_index, len) in &used_desc_heads[..used_count] {
trans_queue.add_used(&mem, desc_index, len); trans_queue.add_used(desc_index, len).unwrap();
} }
used_count > 0 used_count > 0
} }
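
The console path shows the other recurring pattern: pop a single descriptor off the chain with `next()` and go through `desc_chain.memory()` for the copy. A hedged sketch, with the same stand-in memory type as above:

```rust
use virtio_queue::DescriptorChain;
use vm_memory::{Bytes, GuestMemoryAtomic, GuestMemoryMmap};

// Copy `data` into the first (guest-posted) descriptor of a chain and
// report the used length, mirroring the receive path above.
fn fill_first_desc(
    desc_chain: &mut DescriptorChain<GuestMemoryAtomic<GuestMemoryMmap>>,
    data: &[u8],
) -> u32 {
    // The chain itself iterates over its descriptors now.
    let desc = match desc_chain.next() {
        Some(d) => d,
        None => return 0,
    };
    let len = data.len().min(desc.len() as usize);
    // addr()/len() accessors replace the old public fields; memory()
    // replaces the separate `mem` handle the handler used to carry.
    match desc_chain.memory().write_slice(&data[..len], desc.addr()) {
        Ok(()) => len as u32,
        Err(_) => 0,
    }
}
```
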
@ -477,9 +481,9 @@ impl VirtioDevice for Console {
fn activate( fn activate(
&mut self, &mut self,
mem: GuestMemoryAtomic<GuestMemoryMmap>, _mem: GuestMemoryAtomic<GuestMemoryMmap>,
interrupt_cb: Arc<dyn VirtioInterrupt>, interrupt_cb: Arc<dyn VirtioInterrupt>,
queues: Vec<Queue>, queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
mut queue_evts: Vec<EventFd>, mut queue_evts: Vec<EventFd>,
) -> ActivateResult { ) -> ActivateResult {
self.common.activate(&queues, &queue_evts, &interrupt_cb)?; self.common.activate(&queues, &queue_evts, &interrupt_cb)?;
@ -498,7 +502,6 @@ impl VirtioDevice for Console {
let mut handler = ConsoleEpollHandler { let mut handler = ConsoleEpollHandler {
queues, queues,
mem,
interrupt_cb, interrupt_cb,
in_buffer: self.in_buffer.clone(), in_buffer: self.in_buffer.clone(),
endpoint: self.endpoint.clone(), endpoint: self.endpoint.clone(),


@ -6,7 +6,7 @@
// //
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause // SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
use crate::{ActivateError, ActivateResult, Error, Queue}; use crate::{ActivateError, ActivateResult, Error};
use crate::{GuestMemoryMmap, GuestRegionMmap}; use crate::{GuestMemoryMmap, GuestRegionMmap};
use libc::EFD_NONBLOCK; use libc::EFD_NONBLOCK;
use std::collections::HashMap; use std::collections::HashMap;
@ -17,6 +17,7 @@ use std::sync::{
Arc, Barrier, Arc, Barrier,
}; };
use std::thread; use std::thread;
use virtio_queue::Queue;
use vm_memory::{GuestAddress, GuestMemoryAtomic, GuestUsize}; use vm_memory::{GuestAddress, GuestMemoryAtomic, GuestUsize};
use vm_migration::{MigratableError, Pausable}; use vm_migration::{MigratableError, Pausable};
use vm_virtio::VirtioDeviceType; use vm_virtio::VirtioDeviceType;
@ -31,9 +32,13 @@ pub trait VirtioInterrupt: Send + Sync {
fn trigger( fn trigger(
&self, &self,
int_type: &VirtioInterruptType, int_type: &VirtioInterruptType,
queue: Option<&Queue>, queue: Option<&Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
) -> std::result::Result<(), std::io::Error>; ) -> std::result::Result<(), std::io::Error>;
fn notifier(&self, _int_type: &VirtioInterruptType, _queue: Option<&Queue>) -> Option<EventFd> { fn notifier(
&self,
_int_type: &VirtioInterruptType,
_queue: Option<&Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
) -> Option<EventFd> {
None None
} }
} }
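
Since `VirtioInterrupt` now takes the generic queue type, implementors change accordingly. A minimal sketch against the new trait shape (again using the plain vm-memory `GuestMemoryMmap` in place of the crate's alias, and relying on the trait as defined in this file):

```rust
use virtio_queue::Queue;
use vm_memory::{GuestMemoryAtomic, GuestMemoryMmap};
use vmm_sys_util::eventfd::EventFd;

// Signals one EventFd no matter which queue fired; real transports
// route per-queue MSI-X vectors here instead.
struct EventFdInterrupt(EventFd);

impl VirtioInterrupt for EventFdInterrupt {
    fn trigger(
        &self,
        _int_type: &VirtioInterruptType,
        _queue: Option<&Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
    ) -> std::result::Result<(), std::io::Error> {
        self.0.write(1)
    }
    // The default notifier() returning None is inherited.
}
```
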
@ -107,7 +112,7 @@ pub trait VirtioDevice: Send {
&mut self, &mut self,
mem: GuestMemoryAtomic<GuestMemoryMmap>, mem: GuestMemoryAtomic<GuestMemoryMmap>,
interrupt_evt: Arc<dyn VirtioInterrupt>, interrupt_evt: Arc<dyn VirtioInterrupt>,
queues: Vec<Queue>, queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
queue_evts: Vec<EventFd>, queue_evts: Vec<EventFd>,
) -> ActivateResult; ) -> ActivateResult;
@ -247,7 +252,7 @@ impl VirtioCommon {
pub fn activate( pub fn activate(
&mut self, &mut self,
queues: &[Queue], queues: &[Queue<GuestMemoryAtomic<GuestMemoryMmap>>],
queue_evts: &[EventFd], queue_evts: &[EventFd],
interrupt_cb: &Arc<dyn VirtioInterrupt>, interrupt_cb: &Arc<dyn VirtioInterrupt>,
) -> ActivateResult { ) -> ActivateResult {


@ -26,6 +26,7 @@ pub enum EpollHelperError {
Ctl(std::io::Error), Ctl(std::io::Error),
IoError(std::io::Error), IoError(std::io::Error),
Wait(std::io::Error), Wait(std::io::Error),
QueueRingIndex(virtio_queue::Error),
} }
pub const EPOLL_HELPER_EVENT_PAUSE: u16 = 0; pub const EPOLL_HELPER_EVENT_PAUSE: u16 = 0;


@ -4,8 +4,8 @@
use super::Error as DeviceError; use super::Error as DeviceError;
use super::{ use super::{
ActivateResult, DescriptorChain, EpollHelper, EpollHelperError, EpollHelperHandler, Queue, ActivateResult, EpollHelper, EpollHelperError, EpollHelperHandler, VirtioCommon, VirtioDevice,
VirtioCommon, VirtioDevice, VirtioDeviceType, EPOLL_HELPER_EVENT_LAST, VIRTIO_F_VERSION_1, VirtioDeviceType, EPOLL_HELPER_EVENT_LAST, VIRTIO_F_VERSION_1,
}; };
use crate::seccomp_filters::Thread; use crate::seccomp_filters::Thread;
use crate::thread_helper::spawn_virtio_thread; use crate::thread_helper::spawn_virtio_thread;
@ -23,11 +23,9 @@ use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Barrier, RwLock}; use std::sync::{Arc, Barrier, RwLock};
use versionize::{VersionMap, Versionize, VersionizeResult}; use versionize::{VersionMap, Versionize, VersionizeResult};
use versionize_derive::Versionize; use versionize_derive::Versionize;
use virtio_queue::{AccessPlatform, DescriptorChain, Queue};
use vm_device::dma_mapping::ExternalDmaMapping; use vm_device::dma_mapping::ExternalDmaMapping;
use vm_memory::{ use vm_memory::{Address, ByteValued, Bytes, GuestAddress, GuestMemoryAtomic, GuestMemoryError};
Address, ByteValued, Bytes, GuestAddress, GuestAddressSpace, GuestMemoryAtomic,
GuestMemoryError,
};
use vm_migration::VersionMapped; use vm_migration::VersionMapped;
use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable}; use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable};
use vmm_sys_util::eventfd::EventFd; use vmm_sys_util::eventfd::EventFd;
@ -352,27 +350,36 @@ impl Request {
// is created based on the information provided from the guest driver for // is created based on the information provided from the guest driver for
// virtio-iommu (giving the link device_id <=> domain). // virtio-iommu (giving the link device_id <=> domain).
fn parse( fn parse(
avail_desc: &DescriptorChain, desc_chain: &mut DescriptorChain<GuestMemoryAtomic<GuestMemoryMmap>>,
mem: &GuestMemoryMmap,
mapping: &Arc<IommuMapping>, mapping: &Arc<IommuMapping>,
ext_mapping: &BTreeMap<u32, Arc<dyn ExternalDmaMapping>>, ext_mapping: &BTreeMap<u32, Arc<dyn ExternalDmaMapping>>,
ext_domain_mapping: &mut BTreeMap<u32, Arc<dyn ExternalDmaMapping>>, ext_domain_mapping: &mut BTreeMap<u32, Arc<dyn ExternalDmaMapping>>,
msi_iova_space: (u64, u64), msi_iova_space: (u64, u64),
) -> result::Result<usize, Error> { ) -> result::Result<usize, Error> {
// The head contains the request type which MUST be readable. let desc = desc_chain
if avail_desc.is_write_only() { .next()
.ok_or(Error::DescriptorChainTooShort)
.map_err(|e| {
error!("Missing head descriptor");
e
})?;
// The descriptor contains the request type which MUST be readable.
if desc.is_write_only() {
return Err(Error::UnexpectedWriteOnlyDescriptor); return Err(Error::UnexpectedWriteOnlyDescriptor);
} }
if (avail_desc.len as usize) < size_of::<VirtioIommuReqHead>() { if (desc.len() as usize) < size_of::<VirtioIommuReqHead>() {
return Err(Error::InvalidRequest); return Err(Error::InvalidRequest);
} }
let req_head: VirtioIommuReqHead = let req_head: VirtioIommuReqHead = desc_chain
mem.read_obj(avail_desc.addr).map_err(Error::GuestMemory)?; .memory()
.read_obj(desc.addr())
.map_err(Error::GuestMemory)?;
let req_offset = size_of::<VirtioIommuReqHead>(); let req_offset = size_of::<VirtioIommuReqHead>();
let desc_size_left = (avail_desc.len as usize) - req_offset; let desc_size_left = (desc.len() as usize) - req_offset;
let req_addr = if let Some(addr) = avail_desc.addr.checked_add(req_offset as u64) { let req_addr = if let Some(addr) = desc.addr().checked_add(req_offset as u64) {
addr addr
} else { } else {
return Err(Error::InvalidRequest); return Err(Error::InvalidRequest);
@ -389,7 +396,8 @@ impl Request {
return Err(Error::InvalidAttachRequest); return Err(Error::InvalidAttachRequest);
} }
let req: VirtioIommuReqAttach = mem let req: VirtioIommuReqAttach = desc_chain
.memory()
.read_obj(req_addr as GuestAddress) .read_obj(req_addr as GuestAddress)
.map_err(Error::GuestMemory)?; .map_err(Error::GuestMemory)?;
debug!("Attach request {:?}", req); debug!("Attach request {:?}", req);
@ -419,7 +427,8 @@ impl Request {
return Err(Error::InvalidDetachRequest); return Err(Error::InvalidDetachRequest);
} }
let req: VirtioIommuReqDetach = mem let req: VirtioIommuReqDetach = desc_chain
.memory()
.read_obj(req_addr as GuestAddress) .read_obj(req_addr as GuestAddress)
.map_err(Error::GuestMemory)?; .map_err(Error::GuestMemory)?;
debug!("Detach request {:?}", req); debug!("Detach request {:?}", req);
@ -445,7 +454,8 @@ impl Request {
return Err(Error::InvalidMapRequest); return Err(Error::InvalidMapRequest);
} }
let req: VirtioIommuReqMap = mem let req: VirtioIommuReqMap = desc_chain
.memory()
.read_obj(req_addr as GuestAddress) .read_obj(req_addr as GuestAddress)
.map_err(Error::GuestMemory)?; .map_err(Error::GuestMemory)?;
debug!("Map request {:?}", req); debug!("Map request {:?}", req);
@ -481,7 +491,8 @@ impl Request {
return Err(Error::InvalidUnmapRequest); return Err(Error::InvalidUnmapRequest);
} }
let req: VirtioIommuReqUnmap = mem let req: VirtioIommuReqUnmap = desc_chain
.memory()
.read_obj(req_addr as GuestAddress) .read_obj(req_addr as GuestAddress)
.map_err(Error::GuestMemory)?; .map_err(Error::GuestMemory)?;
debug!("Unmap request {:?}", req); debug!("Unmap request {:?}", req);
@ -510,7 +521,8 @@ impl Request {
return Err(Error::InvalidProbeRequest); return Err(Error::InvalidProbeRequest);
} }
let req: VirtioIommuReqProbe = mem let req: VirtioIommuReqProbe = desc_chain
.memory()
.read_obj(req_addr as GuestAddress) .read_obj(req_addr as GuestAddress)
.map_err(Error::GuestMemory)?; .map_err(Error::GuestMemory)?;
debug!("Probe request {:?}", req); debug!("Probe request {:?}", req);
@ -534,16 +546,14 @@ impl Request {
_ => return Err(Error::InvalidRequest), _ => return Err(Error::InvalidRequest),
}; };
let status_desc = avail_desc let status_desc = desc_chain.next().ok_or(Error::DescriptorChainTooShort)?;
.next_descriptor()
.ok_or(Error::DescriptorChainTooShort)?;
// The status MUST always be writable // The status MUST always be writable
if !status_desc.is_write_only() { if !status_desc.is_write_only() {
return Err(Error::UnexpectedReadOnlyDescriptor); return Err(Error::UnexpectedReadOnlyDescriptor);
} }
if status_desc.len < hdr_len + size_of::<VirtioIommuReqTail>() as u32 { if status_desc.len() < hdr_len + size_of::<VirtioIommuReqTail>() as u32 {
return Err(Error::BufferLengthTooSmall); return Err(Error::BufferLengthTooSmall);
} }
@ -553,7 +563,9 @@ impl Request {
}; };
reply.extend_from_slice(tail.as_slice()); reply.extend_from_slice(tail.as_slice());
mem.write_slice(reply.as_slice(), status_desc.addr) desc_chain
.memory()
.write_slice(reply.as_slice(), status_desc.addr())
.map_err(Error::GuestMemory)?; .map_err(Error::GuestMemory)?;
Ok((hdr_len as usize) + size_of::<VirtioIommuReqTail>()) Ok((hdr_len as usize) + size_of::<VirtioIommuReqTail>())
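
The parsers touched by this commit (iommu here, mem and pmem below) all walk the same two-descriptor layout. Roughly, assuming a `ByteValued` header type as a stand-in for the per-device request struct:

```rust
use virtio_queue::DescriptorChain;
use vm_memory::{ByteValued, Bytes, GuestAddress, GuestMemoryAtomic, GuestMemoryMmap};

// One device-readable head descriptor holding the request, then one
// device-writable status descriptor; returns the request and the
// address to write the status to.
fn parse_two_desc<ReqHead: ByteValued>(
    desc_chain: &mut DescriptorChain<GuestMemoryAtomic<GuestMemoryMmap>>,
) -> Option<(ReqHead, GuestAddress)> {
    let head = desc_chain.next()?;
    // The request MUST be readable by the device.
    if head.is_write_only() {
        return None;
    }
    let req: ReqHead = desc_chain.memory().read_obj(head.addr()).ok()?;
    // next() on the chain replaces the old next_descriptor() helper.
    let status = desc_chain.next()?;
    // The status MUST be writable by the device.
    if !status.is_write_only() {
        return None;
    }
    Some((req, status.addr()))
}
```
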
@ -561,8 +573,7 @@ impl Request {
} }
struct IommuEpollHandler { struct IommuEpollHandler {
queues: Vec<Queue>, queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
mem: GuestMemoryAtomic<GuestMemoryMmap>,
interrupt_cb: Arc<dyn VirtioInterrupt>, interrupt_cb: Arc<dyn VirtioInterrupt>,
queue_evts: Vec<EventFd>, queue_evts: Vec<EventFd>,
kill_evt: EventFd, kill_evt: EventFd,
@ -577,11 +588,9 @@ impl IommuEpollHandler {
fn request_queue(&mut self) -> bool { fn request_queue(&mut self) -> bool {
let mut used_desc_heads = [(0, 0); QUEUE_SIZE as usize]; let mut used_desc_heads = [(0, 0); QUEUE_SIZE as usize];
let mut used_count = 0; let mut used_count = 0;
let mem = self.mem.memory(); for mut desc_chain in self.queues[0].iter().unwrap() {
for avail_desc in self.queues[0].iter(&mem) {
let len = match Request::parse( let len = match Request::parse(
&avail_desc, &mut desc_chain,
&mem,
&self.mapping, &self.mapping,
&self.ext_mapping, &self.ext_mapping,
&mut self.ext_domain_mapping, &mut self.ext_domain_mapping,
@ -594,12 +603,12 @@ impl IommuEpollHandler {
} }
}; };
used_desc_heads[used_count] = (avail_desc.index, len); used_desc_heads[used_count] = (desc_chain.head_index(), len);
used_count += 1; used_count += 1;
} }
for &(desc_index, len) in &used_desc_heads[..used_count] { for &(desc_index, len) in &used_desc_heads[..used_count] {
self.queues[0].add_used(&mem, desc_index, len); self.queues[0].add_used(desc_index, len).unwrap();
} }
used_count > 0 used_count > 0
} }
@ -608,7 +617,10 @@ impl IommuEpollHandler {
false false
} }
fn signal_used_queue(&self, queue: &Queue) -> result::Result<(), DeviceError> { fn signal_used_queue(
&self,
queue: &Queue<GuestMemoryAtomic<GuestMemoryMmap>>,
) -> result::Result<(), DeviceError> {
self.interrupt_cb self.interrupt_cb
.trigger(&VirtioInterruptType::Queue, Some(queue)) .trigger(&VirtioInterruptType::Queue, Some(queue))
.map_err(|e| { .map_err(|e| {
@ -666,12 +678,13 @@ impl EpollHelperHandler for IommuEpollHandler {
} }
} }
#[derive(Clone, Copy, Versionize)] #[derive(Clone, Copy, Debug, Versionize)]
struct Mapping { struct Mapping {
gpa: u64, gpa: u64,
size: u64, size: u64,
} }
#[derive(Debug)]
pub struct IommuMapping { pub struct IommuMapping {
// Domain related to an endpoint. // Domain related to an endpoint.
endpoints: Arc<RwLock<BTreeMap<u32, u32>>>, endpoints: Arc<RwLock<BTreeMap<u32, u32>>>,
@ -704,6 +717,24 @@ impl DmaRemapping for IommuMapping {
} }
} }
#[derive(Debug)]
pub struct AccessPlatformMapping {
id: u32,
mapping: Arc<IommuMapping>,
}
impl AccessPlatformMapping {
pub fn new(id: u32, mapping: Arc<IommuMapping>) -> Self {
AccessPlatformMapping { id, mapping }
}
}
impl AccessPlatform for AccessPlatformMapping {
fn translate(&self, base: u64, _size: u64) -> std::result::Result<u64, std::io::Error> {
self.mapping.translate(self.id, base)
}
}
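
`AccessPlatformMapping` adapts the virtual IOMMU to the crate's `AccessPlatform` hook, which the queue consults to translate ring addresses. Wiring one in is a single field assignment, mirroring what `VirtioPciDevice::new()` does in the transport further down:

```rust
use std::sync::Arc;
use virtio_queue::{AccessPlatform, Queue};
use vm_memory::{GuestMemoryAtomic, GuestMemoryMmap};

// Attach a translation back-end to a queue's state.
fn set_translation(
    queue: &mut Queue<GuestMemoryAtomic<GuestMemoryMmap>>,
    access_platform: Arc<dyn AccessPlatform>,
) {
    queue.state.access_platform = Some(access_platform);
}
```
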
pub struct Iommu { pub struct Iommu {
common: VirtioCommon, common: VirtioCommon,
id: String, id: String,
@ -839,16 +870,15 @@ impl VirtioDevice for Iommu {
fn activate( fn activate(
&mut self, &mut self,
mem: GuestMemoryAtomic<GuestMemoryMmap>, _mem: GuestMemoryAtomic<GuestMemoryMmap>,
interrupt_cb: Arc<dyn VirtioInterrupt>, interrupt_cb: Arc<dyn VirtioInterrupt>,
queues: Vec<Queue>, queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
queue_evts: Vec<EventFd>, queue_evts: Vec<EventFd>,
) -> ActivateResult { ) -> ActivateResult {
self.common.activate(&queues, &queue_evts, &interrupt_cb)?; self.common.activate(&queues, &queue_evts, &interrupt_cb)?;
let (kill_evt, pause_evt) = self.common.dup_eventfds(); let (kill_evt, pause_evt) = self.common.dup_eventfds();
let mut handler = IommuEpollHandler { let mut handler = IommuEpollHandler {
queues, queues,
mem,
interrupt_cb, interrupt_cb,
queue_evts, queue_evts,
kill_evt, kill_evt,


@ -51,7 +51,7 @@ pub use self::rng::*;
pub use self::vsock::*; pub use self::vsock::*;
pub use self::watchdog::*; pub use self::watchdog::*;
use vm_memory::{bitmap::AtomicBitmap, GuestAddress, GuestMemory}; use vm_memory::{bitmap::AtomicBitmap, GuestAddress, GuestMemory};
use vm_virtio::{queue::*, VirtioDeviceType}; use vm_virtio::VirtioDeviceType;
type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>; type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>;
type GuestRegionMmap = vm_memory::GuestRegionMmap<AtomicBitmap>; type GuestRegionMmap = vm_memory::GuestRegionMmap<AtomicBitmap>;
@ -115,6 +115,8 @@ pub enum Error {
SetShmRegionsNotSupported, SetShmRegionsNotSupported,
NetQueuePair(::net_util::NetQueuePairError), NetQueuePair(::net_util::NetQueuePairError),
ApplySeccompFilter(seccompiler::Error), ApplySeccompFilter(seccompiler::Error),
QueueAddUsed(virtio_queue::Error),
QueueIterator(virtio_queue::Error),
} }
#[derive(Clone, Copy, Debug, Default, Deserialize, Serialize, PartialEq)] #[derive(Clone, Copy, Debug, Default, Deserialize, Serialize, PartialEq)]


@ -14,9 +14,8 @@
use super::Error as DeviceError; use super::Error as DeviceError;
use super::{ use super::{
ActivateError, ActivateResult, DescriptorChain, EpollHelper, EpollHelperError, ActivateError, ActivateResult, EpollHelper, EpollHelperError, EpollHelperHandler, VirtioCommon,
EpollHelperHandler, Queue, VirtioCommon, VirtioDevice, VirtioDeviceType, VirtioDevice, VirtioDeviceType, EPOLL_HELPER_EVENT_LAST, VIRTIO_F_VERSION_1,
EPOLL_HELPER_EVENT_LAST, VIRTIO_F_VERSION_1,
}; };
use crate::seccomp_filters::Thread; use crate::seccomp_filters::Thread;
use crate::thread_helper::spawn_virtio_thread; use crate::thread_helper::spawn_virtio_thread;
@ -35,10 +34,11 @@ use std::sync::mpsc;
use std::sync::{Arc, Barrier, Mutex}; use std::sync::{Arc, Barrier, Mutex};
use versionize::{VersionMap, Versionize, VersionizeResult}; use versionize::{VersionMap, Versionize, VersionizeResult};
use versionize_derive::Versionize; use versionize_derive::Versionize;
use virtio_queue::{DescriptorChain, Queue};
use vm_device::dma_mapping::ExternalDmaMapping; use vm_device::dma_mapping::ExternalDmaMapping;
use vm_memory::{ use vm_memory::{
Address, ByteValued, Bytes, GuestAddress, GuestAddressSpace, GuestMemoryAtomic, Address, ByteValued, Bytes, GuestAddress, GuestMemoryAtomic, GuestMemoryError,
GuestMemoryError, GuestMemoryRegion, GuestMemoryRegion,
}; };
use vm_migration::protocol::MemoryRangeTable; use vm_migration::protocol::MemoryRangeTable;
use vm_migration::{ use vm_migration::{
@ -277,34 +277,35 @@ struct Request {
impl Request { impl Request {
fn parse( fn parse(
avail_desc: &DescriptorChain, desc_chain: &mut DescriptorChain<GuestMemoryAtomic<GuestMemoryMmap>>,
mem: &GuestMemoryMmap,
) -> result::Result<Request, Error> { ) -> result::Result<Request, Error> {
// The head contains the request type which MUST be readable. let desc = desc_chain.next().ok_or(Error::DescriptorChainTooShort)?;
if avail_desc.is_write_only() { // The descriptor contains the request type which MUST be readable.
if desc.is_write_only() {
return Err(Error::UnexpectedWriteOnlyDescriptor); return Err(Error::UnexpectedWriteOnlyDescriptor);
} }
if avail_desc.len as usize != size_of::<VirtioMemReq>() { if desc.len() as usize != size_of::<VirtioMemReq>() {
return Err(Error::InvalidRequest); return Err(Error::InvalidRequest);
} }
let req: VirtioMemReq = mem.read_obj(avail_desc.addr).map_err(Error::GuestMemory)?; let req: VirtioMemReq = desc_chain
.memory()
.read_obj(desc.addr())
.map_err(Error::GuestMemory)?;
let status_desc = avail_desc let status_desc = desc_chain.next().ok_or(Error::DescriptorChainTooShort)?;
.next_descriptor()
.ok_or(Error::DescriptorChainTooShort)?;
// The status MUST always be writable // The status MUST always be writable
if !status_desc.is_write_only() { if !status_desc.is_write_only() {
return Err(Error::UnexpectedReadOnlyDescriptor); return Err(Error::UnexpectedReadOnlyDescriptor);
} }
if (status_desc.len as usize) < size_of::<VirtioMemResp>() { if (status_desc.len() as usize) < size_of::<VirtioMemResp>() {
return Err(Error::BufferLengthTooSmall); return Err(Error::BufferLengthTooSmall);
} }
Ok(Request { Ok(Request {
req, req,
status_addr: status_desc.addr, status_addr: status_desc.addr(),
}) })
} }
@ -455,8 +456,7 @@ struct MemEpollHandler {
blocks_state: Arc<Mutex<BlocksState>>, blocks_state: Arc<Mutex<BlocksState>>,
config: Arc<Mutex<VirtioMemConfig>>, config: Arc<Mutex<VirtioMemConfig>>,
resize: ResizeSender, resize: ResizeSender,
queue: Queue, queue: Queue<GuestMemoryAtomic<GuestMemoryMmap>>,
mem: GuestMemoryAtomic<GuestMemoryMmap>,
interrupt_cb: Arc<dyn VirtioInterrupt>, interrupt_cb: Arc<dyn VirtioInterrupt>,
queue_evt: EventFd, queue_evt: EventFd,
kill_evt: EventFd, kill_evt: EventFd,
@ -656,12 +656,16 @@ impl MemEpollHandler {
fn process_queue(&mut self) -> bool { fn process_queue(&mut self) -> bool {
let mut request_list = Vec::new(); let mut request_list = Vec::new();
let mut used_count = 0; let mut used_count = 0;
let mem = self.mem.memory();
for avail_desc in self.queue.iter(&mem) { for mut desc_chain in self.queue.iter().unwrap() {
request_list.push((avail_desc.index, Request::parse(&avail_desc, &mem))); request_list.push((
desc_chain.head_index(),
Request::parse(&mut desc_chain),
desc_chain.memory().clone(),
));
} }
for (desc_index, request) in request_list.iter() { for (head_index, request, memory) in request_list {
let len = match request { let len = match request {
Err(e) => { Err(e) => {
error!("failed parse VirtioMemReq: {:?}", e); error!("failed parse VirtioMemReq: {:?}", e);
@ -671,21 +675,21 @@ impl MemEpollHandler {
VIRTIO_MEM_REQ_PLUG => { VIRTIO_MEM_REQ_PLUG => {
let resp_type = let resp_type =
self.state_change_request(r.req.addr, r.req.nb_blocks, true); self.state_change_request(r.req.addr, r.req.nb_blocks, true);
r.send_response(&mem, resp_type, 0u16) r.send_response(&memory, resp_type, 0u16)
} }
VIRTIO_MEM_REQ_UNPLUG => { VIRTIO_MEM_REQ_UNPLUG => {
let resp_type = let resp_type =
self.state_change_request(r.req.addr, r.req.nb_blocks, false); self.state_change_request(r.req.addr, r.req.nb_blocks, false);
r.send_response(&mem, resp_type, 0u16) r.send_response(&memory, resp_type, 0u16)
} }
VIRTIO_MEM_REQ_UNPLUG_ALL => { VIRTIO_MEM_REQ_UNPLUG_ALL => {
let resp_type = self.unplug_all(); let resp_type = self.unplug_all();
r.send_response(&mem, resp_type, 0u16) r.send_response(&memory, resp_type, 0u16)
} }
VIRTIO_MEM_REQ_STATE => { VIRTIO_MEM_REQ_STATE => {
let (resp_type, resp_state) = let (resp_type, resp_state) =
self.state_request(r.req.addr, r.req.nb_blocks); self.state_request(r.req.addr, r.req.nb_blocks);
r.send_response(&mem, resp_type, resp_state) r.send_response(&memory, resp_type, resp_state)
} }
_ => { _ => {
error!("VirtioMemReq unknown request type {:?}", r.req.req_type); error!("VirtioMemReq unknown request type {:?}", r.req.req_type);
@ -694,8 +698,7 @@ impl MemEpollHandler {
}, },
}; };
self.queue.add_used(&mem, *desc_index, len); self.queue.add_used(head_index, len).unwrap();
used_count += 1; used_count += 1;
} }
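
Because iteration borrows the queue mutably, this handler buffers each chain's head index together with a clone of its memory handle, and only afterwards writes the responses and returns the descriptors. A reduced sketch of that two-phase shape (the `0u8` response is a stand-in for the real response struct):

```rust
use virtio_queue::Queue;
use vm_memory::{Bytes, GuestMemoryAtomic, GuestMemoryMmap};

fn process(queue: &mut Queue<GuestMemoryAtomic<GuestMemoryMmap>>) -> bool {
    let mut pending = Vec::new();
    // Phase 1: the iterator holds the mutable borrow of the queue.
    for mut desc_chain in queue.iter().unwrap() {
        let status_addr = desc_chain.next().map(|d| d.addr());
        pending.push((
            desc_chain.head_index(),
            status_addr,
            // Keep a clone of the memory handle for use after iteration.
            desc_chain.memory().clone(),
        ));
    }
    // Phase 2: the borrow has ended; respond and return descriptors.
    let any = !pending.is_empty();
    for (head_index, status_addr, memory) in pending {
        if let Some(addr) = status_addr {
            let _ = memory.write_obj(0u8, addr);
        }
        queue.add_used(head_index, 1).unwrap();
    }
    any
}
```
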
@ -990,9 +993,9 @@ impl VirtioDevice for Mem {
fn activate( fn activate(
&mut self, &mut self,
mem: GuestMemoryAtomic<GuestMemoryMmap>, _mem: GuestMemoryAtomic<GuestMemoryMmap>,
interrupt_cb: Arc<dyn VirtioInterrupt>, interrupt_cb: Arc<dyn VirtioInterrupt>,
mut queues: Vec<Queue>, mut queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
mut queue_evts: Vec<EventFd>, mut queue_evts: Vec<EventFd>,
) -> ActivateResult { ) -> ActivateResult {
self.common.activate(&queues, &queue_evts, &interrupt_cb)?; self.common.activate(&queues, &queue_evts, &interrupt_cb)?;
@ -1004,7 +1007,6 @@ impl VirtioDevice for Mem {
config: self.config.clone(), config: self.config.clone(),
resize: self.resize.clone(), resize: self.resize.clone(),
queue: queues.remove(0), queue: queues.remove(0),
mem,
interrupt_cb, interrupt_cb,
queue_evt: queue_evts.remove(0), queue_evt: queue_evts.remove(0),
kill_evt, kill_evt,


@ -7,7 +7,7 @@
use super::Error as DeviceError; use super::Error as DeviceError;
use super::{ use super::{
ActivateError, ActivateResult, EpollHelper, EpollHelperError, EpollHelperHandler, Queue, ActivateError, ActivateResult, EpollHelper, EpollHelperError, EpollHelperHandler,
RateLimiterConfig, VirtioCommon, VirtioDevice, VirtioDeviceType, VirtioInterruptType, RateLimiterConfig, VirtioCommon, VirtioDevice, VirtioDeviceType, VirtioInterruptType,
EPOLL_HELPER_EVENT_LAST, EPOLL_HELPER_EVENT_LAST,
}; };
@ -35,7 +35,8 @@ use versionize::{VersionMap, Versionize, VersionizeResult};
use versionize_derive::Versionize; use versionize_derive::Versionize;
use virtio_bindings::bindings::virtio_net::*; use virtio_bindings::bindings::virtio_net::*;
use virtio_bindings::bindings::virtio_ring::VIRTIO_RING_F_EVENT_IDX; use virtio_bindings::bindings::virtio_ring::VIRTIO_RING_F_EVENT_IDX;
use vm_memory::{ByteValued, GuestAddressSpace, GuestMemoryAtomic}; use virtio_queue::Queue;
use vm_memory::{ByteValued, GuestMemoryAtomic};
use vm_migration::VersionMapped; use vm_migration::VersionMapped;
use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable}; use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable};
use vmm_sys_util::eventfd::EventFd; use vmm_sys_util::eventfd::EventFd;
@ -45,12 +46,11 @@ use vmm_sys_util::eventfd::EventFd;
const CTRL_QUEUE_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 1; const CTRL_QUEUE_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 1;
pub struct NetCtrlEpollHandler { pub struct NetCtrlEpollHandler {
pub mem: GuestMemoryAtomic<GuestMemoryMmap>,
pub kill_evt: EventFd, pub kill_evt: EventFd,
pub pause_evt: EventFd, pub pause_evt: EventFd,
pub ctrl_q: CtrlQueue, pub ctrl_q: CtrlQueue,
pub queue_evt: EventFd, pub queue_evt: EventFd,
pub queue: Queue, pub queue: Queue<GuestMemoryAtomic<GuestMemoryMmap>>,
} }
impl NetCtrlEpollHandler { impl NetCtrlEpollHandler {
@ -72,12 +72,11 @@ impl EpollHelperHandler for NetCtrlEpollHandler {
let ev_type = event.data as u16; let ev_type = event.data as u16;
match ev_type { match ev_type {
CTRL_QUEUE_EVENT => { CTRL_QUEUE_EVENT => {
let mem = self.mem.memory();
if let Err(e) = self.queue_evt.read() { if let Err(e) = self.queue_evt.read() {
error!("failed to get ctl queue event: {:?}", e); error!("failed to get ctl queue event: {:?}", e);
return true; return true;
} }
if let Err(e) = self.ctrl_q.process(&mem, &mut self.queue) { if let Err(e) = self.ctrl_q.process(&mut self.queue) {
error!("failed to process ctrl queue: {:?}", e); error!("failed to process ctrl queue: {:?}", e);
return true; return true;
} }
@ -125,7 +124,7 @@ struct NetEpollHandler {
interrupt_cb: Arc<dyn VirtioInterrupt>, interrupt_cb: Arc<dyn VirtioInterrupt>,
kill_evt: EventFd, kill_evt: EventFd,
pause_evt: EventFd, pause_evt: EventFd,
queue_pair: Vec<Queue>, queue_pair: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
queue_evt_pair: Vec<EventFd>, queue_evt_pair: Vec<EventFd>,
// Always generate interrupts until the driver has signalled to the device. // Always generate interrupts until the driver has signalled to the device.
// This mitigates a problem with interrupts from tap events being "lost" upon // This mitigates a problem with interrupts from tap events being "lost" upon
@ -135,7 +134,10 @@ struct NetEpollHandler {
} }
impl NetEpollHandler { impl NetEpollHandler {
fn signal_used_queue(&self, queue: &Queue) -> result::Result<(), DeviceError> { fn signal_used_queue(
&self,
queue: &Queue<GuestMemoryAtomic<GuestMemoryMmap>>,
) -> result::Result<(), DeviceError> {
self.interrupt_cb self.interrupt_cb
.trigger(&VirtioInterruptType::Queue, Some(queue)) .trigger(&VirtioInterruptType::Queue, Some(queue))
.map_err(|e| { .map_err(|e| {
@ -235,8 +237,11 @@ impl NetEpollHandler {
// If there are some already available descriptors on the RX queue, // If there are some already available descriptors on the RX queue,
// then we can start the thread while listening onto the TAP. // then we can start the thread while listening onto the TAP.
if self.queue_pair[0] if self.queue_pair[0]
.available_descriptors(&self.net.mem.as_ref().unwrap().memory()) .used_idx(Ordering::Acquire)
.unwrap() .map_err(EpollHelperError::QueueRingIndex)?
< self.queue_pair[0]
.avail_idx(Ordering::Acquire)
.map_err(EpollHelperError::QueueRingIndex)?
{ {
helper.add_event(self.net.tap.as_raw_fd(), RX_TAP_EVENT)?; helper.add_event(self.net.tap.as_raw_fd(), RX_TAP_EVENT)?;
self.net.rx_tap_listening = true; self.net.rx_tap_listening = true;
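
With the old `available_descriptors()` helper gone, the RX path compares the ring indexes directly. As a sketch (the indexes come back as `Wrapping<u16>`, so wrap-around is left to the caller, exactly as in the code above):

```rust
use std::sync::atomic::Ordering;
use virtio_queue::Queue;
use vm_memory::{GuestMemoryAtomic, GuestMemoryMmap};

// There is work pending while the device's used index trails the
// driver's avail index.
fn has_pending(
    queue: &mut Queue<GuestMemoryAtomic<GuestMemoryMmap>>,
) -> Result<bool, virtio_queue::Error> {
    Ok(queue.used_idx(Ordering::Acquire)? < queue.avail_idx(Ordering::Acquire)?)
}
```
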
@ -549,9 +554,9 @@ impl VirtioDevice for Net {
fn activate( fn activate(
&mut self, &mut self,
mem: GuestMemoryAtomic<GuestMemoryMmap>, _mem: GuestMemoryAtomic<GuestMemoryMmap>,
interrupt_cb: Arc<dyn VirtioInterrupt>, interrupt_cb: Arc<dyn VirtioInterrupt>,
mut queues: Vec<Queue>, mut queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
mut queue_evts: Vec<EventFd>, mut queue_evts: Vec<EventFd>,
) -> ActivateResult { ) -> ActivateResult {
self.common.activate(&queues, &queue_evts, &interrupt_cb)?; self.common.activate(&queues, &queue_evts, &interrupt_cb)?;
@ -563,7 +568,6 @@ impl VirtioDevice for Net {
let (kill_evt, pause_evt) = self.common.dup_eventfds(); let (kill_evt, pause_evt) = self.common.dup_eventfds();
let mut ctrl_handler = NetCtrlEpollHandler { let mut ctrl_handler = NetCtrlEpollHandler {
mem: mem.clone(),
kill_evt, kill_evt,
pause_evt, pause_evt,
ctrl_q: CtrlQueue::new(self.taps.clone()), ctrl_q: CtrlQueue::new(self.taps.clone()),
@ -632,7 +636,6 @@ impl VirtioDevice for Net {
let mut handler = NetEpollHandler { let mut handler = NetEpollHandler {
net: NetQueuePair { net: NetQueuePair {
mem: Some(mem.clone()),
tap_for_write_epoll: tap.clone(), tap_for_write_epoll: tap.clone(),
tap, tap,
rx, rx,


@ -8,9 +8,9 @@
use super::Error as DeviceError; use super::Error as DeviceError;
use super::{ use super::{
ActivateError, ActivateResult, DescriptorChain, EpollHelper, EpollHelperError, ActivateError, ActivateResult, EpollHelper, EpollHelperError, EpollHelperHandler,
EpollHelperHandler, Queue, UserspaceMapping, VirtioCommon, VirtioDevice, VirtioDeviceType, UserspaceMapping, VirtioCommon, VirtioDevice, VirtioDeviceType, EPOLL_HELPER_EVENT_LAST,
EPOLL_HELPER_EVENT_LAST, VIRTIO_F_IOMMU_PLATFORM, VIRTIO_F_VERSION_1, VIRTIO_F_IOMMU_PLATFORM, VIRTIO_F_VERSION_1,
}; };
use crate::seccomp_filters::Thread; use crate::seccomp_filters::Thread;
use crate::thread_helper::spawn_virtio_thread; use crate::thread_helper::spawn_virtio_thread;
@ -27,10 +27,8 @@ use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Barrier}; use std::sync::{Arc, Barrier};
use versionize::{VersionMap, Versionize, VersionizeResult}; use versionize::{VersionMap, Versionize, VersionizeResult};
use versionize_derive::Versionize; use versionize_derive::Versionize;
use vm_memory::{ use virtio_queue::{DescriptorChain, Queue};
Address, ByteValued, Bytes, GuestAddress, GuestAddressSpace, GuestMemoryAtomic, use vm_memory::{Address, ByteValued, Bytes, GuestAddress, GuestMemoryAtomic, GuestMemoryError};
GuestMemoryError,
};
use vm_migration::VersionMapped; use vm_migration::VersionMapped;
use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable}; use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable};
use vmm_sys_util::eventfd::EventFd; use vmm_sys_util::eventfd::EventFd;
@ -116,48 +114,48 @@ struct Request {
impl Request { impl Request {
fn parse( fn parse(
avail_desc: &DescriptorChain, desc_chain: &mut DescriptorChain<GuestMemoryAtomic<GuestMemoryMmap>>,
mem: &GuestMemoryMmap,
) -> result::Result<Request, Error> { ) -> result::Result<Request, Error> {
// The head contains the request type which MUST be readable. let desc = desc_chain.next().ok_or(Error::DescriptorChainTooShort)?;
if avail_desc.is_write_only() { // The descriptor contains the request type which MUST be readable.
if desc.is_write_only() {
return Err(Error::UnexpectedWriteOnlyDescriptor); return Err(Error::UnexpectedWriteOnlyDescriptor);
} }
if avail_desc.len as usize != size_of::<VirtioPmemReq>() { if desc.len() as usize != size_of::<VirtioPmemReq>() {
return Err(Error::InvalidRequest); return Err(Error::InvalidRequest);
} }
let request: VirtioPmemReq = mem.read_obj(avail_desc.addr).map_err(Error::GuestMemory)?; let request: VirtioPmemReq = desc_chain
.memory()
.read_obj(desc.addr())
.map_err(Error::GuestMemory)?;
let request_type = match request.type_ { let request_type = match request.type_ {
VIRTIO_PMEM_REQ_TYPE_FLUSH => RequestType::Flush, VIRTIO_PMEM_REQ_TYPE_FLUSH => RequestType::Flush,
_ => return Err(Error::InvalidRequest), _ => return Err(Error::InvalidRequest),
}; };
let status_desc = avail_desc let status_desc = desc_chain.next().ok_or(Error::DescriptorChainTooShort)?;
.next_descriptor()
.ok_or(Error::DescriptorChainTooShort)?;
// The status MUST always be writable // The status MUST always be writable
if !status_desc.is_write_only() { if !status_desc.is_write_only() {
return Err(Error::UnexpectedReadOnlyDescriptor); return Err(Error::UnexpectedReadOnlyDescriptor);
} }
if (status_desc.len as usize) < size_of::<VirtioPmemResp>() { if (status_desc.len() as usize) < size_of::<VirtioPmemResp>() {
return Err(Error::BufferLengthTooSmall); return Err(Error::BufferLengthTooSmall);
} }
Ok(Request { Ok(Request {
type_: request_type, type_: request_type,
status_addr: status_desc.addr, status_addr: status_desc.addr(),
}) })
} }
} }
struct PmemEpollHandler { struct PmemEpollHandler {
queue: Queue, queue: Queue<GuestMemoryAtomic<GuestMemoryMmap>>,
mem: GuestMemoryAtomic<GuestMemoryMmap>,
disk: File, disk: File,
interrupt_cb: Arc<dyn VirtioInterrupt>, interrupt_cb: Arc<dyn VirtioInterrupt>,
queue_evt: EventFd, queue_evt: EventFd,
@ -169,9 +167,8 @@ impl PmemEpollHandler {
fn process_queue(&mut self) -> bool { fn process_queue(&mut self) -> bool {
let mut used_desc_heads = [(0, 0); QUEUE_SIZE as usize]; let mut used_desc_heads = [(0, 0); QUEUE_SIZE as usize];
let mut used_count = 0; let mut used_count = 0;
let mem = self.mem.memory(); for mut desc_chain in self.queue.iter().unwrap() {
for avail_desc in self.queue.iter(&mem) { let len = match Request::parse(&mut desc_chain) {
let len = match Request::parse(&avail_desc, &mem) {
Ok(ref req) if (req.type_ == RequestType::Flush) => { Ok(ref req) if (req.type_ == RequestType::Flush) => {
let status_code = match self.disk.sync_all() { let status_code = match self.disk.sync_all() {
Ok(()) => VIRTIO_PMEM_RESP_TYPE_OK, Ok(()) => VIRTIO_PMEM_RESP_TYPE_OK,
@ -182,7 +179,7 @@ impl PmemEpollHandler {
}; };
let resp = VirtioPmemResp { ret: status_code }; let resp = VirtioPmemResp { ret: status_code };
match mem.write_obj(resp, req.status_addr) { match desc_chain.memory().write_obj(resp, req.status_addr) {
Ok(_) => size_of::<VirtioPmemResp>() as u32, Ok(_) => size_of::<VirtioPmemResp>() as u32,
Err(e) => { Err(e) => {
error!("bad guest memory address: {}", e); error!("bad guest memory address: {}", e);
@ -201,12 +198,12 @@ impl PmemEpollHandler {
} }
}; };
used_desc_heads[used_count] = (avail_desc.index, len); used_desc_heads[used_count] = (desc_chain.head_index(), len);
used_count += 1; used_count += 1;
} }
for &(desc_index, len) in &used_desc_heads[..used_count] { for &(desc_index, len) in &used_desc_heads[..used_count] {
self.queue.add_used(&mem, desc_index, len); self.queue.add_used(desc_index, len).unwrap();
} }
used_count > 0 used_count > 0
} }
@ -369,9 +366,9 @@ impl VirtioDevice for Pmem {
fn activate( fn activate(
&mut self, &mut self,
mem: GuestMemoryAtomic<GuestMemoryMmap>, _mem: GuestMemoryAtomic<GuestMemoryMmap>,
interrupt_cb: Arc<dyn VirtioInterrupt>, interrupt_cb: Arc<dyn VirtioInterrupt>,
mut queues: Vec<Queue>, mut queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
mut queue_evts: Vec<EventFd>, mut queue_evts: Vec<EventFd>,
) -> ActivateResult { ) -> ActivateResult {
self.common.activate(&queues, &queue_evts, &interrupt_cb)?; self.common.activate(&queues, &queue_evts, &interrupt_cb)?;
@ -383,7 +380,6 @@ impl VirtioDevice for Pmem {
})?; })?;
let mut handler = PmemEpollHandler { let mut handler = PmemEpollHandler {
queue: queues.remove(0), queue: queues.remove(0),
mem,
disk, disk,
interrupt_cb, interrupt_cb,
queue_evt: queue_evts.remove(0), queue_evt: queue_evts.remove(0),


@ -4,8 +4,8 @@
use super::Error as DeviceError; use super::Error as DeviceError;
use super::{ use super::{
ActivateError, ActivateResult, EpollHelper, EpollHelperError, EpollHelperHandler, Queue, ActivateError, ActivateResult, EpollHelper, EpollHelperError, EpollHelperHandler, VirtioCommon,
VirtioCommon, VirtioDevice, VirtioDeviceType, EPOLL_HELPER_EVENT_LAST, VIRTIO_F_IOMMU_PLATFORM, VirtioDevice, VirtioDeviceType, EPOLL_HELPER_EVENT_LAST, VIRTIO_F_IOMMU_PLATFORM,
VIRTIO_F_VERSION_1, VIRTIO_F_VERSION_1,
}; };
use crate::seccomp_filters::Thread; use crate::seccomp_filters::Thread;
@ -21,7 +21,8 @@ use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Barrier}; use std::sync::{Arc, Barrier};
use versionize::{VersionMap, Versionize, VersionizeResult}; use versionize::{VersionMap, Versionize, VersionizeResult};
use versionize_derive::Versionize; use versionize_derive::Versionize;
use vm_memory::{Bytes, GuestAddressSpace, GuestMemoryAtomic}; use virtio_queue::Queue;
use vm_memory::{Bytes, GuestMemoryAtomic};
use vm_migration::VersionMapped; use vm_migration::VersionMapped;
use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable}; use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable};
use vmm_sys_util::eventfd::EventFd; use vmm_sys_util::eventfd::EventFd;
@ -33,8 +34,7 @@ const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE];
const QUEUE_AVAIL_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 1; const QUEUE_AVAIL_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 1;
struct RngEpollHandler { struct RngEpollHandler {
queues: Vec<Queue>, queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
mem: GuestMemoryAtomic<GuestMemoryMmap>,
random_file: File, random_file: File,
interrupt_cb: Arc<dyn VirtioInterrupt>, interrupt_cb: Arc<dyn VirtioInterrupt>,
queue_evt: EventFd, queue_evt: EventFd,
@ -48,31 +48,28 @@ impl RngEpollHandler {
let mut used_desc_heads = [(0, 0); QUEUE_SIZE as usize]; let mut used_desc_heads = [(0, 0); QUEUE_SIZE as usize];
let mut used_count = 0; let mut used_count = 0;
let mem = self.mem.memory(); for mut desc_chain in queue.iter().unwrap() {
for avail_desc in queue.iter(&mem) { let desc = desc_chain.next().unwrap();
let mut len = 0; let mut len = 0;
// Drivers can only read from the random device. // Drivers can only read from the random device.
if avail_desc.is_write_only() { if desc.is_write_only() {
// Fill the read with data from the random device on the host. // Fill the read with data from the random device on the host.
if mem if desc_chain
.read_from( .memory()
avail_desc.addr, .read_from(desc.addr(), &mut self.random_file, desc.len() as usize)
&mut self.random_file,
avail_desc.len as usize,
)
.is_ok() .is_ok()
{ {
len = avail_desc.len; len = desc.len();
} }
} }
used_desc_heads[used_count] = (avail_desc.index, len); used_desc_heads[used_count] = (desc_chain.head_index(), len);
used_count += 1; used_count += 1;
} }
for &(desc_index, len) in &used_desc_heads[..used_count] { for &(desc_index, len) in &used_desc_heads[..used_count] {
queue.add_used(&mem, desc_index, len); queue.add_used(desc_index, len).unwrap();
} }
used_count > 0 used_count > 0
} }
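
The rng path is the inverse of the console receive path: the guest posts a device-writable buffer and the host fills it straight from a file via `read_from()`. A sketch with the same stand-in memory type as the earlier examples:

```rust
use std::fs::File;
use virtio_queue::DescriptorChain;
use vm_memory::{Bytes, GuestMemoryAtomic, GuestMemoryMmap};

fn fill_from_file(
    desc_chain: &mut DescriptorChain<GuestMemoryAtomic<GuestMemoryMmap>>,
    src: &mut File,
) -> u32 {
    let desc = match desc_chain.next() {
        // Drivers can only read from this device, so the buffer MUST
        // be device-writable.
        Some(d) if d.is_write_only() => d,
        _ => return 0,
    };
    match desc_chain
        .memory()
        .read_from(desc.addr(), src, desc.len() as usize)
    {
        Ok(_) => desc.len(),
        Err(_) => 0,
    }
}
```
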
@ -213,9 +210,9 @@ impl VirtioDevice for Rng {
fn activate( fn activate(
&mut self, &mut self,
mem: GuestMemoryAtomic<GuestMemoryMmap>, _mem: GuestMemoryAtomic<GuestMemoryMmap>,
interrupt_cb: Arc<dyn VirtioInterrupt>, interrupt_cb: Arc<dyn VirtioInterrupt>,
queues: Vec<Queue>, queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
mut queue_evts: Vec<EventFd>, mut queue_evts: Vec<EventFd>,
) -> ActivateResult { ) -> ActivateResult {
self.common.activate(&queues, &queue_evts, &interrupt_cb)?; self.common.activate(&queues, &queue_evts, &interrupt_cb)?;
@ -228,7 +225,6 @@ impl VirtioDevice for Rng {
})?; })?;
let mut handler = RngEpollHandler { let mut handler = RngEpollHandler {
queues, queues,
mem,
random_file, random_file,
interrupt_cb, interrupt_cb,
queue_evt: queue_evts.remove(0), queue_evt: queue_evts.remove(0),


@ -6,13 +6,14 @@
// //
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause // SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
use crate::{Queue, VirtioDevice}; use crate::{GuestMemoryMmap, VirtioDevice};
use byteorder::{ByteOrder, LittleEndian}; use byteorder::{ByteOrder, LittleEndian};
use std::sync::atomic::{AtomicU16, Ordering}; use std::sync::atomic::{AtomicU16, Ordering};
use std::sync::{Arc, Mutex}; use std::sync::{Arc, Mutex};
use versionize::{VersionMap, Versionize, VersionizeResult}; use versionize::{VersionMap, Versionize, VersionizeResult};
use versionize_derive::Versionize; use versionize_derive::Versionize;
use vm_memory::GuestAddress; use virtio_queue::Queue;
use vm_memory::{GuestAddress, GuestMemoryAtomic};
use vm_migration::{MigratableError, Pausable, Snapshot, Snapshottable, VersionMapped}; use vm_migration::{MigratableError, Pausable, Snapshot, Snapshottable, VersionMapped};
#[derive(Clone, Versionize)] #[derive(Clone, Versionize)]
@ -83,7 +84,7 @@ impl VirtioPciCommonConfig {
&mut self, &mut self,
offset: u64, offset: u64,
data: &mut [u8], data: &mut [u8],
queues: &mut Vec<Queue>, queues: &mut Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
device: Arc<Mutex<dyn VirtioDevice>>, device: Arc<Mutex<dyn VirtioDevice>>,
) { ) {
assert!(data.len() <= 8); assert!(data.len() <= 8);
@ -113,7 +114,7 @@ impl VirtioPciCommonConfig {
&mut self, &mut self,
offset: u64, offset: u64,
data: &[u8], data: &[u8],
queues: &mut Vec<Queue>, queues: &mut Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
device: Arc<Mutex<dyn VirtioDevice>>, device: Arc<Mutex<dyn VirtioDevice>>,
) { ) {
assert!(data.len() <= 8); assert!(data.len() <= 8);
@ -152,16 +153,20 @@ impl VirtioPciCommonConfig {
} }
} }
fn read_common_config_word(&self, offset: u64, queues: &[Queue]) -> u16 { fn read_common_config_word(
&self,
offset: u64,
queues: &[Queue<GuestMemoryAtomic<GuestMemoryMmap>>],
) -> u16 {
debug!("read_common_config_word: offset 0x{:x}", offset); debug!("read_common_config_word: offset 0x{:x}", offset);
match offset { match offset {
0x10 => self.msix_config.load(Ordering::Acquire), 0x10 => self.msix_config.load(Ordering::Acquire),
0x12 => queues.len() as u16, // num_queues 0x12 => queues.len() as u16, // num_queues
0x16 => self.queue_select, 0x16 => self.queue_select,
0x18 => self.with_queue(queues, |q| q.size).unwrap_or(0), 0x18 => self.with_queue(queues, |q| q.state.size).unwrap_or(0),
0x1a => self.with_queue(queues, |q| q.vector).unwrap_or(0), 0x1a => self.with_queue(queues, |q| q.state.vector).unwrap_or(0),
0x1c => { 0x1c => {
if self.with_queue(queues, |q| q.ready).unwrap_or(false) { if self.with_queue(queues, |q| q.state.ready).unwrap_or(false) {
1 1
} else { } else {
0 0
@ -175,13 +180,18 @@ impl VirtioPciCommonConfig {
} }
} }
fn write_common_config_word(&mut self, offset: u64, value: u16, queues: &mut Vec<Queue>) { fn write_common_config_word(
&mut self,
offset: u64,
value: u16,
queues: &mut Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
) {
debug!("write_common_config_word: offset 0x{:x}", offset); debug!("write_common_config_word: offset 0x{:x}", offset);
match offset { match offset {
0x10 => self.msix_config.store(value, Ordering::Release), 0x10 => self.msix_config.store(value, Ordering::Release),
0x16 => self.queue_select = value, 0x16 => self.queue_select = value,
0x18 => self.with_queue_mut(queues, |q| q.size = value), 0x18 => self.with_queue_mut(queues, |q| q.state.size = value),
0x1a => self.with_queue_mut(queues, |q| q.vector = value), 0x1a => self.with_queue_mut(queues, |q| q.state.vector = value),
0x1c => self.with_queue_mut(queues, |q| q.enable(value == 1)), 0x1c => self.with_queue_mut(queues, |q| q.enable(value == 1)),
_ => { _ => {
warn!("invalid virtio register word write: 0x{:x}", offset); warn!("invalid virtio register word write: 0x{:x}", offset);
@ -215,7 +225,7 @@ impl VirtioPciCommonConfig {
&mut self, &mut self,
offset: u64, offset: u64,
value: u32, value: u32,
queues: &mut Vec<Queue>, queues: &mut Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
device: Arc<Mutex<dyn VirtioDevice>>, device: Arc<Mutex<dyn VirtioDevice>>,
) { ) {
debug!("write_common_config_dword: offset 0x{:x}", offset); debug!("write_common_config_dword: offset 0x{:x}", offset);
@ -242,12 +252,12 @@ impl VirtioPciCommonConfig {
); );
} }
} }
0x20 => self.with_queue_mut(queues, |q| lo(&mut q.desc_table, value)), 0x20 => self.with_queue_mut(queues, |q| lo(&mut q.state.desc_table, value)),
0x24 => self.with_queue_mut(queues, |q| hi(&mut q.desc_table, value)), 0x24 => self.with_queue_mut(queues, |q| hi(&mut q.state.desc_table, value)),
0x28 => self.with_queue_mut(queues, |q| lo(&mut q.avail_ring, value)), 0x28 => self.with_queue_mut(queues, |q| lo(&mut q.state.avail_ring, value)),
0x2c => self.with_queue_mut(queues, |q| hi(&mut q.avail_ring, value)), 0x2c => self.with_queue_mut(queues, |q| hi(&mut q.state.avail_ring, value)),
0x30 => self.with_queue_mut(queues, |q| lo(&mut q.used_ring, value)), 0x30 => self.with_queue_mut(queues, |q| lo(&mut q.state.used_ring, value)),
0x34 => self.with_queue_mut(queues, |q| hi(&mut q.used_ring, value)), 0x34 => self.with_queue_mut(queues, |q| hi(&mut q.state.used_ring, value)),
_ => { _ => {
warn!("invalid virtio register dword write: 0x{:x}", offset); warn!("invalid virtio register dword write: 0x{:x}", offset);
} }
@ -259,26 +269,39 @@ impl VirtioPciCommonConfig {
0 // Assume the guest has no reason to read write-only registers. 0 // Assume the guest has no reason to read write-only registers.
} }
fn write_common_config_qword(&mut self, offset: u64, value: u64, queues: &mut Vec<Queue>) { fn write_common_config_qword(
&mut self,
offset: u64,
value: u64,
queues: &mut Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
) {
debug!("write_common_config_qword: offset 0x{:x}", offset); debug!("write_common_config_qword: offset 0x{:x}", offset);
match offset { match offset {
0x20 => self.with_queue_mut(queues, |q| q.desc_table = GuestAddress(value)), 0x20 => self.with_queue_mut(queues, |q| q.state.desc_table = GuestAddress(value)),
0x28 => self.with_queue_mut(queues, |q| q.avail_ring = GuestAddress(value)), 0x28 => self.with_queue_mut(queues, |q| q.state.avail_ring = GuestAddress(value)),
0x30 => self.with_queue_mut(queues, |q| q.used_ring = GuestAddress(value)), 0x30 => self.with_queue_mut(queues, |q| q.state.used_ring = GuestAddress(value)),
_ => { _ => {
warn!("invalid virtio register qword write: 0x{:x}", offset); warn!("invalid virtio register qword write: 0x{:x}", offset);
} }
} }
} }
fn with_queue<U, F>(&self, queues: &[Queue], f: F) -> Option<U> fn with_queue<U, F>(
&self,
queues: &[Queue<GuestMemoryAtomic<GuestMemoryMmap>>],
f: F,
) -> Option<U>
where where
F: FnOnce(&Queue) -> U, F: FnOnce(&Queue<GuestMemoryAtomic<GuestMemoryMmap>>) -> U,
{ {
queues.get(self.queue_select as usize).map(f) queues.get(self.queue_select as usize).map(f)
} }
fn with_queue_mut<F: FnOnce(&mut Queue)>(&self, queues: &mut Vec<Queue>, f: F) { fn with_queue_mut<F: FnOnce(&mut Queue<GuestMemoryAtomic<GuestMemoryMmap>>)>(
&self,
queues: &mut Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
f: F,
) {
if let Some(queue) = queues.get_mut(self.queue_select as usize) { if let Some(queue) = queues.get_mut(self.queue_select as usize) {
f(queue); f(queue);
} }
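
The transport now reaches the ring layout through `queue.state` rather than public fields on `Queue` itself. Programming a queue from the common-config registers looks roughly like this:

```rust
use virtio_queue::Queue;
use vm_memory::{GuestAddress, GuestMemoryAtomic, GuestMemoryMmap};

// Set the ring addresses, as the dword/qword writes above do, then
// mark the queue ready; enable() sets state.ready, which the guest
// reads back through the 0x1c register.
fn program_layout(
    q: &mut Queue<GuestMemoryAtomic<GuestMemoryMmap>>,
    desc_table: u64,
    avail_ring: u64,
    used_ring: u64,
) {
    q.state.desc_table = GuestAddress(desc_table);
    q.state.avail_ring = GuestAddress(avail_ring);
    q.state.used_ring = GuestAddress(used_ring);
    q.enable(true);
}
```
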
@ -308,6 +331,7 @@ mod tests {
use crate::GuestMemoryMmap; use crate::GuestMemoryMmap;
use crate::{ActivateResult, VirtioInterrupt}; use crate::{ActivateResult, VirtioInterrupt};
use std::sync::Arc; use std::sync::Arc;
use virtio_queue::Queue;
use vm_memory::GuestMemoryAtomic; use vm_memory::GuestMemoryAtomic;
use vmm_sys_util::eventfd::EventFd; use vmm_sys_util::eventfd::EventFd;
@ -326,7 +350,7 @@ mod tests {
&mut self, &mut self,
_mem: GuestMemoryAtomic<GuestMemoryMmap>, _mem: GuestMemoryAtomic<GuestMemoryMmap>,
_interrupt_evt: Arc<dyn VirtioInterrupt>, _interrupt_evt: Arc<dyn VirtioInterrupt>,
_queues: Vec<Queue>, _queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
_queue_evts: Vec<EventFd>, _queue_evts: Vec<EventFd>,
) -> ActivateResult { ) -> ActivateResult {
Ok(()) Ok(())


@ -10,7 +10,7 @@ use super::VirtioPciCommonConfig;
use crate::transport::VirtioTransport; use crate::transport::VirtioTransport;
use crate::GuestMemoryMmap; use crate::GuestMemoryMmap;
use crate::{ use crate::{
ActivateResult, Queue, VirtioDevice, VirtioDeviceType, VirtioInterrupt, VirtioInterruptType, ActivateResult, VirtioDevice, VirtioDeviceType, VirtioInterrupt, VirtioInterruptType,
DEVICE_ACKNOWLEDGE, DEVICE_DRIVER, DEVICE_DRIVER_OK, DEVICE_FAILED, DEVICE_FEATURES_OK, DEVICE_ACKNOWLEDGE, DEVICE_DRIVER, DEVICE_DRIVER_OK, DEVICE_FAILED, DEVICE_FEATURES_OK,
DEVICE_INIT, DEVICE_INIT,
}; };
@ -24,30 +24,28 @@ use pci::{
use std::any::Any; use std::any::Any;
use std::cmp; use std::cmp;
use std::io::Write; use std::io::Write;
use std::num::Wrapping;
use std::result; use std::result;
use std::sync::atomic::{AtomicBool, AtomicU16, AtomicUsize, Ordering}; use std::sync::atomic::{AtomicBool, AtomicU16, AtomicUsize, Ordering};
use std::sync::{Arc, Barrier, Mutex}; use std::sync::{Arc, Barrier, Mutex};
use versionize::{VersionMap, Versionize, VersionizeResult}; use versionize::{VersionMap, Versionize, VersionizeResult};
use versionize_derive::Versionize; use versionize_derive::Versionize;
use virtio_queue::AccessPlatform;
use virtio_queue::{defs::VIRTQ_MSI_NO_VECTOR, Error as QueueError, Queue};
use vm_allocator::SystemAllocator; use vm_allocator::SystemAllocator;
use vm_device::interrupt::{ use vm_device::interrupt::{
InterruptIndex, InterruptManager, InterruptSourceGroup, MsiIrqGroupConfig, InterruptIndex, InterruptManager, InterruptSourceGroup, MsiIrqGroupConfig,
}; };
use vm_device::BusDevice; use vm_device::BusDevice;
use vm_memory::{ use vm_memory::{Address, ByteValued, GuestAddress, GuestMemoryAtomic, GuestUsize, Le32};
Address, ByteValued, GuestAddress, GuestAddressSpace, GuestMemoryAtomic, GuestUsize, Le32,
};
use vm_migration::{ use vm_migration::{
Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable, VersionMapped, Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable, VersionMapped,
}; };
use vm_virtio::{queue, VirtioIommuRemapping, VIRTIO_MSI_NO_VECTOR};
use vmm_sys_util::{errno::Result, eventfd::EventFd}; use vmm_sys_util::{errno::Result, eventfd::EventFd};
#[derive(Debug)] #[derive(Debug)]
enum Error { enum Error {
/// Failed to retrieve queue ring's index. /// Failed to retrieve queue ring's index.
QueueRingIndex(queue::Error), QueueRingIndex(QueueError),
} }
#[allow(clippy::enum_variant_names)] #[allow(clippy::enum_variant_names)]
@ -309,7 +307,7 @@ pub struct VirtioPciDevice {
interrupt_source_group: Arc<dyn InterruptSourceGroup>, interrupt_source_group: Arc<dyn InterruptSourceGroup>,
// virtio queues // virtio queues
queues: Vec<Queue>, queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
queue_evts: Vec<EventFd>, queue_evts: Vec<EventFd>,
// Guest memory // Guest memory
@ -348,7 +346,7 @@ impl VirtioPciDevice {
memory: GuestMemoryAtomic<GuestMemoryMmap>, memory: GuestMemoryAtomic<GuestMemoryMmap>,
device: Arc<Mutex<dyn VirtioDevice>>, device: Arc<Mutex<dyn VirtioDevice>>,
msix_num: u16, msix_num: u16,
iommu_mapping_cb: Option<Arc<VirtioIommuRemapping>>, access_platform: Option<Arc<dyn AccessPlatform>>,
interrupt_manager: &Arc<dyn InterruptManager<GroupConfig = MsiIrqGroupConfig>>, interrupt_manager: &Arc<dyn InterruptManager<GroupConfig = MsiIrqGroupConfig>>,
pci_device_bdf: u32, pci_device_bdf: u32,
activate_evt: EventFd, activate_evt: EventFd,
@ -363,8 +361,11 @@ impl VirtioPciDevice {
.queue_max_sizes() .queue_max_sizes()
.iter() .iter()
.map(|&s| { .map(|&s| {
let mut queue = Queue::new(s); let mut queue = Queue::<
queue.iommu_mapping_cb = iommu_mapping_cb.clone(); GuestMemoryAtomic<GuestMemoryMmap>,
virtio_queue::QueueState<GuestMemoryAtomic<GuestMemoryMmap>>,
>::new(memory.clone(), s);
queue.state.access_platform = access_platform.clone();
queue queue
}) })
.collect(); .collect();
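
This hunk is the heart of the move: a Queue is now bound to its guest memory object when it is constructed, and the per-device iommu_mapping_cb closure gives way to the crate's AccessPlatform trait object stored inside the queue state. A minimal sketch of the same construction, assuming the virtio-queue 0.1.0 API exactly as used above (build_queues and the Mem alias are illustrative names, not from the tree; GuestMemoryMmap is Cloud Hypervisor's bitmap-backed alias):

    use std::sync::Arc;
    use virtio_queue::{AccessPlatform, Queue, QueueState};
    use vm_memory::GuestMemoryAtomic;

    type Mem = GuestMemoryAtomic<GuestMemoryMmap>;

    fn build_queues(
        memory: &Mem,
        max_sizes: &[u16],
        access_platform: Option<Arc<dyn AccessPlatform>>,
    ) -> Vec<Queue<Mem, QueueState<Mem>>> {
        max_sizes
            .iter()
            .map(|&s| {
                // The queue clones the atomic memory handle up front instead
                // of taking a &GuestMemoryMmap argument on every later call.
                let mut queue = Queue::<Mem, QueueState<Mem>>::new(memory.clone(), s);
                // vIOMMU address translation now goes through AccessPlatform.
                queue.state.access_platform = access_platform.clone();
                queue
            })
            .collect()
    }
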
@ -432,7 +433,7 @@ impl VirtioPciDevice {
device_feature_select: 0, device_feature_select: 0,
driver_feature_select: 0, driver_feature_select: 0,
queue_select: 0, queue_select: 0,
msix_config: Arc::new(AtomicU16::new(VIRTIO_MSI_NO_VECTOR)), msix_config: Arc::new(AtomicU16::new(VIRTQ_MSI_NO_VECTOR)),
}, },
msix_config, msix_config,
msix_num, msix_num,
@ -472,13 +473,13 @@ impl VirtioPciDevice {
.queues .queues
.iter() .iter()
.map(|q| QueueState { .map(|q| QueueState {
max_size: q.max_size, max_size: q.max_size(),
size: q.size, size: q.state.size,
ready: q.ready, ready: q.state.ready,
vector: q.vector, vector: q.state.vector,
desc_table: q.desc_table.0, desc_table: q.state.desc_table.0,
avail_ring: q.avail_ring.0, avail_ring: q.state.avail_ring.0,
used_ring: q.used_ring.0, used_ring: q.state.used_ring.0,
}) })
.collect(), .collect(),
} }
@ -491,28 +492,26 @@ impl VirtioPciDevice {
.store(state.interrupt_status, Ordering::Release); .store(state.interrupt_status, Ordering::Release);
// Update virtqueues indexes for both available and used rings. // Update virtqueues indexes for both available and used rings.
if let Some(mem) = self.memory.as_ref() {
let mem = mem.memory();
for (i, queue) in self.queues.iter_mut().enumerate() { for (i, queue) in self.queues.iter_mut().enumerate() {
queue.max_size = state.queues[i].max_size; queue.state.size = state.queues[i].size;
queue.size = state.queues[i].size; queue.state.ready = state.queues[i].ready;
queue.ready = state.queues[i].ready; queue.state.vector = state.queues[i].vector;
queue.vector = state.queues[i].vector; queue.state.desc_table = GuestAddress(state.queues[i].desc_table);
queue.desc_table = GuestAddress(state.queues[i].desc_table); queue.state.avail_ring = GuestAddress(state.queues[i].avail_ring);
queue.avail_ring = GuestAddress(state.queues[i].avail_ring); queue.state.used_ring = GuestAddress(state.queues[i].used_ring);
queue.used_ring = GuestAddress(state.queues[i].used_ring); queue.set_next_avail(
queue.next_avail = Wrapping(
queue queue
.used_index_from_memory(&mem) .used_idx(Ordering::Acquire)
.map_err(Error::QueueRingIndex)?, .map_err(Error::QueueRingIndex)?
.0,
); );
queue.next_used = Wrapping( queue.set_next_used(
queue queue
.used_index_from_memory(&mem) .used_idx(Ordering::Acquire)
.map_err(Error::QueueRingIndex)?, .map_err(Error::QueueRingIndex)?
.0,
); );
} }
}
Ok(()) Ok(())
} }
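
Restore no longer rebuilds Wrapping<u16> counters by hand: because the queue owns a memory handle, it can re-read the used ring index straight from guest memory via used_idx() and feed it back through setters. A hedged sketch of that step in isolation (restore_ring_indexes is an illustrative name; Error::QueueRingIndex is the variant defined earlier in this file, and Mem abbreviates GuestMemoryAtomic<GuestMemoryMmap> as in the construction sketch above):

    use std::sync::atomic::Ordering;
    use virtio_queue::Queue;

    fn restore_ring_indexes(queue: &mut Queue<Mem>) -> Result<(), Error> {
        // used_idx() reads the index the device last published to the used
        // ring; after a snapshot restore this is where processing resumes,
        // mirroring the set_next_avail()/set_next_used() calls above.
        let used = queue
            .used_idx(Ordering::Acquire)
            .map_err(Error::QueueRingIndex)?;
        queue.set_next_avail(used.0);
        queue.set_next_used(used.0);
        Ok(())
    }
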
@ -673,10 +672,10 @@ impl VirtioPciDevice {
let mut device = self.device.lock().unwrap(); let mut device = self.device.lock().unwrap();
let mut queue_evts = Vec::new(); let mut queue_evts = Vec::new();
let mut queues = self.queues.clone(); let mut queues = self.queues.clone();
queues.retain(|q| q.ready); queues.retain(|q| q.state.ready);
for (i, queue) in queues.iter().enumerate() { for (i, queue) in queues.iter().enumerate() {
queue_evts.push(self.queue_evts[i].try_clone().unwrap()); queue_evts.push(self.queue_evts[i].try_clone().unwrap());
if !queue.is_valid(&mem.memory()) { if !queue.is_valid() {
error!("Queue {} is not valid", i); error!("Queue {} is not valid", i);
} }
} }
@ -743,20 +742,20 @@ impl VirtioInterrupt for VirtioInterruptMsix {
fn trigger( fn trigger(
&self, &self,
int_type: &VirtioInterruptType, int_type: &VirtioInterruptType,
queue: Option<&Queue>, queue: Option<&Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
) -> std::result::Result<(), std::io::Error> { ) -> std::result::Result<(), std::io::Error> {
let vector = match int_type { let vector = match int_type {
VirtioInterruptType::Config => self.config_vector.load(Ordering::Acquire), VirtioInterruptType::Config => self.config_vector.load(Ordering::Acquire),
VirtioInterruptType::Queue => { VirtioInterruptType::Queue => {
if let Some(q) = queue { if let Some(q) = queue {
q.vector q.state.vector
} else { } else {
0 0
} }
} }
}; };
if vector == VIRTIO_MSI_NO_VECTOR { if vector == VIRTQ_MSI_NO_VECTOR {
return Ok(()); return Ok(());
} }
@ -776,12 +775,16 @@ impl VirtioInterrupt for VirtioInterruptMsix {
.trigger(vector as InterruptIndex) .trigger(vector as InterruptIndex)
} }
fn notifier(&self, int_type: &VirtioInterruptType, queue: Option<&Queue>) -> Option<EventFd> { fn notifier(
&self,
int_type: &VirtioInterruptType,
queue: Option<&Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
) -> Option<EventFd> {
let vector = match int_type { let vector = match int_type {
VirtioInterruptType::Config => self.config_vector.load(Ordering::Acquire), VirtioInterruptType::Config => self.config_vector.load(Ordering::Acquire),
VirtioInterruptType::Queue => { VirtioInterruptType::Queue => {
if let Some(q) = queue { if let Some(q) = queue {
q.vector q.state.vector
} else { } else {
0 0
} }


@ -1,7 +1,7 @@
// Copyright 2019 Intel Corporation. All Rights Reserved. // Copyright 2019 Intel Corporation. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
use super::super::{ActivateResult, Queue, VirtioCommon, VirtioDevice, VirtioDeviceType}; use super::super::{ActivateResult, VirtioCommon, VirtioDevice, VirtioDeviceType};
use super::vu_common_ctrl::{VhostUserConfig, VhostUserHandle}; use super::vu_common_ctrl::{VhostUserConfig, VhostUserHandle};
use super::{Error, Result, DEFAULT_VIRTIO_FEATURES}; use super::{Error, Result, DEFAULT_VIRTIO_FEATURES};
use crate::seccomp_filters::Thread; use crate::seccomp_filters::Thread;
@ -28,6 +28,7 @@ use virtio_bindings::bindings::virtio_blk::{
VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_SEG_MAX,
VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_WRITE_ZEROES, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_WRITE_ZEROES,
}; };
use virtio_queue::Queue;
use vm_memory::{ByteValued, GuestMemoryAtomic}; use vm_memory::{ByteValued, GuestMemoryAtomic};
use vm_migration::{ use vm_migration::{
protocol::MemoryRangeTable, Migratable, MigratableError, Pausable, Snapshot, Snapshottable, protocol::MemoryRangeTable, Migratable, MigratableError, Pausable, Snapshot, Snapshottable,
@ -279,7 +280,7 @@ impl VirtioDevice for Blk {
&mut self, &mut self,
mem: GuestMemoryAtomic<GuestMemoryMmap>, mem: GuestMemoryAtomic<GuestMemoryMmap>,
interrupt_cb: Arc<dyn VirtioInterrupt>, interrupt_cb: Arc<dyn VirtioInterrupt>,
queues: Vec<Queue>, queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
queue_evts: Vec<EventFd>, queue_evts: Vec<EventFd>,
) -> ActivateResult { ) -> ActivateResult {
self.common.activate(&queues, &queue_evts, &interrupt_cb)?; self.common.activate(&queues, &queue_evts, &interrupt_cb)?;


@ -7,8 +7,8 @@ use crate::seccomp_filters::Thread;
use crate::thread_helper::spawn_virtio_thread; use crate::thread_helper::spawn_virtio_thread;
use crate::vhost_user::VhostUserCommon; use crate::vhost_user::VhostUserCommon;
use crate::{ use crate::{
ActivateError, ActivateResult, Queue, UserspaceMapping, VirtioCommon, VirtioDevice, ActivateError, ActivateResult, UserspaceMapping, VirtioCommon, VirtioDevice, VirtioDeviceType,
VirtioDeviceType, VirtioInterrupt, VirtioSharedMemoryList, VirtioInterrupt, VirtioSharedMemoryList,
}; };
use crate::{GuestMemoryMmap, GuestRegionMmap, MmapRegion}; use crate::{GuestMemoryMmap, GuestRegionMmap, MmapRegion};
use libc::{self, c_void, off64_t, pread64, pwrite64}; use libc::{self, c_void, off64_t, pread64, pwrite64};
@ -27,6 +27,7 @@ use vhost::vhost_user::message::{
use vhost::vhost_user::{ use vhost::vhost_user::{
HandlerResult, MasterReqHandler, VhostUserMaster, VhostUserMasterReqHandler, HandlerResult, MasterReqHandler, VhostUserMaster, VhostUserMasterReqHandler,
}; };
use virtio_queue::Queue;
use vm_memory::{ use vm_memory::{
Address, ByteValued, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic, Address, ByteValued, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic,
}; };
@ -501,7 +502,7 @@ impl VirtioDevice for Fs {
&mut self, &mut self,
mem: GuestMemoryAtomic<GuestMemoryMmap>, mem: GuestMemoryAtomic<GuestMemoryMmap>,
interrupt_cb: Arc<dyn VirtioInterrupt>, interrupt_cb: Arc<dyn VirtioInterrupt>,
queues: Vec<Queue>, queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
queue_evts: Vec<EventFd>, queue_evts: Vec<EventFd>,
) -> ActivateResult { ) -> ActivateResult {
self.common.activate(&queues, &queue_evts, &interrupt_cb)?; self.common.activate(&queues, &queue_evts, &interrupt_cb)?;


@ -3,7 +3,7 @@
use crate::{ use crate::{
ActivateError, EpollHelper, EpollHelperError, EpollHelperHandler, GuestMemoryMmap, ActivateError, EpollHelper, EpollHelperError, EpollHelperHandler, GuestMemoryMmap,
GuestRegionMmap, Queue, VirtioInterrupt, EPOLL_HELPER_EVENT_LAST, VIRTIO_F_IN_ORDER, GuestRegionMmap, VirtioInterrupt, EPOLL_HELPER_EVENT_LAST, VIRTIO_F_IN_ORDER,
VIRTIO_F_NOTIFICATION_DATA, VIRTIO_F_ORDER_PLATFORM, VIRTIO_F_RING_EVENT_IDX, VIRTIO_F_NOTIFICATION_DATA, VIRTIO_F_ORDER_PLATFORM, VIRTIO_F_RING_EVENT_IDX,
VIRTIO_F_RING_INDIRECT_DESC, VIRTIO_F_VERSION_1, VIRTIO_F_RING_INDIRECT_DESC, VIRTIO_F_VERSION_1,
}; };
@ -18,12 +18,13 @@ use vhost::vhost_user::message::{
}; };
use vhost::vhost_user::{MasterReqHandler, VhostUserMasterReqHandler}; use vhost::vhost_user::{MasterReqHandler, VhostUserMasterReqHandler};
use vhost::Error as VhostError; use vhost::Error as VhostError;
use virtio_queue::Error as QueueError;
use virtio_queue::Queue;
use vm_memory::{ use vm_memory::{
mmap::MmapRegionError, Address, Error as MmapError, GuestAddressSpace, GuestMemory, mmap::MmapRegionError, Address, Error as MmapError, GuestAddressSpace, GuestMemory,
GuestMemoryAtomic, GuestMemoryAtomic,
}; };
use vm_migration::{protocol::MemoryRangeTable, MigratableError, Snapshot, VersionMapped}; use vm_migration::{protocol::MemoryRangeTable, MigratableError, Snapshot, VersionMapped};
use vm_virtio::Error as VirtioError;
use vmm_sys_util::eventfd::EventFd; use vmm_sys_util::eventfd::EventFd;
use vu_common_ctrl::VhostUserHandle; use vu_common_ctrl::VhostUserHandle;
@ -128,7 +129,7 @@ pub enum Error {
/// Missing IrqFd /// Missing IrqFd
MissingIrqFd, MissingIrqFd,
/// Failed getting the available index. /// Failed getting the available index.
GetAvailableIndex(VirtioError), GetAvailableIndex(QueueError),
/// Migration is not supported by this vhost-user device. /// Migration is not supported by this vhost-user device.
MigrationNotSupported, MigrationNotSupported,
/// Failed creating memfd. /// Failed creating memfd.
@ -166,7 +167,7 @@ pub struct VhostUserEpollHandler<S: VhostUserMasterReqHandler> {
pub mem: GuestMemoryAtomic<GuestMemoryMmap>, pub mem: GuestMemoryAtomic<GuestMemoryMmap>,
pub kill_evt: EventFd, pub kill_evt: EventFd,
pub pause_evt: EventFd, pub pause_evt: EventFd,
pub queues: Vec<Queue>, pub queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
pub queue_evts: Vec<EventFd>, pub queue_evts: Vec<EventFd>,
pub virtio_interrupt: Arc<dyn VirtioInterrupt>, pub virtio_interrupt: Arc<dyn VirtioInterrupt>,
pub acked_features: u64, pub acked_features: u64,
@ -298,7 +299,7 @@ impl VhostUserCommon {
pub fn activate<T: VhostUserMasterReqHandler>( pub fn activate<T: VhostUserMasterReqHandler>(
&mut self, &mut self,
mem: GuestMemoryAtomic<GuestMemoryMmap>, mem: GuestMemoryAtomic<GuestMemoryMmap>,
queues: Vec<Queue>, queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
queue_evts: Vec<EventFd>, queue_evts: Vec<EventFd>,
interrupt_cb: Arc<dyn VirtioInterrupt>, interrupt_cb: Arc<dyn VirtioInterrupt>,
acked_features: u64, acked_features: u64,


@ -6,9 +6,9 @@ use crate::thread_helper::spawn_virtio_thread;
use crate::vhost_user::vu_common_ctrl::{VhostUserConfig, VhostUserHandle}; use crate::vhost_user::vu_common_ctrl::{VhostUserConfig, VhostUserHandle};
use crate::vhost_user::{Error, Result, VhostUserCommon}; use crate::vhost_user::{Error, Result, VhostUserCommon};
use crate::{ use crate::{
ActivateResult, EpollHelper, EpollHelperError, EpollHelperHandler, Queue, VirtioCommon, ActivateResult, EpollHelper, EpollHelperError, EpollHelperHandler, VirtioCommon, VirtioDevice,
VirtioDevice, VirtioDeviceType, VirtioInterrupt, EPOLL_HELPER_EVENT_LAST, VirtioDeviceType, VirtioInterrupt, EPOLL_HELPER_EVENT_LAST, VIRTIO_F_RING_EVENT_IDX,
VIRTIO_F_RING_EVENT_IDX, VIRTIO_F_VERSION_1, VIRTIO_F_VERSION_1,
}; };
use crate::{GuestMemoryMmap, GuestRegionMmap}; use crate::{GuestMemoryMmap, GuestRegionMmap};
use net_util::{build_net_config_space, CtrlQueue, MacAddr, VirtioNetConfig}; use net_util::{build_net_config_space, CtrlQueue, MacAddr, VirtioNetConfig};
@ -29,7 +29,8 @@ use virtio_bindings::bindings::virtio_net::{
VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_TSO6, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_TSO6, VIRTIO_NET_F_HOST_UFO,
VIRTIO_NET_F_MAC, VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_MAC, VIRTIO_NET_F_MRG_RXBUF,
}; };
use vm_memory::{ByteValued, GuestAddressSpace, GuestMemoryAtomic}; use virtio_queue::Queue;
use vm_memory::{ByteValued, GuestMemoryAtomic};
use vm_migration::{ use vm_migration::{
protocol::MemoryRangeTable, Migratable, MigratableError, Pausable, Snapshot, Snapshottable, protocol::MemoryRangeTable, Migratable, MigratableError, Pausable, Snapshot, Snapshottable,
Transportable, VersionMapped, Transportable, VersionMapped,
@ -62,7 +63,7 @@ pub struct NetCtrlEpollHandler {
pub pause_evt: EventFd, pub pause_evt: EventFd,
pub ctrl_q: CtrlQueue, pub ctrl_q: CtrlQueue,
pub queue_evt: EventFd, pub queue_evt: EventFd,
pub queue: Queue, pub queue: Queue<GuestMemoryAtomic<GuestMemoryMmap>>,
} }
impl NetCtrlEpollHandler { impl NetCtrlEpollHandler {
@ -84,12 +85,11 @@ impl EpollHelperHandler for NetCtrlEpollHandler {
let ev_type = event.data as u16; let ev_type = event.data as u16;
match ev_type { match ev_type {
CTRL_QUEUE_EVENT => { CTRL_QUEUE_EVENT => {
let mem = self.mem.memory();
if let Err(e) = self.queue_evt.read() { if let Err(e) = self.queue_evt.read() {
error!("failed to get ctl queue event: {:?}", e); error!("failed to get ctl queue event: {:?}", e);
return true; return true;
} }
if let Err(e) = self.ctrl_q.process(&mem, &mut self.queue) { if let Err(e) = self.ctrl_q.process(&mut self.queue) {
error!("failed to process ctrl queue: {:?}", e); error!("failed to process ctrl queue: {:?}", e);
return true; return true;
} }
@ -308,7 +308,7 @@ impl VirtioDevice for Net {
&mut self, &mut self,
mem: GuestMemoryAtomic<GuestMemoryMmap>, mem: GuestMemoryAtomic<GuestMemoryMmap>,
interrupt_cb: Arc<dyn VirtioInterrupt>, interrupt_cb: Arc<dyn VirtioInterrupt>,
mut queues: Vec<Queue>, mut queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
mut queue_evts: Vec<EventFd>, mut queue_evts: Vec<EventFd>,
) -> ActivateResult { ) -> ActivateResult {
self.common.activate(&queues, &queue_evts, &interrupt_cb)?; self.common.activate(&queues, &queue_evts, &interrupt_cb)?;


@ -1,7 +1,6 @@
// Copyright 2019 Intel Corporation. All Rights Reserved. // Copyright 2019 Intel Corporation. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
use super::super::{Descriptor, Queue};
use super::{Error, Result}; use super::{Error, Result};
use crate::vhost_user::Inflight; use crate::vhost_user::Inflight;
use crate::{ use crate::{
@ -13,6 +12,7 @@ use std::ffi;
use std::fs::File; use std::fs::File;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd}; use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::os::unix::net::UnixListener; use std::os::unix::net::UnixListener;
use std::sync::atomic::Ordering;
use std::sync::Arc; use std::sync::Arc;
use std::thread::sleep; use std::thread::sleep;
use std::time::{Duration, Instant}; use std::time::{Duration, Instant};
@ -23,7 +23,10 @@ use vhost::vhost_user::message::{
}; };
use vhost::vhost_user::{Master, MasterReqHandler, VhostUserMaster, VhostUserMasterReqHandler}; use vhost::vhost_user::{Master, MasterReqHandler, VhostUserMaster, VhostUserMasterReqHandler};
use vhost::{VhostBackend, VhostUserDirtyLogRegion, VhostUserMemoryRegionInfo, VringConfigData}; use vhost::{VhostBackend, VhostUserDirtyLogRegion, VhostUserMemoryRegionInfo, VringConfigData};
use vm_memory::{Address, Error as MmapError, FileOffset, GuestMemory, GuestMemoryRegion}; use virtio_queue::{Descriptor, Queue};
use vm_memory::{
Address, Error as MmapError, FileOffset, GuestMemory, GuestMemoryAtomic, GuestMemoryRegion,
};
use vm_migration::protocol::MemoryRangeTable; use vm_migration::protocol::MemoryRangeTable;
use vmm_sys_util::eventfd::EventFd; use vmm_sys_util::eventfd::EventFd;
@ -148,7 +151,7 @@ impl VhostUserHandle {
pub fn setup_vhost_user<S: VhostUserMasterReqHandler>( pub fn setup_vhost_user<S: VhostUserMasterReqHandler>(
&mut self, &mut self,
mem: &GuestMemoryMmap, mem: &GuestMemoryMmap,
queues: Vec<Queue>, queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
queue_evts: Vec<EventFd>, queue_evts: Vec<EventFd>,
virtio_interrupt: &Arc<dyn VirtioInterrupt>, virtio_interrupt: &Arc<dyn VirtioInterrupt>,
acked_features: u64, acked_features: u64,
@ -203,29 +206,37 @@ impl VhostUserHandle {
let actual_size: usize = queue.actual_size().try_into().unwrap(); let actual_size: usize = queue.actual_size().try_into().unwrap();
let config_data = VringConfigData { let config_data = VringConfigData {
queue_max_size: queue.get_max_size(), queue_max_size: queue.max_size(),
queue_size: queue.actual_size(), queue_size: queue.actual_size(),
flags: 0u32, flags: 0u32,
desc_table_addr: get_host_address_range( desc_table_addr: get_host_address_range(
mem, mem,
queue.desc_table, queue.state.desc_table,
actual_size * std::mem::size_of::<Descriptor>(), actual_size * std::mem::size_of::<Descriptor>(),
) )
.ok_or(Error::DescriptorTableAddress)? as u64, .ok_or(Error::DescriptorTableAddress)? as u64,
// The used ring is {flags: u16; idx: u16; virtq_used_elem [{id: u32, len: u32}; actual_size]}, // The used ring is {flags: u16; idx: u16; virtq_used_elem [{id: u32, len: u32}; actual_size]},
// i.e. 4 + (4 + 4) * actual_size. // i.e. 4 + (4 + 4) * actual_size.
used_ring_addr: get_host_address_range(mem, queue.used_ring, 4 + actual_size * 8) used_ring_addr: get_host_address_range(
mem,
queue.state.used_ring,
4 + actual_size * 8,
)
.ok_or(Error::UsedAddress)? as u64, .ok_or(Error::UsedAddress)? as u64,
// The available ring is {flags: u16; idx: u16; elem [u16; actual_size]}, // The available ring is {flags: u16; idx: u16; elem [u16; actual_size]},
// i.e. 4 + (2) * actual_size. // i.e. 4 + (2) * actual_size.
avail_ring_addr: get_host_address_range(mem, queue.avail_ring, 4 + actual_size * 2) avail_ring_addr: get_host_address_range(
mem,
queue.state.avail_ring,
4 + actual_size * 2,
)
.ok_or(Error::AvailAddress)? as u64, .ok_or(Error::AvailAddress)? as u64,
log_addr: None, log_addr: None,
}; };
vrings_info.push(VringInfo { vrings_info.push(VringInfo {
config_data, config_data,
used_guest_addr: queue.used_ring.raw_value(), used_guest_addr: queue.state.used_ring.raw_value(),
}); });
self.vu self.vu
@ -235,8 +246,9 @@ impl VhostUserHandle {
.set_vring_base( .set_vring_base(
queue_index, queue_index,
queue queue
.used_index_from_memory(mem) .avail_idx(Ordering::Acquire)
.map_err(Error::GetAvailableIndex)?, .map_err(Error::GetAvailableIndex)?
.0,
) )
.map_err(Error::VhostUserSetVringBase)?; .map_err(Error::VhostUserSetVringBase)?;
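
The host-address mapping above leans on the split-virtqueue layout spelled out in the comments. As a quick cross-check, the byte sizes for a queue of n entries work out as follows (a worked sketch; ring_sizes is an illustrative name):

    // Split virtqueue layout, ignoring the optional event-idx trailing fields:
    //   descriptor table: n * 16      (each Descriptor is 16 bytes)
    //   available ring:   4 + n * 2   {flags: u16, idx: u16, ring: [u16; n]}
    //   used ring:        4 + n * 8   {flags: u16, idx: u16, ring: [{id: u32, len: u32}; n]}
    fn ring_sizes(n: usize) -> (usize, usize, usize) {
        let desc = n * std::mem::size_of::<virtio_queue::Descriptor>(); // n * 16
        let avail = 4 + n * 2;
        let used = 4 + n * 8;
        (desc, avail, used)
    }
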
@ -317,7 +329,7 @@ impl VhostUserHandle {
pub fn reinitialize_vhost_user<S: VhostUserMasterReqHandler>( pub fn reinitialize_vhost_user<S: VhostUserMasterReqHandler>(
&mut self, &mut self,
mem: &GuestMemoryMmap, mem: &GuestMemoryMmap,
queues: Vec<Queue>, queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
queue_evts: Vec<EventFd>, queue_evts: Vec<EventFd>,
virtio_interrupt: &Arc<dyn VirtioInterrupt>, virtio_interrupt: &Arc<dyn VirtioInterrupt>,
acked_features: u64, acked_features: u64,


@ -818,8 +818,9 @@ mod tests {
let mut handler_ctx = vsock_test_ctx.create_epoll_handler_context(); let mut handler_ctx = vsock_test_ctx.create_epoll_handler_context();
let stream = TestStream::new(); let stream = TestStream::new();
let mut pkt = VsockPacket::from_rx_virtq_head( let mut pkt = VsockPacket::from_rx_virtq_head(
&handler_ctx.handler.queues[0] &mut handler_ctx.handler.queues[0]
.iter(&vsock_test_ctx.mem) .iter()
.unwrap()
.next() .next()
.unwrap(), .unwrap(),
) )


@ -34,7 +34,7 @@ use crate::GuestMemoryMmap;
use crate::VirtioInterrupt; use crate::VirtioInterrupt;
use crate::{ use crate::{
thread_helper::spawn_virtio_thread, ActivateResult, EpollHelper, EpollHelperError, thread_helper::spawn_virtio_thread, ActivateResult, EpollHelper, EpollHelperError,
EpollHelperHandler, Queue, VirtioCommon, VirtioDevice, VirtioDeviceType, VirtioInterruptType, EpollHelperHandler, VirtioCommon, VirtioDevice, VirtioDeviceType, VirtioInterruptType,
EPOLL_HELPER_EVENT_LAST, VIRTIO_F_IN_ORDER, VIRTIO_F_IOMMU_PLATFORM, VIRTIO_F_VERSION_1, EPOLL_HELPER_EVENT_LAST, VIRTIO_F_IN_ORDER, VIRTIO_F_IOMMU_PLATFORM, VIRTIO_F_VERSION_1,
}; };
use byteorder::{ByteOrder, LittleEndian}; use byteorder::{ByteOrder, LittleEndian};
@ -47,7 +47,8 @@ use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Barrier, RwLock}; use std::sync::{Arc, Barrier, RwLock};
use versionize::{VersionMap, Versionize, VersionizeResult}; use versionize::{VersionMap, Versionize, VersionizeResult};
use versionize_derive::Versionize; use versionize_derive::Versionize;
use vm_memory::{GuestAddressSpace, GuestMemoryAtomic}; use virtio_queue::Queue;
use vm_memory::GuestMemoryAtomic;
use vm_migration::{ use vm_migration::{
Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable, VersionMapped, Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable, VersionMapped,
}; };
@ -86,7 +87,7 @@ pub const BACKEND_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 4;
/// ///
pub struct VsockEpollHandler<B: VsockBackend> { pub struct VsockEpollHandler<B: VsockBackend> {
pub mem: GuestMemoryAtomic<GuestMemoryMmap>, pub mem: GuestMemoryAtomic<GuestMemoryMmap>,
pub queues: Vec<Queue>, pub queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
pub queue_evts: Vec<EventFd>, pub queue_evts: Vec<EventFd>,
pub kill_evt: EventFd, pub kill_evt: EventFd,
pub pause_evt: EventFd, pub pause_evt: EventFd,
@ -101,7 +102,10 @@ where
/// Signal the guest driver that we've used some virtio buffers that it had previously made /// Signal the guest driver that we've used some virtio buffers that it had previously made
/// available. /// available.
/// ///
fn signal_used_queue(&self, queue: &Queue) -> result::Result<(), DeviceError> { fn signal_used_queue(
&self,
queue: &Queue<GuestMemoryAtomic<GuestMemoryMmap>>,
) -> result::Result<(), DeviceError> {
debug!("vsock: raising IRQ"); debug!("vsock: raising IRQ");
self.interrupt_cb self.interrupt_cb
@ -120,16 +124,17 @@ where
let mut used_desc_heads = [(0, 0); QUEUE_SIZE as usize]; let mut used_desc_heads = [(0, 0); QUEUE_SIZE as usize];
let mut used_count = 0; let mut used_count = 0;
let mem = self.mem.memory();
for avail_desc in self.queues[0].iter(&mem) { let mut avail_iter = self.queues[0].iter().map_err(DeviceError::QueueIterator)?;
let used_len = match VsockPacket::from_rx_virtq_head(&avail_desc) { for mut desc_chain in &mut avail_iter {
let used_len = match VsockPacket::from_rx_virtq_head(&mut desc_chain) {
Ok(mut pkt) => { Ok(mut pkt) => {
if self.backend.write().unwrap().recv_pkt(&mut pkt).is_ok() { if self.backend.write().unwrap().recv_pkt(&mut pkt).is_ok() {
pkt.hdr().len() as u32 + pkt.len() pkt.hdr().len() as u32 + pkt.len()
} else { } else {
// We are using a consuming iterator over the virtio buffers, so, if we can't // We are using a consuming iterator over the virtio buffers, so, if we can't
// fill in this buffer, we'll need to undo the last iterator step. // fill in this buffer, we'll need to undo the last iterator step.
self.queues[0].go_to_previous_position(); avail_iter.go_to_previous_position();
break; break;
} }
} }
@ -139,12 +144,14 @@ where
} }
}; };
used_desc_heads[used_count] = (avail_desc.index, used_len); used_desc_heads[used_count] = (desc_chain.head_index(), used_len);
used_count += 1; used_count += 1;
} }
for &(desc_index, len) in &used_desc_heads[..used_count] { for &(desc_index, len) in &used_desc_heads[..used_count] {
self.queues[0].add_used(&mem, desc_index, len); self.queues[0]
.add_used(desc_index, len)
.map_err(DeviceError::QueueAddUsed)?;
} }
if used_count > 0 { if used_count > 0 {
@ -162,29 +169,32 @@ where
let mut used_desc_heads = [(0, 0); QUEUE_SIZE as usize]; let mut used_desc_heads = [(0, 0); QUEUE_SIZE as usize];
let mut used_count = 0; let mut used_count = 0;
let mem = self.mem.memory();
for avail_desc in self.queues[1].iter(&mem) { let mut avail_iter = self.queues[1].iter().map_err(DeviceError::QueueIterator)?;
let pkt = match VsockPacket::from_tx_virtq_head(&avail_desc) { for mut desc_chain in &mut avail_iter {
let pkt = match VsockPacket::from_tx_virtq_head(&mut desc_chain) {
Ok(pkt) => pkt, Ok(pkt) => pkt,
Err(e) => { Err(e) => {
error!("vsock: error reading TX packet: {:?}", e); error!("vsock: error reading TX packet: {:?}", e);
used_desc_heads[used_count] = (avail_desc.index, 0); used_desc_heads[used_count] = (desc_chain.head_index(), 0);
used_count += 1; used_count += 1;
continue; continue;
} }
}; };
if self.backend.write().unwrap().send_pkt(&pkt).is_err() { if self.backend.write().unwrap().send_pkt(&pkt).is_err() {
self.queues[1].go_to_previous_position(); avail_iter.go_to_previous_position();
break; break;
} }
used_desc_heads[used_count] = (avail_desc.index, 0); used_desc_heads[used_count] = (desc_chain.head_index(), 0);
used_count += 1; used_count += 1;
} }
for &(desc_index, len) in &used_desc_heads[..used_count] { for &(desc_index, len) in &used_desc_heads[..used_count] {
self.queues[1].add_used(&mem, desc_index, len); self.queues[1]
.add_used(desc_index, len)
.map_err(DeviceError::QueueAddUsed)?;
} }
if used_count > 0 { if used_count > 0 {
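
Both the RX and TX paths above now share one shape: take a fallible iterator from the queue with iter(), walk the descriptor chains, rewind with go_to_previous_position() when the backend cannot accept more, then post used entries keyed by head_index(). A condensed sketch of the pattern, assuming the vsock module's DeviceError variants as used in this diff (drain_queue and the handler closure are illustrative; GuestMemoryMmap is the crate-local alias):

    use virtio_queue::{DescriptorChain, Queue};
    use vm_memory::GuestMemoryAtomic;

    type Mem = GuestMemoryAtomic<GuestMemoryMmap>;

    fn drain_queue(
        queue: &mut Queue<Mem>,
        mut handle: impl FnMut(&mut DescriptorChain<Mem>) -> Option<u32>,
    ) -> Result<bool, DeviceError> {
        let mut used = Vec::new();
        let mut avail_iter = queue.iter().map_err(DeviceError::QueueIterator)?;
        for mut desc_chain in &mut avail_iter {
            match handle(&mut desc_chain) {
                // Buffer consumed: remember its head index and written length.
                Some(len) => used.push((desc_chain.head_index(), len)),
                // Backend full: undo the last iterator step so this chain is
                // offered again on the next kick, then stop draining.
                None => {
                    avail_iter.go_to_previous_position();
                    break;
                }
            }
        }
        let any = !used.is_empty();
        for (idx, len) in used {
            queue.add_used(idx, len).map_err(DeviceError::QueueAddUsed)?;
        }
        Ok(any)
    }
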
@ -417,7 +427,7 @@ where
&mut self, &mut self,
mem: GuestMemoryAtomic<GuestMemoryMmap>, mem: GuestMemoryAtomic<GuestMemoryMmap>,
interrupt_cb: Arc<dyn VirtioInterrupt>, interrupt_cb: Arc<dyn VirtioInterrupt>,
queues: Vec<Queue>, queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
queue_evts: Vec<EventFd>, queue_evts: Vec<EventFd>,
) -> ActivateResult { ) -> ActivateResult {
self.common.activate(&queues, &queue_evts, &interrupt_cb)?; self.common.activate(&queues, &queue_evts, &interrupt_cb)?;
@ -578,12 +588,18 @@ mod tests {
other => panic!("{:?}", other), other => panic!("{:?}", other),
} }
let memory = GuestMemoryAtomic::new(ctx.mem.clone());
// Test a correct activation. // Test a correct activation.
ctx.device ctx.device
.activate( .activate(
GuestMemoryAtomic::new(ctx.mem.clone()), memory.clone(),
Arc::new(NoopVirtioInterrupt {}), Arc::new(NoopVirtioInterrupt {}),
vec![Queue::new(256), Queue::new(256), Queue::new(256)], vec![
Queue::new(memory.clone(), 256),
Queue::new(memory.clone(), 256),
Queue::new(memory, 256),
],
vec![ vec![
EventFd::new(EFD_NONBLOCK).unwrap(), EventFd::new(EFD_NONBLOCK).unwrap(),
EventFd::new(EFD_NONBLOCK).unwrap(), EventFd::new(EFD_NONBLOCK).unwrap(),
@ -599,8 +615,9 @@ mod tests {
{ {
let test_ctx = TestContext::new(); let test_ctx = TestContext::new();
let ctx = test_ctx.create_epoll_handler_context(); let ctx = test_ctx.create_epoll_handler_context();
let memory = GuestMemoryAtomic::new(test_ctx.mem.clone());
let queue = Queue::new(256); let queue = Queue::new(memory, 256);
assert!(ctx.handler.signal_used_queue(&queue).is_ok()); assert!(ctx.handler.signal_used_queue(&queue).is_ok());
} }
} }


@ -75,6 +75,8 @@ pub enum VsockError {
GuestMemoryBounds, GuestMemoryBounds,
/// The vsock header descriptor length is too small. /// The vsock header descriptor length is too small.
HdrDescTooSmall(u32), HdrDescTooSmall(u32),
/// The vsock header descriptor is expected, but missing.
HdrDescMissing,
/// The vsock header `len` field holds an invalid value. /// The vsock header `len` field holds an invalid value.
InvalidPktLen(u32), InvalidPktLen(u32),
/// A data fetch was attempted when no data was available. /// A data fetch was attempted when no data was available.
@ -168,10 +170,9 @@ mod tests {
use std::os::unix::io::AsRawFd; use std::os::unix::io::AsRawFd;
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::{Arc, RwLock}; use std::sync::{Arc, RwLock};
use virtio_queue::{defs::VIRTQ_DESC_F_NEXT, defs::VIRTQ_DESC_F_WRITE, Queue};
use vm_memory::{GuestAddress, GuestMemoryAtomic}; use vm_memory::{GuestAddress, GuestMemoryAtomic};
use vm_virtio::queue::testing::VirtQueue as GuestQ; use vm_virtio::queue::testing::VirtQueue as GuestQ;
use vm_virtio::queue::Queue;
use vm_virtio::queue::{VIRTQ_DESC_F_NEXT, VIRTQ_DESC_F_WRITE};
use vmm_sys_util::eventfd::EventFd; use vmm_sys_util::eventfd::EventFd;
pub struct NoopVirtioInterrupt {} pub struct NoopVirtioInterrupt {}
@ -180,7 +181,7 @@ mod tests {
fn trigger( fn trigger(
&self, &self,
_int_type: &VirtioInterruptType, _int_type: &VirtioInterruptType,
_queue: Option<&Queue>, _queue: Option<&Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
) -> std::result::Result<(), std::io::Error> { ) -> std::result::Result<(), std::io::Error> {
Ok(()) Ok(())
} }


@ -19,7 +19,9 @@ use byteorder::{ByteOrder, LittleEndian};
use super::defs; use super::defs;
use super::{Result, VsockError}; use super::{Result, VsockError};
use crate::{get_host_address_range, DescriptorChain}; use crate::{get_host_address_range, GuestMemoryMmap};
use virtio_queue::DescriptorChain;
use vm_memory::GuestMemoryAtomic;
// The vsock packet header is defined by the C struct: // The vsock packet header is defined by the C struct:
// //
@ -103,7 +105,11 @@ impl VsockPacket {
/// descriptor can optionally end the chain. Bounds and pointer checks are performed when /// descriptor can optionally end the chain. Bounds and pointer checks are performed when
/// creating the wrapper. /// creating the wrapper.
/// ///
pub fn from_tx_virtq_head(head: &DescriptorChain) -> Result<Self> { pub fn from_tx_virtq_head(
desc_chain: &mut DescriptorChain<GuestMemoryAtomic<GuestMemoryMmap>>,
) -> Result<Self> {
let head = desc_chain.next().ok_or(VsockError::HdrDescMissing)?;
// All buffers in the TX queue must be readable. // All buffers in the TX queue must be readable.
// //
if head.is_write_only() { if head.is_write_only() {
@ -111,12 +117,12 @@ impl VsockPacket {
} }
// The packet header should fit inside the head descriptor. // The packet header should fit inside the head descriptor.
if head.len < VSOCK_PKT_HDR_SIZE as u32 { if head.len() < VSOCK_PKT_HDR_SIZE as u32 {
return Err(VsockError::HdrDescTooSmall(head.len)); return Err(VsockError::HdrDescTooSmall(head.len()));
} }
let mut pkt = Self { let mut pkt = Self {
hdr: get_host_address_range(head.mem, head.addr, VSOCK_PKT_HDR_SIZE) hdr: get_host_address_range(desc_chain.memory(), head.addr(), VSOCK_PKT_HDR_SIZE)
.ok_or(VsockError::GuestMemory)? as *mut u8, .ok_or(VsockError::GuestMemory)? as *mut u8,
buf: None, buf: None,
buf_size: 0, buf_size: 0,
@ -134,7 +140,7 @@ impl VsockPacket {
} }
// If the packet header showed a non-zero length, there should be a data descriptor here. // If the packet header showed a non-zero length, there should be a data descriptor here.
let buf_desc = head.next_descriptor().ok_or(VsockError::BufDescMissing)?; let buf_desc = desc_chain.next().ok_or(VsockError::BufDescMissing)?;
// TX data should be read-only. // TX data should be read-only.
if buf_desc.is_write_only() { if buf_desc.is_write_only() {
@ -143,13 +149,13 @@ impl VsockPacket {
// The data buffer should be large enough to fit the size of the data, as described by // The data buffer should be large enough to fit the size of the data, as described by
// the header descriptor. // the header descriptor.
if buf_desc.len < pkt.len() { if buf_desc.len() < pkt.len() {
return Err(VsockError::BufDescTooSmall); return Err(VsockError::BufDescTooSmall);
} }
pkt.buf_size = buf_desc.len as usize; pkt.buf_size = buf_desc.len() as usize;
pkt.buf = Some( pkt.buf = Some(
get_host_address_range(buf_desc.mem, buf_desc.addr, pkt.buf_size) get_host_address_range(desc_chain.memory(), buf_desc.addr(), pkt.buf_size)
.ok_or(VsockError::GuestMemory)? as *mut u8, .ok_or(VsockError::GuestMemory)? as *mut u8,
); );
@ -161,7 +167,11 @@ impl VsockPacket {
/// There must be two descriptors in the chain, both writable: a header descriptor and a data /// There must be two descriptors in the chain, both writable: a header descriptor and a data
/// descriptor. Bounds and pointer checks are performed when creating the wrapper. /// descriptor. Bounds and pointer checks are performed when creating the wrapper.
/// ///
pub fn from_rx_virtq_head(head: &DescriptorChain) -> Result<Self> { pub fn from_rx_virtq_head(
desc_chain: &mut DescriptorChain<GuestMemoryAtomic<GuestMemoryMmap>>,
) -> Result<Self> {
let head = desc_chain.next().ok_or(VsockError::HdrDescMissing)?;
// All RX buffers must be writable. // All RX buffers must be writable.
// //
if !head.is_write_only() { if !head.is_write_only() {
@ -169,22 +179,22 @@ impl VsockPacket {
} }
// The packet header should fit inside the head descriptor. // The packet header should fit inside the head descriptor.
if head.len < VSOCK_PKT_HDR_SIZE as u32 { if head.len() < VSOCK_PKT_HDR_SIZE as u32 {
return Err(VsockError::HdrDescTooSmall(head.len)); return Err(VsockError::HdrDescTooSmall(head.len()));
} }
// All RX descriptor chains should have a header and a data descriptor. // All RX descriptor chains should have a header and a data descriptor.
if !head.has_next() { if !head.has_next() {
return Err(VsockError::BufDescMissing); return Err(VsockError::BufDescMissing);
} }
let buf_desc = head.next_descriptor().ok_or(VsockError::BufDescMissing)?; let buf_desc = desc_chain.next().ok_or(VsockError::BufDescMissing)?;
let buf_size = buf_desc.len as usize; let buf_size = buf_desc.len() as usize;
Ok(Self { Ok(Self {
hdr: get_host_address_range(head.mem, head.addr, VSOCK_PKT_HDR_SIZE) hdr: get_host_address_range(desc_chain.memory(), head.addr(), VSOCK_PKT_HDR_SIZE)
.ok_or(VsockError::GuestMemory)? as *mut u8, .ok_or(VsockError::GuestMemory)? as *mut u8,
buf: Some( buf: Some(
get_host_address_range(buf_desc.mem, buf_desc.addr, buf_size) get_host_address_range(desc_chain.memory(), buf_desc.addr(), buf_size)
.ok_or(VsockError::GuestMemory)? as *mut u8, .ok_or(VsockError::GuestMemory)? as *mut u8,
), ),
buf_size, buf_size,
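
The packet constructors now consume descriptors with next() instead of chasing head.next_descriptor(), and resolve host pointers through the memory handle carried by the chain itself. Reduced to its skeleton, the header-plus-data walk looks like this (a sketch; split_hdr_and_buf is an illustrative name, while the error variants and VSOCK_PKT_HDR_SIZE are the ones defined above, and Mem abbreviates GuestMemoryAtomic<GuestMemoryMmap>):

    use virtio_queue::{Descriptor, DescriptorChain};

    fn split_hdr_and_buf(
        desc_chain: &mut DescriptorChain<Mem>,
    ) -> Result<(Descriptor, Descriptor)> {
        // The first descriptor must hold at least the packet header.
        let head = desc_chain.next().ok_or(VsockError::HdrDescMissing)?;
        if head.len() < VSOCK_PKT_HDR_SIZE as u32 {
            return Err(VsockError::HdrDescTooSmall(head.len()));
        }
        // The second descriptor carries the payload; host pointers are then
        // obtained via get_host_address_range(desc_chain.memory(), ...).
        let buf = desc_chain.next().ok_or(VsockError::BufDescMissing)?;
        Ok((head, buf))
    }
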
@ -343,9 +353,9 @@ mod tests {
use super::*; use super::*;
use crate::vsock::defs::MAX_PKT_BUF_SIZE; use crate::vsock::defs::MAX_PKT_BUF_SIZE;
use crate::GuestMemoryMmap; use crate::GuestMemoryMmap;
use virtio_queue::defs::VIRTQ_DESC_F_WRITE;
use vm_memory::GuestAddress; use vm_memory::GuestAddress;
use vm_virtio::queue::testing::VirtqDesc as GuestQDesc; use vm_virtio::queue::testing::VirtqDesc as GuestQDesc;
use vm_virtio::queue::VIRTQ_DESC_F_WRITE;
macro_rules! create_context { macro_rules! create_context {
($test_ctx:ident, $handler_ctx:ident) => { ($test_ctx:ident, $handler_ctx:ident) => {
@ -365,8 +375,9 @@ mod tests {
}; };
($test_ctx:expr, $handler_ctx:expr, $err:pat, $ctor:ident, $vq:expr) => { ($test_ctx:expr, $handler_ctx:expr, $err:pat, $ctor:ident, $vq:expr) => {
match VsockPacket::$ctor( match VsockPacket::$ctor(
&$handler_ctx.handler.queues[$vq] &mut $handler_ctx.handler.queues[$vq]
.iter(&$test_ctx.mem) .iter()
.unwrap()
.next() .next()
.unwrap(), .unwrap(),
) { ) {
@ -394,8 +405,9 @@ mod tests {
create_context!(test_ctx, handler_ctx); create_context!(test_ctx, handler_ctx);
let pkt = VsockPacket::from_tx_virtq_head( let pkt = VsockPacket::from_tx_virtq_head(
&handler_ctx.handler.queues[1] &mut handler_ctx.handler.queues[1]
.iter(&test_ctx.mem) .iter()
.unwrap()
.next() .next()
.unwrap(), .unwrap(),
) )
@ -430,8 +442,9 @@ mod tests {
create_context!(test_ctx, handler_ctx); create_context!(test_ctx, handler_ctx);
set_pkt_len(0, &handler_ctx.guest_txvq.dtable[0], &test_ctx.mem); set_pkt_len(0, &handler_ctx.guest_txvq.dtable[0], &test_ctx.mem);
let mut pkt = VsockPacket::from_tx_virtq_head( let mut pkt = VsockPacket::from_tx_virtq_head(
&handler_ctx.handler.queues[1] &mut handler_ctx.handler.queues[1]
.iter(&test_ctx.mem) .iter()
.unwrap()
.next() .next()
.unwrap(), .unwrap(),
) )
@ -486,8 +499,9 @@ mod tests {
{ {
create_context!(test_ctx, handler_ctx); create_context!(test_ctx, handler_ctx);
let pkt = VsockPacket::from_rx_virtq_head( let pkt = VsockPacket::from_rx_virtq_head(
&handler_ctx.handler.queues[0] &mut handler_ctx.handler.queues[0]
.iter(&test_ctx.mem) .iter()
.unwrap()
.next() .next()
.unwrap(), .unwrap(),
) )
@ -541,8 +555,9 @@ mod tests {
create_context!(test_ctx, handler_ctx); create_context!(test_ctx, handler_ctx);
let mut pkt = VsockPacket::from_rx_virtq_head( let mut pkt = VsockPacket::from_rx_virtq_head(
&handler_ctx.handler.queues[0] &mut handler_ctx.handler.queues[0]
.iter(&test_ctx.mem) .iter()
.unwrap()
.next() .next()
.unwrap(), .unwrap(),
) )
@ -630,8 +645,9 @@ mod tests {
fn test_packet_buf() { fn test_packet_buf() {
create_context!(test_ctx, handler_ctx); create_context!(test_ctx, handler_ctx);
let mut pkt = VsockPacket::from_rx_virtq_head( let mut pkt = VsockPacket::from_rx_virtq_head(
&handler_ctx.handler.queues[0] &mut handler_ctx.handler.queues[0]
.iter(&test_ctx.mem) .iter()
.unwrap()
.next() .next()
.unwrap(), .unwrap(),
) )


@ -840,8 +840,9 @@ mod tests {
let vsock_test_ctx = VsockTestContext::new(); let vsock_test_ctx = VsockTestContext::new();
let mut handler_ctx = vsock_test_ctx.create_epoll_handler_context(); let mut handler_ctx = vsock_test_ctx.create_epoll_handler_context();
let pkt = VsockPacket::from_rx_virtq_head( let pkt = VsockPacket::from_rx_virtq_head(
&handler_ctx.handler.queues[0] &mut handler_ctx.handler.queues[0]
.iter(&vsock_test_ctx.mem) .iter()
.unwrap()
.next() .next()
.unwrap(), .unwrap(),
) )


@ -7,8 +7,8 @@
use super::Error as DeviceError; use super::Error as DeviceError;
use super::{ use super::{
ActivateError, ActivateResult, EpollHelper, EpollHelperError, EpollHelperHandler, Queue, ActivateError, ActivateResult, EpollHelper, EpollHelperError, EpollHelperHandler, VirtioCommon,
VirtioCommon, VirtioDevice, VirtioDeviceType, EPOLL_HELPER_EVENT_LAST, VIRTIO_F_VERSION_1, VirtioDevice, VirtioDeviceType, EPOLL_HELPER_EVENT_LAST, VIRTIO_F_VERSION_1,
}; };
use crate::seccomp_filters::Thread; use crate::seccomp_filters::Thread;
use crate::thread_helper::spawn_virtio_thread; use crate::thread_helper::spawn_virtio_thread;
@ -25,7 +25,8 @@ use std::sync::{Arc, Barrier, Mutex};
use std::time::Instant; use std::time::Instant;
use versionize::{VersionMap, Versionize, VersionizeResult}; use versionize::{VersionMap, Versionize, VersionizeResult};
use versionize_derive::Versionize; use versionize_derive::Versionize;
use vm_memory::{Bytes, GuestAddressSpace, GuestMemoryAtomic}; use virtio_queue::Queue;
use vm_memory::{Bytes, GuestMemoryAtomic};
use vm_migration::VersionMapped; use vm_migration::VersionMapped;
use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable}; use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable};
use vmm_sys_util::eventfd::EventFd; use vmm_sys_util::eventfd::EventFd;
@ -46,8 +47,7 @@ const WATCHDOG_TIMER_INTERVAL: i64 = 15;
const WATCHDOG_TIMEOUT: u64 = WATCHDOG_TIMER_INTERVAL as u64 + 5; const WATCHDOG_TIMEOUT: u64 = WATCHDOG_TIMER_INTERVAL as u64 + 5;
struct WatchdogEpollHandler { struct WatchdogEpollHandler {
queues: Vec<Queue>, queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
mem: GuestMemoryAtomic<GuestMemoryMmap>,
interrupt_cb: Arc<dyn VirtioInterrupt>, interrupt_cb: Arc<dyn VirtioInterrupt>,
queue_evt: EventFd, queue_evt: EventFd,
kill_evt: EventFd, kill_evt: EventFd,
@ -64,12 +64,13 @@ impl WatchdogEpollHandler {
let queue = &mut self.queues[0]; let queue = &mut self.queues[0];
let mut used_desc_heads = [(0, 0); QUEUE_SIZE as usize]; let mut used_desc_heads = [(0, 0); QUEUE_SIZE as usize];
let mut used_count = 0; let mut used_count = 0;
let mem = self.mem.memory(); for mut desc_chain in queue.iter().unwrap() {
for avail_desc in queue.iter(&mem) { let desc = desc_chain.next().unwrap();
let mut len = 0; let mut len = 0;
if avail_desc.is_write_only() && mem.write_obj(1u8, avail_desc.addr).is_ok() { if desc.is_write_only() && desc_chain.memory().write_obj(1u8, desc.addr()).is_ok() {
len = avail_desc.len; len = desc.len();
// If this is the first "ping" then setup the timer // If this is the first "ping" then setup the timer
if self.last_ping_time.lock().unwrap().is_none() { if self.last_ping_time.lock().unwrap().is_none() {
info!( info!(
@ -83,12 +84,12 @@ impl WatchdogEpollHandler {
self.last_ping_time.lock().unwrap().replace(Instant::now()); self.last_ping_time.lock().unwrap().replace(Instant::now());
} }
used_desc_heads[used_count] = (avail_desc.index, len); used_desc_heads[used_count] = (desc_chain.head_index(), len);
used_count += 1; used_count += 1;
} }
for &(desc_index, len) in &used_desc_heads[..used_count] { for &(desc_index, len) in &used_desc_heads[..used_count] {
queue.add_used(&mem, desc_index, len); queue.add_used(desc_index, len).unwrap();
} }
used_count > 0 used_count > 0
} }
@ -288,9 +289,9 @@ impl VirtioDevice for Watchdog {
fn activate( fn activate(
&mut self, &mut self,
mem: GuestMemoryAtomic<GuestMemoryMmap>, _mem: GuestMemoryAtomic<GuestMemoryMmap>,
interrupt_cb: Arc<dyn VirtioInterrupt>, interrupt_cb: Arc<dyn VirtioInterrupt>,
queues: Vec<Queue>, queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
mut queue_evts: Vec<EventFd>, mut queue_evts: Vec<EventFd>,
) -> ActivateResult { ) -> ActivateResult {
self.common.activate(&queues, &queue_evts, &interrupt_cb)?; self.common.activate(&queues, &queue_evts, &interrupt_cb)?;
@ -308,7 +309,6 @@ impl VirtioDevice for Watchdog {
let mut handler = WatchdogEpollHandler { let mut handler = WatchdogEpollHandler {
queues, queues,
mem,
interrupt_cb, interrupt_cb,
queue_evt: queue_evts.remove(0), queue_evt: queue_evts.remove(0),
kill_evt, kill_evt,


@ -10,4 +10,5 @@ default = []
[dependencies] [dependencies]
log = "0.4.14" log = "0.4.14"
virtio-bindings = { version = "0.1.0", features = ["virtio-v5_0_0"] } virtio-bindings = { version = "0.1.0", features = ["virtio-v5_0_0"] }
virtio-queue = { path = "../virtio-queue" }
vm-memory = { version = "0.6.0", features = ["backend-mmap", "backend-atomic", "backend-bitmap"] } vm-memory = { version = "0.6.0", features = ["backend-mmap", "backend-atomic", "backend-bitmap"] }


@ -10,17 +10,11 @@
//! Implements virtio queues //! Implements virtio queues
#[macro_use]
extern crate log;
use std::fmt; use std::fmt;
pub mod queue; pub mod queue;
pub use queue::*; pub use queue::*;
pub type VirtioIommuRemapping =
Box<dyn Fn(u64) -> std::result::Result<u64, std::io::Error> + Send + Sync>;
pub const VIRTIO_MSI_NO_VECTOR: u16 = 0xffff; pub const VIRTIO_MSI_NO_VECTOR: u16 = 0xffff;
// Types taken from linux/virtio_ids.h // Types taken from linux/virtio_ids.h


@ -8,718 +8,12 @@
// //
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause // SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
use crate::{VirtioIommuRemapping, VIRTIO_MSI_NO_VECTOR};
use std::cmp::min;
use std::convert::TryInto;
use std::fmt::{self, Display};
use std::num::Wrapping;
use std::sync::atomic::{fence, Ordering};
use std::sync::Arc;
use vm_memory::{
bitmap::AtomicBitmap, Address, ByteValued, Bytes, GuestAddress, GuestMemory, GuestMemoryError,
GuestUsize,
};
pub const VIRTQ_DESC_F_NEXT: u16 = 0x1;
pub const VIRTQ_DESC_F_WRITE: u16 = 0x2;
pub const VIRTQ_DESC_F_INDIRECT: u16 = 0x4;
type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>;
#[derive(Debug)]
pub enum Error {
GuestMemoryError,
InvalidIndirectDescriptor,
InvalidChain,
InvalidOffset(u64),
InvalidRingIndexFromMemory(GuestMemoryError),
}
impl Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::Error::*;
match self {
GuestMemoryError => write!(f, "error accessing guest memory"),
InvalidChain => write!(f, "invalid descriptor chain"),
InvalidIndirectDescriptor => write!(f, "invalid indirect descriptor"),
InvalidOffset(o) => write!(f, "invalid offset {}", o),
InvalidRingIndexFromMemory(e) => write!(f, "invalid ring index from memory: {}", e),
}
}
}
// GuestMemoryMmap::read_obj() will be used to fetch the descriptor,
// which has an explicit constraint that the entire descriptor doesn't
// cross the page boundary. Otherwise the descriptor may be split into
// two mmap regions, which causes GuestMemoryMmap::read_obj() to fail.
//
// The Virtio Spec 1.0 defines the alignment of VirtIO descriptor is 16 bytes,
// which fulfills the explicit constraint of GuestMemoryMmap::read_obj().
/// An iterator over a single descriptor chain. Not to be confused with AvailIter,
/// which iterates over the descriptor chain heads in a queue.
pub struct DescIter<'a> {
next: Option<DescriptorChain<'a>>,
}
impl<'a> DescIter<'a> {
/// Returns an iterator that only yields the readable descriptors in the chain.
pub fn readable(self) -> impl Iterator<Item = DescriptorChain<'a>> {
self.filter(|d| !d.is_write_only())
}
/// Returns an iterator that only yields the writable descriptors in the chain.
pub fn writable(self) -> impl Iterator<Item = DescriptorChain<'a>> {
self.filter(DescriptorChain::is_write_only)
}
}
impl<'a> Iterator for DescIter<'a> {
type Item = DescriptorChain<'a>;
fn next(&mut self) -> Option<Self::Item> {
if let Some(current) = self.next.take() {
self.next = current.next_descriptor();
Some(current)
} else {
None
}
}
}
/// A virtio descriptor, with a C-compatible representation.
#[repr(C)]
#[derive(Default, Clone, Copy)]
pub struct Descriptor {
addr: u64,
len: u32,
flags: u16,
next: u16,
}
unsafe impl ByteValued for Descriptor {}
/// A virtio descriptor head, not tied to a GuestMemoryMmap.
pub struct DescriptorHead {
desc_table: GuestAddress,
table_size: u16,
index: u16,
iommu_mapping_cb: Option<Arc<VirtioIommuRemapping>>,
}
/// A virtio descriptor chain.
#[derive(Clone)]
pub struct DescriptorChain<'a> {
desc_table: GuestAddress,
table_size: u16,
ttl: u16, // used to prevent infinite chain cycles
iommu_mapping_cb: Option<Arc<VirtioIommuRemapping>>,
/// Reference to guest memory
pub mem: &'a GuestMemoryMmap,
/// Index into the descriptor table
pub index: u16,
/// Guest physical address of device specific data
pub addr: GuestAddress,
/// Length of device specific data
pub len: u32,
/// Includes next, write, and indirect bits
pub flags: u16,
/// Index into the descriptor table of the next descriptor if flags has
/// the next bit set
pub next: u16,
}
impl<'a> DescriptorChain<'a> {
pub fn checked_new(
mem: &GuestMemoryMmap,
desc_table: GuestAddress,
table_size: u16,
index: u16,
iommu_mapping_cb: Option<Arc<VirtioIommuRemapping>>,
) -> Option<DescriptorChain> {
if index >= table_size {
return None;
}
let desc_head = match mem.checked_offset(desc_table, (index as usize) * 16) {
Some(a) => a,
None => return None,
};
mem.checked_offset(desc_head, 16)?;
// These reads can't fail unless Guest memory is hopelessly broken.
let desc = match mem.read_obj::<Descriptor>(desc_head) {
Ok(ret) => ret,
Err(_) => {
// TODO log address
error!("Failed to read from memory");
return None;
}
};
// Translate address if necessary
let desc_addr = if let Some(iommu_mapping_cb) = &iommu_mapping_cb {
(iommu_mapping_cb)(desc.addr).unwrap()
} else {
desc.addr
};
let chain = DescriptorChain {
mem,
desc_table,
table_size,
ttl: table_size,
index,
addr: GuestAddress(desc_addr),
len: desc.len,
flags: desc.flags,
next: desc.next,
iommu_mapping_cb,
};
if chain.is_valid() {
Some(chain)
} else {
None
}
}
pub fn new_from_indirect(&self) -> Result<DescriptorChain, Error> {
if !self.is_indirect() {
return Err(Error::InvalidIndirectDescriptor);
}
let desc_head = self.addr;
self.mem
.checked_offset(desc_head, 16)
.ok_or(Error::GuestMemoryError)?;
// These reads can't fail unless Guest memory is hopelessly broken.
let desc = match self.mem.read_obj::<Descriptor>(desc_head) {
Ok(ret) => ret,
Err(_) => return Err(Error::GuestMemoryError),
};
// Translate address if necessary
let (desc_addr, iommu_mapping_cb) =
if let Some(iommu_mapping_cb) = self.iommu_mapping_cb.clone() {
(
(iommu_mapping_cb)(desc.addr).unwrap(),
Some(iommu_mapping_cb),
)
} else {
(desc.addr, None)
};
let chain = DescriptorChain {
mem: self.mem,
desc_table: self.addr,
table_size: (self.len / 16).try_into().unwrap(),
ttl: (self.len / 16).try_into().unwrap(),
index: 0,
addr: GuestAddress(desc_addr),
len: desc.len,
flags: desc.flags,
next: desc.next,
iommu_mapping_cb,
};
if !chain.is_valid() {
return Err(Error::InvalidChain);
}
Ok(chain)
}
/// Returns a copy of a descriptor referencing a different GuestMemoryMmap object.
pub fn new_from_head(
mem: &'a GuestMemoryMmap,
head: DescriptorHead,
) -> Result<DescriptorChain<'a>, Error> {
match DescriptorChain::checked_new(
mem,
head.desc_table,
head.table_size,
head.index,
head.iommu_mapping_cb,
) {
Some(d) => Ok(d),
None => Err(Error::InvalidChain),
}
}
/// Returns a DescriptorHead that can be used to build a copy of a descriptor
/// referencing a different GuestMemoryMmap.
pub fn get_head(&self) -> DescriptorHead {
DescriptorHead {
desc_table: self.desc_table,
table_size: self.table_size,
index: self.index,
iommu_mapping_cb: self.iommu_mapping_cb.clone(),
}
}
fn is_valid(&self) -> bool {
!(!self.mem.check_range(self.addr, self.len as usize)
|| (self.has_next() && self.next >= self.table_size))
}
/// Gets if this descriptor has another descriptor linked after it.
pub fn has_next(&self) -> bool {
self.flags & VIRTQ_DESC_F_NEXT != 0 && self.ttl > 1
}
/// If the driver designated this as a write only descriptor.
///
/// If this is false, this descriptor is read only.
/// Write only means that the emulated device can write and the driver can read.
pub fn is_write_only(&self) -> bool {
self.flags & VIRTQ_DESC_F_WRITE != 0
}
pub fn is_indirect(&self) -> bool {
self.flags & VIRTQ_DESC_F_INDIRECT != 0
}
/// Gets the next descriptor in this descriptor chain, if there is one.
///
/// Note that this is distinct from the next descriptor chain returned by `AvailIter`, which is
/// the head of the next _available_ descriptor chain.
pub fn next_descriptor(&self) -> Option<DescriptorChain<'a>> {
if self.has_next() {
DescriptorChain::checked_new(
self.mem,
self.desc_table,
self.table_size,
self.next,
self.iommu_mapping_cb.clone(),
)
.map(|mut c| {
c.ttl = self.ttl - 1;
c
})
} else {
None
}
}
}
impl<'a> IntoIterator for DescriptorChain<'a> {
type Item = DescriptorChain<'a>;
type IntoIter = DescIter<'a>;
fn into_iter(self) -> Self::IntoIter {
DescIter { next: Some(self) }
}
}
/// Consuming iterator over all available descriptor chain heads in the queue.
pub struct AvailIter<'a, 'b> {
mem: &'a GuestMemoryMmap,
desc_table: GuestAddress,
avail_ring: GuestAddress,
next_index: Wrapping<u16>,
last_index: Wrapping<u16>,
queue_size: u16,
next_avail: &'b mut Wrapping<u16>,
iommu_mapping_cb: Option<Arc<VirtioIommuRemapping>>,
}
impl<'a, 'b> AvailIter<'a, 'b> {
pub fn new(mem: &'a GuestMemoryMmap, q_next_avail: &'b mut Wrapping<u16>) -> AvailIter<'a, 'b> {
AvailIter {
mem,
desc_table: GuestAddress(0),
avail_ring: GuestAddress(0),
next_index: Wrapping(0),
last_index: Wrapping(0),
queue_size: 0,
next_avail: q_next_avail,
iommu_mapping_cb: None,
}
}
}
impl<'a, 'b> Iterator for AvailIter<'a, 'b> {
type Item = DescriptorChain<'a>;
fn next(&mut self) -> Option<Self::Item> {
if self.next_index == self.last_index {
return None;
}
let offset = (4 + (self.next_index.0 % self.queue_size) * 2) as usize;
let avail_addr = match self.mem.checked_offset(self.avail_ring, offset) {
Some(a) => a,
None => return None,
};
// This index is checked below in checked_new
let desc_index: u16 = match self.mem.read_obj(avail_addr) {
Ok(ret) => ret,
Err(_) => {
// TODO log address
error!("Failed to read from memory");
return None;
}
};
self.next_index += Wrapping(1);
let ret = DescriptorChain::checked_new(
self.mem,
self.desc_table,
self.queue_size,
desc_index,
self.iommu_mapping_cb.clone(),
);
if ret.is_some() {
*self.next_avail += Wrapping(1);
}
ret
}
}
#[derive(Clone)]
/// A virtio queue's parameters.
pub struct Queue {
/// The maximal size in elements offered by the device
pub max_size: u16,
/// The queue size in elements the driver selected
pub size: u16,
/// Indicates if the queue is finished with configuration
pub ready: bool,
/// Interrupt vector index of the queue
pub vector: u16,
/// Guest physical address of the descriptor table
pub desc_table: GuestAddress,
/// Guest physical address of the available ring
pub avail_ring: GuestAddress,
/// Guest physical address of the used ring
pub used_ring: GuestAddress,
pub next_avail: Wrapping<u16>,
pub next_used: Wrapping<u16>,
pub iommu_mapping_cb: Option<Arc<VirtioIommuRemapping>>,
/// VIRTIO_F_RING_EVENT_IDX negotiated
event_idx: bool,
/// The last used value when using EVENT_IDX
signalled_used: Option<Wrapping<u16>>,
}
impl Queue {
/// Constructs an empty virtio queue with the given `max_size`.
pub fn new(max_size: u16) -> Queue {
Queue {
max_size,
size: max_size,
ready: false,
vector: VIRTIO_MSI_NO_VECTOR,
desc_table: GuestAddress(0),
avail_ring: GuestAddress(0),
used_ring: GuestAddress(0),
next_avail: Wrapping(0),
next_used: Wrapping(0),
iommu_mapping_cb: None,
event_idx: false,
signalled_used: None,
}
}
pub fn get_max_size(&self) -> u16 {
self.max_size
}
pub fn enable(&mut self, set: bool) {
self.ready = set;
if set {
// Translate address of descriptor table and vrings.
if let Some(iommu_mapping_cb) = &self.iommu_mapping_cb {
self.desc_table =
GuestAddress((iommu_mapping_cb)(self.desc_table.raw_value()).unwrap());
self.avail_ring =
GuestAddress((iommu_mapping_cb)(self.avail_ring.raw_value()).unwrap());
self.used_ring =
GuestAddress((iommu_mapping_cb)(self.used_ring.raw_value()).unwrap());
}
} else {
self.desc_table = GuestAddress(0);
self.avail_ring = GuestAddress(0);
self.used_ring = GuestAddress(0);
}
}
/// Return the actual size of the queue, as the driver may not set up a
/// queue as big as the device allows.
pub fn actual_size(&self) -> u16 {
min(self.size, self.max_size)
}
/// Reset the queue to a state that is acceptable for a device reset
pub fn reset(&mut self) {
self.ready = false;
self.size = self.max_size;
self.next_avail = Wrapping(0);
self.next_used = Wrapping(0);
self.vector = VIRTIO_MSI_NO_VECTOR;
self.desc_table = GuestAddress(0);
self.avail_ring = GuestAddress(0);
self.used_ring = GuestAddress(0);
self.event_idx = false;
self.signalled_used = None;
}
pub fn is_valid(&self, mem: &GuestMemoryMmap) -> bool {
let queue_size = self.actual_size() as usize;
let desc_table = self.desc_table;
let desc_table_size = 16 * queue_size;
let avail_ring = self.avail_ring;
let avail_ring_size = 6 + 2 * queue_size;
let used_ring = self.used_ring;
let used_ring_size = 6 + 8 * queue_size;
if !self.ready {
error!("attempt to use virtio queue that is not marked ready");
false
} else if self.size > self.max_size || self.size == 0 || (self.size & (self.size - 1)) != 0
{
error!("virtio queue with invalid size: {}", self.size);
false
} else if desc_table
.checked_add(desc_table_size as GuestUsize)
.map_or(true, |v| !mem.address_in_range(v))
{
error!(
"virtio queue descriptor table goes out of bounds: start:0x{:08x} size:0x{:08x}",
desc_table.raw_value(),
desc_table_size
);
false
} else if avail_ring
.checked_add(avail_ring_size as GuestUsize)
.map_or(true, |v| !mem.address_in_range(v))
{
error!(
"virtio queue available ring goes out of bounds: start:0x{:08x} size:0x{:08x}",
avail_ring.raw_value(),
avail_ring_size
);
false
} else if used_ring
.checked_add(used_ring_size as GuestUsize)
.map_or(true, |v| !mem.address_in_range(v))
{
error!(
"virtio queue used ring goes out of bounds: start:0x{:08x} size:0x{:08x}",
used_ring.raw_value(),
used_ring_size
);
false
} else if desc_table.mask(0xf) != 0 {
error!("virtio queue descriptor table breaks alignment constraints");
false
} else if avail_ring.mask(0x1) != 0 {
error!("virtio queue available ring breaks alignment constraints");
false
} else if used_ring.mask(0x3) != 0 {
error!("virtio queue used ring breaks alignment constraints");
false
} else {
true
}
}
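For reference, the three size computations above follow the virtio 1.x split-ring layout: 16 bytes per descriptor, plus a 4-byte header and a trailing 2-byte event field on each ring. A minimal sketch (the helper name is illustrative, not part of this code):

fn split_ring_sizes(queue_size: usize) -> (usize, usize, usize) {
    let desc_table = 16 * queue_size; // 16 bytes per descriptor
    let avail_ring = 6 + 2 * queue_size; // flags (2) + idx (2) + ring (2 * size) + used_event (2)
    let used_ring = 6 + 8 * queue_size; // flags (2) + idx (2) + ring (8 * size) + avail_event (2)
    (desc_table, avail_ring, used_ring)
}

For a 256-entry queue this yields 4096, 518 and 2054 bytes respectively.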
/// A consuming iterator over all available descriptor chain heads offered by the driver.
pub fn iter<'a, 'b>(&'b mut self, mem: &'a GuestMemoryMmap) -> AvailIter<'a, 'b> {
let queue_size = self.actual_size();
let avail_ring = self.avail_ring;
let index_addr = match mem.checked_offset(avail_ring, 2) {
Some(ret) => ret,
None => {
// TODO log address
warn!("Invalid offset");
return AvailIter::new(mem, &mut self.next_avail);
}
};
// Note that last_index has no invalid values
let last_index: u16 = match mem.read_obj::<u16>(index_addr) {
Ok(ret) => ret,
Err(_) => return AvailIter::new(mem, &mut self.next_avail),
};
AvailIter {
mem,
desc_table: self.desc_table,
avail_ring,
next_index: self.next_avail,
last_index: Wrapping(last_index),
queue_size,
next_avail: &mut self.next_avail,
iommu_mapping_cb: self.iommu_mapping_cb.clone(),
}
}
/// Update avail_event on the used ring with the last index in the avail ring.
pub fn update_avail_event(&mut self, mem: &GuestMemoryMmap) {
let index_addr = match mem.checked_offset(self.avail_ring, 2) {
Some(ret) => ret,
None => {
// TODO log address
warn!("Invalid offset");
return;
}
};
// Note that last_index has no invalid values
let last_index: u16 = match mem.read_obj::<u16>(index_addr) {
Ok(ret) => ret,
Err(_) => return,
};
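        // avail_event lives right after the used ring entries, at used_ring + 4 + 8 * size.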
match mem.checked_offset(self.used_ring, (4 + self.actual_size() * 8) as usize) {
Some(a) => {
mem.write_obj(last_index, a).unwrap();
}
None => warn!("Can't update avail_event"),
}
        // This fence ensures that both the guest and the device see consistent values (avail idx and avail event).
fence(Ordering::SeqCst);
}
/// Return the value present in the used_event field of the avail ring.
#[inline(always)]
pub fn get_used_event(&self, mem: &GuestMemoryMmap) -> Option<Wrapping<u16>> {
let avail_ring = self.avail_ring;
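        // used_event lives right after the avail ring entries, at avail_ring + 4 + 2 * size.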
let used_event_addr =
match mem.checked_offset(avail_ring, (4 + self.actual_size() * 2) as usize) {
Some(a) => a,
None => {
warn!("Invalid offset looking for used_event");
return None;
}
};
// This fence ensures we're seeing the latest update from the guest.
fence(Ordering::SeqCst);
match mem.read_obj::<u16>(used_event_addr) {
Ok(ret) => Some(Wrapping(ret)),
Err(_) => None,
}
}
/// Puts an available descriptor head into the used ring for use by the guest.
pub fn add_used(&mut self, mem: &GuestMemoryMmap, desc_index: u16, len: u32) -> Option<u16> {
if desc_index >= self.actual_size() {
error!(
"attempted to add out of bounds descriptor to used ring: {}",
desc_index
);
return None;
}
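        // Used-ring entries start at used_ring + 4 and are 8 bytes each ({ id: u32, len: u32 }).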
let used_ring = self.used_ring;
let next_used = u64::from(self.next_used.0 % self.actual_size());
let used_elem = used_ring.unchecked_add(4 + next_used * 8);
        // These writes can't fail as we are guaranteed to be within the used ring, given the desc_index check above.
mem.write_obj(u32::from(desc_index), used_elem).unwrap();
mem.write_obj(len as u32, used_elem.unchecked_add(4))
.unwrap();
self.next_used += Wrapping(1);
// This fence ensures all descriptor writes are visible before the index update is.
fence(Ordering::Release);
mem.write_obj(self.next_used.0 as u16, used_ring.unchecked_add(2))
.unwrap();
Some(self.next_used.0)
}
/// Goes back one position in the available descriptor chain offered by the driver.
/// Rust does not support bidirectional iterators. This is the only way to revert the effect
/// of an iterator increment on the queue.
pub fn go_to_previous_position(&mut self) {
self.next_avail -= Wrapping(1);
}
/// Get ring's index from memory.
fn index_from_memory(&self, ring: GuestAddress, mem: &GuestMemoryMmap) -> Result<u16, Error> {
mem.read_obj::<u16>(
mem.checked_offset(ring, 2)
.ok_or_else(|| Error::InvalidOffset(ring.raw_value() + 2))?,
)
.map_err(Error::InvalidRingIndexFromMemory)
}
/// Get latest index from available ring.
pub fn avail_index_from_memory(&self, mem: &GuestMemoryMmap) -> Result<u16, Error> {
self.index_from_memory(self.avail_ring, mem)
}
/// Get latest index from used ring.
pub fn used_index_from_memory(&self, mem: &GuestMemoryMmap) -> Result<u16, Error> {
self.index_from_memory(self.used_ring, mem)
}
pub fn available_descriptors(&self, mem: &GuestMemoryMmap) -> Result<bool, Error> {
Ok(self.used_index_from_memory(mem)? < self.avail_index_from_memory(mem)?)
}
pub fn set_event_idx(&mut self, enabled: bool) {
/* Also reset the last signalled event */
self.signalled_used = None;
self.event_idx = enabled;
}
pub fn needs_notification(&mut self, mem: &GuestMemoryMmap, used_idx: Wrapping<u16>) -> bool {
if !self.event_idx {
return true;
}
let mut notify = true;
if let Some(old_idx) = self.signalled_used {
if let Some(used_event) = self.get_used_event(mem) {
debug!(
"used_event = {:?} used_idx = {:?} old_idx = {:?}",
used_event, used_idx, old_idx
);
if (used_idx - used_event - Wrapping(1u16)) >= (used_idx - old_idx) {
notify = false;
}
}
}
self.signalled_used = Some(used_idx);
debug!("Needs notification: {:?}", notify);
notify
}
}
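The wrapping arithmetic in needs_notification() mirrors the virtio spec's vring_need_event(): a notification is needed only when used_event (mod 2^16) falls in [old_idx, used_idx), i.e. among the used entries published since the last signal. A self-contained sketch of the same rule (names are illustrative, not from this file):

use std::num::Wrapping;

// Notify iff used_event lies in [old_idx, new_idx) modulo 2^16.
fn vring_need_event(used_event: Wrapping<u16>, new_idx: Wrapping<u16>, old_idx: Wrapping<u16>) -> bool {
    (new_idx - used_event - Wrapping(1)) < (new_idx - old_idx)
}

fn main() {
    let (old, new) = (Wrapping(5u16), Wrapping(8u16));
    assert!(vring_need_event(Wrapping(6), new, old)); // 6 was just crossed
    assert!(!vring_need_event(Wrapping(8), new, old)); // 8 not yet crossed
}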
 pub mod testing {
-    use super::*;
     use std::marker::PhantomData;
     use std::mem;
-    use vm_memory::Bytes;
+    use virtio_queue::{Queue, QueueState};
     use vm_memory::{bitmap::AtomicBitmap, Address, GuestAddress, GuestUsize};
+    use vm_memory::{Bytes, GuestMemoryAtomic};

     type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>;

@@ -885,6 +179,7 @@ pub mod testing {
         pub dtable: Vec<VirtqDesc<'a>>,
         pub avail: VirtqAvail<'a>,
         pub used: VirtqUsed<'a>,
+        pub mem: &'a GuestMemoryMmap,
     }

     impl<'a> VirtQueue<'a> {
@@ -918,6 +213,7 @@ pub mod testing {
             dtable,
             avail,
             used,
+            mem,
         }
     }

@@ -938,14 +234,18 @@ pub mod testing {
         }

         // Creates a new Queue, using the underlying memory regions represented by the VirtQueue.
-        pub fn create_queue(&self) -> Queue {
-            let mut q = Queue::new(self.size());
+        pub fn create_queue(&self) -> Queue<GuestMemoryAtomic<GuestMemoryMmap>> {
+            let mem = GuestMemoryAtomic::new(self.mem.clone());
+            let mut q = Queue::<
+                GuestMemoryAtomic<GuestMemoryMmap>,
+                QueueState<GuestMemoryAtomic<GuestMemoryMmap>>,
+            >::new(mem, self.size());

-            q.size = self.size();
-            q.ready = true;
-            q.desc_table = self.dtable_start();
-            q.avail_ring = self.avail_start();
-            q.used_ring = self.used_start();
+            q.state.size = self.size();
+            q.state.ready = true;
+            q.state.desc_table = self.dtable_start();
+            q.state.avail_ring = self.avail_start();
+            q.state.used_ring = self.used_start();

             q
         }

@@ -959,229 +259,3 @@ pub mod testing {
         }
     }
 }
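With this change the test helper returns a queue generic over GuestMemoryAtomic. A hedged usage sketch, relying only on names visible in the hunk above:

    let m = &GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
    let vq = VirtQueue::new(GuestAddress(0), m, 16);
    let q = vq.create_queue(); // Queue<GuestMemoryAtomic<GuestMemoryMmap>>
    assert!(q.state.ready);
    assert_eq!(q.state.size, 16);
    assert_eq!(q.state.desc_table, vq.dtable_start());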
#[cfg(test)]
pub mod tests {
use super::testing::*;
pub use super::*;
use vm_memory::{bitmap::AtomicBitmap, GuestAddress};
type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>;
#[test]
fn test_checked_new_descriptor_chain() {
let m = &GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
let vq = VirtQueue::new(GuestAddress(0), m, 16);
assert!(vq.end().0 < 0x1000);
// index >= queue_size
assert!(DescriptorChain::checked_new(m, vq.start(), 16, 16, None).is_none());
// desc_table address is way off
assert!(
DescriptorChain::checked_new(m, GuestAddress(0x00ff_ffff_ffff), 16, 0, None).is_none()
);
// the addr field of the descriptor is way off
vq.dtable[0].addr.set(0x0fff_ffff_ffff);
assert!(DescriptorChain::checked_new(m, vq.start(), 16, 0, None).is_none());
// let's create some invalid chains
{
// the addr field of the desc is ok now
vq.dtable[0].addr.set(0x1000);
// ...but the length is too large
vq.dtable[0].len.set(0xffff_ffff);
assert!(DescriptorChain::checked_new(m, vq.start(), 16, 0, None).is_none());
}
{
// the first desc has a normal len now, and the next_descriptor flag is set
vq.dtable[0].len.set(0x1000);
vq.dtable[0].flags.set(VIRTQ_DESC_F_NEXT);
//..but the index of the next descriptor is too large
vq.dtable[0].next.set(16);
assert!(DescriptorChain::checked_new(m, vq.start(), 16, 0, None).is_none());
}
// finally, let's test an ok chain
{
vq.dtable[0].next.set(1);
vq.dtable[1].set(0x2000, 0x1000, 0, 0);
let c = DescriptorChain::checked_new(m, vq.start(), 16, 0, None).unwrap();
assert_eq!(c.mem as *const GuestMemoryMmap, m as *const GuestMemoryMmap);
assert_eq!(c.desc_table, vq.start());
assert_eq!(c.table_size, 16);
assert_eq!(c.ttl, c.table_size);
assert_eq!(c.index, 0);
assert_eq!(c.addr, GuestAddress(0x1000));
assert_eq!(c.len, 0x1000);
assert_eq!(c.flags, VIRTQ_DESC_F_NEXT);
assert_eq!(c.next, 1);
assert!(c.next_descriptor().unwrap().next_descriptor().is_none());
}
}
#[test]
fn test_new_from_descriptor_chain() {
let m = &GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
let vq = VirtQueue::new(GuestAddress(0), m, 16);
// create a chain with a descriptor pointing to an indirect table
vq.dtable[0].addr.set(0x1000);
vq.dtable[0].len.set(0x1000);
vq.dtable[0].next.set(0);
vq.dtable[0].flags.set(VIRTQ_DESC_F_INDIRECT);
let c = DescriptorChain::checked_new(m, vq.start(), 16, 0, None).unwrap();
assert!(c.is_indirect());
// create an indirect table with 4 chained descriptors
let mut indirect_table = Vec::with_capacity(4);
for j in 0..4 {
let desc = VirtqDesc::new(GuestAddress(0x1000 + (j * 16)), m);
desc.set(0x1000, 0x1000, VIRTQ_DESC_F_NEXT, (j + 1) as u16);
indirect_table.push(desc);
}
// try to iterate through the indirect table descriptors
let mut i = c.new_from_indirect().unwrap();
for j in 0..4 {
assert_eq!(i.flags, VIRTQ_DESC_F_NEXT);
assert_eq!(i.next, j + 1);
i = i.next_descriptor().unwrap();
}
}
#[test]
fn test_queue_and_iterator() {
let m = &GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
let vq = VirtQueue::new(GuestAddress(0), m, 16);
let mut q = vq.create_queue();
// q is currently valid
assert!(q.is_valid(m));
// shouldn't be valid when not marked as ready
q.ready = false;
assert!(!q.is_valid(m));
q.ready = true;
// or when size > max_size
q.size = q.max_size << 1;
assert!(!q.is_valid(m));
q.size = q.max_size;
// or when size is 0
q.size = 0;
assert!(!q.is_valid(m));
q.size = q.max_size;
// or when size is not a power of 2
q.size = 11;
assert!(!q.is_valid(m));
q.size = q.max_size;
// or if the various addresses are off
q.desc_table = GuestAddress(0xffff_ffff);
assert!(!q.is_valid(m));
q.desc_table = GuestAddress(0x1001);
assert!(!q.is_valid(m));
q.desc_table = vq.dtable_start();
q.avail_ring = GuestAddress(0xffff_ffff);
assert!(!q.is_valid(m));
q.avail_ring = GuestAddress(0x1001);
assert!(!q.is_valid(m));
q.avail_ring = vq.avail_start();
q.used_ring = GuestAddress(0xffff_ffff);
assert!(!q.is_valid(m));
q.used_ring = GuestAddress(0x1001);
assert!(!q.is_valid(m));
q.used_ring = vq.used_start();
{
// an invalid queue should return an iterator with no next
q.ready = false;
let mut i = q.iter(m);
assert!(i.next().is_none());
}
q.ready = true;
// now let's create two simple descriptor chains
{
for j in 0..5 {
vq.dtable[j].set(
0x1000 * (j + 1) as u64,
0x1000,
VIRTQ_DESC_F_NEXT,
(j + 1) as u16,
);
}
// the chains are (0, 1) and (2, 3, 4)
vq.dtable[1].flags.set(0);
vq.dtable[4].flags.set(0);
vq.avail.ring[0].set(0);
vq.avail.ring[1].set(2);
vq.avail.idx.set(2);
let mut i = q.iter(m);
{
let mut c = i.next().unwrap();
c = c.next_descriptor().unwrap();
assert!(!c.has_next());
}
{
let mut c = i.next().unwrap();
c = c.next_descriptor().unwrap();
c = c.next_descriptor().unwrap();
assert!(!c.has_next());
}
}
// also test go_to_previous_position() works as expected
{
assert!(q.iter(m).next().is_none());
q.go_to_previous_position();
let mut c = q.iter(m).next().unwrap();
c = c.next_descriptor().unwrap();
c = c.next_descriptor().unwrap();
assert!(!c.has_next());
}
}
#[test]
fn test_add_used() {
let m = &GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
let vq = VirtQueue::new(GuestAddress(0), m, 16);
let mut q = vq.create_queue();
assert_eq!(vq.used.idx.get(), 0);
//index too large
q.add_used(m, 16, 0x1000);
assert_eq!(vq.used.idx.get(), 0);
//should be ok
q.add_used(m, 1, 0x1000);
assert_eq!(vq.used.idx.get(), 1);
let x = vq.used.ring[0].get();
assert_eq!(x.id, 1);
assert_eq!(x.len, 0x1000);
}
}

vmm/Cargo.toml

@@ -48,6 +48,7 @@ vfio-ioctls = { git = "https://github.com/rust-vmm/vfio-ioctls", branch = "main"
 vfio_user = { path = "../vfio_user" }
 vhdx = { path = "../vhdx" }
 virtio-devices = { path = "../virtio-devices" }
+virtio-queue = { path = "../virtio-queue" }
 vm-allocator = { path = "../vm-allocator" }
 vm-device = { path = "../vm-device" }
 vm-memory = { version = "0.6.0", features = ["backend-mmap", "backend-atomic", "backend-bitmap"] }

vmm/src/device_manager.rs

@@ -91,9 +91,10 @@ use vfio_ioctls::{VfioContainer, VfioDevice};
 use virtio_devices::transport::VirtioPciDevice;
 use virtio_devices::transport::VirtioTransport;
 use virtio_devices::vhost_user::VhostUserConfig;
-use virtio_devices::VirtioMemMappingSource;
-use virtio_devices::{DmaRemapping, Endpoint, IommuMapping};
+use virtio_devices::{AccessPlatformMapping, VirtioMemMappingSource};
+use virtio_devices::{Endpoint, IommuMapping};
 use virtio_devices::{VirtioSharedMemory, VirtioSharedMemoryList};
+use virtio_queue::AccessPlatform;
 use vm_allocator::SystemAllocator;
 use vm_device::dma_mapping::vfio::VfioDmaMapping;
 use vm_device::interrupt::{
@@ -109,7 +110,7 @@ use vm_migration::{
     protocol::MemoryRangeTable, Migratable, MigratableError, Pausable, Snapshot,
     SnapshotDataSection, Snapshottable, Transportable,
 };
-use vm_virtio::{VirtioDeviceType, VirtioIommuRemapping};
+use vm_virtio::VirtioDeviceType;
 use vmm_sys_util::eventfd::EventFd;

 #[cfg(target_arch = "aarch64")]
@@ -3165,23 +3166,15 @@ impl DeviceManager {
         // about a virtio config change.
         let msix_num = (virtio_device.lock().unwrap().queue_max_sizes().len() + 1) as u16;

-        // Create the callback from the implementation of the DmaRemapping
-        // trait. The point with the callback is to simplify the code as we
-        // know about the device ID from this point.
-        let iommu_mapping_cb: Option<Arc<VirtioIommuRemapping>> =
-            if let Some(mapping) = iommu_mapping {
-                let mapping_clone = mapping.clone();
-                Some(Arc::new(Box::new(move |addr: u64| {
-                    mapping_clone.translate(pci_device_bdf, addr).map_err(|e| {
-                        std::io::Error::new(
-                            std::io::ErrorKind::Other,
-                            format!(
-                                "failed to translate addr 0x{:x} for device 00:{:02x}.0 {}",
-                                addr, pci_device_bdf, e
-                            ),
-                        )
-                    })
-                }) as VirtioIommuRemapping))
+        // Create the AccessPlatform trait object from the IommuMapping
+        // implementation. This provides address translation for any virtio
+        // device sitting behind a vIOMMU.
+        let access_platform: Option<Arc<dyn AccessPlatform>> = if let Some(mapping) = iommu_mapping
+        {
+            Some(Arc::new(AccessPlatformMapping::new(
+                pci_device_bdf,
+                mapping.clone(),
+            )))
         } else {
             None
         };
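For orientation, a hedged sketch of the role AccessPlatformMapping plays (the real trait lives in the virtio-queue crate and its exact signature is not shown in this diff, so every name below is an assumption):

use std::io;
use std::sync::Arc;

// Hypothetical stand-in for IommuMapping, only to keep the sketch self-contained.
trait GpaTranslator: Send + Sync {
    fn translate(&self, bdf: u32, gpa: u64) -> io::Result<u64>;
}

// Binds a device's PCI b/d/f to the shared vIOMMU mapping, so queue code
// can translate guest addresses without knowing the device's identity.
struct AccessPlatformMappingSketch {
    bdf: u32,
    mapping: Arc<dyn GpaTranslator>,
}

impl AccessPlatformMappingSketch {
    fn translate(&self, gpa: u64) -> io::Result<u64> {
        self.mapping.translate(self.bdf, gpa)
    }
}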
@@ -3192,7 +3185,7 @@ impl DeviceManager {
             memory,
             virtio_device,
             msix_num,
-            iommu_mapping_cb,
+            access_platform,
             &self.msi_interrupt_manager,
             pci_device_bdf,
             self.activate_evt