virtio: Port codebase to the latest virtio-queue version

The new virtio-queue version introduces some breaking changes that must be
addressed so that Cloud Hypervisor keeps working with this version.

The most important change is the removal of the guest memory handle from
the Queue, meaning the caller must now provide the guest memory handle
to multiple methods of the QueueT trait.
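
To illustrate, here is a minimal sketch (not code from this patch; the
helper is hypothetical) of what the new calling convention looks like,
with add_used() and needs_notification() now taking the guest memory
explicitly, as in the hunks below:

    use virtio_queue::{Queue, QueueT};
    use vm_memory::GuestMemory;

    // Hypothetical helper: mark a descriptor chain as used, then ask
    // whether the driver needs an interrupt for this update.
    fn complete_request<M: GuestMemory>(
        queue: &mut Queue,
        mem: &M,
        head_index: u16,
        len: u32,
    ) -> Result<bool, virtio_queue::Error> {
        // With virtio-queue 0.4 these calls read the memory handle out
        // of the Queue itself, e.g. queue.add_used(head_index, len).
        queue.add_used(mem, head_index, len)?;
        queue.needs_notification(mem)
    }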

One interesting aspect is that QueueT has been extended substantially to
provide every getter and setter we need to access and update the Queue
structure without direct access to its internal fields.
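
For instance, setup code that used to poke at the q.state fields directly
now goes through those accessors; a short sketch (sizes are arbitrary),
mirroring the fuzz target change below:

    use virtio_queue::{Queue, QueueT};

    fn build_queue() -> Queue {
        // Queue::new() now validates the requested size and returns a Result.
        let mut q = Queue::new(256).unwrap();
        // Formerly q.state.size and q.state.ready.
        q.set_size(128);
        q.set_ready(true);
        assert_eq!(q.size(), 128);
        q
    }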

This patch ports all the virtio and vhost-user devices to this new crate
definition. It also updates the vhost-user-block and vhost-user-net
backends based on the updated vhost-user-backend crate, as well as the
fuzz directory.

Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
Authored by Sebastien Boeuf on 2022-07-06 16:08:08 +02:00, committed by Rob Bradford
parent 7199119bb2
commit a423bf13ad
41 changed files with 414 additions and 354 deletions

Cargo.lock (generated)

@@ -1265,9 +1265,9 @@ dependencies = [
 [[package]]
 name = "vhost-user-backend"
-version = "0.5.1"
+version = "0.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ded8a9f15b09e61bb8a501d0a7a38056f4c1bd7f51cedcd41081c0e4233d5aa6"
+checksum = "558ac5ca9569fb03f518b2bdd17606809ffdc894b619b92d30df6c40c33d15f3"
 dependencies = [
  "libc",
  "log",
@@ -1293,6 +1293,7 @@ dependencies = [
  "vhost",
  "vhost-user-backend",
  "virtio-bindings",
+ "virtio-queue",
  "vm-memory",
  "vmm-sys-util",
 ]
@@ -1357,11 +1358,12 @@ dependencies = [
 [[package]]
 name = "virtio-queue"
-version = "0.4.0"
+version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "519c0a333c871650269cba303bc108075d52a0c0d64f9b91fae61829b53725af"
+checksum = "b4f59652909f276e6edd8bf36e9f106480b2202f5f046717b3de14f1b4072a28"
 dependencies = [
  "log",
+ "virtio-bindings",
  "vm-memory",
  "vmm-sys-util",
 ]


@@ -17,7 +17,7 @@ versionize = "0.1.6"
 versionize_derive = "0.1.4"
 vhdx = { path = "../vhdx" }
 virtio-bindings = { version = "0.1.0", features = ["virtio-v5_0_0"] }
-virtio-queue = "0.4.0"
+virtio-queue = "0.5.0"
 vm-memory = { version = "0.8.0", features = ["backend-mmap", "backend-atomic", "backend-bitmap"] }
 vm-virtio = { path = "../vm-virtio" }
 vmm-sys-util = "0.10.0"

fuzz/Cargo.lock (generated)

@@ -817,11 +817,12 @@ dependencies = [
 [[package]]
 name = "virtio-queue"
-version = "0.4.0"
+version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "519c0a333c871650269cba303bc108075d52a0c0d64f9b91fae61829b53725af"
+checksum = "b4f59652909f276e6edd8bf36e9f106480b2202f5f046717b3de14f1b4072a28"
 dependencies = [
  "log",
+ "virtio-bindings",
  "vm-memory",
  "vmm-sys-util",
 ]


@@ -17,7 +17,7 @@ qcow = { path = "../qcow" }
 seccompiler = "0.2.0"
 vhdx = { path = "../vhdx" }
 virtio-devices = { path = "../virtio-devices" }
-virtio-queue = "0.4.0"
+virtio-queue = "0.5.0"
 vmm-sys-util = "0.10.0"
 vm-memory = "0.8.0"
 vm-device = { path = "../vm-device" }


@@ -15,7 +15,7 @@ use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
 use std::path::PathBuf;
 use std::sync::Arc;
 use virtio_devices::{Block, VirtioDevice, VirtioInterrupt, VirtioInterruptType};
-use virtio_queue::{Queue, QueueState};
+use virtio_queue::{Queue, QueueT};
 use vm_memory::{bitmap::AtomicBitmap, Bytes, GuestAddress, GuestMemoryAtomic};
 use vmm_sys_util::eventfd::{EventFd, EFD_NONBLOCK};
@@ -77,12 +77,9 @@ fuzz_target!(|bytes| {
     let guest_memory = GuestMemoryAtomic::new(mem);
-    let mut q = Queue::<
-        GuestMemoryAtomic<GuestMemoryMmap>,
-        QueueState,
-    >::new(guest_memory.clone(), QUEUE_SIZE);
-    q.state.ready = true;
-    q.state.size = QUEUE_SIZE / 2;
+    let mut q = Queue::new(QUEUE_SIZE).unwrap();
+    q.set_ready(true);
+    q.set_size(QUEUE_SIZE / 2);
     let evt = EventFd::new(0).unwrap();
     let queue_evt = unsafe { EventFd::from_raw_fd(libc::dup(evt.as_raw_fd())) };


@@ -15,7 +15,7 @@ serde = "1.0.140"
 versionize = "0.1.6"
 versionize_derive = "0.1.4"
 virtio-bindings = "0.1.0"
-virtio-queue = "0.4.0"
+virtio-queue = "0.5.0"
 vm-memory = { version = "0.8.0", features = ["backend-mmap", "backend-atomic", "backend-bitmap"] }
 vm-virtio = { path = "../vm-virtio" }
 vmm-sys-util = "0.10.0"


@@ -13,8 +13,8 @@ use virtio_bindings::bindings::virtio_net::{
     VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
     VIRTIO_NET_F_GUEST_UFO, VIRTIO_NET_OK,
 };
-use virtio_queue::Queue;
-use vm_memory::{ByteValued, Bytes, GuestMemoryAtomic, GuestMemoryError};
+use virtio_queue::{Queue, QueueOwnedT, QueueT};
+use vm_memory::{ByteValued, Bytes, GuestMemoryError};
 use vm_virtio::{AccessPlatform, Translatable};
 #[derive(Debug)]
@@ -58,12 +58,13 @@ impl CtrlQueue {
     pub fn process(
         &mut self,
-        queue: &mut Queue<GuestMemoryAtomic<GuestMemoryMmap>>,
+        mem: &GuestMemoryMmap,
+        queue: &mut Queue,
         access_platform: Option<&Arc<dyn AccessPlatform>>,
     ) -> Result<()> {
         let mut used_desc_heads = Vec::new();
         loop {
-            for mut desc_chain in queue.iter().map_err(Error::QueueIterator)? {
+            for mut desc_chain in queue.iter(mem).map_err(Error::QueueIterator)? {
                 let ctrl_desc = desc_chain.next().ok_or(Error::NoControlHeaderDescriptor)?;
                 let ctrl_hdr: ControlHeader = desc_chain
@@ -144,12 +145,12 @@ impl CtrlQueue {
             for (desc_index, len) in used_desc_heads.iter() {
                 queue
-                    .add_used(*desc_index, *len)
+                    .add_used(mem, *desc_index, *len)
                     .map_err(Error::QueueAddUsed)?;
             }
             if !queue
-                .enable_notification()
+                .enable_notification(mem)
                 .map_err(Error::QueueEnableNotification)?
             {
                 break;


@@ -10,8 +10,8 @@ use std::num::Wrapping;
 use std::os::unix::io::{AsRawFd, RawFd};
 use std::sync::atomic::{AtomicU64, Ordering};
 use std::sync::Arc;
-use virtio_queue::Queue;
-use vm_memory::{Bytes, GuestMemory, GuestMemoryAtomic};
+use virtio_queue::{Queue, QueueOwnedT, QueueT};
+use vm_memory::{Bytes, GuestMemory};
 use vm_virtio::{AccessPlatform, Translatable};
 #[derive(Clone)]
@@ -36,8 +36,9 @@ impl TxVirtio {
     pub fn process_desc_chain(
         &mut self,
+        mem: &GuestMemoryMmap,
         tap: &mut Tap,
-        queue: &mut Queue<GuestMemoryAtomic<GuestMemoryMmap>>,
+        queue: &mut Queue,
         rate_limiter: &mut Option<RateLimiter>,
         access_platform: Option<&Arc<dyn AccessPlatform>>,
     ) -> Result<bool, NetQueuePairError> {
@@ -47,7 +48,7 @@
         loop {
             let used_desc_head: (u16, u32);
             let mut avail_iter = queue
-                .iter()
+                .iter(mem)
                 .map_err(NetQueuePairError::QueueIteratorFailed)?;
             if let Some(mut desc_chain) = avail_iter.next() {
@@ -130,10 +131,10 @@
             }
             queue
-                .add_used(used_desc_head.0, used_desc_head.1)
+                .add_used(mem, used_desc_head.0, used_desc_head.1)
                 .map_err(NetQueuePairError::QueueAddUsed)?;
             if !queue
-                .enable_notification()
+                .enable_notification(mem)
                 .map_err(NetQueuePairError::QueueEnableNotification)?
             {
                 break;
@@ -166,8 +167,9 @@ impl RxVirtio {
     pub fn process_desc_chain(
         &mut self,
+        mem: &GuestMemoryMmap,
         tap: &mut Tap,
-        queue: &mut Queue<GuestMemoryAtomic<GuestMemoryMmap>>,
+        queue: &mut Queue,
         rate_limiter: &mut Option<RateLimiter>,
         access_platform: Option<&Arc<dyn AccessPlatform>>,
     ) -> Result<bool, NetQueuePairError> {
@@ -177,7 +179,7 @@
         loop {
             let used_desc_head: (u16, u32);
             let mut avail_iter = queue
-                .iter()
+                .iter(mem)
                 .map_err(NetQueuePairError::QueueIteratorFailed)?;
             if let Some(mut desc_chain) = avail_iter.next() {
@@ -281,10 +283,10 @@
             }
             queue
-                .add_used(used_desc_head.0, used_desc_head.1)
+                .add_used(mem, used_desc_head.0, used_desc_head.1)
                 .map_err(NetQueuePairError::QueueAddUsed)?;
             if !queue
-                .enable_notification()
+                .enable_notification(mem)
                 .map_err(NetQueuePairError::QueueEnableNotification)?
             {
                 break;
@@ -355,9 +357,11 @@ pub struct NetQueuePair {
 impl NetQueuePair {
     pub fn process_tx(
         &mut self,
-        queue: &mut Queue<GuestMemoryAtomic<GuestMemoryMmap>>,
+        mem: &GuestMemoryMmap,
+        queue: &mut Queue,
     ) -> Result<bool, NetQueuePairError> {
         let tx_tap_retry = self.tx.process_desc_chain(
+            mem,
             &mut self.tap,
             queue,
             &mut self.tx_rate_limiter,
@@ -397,15 +401,17 @@
         self.tx.counter_frames = Wrapping(0);
         queue
-            .needs_notification()
+            .needs_notification(mem)
            .map_err(NetQueuePairError::QueueNeedsNotification)
     }
     pub fn process_rx(
         &mut self,
-        queue: &mut Queue<GuestMemoryAtomic<GuestMemoryMmap>>,
+        mem: &GuestMemoryMmap,
+        queue: &mut Queue,
     ) -> Result<bool, NetQueuePairError> {
         self.rx_desc_avail = !self.rx.process_desc_chain(
+            mem,
             &mut self.tap,
             queue,
             &mut self.rx_rate_limiter,
@@ -440,7 +446,7 @@
         self.rx.counter_frames = Wrapping(0);
         queue
-            .needs_notification()
+            .needs_notification(mem)
             .map_err(NetQueuePairError::QueueNeedsNotification)
     }
 }


@@ -14,8 +14,9 @@ log = "0.4.17"
 option_parser = { path = "../option_parser" }
 qcow = { path = "../qcow" }
 vhost = { version = "0.4.0", features = ["vhost-user-slave"] }
-vhost-user-backend = "0.5.1"
+vhost-user-backend = "0.6.0"
 virtio-bindings = "0.1.0"
+virtio-queue = "0.5.0"
 vm-memory = "0.8.0"
 vmm-sys-util = "0.10.0"


@@ -19,6 +19,7 @@ use std::fs::File;
 use std::fs::OpenOptions;
 use std::io::Read;
 use std::io::{Seek, SeekFrom, Write};
+use std::ops::Deref;
 use std::ops::DerefMut;
 use std::os::unix::fs::OpenOptionsExt;
 use std::path::PathBuf;
@@ -34,6 +35,8 @@ use vhost::vhost_user::Listener;
 use vhost_user_backend::{VhostUserBackendMut, VhostUserDaemon, VringRwLock, VringState, VringT};
 use virtio_bindings::bindings::virtio_blk::*;
 use virtio_bindings::bindings::virtio_ring::VIRTIO_RING_F_EVENT_IDX;
+use virtio_queue::{QueueOwnedT, QueueT};
+use vm_memory::GuestAddressSpace;
 use vm_memory::{bitmap::AtomicBitmap, ByteValued, Bytes, GuestMemoryAtomic};
 use vmm_sys_util::{epoll::EventSet, eventfd::EventFd};
@@ -95,6 +98,7 @@ struct VhostUserBlkThread {
     event_idx: bool,
     kill_evt: EventFd,
     writeback: Arc<AtomicBool>,
+    mem: GuestMemoryAtomic<GuestMemoryMmap>,
 }
 impl VhostUserBlkThread {
@@ -103,6 +107,7 @@
         disk_image_id: Vec<u8>,
         disk_nsectors: u64,
         writeback: Arc<AtomicBool>,
+        mem: GuestMemoryAtomic<GuestMemoryMmap>,
     ) -> Result<Self> {
         Ok(VhostUserBlkThread {
             disk_image,
@@ -111,6 +116,7 @@
             event_idx: false,
             kill_evt: EventFd::new(EFD_NONBLOCK).map_err(Error::CreateKillEventFd)?,
             writeback,
+            mem,
         })
     }
@@ -120,7 +126,7 @@
     ) -> bool {
         let mut used_desc_heads = Vec::new();
-        for mut desc_chain in vring.get_queue_mut().iter().unwrap() {
+        for mut desc_chain in vring.get_queue_mut().iter(self.mem.memory()).unwrap() {
             debug!("got an element in the queue");
             let len;
             match Request::parse(&mut desc_chain, None) {
@@ -156,12 +162,13 @@
             used_desc_heads.push((desc_chain.head_index(), len));
         }
+        let mem = self.mem.memory();
         let mut needs_signalling = false;
         for (desc_head, len) in used_desc_heads.iter() {
             if self.event_idx {
                 let queue = vring.get_queue_mut();
-                if queue.add_used(*desc_head, *len).is_ok() {
-                    if queue.needs_notification().unwrap() {
+                if queue.add_used(mem.deref(), *desc_head, *len).is_ok() {
+                    if queue.needs_notification(mem.deref()).unwrap() {
                         debug!("signalling queue");
                         needs_signalling = true;
                     } else {
@@ -170,7 +177,10 @@
                 }
             } else {
                 debug!("signalling queue");
-                vring.get_queue_mut().add_used(*desc_head, *len).unwrap();
+                vring
+                    .get_queue_mut()
+                    .add_used(mem.deref(), *desc_head, *len)
+                    .unwrap();
                 needs_signalling = true;
             }
         }
@@ -192,6 +202,7 @@ struct VhostUserBlkBackend {
     queue_size: usize,
     acked_features: u64,
     writeback: Arc<AtomicBool>,
+    mem: GuestMemoryAtomic<GuestMemoryMmap>,
 }
 impl VhostUserBlkBackend {
@@ -202,6 +213,7 @@
         direct: bool,
         poll_queue: bool,
         queue_size: usize,
+        mem: GuestMemoryAtomic<GuestMemoryMmap>,
     ) -> Result<Self> {
         let mut options = OpenOptions::new();
         options.read(true);
@@ -243,6 +255,7 @@
                 image_id.clone(),
                 nsectors,
                 writeback.clone(),
+                mem.clone(),
             )?);
             threads.push(thread);
             queues_per_thread.push(0b1 << i);
@@ -257,6 +270,7 @@
             queue_size,
             acked_features: 0,
             writeback,
+            mem,
         })
     }
@@ -364,7 +378,10 @@ impl VhostUserBackendMut<VringRwLock<GuestMemoryAtomic<GuestMemoryMmap>>, Atomic
                 // calling process_queue() until it stops finding new
                 // requests on the queue.
                 loop {
-                    vring.get_queue_mut().enable_notification().unwrap();
+                    vring
+                        .get_queue_mut()
+                        .enable_notification(self.mem.memory().deref())
+                        .unwrap();
                     if !thread.process_queue(&mut vring) {
                         break;
                     }
@@ -491,6 +508,8 @@ pub fn start_block_backend(backend_command: &str) {
         }
     };
+    let mem = GuestMemoryAtomic::new(GuestMemoryMmap::new());
     let blk_backend = Arc::new(RwLock::new(
         VhostUserBlkBackend::new(
             backend_config.path,
@@ -499,6 +518,7 @@
             backend_config.direct,
             backend_config.poll_queue,
             backend_config.queue_size,
+            mem.clone(),
         )
         .unwrap(),
     ));
@@ -508,12 +528,7 @@
     let listener = Listener::new(&backend_config.socket, true).unwrap();
     let name = "vhost-user-blk-backend";
-    let mut blk_daemon = VhostUserDaemon::new(
-        name.to_string(),
-        blk_backend.clone(),
-        GuestMemoryAtomic::new(GuestMemoryMmap::new()),
-    )
-    .unwrap();
+    let mut blk_daemon = VhostUserDaemon::new(name.to_string(), blk_backend.clone(), mem).unwrap();
     debug!("blk_daemon is created!\n");


@@ -13,7 +13,7 @@ log = "0.4.17"
 net_util = { path = "../net_util" }
 option_parser = { path = "../option_parser" }
 vhost = { version = "0.4.0", features = ["vhost-user-slave"] }
-vhost-user-backend = "0.5.1"
+vhost-user-backend = "0.6.0"
 virtio-bindings = "0.1.0"
 vm-memory = "0.8.0"
 vmm-sys-util = "0.10.0"


@@ -18,6 +18,7 @@ use option_parser::{OptionParser, OptionParserError};
 use std::fmt;
 use std::io;
 use std::net::Ipv4Addr;
+use std::ops::Deref;
 use std::os::unix::io::{AsRawFd, RawFd};
 use std::process;
 use std::sync::{Arc, Mutex, RwLock};
@@ -26,6 +27,7 @@ use vhost::vhost_user::message::*;
 use vhost::vhost_user::Listener;
 use vhost_user_backend::{VhostUserBackendMut, VhostUserDaemon, VringRwLock, VringT};
 use virtio_bindings::bindings::virtio_net::*;
+use vm_memory::GuestAddressSpace;
 use vm_memory::{bitmap::AtomicBitmap, GuestMemoryAtomic};
 use vmm_sys_util::{epoll::EventSet, eventfd::EventFd};
@@ -113,6 +115,7 @@ pub struct VhostUserNetBackend {
     num_queues: usize,
     queue_size: u16,
     queues_per_thread: Vec<u64>,
+    mem: GuestMemoryAtomic<GuestMemoryMmap>,
 }
 impl VhostUserNetBackend {
@@ -123,6 +126,7 @@
         num_queues: usize,
         queue_size: u16,
         ifname: Option<&str>,
+        mem: GuestMemoryAtomic<GuestMemoryMmap>,
     ) -> Result<Self> {
         let mut taps = open_tap(
             ifname,
@@ -147,6 +151,7 @@
             num_queues,
             queue_size,
             queues_per_thread,
+            mem,
         })
     }
 }
@@ -214,7 +219,7 @@ impl VhostUserBackendMut<VringRwLock<GuestMemoryAtomic<GuestMemoryMmap>>, Atomic
                 let mut vring = vrings[1].get_mut();
                 if thread
                     .net
-                    .process_tx(vring.get_queue_mut())
+                    .process_tx(self.mem.memory().deref(), vring.get_queue_mut())
                     .map_err(Error::NetQueuePair)?
                 {
                     vring
@@ -226,7 +231,7 @@
                 let mut vring = vrings[0].get_mut();
                 if thread
                     .net
-                    .process_rx(vring.get_queue_mut())
+                    .process_rx(self.mem.memory().deref(), vring.get_queue_mut())
                     .map_err(Error::NetQueuePair)?
                 {
                     vring
@@ -342,6 +347,8 @@ pub fn start_net_backend(backend_command: &str) {
     let tap = backend_config.tap.as_deref();
+    let mem = GuestMemoryAtomic::new(GuestMemoryMmap::new());
     let net_backend = Arc::new(RwLock::new(
         VhostUserNetBackend::new(
             backend_config.ip,
@@ -350,6 +357,7 @@
             backend_config.num_queues,
             backend_config.queue_size,
             tap,
+            mem.clone(),
         )
         .unwrap(),
     ));
@@ -357,7 +365,7 @@
     let mut net_daemon = VhostUserDaemon::new(
         "vhost-user-net-backend".to_string(),
         net_backend.clone(),
-        GuestMemoryAtomic::new(GuestMemoryMmap::new()),
+        mem,
     )
     .unwrap();


@@ -30,7 +30,7 @@ versionize = "0.1.6"
 versionize_derive = "0.1.4"
 vhost = { version = "0.4.0", features = ["vhost-user-master", "vhost-user-slave", "vhost-kern", "vhost-vdpa"] }
 virtio-bindings = { version = "0.1.0", features = ["virtio-v5_0_0"] }
-virtio-queue = "0.4.0"
+virtio-queue = "0.5.0"
 vm-allocator = { path = "../vm-allocator" }
 vm-device = { path = "../vm-device" }
 vm-memory = { version = "0.8.0", features = ["backend-mmap", "backend-atomic", "backend-bitmap"] }


@@ -22,6 +22,7 @@ use libc::EFD_NONBLOCK;
 use seccompiler::SeccompAction;
 use std::io;
 use std::mem::size_of;
+use std::ops::Deref;
 use std::os::unix::io::AsRawFd;
 use std::result;
 use std::sync::{
@@ -30,10 +31,10 @@
 };
 use versionize::{VersionMap, Versionize, VersionizeResult};
 use versionize_derive::Versionize;
-use virtio_queue::Queue;
+use virtio_queue::{Queue, QueueOwnedT, QueueT};
 use vm_memory::{
-    Address, ByteValued, Bytes, GuestAddress, GuestMemory, GuestMemoryAtomic, GuestMemoryError,
-    GuestMemoryRegion,
+    Address, ByteValued, Bytes, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic,
+    GuestMemoryError, GuestMemoryRegion,
 };
 use vm_migration::{
     Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable, VersionMapped,
@@ -161,9 +162,10 @@ impl VirtioBalloonResize {
 }
 struct BalloonEpollHandler {
+    mem: GuestMemoryAtomic<GuestMemoryMmap>,
     config: Arc<Mutex<VirtioBalloonConfig>>,
     resize_receiver: VirtioBalloonResizeReceiver,
-    queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
+    queues: Vec<Queue>,
     interrupt_cb: Arc<dyn VirtioInterrupt>,
     inflate_queue_evt: EventFd,
     deflate_queue_evt: EventFd,
@@ -230,9 +232,10 @@ impl BalloonEpollHandler {
         queue_index: usize,
         used_descs: Vec<(u16, u32)>,
     ) -> result::Result<(), Error> {
+        let mem = self.mem.memory();
         for (desc_index, len) in used_descs.iter() {
             self.queues[queue_index]
-                .add_used(*desc_index, *len)
+                .add_used(mem.deref(), *desc_index, *len)
                 .map_err(Error::QueueAddUsed)?;
         }
@@ -244,9 +247,10 @@
     }
     fn process_queue(&mut self, queue_index: usize) -> result::Result<(), Error> {
+        let mem = self.mem.memory();
         let mut used_descs = Vec::new();
         for mut desc_chain in self.queues[queue_index]
-            .iter()
+            .iter(mem)
             .map_err(Error::QueueIterator)?
         {
             let desc = desc_chain.next().ok_or(Error::DescriptorChainTooShort)?;
@@ -298,10 +302,11 @@
     }
     fn process_reporting_queue(&mut self, queue_index: usize) -> result::Result<(), Error> {
+        let mem = self.mem.memory();
         let mut used_descs = Vec::new();
         for mut desc_chain in self.queues[queue_index]
-            .iter()
+            .iter(mem)
             .map_err(Error::QueueIterator)?
         {
             let mut descs_len = 0;
@@ -540,9 +545,9 @@ impl VirtioDevice for Balloon {
     fn activate(
         &mut self,
-        _mem: GuestMemoryAtomic<GuestMemoryMmap>,
+        mem: GuestMemoryAtomic<GuestMemoryMmap>,
         interrupt_cb: Arc<dyn VirtioInterrupt>,
-        mut queues: Vec<(usize, Queue<GuestMemoryAtomic<GuestMemoryMmap>>, EventFd)>,
+        mut queues: Vec<(usize, Queue, EventFd)>,
     ) -> ActivateResult {
         self.common.activate(&queues, &interrupt_cb)?;
         let (kill_evt, pause_evt) = self.common.dup_eventfds();
@@ -564,6 +569,7 @@
         };
         let mut handler = BalloonEpollHandler {
+            mem,
             config: self.config.clone(),
             resize_receiver: self.resize.get_receiver().map_err(|e| {
                 error!("failed to clone resize EventFd: {:?}", e);


@@ -26,6 +26,7 @@ use rate_limiter::{RateLimiter, TokenType};
 use seccompiler::SeccompAction;
 use std::io;
 use std::num::Wrapping;
+use std::ops::Deref;
 use std::os::unix::io::AsRawFd;
 use std::path::PathBuf;
 use std::result;
@@ -35,7 +36,7 @@ use std::{collections::HashMap, convert::TryInto};
 use versionize::{VersionMap, Versionize, VersionizeResult};
 use versionize_derive::Versionize;
 use virtio_bindings::bindings::virtio_blk::*;
-use virtio_queue::Queue;
+use virtio_queue::{Queue, QueueOwnedT, QueueT};
 use vm_memory::{ByteValued, Bytes, GuestAddressSpace, GuestMemoryAtomic};
 use vm_migration::VersionMapped;
 use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable};
@@ -84,7 +85,7 @@ pub struct BlockCounters {
 struct BlockEpollHandler {
     queue_index: u16,
-    queue: Queue<GuestMemoryAtomic<GuestMemoryMmap>>,
+    queue: Queue,
     mem: GuestMemoryAtomic<GuestMemoryMmap>,
     disk_image: Box<dyn AsyncIo>,
     disk_nsectors: u64,
@@ -107,7 +108,9 @@
         let mut used_desc_heads = Vec::new();
         let mut used_count = 0;
-        let mut avail_iter = queue.iter().map_err(Error::QueueIterator)?;
+        let mut avail_iter = queue
+            .iter(self.mem.memory())
+            .map_err(Error::QueueIterator)?;
         for mut desc_chain in &mut avail_iter {
             let mut request = Request::parse(&mut desc_chain, self.access_platform.as_ref())
                 .map_err(Error::RequestParsing)?;
@@ -171,9 +174,10 @@
             }
         }
+        let mem = self.mem.memory();
         for &(desc_index, len) in used_desc_heads.iter() {
             queue
-                .add_used(desc_index, len)
+                .add_used(mem.deref(), desc_index, len)
                 .map_err(Error::QueueAddUsed)?;
         }
@@ -239,7 +243,7 @@
         for &(desc_index, len) in used_desc_heads.iter() {
             queue
-                .add_used(desc_index, len)
+                .add_used(mem.deref(), desc_index, len)
                 .map_err(Error::QueueAddUsed)?;
         }
@@ -587,7 +591,7 @@ impl VirtioDevice for Block {
         &mut self,
         mem: GuestMemoryAtomic<GuestMemoryMmap>,
         interrupt_cb: Arc<dyn VirtioInterrupt>,
-        mut queues: Vec<(usize, Queue<GuestMemoryAtomic<GuestMemoryMmap>>, EventFd)>,
+        mut queues: Vec<(usize, Queue, EventFd)>,
     ) -> ActivateResult {
         self.common.activate(&queues, &interrupt_cb)?;
@@ -597,7 +601,7 @@
         let mut epoll_threads = Vec::new();
         for i in 0..queues.len() {
             let (_, queue, queue_evt) = queues.remove(0);
-            let queue_size = queue.state.size;
+            let queue_size = queue.size();
             let (kill_evt, pause_evt) = self.common.dup_eventfds();
             let rate_limiter: Option<RateLimiter> = self


@@ -18,14 +18,15 @@ use std::collections::VecDeque;
 use std::fs::File;
 use std::io;
 use std::io::{Read, Write};
+use std::ops::Deref;
 use std::os::unix::io::AsRawFd;
 use std::result;
 use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
 use std::sync::{Arc, Barrier, Mutex};
 use versionize::{VersionMap, Versionize, VersionizeResult};
 use versionize_derive::Versionize;
-use virtio_queue::Queue;
-use vm_memory::{ByteValued, Bytes, GuestMemoryAtomic};
+use virtio_queue::{Queue, QueueOwnedT, QueueT};
+use vm_memory::{ByteValued, Bytes, GuestAddressSpace, GuestMemoryAtomic};
 use vm_migration::VersionMapped;
 use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable};
 use vm_virtio::{AccessPlatform, Translatable};
@@ -74,7 +75,8 @@ impl Default for VirtioConsoleConfig {
 unsafe impl ByteValued for VirtioConsoleConfig {}
 struct ConsoleEpollHandler {
-    queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
+    mem: GuestMemoryAtomic<GuestMemoryMmap>,
+    queues: Vec<Queue>,
     interrupt_cb: Arc<dyn VirtioInterrupt>,
     in_buffer: Arc<Mutex<VecDeque<u8>>>,
     resizer: Arc<ConsoleResizer>,
@@ -142,7 +144,7 @@
             return false;
         }
-        let mut avail_iter = recv_queue.iter().unwrap();
+        let mut avail_iter = recv_queue.iter(self.mem.memory()).unwrap();
         for mut desc_chain in &mut avail_iter {
             let desc = desc_chain.next().unwrap();
             let len = cmp::min(desc.len() as u32, in_buffer.len() as u32);
@@ -166,8 +168,9 @@
             }
         }
+        let mem = self.mem.memory();
         for &(desc_index, len) in &used_desc_heads[..used_count] {
-            recv_queue.add_used(desc_index, len).unwrap();
+            recv_queue.add_used(mem.deref(), desc_index, len).unwrap();
         }
         used_count > 0
@@ -185,7 +188,7 @@
         let mut used_desc_heads = [(0, 0); QUEUE_SIZE as usize];
         let mut used_count = 0;
-        for mut desc_chain in trans_queue.iter().unwrap() {
+        for mut desc_chain in trans_queue.iter(self.mem.memory()).unwrap() {
             let desc = desc_chain.next().unwrap();
             if let Some(ref mut out) = self.endpoint.out_file() {
                 let _ = desc_chain.memory().write_to(
@@ -200,8 +203,9 @@
             used_count += 1;
         }
+        let mem = self.mem.memory();
         for &(desc_index, len) in &used_desc_heads[..used_count] {
-            trans_queue.add_used(desc_index, len).unwrap();
+            trans_queue.add_used(mem.deref(), desc_index, len).unwrap();
         }
         used_count > 0
     }
@@ -488,9 +492,9 @@ impl VirtioDevice for Console {
     fn activate(
         &mut self,
-        _mem: GuestMemoryAtomic<GuestMemoryMmap>,
+        mem: GuestMemoryAtomic<GuestMemoryMmap>,
         interrupt_cb: Arc<dyn VirtioInterrupt>,
-        mut queues: Vec<(usize, Queue<GuestMemoryAtomic<GuestMemoryMmap>>, EventFd)>,
+        mut queues: Vec<(usize, Queue, EventFd)>,
     ) -> ActivateResult {
         self.common.activate(&queues, &interrupt_cb)?;
         self.resizer
@@ -515,6 +519,7 @@
         let output_queue_evt = queue_evt;
         let mut handler = ConsoleEpollHandler {
+            mem,
             queues: virtqueues,
             interrupt_cb,
             in_buffer: self.in_buffer.clone(),


@@ -107,7 +107,7 @@ pub trait VirtioDevice: Send {
         &mut self,
         mem: GuestMemoryAtomic<GuestMemoryMmap>,
         interrupt_evt: Arc<dyn VirtioInterrupt>,
-        queues: Vec<(usize, Queue<GuestMemoryAtomic<GuestMemoryMmap>>, EventFd)>,
+        queues: Vec<(usize, Queue, EventFd)>,
     ) -> ActivateResult;
     /// Optionally deactivates this device and returns ownership of the guest memory map, interrupt
@@ -250,7 +250,7 @@ impl VirtioCommon {
     pub fn activate(
         &mut self,
-        queues: &[(usize, Queue<GuestMemoryAtomic<GuestMemoryMmap>>, EventFd)],
+        queues: &[(usize, Queue, EventFd)],
         interrupt_cb: &Arc<dyn VirtioInterrupt>,
     ) -> ActivateResult {
         if queues.len() < self.min_queues.into() {


@@ -17,17 +17,18 @@ use std::fmt::{self, Display};
 use std::io;
 use std::mem::size_of;
 use std::ops::Bound::Included;
+use std::ops::Deref;
 use std::os::unix::io::AsRawFd;
 use std::result;
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::{Arc, Barrier, Mutex, RwLock};
 use versionize::{VersionMap, Versionize, VersionizeResult};
 use versionize_derive::Versionize;
-use virtio_queue::{DescriptorChain, Queue};
+use virtio_queue::{DescriptorChain, Queue, QueueOwnedT, QueueT};
 use vm_device::dma_mapping::ExternalDmaMapping;
 use vm_memory::{
-    Address, ByteValued, Bytes, GuestAddress, GuestMemoryAtomic, GuestMemoryError,
-    GuestMemoryLoadGuard,
+    Address, ByteValued, Bytes, GuestAddress, GuestAddressSpace, GuestMemoryAtomic,
+    GuestMemoryError, GuestMemoryLoadGuard,
 };
 use vm_migration::VersionMapped;
 use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable};
@@ -660,7 +661,8 @@ impl Request {
 }
 struct IommuEpollHandler {
-    queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
+    mem: GuestMemoryAtomic<GuestMemoryMmap>,
+    queues: Vec<Queue>,
     interrupt_cb: Arc<dyn VirtioInterrupt>,
     queue_evts: Vec<EventFd>,
     kill_evt: EventFd,
@@ -674,7 +676,7 @@
     fn request_queue(&mut self) -> bool {
         let mut used_desc_heads = [(0, 0); QUEUE_SIZE as usize];
         let mut used_count = 0;
-        for mut desc_chain in self.queues[0].iter().unwrap() {
+        for mut desc_chain in self.queues[0].iter(self.mem.memory()).unwrap() {
             let len = match Request::parse(
                 &mut desc_chain,
                 &self.mapping,
@@ -692,8 +694,11 @@
             used_count += 1;
         }
+        let mem = self.mem.memory();
         for &(desc_index, len) in &used_desc_heads[..used_count] {
-            self.queues[0].add_used(desc_index, len).unwrap();
+            self.queues[0]
+                .add_used(mem.deref(), desc_index, len)
+                .unwrap();
         }
         used_count > 0
     }
@@ -1050,9 +1055,9 @@ impl VirtioDevice for Iommu {
     fn activate(
         &mut self,
-        _mem: GuestMemoryAtomic<GuestMemoryMmap>,
+        mem: GuestMemoryAtomic<GuestMemoryMmap>,
         interrupt_cb: Arc<dyn VirtioInterrupt>,
-        queues: Vec<(usize, Queue<GuestMemoryAtomic<GuestMemoryMmap>>, EventFd)>,
+        queues: Vec<(usize, Queue, EventFd)>,
     ) -> ActivateResult {
         self.common.activate(&queues, &interrupt_cb)?;
         let (kill_evt, pause_evt) = self.common.dup_eventfds();
@@ -1065,6 +1070,7 @@
         }
         let mut handler = IommuEpollHandler {
+            mem,
             queues: virtqueues,
             interrupt_cb,
             queue_evts,


@@ -160,7 +160,7 @@ impl TryInto<rate_limiter::RateLimiter> for RateLimiterConfig {
 /// to a host pointer and verify that the provided size define a valid
 /// range within a single memory region.
 /// Return None if it is out of bounds or if addr+size overlaps a single region.
-pub fn get_host_address_range<M: GuestMemory>(
+pub fn get_host_address_range<M: GuestMemory + ?Sized>(
     mem: &M,
     addr: GuestAddress,
     size: usize,


@@ -27,6 +27,7 @@ use seccompiler::SeccompAction;
 use std::collections::BTreeMap;
 use std::io;
 use std::mem::size_of;
+use std::ops::Deref;
 use std::os::unix::io::{AsRawFd, RawFd};
 use std::result;
 use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
@@ -34,11 +35,11 @@ use std::sync::mpsc;
 use std::sync::{Arc, Barrier, Mutex};
 use versionize::{VersionMap, Versionize, VersionizeResult};
 use versionize_derive::Versionize;
-use virtio_queue::{DescriptorChain, Queue};
+use virtio_queue::{DescriptorChain, Queue, QueueOwnedT, QueueT};
 use vm_device::dma_mapping::ExternalDmaMapping;
 use vm_memory::{
-    Address, ByteValued, Bytes, GuestAddress, GuestMemoryAtomic, GuestMemoryError,
-    GuestMemoryLoadGuard, GuestMemoryRegion,
+    Address, ByteValued, Bytes, GuestAddress, GuestAddressSpace, GuestMemoryAtomic,
+    GuestMemoryError, GuestMemoryLoadGuard, GuestMemoryRegion,
 };
 use vm_migration::protocol::MemoryRangeTable;
 use vm_migration::{
@@ -462,12 +463,13 @@ impl BlocksState {
 }
 struct MemEpollHandler {
+    mem: GuestMemoryAtomic<GuestMemoryMmap>,
     host_addr: u64,
     host_fd: Option<RawFd>,
     blocks_state: Arc<Mutex<BlocksState>>,
     config: Arc<Mutex<VirtioMemConfig>>,
     resize: ResizeSender,
-    queue: Queue<GuestMemoryAtomic<GuestMemoryMmap>>,
+    queue: Queue,
     interrupt_cb: Arc<dyn VirtioInterrupt>,
     queue_evt: EventFd,
     kill_evt: EventFd,
@@ -666,7 +668,7 @@
         let mut request_list = Vec::new();
         let mut used_count = 0;
-        for mut desc_chain in self.queue.iter().unwrap() {
+        for mut desc_chain in self.queue.iter(self.mem.memory()).unwrap() {
             request_list.push((
                 desc_chain.head_index(),
                 Request::parse(&mut desc_chain),
@@ -674,6 +676,7 @@
             ));
         }
+        let mem = self.mem.memory();
         for (head_index, request, memory) in request_list {
             let len = match request {
                 Err(e) => {
@@ -707,7 +710,7 @@
                 },
             };
-            self.queue.add_used(head_index, len).unwrap();
+            self.queue.add_used(mem.deref(), head_index, len).unwrap();
             used_count += 1;
         }
@@ -1002,9 +1005,9 @@ impl VirtioDevice for Mem {
     fn activate(
         &mut self,
-        _mem: GuestMemoryAtomic<GuestMemoryMmap>,
+        mem: GuestMemoryAtomic<GuestMemoryMmap>,
         interrupt_cb: Arc<dyn VirtioInterrupt>,
-        mut queues: Vec<(usize, Queue<GuestMemoryAtomic<GuestMemoryMmap>>, EventFd)>,
+        mut queues: Vec<(usize, Queue, EventFd)>,
     ) -> ActivateResult {
         self.common.activate(&queues, &interrupt_cb)?;
         let (kill_evt, pause_evt) = self.common.dup_eventfds();
@@ -1012,6 +1015,7 @@
         let (_, queue, queue_evt) = queues.remove(0);
         let mut handler = MemEpollHandler {
+            mem,
             host_addr: self.host_addr,
             host_fd: self.host_fd,
             blocks_state: Arc::clone(&self.blocks_state),


@@ -24,6 +24,7 @@ use net_util::{
 use seccompiler::SeccompAction;
 use std::net::Ipv4Addr;
 use std::num::Wrapping;
+use std::ops::Deref;
 use std::os::unix::io::{AsRawFd, RawFd};
 use std::result;
 use std::sync::atomic::{AtomicBool, Ordering};
@@ -35,8 +36,8 @@ use versionize::{VersionMap, Versionize, VersionizeResult};
 use versionize_derive::Versionize;
 use virtio_bindings::bindings::virtio_net::*;
 use virtio_bindings::bindings::virtio_ring::VIRTIO_RING_F_EVENT_IDX;
-use virtio_queue::Queue;
-use vm_memory::{ByteValued, GuestMemoryAtomic};
+use virtio_queue::{Queue, QueueT};
+use vm_memory::{ByteValued, GuestAddressSpace, GuestMemoryAtomic};
 use vm_migration::VersionMapped;
 use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable};
 use vm_virtio::AccessPlatform;
@@ -47,11 +48,12 @@ use vmm_sys_util::eventfd::EventFd;
 const CTRL_QUEUE_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 1;
 pub struct NetCtrlEpollHandler {
+    pub mem: GuestMemoryAtomic<GuestMemoryMmap>,
     pub kill_evt: EventFd,
     pub pause_evt: EventFd,
     pub ctrl_q: CtrlQueue,
     pub queue_evt: EventFd,
-    pub queue: Queue<GuestMemoryAtomic<GuestMemoryMmap>>,
+    pub queue: Queue,
     pub access_platform: Option<Arc<dyn AccessPlatform>>,
     pub interrupt_cb: Arc<dyn VirtioInterrupt>,
     pub queue_index: u16,
@@ -85,18 +87,19 @@ impl EpollHelperHandler for NetCtrlEpollHandler {
         let ev_type = event.data as u16;
         match ev_type {
             CTRL_QUEUE_EVENT => {
+                let mem = self.mem.memory();
                 if let Err(e) = self.queue_evt.read() {
                     error!("Failed to get control queue event: {:?}", e);
                     return true;
                 }
-                if let Err(e) = self
-                    .ctrl_q
-                    .process(&mut self.queue, self.access_platform.as_ref())
+                if let Err(e) =
+                    self.ctrl_q
+                        .process(mem.deref(), &mut self.queue, self.access_platform.as_ref())
                 {
                     error!("Failed to process control queue: {:?}", e);
                     return true;
                 } else {
-                    match self.queue.needs_notification() {
+                    match self.queue.needs_notification(mem.deref()) {
                         Ok(true) => {
                             if let Err(e) = self.signal_used_queue(self.queue_index) {
                                 error!("Error signalling that control queue was used: {:?}", e);
@@ -151,11 +154,12 @@ pub type Result<T> = result::Result<T, Error>;
 struct NetEpollHandler {
     net: NetQueuePair,
+    mem: GuestMemoryAtomic<GuestMemoryMmap>,
     interrupt_cb: Arc<dyn VirtioInterrupt>,
     kill_evt: EventFd,
     pause_evt: EventFd,
     queue_index_base: u16,
-    queue_pair: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
+    queue_pair: Vec<Queue>,
     queue_evt_pair: Vec<EventFd>,
     // Always generate interrupts until the driver has signalled to the device.
     // This mitigates a problem with interrupts from tap events being "lost" upon
@@ -206,7 +210,7 @@ impl NetEpollHandler {
     fn process_tx(&mut self) -> result::Result<(), DeviceError> {
         if self
             .net
-            .process_tx(&mut self.queue_pair[1])
+            .process_tx(&self.mem.memory(), &mut self.queue_pair[1])
             .map_err(DeviceError::NetQueuePair)?
             || !self.driver_awake
         {
@@ -235,7 +239,7 @@
     fn handle_rx_tap_event(&mut self) -> result::Result<(), DeviceError> {
         if self
            .net
-            .process_rx(&mut self.queue_pair[0])
+            .process_rx(&self.mem.memory(), &mut self.queue_pair[0])
            .map_err(DeviceError::NetQueuePair)?
            || !self.driver_awake
        {
@@ -262,13 +266,14 @@
             helper.add_event(rate_limiter.as_raw_fd(), TX_RATE_LIMITER_EVENT)?;
         }
+        let mem = self.mem.memory();
         // If there are some already available descriptors on the RX queue,
         // then we can start the thread while listening onto the TAP.
         if self.queue_pair[0]
-            .used_idx(Ordering::Acquire)
+            .used_idx(mem.deref(), Ordering::Acquire)
             .map_err(EpollHelperError::QueueRingIndex)?
             < self.queue_pair[0]
-                .avail_idx(Ordering::Acquire)
+                .avail_idx(mem.deref(), Ordering::Acquire)
                 .map_err(EpollHelperError::QueueRingIndex)?
         {
             helper.add_event(self.net.tap.as_raw_fd(), RX_TAP_EVENT)?;
@@ -583,9 +588,9 @@ impl VirtioDevice for Net {
     fn activate(
         &mut self,
-        _mem: GuestMemoryAtomic<GuestMemoryMmap>,
+        mem: GuestMemoryAtomic<GuestMemoryMmap>,
         interrupt_cb: Arc<dyn VirtioInterrupt>,
-        mut queues: Vec<(usize, Queue<GuestMemoryAtomic<GuestMemoryMmap>>, EventFd)>,
+        mut queues: Vec<(usize, Queue, EventFd)>,
     ) -> ActivateResult {
         self.common.activate(&queues, &interrupt_cb)?;
@@ -599,6 +604,7 @@
             let (kill_evt, pause_evt) = self.common.dup_eventfds();
             let mut ctrl_handler = NetCtrlEpollHandler {
+                mem: mem.clone(),
                 kill_evt,
                 pause_evt,
                 ctrl_q: CtrlQueue::new(self.taps.clone()),
@@ -685,6 +691,7 @@
                     tx_rate_limiter,
                     access_platform: self.common.access_platform.clone(),
                 },
+                mem: mem.clone(),
                 queue_index_base: (i * 2) as u16,
                 queue_pair,
                 queue_evt_pair,


@@ -21,16 +21,17 @@ use std::fmt::{self, Display};
 use std::fs::File;
 use std::io;
 use std::mem::size_of;
+use std::ops::Deref;
 use std::os::unix::io::AsRawFd;
 use std::result;
 use std::sync::atomic::AtomicBool;
 use std::sync::{Arc, Barrier};
 use versionize::{VersionMap, Versionize, VersionizeResult};
 use versionize_derive::Versionize;
-use virtio_queue::{DescriptorChain, Queue};
+use virtio_queue::{DescriptorChain, Queue, QueueOwnedT, QueueT};
 use vm_memory::{
-    Address, ByteValued, Bytes, GuestAddress, GuestMemoryAtomic, GuestMemoryError,
-    GuestMemoryLoadGuard,
+    Address, ByteValued, Bytes, GuestAddress, GuestAddressSpace, GuestMemoryAtomic,
+    GuestMemoryError, GuestMemoryLoadGuard,
 };
 use vm_migration::VersionMapped;
 use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable};
@@ -165,7 +166,8 @@ impl Request {
 }
 struct PmemEpollHandler {
-    queue: Queue<GuestMemoryAtomic<GuestMemoryMmap>>,
+    mem: GuestMemoryAtomic<GuestMemoryMmap>,
+    queue: Queue,
     disk: File,
     interrupt_cb: Arc<dyn VirtioInterrupt>,
     queue_evt: EventFd,
@@ -178,7 +180,7 @@
     fn process_queue(&mut self) -> bool {
         let mut used_desc_heads = [(0, 0); QUEUE_SIZE as usize];
         let mut used_count = 0;
-        for mut desc_chain in self.queue.iter().unwrap() {
+        for mut desc_chain in self.queue.iter(self.mem.memory()).unwrap() {
             let len = match Request::parse(&mut desc_chain, self.access_platform.as_ref()) {
                 Ok(ref req) if (req.type_ == RequestType::Flush) => {
                     let status_code = match self.disk.sync_all() {
@@ -213,8 +215,9 @@
             used_count += 1;
         }
+        let mem = self.mem.memory();
         for &(desc_index, len) in &used_desc_heads[..used_count] {
-            self.queue.add_used(desc_index, len).unwrap();
+            self.queue.add_used(mem.deref(), desc_index, len).unwrap();
         }
         used_count > 0
     }
@@ -377,9 +380,9 @@ impl VirtioDevice for Pmem {
     fn activate(
         &mut self,
-        _mem: GuestMemoryAtomic<GuestMemoryMmap>,
+        mem: GuestMemoryAtomic<GuestMemoryMmap>,
         interrupt_cb: Arc<dyn VirtioInterrupt>,
-        mut queues: Vec<(usize, Queue<GuestMemoryAtomic<GuestMemoryMmap>>, EventFd)>,
+        mut queues: Vec<(usize, Queue, EventFd)>,
     ) -> ActivateResult {
         self.common.activate(&queues, &interrupt_cb)?;
         let (kill_evt, pause_evt) = self.common.dup_eventfds();
@@ -392,6 +395,7 @@
         let (_, queue, queue_evt) = queues.remove(0);
         let mut handler = PmemEpollHandler {
+            mem,
             queue,
             disk,
             interrupt_cb,

View File

@ -15,14 +15,15 @@ use crate::{VirtioInterrupt, VirtioInterruptType};
use seccompiler::SeccompAction; use seccompiler::SeccompAction;
use std::fs::File; use std::fs::File;
use std::io; use std::io;
use std::ops::Deref;
use std::os::unix::io::AsRawFd; use std::os::unix::io::AsRawFd;
use std::result; use std::result;
use std::sync::atomic::AtomicBool; use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Barrier}; use std::sync::{Arc, Barrier};
use versionize::{VersionMap, Versionize, VersionizeResult}; use versionize::{VersionMap, Versionize, VersionizeResult};
use versionize_derive::Versionize; use versionize_derive::Versionize;
use virtio_queue::Queue; use virtio_queue::{Queue, QueueOwnedT, QueueT};
use vm_memory::{Bytes, GuestMemoryAtomic}; use vm_memory::{Bytes, GuestAddressSpace, GuestMemoryAtomic};
use vm_migration::VersionMapped; use vm_migration::VersionMapped;
use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable}; use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable};
use vm_virtio::{AccessPlatform, Translatable}; use vm_virtio::{AccessPlatform, Translatable};
@ -35,7 +36,8 @@ const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE];
const QUEUE_AVAIL_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 1; const QUEUE_AVAIL_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 1;
struct RngEpollHandler { struct RngEpollHandler {
queue: Queue<GuestMemoryAtomic<GuestMemoryMmap>>, mem: GuestMemoryAtomic<GuestMemoryMmap>,
queue: Queue,
random_file: File, random_file: File,
interrupt_cb: Arc<dyn VirtioInterrupt>, interrupt_cb: Arc<dyn VirtioInterrupt>,
queue_evt: EventFd, queue_evt: EventFd,
@ -50,7 +52,7 @@ impl RngEpollHandler {
let mut used_desc_heads = [(0, 0); QUEUE_SIZE as usize]; let mut used_desc_heads = [(0, 0); QUEUE_SIZE as usize];
let mut used_count = 0; let mut used_count = 0;
for mut desc_chain in queue.iter().unwrap() { for mut desc_chain in queue.iter(self.mem.memory()).unwrap() {
let desc = desc_chain.next().unwrap(); let desc = desc_chain.next().unwrap();
let mut len = 0; let mut len = 0;
@ -75,8 +77,9 @@ impl RngEpollHandler {
used_count += 1; used_count += 1;
} }
let mem = self.mem.memory();
for &(desc_index, len) in &used_desc_heads[..used_count] { for &(desc_index, len) in &used_desc_heads[..used_count] {
queue.add_used(desc_index, len).unwrap(); queue.add_used(mem.deref(), desc_index, len).unwrap();
} }
used_count > 0 used_count > 0
} }
@ -217,9 +220,9 @@ impl VirtioDevice for Rng {
fn activate( fn activate(
&mut self, &mut self,
_mem: GuestMemoryAtomic<GuestMemoryMmap>, mem: GuestMemoryAtomic<GuestMemoryMmap>,
interrupt_cb: Arc<dyn VirtioInterrupt>, interrupt_cb: Arc<dyn VirtioInterrupt>,
mut queues: Vec<(usize, Queue<GuestMemoryAtomic<GuestMemoryMmap>>, EventFd)>, mut queues: Vec<(usize, Queue, EventFd)>,
) -> ActivateResult { ) -> ActivateResult {
self.common.activate(&queues, &interrupt_cb)?; self.common.activate(&queues, &interrupt_cb)?;
let (kill_evt, pause_evt) = self.common.dup_eventfds(); let (kill_evt, pause_evt) = self.common.dup_eventfds();
@ -233,6 +236,7 @@ impl VirtioDevice for Rng {
let (_, queue, queue_evt) = queues.remove(0); let (_, queue, queue_evt) = queues.remove(0);
let mut handler = RngEpollHandler { let mut handler = RngEpollHandler {
mem,
queue, queue,
random_file, random_file,
interrupt_cb, interrupt_cb,

View File

@ -6,14 +6,13 @@
// //
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause // SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
use crate::{GuestMemoryMmap, VirtioDevice}; use crate::VirtioDevice;
use byteorder::{ByteOrder, LittleEndian}; use byteorder::{ByteOrder, LittleEndian};
use std::sync::atomic::{AtomicU16, Ordering}; use std::sync::atomic::{AtomicU16, Ordering};
use std::sync::{Arc, Mutex}; use std::sync::{Arc, Mutex};
use versionize::{VersionMap, Versionize, VersionizeResult}; use versionize::{VersionMap, Versionize, VersionizeResult};
use versionize_derive::Versionize; use versionize_derive::Versionize;
use virtio_queue::Queue; use virtio_queue::{Queue, QueueT};
use vm_memory::{GuestAddress, GuestMemoryAtomic};
use vm_migration::{MigratableError, Pausable, Snapshot, Snapshottable, VersionMapped}; use vm_migration::{MigratableError, Pausable, Snapshot, Snapshottable, VersionMapped};
use vm_virtio::AccessPlatform; use vm_virtio::AccessPlatform;
@ -90,7 +89,7 @@ impl VirtioPciCommonConfig {
&mut self, &mut self,
offset: u64, offset: u64,
data: &mut [u8], data: &mut [u8],
queues: &mut [Queue<GuestMemoryAtomic<GuestMemoryMmap>>], queues: &mut [Queue],
device: Arc<Mutex<dyn VirtioDevice>>, device: Arc<Mutex<dyn VirtioDevice>>,
) { ) {
assert!(data.len() <= 8); assert!(data.len() <= 8);
@ -120,7 +119,7 @@ impl VirtioPciCommonConfig {
&mut self, &mut self,
offset: u64, offset: u64,
data: &[u8], data: &[u8],
queues: &mut [Queue<GuestMemoryAtomic<GuestMemoryMmap>>], queues: &mut [Queue],
device: Arc<Mutex<dyn VirtioDevice>>, device: Arc<Mutex<dyn VirtioDevice>>,
) { ) {
assert!(data.len() <= 8); assert!(data.len() <= 8);
@ -159,20 +158,16 @@ impl VirtioPciCommonConfig {
} }
} }
fn read_common_config_word( fn read_common_config_word(&self, offset: u64, queues: &[Queue]) -> u16 {
&self,
offset: u64,
queues: &[Queue<GuestMemoryAtomic<GuestMemoryMmap>>],
) -> u16 {
debug!("read_common_config_word: offset 0x{:x}", offset); debug!("read_common_config_word: offset 0x{:x}", offset);
match offset { match offset {
0x10 => self.msix_config.load(Ordering::Acquire), 0x10 => self.msix_config.load(Ordering::Acquire),
0x12 => queues.len() as u16, // num_queues 0x12 => queues.len() as u16, // num_queues
0x16 => self.queue_select, 0x16 => self.queue_select,
0x18 => self.with_queue(queues, |q| q.state.size).unwrap_or(0), 0x18 => self.with_queue(queues, |q| q.size()).unwrap_or(0),
0x1a => self.msix_queues.lock().unwrap()[self.queue_select as usize], 0x1a => self.msix_queues.lock().unwrap()[self.queue_select as usize],
0x1c => { 0x1c => {
if self.with_queue(queues, |q| q.state.ready).unwrap_or(false) { if self.with_queue(queues, |q| q.ready()).unwrap_or(false) {
1 1
} else { } else {
0 0
@ -186,17 +181,12 @@ impl VirtioPciCommonConfig {
} }
} }
fn write_common_config_word( fn write_common_config_word(&mut self, offset: u64, value: u16, queues: &mut [Queue]) {
&mut self,
offset: u64,
value: u16,
queues: &mut [Queue<GuestMemoryAtomic<GuestMemoryMmap>>],
) {
debug!("write_common_config_word: offset 0x{:x}", offset); debug!("write_common_config_word: offset 0x{:x}", offset);
match offset { match offset {
0x10 => self.msix_config.store(value, Ordering::Release), 0x10 => self.msix_config.store(value, Ordering::Release),
0x16 => self.queue_select = value, 0x16 => self.queue_select = value,
0x18 => self.with_queue_mut(queues, |q| q.state.size = value), 0x18 => self.with_queue_mut(queues, |q| q.set_size(value)),
0x1a => self.msix_queues.lock().unwrap()[self.queue_select as usize] = value, 0x1a => self.msix_queues.lock().unwrap()[self.queue_select as usize] = value,
0x1c => self.with_queue_mut(queues, |q| { 0x1c => self.with_queue_mut(queues, |q| {
let ready = value == 1; let ready = value == 1;
@ -204,15 +194,9 @@ impl VirtioPciCommonConfig {
// Translate address of descriptor table and vrings. // Translate address of descriptor table and vrings.
if let Some(access_platform) = &self.access_platform { if let Some(access_platform) = &self.access_platform {
if ready { if ready {
let desc_table = access_platform
    .translate_gva(q.state.desc_table.0, 0)
    .unwrap();
let avail_ring = access_platform
    .translate_gva(q.state.avail_ring.0, 0)
    .unwrap();
let used_ring = access_platform
    .translate_gva(q.state.used_ring.0, 0)
    .unwrap();
let desc_table = access_platform.translate_gva(q.desc_table(), 0).unwrap();
let avail_ring = access_platform.translate_gva(q.avail_ring(), 0).unwrap();
let used_ring = access_platform.translate_gva(q.used_ring(), 0).unwrap();
q.set_desc_table_address( q.set_desc_table_address(
Some((desc_table & 0xffff_ffff) as u32), Some((desc_table & 0xffff_ffff) as u32),
Some((desc_table >> 32) as u32), Some((desc_table >> 32) as u32),
@ -260,17 +244,10 @@ impl VirtioPciCommonConfig {
&mut self, &mut self,
offset: u64, offset: u64,
value: u32, value: u32,
queues: &mut [Queue<GuestMemoryAtomic<GuestMemoryMmap>>], queues: &mut [Queue],
device: Arc<Mutex<dyn VirtioDevice>>, device: Arc<Mutex<dyn VirtioDevice>>,
) { ) {
debug!("write_common_config_dword: offset 0x{:x}", offset); debug!("write_common_config_dword: offset 0x{:x}", offset);
fn hi(v: &mut GuestAddress, x: u32) {
*v = (*v & 0xffff_ffff) | ((u64::from(x)) << 32)
}
fn lo(v: &mut GuestAddress, x: u32) {
*v = (*v & !0xffff_ffff) | (u64::from(x))
}
match offset { match offset {
0x00 => self.device_feature_select = value, 0x00 => self.device_feature_select = value,
@ -287,12 +264,12 @@ impl VirtioPciCommonConfig {
); );
} }
} }
0x20 => self.with_queue_mut(queues, |q| lo(&mut q.state.desc_table, value)), 0x20 => self.with_queue_mut(queues, |q| q.set_desc_table_address(Some(value), None)),
0x24 => self.with_queue_mut(queues, |q| hi(&mut q.state.desc_table, value)), 0x24 => self.with_queue_mut(queues, |q| q.set_desc_table_address(None, Some(value))),
0x28 => self.with_queue_mut(queues, |q| lo(&mut q.state.avail_ring, value)), 0x28 => self.with_queue_mut(queues, |q| q.set_avail_ring_address(Some(value), None)),
0x2c => self.with_queue_mut(queues, |q| hi(&mut q.state.avail_ring, value)), 0x2c => self.with_queue_mut(queues, |q| q.set_avail_ring_address(None, Some(value))),
0x30 => self.with_queue_mut(queues, |q| lo(&mut q.state.used_ring, value)), 0x30 => self.with_queue_mut(queues, |q| q.set_used_ring_address(Some(value), None)),
0x34 => self.with_queue_mut(queues, |q| hi(&mut q.state.used_ring, value)), 0x34 => self.with_queue_mut(queues, |q| q.set_used_ring_address(None, Some(value))),
_ => { _ => {
warn!("invalid virtio register dword write: 0x{:x}", offset); warn!("invalid virtio register dword write: 0x{:x}", offset);
} }
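These dword handlers now go through the QueueT setters, which take one Option<u32> per half of the 64-bit ring address. A small sketch of the semantics, using arbitrary addresses; a half passed as None is left untouched:

    use virtio_queue::{Queue, QueueT};

    fn main() {
        let mut q = Queue::new(256).unwrap();
        // The guest writes the low dword of the descriptor table first...
        q.set_desc_table_address(Some(0xdead_0000), None);
        // ...then the high dword; the low half written above is preserved.
        q.set_desc_table_address(None, Some(0x1));
        assert_eq!(q.desc_table(), 0x0000_0001_dead_0000);
    }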
@ -304,39 +281,30 @@ impl VirtioPciCommonConfig {
0 // Assume the guest has no reason to read write-only registers. 0 // Assume the guest has no reason to read write-only registers.
} }
fn write_common_config_qword( fn write_common_config_qword(&mut self, offset: u64, value: u64, queues: &mut [Queue]) {
&mut self,
offset: u64,
value: u64,
queues: &mut [Queue<GuestMemoryAtomic<GuestMemoryMmap>>],
) {
debug!("write_common_config_qword: offset 0x{:x}", offset); debug!("write_common_config_qword: offset 0x{:x}", offset);
let low = Some((value & 0xffff_ffff) as u32);
let high = Some((value >> 32) as u32);
match offset { match offset {
0x20 => self.with_queue_mut(queues, |q| q.state.desc_table = GuestAddress(value)), 0x20 => self.with_queue_mut(queues, |q| q.set_desc_table_address(low, high)),
0x28 => self.with_queue_mut(queues, |q| q.state.avail_ring = GuestAddress(value)), 0x28 => self.with_queue_mut(queues, |q| q.set_avail_ring_address(low, high)),
0x30 => self.with_queue_mut(queues, |q| q.state.used_ring = GuestAddress(value)), 0x30 => self.with_queue_mut(queues, |q| q.set_used_ring_address(low, high)),
_ => { _ => {
warn!("invalid virtio register qword write: 0x{:x}", offset); warn!("invalid virtio register qword write: 0x{:x}", offset);
} }
} }
} }
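The qword path is the same setter fed both halves at once, split out of the single 64-bit value exactly as the low/high computation above does it:

    use virtio_queue::{Queue, QueueT};

    fn main() {
        let mut q = Queue::new(256).unwrap();
        let value: u64 = 0x0000_0001_dead_b000;
        let low = Some((value & 0xffff_ffff) as u32);
        let high = Some((value >> 32) as u32);
        q.set_avail_ring_address(low, high);
        assert_eq!(q.avail_ring(), value);
    }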
fn with_queue<U, F>( fn with_queue<U, F>(&self, queues: &[Queue], f: F) -> Option<U>
&self,
queues: &[Queue<GuestMemoryAtomic<GuestMemoryMmap>>],
f: F,
) -> Option<U>
where where
F: FnOnce(&Queue<GuestMemoryAtomic<GuestMemoryMmap>>) -> U, F: FnOnce(&Queue) -> U,
{ {
queues.get(self.queue_select as usize).map(f) queues.get(self.queue_select as usize).map(f)
} }
fn with_queue_mut<F: FnOnce(&mut Queue<GuestMemoryAtomic<GuestMemoryMmap>>)>( fn with_queue_mut<F: FnOnce(&mut Queue)>(&self, queues: &mut [Queue], f: F) {
&self,
queues: &mut [Queue<GuestMemoryAtomic<GuestMemoryMmap>>],
f: F,
) {
if let Some(queue) = queues.get_mut(self.queue_select as usize) { if let Some(queue) = queues.get_mut(self.queue_select as usize) {
f(queue); f(queue);
} }
@ -385,7 +353,7 @@ mod tests {
&mut self, &mut self,
_mem: GuestMemoryAtomic<GuestMemoryMmap>, _mem: GuestMemoryAtomic<GuestMemoryMmap>,
_interrupt_evt: Arc<dyn VirtioInterrupt>, _interrupt_evt: Arc<dyn VirtioInterrupt>,
_queues: Vec<(usize, Queue<GuestMemoryAtomic<GuestMemoryMmap>>, EventFd)>, _queues: Vec<(usize, Queue, EventFd)>,
) -> ActivateResult { ) -> ActivateResult {
Ok(()) Ok(())
} }

View File

@ -24,19 +24,20 @@ use pci::{
use std::any::Any; use std::any::Any;
use std::cmp; use std::cmp;
use std::io::Write; use std::io::Write;
use std::ops::Deref;
use std::result; use std::result;
use std::sync::atomic::{AtomicBool, AtomicU16, AtomicUsize, Ordering}; use std::sync::atomic::{AtomicBool, AtomicU16, AtomicUsize, Ordering};
use std::sync::{Arc, Barrier, Mutex}; use std::sync::{Arc, Barrier, Mutex};
use versionize::{VersionMap, Versionize, VersionizeResult}; use versionize::{VersionMap, Versionize, VersionizeResult};
use versionize_derive::Versionize; use versionize_derive::Versionize;
use virtio_queue::{Error as QueueError, Queue}; use virtio_queue::{Error as QueueError, Queue, QueueT};
use vm_allocator::{AddressAllocator, SystemAllocator}; use vm_allocator::{AddressAllocator, SystemAllocator};
use vm_device::dma_mapping::ExternalDmaMapping; use vm_device::dma_mapping::ExternalDmaMapping;
use vm_device::interrupt::{ use vm_device::interrupt::{
InterruptIndex, InterruptManager, InterruptSourceGroup, MsiIrqGroupConfig, InterruptIndex, InterruptManager, InterruptSourceGroup, MsiIrqGroupConfig,
}; };
use vm_device::{BusDevice, Resource}; use vm_device::{BusDevice, Resource};
use vm_memory::{Address, ByteValued, GuestAddress, GuestMemoryAtomic, Le32}; use vm_memory::{Address, ByteValued, GuestAddress, GuestAddressSpace, GuestMemoryAtomic, Le32};
use vm_migration::{ use vm_migration::{
Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable, VersionMapped, Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable, VersionMapped,
}; };
@ -292,8 +293,7 @@ pub struct VirtioPciDeviceActivator {
memory: Option<GuestMemoryAtomic<GuestMemoryMmap>>, memory: Option<GuestMemoryAtomic<GuestMemoryMmap>>,
device: Arc<Mutex<dyn VirtioDevice>>, device: Arc<Mutex<dyn VirtioDevice>>,
device_activated: Arc<AtomicBool>, device_activated: Arc<AtomicBool>,
#[allow(clippy::type_complexity)] queues: Option<Vec<(usize, Queue, EventFd)>>,
queues: Option<Vec<(usize, Queue<GuestMemoryAtomic<GuestMemoryMmap>>, EventFd)>>,
barrier: Option<Arc<Barrier>>, barrier: Option<Arc<Barrier>>,
id: String, id: String,
} }
@ -342,11 +342,11 @@ pub struct VirtioPciDevice {
interrupt_source_group: Arc<dyn InterruptSourceGroup>, interrupt_source_group: Arc<dyn InterruptSourceGroup>,
// virtio queues // virtio queues
queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>, queues: Vec<Queue>,
queue_evts: Vec<EventFd>, queue_evts: Vec<EventFd>,
// Guest memory // Guest memory
memory: Option<GuestMemoryAtomic<GuestMemoryMmap>>, memory: GuestMemoryAtomic<GuestMemoryMmap>,
// Settings PCI BAR // Settings PCI BAR
settings_bar: u8, settings_bar: u8,
@ -406,12 +406,7 @@ impl VirtioPciDevice {
let queues = locked_device let queues = locked_device
.queue_max_sizes() .queue_max_sizes()
.iter() .iter()
.map(|&s| { .map(|&s| Queue::new(s).unwrap())
Queue::<GuestMemoryAtomic<GuestMemoryMmap>, virtio_queue::QueueState>::new(
memory.clone(),
s,
)
})
.collect(); .collect();
let pci_device_id = VIRTIO_PCI_DEVICE_ID_BASE + locked_device.device_type() as u16; let pci_device_id = VIRTIO_PCI_DEVICE_ID_BASE + locked_device.device_type() as u16;
@ -482,7 +477,7 @@ impl VirtioPciDevice {
virtio_interrupt: None, virtio_interrupt: None,
queues, queues,
queue_evts, queue_evts,
memory: Some(memory), memory,
settings_bar: 0, settings_bar: 0,
use_64bit_bar, use_64bit_bar,
interrupt_source_group, interrupt_source_group,
@ -514,11 +509,11 @@ impl VirtioPciDevice {
.iter() .iter()
.map(|q| QueueState { .map(|q| QueueState {
max_size: q.max_size(), max_size: q.max_size(),
size: q.state.size, size: q.size(),
ready: q.state.ready, ready: q.ready(),
desc_table: q.state.desc_table.0, desc_table: q.desc_table(),
avail_ring: q.state.avail_ring.0, avail_ring: q.avail_ring(),
used_ring: q.state.used_ring.0, used_ring: q.used_ring(),
}) })
.collect(), .collect(),
} }
@ -532,20 +527,26 @@ impl VirtioPciDevice {
// Update virtqueues indexes for both available and used rings. // Update virtqueues indexes for both available and used rings.
for (i, queue) in self.queues.iter_mut().enumerate() { for (i, queue) in self.queues.iter_mut().enumerate() {
queue.state.size = state.queues[i].size; queue.set_size(state.queues[i].size);
queue.state.ready = state.queues[i].ready; queue.set_ready(state.queues[i].ready);
queue.state.desc_table = GuestAddress(state.queues[i].desc_table);
queue.state.avail_ring = GuestAddress(state.queues[i].avail_ring);
queue.state.used_ring = GuestAddress(state.queues[i].used_ring);
queue
    .try_set_desc_table_address(GuestAddress(state.queues[i].desc_table))
    .unwrap();
queue
    .try_set_avail_ring_address(GuestAddress(state.queues[i].avail_ring))
    .unwrap();
queue
    .try_set_used_ring_address(GuestAddress(state.queues[i].used_ring))
    .unwrap();
queue.set_next_avail( queue.set_next_avail(
queue queue
.used_idx(Ordering::Acquire) .used_idx(self.memory.memory().deref(), Ordering::Acquire)
.map_err(Error::QueueRingIndex)? .map_err(Error::QueueRingIndex)?
.0, .0,
); );
queue.set_next_used( queue.set_next_used(
queue queue
.used_idx(Ordering::Acquire) .used_idx(self.memory.memory().deref(), Ordering::Acquire)
.map_err(Error::QueueRingIndex)? .map_err(Error::QueueRingIndex)?
.0, .0,
); );
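The snapshot and restore hunks above replace direct field copies with QueueT getters and setters. A hedged round-trip sketch, where SavedQueue is an illustrative stand-in for the device's versioned state struct:

    use virtio_queue::{Queue, QueueT};
    use vm_memory::GuestAddress;

    struct SavedQueue {
        size: u16,
        ready: bool,
        desc_table: u64,
        avail_ring: u64,
        used_ring: u64,
    }

    fn save(q: &Queue) -> SavedQueue {
        SavedQueue {
            size: q.size(),
            ready: q.ready(),
            desc_table: q.desc_table(),
            avail_ring: q.avail_ring(),
            used_ring: q.used_ring(),
        }
    }

    fn restore(q: &mut Queue, s: &SavedQueue) {
        q.set_size(s.size);
        q.set_ready(s.ready);
        // The try_set_* variants validate the address, hence the Results.
        q.try_set_desc_table_address(GuestAddress(s.desc_table)).unwrap();
        q.try_set_avail_ring_address(GuestAddress(s.avail_ring)).unwrap();
        q.try_set_used_ring_address(GuestAddress(s.used_ring)).unwrap();
    }

    fn main() {
        let mut q = Queue::new(256).unwrap();
        let saved = save(&q);
        restore(&mut q, &saved);
        assert_eq!(q.size(), saved.size);
    }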
@ -701,11 +702,11 @@ impl VirtioPciDevice {
let mut queues = Vec::new(); let mut queues = Vec::new();
for (queue_index, queue) in self.queues.iter().enumerate() { for (queue_index, queue) in self.queues.iter().enumerate() {
if !queue.state.ready { if !queue.ready() {
continue; continue;
} }
if !queue.is_valid() { if !queue.is_valid(self.memory.memory().deref()) {
error!("Queue {} is not valid", queue_index); error!("Queue {} is not valid", queue_index);
} }
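A sketch of that activation-time check: ready() is pure queue state, while is_valid() now needs the guest memory so it can range-check the rings. The single-region layout and the ring addresses below are assumptions for illustration:

    use virtio_queue::{Queue, QueueT};
    use vm_memory::{GuestAddress, GuestMemoryMmap};

    fn main() {
        // One 64 KiB guest region starting at address 0.
        let mem: GuestMemoryMmap =
            GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let mut q = Queue::new(256).unwrap();
        q.try_set_desc_table_address(GuestAddress(0x1000)).unwrap();
        q.try_set_avail_ring_address(GuestAddress(0x2000)).unwrap();
        q.try_set_used_ring_address(GuestAddress(0x3000)).unwrap();
        q.set_ready(true);
        // All three rings fit inside the region, so the queue is valid.
        assert!(q.is_valid(&mem));
    }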
@ -718,7 +719,7 @@ impl VirtioPciDevice {
VirtioPciDeviceActivator { VirtioPciDeviceActivator {
interrupt: self.virtio_interrupt.take(), interrupt: self.virtio_interrupt.take(),
memory: self.memory.clone(), memory: Some(self.memory.clone()),
device: self.device.clone(), device: self.device.clone(),
queues: Some(queues), queues: Some(queues),
device_activated: self.device_activated.clone(), device_activated: self.device_activated.clone(),

View File

@ -19,7 +19,7 @@ use vhost::{
vhost_kern::VhostKernFeatures, vhost_kern::VhostKernFeatures,
VhostBackend, VringConfigData, VhostBackend, VringConfigData,
}; };
use virtio_queue::{Descriptor, Queue}; use virtio_queue::{Descriptor, Queue, QueueT};
use vm_device::dma_mapping::ExternalDmaMapping; use vm_device::dma_mapping::ExternalDmaMapping;
use vm_memory::{GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic}; use vm_memory::{GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic};
use vm_virtio::{AccessPlatform, Translatable}; use vm_virtio::{AccessPlatform, Translatable};
@ -143,9 +143,9 @@ impl Vdpa {
fn activate_vdpa( fn activate_vdpa(
&mut self, &mut self,
_mem: &GuestMemoryMmap, mem: &GuestMemoryMmap,
virtio_interrupt: &Arc<dyn VirtioInterrupt>, virtio_interrupt: &Arc<dyn VirtioInterrupt>,
queues: Vec<(usize, Queue<GuestMemoryAtomic<GuestMemoryMmap>>, EventFd)>, queues: Vec<(usize, Queue, EventFd)>,
) -> Result<()> { ) -> Result<()> {
self.vhost self.vhost
.set_features(self.common.acked_features) .set_features(self.common.acked_features)
@ -156,7 +156,7 @@ impl Vdpa {
for (queue_index, queue, queue_evt) in queues.iter() { for (queue_index, queue, queue_evt) in queues.iter() {
let queue_max_size = queue.max_size(); let queue_max_size = queue.max_size();
let queue_size = queue.state.size; let queue_size = queue.size();
self.vhost self.vhost
.set_vring_num(*queue_index, queue_size) .set_vring_num(*queue_index, queue_size)
.map_err(Error::SetVringNum)?; .map_err(Error::SetVringNum)?;
@ -165,30 +165,18 @@ impl Vdpa {
queue_max_size, queue_max_size,
queue_size, queue_size,
flags: 0u32, flags: 0u32,
desc_table_addr: queue
    .state
    .desc_table
    .translate_gpa(
        self.common.access_platform.as_ref(),
        queue_size as usize * std::mem::size_of::<Descriptor>(),
    )
    .0,
used_ring_addr: queue
    .state
    .used_ring
    .translate_gpa(
        self.common.access_platform.as_ref(),
        4 + queue_size as usize * 8,
    )
    .0,
avail_ring_addr: queue
    .state
    .avail_ring
    .translate_gpa(
        self.common.access_platform.as_ref(),
        4 + queue_size as usize * 2,
    )
    .0,
desc_table_addr: queue.desc_table().translate_gpa(
    self.common.access_platform.as_ref(),
    queue_size as usize * std::mem::size_of::<Descriptor>(),
),
used_ring_addr: queue.used_ring().translate_gpa(
    self.common.access_platform.as_ref(),
    4 + queue_size as usize * 8,
),
avail_ring_addr: queue.avail_ring().translate_gpa(
    self.common.access_platform.as_ref(),
    4 + queue_size as usize * 2,
),
log_addr: None, log_addr: None,
}; };
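The lengths passed to translate_gpa() come from the split-virtqueue layout: 16 bytes per descriptor, plus a 4-byte header (flags and idx) in front of 2-byte avail entries and 8-byte used elements. Spelled out with illustrative helper names:

    use virtio_queue::Descriptor;

    fn desc_table_bytes(queue_size: usize) -> usize {
        // One 16-byte descriptor per entry.
        queue_size * std::mem::size_of::<Descriptor>()
    }

    fn avail_ring_bytes(queue_size: usize) -> usize {
        // flags (u16) + idx (u16) + one u16 ring entry per descriptor.
        4 + queue_size * 2
    }

    fn used_ring_bytes(queue_size: usize) -> usize {
        // flags (u16) + idx (u16) + one 8-byte used element (id: u32, len: u32).
        4 + queue_size * 8
    }

    fn main() {
        assert_eq!(desc_table_bytes(256), 4096);
        assert_eq!(avail_ring_bytes(256), 516);
        assert_eq!(used_ring_bytes(256), 2052);
    }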
@ -199,7 +187,7 @@ impl Vdpa {
.set_vring_base( .set_vring_base(
*queue_index, *queue_index,
queue queue
.avail_idx(Ordering::Acquire) .avail_idx(mem, Ordering::Acquire)
.map_err(Error::GetAvailableIndex)? .map_err(Error::GetAvailableIndex)?
.0, .0,
) )
@ -296,7 +284,7 @@ impl VirtioDevice for Vdpa {
&mut self, &mut self,
mem: GuestMemoryAtomic<GuestMemoryMmap>, mem: GuestMemoryAtomic<GuestMemoryMmap>,
virtio_interrupt: Arc<dyn VirtioInterrupt>, virtio_interrupt: Arc<dyn VirtioInterrupt>,
queues: Vec<(usize, Queue<GuestMemoryAtomic<GuestMemoryMmap>>, EventFd)>, queues: Vec<(usize, Queue, EventFd)>,
) -> ActivateResult { ) -> ActivateResult {
self.activate_vdpa(&mem.memory(), &virtio_interrupt, queues) self.activate_vdpa(&mem.memory(), &virtio_interrupt, queues)
.map_err(ActivateError::ActivateVdpa)?; .map_err(ActivateError::ActivateVdpa)?;

View File

@ -292,7 +292,7 @@ impl VirtioDevice for Blk {
&mut self, &mut self,
mem: GuestMemoryAtomic<GuestMemoryMmap>, mem: GuestMemoryAtomic<GuestMemoryMmap>,
interrupt_cb: Arc<dyn VirtioInterrupt>, interrupt_cb: Arc<dyn VirtioInterrupt>,
queues: Vec<(usize, Queue<GuestMemoryAtomic<GuestMemoryMmap>>, EventFd)>, queues: Vec<(usize, Queue, EventFd)>,
) -> ActivateResult { ) -> ActivateResult {
self.common.activate(&queues, &interrupt_cb)?; self.common.activate(&queues, &interrupt_cb)?;
self.guest_memory = Some(mem.clone()); self.guest_memory = Some(mem.clone());

View File

@ -504,7 +504,7 @@ impl VirtioDevice for Fs {
&mut self, &mut self,
mem: GuestMemoryAtomic<GuestMemoryMmap>, mem: GuestMemoryAtomic<GuestMemoryMmap>,
interrupt_cb: Arc<dyn VirtioInterrupt>, interrupt_cb: Arc<dyn VirtioInterrupt>,
queues: Vec<(usize, Queue<GuestMemoryAtomic<GuestMemoryMmap>>, EventFd)>, queues: Vec<(usize, Queue, EventFd)>,
) -> ActivateResult { ) -> ActivateResult {
self.common.activate(&queues, &interrupt_cb)?; self.common.activate(&queues, &interrupt_cb)?;
self.guest_memory = Some(mem.clone()); self.guest_memory = Some(mem.clone());

View File

@ -167,7 +167,7 @@ pub struct VhostUserEpollHandler<S: VhostUserMasterReqHandler> {
pub mem: GuestMemoryAtomic<GuestMemoryMmap>, pub mem: GuestMemoryAtomic<GuestMemoryMmap>,
pub kill_evt: EventFd, pub kill_evt: EventFd,
pub pause_evt: EventFd, pub pause_evt: EventFd,
pub queues: Vec<(usize, Queue<GuestMemoryAtomic<GuestMemoryMmap>>, EventFd)>, pub queues: Vec<(usize, Queue, EventFd)>,
pub virtio_interrupt: Arc<dyn VirtioInterrupt>, pub virtio_interrupt: Arc<dyn VirtioInterrupt>,
pub acked_features: u64, pub acked_features: u64,
pub acked_protocol_features: u64, pub acked_protocol_features: u64,
@ -297,7 +297,7 @@ impl VhostUserCommon {
pub fn activate<T: VhostUserMasterReqHandler>( pub fn activate<T: VhostUserMasterReqHandler>(
&mut self, &mut self,
mem: GuestMemoryAtomic<GuestMemoryMmap>, mem: GuestMemoryAtomic<GuestMemoryMmap>,
queues: Vec<(usize, Queue<GuestMemoryAtomic<GuestMemoryMmap>>, EventFd)>, queues: Vec<(usize, Queue, EventFd)>,
interrupt_cb: Arc<dyn VirtioInterrupt>, interrupt_cb: Arc<dyn VirtioInterrupt>,
acked_features: u64, acked_features: u64,
slave_req_handler: Option<MasterReqHandler<T>>, slave_req_handler: Option<MasterReqHandler<T>>,

View File

@ -27,7 +27,7 @@ use virtio_bindings::bindings::virtio_net::{
VIRTIO_NET_F_MAC, VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_MAC, VIRTIO_NET_F_MRG_RXBUF,
}; };
use virtio_bindings::bindings::virtio_ring::VIRTIO_RING_F_EVENT_IDX; use virtio_bindings::bindings::virtio_ring::VIRTIO_RING_F_EVENT_IDX;
use virtio_queue::Queue; use virtio_queue::{Queue, QueueT};
use vm_memory::{ByteValued, GuestMemoryAtomic}; use vm_memory::{ByteValued, GuestMemoryAtomic};
use vm_migration::{ use vm_migration::{
protocol::MemoryRangeTable, Migratable, MigratableError, Pausable, Snapshot, Snapshottable, protocol::MemoryRangeTable, Migratable, MigratableError, Pausable, Snapshot, Snapshottable,
@ -272,7 +272,7 @@ impl VirtioDevice for Net {
&mut self, &mut self,
mem: GuestMemoryAtomic<GuestMemoryMmap>, mem: GuestMemoryAtomic<GuestMemoryMmap>,
interrupt_cb: Arc<dyn VirtioInterrupt>, interrupt_cb: Arc<dyn VirtioInterrupt>,
mut queues: Vec<(usize, Queue<GuestMemoryAtomic<GuestMemoryMmap>>, EventFd)>, mut queues: Vec<(usize, Queue, EventFd)>,
) -> ActivateResult { ) -> ActivateResult {
self.common.activate(&queues, &interrupt_cb)?; self.common.activate(&queues, &interrupt_cb)?;
self.guest_memory = Some(mem.clone()); self.guest_memory = Some(mem.clone());
@ -288,6 +288,7 @@ impl VirtioDevice for Net {
let (kill_evt, pause_evt) = self.common.dup_eventfds(); let (kill_evt, pause_evt) = self.common.dup_eventfds();
let mut ctrl_handler = NetCtrlEpollHandler { let mut ctrl_handler = NetCtrlEpollHandler {
mem: mem.clone(),
kill_evt, kill_evt,
pause_evt, pause_evt,
ctrl_q: CtrlQueue::new(Vec::new()), ctrl_q: CtrlQueue::new(Vec::new()),

View File

@ -23,9 +23,9 @@ use vhost::vhost_user::message::{
}; };
use vhost::vhost_user::{Master, MasterReqHandler, VhostUserMaster, VhostUserMasterReqHandler}; use vhost::vhost_user::{Master, MasterReqHandler, VhostUserMaster, VhostUserMasterReqHandler};
use vhost::{VhostBackend, VhostUserDirtyLogRegion, VhostUserMemoryRegionInfo, VringConfigData}; use vhost::{VhostBackend, VhostUserDirtyLogRegion, VhostUserMemoryRegionInfo, VringConfigData};
use virtio_queue::{Descriptor, Queue}; use virtio_queue::{Descriptor, Queue, QueueT};
use vm_memory::{ use vm_memory::{
Address, Error as MmapError, FileOffset, GuestMemory, GuestMemoryAtomic, GuestMemoryRegion, Address, Error as MmapError, FileOffset, GuestAddress, GuestMemory, GuestMemoryRegion,
}; };
use vm_migration::protocol::MemoryRangeTable; use vm_migration::protocol::MemoryRangeTable;
use vmm_sys_util::eventfd::EventFd; use vmm_sys_util::eventfd::EventFd;
@ -152,7 +152,7 @@ impl VhostUserHandle {
pub fn setup_vhost_user<S: VhostUserMasterReqHandler>( pub fn setup_vhost_user<S: VhostUserMasterReqHandler>(
&mut self, &mut self,
mem: &GuestMemoryMmap, mem: &GuestMemoryMmap,
queues: Vec<(usize, Queue<GuestMemoryAtomic<GuestMemoryMmap>>, EventFd)>, queues: Vec<(usize, Queue, EventFd)>,
virtio_interrupt: &Arc<dyn VirtioInterrupt>, virtio_interrupt: &Arc<dyn VirtioInterrupt>,
acked_features: u64, acked_features: u64,
slave_req_handler: &Option<MasterReqHandler<S>>, slave_req_handler: &Option<MasterReqHandler<S>>,
@ -173,7 +173,7 @@ impl VhostUserHandle {
// at early stage. // at early stage.
for (queue_index, queue, _) in queues.iter() { for (queue_index, queue, _) in queues.iter() {
self.vu self.vu
.set_vring_num(*queue_index, queue.state.size) .set_vring_num(*queue_index, queue.size())
.map_err(Error::VhostUserSetVringNum)?; .map_err(Error::VhostUserSetVringNum)?;
} }
@ -184,7 +184,7 @@ impl VhostUserHandle {
mmap_size: 0, mmap_size: 0,
mmap_offset: 0, mmap_offset: 0,
num_queues: queues.len() as u16, num_queues: queues.len() as u16,
queue_size: queues[0].1.state.size, queue_size: queues[0].1.size(),
}; };
let (info, fd) = self let (info, fd) = self
.vu .vu
@ -201,15 +201,15 @@ impl VhostUserHandle {
let mut vrings_info = Vec::new(); let mut vrings_info = Vec::new();
for (queue_index, queue, queue_evt) in queues.iter() { for (queue_index, queue, queue_evt) in queues.iter() {
let actual_size: usize = queue.state.size.try_into().unwrap(); let actual_size: usize = queue.size().try_into().unwrap();
let config_data = VringConfigData { let config_data = VringConfigData {
queue_max_size: queue.max_size(), queue_max_size: queue.max_size(),
queue_size: queue.state.size, queue_size: queue.size(),
flags: 0u32, flags: 0u32,
desc_table_addr: get_host_address_range( desc_table_addr: get_host_address_range(
mem, mem,
queue.state.desc_table, GuestAddress(queue.desc_table()),
actual_size * std::mem::size_of::<Descriptor>(), actual_size * std::mem::size_of::<Descriptor>(),
) )
.ok_or(Error::DescriptorTableAddress)? as u64, .ok_or(Error::DescriptorTableAddress)? as u64,
@ -217,7 +217,7 @@ impl VhostUserHandle {
// i.e. 4 + (4 + 4) * actual_size. // i.e. 4 + (4 + 4) * actual_size.
used_ring_addr: get_host_address_range( used_ring_addr: get_host_address_range(
mem, mem,
queue.state.used_ring, GuestAddress(queue.used_ring()),
4 + actual_size * 8, 4 + actual_size * 8,
) )
.ok_or(Error::UsedAddress)? as u64, .ok_or(Error::UsedAddress)? as u64,
@ -225,7 +225,7 @@ impl VhostUserHandle {
// i.e. 4 + (2) * actual_size. // i.e. 4 + (2) * actual_size.
avail_ring_addr: get_host_address_range( avail_ring_addr: get_host_address_range(
mem, mem,
queue.state.avail_ring, GuestAddress(queue.avail_ring()),
4 + actual_size * 2, 4 + actual_size * 2,
) )
.ok_or(Error::AvailAddress)? as u64, .ok_or(Error::AvailAddress)? as u64,
@ -234,7 +234,7 @@ impl VhostUserHandle {
vrings_info.push(VringInfo { vrings_info.push(VringInfo {
config_data, config_data,
used_guest_addr: queue.state.used_ring.raw_value(), used_guest_addr: queue.used_ring(),
}); });
self.vu self.vu
@ -244,7 +244,7 @@ impl VhostUserHandle {
.set_vring_base( .set_vring_base(
*queue_index, *queue_index,
queue queue
.avail_idx(Ordering::Acquire) .avail_idx(mem, Ordering::Acquire)
.map_err(Error::GetAvailableIndex)? .map_err(Error::GetAvailableIndex)?
.0, .0,
) )
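avail_idx() (and used_idx() elsewhere in this patch) now takes the guest memory argument because the index is read out of guest RAM rather than through a handle owned by the Queue. A minimal sketch, assuming one 64 KiB region and arbitrary aligned ring addresses:

    use std::sync::atomic::Ordering;

    use virtio_queue::{Queue, QueueT};
    use vm_memory::{GuestAddress, GuestMemoryMmap};

    fn main() {
        let mem: GuestMemoryMmap =
            GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let mut q = Queue::new(256).unwrap();
        q.try_set_desc_table_address(GuestAddress(0x1000)).unwrap();
        q.try_set_avail_ring_address(GuestAddress(0x2000)).unwrap();
        q.try_set_used_ring_address(GuestAddress(0x3000)).unwrap();
        q.set_ready(true);
        // Reads avail->idx (a u16 at avail_ring + 2) straight from `mem`;
        // the anonymous mapping is zeroed, so the index starts at 0.
        let idx = q.avail_idx(&mem, Ordering::Acquire).unwrap();
        assert_eq!(idx.0, 0);
    }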
@ -337,7 +337,7 @@ impl VhostUserHandle {
pub fn reinitialize_vhost_user<S: VhostUserMasterReqHandler>( pub fn reinitialize_vhost_user<S: VhostUserMasterReqHandler>(
&mut self, &mut self,
mem: &GuestMemoryMmap, mem: &GuestMemoryMmap,
queues: Vec<(usize, Queue<GuestMemoryAtomic<GuestMemoryMmap>>, EventFd)>, queues: Vec<(usize, Queue, EventFd)>,
virtio_interrupt: &Arc<dyn VirtioInterrupt>, virtio_interrupt: &Arc<dyn VirtioInterrupt>,
acked_features: u64, acked_features: u64,
acked_protocol_features: u64, acked_protocol_features: u64,

View File

@ -673,6 +673,7 @@ where
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use libc::EFD_NONBLOCK; use libc::EFD_NONBLOCK;
use virtio_queue::QueueOwnedT;
use std::io::{Error as IoError, ErrorKind, Read, Result as IoResult, Write}; use std::io::{Error as IoError, ErrorKind, Read, Result as IoResult, Write};
use std::os::unix::io::RawFd; use std::os::unix::io::RawFd;
@ -819,7 +820,7 @@ mod tests {
let stream = TestStream::new(); let stream = TestStream::new();
let mut pkt = VsockPacket::from_rx_virtq_head( let mut pkt = VsockPacket::from_rx_virtq_head(
&mut handler_ctx.handler.queues[0] &mut handler_ctx.handler.queues[0]
.iter() .iter(&vsock_test_ctx.mem)
.unwrap() .unwrap()
.next() .next()
.unwrap(), .unwrap(),

View File

@ -40,6 +40,7 @@ use crate::{
use byteorder::{ByteOrder, LittleEndian}; use byteorder::{ByteOrder, LittleEndian};
use seccompiler::SeccompAction; use seccompiler::SeccompAction;
use std::io; use std::io;
use std::ops::Deref;
use std::os::unix::io::AsRawFd; use std::os::unix::io::AsRawFd;
use std::path::PathBuf; use std::path::PathBuf;
use std::result; use std::result;
@ -48,6 +49,9 @@ use std::sync::{Arc, Barrier, RwLock};
use versionize::{VersionMap, Versionize, VersionizeResult}; use versionize::{VersionMap, Versionize, VersionizeResult};
use versionize_derive::Versionize; use versionize_derive::Versionize;
use virtio_queue::Queue; use virtio_queue::Queue;
use virtio_queue::QueueOwnedT;
use virtio_queue::QueueT;
use vm_memory::GuestAddressSpace;
use vm_memory::GuestMemoryAtomic; use vm_memory::GuestMemoryAtomic;
use vm_migration::{ use vm_migration::{
Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable, VersionMapped, Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable, VersionMapped,
@ -88,7 +92,7 @@ pub const BACKEND_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 4;
/// ///
pub struct VsockEpollHandler<B: VsockBackend> { pub struct VsockEpollHandler<B: VsockBackend> {
pub mem: GuestMemoryAtomic<GuestMemoryMmap>, pub mem: GuestMemoryAtomic<GuestMemoryMmap>,
pub queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>, pub queues: Vec<Queue>,
pub queue_evts: Vec<EventFd>, pub queue_evts: Vec<EventFd>,
pub kill_evt: EventFd, pub kill_evt: EventFd,
pub pause_evt: EventFd, pub pause_evt: EventFd,
@ -124,7 +128,9 @@ where
let mut used_desc_heads = [(0, 0); QUEUE_SIZE as usize]; let mut used_desc_heads = [(0, 0); QUEUE_SIZE as usize];
let mut used_count = 0; let mut used_count = 0;
let mut avail_iter = self.queues[0].iter().map_err(DeviceError::QueueIterator)?; let mut avail_iter = self.queues[0]
.iter(self.mem.memory())
.map_err(DeviceError::QueueIterator)?;
for mut desc_chain in &mut avail_iter { for mut desc_chain in &mut avail_iter {
let used_len = match VsockPacket::from_rx_virtq_head( let used_len = match VsockPacket::from_rx_virtq_head(
&mut desc_chain, &mut desc_chain,
@ -150,9 +156,10 @@ where
used_count += 1; used_count += 1;
} }
let mem = self.mem.memory();
for &(desc_index, len) in &used_desc_heads[..used_count] { for &(desc_index, len) in &used_desc_heads[..used_count] {
self.queues[0] self.queues[0]
.add_used(desc_index, len) .add_used(mem.deref(), desc_index, len)
.map_err(DeviceError::QueueAddUsed)?; .map_err(DeviceError::QueueAddUsed)?;
} }
@ -172,7 +179,9 @@ where
let mut used_desc_heads = [(0, 0); QUEUE_SIZE as usize]; let mut used_desc_heads = [(0, 0); QUEUE_SIZE as usize];
let mut used_count = 0; let mut used_count = 0;
let mut avail_iter = self.queues[1].iter().map_err(DeviceError::QueueIterator)?; let mut avail_iter = self.queues[1]
.iter(self.mem.memory())
.map_err(DeviceError::QueueIterator)?;
for mut desc_chain in &mut avail_iter { for mut desc_chain in &mut avail_iter {
let pkt = match VsockPacket::from_tx_virtq_head( let pkt = match VsockPacket::from_tx_virtq_head(
&mut desc_chain, &mut desc_chain,
@ -196,9 +205,10 @@ where
used_count += 1; used_count += 1;
} }
let mem = self.mem.memory();
for &(desc_index, len) in &used_desc_heads[..used_count] { for &(desc_index, len) in &used_desc_heads[..used_count] {
self.queues[1] self.queues[1]
.add_used(desc_index, len) .add_used(mem.deref(), desc_index, len)
.map_err(DeviceError::QueueAddUsed)?; .map_err(DeviceError::QueueAddUsed)?;
} }
@ -432,7 +442,7 @@ where
&mut self, &mut self,
mem: GuestMemoryAtomic<GuestMemoryMmap>, mem: GuestMemoryAtomic<GuestMemoryMmap>,
interrupt_cb: Arc<dyn VirtioInterrupt>, interrupt_cb: Arc<dyn VirtioInterrupt>,
queues: Vec<(usize, Queue<GuestMemoryAtomic<GuestMemoryMmap>>, EventFd)>, queues: Vec<(usize, Queue, EventFd)>,
) -> ActivateResult { ) -> ActivateResult {
self.common.activate(&queues, &interrupt_cb)?; self.common.activate(&queues, &interrupt_cb)?;
let (kill_evt, pause_evt) = self.common.dup_eventfds(); let (kill_evt, pause_evt) = self.common.dup_eventfds();
@ -592,38 +602,36 @@ mod tests {
// A warning is, however, logged, if the guest driver attempts to write any config data. // A warning is, however, logged, if the guest driver attempts to write any config data.
ctx.device.write_config(0, &data[..4]); ctx.device.write_config(0, &data[..4]);
let memory = GuestMemoryAtomic::new(ctx.mem.clone());
// Test a bad activation. // Test a bad activation.
let bad_activate = ctx.device.activate( let bad_activate =
GuestMemoryAtomic::new(ctx.mem.clone()), ctx.device
Arc::new(NoopVirtioInterrupt {}), .activate(memory.clone(), Arc::new(NoopVirtioInterrupt {}), Vec::new());
Vec::new(),
);
match bad_activate { match bad_activate {
Err(ActivateError::BadActivate) => (), Err(ActivateError::BadActivate) => (),
other => panic!("{:?}", other), other => panic!("{:?}", other),
} }
let memory = GuestMemoryAtomic::new(ctx.mem.clone());
// Test a correct activation. // Test a correct activation.
ctx.device ctx.device
.activate( .activate(
memory.clone(), memory,
Arc::new(NoopVirtioInterrupt {}), Arc::new(NoopVirtioInterrupt {}),
vec![ vec![
( (
0, 0,
Queue::new(memory.clone(), 256), Queue::new(256).unwrap(),
EventFd::new(EFD_NONBLOCK).unwrap(), EventFd::new(EFD_NONBLOCK).unwrap(),
), ),
( (
1, 1,
Queue::new(memory.clone(), 256), Queue::new(256).unwrap(),
EventFd::new(EFD_NONBLOCK).unwrap(), EventFd::new(EFD_NONBLOCK).unwrap(),
), ),
( (
2, 2,
Queue::new(memory, 256), Queue::new(256).unwrap(),
EventFd::new(EFD_NONBLOCK).unwrap(), EventFd::new(EFD_NONBLOCK).unwrap(),
), ),
], ],
@ -637,9 +645,8 @@ mod tests {
{ {
let test_ctx = TestContext::new(); let test_ctx = TestContext::new();
let ctx = test_ctx.create_epoll_handler_context(); let ctx = test_ctx.create_epoll_handler_context();
let memory = GuestMemoryAtomic::new(test_ctx.mem.clone());
let _queue: Queue<GuestMemoryAtomic<GuestMemoryMmap>> = Queue::new(memory, 256); let _queue: Queue = Queue::new(256).unwrap();
assert!(ctx.handler.signal_used_queue(0).is_ok()); assert!(ctx.handler.signal_used_queue(0).is_ok());
} }
} }
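Queue::new() no longer takes a memory handle and now returns a Result, hence the unwrap() calls in these tests. A tiny sketch relying only on constructor defaults; what exactly the constructor validates is left to the crate:

    use virtio_queue::{Queue, QueueT};

    fn main() {
        // Construction can fail (e.g. on an invalid max size), hence unwrap().
        let q = Queue::new(256).unwrap();
        assert_eq!(q.max_size(), 256);
        // A freshly built queue is not ready until the guest configures it.
        assert!(!q.ready());
    }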

View File

@ -170,7 +170,7 @@ mod tests {
use std::os::unix::io::AsRawFd; use std::os::unix::io::AsRawFd;
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::{Arc, RwLock}; use std::sync::{Arc, RwLock};
use virtio_queue::{defs::VIRTQ_DESC_F_NEXT, defs::VIRTQ_DESC_F_WRITE}; use virtio_bindings::bindings::virtio_ring::{VRING_DESC_F_NEXT, VRING_DESC_F_WRITE};
use vm_memory::{GuestAddress, GuestMemoryAtomic}; use vm_memory::{GuestAddress, GuestMemoryAtomic};
use vm_virtio::queue::testing::VirtQueue as GuestQ; use vm_virtio::queue::testing::VirtQueue as GuestQ;
use vmm_sys_util::eventfd::EventFd; use vmm_sys_util::eventfd::EventFd;
@ -295,15 +295,20 @@ mod tests {
guest_rxvq.dtable[0].set( guest_rxvq.dtable[0].set(
0x0040_0000, 0x0040_0000,
VSOCK_PKT_HDR_SIZE as u32, VSOCK_PKT_HDR_SIZE as u32,
VIRTQ_DESC_F_WRITE | VIRTQ_DESC_F_NEXT, (VRING_DESC_F_WRITE | VRING_DESC_F_NEXT).try_into().unwrap(),
1, 1,
); );
guest_rxvq.dtable[1].set(0x0040_1000, 4096, VIRTQ_DESC_F_WRITE, 0); guest_rxvq.dtable[1].set(0x0040_1000, 4096, VRING_DESC_F_WRITE.try_into().unwrap(), 0);
guest_rxvq.avail.ring[0].set(0); guest_rxvq.avail.ring[0].set(0);
guest_rxvq.avail.idx.set(1); guest_rxvq.avail.idx.set(1);
// Set up one available descriptor in the TX queue. // Set up one available descriptor in the TX queue.
guest_txvq.dtable[0].set(0x0050_0000, VSOCK_PKT_HDR_SIZE as u32, VIRTQ_DESC_F_NEXT, 1); guest_txvq.dtable[0].set(
0x0050_0000,
VSOCK_PKT_HDR_SIZE as u32,
VRING_DESC_F_NEXT.try_into().unwrap(),
1,
);
guest_txvq.dtable[1].set(0x0050_1000, 4096, 0, 0); guest_txvq.dtable[1].set(0x0050_1000, 4096, 0, 0);
guest_txvq.avail.ring[0].set(0); guest_txvq.avail.ring[0].set(0);
guest_txvq.avail.idx.set(1); guest_txvq.avail.idx.set(1);
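The try_into() calls are needed because the VRING_DESC_F_* constants generated by virtio-bindings are u32, while a split-queue descriptor's flags field is u16:

    use virtio_bindings::bindings::virtio_ring::{VRING_DESC_F_NEXT, VRING_DESC_F_WRITE};

    fn main() {
        let flags: u16 = (VRING_DESC_F_WRITE | VRING_DESC_F_NEXT).try_into().unwrap();
        assert_eq!(flags, 0x3); // NEXT = 0x1, WRITE = 0x2
    }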

View File

@ -16,13 +16,14 @@
/// to temporary buffers, before passing it on to the vsock backend. /// to temporary buffers, before passing it on to the vsock backend.
/// ///
use byteorder::{ByteOrder, LittleEndian}; use byteorder::{ByteOrder, LittleEndian};
use std::ops::Deref;
use std::sync::Arc; use std::sync::Arc;
use super::defs; use super::defs;
use super::{Result, VsockError}; use super::{Result, VsockError};
use crate::{get_host_address_range, GuestMemoryMmap}; use crate::get_host_address_range;
use virtio_queue::DescriptorChain; use virtio_queue::DescriptorChain;
use vm_memory::GuestMemoryLoadGuard; use vm_memory::GuestMemory;
use vm_virtio::{AccessPlatform, Translatable}; use vm_virtio::{AccessPlatform, Translatable};
// The vsock packet header is defined by the C struct: // The vsock packet header is defined by the C struct:
@ -107,10 +108,14 @@ impl VsockPacket {
/// descriptor can optionally end the chain. Bounds and pointer checks are performed when /// descriptor can optionally end the chain. Bounds and pointer checks are performed when
/// creating the wrapper. /// creating the wrapper.
/// ///
pub fn from_tx_virtq_head( pub fn from_tx_virtq_head<M>(
desc_chain: &mut DescriptorChain<GuestMemoryLoadGuard<GuestMemoryMmap>>, desc_chain: &mut DescriptorChain<M>,
access_platform: Option<&Arc<dyn AccessPlatform>>, access_platform: Option<&Arc<dyn AccessPlatform>>,
) -> Result<Self> { ) -> Result<Self>
where
M: Clone + Deref,
M::Target: GuestMemory,
{
let head = desc_chain.next().ok_or(VsockError::HdrDescMissing)?; let head = desc_chain.next().ok_or(VsockError::HdrDescMissing)?;
// All buffers in the TX queue must be readable. // All buffers in the TX queue must be readable.
@ -181,10 +186,14 @@ impl VsockPacket {
/// There must be two descriptors in the chain, both writable: a header descriptor and a data /// There must be two descriptors in the chain, both writable: a header descriptor and a data
/// descriptor. Bounds and pointer checks are performed when creating the wrapper. /// descriptor. Bounds and pointer checks are performed when creating the wrapper.
/// ///
pub fn from_rx_virtq_head( pub fn from_rx_virtq_head<M>(
desc_chain: &mut DescriptorChain<GuestMemoryLoadGuard<GuestMemoryMmap>>, desc_chain: &mut DescriptorChain<M>,
access_platform: Option<&Arc<dyn AccessPlatform>>, access_platform: Option<&Arc<dyn AccessPlatform>>,
) -> Result<Self> { ) -> Result<Self>
where
M: Clone + Deref,
M::Target: GuestMemory,
{
let head = desc_chain.next().ok_or(VsockError::HdrDescMissing)?; let head = desc_chain.next().ok_or(VsockError::HdrDescMissing)?;
// All RX buffers must be writable. // All RX buffers must be writable.
@ -379,7 +388,8 @@ mod tests {
use super::*; use super::*;
use crate::vsock::defs::MAX_PKT_BUF_SIZE; use crate::vsock::defs::MAX_PKT_BUF_SIZE;
use crate::GuestMemoryMmap; use crate::GuestMemoryMmap;
use virtio_queue::defs::VIRTQ_DESC_F_WRITE; use virtio_bindings::bindings::virtio_ring::VRING_DESC_F_WRITE;
use virtio_queue::QueueOwnedT;
use vm_memory::GuestAddress; use vm_memory::GuestAddress;
use vm_virtio::queue::testing::VirtqDesc as GuestQDesc; use vm_virtio::queue::testing::VirtqDesc as GuestQDesc;
@ -402,7 +412,7 @@ mod tests {
($test_ctx:expr, $handler_ctx:expr, $err:pat, $ctor:ident, $vq:expr) => { ($test_ctx:expr, $handler_ctx:expr, $err:pat, $ctor:ident, $vq:expr) => {
match VsockPacket::$ctor( match VsockPacket::$ctor(
&mut $handler_ctx.handler.queues[$vq] &mut $handler_ctx.handler.queues[$vq]
.iter() .iter(&$test_ctx.mem)
.unwrap() .unwrap()
.next() .next()
.unwrap(), .unwrap(),
@ -433,7 +443,7 @@ mod tests {
let pkt = VsockPacket::from_tx_virtq_head( let pkt = VsockPacket::from_tx_virtq_head(
&mut handler_ctx.handler.queues[1] &mut handler_ctx.handler.queues[1]
.iter() .iter(&test_ctx.mem)
.unwrap() .unwrap()
.next() .next()
.unwrap(), .unwrap(),
@ -452,7 +462,7 @@ mod tests {
create_context!(test_ctx, handler_ctx); create_context!(test_ctx, handler_ctx);
handler_ctx.guest_txvq.dtable[0] handler_ctx.guest_txvq.dtable[0]
.flags .flags
.set(VIRTQ_DESC_F_WRITE); .set(VRING_DESC_F_WRITE.try_into().unwrap());
expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::UnreadableDescriptor); expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::UnreadableDescriptor);
} }
@ -471,7 +481,7 @@ mod tests {
set_pkt_len(0, &handler_ctx.guest_txvq.dtable[0], &test_ctx.mem); set_pkt_len(0, &handler_ctx.guest_txvq.dtable[0], &test_ctx.mem);
let mut pkt = VsockPacket::from_tx_virtq_head( let mut pkt = VsockPacket::from_tx_virtq_head(
&mut handler_ctx.handler.queues[1] &mut handler_ctx.handler.queues[1]
.iter() .iter(&test_ctx.mem)
.unwrap() .unwrap()
.next() .next()
.unwrap(), .unwrap(),
@ -508,7 +518,7 @@ mod tests {
create_context!(test_ctx, handler_ctx); create_context!(test_ctx, handler_ctx);
handler_ctx.guest_txvq.dtable[1] handler_ctx.guest_txvq.dtable[1]
.flags .flags
.set(VIRTQ_DESC_F_WRITE); .set(VRING_DESC_F_WRITE.try_into().unwrap());
expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::UnreadableDescriptor); expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::UnreadableDescriptor);
} }
@ -529,7 +539,7 @@ mod tests {
create_context!(test_ctx, handler_ctx); create_context!(test_ctx, handler_ctx);
let pkt = VsockPacket::from_rx_virtq_head( let pkt = VsockPacket::from_rx_virtq_head(
&mut handler_ctx.handler.queues[0] &mut handler_ctx.handler.queues[0]
.iter() .iter(&test_ctx.mem)
.unwrap() .unwrap()
.next() .next()
.unwrap(), .unwrap(),
@ -564,7 +574,7 @@ mod tests {
create_context!(test_ctx, handler_ctx); create_context!(test_ctx, handler_ctx);
handler_ctx.guest_rxvq.dtable[0] handler_ctx.guest_rxvq.dtable[0]
.flags .flags
.set(VIRTQ_DESC_F_WRITE); .set(VRING_DESC_F_WRITE.try_into().unwrap());
expect_asm_error!(rx, test_ctx, handler_ctx, VsockError::BufDescMissing); expect_asm_error!(rx, test_ctx, handler_ctx, VsockError::BufDescMissing);
} }
} }
@ -586,7 +596,7 @@ mod tests {
create_context!(test_ctx, handler_ctx); create_context!(test_ctx, handler_ctx);
let mut pkt = VsockPacket::from_rx_virtq_head( let mut pkt = VsockPacket::from_rx_virtq_head(
&mut handler_ctx.handler.queues[0] &mut handler_ctx.handler.queues[0]
.iter() .iter(&test_ctx.mem)
.unwrap() .unwrap()
.next() .next()
.unwrap(), .unwrap(),
@ -677,7 +687,7 @@ mod tests {
create_context!(test_ctx, handler_ctx); create_context!(test_ctx, handler_ctx);
let mut pkt = VsockPacket::from_rx_virtq_head( let mut pkt = VsockPacket::from_rx_virtq_head(
&mut handler_ctx.handler.queues[0] &mut handler_ctx.handler.queues[0]
.iter() .iter(&test_ctx.mem)
.unwrap() .unwrap()
.next() .next()
.unwrap(), .unwrap(),
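The shape of the loosened bound used by both packet constructors above: instead of hard-wiring GuestMemoryLoadGuard<GuestMemoryMmap>, any cloneable pointer-like handle to a GuestMemory is accepted, which is why the tests can now pass a plain &GuestMemoryMmap. The free function below is an illustrative stand-in, not the real constructor:

    use std::ops::Deref;

    use virtio_queue::DescriptorChain;
    use vm_memory::GuestMemory;

    // Same bounds as from_tx_virtq_head()/from_rx_virtq_head() above.
    fn head_index<M>(desc_chain: &DescriptorChain<M>) -> u16
    where
        M: Clone + Deref,
        M::Target: GuestMemory,
    {
        desc_chain.head_index()
    }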

View File

@ -817,6 +817,8 @@ mod tests {
use std::os::unix::net::{UnixListener, UnixStream}; use std::os::unix::net::{UnixListener, UnixStream};
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use virtio_queue::QueueOwnedT;
use super::super::super::csm::defs as csm_defs; use super::super::super::csm::defs as csm_defs;
use super::super::super::tests::TestContext as VsockTestContext; use super::super::super::tests::TestContext as VsockTestContext;
use super::*; use super::*;
@ -842,7 +844,7 @@ mod tests {
let mut handler_ctx = vsock_test_ctx.create_epoll_handler_context(); let mut handler_ctx = vsock_test_ctx.create_epoll_handler_context();
let pkt = VsockPacket::from_rx_virtq_head( let pkt = VsockPacket::from_rx_virtq_head(
&mut handler_ctx.handler.queues[0] &mut handler_ctx.handler.queues[0]
.iter() .iter(&vsock_test_ctx.mem)
.unwrap() .unwrap()
.next() .next()
.unwrap(), .unwrap(),

View File

@ -18,6 +18,7 @@ use anyhow::anyhow;
use seccompiler::SeccompAction; use seccompiler::SeccompAction;
use std::fs::File; use std::fs::File;
use std::io::{self, Read}; use std::io::{self, Read};
use std::ops::Deref;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd}; use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::result; use std::result;
use std::sync::atomic::AtomicBool; use std::sync::atomic::AtomicBool;
@ -25,8 +26,8 @@ use std::sync::{Arc, Barrier, Mutex};
use std::time::Instant; use std::time::Instant;
use versionize::{VersionMap, Versionize, VersionizeResult}; use versionize::{VersionMap, Versionize, VersionizeResult};
use versionize_derive::Versionize; use versionize_derive::Versionize;
use virtio_queue::Queue; use virtio_queue::{Queue, QueueOwnedT, QueueT};
use vm_memory::{Bytes, GuestMemoryAtomic}; use vm_memory::{Bytes, GuestAddressSpace, GuestMemoryAtomic};
use vm_migration::VersionMapped; use vm_migration::VersionMapped;
use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable}; use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable};
use vmm_sys_util::eventfd::EventFd; use vmm_sys_util::eventfd::EventFd;
@ -47,7 +48,8 @@ const WATCHDOG_TIMER_INTERVAL: i64 = 15;
const WATCHDOG_TIMEOUT: u64 = WATCHDOG_TIMER_INTERVAL as u64 + 5; const WATCHDOG_TIMEOUT: u64 = WATCHDOG_TIMER_INTERVAL as u64 + 5;
struct WatchdogEpollHandler { struct WatchdogEpollHandler {
queue: Queue<GuestMemoryAtomic<GuestMemoryMmap>>, mem: GuestMemoryAtomic<GuestMemoryMmap>,
queue: Queue,
interrupt_cb: Arc<dyn VirtioInterrupt>, interrupt_cb: Arc<dyn VirtioInterrupt>,
queue_evt: EventFd, queue_evt: EventFd,
kill_evt: EventFd, kill_evt: EventFd,
@ -64,7 +66,7 @@ impl WatchdogEpollHandler {
let queue = &mut self.queue; let queue = &mut self.queue;
let mut used_desc_heads = [(0, 0); QUEUE_SIZE as usize]; let mut used_desc_heads = [(0, 0); QUEUE_SIZE as usize];
let mut used_count = 0; let mut used_count = 0;
for mut desc_chain in queue.iter().unwrap() { for mut desc_chain in queue.iter(self.mem.memory()).unwrap() {
let desc = desc_chain.next().unwrap(); let desc = desc_chain.next().unwrap();
let mut len = 0; let mut len = 0;
@ -88,8 +90,9 @@ impl WatchdogEpollHandler {
used_count += 1; used_count += 1;
} }
let mem = self.mem.memory();
for &(desc_index, len) in &used_desc_heads[..used_count] { for &(desc_index, len) in &used_desc_heads[..used_count] {
queue.add_used(desc_index, len).unwrap(); queue.add_used(mem.deref(), desc_index, len).unwrap();
} }
used_count > 0 used_count > 0
} }
@ -289,9 +292,9 @@ impl VirtioDevice for Watchdog {
fn activate( fn activate(
&mut self, &mut self,
_mem: GuestMemoryAtomic<GuestMemoryMmap>, mem: GuestMemoryAtomic<GuestMemoryMmap>,
interrupt_cb: Arc<dyn VirtioInterrupt>, interrupt_cb: Arc<dyn VirtioInterrupt>,
mut queues: Vec<(usize, Queue<GuestMemoryAtomic<GuestMemoryMmap>>, EventFd)>, mut queues: Vec<(usize, Queue, EventFd)>,
) -> ActivateResult { ) -> ActivateResult {
self.common.activate(&queues, &interrupt_cb)?; self.common.activate(&queues, &interrupt_cb)?;
let (kill_evt, pause_evt) = self.common.dup_eventfds(); let (kill_evt, pause_evt) = self.common.dup_eventfds();
@ -309,6 +312,7 @@ impl VirtioDevice for Watchdog {
let (_, queue, queue_evt) = queues.remove(0); let (_, queue, queue_evt) = queues.remove(0);
let mut handler = WatchdogEpollHandler { let mut handler = WatchdogEpollHandler {
mem,
queue, queue,
interrupt_cb, interrupt_cb,
queue_evt, queue_evt,

View File

@ -9,5 +9,5 @@ default = []
[dependencies] [dependencies]
log = "0.4.17" log = "0.4.17"
virtio-queue = "0.4.0" virtio-queue = "0.5.0"
vm-memory = { version = "0.8.0", features = ["backend-mmap", "backend-atomic", "backend-bitmap"] } vm-memory = { version = "0.8.0", features = ["backend-mmap", "backend-atomic", "backend-bitmap"] }

View File

@ -12,10 +12,8 @@
use std::fmt::{self, Debug}; use std::fmt::{self, Debug};
use std::sync::Arc; use std::sync::Arc;
use virtio_queue::Queue; use virtio_queue::{Queue, QueueT};
use vm_memory::{bitmap::AtomicBitmap, GuestAddress, GuestMemoryAtomic}; use vm_memory::GuestAddress;
type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>;
pub mod queue; pub mod queue;
pub use queue::*; pub use queue::*;
@ -108,16 +106,25 @@ pub trait Translatable {
} }
impl Translatable for GuestAddress { impl Translatable for GuestAddress {
fn translate_gva(&self, access_platform: Option<&Arc<dyn AccessPlatform>>, len: usize) -> Self {
GuestAddress(self.0.translate_gva(access_platform, len))
}
fn translate_gpa(&self, access_platform: Option<&Arc<dyn AccessPlatform>>, len: usize) -> Self {
GuestAddress(self.0.translate_gpa(access_platform, len))
}
}
impl Translatable for u64 {
fn translate_gva(&self, access_platform: Option<&Arc<dyn AccessPlatform>>, len: usize) -> Self { fn translate_gva(&self, access_platform: Option<&Arc<dyn AccessPlatform>>, len: usize) -> Self {
if let Some(access_platform) = access_platform { if let Some(access_platform) = access_platform {
GuestAddress(access_platform.translate_gva(self.0, len as u64).unwrap()) access_platform.translate_gva(*self, len as u64).unwrap()
} else { } else {
*self *self
} }
} }
fn translate_gpa(&self, access_platform: Option<&Arc<dyn AccessPlatform>>, len: usize) -> Self { fn translate_gpa(&self, access_platform: Option<&Arc<dyn AccessPlatform>>, len: usize) -> Self {
if let Some(access_platform) = access_platform { if let Some(access_platform) = access_platform {
GuestAddress(access_platform.translate_gpa(self.0, len as u64).unwrap()) access_platform.translate_gpa(*self, len as u64).unwrap()
} else { } else {
*self *self
} }
@ -125,22 +132,20 @@ impl Translatable for GuestAddress {
} }
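Usage sketch for the reworked trait: Translatable is now implemented for u64 (the type the QueueT address getters return), with the GuestAddress impl delegating to it. With no AccessPlatform the translation is the identity:

    use vm_memory::GuestAddress;
    use vm_virtio::Translatable;

    fn main() {
        let raw: u64 = 0x1000;
        assert_eq!(raw.translate_gpa(None, 0), 0x1000);
        assert_eq!(
            GuestAddress(raw).translate_gpa(None, 0),
            GuestAddress(0x1000)
        );
    }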
/// Helper for cloning a Queue since QueueState doesn't derive Clone /// Helper for cloning a Queue since QueueState doesn't derive Clone
pub fn clone_queue(
    queue: &Queue<GuestMemoryAtomic<GuestMemoryMmap>>,
) -> Queue<GuestMemoryAtomic<GuestMemoryMmap>> {
    Queue::<GuestMemoryAtomic<GuestMemoryMmap>, virtio_queue::QueueState> {
        mem: queue.mem.clone(),
        state: virtio_queue::QueueState {
            max_size: queue.state.max_size,
            next_avail: queue.state.next_avail,
            next_used: queue.state.next_used,
            event_idx_enabled: queue.state.event_idx_enabled,
            num_added: queue.state.num_added,
            size: queue.state.size,
            ready: queue.state.ready,
            desc_table: queue.state.desc_table,
            avail_ring: queue.state.avail_ring,
            used_ring: queue.state.used_ring,
        },
    }
}
pub fn clone_queue(queue: &Queue) -> Queue {
    let mut q = Queue::new(queue.max_size()).unwrap();

    q.set_next_avail(queue.next_avail());
    q.set_next_used(queue.next_used());
    q.set_event_idx(queue.event_idx_enabled());
    q.set_size(queue.size());
    q.set_ready(queue.ready());
    q.try_set_desc_table_address(GuestAddress(queue.desc_table()))
        .unwrap();
    q.try_set_avail_ring_address(GuestAddress(queue.avail_ring()))
        .unwrap();
    q.try_set_used_ring_address(GuestAddress(queue.used_ring()))
        .unwrap();

    q
}
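And clone_queue() in use; the configuration below is illustrative:

    use virtio_queue::{Queue, QueueT};
    use vm_virtio::clone_queue;

    fn main() {
        let mut q = Queue::new(128).unwrap();
        q.set_size(64);
        q.set_ready(true);

        // The copy carries over size, ready state, indexes and ring addresses.
        let copy = clone_queue(&q);
        assert_eq!(copy.size(), 64);
        assert!(copy.ready());
    }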

View File

@ -11,9 +11,8 @@
pub mod testing { pub mod testing {
use std::marker::PhantomData; use std::marker::PhantomData;
use std::mem; use std::mem;
use virtio_queue::{Queue, QueueState, VirtqUsedElem}; use virtio_queue::{Queue, QueueT, VirtqUsedElem};
use vm_memory::{bitmap::AtomicBitmap, Address, GuestAddress, GuestUsize}; use vm_memory::{bitmap::AtomicBitmap, Address, Bytes, GuestAddress, GuestUsize};
use vm_memory::{Bytes, GuestMemoryAtomic};
type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>; type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>;
@ -225,16 +224,14 @@ pub mod testing {
} }
// Creates a new Queue, using the underlying memory regions represented by the VirtQueue. // Creates a new Queue, using the underlying memory regions represented by the VirtQueue.
pub fn create_queue(&self) -> Queue<GuestMemoryAtomic<GuestMemoryMmap>> { pub fn create_queue(&self) -> Queue {
let mem = GuestMemoryAtomic::new(self.mem.clone()); let mut q = Queue::new(self.size()).unwrap();
let mut q =
Queue::<GuestMemoryAtomic<GuestMemoryMmap>, QueueState>::new(mem, self.size());
q.state.size = self.size(); q.set_size(self.size());
q.state.ready = true; q.set_ready(true);
q.state.desc_table = self.dtable_start(); q.try_set_desc_table_address(self.dtable_start()).unwrap();
q.state.avail_ring = self.avail_start(); q.try_set_avail_ring_address(self.avail_start()).unwrap();
q.state.used_ring = self.used_start(); q.try_set_used_ring_address(self.used_start()).unwrap();
q q
} }

View File

@ -50,7 +50,7 @@ vfio-ioctls = { git = "https://github.com/rust-vmm/vfio", branch = "main", defau
vfio_user = { path = "../vfio_user" } vfio_user = { path = "../vfio_user" }
vhdx = { path = "../vhdx" } vhdx = { path = "../vhdx" }
virtio-devices = { path = "../virtio-devices" } virtio-devices = { path = "../virtio-devices" }
virtio-queue = "0.4.0" virtio-queue = "0.5.0"
vm-allocator = { path = "../vm-allocator" } vm-allocator = { path = "../vm-allocator" }
vm-device = { path = "../vm-device" } vm-device = { path = "../vm-device" }
vm-memory = { version = "0.8.0", features = ["backend-mmap", "backend-atomic", "backend-bitmap"] } vm-memory = { version = "0.8.0", features = ["backend-mmap", "backend-atomic", "backend-bitmap"] }