virtio-queue: Remove AccessPlatform

Descriptor addresses are now translated from the virtio devices directly
and the definition of the AccessPlatform trait has been moved to
vm-virtio crate. For these reasons, the virtio-queue crate can be
simplified, which makes it very close to the upstream version.

Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
Authored by Sebastien Boeuf on 2022-01-26 17:19:06 +01:00; committed by Rob Bradford
parent 8eed276d14
commit c99d637693
4 changed files with 21 additions and 71 deletions

View File

@ -14,12 +14,11 @@ use std::convert::TryFrom;
use std::fmt::{self, Debug};
use std::mem::size_of;
use std::ops::Deref;
use std::sync::Arc;
use vm_memory::{Address, Bytes, GuestAddress, GuestMemory};
use crate::defs::VIRTQ_DESCRIPTOR_SIZE;
use crate::{AccessPlatform, Descriptor, Error};
use crate::{Descriptor, Error};
/// A virtio descriptor chain.
#[derive(Clone, Debug)]
@ -31,7 +30,6 @@ pub struct DescriptorChain<M> {
next_index: u16,
ttl: u16,
is_indirect: bool,
access_platform: Option<Arc<dyn AccessPlatform>>,
}
impl<M> DescriptorChain<M>
@ -45,7 +43,6 @@ where
queue_size: u16,
ttl: u16,
head_index: u16,
access_platform: Option<Arc<dyn AccessPlatform>>,
) -> Self {
DescriptorChain {
mem,
@ -55,7 +52,6 @@ where
next_index: head_index,
ttl,
is_indirect: false,
access_platform,
}
}
@ -68,21 +64,8 @@ where
/// * `queue_size` - the size of the queue, which is also the maximum size of a descriptor
/// chain.
/// * `head_index` - the descriptor index of the chain head.
pub(crate) fn new(
mem: M,
desc_table: GuestAddress,
queue_size: u16,
head_index: u16,
access_platform: Option<Arc<dyn AccessPlatform>>,
) -> Self {
Self::with_ttl(
mem,
desc_table,
queue_size,
queue_size,
head_index,
access_platform,
)
pub(crate) fn new(mem: M, desc_table: GuestAddress, queue_size: u16, head_index: u16) -> Self {
Self::with_ttl(mem, desc_table, queue_size, queue_size, head_index)
}
/// Get the descriptor index of the chain head.
@ -172,16 +155,7 @@ where
// The guest device driver should not touch the descriptor once submitted, so it's safe
// to use read_obj() here.
let mut desc = self.mem.read_obj::<Descriptor>(desc_addr).ok()?;
// When needed, it's very important to translate the descriptor address
// before returning the Descriptor to the consumer.
if let Some(access_platform) = &self.access_platform {
desc.set_addr(
access_platform
.translate(desc.addr().0, u64::from(desc.len()))
.ok()?,
);
}
let desc = self.mem.read_obj::<Descriptor>(desc_addr).ok()?;
if desc.refers_to_indirect_table() {
self.switch_to_indirect_table(desc).ok()?;
@ -264,21 +238,17 @@ mod tests {
// index >= queue_size
assert!(
DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), 16, 16, None)
DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), 16, 16)
.next()
.is_none()
);
// desc_table address is way off
assert!(DescriptorChain::<&GuestMemoryMmap>::new(
m,
GuestAddress(0x00ff_ffff_ffff),
16,
0,
None
)
.next()
.is_none());
assert!(
DescriptorChain::<&GuestMemoryMmap>::new(m, GuestAddress(0x00ff_ffff_ffff), 16, 0,)
.next()
.is_none()
);
{
// the first desc has a normal len, and the next_descriptor flag is set
@ -286,7 +256,7 @@ mod tests {
let desc = Descriptor::new(0x1000, 0x1000, VIRTQ_DESC_F_NEXT, 16);
vq.desc_table().store(0, desc);
let mut c = DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), 16, 0, None);
let mut c = DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), 16, 0);
c.next().unwrap();
assert!(c.next().is_none());
}
@ -299,7 +269,7 @@ mod tests {
let desc = Descriptor::new(0x2000, 0x1000, 0, 0);
vq.desc_table().store(1, desc);
let mut c = DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), 16, 0, None);
let mut c = DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), 16, 0);
assert_eq!(
c.memory() as *const GuestMemoryMmap,
@ -341,7 +311,7 @@ mod tests {
let desc = Descriptor::new((0x1000 * 16) as u64, 0x1000, 0, 0);
vq.desc_table().store(QUEUE_SIZE - 1, desc);
let mut c = DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), QUEUE_SIZE, 0, None);
let mut c = DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), QUEUE_SIZE, 0);
assert_eq!(c.ttl, c.queue_size);
// Validate that `ttl` wraps around even when the entire descriptor table is populated.
@ -372,8 +342,7 @@ mod tests {
let desc = Descriptor::new(0x8000, 0x1000, 0, 0);
dtable.store(2, desc);
let mut c: DescriptorChain<&GuestMemoryMmap> =
DescriptorChain::new(m, vq.start(), 16, 0, None);
let mut c: DescriptorChain<&GuestMemoryMmap> = DescriptorChain::new(m, vq.start(), 16, 0);
// create an indirect table with 4 chained descriptors
let idtable = DescriptorTable::new(m, GuestAddress(0x7000), 4);
@ -421,7 +390,7 @@ mod tests {
vq.desc_table().store(0, desc);
let mut c: DescriptorChain<&GuestMemoryMmap> =
DescriptorChain::new(m, vq.start(), 16, 0, None);
DescriptorChain::new(m, vq.start(), 16, 0);
assert!(c.next().is_none());
}
@ -436,7 +405,7 @@ mod tests {
vq.desc_table().store(0, desc);
let mut c: DescriptorChain<&GuestMemoryMmap> =
DescriptorChain::new(m, vq.start(), 16, 0, None);
DescriptorChain::new(m, vq.start(), 16, 0);
assert!(c.next().is_none());
}
@ -456,7 +425,7 @@ mod tests {
vq.desc_table().store(0, desc);
let mut c: DescriptorChain<&GuestMemoryMmap> =
DescriptorChain::new(m, vq.start(), 16, 0, None);
DescriptorChain::new(m, vq.start(), 16, 0);
assert!(c.next().is_none());
}
@ -473,7 +442,7 @@ mod tests {
m.write_obj(desc, GuestAddress(0x1000)).unwrap();
let mut c: DescriptorChain<&GuestMemoryMmap> =
DescriptorChain::new(m, vq.start(), 16, 0, None);
DescriptorChain::new(m, vq.start(), 16, 0);
assert!(c.next().is_some());
// But it's not allowed to have an indirect descriptor that points to another indirect
@ -482,7 +451,7 @@ mod tests {
m.write_obj(desc, GuestAddress(0x1000)).unwrap();
let mut c: DescriptorChain<&GuestMemoryMmap> =
DescriptorChain::new(m, vq.start(), 16, 0, None);
DescriptorChain::new(m, vq.start(), 16, 0);
assert!(c.next().is_none());
}

View File

@ -13,12 +13,11 @@
use std::num::Wrapping;
use std::ops::Deref;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use vm_memory::{Address, Bytes, GuestAddress, GuestMemory};
use crate::defs::{VIRTQ_AVAIL_ELEMENT_SIZE, VIRTQ_AVAIL_RING_HEADER_SIZE};
use crate::{error, AccessPlatform, DescriptorChain, QueueState};
use crate::{error, DescriptorChain, QueueState};
/// Consuming iterator over all available descriptor chain heads in the queue.
///
@ -98,7 +97,6 @@ pub struct AvailIter<'b, M> {
queue_size: u16,
last_index: Wrapping<u16>,
next_avail: &'b mut Wrapping<u16>,
access_platform: &'b Option<Arc<dyn AccessPlatform>>,
}
impl<'b, M> AvailIter<'b, M>
@ -122,7 +120,6 @@ where
queue_size: state.size,
last_index: idx,
next_avail: &mut state.next_avail,
access_platform: &state.access_platform,
}
}
@ -170,7 +167,6 @@ where
self.desc_table,
self.queue_size,
head_index,
self.access_platform.clone(),
))
}
}

View File

@ -41,13 +41,6 @@ mod queue_guard;
mod state;
mod state_sync;
/// Trait for devices with access to data in memory being limited and/or
/// translated.
pub trait AccessPlatform: Send + Sync + Debug {
/// Provide a way to translate address ranges.
fn translate(&self, base: u64, size: u64) -> std::result::Result<u64, std::io::Error>;
}
/// Virtio Queue related errors.
#[derive(Debug)]
pub enum Error {

View File

@ -10,7 +10,6 @@ use std::mem::size_of;
use std::num::Wrapping;
use std::ops::Deref;
use std::sync::atomic::{fence, Ordering};
use std::sync::Arc;
use vm_memory::{Address, Bytes, GuestAddress, GuestMemory};
@ -20,10 +19,7 @@ use crate::defs::{
VIRTQ_USED_ELEMENT_SIZE, VIRTQ_USED_F_NO_NOTIFY, VIRTQ_USED_RING_HEADER_SIZE,
VIRTQ_USED_RING_META_SIZE,
};
use crate::{
error, AccessPlatform, AvailIter, Descriptor, Error, QueueStateGuard, QueueStateT,
VirtqUsedElem,
};
use crate::{error, AvailIter, Descriptor, Error, QueueStateGuard, QueueStateT, VirtqUsedElem};
/// Struct to maintain information and manipulate state of a virtio queue.
#[derive(Clone, Debug)]
@ -57,9 +53,6 @@ pub struct QueueState {
/// Guest physical address of the used ring.
pub used_ring: GuestAddress,
/// Access platform handler
pub access_platform: Option<Arc<dyn AccessPlatform>>,
}
impl QueueState {
@ -175,7 +168,6 @@ impl QueueStateT for QueueState {
next_used: Wrapping(0),
event_idx_enabled: false,
signalled_used: None,
access_platform: None,
}
}