// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause

use std::cmp::min;
use std::convert::TryInto;
use std::fmt::{self, Display};
use std::num::Wrapping;
use std::sync::atomic::{fence, Ordering};
use std::sync::Arc;

use crate::device::VirtioIommuRemapping;
use vm_memory::{
    Address, ByteValued, Bytes, GuestAddress, GuestMemory, GuestMemoryError, GuestMemoryMmap,
    GuestUsize,
};

pub(super) const VIRTQ_DESC_F_NEXT: u16 = 0x1;
pub(super) const VIRTQ_DESC_F_WRITE: u16 = 0x2;
pub(super) const VIRTQ_DESC_F_INDIRECT: u16 = 0x4;

#[derive(Debug)]
pub enum Error {
    GuestMemoryError,
    InvalidIndirectDescriptor,
    InvalidChain,
    InvalidOffset(u64),
    InvalidRingIndexFromMemory(GuestMemoryError),
}

impl Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        use self::Error::*;

        match self {
            GuestMemoryError => write!(f, "error accessing guest memory"),
            InvalidChain => write!(f, "invalid descriptor chain"),
            InvalidIndirectDescriptor => write!(f, "invalid indirect descriptor"),
            InvalidOffset(o) => write!(f, "invalid offset {}", o),
            InvalidRingIndexFromMemory(e) => write!(f, "invalid ring index from memory: {}", e),
        }
    }
}

// GuestMemoryMmap::read_obj() will be used to fetch the descriptor,
// which has an explicit constraint that the entire descriptor doesn't
// cross the page boundary. Otherwise the descriptor may be split into
// two mmap regions, which causes GuestMemoryMmap::read_obj() to fail.
//
// The Virtio Spec 1.0 defines the alignment of a virtio descriptor as
// 16 bytes, which fulfills the explicit constraint of
// GuestMemoryMmap::read_obj().

/// An iterator over a single descriptor chain. Not to be confused with AvailIter,
/// which iterates over the descriptor chain heads in a queue.
pub struct DescIter<'a> {
    next: Option<DescriptorChain<'a>>,
}

impl<'a> DescIter<'a> {
    /// Returns an iterator that only yields the readable descriptors in the chain.
    pub fn readable(self) -> impl Iterator<Item = DescriptorChain<'a>> {
        self.filter(|d| !d.is_write_only())
    }

    /// Returns an iterator that only yields the writable descriptors in the chain.
    pub fn writable(self) -> impl Iterator<Item = DescriptorChain<'a>> {
        self.filter(DescriptorChain::is_write_only)
    }
}

impl<'a> Iterator for DescIter<'a> {
    type Item = DescriptorChain<'a>;

    fn next(&mut self) -> Option<Self::Item> {
        if let Some(current) = self.next.take() {
            self.next = current.next_descriptor();
            Some(current)
        } else {
            None
        }
    }
}

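// A minimal usage sketch (assuming `chain` is a DescriptorChain obtained from
// Queue::iter()): sum up the space the device is allowed to write into.
//
//     let writable_len: u32 = chain.into_iter().writable().map(|d| d.len).sum();
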
/// A virtio descriptor, in its C representation.
#[repr(C)]
#[derive(Default, Clone, Copy)]
pub struct Descriptor {
    addr: u64,
    len: u32,
    flags: u16,
    next: u16,
}

unsafe impl ByteValued for Descriptor {}

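// A compile-time sketch of the constraint discussed above: a descriptor is
// exactly 16 bytes, so with the spec's 16-byte alignment it can never straddle
// a page boundary. (The `[(); N]` pattern is a stable const-assertion idiom;
// this check is illustrative and not part of the original interface.)
#[allow(dead_code)]
const _DESCRIPTOR_SIZE_CHECK: [(); 16] = [(); std::mem::size_of::<Descriptor>()];
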
/// A virtio descriptor head, not tied to a GuestMemoryMmap.
pub struct DescriptorHead {
    desc_table: GuestAddress,
    table_size: u16,
    index: u16,
    iommu_mapping_cb: Option<Arc<VirtioIommuRemapping>>,
}

/// A virtio descriptor chain.
#[derive(Clone)]
pub struct DescriptorChain<'a> {
    desc_table: GuestAddress,
    table_size: u16,
    ttl: u16, // used to prevent infinite chain cycles
    iommu_mapping_cb: Option<Arc<VirtioIommuRemapping>>,

    /// Reference to guest memory
    pub mem: &'a GuestMemoryMmap,

    /// Index into the descriptor table
    pub index: u16,

    /// Guest physical address of device specific data
    pub addr: GuestAddress,

    /// Length of device specific data
    pub len: u32,

    /// Includes next, write, and indirect bits
    pub flags: u16,

    /// Index into the descriptor table of the next descriptor if flags has
    /// the next bit set
    pub next: u16,
}

impl<'a> DescriptorChain<'a> {
    pub fn checked_new(
        mem: &GuestMemoryMmap,
        desc_table: GuestAddress,
        table_size: u16,
        index: u16,
        iommu_mapping_cb: Option<Arc<VirtioIommuRemapping>>,
    ) -> Option<DescriptorChain> {
        if index >= table_size {
            return None;
        }

        let desc_head = match mem.checked_offset(desc_table, (index as usize) * 16) {
            Some(a) => a,
            None => return None,
        };
        mem.checked_offset(desc_head, 16)?;

        // These reads can't fail unless Guest memory is hopelessly broken.
        let desc = match mem.read_obj::<Descriptor>(desc_head) {
            Ok(ret) => ret,
            Err(_) => {
                // TODO log address
                error!("Failed to read from memory");
                return None;
            }
        };

        // Translate address if necessary
        let desc_addr = if let Some(iommu_mapping_cb) = &iommu_mapping_cb {
            (iommu_mapping_cb)(desc.addr).unwrap()
        } else {
            desc.addr
        };

        let chain = DescriptorChain {
            mem,
            desc_table,
            table_size,
            ttl: table_size,
            index,
            addr: GuestAddress(desc_addr),
            len: desc.len,
            flags: desc.flags,
            next: desc.next,
            iommu_mapping_cb,
        };

        if chain.is_valid() {
            Some(chain)
        } else {
            None
        }
    }

    pub fn new_from_indirect(&self) -> Result<DescriptorChain, Error> {
        if !self.is_indirect() {
            return Err(Error::InvalidIndirectDescriptor);
        }

        let desc_head = self.addr;
        self.mem
            .checked_offset(desc_head, 16)
            .ok_or(Error::GuestMemoryError)?;

        // These reads can't fail unless Guest memory is hopelessly broken.
        let desc = match self.mem.read_obj::<Descriptor>(desc_head) {
            Ok(ret) => ret,
            Err(_) => return Err(Error::GuestMemoryError),
        };

        // Translate address if necessary
        let (desc_addr, iommu_mapping_cb) =
            if let Some(iommu_mapping_cb) = self.iommu_mapping_cb.clone() {
                (
                    (iommu_mapping_cb)(desc.addr).unwrap(),
                    Some(iommu_mapping_cb),
                )
            } else {
                (desc.addr, None)
            };

        let chain = DescriptorChain {
            mem: self.mem,
            desc_table: self.addr,
            table_size: (self.len / 16).try_into().unwrap(),
            ttl: (self.len / 16).try_into().unwrap(),
            index: 0,
            addr: GuestAddress(desc_addr),
            len: desc.len,
            flags: desc.flags,
            next: desc.next,
            iommu_mapping_cb,
        };

        if !chain.is_valid() {
            return Err(Error::InvalidChain);
        }

        Ok(chain)
    }

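    // Layout sketch for the indirect case handled above (assuming a
    // well-formed driver): the parent descriptor's addr/len point at an array
    // of `len / 16` plain descriptors in guest memory, which then forms its
    // own chain starting at index 0.
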
    /// Returns a copy of a descriptor referencing a different GuestMemoryMmap object.
    pub fn new_from_head(
        mem: &'a GuestMemoryMmap,
        head: DescriptorHead,
    ) -> Result<DescriptorChain<'a>, Error> {
        match DescriptorChain::checked_new(
            mem,
            head.desc_table,
            head.table_size,
            head.index,
            head.iommu_mapping_cb,
        ) {
            Some(d) => Ok(d),
            None => Err(Error::InvalidChain),
        }
    }

    /// Returns a DescriptorHead that can be used to build a copy of a descriptor
    /// referencing a different GuestMemoryMmap.
    pub fn get_head(&self) -> DescriptorHead {
        DescriptorHead {
            desc_table: self.desc_table,
            table_size: self.table_size,
            index: self.index,
            iommu_mapping_cb: self.iommu_mapping_cb.clone(),
        }
    }

    fn is_valid(&self) -> bool {
        !(self
            .mem
            .checked_offset(self.addr, self.len as usize)
            .is_none()
            || (self.has_next() && self.next >= self.table_size))
    }

    /// Gets if this descriptor chain has another descriptor linked after it.
    pub fn has_next(&self) -> bool {
        self.flags & VIRTQ_DESC_F_NEXT != 0 && self.ttl > 1
    }

    /// If the driver designated this as a write only descriptor.
    ///
    /// If this is false, this descriptor is read only.
    /// Write only means the emulated device can write and the driver can read.
    pub fn is_write_only(&self) -> bool {
        self.flags & VIRTQ_DESC_F_WRITE != 0
    }

    pub fn is_indirect(&self) -> bool {
        self.flags & VIRTQ_DESC_F_INDIRECT != 0
    }

    /// Gets the next descriptor in this descriptor chain, if there is one.
    ///
    /// Note that this is distinct from the next descriptor chain returned by `AvailIter`, which is
    /// the head of the next _available_ descriptor chain.
    pub fn next_descriptor(&self) -> Option<DescriptorChain<'a>> {
        if self.has_next() {
            DescriptorChain::checked_new(
                self.mem,
                self.desc_table,
                self.table_size,
                self.next,
                self.iommu_mapping_cb.clone(),
            )
            .map(|mut c| {
                c.ttl = self.ttl - 1;
                c
            })
        } else {
            None
        }
    }
}

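// Walking a chain by hand with next_descriptor() (a sketch equivalent to what
// DescIter does; `process` is a hypothetical per-descriptor handler):
//
//     let mut d = Some(head);
//     while let Some(cur) = d {
//         process(cur.addr, cur.len);
//         d = cur.next_descriptor();
//     }
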
impl<'a> IntoIterator for DescriptorChain<'a> {
    type Item = DescriptorChain<'a>;
    type IntoIter = DescIter<'a>;

    fn into_iter(self) -> Self::IntoIter {
        DescIter { next: Some(self) }
    }
}

/// Consuming iterator over all available descriptor chain heads in the queue.
pub struct AvailIter<'a, 'b> {
    mem: &'a GuestMemoryMmap,
    desc_table: GuestAddress,
    avail_ring: GuestAddress,
    next_index: Wrapping<u16>,
    last_index: Wrapping<u16>,
    queue_size: u16,
    next_avail: &'b mut Wrapping<u16>,
    iommu_mapping_cb: Option<Arc<VirtioIommuRemapping>>,
}

impl<'a, 'b> AvailIter<'a, 'b> {
    pub fn new(mem: &'a GuestMemoryMmap, q_next_avail: &'b mut Wrapping<u16>) -> AvailIter<'a, 'b> {
        AvailIter {
            mem,
            desc_table: GuestAddress(0),
            avail_ring: GuestAddress(0),
            next_index: Wrapping(0),
            last_index: Wrapping(0),
            queue_size: 0,
            next_avail: q_next_avail,
            iommu_mapping_cb: None,
        }
    }
}

impl<'a, 'b> Iterator for AvailIter<'a, 'b> {
    type Item = DescriptorChain<'a>;

    fn next(&mut self) -> Option<Self::Item> {
        if self.next_index == self.last_index {
            return None;
        }

        let offset = (4 + (self.next_index.0 % self.queue_size) * 2) as usize;
        let avail_addr = match self.mem.checked_offset(self.avail_ring, offset) {
            Some(a) => a,
            None => return None,
        };
        // This index is checked below in checked_new
        let desc_index: u16 = match self.mem.read_obj(avail_addr) {
            Ok(ret) => ret,
            Err(_) => {
                // TODO log address
                error!("Failed to read from memory");
                return None;
            }
        };

        self.next_index += Wrapping(1);

        let ret = DescriptorChain::checked_new(
            self.mem,
            self.desc_table,
            self.queue_size,
            desc_index,
            self.iommu_mapping_cb.clone(),
        );
        if ret.is_some() {
            *self.next_avail += Wrapping(1);
        }
        ret
    }
}

#[derive(Serialize, Deserialize)]
#[serde(remote = "GuestAddress")]
struct GuestAddressDef(pub u64);

#[derive(Clone, Serialize, Deserialize)]
/// A virtio queue's parameters.
pub struct Queue {
    /// The maximal size in elements offered by the device
    pub max_size: u16,

    /// The queue size in elements the driver selected
    pub size: u16,

    /// Indicates if the queue is finished with configuration
    pub ready: bool,

    /// Interrupt vector index of the queue
    pub vector: u16,

    #[serde(with = "GuestAddressDef")]
    /// Guest physical address of the descriptor table
    pub desc_table: GuestAddress,

    #[serde(with = "GuestAddressDef")]
    /// Guest physical address of the available ring
    pub avail_ring: GuestAddress,

    #[serde(with = "GuestAddressDef")]
    /// Guest physical address of the used ring
    pub used_ring: GuestAddress,

    pub next_avail: Wrapping<u16>,
    pub next_used: Wrapping<u16>,

    #[serde(skip)]
    pub iommu_mapping_cb: Option<Arc<VirtioIommuRemapping>>,
}

impl Queue {
    /// Constructs an empty virtio queue with the given `max_size`.
    pub fn new(max_size: u16) -> Queue {
        Queue {
            max_size,
            size: max_size,
            ready: false,
            vector: 0,
            desc_table: GuestAddress(0),
            avail_ring: GuestAddress(0),
            used_ring: GuestAddress(0),
            next_avail: Wrapping(0),
            next_used: Wrapping(0),
            iommu_mapping_cb: None,
        }
    }

    pub fn get_max_size(&self) -> u16 {
        self.max_size
    }

    pub fn enable(&mut self, set: bool) {
        self.ready = set;

        if set {
            // Translate address of descriptor table and vrings.
            if let Some(iommu_mapping_cb) = &self.iommu_mapping_cb {
                self.desc_table =
                    GuestAddress((iommu_mapping_cb)(self.desc_table.raw_value()).unwrap());
                self.avail_ring =
                    GuestAddress((iommu_mapping_cb)(self.avail_ring.raw_value()).unwrap());
                self.used_ring =
                    GuestAddress((iommu_mapping_cb)(self.used_ring.raw_value()).unwrap());
            }
        } else {
            self.desc_table = GuestAddress(0);
            self.avail_ring = GuestAddress(0);
            self.used_ring = GuestAddress(0);
        }
    }

    /// Return the actual size of the queue, as the driver may not set up a
    /// queue as big as the device allows.
    pub fn actual_size(&self) -> u16 {
        min(self.size, self.max_size)
    }

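    // For example (illustrative values): a device offering max_size 256 whose
    // driver programmed size 128 operates on 128 entries, while a bogus size
    // of 512 is clamped back down to 256 by actual_size() above.
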
    /// Reset the queue to a state that is acceptable for a device reset
    pub fn reset(&mut self) {
        self.ready = false;
        self.size = self.max_size;
    }

    pub fn is_valid(&self, mem: &GuestMemoryMmap) -> bool {
        let queue_size = self.actual_size() as usize;
        let desc_table = self.desc_table;
        let desc_table_size = 16 * queue_size;
        let avail_ring = self.avail_ring;
        let avail_ring_size = 6 + 2 * queue_size;
        let used_ring = self.used_ring;
        let used_ring_size = 6 + 8 * queue_size;
        if !self.ready {
            error!("attempt to use virtio queue that is not marked ready");
            false
        } else if self.size > self.max_size || self.size == 0 || (self.size & (self.size - 1)) != 0
        {
            error!("virtio queue with invalid size: {}", self.size);
            false
        } else if desc_table
            .checked_add(desc_table_size as GuestUsize)
            .map_or(true, |v| !mem.address_in_range(v))
        {
            error!(
                "virtio queue descriptor table goes out of bounds: start:0x{:08x} size:0x{:08x}",
                desc_table.raw_value(),
                desc_table_size
            );
            false
        } else if avail_ring
            .checked_add(avail_ring_size as GuestUsize)
            .map_or(true, |v| !mem.address_in_range(v))
        {
            error!(
                "virtio queue available ring goes out of bounds: start:0x{:08x} size:0x{:08x}",
                avail_ring.raw_value(),
                avail_ring_size
            );
            false
        } else if used_ring
            .checked_add(used_ring_size as GuestUsize)
            .map_or(true, |v| !mem.address_in_range(v))
        {
            error!(
                "virtio queue used ring goes out of bounds: start:0x{:08x} size:0x{:08x}",
                used_ring.raw_value(),
                used_ring_size
            );
            false
        } else if desc_table.mask(0xf) != 0 {
            error!("virtio queue descriptor table breaks alignment constraints");
            false
        } else if avail_ring.mask(0x1) != 0 {
            error!("virtio queue available ring breaks alignment constraints");
            false
        } else if used_ring.mask(0x3) != 0 {
            error!("virtio queue used ring breaks alignment constraints");
            false
        } else {
            true
        }
    }

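    // The ring sizes checked above follow the virtio 1.0 split-ring layout:
    //   descriptor table: 16 bytes per entry
    //   available ring:   flags (2) + idx (2) + 2 * queue_size + used_event (2)
    //   used ring:        flags (2) + idx (2) + 8 * queue_size + avail_event (2)
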
    /// A consuming iterator over all available descriptor chain heads offered by the driver.
    pub fn iter<'a, 'b>(&'b mut self, mem: &'a GuestMemoryMmap) -> AvailIter<'a, 'b> {
        let queue_size = self.actual_size();
        let avail_ring = self.avail_ring;

        let index_addr = match mem.checked_offset(avail_ring, 2) {
            Some(ret) => ret,
            None => {
                // TODO log address
                warn!("Invalid offset");
                return AvailIter::new(mem, &mut self.next_avail);
            }
        };
        // Note that last_index has no invalid values
        let last_index: u16 = match mem.read_obj::<u16>(index_addr) {
            Ok(ret) => ret,
            Err(_) => return AvailIter::new(mem, &mut self.next_avail),
        };

        AvailIter {
            mem,
            desc_table: self.desc_table,
            avail_ring,
            next_index: self.next_avail,
            last_index: Wrapping(last_index),
            queue_size,
            next_avail: &mut self.next_avail,
            iommu_mapping_cb: self.iommu_mapping_cb.clone(),
        }
    }

    /// Update avail_event on the used ring with the last index in the avail ring.
    pub fn update_avail_event(&mut self, mem: &GuestMemoryMmap) {
        let index_addr = match mem.checked_offset(self.avail_ring, 2) {
            Some(ret) => ret,
            None => {
                // TODO log address
                warn!("Invalid offset");
                return;
            }
        };
        // Note that last_index has no invalid values
        let last_index: u16 = match mem.read_obj::<u16>(index_addr) {
            Ok(ret) => ret,
            Err(_) => return,
        };

        match mem.checked_offset(self.used_ring, (4 + self.actual_size() * 8) as usize) {
            Some(a) => {
                mem.write_obj(last_index, a).unwrap();
            }
            None => warn!("Can't update avail_event"),
        }

        // This fence ensures the guest sees the value we've just written.
        fence(Ordering::Release);
    }

    /// Return the value present in the used_event field of the avail ring.
    #[inline(always)]
    pub fn get_used_event(&self, mem: &GuestMemoryMmap) -> Option<Wrapping<u16>> {
        let avail_ring = self.avail_ring;
        let used_event_addr =
            match mem.checked_offset(avail_ring, (4 + self.actual_size() * 2) as usize) {
                Some(a) => a,
                None => {
                    warn!("Invalid offset looking for used_event");
                    return None;
                }
            };

        // This fence ensures we're seeing the latest update from the guest.
        fence(Ordering::SeqCst);
        match mem.read_obj::<u16>(used_event_addr) {
            Ok(ret) => Some(Wrapping(ret)),
            Err(_) => None,
        }
    }

    /// Puts an available descriptor head into the used ring for use by the guest.
    pub fn add_used(&mut self, mem: &GuestMemoryMmap, desc_index: u16, len: u32) -> Option<u16> {
        if desc_index >= self.actual_size() {
            error!(
                "attempted to add out of bounds descriptor to used ring: {}",
                desc_index
            );
            return None;
        }

        let used_ring = self.used_ring;
        let next_used = u64::from(self.next_used.0 % self.actual_size());
        let used_elem = used_ring.unchecked_add(4 + next_used * 8);

        // These writes can't fail as we are guaranteed to be within the descriptor ring.
        mem.write_obj(u32::from(desc_index), used_elem).unwrap();
        mem.write_obj(len as u32, used_elem.unchecked_add(4))
            .unwrap();

        self.next_used += Wrapping(1);

        // This fence ensures all descriptor writes are visible before the index update is.
        fence(Ordering::Release);

        mem.write_obj(self.next_used.0 as u16, used_ring.unchecked_add(2))
            .unwrap();

        Some(self.next_used.0)
    }

    /// Goes back one position in the available descriptor chain offered by the driver.
    /// Rust does not support bidirectional iterators. This is the only way to revert the effect
    /// of an iterator increment on the queue.
    pub fn go_to_previous_position(&mut self) {
        self.next_avail -= Wrapping(1);
    }

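    // Typical device-side completion flow with iter() and add_used() (a
    // sketch; `process` is a hypothetical request handler returning the number
    // of bytes written). The heads are collected first because iter() holds a
    // mutable borrow of the queue:
    //
    //     let heads: Vec<_> = queue.iter(mem).collect();
    //     for head in heads {
    //         let written = process(&head);
    //         queue.add_used(mem, head.index, written);
    //     }
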
    /// Get ring's index from memory.
    fn index_from_memory(&self, ring: GuestAddress, mem: &GuestMemoryMmap) -> Result<u16, Error> {
        mem.read_obj::<u16>(
            mem.checked_offset(ring, 2)
                .ok_or_else(|| Error::InvalidOffset(ring.raw_value() + 2))?,
        )
        .map_err(Error::InvalidRingIndexFromMemory)
    }

    /// Get latest index from available ring.
    pub fn avail_index_from_memory(&self, mem: &GuestMemoryMmap) -> Result<u16, Error> {
        self.index_from_memory(self.avail_ring, mem)
    }

    /// Get latest index from used ring.
    pub fn used_index_from_memory(&self, mem: &GuestMemoryMmap) -> Result<u16, Error> {
        self.index_from_memory(self.used_ring, mem)
    }

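    // Note: the comparison below uses the raw u16 indices, so it assumes they
    // have not wrapped; outstanding descriptors can be under-reported once the
    // available index wraps past zero while the used index has not.
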
    pub fn available_descriptors(&self, mem: &GuestMemoryMmap) -> Result<bool, Error> {
        Ok(self.used_index_from_memory(mem)? < self.avail_index_from_memory(mem)?)
    }
}

#[cfg(test)]
pub(crate) mod tests {
    extern crate vm_memory;

    use std::marker::PhantomData;
    use std::mem;

    pub use super::*;
    use vm_memory::{GuestAddress, GuestMemoryMmap, GuestUsize};

    // Represents a location in GuestMemoryMmap which holds a given type.
    pub struct SomeplaceInMemory<'a, T> {
        pub location: GuestAddress,
        mem: &'a GuestMemoryMmap,
        phantom: PhantomData<*const T>,
    }

    // The ByteValued trait is required to use mem.read_obj and write_obj.
    impl<'a, T> SomeplaceInMemory<'a, T>
    where
        T: vm_memory::ByteValued,
    {
        fn new(location: GuestAddress, mem: &'a GuestMemoryMmap) -> Self {
            SomeplaceInMemory {
                location,
                mem,
                phantom: PhantomData,
            }
        }

        // Reads from the actual memory location.
        pub fn get(&self) -> T {
            self.mem.read_obj(self.location).unwrap()
        }

        // Writes to the actual memory location.
        pub fn set(&self, val: T) {
            self.mem.write_obj(val, self.location).unwrap()
        }

        // This function returns a place in memory which holds a value of type U, and starts
        // offset bytes after the current location.
        fn map_offset<U>(&self, offset: GuestUsize) -> SomeplaceInMemory<'a, U> {
            SomeplaceInMemory {
                location: self.location.checked_add(offset).unwrap(),
                mem: self.mem,
                phantom: PhantomData,
            }
        }

        // This function returns a place in memory which holds a value of type U, and starts
        // immediately after the end of self (which is location + sizeof(T)).
        fn next_place<U>(&self) -> SomeplaceInMemory<'a, U> {
            self.map_offset::<U>(mem::size_of::<T>() as u64)
        }

        fn end(&self) -> GuestAddress {
            self.location
                .checked_add(mem::size_of::<T>() as u64)
                .unwrap()
        }
    }

    // Represents a virtio descriptor in guest memory.
    pub struct VirtqDesc<'a> {
        pub addr: SomeplaceInMemory<'a, u64>,
        pub len: SomeplaceInMemory<'a, u32>,
        pub flags: SomeplaceInMemory<'a, u16>,
        pub next: SomeplaceInMemory<'a, u16>,
    }

    impl<'a> VirtqDesc<'a> {
        fn new(start: GuestAddress, mem: &'a GuestMemoryMmap) -> Self {
            assert_eq!(start.0 & 0xf, 0);

            let addr = SomeplaceInMemory::new(start, mem);
            let len = addr.next_place();
            let flags = len.next_place();
            let next = flags.next_place();

            VirtqDesc {
                addr,
                len,
                flags,
                next,
            }
        }

        fn start(&self) -> GuestAddress {
            self.addr.location
        }

        fn end(&self) -> GuestAddress {
            self.next.end()
        }

        pub fn set(&self, addr: u64, len: u32, flags: u16, next: u16) {
            self.addr.set(addr);
            self.len.set(len);
            self.flags.set(flags);
            self.next.set(next);
        }
    }

    // Represents a virtio queue ring. The only difference between the used and
    // available rings is the ring element type.
    pub struct VirtqRing<'a, T> {
        pub flags: SomeplaceInMemory<'a, u16>,
        pub idx: SomeplaceInMemory<'a, u16>,
        pub ring: Vec<SomeplaceInMemory<'a, T>>,
        pub event: SomeplaceInMemory<'a, u16>,
    }

    impl<'a, T> VirtqRing<'a, T>
    where
        T: vm_memory::ByteValued,
    {
        fn new(
            start: GuestAddress,
            mem: &'a GuestMemoryMmap,
            qsize: u16,
            alignment: GuestUsize,
        ) -> Self {
            assert_eq!(start.0 & (alignment - 1), 0);

            let flags = SomeplaceInMemory::new(start, mem);
            let idx = flags.next_place();

            let mut ring = Vec::with_capacity(qsize as usize);

            ring.push(idx.next_place());

            for _ in 1..qsize as usize {
                let x = ring.last().unwrap().next_place();
                ring.push(x)
            }

            let event = ring.last().unwrap().next_place();

            flags.set(0);
            idx.set(0);
            event.set(0);

            VirtqRing {
                flags,
                idx,
                ring,
                event,
            }
        }

        pub fn end(&self) -> GuestAddress {
            self.event.end()
        }
    }

    #[repr(C)]
    #[derive(Clone, Copy, Default)]
    pub struct VirtqUsedElem {
        pub id: u32,
        pub len: u32,
    }

    unsafe impl vm_memory::ByteValued for VirtqUsedElem {}

    pub type VirtqAvail<'a> = VirtqRing<'a, u16>;
    pub type VirtqUsed<'a> = VirtqRing<'a, VirtqUsedElem>;

    pub struct VirtQueue<'a> {
        pub dtable: Vec<VirtqDesc<'a>>,
        pub avail: VirtqAvail<'a>,
        pub used: VirtqUsed<'a>,
    }

    impl<'a> VirtQueue<'a> {
        // We try to make sure things are aligned properly :-s
        pub fn new(start: GuestAddress, mem: &'a GuestMemoryMmap, qsize: u16) -> Self {
            // power of 2?
            assert!(qsize > 0 && qsize & (qsize - 1) == 0);

            let mut dtable = Vec::with_capacity(qsize as usize);

            let mut end = start;

            for _ in 0..qsize {
                let d = VirtqDesc::new(end, mem);
                end = d.end();
                dtable.push(d);
            }

            const AVAIL_ALIGN: u64 = 2;

            let avail = VirtqAvail::new(end, mem, qsize, AVAIL_ALIGN);

            const USED_ALIGN: u64 = 4;

            let mut x = avail.end().0;
            x = (x + USED_ALIGN - 1) & !(USED_ALIGN - 1);

            let used = VirtqUsed::new(GuestAddress(x), mem, qsize, USED_ALIGN);

            VirtQueue {
                dtable,
                avail,
                used,
            }
        }

        fn size(&self) -> u16 {
            self.dtable.len() as u16
        }

        fn dtable_start(&self) -> GuestAddress {
            self.dtable.first().unwrap().start()
        }

        fn avail_start(&self) -> GuestAddress {
            self.avail.flags.location
        }

        fn used_start(&self) -> GuestAddress {
            self.used.flags.location
        }

        // Creates a new Queue, using the underlying memory regions represented by the VirtQueue.
        pub fn create_queue(&self) -> Queue {
            let mut q = Queue::new(self.size());

            q.size = self.size();
            q.ready = true;
            q.desc_table = self.dtable_start();
            q.avail_ring = self.avail_start();
            q.used_ring = self.used_start();

            q
        }

        pub fn start(&self) -> GuestAddress {
            self.dtable_start()
        }

        pub fn end(&self) -> GuestAddress {
            self.used.end()
        }
    }

    #[test]
    fn test_checked_new_descriptor_chain() {
        let m = &GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = VirtQueue::new(GuestAddress(0), m, 16);

        assert!(vq.end().0 < 0x1000);

        // index >= queue_size
        assert!(DescriptorChain::checked_new(m, vq.start(), 16, 16, None).is_none());

        // desc_table address is way off
        assert!(
            DescriptorChain::checked_new(m, GuestAddress(0x00ff_ffff_ffff), 16, 0, None).is_none()
        );

        // the addr field of the descriptor is way off
        vq.dtable[0].addr.set(0x0fff_ffff_ffff);
        assert!(DescriptorChain::checked_new(m, vq.start(), 16, 0, None).is_none());

        // let's create some invalid chains

        {
            // the addr field of the desc is ok now
            vq.dtable[0].addr.set(0x1000);
            // ...but the length is too large
            vq.dtable[0].len.set(0xffff_ffff);
            assert!(DescriptorChain::checked_new(m, vq.start(), 16, 0, None).is_none());
        }

        {
            // the first desc has a normal len now, and the next_descriptor flag is set
            vq.dtable[0].len.set(0x1000);
            vq.dtable[0].flags.set(VIRTQ_DESC_F_NEXT);
            // ...but the index of the next descriptor is too large
            vq.dtable[0].next.set(16);

            assert!(DescriptorChain::checked_new(m, vq.start(), 16, 0, None).is_none());
        }

        // finally, let's test an ok chain

        {
            vq.dtable[0].next.set(1);
            vq.dtable[1].set(0x2000, 0x1000, 0, 0);

            let c = DescriptorChain::checked_new(m, vq.start(), 16, 0, None).unwrap();

            assert_eq!(c.mem as *const GuestMemoryMmap, m as *const GuestMemoryMmap);
            assert_eq!(c.desc_table, vq.start());
            assert_eq!(c.table_size, 16);
            assert_eq!(c.ttl, c.table_size);
            assert_eq!(c.index, 0);
            assert_eq!(c.addr, GuestAddress(0x1000));
            assert_eq!(c.len, 0x1000);
            assert_eq!(c.flags, VIRTQ_DESC_F_NEXT);
            assert_eq!(c.next, 1);

            assert!(c.next_descriptor().unwrap().next_descriptor().is_none());
        }
    }

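    // A small additional test (not in the original suite) exercising
    // DescIter::readable()/writable() on a two-descriptor chain where
    // descriptor 0 is device-readable and descriptor 1 is device-writable.
    #[test]
    fn test_readable_writable_split() {
        let m = &GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = VirtQueue::new(GuestAddress(0), m, 16);

        vq.dtable[0].set(0x1000, 0x100, VIRTQ_DESC_F_NEXT, 1);
        vq.dtable[1].set(0x2000, 0x100, VIRTQ_DESC_F_WRITE, 0);

        let c = DescriptorChain::checked_new(m, vq.start(), 16, 0, None).unwrap();
        assert_eq!(c.clone().into_iter().readable().count(), 1);
        assert_eq!(c.into_iter().writable().count(), 1);
    }
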
    #[test]
    fn test_new_from_descriptor_chain() {
        let m = &GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = VirtQueue::new(GuestAddress(0), m, 16);

        // create a chain with a descriptor pointing to an indirect table
        vq.dtable[0].addr.set(0x1000);
        vq.dtable[0].len.set(0x1000);
        vq.dtable[0].next.set(0);
        vq.dtable[0].flags.set(VIRTQ_DESC_F_INDIRECT);

        let c = DescriptorChain::checked_new(m, vq.start(), 16, 0, None).unwrap();
        assert!(c.is_indirect());

        // create an indirect table with 4 chained descriptors
        let mut indirect_table = Vec::with_capacity(4);
        for j in 0..4 {
            let desc = VirtqDesc::new(GuestAddress(0x1000 + (j * 16)), m);
            desc.set(0x1000, 0x1000, VIRTQ_DESC_F_NEXT, (j + 1) as u16);
            indirect_table.push(desc);
        }

        // try to iterate through the indirect table descriptors
        let mut i = c.new_from_indirect().unwrap();
        for j in 0..4 {
            assert_eq!(i.flags, VIRTQ_DESC_F_NEXT);
            assert_eq!(i.next, j + 1);
            i = i.next_descriptor().unwrap();
        }
    }

    #[test]
    fn test_queue_and_iterator() {
        let m = &GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = VirtQueue::new(GuestAddress(0), m, 16);

        let mut q = vq.create_queue();

        // q is currently valid
        assert!(q.is_valid(m));

        // shouldn't be valid when not marked as ready
        q.ready = false;
        assert!(!q.is_valid(m));
        q.ready = true;

        // or when size > max_size
        q.size = q.max_size << 1;
        assert!(!q.is_valid(m));
        q.size = q.max_size;

        // or when size is 0
        q.size = 0;
        assert!(!q.is_valid(m));
        q.size = q.max_size;

        // or when size is not a power of 2
        q.size = 11;
        assert!(!q.is_valid(m));
        q.size = q.max_size;

        // or if the various addresses are off

        q.desc_table = GuestAddress(0xffff_ffff);
        assert!(!q.is_valid(m));
        q.desc_table = GuestAddress(0x1001);
        assert!(!q.is_valid(m));
        q.desc_table = vq.dtable_start();

        q.avail_ring = GuestAddress(0xffff_ffff);
        assert!(!q.is_valid(m));
        q.avail_ring = GuestAddress(0x1001);
        assert!(!q.is_valid(m));
        q.avail_ring = vq.avail_start();

        q.used_ring = GuestAddress(0xffff_ffff);
        assert!(!q.is_valid(m));
        q.used_ring = GuestAddress(0x1001);
        assert!(!q.is_valid(m));
        q.used_ring = vq.used_start();

        {
            // an invalid queue should return an iterator with no next
            q.ready = false;
            let mut i = q.iter(m);
            assert!(i.next().is_none());
        }

        q.ready = true;

        // now let's create two simple descriptor chains

        {
            for j in 0..5 {
                vq.dtable[j].set(
                    0x1000 * (j + 1) as u64,
                    0x1000,
                    VIRTQ_DESC_F_NEXT,
                    (j + 1) as u16,
                );
            }

            // the chains are (0, 1) and (2, 3, 4)
            vq.dtable[1].flags.set(0);
            vq.dtable[4].flags.set(0);
            vq.avail.ring[0].set(0);
            vq.avail.ring[1].set(2);
            vq.avail.idx.set(2);

            let mut i = q.iter(m);

            {
                let mut c = i.next().unwrap();
                c = c.next_descriptor().unwrap();
                assert!(!c.has_next());
            }

            {
                let mut c = i.next().unwrap();
                c = c.next_descriptor().unwrap();
                c = c.next_descriptor().unwrap();
                assert!(!c.has_next());
            }
        }

        // also test go_to_previous_position() works as expected
        {
            assert!(q.iter(m).next().is_none());
            q.go_to_previous_position();
            let mut c = q.iter(m).next().unwrap();
            c = c.next_descriptor().unwrap();
            c = c.next_descriptor().unwrap();
            assert!(!c.has_next());
        }
    }

    #[test]
    fn test_add_used() {
        let m = &GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = VirtQueue::new(GuestAddress(0), m, 16);

        let mut q = vq.create_queue();
        assert_eq!(vq.used.idx.get(), 0);

        // index too large
        q.add_used(m, 16, 0x1000);
        assert_eq!(vq.used.idx.get(), 0);

        // should be ok
        q.add_used(m, 1, 0x1000);
        assert_eq!(vq.used.idx.get(), 1);
        let x = vq.used.ring[0].get();
        assert_eq!(x.id, 1);
        assert_eq!(x.len, 0x1000);
    }
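
    // A small additional test (not in the original suite) for the EVENT_IDX
    // helpers: update_avail_event() mirrors the avail ring's idx into the used
    // ring's event word, and get_used_event() reads the used_event word the
    // driver publishes at the tail of the avail ring.
    #[test]
    fn test_event_idx_helpers() {
        let m = &GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = VirtQueue::new(GuestAddress(0), m, 16);
        let mut q = vq.create_queue();

        vq.avail.idx.set(3);
        q.update_avail_event(m);
        assert_eq!(vq.used.event.get(), 3);

        vq.avail.event.set(5);
        assert_eq!(q.get_used_event(m), Some(Wrapping(5)));
    }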
}