Commit a423bf13ad:

The new virtio-queue version introduced some breaking changes which need to be
addressed so that Cloud Hypervisor can still work with this version. The most
important change removes the guest memory handle from the Queue, meaning the
caller now has to provide it to multiple methods of the QueueT trait. One
interesting aspect is that QueueT has been widely extended to provide every
getter and setter needed to access and update the Queue structure without
direct access to its internal fields.

This patch ports all the virtio and vhost-user devices to this new crate
definition. It also updates both the vhost-user-block and vhost-user-net
backends based on the updated vhost-user-backend crate, as well as the fuzz
directory.

Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
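As a rough sketch of what the new calling convention looks like (this is not code from the patch; the function and variable names below are made up for illustration), ring-facing methods on the QueueT trait now take the guest memory handle as an explicit argument:

    // Hypothetical illustration: the guest memory handle is passed into each
    // QueueT call instead of being stored inside the Queue itself.
    use virtio_queue::{Queue, QueueT};
    use vm_memory::GuestMemoryMmap;

    fn complete_request(
        queue: &mut Queue,
        mem: &GuestMemoryMmap,
        head_index: u16,
        len: u32,
    ) -> Result<bool, virtio_queue::Error> {
        // Both add_used() and needs_notification() now require `mem`.
        queue.add_used(mem, head_index, len)?;
        queue.needs_notification(mem)
    }
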
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause

pub mod testing {
    use std::marker::PhantomData;
    use std::mem;
    use virtio_queue::{Queue, QueueT, VirtqUsedElem};
    use vm_memory::{bitmap::AtomicBitmap, Address, Bytes, GuestAddress, GuestUsize};

    type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>;

    // Represents a location in GuestMemoryMmap which holds a given type.
    pub struct SomeplaceInMemory<'a, T> {
        pub location: GuestAddress,
        mem: &'a GuestMemoryMmap,
        phantom: PhantomData<*const T>,
    }

    // The ByteValued trait is required to use mem.read_obj and write_obj.
    impl<'a, T> SomeplaceInMemory<'a, T>
    where
        T: vm_memory::ByteValued,
    {
        fn new(location: GuestAddress, mem: &'a GuestMemoryMmap) -> Self {
            SomeplaceInMemory {
                location,
                mem,
                phantom: PhantomData,
            }
        }

        // Reads from the actual memory location.
        pub fn get(&self) -> T {
            self.mem.read_obj(self.location).unwrap()
        }

        // Writes to the actual memory location.
        pub fn set(&self, val: T) {
            self.mem.write_obj(val, self.location).unwrap()
        }

        // This function returns a place in memory which holds a value of type U,
        // and starts offset bytes after the current location.
        fn map_offset<U>(&self, offset: GuestUsize) -> SomeplaceInMemory<'a, U> {
            SomeplaceInMemory {
                location: self.location.checked_add(offset).unwrap(),
                mem: self.mem,
                phantom: PhantomData,
            }
        }

        // This function returns a place in memory which holds a value of type U,
        // and starts immediately after the end of self (which is location + sizeof(T)).
        fn next_place<U>(&self) -> SomeplaceInMemory<'a, U> {
            self.map_offset::<U>(mem::size_of::<T>() as u64)
        }

        fn end(&self) -> GuestAddress {
            self.location
                .checked_add(mem::size_of::<T>() as u64)
                .unwrap()
        }
    }

    // Represents a virtio descriptor in guest memory.
    pub struct VirtqDesc<'a> {
        pub addr: SomeplaceInMemory<'a, u64>,
        pub len: SomeplaceInMemory<'a, u32>,
        pub flags: SomeplaceInMemory<'a, u16>,
        pub next: SomeplaceInMemory<'a, u16>,
    }

    impl<'a> VirtqDesc<'a> {
        pub fn new(start: GuestAddress, mem: &'a GuestMemoryMmap) -> Self {
            assert_eq!(start.0 & 0xf, 0);

            let addr = SomeplaceInMemory::new(start, mem);
            let len = addr.next_place();
            let flags = len.next_place();
            let next = flags.next_place();

            VirtqDesc {
                addr,
                len,
                flags,
                next,
            }
        }

        fn start(&self) -> GuestAddress {
            self.addr.location
        }

        fn end(&self) -> GuestAddress {
            self.next.end()
        }

        pub fn set(&self, addr: u64, len: u32, flags: u16, next: u16) {
            self.addr.set(addr);
            self.len.set(len);
            self.flags.set(flags);
            self.next.set(next);
        }
    }

    // Represents a virtio queue ring. The only difference between the used and
    // available rings is the ring element type.
    pub struct VirtqRing<'a, T> {
        pub flags: SomeplaceInMemory<'a, u16>,
        pub idx: SomeplaceInMemory<'a, u16>,
        pub ring: Vec<SomeplaceInMemory<'a, T>>,
        pub event: SomeplaceInMemory<'a, u16>,
    }

    impl<'a, T> VirtqRing<'a, T>
    where
        T: vm_memory::ByteValued,
    {
        fn new(
            start: GuestAddress,
            mem: &'a GuestMemoryMmap,
            qsize: u16,
            alignment: GuestUsize,
        ) -> Self {
            assert_eq!(start.0 & (alignment - 1), 0);

            let flags = SomeplaceInMemory::new(start, mem);
            let idx = flags.next_place();

            let mut ring = Vec::with_capacity(qsize as usize);

            ring.push(idx.next_place());

            for _ in 1..qsize as usize {
                let x = ring.last().unwrap().next_place();
                ring.push(x)
            }

            let event = ring.last().unwrap().next_place();

            flags.set(0);
            idx.set(0);
            event.set(0);

            VirtqRing {
                flags,
                idx,
                ring,
                event,
            }
        }

        pub fn end(&self) -> GuestAddress {
            self.event.end()
        }
    }

    pub type VirtqAvail<'a> = VirtqRing<'a, u16>;
    pub type VirtqUsed<'a> = VirtqRing<'a, VirtqUsedElem>;

    pub struct VirtQueue<'a> {
        pub dtable: Vec<VirtqDesc<'a>>,
        pub avail: VirtqAvail<'a>,
        pub used: VirtqUsed<'a>,
        pub mem: &'a GuestMemoryMmap,
    }

    impl<'a> VirtQueue<'a> {
        // We try to make sure things are aligned properly :-s
        pub fn new(start: GuestAddress, mem: &'a GuestMemoryMmap, qsize: u16) -> Self {
            // power of 2?
            assert!(qsize > 0 && qsize & (qsize - 1) == 0);

            let mut dtable = Vec::with_capacity(qsize as usize);

            let mut end = start;

            for _ in 0..qsize {
                let d = VirtqDesc::new(end, mem);
                end = d.end();
                dtable.push(d);
            }

            const AVAIL_ALIGN: u64 = 2;

            let avail = VirtqAvail::new(end, mem, qsize, AVAIL_ALIGN);

            const USED_ALIGN: u64 = 4;

            let mut x = avail.end().0;
            x = (x + USED_ALIGN - 1) & !(USED_ALIGN - 1);

            let used = VirtqUsed::new(GuestAddress(x), mem, qsize, USED_ALIGN);

            VirtQueue {
                dtable,
                avail,
                used,
                mem,
            }
        }

        fn size(&self) -> u16 {
            self.dtable.len() as u16
        }

        pub fn dtable_start(&self) -> GuestAddress {
            self.dtable.first().unwrap().start()
        }

        pub fn avail_start(&self) -> GuestAddress {
            self.avail.flags.location
        }

        pub fn used_start(&self) -> GuestAddress {
            self.used.flags.location
        }

        // Creates a new Queue, using the underlying memory regions represented by the VirtQueue.
        pub fn create_queue(&self) -> Queue {
            let mut q = Queue::new(self.size()).unwrap();

            q.set_size(self.size());
            q.set_ready(true);
            q.try_set_desc_table_address(self.dtable_start()).unwrap();
            q.try_set_avail_ring_address(self.avail_start()).unwrap();
            q.try_set_used_ring_address(self.used_start()).unwrap();

            q
        }

        pub fn start(&self) -> GuestAddress {
            self.dtable_start()
        }

        pub fn end(&self) -> GuestAddress {
            self.used.end()
        }
    }
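
    // Illustrative sketch (not part of the original file): a hypothetical test
    // showing how these helpers are typically combined. The guest memory span
    // and queue size below are arbitrary.
    #[cfg(test)]
    mod usage_sketch {
        use super::*;

        #[test]
        fn builds_a_queue_from_guest_memory() {
            // Back a 64 KiB guest memory region starting at address 0.
            let mem = GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x1_0000)]).unwrap();

            // Lay out a 16-entry descriptor table, available ring and used ring.
            let vq = VirtQueue::new(GuestAddress(0), &mem, 16);

            // Build a virtio-queue Queue pointing at those rings; with the new
            // QueueT API, validation takes the guest memory handle explicitly.
            let queue = vq.create_queue();
            assert!(queue.is_valid(&mem));
        }
    }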
}