cloud-hypervisor/virtio-devices/src/lib.rs
Sebastien Boeuf be7c389120 virtio-devices: Add Vdpa device
vDPA is a kernel framework introduced fairly recently to handle devices
whose datapath complies with the virtio specification, while their
control path remains vendor specific. For the datapath, this means the
virtqueues are handled through DMA directly between the hardware and
the guest, while the control path goes through the vDPA framework,
which is ultimately exposed through a vhost-vdpa device.

vDPA, like VFIO, aims at achieving bare-metal performance for devices
passed through to a VM. Unlike VFIO, however, it provides a simpler
framework for supporting migration. Because the DMA accesses between
the device and the guest go through virtio queues, migration can be
achieved much more easily and does not require each device driver to
implement migration support. With VFIO, each vendor is expected to
provide its own implementation of the VFIO migration framework, which
makes things harder as it must be done for each and every device.

To summarize, the point is to support migration for hardware devices
while still achieving bare-metal performance.

Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
2022-03-18 12:28:40 +01:00


// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
//! Implements virtio devices, queues, and transport mechanisms.
#[macro_use]
extern crate event_monitor;
#[macro_use]
extern crate log;
#[macro_use]
extern crate serde_derive;
use std::convert::TryInto;
use std::io;
#[macro_use]
mod device;
pub mod balloon;
pub mod block;
mod console;
pub mod epoll_helper;
mod iommu;
pub mod mem;
pub mod net;
mod pmem;
mod rng;
pub mod seccomp_filters;
mod thread_helper;
pub mod transport;
pub mod vdpa;
pub mod vhost_user;
pub mod vsock;
pub mod watchdog;
pub use self::balloon::*;
pub use self::block::*;
pub use self::console::*;
pub use self::device::*;
pub use self::epoll_helper::*;
pub use self::iommu::*;
pub use self::mem::*;
pub use self::net::*;
pub use self::pmem::*;
pub use self::rng::*;
pub use self::vdpa::*;
pub use self::vsock::*;
pub use self::watchdog::*;
use vm_memory::{bitmap::AtomicBitmap, GuestAddress, GuestMemory};
use vm_virtio::VirtioDeviceType;
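
// Aliases for the vm-memory types used throughout this crate, pinned to the
// AtomicBitmap backend so that writes to guest memory are recorded in a dirty
// bitmap (used for dirty page tracking during migration).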
type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>;
type GuestRegionMmap = vm_memory::GuestRegionMmap<AtomicBitmap>;
type MmapRegion = vm_memory::MmapRegion<AtomicBitmap>;
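
// Device status flags as defined by the virtio specification. A driver brings
// a device up by setting DEVICE_ACKNOWLEDGE, then DEVICE_DRIVER, negotiating
// features and setting DEVICE_FEATURES_OK, and finally DEVICE_DRIVER_OK;
// DEVICE_FAILED indicates that something went wrong.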
const DEVICE_INIT: u32 = 0x00;
const DEVICE_ACKNOWLEDGE: u32 = 0x01;
const DEVICE_DRIVER: u32 = 0x02;
const DEVICE_DRIVER_OK: u32 = 0x04;
const DEVICE_FEATURES_OK: u32 = 0x08;
const DEVICE_FAILED: u32 = 0x80;
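
// Virtio feature bit positions (not masks). A device advertises a feature by
// shifting the position into its 64-bit feature word, e.g.
// `1u64 << VIRTIO_F_VERSION_1`.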
const VIRTIO_F_RING_INDIRECT_DESC: u32 = 28;
const VIRTIO_F_RING_EVENT_IDX: u32 = 29;
const VIRTIO_F_VERSION_1: u32 = 32;
const VIRTIO_F_IOMMU_PLATFORM: u32 = 33;
const VIRTIO_F_IN_ORDER: u32 = 35;
const VIRTIO_F_ORDER_PLATFORM: u32 = 36;
#[allow(dead_code)]
const VIRTIO_F_SR_IOV: u32 = 37;
const VIRTIO_F_NOTIFICATION_DATA: u32 = 38;

#[derive(Debug)]
pub enum ActivateError {
    /// Failed to perform an epoll control operation
    EpollCtl(std::io::Error),
    /// Generic failure while activating the device
    BadActivate,
    /// Queue number is not correct
    BadQueueNum,
    /// Failed to clone Kill event fd
    CloneKillEventFd,
    /// Failed to clone exit event fd
    CloneExitEventFd(std::io::Error),
    /// Failed to spawn thread
    ThreadSpawn(std::io::Error),
    /// Failed to create Vhost-user interrupt eventfd
    VhostIrqCreate,
    /// Failed to setup vhost-user-fs daemon.
    VhostUserFsSetup(vhost_user::Error),
    /// Failed to setup vhost-user-net daemon.
    VhostUserNetSetup(vhost_user::Error),
    /// Failed to setup vhost-user-blk daemon.
    VhostUserBlkSetup(vhost_user::Error),
    /// Failed to reset vhost-user daemon.
    VhostUserReset(vhost_user::Error),
    /// Cannot create seccomp filter
    CreateSeccompFilter(seccompiler::Error),
    /// Cannot create rate limiter
    CreateRateLimiter(std::io::Error),
    /// Failed activating the vDPA device
    ActivateVdpa(vdpa::Error),
}

/// Result of a device activation attempt.
pub type ActivateResult = std::result::Result<(), ActivateError>;

/// Index type for device-specific events reported to the epoll handlers.
pub type DeviceEventT = u16;

#[derive(Debug)]
pub enum Error {
    /// Failed to signal the guest that buffers were placed in the used queue
    FailedSignalingUsedQueue(io::Error),
    /// Generic I/O error
    IoError(io::Error),
    /// Failed to update the memory mappings of a vDPA device
    VdpaUpdateMemory(vdpa::Error),
    /// Failed to update the memory mappings of a vhost-user device
    VhostUserUpdateMemory(vhost_user::Error),
    /// Failed to add a memory region to a vhost-user device
    VhostUserAddMemoryRegion(vhost_user::Error),
    /// Setting shared memory regions is not supported by this device
    SetShmRegionsNotSupported,
    /// Error coming from the network queue pair handling
    NetQueuePair(::net_util::NetQueuePairError),
    /// Failed to apply the seccomp filter
    ApplySeccompFilter(seccompiler::Error),
    /// Failed to add a used descriptor to the virtio queue
    QueueAddUsed(virtio_queue::Error),
    /// Failed to iterate over the virtio queue
    QueueIterator(virtio_queue::Error),
}

/// Configuration of a single token bucket used for rate limiting.
#[derive(Clone, Copy, Debug, Default, Deserialize, Serialize, PartialEq)]
pub struct TokenBucketConfig {
    /// Bucket capacity, in bytes or operations depending on how it is used
    pub size: u64,
    /// Optional initial extra credit that is consumed once and not replenished
    pub one_time_burst: Option<u64>,
    /// Time, in milliseconds, for the bucket to refill completely
    pub refill_time: u64,
}

/// Rate limiter configuration combining an optional bandwidth bucket with an
/// optional operations bucket.
#[derive(Clone, Copy, Debug, Default, Deserialize, Serialize, PartialEq)]
#[serde(deny_unknown_fields)]
pub struct RateLimiterConfig {
    pub bandwidth: Option<TokenBucketConfig>,
    pub ops: Option<TokenBucketConfig>,
}

impl TryInto<rate_limiter::RateLimiter> for RateLimiterConfig {
    type Error = io::Error;

    fn try_into(self) -> std::result::Result<rate_limiter::RateLimiter, Self::Error> {
        let bw = self.bandwidth.unwrap_or_default();
        let ops = self.ops.unwrap_or_default();
        rate_limiter::RateLimiter::new(
            bw.size,
            bw.one_time_burst.unwrap_or(0),
            bw.refill_time,
            ops.size,
            ops.one_time_burst.unwrap_or(0),
            ops.refill_time,
        )
    }
}
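
// A minimal usage sketch for the conversion above: the bucket values are
// arbitrary example numbers, and refill_time is assumed to be expressed in
// milliseconds, matching the rate_limiter crate used here.
#[cfg(test)]
mod rate_limiter_config_example {
    use super::*;
    use std::convert::TryInto;

    #[test]
    fn build_rate_limiter_from_config() {
        // 1 MiB bandwidth bucket refilled every 100 ms, no ops limit.
        let config = RateLimiterConfig {
            bandwidth: Some(TokenBucketConfig {
                size: 1 << 20,
                one_time_burst: None,
                refill_time: 100,
            }),
            ops: None,
        };

        let _limiter: rate_limiter::RateLimiter = config
            .try_into()
            .expect("failed to build a rate limiter from the config");
    }
}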

/// Convert an absolute address within an address space (GuestMemory)
/// into a host pointer, verifying that the provided size defines a valid
/// range within a single memory region.
/// Return None if the range is out of bounds or spans more than one region.
pub fn get_host_address_range<M: GuestMemory>(
    mem: &M,
    addr: GuestAddress,
    size: usize,
) -> Option<*mut u8> {
    if mem.check_range(addr, size) {
        Some(mem.get_host_address(addr).unwrap())
    } else {
        None
    }
}
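
// A minimal usage sketch for get_host_address_range(), assuming a single
// anonymous 4 KiB guest memory region; the addresses and sizes are arbitrary
// example values.
#[cfg(test)]
mod get_host_address_range_example {
    use super::*;

    #[test]
    fn resolve_guest_range_to_host_pointer() {
        // One 4 KiB region starting at guest physical address 0x1000.
        let mem = GuestMemoryMmap::from_ranges(&[(GuestAddress(0x1000), 0x1000)]).unwrap();

        // A range fully contained in the region resolves to a host pointer.
        assert!(get_host_address_range(&mem, GuestAddress(0x1000), 0x800).is_some());

        // A range extending past the end of guest memory returns None.
        assert!(get_host_address_range(&mem, GuestAddress(0x1800), 0x1000).is_none());
    }
}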