// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause

//! Implements virtio devices, queues, and transport mechanisms.

#[macro_use]
extern crate event_monitor;
#[macro_use]
extern crate log;

use serde::{Deserialize, Serialize};
use std::convert::TryInto;
use std::io;

#[macro_use]
mod device;
pub mod balloon;
pub mod block;
mod console;
pub mod epoll_helper;
mod iommu;
pub mod mem;
pub mod net;
mod pmem;
mod rng;
pub mod seccomp_filters;
mod thread_helper;
pub mod transport;
// vDPA is a kernel framework for devices that comply with the virtio
// specification on their datapath while keeping a vendor-specific control
// path. On the datapath, the virtqueues are handled through DMA directly
// between the hardware and the guest; the control path goes through the vDPA
// framework, eventually exposed to userspace as a vhost-vdpa device (see the
// sketch after the module declarations below).
//
// Like VFIO, vDPA aims at bare-metal performance for devices passed into a
// VM. Unlike VFIO, it makes migration much simpler: because DMA between the
// device and the guest flows through virtio queues, migration support does
// not have to be implemented by each device driver, whereas VFIO expects
// every vendor to implement the VFIO migration framework for each and every
// device. In short, vDPA enables migration of hardware devices while
// retaining bare-metal performance.
pub mod vdpa;
pub mod vhost_user;
pub mod vsock;
pub mod watchdog;
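
// A minimal, hypothetical sketch of where the vDPA control path described
// above surfaces on the host: each device bound to the vhost-vdpa bus driver
// appears as a character device. The "/dev/vhost-vdpa-0" path is an assumed
// example for a host exposing one such device; this helper is illustrative
// only and not used by the crate.
#[allow(dead_code)]
fn open_vhost_vdpa_example() -> std::io::Result<std::fs::File> {
    std::fs::OpenOptions::new()
        .read(true)
        .write(true)
        .open("/dev/vhost-vdpa-0")
}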

pub use self::balloon::*;
pub use self::block::*;
pub use self::console::*;
pub use self::device::*;
pub use self::epoll_helper::*;
pub use self::iommu::*;
pub use self::mem::*;
pub use self::net::*;
pub use self::pmem::*;
pub use self::rng::*;
pub use self::vdpa::*;
pub use self::vsock::*;
pub use self::watchdog::*;
use vm_memory::{bitmap::AtomicBitmap, GuestAddress, GuestMemory};
use vm_virtio::VirtioDeviceType;

type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>;
type GuestRegionMmap = vm_memory::GuestRegionMmap<AtomicBitmap>;
type MmapRegion = vm_memory::MmapRegion<AtomicBitmap>;

// Device status values matching the virtio specification's device status
// field. DEVICE_INIT (0) is the reset state; the remaining values are flags
// the driver sets as it initializes the device.
const DEVICE_INIT: u32 = 0x00;
const DEVICE_ACKNOWLEDGE: u32 = 0x01;
const DEVICE_DRIVER: u32 = 0x02;
const DEVICE_DRIVER_OK: u32 = 0x04;
const DEVICE_FEATURES_OK: u32 = 0x08;
const DEVICE_FAILED: u32 = 0x80;
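
// A minimal sketch of how these bits compose during the driver handshake
// (per the virtio spec: ACKNOWLEDGE, then DRIVER, then FEATURES_OK, then
// DRIVER_OK, with FAILED aborting at any point). `is_driver_ready` is a
// hypothetical helper shown only for illustration.
#[allow(dead_code)]
fn is_driver_ready(status: u32) -> bool {
    let ready = DEVICE_ACKNOWLEDGE | DEVICE_DRIVER | DEVICE_FEATURES_OK | DEVICE_DRIVER_OK;
    status & ready == ready && status & DEVICE_FAILED == 0
}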

// Common virtio feature bits. These are bit *positions* within the 64-bit
// feature space, not masks; positions 32 and above live in the second
// 32-bit feature word on the transport.
const VIRTIO_F_RING_INDIRECT_DESC: u32 = 28;
const VIRTIO_F_RING_EVENT_IDX: u32 = 29;
const VIRTIO_F_VERSION_1: u32 = 32;
const VIRTIO_F_IOMMU_PLATFORM: u32 = 33;
const VIRTIO_F_IN_ORDER: u32 = 35;
const VIRTIO_F_ORDER_PLATFORM: u32 = 36;
#[allow(dead_code)]
const VIRTIO_F_SR_IOV: u32 = 37;
const VIRTIO_F_NOTIFICATION_DATA: u32 = 38;
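
// A hedged sketch of feature negotiation using these positions: the device
// offers a 64-bit feature set and keeps only the bits the driver also acked.
// `negotiate_features` is a hypothetical helper for illustration.
#[allow(dead_code)]
fn negotiate_features(device_features: u64, driver_features: u64) -> u64 {
    // e.g. device_features = (1u64 << VIRTIO_F_VERSION_1) | (1u64 << VIRTIO_F_RING_EVENT_IDX)
    device_features & driver_features
}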

#[derive(Debug)]
pub enum ActivateError {
    /// Failed to manipulate the epoll instance
    EpollCtl(std::io::Error),
    /// Device activation failed
    BadActivate,
    /// Queue number is not correct
    BadQueueNum,
    /// Failed to clone Kill event fd
    CloneKillEventFd,
    /// Failed to clone exit event fd
    CloneExitEventFd(std::io::Error),
    /// Failed to spawn thread
    ThreadSpawn(std::io::Error),
    /// Failed to create Vhost-user interrupt eventfd
    VhostIrqCreate,
    /// Failed to set up the vhost-user-fs daemon
    VhostUserFsSetup(vhost_user::Error),
    /// Failed to set up the vhost-user-net daemon
    VhostUserNetSetup(vhost_user::Error),
    /// Failed to set up the vhost-user-blk daemon
    VhostUserBlkSetup(vhost_user::Error),
    /// Failed to reset the vhost-user daemon
    VhostUserReset(vhost_user::Error),
    /// Cannot create seccomp filter
    CreateSeccompFilter(seccompiler::Error),
    /// Cannot create rate limiter
    CreateRateLimiter(std::io::Error),
    /// Failed activating the vDPA device
    ActivateVdpa(vdpa::Error),
}

pub type ActivateResult = std::result::Result<(), ActivateError>;

pub type DeviceEventT = u16;

#[derive(Debug)]
pub enum Error {
    /// Failed to signal that the used queue was filled
    FailedSignalingUsedQueue(io::Error),
    /// Generic I/O error
    IoError(io::Error),
    /// Failed to update memory mappings of a vDPA device
    VdpaUpdateMemory(vdpa::Error),
    /// Failed to update memory mappings of a vhost-user device
    VhostUserUpdateMemory(vhost_user::Error),
    /// Failed to add a memory region to a vhost-user device
    VhostUserAddMemoryRegion(vhost_user::Error),
    /// Setting shared memory regions is not supported by this device
    SetShmRegionsNotSupported,
    /// Error from the network queue pair
    NetQueuePair(::net_util::NetQueuePairError),
    /// Failed to apply the seccomp filter
    ApplySeccompFilter(seccompiler::Error),
    /// Failed to add a used descriptor to the virtio queue
    QueueAddUsed(virtio_queue::Error),
    /// Failed to iterate over the virtio queue
    QueueIterator(virtio_queue::Error),
}

#[derive(Clone, Copy, Debug, Default, Deserialize, Serialize, PartialEq, Eq)]
pub struct TokenBucketConfig {
    /// Bucket capacity: bytes for a bandwidth bucket, operations for an ops
    /// bucket. For example, size = 1 MiB with refill_time = 100 ms sustains
    /// roughly 10 MiB/s.
    pub size: u64,
    /// Extra initial credit, spent once before steady-state limiting starts
    pub one_time_burst: Option<u64>,
    /// Time window, in milliseconds, over which `size` tokens are replenished
    pub refill_time: u64,
}

#[derive(Clone, Copy, Debug, Default, Deserialize, Serialize, PartialEq, Eq)]
#[serde(deny_unknown_fields)]
pub struct RateLimiterConfig {
    /// Optional bandwidth (bytes) token bucket
    pub bandwidth: Option<TokenBucketConfig>,
    /// Optional operations token bucket
    pub ops: Option<TokenBucketConfig>,
}

impl TryInto<rate_limiter::RateLimiter> for RateLimiterConfig {
    type Error = io::Error;

    fn try_into(self) -> std::result::Result<rate_limiter::RateLimiter, Self::Error> {
        let bw = self.bandwidth.unwrap_or_default();
        let ops = self.ops.unwrap_or_default();
        rate_limiter::RateLimiter::new(
            bw.size,
            bw.one_time_burst.unwrap_or(0),
            bw.refill_time,
            ops.size,
            ops.one_time_burst.unwrap_or(0),
            ops.refill_time,
        )
    }
}
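
// A hedged usage sketch of the conversion above; the numbers are
// illustrative only. A 1 MiB bucket refilled every 100 ms caps sustained
// bandwidth at roughly 10 MiB/s, with no ops limit.
#[allow(dead_code)]
fn example_rate_limiter() -> io::Result<rate_limiter::RateLimiter> {
    let config = RateLimiterConfig {
        bandwidth: Some(TokenBucketConfig {
            size: 1024 * 1024,    // bucket capacity in bytes
            one_time_burst: None, // no extra start-up credit
            refill_time: 100,     // milliseconds per refill window
        }),
        ops: None, // unlimited operations per second
    };
    config.try_into()
}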

/// Convert an absolute address within an address space (GuestMemory) into a
/// host pointer, verifying that the provided size defines a valid range
/// within a single memory region.
/// Return None if the range is out of bounds or spans more than one region.
pub fn get_host_address_range<M: GuestMemory>(
    mem: &M,
    addr: GuestAddress,
    size: usize,
) -> Option<*mut u8> {
    if mem.check_range(addr, size) {
        Some(mem.get_host_address(addr).unwrap())
    } else {
        None
    }
}
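
// A hedged usage sketch under an assumed single 4 KiB region at guest
// address 0x1000: a range inside the region translates, while a range
// crossing its end does not. `from_ranges` is the vm-memory constructor for
// an mmap-backed guest address space.
#[cfg(test)]
mod get_host_address_range_example {
    use super::*;

    #[test]
    fn translate_range() {
        let mem = GuestMemoryMmap::from_ranges(&[(GuestAddress(0x1000), 0x1000)]).unwrap();
        // 0x1800..0x1900 lies inside the region: translation succeeds.
        assert!(get_host_address_range(&mem, GuestAddress(0x1800), 0x100).is_some());
        // 0x1f00..0x2100 crosses the region boundary: translation fails.
        assert!(get_host_address_range(&mem, GuestAddress(0x1f00), 0x200).is_none());
    }
}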