2019-09-04 13:55:14 +00:00
|
|
|
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
|
|
|
//
|
|
|
|
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
|
|
// found in the LICENSE-BSD-3-Clause file.
|
|
|
|
//
|
|
|
|
// Copyright © 2019 Intel Corporation
|
|
|
|
//
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
|
|
|
|
//
|
|
|
|
|
2019-11-18 23:24:31 +00:00
|
|
|
extern crate vm_device;
|
|
|
|
|
2019-12-06 16:14:32 +00:00
|
|
|
use crate::config::{ConsoleOutputMode, VmConfig};
|
2019-12-20 16:11:30 +00:00
|
|
|
use crate::memory_manager::{Error as MemoryManagerError, MemoryManager};
|
2019-09-04 13:55:14 +00:00
|
|
|
use crate::vm::VmInfo;
|
2019-12-06 16:14:32 +00:00
|
|
|
#[cfg(feature = "acpi")]
|
|
|
|
use acpi_tables::{aml, aml::Aml};
|
2019-12-31 10:49:11 +00:00
|
|
|
use arc_swap::ArcSwap;
|
2019-12-06 16:14:32 +00:00
|
|
|
use arch::layout;
|
2019-12-31 10:49:11 +00:00
|
|
|
use arch::layout::{APIC_START, IOAPIC_SIZE, IOAPIC_START};
|
2020-01-14 10:17:23 +00:00
|
|
|
use devices::{ioapic, HotPlugNotificationFlags};
|
2019-12-20 16:11:30 +00:00
|
|
|
use kvm_bindings::kvm_irq_routing_entry;
|
2019-09-04 13:55:14 +00:00
|
|
|
use kvm_ioctls::*;
|
|
|
|
use libc::O_TMPFILE;
|
2019-12-05 16:36:28 +00:00
|
|
|
use libc::TIOCGWINSZ;
|
2019-09-04 13:55:14 +00:00
|
|
|
use net_util::Tap;
|
2019-09-11 16:07:33 +00:00
|
|
|
#[cfg(feature = "pci_support")]
|
2019-09-04 13:55:14 +00:00
|
|
|
use pci::{
|
2019-10-28 16:53:13 +00:00
|
|
|
DeviceRelocation, InterruptDelivery, InterruptParameters, PciBarRegionType, PciBus,
|
2019-12-05 15:42:15 +00:00
|
|
|
PciConfigIo, PciConfigMmio, PciDevice, PciRoot,
|
2019-09-04 13:55:14 +00:00
|
|
|
};
|
|
|
|
use qcow::{self, ImageType, QcowFile};
|
2019-12-19 17:12:34 +00:00
|
|
|
use std::cmp;
|
2019-11-28 00:45:10 +00:00
|
|
|
use std::collections::HashMap;
|
2019-12-31 10:49:11 +00:00
|
|
|
use std::fs::{File, OpenOptions};
|
|
|
|
use std::io::{self, sink, stdout};
|
2019-09-04 13:55:14 +00:00
|
|
|
use std::os::unix::fs::OpenOptionsExt;
|
|
|
|
use std::os::unix::io::AsRawFd;
|
|
|
|
use std::ptr::null_mut;
|
|
|
|
use std::result;
|
2019-10-28 21:29:18 +00:00
|
|
|
#[cfg(feature = "pci_support")]
|
|
|
|
use std::sync::Weak;
|
2019-12-31 10:49:11 +00:00
|
|
|
use std::sync::{Arc, Mutex};
|
2019-09-11 16:07:33 +00:00
|
|
|
#[cfg(feature = "pci_support")]
|
2019-10-07 17:48:44 +00:00
|
|
|
use vfio::{VfioDevice, VfioDmaMapping, VfioPciDevice, VfioPciError};
|
2019-09-04 13:55:14 +00:00
|
|
|
use vm_allocator::SystemAllocator;
|
2019-11-18 23:24:31 +00:00
|
|
|
use vm_device::{Migratable, MigratableError, Pausable, Snapshotable};
|
2019-09-11 16:25:07 +00:00
|
|
|
use vm_memory::GuestAddress;
|
2019-09-04 13:55:14 +00:00
|
|
|
use vm_memory::{Address, GuestMemoryMmap, GuestUsize};
|
2019-09-11 16:07:33 +00:00
|
|
|
#[cfg(feature = "pci_support")]
|
2019-09-04 13:55:14 +00:00
|
|
|
use vm_virtio::transport::VirtioPciDevice;
|
2019-10-30 18:03:02 +00:00
|
|
|
use vm_virtio::transport::VirtioTransport;
|
2019-09-27 07:46:19 +00:00
|
|
|
use vm_virtio::vhost_user::VhostUserConfig;
|
2019-10-02 20:57:20 +00:00
|
|
|
#[cfg(feature = "pci_support")]
|
2019-10-07 16:28:03 +00:00
|
|
|
use vm_virtio::{DmaRemapping, IommuMapping, VirtioIommuRemapping};
|
|
|
|
use vm_virtio::{VirtioSharedMemory, VirtioSharedMemoryList};
|
2019-09-04 13:55:14 +00:00
|
|
|
use vmm_sys_util::eventfd::EventFd;
|
|
|
|
|
2019-09-11 16:25:07 +00:00
|
|
|
/// Size of the guest-physical window reserved for each virtio-mmio
/// transport (one 4 KiB page per device).
#[cfg(feature = "mmio_support")]
const MMIO_LEN: u64 = 0x1000;
|
|
|
|
|
2019-09-04 13:55:14 +00:00
|
|
|
/// Errors associated with device manager
#[derive(Debug)]
pub enum DeviceManagerError {
    /// Cannot create EventFd.
    EventFd(io::Error),

    /// Cannot open disk path
    Disk(io::Error),

    /// Cannot create vhost-user-net device
    CreateVhostUserNet(vm_virtio::vhost_user::Error),

    /// Cannot create virtio-blk device
    CreateVirtioBlock(io::Error),

    /// Cannot create virtio-net device
    CreateVirtioNet(vm_virtio::net::Error),

    /// Cannot create virtio-console device
    CreateVirtioConsole(io::Error),

    /// Cannot create virtio-rng device
    CreateVirtioRng(io::Error),

    /// Cannot create virtio-fs device
    CreateVirtioFs(vm_virtio::vhost_user::Error),

    /// Cannot create vhost-user-blk device
    CreateVhostUserBlk(vm_virtio::vhost_user::Error),

    /// Cannot create virtio-pmem device
    CreateVirtioPmem(io::Error),

    /// Cannot create virtio-vsock device
    CreateVirtioVsock(io::Error),

    /// Failed converting Path to &str for the virtio-vsock device.
    CreateVsockConvertPath,

    /// Cannot create virtio-vsock backend
    CreateVsockBackend(vm_virtio::vsock::VsockUnixError),

    /// Cannot create virtio-iommu device
    CreateVirtioIommu(io::Error),

    /// Failed parsing disk image format
    DetectImageType(qcow::Error),

    /// Cannot open qcow disk path
    QcowDeviceCreate(qcow::Error),

    /// Cannot open tap interface
    OpenTap(net_util::TapError),

    /// Cannot allocate IRQ.
    AllocateIrq,

    /// Cannot configure the IRQ.
    Irq(kvm_ioctls::Error),

    /// Cannot allocate PCI BARs
    #[cfg(feature = "pci_support")]
    AllocateBars(pci::PciDeviceError),

    /// Cannot register ioevent.
    RegisterIoevent(kvm_ioctls::Error),

    /// Cannot create virtio device
    VirtioDevice(vmm_sys_util::errno::Error),

    /// Cannot add PCI device
    #[cfg(feature = "pci_support")]
    AddPciDevice(pci::PciRootError),

    /// Cannot open persistent memory file
    PmemFileOpen(io::Error),

    /// Cannot set persistent memory file size
    PmemFileSetLen(io::Error),

    /// Cannot find a memory range for persistent memory
    PmemRangeAllocation,

    /// Cannot find a memory range for virtio-fs
    FsRangeAllocation,

    /// Error creating serial output file
    SerialOutputFileOpen(io::Error),

    /// Error creating console output file
    ConsoleOutputFileOpen(io::Error),

    /// Cannot create a VFIO device
    #[cfg(feature = "pci_support")]
    VfioCreate(vfio::VfioError),

    /// Cannot create a VFIO PCI device
    #[cfg(feature = "pci_support")]
    VfioPciCreate(vfio::VfioPciError),

    /// Failed to map VFIO MMIO region.
    #[cfg(feature = "pci_support")]
    VfioMapRegion(VfioPciError),

    /// Failed to create the KVM device.
    CreateKvmDevice(kvm_ioctls::Error),

    /// Failed to memory map.
    Mmap(io::Error),

    /// Cannot add legacy device to Bus.
    BusError(devices::BusError),

    /// Failed to allocate IO port
    AllocateIOPort,

    /// Failed to make hotplug notification
    HotPlugNotification(io::Error),

    /// Error from a memory manager operation
    MemoryManager(MemoryManagerError),
}
|
|
|
|
pub type DeviceManagerResult<T> = result::Result<T, DeviceManagerError>;
|
|
|
|
|
2019-11-18 23:10:42 +00:00
|
|
|
type VirtioDeviceArc = Arc<Mutex<dyn vm_virtio::VirtioDevice>>;
|
|
|
|
|
2019-09-04 13:55:14 +00:00
|
|
|
// Interrupt-related handles threaded through the device creation
// helpers; currently only carries a borrow of the emulated IOAPIC.
struct InterruptInfo<'a> {
    // Leading underscore: the field is kept for plumbing but not read
    // directly on every build configuration.
    _ioapic: &'a Arc<Mutex<ioapic::Ioapic>>,
}
|
|
|
|
|
|
|
|
// An interrupt line delivered by asserting a pin on the userspace
// (emulated) IOAPIC.
struct UserIoapicIrq {
    // Shared handle to the emulated IOAPIC.
    ioapic: Arc<Mutex<ioapic::Ioapic>>,
    // IOAPIC pin number asserted on delivery.
    irq: usize,
}
|
|
|
|
|
|
|
|
impl UserIoapicIrq {
|
|
|
|
fn new(ioapic: Arc<Mutex<ioapic::Ioapic>>, irq: usize) -> Self {
|
|
|
|
UserIoapicIrq { ioapic, irq }
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl devices::Interrupt for UserIoapicIrq {
|
|
|
|
fn deliver(&self) -> result::Result<(), io::Error> {
|
|
|
|
self.ioapic
|
|
|
|
.lock()
|
|
|
|
.unwrap()
|
|
|
|
.service_irq(self.irq)
|
|
|
|
.map_err(|e| {
|
|
|
|
std::io::Error::new(
|
|
|
|
std::io::ErrorKind::Other,
|
|
|
|
format!("failed to inject IRQ #{}: {:?}", self.irq, e),
|
|
|
|
)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn get_win_size() -> (u16, u16) {
|
|
|
|
#[repr(C)]
|
|
|
|
struct WS {
|
|
|
|
rows: u16,
|
|
|
|
cols: u16,
|
|
|
|
};
|
|
|
|
let ws: WS = WS {
|
|
|
|
rows: 0u16,
|
|
|
|
cols: 0u16,
|
|
|
|
};
|
|
|
|
unsafe {
|
|
|
|
libc::ioctl(0, TIOCGWINSZ, &ws);
|
|
|
|
}
|
|
|
|
|
|
|
|
(ws.cols, ws.rows)
|
|
|
|
}
|
|
|
|
|
2019-09-06 15:42:41 +00:00
|
|
|
/// Aggregates the guest's console backends: an optional legacy serial
/// port and/or an optional virtio-console input handle.
pub struct Console {
    // Serial port on 0x3f8
    serial: Option<Arc<Mutex<devices::legacy::Serial>>>,
    // Input side of the virtio-console device, when one is configured.
    console_input: Option<Arc<vm_virtio::ConsoleInput>>,
    // Whether stdin should be forwarded to the guest console
    // (derived from the VM configuration; set at construction).
    input_enabled: bool,
}
|
|
|
|
|
|
|
|
impl Console {
|
|
|
|
pub fn queue_input_bytes(&self, out: &[u8]) -> vmm_sys_util::errno::Result<()> {
|
|
|
|
if self.serial.is_some() {
|
|
|
|
self.serial
|
|
|
|
.as_ref()
|
|
|
|
.unwrap()
|
|
|
|
.lock()
|
|
|
|
.expect("Failed to process stdin event due to poisoned lock")
|
|
|
|
.queue_input_bytes(out)?;
|
|
|
|
}
|
|
|
|
|
|
|
|
if self.console_input.is_some() {
|
|
|
|
self.console_input.as_ref().unwrap().queue_input_bytes(out);
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn update_console_size(&self, cols: u16, rows: u16) {
|
|
|
|
if self.console_input.is_some() {
|
|
|
|
self.console_input
|
|
|
|
.as_ref()
|
|
|
|
.unwrap()
|
|
|
|
.update_console_size(cols, rows)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn input_enabled(&self) -> bool {
|
|
|
|
self.input_enabled
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-10-23 22:14:13 +00:00
|
|
|
/// Bundles the system resource allocator with the PIO and MMIO buses so
/// that devices can be registered — and, with PCI support, relocated —
/// from a single shared handle.
struct AddressManager {
    // Allocator for IRQs and PIO/MMIO address ranges.
    allocator: Arc<Mutex<SystemAllocator>>,
    // Port I/O bus.
    io_bus: Arc<devices::Bus>,
    // Memory-mapped I/O bus.
    mmio_bus: Arc<devices::Bus>,
    // KVM VM fd; needed to re-register ioeventfds when a PCI BAR moves.
    #[cfg(feature = "pci_support")]
    vm_fd: Arc<VmFd>,
}
|
|
|
|
|
|
|
|
#[cfg(feature = "pci_support")]
impl DeviceRelocation for AddressManager {
    /// Relocate a PCI BAR from `old_base` to `new_base`.
    ///
    /// Sequencing matters: first the allocator bookkeeping is updated
    /// (free old range, claim new one), then the corresponding bus range
    /// is moved, then any KVM ioeventfds registered on the old region
    /// are re-registered on the new one, and finally the device itself
    /// is told about the move via `PciDevice::move_bar`.
    fn move_bar(
        &self,
        old_base: u64,
        new_base: u64,
        len: u64,
        pci_dev: &mut dyn PciDevice,
        region_type: PciBarRegionType,
    ) -> std::result::Result<(), std::io::Error> {
        match region_type {
            PciBarRegionType::IORegion => {
                // Update system allocator
                self.allocator
                    .lock()
                    .unwrap()
                    .free_io_addresses(GuestAddress(old_base), len as GuestUsize);

                // Claim the new PIO range; failure here means the guest
                // programmed a BAR address we cannot honor.
                self.allocator
                    .lock()
                    .unwrap()
                    .allocate_io_addresses(Some(GuestAddress(new_base)), len as GuestUsize, None)
                    .ok_or_else(|| {
                        io::Error::new(io::ErrorKind::Other, "failed allocating new IO range")
                    })?;

                // Update PIO bus
                self.io_bus
                    .update_range(old_base, len, new_base, len)
                    .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
            }
            PciBarRegionType::Memory32BitRegion | PciBarRegionType::Memory64BitRegion => {
                // Update system allocator
                self.allocator
                    .lock()
                    .unwrap()
                    .free_mmio_addresses(GuestAddress(old_base), len as GuestUsize);

                // 32-bit BARs live in the MMIO hole below 4 GiB, 64-bit
                // BARs in the high MMIO range — different allocators.
                if region_type == PciBarRegionType::Memory32BitRegion {
                    self.allocator
                        .lock()
                        .unwrap()
                        .allocate_mmio_hole_addresses(
                            Some(GuestAddress(new_base)),
                            len as GuestUsize,
                            None,
                        )
                        .ok_or_else(|| {
                            io::Error::new(
                                io::ErrorKind::Other,
                                "failed allocating new 32 bits MMIO range",
                            )
                        })?;
                } else {
                    self.allocator
                        .lock()
                        .unwrap()
                        .allocate_mmio_addresses(
                            Some(GuestAddress(new_base)),
                            len as GuestUsize,
                            None,
                        )
                        .ok_or_else(|| {
                            io::Error::new(
                                io::ErrorKind::Other,
                                "failed allocating new 64 bits MMIO range",
                            )
                        })?;
                }

                // Update MMIO bus
                self.mmio_bus
                    .update_range(old_base, len, new_base, len)
                    .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
            }
        }

        // virtio-pci devices back their config BAR with KVM ioeventfds;
        // if the BAR being moved is that one, tear the old ioeventfds
        // down and re-register them at the new addresses.
        let any_dev = pci_dev.as_any();
        if let Some(virtio_pci_dev) = any_dev.downcast_ref::<VirtioPciDevice>() {
            let bar_addr = virtio_pci_dev.config_bar_addr();
            if bar_addr == new_base {
                // NOTE: the BAR has already been reprogrammed, so the
                // config BAR address compares against `new_base` while
                // the stale ioeventfds are looked up via `old_base`.
                for (event, addr) in virtio_pci_dev.ioeventfds(old_base) {
                    let io_addr = IoEventAddress::Mmio(addr);
                    self.vm_fd
                        .unregister_ioevent(event, &io_addr)
                        .map_err(|e| io::Error::from_raw_os_error(e.errno()))?;
                }
                for (event, addr) in virtio_pci_dev.ioeventfds(new_base) {
                    let io_addr = IoEventAddress::Mmio(addr);
                    self.vm_fd
                        .register_ioevent(event, &io_addr, NoDatamatch)
                        .map_err(|e| io::Error::from_raw_os_error(e.errno()))?;
                }
            }
        }

        // Finally let the device update its own view of the BAR.
        pci_dev.move_bar(old_base, new_base)
    }
}
|
|
|
|
|
|
|
|
/// Central owner of the VM's emulated devices and of the buses /
/// address-space bookkeeping they are attached to.
pub struct DeviceManager {
    // Manage address space related to devices
    address_manager: Arc<AddressManager>,

    // Console abstraction
    console: Arc<Console>,

    // IOAPIC
    ioapic: Option<Arc<Mutex<ioapic::Ioapic>>>,

    // mmap()ed region to unmap on drop
    mmap_regions: Vec<(*mut libc::c_void, usize)>,

    // Things to be added to the commandline (i.e. for virtio-mmio)
    cmdline_additions: Vec<String>,

    // Virtual IOMMU ID along with the list of device IDs attached to the
    // virtual IOMMU. This is useful for filling the ACPI IORT table.
    virt_iommu: Option<(u32, Vec<u32>)>,

    // ACPI GED notification device
    #[cfg(feature = "acpi")]
    ged_notification_device: Option<Arc<Mutex<devices::AcpiGEDDevice>>>,

    // VM configuration
    config: Arc<Mutex<VmConfig>>,

    // Migratable devices
    migratable_devices: Vec<Arc<Mutex<dyn Migratable>>>,

    // Memory Manager
    memory_manager: Arc<Mutex<MemoryManager>>,
}
|
|
|
|
|
|
|
|
impl DeviceManager {
|
|
|
|
    /// Build the full device topology for a new VM.
    ///
    /// Creates the PIO/MMIO buses and the IOAPIC, then the console,
    /// virtio, legacy and (optionally) ACPI devices, and finally wires
    /// everything up over either the PCI or the MMIO transport
    /// depending on compile-time features. The order is significant:
    /// the IOAPIC must exist before any device that needs an interrupt,
    /// and virtio devices are collected first so the transport-specific
    /// step can attach them all at once.
    pub fn new(
        vm_info: &VmInfo,
        allocator: Arc<Mutex<SystemAllocator>>,
        memory_manager: Arc<Mutex<MemoryManager>>,
        _exit_evt: &EventFd,
        reset_evt: &EventFd,
    ) -> DeviceManagerResult<Self> {
        let io_bus = devices::Bus::new();
        let mmio_bus = devices::Bus::new();

        // Virtio devices are gathered here along with a flag saying
        // whether each one sits behind the virtual IOMMU.
        let mut virtio_devices: Vec<(Arc<Mutex<dyn vm_virtio::VirtioDevice>>, bool)> = Vec::new();
        let mut migratable_devices: Vec<Arc<Mutex<dyn Migratable>>> = Vec::new();
        let mut mmap_regions = Vec::new();

        // Only mutated on mmio_support builds.
        #[allow(unused_mut)]
        let mut cmdline_additions = Vec::new();

        // Only set on pci_support builds with an IOMMU configured.
        #[allow(unused_mut)]
        let mut virt_iommu: Option<(u32, Vec<u32>)> = None;

        let address_manager = Arc::new(AddressManager {
            allocator,
            io_bus: Arc::new(io_bus),
            mmio_bus: Arc::new(mmio_bus),
            #[cfg(feature = "pci_support")]
            vm_fd: vm_info.vm_fd.clone(),
        });

        let ioapic = DeviceManager::add_ioapic(vm_info, &address_manager)?;
        let interrupt_info = InterruptInfo { _ioapic: &ioapic };

        let console = DeviceManager::add_console_device(
            vm_info,
            &address_manager,
            &ioapic,
            &mut virtio_devices,
        )?;

        #[cfg(any(feature = "pci_support", feature = "mmio_support"))]
        virtio_devices.append(&mut DeviceManager::make_virtio_devices(
            vm_info,
            &address_manager,
            &memory_manager,
            &mut mmap_regions,
            &mut migratable_devices,
        )?);

        DeviceManager::add_legacy_devices(
            vm_info,
            &address_manager,
            reset_evt.try_clone().map_err(DeviceManagerError::EventFd)?,
        )?;

        #[cfg(feature = "acpi")]
        let ged_notification_device = DeviceManager::add_acpi_devices(
            vm_info,
            &address_manager,
            reset_evt.try_clone().map_err(DeviceManagerError::EventFd)?,
            _exit_evt.try_clone().map_err(DeviceManagerError::EventFd)?,
            &ioapic,
        )?;

        // Attach the collected virtio devices over exactly one
        // transport; pci_support takes precedence when both are built.
        if cfg!(feature = "pci_support") {
            DeviceManager::add_pci_devices(
                vm_info,
                &address_manager,
                &memory_manager,
                &mut virt_iommu,
                virtio_devices,
                &interrupt_info,
                &mut migratable_devices,
            )?;
        } else if cfg!(feature = "mmio_support") {
            DeviceManager::add_mmio_devices(
                vm_info,
                &address_manager,
                virtio_devices,
                &interrupt_info,
                &mut cmdline_additions,
                &mut migratable_devices,
            )?;
        }

        let config = vm_info.vm_cfg.clone();

        // Reserve I/O ports 0xa00-0xa17 and map the memory manager
        // device there — presumably the ACPI memory-hotplug interface;
        // confirm against the ACPI table generation code.
        address_manager
            .allocator
            .lock()
            .unwrap()
            .allocate_io_addresses(Some(GuestAddress(0x0a00)), 0x18, None)
            .ok_or(DeviceManagerError::AllocateIOPort)?;

        address_manager
            .io_bus
            .insert(memory_manager.clone(), 0xa00, 0x18)
            .map_err(DeviceManagerError::BusError)?;

        Ok(DeviceManager {
            address_manager,
            console,
            ioapic: Some(ioapic),
            mmap_regions,
            cmdline_additions,
            virt_iommu,
            #[cfg(feature = "acpi")]
            ged_notification_device,
            config,
            migratable_devices,
            memory_manager,
        })
    }
|
|
|
|
|
2019-11-18 11:23:27 +00:00
|
|
|
    /// Attach all virtio and VFIO devices over the PCI transport.
    ///
    /// When an IOMMU is configured, devices flagged as iommu-attached
    /// get the IOMMU mapping, and the virtio-iommu device itself is
    /// added last with a pre-computed device ID. On non-pci builds this
    /// is a no-op (hence `#[allow(unused_variables)]`).
    #[allow(unused_variables)]
    fn add_pci_devices(
        vm_info: &VmInfo,
        address_manager: &Arc<AddressManager>,
        memory_manager: &Arc<Mutex<MemoryManager>>,
        virt_iommu: &mut Option<(u32, Vec<u32>)>,
        virtio_devices: Vec<(Arc<Mutex<dyn vm_virtio::VirtioDevice>>, bool)>,
        interrupt_info: &InterruptInfo,
        migratable_devices: &mut Vec<Arc<Mutex<dyn Migratable>>>,
    ) -> DeviceManagerResult<()> {
        #[cfg(feature = "pci_support")]
        {
            let pci_root = PciRoot::new(None);
            // The AddressManager doubles as the DeviceRelocation handler
            // for BAR moves; only a weak ref is handed to the bus.
            let mut pci_bus = PciBus::new(
                pci_root,
                Arc::downgrade(&address_manager) as Weak<dyn DeviceRelocation>,
            );

            // Optional virtio-iommu: the device plus the mapping that
            // iommu-attached devices will translate through.
            let (mut iommu_device, iommu_mapping) = if vm_info.vm_cfg.lock().unwrap().iommu {
                let (device, mapping) =
                    vm_virtio::Iommu::new().map_err(DeviceManagerError::CreateVirtioIommu)?;
                (Some(device), Some(mapping))
            } else {
                (None, None)
            };

            let mut iommu_attached_devices = Vec::new();

            for (device, iommu_attached) in virtio_devices {
                // Only devices explicitly flagged for the IOMMU get the
                // mapping; everyone else bypasses translation.
                let mapping: &Option<Arc<IommuMapping>> = if iommu_attached {
                    &iommu_mapping
                } else {
                    &None
                };

                let virtio_iommu_attach_dev = DeviceManager::add_virtio_pci_device(
                    device,
                    vm_info.memory,
                    &address_manager,
                    vm_info.vm_fd,
                    &mut pci_bus,
                    mapping,
                    migratable_devices,
                )?;

                if let Some(dev_id) = virtio_iommu_attach_dev {
                    iommu_attached_devices.push(dev_id);
                }
            }

            let mut vfio_iommu_device_ids = DeviceManager::add_vfio_devices(
                vm_info,
                &address_manager,
                &mut pci_bus,
                memory_manager,
                &mut iommu_device,
            )?;

            iommu_attached_devices.append(&mut vfio_iommu_device_ids);

            if let Some(iommu_device) = iommu_device {
                // We need to shift the device id since the 3 first bits
                // are dedicated to the PCI function, and we know we don't
                // do multifunction. Also, because we only support one PCI
                // bus, the bus 0, we don't need to add anything to the
                // global device ID.
                let iommu_id = pci_bus.next_device_id() << 3;

                // Because we determined the virtio-iommu b/d/f, we have to
                // add the device to the PCI topology now. Otherwise, the
                // b/d/f won't match the virtio-iommu device as expected.
                DeviceManager::add_virtio_pci_device(
                    Arc::new(Mutex::new(iommu_device)),
                    vm_info.memory,
                    &address_manager,
                    vm_info.vm_fd,
                    &mut pci_bus,
                    &None,
                    migratable_devices,
                )?;

                *virt_iommu = Some((iommu_id, iommu_attached_devices));
            }

            // Expose PCI config space through both the legacy 0xcf8/0xcfc
            // port pair and the MMCONFIG MMIO window.
            let pci_bus = Arc::new(Mutex::new(pci_bus));
            let pci_config_io = Arc::new(Mutex::new(PciConfigIo::new(pci_bus.clone())));
            address_manager
                .io_bus
                .insert(pci_config_io, 0xcf8, 0x8)
                .map_err(DeviceManagerError::BusError)?;
            let pci_config_mmio = Arc::new(Mutex::new(PciConfigMmio::new(pci_bus)));
            address_manager
                .mmio_bus
                .insert(
                    pci_config_mmio,
                    arch::layout::PCI_MMCONFIG_START.0,
                    arch::layout::PCI_MMCONFIG_SIZE,
                )
                .map_err(DeviceManagerError::BusError)?;
        }

        Ok(())
    }
|
|
|
|
|
2019-11-18 11:35:05 +00:00
|
|
|
    /// Attach all virtio devices over the virtio-mmio transport.
    ///
    /// Each device gets a MMIO_LEN-sized, MMIO_LEN-aligned window from
    /// the allocator; a failed allocation is logged and the device is
    /// skipped rather than failing the whole VM. On non-mmio builds
    /// this is a no-op (hence the allow attribute).
    #[allow(unused_variables, unused_mut)]
    fn add_mmio_devices(
        vm_info: &VmInfo,
        address_manager: &Arc<AddressManager>,
        virtio_devices: Vec<(Arc<Mutex<dyn vm_virtio::VirtioDevice>>, bool)>,
        interrupt_info: &InterruptInfo,
        mut cmdline_additions: &mut Vec<String>,
        migratable_devices: &mut Vec<Arc<Mutex<dyn Migratable>>>,
    ) -> DeviceManagerResult<()> {
        #[cfg(feature = "mmio_support")]
        {
            // The iommu-attached flag is ignored: virtio-mmio has no
            // IOMMU support here.
            for (device, _) in virtio_devices {
                let mmio_addr = address_manager
                    .allocator
                    .lock()
                    .unwrap()
                    .allocate_mmio_addresses(None, MMIO_LEN, Some(MMIO_LEN));
                if let Some(addr) = mmio_addr {
                    DeviceManager::add_virtio_mmio_device(
                        device,
                        vm_info.memory,
                        &address_manager,
                        vm_info.vm_fd,
                        &interrupt_info,
                        addr,
                        &mut cmdline_additions,
                        migratable_devices,
                    )?;
                } else {
                    error!("Unable to allocate MMIO address!");
                }
            }
        }

        Ok(())
    }
|
|
|
|
|
2019-11-19 16:15:29 +00:00
|
|
|
fn add_ioapic(
|
2019-11-18 11:02:37 +00:00
|
|
|
vm_info: &VmInfo,
|
|
|
|
address_manager: &Arc<AddressManager>,
|
2019-12-05 16:36:28 +00:00
|
|
|
) -> DeviceManagerResult<Arc<Mutex<ioapic::Ioapic>>> {
|
|
|
|
// Create IOAPIC
|
|
|
|
let ioapic = Arc::new(Mutex::new(ioapic::Ioapic::new(
|
|
|
|
vm_info.vm_fd.clone(),
|
|
|
|
APIC_START,
|
|
|
|
)));
|
2019-11-18 11:02:37 +00:00
|
|
|
|
2019-12-05 16:36:28 +00:00
|
|
|
address_manager
|
|
|
|
.mmio_bus
|
|
|
|
.insert(ioapic.clone(), IOAPIC_START.0, IOAPIC_SIZE)
|
|
|
|
.map_err(DeviceManagerError::BusError)?;
|
2019-11-18 11:02:37 +00:00
|
|
|
|
|
|
|
Ok(ioapic)
|
|
|
|
}
|
|
|
|
|
2019-11-18 10:55:04 +00:00
|
|
|
    /// Create the ACPI helper devices: the shutdown device (wired to
    /// the exit/reset eventfds) and the GED notification device, whose
    /// interrupt is delivered through the emulated IOAPIC.
    #[allow(unused_variables)]
    #[cfg(feature = "acpi")]
    fn add_acpi_devices(
        vm_info: &VmInfo,
        address_manager: &Arc<AddressManager>,
        reset_evt: EventFd,
        exit_evt: EventFd,
        ioapic: &Arc<Mutex<ioapic::Ioapic>>,
    ) -> DeviceManagerResult<Option<Arc<Mutex<devices::AcpiGEDDevice>>>> {
        let acpi_device = Arc::new(Mutex::new(devices::AcpiShutdownDevice::new(
            exit_evt, reset_evt,
        )));

        // Reserve I/O ports at 0x3c0 for the shutdown device.
        // NOTE(review): 0x8 ports are reserved but only 0x4 are mapped
        // on the bus below — confirm whether the extra range is
        // intentional headroom.
        address_manager
            .allocator
            .lock()
            .unwrap()
            .allocate_io_addresses(Some(GuestAddress(0x3c0)), 0x8, None)
            .ok_or(DeviceManagerError::AllocateIOPort)?;

        address_manager
            .io_bus
            .insert(acpi_device, 0x3c0, 0x4)
            .map_err(DeviceManagerError::BusError)?;

        // GED interrupt line, delivered via the userspace IOAPIC.
        let ged_irq = address_manager
            .allocator
            .lock()
            .unwrap()
            .allocate_irq()
            .unwrap();
        let interrupt: Box<dyn devices::Interrupt> =
            Box::new(UserIoapicIrq::new(ioapic.clone(), ged_irq as usize));

        let ged_device = Arc::new(Mutex::new(devices::AcpiGEDDevice::new(interrupt, ged_irq)));

        // Single I/O port at 0xb000 for the GED device.
        address_manager
            .allocator
            .lock()
            .unwrap()
            .allocate_io_addresses(Some(GuestAddress(0xb000)), 0x1, None)
            .ok_or(DeviceManagerError::AllocateIOPort)?;

        address_manager
            .io_bus
            .insert(ged_device.clone(), 0xb000, 0x1)
            .map_err(DeviceManagerError::BusError)?;
        Ok(Some(ged_device))
    }
|
|
|
|
|
2019-11-18 10:44:01 +00:00
|
|
|
fn add_legacy_devices(
|
|
|
|
_vm_info: &VmInfo,
|
|
|
|
address_manager: &Arc<AddressManager>,
|
|
|
|
reset_evt: EventFd,
|
|
|
|
) -> DeviceManagerResult<()> {
|
|
|
|
// Add a shutdown device (i8042)
|
|
|
|
let i8042 = Arc::new(Mutex::new(devices::legacy::I8042Device::new(reset_evt)));
|
|
|
|
|
|
|
|
address_manager
|
|
|
|
.io_bus
|
vmm: device_manager: Remove redundant clones
Address updated clippy errors:
error: redundant clone
--> vmm/src/device_manager.rs:699:32
|
699 | .insert(acpi_device.clone(), 0x3c0, 0x4)
| ^^^^^^^^ help: remove this
|
= note: `-D clippy::redundant-clone` implied by `-D warnings`
note: this value is dropped without further use
--> vmm/src/device_manager.rs:699:21
|
699 | .insert(acpi_device.clone(), 0x3c0, 0x4)
| ^^^^^^^^^^^
= help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#redundant_clone
error: redundant clone
--> vmm/src/device_manager.rs:737:26
|
737 | .insert(i8042.clone(), 0x61, 0x4)
| ^^^^^^^^ help: remove this
|
note: this value is dropped without further use
--> vmm/src/device_manager.rs:737:21
|
737 | .insert(i8042.clone(), 0x61, 0x4)
| ^^^^^
= help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#redundant_clone
error: redundant clone
--> vmm/src/device_manager.rs:754:29
|
754 | .insert(cmos.clone(), 0x70, 0x2)
| ^^^^^^^^ help: remove this
|
note: this value is dropped without further use
--> vmm/src/device_manager.rs:754:25
|
754 | .insert(cmos.clone(), 0x70, 0x2)
| ^^^^
= help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#redundant_clone
Signed-off-by: Rob Bradford <robert.bradford@intel.com>
2019-12-19 17:02:36 +00:00
|
|
|
.insert(i8042, 0x61, 0x4)
|
2019-11-18 10:44:01 +00:00
|
|
|
.map_err(DeviceManagerError::BusError)?;
|
|
|
|
#[cfg(feature = "cmos")]
|
|
|
|
{
|
|
|
|
// Add a CMOS emulated device
|
|
|
|
use vm_memory::GuestMemory;
|
2019-12-31 10:49:11 +00:00
|
|
|
let mem_size = _vm_info.memory.load().end_addr().0 + 1;
|
2019-11-18 10:44:01 +00:00
|
|
|
let mem_below_4g = std::cmp::min(arch::layout::MEM_32BIT_RESERVED_START.0, mem_size);
|
|
|
|
let mem_above_4g = mem_size.saturating_sub(arch::layout::RAM_64BIT_START.0);
|
|
|
|
|
|
|
|
let cmos = Arc::new(Mutex::new(devices::legacy::Cmos::new(
|
|
|
|
mem_below_4g,
|
|
|
|
mem_above_4g,
|
|
|
|
)));
|
|
|
|
|
|
|
|
address_manager
|
|
|
|
.io_bus
|
vmm: device_manager: Remove redundant clones
Address updated clippy errors:
error: redundant clone
--> vmm/src/device_manager.rs:699:32
|
699 | .insert(acpi_device.clone(), 0x3c0, 0x4)
| ^^^^^^^^ help: remove this
|
= note: `-D clippy::redundant-clone` implied by `-D warnings`
note: this value is dropped without further use
--> vmm/src/device_manager.rs:699:21
|
699 | .insert(acpi_device.clone(), 0x3c0, 0x4)
| ^^^^^^^^^^^
= help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#redundant_clone
error: redundant clone
--> vmm/src/device_manager.rs:737:26
|
737 | .insert(i8042.clone(), 0x61, 0x4)
| ^^^^^^^^ help: remove this
|
note: this value is dropped without further use
--> vmm/src/device_manager.rs:737:21
|
737 | .insert(i8042.clone(), 0x61, 0x4)
| ^^^^^
= help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#redundant_clone
error: redundant clone
--> vmm/src/device_manager.rs:754:29
|
754 | .insert(cmos.clone(), 0x70, 0x2)
| ^^^^^^^^ help: remove this
|
note: this value is dropped without further use
--> vmm/src/device_manager.rs:754:25
|
754 | .insert(cmos.clone(), 0x70, 0x2)
| ^^^^
= help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#redundant_clone
Signed-off-by: Rob Bradford <robert.bradford@intel.com>
2019-12-19 17:02:36 +00:00
|
|
|
.insert(cmos, 0x70, 0x2)
|
2019-11-18 10:44:01 +00:00
|
|
|
.map_err(DeviceManagerError::BusError)?;
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2019-11-19 16:15:29 +00:00
|
|
|
fn add_console_device(
|
2019-11-18 10:21:37 +00:00
|
|
|
vm_info: &VmInfo,
|
|
|
|
address_manager: &Arc<AddressManager>,
|
2019-12-05 16:36:28 +00:00
|
|
|
ioapic: &Arc<Mutex<ioapic::Ioapic>>,
|
2019-11-18 23:10:42 +00:00
|
|
|
virtio_devices: &mut Vec<(Arc<Mutex<dyn vm_virtio::VirtioDevice>>, bool)>,
|
2019-11-18 10:21:37 +00:00
|
|
|
) -> DeviceManagerResult<Arc<Console>> {
|
2019-12-05 14:50:38 +00:00
|
|
|
let serial_config = vm_info.vm_cfg.lock().unwrap().serial.clone();
|
|
|
|
let serial_writer: Option<Box<dyn io::Write + Send>> = match serial_config.mode {
|
2019-11-18 10:21:37 +00:00
|
|
|
ConsoleOutputMode::File => Some(Box::new(
|
2019-12-05 14:50:38 +00:00
|
|
|
File::create(serial_config.file.as_ref().unwrap())
|
2019-11-18 10:21:37 +00:00
|
|
|
.map_err(DeviceManagerError::SerialOutputFileOpen)?,
|
|
|
|
)),
|
|
|
|
ConsoleOutputMode::Tty => Some(Box::new(stdout())),
|
|
|
|
ConsoleOutputMode::Off | ConsoleOutputMode::Null => None,
|
|
|
|
};
|
2019-12-05 14:50:38 +00:00
|
|
|
let serial = if serial_config.mode != ConsoleOutputMode::Off {
|
2019-11-18 10:21:37 +00:00
|
|
|
// Serial is tied to IRQ #4
|
|
|
|
let serial_irq = 4;
|
2019-12-05 16:36:28 +00:00
|
|
|
let interrupt: Box<dyn devices::Interrupt> =
|
|
|
|
Box::new(UserIoapicIrq::new(ioapic.clone(), serial_irq));
|
2019-11-18 10:21:37 +00:00
|
|
|
|
|
|
|
let serial = Arc::new(Mutex::new(devices::legacy::Serial::new(
|
|
|
|
interrupt,
|
|
|
|
serial_writer,
|
|
|
|
)));
|
|
|
|
|
|
|
|
address_manager
|
|
|
|
.allocator
|
|
|
|
.lock()
|
|
|
|
.unwrap()
|
|
|
|
.allocate_io_addresses(Some(GuestAddress(0x3f8)), 0x8, None)
|
|
|
|
.ok_or(DeviceManagerError::AllocateIOPort)?;
|
|
|
|
|
|
|
|
address_manager
|
|
|
|
.io_bus
|
|
|
|
.insert(serial.clone(), 0x3f8, 0x8)
|
|
|
|
.map_err(DeviceManagerError::BusError)?;
|
|
|
|
|
|
|
|
Some(serial)
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
};
|
|
|
|
|
|
|
|
// Create serial and virtio-console
|
2019-12-05 14:50:38 +00:00
|
|
|
let console_config = vm_info.vm_cfg.lock().unwrap().console.clone();
|
|
|
|
let console_writer: Option<Box<dyn io::Write + Send + Sync>> = match console_config.mode {
|
|
|
|
ConsoleOutputMode::File => Some(Box::new(
|
|
|
|
File::create(
|
|
|
|
vm_info
|
|
|
|
.vm_cfg
|
|
|
|
.lock()
|
|
|
|
.unwrap()
|
|
|
|
.console
|
|
|
|
.file
|
|
|
|
.as_ref()
|
|
|
|
.unwrap(),
|
|
|
|
)
|
|
|
|
.map_err(DeviceManagerError::ConsoleOutputFileOpen)?,
|
|
|
|
)),
|
|
|
|
ConsoleOutputMode::Tty => Some(Box::new(stdout())),
|
|
|
|
ConsoleOutputMode::Null => Some(Box::new(sink())),
|
|
|
|
ConsoleOutputMode::Off => None,
|
|
|
|
};
|
2019-11-18 10:21:37 +00:00
|
|
|
let (col, row) = get_win_size();
|
|
|
|
let console_input = if let Some(writer) = console_writer {
|
|
|
|
let (virtio_console_device, console_input) =
|
2019-12-05 14:50:38 +00:00
|
|
|
vm_virtio::Console::new(writer, col, row, console_config.iommu)
|
2019-11-18 10:21:37 +00:00
|
|
|
.map_err(DeviceManagerError::CreateVirtioConsole)?;
|
|
|
|
virtio_devices.push((
|
2019-11-18 23:10:42 +00:00
|
|
|
Arc::new(Mutex::new(virtio_console_device))
|
|
|
|
as Arc<Mutex<dyn vm_virtio::VirtioDevice>>,
|
2019-11-18 10:21:37 +00:00
|
|
|
false,
|
|
|
|
));
|
|
|
|
Some(console_input)
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
};
|
|
|
|
|
|
|
|
Ok(Arc::new(Console {
|
|
|
|
serial,
|
|
|
|
console_input,
|
2019-12-05 14:50:38 +00:00
|
|
|
input_enabled: serial_config.mode.input_enabled()
|
|
|
|
|| console_config.mode.input_enabled(),
|
2019-11-18 10:21:37 +00:00
|
|
|
}))
|
|
|
|
}
|
|
|
|
|
2019-09-10 16:14:16 +00:00
|
|
|
/// Builds the full list of virtio devices described by the VM
/// configuration, in a fixed order: block, net, rng, fs, pmem,
/// vhost-user-net, vhost-user-blk, vsock.
///
/// Each tuple pairs the device with a flag indicating whether it must sit
/// behind the virtio-iommu. Devices that need guest address space (fs,
/// pmem) also receive the allocator, the memory manager, and the list of
/// host mmap regions to record their mappings in. Migratable devices are
/// appended to `migratable_devices` by the helpers.
fn make_virtio_devices(
    vm_info: &VmInfo,
    address_manager: &Arc<AddressManager>,
    memory_manager: &Arc<Mutex<MemoryManager>>,
    mmap_regions: &mut Vec<(*mut libc::c_void, usize)>,
    migratable_devices: &mut Vec<Arc<Mutex<dyn Migratable>>>,
) -> DeviceManagerResult<Vec<(VirtioDeviceArc, bool)>> {
    // Hold the allocator lock once for all helpers that need it.
    let mut allocator = address_manager.allocator.lock().unwrap();
    let mut devices: Vec<(Arc<Mutex<dyn vm_virtio::VirtioDevice>>, bool)> = Vec::new();

    // Create "standard" virtio devices (net/block/rng)
    devices.append(&mut DeviceManager::make_virtio_block_devices(
        vm_info,
        migratable_devices,
    )?);
    devices.append(&mut DeviceManager::make_virtio_net_devices(
        vm_info,
        migratable_devices,
    )?);
    devices.append(&mut DeviceManager::make_virtio_rng_devices(
        vm_info,
        migratable_devices,
    )?);

    // Add virtio-fs if required
    devices.append(&mut DeviceManager::make_virtio_fs_devices(
        vm_info,
        &mut allocator,
        memory_manager,
        mmap_regions,
        migratable_devices,
    )?);

    // Add virtio-pmem if required
    devices.append(&mut DeviceManager::make_virtio_pmem_devices(
        vm_info,
        &mut allocator,
        memory_manager,
        mmap_regions,
        migratable_devices,
    )?);

    // Add virtio-vhost-user-net if required
    devices.append(&mut DeviceManager::make_virtio_vhost_user_net_devices(
        vm_info,
        migratable_devices,
    )?);

    // Add virtio-vhost-user-blk if required
    devices.append(&mut DeviceManager::make_virtio_vhost_user_blk_devices(
        vm_info,
        migratable_devices,
    )?);

    // Add virtio-vsock if required
    devices.append(&mut DeviceManager::make_virtio_vsock_devices(
        vm_info,
        migratable_devices,
    )?);

    Ok(devices)
}
|
|
|
|
|
2019-09-10 16:14:16 +00:00
|
|
|
/// Creates one virtio-block device per disk entry in the VM configuration.
///
/// The backing file format (raw vs qcow2) is probed from the file contents,
/// so both image types are handled transparently. The returned flag in each
/// tuple mirrors `disk_cfg.iommu` (whether the device goes behind the
/// virtio-iommu).
fn make_virtio_block_devices(
    vm_info: &VmInfo,
    migratable_devices: &mut Vec<Arc<Mutex<dyn Migratable>>>,
) -> DeviceManagerResult<Vec<(VirtioDeviceArc, bool)>> {
    let mut devices = Vec::new();

    if let Some(disk_list_cfg) = &vm_info.vm_cfg.lock().unwrap().disks {
        for disk_cfg in disk_list_cfg.iter() {
            // Open block device path
            let raw_img: File = OpenOptions::new()
                .read(true)
                .write(true)
                .open(&disk_cfg.path)
                .map_err(DeviceManagerError::Disk)?;

            // Probe the image header to decide raw vs qcow2.
            let image_type = qcow::detect_image_type(&raw_img)
                .map_err(DeviceManagerError::DetectImageType)?;
            match image_type {
                ImageType::Raw => {
                    let raw_img = vm_virtio::RawFile::new(raw_img);
                    // NOTE(review): the `false` argument appears to be a
                    // read-only flag (disks opened read-write above) —
                    // confirm against vm_virtio::Block::new.
                    let dev = vm_virtio::Block::new(
                        raw_img,
                        disk_cfg.path.clone(),
                        false,
                        disk_cfg.iommu,
                    )
                    .map_err(DeviceManagerError::CreateVirtioBlock)?;

                    let block = Arc::new(Mutex::new(dev));

                    devices.push((
                        Arc::clone(&block) as Arc<Mutex<dyn vm_virtio::VirtioDevice>>,
                        disk_cfg.iommu,
                    ));
                    // Same device is also tracked for migration.
                    migratable_devices.push(Arc::clone(&block) as Arc<Mutex<dyn Migratable>>);
                }
                ImageType::Qcow2 => {
                    let qcow_img = QcowFile::from(raw_img)
                        .map_err(DeviceManagerError::QcowDeviceCreate)?;
                    let dev = vm_virtio::Block::new(
                        qcow_img,
                        disk_cfg.path.clone(),
                        false,
                        disk_cfg.iommu,
                    )
                    .map_err(DeviceManagerError::CreateVirtioBlock)?;

                    let block = Arc::new(Mutex::new(dev));

                    devices.push((
                        Arc::clone(&block) as Arc<Mutex<dyn vm_virtio::VirtioDevice>>,
                        disk_cfg.iommu,
                    ));
                    migratable_devices.push(Arc::clone(&block) as Arc<Mutex<dyn Migratable>>);
                }
            };
        }
    }

    Ok(devices)
}
|
|
|
|
|
2019-09-10 16:14:16 +00:00
|
|
|
fn make_virtio_net_devices(
|
2019-09-04 13:55:14 +00:00
|
|
|
vm_info: &VmInfo,
|
2019-11-19 00:53:23 +00:00
|
|
|
migratable_devices: &mut Vec<Arc<Mutex<dyn Migratable>>>,
|
2019-11-18 23:10:42 +00:00
|
|
|
) -> DeviceManagerResult<Vec<(VirtioDeviceArc, bool)>> {
|
2019-09-10 16:14:16 +00:00
|
|
|
let mut devices = Vec::new();
|
|
|
|
|
2019-09-04 13:55:14 +00:00
|
|
|
// Add virtio-net if required
|
2019-12-05 14:50:38 +00:00
|
|
|
if let Some(net_list_cfg) = &vm_info.vm_cfg.lock().unwrap().net {
|
2019-09-04 13:55:14 +00:00
|
|
|
for net_cfg in net_list_cfg.iter() {
|
2019-09-23 17:14:35 +00:00
|
|
|
let virtio_net_device = if let Some(ref tap_if_name) = net_cfg.tap {
|
2019-09-04 13:55:14 +00:00
|
|
|
let tap = Tap::open_named(tap_if_name).map_err(DeviceManagerError::OpenTap)?;
|
2019-11-18 23:10:42 +00:00
|
|
|
Arc::new(Mutex::new(
|
|
|
|
vm_virtio::Net::new_with_tap(tap, Some(&net_cfg.mac), net_cfg.iommu)
|
|
|
|
.map_err(DeviceManagerError::CreateVirtioNet)?,
|
|
|
|
))
|
2019-09-04 13:55:14 +00:00
|
|
|
} else {
|
2019-11-18 23:10:42 +00:00
|
|
|
Arc::new(Mutex::new(
|
|
|
|
vm_virtio::Net::new(
|
|
|
|
net_cfg.ip,
|
|
|
|
net_cfg.mask,
|
|
|
|
Some(&net_cfg.mac),
|
|
|
|
net_cfg.iommu,
|
|
|
|
)
|
|
|
|
.map_err(DeviceManagerError::CreateVirtioNet)?,
|
|
|
|
))
|
2019-09-10 16:14:16 +00:00
|
|
|
};
|
2019-09-04 13:55:14 +00:00
|
|
|
|
2019-10-02 20:57:20 +00:00
|
|
|
devices.push((
|
2019-11-18 23:10:42 +00:00
|
|
|
Arc::clone(&virtio_net_device) as Arc<Mutex<dyn vm_virtio::VirtioDevice>>,
|
2019-10-02 20:57:20 +00:00
|
|
|
net_cfg.iommu,
|
|
|
|
));
|
2019-11-19 00:53:23 +00:00
|
|
|
migratable_devices
|
|
|
|
.push(Arc::clone(&virtio_net_device) as Arc<Mutex<dyn Migratable>>);
|
2019-09-04 13:55:14 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-09-10 16:14:16 +00:00
|
|
|
Ok(devices)
|
2019-09-04 13:55:14 +00:00
|
|
|
}
|
|
|
|
|
2019-09-10 16:14:16 +00:00
|
|
|
fn make_virtio_rng_devices(
|
2019-09-04 13:55:14 +00:00
|
|
|
vm_info: &VmInfo,
|
2019-11-19 00:53:23 +00:00
|
|
|
migratable_devices: &mut Vec<Arc<Mutex<dyn Migratable>>>,
|
2019-11-18 23:10:42 +00:00
|
|
|
) -> DeviceManagerResult<Vec<(VirtioDeviceArc, bool)>> {
|
2019-09-10 16:14:16 +00:00
|
|
|
let mut devices = Vec::new();
|
|
|
|
|
2019-09-04 13:55:14 +00:00
|
|
|
// Add virtio-rng if required
|
2019-12-05 14:50:38 +00:00
|
|
|
let rng_config = vm_info.vm_cfg.lock().unwrap().rng.clone();
|
|
|
|
if let Some(rng_path) = rng_config.src.to_str() {
|
2019-11-18 23:10:42 +00:00
|
|
|
let virtio_rng_device = Arc::new(Mutex::new(
|
|
|
|
vm_virtio::Rng::new(rng_path, rng_config.iommu)
|
|
|
|
.map_err(DeviceManagerError::CreateVirtioRng)?,
|
|
|
|
));
|
2019-10-02 20:57:20 +00:00
|
|
|
devices.push((
|
2019-11-18 23:10:42 +00:00
|
|
|
Arc::clone(&virtio_rng_device) as Arc<Mutex<dyn vm_virtio::VirtioDevice>>,
|
2019-10-02 20:57:20 +00:00
|
|
|
false,
|
|
|
|
));
|
2019-11-19 00:53:23 +00:00
|
|
|
|
|
|
|
migratable_devices.push(Arc::clone(&virtio_rng_device) as Arc<Mutex<dyn Migratable>>);
|
2019-09-04 13:55:14 +00:00
|
|
|
}
|
|
|
|
|
2019-09-10 16:14:16 +00:00
|
|
|
Ok(devices)
|
2019-09-04 13:55:14 +00:00
|
|
|
}
|
|
|
|
|
2019-09-10 16:14:16 +00:00
|
|
|
/// Creates one vhost-user-fs (virtio-fs) device per `fs` entry in the VM
/// configuration.
///
/// When DAX is enabled for an entry, a 2MiB-aligned cache window of
/// `cache_size` bytes is reserved in guest MMIO space and backed by an
/// anonymous private host mapping, which is registered with the memory
/// manager and recorded in `mmap_regions` for later munmap.
fn make_virtio_fs_devices(
    vm_info: &VmInfo,
    allocator: &mut SystemAllocator,
    memory_manager: &Arc<Mutex<MemoryManager>>,
    mmap_regions: &mut Vec<(*mut libc::c_void, usize)>,
    migratable_devices: &mut Vec<Arc<Mutex<dyn Migratable>>>,
) -> DeviceManagerResult<Vec<(VirtioDeviceArc, bool)>> {
    let mut devices = Vec::new();
    // Add virtio-fs if required
    if let Some(fs_list_cfg) = &vm_info.vm_cfg.lock().unwrap().fs {
        for fs_cfg in fs_list_cfg.iter() {
            // Skip entries whose socket path is not valid UTF-8.
            if let Some(fs_sock) = fs_cfg.sock.to_str() {
                // DAX cache window: (shared memory list, host address).
                let cache: Option<(VirtioSharedMemoryList, u64)> = if fs_cfg.dax {
                    let fs_cache = fs_cfg.cache_size;
                    // The memory needs to be 2MiB aligned in order to support
                    // hugepages.
                    let fs_guest_addr = allocator
                        .allocate_mmio_addresses(
                            None,
                            fs_cache as GuestUsize,
                            Some(0x0020_0000),
                        )
                        .ok_or(DeviceManagerError::FsRangeAllocation)?;

                    // Anonymous, private, non-reserved backing for the cache
                    // window; pages are only committed when touched.
                    let addr = unsafe {
                        libc::mmap(
                            null_mut(),
                            fs_cache as usize,
                            libc::PROT_READ | libc::PROT_WRITE,
                            libc::MAP_NORESERVE | libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
                            -1,
                            0 as libc::off_t,
                        )
                    };
                    if addr == libc::MAP_FAILED {
                        return Err(DeviceManagerError::Mmap(io::Error::last_os_error()));
                    }

                    // Track the mapping so it can be unmapped on teardown.
                    mmap_regions.push((addr, fs_cache as usize));

                    // Expose the host mapping to the guest at fs_guest_addr
                    // (not mergeable: last argument false).
                    memory_manager
                        .lock()
                        .unwrap()
                        .create_userspace_mapping(
                            fs_guest_addr.raw_value(),
                            fs_cache,
                            addr as u64,
                            false,
                        )
                        .map_err(DeviceManagerError::MemoryManager)?;

                    // Single shared-memory region covering the whole cache.
                    let mut region_list = Vec::new();
                    region_list.push(VirtioSharedMemory {
                        offset: 0,
                        len: fs_cache,
                    });

                    Some((
                        VirtioSharedMemoryList {
                            addr: fs_guest_addr,
                            len: fs_cache as GuestUsize,
                            region_list,
                        },
                        addr as u64,
                    ))
                } else {
                    None
                };

                let virtio_fs_device = Arc::new(Mutex::new(
                    vm_virtio::vhost_user::Fs::new(
                        fs_sock,
                        &fs_cfg.tag,
                        fs_cfg.num_queues,
                        fs_cfg.queue_size,
                        cache,
                    )
                    .map_err(DeviceManagerError::CreateVirtioFs)?,
                ));

                // virtio-fs never sits behind the IOMMU (false).
                devices.push((
                    Arc::clone(&virtio_fs_device) as Arc<Mutex<dyn vm_virtio::VirtioDevice>>,
                    false,
                ));

                migratable_devices
                    .push(Arc::clone(&virtio_fs_device) as Arc<Mutex<dyn Migratable>>);
            }
        }
    }

    Ok(devices)
}
|
|
|
|
|
2019-09-10 16:14:16 +00:00
|
|
|
fn make_virtio_pmem_devices(
|
2019-09-04 13:55:14 +00:00
|
|
|
vm_info: &VmInfo,
|
|
|
|
allocator: &mut SystemAllocator,
|
2019-12-20 15:17:49 +00:00
|
|
|
memory_manager: &Arc<Mutex<MemoryManager>>,
|
2019-09-04 13:55:14 +00:00
|
|
|
mmap_regions: &mut Vec<(*mut libc::c_void, usize)>,
|
2019-11-19 00:53:23 +00:00
|
|
|
migratable_devices: &mut Vec<Arc<Mutex<dyn Migratable>>>,
|
2019-11-18 23:10:42 +00:00
|
|
|
) -> DeviceManagerResult<Vec<(VirtioDeviceArc, bool)>> {
|
2019-09-10 16:14:16 +00:00
|
|
|
let mut devices = Vec::new();
|
2019-09-04 13:55:14 +00:00
|
|
|
// Add virtio-pmem if required
|
2019-12-05 14:50:38 +00:00
|
|
|
if let Some(pmem_list_cfg) = &vm_info.vm_cfg.lock().unwrap().pmem {
|
2019-09-04 13:55:14 +00:00
|
|
|
for pmem_cfg in pmem_list_cfg.iter() {
|
|
|
|
let size = pmem_cfg.size;
|
|
|
|
|
|
|
|
// The memory needs to be 2MiB aligned in order to support
|
|
|
|
// hugepages.
|
|
|
|
let pmem_guest_addr = allocator
|
|
|
|
.allocate_mmio_addresses(None, size as GuestUsize, Some(0x0020_0000))
|
|
|
|
.ok_or(DeviceManagerError::PmemRangeAllocation)?;
|
|
|
|
|
|
|
|
let (custom_flags, set_len) = if pmem_cfg.file.is_dir() {
|
|
|
|
(O_TMPFILE, true)
|
|
|
|
} else {
|
|
|
|
(0, false)
|
|
|
|
};
|
|
|
|
|
|
|
|
let file = OpenOptions::new()
|
|
|
|
.read(true)
|
|
|
|
.write(true)
|
|
|
|
.custom_flags(custom_flags)
|
2019-09-23 17:27:56 +00:00
|
|
|
.open(&pmem_cfg.file)
|
2019-09-04 13:55:14 +00:00
|
|
|
.map_err(DeviceManagerError::PmemFileOpen)?;
|
|
|
|
|
|
|
|
if set_len {
|
|
|
|
file.set_len(size)
|
|
|
|
.map_err(DeviceManagerError::PmemFileSetLen)?;
|
|
|
|
}
|
|
|
|
|
|
|
|
let addr = unsafe {
|
|
|
|
libc::mmap(
|
|
|
|
null_mut(),
|
|
|
|
size as usize,
|
|
|
|
libc::PROT_READ | libc::PROT_WRITE,
|
|
|
|
libc::MAP_NORESERVE | libc::MAP_SHARED,
|
|
|
|
file.as_raw_fd(),
|
|
|
|
0 as libc::off_t,
|
|
|
|
)
|
|
|
|
};
|
|
|
|
|
|
|
|
mmap_regions.push((addr, size as usize));
|
|
|
|
|
2019-12-20 16:11:30 +00:00
|
|
|
memory_manager
|
|
|
|
.lock()
|
|
|
|
.unwrap()
|
|
|
|
.create_userspace_mapping(
|
|
|
|
pmem_guest_addr.raw_value(),
|
|
|
|
size,
|
|
|
|
addr as u64,
|
|
|
|
pmem_cfg.mergeable,
|
|
|
|
)
|
|
|
|
.map_err(DeviceManagerError::MemoryManager)?;
|
2019-11-19 23:22:20 +00:00
|
|
|
|
2019-11-18 23:10:42 +00:00
|
|
|
let virtio_pmem_device = Arc::new(Mutex::new(
|
2019-10-02 20:57:20 +00:00
|
|
|
vm_virtio::Pmem::new(file, pmem_guest_addr, size as GuestUsize, pmem_cfg.iommu)
|
2019-11-18 23:10:42 +00:00
|
|
|
.map_err(DeviceManagerError::CreateVirtioPmem)?,
|
|
|
|
));
|
2019-09-04 13:55:14 +00:00
|
|
|
|
2019-10-02 20:57:20 +00:00
|
|
|
devices.push((
|
2019-11-18 23:10:42 +00:00
|
|
|
Arc::clone(&virtio_pmem_device) as Arc<Mutex<dyn vm_virtio::VirtioDevice>>,
|
2019-10-02 20:57:20 +00:00
|
|
|
false,
|
|
|
|
));
|
2019-11-19 00:53:23 +00:00
|
|
|
|
|
|
|
migratable_devices
|
|
|
|
.push(Arc::clone(&virtio_pmem_device) as Arc<Mutex<dyn Migratable>>);
|
2019-09-04 13:55:14 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-09-10 16:14:16 +00:00
|
|
|
Ok(devices)
|
2019-09-04 13:55:14 +00:00
|
|
|
}
|
|
|
|
|
2019-09-10 16:14:16 +00:00
|
|
|
fn make_virtio_vhost_user_net_devices(
|
2019-09-04 13:55:14 +00:00
|
|
|
vm_info: &VmInfo,
|
2019-11-19 00:53:23 +00:00
|
|
|
migratable_devices: &mut Vec<Arc<Mutex<dyn Migratable>>>,
|
2019-11-18 23:10:42 +00:00
|
|
|
) -> DeviceManagerResult<Vec<(VirtioDeviceArc, bool)>> {
|
2019-09-10 16:14:16 +00:00
|
|
|
let mut devices = Vec::new();
|
2019-09-04 13:55:14 +00:00
|
|
|
// Add vhost-user-net if required
|
2019-12-05 14:50:38 +00:00
|
|
|
if let Some(vhost_user_net_list_cfg) = &vm_info.vm_cfg.lock().unwrap().vhost_user_net {
|
2019-09-04 13:55:14 +00:00
|
|
|
for vhost_user_net_cfg in vhost_user_net_list_cfg.iter() {
|
2019-09-27 07:46:19 +00:00
|
|
|
let vu_cfg = VhostUserConfig {
|
2019-12-10 15:40:32 +00:00
|
|
|
sock: vhost_user_net_cfg.sock.clone(),
|
|
|
|
num_queues: vhost_user_net_cfg.num_queues,
|
|
|
|
queue_size: vhost_user_net_cfg.queue_size,
|
2019-09-27 07:46:19 +00:00
|
|
|
};
|
2019-11-18 23:10:42 +00:00
|
|
|
let vhost_user_net_device = Arc::new(Mutex::new(
|
2019-09-27 07:46:19 +00:00
|
|
|
vm_virtio::vhost_user::Net::new(vhost_user_net_cfg.mac, vu_cfg)
|
2019-11-18 23:10:42 +00:00
|
|
|
.map_err(DeviceManagerError::CreateVhostUserNet)?,
|
|
|
|
));
|
2019-09-04 13:55:14 +00:00
|
|
|
|
2019-10-02 20:57:20 +00:00
|
|
|
devices.push((
|
2019-11-18 23:10:42 +00:00
|
|
|
Arc::clone(&vhost_user_net_device) as Arc<Mutex<dyn vm_virtio::VirtioDevice>>,
|
2019-10-02 20:57:20 +00:00
|
|
|
false,
|
|
|
|
));
|
2019-11-19 00:53:23 +00:00
|
|
|
|
|
|
|
migratable_devices
|
|
|
|
.push(Arc::clone(&vhost_user_net_device) as Arc<Mutex<dyn Migratable>>);
|
2019-09-04 13:55:14 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-09-10 16:14:16 +00:00
|
|
|
Ok(devices)
|
2019-09-04 13:55:14 +00:00
|
|
|
}
|
|
|
|
|
2019-09-11 03:16:41 +00:00
|
|
|
fn make_virtio_vhost_user_blk_devices(
|
|
|
|
vm_info: &VmInfo,
|
2019-11-19 00:53:23 +00:00
|
|
|
migratable_devices: &mut Vec<Arc<Mutex<dyn Migratable>>>,
|
2019-11-18 23:10:42 +00:00
|
|
|
) -> DeviceManagerResult<Vec<(VirtioDeviceArc, bool)>> {
|
2019-09-11 03:16:41 +00:00
|
|
|
let mut devices = Vec::new();
|
|
|
|
// Add vhost-user-blk if required
|
2019-12-05 14:50:38 +00:00
|
|
|
if let Some(vhost_user_blk_list_cfg) = &vm_info.vm_cfg.lock().unwrap().vhost_user_blk {
|
2019-09-11 03:16:41 +00:00
|
|
|
for vhost_user_blk_cfg in vhost_user_blk_list_cfg.iter() {
|
2019-09-27 07:46:19 +00:00
|
|
|
let vu_cfg = VhostUserConfig {
|
2019-12-10 15:40:32 +00:00
|
|
|
sock: vhost_user_blk_cfg.sock.clone(),
|
|
|
|
num_queues: vhost_user_blk_cfg.num_queues,
|
|
|
|
queue_size: vhost_user_blk_cfg.queue_size,
|
2019-09-27 07:46:19 +00:00
|
|
|
};
|
2019-11-18 23:10:42 +00:00
|
|
|
let vhost_user_blk_device = Arc::new(Mutex::new(
|
2019-09-27 07:46:19 +00:00
|
|
|
vm_virtio::vhost_user::Blk::new(vhost_user_blk_cfg.wce, vu_cfg)
|
2019-11-18 23:10:42 +00:00
|
|
|
.map_err(DeviceManagerError::CreateVhostUserBlk)?,
|
|
|
|
));
|
2019-09-11 03:16:41 +00:00
|
|
|
|
2019-10-02 20:57:20 +00:00
|
|
|
devices.push((
|
2019-11-18 23:10:42 +00:00
|
|
|
Arc::clone(&vhost_user_blk_device) as Arc<Mutex<dyn vm_virtio::VirtioDevice>>,
|
2019-10-02 20:57:20 +00:00
|
|
|
false,
|
|
|
|
));
|
2019-11-19 00:53:23 +00:00
|
|
|
|
|
|
|
migratable_devices
|
|
|
|
.push(Arc::clone(&vhost_user_blk_device) as Arc<Mutex<dyn Migratable>>);
|
2019-09-11 03:16:41 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(devices)
|
|
|
|
}
|
|
|
|
|
2019-09-10 16:14:16 +00:00
|
|
|
fn make_virtio_vsock_devices(
|
2019-09-04 18:14:54 +00:00
|
|
|
vm_info: &VmInfo,
|
2019-11-19 00:53:23 +00:00
|
|
|
migratable_devices: &mut Vec<Arc<Mutex<dyn Migratable>>>,
|
2019-11-18 23:10:42 +00:00
|
|
|
) -> DeviceManagerResult<Vec<(VirtioDeviceArc, bool)>> {
|
2019-09-10 16:14:16 +00:00
|
|
|
let mut devices = Vec::new();
|
2019-09-04 18:14:54 +00:00
|
|
|
// Add vsock if required
|
2019-12-05 14:50:38 +00:00
|
|
|
if let Some(vsock_list_cfg) = &vm_info.vm_cfg.lock().unwrap().vsock {
|
2019-09-04 18:14:54 +00:00
|
|
|
for vsock_cfg in vsock_list_cfg.iter() {
|
2019-09-04 21:19:16 +00:00
|
|
|
let socket_path = vsock_cfg
|
|
|
|
.sock
|
|
|
|
.to_str()
|
|
|
|
.ok_or(DeviceManagerError::CreateVsockConvertPath)?;
|
|
|
|
let backend =
|
|
|
|
vm_virtio::vsock::VsockUnixBackend::new(vsock_cfg.cid, socket_path.to_string())
|
|
|
|
.map_err(DeviceManagerError::CreateVsockBackend)?;
|
|
|
|
|
2019-11-18 23:10:42 +00:00
|
|
|
let vsock_device = Arc::new(Mutex::new(
|
|
|
|
vm_virtio::Vsock::new(vsock_cfg.cid, backend, vsock_cfg.iommu)
|
|
|
|
.map_err(DeviceManagerError::CreateVirtioVsock)?,
|
|
|
|
));
|
2019-09-04 18:14:54 +00:00
|
|
|
|
2019-10-02 20:57:20 +00:00
|
|
|
devices.push((
|
2019-11-18 23:10:42 +00:00
|
|
|
Arc::clone(&vsock_device) as Arc<Mutex<dyn vm_virtio::VirtioDevice>>,
|
2019-10-02 20:57:20 +00:00
|
|
|
false,
|
|
|
|
));
|
2019-11-19 00:53:23 +00:00
|
|
|
|
|
|
|
migratable_devices.push(Arc::clone(&vsock_device) as Arc<Mutex<dyn Migratable>>);
|
2019-09-04 18:14:54 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-09-10 16:14:16 +00:00
|
|
|
Ok(devices)
|
2019-09-04 18:14:54 +00:00
|
|
|
}
|
|
|
|
|
2019-09-16 14:04:24 +00:00
|
|
|
/// Asks KVM to instantiate a VFIO device for the given VM and returns its
/// device file descriptor.
#[cfg(feature = "pci_support")]
fn create_kvm_device(vm: &Arc<VmFd>) -> DeviceManagerResult<DeviceFd> {
    // Request a VFIO device; fd is an out-parameter and no flags are set.
    let mut device_desc = kvm_bindings::kvm_create_device {
        type_: kvm_bindings::kvm_device_type_KVM_DEV_TYPE_VFIO,
        fd: 0,
        flags: 0,
    };

    vm.create_device(&mut device_desc)
        .map_err(DeviceManagerError::CreateKvmDevice)
}
|
|
|
|
|
2019-09-11 16:07:33 +00:00
|
|
|
/// Creates a VFIO passthrough PCI device for every `devices` entry in the
/// VM configuration and plugs it into the PCI bus.
///
/// Returns the list of PCI device IDs that were attached to the
/// virtio-iommu (only entries with `iommu = true`, and only when an IOMMU
/// device exists).
#[cfg(feature = "pci_support")]
fn add_vfio_devices(
    vm_info: &VmInfo,
    address_manager: &Arc<AddressManager>,
    pci: &mut PciBus,
    memory_manager: &Arc<Mutex<MemoryManager>>,
    iommu_device: &mut Option<vm_virtio::Iommu>,
) -> DeviceManagerResult<Vec<u32>> {
    // KVM memory slots are allocated sequentially; the slot counter is
    // threaded through each device's MMIO-region mapping below.
    let mut mem_slot = memory_manager.lock().unwrap().allocate_kvm_memory_slot();
    let mut iommu_attached_device_ids = Vec::new();
    // Hold the allocator lock for the whole loop.
    let mut allocator = address_manager.allocator.lock().unwrap();

    // Create a shared list of GSI that can be shared through all VFIO
    // devices. This way, we can maintain the full list of used GSI,
    // preventing one device from overriding interrupts setting from
    // another one.
    let gsi_msi_routes: Arc<Mutex<HashMap<u32, kvm_irq_routing_entry>>> =
        Arc::new(Mutex::new(HashMap::new()));

    if let Some(device_list_cfg) = &vm_info.vm_cfg.lock().unwrap().devices {
        // Create the KVM VFIO device
        let device_fd = DeviceManager::create_kvm_device(vm_info.vm_fd)?;
        let device_fd = Arc::new(device_fd);

        for device_cfg in device_list_cfg.iter() {
            // We need to shift the device id since the 3 first bits
            // are dedicated to the PCI function, and we know we don't
            // do multifunction. Also, because we only support one PCI
            // bus, the bus 0, we don't need to add anything to the
            // global device ID.
            let device_id = pci.next_device_id() << 3;

            let vfio_device = VfioDevice::new(
                &device_cfg.path,
                device_fd.clone(),
                vm_info.memory.clone(),
                device_cfg.iommu,
            )
            .map_err(DeviceManagerError::VfioCreate)?;

            if device_cfg.iommu {
                // Only attach to the IOMMU when one was actually created.
                if let Some(iommu) = iommu_device {
                    // DMA mappings for this device go through its VFIO
                    // container.
                    let vfio_mapping = Arc::new(VfioDmaMapping::new(
                        vfio_device.get_container(),
                        Arc::clone(vm_info.memory),
                    ));

                    iommu_attached_device_ids.push(device_id);
                    iommu.add_external_mapping(device_id, vfio_mapping);
                }
            }

            let mut vfio_pci_device = VfioPciDevice::new(
                vm_info.vm_fd,
                &mut allocator,
                vfio_device,
                gsi_msi_routes.clone(),
            )
            .map_err(DeviceManagerError::VfioPciCreate)?;

            let bars = vfio_pci_device
                .allocate_bars(&mut allocator)
                .map_err(DeviceManagerError::AllocateBars)?;

            // map_mmio_regions consumes KVM slots and returns the next
            // free slot index for the following device.
            mem_slot = vfio_pci_device
                .map_mmio_regions(vm_info.vm_fd, mem_slot)
                .map_err(DeviceManagerError::VfioMapRegion)?;

            let vfio_pci_device = Arc::new(Mutex::new(vfio_pci_device));

            pci.add_device(vfio_pci_device.clone())
                .map_err(DeviceManagerError::AddPciDevice)?;

            // Register the BARs on the I/O and MMIO buses so accesses are
            // routed to this device.
            pci.register_mapping(
                vfio_pci_device.clone(),
                address_manager.io_bus.as_ref(),
                address_manager.mmio_bus.as_ref(),
                bars,
            )
            .map_err(DeviceManagerError::AddPciDevice)?;
        }
    }
    Ok(iommu_attached_device_ids)
}
|
|
|
|
|
2019-09-11 16:07:33 +00:00
|
|
|
#[cfg(feature = "pci_support")]
#[allow(clippy::too_many_arguments)]
/// Wraps `virtio_device` in a virtio-pci transport, wires it to the VM
/// (BARs, ioeventfds, MSI-X delivery callback) and places it on the PCI bus.
///
/// Returns `Ok(Some(device_id))` when the device sits behind the virtio-iommu
/// (`iommu_mapping` is `Some`), `Ok(None)` otherwise, so the caller can build
/// the iommu topology. The device is also appended to `migratable_devices`.
fn add_virtio_pci_device(
    virtio_device: Arc<Mutex<dyn vm_virtio::VirtioDevice>>,
    memory: &Arc<ArcSwap<GuestMemoryMmap>>,
    address_manager: &Arc<AddressManager>,
    vm_fd: &Arc<VmFd>,
    pci: &mut PciBus,
    iommu_mapping: &Option<Arc<IommuMapping>>,
    migratable_devices: &mut Vec<Arc<Mutex<dyn Migratable>>>,
) -> DeviceManagerResult<Option<u32>> {
    // Allows support for one MSI-X vector per queue. It also adds 1
    // as we need to take into account the dedicated vector to notify
    // about a virtio config change.
    let msix_num = (virtio_device.lock().unwrap().queue_max_sizes().len() + 1) as u16;

    // We need to shift the device id since the 3 first bits are dedicated
    // to the PCI function, and we know we don't do multifunction.
    // Also, because we only support one PCI bus, the bus 0, we don't need
    // to add anything to the global device ID.
    let dev_id = pci.next_device_id() << 3;

    // Create the callback from the implementation of the DmaRemapping
    // trait. The point with the callback is to simplify the code as we
    // know about the device ID from this point.
    let iommu_mapping_cb: Option<Arc<VirtioIommuRemapping>> =
        if let Some(mapping) = iommu_mapping {
            let mapping_clone = mapping.clone();
            Some(Arc::new(Box::new(move |addr: u64| {
                mapping_clone.translate(dev_id, addr).map_err(|e| {
                    std::io::Error::new(
                        std::io::ErrorKind::Other,
                        format!(
                            "failed to translate addr 0x{:x} for device 00:{:02x}.0 {}",
                            addr, dev_id, e
                        ),
                    )
                })
            }) as VirtioIommuRemapping))
        } else {
            None
        };

    let mut virtio_pci_device =
        VirtioPciDevice::new(memory.clone(), virtio_device, msix_num, iommu_mapping_cb)
            .map_err(DeviceManagerError::VirtioDevice)?;

    // The allocator lock is held across BAR allocation only; it is released
    // when `allocator` goes out of scope at the end of this function.
    let mut allocator = address_manager.allocator.lock().unwrap();

    let bars = virtio_pci_device
        .allocate_bars(&mut allocator)
        .map_err(DeviceManagerError::AllocateBars)?;

    // Register one ioeventfd per queue-notify doorbell inside the config BAR,
    // so guest writes trigger the eventfd without a VM exit round-trip in
    // userspace dispatch. NoDatamatch: any written value fires the event.
    let bar_addr = virtio_pci_device.config_bar_addr();
    for (event, addr) in virtio_pci_device.ioeventfds(bar_addr) {
        let io_addr = IoEventAddress::Mmio(addr);
        vm_fd
            .register_ioevent(event, &io_addr, NoDatamatch)
            .map_err(DeviceManagerError::RegisterIoevent)?;
    }

    let vm_fd_clone = vm_fd.clone();

    // MSI-X delivery callback: injects the MSI described by the table entry
    // through KVM_SIGNAL_MSI. Per KVM, a positive return means delivered,
    // zero means the interrupt was blocked (masked) by the guest.
    let msi_cb = Arc::new(Box::new(move |p: InterruptParameters| {
        if let Some(entry) = p.msix {
            use kvm_bindings::kvm_msi;
            let msi_queue = kvm_msi {
                address_lo: entry.msg_addr_lo,
                address_hi: entry.msg_addr_hi,
                data: entry.msg_data,
                flags: 0u32,
                devid: 0u32,
                pad: [0u8; 12],
            };

            return vm_fd_clone
                .signal_msi(msi_queue)
                .map_err(|e| io::Error::from_raw_os_error(e.errno()))
                .map(|ret| match ret.cmp(&0) {
                    cmp::Ordering::Greater => {
                        debug!("MSI message successfully delivered");
                    }
                    cmp::Ordering::Equal => {
                        warn!("failed to deliver MSI message, blocked by guest");
                    }
                    _ => {}
                });
        }

        // The callback is only meaningful for MSI-X; reject anything else.
        Err(std::io::Error::new(
            std::io::ErrorKind::Other,
            "missing MSI-X entry",
        ))
    }) as InterruptDelivery);

    virtio_pci_device.assign_msix(msi_cb);

    let virtio_pci_device = Arc::new(Mutex::new(virtio_pci_device));

    pci.add_device(virtio_pci_device.clone())
        .map_err(DeviceManagerError::AddPciDevice)?;

    // Map the allocated BAR ranges onto the PIO/MMIO buses so guest accesses
    // reach this device.
    pci.register_mapping(
        virtio_pci_device.clone(),
        address_manager.io_bus.as_ref(),
        address_manager.mmio_bus.as_ref(),
        bars,
    )
    .map_err(DeviceManagerError::AddPciDevice)?;

    migratable_devices.push(Arc::clone(&virtio_pci_device) as Arc<Mutex<dyn Migratable>>);

    // Only report the device id back when the device is iommu-attached.
    let ret = if iommu_mapping.is_some() {
        Some(dev_id)
    } else {
        None
    };

    Ok(ret)
}
|
|
|
|
|
2019-09-11 16:25:07 +00:00
|
|
|
#[allow(clippy::too_many_arguments)]
#[cfg(feature = "mmio_support")]
/// Wraps `virtio_device` in a virtio-mmio transport at `mmio_base`, wires it
/// to the VM (ioeventfds, IOAPIC-backed interrupt), inserts it on the MMIO
/// bus, and appends the matching `virtio_mmio.device=...` kernel cmdline hint.
fn add_virtio_mmio_device(
    virtio_device: Arc<Mutex<dyn vm_virtio::VirtioDevice>>,
    memory: &Arc<ArcSwap<GuestMemoryMmap>>,
    address_manager: &Arc<AddressManager>,
    vm_fd: &Arc<VmFd>,
    interrupt_info: &InterruptInfo,
    mmio_base: GuestAddress,
    cmdline_additions: &mut Vec<String>,
    migratable_devices: &mut Vec<Arc<Mutex<dyn Migratable>>>,
) -> DeviceManagerResult<()> {
    let mut mmio_device = vm_virtio::transport::MmioDevice::new(memory.clone(), virtio_device)
        .map_err(DeviceManagerError::VirtioDevice)?;

    // virtio-mmio uses a single shared notify register: the queue index is
    // written as data, hence the `i as u32` datamatch (unlike virtio-pci,
    // which has one doorbell address per queue).
    for (i, (event, addr)) in mmio_device.ioeventfds(mmio_base.0).iter().enumerate() {
        let io_addr = IoEventAddress::Mmio(*addr);
        vm_fd
            .register_ioevent(event, &io_addr, i as u32)
            .map_err(DeviceManagerError::RegisterIoevent)?;
    }

    let irq_num = address_manager
        .allocator
        .lock()
        .unwrap()
        .allocate_irq()
        .ok_or(DeviceManagerError::AllocateIrq)?;

    // Interrupts are delivered through the userspace IOAPIC model.
    let interrupt: Box<dyn devices::Interrupt> = Box::new(UserIoapicIrq::new(
        interrupt_info._ioapic.clone(),
        irq_num as usize,
    ));

    mmio_device.assign_interrupt(interrupt);

    let mmio_device_arc = Arc::new(Mutex::new(mmio_device));
    address_manager
        .mmio_bus
        .insert(mmio_device_arc.clone(), mmio_base.0, MMIO_LEN)
        .map_err(DeviceManagerError::BusError)?;

    // Tell the guest kernel where to probe this device (size@base:irq).
    cmdline_additions.push(format!(
        "virtio_mmio.device={}K@0x{:08x}:{}",
        MMIO_LEN / 1024,
        mmio_base.0,
        irq_num
    ));

    migratable_devices.push(Arc::clone(&mmio_device_arc) as Arc<Mutex<dyn Migratable>>);

    Ok(())
}
|
|
|
|
|
2019-10-23 21:06:13 +00:00
|
|
|
/// Returns a shared handle to the port-I/O bus owned by the address manager.
pub fn io_bus(&self) -> &Arc<devices::Bus> {
    let manager = &self.address_manager;
    &manager.io_bus
}
|
|
|
|
|
2019-10-23 21:06:13 +00:00
|
|
|
/// Returns a shared handle to the MMIO bus owned by the address manager.
pub fn mmio_bus(&self) -> &Arc<devices::Bus> {
    let manager = &self.address_manager;
    &manager.mmio_bus
}
|
|
|
|
|
2019-11-11 14:57:41 +00:00
|
|
|
/// Returns the system resource allocator shared through the address manager.
pub fn allocator(&self) -> &Arc<Mutex<SystemAllocator>> {
    let manager = &self.address_manager;
    &manager.allocator
}
|
|
|
|
|
2019-09-04 14:20:09 +00:00
|
|
|
/// Returns the userspace IOAPIC model, if one was created for this VM.
pub fn ioapic(&self) -> &Option<Arc<Mutex<ioapic::Ioapic>>> {
    let ioapic = &self.ioapic;
    ioapic
}
|
2019-09-06 15:42:41 +00:00
|
|
|
|
|
|
|
/// Returns the console device handle managed by this device manager.
pub fn console(&self) -> &Arc<Console> {
    let console = &self.console;
    console
}
|
2019-09-11 15:22:00 +00:00
|
|
|
|
|
|
|
pub fn cmdline_additions(&self) -> &[String] {
|
|
|
|
self.cmdline_additions.as_slice()
|
|
|
|
}
|
2019-10-02 20:57:20 +00:00
|
|
|
|
|
|
|
pub fn virt_iommu(&self) -> Option<(u32, &[u32])> {
|
|
|
|
if let Some((iommu_id, dev_ids)) = self.virt_iommu.as_ref() {
|
|
|
|
Some((*iommu_id, dev_ids.as_slice()))
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
}
|
|
|
|
}
|
2019-11-27 15:28:22 +00:00
|
|
|
|
|
|
|
/// Notifies the guest about a hot-plug event.
///
/// With the "acpi" feature the notification is forwarded to the GED device,
/// whose interrupt the guest handles via the AML `_EVT` method; without ACPI
/// this is a no-op that always succeeds.
pub fn notify_hotplug(
    &self,
    _notification_type: HotPlugNotificationFlags,
) -> DeviceManagerResult<()> {
    #[cfg(feature = "acpi")]
    return self
        .ged_notification_device
        .as_ref()
        // NOTE(review): assumes the GED device is always created when the
        // "acpi" feature is enabled — confirm at the construction site.
        .unwrap()
        .lock()
        .unwrap()
        .notify(_notification_type)
        .map_err(DeviceManagerError::HotPlugNotification);
    #[cfg(not(feature = "acpi"))]
    return Ok(());
}
|
2019-09-04 13:55:14 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
impl Drop for DeviceManager {
    /// Unmaps every memory region recorded in `mmap_regions`.
    fn drop(&mut self) {
        // drain(..) moves each (addr, size) pair out exactly once.
        for (addr, size) in self.mmap_regions.drain(..) {
            // SAFETY: each pair is presumably the pointer/length of a live
            // mapping recorded when it was created (confirm at the sites
            // pushing into mmap_regions). The munmap return value is
            // deliberately ignored: there is no useful recovery in drop().
            unsafe {
                libc::munmap(addr, size);
            }
        }
    }
}
|
2019-12-06 16:14:32 +00:00
|
|
|
|
|
|
|
#[cfg(feature = "acpi")]
/// Builds the AML bytes for an ACPI Generic Event Device (HID "ACPI0013")
/// wired to `ged_irq`.
///
/// The device exposes a one-byte I/O register GDAT at port 0xb000; `_EVT`
/// reads it and dispatches: bit 0 set -> CPU scan (`\_SB_.CPUS.CSCN`),
/// bit 1 set -> memory scan (`\_SB_.MHPC.MSCN`).
fn create_ged_device(ged_irq: u32) -> Vec<u8> {
    aml::Device::new(
        "_SB_.GED_".into(),
        vec![
            &aml::Name::new("_HID".into(), &"ACPI0013"),
            &aml::Name::new("_UID".into(), &aml::ZERO),
            &aml::Name::new(
                "_CRS".into(),
                // Interrupt flags: consumer, edge-triggered, active-high,
                // not shared.
                &aml::ResourceTemplate::new(vec![&aml::Interrupt::new(
                    true, true, false, false, ged_irq,
                )]),
            ),
            // One-byte event-status register in system I/O space.
            &aml::OpRegion::new("GDST".into(), aml::OpRegionSpace::SystemIO, 0xb000, 0x1),
            &aml::Field::new(
                "GDST".into(),
                aml::FieldAccessType::Byte,
                aml::FieldUpdateRule::WriteAsZeroes,
                vec![aml::FieldEntry::Named(*b"GDAT", 8)],
            ),
            // _EVT(arg0): read GDAT once, then test each event bit.
            &aml::Method::new(
                "_EVT".into(),
                1,
                true,
                vec![
                    &aml::Store::new(&aml::Local(0), &aml::Path::new("GDAT")),
                    // Bit 0: CPU hotplug -> rescan CPU devices.
                    &aml::And::new(&aml::Local(1), &aml::Local(0), &aml::ONE),
                    &aml::If::new(
                        &aml::Equal::new(&aml::Local(1), &aml::ONE),
                        vec![&aml::MethodCall::new("\\_SB_.CPUS.CSCN".into(), vec![])],
                    ),
                    // Bit 1: memory hotplug -> rescan memory devices.
                    &aml::And::new(&aml::Local(1), &aml::Local(0), &2usize),
                    &aml::If::new(
                        &aml::Equal::new(&aml::Local(1), &2usize),
                        vec![&aml::MethodCall::new("\\_SB_.MHPC.MSCN".into(), vec![])],
                    ),
                ],
            ),
        ],
    )
    .to_aml_bytes()
}
|
|
|
|
|
|
|
|
#[cfg(feature = "acpi")]
impl Aml for DeviceManager {
    /// Emits the DSDT fragments owned by the device manager: the PCI host
    /// bridge (PCI0), the motherboard resources (MBRD), the COM1 serial
    /// port (only when the serial console is enabled), the S5 sleep object,
    /// and the Generic Event Device used for hotplug notifications.
    fn to_aml_bytes(&self) -> Vec<u8> {
        let mut bytes = Vec::new();
        // Device-memory window boundaries come from the memory manager.
        let start_of_device_area = self.memory_manager.lock().unwrap().start_of_device_area().0;
        let end_of_device_area = self.memory_manager.lock().unwrap().end_of_device_area().0;
        // PCI host bridge: PNP0A08 (PCIe) with PNP0A03 (PCI) compatibility.
        let pci_dsdt_data = aml::Device::new(
            "_SB_.PCI0".into(),
            vec![
                &aml::Name::new("_HID".into(), &aml::EISAName::new("PNP0A08")),
                &aml::Name::new("_CID".into(), &aml::EISAName::new("PNP0A03")),
                &aml::Name::new("_ADR".into(), &aml::ZERO),
                &aml::Name::new("_SEG".into(), &aml::ZERO),
                &aml::Name::new("_UID".into(), &aml::ZERO),
                &aml::Name::new("SUPP".into(), &aml::ZERO),
                &aml::Name::new(
                    "_CRS".into(),
                    &aml::ResourceTemplate::new(vec![
                        // Bus numbers 0x00-0xff.
                        &aml::AddressSpace::new_bus_number(0x0u16, 0xffu16),
                        // 0xcf8-0xcff: PCI config address/data ports.
                        &aml::IO::new(0xcf8, 0xcf8, 1, 0x8),
                        // I/O windows around the config ports.
                        &aml::AddressSpace::new_io(0x0u16, 0xcf7u16),
                        &aml::AddressSpace::new_io(0xd00u16, 0xffffu16),
                        // 32-bit MMIO window for device BARs.
                        &aml::AddressSpace::new_memory(
                            aml::AddressSpaceCachable::NotCacheable,
                            true,
                            layout::MEM_32BIT_DEVICES_START.0 as u32,
                            (layout::MEM_32BIT_DEVICES_START.0 + layout::MEM_32BIT_DEVICES_SIZE - 1)
                                as u32,
                        ),
                        // 64-bit device window above guest RAM.
                        &aml::AddressSpace::new_memory(
                            aml::AddressSpaceCachable::NotCacheable,
                            true,
                            start_of_device_area,
                            end_of_device_area,
                        ),
                    ]),
                ),
            ],
        )
        .to_aml_bytes();

        // Motherboard resources (PNP0C02): reserves the PCI MMCONFIG region.
        let mbrd_dsdt_data = aml::Device::new(
            "_SB_.MBRD".into(),
            vec![
                &aml::Name::new("_HID".into(), &aml::EISAName::new("PNP0C02")),
                &aml::Name::new("_UID".into(), &aml::ZERO),
                &aml::Name::new(
                    "_CRS".into(),
                    &aml::ResourceTemplate::new(vec![&aml::Memory32Fixed::new(
                        true,
                        layout::PCI_MMCONFIG_START.0 as u32,
                        layout::PCI_MMCONFIG_SIZE as u32,
                    )]),
                ),
            ],
        )
        .to_aml_bytes();

        // COM1 serial port (PNP0501): IRQ 4, ports 0x3f8-0x3ff.
        let com1_dsdt_data = aml::Device::new(
            "_SB_.COM1".into(),
            vec![
                &aml::Name::new("_HID".into(), &aml::EISAName::new("PNP0501")),
                &aml::Name::new("_UID".into(), &aml::ZERO),
                &aml::Name::new(
                    "_CRS".into(),
                    &aml::ResourceTemplate::new(vec![
                        &aml::Interrupt::new(true, true, false, false, 4),
                        &aml::IO::new(0x3f8, 0x3f8, 0, 0x8),
                    ]),
                ),
            ],
        )
        .to_aml_bytes();

        // _S5_: soft-off sleep state package.
        let s5_sleep_data =
            aml::Name::new("_S5_".into(), &aml::Package::new(vec![&5u8])).to_aml_bytes();

        // GED device built around the IRQ assigned at construction time.
        // NOTE(review): assumes the GED device exists whenever "acpi" is
        // enabled — confirm at the construction site.
        let ged_data = create_ged_device(
            self.ged_notification_device
                .as_ref()
                .unwrap()
                .lock()
                .unwrap()
                .irq(),
        );

        bytes.extend_from_slice(pci_dsdt_data.as_slice());
        bytes.extend_from_slice(mbrd_dsdt_data.as_slice());
        // Only advertise COM1 when the serial console is actually enabled.
        if self.config.lock().unwrap().serial.mode != ConsoleOutputMode::Off {
            bytes.extend_from_slice(com1_dsdt_data.as_slice());
        }
        bytes.extend_from_slice(s5_sleep_data.as_slice());
        bytes.extend_from_slice(ged_data.as_slice());
        bytes
    }
}
|
2019-11-18 23:24:31 +00:00
|
|
|
|
|
|
|
impl Pausable for DeviceManager {
|
|
|
|
fn pause(&mut self) -> result::Result<(), MigratableError> {
|
|
|
|
for dev in &self.migratable_devices {
|
|
|
|
dev.lock().unwrap().pause()?;
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
|
|
|
fn resume(&mut self) -> result::Result<(), MigratableError> {
|
|
|
|
for dev in &self.migratable_devices {
|
|
|
|
dev.lock().unwrap().resume()?;
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Marker implementations: DeviceManager relies on the traits' default
// behavior; per-device state is handled through `migratable_devices`.
impl Snapshotable for DeviceManager {}
impl Migratable for DeviceManager {}
|