// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
//

use crate::config::ConsoleOutputMode;
#[cfg(feature = "pci_support")]
use crate::config::DeviceConfig;
use crate::config::{DiskConfig, FsConfig, NetConfig, PmemConfig, VmConfig, VsockConfig};
use crate::device_tree::{DeviceNode, DeviceTree};
use crate::interrupt::{kvm::KvmMsiInterruptManager, LegacyUserspaceInterruptManager};
use crate::memory_manager::{Error as MemoryManagerError, MemoryManager};
#[cfg(feature = "pci_support")]
use crate::PciDeviceInfo;
use crate::{device_node, DEVICE_MANAGER_SNAPSHOT_ID};
#[cfg(feature = "acpi")]
use acpi_tables::{aml, aml::Aml};
use anyhow::anyhow;
#[cfg(target_arch = "aarch64")]
use arch::aarch64::DeviceInfoForFDT;
#[cfg(feature = "acpi")]
use arch::layout;
#[cfg(target_arch = "x86_64")]
use arch::layout::{APIC_START, IOAPIC_SIZE, IOAPIC_START};
#[cfg(target_arch = "aarch64")]
use arch::DeviceType;
#[cfg(target_arch = "aarch64")]
use devices::gic;
#[cfg(target_arch = "x86_64")]
use devices::ioapic;
use devices::{
    interrupt_controller, interrupt_controller::InterruptController, legacy::Serial, BusDevice,
    HotPlugNotificationFlags,
};
use hypervisor::kvm_ioctls;
use hypervisor::kvm_ioctls::*;
#[cfg(feature = "mmio_support")]
use hypervisor::vm::DataMatch;
use libc::TIOCGWINSZ;
use libc::{MAP_NORESERVE, MAP_PRIVATE, MAP_SHARED, O_TMPFILE, PROT_READ, PROT_WRITE};
#[cfg(feature = "pci_support")]
use pci::{
    DeviceRelocation, PciBarRegionType, PciBus, PciConfigIo, PciConfigMmio, PciDevice, PciRoot,
    VfioPciDevice,
};
use qcow::{self, ImageType, QcowFile};
#[cfg(feature = "pci_support")]
use std::any::Any;
use std::collections::HashMap;
use std::fs::{File, OpenOptions};
use std::io::{self, sink, stdout, Seek, SeekFrom};
use std::num::Wrapping;
use std::os::unix::fs::OpenOptionsExt;
use std::path::PathBuf;
use std::result;
use std::sync::{Arc, Mutex};
use tempfile::NamedTempFile;
#[cfg(feature = "pci_support")]
use vfio_ioctls::{VfioContainer, VfioDevice, VfioDmaMapping};
#[cfg(feature = "pci_support")]
use virtio_devices::transport::VirtioPciDevice;
use virtio_devices::transport::VirtioTransport;
use virtio_devices::vhost_user::VhostUserConfig;
#[cfg(feature = "pci_support")]
use virtio_devices::{DmaRemapping, IommuMapping};
use virtio_devices::{VirtioSharedMemory, VirtioSharedMemoryList};
use vm_allocator::SystemAllocator;
use vm_device::interrupt::{
    InterruptIndex, InterruptManager, LegacyIrqGroupConfig, MsiIrqGroupConfig,
};
use vm_device::Resource;
use vm_memory::guest_memory::FileOffset;
use vm_memory::{
    Address, GuestAddress, GuestAddressSpace, GuestRegionMmap, GuestUsize, MmapRegion,
};
use vm_migration::{
    Migratable, MigratableError, Pausable, Snapshot, SnapshotDataSection, Snapshottable,
    Transportable,
};
#[cfg(feature = "pci_support")]
use vm_virtio::{VirtioDeviceType, VirtioIommuRemapping};
use vmm_sys_util::eventfd::EventFd;

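// MMIO window length and device name constants used when registering devices
// in the device tree and building unique device identifiers.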
#[cfg(any(feature = "mmio_support", target_arch = "aarch64"))]
const MMIO_LEN: u64 = 0x1000;

#[cfg(feature = "pci_support")]
const VFIO_DEVICE_NAME_PREFIX: &str = "_vfio";

#[cfg(target_arch = "x86_64")]
const IOAPIC_DEVICE_NAME: &str = "_ioapic";
#[cfg(target_arch = "aarch64")]
const GIC_DEVICE_NAME: &str = "_gic";

const SERIAL_DEVICE_NAME_PREFIX: &str = "_serial";

const CONSOLE_DEVICE_NAME: &str = "_console";
const DISK_DEVICE_NAME_PREFIX: &str = "_disk";
const FS_DEVICE_NAME_PREFIX: &str = "_fs";
const MEM_DEVICE_NAME: &str = "_mem";
const BALLOON_DEVICE_NAME: &str = "_balloon";
const NET_DEVICE_NAME_PREFIX: &str = "_net";
const PMEM_DEVICE_NAME_PREFIX: &str = "_pmem";
const RNG_DEVICE_NAME: &str = "_rng";
const VSOCK_DEVICE_NAME_PREFIX: &str = "_vsock";

#[cfg(feature = "pci_support")]
const IOMMU_DEVICE_NAME: &str = "_iommu";

#[cfg(feature = "mmio_support")]
const VIRTIO_MMIO_DEVICE_NAME_PREFIX: &str = "_virtio-mmio";
#[cfg(feature = "pci_support")]
const VIRTIO_PCI_DEVICE_NAME_PREFIX: &str = "_virtio-pci";

/// Errors associated with device manager
#[derive(Debug)]
pub enum DeviceManagerError {
    /// Cannot create EventFd.
    EventFd(io::Error),

    /// Cannot open disk path
    Disk(io::Error),

    /// Cannot create vhost-user-net device
    CreateVhostUserNet(virtio_devices::vhost_user::Error),

    /// Cannot create virtio-blk device
    CreateVirtioBlock(io::Error),

    /// Cannot create virtio-net device
    CreateVirtioNet(virtio_devices::net::Error),

    /// Cannot create virtio-console device
    CreateVirtioConsole(io::Error),

    /// Cannot create virtio-rng device
    CreateVirtioRng(io::Error),

    /// Cannot create virtio-fs device
    CreateVirtioFs(virtio_devices::vhost_user::Error),

    /// Virtio-fs device was created without a socket.
    NoVirtioFsSock,

    /// Cannot create vhost-user-blk device
    CreateVhostUserBlk(virtio_devices::vhost_user::Error),

    /// Cannot create virtio-pmem device
    CreateVirtioPmem(io::Error),

    /// Cannot create virtio-vsock device
    CreateVirtioVsock(io::Error),

    /// Failed converting Path to &str for the virtio-vsock device.
    CreateVsockConvertPath,

    /// Cannot create virtio-vsock backend
    CreateVsockBackend(virtio_devices::vsock::VsockUnixError),

    /// Cannot create virtio-iommu device
    CreateVirtioIommu(io::Error),

    /// Cannot create virtio-balloon device
    CreateVirtioBalloon(io::Error),

    /// Failed parsing disk image format
    DetectImageType(qcow::Error),

    /// Cannot open qcow disk path
    QcowDeviceCreate(qcow::Error),

    /// Cannot open tap interface
    OpenTap(net_util::TapError),

    /// Cannot allocate IRQ.
    AllocateIrq,

    /// Cannot configure the IRQ.
    Irq(kvm_ioctls::Error),

    /// Cannot allocate PCI BARs
    #[cfg(feature = "pci_support")]
    AllocateBars(pci::PciDeviceError),

    /// Could not free the BARs associated with a PCI device.
    #[cfg(feature = "pci_support")]
    FreePciBars(pci::PciDeviceError),

    /// Cannot register ioevent.
    RegisterIoevent(anyhow::Error),

    /// Cannot unregister ioevent.
    UnRegisterIoevent(anyhow::Error),

    /// Cannot create virtio device
    VirtioDevice(vmm_sys_util::errno::Error),

    /// Cannot add PCI device
    #[cfg(feature = "pci_support")]
    AddPciDevice(pci::PciRootError),

    /// Cannot open persistent memory file
    PmemFileOpen(io::Error),

    /// Cannot set persistent memory file size
    PmemFileSetLen(io::Error),

    /// Cannot find a memory range for persistent memory
    PmemRangeAllocation,

    /// Cannot find a memory range for virtio-fs
    FsRangeAllocation,

    /// Error creating serial output file
    SerialOutputFileOpen(io::Error),

    /// Error creating console output file
    ConsoleOutputFileOpen(io::Error),

    /// Cannot create a VFIO device
    #[cfg(feature = "pci_support")]
    VfioCreate(vfio_ioctls::VfioError),

    /// Cannot create a VFIO PCI device
    #[cfg(feature = "pci_support")]
    VfioPciCreate(pci::VfioPciError),

    /// Failed to map VFIO MMIO region.
    #[cfg(feature = "pci_support")]
    VfioMapRegion(pci::VfioPciError),

    /// Failed to create the passthrough device.
    CreatePassthroughDevice(anyhow::Error),

    /// Failed to memory map.
    Mmap(io::Error),

    /// Cannot add legacy device to Bus.
    BusError(devices::BusError),

    /// Failed to allocate IO port
    AllocateIOPort,

    // Failed to make hotplug notification
    HotPlugNotification(io::Error),

    // Error from a memory manager operation
    MemoryManager(MemoryManagerError),

    /// Failed to create new interrupt source group.
    CreateInterruptGroup(io::Error),

    /// Failed to update interrupt source group.
    UpdateInterruptGroup(io::Error),

    /// Failed creating interrupt controller.
    CreateInterruptController(interrupt_controller::Error),

    /// Failed creating a new MmapRegion instance.
    NewMmapRegion(vm_memory::mmap::MmapRegionError),

    /// Failed cloning a File.
    CloneFile(io::Error),

    /// Failed to create socket file
    CreateSocketFile(io::Error),

    /// Failed to spawn the network backend
    SpawnNetBackend(io::Error),

    /// Failed to spawn the block backend
    SpawnBlockBackend(io::Error),

    /// Missing PCI bus.
    NoPciBus,

    /// Could not find an available device name.
    NoAvailableDeviceName,

    /// Missing PCI device.
    MissingPciDevice,

    /// Failed removing a PCI device from the PCI bus.
    #[cfg(feature = "pci_support")]
    RemoveDeviceFromPciBus(pci::PciRootError),

    /// Failed removing a bus device from the IO bus.
    RemoveDeviceFromIoBus(devices::BusError),

    /// Failed removing a bus device from the MMIO bus.
    RemoveDeviceFromMmioBus(devices::BusError),

    /// Failed to find the device corresponding to a specific PCI b/d/f.
    #[cfg(feature = "pci_support")]
    UnknownPciBdf(u32),

    /// Not allowed to remove this type of device from the VM.
    #[cfg(feature = "pci_support")]
    RemovalNotAllowed(vm_virtio::VirtioDeviceType),

    /// Failed to find device corresponding to the given identifier.
    #[cfg(feature = "pci_support")]
    UnknownDeviceId(String),

    /// Failed to find an available PCI device ID.
    #[cfg(feature = "pci_support")]
    NextPciDeviceId(pci::PciRootError),

    /// Could not reserve the PCI device ID.
    #[cfg(feature = "pci_support")]
    GetPciDeviceId(pci::PciRootError),

    /// Could not give the PCI device ID back.
    #[cfg(feature = "pci_support")]
    PutPciDeviceId(pci::PciRootError),

    /// Incorrect device ID as it is already used by another device.
    DeviceIdAlreadyInUse,

    /// No disk path was specified when one was expected
    NoDiskPath,

    /// Failed updating guest memory for virtio device.
    UpdateMemoryForVirtioDevice(virtio_devices::Error),

    /// Cannot create virtio-mem device
    CreateVirtioMem(io::Error),

    /// Cannot try Clone virtio-mem resize
    TryCloneVirtioMemResize(virtio_devices::mem::Error),

    /// Cannot find a memory range for virtio-mem memory
    VirtioMemRangeAllocation,

    /// Failed updating guest memory for VFIO PCI device.
    #[cfg(feature = "pci_support")]
    UpdateMemoryForVfioPciDevice(pci::VfioPciError),

    /// Trying to use a directory for pmem but no size specified
    PmemWithDirectorySizeMissing,

    /// Trying to use a size that is not multiple of 2MiB
    PmemSizeNotAligned,

    /// Could not find the node in the device tree.
    MissingNode,

    /// Could not find a MMIO range.
    MmioRangeAllocation,

    /// Resource was already found.
    ResourceAlreadyExists,

    /// Expected resources for virtio-mmio could not be found.
    MissingVirtioMmioResources,

    /// Expected resources for virtio-pci could not be found.
    MissingVirtioPciResources,

    /// Expected resources for virtio-fs could not be found.
    MissingVirtioFsResources,

    /// Missing PCI b/d/f from the DeviceNode.
    #[cfg(feature = "pci_support")]
    MissingDeviceNodePciBdf,

    /// No support for device passthrough
    NoDevicePassthroughSupport,
}

pub type DeviceManagerResult<T> = result::Result<T, DeviceManagerError>;

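// Shared, mutex-protected handle to a virtio device, used throughout the device manager.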
type VirtioDeviceArc = Arc<Mutex<dyn virtio_devices::VirtioDevice>>;

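// Query the terminal size (columns, rows) of stdin through the TIOCGWINSZ ioctl.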
pub fn get_win_size() -> (u16, u16) {
    #[repr(C)]
    #[derive(Default)]
    struct WS {
        rows: u16,
        cols: u16,
        xpixel: u16,
        ypixel: u16,
    };
    let ws: WS = WS::default();

    unsafe {
        libc::ioctl(0, TIOCGWINSZ, &ws);
    }

    (ws.cols, ws.rows)
}

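// Groups the serial device and the virtio-console input handle so input bytes
// can be forwarded to whichever of the two is present.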
#[derive(Default)]
pub struct Console {
    // Serial port on 0x3f8
    serial: Option<Arc<Mutex<Serial>>>,
    console_input: Option<Arc<virtio_devices::ConsoleInput>>,
    input_enabled: bool,
}

impl Console {
    pub fn queue_input_bytes(&self, out: &[u8]) -> vmm_sys_util::errno::Result<()> {
        if self.serial.is_some() {
            self.serial
                .as_ref()
                .unwrap()
                .lock()
                .expect("Failed to process stdin event due to poisoned lock")
                .queue_input_bytes(out)?;
        }

        if self.console_input.is_some() {
            self.console_input.as_ref().unwrap().queue_input_bytes(out);
        }

        Ok(())
    }

    pub fn update_console_size(&self, cols: u16, rows: u16) {
        if self.console_input.is_some() {
            self.console_input
                .as_ref()
                .unwrap()
                .update_console_size(cols, rows)
        }
    }

    pub fn input_enabled(&self) -> bool {
        self.input_enabled
    }
}

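// Bundles the allocator, the PIO/MMIO buses and the VM handle that devices need
// in order to be registered and relocated.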
struct AddressManager {
    allocator: Arc<Mutex<SystemAllocator>>,
    #[cfg(target_arch = "x86_64")]
    io_bus: Arc<devices::Bus>,
    mmio_bus: Arc<devices::Bus>,
    vm: Arc<dyn hypervisor::Vm>,
    #[cfg(feature = "pci_support")]
    device_tree: Arc<Mutex<DeviceTree>>,
}

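// Relocating a BAR means freeing the old range from the allocator, reserving the
// new one, updating the corresponding bus, and refreshing any per-device state
// (device tree resources, ioeventfds, shared memory regions).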
#[cfg(feature = "pci_support")]
impl DeviceRelocation for AddressManager {
    fn move_bar(
        &self,
        old_base: u64,
        new_base: u64,
        len: u64,
        pci_dev: &mut dyn PciDevice,
        region_type: PciBarRegionType,
    ) -> std::result::Result<(), std::io::Error> {
        match region_type {
            PciBarRegionType::IORegion => {
                #[cfg(target_arch = "x86_64")]
                {
                    // Update system allocator
                    self.allocator
                        .lock()
                        .unwrap()
                        .free_io_addresses(GuestAddress(old_base), len as GuestUsize);

                    self.allocator
                        .lock()
                        .unwrap()
                        .allocate_io_addresses(
                            Some(GuestAddress(new_base)),
                            len as GuestUsize,
                            None,
                        )
                        .ok_or_else(|| {
                            io::Error::new(io::ErrorKind::Other, "failed allocating new IO range")
                        })?;

                    // Update PIO bus
                    self.io_bus
                        .update_range(old_base, len, new_base, len)
                        .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
                }
                #[cfg(target_arch = "aarch64")]
                error!("I/O region is not supported");
            }
            PciBarRegionType::Memory32BitRegion | PciBarRegionType::Memory64BitRegion => {
                // Update system allocator
                if region_type == PciBarRegionType::Memory32BitRegion {
                    self.allocator
                        .lock()
                        .unwrap()
                        .free_mmio_hole_addresses(GuestAddress(old_base), len as GuestUsize);

                    self.allocator
                        .lock()
                        .unwrap()
                        .allocate_mmio_hole_addresses(
                            Some(GuestAddress(new_base)),
                            len as GuestUsize,
                            None,
                        )
                        .ok_or_else(|| {
                            io::Error::new(
                                io::ErrorKind::Other,
                                "failed allocating new 32 bits MMIO range",
                            )
                        })?;
                } else {
                    self.allocator
                        .lock()
                        .unwrap()
                        .free_mmio_addresses(GuestAddress(old_base), len as GuestUsize);

                    self.allocator
                        .lock()
                        .unwrap()
                        .allocate_mmio_addresses(
                            Some(GuestAddress(new_base)),
                            len as GuestUsize,
                            None,
                        )
                        .ok_or_else(|| {
                            io::Error::new(
                                io::ErrorKind::Other,
                                "failed allocating new 64 bits MMIO range",
                            )
                        })?;
                }

                // Update MMIO bus
                self.mmio_bus
                    .update_range(old_base, len, new_base, len)
                    .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
            }
        }

        let any_dev = pci_dev.as_any();
        if let Some(virtio_pci_dev) = any_dev.downcast_ref::<VirtioPciDevice>() {
            // Update the device_tree resources associated with the device
            if let Some(node) = self
                .device_tree
                .lock()
                .unwrap()
                .get_mut(&virtio_pci_dev.id())
            {
                let mut resource_updated = false;
                for resource in node.resources.iter_mut() {
                    if let Resource::MmioAddressRange { base, .. } = resource {
                        if *base == old_base {
                            *base = new_base;
                            resource_updated = true;
                            break;
                        }
                    }
                }

                if !resource_updated {
                    return Err(io::Error::new(
                        io::ErrorKind::Other,
                        format!(
                            "Couldn't find a resource with base 0x{:x} for device {}",
                            old_base,
                            virtio_pci_dev.id()
                        ),
                    ));
                }
            } else {
                return Err(io::Error::new(
                    io::ErrorKind::Other,
                    format!(
                        "Couldn't find device {} from device tree",
                        virtio_pci_dev.id()
                    ),
                ));
            }

            let bar_addr = virtio_pci_dev.config_bar_addr();
            if bar_addr == new_base {
                for (event, addr) in virtio_pci_dev.ioeventfds(old_base) {
                    let io_addr = IoEventAddress::Mmio(addr);
                    self.vm.unregister_ioevent(event, &io_addr).map_err(|e| {
                        io::Error::new(
                            io::ErrorKind::Other,
                            format!("failed to unregister ioevent: {:?}", e),
                        )
                    })?;
                }
                for (event, addr) in virtio_pci_dev.ioeventfds(new_base) {
                    let io_addr = IoEventAddress::Mmio(addr);
                    self.vm
                        .register_ioevent(event, &io_addr, None)
                        .map_err(|e| {
                            io::Error::new(
                                io::ErrorKind::Other,
                                format!("failed to register ioevent: {:?}", e),
                            )
                        })?;
                }
            } else {
                let virtio_dev = virtio_pci_dev.virtio_device();
                let mut virtio_dev = virtio_dev.lock().unwrap();
                if let Some(mut shm_regions) = virtio_dev.get_shm_regions() {
                    if shm_regions.addr.raw_value() == old_base {
                        // Remove old region from KVM by passing a size of 0.
                        let mem_region = self.vm.make_user_memory_region(
                            shm_regions.mem_slot,
                            old_base,
                            0,
                            shm_regions.host_addr,
                            false,
                        );

                        self.vm.set_user_memory_region(mem_region).map_err(|e| {
                            io::Error::new(
                                io::ErrorKind::Other,
                                format!("failed to set user memory region: {:?}", e),
                            )
                        })?;

                        // Create new mapping by inserting new region to KVM.
                        let mem_region = self.vm.make_user_memory_region(
                            shm_regions.mem_slot,
                            new_base,
                            shm_regions.len,
                            shm_regions.host_addr,
                            false,
                        );

                        self.vm.set_user_memory_region(mem_region).map_err(|e| {
                            io::Error::new(
                                io::ErrorKind::Other,
                                format!("failed to set user memory regions: {:?}", e),
                            )
                        })?;

                        // Update shared memory regions to reflect the new mapping.
                        shm_regions.addr = GuestAddress(new_base);
                        virtio_dev.set_shm_regions(shm_regions).map_err(|e| {
                            io::Error::new(
                                io::ErrorKind::Other,
                                format!("failed to update shared memory regions: {:?}", e),
                            )
                        })?;
                    }
                }
            }
        }

        pci_dev.move_bar(old_base, new_base)
    }
}

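// Handle on a self-spawned vhost-user backend; dropping it reaps the child process.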
struct ActivatedBackend {
    _socket_file: tempfile::NamedTempFile,
    child: std::process::Child,
}

impl Drop for ActivatedBackend {
    fn drop(&mut self) {
        self.child.wait().ok();
    }
}

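// Serializable snapshot of the DeviceManager: the device tree and the device ID counter.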
#[derive(Serialize, Deserialize)]
struct DeviceManagerState {
    device_tree: DeviceTree,
    device_id_cnt: Wrapping<usize>,
}

/// Private structure for storing information about the MMIO device registered at some address on the bus.
#[derive(Clone, Debug)]
#[cfg(target_arch = "aarch64")]
pub struct MMIODeviceInfo {
    addr: u64,
    irq: u32,
    len: u64,
}

#[cfg(target_arch = "aarch64")]
impl DeviceInfoForFDT for MMIODeviceInfo {
    fn addr(&self) -> u64 {
        self.addr
    }
    fn irq(&self) -> u32 {
        self.irq
    }
    fn length(&self) -> u64 {
        self.len
    }
}

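// Owns every device exposed to the guest, along with the bookkeeping needed to
// hotplug, snapshot and restore them.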
pub struct DeviceManager {
    // Manage address space related to devices
    address_manager: Arc<AddressManager>,

    // Console abstraction
    console: Arc<Console>,

    // Interrupt controller
    #[cfg(target_arch = "x86_64")]
    interrupt_controller: Option<Arc<Mutex<ioapic::Ioapic>>>,
    #[cfg(target_arch = "aarch64")]
    interrupt_controller: Option<Arc<Mutex<gic::Gic>>>,

    // Things to be added to the commandline (i.e. for virtio-mmio)
    cmdline_additions: Vec<String>,

    // ACPI GED notification device
    #[cfg(feature = "acpi")]
    ged_notification_device: Option<Arc<Mutex<devices::AcpiGEDDevice>>>,

    // VM configuration
    config: Arc<Mutex<VmConfig>>,

    // Memory Manager
    memory_manager: Arc<Mutex<MemoryManager>>,

    // The virtio devices on the system
    virtio_devices: Vec<(VirtioDeviceArc, bool, String)>,

    // List of bus devices
    // Let the DeviceManager keep strong references to the BusDevice devices.
    // This allows the IO and MMIO buses to be provided with Weak references,
    // which prevents cyclic dependencies.
    bus_devices: Vec<Arc<Mutex<dyn BusDevice>>>,

    // The path to the VMM for self spawning
    vmm_path: PathBuf,

    // Backends that have been spawned
    vhost_user_backends: Vec<ActivatedBackend>,

    // Counter to keep track of the consumed device IDs.
    device_id_cnt: Wrapping<usize>,

    // Keep a reference to the PCI bus
    #[cfg(feature = "pci_support")]
    pci_bus: Option<Arc<Mutex<PciBus>>>,

    #[cfg_attr(target_arch = "aarch64", allow(dead_code))]
    // MSI Interrupt Manager
    msi_interrupt_manager: Arc<dyn InterruptManager<GroupConfig = MsiIrqGroupConfig>>,

    // Passthrough device handle
    #[cfg(feature = "pci_support")]
    passthrough_device: Option<Arc<DeviceFd>>,

    // Paravirtualized IOMMU
    #[cfg(feature = "pci_support")]
    iommu_device: Option<Arc<Mutex<virtio_devices::Iommu>>>,

    // Bitmap of PCI devices to hotplug.
    #[cfg(feature = "pci_support")]
    pci_devices_up: u32,

    // Bitmap of PCI devices to hotunplug.
    #[cfg(feature = "pci_support")]
    pci_devices_down: u32,

    // Hashmap of device's name to their corresponding PCI b/d/f.
    #[cfg(feature = "pci_support")]
    pci_id_list: HashMap<String, u32>,

    // Hashmap of PCI b/d/f to their corresponding Arc<Mutex<dyn PciDevice>>.
    #[cfg(feature = "pci_support")]
    pci_devices: HashMap<u32, Arc<dyn Any + Send + Sync>>,

    // Tree of devices, representing the dependencies between devices.
    // Useful for introspection, snapshot and restore.
    device_tree: Arc<Mutex<DeviceTree>>,

    // Exit event
    #[cfg(feature = "acpi")]
    exit_evt: EventFd,

    // Reset event
    #[cfg(target_arch = "x86_64")]
    reset_evt: EventFd,

    #[cfg(target_arch = "aarch64")]
    id_to_dev_info: HashMap<(DeviceType, String), MMIODeviceInfo>,
}

impl DeviceManager {
    pub fn new(
        vm: Arc<dyn hypervisor::Vm>,
        config: Arc<Mutex<VmConfig>>,
        memory_manager: Arc<Mutex<MemoryManager>>,
        _exit_evt: &EventFd,
        #[cfg_attr(target_arch = "aarch64", allow(unused_variables))] reset_evt: &EventFd,
        vmm_path: PathBuf,
    ) -> DeviceManagerResult<Arc<Mutex<Self>>> {
        let device_tree = Arc::new(Mutex::new(DeviceTree::new()));

        let address_manager = Arc::new(AddressManager {
            allocator: memory_manager.lock().unwrap().allocator(),
            #[cfg(target_arch = "x86_64")]
            io_bus: Arc::new(devices::Bus::new()),
            mmio_bus: Arc::new(devices::Bus::new()),
            vm: vm.clone(),
            #[cfg(feature = "pci_support")]
            device_tree: Arc::clone(&device_tree),
        });

        // First we create the MSI interrupt manager, the legacy one is created
        // later, after the IOAPIC device creation.
        // The reason we create the MSI one first is because the IOAPIC needs it,
        // and then the legacy interrupt manager needs an IOAPIC. So we're
        // handling a linear dependency chain:
        // msi_interrupt_manager <- IOAPIC <- legacy_interrupt_manager.
        let msi_interrupt_manager: Arc<dyn InterruptManager<GroupConfig = MsiIrqGroupConfig>> =
            Arc::new(KvmMsiInterruptManager::new(
                Arc::clone(&address_manager.allocator),
                vm,
            ));

        let device_manager = DeviceManager {
            address_manager: Arc::clone(&address_manager),
            console: Arc::new(Console::default()),
            interrupt_controller: None,
            cmdline_additions: Vec::new(),
            #[cfg(feature = "acpi")]
            ged_notification_device: None,
            config,
            memory_manager,
            virtio_devices: Vec::new(),
            bus_devices: Vec::new(),
            vmm_path,
            vhost_user_backends: Vec::new(),
            device_id_cnt: Wrapping(0),
            #[cfg(feature = "pci_support")]
            pci_bus: None,
            msi_interrupt_manager,
            #[cfg(feature = "pci_support")]
            passthrough_device: None,
            #[cfg(feature = "pci_support")]
            iommu_device: None,
            #[cfg(feature = "pci_support")]
            pci_devices_up: 0,
            #[cfg(feature = "pci_support")]
            pci_devices_down: 0,
            #[cfg(feature = "pci_support")]
            pci_id_list: HashMap::new(),
            #[cfg(feature = "pci_support")]
            pci_devices: HashMap::new(),
            device_tree,
            #[cfg(feature = "acpi")]
            exit_evt: _exit_evt.try_clone().map_err(DeviceManagerError::EventFd)?,
            #[cfg(target_arch = "x86_64")]
            reset_evt: reset_evt.try_clone().map_err(DeviceManagerError::EventFd)?,
            #[cfg(target_arch = "aarch64")]
            id_to_dev_info: HashMap::new(),
        };

        #[cfg(feature = "acpi")]
        address_manager
            .allocator
            .lock()
            .unwrap()
            .allocate_io_addresses(Some(GuestAddress(0xae00)), 0x10, None)
            .ok_or(DeviceManagerError::AllocateIOPort)?;

        let device_manager = Arc::new(Mutex::new(device_manager));

        #[cfg(feature = "acpi")]
        address_manager
            .io_bus
            .insert(
                Arc::clone(&device_manager) as Arc<Mutex<dyn BusDevice>>,
                0xae00,
                0x10,
            )
            .map_err(DeviceManagerError::BusError)?;

        Ok(device_manager)
    }

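    // Instantiate the platform devices in dependency order: interrupt controller
    // first, the legacy interrupt manager built on top of it, then legacy/ACPI
    // devices and the console, and finally the virtio devices on either the PCI
    // or the MMIO transport.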
    pub fn create_devices(&mut self) -> DeviceManagerResult<()> {
        let mut virtio_devices: Vec<(VirtioDeviceArc, bool, String)> = Vec::new();

        let interrupt_controller = self.add_interrupt_controller()?;

        // Now we can create the legacy interrupt manager, which needs the freshly
        // formed IOAPIC device.
        let legacy_interrupt_manager: Arc<
            dyn InterruptManager<GroupConfig = LegacyIrqGroupConfig>,
        > = Arc::new(LegacyUserspaceInterruptManager::new(Arc::clone(
            &interrupt_controller,
        )));

        #[cfg(feature = "acpi")]
        self.address_manager
            .allocator
            .lock()
            .unwrap()
            .allocate_io_addresses(Some(GuestAddress(0x0a00)), 0x18, None)
            .ok_or(DeviceManagerError::AllocateIOPort)?;

        #[cfg(feature = "acpi")]
        self.address_manager
            .io_bus
            .insert(
                Arc::clone(&self.memory_manager) as Arc<Mutex<dyn BusDevice>>,
                0xa00,
                0x18,
            )
            .map_err(DeviceManagerError::BusError)?;

        #[cfg(target_arch = "x86_64")]
        self.add_legacy_devices(
            self.reset_evt
                .try_clone()
                .map_err(DeviceManagerError::EventFd)?,
        )?;

        #[cfg(target_arch = "aarch64")]
        self.add_legacy_devices(&legacy_interrupt_manager)?;

        #[cfg(feature = "acpi")]
        {
            self.ged_notification_device = self.add_acpi_devices(
                &legacy_interrupt_manager,
                self.reset_evt
                    .try_clone()
                    .map_err(DeviceManagerError::EventFd)?,
                self.exit_evt
                    .try_clone()
                    .map_err(DeviceManagerError::EventFd)?,
            )?;
        }

        self.console = self.add_console_device(&legacy_interrupt_manager, &mut virtio_devices)?;

        #[cfg(any(feature = "pci_support", feature = "mmio_support"))]
        virtio_devices.append(&mut self.make_virtio_devices()?);

        if cfg!(feature = "pci_support") {
            self.add_pci_devices(virtio_devices.clone())?;
        } else if cfg!(feature = "mmio_support") {
            self.add_mmio_devices(virtio_devices.clone(), &legacy_interrupt_manager)?;
        }

        self.virtio_devices = virtio_devices;

        Ok(())
    }

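    // Snapshot helpers: capture and restore the device tree plus the device ID counter.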
    fn state(&self) -> DeviceManagerState {
        DeviceManagerState {
            device_tree: self.device_tree.lock().unwrap().clone(),
            device_id_cnt: self.device_id_cnt,
        }
    }

    fn set_state(&mut self, state: &DeviceManagerState) -> DeviceManagerResult<()> {
        self.device_tree = Arc::new(Mutex::new(state.device_tree.clone()));
        self.device_id_cnt = state.device_id_cnt;

        Ok(())
    }

    #[cfg(target_arch = "aarch64")]
    /// Gets the information of the devices registered up to some point in time.
    pub fn get_device_info(&self) -> &HashMap<(DeviceType, String), MMIODeviceInfo> {
        &self.id_to_dev_info
    }

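    // Build the PCI topology: create the root bus, optionally instantiate the
    // paravirtualized IOMMU, plug every virtio (and VFIO) device behind it, then
    // expose the PCI config I/O port and the MMCONFIG region.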
    #[allow(unused_variables)]
    fn add_pci_devices(
        &mut self,
        virtio_devices: Vec<(VirtioDeviceArc, bool, String)>,
    ) -> DeviceManagerResult<()> {
        #[cfg(feature = "pci_support")]
        {
            let pci_root = PciRoot::new(None);
            let mut pci_bus = PciBus::new(
                pci_root,
                Arc::clone(&self.address_manager) as Arc<dyn DeviceRelocation>,
            );

            let iommu_id = String::from(IOMMU_DEVICE_NAME);

            let (iommu_device, iommu_mapping) = if self.config.lock().unwrap().iommu {
                let (device, mapping) = virtio_devices::Iommu::new(iommu_id.clone())
                    .map_err(DeviceManagerError::CreateVirtioIommu)?;
                let device = Arc::new(Mutex::new(device));
                self.iommu_device = Some(Arc::clone(&device));

                // Fill the device tree with a new node. In case of restore, we
                // know there is nothing to do, so we can simply override the
                // existing entry.
                self.device_tree
                    .lock()
                    .unwrap()
                    .insert(iommu_id.clone(), device_node!(iommu_id, device));

                (Some(device), Some(mapping))
            } else {
                (None, None)
            };

            let interrupt_manager = Arc::clone(&self.msi_interrupt_manager);

            let mut iommu_attached_devices = Vec::new();

            for (device, iommu_attached, id) in virtio_devices {
                let mapping: &Option<Arc<IommuMapping>> = if iommu_attached {
                    &iommu_mapping
                } else {
                    &None
                };

                let dev_id = self.add_virtio_pci_device(
                    device,
                    &mut pci_bus,
                    mapping,
                    &interrupt_manager,
                    id,
                )?;

                if iommu_attached {
                    iommu_attached_devices.push(dev_id);
                }
            }

            let mut vfio_iommu_device_ids =
                self.add_vfio_devices(&mut pci_bus, &interrupt_manager)?;

            iommu_attached_devices.append(&mut vfio_iommu_device_ids);

            if let Some(iommu_device) = iommu_device {
                iommu_device
                    .lock()
                    .unwrap()
                    .attach_pci_devices(0, iommu_attached_devices);

                // Because we determined the virtio-iommu b/d/f, we have to
                // add the device to the PCI topology now. Otherwise, the
                // b/d/f won't match the virtio-iommu device as expected.
                self.add_virtio_pci_device(
                    iommu_device,
                    &mut pci_bus,
                    &None,
                    &interrupt_manager,
                    iommu_id,
                )?;
            }

            let pci_bus = Arc::new(Mutex::new(pci_bus));
            let pci_config_io = Arc::new(Mutex::new(PciConfigIo::new(Arc::clone(&pci_bus))));
            self.bus_devices
                .push(Arc::clone(&pci_config_io) as Arc<Mutex<dyn BusDevice>>);
            #[cfg(target_arch = "x86_64")]
            self.address_manager
                .io_bus
                .insert(pci_config_io, 0xcf8, 0x8)
                .map_err(DeviceManagerError::BusError)?;
            let pci_config_mmio = Arc::new(Mutex::new(PciConfigMmio::new(Arc::clone(&pci_bus))));
            self.bus_devices
                .push(Arc::clone(&pci_config_mmio) as Arc<Mutex<dyn BusDevice>>);
            self.address_manager
                .mmio_bus
                .insert(
                    pci_config_mmio,
                    arch::layout::PCI_MMCONFIG_START.0,
                    arch::layout::PCI_MMCONFIG_SIZE,
                )
                .map_err(DeviceManagerError::BusError)?;

            self.pci_bus = Some(pci_bus);
        }

        Ok(())
    }

    #[allow(unused_variables, unused_mut)]
    fn add_mmio_devices(
        &mut self,
        virtio_devices: Vec<(VirtioDeviceArc, bool, String)>,
        interrupt_manager: &Arc<dyn InterruptManager<GroupConfig = LegacyIrqGroupConfig>>,
    ) -> DeviceManagerResult<()> {
        #[cfg(feature = "mmio_support")]
        {
            for (device, _, id) in virtio_devices {
                self.add_virtio_mmio_device(id, device, interrupt_manager)?;
            }
        }

        Ok(())
    }

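    // The interrupt controller is architecture specific: a GIC on aarch64 and an
    // IOAPIC on x86_64, selected by the two cfg-gated implementations below.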
    #[cfg(target_arch = "aarch64")]
    fn add_interrupt_controller(
        &mut self,
    ) -> DeviceManagerResult<Arc<Mutex<dyn InterruptController>>> {
        let id = String::from(GIC_DEVICE_NAME);

        let interrupt_controller: Arc<Mutex<gic::Gic>> = Arc::new(Mutex::new(
            gic::Gic::new(
                self.config.lock().unwrap().cpus.boot_vcpus,
                Arc::clone(&self.msi_interrupt_manager),
            )
            .map_err(DeviceManagerError::CreateInterruptController)?,
        ));

        self.interrupt_controller = Some(interrupt_controller.clone());

        // Fill the device tree with a new node. In case of restore, we
        // know there is nothing to do, so we can simply override the
        // existing entry.
        self.device_tree
            .lock()
            .unwrap()
            .insert(id.clone(), device_node!(id, interrupt_controller));

        Ok(interrupt_controller)
    }

    #[cfg(target_arch = "aarch64")]
    pub fn enable_interrupt_controller(&self) -> DeviceManagerResult<()> {
        if let Some(interrupt_controller) = &self.interrupt_controller {
            interrupt_controller.lock().unwrap().enable().unwrap();
        }
        Ok(())
    }

    #[cfg(target_arch = "x86_64")]
    fn add_interrupt_controller(
        &mut self,
    ) -> DeviceManagerResult<Arc<Mutex<dyn InterruptController>>> {
        let id = String::from(IOAPIC_DEVICE_NAME);

        // Create IOAPIC
        let interrupt_controller = Arc::new(Mutex::new(
            ioapic::Ioapic::new(
                id.clone(),
                APIC_START,
                Arc::clone(&self.msi_interrupt_manager),
            )
            .map_err(DeviceManagerError::CreateInterruptController)?,
        ));

        self.interrupt_controller = Some(interrupt_controller.clone());

        self.address_manager
            .mmio_bus
            .insert(interrupt_controller.clone(), IOAPIC_START.0, IOAPIC_SIZE)
            .map_err(DeviceManagerError::BusError)?;

        self.bus_devices
            .push(Arc::clone(&interrupt_controller) as Arc<Mutex<dyn BusDevice>>);

        // Fill the device tree with a new node. In case of restore, we
        // know there is nothing to do, so we can simply override the
        // existing entry.
        self.device_tree
            .lock()
            .unwrap()
            .insert(id.clone(), device_node!(id, interrupt_controller));

        Ok(interrupt_controller)
    }

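    // ACPI devices: a shutdown/reset device at I/O port 0x3c0 and a GED
    // notification device at 0xb000 wired to its own IRQ.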
    #[cfg(feature = "acpi")]
    fn add_acpi_devices(
        &mut self,
        interrupt_manager: &Arc<dyn InterruptManager<GroupConfig = LegacyIrqGroupConfig>>,
        reset_evt: EventFd,
        exit_evt: EventFd,
    ) -> DeviceManagerResult<Option<Arc<Mutex<devices::AcpiGEDDevice>>>> {
        let acpi_device = Arc::new(Mutex::new(devices::AcpiShutdownDevice::new(
            exit_evt, reset_evt,
        )));

        self.bus_devices
            .push(Arc::clone(&acpi_device) as Arc<Mutex<dyn BusDevice>>);

        self.address_manager
            .allocator
            .lock()
            .unwrap()
            .allocate_io_addresses(Some(GuestAddress(0x3c0)), 0x8, None)
            .ok_or(DeviceManagerError::AllocateIOPort)?;

        self.address_manager
            .io_bus
            .insert(acpi_device, 0x3c0, 0x4)
            .map_err(DeviceManagerError::BusError)?;

        let ged_irq = self
            .address_manager
            .allocator
            .lock()
            .unwrap()
            .allocate_irq()
            .unwrap();

        let interrupt_group = interrupt_manager
            .create_group(LegacyIrqGroupConfig {
                irq: ged_irq as InterruptIndex,
            })
            .map_err(DeviceManagerError::CreateInterruptGroup)?;

        let ged_device = Arc::new(Mutex::new(devices::AcpiGEDDevice::new(
            interrupt_group,
            ged_irq,
        )));

        self.bus_devices
            .push(Arc::clone(&ged_device) as Arc<Mutex<dyn BusDevice>>);

        self.address_manager
            .allocator
            .lock()
            .unwrap()
            .allocate_io_addresses(Some(GuestAddress(0xb000)), 0x1, None)
            .ok_or(DeviceManagerError::AllocateIOPort)?;

        self.address_manager
            .io_bus
            .insert(ged_device.clone(), 0xb000, 0x1)
            .map_err(DeviceManagerError::BusError)?;
        Ok(Some(ged_device))
    }

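    // x86_64 legacy devices: an i8042 reset device at port 0x61, plus an optional
    // CMOS device and an optional firmware debug port behind their feature gates.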
2020-06-09 06:17:42 +00:00
|
|
|
#[cfg(target_arch = "x86_64")]
|
2020-01-31 11:55:30 +00:00
|
|
|
fn add_legacy_devices(&mut self, reset_evt: EventFd) -> DeviceManagerResult<()> {
|
2019-11-18 10:44:01 +00:00
|
|
|
// Add a shutdown device (i8042)
|
|
|
|
let i8042 = Arc::new(Mutex::new(devices::legacy::I8042Device::new(reset_evt)));
|
|
|
|
|
2020-03-04 14:46:40 +00:00
|
|
|
self.bus_devices
|
|
|
|
.push(Arc::clone(&i8042) as Arc<Mutex<dyn BusDevice>>);
|
|
|
|
|
2020-01-29 15:53:12 +00:00
|
|
|
self.address_manager
|
2019-11-18 10:44:01 +00:00
|
|
|
.io_bus
|
2019-12-19 17:02:36 +00:00
|
|
|
.insert(i8042, 0x61, 0x4)
|
2019-11-18 10:44:01 +00:00
|
|
|
.map_err(DeviceManagerError::BusError)?;
|
|
|
|
#[cfg(feature = "cmos")]
|
|
|
|
{
|
|
|
|
// Add a CMOS emulated device
|
|
|
|
use vm_memory::GuestMemory;
|
2020-01-31 11:55:30 +00:00
|
|
|
let mem_size = self
|
|
|
|
.memory_manager
|
|
|
|
.lock()
|
|
|
|
.unwrap()
|
|
|
|
.guest_memory()
|
2020-02-11 16:22:40 +00:00
|
|
|
.memory()
|
2020-01-31 11:55:30 +00:00
|
|
|
.last_addr()
|
|
|
|
.0
|
|
|
|
+ 1;
|
2019-11-18 10:44:01 +00:00
|
|
|
let mem_below_4g = std::cmp::min(arch::layout::MEM_32BIT_RESERVED_START.0, mem_size);
|
|
|
|
let mem_above_4g = mem_size.saturating_sub(arch::layout::RAM_64BIT_START.0);
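// Worked example (layout constants are assumptions for illustration: a
// 32-bit reserved hole starting at 3 GiB and RAM resuming at 4 GiB): if the
// last guest address + 1 is 6 GiB, then mem_below_4g = min(3 GiB, 6 GiB)
// = 3 GiB and mem_above_4g = 6 GiB - 4 GiB = 2 GiB.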
|
|
|
|
|
|
|
|
let cmos = Arc::new(Mutex::new(devices::legacy::Cmos::new(
|
|
|
|
mem_below_4g,
|
|
|
|
mem_above_4g,
|
|
|
|
)));
|
|
|
|
|
2020-03-04 14:46:40 +00:00
|
|
|
self.bus_devices
|
|
|
|
.push(Arc::clone(&cmos) as Arc<Mutex<dyn BusDevice>>);
|
|
|
|
|
2020-01-29 15:53:12 +00:00
|
|
|
self.address_manager
|
2019-11-18 10:44:01 +00:00
|
|
|
.io_bus
|
2019-12-19 17:02:36 +00:00
|
|
|
.insert(cmos, 0x70, 0x2)
|
2019-11-18 10:44:01 +00:00
|
|
|
.map_err(DeviceManagerError::BusError)?;
|
|
|
|
}
|
2020-04-17 09:39:28 +00:00
|
|
|
#[cfg(feature = "fwdebug")]
|
|
|
|
{
|
|
|
|
let fwdebug = Arc::new(Mutex::new(devices::legacy::FwDebugDevice::new()));
|
|
|
|
|
|
|
|
self.bus_devices
|
|
|
|
.push(Arc::clone(&fwdebug) as Arc<Mutex<dyn BusDevice>>);
|
|
|
|
|
|
|
|
self.address_manager
|
|
|
|
.io_bus
|
|
|
|
.insert(fwdebug, 0x402, 0x1)
|
|
|
|
.map_err(DeviceManagerError::BusError)?;
|
|
|
|
}
|
2019-11-18 10:44:01 +00:00
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2020-06-09 06:17:42 +00:00
|
|
|
#[cfg(target_arch = "aarch64")]
|
|
|
|
fn add_legacy_devices(
|
|
|
|
&mut self,
|
|
|
|
interrupt_manager: &Arc<dyn InterruptManager<GroupConfig = LegacyIrqGroupConfig>>,
|
|
|
|
) -> DeviceManagerResult<()> {
|
|
|
|
// Add an RTC device
|
|
|
|
let rtc_irq = self
|
|
|
|
.address_manager
|
|
|
|
.allocator
|
|
|
|
.lock()
|
|
|
|
.unwrap()
|
|
|
|
.allocate_irq()
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
let interrupt_group = interrupt_manager
|
|
|
|
.create_group(LegacyIrqGroupConfig {
|
|
|
|
irq: rtc_irq as InterruptIndex,
|
|
|
|
})
|
|
|
|
.map_err(DeviceManagerError::CreateInterruptGroup)?;
|
|
|
|
|
|
|
|
let rtc_device = Arc::new(Mutex::new(devices::legacy::RTC::new(interrupt_group)));
|
|
|
|
|
|
|
|
self.bus_devices
|
|
|
|
.push(Arc::clone(&rtc_device) as Arc<Mutex<dyn BusDevice>>);
|
|
|
|
|
|
|
|
let addr = GuestAddress(arch::layout::LEGACY_RTC_MAPPED_IO_START);
|
|
|
|
|
|
|
|
self.address_manager
|
|
|
|
.mmio_bus
|
|
|
|
.insert(rtc_device.clone(), addr.0, MMIO_LEN)
|
|
|
|
.map_err(DeviceManagerError::BusError)?;
|
|
|
|
|
|
|
|
self.id_to_dev_info.insert(
|
|
|
|
(DeviceType::RTC, "rtc".to_string()),
|
|
|
|
MMIODeviceInfo {
|
|
|
|
addr: addr.0,
|
|
|
|
len: MMIO_LEN,
|
|
|
|
irq: rtc_irq,
|
|
|
|
},
|
|
|
|
);
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
fn add_serial_device(
|
|
|
|
&mut self,
|
|
|
|
interrupt_manager: &Arc<dyn InterruptManager<GroupConfig = LegacyIrqGroupConfig>>,
|
|
|
|
serial_writer: Option<Box<dyn io::Write + Send>>,
|
|
|
|
) -> DeviceManagerResult<Arc<Mutex<Serial>>> {
|
|
|
|
// Serial is tied to IRQ #4
|
|
|
|
let serial_irq = 4;
|
|
|
|
|
|
|
|
let id = String::from(SERIAL_DEVICE_NAME_PREFIX);
|
|
|
|
|
|
|
|
let interrupt_group = interrupt_manager
|
|
|
|
.create_group(LegacyIrqGroupConfig {
|
|
|
|
irq: serial_irq as InterruptIndex,
|
|
|
|
})
|
|
|
|
.map_err(DeviceManagerError::CreateInterruptGroup)?;
|
|
|
|
|
|
|
|
let serial = Arc::new(Mutex::new(Serial::new(
|
|
|
|
id.clone(),
|
|
|
|
interrupt_group,
|
|
|
|
serial_writer,
|
|
|
|
)));
|
|
|
|
|
|
|
|
self.bus_devices
|
|
|
|
.push(Arc::clone(&serial) as Arc<Mutex<dyn BusDevice>>);
|
|
|
|
|
|
|
|
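// Reserve the conventional COM1 window: 8 consecutive I/O ports starting at
// 0x3f8, matching the classic 16550-style UART register layout emulated by
// the legacy Serial device.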
self.address_manager
|
|
|
|
.allocator
|
|
|
|
.lock()
|
|
|
|
.unwrap()
|
|
|
|
.allocate_io_addresses(Some(GuestAddress(0x3f8)), 0x8, None)
|
|
|
|
.ok_or(DeviceManagerError::AllocateIOPort)?;
|
|
|
|
|
|
|
|
self.address_manager
|
|
|
|
.io_bus
|
|
|
|
.insert(serial.clone(), 0x3f8, 0x8)
|
|
|
|
.map_err(DeviceManagerError::BusError)?;
|
|
|
|
|
|
|
|
// Fill the device tree with a new node. In case of restore, we
|
|
|
|
// know there is nothing to do, so we can simply override the
|
|
|
|
// existing entry.
|
|
|
|
self.device_tree
|
|
|
|
.lock()
|
|
|
|
.unwrap()
|
|
|
|
.insert(id.clone(), device_node!(id, serial));
|
|
|
|
|
|
|
|
Ok(serial)
|
|
|
|
}
|
|
|
|
|
|
|
|
#[cfg(target_arch = "aarch64")]
|
|
|
|
fn add_serial_device(
|
|
|
|
&mut self,
|
|
|
|
interrupt_manager: &Arc<dyn InterruptManager<GroupConfig = LegacyIrqGroupConfig>>,
|
|
|
|
serial_writer: Option<Box<dyn io::Write + Send>>,
|
|
|
|
) -> DeviceManagerResult<Arc<Mutex<Serial>>> {
|
|
|
|
let id = String::from(SERIAL_DEVICE_NAME_PREFIX);
|
|
|
|
|
|
|
|
let serial_irq = self
|
|
|
|
.address_manager
|
|
|
|
.allocator
|
|
|
|
.lock()
|
|
|
|
.unwrap()
|
|
|
|
.allocate_irq()
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
let interrupt_group = interrupt_manager
|
|
|
|
.create_group(LegacyIrqGroupConfig {
|
|
|
|
irq: serial_irq as InterruptIndex,
|
|
|
|
})
|
|
|
|
.map_err(DeviceManagerError::CreateInterruptGroup)?;
|
|
|
|
|
|
|
|
let serial = Arc::new(Mutex::new(Serial::new(
|
|
|
|
id.clone(),
|
|
|
|
interrupt_group,
|
|
|
|
serial_writer,
|
|
|
|
)));
|
|
|
|
|
|
|
|
self.bus_devices
|
|
|
|
.push(Arc::clone(&serial) as Arc<Mutex<dyn BusDevice>>);
|
|
|
|
|
|
|
|
let addr = GuestAddress(arch::layout::LEGACY_SERIAL_MAPPED_IO_START);
|
|
|
|
|
|
|
|
self.address_manager
|
|
|
|
.mmio_bus
|
|
|
|
.insert(serial.clone(), addr.0, MMIO_LEN)
|
|
|
|
.map_err(DeviceManagerError::BusError)?;
|
|
|
|
|
|
|
|
self.id_to_dev_info.insert(
|
|
|
|
(DeviceType::Serial, DeviceType::Serial.to_string()),
|
|
|
|
MMIODeviceInfo {
|
|
|
|
addr: addr.0,
|
|
|
|
len: MMIO_LEN,
|
|
|
|
irq: serial_irq,
|
|
|
|
},
|
|
|
|
);
|
|
|
|
|
|
|
|
self.cmdline_additions
|
|
|
|
.push(format!("earlycon=uart,mmio,0x{:08x}", addr.0));
|
|
|
|
|
|
|
|
// Fill the device tree with a new node. In case of restore, we
|
|
|
|
// know there is nothing to do, so we can simply override the
|
|
|
|
// existing entry.
|
|
|
|
self.device_tree
|
|
|
|
.lock()
|
|
|
|
.unwrap()
|
|
|
|
.insert(id.clone(), device_node!(id, serial));
|
|
|
|
|
|
|
|
Ok(serial)
|
|
|
|
}
|
|
|
|
|
2019-11-19 16:15:29 +00:00
|
|
|
fn add_console_device(
|
2020-01-29 15:53:12 +00:00
|
|
|
&mut self,
|
2020-02-04 11:04:10 +00:00
|
|
|
interrupt_manager: &Arc<dyn InterruptManager<GroupConfig = LegacyIrqGroupConfig>>,
|
2020-04-27 12:38:24 +00:00
|
|
|
virtio_devices: &mut Vec<(VirtioDeviceArc, bool, String)>,
|
2019-11-18 10:21:37 +00:00
|
|
|
) -> DeviceManagerResult<Arc<Console>> {
|
2020-01-31 11:42:48 +00:00
|
|
|
let serial_config = self.config.lock().unwrap().serial.clone();
|
2019-12-05 14:50:38 +00:00
|
|
|
let serial_writer: Option<Box<dyn io::Write + Send>> = match serial_config.mode {
|
2019-11-18 10:21:37 +00:00
|
|
|
ConsoleOutputMode::File => Some(Box::new(
|
2019-12-05 14:50:38 +00:00
|
|
|
File::create(serial_config.file.as_ref().unwrap())
|
2019-11-18 10:21:37 +00:00
|
|
|
.map_err(DeviceManagerError::SerialOutputFileOpen)?,
|
|
|
|
)),
|
|
|
|
ConsoleOutputMode::Tty => Some(Box::new(stdout())),
|
|
|
|
ConsoleOutputMode::Off | ConsoleOutputMode::Null => None,
|
|
|
|
};
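// For the serial port both Off and Null drop the output entirely (the device
// itself is still created unless the mode is Off); for the virtio-console
// below, Null keeps a sink() writer so the device exists, and only Off skips
// creating it.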
|
2019-12-05 14:50:38 +00:00
|
|
|
let serial = if serial_config.mode != ConsoleOutputMode::Off {
|
2020-06-09 06:17:42 +00:00
|
|
|
Some(self.add_serial_device(interrupt_manager, serial_writer)?)
|
2019-11-18 10:21:37 +00:00
|
|
|
} else {
|
|
|
|
None
|
|
|
|
};
|
|
|
|
|
|
|
|
// Create the virtio-console device
|
2020-01-31 11:42:48 +00:00
|
|
|
let console_config = self.config.lock().unwrap().console.clone();
|
2019-12-05 14:50:38 +00:00
|
|
|
let console_writer: Option<Box<dyn io::Write + Send + Sync>> = match console_config.mode {
|
|
|
|
ConsoleOutputMode::File => Some(Box::new(
|
2020-01-31 11:42:48 +00:00
|
|
|
File::create(console_config.file.as_ref().unwrap())
|
|
|
|
.map_err(DeviceManagerError::ConsoleOutputFileOpen)?,
|
2019-12-05 14:50:38 +00:00
|
|
|
)),
|
|
|
|
ConsoleOutputMode::Tty => Some(Box::new(stdout())),
|
|
|
|
ConsoleOutputMode::Null => Some(Box::new(sink())),
|
|
|
|
ConsoleOutputMode::Off => None,
|
|
|
|
};
|
2019-11-18 10:21:37 +00:00
|
|
|
let (col, row) = get_win_size();
|
|
|
|
let console_input = if let Some(writer) = console_writer {
|
2020-04-27 12:05:29 +00:00
|
|
|
let id = String::from(CONSOLE_DEVICE_NAME);
|
2020-04-27 17:12:00 +00:00
|
|
|
|
2019-11-18 10:21:37 +00:00
|
|
|
let (virtio_console_device, console_input) =
|
2020-07-02 12:25:19 +00:00
|
|
|
virtio_devices::Console::new(id.clone(), writer, col, row, console_config.iommu)
|
2019-11-18 10:21:37 +00:00
|
|
|
.map_err(DeviceManagerError::CreateVirtioConsole)?;
|
2020-04-28 12:53:05 +00:00
|
|
|
let virtio_console_device = Arc::new(Mutex::new(virtio_console_device));
|
2019-11-18 10:21:37 +00:00
|
|
|
virtio_devices.push((
|
2020-04-28 12:53:05 +00:00
|
|
|
Arc::clone(&virtio_console_device) as VirtioDeviceArc,
|
2020-04-29 16:14:48 +00:00
|
|
|
console_config.iommu,
|
2020-04-30 18:08:04 +00:00
|
|
|
id.clone(),
|
2019-11-18 10:21:37 +00:00
|
|
|
));
|
2020-04-28 12:53:05 +00:00
|
|
|
|
2020-04-30 18:08:04 +00:00
|
|
|
// Fill the device tree with a new node. In case of restore, we
|
|
|
|
// know there is nothing to do, so we can simply override the
|
|
|
|
// existing entry.
|
2020-05-05 08:23:32 +00:00
|
|
|
self.device_tree
|
2020-05-12 13:53:09 +00:00
|
|
|
.lock()
|
|
|
|
.unwrap()
|
2020-05-05 08:23:32 +00:00
|
|
|
.insert(id.clone(), device_node!(id, virtio_console_device));
|
2020-04-30 18:08:04 +00:00
|
|
|
|
2019-11-18 10:21:37 +00:00
|
|
|
Some(console_input)
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
};
|
|
|
|
|
|
|
|
Ok(Arc::new(Console {
|
|
|
|
serial,
|
|
|
|
console_input,
|
2019-12-05 14:50:38 +00:00
|
|
|
input_enabled: serial_config.mode.input_enabled()
|
|
|
|
|| console_config.mode.input_enabled(),
|
2019-11-18 10:21:37 +00:00
|
|
|
}))
|
|
|
|
}
|
|
|
|
|
2020-04-27 12:38:24 +00:00
|
|
|
fn make_virtio_devices(&mut self) -> DeviceManagerResult<Vec<(VirtioDeviceArc, bool, String)>> {
|
|
|
|
let mut devices: Vec<(VirtioDeviceArc, bool, String)> = Vec::new();
|
2019-09-04 13:55:14 +00:00
|
|
|
|
2019-09-10 16:14:16 +00:00
|
|
|
// Create "standard" virtio devices (net/block/rng)
|
2020-01-31 11:42:48 +00:00
|
|
|
devices.append(&mut self.make_virtio_block_devices()?);
|
|
|
|
devices.append(&mut self.make_virtio_net_devices()?);
|
|
|
|
devices.append(&mut self.make_virtio_rng_devices()?);
|
2019-09-04 13:55:14 +00:00
|
|
|
|
|
|
|
// Add virtio-fs if required
|
2020-01-31 11:42:48 +00:00
|
|
|
devices.append(&mut self.make_virtio_fs_devices()?);
|
2019-09-04 13:55:14 +00:00
|
|
|
|
|
|
|
// Add virtio-pmem if required
|
2020-01-31 11:42:48 +00:00
|
|
|
devices.append(&mut self.make_virtio_pmem_devices()?);
|
2019-09-04 13:55:14 +00:00
|
|
|
|
2019-09-04 18:14:54 +00:00
|
|
|
// Add virtio-vsock if required
|
2020-04-27 08:15:30 +00:00
|
|
|
devices.append(&mut self.make_virtio_vsock_devices()?);
|
2019-09-04 18:14:54 +00:00
|
|
|
|
2020-03-04 02:16:27 +00:00
|
|
|
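// Add virtio-mem if required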
devices.append(&mut self.make_virtio_mem_devices()?);
|
|
|
|
|
2020-03-20 03:43:37 +00:00
|
|
|
// Add virtio-balloon if required
|
|
|
|
devices.append(&mut self.make_virtio_balloon_devices()?);
|
|
|
|
|
2019-09-10 16:14:16 +00:00
|
|
|
Ok(devices)
|
2019-09-04 13:55:14 +00:00
|
|
|
}
|
|
|
|
|
2020-02-04 16:44:12 +00:00
|
|
|
/// Launch block backend
|
|
|
|
fn start_block_backend(&mut self, disk_cfg: &DiskConfig) -> DeviceManagerResult<String> {
|
|
|
|
let _socket_file = NamedTempFile::new().map_err(DeviceManagerError::CreateSocketFile)?;
|
2020-06-04 19:19:24 +00:00
|
|
|
let socket = _socket_file.path().to_str().unwrap().to_owned();
|
2020-02-04 16:44:12 +00:00
|
|
|
|
|
|
|
let child = std::process::Command::new(&self.vmm_path)
|
|
|
|
.args(&[
|
|
|
|
"--block-backend",
|
|
|
|
&format!(
|
2020-04-30 12:50:21 +00:00
|
|
|
"path={},socket={},num_queues={},queue_size={}",
|
2020-03-13 10:25:17 +00:00
|
|
|
disk_cfg
|
|
|
|
.path
|
|
|
|
.as_ref()
|
|
|
|
.ok_or(DeviceManagerError::NoDiskPath)?
|
|
|
|
.to_str()
|
|
|
|
.unwrap(),
|
2020-06-04 19:19:24 +00:00
|
|
|
&socket,
|
2020-02-04 16:44:12 +00:00
|
|
|
disk_cfg.num_queues,
|
|
|
|
disk_cfg.queue_size
|
|
|
|
),
|
|
|
|
])
|
|
|
|
.spawn()
|
|
|
|
.map_err(DeviceManagerError::SpawnBlockBackend)?;
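// Resulting child command line (the path and socket values below are purely
// illustrative):
//   <vmm_path> --block-backend path=/var/lib/images/disk.raw,socket=/tmp/ch-blk.sock,num_queues=1,queue_size=128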
|
|
|
|
|
|
|
|
// The ActivatedBackend::drop() will automatically reap the child
|
|
|
|
self.vhost_user_backends.push(ActivatedBackend {
|
|
|
|
child,
|
|
|
|
_socket_file,
|
|
|
|
});
|
|
|
|
|
2020-06-04 19:19:24 +00:00
|
|
|
Ok(socket)
|
2020-02-04 16:44:12 +00:00
|
|
|
}
|
|
|
|
|
2020-03-13 09:38:42 +00:00
|
|
|
fn make_virtio_block_device(
|
|
|
|
&mut self,
|
2020-04-15 16:09:12 +00:00
|
|
|
disk_cfg: &mut DiskConfig,
|
2020-04-27 12:38:24 +00:00
|
|
|
) -> DeviceManagerResult<(VirtioDeviceArc, bool, String)> {
|
2020-04-27 09:21:15 +00:00
|
|
|
let id = if let Some(id) = &disk_cfg.id {
|
|
|
|
id.clone()
|
|
|
|
} else {
|
|
|
|
let id = self.next_device_name(DISK_DEVICE_NAME_PREFIX)?;
|
|
|
|
disk_cfg.id = Some(id.clone());
|
|
|
|
id
|
|
|
|
};
|
2020-04-15 16:09:12 +00:00
|
|
|
|
2020-03-13 09:38:42 +00:00
|
|
|
if disk_cfg.vhost_user {
|
2020-06-04 19:19:24 +00:00
|
|
|
let socket = if let Some(socket) = disk_cfg.vhost_socket.clone() {
|
|
|
|
socket
|
2020-03-13 09:38:42 +00:00
|
|
|
} else {
|
|
|
|
self.start_block_backend(disk_cfg)?
|
|
|
|
};
|
|
|
|
let vu_cfg = VhostUserConfig {
|
2020-06-04 19:19:24 +00:00
|
|
|
socket,
|
2020-03-13 09:38:42 +00:00
|
|
|
num_queues: disk_cfg.num_queues,
|
|
|
|
queue_size: disk_cfg.queue_size,
|
|
|
|
};
|
|
|
|
let vhost_user_block_device = Arc::new(Mutex::new(
|
2020-07-02 12:25:19 +00:00
|
|
|
virtio_devices::vhost_user::Blk::new(id.clone(), vu_cfg)
|
2020-03-13 09:38:42 +00:00
|
|
|
.map_err(DeviceManagerError::CreateVhostUserBlk)?,
|
|
|
|
));
|
|
|
|
|
2020-04-30 18:08:04 +00:00
|
|
|
// Fill the device tree with a new node. In case of restore, we
|
|
|
|
// know there is nothing to do, so we can simply override the
|
|
|
|
// existing entry.
|
2020-05-05 08:23:32 +00:00
|
|
|
self.device_tree
|
2020-05-12 13:53:09 +00:00
|
|
|
.lock()
|
|
|
|
.unwrap()
|
2020-05-05 08:23:32 +00:00
|
|
|
.insert(id.clone(), device_node!(id, vhost_user_block_device));
|
2020-04-30 18:08:04 +00:00
|
|
|
|
2020-03-13 09:38:42 +00:00
|
|
|
Ok((
|
2020-04-08 13:04:03 +00:00
|
|
|
Arc::clone(&vhost_user_block_device) as VirtioDeviceArc,
|
2020-03-13 09:38:42 +00:00
|
|
|
false,
|
2020-04-27 12:38:24 +00:00
|
|
|
id,
|
2020-03-13 09:38:42 +00:00
|
|
|
))
|
|
|
|
} else {
|
|
|
|
let mut options = OpenOptions::new();
|
|
|
|
options.read(true);
|
|
|
|
options.write(!disk_cfg.readonly);
|
|
|
|
if disk_cfg.direct {
|
|
|
|
options.custom_flags(libc::O_DIRECT);
|
|
|
|
}
|
|
|
|
// Open block device path
|
|
|
|
let image: File = options
|
|
|
|
.open(
|
|
|
|
disk_cfg
|
|
|
|
.path
|
|
|
|
.as_ref()
|
|
|
|
.ok_or(DeviceManagerError::NoDiskPath)?
|
|
|
|
.clone(),
|
|
|
|
)
|
|
|
|
.map_err(DeviceManagerError::Disk)?;
|
|
|
|
|
2020-07-10 14:43:12 +00:00
|
|
|
let mut raw_img = qcow::RawFile::new(image, disk_cfg.direct);
|
2020-03-13 09:38:42 +00:00
|
|
|
|
|
|
|
let image_type = qcow::detect_image_type(&mut raw_img)
|
|
|
|
.map_err(DeviceManagerError::DetectImageType)?;
|
|
|
|
match image_type {
|
|
|
|
ImageType::Raw => {
|
2020-07-02 12:25:19 +00:00
|
|
|
let dev = virtio_devices::Block::new(
|
2020-04-27 12:38:24 +00:00
|
|
|
id.clone(),
|
2020-03-13 09:38:42 +00:00
|
|
|
raw_img,
|
|
|
|
disk_cfg
|
|
|
|
.path
|
|
|
|
.as_ref()
|
|
|
|
.ok_or(DeviceManagerError::NoDiskPath)?
|
|
|
|
.clone(),
|
|
|
|
disk_cfg.readonly,
|
|
|
|
disk_cfg.iommu,
|
|
|
|
disk_cfg.num_queues,
|
|
|
|
disk_cfg.queue_size,
|
|
|
|
)
|
|
|
|
.map_err(DeviceManagerError::CreateVirtioBlock)?;
|
|
|
|
|
|
|
|
let block = Arc::new(Mutex::new(dev));
|
|
|
|
|
2020-04-30 18:08:04 +00:00
|
|
|
// Fill the device tree with a new node. In case of restore, we
|
|
|
|
// know there is nothing to do, so we can simply override the
|
|
|
|
// existing entry.
|
2020-05-12 13:53:09 +00:00
|
|
|
self.device_tree
|
|
|
|
.lock()
|
|
|
|
.unwrap()
|
|
|
|
.insert(id.clone(), device_node!(id, block));
|
2020-04-30 18:08:04 +00:00
|
|
|
|
2020-04-27 12:38:24 +00:00
|
|
|
Ok((Arc::clone(&block) as VirtioDeviceArc, disk_cfg.iommu, id))
|
2020-03-13 09:38:42 +00:00
|
|
|
}
|
|
|
|
ImageType::Qcow2 => {
|
|
|
|
let qcow_img =
|
|
|
|
QcowFile::from(raw_img).map_err(DeviceManagerError::QcowDeviceCreate)?;
|
2020-07-02 12:25:19 +00:00
|
|
|
let dev = virtio_devices::Block::new(
|
2020-04-27 12:38:24 +00:00
|
|
|
id.clone(),
|
2020-03-13 09:38:42 +00:00
|
|
|
qcow_img,
|
|
|
|
disk_cfg
|
|
|
|
.path
|
|
|
|
.as_ref()
|
|
|
|
.ok_or(DeviceManagerError::NoDiskPath)?
|
|
|
|
.clone(),
|
|
|
|
disk_cfg.readonly,
|
|
|
|
disk_cfg.iommu,
|
|
|
|
disk_cfg.num_queues,
|
|
|
|
disk_cfg.queue_size,
|
|
|
|
)
|
|
|
|
.map_err(DeviceManagerError::CreateVirtioBlock)?;
|
|
|
|
|
|
|
|
let block = Arc::new(Mutex::new(dev));
|
|
|
|
|
2020-04-30 18:08:04 +00:00
|
|
|
// Fill the device tree with a new node. In case of restore, we
|
|
|
|
// know there is nothing to do, so we can simply override the
|
|
|
|
// existing entry.
|
2020-05-12 13:53:09 +00:00
|
|
|
self.device_tree
|
|
|
|
.lock()
|
|
|
|
.unwrap()
|
|
|
|
.insert(id.clone(), device_node!(id, block));
|
2020-04-30 18:08:04 +00:00
|
|
|
|
2020-04-27 12:38:24 +00:00
|
|
|
Ok((Arc::clone(&block) as VirtioDeviceArc, disk_cfg.iommu, id))
|
2020-03-13 09:38:42 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-08 13:04:03 +00:00
|
|
|
fn make_virtio_block_devices(
|
|
|
|
&mut self,
|
2020-04-27 12:38:24 +00:00
|
|
|
) -> DeviceManagerResult<Vec<(VirtioDeviceArc, bool, String)>> {
|
2019-09-10 16:14:16 +00:00
|
|
|
let mut devices = Vec::new();
|
|
|
|
|
2020-04-15 16:09:12 +00:00
|
|
|
let mut block_devices = self.config.lock().unwrap().disks.clone();
|
|
|
|
if let Some(disk_list_cfg) = &mut block_devices {
|
|
|
|
for disk_cfg in disk_list_cfg.iter_mut() {
|
2020-03-13 09:38:42 +00:00
|
|
|
devices.push(self.make_virtio_block_device(disk_cfg)?);
|
2019-09-04 13:55:14 +00:00
|
|
|
}
|
|
|
|
}
|
2020-04-15 16:09:12 +00:00
|
|
|
self.config.lock().unwrap().disks = block_devices;
|
2019-09-04 13:55:14 +00:00
|
|
|
|
2019-09-10 16:14:16 +00:00
|
|
|
Ok(devices)
|
2019-09-04 13:55:14 +00:00
|
|
|
}
|
|
|
|
|
2020-02-04 16:44:12 +00:00
|
|
|
/// Launch network backend
|
|
|
|
fn start_net_backend(&mut self, net_cfg: &NetConfig) -> DeviceManagerResult<String> {
|
|
|
|
let _socket_file = NamedTempFile::new().map_err(DeviceManagerError::CreateSocketFile)?;
|
2020-06-04 19:19:24 +00:00
|
|
|
let socket = _socket_file.path().to_str().unwrap().to_owned();
|
2020-02-04 16:44:12 +00:00
|
|
|
|
|
|
|
let child = std::process::Command::new(&self.vmm_path)
|
|
|
|
.args(&[
|
|
|
|
"--net-backend",
|
|
|
|
&format!(
|
2020-06-05 11:00:34 +00:00
|
|
|
"ip={},mask={},socket={},num_queues={},queue_size={}{}",
|
2020-05-15 09:00:38 +00:00
|
|
|
net_cfg.ip,
|
|
|
|
net_cfg.mask,
|
2020-06-04 19:19:24 +00:00
|
|
|
&socket,
|
2020-05-15 09:00:38 +00:00
|
|
|
net_cfg.num_queues,
|
|
|
|
net_cfg.queue_size,
|
2020-06-05 11:00:34 +00:00
|
|
|
if let Some(mac) = net_cfg.host_mac {
|
|
|
|
format!(",host_mac={:}", mac)
|
|
|
|
} else {
|
|
|
|
"".to_owned()
|
|
|
|
}
|
2020-02-04 16:44:12 +00:00
|
|
|
),
|
|
|
|
])
|
|
|
|
.spawn()
|
|
|
|
.map_err(DeviceManagerError::SpawnNetBackend)?;
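// Resulting child command line (addresses and paths below are purely
// illustrative); the ",host_mac=..." suffix is only appended when
// net_cfg.host_mac is set:
//   <vmm_path> --net-backend ip=192.168.249.1,mask=255.255.255.0,socket=/tmp/ch-net.sock,num_queues=2,queue_size=256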
|
|
|
|
|
|
|
|
// The ActivatedBackend::drop() will automatically reap the child
|
|
|
|
self.vhost_user_backends.push(ActivatedBackend {
|
|
|
|
child,
|
|
|
|
_socket_file,
|
|
|
|
});
|
|
|
|
|
2020-06-04 19:19:24 +00:00
|
|
|
Ok(socket)
|
2020-02-04 16:44:12 +00:00
|
|
|
}
|
|
|
|
|
2020-03-13 09:38:42 +00:00
|
|
|
fn make_virtio_net_device(
|
|
|
|
&mut self,
|
2020-04-15 16:09:12 +00:00
|
|
|
net_cfg: &mut NetConfig,
|
2020-04-27 12:38:24 +00:00
|
|
|
) -> DeviceManagerResult<(VirtioDeviceArc, bool, String)> {
|
2020-04-27 09:29:16 +00:00
|
|
|
let id = if let Some(id) = &net_cfg.id {
|
|
|
|
id.clone()
|
|
|
|
} else {
|
|
|
|
let id = self.next_device_name(NET_DEVICE_NAME_PREFIX)?;
|
|
|
|
net_cfg.id = Some(id.clone());
|
|
|
|
id
|
|
|
|
};
|
2020-04-15 16:09:12 +00:00
|
|
|
|
2020-03-13 09:38:42 +00:00
|
|
|
if net_cfg.vhost_user {
|
2020-06-04 19:19:24 +00:00
|
|
|
let socket = if let Some(socket) = net_cfg.vhost_socket.clone() {
|
|
|
|
socket
|
2020-03-13 09:38:42 +00:00
|
|
|
} else {
|
|
|
|
self.start_net_backend(net_cfg)?
|
|
|
|
};
|
|
|
|
let vu_cfg = VhostUserConfig {
|
2020-06-04 19:19:24 +00:00
|
|
|
socket,
|
2020-03-13 09:38:42 +00:00
|
|
|
num_queues: net_cfg.num_queues,
|
|
|
|
queue_size: net_cfg.queue_size,
|
|
|
|
};
|
|
|
|
let vhost_user_net_device = Arc::new(Mutex::new(
|
2020-07-02 12:25:19 +00:00
|
|
|
virtio_devices::vhost_user::Net::new(id.clone(), net_cfg.mac, vu_cfg)
|
2020-03-13 09:38:42 +00:00
|
|
|
.map_err(DeviceManagerError::CreateVhostUserNet)?,
|
|
|
|
));
|
2020-04-30 18:08:04 +00:00
|
|
|
|
|
|
|
// Fill the device tree with a new node. In case of restore, we
|
|
|
|
// know there is nothing to do, so we can simply override the
|
|
|
|
// existing entry.
|
2020-05-05 08:23:32 +00:00
|
|
|
self.device_tree
|
2020-05-12 13:53:09 +00:00
|
|
|
.lock()
|
|
|
|
.unwrap()
|
2020-05-05 08:23:32 +00:00
|
|
|
.insert(id.clone(), device_node!(id, vhost_user_net_device));
|
2020-04-30 18:08:04 +00:00
|
|
|
|
2020-03-13 09:38:42 +00:00
|
|
|
Ok((
|
2020-04-08 13:04:03 +00:00
|
|
|
Arc::clone(&vhost_user_net_device) as VirtioDeviceArc,
|
2020-03-13 09:38:42 +00:00
|
|
|
net_cfg.iommu,
|
2020-04-27 12:38:24 +00:00
|
|
|
id,
|
2020-03-13 09:38:42 +00:00
|
|
|
))
|
|
|
|
} else {
|
|
|
|
let virtio_net_device = if let Some(ref tap_if_name) = net_cfg.tap {
|
|
|
|
Arc::new(Mutex::new(
|
2020-07-02 12:25:19 +00:00
|
|
|
virtio_devices::Net::new(
|
2020-04-27 12:38:24 +00:00
|
|
|
id.clone(),
|
2020-03-13 09:38:42 +00:00
|
|
|
Some(tap_if_name),
|
|
|
|
None,
|
|
|
|
None,
|
|
|
|
Some(net_cfg.mac),
|
2020-06-05 11:00:34 +00:00
|
|
|
&mut net_cfg.host_mac,
|
2020-03-13 09:38:42 +00:00
|
|
|
net_cfg.iommu,
|
|
|
|
net_cfg.num_queues,
|
|
|
|
net_cfg.queue_size,
|
|
|
|
)
|
|
|
|
.map_err(DeviceManagerError::CreateVirtioNet)?,
|
|
|
|
))
|
|
|
|
} else {
|
|
|
|
Arc::new(Mutex::new(
|
2020-07-02 12:25:19 +00:00
|
|
|
virtio_devices::Net::new(
|
2020-04-27 12:38:24 +00:00
|
|
|
id.clone(),
|
2020-03-13 09:38:42 +00:00
|
|
|
None,
|
|
|
|
Some(net_cfg.ip),
|
|
|
|
Some(net_cfg.mask),
|
|
|
|
Some(net_cfg.mac),
|
2020-06-05 11:00:34 +00:00
|
|
|
&mut net_cfg.host_mac,
|
2020-03-13 09:38:42 +00:00
|
|
|
net_cfg.iommu,
|
|
|
|
net_cfg.num_queues,
|
|
|
|
net_cfg.queue_size,
|
|
|
|
)
|
|
|
|
.map_err(DeviceManagerError::CreateVirtioNet)?,
|
|
|
|
))
|
|
|
|
};
|
2020-04-30 18:08:04 +00:00
|
|
|
|
|
|
|
// Fill the device tree with a new node. In case of restore, we
|
|
|
|
// know there is nothing to do, so we can simply override the
|
|
|
|
// existing entry.
|
2020-05-05 08:23:32 +00:00
|
|
|
self.device_tree
|
2020-05-12 13:53:09 +00:00
|
|
|
.lock()
|
|
|
|
.unwrap()
|
2020-05-05 08:23:32 +00:00
|
|
|
.insert(id.clone(), device_node!(id, virtio_net_device));
|
2020-04-30 18:08:04 +00:00
|
|
|
|
2020-03-13 09:38:42 +00:00
|
|
|
Ok((
|
2020-04-08 13:04:03 +00:00
|
|
|
Arc::clone(&virtio_net_device) as VirtioDeviceArc,
|
2020-03-13 09:38:42 +00:00
|
|
|
net_cfg.iommu,
|
2020-04-27 12:38:24 +00:00
|
|
|
id,
|
2020-03-13 09:38:42 +00:00
|
|
|
))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-01-27 15:14:07 +00:00
|
|
|
/// Add virtio-net and vhost-user-net devices
|
2020-04-08 13:04:03 +00:00
|
|
|
fn make_virtio_net_devices(
|
|
|
|
&mut self,
|
2020-04-27 12:38:24 +00:00
|
|
|
) -> DeviceManagerResult<Vec<(VirtioDeviceArc, bool, String)>> {
|
2019-09-10 16:14:16 +00:00
|
|
|
let mut devices = Vec::new();
|
2020-04-15 16:09:12 +00:00
|
|
|
let mut net_devices = self.config.lock().unwrap().net.clone();
|
|
|
|
if let Some(net_list_cfg) = &mut net_devices {
|
|
|
|
for net_cfg in net_list_cfg.iter_mut() {
|
2020-03-13 09:38:42 +00:00
|
|
|
devices.push(self.make_virtio_net_device(net_cfg)?);
|
2019-09-04 13:55:14 +00:00
|
|
|
}
|
|
|
|
}
|
2020-04-15 16:09:12 +00:00
|
|
|
self.config.lock().unwrap().net = net_devices;
|
2019-09-04 13:55:14 +00:00
|
|
|
|
2019-09-10 16:14:16 +00:00
|
|
|
Ok(devices)
|
2019-09-04 13:55:14 +00:00
|
|
|
}
|
|
|
|
|
2020-04-08 13:04:03 +00:00
|
|
|
fn make_virtio_rng_devices(
|
|
|
|
&mut self,
|
2020-04-27 12:38:24 +00:00
|
|
|
) -> DeviceManagerResult<Vec<(VirtioDeviceArc, bool, String)>> {
|
2019-09-10 16:14:16 +00:00
|
|
|
let mut devices = Vec::new();
|
|
|
|
|
2019-09-04 13:55:14 +00:00
|
|
|
// Add virtio-rng if required
|
2020-01-31 11:42:48 +00:00
|
|
|
let rng_config = self.config.lock().unwrap().rng.clone();
|
2019-12-05 14:50:38 +00:00
|
|
|
if let Some(rng_path) = rng_config.src.to_str() {
|
2020-04-27 07:47:26 +00:00
|
|
|
let id = String::from(RNG_DEVICE_NAME);
|
|
|
|
|
2019-11-18 23:10:42 +00:00
|
|
|
let virtio_rng_device = Arc::new(Mutex::new(
|
2020-07-02 12:25:19 +00:00
|
|
|
virtio_devices::Rng::new(id.clone(), rng_path, rng_config.iommu)
|
2019-11-18 23:10:42 +00:00
|
|
|
.map_err(DeviceManagerError::CreateVirtioRng)?,
|
|
|
|
));
|
2019-10-02 20:57:20 +00:00
|
|
|
devices.push((
|
2020-04-08 13:04:03 +00:00
|
|
|
Arc::clone(&virtio_rng_device) as VirtioDeviceArc,
|
2020-04-29 15:48:23 +00:00
|
|
|
rng_config.iommu,
|
2020-04-30 18:08:04 +00:00
|
|
|
id.clone(),
|
2019-10-02 20:57:20 +00:00
|
|
|
));
|
2019-11-19 00:53:23 +00:00
|
|
|
|
2020-04-30 18:08:04 +00:00
|
|
|
// Fill the device tree with a new node. In case of restore, we
|
|
|
|
// know there is nothing to do, so we can simply override the
|
|
|
|
// existing entry.
|
2020-05-05 08:23:32 +00:00
|
|
|
self.device_tree
|
2020-05-12 13:53:09 +00:00
|
|
|
.lock()
|
|
|
|
.unwrap()
|
2020-05-05 08:23:32 +00:00
|
|
|
.insert(id.clone(), device_node!(id, virtio_rng_device));
|
2019-09-04 13:55:14 +00:00
|
|
|
}
|
|
|
|
|
2019-09-10 16:14:16 +00:00
|
|
|
Ok(devices)
|
2019-09-04 13:55:14 +00:00
|
|
|
}
|
|
|
|
|
2020-04-14 09:21:24 +00:00
|
|
|
fn make_virtio_fs_device(
|
2020-04-08 13:04:03 +00:00
|
|
|
&mut self,
|
2020-04-27 07:55:25 +00:00
|
|
|
fs_cfg: &mut FsConfig,
|
2020-04-27 12:38:24 +00:00
|
|
|
) -> DeviceManagerResult<(VirtioDeviceArc, bool, String)> {
|
2020-04-27 11:53:45 +00:00
|
|
|
let id = if let Some(id) = &fs_cfg.id {
|
|
|
|
id.clone()
|
|
|
|
} else {
|
|
|
|
let id = self.next_device_name(FS_DEVICE_NAME_PREFIX)?;
|
|
|
|
fs_cfg.id = Some(id.clone());
|
|
|
|
id
|
|
|
|
};
|
2020-04-27 07:55:25 +00:00
|
|
|
|
2020-05-05 08:23:32 +00:00
|
|
|
let mut node = device_node!(id);
|
2020-04-30 18:08:04 +00:00
|
|
|
|
2020-04-29 14:16:30 +00:00
|
|
|
// Look for the id in the device tree. If it can be found, that means
|
|
|
|
// the device is being restored, otherwise it's created from scratch.
|
2020-05-12 13:53:09 +00:00
|
|
|
let cache_range = if let Some(node) = self.device_tree.lock().unwrap().get(&id) {
|
2020-04-29 14:16:30 +00:00
|
|
|
debug!("Restoring virtio-fs {} resources", id);
|
|
|
|
|
|
|
|
let mut cache_range: Option<(u64, u64)> = None;
|
|
|
|
for resource in node.resources.iter() {
|
|
|
|
match resource {
|
|
|
|
Resource::MmioAddressRange { base, size } => {
|
|
|
|
if cache_range.is_some() {
|
|
|
|
return Err(DeviceManagerError::ResourceAlreadyExists);
|
|
|
|
}
|
|
|
|
|
|
|
|
cache_range = Some((*base, *size));
|
|
|
|
}
|
|
|
|
_ => {
|
|
|
|
error!("Unexpected resource {:?} for {}", resource, id);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if cache_range.is_none() {
|
|
|
|
return Err(DeviceManagerError::MissingVirtioFsResources);
|
|
|
|
}
|
|
|
|
|
|
|
|
cache_range
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
};
|
2020-04-27 17:12:00 +00:00
|
|
|
|
2020-06-04 19:19:24 +00:00
|
|
|
if let Some(fs_socket) = fs_cfg.socket.to_str() {
|
2020-04-14 09:21:24 +00:00
|
|
|
let cache = if fs_cfg.dax {
|
2020-04-29 14:16:30 +00:00
|
|
|
let (cache_base, cache_size) = if let Some((base, size)) = cache_range {
|
|
|
|
// The memory needs to be 2MiB aligned in order to support
|
|
|
|
// hugepages.
|
|
|
|
self.address_manager
|
|
|
|
.allocator
|
|
|
|
.lock()
|
|
|
|
.unwrap()
|
|
|
|
.allocate_mmio_addresses(
|
|
|
|
Some(GuestAddress(base)),
|
|
|
|
size as GuestUsize,
|
|
|
|
Some(0x0020_0000),
|
|
|
|
)
|
|
|
|
.ok_or(DeviceManagerError::FsRangeAllocation)?;
|
|
|
|
|
|
|
|
(base, size)
|
|
|
|
} else {
|
|
|
|
let size = fs_cfg.cache_size;
|
|
|
|
// The memory needs to be 2MiB aligned in order to support
|
|
|
|
// hugepages.
|
|
|
|
let base = self
|
|
|
|
.address_manager
|
|
|
|
.allocator
|
|
|
|
.lock()
|
|
|
|
.unwrap()
|
|
|
|
.allocate_mmio_addresses(None, size as GuestUsize, Some(0x0020_0000))
|
|
|
|
.ok_or(DeviceManagerError::FsRangeAllocation)?;
|
|
|
|
|
|
|
|
(base.raw_value(), size)
|
|
|
|
};
|
|
|
|
|
2020-04-30 18:08:04 +00:00
|
|
|
// Update the node with correct resource information.
|
|
|
|
node.resources.push(Resource::MmioAddressRange {
|
|
|
|
base: cache_base,
|
|
|
|
size: cache_size,
|
|
|
|
});
|
2019-09-04 13:55:14 +00:00
|
|
|
|
2020-04-14 09:21:24 +00:00
|
|
|
let mmap_region = MmapRegion::build(
|
|
|
|
None,
|
2020-04-29 14:16:30 +00:00
|
|
|
cache_size as usize,
|
2020-04-14 09:21:24 +00:00
|
|
|
libc::PROT_NONE,
|
|
|
|
libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
|
|
|
|
)
|
|
|
|
.map_err(DeviceManagerError::NewMmapRegion)?;
|
|
|
|
let host_addr: u64 = mmap_region.as_ptr() as u64;
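// The anonymous PROT_NONE mapping only reserves host virtual address space
// for the DAX cache window; create_userspace_mapping() below exposes that
// window to the guest, so that file contents can later be mapped into it on
// demand.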
|
2019-09-04 13:55:14 +00:00
|
|
|
|
2020-04-14 09:21:24 +00:00
|
|
|
let mem_slot = self
|
|
|
|
.memory_manager
|
|
|
|
.lock()
|
|
|
|
.unwrap()
|
2020-04-29 14:16:30 +00:00
|
|
|
.create_userspace_mapping(cache_base, cache_size, host_addr, false, false)
|
2020-04-14 09:21:24 +00:00
|
|
|
.map_err(DeviceManagerError::MemoryManager)?;
|
|
|
|
|
|
|
|
let mut region_list = Vec::new();
|
|
|
|
region_list.push(VirtioSharedMemory {
|
|
|
|
offset: 0,
|
2020-04-29 14:16:30 +00:00
|
|
|
len: cache_size,
|
2020-04-14 09:21:24 +00:00
|
|
|
});
|
|
|
|
|
|
|
|
Some((
|
|
|
|
VirtioSharedMemoryList {
|
|
|
|
host_addr,
|
|
|
|
mem_slot,
|
2020-04-29 14:16:30 +00:00
|
|
|
addr: GuestAddress(cache_base),
|
|
|
|
len: cache_size as GuestUsize,
|
2020-04-14 09:21:24 +00:00
|
|
|
region_list,
|
|
|
|
},
|
|
|
|
mmap_region,
|
|
|
|
))
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
};
|
2019-11-19 00:53:23 +00:00
|
|
|
|
2020-04-14 09:21:24 +00:00
|
|
|
let virtio_fs_device = Arc::new(Mutex::new(
|
2020-07-02 12:25:19 +00:00
|
|
|
virtio_devices::vhost_user::Fs::new(
|
2020-04-27 12:38:24 +00:00
|
|
|
id.clone(),
|
2020-06-04 19:19:24 +00:00
|
|
|
fs_socket,
|
2020-04-14 09:21:24 +00:00
|
|
|
&fs_cfg.tag,
|
|
|
|
fs_cfg.num_queues,
|
|
|
|
fs_cfg.queue_size,
|
|
|
|
cache,
|
|
|
|
)
|
|
|
|
.map_err(DeviceManagerError::CreateVirtioFs)?,
|
|
|
|
));
|
|
|
|
|
2020-04-30 18:08:04 +00:00
|
|
|
// Update the device tree with the migratable device.
|
|
|
|
node.migratable = Some(Arc::clone(&virtio_fs_device) as Arc<Mutex<dyn Migratable>>);
|
2020-05-12 13:53:09 +00:00
|
|
|
self.device_tree.lock().unwrap().insert(id.clone(), node);
|
2020-04-30 18:08:04 +00:00
|
|
|
|
2020-04-27 12:38:24 +00:00
|
|
|
Ok((Arc::clone(&virtio_fs_device) as VirtioDeviceArc, false, id))
|
2020-04-14 09:21:24 +00:00
|
|
|
} else {
|
|
|
|
Err(DeviceManagerError::NoVirtioFsSock)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fn make_virtio_fs_devices(
|
|
|
|
&mut self,
|
2020-04-27 12:38:24 +00:00
|
|
|
) -> DeviceManagerResult<Vec<(VirtioDeviceArc, bool, String)>> {
|
2020-04-14 09:21:24 +00:00
|
|
|
let mut devices = Vec::new();
|
|
|
|
|
2020-04-27 07:55:25 +00:00
|
|
|
let mut fs_devices = self.config.lock().unwrap().fs.clone();
|
|
|
|
if let Some(fs_list_cfg) = &mut fs_devices {
|
|
|
|
for fs_cfg in fs_list_cfg.iter_mut() {
|
2020-04-14 09:21:24 +00:00
|
|
|
devices.push(self.make_virtio_fs_device(fs_cfg)?);
|
2019-09-04 13:55:14 +00:00
|
|
|
}
|
|
|
|
}
|
2020-04-27 07:55:25 +00:00
|
|
|
self.config.lock().unwrap().fs = fs_devices;
|
2019-09-04 13:55:14 +00:00
|
|
|
|
2019-09-10 16:14:16 +00:00
|
|
|
Ok(devices)
|
2019-09-04 13:55:14 +00:00
|
|
|
}
|
|
|
|
|
2020-03-13 09:38:42 +00:00
|
|
|
fn make_virtio_pmem_device(
|
|
|
|
&mut self,
|
2020-04-15 16:09:12 +00:00
|
|
|
pmem_cfg: &mut PmemConfig,
|
2020-04-27 12:38:24 +00:00
|
|
|
) -> DeviceManagerResult<(VirtioDeviceArc, bool, String)> {
|
2020-04-27 11:36:41 +00:00
|
|
|
let id = if let Some(id) = &pmem_cfg.id {
|
|
|
|
id.clone()
|
|
|
|
} else {
|
|
|
|
let id = self.next_device_name(PMEM_DEVICE_NAME_PREFIX)?;
|
|
|
|
pmem_cfg.id = Some(id.clone());
|
|
|
|
id
|
|
|
|
};
|
2020-04-15 16:09:12 +00:00
|
|
|
|
2020-05-05 08:23:32 +00:00
|
|
|
let mut node = device_node!(id);
|
2020-04-30 18:08:04 +00:00
|
|
|
|
2020-04-29 14:43:03 +00:00
|
|
|
// Look for the id in the device tree. If it can be found, that means
|
|
|
|
// the device is being restored, otherwise it's created from scratch.
|
2020-05-12 13:53:09 +00:00
|
|
|
let region_range = if let Some(node) = self.device_tree.lock().unwrap().get(&id) {
|
2020-04-29 14:43:03 +00:00
|
|
|
debug!("Restoring virtio-pmem {} resources", id);
|
|
|
|
|
|
|
|
let mut region_range: Option<(u64, u64)> = None;
|
|
|
|
for resource in node.resources.iter() {
|
|
|
|
match resource {
|
|
|
|
Resource::MmioAddressRange { base, size } => {
|
|
|
|
if region_range.is_some() {
|
|
|
|
return Err(DeviceManagerError::ResourceAlreadyExists);
|
|
|
|
}
|
|
|
|
|
|
|
|
region_range = Some((*base, *size));
|
|
|
|
}
|
|
|
|
_ => {
|
|
|
|
error!("Unexpected resource {:?} for {}", resource, id);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if region_range.is_none() {
|
|
|
|
return Err(DeviceManagerError::MissingVirtioFsResources);
|
|
|
|
}
|
|
|
|
|
|
|
|
region_range
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
};
|
2020-04-27 17:12:00 +00:00
|
|
|
|
2020-03-13 09:38:42 +00:00
|
|
|
let (custom_flags, set_len) = if pmem_cfg.file.is_dir() {
|
2020-04-24 15:58:03 +00:00
|
|
|
if pmem_cfg.size.is_none() {
|
|
|
|
return Err(DeviceManagerError::PmemWithDirectorySizeMissing);
|
|
|
|
}
|
2020-03-13 09:38:42 +00:00
|
|
|
(O_TMPFILE, true)
|
|
|
|
} else {
|
|
|
|
(0, false)
|
|
|
|
};
|
2019-09-04 13:55:14 +00:00
|
|
|
|
2020-04-24 15:58:03 +00:00
|
|
|
let mut file = OpenOptions::new()
|
2020-03-13 09:38:42 +00:00
|
|
|
.read(true)
|
|
|
|
.write(!pmem_cfg.discard_writes)
|
|
|
|
.custom_flags(custom_flags)
|
|
|
|
.open(&pmem_cfg.file)
|
|
|
|
.map_err(DeviceManagerError::PmemFileOpen)?;
|
2019-09-04 13:55:14 +00:00
|
|
|
|
2020-04-24 15:58:03 +00:00
|
|
|
let size = if let Some(size) = pmem_cfg.size {
|
|
|
|
if set_len {
|
|
|
|
file.set_len(size)
|
|
|
|
.map_err(DeviceManagerError::PmemFileSetLen)?;
|
|
|
|
}
|
|
|
|
size
|
|
|
|
} else {
|
|
|
|
file.seek(SeekFrom::End(0))
|
|
|
|
.map_err(DeviceManagerError::PmemFileSetLen)?
|
|
|
|
};
|
|
|
|
|
|
|
|
if size % 0x20_0000 != 0 {
|
|
|
|
return Err(DeviceManagerError::PmemSizeNotAligned);
|
2020-03-13 09:38:42 +00:00
|
|
|
}
|
2019-09-04 13:55:14 +00:00
|
|
|
|
2020-04-29 14:43:03 +00:00
|
|
|
let (region_base, region_size) = if let Some((base, size)) = region_range {
|
|
|
|
// The memory needs to be 2MiB aligned in order to support
|
|
|
|
// hugepages.
|
|
|
|
self.address_manager
|
|
|
|
.allocator
|
|
|
|
.lock()
|
|
|
|
.unwrap()
|
|
|
|
.allocate_mmio_addresses(
|
|
|
|
Some(GuestAddress(base)),
|
|
|
|
size as GuestUsize,
|
|
|
|
Some(0x0020_0000),
|
|
|
|
)
|
|
|
|
.ok_or(DeviceManagerError::PmemRangeAllocation)?;
|
|
|
|
|
|
|
|
(base, size)
|
|
|
|
} else {
|
|
|
|
// The memory needs to be 2MiB aligned in order to support
|
|
|
|
// hugepages.
|
|
|
|
let base = self
|
|
|
|
.address_manager
|
|
|
|
.allocator
|
|
|
|
.lock()
|
|
|
|
.unwrap()
|
|
|
|
.allocate_mmio_addresses(None, size as GuestUsize, Some(0x0020_0000))
|
|
|
|
.ok_or(DeviceManagerError::PmemRangeAllocation)?;
|
|
|
|
|
|
|
|
(base.raw_value(), size)
|
|
|
|
};
|
|
|
|
|
2020-03-13 09:38:42 +00:00
|
|
|
let cloned_file = file.try_clone().map_err(DeviceManagerError::CloneFile)?;
|
|
|
|
let mmap_region = MmapRegion::build(
|
|
|
|
Some(FileOffset::new(cloned_file, 0)),
|
2020-04-29 14:43:03 +00:00
|
|
|
region_size as usize,
|
2020-03-13 09:38:42 +00:00
|
|
|
if pmem_cfg.discard_writes {
|
|
|
|
PROT_READ
|
|
|
|
} else {
|
|
|
|
PROT_READ | PROT_WRITE
|
|
|
|
},
|
|
|
|
MAP_NORESERVE
|
|
|
|
| if pmem_cfg.discard_writes {
|
|
|
|
MAP_PRIVATE
|
|
|
|
} else {
|
|
|
|
MAP_SHARED
|
|
|
|
},
|
|
|
|
)
|
|
|
|
.map_err(DeviceManagerError::NewMmapRegion)?;
|
2020-04-20 14:30:36 +00:00
|
|
|
let host_addr: u64 = mmap_region.as_ptr() as u64;
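// Mapping policy: with discard_writes the backing file is mapped read-only
// and MAP_PRIVATE, so nothing the guest does is carried back to the file;
// otherwise it is mapped read-write and MAP_SHARED so guest writes persist.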
|
2019-09-04 13:55:14 +00:00
|
|
|
|
2020-04-20 14:30:36 +00:00
|
|
|
let mem_slot = self
|
|
|
|
.memory_manager
|
2020-03-13 09:38:42 +00:00
|
|
|
.lock()
|
|
|
|
.unwrap()
|
|
|
|
.create_userspace_mapping(
|
2020-04-29 14:43:03 +00:00
|
|
|
region_base,
|
|
|
|
region_size,
|
2020-04-20 14:30:36 +00:00
|
|
|
host_addr,
|
2020-03-13 09:38:42 +00:00
|
|
|
pmem_cfg.mergeable,
|
|
|
|
pmem_cfg.discard_writes,
|
|
|
|
)
|
|
|
|
.map_err(DeviceManagerError::MemoryManager)?;
|
2019-11-19 23:22:20 +00:00
|
|
|
|
2020-07-02 12:25:19 +00:00
|
|
|
let mapping = virtio_devices::UserspaceMapping {
|
2020-04-20 14:30:36 +00:00
|
|
|
host_addr,
|
|
|
|
mem_slot,
|
2020-04-29 14:43:03 +00:00
|
|
|
addr: GuestAddress(region_base),
|
|
|
|
len: region_size,
|
2020-04-20 14:30:36 +00:00
|
|
|
mergeable: pmem_cfg.mergeable,
|
|
|
|
};
|
|
|
|
|
2020-03-13 09:38:42 +00:00
|
|
|
let virtio_pmem_device = Arc::new(Mutex::new(
|
2020-07-02 12:25:19 +00:00
|
|
|
virtio_devices::Pmem::new(
|
2020-04-27 12:38:24 +00:00
|
|
|
id.clone(),
|
2020-04-27 11:36:41 +00:00
|
|
|
file,
|
2020-04-29 14:43:03 +00:00
|
|
|
GuestAddress(region_base),
|
2020-04-27 11:36:41 +00:00
|
|
|
mapping,
|
|
|
|
mmap_region,
|
|
|
|
pmem_cfg.iommu,
|
|
|
|
)
|
|
|
|
.map_err(DeviceManagerError::CreateVirtioPmem)?,
|
2020-03-13 09:38:42 +00:00
|
|
|
));
|
2019-09-04 13:55:14 +00:00
|
|
|
|
2020-04-30 18:08:04 +00:00
|
|
|
// Update the device tree with correct resource information and with
|
|
|
|
// the migratable device.
|
|
|
|
node.resources.push(Resource::MmioAddressRange {
|
|
|
|
base: region_base,
|
|
|
|
size: region_size,
|
|
|
|
});
|
|
|
|
node.migratable = Some(Arc::clone(&virtio_pmem_device) as Arc<Mutex<dyn Migratable>>);
|
2020-05-12 13:53:09 +00:00
|
|
|
self.device_tree.lock().unwrap().insert(id.clone(), node);
|
2020-04-30 18:08:04 +00:00
|
|
|
|
2020-03-13 09:38:42 +00:00
|
|
|
Ok((
|
2020-04-08 13:04:03 +00:00
|
|
|
Arc::clone(&virtio_pmem_device) as VirtioDeviceArc,
|
2020-04-27 08:57:39 +00:00
|
|
|
pmem_cfg.iommu,
|
2020-04-27 12:38:24 +00:00
|
|
|
id,
|
2020-03-13 09:38:42 +00:00
|
|
|
))
|
|
|
|
}
|
2019-11-19 00:53:23 +00:00
|
|
|
|
2020-04-08 13:04:03 +00:00
|
|
|
fn make_virtio_pmem_devices(
|
|
|
|
&mut self,
|
2020-04-27 12:38:24 +00:00
|
|
|
) -> DeviceManagerResult<Vec<(VirtioDeviceArc, bool, String)>> {
|
2020-03-13 09:38:42 +00:00
|
|
|
let mut devices = Vec::new();
|
|
|
|
// Add virtio-pmem if required
|
2020-04-15 16:09:12 +00:00
|
|
|
let mut pmem_devices = self.config.lock().unwrap().pmem.clone();
|
|
|
|
if let Some(pmem_list_cfg) = &mut pmem_devices {
|
|
|
|
for pmem_cfg in pmem_list_cfg.iter_mut() {
|
2020-03-13 09:38:42 +00:00
|
|
|
devices.push(self.make_virtio_pmem_device(pmem_cfg)?);
|
2019-09-04 13:55:14 +00:00
|
|
|
}
|
|
|
|
}
|
2020-04-27 08:57:39 +00:00
|
|
|
self.config.lock().unwrap().pmem = pmem_devices;
|
2019-09-04 13:55:14 +00:00
|
|
|
|
2019-09-10 16:14:16 +00:00
|
|
|
Ok(devices)
|
2019-09-04 13:55:14 +00:00
|
|
|
}
|
|
|
|
|
2020-04-28 17:10:32 +00:00
|
|
|
fn make_virtio_vsock_device(
|
2020-04-08 13:04:03 +00:00
|
|
|
&mut self,
|
2020-04-27 08:15:30 +00:00
|
|
|
vsock_cfg: &mut VsockConfig,
|
2020-04-27 12:38:24 +00:00
|
|
|
) -> DeviceManagerResult<(VirtioDeviceArc, bool, String)> {
|
2020-04-27 11:49:54 +00:00
|
|
|
let id = if let Some(id) = &vsock_cfg.id {
|
|
|
|
id.clone()
|
|
|
|
} else {
|
|
|
|
let id = self.next_device_name(VSOCK_DEVICE_NAME_PREFIX)?;
|
|
|
|
vsock_cfg.id = Some(id.clone());
|
|
|
|
id
|
|
|
|
};
|
2020-04-27 08:15:30 +00:00
|
|
|
|
|
|
|
let socket_path = vsock_cfg
|
2020-06-04 19:19:24 +00:00
|
|
|
.socket
|
2020-04-27 08:15:30 +00:00
|
|
|
.to_str()
|
|
|
|
.ok_or(DeviceManagerError::CreateVsockConvertPath)?;
|
|
|
|
let backend =
|
2020-07-02 12:25:19 +00:00
|
|
|
virtio_devices::vsock::VsockUnixBackend::new(vsock_cfg.cid, socket_path.to_string())
|
2020-04-27 08:15:30 +00:00
|
|
|
.map_err(DeviceManagerError::CreateVsockBackend)?;
|
|
|
|
|
|
|
|
let vsock_device = Arc::new(Mutex::new(
|
2020-07-02 12:25:19 +00:00
|
|
|
virtio_devices::Vsock::new(
|
2020-05-05 09:54:38 +00:00
|
|
|
id.clone(),
|
|
|
|
vsock_cfg.cid,
|
2020-06-04 19:19:24 +00:00
|
|
|
vsock_cfg.socket.clone(),
|
2020-05-05 09:54:38 +00:00
|
|
|
backend,
|
|
|
|
vsock_cfg.iommu,
|
|
|
|
)
|
|
|
|
.map_err(DeviceManagerError::CreateVirtioVsock)?,
|
2020-04-27 08:15:30 +00:00
|
|
|
));
|
|
|
|
|
2020-04-30 18:08:04 +00:00
|
|
|
// Fill the device tree with a new node. In case of restore, we
|
|
|
|
// know there is nothing to do, so we can simply override the
|
|
|
|
// existing entry.
|
2020-05-05 08:23:32 +00:00
|
|
|
self.device_tree
|
2020-05-12 13:53:09 +00:00
|
|
|
.lock()
|
|
|
|
.unwrap()
|
2020-05-05 08:23:32 +00:00
|
|
|
.insert(id.clone(), device_node!(id, vsock_device));
|
2020-04-30 18:08:04 +00:00
|
|
|
|
2020-04-27 08:15:30 +00:00
|
|
|
Ok((
|
|
|
|
Arc::clone(&vsock_device) as VirtioDeviceArc,
|
2020-04-27 08:15:30 +00:00
|
|
|
vsock_cfg.iommu,
|
2020-04-27 12:38:24 +00:00
|
|
|
id,
|
2020-04-27 08:15:30 +00:00
|
|
|
))
|
|
|
|
}
|
|
|
|
|
|
|
|
fn make_virtio_vsock_devices(
|
|
|
|
&mut self,
|
2020-04-27 12:38:24 +00:00
|
|
|
) -> DeviceManagerResult<Vec<(VirtioDeviceArc, bool, String)>> {
|
2019-09-10 16:14:16 +00:00
|
|
|
let mut devices = Vec::new();
|
2020-04-28 17:10:32 +00:00
|
|
|
|
2020-04-27 08:15:30 +00:00
|
|
|
let mut vsock = self.config.lock().unwrap().vsock.clone();
|
|
|
|
if let Some(ref mut vsock_cfg) = &mut vsock {
|
|
|
|
devices.push(self.make_virtio_vsock_device(vsock_cfg)?);
|
2019-09-04 18:14:54 +00:00
|
|
|
}
|
2020-04-27 08:15:30 +00:00
|
|
|
self.config.lock().unwrap().vsock = vsock;
|
2019-09-04 18:14:54 +00:00
|
|
|
|
2019-09-10 16:14:16 +00:00
|
|
|
Ok(devices)
|
2019-09-04 18:14:54 +00:00
|
|
|
}
|
|
|
|
|
2020-04-08 13:04:03 +00:00
|
|
|
fn make_virtio_mem_devices(
|
|
|
|
&mut self,
|
2020-04-27 12:38:24 +00:00
|
|
|
) -> DeviceManagerResult<Vec<(VirtioDeviceArc, bool, String)>> {
|
2020-03-04 02:16:27 +00:00
|
|
|
let mut devices = Vec::new();
|
|
|
|
|
2020-04-27 11:44:43 +00:00
|
|
|
let mm = self.memory_manager.clone();
|
|
|
|
let mm = mm.lock().unwrap();
|
2020-03-04 02:16:27 +00:00
|
|
|
if let (Some(region), Some(resize)) = (&mm.virtiomem_region, &mm.virtiomem_resize) {
|
2020-04-27 11:44:43 +00:00
|
|
|
let id = String::from(MEM_DEVICE_NAME);
|
|
|
|
|
2020-03-04 02:16:27 +00:00
|
|
|
let virtio_mem_device = Arc::new(Mutex::new(
|
2020-07-02 12:25:19 +00:00
|
|
|
virtio_devices::Mem::new(
|
2020-04-27 11:44:43 +00:00
|
|
|
id.clone(),
|
2020-03-04 02:16:27 +00:00
|
|
|
®ion,
|
|
|
|
resize
|
|
|
|
.try_clone()
|
|
|
|
.map_err(DeviceManagerError::TryCloneVirtioMemResize)?,
|
|
|
|
)
|
|
|
|
.map_err(DeviceManagerError::CreateVirtioMem)?,
|
|
|
|
));
|
|
|
|
|
2020-04-30 18:08:04 +00:00
|
|
|
devices.push((
|
|
|
|
Arc::clone(&virtio_mem_device) as VirtioDeviceArc,
|
|
|
|
false,
|
|
|
|
id.clone(),
|
|
|
|
));
|
|
|
|
|
|
|
|
// Fill the device tree with a new node. In case of restore, we
|
|
|
|
// know there is nothing to do, so we can simply override the
|
|
|
|
// existing entry.
|
2020-05-05 08:23:32 +00:00
|
|
|
self.device_tree
|
2020-05-12 13:53:09 +00:00
|
|
|
.lock()
|
|
|
|
.unwrap()
|
2020-05-05 08:23:32 +00:00
|
|
|
.insert(id.clone(), device_node!(id, virtio_mem_device));
|
2020-03-04 02:16:27 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
Ok(devices)
|
|
|
|
}
|
|
|
|
|
2020-03-20 03:43:37 +00:00
|
|
|
fn make_virtio_balloon_devices(
|
|
|
|
&mut self,
|
|
|
|
) -> DeviceManagerResult<Vec<(VirtioDeviceArc, bool, String)>> {
|
|
|
|
let mut devices = Vec::new();
|
|
|
|
|
|
|
|
if self.config.lock().unwrap().memory.balloon {
|
|
|
|
let id = String::from(BALLOON_DEVICE_NAME);
|
|
|
|
|
|
|
|
let virtio_balloon_device = Arc::new(Mutex::new(
|
2020-06-23 09:52:30 +00:00
|
|
|
virtio_devices::Balloon::new(
|
|
|
|
id.clone(),
|
|
|
|
self.config.lock().unwrap().memory.balloon_size,
|
|
|
|
)
|
|
|
|
.map_err(DeviceManagerError::CreateVirtioBalloon)?,
|
2020-03-20 03:43:37 +00:00
|
|
|
));
|
|
|
|
|
|
|
|
self.memory_manager
|
|
|
|
.lock()
|
|
|
|
.unwrap()
|
|
|
|
.set_balloon(virtio_balloon_device.clone());
|
|
|
|
|
|
|
|
devices.push((
|
|
|
|
Arc::clone(&virtio_balloon_device) as VirtioDeviceArc,
|
|
|
|
false,
|
|
|
|
id.clone(),
|
|
|
|
));
|
|
|
|
|
|
|
|
self.device_tree
|
|
|
|
.lock()
|
|
|
|
.unwrap()
|
|
|
|
.insert(id.clone(), device_node!(id, virtio_balloon_device));
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(devices)
|
|
|
|
}
|
|
|
|
|
2020-04-15 16:09:12 +00:00
|
|
|
#[cfg(not(feature = "pci_support"))]
|
2020-04-27 08:37:56 +00:00
|
|
|
fn next_device_name(&mut self, prefix: &str) -> DeviceManagerResult<String> {
|
|
|
|
// Generate the temporary name.
|
|
|
|
let name = format!("{}{}", prefix, self.device_id_cnt);
|
|
|
|
// Increment the counter.
|
|
|
|
self.device_id_cnt += Wrapping(1);
|
|
|
|
|
|
|
|
Ok(name)
|
2020-04-15 16:09:12 +00:00
|
|
|
}
|
|
|
|
|
2020-03-06 10:34:24 +00:00
|
|
|
#[cfg(feature = "pci_support")]
|
2020-04-27 08:37:56 +00:00
|
|
|
fn next_device_name(&mut self, prefix: &str) -> DeviceManagerResult<String> {
|
2020-03-06 10:34:24 +00:00
|
|
|
let start_id = self.device_id_cnt;
|
|
|
|
loop {
|
|
|
|
// Generate the temporary name.
|
|
|
|
let name = format!("{}{}", prefix, self.device_id_cnt);
|
|
|
|
// Increment the counter.
|
|
|
|
self.device_id_cnt += Wrapping(1);
|
|
|
|
// Check if the name is already in use.
|
|
|
|
if !self.pci_id_list.contains_key(&name) {
|
2020-04-27 08:37:56 +00:00
|
|
|
return Ok(name);
|
2020-03-06 10:34:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if self.device_id_cnt == start_id {
|
|
|
|
// We went through a full loop and there's nothing else we can
|
|
|
|
// do.
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2020-04-15 16:58:49 +00:00
|
|
|
Err(DeviceManagerError::NoAvailableDeviceName)
|
2020-03-06 10:34:24 +00:00
|
|
|
}
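// Illustrative sketch (not part of the original source): the same wrap-around
// naming scheme in isolation, assuming a plain HashSet stands in for the
// pci_id_list map and a u32 counter width.
#[allow(dead_code)]
fn next_free_name_sketch(
    prefix: &str,
    used: &std::collections::HashSet<String>,
    counter: &mut Wrapping<u32>,
) -> Option<String> {
    let start = *counter;
    loop {
        // Candidate name: prefix + current counter value, e.g. "_disk0".
        let name = format!("{}{}", prefix, counter.0);
        *counter += Wrapping(1);
        if !used.contains(&name) {
            return Some(name);
        }
        // A full cycle without finding a free name means every candidate is taken.
        if *counter == start {
            return None;
        }
    }
}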
|
|
|
|
|
2020-02-27 16:15:25 +00:00
|
|
|
#[cfg(feature = "pci_support")]
|
2020-07-17 15:28:12 +00:00
|
|
|
fn add_passthrough_device(
|
|
|
|
&mut self,
|
|
|
|
pci: &mut PciBus,
|
|
|
|
interrupt_manager: &Arc<dyn InterruptManager<GroupConfig = MsiIrqGroupConfig>>,
|
|
|
|
device_fd: &Arc<DeviceFd>,
|
|
|
|
device_cfg: &mut DeviceConfig,
|
|
|
|
) -> DeviceManagerResult<(u32, String)> {
|
|
|
|
#[cfg(feature = "kvm")]
|
|
|
|
return self.add_vfio_device(pci, interrupt_manager, device_fd, device_cfg);
|
|
|
|
|
|
|
|
#[cfg(not(feature = "kvm"))]
|
|
|
|
Err(DeviceManagerError::NoDevicePassthroughSupport)
|
|
|
|
}
|
|
|
|
|
|
|
|
#[cfg(all(feature = "pci_support", feature = "kvm"))]
|
2020-02-27 16:15:25 +00:00
|
|
|
fn add_vfio_device(
|
2020-03-04 14:54:42 +00:00
|
|
|
&mut self,
|
2020-02-27 16:15:25 +00:00
|
|
|
pci: &mut PciBus,
|
|
|
|
interrupt_manager: &Arc<dyn InterruptManager<GroupConfig = MsiIrqGroupConfig>>,
|
|
|
|
device_fd: &Arc<DeviceFd>,
|
2020-03-09 13:08:59 +00:00
|
|
|
device_cfg: &mut DeviceConfig,
|
2020-06-11 14:48:25 +00:00
|
|
|
) -> DeviceManagerResult<(u32, String)> {
|
2020-02-27 16:15:25 +00:00
|
|
|
// We need to shift the device id since the first 3 bits
|
|
|
|
// are dedicated to the PCI function, and we know we don't
|
|
|
|
// do multifunction. Also, because we only support one PCI
|
|
|
|
// bus, the bus 0, we don't need to add anything to the
|
|
|
|
// global device ID.
|
2020-03-09 15:09:11 +00:00
|
|
|
let pci_device_bdf = pci
|
|
|
|
.next_device_id()
|
|
|
|
.map_err(DeviceManagerError::NextPciDeviceId)?
|
|
|
|
<< 3;
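// Illustrative: device slot 5 on bus 0 becomes bdf = 5 << 3 = 0x28, i.e.
// address 00:05.0 (the three least-significant bits carry the PCI function
// and stay 0 here).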
|
2020-02-27 16:15:25 +00:00
|
|
|
|
|
|
|
let memory = self.memory_manager.lock().unwrap().guest_memory();
|
2019-11-12 14:36:07 +00:00
|
|
|
let vfio_container = Arc::new(
|
|
|
|
VfioContainer::new(device_fd.clone()).map_err(DeviceManagerError::VfioCreate)?,
|
|
|
|
);
|
|
|
|
|
2020-02-27 16:15:25 +00:00
|
|
|
let vfio_device = VfioDevice::new(
|
|
|
|
&device_cfg.path,
|
2019-11-12 14:36:07 +00:00
|
|
|
Arc::clone(&vfio_container),
|
2020-02-27 16:15:25 +00:00
|
|
|
device_cfg.iommu,
|
|
|
|
)
|
|
|
|
.map_err(DeviceManagerError::VfioCreate)?;
|
|
|
|
|
|
|
|
if device_cfg.iommu {
|
|
|
|
if let Some(iommu) = &self.iommu_device {
|
2019-11-12 14:36:07 +00:00
|
|
|
let vfio_mapping = Arc::new(VfioDmaMapping::new(
|
|
|
|
Arc::clone(&vfio_container),
|
|
|
|
Arc::new(memory),
|
|
|
|
));
|
2020-02-27 16:15:25 +00:00
|
|
|
|
|
|
|
iommu
|
|
|
|
.lock()
|
|
|
|
.unwrap()
|
2020-03-06 10:34:24 +00:00
|
|
|
.add_external_mapping(pci_device_bdf, vfio_mapping);
|
2020-02-27 16:15:25 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-12 14:36:07 +00:00
|
|
|
let memory = self.memory_manager.lock().unwrap().guest_memory();
|
|
|
|
let mut vfio_pci_device = VfioPciDevice::new(
|
2020-07-03 09:16:49 +00:00
|
|
|
&self.address_manager.vm,
|
2019-11-12 14:36:07 +00:00
|
|
|
vfio_device,
|
|
|
|
interrupt_manager,
|
|
|
|
memory,
|
|
|
|
)
|
|
|
|
.map_err(DeviceManagerError::VfioPciCreate)?;
|
2020-02-27 16:15:25 +00:00
|
|
|
|
|
|
|
let bars = vfio_pci_device
|
|
|
|
.allocate_bars(&mut self.address_manager.allocator.lock().unwrap())
|
|
|
|
.map_err(DeviceManagerError::AllocateBars)?;
|
|
|
|
|
|
|
|
vfio_pci_device
|
2020-07-03 09:16:49 +00:00
|
|
|
.map_mmio_regions(&self.address_manager.vm, || {
|
2020-07-04 11:08:52 +00:00
|
|
|
self.memory_manager.lock().unwrap().allocate_memory_slot()
|
2020-02-27 16:15:25 +00:00
|
|
|
})
|
|
|
|
.map_err(DeviceManagerError::VfioMapRegion)?;
|
|
|
|
|
|
|
|
let vfio_pci_device = Arc::new(Mutex::new(vfio_pci_device));
|
|
|
|
|
2020-03-12 17:12:06 +00:00
|
|
|
pci.add_device(pci_device_bdf, vfio_pci_device.clone())
|
2020-02-27 16:15:25 +00:00
|
|
|
.map_err(DeviceManagerError::AddPciDevice)?;
|
|
|
|
|
2020-03-06 13:18:07 +00:00
|
|
|
self.pci_devices.insert(
|
|
|
|
pci_device_bdf,
|
|
|
|
Arc::clone(&vfio_pci_device) as Arc<dyn Any + Send + Sync>,
|
|
|
|
);
|
2020-03-04 14:46:40 +00:00
|
|
|
self.bus_devices
|
|
|
|
.push(Arc::clone(&vfio_pci_device) as Arc<Mutex<dyn BusDevice>>);
|
|
|
|
|
2020-02-27 16:15:25 +00:00
|
|
|
pci.register_mapping(
|
|
|
|
vfio_pci_device,
|
2020-06-03 08:59:35 +00:00
|
|
|
#[cfg(target_arch = "x86_64")]
|
2020-03-04 15:24:15 +00:00
|
|
|
self.address_manager.io_bus.as_ref(),
|
2020-02-27 16:15:25 +00:00
|
|
|
self.address_manager.mmio_bus.as_ref(),
|
|
|
|
bars,
|
|
|
|
)
|
|
|
|
.map_err(DeviceManagerError::AddPciDevice)?;
|
|
|
|
|
2020-03-11 10:23:42 +00:00
|
|
|
let vfio_name = if let Some(id) = &device_cfg.id {
|
|
|
|
if self.pci_id_list.contains_key(id) {
|
|
|
|
return Err(DeviceManagerError::DeviceIdAlreadyInUse);
|
|
|
|
}
|
|
|
|
|
|
|
|
id.clone()
|
|
|
|
} else {
|
|
|
|
let id = self.next_device_name(VFIO_DEVICE_NAME_PREFIX)?;
|
2020-04-27 08:37:56 +00:00
|
|
|
device_cfg.id = Some(id.clone());
|
|
|
|
id
|
2020-03-11 10:23:42 +00:00
|
|
|
};
|
2020-06-11 14:48:25 +00:00
|
|
|
self.pci_id_list.insert(vfio_name.clone(), pci_device_bdf);
|
2020-03-06 10:34:24 +00:00
|
|
|
|
2020-06-11 14:48:25 +00:00
|
|
|
Ok((pci_device_bdf, vfio_name))
|
2020-02-27 16:15:25 +00:00
|
|
|
}
|
|
|
|
|
2019-09-11 16:07:33 +00:00
|
|
|
#[cfg(feature = "pci_support")]
|
2019-09-04 13:55:14 +00:00
|
|
|
fn add_vfio_devices(
|
2020-01-29 15:53:12 +00:00
|
|
|
&mut self,
|
2019-09-30 14:23:57 +00:00
|
|
|
pci: &mut PciBus,
|
2020-02-04 11:04:10 +00:00
|
|
|
interrupt_manager: &Arc<dyn InterruptManager<GroupConfig = MsiIrqGroupConfig>>,
|
2019-10-07 16:28:03 +00:00
|
|
|
) -> DeviceManagerResult<Vec<u32>> {
|
2019-10-08 05:05:08 +00:00
|
|
|
let mut iommu_attached_device_ids = Vec::new();
|
2020-03-09 13:08:59 +00:00
|
|
|
let mut devices = self.config.lock().unwrap().devices.clone();
|
2019-11-28 00:45:10 +00:00
|
|
|
|
2020-03-09 13:08:59 +00:00
|
|
|
if let Some(device_list_cfg) = &mut devices {
|
2020-07-17 15:20:47 +00:00
|
|
|
// Create the passthrough device handle
|
2020-07-17 15:16:45 +00:00
|
|
|
let device_fd = self
|
|
|
|
.address_manager
|
|
|
|
.vm
|
|
|
|
.create_passthrough_device()
|
2020-07-17 15:20:47 +00:00
|
|
|
.map_err(|e| DeviceManagerError::CreatePassthroughDevice(e.into()))?;
|
2019-09-04 13:55:14 +00:00
|
|
|
let device_fd = Arc::new(device_fd);
|
2020-07-17 15:20:47 +00:00
|
|
|
self.passthrough_device = Some(Arc::clone(&device_fd));
|
2019-09-04 13:55:14 +00:00
|
|
|
|
2020-03-09 13:08:59 +00:00
|
|
|
for device_cfg in device_list_cfg.iter_mut() {
|
2020-06-11 14:48:25 +00:00
|
|
|
let (device_id, _) =
|
2020-07-17 15:28:12 +00:00
|
|
|
self.add_passthrough_device(pci, interrupt_manager, &device_fd, device_cfg)?;
|
2020-03-11 17:23:11 +00:00
|
|
|
if device_cfg.iommu && self.iommu_device.is_some() {
|
2020-02-27 16:15:25 +00:00
|
|
|
iommu_attached_device_ids.push(device_id);
|
2019-10-07 17:48:44 +00:00
|
|
|
}
|
2019-09-04 13:55:14 +00:00
|
|
|
}
|
|
|
|
}
|
2020-03-09 13:08:59 +00:00
|
|
|
|
|
|
|
// Update the list of devices
|
|
|
|
self.config.lock().unwrap().devices = devices;
|
|
|
|
|
2019-10-08 05:05:08 +00:00
|
|
|
Ok(iommu_attached_device_ids)
|
2019-09-04 13:55:14 +00:00
|
|
|
}
|
|
|
|
|
2019-09-11 16:07:33 +00:00
|
|
|
#[cfg(feature = "pci_support")]
|
2019-09-04 13:55:14 +00:00
|
|
|
fn add_virtio_pci_device(
|
2020-01-29 15:53:12 +00:00
|
|
|
&mut self,
|
2020-04-08 13:04:03 +00:00
|
|
|
virtio_device: VirtioDeviceArc,
|
2019-09-30 14:23:57 +00:00
|
|
|
pci: &mut PciBus,
|
2019-10-02 20:57:20 +00:00
|
|
|
iommu_mapping: &Option<Arc<IommuMapping>>,
|
2020-02-04 11:04:10 +00:00
|
|
|
interrupt_manager: &Arc<dyn InterruptManager<GroupConfig = MsiIrqGroupConfig>>,
|
2020-04-27 14:19:40 +00:00
|
|
|
virtio_device_id: String,
|
2020-03-18 17:24:35 +00:00
|
|
|
) -> DeviceManagerResult<u32> {
|
2020-04-27 14:19:40 +00:00
|
|
|
let id = format!("{}-{}", VIRTIO_PCI_DEVICE_NAME_PREFIX, virtio_device_id);
|
|
|
|
|
2020-04-27 17:12:00 +00:00
|
|
|
// Add the new virtio-pci node to the device tree.
|
2020-05-05 08:23:32 +00:00
|
|
|
let mut node = device_node!(id);
|
2020-04-30 14:39:45 +00:00
|
|
|
node.children = vec![virtio_device_id.clone()];
|
2020-04-27 17:12:00 +00:00
|
|
|
|
2020-05-11 16:09:00 +00:00
|
|
|
// Look for the id in the device tree. If it can be found, that means
|
|
|
|
// the device is being restored, otherwise it's created from scratch.
|
2020-05-12 13:53:09 +00:00
|
|
|
let (pci_device_bdf, config_bar_addr) =
|
|
|
|
if let Some(node) = self.device_tree.lock().unwrap().get(&id) {
|
|
|
|
debug!("Restoring virtio-pci {} resources", id);
|
|
|
|
let pci_device_bdf = node
|
|
|
|
.pci_bdf
|
|
|
|
.ok_or(DeviceManagerError::MissingDeviceNodePciBdf)?;
|
2020-05-11 16:09:00 +00:00
|
|
|
|
2020-05-12 13:53:09 +00:00
|
|
|
pci.get_device_id((pci_device_bdf >> 3) as usize)
|
|
|
|
.map_err(DeviceManagerError::GetPciDeviceId)?;
|
2020-05-11 16:09:00 +00:00
|
|
|
|
2020-05-12 13:53:09 +00:00
|
|
|
if node.resources.is_empty() {
|
2020-05-11 18:02:19 +00:00
|
|
|
return Err(DeviceManagerError::MissingVirtioPciResources);
|
|
|
|
}
|
|
|
|
|
2020-05-12 13:53:09 +00:00
|
|
|
// We know the configuration BAR address is stored on the first
|
|
|
|
// resource in the list.
|
|
|
|
let config_bar_addr = match node.resources[0] {
|
|
|
|
Resource::MmioAddressRange { base, .. } => Some(base),
|
|
|
|
_ => {
|
|
|
|
error!("Unexpected resource {:?} for {}", node.resources[0], id);
|
|
|
|
return Err(DeviceManagerError::MissingVirtioPciResources);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
(pci_device_bdf, config_bar_addr)
|
|
|
|
} else {
|
|
|
|
// We need to shift the device id since the 3 first bits are dedicated
|
|
|
|
// to the PCI function, and we know we don't do multifunction.
|
|
|
|
// Also, because we only support one PCI bus, the bus 0, we don't need
|
|
|
|
// to add anything to the global device ID.
|
|
|
|
let pci_device_bdf = pci
|
|
|
|
.next_device_id()
|
|
|
|
.map_err(DeviceManagerError::NextPciDeviceId)?
|
|
|
|
<< 3;
|
|
|
|
|
|
|
|
(pci_device_bdf, None)
|
|
|
|
};
|
2020-05-11 16:09:00 +00:00
|
|
|
|
2020-04-27 17:12:00 +00:00
|
|
|
// Update the existing virtio node by setting the parent.
|
2020-05-12 13:53:09 +00:00
|
|
|
if let Some(node) = self.device_tree.lock().unwrap().get_mut(&virtio_device_id) {
|
2020-04-27 17:12:00 +00:00
|
|
|
node.parent = Some(id.clone());
|
|
|
|
} else {
|
|
|
|
return Err(DeviceManagerError::MissingNode);
|
|
|
|
}
|
|
|
|
|
2019-12-05 15:42:15 +00:00
|
|
|
// Allows support for one MSI-X vector per queue. It also adds 1
|
|
|
|
// as we need to take into account the dedicated vector to notify
|
|
|
|
// about a virtio config change.
|
2019-11-18 23:10:42 +00:00
|
|
|
let msix_num = (virtio_device.lock().unwrap().queue_max_sizes().len() + 1) as u16;
|
2019-09-04 13:55:14 +00:00
|
|
|
|
2019-10-02 20:57:20 +00:00
|
|
|
// Create the callback from the implementation of the DmaRemapping
|
|
|
|
// trait. The point with the callback is to simplify the code as we
|
|
|
|
// know about the device ID from this point.
|
|
|
|
let iommu_mapping_cb: Option<Arc<VirtioIommuRemapping>> =
|
|
|
|
if let Some(mapping) = iommu_mapping {
|
|
|
|
let mapping_clone = mapping.clone();
|
|
|
|
Some(Arc::new(Box::new(move |addr: u64| {
|
2020-03-12 17:12:06 +00:00
|
|
|
mapping_clone.translate(pci_device_bdf, addr).map_err(|e| {
|
2019-10-02 20:57:20 +00:00
|
|
|
std::io::Error::new(
|
|
|
|
std::io::ErrorKind::Other,
|
|
|
|
format!(
|
|
|
|
"failed to translate addr 0x{:x} for device 00:{:02x}.0 {}",
|
2020-03-12 17:12:06 +00:00
|
|
|
addr, pci_device_bdf, e
|
2019-10-02 20:57:20 +00:00
|
|
|
),
|
|
|
|
)
|
|
|
|
})
|
|
|
|
}) as VirtioIommuRemapping))
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
};
|
|
|
|
|
2020-01-31 11:55:30 +00:00
|
|
|
let memory = self.memory_manager.lock().unwrap().guest_memory();
|
2020-01-09 16:03:06 +00:00
|
|
|
let mut virtio_pci_device = VirtioPciDevice::new(
|
2020-04-30 18:08:04 +00:00
|
|
|
id.clone(),
|
2020-01-31 11:55:30 +00:00
|
|
|
memory,
|
2020-01-09 16:03:06 +00:00
|
|
|
virtio_device,
|
|
|
|
msix_num,
|
|
|
|
iommu_mapping_cb,
|
2020-01-14 22:47:41 +00:00
|
|
|
interrupt_manager,
|
2020-06-17 13:58:15 +00:00
|
|
|
pci_device_bdf,
|
2020-01-09 16:03:06 +00:00
|
|
|
)
|
|
|
|
.map_err(DeviceManagerError::VirtioDevice)?;
|
|
|
|
|
2020-05-11 18:02:19 +00:00
|
|
|
// This is important as this will set the BAR address if it exists,
|
|
|
|
// which is mandatory on the restore path.
|
|
|
|
if let Some(addr) = config_bar_addr {
|
|
|
|
virtio_pci_device.set_config_bar_addr(addr);
|
|
|
|
}
|
|
|
|
|
2020-04-27 14:19:40 +00:00
|
|
|
let allocator = self.address_manager.allocator.clone();
|
|
|
|
let mut allocator = allocator.lock().unwrap();
|
2019-09-04 13:55:14 +00:00
|
|
|
let bars = virtio_pci_device
|
2019-10-23 22:14:13 +00:00
|
|
|
.allocate_bars(&mut allocator)
|
2019-09-04 13:55:14 +00:00
|
|
|
.map_err(DeviceManagerError::AllocateBars)?;
|
|
|
|
|
2019-10-30 16:13:29 +00:00
|
|
|
let bar_addr = virtio_pci_device.config_bar_addr();
|
2019-10-30 18:03:02 +00:00
|
|
|
for (event, addr) in virtio_pci_device.ioeventfds(bar_addr) {
|
2019-09-04 13:55:14 +00:00
|
|
|
let io_addr = IoEventAddress::Mmio(addr);
|
2020-01-31 16:07:08 +00:00
|
|
|
self.address_manager
|
2020-07-03 09:16:49 +00:00
|
|
|
.vm
|
2020-06-02 02:29:54 +00:00
|
|
|
.register_ioevent(event, &io_addr, None)
|
|
|
|
.map_err(|e| DeviceManagerError::RegisterIoevent(e.into()))?;
|
2019-09-04 13:55:14 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
let virtio_pci_device = Arc::new(Mutex::new(virtio_pci_device));
|
|
|
|
|
2020-03-12 17:12:06 +00:00
|
|
|
pci.add_device(pci_device_bdf, virtio_pci_device.clone())
|
2019-09-04 13:55:14 +00:00
|
|
|
.map_err(DeviceManagerError::AddPciDevice)?;
|
2020-03-18 17:29:23 +00:00
|
|
|
self.pci_devices.insert(
|
|
|
|
pci_device_bdf,
|
|
|
|
Arc::clone(&virtio_pci_device) as Arc<dyn Any + Send + Sync>,
|
|
|
|
);
|
2020-03-04 14:46:40 +00:00
|
|
|
self.bus_devices
|
|
|
|
.push(Arc::clone(&virtio_pci_device) as Arc<Mutex<dyn BusDevice>>);
|
|
|
|
|
2020-04-27 14:19:40 +00:00
|
|
|
if self.pci_id_list.contains_key(&virtio_device_id) {
|
2020-04-27 12:38:24 +00:00
|
|
|
return Err(DeviceManagerError::DeviceIdAlreadyInUse);
|
2020-04-08 13:04:03 +00:00
|
|
|
}
|
2020-04-27 14:19:40 +00:00
|
|
|
self.pci_id_list.insert(virtio_device_id, pci_device_bdf);
|
2020-04-08 13:04:03 +00:00
|
|
|
|
2019-09-04 13:55:14 +00:00
|
|
|
pci.register_mapping(
|
|
|
|
virtio_pci_device.clone(),
|
2020-06-09 06:17:42 +00:00
|
|
|
#[cfg(target_arch = "x86_64")]
|
2020-03-04 15:24:15 +00:00
|
|
|
self.address_manager.io_bus.as_ref(),
|
2020-01-29 15:53:12 +00:00
|
|
|
self.address_manager.mmio_bus.as_ref(),
|
2020-05-11 18:02:19 +00:00
|
|
|
bars.clone(),
|
2019-09-04 13:55:14 +00:00
|
|
|
)
|
|
|
|
.map_err(DeviceManagerError::AddPciDevice)?;
|
|
|
|
|
2020-05-11 18:02:19 +00:00
|
|
|
// Update the device tree with correct resource information.
|
|
|
|
for pci_bar in bars.iter() {
|
|
|
|
node.resources.push(Resource::MmioAddressRange {
|
|
|
|
base: pci_bar.0.raw_value(),
|
|
|
|
size: pci_bar.1 as u64,
|
|
|
|
});
|
|
|
|
}
|
2020-04-30 18:08:04 +00:00
|
|
|
node.migratable = Some(Arc::clone(&virtio_pci_device) as Arc<Mutex<dyn Migratable>>);
|
2020-05-11 16:09:00 +00:00
|
|
|
node.pci_bdf = Some(pci_device_bdf);
|
2020-05-12 13:53:09 +00:00
|
|
|
self.device_tree.lock().unwrap().insert(id, node);
|
2020-04-30 18:08:04 +00:00
|
|
|
|
2020-03-18 17:24:35 +00:00
|
|
|
Ok(pci_device_bdf)
|
2019-09-04 13:55:14 +00:00
|
|
|
}
|
|
|
|
|
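
    // Same as above but for the virtio-mmio transport: allocates (or restores)
    // an MMIO window and a legacy IRQ, registers ioeventfds and, on x86_64,
    // appends the matching virtio_mmio.device= entry to the kernel command
    // line.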
    #[cfg(feature = "mmio_support")]
    fn add_virtio_mmio_device(
        &mut self,
        virtio_device_id: String,
        virtio_device: VirtioDeviceArc,
        interrupt_manager: &Arc<dyn InterruptManager<GroupConfig = LegacyIrqGroupConfig>>,
    ) -> DeviceManagerResult<()> {
        let id = format!("{}-{}", VIRTIO_MMIO_DEVICE_NAME_PREFIX, virtio_device_id);

        // Create the new virtio-mmio node that will be added later to the
        // device tree.
        let mut node = device_node!(id);
        node.children = vec![virtio_device_id.clone()];

        // Look for the id in the device tree. If it can be found, that means
        // the device is being restored, otherwise it's created from scratch.
        let (mmio_range, mmio_irq) = if let Some(node) = self.device_tree.lock().unwrap().get(&id) {
            debug!("Restoring virtio-mmio {} resources", id);

            let mut mmio_range: Option<(u64, u64)> = None;
            let mut mmio_irq: Option<u32> = None;
            for resource in node.resources.iter() {
                match resource {
                    Resource::MmioAddressRange { base, size } => {
                        if mmio_range.is_some() {
                            return Err(DeviceManagerError::ResourceAlreadyExists);
                        }

                        mmio_range = Some((*base, *size));
                    }
                    Resource::LegacyIrq(irq) => {
                        if mmio_irq.is_some() {
                            return Err(DeviceManagerError::ResourceAlreadyExists);
                        }

                        mmio_irq = Some(*irq);
                    }
                    _ => {
                        error!("Unexpected resource {:?} for {}", resource, id);
                    }
                }
            }

            if mmio_range.is_none() || mmio_irq.is_none() {
                return Err(DeviceManagerError::MissingVirtioMmioResources);
            }

            (mmio_range, mmio_irq)
        } else {
            (None, None)
        };

        // Update the existing virtio node by setting the parent.
        if let Some(node) = self.device_tree.lock().unwrap().get_mut(&virtio_device_id) {
            node.parent = Some(id.clone());
        } else {
            return Err(DeviceManagerError::MissingNode);
        }

        let (mmio_base, mmio_size) = if let Some((base, size)) = mmio_range {
            self.address_manager
                .allocator
                .lock()
                .unwrap()
                .allocate_mmio_addresses(Some(GuestAddress(base)), size, Some(size))
                .ok_or(DeviceManagerError::MmioRangeAllocation)?;

            (base, size)
        } else {
            let size = MMIO_LEN;
            let base = self
                .address_manager
                .allocator
                .lock()
                .unwrap()
                .allocate_mmio_addresses(None, size, Some(size))
                .ok_or(DeviceManagerError::MmioRangeAllocation)?;

            (base.raw_value(), size)
        };

        let irq_num = if let Some(irq) = mmio_irq {
            irq
        } else {
            self.address_manager
                .allocator
                .lock()
                .unwrap()
                .allocate_irq()
                .ok_or(DeviceManagerError::AllocateIrq)?
        };

        #[cfg(target_arch = "aarch64")]
        {
            let device_type = virtio_device.lock().unwrap().device_type();
            self.id_to_dev_info.insert(
                (DeviceType::Virtio(device_type), virtio_device_id),
                MMIODeviceInfo {
                    addr: mmio_base,
                    len: mmio_size,
                    irq: irq_num,
                },
            );
        }

        let memory = self.memory_manager.lock().unwrap().guest_memory();
        let mut mmio_device =
            virtio_devices::transport::MmioDevice::new(id.clone(), memory, virtio_device)
                .map_err(DeviceManagerError::VirtioDevice)?;

        for (i, (event, addr)) in mmio_device.ioeventfds(mmio_base).iter().enumerate() {
            let io_addr = IoEventAddress::Mmio(*addr);
            self.address_manager
                .vm
                .register_ioevent(event, &io_addr, Some(DataMatch::DataMatch32(i as u32)))
                .map_err(|e| DeviceManagerError::RegisterIoevent(e.into()))?;
        }

        let interrupt_group = interrupt_manager
            .create_group(LegacyIrqGroupConfig {
                irq: irq_num as InterruptIndex,
            })
            .map_err(DeviceManagerError::CreateInterruptGroup)?;

        mmio_device.assign_interrupt(interrupt_group);

        let mmio_device_arc = Arc::new(Mutex::new(mmio_device));
        self.bus_devices
            .push(Arc::clone(&mmio_device_arc) as Arc<Mutex<dyn BusDevice>>);
        self.address_manager
            .mmio_bus
            .insert(mmio_device_arc.clone(), mmio_base, MMIO_LEN)
            .map_err(DeviceManagerError::BusError)?;

        #[cfg(target_arch = "x86_64")]
        self.cmdline_additions.push(format!(
            "virtio_mmio.device={}K@0x{:08x}:{}",
            mmio_size / 1024,
            mmio_base,
            irq_num
        ));

        // Update the device tree with correct resource information.
        node.resources.push(Resource::MmioAddressRange {
            base: mmio_base,
            size: mmio_size,
        });
        node.resources.push(Resource::LegacyIrq(irq_num));
        node.migratable = Some(Arc::clone(&mmio_device_arc) as Arc<Mutex<dyn Migratable>>);
        self.device_tree.lock().unwrap().insert(id, node);

        Ok(())
    }

    #[cfg(target_arch = "x86_64")]
    pub fn io_bus(&self) -> &Arc<devices::Bus> {
        &self.address_manager.io_bus
    }

    pub fn mmio_bus(&self) -> &Arc<devices::Bus> {
        &self.address_manager.mmio_bus
    }

    pub fn allocator(&self) -> &Arc<Mutex<SystemAllocator>> {
        &self.address_manager.allocator
    }

    pub fn interrupt_controller(&self) -> Option<Arc<Mutex<dyn InterruptController>>> {
        if let Some(interrupt_controller) = &self.interrupt_controller {
            Some(interrupt_controller.clone() as Arc<Mutex<dyn InterruptController>>)
        } else {
            None
        }
    }

    pub fn console(&self) -> &Arc<Console> {
        &self.console
    }

    pub fn cmdline_additions(&self) -> &[String] {
        self.cmdline_additions.as_slice()
    }
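
    // Propagates a guest memory update (e.g. after memory hotplug) to every
    // virtio device and, when PCI support is enabled, to every VFIO device so
    // their DMA mappings stay in sync with the new layout.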
    pub fn update_memory(&self, _new_region: &Arc<GuestRegionMmap>) -> DeviceManagerResult<()> {
        let memory = self.memory_manager.lock().unwrap().guest_memory();
        for (virtio_device, _, _) in self.virtio_devices.iter() {
            virtio_device
                .lock()
                .unwrap()
                .update_memory(&memory.memory())
                .map_err(DeviceManagerError::UpdateMemoryForVirtioDevice)?;
        }

        // Take care of updating the memory for VFIO PCI devices.
        #[cfg(feature = "pci_support")]
        {
            for (_, any_device) in self.pci_devices.iter() {
                if let Ok(vfio_pci_device) =
                    Arc::clone(any_device).downcast::<Mutex<VfioPciDevice>>()
                {
                    vfio_pci_device
                        .lock()
                        .unwrap()
                        .update_memory(_new_region)
                        .map_err(DeviceManagerError::UpdateMemoryForVfioPciDevice)?;
                }
            }
        }

        Ok(())
    }
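
    // Raises a notification through the ACPI GED device so the guest
    // re-evaluates the hotplug status registers; without ACPI support this is
    // a no-op.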
    pub fn notify_hotplug(
        &self,
        _notification_type: HotPlugNotificationFlags,
    ) -> DeviceManagerResult<()> {
        #[cfg(feature = "acpi")]
        return self
            .ged_notification_device
            .as_ref()
            .unwrap()
            .lock()
            .unwrap()
            .notify(_notification_type)
            .map_err(DeviceManagerError::HotPlugNotification);
        #[cfg(not(feature = "acpi"))]
        return Ok(());
    }
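
    // Hotplugs a VFIO device: reuses (or lazily creates) the passthrough
    // device handle, adds the device on the PCI bus, and marks its slot in the
    // PCIU bitmap so the guest picks it up on the next hotplug notification.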
    #[cfg(feature = "pci_support")]
    pub fn add_device(
        &mut self,
        device_cfg: &mut DeviceConfig,
    ) -> DeviceManagerResult<PciDeviceInfo> {
        let pci = if let Some(pci_bus) = &self.pci_bus {
            Arc::clone(&pci_bus)
        } else {
            return Err(DeviceManagerError::NoPciBus);
        };

        let interrupt_manager = Arc::clone(&self.msi_interrupt_manager);

        let device_fd = if let Some(device_fd) = &self.passthrough_device {
            Arc::clone(&device_fd)
        } else {
            // If the passthrough device file descriptor has not been created yet,
            // it is created here and stored in the DeviceManager structure for
            // future needs.
            let device_fd = self
                .address_manager
                .vm
                .create_passthrough_device()
                .map_err(|e| DeviceManagerError::CreatePassthroughDevice(e.into()))?;
            let device_fd = Arc::new(device_fd);
            self.passthrough_device = Some(Arc::clone(&device_fd));
            device_fd
        };

        let (device_id, device_name) = self.add_passthrough_device(
            &mut pci.lock().unwrap(),
            &interrupt_manager,
            &device_fd,
            device_cfg,
        )?;

        // Update the PCIU bitmap
        self.pci_devices_up |= 1 << (device_id >> 3);

        Ok(PciDeviceInfo {
            id: device_name,
            bdf: device_id,
        })
    }
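
    // Starts the removal of a hotpluggable device: its slot is marked in the
    // PCID bitmap and its node is dropped from the device tree. The actual
    // teardown happens in eject_device() once the guest has run _EJ0.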
    #[cfg(feature = "pci_support")]
    pub fn remove_device(&mut self, id: String) -> DeviceManagerResult<()> {
        if let Some(pci_device_bdf) = self.pci_id_list.get(&id) {
            if let Some(any_device) = self.pci_devices.get(&pci_device_bdf) {
                if let Ok(virtio_pci_device) =
                    Arc::clone(any_device).downcast::<Mutex<VirtioPciDevice>>()
                {
                    let device_type = VirtioDeviceType::from(
                        virtio_pci_device
                            .lock()
                            .unwrap()
                            .virtio_device()
                            .lock()
                            .unwrap()
                            .device_type(),
                    );
                    match device_type {
                        VirtioDeviceType::TYPE_NET
                        | VirtioDeviceType::TYPE_BLOCK
                        | VirtioDeviceType::TYPE_PMEM
                        | VirtioDeviceType::TYPE_FS
                        | VirtioDeviceType::TYPE_VSOCK => {}
                        _ => return Err(DeviceManagerError::RemovalNotAllowed(device_type)),
                    }
                }
            } else {
                return Err(DeviceManagerError::UnknownPciBdf(*pci_device_bdf));
            }

            // Update the PCID bitmap
            self.pci_devices_down |= 1 << (*pci_device_bdf >> 3);

            // Remove the device from the device tree along with its parent.
            let mut device_tree = self.device_tree.lock().unwrap();
            if let Some(node) = device_tree.remove(&id) {
                if let Some(parent) = &node.parent {
                    device_tree.remove(parent);
                }
            }

            Ok(())
        } else {
            Err(DeviceManagerError::UnknownDeviceId(id))
        }
    }
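
    // Completes the removal triggered by the guest eject: releases the BDF,
    // frees BARs, detaches the device from the PCI/IO/MMIO buses and, for
    // virtio devices, removes userspace mappings and shuts the device down.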
    #[cfg(feature = "pci_support")]
    pub fn eject_device(&mut self, device_id: u8) -> DeviceManagerResult<()> {
        // Retrieve the PCI bus.
        let pci = if let Some(pci_bus) = &self.pci_bus {
            Arc::clone(&pci_bus)
        } else {
            return Err(DeviceManagerError::NoPciBus);
        };

        // Convert the device ID into the corresponding b/d/f.
        let pci_device_bdf = (device_id as u32) << 3;

        // Find the device name corresponding to the PCI b/d/f while removing
        // the device entry.
        self.pci_id_list.retain(|_, bdf| *bdf != pci_device_bdf);

        // Give the PCI device ID back to the PCI bus.
        pci.lock()
            .unwrap()
            .put_device_id(device_id as usize)
            .map_err(DeviceManagerError::PutPciDeviceId)?;

        if let Some(any_device) = self.pci_devices.remove(&pci_device_bdf) {
            let (pci_device, bus_device, virtio_device) = if let Ok(vfio_pci_device) =
                any_device.clone().downcast::<Mutex<VfioPciDevice>>()
            {
                (
                    Arc::clone(&vfio_pci_device) as Arc<Mutex<dyn PciDevice>>,
                    Arc::clone(&vfio_pci_device) as Arc<Mutex<dyn BusDevice>>,
                    None as Option<VirtioDeviceArc>,
                )
            } else if let Ok(virtio_pci_device) = any_device.downcast::<Mutex<VirtioPciDevice>>() {
                let bar_addr = virtio_pci_device.lock().unwrap().config_bar_addr();
                for (event, addr) in virtio_pci_device.lock().unwrap().ioeventfds(bar_addr) {
                    let io_addr = IoEventAddress::Mmio(addr);
                    self.address_manager
                        .vm
                        .unregister_ioevent(event, &io_addr)
                        .map_err(|e| DeviceManagerError::UnRegisterIoevent(e.into()))?;
                }

                (
                    Arc::clone(&virtio_pci_device) as Arc<Mutex<dyn PciDevice>>,
                    Arc::clone(&virtio_pci_device) as Arc<Mutex<dyn BusDevice>>,
                    Some(virtio_pci_device.lock().unwrap().virtio_device()),
                )
            } else {
                return Ok(());
            };

            // Free the allocated BARs
            pci_device
                .lock()
                .unwrap()
                .free_bars(&mut self.address_manager.allocator.lock().unwrap())
                .map_err(DeviceManagerError::FreePciBars)?;

            // Remove the device from the PCI bus
            pci.lock()
                .unwrap()
                .remove_by_device(&pci_device)
                .map_err(DeviceManagerError::RemoveDeviceFromPciBus)?;

            #[cfg(target_arch = "x86_64")]
            // Remove the device from the IO bus
            self.io_bus()
                .remove_by_device(&bus_device)
                .map_err(DeviceManagerError::RemoveDeviceFromIoBus)?;

            // Remove the device from the MMIO bus
            self.mmio_bus()
                .remove_by_device(&bus_device)
                .map_err(DeviceManagerError::RemoveDeviceFromMmioBus)?;

            // Remove the device from the list of BusDevice held by the
            // DeviceManager.
            self.bus_devices
                .retain(|dev| !Arc::ptr_eq(dev, &bus_device));

            // Shutdown and remove the underlying virtio-device if present
            if let Some(virtio_device) = virtio_device {
                for mapping in virtio_device.lock().unwrap().userspace_mappings() {
                    self.memory_manager
                        .lock()
                        .unwrap()
                        .remove_userspace_mapping(
                            mapping.addr.raw_value(),
                            mapping.len,
                            mapping.host_addr,
                            mapping.mergeable,
                            mapping.mem_slot,
                        )
                        .map_err(DeviceManagerError::MemoryManager)?;
                }

                virtio_device.lock().unwrap().shutdown();

                self.virtio_devices
                    .retain(|(d, _, _)| !Arc::ptr_eq(d, &virtio_device));
            }

            // At this point, the device has been removed from all the lists and
            // buses where it was stored. At the end of this function, after
            // any_device, bus_device and pci_device are released, the actual
            // device will be dropped.

            Ok(())
        } else {
            Err(DeviceManagerError::MissingPciDevice)
        }
    }

    #[cfg(feature = "pci_support")]
    fn hotplug_virtio_pci_device(
        &mut self,
        device: VirtioDeviceArc,
        iommu_attached: bool,
        id: String,
    ) -> DeviceManagerResult<PciDeviceInfo> {
        if iommu_attached {
            warn!("Placing device behind vIOMMU is not available for hotplugged devices");
        }

        let pci = if let Some(pci_bus) = &self.pci_bus {
            Arc::clone(&pci_bus)
        } else {
            return Err(DeviceManagerError::NoPciBus);
        };

        let interrupt_manager = Arc::clone(&self.msi_interrupt_manager);

        // Add the virtio device to the device manager list. This is important
        // as the list is used to notify virtio devices about memory updates
        // for instance.
        self.virtio_devices
            .push((device.clone(), iommu_attached, id.clone()));

        let device_id = self.add_virtio_pci_device(
            device,
            &mut pci.lock().unwrap(),
            &None,
            &interrupt_manager,
            id.clone(),
        )?;

        // Update the PCIU bitmap
        self.pci_devices_up |= 1 << (device_id >> 3);

        Ok(PciDeviceInfo { id, bdf: device_id })
    }
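
    // Convenience hotplug entry points: each one builds the corresponding
    // virtio device from its config and plugs it as a virtio-pci device.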
    #[cfg(feature = "pci_support")]
    pub fn add_disk(&mut self, disk_cfg: &mut DiskConfig) -> DeviceManagerResult<PciDeviceInfo> {
        let (device, iommu_attached, id) = self.make_virtio_block_device(disk_cfg)?;
        self.hotplug_virtio_pci_device(device, iommu_attached, id)
    }

    #[cfg(feature = "pci_support")]
    pub fn add_fs(&mut self, fs_cfg: &mut FsConfig) -> DeviceManagerResult<PciDeviceInfo> {
        let (device, iommu_attached, id) = self.make_virtio_fs_device(fs_cfg)?;
        self.hotplug_virtio_pci_device(device, iommu_attached, id)
    }

    #[cfg(feature = "pci_support")]
    pub fn add_pmem(&mut self, pmem_cfg: &mut PmemConfig) -> DeviceManagerResult<PciDeviceInfo> {
        let (device, iommu_attached, id) = self.make_virtio_pmem_device(pmem_cfg)?;
        self.hotplug_virtio_pci_device(device, iommu_attached, id)
    }

    #[cfg(feature = "pci_support")]
    pub fn add_net(&mut self, net_cfg: &mut NetConfig) -> DeviceManagerResult<PciDeviceInfo> {
        let (device, iommu_attached, id) = self.make_virtio_net_device(net_cfg)?;
        self.hotplug_virtio_pci_device(device, iommu_attached, id)
    }

    #[cfg(feature = "pci_support")]
    pub fn add_vsock(&mut self, vsock_cfg: &mut VsockConfig) -> DeviceManagerResult<PciDeviceInfo> {
        let (device, iommu_attached, id) = self.make_virtio_vsock_device(vsock_cfg)?;
        self.hotplug_virtio_pci_device(device, iommu_attached, id)
    }
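
    // Collects per-device counters, keyed by device id; each entry maps a
    // counter name to its current wrapping value.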
    pub fn counters(&self) -> HashMap<String, HashMap<&'static str, Wrapping<u64>>> {
        let mut counters = HashMap::new();

        for (virtio_device, _, id) in &self.virtio_devices {
            let virtio_device = virtio_device.lock().unwrap();
            if let Some(device_counters) = virtio_device.counters() {
                counters.insert(id.clone(), device_counters.clone());
            }
        }

        counters
    }
}

#[cfg(feature = "acpi")]
struct PciDevSlot {
    device_id: u8,
}

#[cfg(feature = "acpi")]
impl Aml for PciDevSlot {
    fn to_aml_bytes(&self) -> Vec<u8> {
        let sun = self.device_id;
        let adr: u32 = (self.device_id as u32) << 16;
        aml::Device::new(
            format!("S{:03}", self.device_id).as_str().into(),
            vec![
                &aml::Name::new("_SUN".into(), &sun),
                &aml::Name::new("_ADR".into(), &adr),
                &aml::Method::new(
                    "_EJ0".into(),
                    1,
                    true,
                    vec![&aml::MethodCall::new(
                        "\\_SB_.PHPR.PCEJ".into(),
                        vec![&aml::Path::new("_SUN")],
                    )],
                ),
            ],
        )
        .to_aml_bytes()
    }
}

#[cfg(feature = "acpi")]
struct PciDevSlotNotify {
    device_id: u8,
}

#[cfg(feature = "acpi")]
impl Aml for PciDevSlotNotify {
    fn to_aml_bytes(&self) -> Vec<u8> {
        let device_id_mask: u32 = 1 << self.device_id;
        let object = aml::Path::new(&format!("S{:03}", self.device_id));
        let mut bytes = aml::And::new(&aml::Local(0), &aml::Arg(0), &device_id_mask).to_aml_bytes();
        bytes.extend_from_slice(
            &aml::If::new(
                &aml::Equal::new(&aml::Local(0), &device_id_mask),
                vec![&aml::Notify::new(&object, &aml::Arg(1))],
            )
            .to_aml_bytes(),
        );
        bytes
    }
}

#[cfg(feature = "acpi")]
struct PciDevSlotMethods {}

#[cfg(feature = "acpi")]
impl Aml for PciDevSlotMethods {
    fn to_aml_bytes(&self) -> Vec<u8> {
        let mut device_notifies = Vec::new();
        for device_id in 0..32 {
            device_notifies.push(PciDevSlotNotify { device_id });
        }

        let mut device_notifies_refs: Vec<&dyn aml::Aml> = Vec::new();
        for device_notify in device_notifies.iter() {
            device_notifies_refs.push(device_notify);
        }

        let mut bytes =
            aml::Method::new("DVNT".into(), 2, true, device_notifies_refs).to_aml_bytes();

        bytes.extend_from_slice(
            &aml::Method::new(
                "PCNT".into(),
                0,
                true,
                vec![
                    &aml::MethodCall::new(
                        "DVNT".into(),
                        vec![&aml::Path::new("\\_SB_.PHPR.PCIU"), &aml::ONE],
                    ),
                    &aml::MethodCall::new(
                        "DVNT".into(),
                        vec![&aml::Path::new("\\_SB_.PHPR.PCID"), &3usize],
                    ),
                ],
            )
            .to_aml_bytes(),
        );
        bytes
    }
}
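
// DSDT fragment advertised to the guest: the PCI hotplug controller (PHPR) at
// I/O port 0xae00, the PCI host bridge (PCI0) with its 32 hotpluggable slots,
// the motherboard resources (MBRD), the serial port (COM1) when enabled, the
// S5 sleep state and the GED notification device.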
#[cfg(feature = "acpi")]
impl Aml for DeviceManager {
    fn to_aml_bytes(&self) -> Vec<u8> {
        let mut bytes = Vec::new();
        // PCI hotplug controller
        bytes.extend_from_slice(
            &aml::Device::new(
                "_SB_.PHPR".into(),
                vec![
                    &aml::Name::new("_HID".into(), &aml::EISAName::new("PNP0A06")),
                    &aml::Name::new("_STA".into(), &0x0bu8),
                    &aml::Mutex::new("BLCK".into(), 0),
                    // I/O port for PCI hotplug controller
                    &aml::Name::new(
                        "_CRS".into(),
                        &aml::ResourceTemplate::new(vec![&aml::IO::new(
                            0xae00, 0xae00, 0x01, 0x10,
                        )]),
                    ),
                    // OpRegion and Fields map I/O port into individual field values
                    &aml::OpRegion::new("PCST".into(), aml::OpRegionSpace::SystemIO, 0xae00, 0x10),
                    &aml::Field::new(
                        "PCST".into(),
                        aml::FieldAccessType::DWord,
                        aml::FieldUpdateRule::WriteAsZeroes,
                        vec![
                            aml::FieldEntry::Named(*b"PCIU", 32),
                            aml::FieldEntry::Named(*b"PCID", 32),
                            aml::FieldEntry::Named(*b"B0EJ", 32),
                        ],
                    ),
                    &aml::Method::new(
                        "PCEJ".into(),
                        1,
                        true,
                        vec![
                            // Take lock defined above
                            &aml::Acquire::new("BLCK".into(), 0xffff),
                            // Write PCI bus number (in first argument) to I/O port via field
                            &aml::ShiftLeft::new(&aml::Path::new("B0EJ"), &aml::ONE, &aml::Arg(0)),
                            // Release lock
                            &aml::Release::new("BLCK".into()),
                            // Return 0
                            &aml::Return::new(&aml::ZERO),
                        ],
                    ),
                ],
            )
            .to_aml_bytes(),
        );

        let start_of_device_area = self.memory_manager.lock().unwrap().start_of_device_area().0;
        let end_of_device_area = self.memory_manager.lock().unwrap().end_of_device_area().0;

        let mut pci_dsdt_inner_data: Vec<&dyn aml::Aml> = Vec::new();
        let hid = aml::Name::new("_HID".into(), &aml::EISAName::new("PNP0A08"));
        pci_dsdt_inner_data.push(&hid);
        let cid = aml::Name::new("_CID".into(), &aml::EISAName::new("PNP0A03"));
        pci_dsdt_inner_data.push(&cid);
        let adr = aml::Name::new("_ADR".into(), &aml::ZERO);
        pci_dsdt_inner_data.push(&adr);
        let seg = aml::Name::new("_SEG".into(), &aml::ZERO);
        pci_dsdt_inner_data.push(&seg);
        let uid = aml::Name::new("_UID".into(), &aml::ZERO);
        pci_dsdt_inner_data.push(&uid);
        let supp = aml::Name::new("SUPP".into(), &aml::ZERO);
        pci_dsdt_inner_data.push(&supp);
        let crs = aml::Name::new(
            "_CRS".into(),
            &aml::ResourceTemplate::new(vec![
                &aml::AddressSpace::new_bus_number(0x0u16, 0xffu16),
                &aml::IO::new(0xcf8, 0xcf8, 1, 0x8),
                &aml::AddressSpace::new_io(0x0u16, 0xcf7u16),
                &aml::AddressSpace::new_io(0xd00u16, 0xffffu16),
                &aml::AddressSpace::new_memory(
                    aml::AddressSpaceCachable::NotCacheable,
                    true,
                    layout::MEM_32BIT_DEVICES_START.0 as u32,
                    (layout::MEM_32BIT_DEVICES_START.0 + layout::MEM_32BIT_DEVICES_SIZE - 1) as u32,
                ),
                &aml::AddressSpace::new_memory(
                    aml::AddressSpaceCachable::NotCacheable,
                    true,
                    start_of_device_area,
                    end_of_device_area,
                ),
            ]),
        );
        pci_dsdt_inner_data.push(&crs);

        let mut pci_devices = Vec::new();
        for device_id in 0..32 {
            let pci_device = PciDevSlot { device_id };
            pci_devices.push(pci_device);
        }
        for pci_device in pci_devices.iter() {
            pci_dsdt_inner_data.push(pci_device);
        }

        let pci_device_methods = PciDevSlotMethods {};
        pci_dsdt_inner_data.push(&pci_device_methods);

        let pci_dsdt_data =
            aml::Device::new("_SB_.PCI0".into(), pci_dsdt_inner_data).to_aml_bytes();

        let mbrd_dsdt_data = aml::Device::new(
            "_SB_.MBRD".into(),
            vec![
                &aml::Name::new("_HID".into(), &aml::EISAName::new("PNP0C02")),
                &aml::Name::new("_UID".into(), &aml::ZERO),
                &aml::Name::new(
                    "_CRS".into(),
                    &aml::ResourceTemplate::new(vec![&aml::Memory32Fixed::new(
                        true,
                        layout::PCI_MMCONFIG_START.0 as u32,
                        layout::PCI_MMCONFIG_SIZE as u32,
                    )]),
                ),
            ],
        )
        .to_aml_bytes();

        let com1_dsdt_data = aml::Device::new(
            "_SB_.COM1".into(),
            vec![
                &aml::Name::new("_HID".into(), &aml::EISAName::new("PNP0501")),
                &aml::Name::new("_UID".into(), &aml::ZERO),
                &aml::Name::new(
                    "_CRS".into(),
                    &aml::ResourceTemplate::new(vec![
                        &aml::Interrupt::new(true, true, false, false, 4),
                        &aml::IO::new(0x3f8, 0x3f8, 0, 0x8),
                    ]),
                ),
            ],
        )
        .to_aml_bytes();

        let s5_sleep_data =
            aml::Name::new("_S5_".into(), &aml::Package::new(vec![&5u8])).to_aml_bytes();

        let ged_data = self
            .ged_notification_device
            .as_ref()
            .unwrap()
            .lock()
            .unwrap()
            .to_aml_bytes();

        bytes.extend_from_slice(pci_dsdt_data.as_slice());
        bytes.extend_from_slice(mbrd_dsdt_data.as_slice());
        if self.config.lock().unwrap().serial.mode != ConsoleOutputMode::Off {
            bytes.extend_from_slice(com1_dsdt_data.as_slice());
        }
        bytes.extend_from_slice(s5_sleep_data.as_slice());
        bytes.extend_from_slice(ged_data.as_slice());
        bytes
    }
}

impl Pausable for DeviceManager {
    fn pause(&mut self) -> result::Result<(), MigratableError> {
        for (_, device_node) in self.device_tree.lock().unwrap().iter() {
            if let Some(migratable) = &device_node.migratable {
                migratable.lock().unwrap().pause()?;
            }
        }

        Ok(())
    }

    fn resume(&mut self) -> result::Result<(), MigratableError> {
        for (_, device_node) in self.device_tree.lock().unwrap().iter() {
            if let Some(migratable) = &device_node.migratable {
                migratable.lock().unwrap().resume()?;
            }
        }

        Ok(())
    }
}
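
// Snapshots aggregate every migratable device found in the device tree plus
// the DeviceManager's own state; restore walks the tree so that a child is
// always restored before its parent node.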
impl Snapshottable for DeviceManager {
    fn id(&self) -> String {
        DEVICE_MANAGER_SNAPSHOT_ID.to_string()
    }

    fn snapshot(&self) -> std::result::Result<Snapshot, MigratableError> {
        let mut snapshot = Snapshot::new(DEVICE_MANAGER_SNAPSHOT_ID);

        // We aggregate all devices snapshots.
        for (_, device_node) in self.device_tree.lock().unwrap().iter() {
            if let Some(migratable) = &device_node.migratable {
                let device_snapshot = migratable.lock().unwrap().snapshot()?;
                snapshot.add_snapshot(device_snapshot);
            }
        }

        // Then we store the DeviceManager state.
        snapshot.add_data_section(SnapshotDataSection {
            id: format!("{}-section", DEVICE_MANAGER_SNAPSHOT_ID),
            snapshot: serde_json::to_vec(&self.state())
                .map_err(|e| MigratableError::Snapshot(e.into()))?,
        });

        Ok(snapshot)
    }

    fn restore(&mut self, snapshot: Snapshot) -> std::result::Result<(), MigratableError> {
        // Let's first restore the DeviceManager.
        if let Some(device_manager_section) = snapshot
            .snapshot_data
            .get(&format!("{}-section", DEVICE_MANAGER_SNAPSHOT_ID))
        {
            let device_manager_state = serde_json::from_slice(&device_manager_section.snapshot)
                .map_err(|e| {
                    MigratableError::Restore(anyhow!("Could not deserialize DeviceManager {}", e))
                })?;

            self.set_state(&device_manager_state).map_err(|e| {
                MigratableError::Restore(anyhow!("Could not restore DeviceManager state {:?}", e))
            })?;
        } else {
            return Err(MigratableError::Restore(anyhow!(
                "Could not find DeviceManager snapshot section"
            )));
        }

        // Now that DeviceManager is updated with the right states, it's time
        // to create the devices based on the configuration.
        self.create_devices()
            .map_err(|e| MigratableError::Restore(anyhow!("Could not create devices {:?}", e)))?;

        // Finally, restore all devices associated with the DeviceManager.
        // Ordering matters here: walking the device tree guarantees that a
        // child is restored before its parent node.
        for node in self
            .device_tree
            .lock()
            .unwrap()
            .breadth_first_traversal()
            .rev()
        {
            // Restore the node
            if let Some(migratable) = &node.migratable {
                debug!("Restoring {} from DeviceManager", node.id);
                if let Some(snapshot) = snapshot.snapshots.get(&node.id) {
                    migratable.lock().unwrap().pause()?;
                    migratable.lock().unwrap().restore(*snapshot.clone())?;
                } else {
                    return Err(MigratableError::Restore(anyhow!(
                        "Missing device {}",
                        node.id
                    )));
                }
            }
        }

        Ok(())
    }
}

impl Transportable for DeviceManager {}
impl Migratable for DeviceManager {}

#[cfg(feature = "pci_support")]
const PCIU_FIELD_OFFSET: u64 = 0;
#[cfg(feature = "pci_support")]
const PCID_FIELD_OFFSET: u64 = 4;
#[cfg(feature = "pci_support")]
const B0EJ_FIELD_OFFSET: u64 = 8;

#[cfg(feature = "pci_support")]
const PCIU_FIELD_SIZE: usize = 4;
#[cfg(feature = "pci_support")]
const PCID_FIELD_SIZE: usize = 4;
#[cfg(feature = "pci_support")]
const B0EJ_FIELD_SIZE: usize = 4;
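
// The DeviceManager itself is a BusDevice: it backs the PCI hotplug register
// block exposed through the PHPR ACPI device above. Reads at PCIU/PCID return
// the plug/unplug bitmaps (and clear them), while a write to B0EJ ejects every
// device whose bit is set in the written mask.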
impl BusDevice for DeviceManager {
    fn read(&mut self, base: u64, offset: u64, data: &mut [u8]) {
        #[cfg(feature = "pci_support")]
        match offset {
            PCIU_FIELD_OFFSET => {
                assert!(data.len() == PCIU_FIELD_SIZE);
                data.copy_from_slice(&self.pci_devices_up.to_le_bytes());
                // Clear the PCIU bitmap
                self.pci_devices_up = 0;
            }
            PCID_FIELD_OFFSET => {
                assert!(data.len() == PCID_FIELD_SIZE);
                data.copy_from_slice(&self.pci_devices_down.to_le_bytes());
                // Clear the PCID bitmap
                self.pci_devices_down = 0;
            }
            _ => error!(
                "Accessing unknown location at base 0x{:x}, offset 0x{:x}",
                base, offset
            ),
        }

        debug!(
            "PCI_HP_REG_R: base 0x{:x}, offset 0x{:x}, data {:?}",
            base, offset, data
        )
    }

    fn write(&mut self, base: u64, offset: u64, data: &[u8]) {
        #[cfg(feature = "pci_support")]
        match offset {
            B0EJ_FIELD_OFFSET => {
                assert!(data.len() == B0EJ_FIELD_SIZE);
                let mut data_array: [u8; 4] = [0, 0, 0, 0];
                data_array.copy_from_slice(&data[..]);
                let device_bitmap = u32::from_le_bytes(data_array);

                for device_id in 0..32 {
                    let mask = 1u32 << device_id;
                    if (device_bitmap & mask) == mask {
                        if let Err(e) = self.eject_device(device_id as u8) {
                            error!("Failed ejecting device {}: {:?}", device_id, e);
                        }
                    }
                }
            }
            _ => error!(
                "Accessing unknown location at base 0x{:x}, offset 0x{:x}",
                base, offset
            ),
        }

        debug!(
            "PCI_HP_REG_W: base 0x{:x}, offset 0x{:x}, data {:?}",
            base, offset, data
        )
    }
}

impl Drop for DeviceManager {
    fn drop(&mut self) {
        for (device, _, _) in self.virtio_devices.drain(..) {
            device.lock().unwrap().shutdown();
        }
    }
}