arch, devices, vmm: Remove "acpi" feature gate

Compile this feature in by default: it is well supported on both
aarch64 and x86_64, and it is the only configuration we officially
support (no non-ACPI binaries are available).

Signed-off-by: Rob Bradford <robert.bradford@intel.com>
Rob Bradford 2022-03-28 11:53:22 +01:00 committed by Bo Chen
parent fa7487629f
commit 7c0cf8cc23
16 changed files with 63 additions and 214 deletions
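The change is almost entirely mechanical: items that were previously compiled only with `--features acpi` become unconditional, the `acpi_tables` dependency stops being optional, and compound `cfg` gates are reduced to their architecture component. A minimal sketch of the recurring pattern (taken from the devices crate diff below):

    // Before: only compiled when the "acpi" feature was enabled.
    #[cfg(feature = "acpi")]
    pub mod acpi;

    // After: always compiled.
    pub mod acpi;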

@@ -53,8 +53,7 @@ wait-timeout = "0.2.0"
[features]
default = ["common", "kvm"]
# Common features for all hypervisors
common = ["acpi", "cmos", "fwdebug"]
acpi = ["vmm/acpi"]
common = ["cmos", "fwdebug"]
amx = ["vmm/amx"]
cmos = ["vmm/cmos"]
fwdebug = ["vmm/fwdebug"]

@@ -6,11 +6,10 @@ edition = "2018"
[features]
default = []
acpi = ["acpi_tables"]
tdx = []
[dependencies]
acpi_tables = { path = "../acpi_tables", optional = true }
acpi_tables = { path = "../acpi_tables" }
anyhow = "1.0.56"
byteorder = "1.4.3"
hypervisor = { path = "../hypervisor" }

@@ -242,7 +242,6 @@ fn create_memory_node(
let mem_reg_prop = [memory_region_start_addr, memory_region_size];
// With feature `acpi` enabled, RAM at 0-4M is for edk2 only
// and should be hidden from the guest.
#[cfg(feature = "acpi")]
if memory_region_start_addr == 0 {
continue;
}
@@ -548,20 +547,19 @@ fn create_pci_nodes(
// could fall below 4G.
// Here we cut off PCI device space below 8G in FDT to workaround the EDK2 check.
// But the address written in ACPI is not impacted.
let (pci_device_base_64bit, pci_device_size_64bit) = if cfg!(feature = "acpi")
&& (pci_device_info_elem.pci_device_space_start < PCI_HIGH_BASE)
{
(
PCI_HIGH_BASE,
pci_device_info_elem.pci_device_space_size
- (PCI_HIGH_BASE - pci_device_info_elem.pci_device_space_start),
)
} else {
(
pci_device_info_elem.pci_device_space_start,
pci_device_info_elem.pci_device_space_size,
)
};
let (pci_device_base_64bit, pci_device_size_64bit) =
if pci_device_info_elem.pci_device_space_start < PCI_HIGH_BASE {
(
PCI_HIGH_BASE,
pci_device_info_elem.pci_device_space_size
- (PCI_HIGH_BASE - pci_device_info_elem.pci_device_space_start),
)
} else {
(
pci_device_info_elem.pci_device_space_start,
pci_device_info_elem.pci_device_space_size,
)
};
// There is no specific requirement of the 32bit MMIO range, and
// therefore at least we can make these ranges 4K aligned.
let pci_device_size_32bit: u64 =

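The 64-bit window adjustment above is easier to follow with concrete numbers. Note that `cfg!(feature = "acpi")` was a compile-time boolean, so with the feature unconditional the gate reduces to the plain address comparison. A self-contained sketch, assuming PCI_HIGH_BASE is 8 GiB as the comment states (names and values here are illustrative, not the crate's API):

    // Illustrative mirror of the window adjustment in create_pci_nodes.
    const PCI_HIGH_BASE: u64 = 0x2_0000_0000; // 8 GiB, per the comment above

    fn adjust_64bit_window(start: u64, size: u64) -> (u64, u64) {
        if start < PCI_HIGH_BASE {
            // Raise the base to 8 GiB and shrink the window accordingly.
            (PCI_HIGH_BASE, size - (PCI_HIGH_BASE - start))
        } else {
            (start, size)
        }
    }

    fn main() {
        // An 8 GiB window starting at 4 GiB becomes a 4 GiB window at 8 GiB;
        // the addresses written into the ACPI tables are not impacted.
        assert_eq!(
            adjust_64bit_window(0x1_0000_0000, 0x2_0000_0000),
            (0x2_0000_0000, 0x1_0000_0000)
        );
    }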
@@ -83,22 +83,11 @@ pub fn arch_memory_regions(size: GuestUsize) -> Vec<(GuestAddress, usize, Region
// As a workaround, we take 4 MiB of memory from the main RAM for UEFI.
// As a result, the RAM that the guest can see is less than what has been
// assigned on the command line, when ACPI and UEFI are enabled.
let ram_deduction = if cfg!(feature = "acpi") {
layout::UEFI_SIZE
} else {
0
};
let ram_deduction = layout::UEFI_SIZE;
vec![
// 0 ~ 4 MiB: Reserved for UEFI space
#[cfg(feature = "acpi")]
(GuestAddress(0), layout::UEFI_SIZE as usize, RegionType::Ram),
#[cfg(not(feature = "acpi"))]
(
GuestAddress(0),
layout::UEFI_SIZE as usize,
RegionType::Reserved,
),
// 4 MiB ~ 256 MiB: Gic and legacy devices
(
GuestAddress(layout::UEFI_SIZE),
@@ -224,7 +213,7 @@ mod tests {
let regions = arch_memory_regions((1usize << 32) as u64); //4GB
assert_eq!(5, regions.len());
assert_eq!(GuestAddress(layout::RAM_64BIT_START), regions[4].0);
assert_eq!(1usize << 32, regions[4].1);
assert_eq!(((1 << 32) - layout::UEFI_SIZE) as usize, regions[4].1);
assert_eq!(RegionType::Ram, regions[4].2);
}
}

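The updated unit test pins down the consequence spelled out in the comments: with ACPI always compiled in, the 4 MiB UEFI region at address 0 is now unconditionally carved out of guest RAM. A worked example, assuming layout::UEFI_SIZE is 4 MiB as the comments state:

    // Illustrative: guest-visible RAM after the unconditional UEFI carve-out.
    fn main() {
        let assigned: u64 = 1 << 32; // 4 GiB requested on the command line
        let uefi_size: u64 = 4 << 20; // 4 MiB reserved at address 0 for UEFI
        let guest_visible = assigned - uefi_size;
        // Matches the updated assertion on regions[4] in the test above.
        assert_eq!(guest_visible, 4_290_772_992);
    }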
@@ -5,7 +5,7 @@ authors = ["The Chromium OS Authors"]
edition = "2018"
[dependencies]
acpi_tables = { path = "../acpi_tables", optional = true }
acpi_tables = { path = "../acpi_tables" }
anyhow = "1.0.56"
arch = { path = "../arch" }
bitflags = "1.3.2"
@@ -22,6 +22,5 @@ vmm-sys-util = "0.9.0"
[features]
default = []
acpi = ["acpi_tables"]
cmos = []
fwdebug = []

@@ -102,7 +102,6 @@ impl BusDevice for AcpiGedDevice {
}
}
#[cfg(feature = "acpi")]
impl Aml for AcpiGedDevice {
fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
aml::Device::new(

@@ -12,7 +12,6 @@ extern crate bitflags;
#[macro_use]
extern crate log;
#[cfg(feature = "acpi")]
pub mod acpi;
#[cfg(target_arch = "aarch64")]
pub mod gic;
@@ -21,7 +20,6 @@ pub mod interrupt_controller;
pub mod ioapic;
pub mod legacy;
#[cfg(feature = "acpi")]
pub use self::acpi::{AcpiGedDevice, AcpiPmTimerDevice, AcpiShutdownDevice};
bitflags! {

@@ -744,7 +744,7 @@ fn test_vhost_user_net(
);
// ACPI feature is needed.
#[cfg(all(target_arch = "x86_64", feature = "acpi"))]
#[cfg(target_arch = "x86_64")]
{
guest.enable_memory_hotplug();
@@ -883,7 +883,7 @@ fn test_vhost_user_blk(
);
// ACPI feature is needed.
#[cfg(all(target_arch = "x86_64", feature = "acpi"))]
#[cfg(target_arch = "x86_64")]
{
guest.enable_memory_hotplug();
@@ -1121,7 +1121,7 @@ fn test_virtio_fs(
);
// ACPI feature is needed.
#[cfg(all(target_arch = "x86_64", feature = "acpi"))]
#[cfg(target_arch = "x86_64")]
{
guest.enable_memory_hotplug();
@@ -3303,7 +3303,6 @@ mod parallel {
let r = std::panic::catch_unwind(|| {
guest.wait_vm_boot(None).unwrap();
#[cfg(feature = "acpi")]
assert!(guest
.does_device_vendor_pair_match("0x1043", "0x1af4")
.unwrap_or_default());
@@ -4305,7 +4304,7 @@ mod parallel {
// On AArch64, when ACPI is enabled, there is a 4 MiB gap between the RAM
// that the VMM assigns and the RAM that the guest can see.
// This is a temporary solution; it will be fixed in the future.
#[cfg(all(target_arch = "aarch64", feature = "acpi"))]
#[cfg(target_arch = "aarch64")]
let guest_memory_size_kb = guest_memory_size_kb - 4 * 1024;
let overhead = get_vmm_overhead(child.id(), guest_memory_size_kb);
@@ -7782,7 +7781,7 @@ mod live_migration {
}
}
#[cfg(all(target_arch = "aarch64", feature = "acpi"))]
#[cfg(target_arch = "aarch64")]
mod aarch64_acpi {
use crate::*;

@@ -6,7 +6,6 @@ edition = "2018"
[features]
default = []
acpi = ["acpi_tables","devices/acpi", "arch/acpi"]
amx = []
cmos = ["devices/cmos"]
fwdebug = ["devices/fwdebug"]
@@ -16,7 +15,7 @@ mshv = ["hypervisor/mshv", "virtio-devices/mshv", "vfio-ioctls/mshv", "vm-device
tdx = ["arch/tdx", "hypervisor/tdx"]
[dependencies]
acpi_tables = { path = "../acpi_tables", optional = true }
acpi_tables = { path = "../acpi_tables" }
anyhow = "1.0.56"
arc-swap = "1.5.0"
arch = { path = "../arch" }

@@ -13,7 +13,6 @@ use acpi_tables::{aml::Aml, rsdp::Rsdp, sdt::Sdt};
use arch::aarch64::DeviceInfoForFdt;
#[cfg(target_arch = "aarch64")]
use arch::DeviceType;
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
use arch::NumaNodes;
use bitflags::bitflags;

@@ -21,11 +21,9 @@ use crate::seccomp_filters::{get_seccomp_filter, Thread};
use crate::vm::physical_bits;
use crate::GuestMemoryMmap;
use crate::CPU_MANAGER_SNAPSHOT_ID;
#[cfg(feature = "acpi")]
use acpi_tables::{aml, aml::Aml, sdt::Sdt};
use anyhow::anyhow;
use arch::EntryPoint;
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
use arch::NumaNodes;
use devices::interrupt_controller::InterruptController;
#[cfg(all(target_arch = "x86_64", feature = "gdb"))]
@@ -47,7 +45,6 @@ use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Barrier, Mutex};
use std::{cmp, io, result, thread};
use vm_device::BusDevice;
#[cfg(feature = "acpi")]
use vm_memory::GuestAddress;
use vm_memory::GuestMemoryAtomic;
use vm_migration::{
@@ -57,7 +54,6 @@
use vmm_sys_util::eventfd::EventFd;
use vmm_sys_util::signal::{register_signal_handler, SIGRTMIN};
#[cfg(feature = "acpi")]
pub const CPU_MANAGER_ACPI_SIZE: usize = 0xc;
#[derive(Debug)]
@@ -135,7 +131,7 @@ pub enum Error {
}
pub type Result<T> = result::Result<T, Error>;
#[cfg(all(target_arch = "x86_64", feature = "acpi"))]
#[cfg(target_arch = "x86_64")]
#[allow(dead_code)]
#[repr(packed)]
struct LocalApic {
@@ -158,7 +154,7 @@ struct Ioapic {
pub gsi_base: u32,
}
#[cfg(all(target_arch = "aarch64", feature = "acpi"))]
#[cfg(target_arch = "aarch64")]
#[allow(dead_code)]
#[repr(packed)]
struct GicC {
@@ -182,7 +178,7 @@ struct GicC {
pub spe_overflow_interrupt: u16,
}
#[cfg(all(target_arch = "aarch64", feature = "acpi"))]
#[cfg(target_arch = "aarch64")]
#[allow(dead_code)]
#[repr(packed)]
struct GicD {
@@ -196,7 +192,7 @@ struct GicD {
pub reserved1: [u8; 3],
}
#[cfg(all(target_arch = "aarch64", feature = "acpi"))]
#[cfg(target_arch = "aarch64")]
#[allow(dead_code)]
#[repr(packed)]
struct GicR {
@@ -207,7 +203,7 @@ struct GicR {
pub range_length: u32,
}
#[cfg(all(target_arch = "aarch64", feature = "acpi"))]
#[cfg(target_arch = "aarch64")]
#[allow(dead_code)]
#[repr(packed)]
struct GicIts {
@@ -219,7 +215,7 @@ struct GicIts {
pub reserved1: u32,
}
#[cfg(all(target_arch = "aarch64", feature = "acpi"))]
#[cfg(target_arch = "aarch64")]
#[allow(dead_code)]
#[repr(packed)]
struct ProcessorHierarchyNode {
@@ -419,10 +415,8 @@ pub struct CpuManager {
vcpus: Vec<Arc<Mutex<Vcpu>>>,
seccomp_action: SeccompAction,
vmmops: Arc<dyn VmmOps>,
#[cfg(feature = "acpi")]
#[cfg_attr(target_arch = "aarch64", allow(dead_code))]
acpi_address: Option<GuestAddress>,
#[cfg(feature = "acpi")]
proximity_domain_per_cpu: BTreeMap<u8, u32>,
affinity: BTreeMap<u8, Vec<u8>>,
dynamic: bool,
@@ -572,7 +566,7 @@ impl CpuManager {
seccomp_action: SeccompAction,
vmmops: Arc<dyn VmmOps>,
#[cfg(feature = "tdx")] tdx_enabled: bool,
#[cfg(any(target_arch = "aarch64", feature = "acpi"))] numa_nodes: &NumaNodes,
numa_nodes: &NumaNodes,
) -> Result<Arc<Mutex<CpuManager>>> {
let guest_memory = memory_manager.lock().unwrap().guest_memory();
let mut vcpu_states = Vec::with_capacity(usize::from(config.max_vcpus));
@@ -636,7 +630,6 @@ impl CpuManager {
let device_manager = device_manager.lock().unwrap();
#[cfg(feature = "acpi")]
let proximity_domain_per_cpu: BTreeMap<u8, u32> = {
let mut cpu_list = Vec::new();
for (proximity_domain, numa_node) in numa_nodes.iter() {
@@ -663,7 +656,6 @@ impl CpuManager {
#[cfg(not(feature = "tdx"))]
let dynamic = true;
#[cfg(feature = "acpi")]
let acpi_address = if dynamic {
Some(
device_manager
@@ -695,15 +687,12 @@ impl CpuManager {
vcpus: Vec::with_capacity(usize::from(config.max_vcpus)),
seccomp_action,
vmmops,
#[cfg(feature = "acpi")]
acpi_address,
#[cfg(feature = "acpi")]
proximity_domain_per_cpu,
affinity,
dynamic,
}));
#[cfg(feature = "acpi")]
if let Some(acpi_address) = acpi_address {
device_manager
.mmio_bus()
@@ -1240,7 +1229,6 @@ impl CpuManager {
.map(|t| (t.threads_per_core, t.cores_per_die, t.packages))
}
#[cfg(feature = "acpi")]
pub fn create_madt(&self) -> Sdt {
use crate::acpi;
// This is also checked in the commandline parsing.
@@ -1370,7 +1358,7 @@ impl CpuManager {
madt
}
#[cfg(all(target_arch = "aarch64", feature = "acpi"))]
#[cfg(target_arch = "aarch64")]
pub fn create_pptt(&self) -> Sdt {
let pptt_start = 0;
let mut cpus = 0;
@@ -1503,17 +1491,15 @@ impl CpuManager {
}
}
#[cfg(feature = "acpi")]
struct Cpu {
cpu_id: u8,
proximity_domain: u32,
dynamic: bool,
}
#[cfg(all(target_arch = "x86_64", feature = "acpi"))]
#[cfg(target_arch = "x86_64")]
const MADT_CPU_ENABLE_FLAG: usize = 0;
#[cfg(feature = "acpi")]
impl Cpu {
#[cfg(target_arch = "x86_64")]
fn generate_mat(&self) -> Vec<u8> {
@@ -1533,7 +1519,6 @@ impl Cpu {
}
}
#[cfg(feature = "acpi")]
impl Aml for Cpu {
fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
#[cfg(target_arch = "x86_64")]
@@ -1621,12 +1606,10 @@ impl Aml for Cpu {
}
}
#[cfg(feature = "acpi")]
struct CpuNotify {
cpu_id: u8,
}
#[cfg(feature = "acpi")]
impl Aml for CpuNotify {
fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
let object = aml::Path::new(&format!("C{:03}", self.cpu_id));
@@ -1638,13 +1621,11 @@ impl Aml for CpuNotify {
}
}
#[cfg(feature = "acpi")]
struct CpuMethods {
max_vcpus: u8,
dynamic: bool,
}
#[cfg(feature = "acpi")]
impl Aml for CpuMethods {
fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
if self.dynamic {
@@ -1758,7 +1739,6 @@ impl Aml for CpuMethods {
}
}
#[cfg(feature = "acpi")]
impl Aml for CpuManager {
fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
#[cfg(target_arch = "x86_64")]

@@ -19,7 +19,6 @@ use crate::interrupt::kvm::KvmMsiInterruptManager as MsiInterruptManager;
#[cfg(feature = "mshv")]
use crate::interrupt::mshv::MshvMsiInterruptManager as MsiInterruptManager;
use crate::interrupt::LegacyUserspaceInterruptManager;
#[cfg(feature = "acpi")]
use crate::memory_manager::MEMORY_MANAGER_ACPI_SIZE;
use crate::memory_manager::{Error as MemoryManagerError, MemoryManager};
use crate::pci_segment::PciSegment;
@@ -29,16 +28,13 @@ use crate::sigwinch_listener::start_sigwinch_listener;
use crate::GuestRegionMmap;
use crate::PciDeviceInfo;
use crate::{device_node, DEVICE_MANAGER_SNAPSHOT_ID};
#[cfg(feature = "acpi")]
use acpi_tables::{aml, aml::Aml};
use anyhow::anyhow;
#[cfg(target_arch = "aarch64")]
use arch::aarch64::gic::gicv3_its::kvm::KvmGicV3Its;
#[cfg(feature = "acpi")]
use arch::layout;
#[cfg(target_arch = "x86_64")]
use arch::layout::{APIC_START, IOAPIC_SIZE, IOAPIC_START};
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
use arch::NumaNodes;
#[cfg(target_arch = "aarch64")]
use arch::{DeviceType, MmioDeviceInfo};
@@ -480,7 +476,6 @@ pub enum DeviceManagerError {
}
pub type DeviceManagerResult<T> = result::Result<T, DeviceManagerError>;
#[cfg(feature = "acpi")]
const DEVICE_MANAGER_ACPI_SIZE: usize = 0x10;
const TIOCSPTLCK: libc::c_int = 0x4004_5431;
@@ -846,7 +841,6 @@ pub struct DeviceManager {
cmdline_additions: Vec<String>,
// ACPI GED notification device
#[cfg(feature = "acpi")]
ged_notification_device: Option<Arc<Mutex<devices::AcpiGedDevice>>>,
// VM configuration
@@ -910,7 +904,6 @@ pub struct DeviceManager {
seccomp_action: SeccompAction,
// List of guest NUMA nodes.
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
numa_nodes: NumaNodes,
// Possible handle to the virtio-balloon device
@@ -920,9 +913,8 @@ pub struct DeviceManager {
// activation and thus start the threads from the VMM thread
activate_evt: EventFd,
#[cfg(feature = "acpi")]
acpi_address: GuestAddress,
#[cfg(feature = "acpi")]
selected_segment: usize,
// Possible handle to the virtio-mem device
@@ -951,7 +943,7 @@ impl DeviceManager {
exit_evt: &EventFd,
reset_evt: &EventFd,
seccomp_action: SeccompAction,
#[cfg(any(target_arch = "aarch64", feature = "acpi"))] numa_nodes: NumaNodes,
numa_nodes: NumaNodes,
activate_evt: &EventFd,
force_iommu: bool,
restoring: bool,
@@ -1004,7 +996,6 @@ impl DeviceManager {
vm,
));
#[cfg(feature = "acpi")]
let acpi_address = address_manager
.allocator
.lock()
@@ -1038,7 +1029,7 @@ impl DeviceManager {
console: Arc::new(Console::default()),
interrupt_controller: None,
cmdline_additions: Vec::new(),
#[cfg(feature = "acpi")]
ged_notification_device: None,
config,
memory_manager,
@@ -1059,15 +1050,15 @@ impl DeviceManager {
#[cfg(target_arch = "aarch64")]
id_to_dev_info: HashMap::new(),
seccomp_action,
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
numa_nodes,
balloon: None,
activate_evt: activate_evt
.try_clone()
.map_err(DeviceManagerError::EventFd)?,
#[cfg(feature = "acpi")]
acpi_address,
#[cfg(feature = "acpi")]
selected_segment: 0,
serial_pty: None,
serial_manager: None,
@@ -1083,7 +1074,6 @@ impl DeviceManager {
let device_manager = Arc::new(Mutex::new(device_manager));
#[cfg(feature = "acpi")]
address_manager
.mmio_bus
.insert(
@@ -1130,7 +1120,6 @@ impl DeviceManager {
&interrupt_controller,
)));
#[cfg(feature = "acpi")]
{
if let Some(acpi_address) = self.memory_manager.lock().unwrap().acpi_address() {
self.address_manager
@@ -1154,7 +1143,6 @@ impl DeviceManager {
#[cfg(target_arch = "aarch64")]
self.add_legacy_devices(&legacy_interrupt_manager)?;
#[cfg(feature = "acpi")]
{
self.ged_notification_device = self.add_acpi_devices(
&legacy_interrupt_manager,
@@ -1378,7 +1366,6 @@ impl DeviceManager {
Ok(interrupt_controller)
}
#[cfg(feature = "acpi")]
fn add_acpi_devices(
&mut self,
interrupt_manager: &Arc<dyn InterruptManager<GroupConfig = LegacyIrqGroupConfig>>,
@@ -2787,9 +2774,6 @@ impl DeviceManager {
if let Some(virtio_mem_zone) = memory_zone.virtio_mem_zone() {
info!("Creating virtio-mem device: id = {}", memory_zone_id);
#[cfg(all(target_arch = "x86_64", not(feature = "acpi")))]
let node_id: Option<u16> = None;
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
let node_id = numa_node_id_from_memory_zone_id(&self.numa_nodes, memory_zone_id)
.map(|i| i as u16);
@@ -3585,7 +3569,6 @@ impl DeviceManager {
Arc::clone(self.pci_segments[0].pci_config_io.as_ref().unwrap())
}
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
pub(crate) fn pci_segments(&self) -> &Vec<PciSegment> {
&self.pci_segments
}
@@ -3670,7 +3653,6 @@ impl DeviceManager {
&self,
_notification_type: AcpiNotificationFlags,
) -> DeviceManagerResult<()> {
#[cfg(feature = "acpi")]
return self
.ged_notification_device
.as_ref()
@ -3679,8 +3661,6 @@ impl DeviceManager {
.unwrap()
.notify(_notification_type)
.map_err(DeviceManagerError::HotPlugNotification);
#[cfg(not(feature = "acpi"))]
return Ok(());
}
pub fn add_device(
@@ -4113,7 +4093,6 @@ impl DeviceManager {
Ok(())
}
#[cfg(feature = "acpi")]
#[cfg(target_arch = "x86_64")]
pub fn notify_power_button(&self) -> DeviceManagerResult<()> {
self.ged_notification_device
@@ -4127,43 +4106,27 @@ impl DeviceManager {
#[cfg(target_arch = "aarch64")]
pub fn notify_power_button(&self) -> DeviceManagerResult<()> {
// There are three use cases:
// 1. The Cloud Hypervisor is built without feature acpi.
// 2. The Cloud Hypervisor is built with feature acpi, but users will
// use direct kernel boot with device tree.
// 3. The Cloud Hypervisor is built with feature acpi, and users will
// use ACPI+UEFI boot.
#[cfg(not(feature = "acpi"))]
// The `return` here will trigger a GPIO pin 3 event, which will trigger
// a power button event for use case 1.
return self
.gpio_device
// There are two use cases:
// 1. Users will use direct kernel boot with device tree.
// 2. Users will use ACPI+UEFI boot.
// Trigger a GPIO pin 3 event to satisfy use case 1.
self.gpio_device
.as_ref()
.unwrap()
.lock()
.unwrap()
.trigger_key(3)
.map_err(DeviceManagerError::AArch64PowerButtonNotification);
#[cfg(feature = "acpi")]
{
// Trigger a GPIO pin 3 event to satisfy use case 2.
self.gpio_device
.as_ref()
.unwrap()
.lock()
.unwrap()
.trigger_key(3)
.map_err(DeviceManagerError::AArch64PowerButtonNotification)?;
// Trigger a GED power button event to satisfy use case 3.
return self
.ged_notification_device
.as_ref()
.unwrap()
.lock()
.unwrap()
.notify(AcpiNotificationFlags::POWER_BUTTON_CHANGED)
.map_err(DeviceManagerError::PowerButtonNotification);
}
.map_err(DeviceManagerError::AArch64PowerButtonNotification)?;
// Trigger a GED power button event to satisfy use case 2.
return self
.ged_notification_device
.as_ref()
.unwrap()
.lock()
.unwrap()
.notify(AcpiNotificationFlags::POWER_BUTTON_CHANGED)
.map_err(DeviceManagerError::PowerButtonNotification);
}
pub fn iommu_attached_devices(&self) -> &Option<(PciBdf, Vec<PciBdf>)> {
@@ -4171,7 +4134,6 @@ impl DeviceManager {
}
}
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
fn numa_node_id_from_memory_zone_id(numa_nodes: &NumaNodes, memory_zone_id: &str) -> Option<u32> {
for (numa_node_id, numa_node) in numa_nodes.iter() {
if numa_node.memory_zones.contains(&memory_zone_id.to_owned()) {
@@ -4182,7 +4144,6 @@ fn numa_node_id_from_memory_zone_id(numa_nodes: &NumaNodes, memory_zone_id: &str
None
}
#[cfg(feature = "acpi")]
impl Aml for DeviceManager {
fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
#[cfg(target_arch = "aarch64")]
@@ -4490,24 +4451,15 @@ impl Migratable for DeviceManager {
}
}
#[cfg(feature = "acpi")]
const PCIU_FIELD_OFFSET: u64 = 0;
#[cfg(feature = "acpi")]
const PCID_FIELD_OFFSET: u64 = 4;
#[cfg(feature = "acpi")]
const B0EJ_FIELD_OFFSET: u64 = 8;
#[cfg(feature = "acpi")]
const PSEG_FIELD_OFFSET: u64 = 12;
#[cfg(feature = "acpi")]
const PCIU_FIELD_SIZE: usize = 4;
#[cfg(feature = "acpi")]
const PCID_FIELD_SIZE: usize = 4;
#[cfg(feature = "acpi")]
const B0EJ_FIELD_SIZE: usize = 4;
#[cfg(feature = "acpi")]
const PSEG_FIELD_SIZE: usize = 4;
#[cfg(feature = "acpi")]
impl BusDevice for DeviceManager {
fn read(&mut self, base: u64, offset: u64, data: &mut [u8]) {
match offset {

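Since the notify_power_button() hunk above interleaves removed and added lines, here is the resulting aarch64 method in consolidated form (a sketch assembled from the added lines, not a verbatim copy of the file):

    // With ACPI unconditional, both notifications always fire: device-tree
    // guests observe the GPIO key event, ACPI+UEFI guests the GED event.
    #[cfg(target_arch = "aarch64")]
    pub fn notify_power_button(&self) -> DeviceManagerResult<()> {
        self.gpio_device
            .as_ref()
            .unwrap()
            .lock()
            .unwrap()
            .trigger_key(3)
            .map_err(DeviceManagerError::AArch64PowerButtonNotification)?;
        self.ged_notification_device
            .as_ref()
            .unwrap()
            .lock()
            .unwrap()
            .notify(AcpiNotificationFlags::POWER_BUTTON_CHANGED)
            .map_err(DeviceManagerError::PowerButtonNotification)
    }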
@@ -49,7 +49,6 @@ use vm_migration::{MigratableError, Pausable, Snapshot, Snapshottable, Transport
use vmm_sys_util::eventfd::EventFd;
use vmm_sys_util::sock_ctrl_msg::ScmSocket;
#[cfg(feature = "acpi")]
mod acpi;
pub mod api;
mod clone3;
@@ -571,16 +570,6 @@ impl Vmm {
}
fn vm_reboot(&mut self) -> result::Result<(), VmError> {
// Without ACPI, a reset is equivalent to a shutdown
// On AArch64, before ACPI is supported, we simply jump over this check and continue to reset.
#[cfg(all(target_arch = "x86_64", not(feature = "acpi")))]
{
if self.vm.is_some() {
self.exit_evt.write(1).unwrap();
return Ok(());
}
}
// First we stop the current VM
let (config, serial_pty, console_pty, console_resize_pipe) =
if let Some(mut vm) = self.vm.take() {

@@ -8,7 +8,6 @@ use crate::config::{HotplugMethod, MemoryConfig, MemoryZoneConfig};
use crate::migration::url_to_path;
use crate::MEMORY_MANAGER_SNAPSHOT_ID;
use crate::{GuestMemoryMmap, GuestRegionMmap};
#[cfg(feature = "acpi")]
use acpi_tables::{aml, aml::Aml};
use anyhow::anyhow;
#[cfg(target_arch = "x86_64")]
@@ -46,7 +45,6 @@ use vm_migration::{
Snapshot, SnapshotDataSection, Snapshottable, Transportable, VersionMapped,
};
#[cfg(feature = "acpi")]
pub const MEMORY_MANAGER_ACPI_SIZE: usize = 0x18;
const DEFAULT_MEMORY_ZONE: &str = "mem0";
@@ -179,7 +177,6 @@ pub struct MemoryManager {
// slots that the mapping is created in.
guest_ram_mappings: Vec<GuestRamMapping>,
#[cfg(feature = "acpi")]
pub acpi_address: Option<GuestAddress>,
}
@@ -1031,7 +1028,6 @@ impl MemoryManager {
let hotplug_method = config.hotplug_method.clone();
#[cfg(feature = "acpi")]
let acpi_address = if dynamic && hotplug_method == HotplugMethod::Acpi {
Some(
allocator
@@ -1075,7 +1071,7 @@ impl MemoryManager {
snapshot_memory_ranges: MemoryRangeTable::default(),
memory_zones,
guest_ram_mappings: Vec::new(),
#[cfg(feature = "acpi")]
acpi_address,
log_dirty: dynamic, // Cannot log dirty pages on a TD
arch_mem_regions,
@@ -1846,18 +1842,15 @@ impl MemoryManager {
memory_slot_fds
}
#[cfg(feature = "acpi")]
pub fn acpi_address(&self) -> Option<GuestAddress> {
self.acpi_address
}
}
#[cfg(feature = "acpi")]
struct MemoryNotify {
slot_id: usize,
}
#[cfg(feature = "acpi")]
impl Aml for MemoryNotify {
fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
let object = aml::Path::new(&format!("M{:03}", self.slot_id));
@@ -1869,12 +1862,10 @@ impl Aml for MemoryNotify {
}
}
#[cfg(feature = "acpi")]
struct MemorySlot {
slot_id: usize,
}
#[cfg(feature = "acpi")]
impl Aml for MemorySlot {
fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
aml::Device::new(
@@ -1918,12 +1909,10 @@ impl Aml for MemorySlot {
}
}
#[cfg(feature = "acpi")]
struct MemorySlots {
slots: usize,
}
#[cfg(feature = "acpi")]
impl Aml for MemorySlots {
fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
for slot_id in 0..self.slots {
@@ -1932,12 +1921,10 @@ impl Aml for MemorySlots {
}
}
#[cfg(feature = "acpi")]
struct MemoryMethods {
slots: usize,
}
#[cfg(feature = "acpi")]
impl Aml for MemoryMethods {
fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
// Add "MTFY" notification method
@@ -2080,7 +2067,6 @@ impl Aml for MemoryMethods {
}
}
#[cfg(feature = "acpi")]
impl Aml for MemoryManager {
fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
if let Some(acpi_address) = self.acpi_address {

@@ -10,14 +10,12 @@
//
use crate::device_manager::{AddressManager, DeviceManagerError, DeviceManagerResult};
#[cfg(feature = "acpi")]
use acpi_tables::aml::{self, Aml};
use arch::layout;
use pci::{DeviceRelocation, PciBdf, PciBus, PciConfigMmio, PciRoot};
#[cfg(target_arch = "x86_64")]
use pci::{PciConfigIo, PCI_CONFIG_IO_PORT, PCI_CONFIG_IO_PORT_SIZE};
use std::sync::{Arc, Mutex};
#[cfg(feature = "acpi")]
use uuid::Uuid;
use vm_allocator::AddressAllocator;
use vm_device::BusDevice;
@@ -168,12 +166,10 @@ impl PciSegment {
}
}
#[cfg(feature = "acpi")]
struct PciDevSlot {
device_id: u8,
}
#[cfg(feature = "acpi")]
impl Aml for PciDevSlot {
fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
let sun = self.device_id;
@@ -198,12 +194,10 @@ impl Aml for PciDevSlot {
}
}
#[cfg(feature = "acpi")]
struct PciDevSlotNotify {
device_id: u8,
}
#[cfg(feature = "acpi")]
impl Aml for PciDevSlotNotify {
fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
let device_id_mask: u32 = 1 << self.device_id;
@@ -217,10 +211,8 @@ impl Aml for PciDevSlotNotify {
}
}
#[cfg(feature = "acpi")]
struct PciDevSlotMethods {}
#[cfg(feature = "acpi")]
impl Aml for PciDevSlotMethods {
fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
let mut device_notifies = Vec::new();
@@ -256,10 +248,8 @@ impl Aml for PciDevSlotMethods {
}
}
#[cfg(feature = "acpi")]
struct PciDsmMethod {}
#[cfg(feature = "acpi")]
impl Aml for PciDsmMethod {
fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
// Refer to ACPI spec v6.3 Ch 9.1.1 and PCI Firmware spec v3.3 Ch 4.6.1
@@ -321,7 +311,6 @@ impl Aml for PciDsmMethod {
}
}
#[cfg(feature = "acpi")]
impl Aml for PciSegment {
fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
let mut pci_dsdt_inner_data: Vec<&dyn aml::Aml> = Vec::new();

@@ -11,7 +11,6 @@
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
//
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
use crate::config::NumaConfig;
use crate::config::{
add_to_config, DeviceConfig, DiskConfig, FsConfig, HotplugMethod, NetConfig, PmemConfig,
@@ -40,7 +39,6 @@ use arch::x86_64::tdx::TdvfSection;
use arch::EntryPoint;
#[cfg(target_arch = "aarch64")]
use arch::PciSpaceInfo;
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
use arch::{NumaNode, NumaNodes};
use devices::AcpiNotificationFlags;
#[cfg(all(target_arch = "x86_64", feature = "gdb"))]
@@ -59,7 +57,6 @@ use signal_hook::{
iterator::Signals,
};
use std::cmp;
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::convert::TryInto;
@@ -240,9 +237,6 @@ pub enum Error {
/// Cannot activate virtio devices
ActivateVirtioDevices(device_manager::DeviceManagerError),
/// Power button not supported
PowerButtonNotSupported,
/// Error triggering power button
PowerButton(device_manager::DeviceManagerError),
@@ -535,7 +529,7 @@ pub struct Vm {
vm: Arc<dyn hypervisor::Vm>,
#[cfg(all(feature = "kvm", target_arch = "x86_64"))]
saved_clock: Option<hypervisor::ClockData>,
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
numa_nodes: NumaNodes,
seccomp_action: SeccompAction,
exit_evt: EventFd,
@@ -567,7 +561,6 @@ impl Vm {
info!("Booting VM from config: {:?}", &config);
// Create NUMA nodes based on NumaConfig.
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
let numa_nodes =
Self::create_numa_nodes(config.lock().unwrap().numa.clone(), &memory_manager)?;
@@ -588,7 +581,6 @@ impl Vm {
&exit_evt,
&reset_evt,
seccomp_action.clone(),
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
numa_nodes.clone(),
&activate_evt,
force_iommu,
@@ -635,7 +627,6 @@ impl Vm {
vm_ops,
#[cfg(feature = "tdx")]
tdx_enabled,
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
&numa_nodes,
)
.map_err(Error::CpuManager)?;
@@ -673,7 +664,7 @@ impl Vm {
vm,
#[cfg(all(feature = "kvm", target_arch = "x86_64"))]
saved_clock: None,
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
numa_nodes,
seccomp_action: seccomp_action.clone(),
exit_evt,
@@ -683,7 +674,6 @@ impl Vm {
})
}
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
fn create_numa_nodes(
configs: Option<Vec<NumaConfig>>,
memory_manager: &Arc<Mutex<MemoryManager>>,
@@ -1107,7 +1097,7 @@ impl Vm {
}
#[cfg(target_arch = "x86_64")]
fn configure_system(&mut self, #[cfg(feature = "acpi")] rsdp_addr: GuestAddress) -> Result<()> {
fn configure_system(&mut self, rsdp_addr: GuestAddress) -> Result<()> {
info!("Configuring system");
let mem = self.memory_manager.lock().unwrap().boot_guest_memory();
@@ -1117,12 +1107,7 @@ impl Vm {
};
let boot_vcpus = self.cpu_manager.lock().unwrap().boot_vcpus();
#[cfg(feature = "acpi")]
let rsdp_addr = Some(rsdp_addr);
#[cfg(not(feature = "acpi"))]
let rsdp_addr = None;
let sgx_epc_region = self
.memory_manager
.lock()
@@ -1144,10 +1129,7 @@ impl Vm {
}
#[cfg(target_arch = "aarch64")]
fn configure_system(
&mut self,
#[cfg(feature = "acpi")] _rsdp_addr: GuestAddress,
) -> Result<()> {
fn configure_system(&mut self, _rsdp_addr: GuestAddress) -> Result<()> {
let cmdline = self.get_cmdline()?;
let vcpu_mpidrs = self.cpu_manager.lock().unwrap().get_mpidrs();
let vcpu_topology = self.cpu_manager.lock().unwrap().get_vcpu_topology();
@@ -1919,7 +1901,7 @@ impl Vm {
.map_err(Error::PopulateHob)?;
// Loop over the ACPI tables and copy them to the HOB.
#[cfg(feature = "acpi")]
for acpi_table in crate::acpi::create_acpi_tables_tdx(
&self.device_manager,
&self.cpu_manager,
@@ -2016,7 +1998,7 @@ impl Vm {
// Creates ACPI tables
// In case of TDX being used, this is a no-op since the tables will be
// created and passed when populating the HOB.
#[cfg(feature = "acpi")]
fn create_acpi_tables(&self) -> Option<GuestAddress> {
#[cfg(feature = "tdx")]
if self.config.lock().unwrap().tdx.is_some() {
@@ -2088,7 +2070,6 @@ impl Vm {
Vec::new()
};
#[cfg(feature = "acpi")]
let rsdp_addr = self.create_acpi_tables();
// Configuring the TDX regions requires that the vCPUs are created.
@@ -2105,10 +2086,7 @@ impl Vm {
.map(|_| {
// Safe to unwrap rsdp_addr as we know it can't be None when
// the entry_point is Some.
self.configure_system(
#[cfg(feature = "acpi")]
rsdp_addr.unwrap(),
)
self.configure_system(rsdp_addr.unwrap())
})
.transpose()?;
@@ -2434,15 +2412,12 @@ impl Vm {
#[cfg(target_arch = "x86_64")]
pub fn power_button(&self) -> Result<()> {
#[cfg(feature = "acpi")]
return self
.device_manager
.lock()
.unwrap()
.notify_power_button()
.map_err(Error::PowerButton);
#[cfg(not(feature = "acpi"))]
Err(Error::PowerButtonNotSupported)
}
#[cfg(target_arch = "aarch64")]