arch, devices, vmm: Remove "acpi" feature gate

Compile this feature in by default as it's well supported on both
aarch64 and x86_64 and we only officially support using it (no non-acpi
binaries are available).

Signed-off-by: Rob Bradford <robert.bradford@intel.com>
This commit is contained in:
Rob Bradford 2022-03-28 11:53:22 +01:00 committed by Bo Chen
parent fa7487629f
commit 7c0cf8cc23
16 changed files with 63 additions and 214 deletions

View File

@ -53,8 +53,7 @@ wait-timeout = "0.2.0"
[features] [features]
default = ["common", "kvm"] default = ["common", "kvm"]
# Common features for all hypervisors # Common features for all hypervisors
common = ["acpi", "cmos", "fwdebug"] common = ["cmos", "fwdebug"]
acpi = ["vmm/acpi"]
amx = ["vmm/amx"] amx = ["vmm/amx"]
cmos = ["vmm/cmos"] cmos = ["vmm/cmos"]
fwdebug = ["vmm/fwdebug"] fwdebug = ["vmm/fwdebug"]

View File

@ -6,11 +6,10 @@ edition = "2018"
[features] [features]
default = [] default = []
acpi = ["acpi_tables"]
tdx = [] tdx = []
[dependencies] [dependencies]
acpi_tables = { path = "../acpi_tables", optional = true } acpi_tables = { path = "../acpi_tables" }
anyhow = "1.0.56" anyhow = "1.0.56"
byteorder = "1.4.3" byteorder = "1.4.3"
hypervisor = { path = "../hypervisor" } hypervisor = { path = "../hypervisor" }

View File

@ -242,7 +242,6 @@ fn create_memory_node(
let mem_reg_prop = [memory_region_start_addr, memory_region_size]; let mem_reg_prop = [memory_region_start_addr, memory_region_size];
// With feature `acpi` enabled, RAM at 0-4M is for edk2 only // With feature `acpi` enabled, RAM at 0-4M is for edk2 only
// and should be hidden to the guest. // and should be hidden to the guest.
#[cfg(feature = "acpi")]
if memory_region_start_addr == 0 { if memory_region_start_addr == 0 {
continue; continue;
} }
@ -548,20 +547,19 @@ fn create_pci_nodes(
// could fall below 4G. // could fall below 4G.
// Here we cut off PCI device space below 8G in FDT to workaround the EDK2 check. // Here we cut off PCI device space below 8G in FDT to workaround the EDK2 check.
// But the address written in ACPI is not impacted. // But the address written in ACPI is not impacted.
let (pci_device_base_64bit, pci_device_size_64bit) = if cfg!(feature = "acpi") let (pci_device_base_64bit, pci_device_size_64bit) =
&& (pci_device_info_elem.pci_device_space_start < PCI_HIGH_BASE) if pci_device_info_elem.pci_device_space_start < PCI_HIGH_BASE {
{ (
( PCI_HIGH_BASE,
PCI_HIGH_BASE, pci_device_info_elem.pci_device_space_size
pci_device_info_elem.pci_device_space_size - (PCI_HIGH_BASE - pci_device_info_elem.pci_device_space_start),
- (PCI_HIGH_BASE - pci_device_info_elem.pci_device_space_start), )
) } else {
} else { (
( pci_device_info_elem.pci_device_space_start,
pci_device_info_elem.pci_device_space_start, pci_device_info_elem.pci_device_space_size,
pci_device_info_elem.pci_device_space_size, )
) };
};
// There is no specific requirement of the 32bit MMIO range, and // There is no specific requirement of the 32bit MMIO range, and
// therefore at least we can make these ranges 4K aligned. // therefore at least we can make these ranges 4K aligned.
let pci_device_size_32bit: u64 = let pci_device_size_32bit: u64 =

View File

@ -83,22 +83,11 @@ pub fn arch_memory_regions(size: GuestUsize) -> Vec<(GuestAddress, usize, Region
// As a workaround, we take 4 MiB memory from the main RAM for UEFI. // As a workaround, we take 4 MiB memory from the main RAM for UEFI.
// As a result, the RAM that the guest can see is less than what has been // As a result, the RAM that the guest can see is less than what has been
// assigned in command line, when ACPI and UEFI is enabled. // assigned in command line, when ACPI and UEFI is enabled.
let ram_deduction = if cfg!(feature = "acpi") { let ram_deduction = layout::UEFI_SIZE;
layout::UEFI_SIZE
} else {
0
};
vec![ vec![
// 0 ~ 4 MiB: Reserved for UEFI space // 0 ~ 4 MiB: Reserved for UEFI space
#[cfg(feature = "acpi")]
(GuestAddress(0), layout::UEFI_SIZE as usize, RegionType::Ram), (GuestAddress(0), layout::UEFI_SIZE as usize, RegionType::Ram),
#[cfg(not(feature = "acpi"))]
(
GuestAddress(0),
layout::UEFI_SIZE as usize,
RegionType::Reserved,
),
// 4 MiB ~ 256 MiB: Gic and legacy devices // 4 MiB ~ 256 MiB: Gic and legacy devices
( (
GuestAddress(layout::UEFI_SIZE), GuestAddress(layout::UEFI_SIZE),
@ -224,7 +213,7 @@ mod tests {
let regions = arch_memory_regions((1usize << 32) as u64); //4GB let regions = arch_memory_regions((1usize << 32) as u64); //4GB
assert_eq!(5, regions.len()); assert_eq!(5, regions.len());
assert_eq!(GuestAddress(layout::RAM_64BIT_START), regions[4].0); assert_eq!(GuestAddress(layout::RAM_64BIT_START), regions[4].0);
assert_eq!(1usize << 32, regions[4].1); assert_eq!(((1 << 32) - layout::UEFI_SIZE) as usize, regions[4].1);
assert_eq!(RegionType::Ram, regions[4].2); assert_eq!(RegionType::Ram, regions[4].2);
} }
} }

View File

@ -5,7 +5,7 @@ authors = ["The Chromium OS Authors"]
edition = "2018" edition = "2018"
[dependencies] [dependencies]
acpi_tables = { path = "../acpi_tables", optional = true } acpi_tables = { path = "../acpi_tables" }
anyhow = "1.0.56" anyhow = "1.0.56"
arch = { path = "../arch" } arch = { path = "../arch" }
bitflags = "1.3.2" bitflags = "1.3.2"
@ -22,6 +22,5 @@ vmm-sys-util = "0.9.0"
[features] [features]
default = [] default = []
acpi = ["acpi_tables"]
cmos = [] cmos = []
fwdebug = [] fwdebug = []

View File

@ -102,7 +102,6 @@ impl BusDevice for AcpiGedDevice {
} }
} }
#[cfg(feature = "acpi")]
impl Aml for AcpiGedDevice { impl Aml for AcpiGedDevice {
fn append_aml_bytes(&self, bytes: &mut Vec<u8>) { fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
aml::Device::new( aml::Device::new(

View File

@ -12,7 +12,6 @@ extern crate bitflags;
#[macro_use] #[macro_use]
extern crate log; extern crate log;
#[cfg(feature = "acpi")]
pub mod acpi; pub mod acpi;
#[cfg(target_arch = "aarch64")] #[cfg(target_arch = "aarch64")]
pub mod gic; pub mod gic;
@ -21,7 +20,6 @@ pub mod interrupt_controller;
pub mod ioapic; pub mod ioapic;
pub mod legacy; pub mod legacy;
#[cfg(feature = "acpi")]
pub use self::acpi::{AcpiGedDevice, AcpiPmTimerDevice, AcpiShutdownDevice}; pub use self::acpi::{AcpiGedDevice, AcpiPmTimerDevice, AcpiShutdownDevice};
bitflags! { bitflags! {

View File

@ -744,7 +744,7 @@ fn test_vhost_user_net(
); );
// ACPI feature is needed. // ACPI feature is needed.
#[cfg(all(target_arch = "x86_64", feature = "acpi"))] #[cfg(target_arch = "x86_64")]
{ {
guest.enable_memory_hotplug(); guest.enable_memory_hotplug();
@ -883,7 +883,7 @@ fn test_vhost_user_blk(
); );
// ACPI feature is needed. // ACPI feature is needed.
#[cfg(all(target_arch = "x86_64", feature = "acpi"))] #[cfg(target_arch = "x86_64")]
{ {
guest.enable_memory_hotplug(); guest.enable_memory_hotplug();
@ -1121,7 +1121,7 @@ fn test_virtio_fs(
); );
// ACPI feature is needed. // ACPI feature is needed.
#[cfg(all(target_arch = "x86_64", feature = "acpi"))] #[cfg(target_arch = "x86_64")]
{ {
guest.enable_memory_hotplug(); guest.enable_memory_hotplug();
@ -3303,7 +3303,6 @@ mod parallel {
let r = std::panic::catch_unwind(|| { let r = std::panic::catch_unwind(|| {
guest.wait_vm_boot(None).unwrap(); guest.wait_vm_boot(None).unwrap();
#[cfg(feature = "acpi")]
assert!(guest assert!(guest
.does_device_vendor_pair_match("0x1043", "0x1af4") .does_device_vendor_pair_match("0x1043", "0x1af4")
.unwrap_or_default()); .unwrap_or_default());
@ -4305,7 +4304,7 @@ mod parallel {
// On AArch64 when acpi is enabled, there is a 4 MiB gap between the RAM // On AArch64 when acpi is enabled, there is a 4 MiB gap between the RAM
// that the VMM gives and the guest can see. // that the VMM gives and the guest can see.
// This is a temporary solution, will be fixed in future. // This is a temporary solution, will be fixed in future.
#[cfg(all(target_arch = "aarch64", feature = "acpi"))] #[cfg(target_arch = "aarch64")]
let guest_memory_size_kb = guest_memory_size_kb - 4 * 1024; let guest_memory_size_kb = guest_memory_size_kb - 4 * 1024;
let overhead = get_vmm_overhead(child.id(), guest_memory_size_kb); let overhead = get_vmm_overhead(child.id(), guest_memory_size_kb);
@ -7782,7 +7781,7 @@ mod live_migration {
} }
} }
#[cfg(all(target_arch = "aarch64", feature = "acpi"))] #[cfg(target_arch = "aarch64")]
mod aarch64_acpi { mod aarch64_acpi {
use crate::*; use crate::*;

View File

@ -6,7 +6,6 @@ edition = "2018"
[features] [features]
default = [] default = []
acpi = ["acpi_tables","devices/acpi", "arch/acpi"]
amx = [] amx = []
cmos = ["devices/cmos"] cmos = ["devices/cmos"]
fwdebug = ["devices/fwdebug"] fwdebug = ["devices/fwdebug"]
@ -16,7 +15,7 @@ mshv = ["hypervisor/mshv", "virtio-devices/mshv", "vfio-ioctls/mshv", "vm-device
tdx = ["arch/tdx", "hypervisor/tdx"] tdx = ["arch/tdx", "hypervisor/tdx"]
[dependencies] [dependencies]
acpi_tables = { path = "../acpi_tables", optional = true } acpi_tables = { path = "../acpi_tables" }
anyhow = "1.0.56" anyhow = "1.0.56"
arc-swap = "1.5.0" arc-swap = "1.5.0"
arch = { path = "../arch" } arch = { path = "../arch" }

View File

@ -13,7 +13,6 @@ use acpi_tables::{aml::Aml, rsdp::Rsdp, sdt::Sdt};
use arch::aarch64::DeviceInfoForFdt; use arch::aarch64::DeviceInfoForFdt;
#[cfg(target_arch = "aarch64")] #[cfg(target_arch = "aarch64")]
use arch::DeviceType; use arch::DeviceType;
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
use arch::NumaNodes; use arch::NumaNodes;
use bitflags::bitflags; use bitflags::bitflags;

View File

@ -21,11 +21,9 @@ use crate::seccomp_filters::{get_seccomp_filter, Thread};
use crate::vm::physical_bits; use crate::vm::physical_bits;
use crate::GuestMemoryMmap; use crate::GuestMemoryMmap;
use crate::CPU_MANAGER_SNAPSHOT_ID; use crate::CPU_MANAGER_SNAPSHOT_ID;
#[cfg(feature = "acpi")]
use acpi_tables::{aml, aml::Aml, sdt::Sdt}; use acpi_tables::{aml, aml::Aml, sdt::Sdt};
use anyhow::anyhow; use anyhow::anyhow;
use arch::EntryPoint; use arch::EntryPoint;
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
use arch::NumaNodes; use arch::NumaNodes;
use devices::interrupt_controller::InterruptController; use devices::interrupt_controller::InterruptController;
#[cfg(all(target_arch = "x86_64", feature = "gdb"))] #[cfg(all(target_arch = "x86_64", feature = "gdb"))]
@ -47,7 +45,6 @@ use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Barrier, Mutex}; use std::sync::{Arc, Barrier, Mutex};
use std::{cmp, io, result, thread}; use std::{cmp, io, result, thread};
use vm_device::BusDevice; use vm_device::BusDevice;
#[cfg(feature = "acpi")]
use vm_memory::GuestAddress; use vm_memory::GuestAddress;
use vm_memory::GuestMemoryAtomic; use vm_memory::GuestMemoryAtomic;
use vm_migration::{ use vm_migration::{
@ -57,7 +54,6 @@ use vm_migration::{
use vmm_sys_util::eventfd::EventFd; use vmm_sys_util::eventfd::EventFd;
use vmm_sys_util::signal::{register_signal_handler, SIGRTMIN}; use vmm_sys_util::signal::{register_signal_handler, SIGRTMIN};
#[cfg(feature = "acpi")]
pub const CPU_MANAGER_ACPI_SIZE: usize = 0xc; pub const CPU_MANAGER_ACPI_SIZE: usize = 0xc;
#[derive(Debug)] #[derive(Debug)]
@ -135,7 +131,7 @@ pub enum Error {
} }
pub type Result<T> = result::Result<T, Error>; pub type Result<T> = result::Result<T, Error>;
#[cfg(all(target_arch = "x86_64", feature = "acpi"))] #[cfg(target_arch = "x86_64")]
#[allow(dead_code)] #[allow(dead_code)]
#[repr(packed)] #[repr(packed)]
struct LocalApic { struct LocalApic {
@ -158,7 +154,7 @@ struct Ioapic {
pub gsi_base: u32, pub gsi_base: u32,
} }
#[cfg(all(target_arch = "aarch64", feature = "acpi"))] #[cfg(target_arch = "aarch64")]
#[allow(dead_code)] #[allow(dead_code)]
#[repr(packed)] #[repr(packed)]
struct GicC { struct GicC {
@ -182,7 +178,7 @@ struct GicC {
pub spe_overflow_interrupt: u16, pub spe_overflow_interrupt: u16,
} }
#[cfg(all(target_arch = "aarch64", feature = "acpi"))] #[cfg(target_arch = "aarch64")]
#[allow(dead_code)] #[allow(dead_code)]
#[repr(packed)] #[repr(packed)]
struct GicD { struct GicD {
@ -196,7 +192,7 @@ struct GicD {
pub reserved1: [u8; 3], pub reserved1: [u8; 3],
} }
#[cfg(all(target_arch = "aarch64", feature = "acpi"))] #[cfg(target_arch = "aarch64")]
#[allow(dead_code)] #[allow(dead_code)]
#[repr(packed)] #[repr(packed)]
struct GicR { struct GicR {
@ -207,7 +203,7 @@ struct GicR {
pub range_length: u32, pub range_length: u32,
} }
#[cfg(all(target_arch = "aarch64", feature = "acpi"))] #[cfg(target_arch = "aarch64")]
#[allow(dead_code)] #[allow(dead_code)]
#[repr(packed)] #[repr(packed)]
struct GicIts { struct GicIts {
@ -219,7 +215,7 @@ struct GicIts {
pub reserved1: u32, pub reserved1: u32,
} }
#[cfg(all(target_arch = "aarch64", feature = "acpi"))] #[cfg(target_arch = "aarch64")]
#[allow(dead_code)] #[allow(dead_code)]
#[repr(packed)] #[repr(packed)]
struct ProcessorHierarchyNode { struct ProcessorHierarchyNode {
@ -419,10 +415,8 @@ pub struct CpuManager {
vcpus: Vec<Arc<Mutex<Vcpu>>>, vcpus: Vec<Arc<Mutex<Vcpu>>>,
seccomp_action: SeccompAction, seccomp_action: SeccompAction,
vmmops: Arc<dyn VmmOps>, vmmops: Arc<dyn VmmOps>,
#[cfg(feature = "acpi")]
#[cfg_attr(target_arch = "aarch64", allow(dead_code))] #[cfg_attr(target_arch = "aarch64", allow(dead_code))]
acpi_address: Option<GuestAddress>, acpi_address: Option<GuestAddress>,
#[cfg(feature = "acpi")]
proximity_domain_per_cpu: BTreeMap<u8, u32>, proximity_domain_per_cpu: BTreeMap<u8, u32>,
affinity: BTreeMap<u8, Vec<u8>>, affinity: BTreeMap<u8, Vec<u8>>,
dynamic: bool, dynamic: bool,
@ -572,7 +566,7 @@ impl CpuManager {
seccomp_action: SeccompAction, seccomp_action: SeccompAction,
vmmops: Arc<dyn VmmOps>, vmmops: Arc<dyn VmmOps>,
#[cfg(feature = "tdx")] tdx_enabled: bool, #[cfg(feature = "tdx")] tdx_enabled: bool,
#[cfg(any(target_arch = "aarch64", feature = "acpi"))] numa_nodes: &NumaNodes, numa_nodes: &NumaNodes,
) -> Result<Arc<Mutex<CpuManager>>> { ) -> Result<Arc<Mutex<CpuManager>>> {
let guest_memory = memory_manager.lock().unwrap().guest_memory(); let guest_memory = memory_manager.lock().unwrap().guest_memory();
let mut vcpu_states = Vec::with_capacity(usize::from(config.max_vcpus)); let mut vcpu_states = Vec::with_capacity(usize::from(config.max_vcpus));
@ -636,7 +630,6 @@ impl CpuManager {
let device_manager = device_manager.lock().unwrap(); let device_manager = device_manager.lock().unwrap();
#[cfg(feature = "acpi")]
let proximity_domain_per_cpu: BTreeMap<u8, u32> = { let proximity_domain_per_cpu: BTreeMap<u8, u32> = {
let mut cpu_list = Vec::new(); let mut cpu_list = Vec::new();
for (proximity_domain, numa_node) in numa_nodes.iter() { for (proximity_domain, numa_node) in numa_nodes.iter() {
@ -663,7 +656,6 @@ impl CpuManager {
#[cfg(not(feature = "tdx"))] #[cfg(not(feature = "tdx"))]
let dynamic = true; let dynamic = true;
#[cfg(feature = "acpi")]
let acpi_address = if dynamic { let acpi_address = if dynamic {
Some( Some(
device_manager device_manager
@ -695,15 +687,12 @@ impl CpuManager {
vcpus: Vec::with_capacity(usize::from(config.max_vcpus)), vcpus: Vec::with_capacity(usize::from(config.max_vcpus)),
seccomp_action, seccomp_action,
vmmops, vmmops,
#[cfg(feature = "acpi")]
acpi_address, acpi_address,
#[cfg(feature = "acpi")]
proximity_domain_per_cpu, proximity_domain_per_cpu,
affinity, affinity,
dynamic, dynamic,
})); }));
#[cfg(feature = "acpi")]
if let Some(acpi_address) = acpi_address { if let Some(acpi_address) = acpi_address {
device_manager device_manager
.mmio_bus() .mmio_bus()
@ -1240,7 +1229,6 @@ impl CpuManager {
.map(|t| (t.threads_per_core, t.cores_per_die, t.packages)) .map(|t| (t.threads_per_core, t.cores_per_die, t.packages))
} }
#[cfg(feature = "acpi")]
pub fn create_madt(&self) -> Sdt { pub fn create_madt(&self) -> Sdt {
use crate::acpi; use crate::acpi;
// This is also checked in the commandline parsing. // This is also checked in the commandline parsing.
@ -1370,7 +1358,7 @@ impl CpuManager {
madt madt
} }
#[cfg(all(target_arch = "aarch64", feature = "acpi"))] #[cfg(target_arch = "aarch64")]
pub fn create_pptt(&self) -> Sdt { pub fn create_pptt(&self) -> Sdt {
let pptt_start = 0; let pptt_start = 0;
let mut cpus = 0; let mut cpus = 0;
@ -1503,17 +1491,15 @@ impl CpuManager {
} }
} }
#[cfg(feature = "acpi")]
struct Cpu { struct Cpu {
cpu_id: u8, cpu_id: u8,
proximity_domain: u32, proximity_domain: u32,
dynamic: bool, dynamic: bool,
} }
#[cfg(all(target_arch = "x86_64", feature = "acpi"))] #[cfg(target_arch = "x86_64")]
const MADT_CPU_ENABLE_FLAG: usize = 0; const MADT_CPU_ENABLE_FLAG: usize = 0;
#[cfg(feature = "acpi")]
impl Cpu { impl Cpu {
#[cfg(target_arch = "x86_64")] #[cfg(target_arch = "x86_64")]
fn generate_mat(&self) -> Vec<u8> { fn generate_mat(&self) -> Vec<u8> {
@ -1533,7 +1519,6 @@ impl Cpu {
} }
} }
#[cfg(feature = "acpi")]
impl Aml for Cpu { impl Aml for Cpu {
fn append_aml_bytes(&self, bytes: &mut Vec<u8>) { fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
#[cfg(target_arch = "x86_64")] #[cfg(target_arch = "x86_64")]
@ -1621,12 +1606,10 @@ impl Aml for Cpu {
} }
} }
#[cfg(feature = "acpi")]
struct CpuNotify { struct CpuNotify {
cpu_id: u8, cpu_id: u8,
} }
#[cfg(feature = "acpi")]
impl Aml for CpuNotify { impl Aml for CpuNotify {
fn append_aml_bytes(&self, bytes: &mut Vec<u8>) { fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
let object = aml::Path::new(&format!("C{:03}", self.cpu_id)); let object = aml::Path::new(&format!("C{:03}", self.cpu_id));
@ -1638,13 +1621,11 @@ impl Aml for CpuNotify {
} }
} }
#[cfg(feature = "acpi")]
struct CpuMethods { struct CpuMethods {
max_vcpus: u8, max_vcpus: u8,
dynamic: bool, dynamic: bool,
} }
#[cfg(feature = "acpi")]
impl Aml for CpuMethods { impl Aml for CpuMethods {
fn append_aml_bytes(&self, bytes: &mut Vec<u8>) { fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
if self.dynamic { if self.dynamic {
@ -1758,7 +1739,6 @@ impl Aml for CpuMethods {
} }
} }
#[cfg(feature = "acpi")]
impl Aml for CpuManager { impl Aml for CpuManager {
fn append_aml_bytes(&self, bytes: &mut Vec<u8>) { fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
#[cfg(target_arch = "x86_64")] #[cfg(target_arch = "x86_64")]

View File

@ -19,7 +19,6 @@ use crate::interrupt::kvm::KvmMsiInterruptManager as MsiInterruptManager;
#[cfg(feature = "mshv")] #[cfg(feature = "mshv")]
use crate::interrupt::mshv::MshvMsiInterruptManager as MsiInterruptManager; use crate::interrupt::mshv::MshvMsiInterruptManager as MsiInterruptManager;
use crate::interrupt::LegacyUserspaceInterruptManager; use crate::interrupt::LegacyUserspaceInterruptManager;
#[cfg(feature = "acpi")]
use crate::memory_manager::MEMORY_MANAGER_ACPI_SIZE; use crate::memory_manager::MEMORY_MANAGER_ACPI_SIZE;
use crate::memory_manager::{Error as MemoryManagerError, MemoryManager}; use crate::memory_manager::{Error as MemoryManagerError, MemoryManager};
use crate::pci_segment::PciSegment; use crate::pci_segment::PciSegment;
@ -29,16 +28,13 @@ use crate::sigwinch_listener::start_sigwinch_listener;
use crate::GuestRegionMmap; use crate::GuestRegionMmap;
use crate::PciDeviceInfo; use crate::PciDeviceInfo;
use crate::{device_node, DEVICE_MANAGER_SNAPSHOT_ID}; use crate::{device_node, DEVICE_MANAGER_SNAPSHOT_ID};
#[cfg(feature = "acpi")]
use acpi_tables::{aml, aml::Aml}; use acpi_tables::{aml, aml::Aml};
use anyhow::anyhow; use anyhow::anyhow;
#[cfg(target_arch = "aarch64")] #[cfg(target_arch = "aarch64")]
use arch::aarch64::gic::gicv3_its::kvm::KvmGicV3Its; use arch::aarch64::gic::gicv3_its::kvm::KvmGicV3Its;
#[cfg(feature = "acpi")]
use arch::layout; use arch::layout;
#[cfg(target_arch = "x86_64")] #[cfg(target_arch = "x86_64")]
use arch::layout::{APIC_START, IOAPIC_SIZE, IOAPIC_START}; use arch::layout::{APIC_START, IOAPIC_SIZE, IOAPIC_START};
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
use arch::NumaNodes; use arch::NumaNodes;
#[cfg(target_arch = "aarch64")] #[cfg(target_arch = "aarch64")]
use arch::{DeviceType, MmioDeviceInfo}; use arch::{DeviceType, MmioDeviceInfo};
@ -480,7 +476,6 @@ pub enum DeviceManagerError {
} }
pub type DeviceManagerResult<T> = result::Result<T, DeviceManagerError>; pub type DeviceManagerResult<T> = result::Result<T, DeviceManagerError>;
#[cfg(feature = "acpi")]
const DEVICE_MANAGER_ACPI_SIZE: usize = 0x10; const DEVICE_MANAGER_ACPI_SIZE: usize = 0x10;
const TIOCSPTLCK: libc::c_int = 0x4004_5431; const TIOCSPTLCK: libc::c_int = 0x4004_5431;
@ -846,7 +841,6 @@ pub struct DeviceManager {
cmdline_additions: Vec<String>, cmdline_additions: Vec<String>,
// ACPI GED notification device // ACPI GED notification device
#[cfg(feature = "acpi")]
ged_notification_device: Option<Arc<Mutex<devices::AcpiGedDevice>>>, ged_notification_device: Option<Arc<Mutex<devices::AcpiGedDevice>>>,
// VM configuration // VM configuration
@ -910,7 +904,6 @@ pub struct DeviceManager {
seccomp_action: SeccompAction, seccomp_action: SeccompAction,
// List of guest NUMA nodes. // List of guest NUMA nodes.
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
numa_nodes: NumaNodes, numa_nodes: NumaNodes,
// Possible handle to the virtio-balloon device // Possible handle to the virtio-balloon device
@ -920,9 +913,8 @@ pub struct DeviceManager {
// activation and thus start the threads from the VMM thread // activation and thus start the threads from the VMM thread
activate_evt: EventFd, activate_evt: EventFd,
#[cfg(feature = "acpi")]
acpi_address: GuestAddress, acpi_address: GuestAddress,
#[cfg(feature = "acpi")]
selected_segment: usize, selected_segment: usize,
// Possible handle to the virtio-mem device // Possible handle to the virtio-mem device
@ -951,7 +943,7 @@ impl DeviceManager {
exit_evt: &EventFd, exit_evt: &EventFd,
reset_evt: &EventFd, reset_evt: &EventFd,
seccomp_action: SeccompAction, seccomp_action: SeccompAction,
#[cfg(any(target_arch = "aarch64", feature = "acpi"))] numa_nodes: NumaNodes, numa_nodes: NumaNodes,
activate_evt: &EventFd, activate_evt: &EventFd,
force_iommu: bool, force_iommu: bool,
restoring: bool, restoring: bool,
@ -1004,7 +996,6 @@ impl DeviceManager {
vm, vm,
)); ));
#[cfg(feature = "acpi")]
let acpi_address = address_manager let acpi_address = address_manager
.allocator .allocator
.lock() .lock()
@ -1038,7 +1029,7 @@ impl DeviceManager {
console: Arc::new(Console::default()), console: Arc::new(Console::default()),
interrupt_controller: None, interrupt_controller: None,
cmdline_additions: Vec::new(), cmdline_additions: Vec::new(),
#[cfg(feature = "acpi")]
ged_notification_device: None, ged_notification_device: None,
config, config,
memory_manager, memory_manager,
@ -1059,15 +1050,15 @@ impl DeviceManager {
#[cfg(target_arch = "aarch64")] #[cfg(target_arch = "aarch64")]
id_to_dev_info: HashMap::new(), id_to_dev_info: HashMap::new(),
seccomp_action, seccomp_action,
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
numa_nodes, numa_nodes,
balloon: None, balloon: None,
activate_evt: activate_evt activate_evt: activate_evt
.try_clone() .try_clone()
.map_err(DeviceManagerError::EventFd)?, .map_err(DeviceManagerError::EventFd)?,
#[cfg(feature = "acpi")]
acpi_address, acpi_address,
#[cfg(feature = "acpi")]
selected_segment: 0, selected_segment: 0,
serial_pty: None, serial_pty: None,
serial_manager: None, serial_manager: None,
@ -1083,7 +1074,6 @@ impl DeviceManager {
let device_manager = Arc::new(Mutex::new(device_manager)); let device_manager = Arc::new(Mutex::new(device_manager));
#[cfg(feature = "acpi")]
address_manager address_manager
.mmio_bus .mmio_bus
.insert( .insert(
@ -1130,7 +1120,6 @@ impl DeviceManager {
&interrupt_controller, &interrupt_controller,
))); )));
#[cfg(feature = "acpi")]
{ {
if let Some(acpi_address) = self.memory_manager.lock().unwrap().acpi_address() { if let Some(acpi_address) = self.memory_manager.lock().unwrap().acpi_address() {
self.address_manager self.address_manager
@ -1154,7 +1143,6 @@ impl DeviceManager {
#[cfg(target_arch = "aarch64")] #[cfg(target_arch = "aarch64")]
self.add_legacy_devices(&legacy_interrupt_manager)?; self.add_legacy_devices(&legacy_interrupt_manager)?;
#[cfg(feature = "acpi")]
{ {
self.ged_notification_device = self.add_acpi_devices( self.ged_notification_device = self.add_acpi_devices(
&legacy_interrupt_manager, &legacy_interrupt_manager,
@ -1378,7 +1366,6 @@ impl DeviceManager {
Ok(interrupt_controller) Ok(interrupt_controller)
} }
#[cfg(feature = "acpi")]
fn add_acpi_devices( fn add_acpi_devices(
&mut self, &mut self,
interrupt_manager: &Arc<dyn InterruptManager<GroupConfig = LegacyIrqGroupConfig>>, interrupt_manager: &Arc<dyn InterruptManager<GroupConfig = LegacyIrqGroupConfig>>,
@ -2787,9 +2774,6 @@ impl DeviceManager {
if let Some(virtio_mem_zone) = memory_zone.virtio_mem_zone() { if let Some(virtio_mem_zone) = memory_zone.virtio_mem_zone() {
info!("Creating virtio-mem device: id = {}", memory_zone_id); info!("Creating virtio-mem device: id = {}", memory_zone_id);
#[cfg(all(target_arch = "x86_64", not(feature = "acpi")))]
let node_id: Option<u16> = None;
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
let node_id = numa_node_id_from_memory_zone_id(&self.numa_nodes, memory_zone_id) let node_id = numa_node_id_from_memory_zone_id(&self.numa_nodes, memory_zone_id)
.map(|i| i as u16); .map(|i| i as u16);
@ -3585,7 +3569,6 @@ impl DeviceManager {
Arc::clone(self.pci_segments[0].pci_config_io.as_ref().unwrap()) Arc::clone(self.pci_segments[0].pci_config_io.as_ref().unwrap())
} }
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
pub(crate) fn pci_segments(&self) -> &Vec<PciSegment> { pub(crate) fn pci_segments(&self) -> &Vec<PciSegment> {
&self.pci_segments &self.pci_segments
} }
@ -3670,7 +3653,6 @@ impl DeviceManager {
&self, &self,
_notification_type: AcpiNotificationFlags, _notification_type: AcpiNotificationFlags,
) -> DeviceManagerResult<()> { ) -> DeviceManagerResult<()> {
#[cfg(feature = "acpi")]
return self return self
.ged_notification_device .ged_notification_device
.as_ref() .as_ref()
@ -3679,8 +3661,6 @@ impl DeviceManager {
.unwrap() .unwrap()
.notify(_notification_type) .notify(_notification_type)
.map_err(DeviceManagerError::HotPlugNotification); .map_err(DeviceManagerError::HotPlugNotification);
#[cfg(not(feature = "acpi"))]
return Ok(());
} }
pub fn add_device( pub fn add_device(
@ -4113,7 +4093,6 @@ impl DeviceManager {
Ok(()) Ok(())
} }
#[cfg(feature = "acpi")]
#[cfg(target_arch = "x86_64")] #[cfg(target_arch = "x86_64")]
pub fn notify_power_button(&self) -> DeviceManagerResult<()> { pub fn notify_power_button(&self) -> DeviceManagerResult<()> {
self.ged_notification_device self.ged_notification_device
@ -4127,43 +4106,27 @@ impl DeviceManager {
#[cfg(target_arch = "aarch64")] #[cfg(target_arch = "aarch64")]
pub fn notify_power_button(&self) -> DeviceManagerResult<()> { pub fn notify_power_button(&self) -> DeviceManagerResult<()> {
// There are three use cases: // There are two use cases:
// 1. The Cloud Hypervisor is built without feature acpi. // 1. Users will use direct kernel boot with device tree.
// 2. The Cloud Hypervisor is built with feature acpi, but users will // 2. Users will use ACPI+UEFI boot.
// use direct kernel boot with device tree.
// 3. The Cloud Hypervisor is built with feature acpi, and users will // Trigger a GPIO pin 3 event to satisfy use case 1.
// use ACPI+UEFI boot. self.gpio_device
#[cfg(not(feature = "acpi"))]
// The `return` here will trigger a GPIO pin 3 event, which will trigger
// a power button event for use case 1.
return self
.gpio_device
.as_ref() .as_ref()
.unwrap() .unwrap()
.lock() .lock()
.unwrap() .unwrap()
.trigger_key(3) .trigger_key(3)
.map_err(DeviceManagerError::AArch64PowerButtonNotification); .map_err(DeviceManagerError::AArch64PowerButtonNotification)?;
#[cfg(feature = "acpi")] // Trigger a GED power button event to satisfy use case 2.
{ return self
// Trigger a GPIO pin 3 event to satisfy use case 2. self.gpio_device
self.gpio_device .as_ref()
.as_ref() .unwrap()
.unwrap() .lock()
.lock() .unwrap()
.unwrap() .notify(AcpiNotificationFlags::POWER_BUTTON_CHANGED)
.trigger_key(3) .map_err(DeviceManagerError::PowerButtonNotification);
.map_err(DeviceManagerError::AArch64PowerButtonNotification)?;
// Trigger a GED power button event to satisfy use case 3. return self
return self
.ged_notification_device
.as_ref()
.unwrap()
.lock()
.unwrap()
.notify(AcpiNotificationFlags::POWER_BUTTON_CHANGED)
.map_err(DeviceManagerError::PowerButtonNotification);
}
} }
pub fn iommu_attached_devices(&self) -> &Option<(PciBdf, Vec<PciBdf>)> { pub fn iommu_attached_devices(&self) -> &Option<(PciBdf, Vec<PciBdf>)> {
@ -4171,7 +4134,6 @@ impl DeviceManager {
} }
} }
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
fn numa_node_id_from_memory_zone_id(numa_nodes: &NumaNodes, memory_zone_id: &str) -> Option<u32> { fn numa_node_id_from_memory_zone_id(numa_nodes: &NumaNodes, memory_zone_id: &str) -> Option<u32> {
for (numa_node_id, numa_node) in numa_nodes.iter() { for (numa_node_id, numa_node) in numa_nodes.iter() {
if numa_node.memory_zones.contains(&memory_zone_id.to_owned()) { if numa_node.memory_zones.contains(&memory_zone_id.to_owned()) {
@ -4182,7 +4144,6 @@ fn numa_node_id_from_memory_zone_id(numa_nodes: &NumaNodes, memory_zone_id: &str
None None
} }
#[cfg(feature = "acpi")]
impl Aml for DeviceManager { impl Aml for DeviceManager {
fn append_aml_bytes(&self, bytes: &mut Vec<u8>) { fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
#[cfg(target_arch = "aarch64")] #[cfg(target_arch = "aarch64")]
@ -4490,24 +4451,15 @@ impl Migratable for DeviceManager {
} }
} }
#[cfg(feature = "acpi")]
const PCIU_FIELD_OFFSET: u64 = 0; const PCIU_FIELD_OFFSET: u64 = 0;
#[cfg(feature = "acpi")]
const PCID_FIELD_OFFSET: u64 = 4; const PCID_FIELD_OFFSET: u64 = 4;
#[cfg(feature = "acpi")]
const B0EJ_FIELD_OFFSET: u64 = 8; const B0EJ_FIELD_OFFSET: u64 = 8;
#[cfg(feature = "acpi")]
const PSEG_FIELD_OFFSET: u64 = 12; const PSEG_FIELD_OFFSET: u64 = 12;
#[cfg(feature = "acpi")]
const PCIU_FIELD_SIZE: usize = 4; const PCIU_FIELD_SIZE: usize = 4;
#[cfg(feature = "acpi")]
const PCID_FIELD_SIZE: usize = 4; const PCID_FIELD_SIZE: usize = 4;
#[cfg(feature = "acpi")]
const B0EJ_FIELD_SIZE: usize = 4; const B0EJ_FIELD_SIZE: usize = 4;
#[cfg(feature = "acpi")]
const PSEG_FIELD_SIZE: usize = 4; const PSEG_FIELD_SIZE: usize = 4;
#[cfg(feature = "acpi")]
impl BusDevice for DeviceManager { impl BusDevice for DeviceManager {
fn read(&mut self, base: u64, offset: u64, data: &mut [u8]) { fn read(&mut self, base: u64, offset: u64, data: &mut [u8]) {
match offset { match offset {

View File

@ -49,7 +49,6 @@ use vm_migration::{MigratableError, Pausable, Snapshot, Snapshottable, Transport
use vmm_sys_util::eventfd::EventFd; use vmm_sys_util::eventfd::EventFd;
use vmm_sys_util::sock_ctrl_msg::ScmSocket; use vmm_sys_util::sock_ctrl_msg::ScmSocket;
#[cfg(feature = "acpi")]
mod acpi; mod acpi;
pub mod api; pub mod api;
mod clone3; mod clone3;
@ -571,16 +570,6 @@ impl Vmm {
} }
fn vm_reboot(&mut self) -> result::Result<(), VmError> { fn vm_reboot(&mut self) -> result::Result<(), VmError> {
// Without ACPI, a reset is equivalent to a shutdown
// On AArch64, before ACPI is supported, we simply jump over this check and continue to reset.
#[cfg(all(target_arch = "x86_64", not(feature = "acpi")))]
{
if self.vm.is_some() {
self.exit_evt.write(1).unwrap();
return Ok(());
}
}
// First we stop the current VM // First we stop the current VM
let (config, serial_pty, console_pty, console_resize_pipe) = let (config, serial_pty, console_pty, console_resize_pipe) =
if let Some(mut vm) = self.vm.take() { if let Some(mut vm) = self.vm.take() {

View File

@ -8,7 +8,6 @@ use crate::config::{HotplugMethod, MemoryConfig, MemoryZoneConfig};
use crate::migration::url_to_path; use crate::migration::url_to_path;
use crate::MEMORY_MANAGER_SNAPSHOT_ID; use crate::MEMORY_MANAGER_SNAPSHOT_ID;
use crate::{GuestMemoryMmap, GuestRegionMmap}; use crate::{GuestMemoryMmap, GuestRegionMmap};
#[cfg(feature = "acpi")]
use acpi_tables::{aml, aml::Aml}; use acpi_tables::{aml, aml::Aml};
use anyhow::anyhow; use anyhow::anyhow;
#[cfg(target_arch = "x86_64")] #[cfg(target_arch = "x86_64")]
@ -46,7 +45,6 @@ use vm_migration::{
Snapshot, SnapshotDataSection, Snapshottable, Transportable, VersionMapped, Snapshot, SnapshotDataSection, Snapshottable, Transportable, VersionMapped,
}; };
#[cfg(feature = "acpi")]
pub const MEMORY_MANAGER_ACPI_SIZE: usize = 0x18; pub const MEMORY_MANAGER_ACPI_SIZE: usize = 0x18;
const DEFAULT_MEMORY_ZONE: &str = "mem0"; const DEFAULT_MEMORY_ZONE: &str = "mem0";
@ -179,7 +177,6 @@ pub struct MemoryManager {
// slots that the mapping is created in. // slots that the mapping is created in.
guest_ram_mappings: Vec<GuestRamMapping>, guest_ram_mappings: Vec<GuestRamMapping>,
#[cfg(feature = "acpi")]
pub acpi_address: Option<GuestAddress>, pub acpi_address: Option<GuestAddress>,
} }
@ -1031,7 +1028,6 @@ impl MemoryManager {
let hotplug_method = config.hotplug_method.clone(); let hotplug_method = config.hotplug_method.clone();
#[cfg(feature = "acpi")]
let acpi_address = if dynamic && hotplug_method == HotplugMethod::Acpi { let acpi_address = if dynamic && hotplug_method == HotplugMethod::Acpi {
Some( Some(
allocator allocator
@ -1075,7 +1071,7 @@ impl MemoryManager {
snapshot_memory_ranges: MemoryRangeTable::default(), snapshot_memory_ranges: MemoryRangeTable::default(),
memory_zones, memory_zones,
guest_ram_mappings: Vec::new(), guest_ram_mappings: Vec::new(),
#[cfg(feature = "acpi")]
acpi_address, acpi_address,
log_dirty: dynamic, // Cannot log dirty pages on a TD log_dirty: dynamic, // Cannot log dirty pages on a TD
arch_mem_regions, arch_mem_regions,
@ -1846,18 +1842,15 @@ impl MemoryManager {
memory_slot_fds memory_slot_fds
} }
#[cfg(feature = "acpi")]
pub fn acpi_address(&self) -> Option<GuestAddress> { pub fn acpi_address(&self) -> Option<GuestAddress> {
self.acpi_address self.acpi_address
} }
} }
#[cfg(feature = "acpi")]
struct MemoryNotify { struct MemoryNotify {
slot_id: usize, slot_id: usize,
} }
#[cfg(feature = "acpi")]
impl Aml for MemoryNotify { impl Aml for MemoryNotify {
fn append_aml_bytes(&self, bytes: &mut Vec<u8>) { fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
let object = aml::Path::new(&format!("M{:03}", self.slot_id)); let object = aml::Path::new(&format!("M{:03}", self.slot_id));
@ -1869,12 +1862,10 @@ impl Aml for MemoryNotify {
} }
} }
#[cfg(feature = "acpi")]
struct MemorySlot { struct MemorySlot {
slot_id: usize, slot_id: usize,
} }
#[cfg(feature = "acpi")]
impl Aml for MemorySlot { impl Aml for MemorySlot {
fn append_aml_bytes(&self, bytes: &mut Vec<u8>) { fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
aml::Device::new( aml::Device::new(
@ -1918,12 +1909,10 @@ impl Aml for MemorySlot {
} }
} }
#[cfg(feature = "acpi")]
struct MemorySlots { struct MemorySlots {
slots: usize, slots: usize,
} }
#[cfg(feature = "acpi")]
impl Aml for MemorySlots { impl Aml for MemorySlots {
fn append_aml_bytes(&self, bytes: &mut Vec<u8>) { fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
for slot_id in 0..self.slots { for slot_id in 0..self.slots {
@ -1932,12 +1921,10 @@ impl Aml for MemorySlots {
} }
} }
#[cfg(feature = "acpi")]
struct MemoryMethods { struct MemoryMethods {
slots: usize, slots: usize,
} }
#[cfg(feature = "acpi")]
impl Aml for MemoryMethods { impl Aml for MemoryMethods {
fn append_aml_bytes(&self, bytes: &mut Vec<u8>) { fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
// Add "MTFY" notification method // Add "MTFY" notification method
@ -2080,7 +2067,6 @@ impl Aml for MemoryMethods {
} }
} }
#[cfg(feature = "acpi")]
impl Aml for MemoryManager { impl Aml for MemoryManager {
fn append_aml_bytes(&self, bytes: &mut Vec<u8>) { fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
if let Some(acpi_address) = self.acpi_address { if let Some(acpi_address) = self.acpi_address {

View File

@ -10,14 +10,12 @@
// //
use crate::device_manager::{AddressManager, DeviceManagerError, DeviceManagerResult}; use crate::device_manager::{AddressManager, DeviceManagerError, DeviceManagerResult};
#[cfg(feature = "acpi")]
use acpi_tables::aml::{self, Aml}; use acpi_tables::aml::{self, Aml};
use arch::layout; use arch::layout;
use pci::{DeviceRelocation, PciBdf, PciBus, PciConfigMmio, PciRoot}; use pci::{DeviceRelocation, PciBdf, PciBus, PciConfigMmio, PciRoot};
#[cfg(target_arch = "x86_64")] #[cfg(target_arch = "x86_64")]
use pci::{PciConfigIo, PCI_CONFIG_IO_PORT, PCI_CONFIG_IO_PORT_SIZE}; use pci::{PciConfigIo, PCI_CONFIG_IO_PORT, PCI_CONFIG_IO_PORT_SIZE};
use std::sync::{Arc, Mutex}; use std::sync::{Arc, Mutex};
#[cfg(feature = "acpi")]
use uuid::Uuid; use uuid::Uuid;
use vm_allocator::AddressAllocator; use vm_allocator::AddressAllocator;
use vm_device::BusDevice; use vm_device::BusDevice;
@ -168,12 +166,10 @@ impl PciSegment {
} }
} }
#[cfg(feature = "acpi")]
struct PciDevSlot { struct PciDevSlot {
device_id: u8, device_id: u8,
} }
#[cfg(feature = "acpi")]
impl Aml for PciDevSlot { impl Aml for PciDevSlot {
fn append_aml_bytes(&self, bytes: &mut Vec<u8>) { fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
let sun = self.device_id; let sun = self.device_id;
@ -198,12 +194,10 @@ impl Aml for PciDevSlot {
} }
} }
#[cfg(feature = "acpi")]
struct PciDevSlotNotify { struct PciDevSlotNotify {
device_id: u8, device_id: u8,
} }
#[cfg(feature = "acpi")]
impl Aml for PciDevSlotNotify { impl Aml for PciDevSlotNotify {
fn append_aml_bytes(&self, bytes: &mut Vec<u8>) { fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
let device_id_mask: u32 = 1 << self.device_id; let device_id_mask: u32 = 1 << self.device_id;
@ -217,10 +211,8 @@ impl Aml for PciDevSlotNotify {
} }
} }
#[cfg(feature = "acpi")]
struct PciDevSlotMethods {} struct PciDevSlotMethods {}
#[cfg(feature = "acpi")]
impl Aml for PciDevSlotMethods { impl Aml for PciDevSlotMethods {
fn append_aml_bytes(&self, bytes: &mut Vec<u8>) { fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
let mut device_notifies = Vec::new(); let mut device_notifies = Vec::new();
@ -256,10 +248,8 @@ impl Aml for PciDevSlotMethods {
} }
} }
#[cfg(feature = "acpi")]
struct PciDsmMethod {} struct PciDsmMethod {}
#[cfg(feature = "acpi")]
impl Aml for PciDsmMethod { impl Aml for PciDsmMethod {
fn append_aml_bytes(&self, bytes: &mut Vec<u8>) { fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
// Refer to ACPI spec v6.3 Ch 9.1.1 and PCI Firmware spec v3.3 Ch 4.6.1 // Refer to ACPI spec v6.3 Ch 9.1.1 and PCI Firmware spec v3.3 Ch 4.6.1
@ -321,7 +311,6 @@ impl Aml for PciDsmMethod {
} }
} }
#[cfg(feature = "acpi")]
impl Aml for PciSegment { impl Aml for PciSegment {
fn append_aml_bytes(&self, bytes: &mut Vec<u8>) { fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
let mut pci_dsdt_inner_data: Vec<&dyn aml::Aml> = Vec::new(); let mut pci_dsdt_inner_data: Vec<&dyn aml::Aml> = Vec::new();

View File

@ -11,7 +11,6 @@
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause // SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
// //
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
use crate::config::NumaConfig; use crate::config::NumaConfig;
use crate::config::{ use crate::config::{
add_to_config, DeviceConfig, DiskConfig, FsConfig, HotplugMethod, NetConfig, PmemConfig, add_to_config, DeviceConfig, DiskConfig, FsConfig, HotplugMethod, NetConfig, PmemConfig,
@ -40,7 +39,6 @@ use arch::x86_64::tdx::TdvfSection;
use arch::EntryPoint; use arch::EntryPoint;
#[cfg(target_arch = "aarch64")] #[cfg(target_arch = "aarch64")]
use arch::PciSpaceInfo; use arch::PciSpaceInfo;
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
use arch::{NumaNode, NumaNodes}; use arch::{NumaNode, NumaNodes};
use devices::AcpiNotificationFlags; use devices::AcpiNotificationFlags;
#[cfg(all(target_arch = "x86_64", feature = "gdb"))] #[cfg(all(target_arch = "x86_64", feature = "gdb"))]
@ -59,7 +57,6 @@ use signal_hook::{
iterator::Signals, iterator::Signals,
}; };
use std::cmp; use std::cmp;
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
use std::collections::BTreeMap; use std::collections::BTreeMap;
use std::collections::HashMap; use std::collections::HashMap;
use std::convert::TryInto; use std::convert::TryInto;
@ -240,9 +237,6 @@ pub enum Error {
/// Cannot activate virtio devices /// Cannot activate virtio devices
ActivateVirtioDevices(device_manager::DeviceManagerError), ActivateVirtioDevices(device_manager::DeviceManagerError),
/// Power button not supported
PowerButtonNotSupported,
/// Error triggering power button /// Error triggering power button
PowerButton(device_manager::DeviceManagerError), PowerButton(device_manager::DeviceManagerError),
@ -535,7 +529,7 @@ pub struct Vm {
vm: Arc<dyn hypervisor::Vm>, vm: Arc<dyn hypervisor::Vm>,
#[cfg(all(feature = "kvm", target_arch = "x86_64"))] #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
saved_clock: Option<hypervisor::ClockData>, saved_clock: Option<hypervisor::ClockData>,
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
numa_nodes: NumaNodes, numa_nodes: NumaNodes,
seccomp_action: SeccompAction, seccomp_action: SeccompAction,
exit_evt: EventFd, exit_evt: EventFd,
@ -567,7 +561,6 @@ impl Vm {
info!("Booting VM from config: {:?}", &config); info!("Booting VM from config: {:?}", &config);
// Create NUMA nodes based on NumaConfig. // Create NUMA nodes based on NumaConfig.
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
let numa_nodes = let numa_nodes =
Self::create_numa_nodes(config.lock().unwrap().numa.clone(), &memory_manager)?; Self::create_numa_nodes(config.lock().unwrap().numa.clone(), &memory_manager)?;
@ -588,7 +581,6 @@ impl Vm {
&exit_evt, &exit_evt,
&reset_evt, &reset_evt,
seccomp_action.clone(), seccomp_action.clone(),
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
numa_nodes.clone(), numa_nodes.clone(),
&activate_evt, &activate_evt,
force_iommu, force_iommu,
@ -635,7 +627,6 @@ impl Vm {
vm_ops, vm_ops,
#[cfg(feature = "tdx")] #[cfg(feature = "tdx")]
tdx_enabled, tdx_enabled,
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
&numa_nodes, &numa_nodes,
) )
.map_err(Error::CpuManager)?; .map_err(Error::CpuManager)?;
@ -673,7 +664,7 @@ impl Vm {
vm, vm,
#[cfg(all(feature = "kvm", target_arch = "x86_64"))] #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
saved_clock: None, saved_clock: None,
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
numa_nodes, numa_nodes,
seccomp_action: seccomp_action.clone(), seccomp_action: seccomp_action.clone(),
exit_evt, exit_evt,
@ -683,7 +674,6 @@ impl Vm {
}) })
} }
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
fn create_numa_nodes( fn create_numa_nodes(
configs: Option<Vec<NumaConfig>>, configs: Option<Vec<NumaConfig>>,
memory_manager: &Arc<Mutex<MemoryManager>>, memory_manager: &Arc<Mutex<MemoryManager>>,
@ -1107,7 +1097,7 @@ impl Vm {
} }
#[cfg(target_arch = "x86_64")] #[cfg(target_arch = "x86_64")]
fn configure_system(&mut self, #[cfg(feature = "acpi")] rsdp_addr: GuestAddress) -> Result<()> { fn configure_system(&mut self, rsdp_addr: GuestAddress) -> Result<()> {
info!("Configuring system"); info!("Configuring system");
let mem = self.memory_manager.lock().unwrap().boot_guest_memory(); let mem = self.memory_manager.lock().unwrap().boot_guest_memory();
@ -1117,12 +1107,7 @@ impl Vm {
}; };
let boot_vcpus = self.cpu_manager.lock().unwrap().boot_vcpus(); let boot_vcpus = self.cpu_manager.lock().unwrap().boot_vcpus();
#[cfg(feature = "acpi")]
let rsdp_addr = Some(rsdp_addr); let rsdp_addr = Some(rsdp_addr);
#[cfg(not(feature = "acpi"))]
let rsdp_addr = None;
let sgx_epc_region = self let sgx_epc_region = self
.memory_manager .memory_manager
.lock() .lock()
@ -1144,10 +1129,7 @@ impl Vm {
} }
#[cfg(target_arch = "aarch64")] #[cfg(target_arch = "aarch64")]
fn configure_system( fn configure_system(&mut self, _rsdp_addr: GuestAddress) -> Result<()> {
&mut self,
#[cfg(feature = "acpi")] _rsdp_addr: GuestAddress,
) -> Result<()> {
let cmdline = self.get_cmdline()?; let cmdline = self.get_cmdline()?;
let vcpu_mpidrs = self.cpu_manager.lock().unwrap().get_mpidrs(); let vcpu_mpidrs = self.cpu_manager.lock().unwrap().get_mpidrs();
let vcpu_topology = self.cpu_manager.lock().unwrap().get_vcpu_topology(); let vcpu_topology = self.cpu_manager.lock().unwrap().get_vcpu_topology();
@ -1919,7 +1901,7 @@ impl Vm {
.map_err(Error::PopulateHob)?; .map_err(Error::PopulateHob)?;
// Loop over the ACPI tables and copy them to the HOB. // Loop over the ACPI tables and copy them to the HOB.
#[cfg(feature = "acpi")]
for acpi_table in crate::acpi::create_acpi_tables_tdx( for acpi_table in crate::acpi::create_acpi_tables_tdx(
&self.device_manager, &self.device_manager,
&self.cpu_manager, &self.cpu_manager,
@ -2016,7 +1998,7 @@ impl Vm {
// Creates ACPI tables // Creates ACPI tables
// In case of TDX being used, this is a no-op since the tables will be // In case of TDX being used, this is a no-op since the tables will be
// created and passed when populating the HOB. // created and passed when populating the HOB.
#[cfg(feature = "acpi")]
fn create_acpi_tables(&self) -> Option<GuestAddress> { fn create_acpi_tables(&self) -> Option<GuestAddress> {
#[cfg(feature = "tdx")] #[cfg(feature = "tdx")]
if self.config.lock().unwrap().tdx.is_some() { if self.config.lock().unwrap().tdx.is_some() {
@ -2088,7 +2070,6 @@ impl Vm {
Vec::new() Vec::new()
}; };
#[cfg(feature = "acpi")]
let rsdp_addr = self.create_acpi_tables(); let rsdp_addr = self.create_acpi_tables();
// Configuring the TDX regions requires that the vCPUs are created. // Configuring the TDX regions requires that the vCPUs are created.
@ -2105,10 +2086,7 @@ impl Vm {
.map(|_| { .map(|_| {
// Safe to unwrap rsdp_addr as we know it can't be None when // Safe to unwrap rsdp_addr as we know it can't be None when
// the entry_point is Some. // the entry_point is Some.
self.configure_system( self.configure_system(rsdp_addr.unwrap())
#[cfg(feature = "acpi")]
rsdp_addr.unwrap(),
)
}) })
.transpose()?; .transpose()?;
@ -2434,15 +2412,12 @@ impl Vm {
#[cfg(target_arch = "x86_64")] #[cfg(target_arch = "x86_64")]
pub fn power_button(&self) -> Result<()> { pub fn power_button(&self) -> Result<()> {
#[cfg(feature = "acpi")]
return self return self
.device_manager .device_manager
.lock() .lock()
.unwrap() .unwrap()
.notify_power_button() .notify_power_button()
.map_err(Error::PowerButton); .map_err(Error::PowerButton);
#[cfg(not(feature = "acpi"))]
Err(Error::PowerButtonNotSupported)
} }
#[cfg(target_arch = "aarch64")] #[cfg(target_arch = "aarch64")]