vmm: Don't expose MemoryManager ACPI functionality unless required

When running non-dynamic, or when using virtio-mem for memory hotplug, the
ACPI functionality should not be included in the DSDT, nor does the
MemoryManager need to be placed on the MMIO bus.

Fixes: #3883

Signed-off-by: Rob Bradford <robert.bradford@intel.com>
This commit is contained in:
Author: Rob Bradford — 2022-03-24 11:03:26 +00:00
parent f6dfb42a64
commit 7a8061818e
2 changed files with 120 additions and 93 deletions

View File

@ -1132,15 +1132,16 @@ impl DeviceManager {
#[cfg(feature = "acpi")] #[cfg(feature = "acpi")]
{ {
let memory_manager_acpi_address = self.memory_manager.lock().unwrap().acpi_address; if let Some(acpi_address) = self.memory_manager.lock().unwrap().acpi_address() {
self.address_manager self.address_manager
.mmio_bus .mmio_bus
.insert( .insert(
Arc::clone(&self.memory_manager) as Arc<Mutex<dyn BusDevice>>, Arc::clone(&self.memory_manager) as Arc<Mutex<dyn BusDevice>>,
memory_manager_acpi_address.0, acpi_address.0,
MEMORY_MANAGER_ACPI_SIZE as u64, MEMORY_MANAGER_ACPI_SIZE as u64,
) )
.map_err(DeviceManagerError::BusError)?; .map_err(DeviceManagerError::BusError)?;
}
} }
#[cfg(target_arch = "x86_64")] #[cfg(target_arch = "x86_64")]

View File

@ -180,7 +180,7 @@ pub struct MemoryManager {
guest_ram_mappings: Vec<GuestRamMapping>, guest_ram_mappings: Vec<GuestRamMapping>,
#[cfg(feature = "acpi")] #[cfg(feature = "acpi")]
pub acpi_address: GuestAddress, pub acpi_address: Option<GuestAddress>,
} }
#[derive(Debug)] #[derive(Debug)]
@ -1024,18 +1024,26 @@ impl MemoryManager {
.ok_or(Error::CreateSystemAllocator)?, .ok_or(Error::CreateSystemAllocator)?,
)); ));
#[cfg(feature = "acpi")]
let acpi_address = allocator
.lock()
.unwrap()
.allocate_platform_mmio_addresses(None, MEMORY_MANAGER_ACPI_SIZE as u64, None)
.ok_or(Error::AllocateMmioAddress)?;
#[cfg(not(feature = "tdx"))] #[cfg(not(feature = "tdx"))]
let dynamic = true; let dynamic = true;
#[cfg(feature = "tdx")] #[cfg(feature = "tdx")]
let dynamic = !tdx_enabled; let dynamic = !tdx_enabled;
let hotplug_method = config.hotplug_method.clone();
#[cfg(feature = "acpi")]
let acpi_address = if dynamic && hotplug_method == HotplugMethod::Acpi {
Some(
allocator
.lock()
.unwrap()
.allocate_platform_mmio_addresses(None, MEMORY_MANAGER_ACPI_SIZE as u64, None)
.ok_or(Error::AllocateMmioAddress)?,
)
} else {
None
};
// If running on SGX the start of device area and RAM area may diverge but // If running on SGX the start of device area and RAM area may diverge but
// at this point they are next to each other. // at this point they are next to each other.
let end_of_ram_area = start_of_device_area.unchecked_sub(1); let end_of_ram_area = start_of_device_area.unchecked_sub(1);
@ -1053,7 +1061,7 @@ impl MemoryManager {
selected_slot, selected_slot,
mergeable: config.mergeable, mergeable: config.mergeable,
allocator, allocator,
hotplug_method: config.hotplug_method.clone(), hotplug_method,
boot_ram, boot_ram,
current_ram, current_ram,
next_hotplug_slot, next_hotplug_slot,
@ -1837,6 +1845,11 @@ impl MemoryManager {
} }
memory_slot_fds memory_slot_fds
} }
#[cfg(feature = "acpi")]
pub fn acpi_address(&self) -> Option<GuestAddress> {
self.acpi_address
}
} }
#[cfg(feature = "acpi")] #[cfg(feature = "acpi")]
@ -2070,81 +2083,94 @@ impl Aml for MemoryMethods {
#[cfg(feature = "acpi")] #[cfg(feature = "acpi")]
impl Aml for MemoryManager { impl Aml for MemoryManager {
fn append_aml_bytes(&self, bytes: &mut Vec<u8>) { fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
// Memory Hotplug Controller if let Some(acpi_address) = self.acpi_address {
aml::Device::new( // Memory Hotplug Controller
"_SB_.MHPC".into(), aml::Device::new(
vec![ "_SB_.MHPC".into(),
&aml::Name::new("_HID".into(), &aml::EisaName::new("PNP0A06")), vec![
&aml::Name::new("_UID".into(), &"Memory Hotplug Controller"), &aml::Name::new("_HID".into(), &aml::EisaName::new("PNP0A06")),
// Mutex to protect concurrent access as we write to choose slot and then read back status &aml::Name::new("_UID".into(), &"Memory Hotplug Controller"),
&aml::Mutex::new("MLCK".into(), 0), // Mutex to protect concurrent access as we write to choose slot and then read back status
&aml::Name::new( &aml::Mutex::new("MLCK".into(), 0),
"_CRS".into(), &aml::Name::new(
&aml::ResourceTemplate::new(vec![&aml::AddressSpace::new_memory( "_CRS".into(),
aml::AddressSpaceCachable::NotCacheable, &aml::ResourceTemplate::new(vec![&aml::AddressSpace::new_memory(
true, aml::AddressSpaceCachable::NotCacheable,
self.acpi_address.0 as u64, true,
self.acpi_address.0 + MEMORY_MANAGER_ACPI_SIZE as u64 - 1, acpi_address.0 as u64,
)]), acpi_address.0 + MEMORY_MANAGER_ACPI_SIZE as u64 - 1,
), )]),
// OpRegion and Fields map MMIO range into individual field values ),
&aml::OpRegion::new( // OpRegion and Fields map MMIO range into individual field values
"MHPR".into(), &aml::OpRegion::new(
aml::OpRegionSpace::SystemMemory, "MHPR".into(),
self.acpi_address.0 as usize, aml::OpRegionSpace::SystemMemory,
MEMORY_MANAGER_ACPI_SIZE, acpi_address.0 as usize,
), MEMORY_MANAGER_ACPI_SIZE,
&aml::Field::new( ),
"MHPR".into(), &aml::Field::new(
aml::FieldAccessType::DWord, "MHPR".into(),
aml::FieldUpdateRule::Preserve, aml::FieldAccessType::DWord,
vec![ aml::FieldUpdateRule::Preserve,
aml::FieldEntry::Named(*b"MHBL", 32), // Base (low 4 bytes) vec![
aml::FieldEntry::Named(*b"MHBH", 32), // Base (high 4 bytes) aml::FieldEntry::Named(*b"MHBL", 32), // Base (low 4 bytes)
aml::FieldEntry::Named(*b"MHLL", 32), // Length (low 4 bytes) aml::FieldEntry::Named(*b"MHBH", 32), // Base (high 4 bytes)
aml::FieldEntry::Named(*b"MHLH", 32), // Length (high 4 bytes) aml::FieldEntry::Named(*b"MHLL", 32), // Length (low 4 bytes)
], aml::FieldEntry::Named(*b"MHLH", 32), // Length (high 4 bytes)
), ],
&aml::Field::new( ),
"MHPR".into(), &aml::Field::new(
aml::FieldAccessType::DWord, "MHPR".into(),
aml::FieldUpdateRule::Preserve, aml::FieldAccessType::DWord,
vec![ aml::FieldUpdateRule::Preserve,
aml::FieldEntry::Reserved(128), vec![
aml::FieldEntry::Named(*b"MHPX", 32), // PXM aml::FieldEntry::Reserved(128),
], aml::FieldEntry::Named(*b"MHPX", 32), // PXM
), ],
&aml::Field::new( ),
"MHPR".into(), &aml::Field::new(
aml::FieldAccessType::Byte, "MHPR".into(),
aml::FieldUpdateRule::WriteAsZeroes, aml::FieldAccessType::Byte,
vec![ aml::FieldUpdateRule::WriteAsZeroes,
aml::FieldEntry::Reserved(160), vec![
aml::FieldEntry::Named(*b"MEN_", 1), // Enabled aml::FieldEntry::Reserved(160),
aml::FieldEntry::Named(*b"MINS", 1), // Inserting aml::FieldEntry::Named(*b"MEN_", 1), // Enabled
aml::FieldEntry::Named(*b"MRMV", 1), // Removing aml::FieldEntry::Named(*b"MINS", 1), // Inserting
aml::FieldEntry::Named(*b"MEJ0", 1), // Ejecting aml::FieldEntry::Named(*b"MRMV", 1), // Removing
], aml::FieldEntry::Named(*b"MEJ0", 1), // Ejecting
), ],
&aml::Field::new( ),
"MHPR".into(), &aml::Field::new(
aml::FieldAccessType::DWord, "MHPR".into(),
aml::FieldUpdateRule::Preserve, aml::FieldAccessType::DWord,
vec![ aml::FieldUpdateRule::Preserve,
aml::FieldEntry::Named(*b"MSEL", 32), // Selector vec![
aml::FieldEntry::Named(*b"MOEV", 32), // Event aml::FieldEntry::Named(*b"MSEL", 32), // Selector
aml::FieldEntry::Named(*b"MOSC", 32), // OSC aml::FieldEntry::Named(*b"MOEV", 32), // Event
], aml::FieldEntry::Named(*b"MOSC", 32), // OSC
), ],
&MemoryMethods { ),
slots: self.hotplug_slots.len(), &MemoryMethods {
}, slots: self.hotplug_slots.len(),
&MemorySlots { },
slots: self.hotplug_slots.len(), &MemorySlots {
}, slots: self.hotplug_slots.len(),
], },
) ],
.append_aml_bytes(bytes); )
.append_aml_bytes(bytes);
} else {
aml::Device::new(
"_SB_.MHPC".into(),
vec![
&aml::Name::new("_HID".into(), &aml::EisaName::new("PNP0A06")),
&aml::Name::new("_UID".into(), &"Memory Hotplug Controller"),
// Empty MSCN for GED
&aml::Method::new("MSCN".into(), 0, true, vec![]),
],
)
.append_aml_bytes(bytes);
}
#[cfg(target_arch = "x86_64")] #[cfg(target_arch = "x86_64")]
{ {