vmm: Add ACPI tables to TdVmmData when running TDX

When running TDX, we must pass the ACPI tables to the TDVF firmware
running in the guest. The proper way to do this is to add the tables to
the TdHob as a TdVmmData type, so that TDVF knows how to access these
tables and expose them to the guest OS.

Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
Author:    Sebastien Boeuf <sebastien.boeuf@intel.com>
Date:      2021-09-30 12:38:16 +02:00
Committer: Bo Chen
Commit:    495e444ca6
Parent:    b99a3a7dc9
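
For context, the hunks below use two types from arch::x86_64::tdx whose
definitions are not part of this diff. The sketch below is inferred from the
fields and variants used in the hunks, not copied from the arch crate, so
treat the field types as assumptions:

    // Sketch only: shape inferred from usage in this commit.
    #[derive(Clone, Copy)]
    pub struct TdVmmDataRegion {
        pub start_address: u64, // guest-physical start of the region
        pub length: u64,        // region size in bytes
        pub region_type: TdVmmDataRegionType,
    }

    #[derive(Clone, Copy)]
    pub enum TdVmmDataRegionType {
        AcpiTables, // the only variant this commit exercises
    }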


@@ -29,8 +29,10 @@ use crate::{
 };
 use anyhow::anyhow;
 use arch::get_host_cpu_phys_bits;
+#[cfg(all(feature = "tdx", feature = "acpi"))]
+use arch::x86_64::tdx::TdVmmDataRegionType;
 #[cfg(feature = "tdx")]
-use arch::x86_64::tdx::TdvfSection;
+use arch::x86_64::tdx::{TdVmmDataRegion, TdvfSection};
 use arch::EntryPoint;
 #[cfg(any(target_arch = "aarch64", feature = "acpi"))]
 use arch::{NumaNode, NumaNodes};
@@ -1658,7 +1660,11 @@ impl Vm {
     }

     #[cfg(feature = "tdx")]
-    fn populate_tdx_sections(&mut self, sections: &[TdvfSection]) -> Result<Option<u64>> {
+    fn populate_tdx_sections(
+        &mut self,
+        sections: &[TdvfSection],
+        vmm_data_regions: &[TdVmmDataRegion],
+    ) -> Result<Option<u64>> {
         use arch::x86_64::tdx::*;
         // Get the memory end *before* we start adding TDVF ram regions
         let boot_guest_memory = self
@@ -1724,6 +1730,20 @@ impl Vm {
         sorted_sections.retain(|section| {
             !matches!(section.r#type, TdvfSectionType::Bfv | TdvfSectionType::Cfv)
         });
+
+        // Add the VMM specific data memory regions to the TDVF sections as
+        // TdHob type so that the firmware won't ignore/reject these ranges.
+        for region in vmm_data_regions {
+            sorted_sections.push(TdvfSection {
+                data_offset: 0,
+                data_size: 0,
+                address: region.start_address,
+                size: region.length,
+                r#type: TdvfSectionType::TdHob,
+                attributes: 0,
+            });
+        }
+
         sorted_sections.sort_by_key(|section| section.address);
         sorted_sections.reverse();
         let mut current_section = sorted_sections.pop();
@@ -1791,13 +1811,25 @@ impl Vm {
         )
         .map_err(Error::PopulateHob)?;

+        // Add the VMM specific data to the TdHob. The content of the data is
+        // written as part of the HOB, which will be retrieved by the
+        // firmware and processed according to its type.
+        for region in vmm_data_regions {
+            hob.add_td_vmm_data(&mem, *region)
+                .map_err(Error::PopulateHob)?;
+        }
+
         hob.finish(&mem).map_err(Error::PopulateHob)?;

         Ok(hob_offset)
     }

     #[cfg(feature = "tdx")]
-    fn init_tdx_memory(&mut self, sections: &[TdvfSection]) -> Result<()> {
+    fn init_tdx_memory(
+        &mut self,
+        sections: &[TdvfSection],
+        regions: &[TdVmmDataRegion],
+    ) -> Result<()> {
         let guest_memory = self.memory_manager.lock().as_ref().unwrap().guest_memory();
         let mem = guest_memory.memory();
@@ -1812,6 +1844,22 @@ impl Vm {
             )
             .map_err(Error::InitializeTdxMemoryRegion)?;
         }
+
+        // The same way we let the hypervisor know about the TDVF sections, we
+        // must declare the VMM specific regions shared with the guest so that
+        // they won't be discarded.
+        for region in regions {
+            self.vm
+                .tdx_init_memory_region(
+                    mem.get_host_address(GuestAddress(region.start_address))
+                        .unwrap() as u64,
+                    region.start_address,
+                    region.length,
+                    false,
+                )
+                .map_err(Error::InitializeTdxMemoryRegion)?;
+        }

         Ok(())
     }
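
Note the pairing with populate_tdx_sections(): the HOB makes the regions
visible to the firmware, while the loop above makes the same ranges known to
the hypervisor. The tdx_init_memory_region() signature is not shown in this
diff; inferred from the call site, it plausibly looks like the sketch below,
where the meaning of the final bool (whether the range is measured into the
TD's initial measurement, hence false for host-written ACPI data) is an
assumption:

    // Hypothetical signature sketch, inferred from the call site above;
    // the error type is elided.
    fn tdx_init_memory_region(
        &self,
        host_address: u64,  // userspace address backing the range
        guest_address: u64, // guest-physical address of the range
        size: u64,          // length in bytes
        measure: bool,      // assumed: include the range in the measurement
    ) -> Result<()>;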
@@ -1918,10 +1966,23 @@ impl Vm {
             rsdp_addr
         };

+        #[cfg(all(feature = "tdx", not(feature = "acpi")))]
+        let vmm_data_regions: Vec<TdVmmDataRegion> = Vec::new();
+
+        // Create a VMM specific data region to share the ACPI tables with
+        // the guest. Reserving 64 KiB ensures the ACPI tables will fit.
+        #[cfg(all(feature = "tdx", feature = "acpi"))]
+        let vmm_data_regions = vec![TdVmmDataRegion {
+            start_address: rsdp_addr.0,
+            length: 0x10000,
+            region_type: TdVmmDataRegionType::AcpiTables,
+        }];
+
-        // Configuring the TDX regions requires that the vCPUs are created
+        // Configuring the TDX regions requires that the vCPUs are created.
         #[cfg(feature = "tdx")]
         let hob_address = if self.config.lock().unwrap().tdx.is_some() {
-            self.populate_tdx_sections(&sections)?
+            // TDX sections are written to memory.
+            self.populate_tdx_sections(&sections, &vmm_data_regions)?
         } else {
             None
         };
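
Two details worth spelling out here. With tdx enabled but acpi disabled,
vmm_data_regions is an empty Vec, so the loops added above are no-ops. And
the reserved length of 0x10000 bytes is exactly 64 KiB, starting at the RSDP
address, so the region is assumed to cover the RSDP plus the tables it points
to:

    // The reservation arithmetic, spelled out; the constant name is
    // hypothetical, the diff uses the bare literal.
    const ACPI_REGION_LEN: u64 = 0x10000;
    assert_eq!(ACPI_REGION_LEN, 64 * 1024); // 64 KiB
    assert_eq!(ACPI_REGION_LEN, 65536);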
@@ -1945,7 +2006,10 @@ impl Vm {
                 .unwrap()
                 .initialize_tdx(hob_address)
                 .map_err(Error::CpuManager)?;
-            self.init_tdx_memory(&sections)?;
+
+            // Let the hypervisor know which memory ranges are shared with the
+            // guest. This prevents the guest from ignoring/discarding memory
+            // regions provided by the host.
+            self.init_tdx_memory(&sections, &vmm_data_regions)?;
             // With TDX memory and CPU state configured TDX setup is complete
             self.vm.tdx_finalize().map_err(Error::FinalizeTdx)?;
         }