arch, vmm: Fix TDVF section handling

This patch fixes a few things to support TDVF correctly.

The HOB memory resources must carry the EFI_RESOURCE_ATTRIBUTE_ENCRYPTED
attribute.
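
As a reference, a minimal sketch of how the new 0x04000007 value
decomposes, assuming the standard UEFI PI resource attribute bit values
(the constant names mirror the spec; ENCRYPTED's value is inferred from
the 0x7 -> 0x04000007 change in the first hunk below):

    // Resource attribute bits per the UEFI PI spec (assumed values);
    // ENCRYPTED is inferred from the 0x7 -> 0x04000007 change below.
    const EFI_RESOURCE_ATTRIBUTE_PRESENT: u32 = 0x0000_0001;
    const EFI_RESOURCE_ATTRIBUTE_INITIALIZED: u32 = 0x0000_0002;
    const EFI_RESOURCE_ATTRIBUTE_TESTED: u32 = 0x0000_0004;
    const EFI_RESOURCE_ATTRIBUTE_ENCRYPTED: u32 = 0x0400_0000;

    fn main() {
        let attributes = EFI_RESOURCE_ATTRIBUTE_PRESENT
            | EFI_RESOURCE_ATTRIBUTE_INITIALIZED
            | EFI_RESOURCE_ATTRIBUTE_TESTED
            | EFI_RESOURCE_ATTRIBUTE_ENCRYPTED;
        assert_eq!(attributes, 0x0400_0007);
    }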

Any section with a base address within the already allocated guest RAM
must not be allocated.
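
A minimal sketch of that check, using simplified stand-in types
(TdvfSection and address_in_range here are illustrative stand-ins for
the real vmm types, and the RAM range is hypothetical):

    // Simplified stand-in for the real TdvfSection type.
    #[allow(dead_code)]
    struct TdvfSection { address: u64, size: u64 }

    // Returns true when addr falls inside any inclusive (start, end) RAM range.
    fn address_in_range(ram: &[(u64, u64)], addr: u64) -> bool {
        ram.iter().any(|&(start, end)| addr >= start && addr <= end)
    }

    fn main() {
        // Hypothetical boot RAM: a single 2 GiB region starting at address 0.
        let ram = [(0x0u64, 0x7fff_ffffu64)];

        // A section placed above RAM still needs its own backing allocation...
        let flash = TdvfSection { address: 0xffc0_0000, size: 0x40_0000 };
        assert!(!address_in_range(&ram, flash.address));

        // ...while a section inside existing RAM must be skipped, not re-allocated.
        let td_hob = TdvfSection { address: 0x8_0000, size: 0x8000 };
        assert!(address_in_range(&ram, td_hob.address));
    }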

The list of TD_HOB memory resources should contain the TempMem and
TdHob sections as well; a worked example follows the last hunk below.

Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
Author: Sebastien Boeuf
Date:   2021-07-05 18:41:35 +02:00
Commit: 5b6d424a77 (parent: 76ac0f1555)
2 changed files with 60 additions and 25 deletions


@@ -277,10 +277,10 @@ impl TdHob {
             },
             /* TODO:
              * QEMU currently fills it in like this:
-             * EFI_RESOURCE_ATTRIBUTE_PRESENT | EFI_RESOURCE_ATTRIBUTE_INITIALIZED | EFI_RESOURCE_ATTRIBUTE_TESTED
+             * EFI_RESOURCE_ATTRIBUTE_PRESENT | EFI_RESOURCE_ATTRIBUTE_INITIALIZED | EFI_RESOURCE_ATTRIBUTE_ENCRYPTED | EFI_RESOURCE_ATTRIBUTE_TESTED
              * which differs from the spec (due to TDVF implementation issue?)
              */
-            0x7,
+            0x04000007,
         )
     }


@@ -1574,13 +1574,23 @@ impl Vm {
     fn populate_tdx_sections(&mut self, sections: &[TdvfSection]) -> Result<Option<u64>> {
         use arch::x86_64::tdx::*;
         // Get the memory end *before* we start adding TDVF ram regions
-        let mem_end = {
-            let guest_memory = self.memory_manager.lock().as_ref().unwrap().guest_memory();
-            let mem = guest_memory.memory();
-            mem.last_addr()
-        };
+        let boot_guest_memory = self
+            .memory_manager
+            .lock()
+            .as_ref()
+            .unwrap()
+            .boot_guest_memory();
         for section in sections {
-            info!("Allocating TDVF Section: {:?}", section);
+            // No need to allocate if the section falls within guest RAM ranges
+            if boot_guest_memory.address_in_range(GuestAddress(section.address)) {
+                info!(
+                    "Not allocating TDVF Section: {:x?} since it is already part of guest RAM",
+                    section
+                );
+                continue;
+            }
+
+            info!("Allocating TDVF Section: {:x?}", section);
             self.memory_manager
                 .lock()
                 .unwrap()
@@ -1599,7 +1609,7 @@ impl Vm {
         let mem = guest_memory.memory();
         let mut hob_offset = None;
         for section in sections {
-            info!("Populating TDVF Section: {:?}", section);
+            info!("Populating TDVF Section: {:x?}", section);
             match section.r#type {
                 TdvfSectionType::Bfv | TdvfSectionType::Cfv => {
                     info!("Copying section to guest memory");
@@ -1623,22 +1633,47 @@ impl Vm {
         // Generate HOB
         let mut hob = TdHob::start(hob_offset.unwrap());

-        // RAM regions (all below 3GiB case)
-        if mem_end < arch::layout::MEM_32BIT_RESERVED_START {
-            hob.add_memory_resource(&mem, 0, mem_end.0 + 1, true)
-                .map_err(Error::PopulateHob)?;
-        } else {
-            // Otherwise split into two
-            hob.add_memory_resource(&mem, 0, arch::layout::MEM_32BIT_RESERVED_START.0, true)
-                .map_err(Error::PopulateHob)?;
-            if mem_end > arch::layout::RAM_64BIT_START {
-                hob.add_memory_resource(
-                    &mem,
-                    arch::layout::RAM_64BIT_START.raw_value(),
-                    mem_end.unchecked_offset_from(arch::layout::RAM_64BIT_START) + 1,
-                    true,
-                )
-                .map_err(Error::PopulateHob)?;
+        let mut sorted_sections = sections.to_vec();
+        sorted_sections.retain(|section| {
+            !matches!(section.r#type, TdvfSectionType::Bfv | TdvfSectionType::Cfv)
+        });
+
+        sorted_sections.sort_by_key(|section| section.address);
+        sorted_sections.reverse();
+        let mut current_section = sorted_sections.pop();
+
+        // RAM regions interleaved with TDVF sections
+        let mut next_start_addr = 0;
+        for region in boot_guest_memory.iter() {
+            let region_start = region.start_addr().0;
+            let region_end = region.last_addr().0;
+            if region_start > next_start_addr {
+                next_start_addr = region_start;
+            }
+
+            loop {
+                let (start, size, ram) = if let Some(section) = &current_section {
+                    if section.address <= next_start_addr {
+                        (section.address, section.size, false)
+                    } else {
+                        let last_addr = std::cmp::min(section.address - 1, region_end);
+                        (next_start_addr, last_addr - next_start_addr + 1, true)
+                    }
+                } else {
+                    (next_start_addr, region_end - next_start_addr + 1, true)
+                };
+
+                hob.add_memory_resource(&mem, start, size, ram)
+                    .map_err(Error::PopulateHob)?;
+
+                if !ram {
+                    current_section = sorted_sections.pop();
+                }
+
+                next_start_addr = start + size;
+                if next_start_addr > region_end {
+                    break;
+                }
             }
         }
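
For clarity, here is a self-contained sketch of the same interleaving
walk, with plain (address, size) tuples standing in for the real region
and section types; the addresses are illustrative only, not taken from
the patch:

    // Rework of the walk above: emit (start, size, is_ram) resources, with
    // non-RAM entries punched out wherever a reserved section overlaps RAM.
    fn interleave(
        regions: &[(u64, u64)],        // (start_addr, last_addr) per RAM region
        mut sections: Vec<(u64, u64)>, // (address, size) per non-BFV/CFV section
    ) -> Vec<(u64, u64, bool)> {
        // Walk sections in ascending address order, popping from the back.
        sections.sort_by_key(|s| s.0);
        sections.reverse();
        let mut current = sections.pop();

        let mut resources = Vec::new();
        let mut next_start_addr = 0;
        for &(region_start, region_end) in regions {
            if region_start > next_start_addr {
                next_start_addr = region_start;
            }
            loop {
                let (start, size, ram) = if let Some((addr, len)) = current {
                    if addr <= next_start_addr {
                        // The pending section starts here: emit it as non-RAM.
                        (addr, len, false)
                    } else {
                        // Emit RAM up to the section start or the region end.
                        let last_addr = std::cmp::min(addr - 1, region_end);
                        (next_start_addr, last_addr - next_start_addr + 1, true)
                    }
                } else {
                    // No sections left: the rest of the region is plain RAM.
                    (next_start_addr, region_end - next_start_addr + 1, true)
                };
                resources.push((start, size, ram));
                if !ram {
                    current = sections.pop();
                }
                next_start_addr = start + size;
                if next_start_addr > region_end {
                    break;
                }
            }
        }
        resources
    }

    fn main() {
        // Illustrative layout: RAM below 3 GiB and from 4 GiB to 5 GiB, with
        // one TdHob-like reserved section inside the second region.
        let regions = [(0x0, 0xbfff_ffff), (0x1_0000_0000, 0x1_3fff_ffff)];
        let sections = vec![(0x1_0010_0000, 0x1000)];
        for (start, size, ram) in interleave(&regions, sections) {
            println!("start=0x{:x} size=0x{:x} ram={}", start, size, ram);
        }
    }

Running it shows the second RAM region being split around the reserved
section, which is exactly how the TempMem and TdHob sections end up as
non-RAM entries in the TD_HOB resource list.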