arch: Add a Reserved memory region to the memory hole
We add a Reserved region type at the end of the memory hole to prevent 32-bit device allocations from overlapping with architectural address ranges such as the IOAPIC, TSS or APIC ones. Eventually we should remove that reserved range by allocating all the architectural ranges before letting 32-bit devices use the memory hole.

Signed-off-by: Samuel Ortiz <sameo@linux.intel.com>
commit fa41ddd94f
parent 299d887856
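For orientation, here is a minimal standalone sketch (not part of the commit) of the 32-bit hole layout implied by the new constants: a 1 GiB gap starting at 3 GiB, of which the first 768 MiB remain available for 32-bit device allocations and the last 256 MiB become the reserved tail covering the architectural ranges. The IOAPIC and local APIC base addresses used below are the conventional x86 values, not values taken from this diff.

```rust
// Plain-u64 sketch of the layout encoded by MEM_32BIT_GAP_START,
// MEM_32BIT_GAP_SIZE and MEM_32BIT_DEVICES_GAP_SIZE (values from the diff).
const GAP_START: u64 = 0xc000_0000; // 3 GiB
const GAP_SIZE: u64 = 1024 << 20; // 1 GiB
const DEVICES_GAP_SIZE: u64 = 768 << 20; // 768 MiB

fn main() {
    let devices_gap = GAP_START..GAP_START + DEVICES_GAP_SIZE; // 32-bit device BARs
    let reserved = devices_gap.end..GAP_START + GAP_SIZE; // architectural ranges

    assert_eq!(devices_gap.end, 0xf000_0000); // 3.75 GiB
    assert_eq!(reserved.end, 0x1_0000_0000); // 4 GiB

    // The usual x86 architectural addresses fall inside the reserved tail,
    // so 32-bit device allocations can no longer collide with them.
    assert!(reserved.contains(&0xfec0_0000)); // conventional IOAPIC base
    assert!(reserved.contains(&0xfee0_0000)); // conventional local APIC base
}
```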
@@ -46,44 +46,60 @@ impl From<Error> for super::Error {
 // Where BIOS/VGA magic would live on a real PC.
 const EBDA_START: GuestAddress = GuestAddress(0x9fc00);
 const FIRST_ADDR_PAST_32BITS: GuestAddress = GuestAddress(1 << 32);
-const MEM_32BIT_GAP_SIZE: GuestUsize = (768 << 20);
+
+// Our 32-bit memory gap starts at 3G.
+const MEM_32BIT_GAP_START: GuestAddress = GuestAddress(0xc000_0000);
+
+// Our 32-bit memory gap size is 1GB.
+const MEM_32BIT_GAP_SIZE: GuestUsize = (1024 << 20);
+
+// We reserve 768MB in our memory gap for 32-bit devices (e.g. 32-bit PCI BARs).
+const MEM_32BIT_DEVICES_GAP_SIZE: GuestUsize = (768 << 20);
 
 /// Returns a Vec of the valid memory addresses.
 /// These should be used to configure the GuestMemory structure for the platform.
 /// For x86_64 all addresses are valid from the start of the kernel except a
 /// carve out at the end of 32bit address space.
 pub fn arch_memory_regions(size: GuestUsize) -> Vec<(GuestAddress, usize, RegionType)> {
-    let memory_gap_start = FIRST_ADDR_PAST_32BITS
-        .checked_sub(MEM_32BIT_GAP_SIZE as u64)
-        .expect("32-bit hole is too large");
+    let reserved_memory_gap_start = MEM_32BIT_GAP_START
+        .checked_add(MEM_32BIT_DEVICES_GAP_SIZE)
+        .expect("32-bit reserved region is too large");
+
     let requested_memory_size = GuestAddress(size as u64);
     let mut regions = Vec::new();
 
     // case1: guest memory fits before the gap
-    if size as u64 <= memory_gap_start.raw_value() {
+    if size as u64 <= MEM_32BIT_GAP_START.raw_value() {
         regions.push((GuestAddress(0), size as usize, RegionType::Ram));
     // case2: guest memory extends beyond the gap
     } else {
         // push memory before the gap
         regions.push((
             GuestAddress(0),
-            memory_gap_start.raw_value() as usize,
+            MEM_32BIT_GAP_START.raw_value() as usize,
             RegionType::Ram,
         ));
         regions.push((
             FIRST_ADDR_PAST_32BITS,
-            requested_memory_size.unchecked_offset_from(memory_gap_start) as usize,
+            requested_memory_size.unchecked_offset_from(MEM_32BIT_GAP_START) as usize,
             RegionType::Ram,
         ));
     }
 
-    // Add the 32 bits hole as a sub region.
+    // Add the 32-bit device memory hole as a sub region.
     regions.push((
-        memory_gap_start,
-        MEM_32BIT_GAP_SIZE as usize,
+        MEM_32BIT_GAP_START,
+        MEM_32BIT_DEVICES_GAP_SIZE as usize,
         RegionType::SubRegion,
     ));
 
+    // Add the 32-bit reserved memory hole as a sub region.
+    regions.push((
+        reserved_memory_gap_start,
+        (MEM_32BIT_GAP_SIZE - MEM_32BIT_DEVICES_GAP_SIZE) as usize,
+        RegionType::Reserved,
+    ));
+
     regions
 }
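As a worked example of the new layout (a hedged sketch in the style of the unit tests updated below, assuming the same test-module context), a 6 GiB guest now gets four regions: RAM below the gap, RAM pushed above 4 GiB, the 768 MiB device sub-region, and the 256 MiB reserved tail.

```rust
#[test]
fn regions_6gb_layout() {
    // 6 GiB of guest RAM: 3 GiB fit below the gap, the rest is pushed above 4 GiB.
    let regions = arch_memory_regions(6 << 30);
    assert_eq!(4, regions.len());
    assert_eq!(GuestAddress(0), regions[0].0);
    assert_eq!(3usize << 30, regions[0].1);
    assert_eq!(GuestAddress(1 << 32), regions[1].0);
    assert_eq!(3usize << 30, regions[1].1);
    // 768 MiB device sub-region at 3 GiB, 256 MiB reserved tail at 3.75 GiB.
    assert_eq!(GuestAddress(0xc000_0000), regions[2].0);
    assert!(regions[2].2 == RegionType::SubRegion);
    assert_eq!(GuestAddress(0xf000_0000), regions[3].0);
    assert!(regions[3].2 == RegionType::Reserved);
}
```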
@@ -202,7 +218,7 @@ mod tests {
     #[test]
     fn regions_lt_4gb() {
         let regions = arch_memory_regions(1 << 29 as GuestUsize);
-        assert_eq!(2, regions.len());
+        assert_eq!(3, regions.len());
         assert_eq!(GuestAddress(0), regions[0].0);
         assert_eq!(1usize << 29, regions[0].1);
     }
@@ -210,7 +226,7 @@ mod tests {
     #[test]
     fn regions_gt_4gb() {
         let regions = arch_memory_regions((1 << 32 as GuestUsize) + 0x8000);
-        assert_eq!(3, regions.len());
+        assert_eq!(4, regions.len());
         assert_eq!(GuestAddress(0), regions[0].0);
         assert_eq!(GuestAddress(1 << 32), regions[1].0);
     }
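Both updated tests only check the region count. A hedged sketch of additional assertions (hypothetical, same test-module assumptions as above) spelling out what the extra region actually is:

```rust
#[test]
fn regions_reserved_tail() {
    // Regardless of guest size, the last region is the reserved tail: it starts
    // right after the 768 MiB device sub-region and spans the remaining 256 MiB
    // of the 1 GiB gap (MEM_32BIT_GAP_SIZE - MEM_32BIT_DEVICES_GAP_SIZE).
    let regions = arch_memory_regions(1 << 29 as GuestUsize);
    let reserved = &regions[regions.len() - 1];
    assert_eq!(GuestAddress(0xf000_0000), reserved.0);
    assert_eq!((256 << 20) as usize, reserved.1);
    assert!(reserved.2 == RegionType::Reserved);
}
```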
@@ -1237,11 +1237,6 @@ impl<'a> Vm<'a> {
             .filter(|r| r.2 == RegionType::SubRegion)
             .map(|r| (r.0, r.1))
             .collect();
-        let _reserved_regions: Vec<(GuestAddress, usize)> = arch_mem_regions
-            .iter()
-            .filter(|r| r.2 == RegionType::Reserved)
-            .map(|r| (r.0, r.1))
-            .collect();
 
         // Check the number of reserved regions, and only take the first one
         // that's acrtually a 32-bit hole.
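The surrounding code splits arch_mem_regions by its RegionType tag with the filter/map/collect pattern visible in the context lines above. A standalone sketch of that pattern, using stand-in types (plain u64 addresses and a local RegionType enum) rather than the crate's own GuestAddress:

```rust
// Stand-in enum so the sketch compiles on its own; the real code uses the
// arch crate's RegionType and vm-memory's GuestAddress.
#[derive(Clone, Copy, PartialEq, Debug)]
enum RegionType {
    Ram,
    SubRegion,
    Reserved,
}

fn main() {
    let arch_mem_regions: Vec<(u64, usize, RegionType)> = vec![
        (0x0, 3 << 30, RegionType::Ram),
        (0xc000_0000, 768 << 20, RegionType::SubRegion),
        (0xf000_0000, 256 << 20, RegionType::Reserved),
    ];

    // Same filter/map/collect pattern as above: pick out one region class and
    // keep only the (start, size) pairs the allocator cares about.
    let sub_regions: Vec<(u64, usize)> = arch_mem_regions
        .iter()
        .filter(|r| r.2 == RegionType::SubRegion)
        .map(|r| (r.0, r.1))
        .collect();

    assert_eq!(vec![(0xc000_0000, 768 << 20)], sub_regions);
}
```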
@@ -1380,25 +1375,6 @@ impl<'a> Vm<'a> {
                 .ok_or(Error::MemoryRangeAllocation)?;
         }
 
-        // Allocate IOAPIC address in the memory hole if necessary.
-        if IOAPIC_RANGE_ADDR >= mem_hole.0.raw_value() && IOAPIC_RANGE_SIZE < mem_hole.1 as u64 {
-            allocator
-                .allocate_mmio_hole_addresses(
-                    Some(GuestAddress(IOAPIC_RANGE_ADDR)),
-                    IOAPIC_RANGE_SIZE as GuestUsize,
-                    None,
-                )
-                .ok_or(Error::IoapicRangeAllocation)?;
-        } else {
-            allocator
-                .allocate_mmio_addresses(
-                    Some(GuestAddress(IOAPIC_RANGE_ADDR)),
-                    IOAPIC_RANGE_SIZE as GuestUsize,
-                    None,
-                )
-                .ok_or(Error::IoapicRangeAllocation)?;
-        }
-
         let device_manager = DeviceManager::new(
             guest_memory.clone(),
             &mut allocator,
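For context on why the conditional IOAPIC allocation can be dropped: with the reserved tail in place, the architectural IOAPIC range no longer falls inside the device hole handed to the MMIO hole allocator. A small sketch, assuming the conventional x86 IOAPIC base of 0xfec0_0000 (not shown in this diff):

```rust
fn main() {
    // With the new split, the 32-bit device hole stops at 3.75 GiB and the
    // architectural ranges live in the Reserved tail above it.
    let devices_hole = 0xc000_0000u64..0xf000_0000u64;
    let reserved_tail = 0xf000_0000u64..0x1_0000_0000u64;

    let ioapic_base = 0xfec0_0000u64; // conventional IOAPIC base, assumed here
    assert!(!devices_hole.contains(&ioapic_base));
    assert!(reserved_tail.contains(&ioapic_base));
    // So the "is the IOAPIC inside the memory hole?" branch removed above no
    // longer applies: the IOAPIC range sits outside the device sub-region.
}
```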