vmm: Use an allocator specifically for RAM regions

Rather than use the system MMIO allocator for RAM, use an allocator that
covers the full RAM range.
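
A minimal sketch (not part of this change) of how the vm-allocator
AddressAllocator introduced here is used; the new()/allocate() signatures
match the calls visible in the diff, while reserve_ram_region() and the
0x1_0000_0000 range are illustrative placeholders:

    use vm_allocator::AddressAllocator;
    use vm_memory::{GuestAddress, GuestUsize};

    fn reserve_ram_region(size: GuestUsize) -> Option<GuestAddress> {
        // Cover the whole RAM range, from guest address 0 up to the start of
        // the device area (0x1_0000_0000 stands in for start_of_device_area).
        let mut ram_allocator = AddressAllocator::new(GuestAddress(0), 0x1_0000_0000)?;

        // Ask for a free range of `size` bytes anywhere in RAM; passing
        // Some(addr) instead pins the range to a fixed guest address, as the
        // hunks below do for existing regions.
        ram_allocator.allocate(None, size, None)
    }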

Signed-off-by: Rob Bradford <robert.bradford@intel.com>
commit afe95e5a2a
parent b8fee11822
Author: Rob Bradford <robert.bradford@intel.com>
Date: 2021-10-29 10:02:15 +01:00

@@ -33,7 +33,7 @@ use versionize_derive::Versionize;
 use virtio_devices::BlocksState;
 #[cfg(target_arch = "x86_64")]
 use vm_allocator::GsiApic;
-use vm_allocator::SystemAllocator;
+use vm_allocator::{AddressAllocator, SystemAllocator};
 use vm_device::BusDevice;
 use vm_memory::bitmap::AtomicBitmap;
 use vm_memory::guest_memory::FileOffset;
@@ -165,6 +165,7 @@ pub struct MemoryManager {
     memory_zones: MemoryZones,
     log_dirty: bool, // Enable dirty logging for created RAM regions
     arch_mem_regions: Vec<ArchMemRegion>,
+    ram_allocator: AddressAllocator,
 
     // Keep track of calls to create_userspace_mapping() for guest RAM.
     // This is useful for getting the dirty pages as we need to know the
@@ -785,10 +786,8 @@ impl MemoryManager {
                     virtio_mem,
                     file_offset,
                 });
-                self.allocator
-                    .lock()
-                    .unwrap()
-                    .allocate_mmio_addresses(Some(region.start_addr()), region.len(), None)
+                self.ram_allocator
+                    .allocate(Some(region.start_addr()), region.len(), None)
                     .ok_or(Error::MemoryRangeAllocation)?;
             }
         }
@@ -800,10 +799,8 @@ impl MemoryManager {
                 // based on the GuestMemory regions.
                 continue;
             }
-            self.allocator
-                .lock()
-                .unwrap()
-                .allocate_mmio_addresses(
+            self.ram_allocator
+                .allocate(
                     Some(GuestAddress(region.base)),
                     region.size as GuestUsize,
                     None,
@@ -1020,6 +1017,7 @@ impl MemoryManager {
         // If running on SGX the start of device area and RAM area may diverge but
         // at this point they are next to each other.
        let end_of_ram_area = start_of_device_area.unchecked_sub(1);
+        let ram_allocator = AddressAllocator::new(GuestAddress(0), start_of_device_area.0).unwrap();
 
         let mut memory_manager = MemoryManager {
             boot_guest_memory,
@ -1051,6 +1049,7 @@ impl MemoryManager {
acpi_address, acpi_address,
log_dirty, log_dirty,
arch_mem_regions, arch_mem_regions,
ram_allocator,
}; };
memory_manager.allocate_address_space()?; memory_manager.allocate_address_space()?;
@@ -1376,10 +1375,8 @@ impl MemoryManager {
         }
 
         // Tell the allocator
-        self.allocator
-            .lock()
-            .unwrap()
-            .allocate_mmio_addresses(Some(start_addr), size as GuestUsize, None)
+        self.ram_allocator
+            .allocate(Some(start_addr), size as GuestUsize, None)
             .ok_or(Error::MemoryRangeAllocation)?;
 
         // Update the slot so that it can be queried via the I/O port