diff --git a/devices/src/pvpanic.rs b/devices/src/pvpanic.rs
index 1f02989d2..57a26310e 100644
--- a/devices/src/pvpanic.rs
+++ b/devices/src/pvpanic.rs
@@ -181,8 +181,9 @@ impl PciDevice for PvPanicDevice {
 
     fn allocate_bars(
         &mut self,
-        allocator: &Arc<Mutex<SystemAllocator>>,
-        _mmio_allocator: &mut AddressAllocator,
+        _allocator: &Arc<Mutex<SystemAllocator>>,
+        mmio32_allocator: &mut AddressAllocator,
+        _mmio64_allocator: &mut AddressAllocator,
         resources: Option<Vec<Resource>>,
     ) -> std::result::Result<Vec<PciBarConfiguration>, PciDeviceError> {
         let mut bars = Vec::new();
@@ -190,10 +191,8 @@ impl PciDevice for PvPanicDevice {
         let bar_id = 0;
         let region_size = PVPANIC_DEVICE_MMIO_SIZE;
         let restoring = resources.is_some();
-        let bar_addr = allocator
-            .lock()
-            .unwrap()
-            .allocate_mmio_hole_addresses(None, region_size, Some(PVPANIC_DEVICE_MMIO_ALIGNMENT))
+        let bar_addr = mmio32_allocator
+            .allocate(None, region_size, Some(PVPANIC_DEVICE_MMIO_ALIGNMENT))
             .ok_or(PciDeviceError::IoAllocationFailed(region_size))?;
 
         let bar = PciBarConfiguration::default()
@@ -218,11 +217,12 @@ impl PciDevice for PvPanicDevice {
 
     fn free_bars(
         &mut self,
-        allocator: &mut SystemAllocator,
-        _mmio_allocator: &mut AddressAllocator,
+        _allocator: &mut SystemAllocator,
+        mmio32_allocator: &mut AddressAllocator,
+        _mmio64_allocator: &mut AddressAllocator,
     ) -> std::result::Result<(), PciDeviceError> {
         for bar in self.bar_regions.drain(..) {
-            allocator.free_mmio_hole_addresses(GuestAddress(bar.addr()), bar.size());
+            mmio32_allocator.free(GuestAddress(bar.addr()), bar.size());
         }
 
         Ok(())
diff --git a/pci/src/device.rs b/pci/src/device.rs
index f51e3902f..73f1b0821 100644
--- a/pci/src/device.rs
+++ b/pci/src/device.rs
@@ -58,7 +58,8 @@ pub trait PciDevice: BusDevice {
     fn allocate_bars(
         &mut self,
         _allocator: &Arc<Mutex<SystemAllocator>>,
-        _mmio_allocator: &mut AddressAllocator,
+        _mmio32_allocator: &mut AddressAllocator,
+        _mmio64_allocator: &mut AddressAllocator,
         _resources: Option<Vec<Resource>>,
     ) -> Result<Vec<PciBarConfiguration>> {
         Ok(Vec::new())
     }
@@ -68,7 +69,8 @@ pub trait PciDevice: BusDevice {
     fn free_bars(
         &mut self,
         _allocator: &mut SystemAllocator,
-        _mmio_allocator: &mut AddressAllocator,
+        _mmio32_allocator: &mut AddressAllocator,
+        _mmio64_allocator: &mut AddressAllocator,
     ) -> Result<()> {
         Ok(())
     }
diff --git a/pci/src/vfio.rs b/pci/src/vfio.rs
index b4092c76f..a685ceedf 100644
--- a/pci/src/vfio.rs
+++ b/pci/src/vfio.rs
@@ -529,10 +529,13 @@ impl VfioCommon {
         }
     }
 
+    // The `allocator` argument is unused on `aarch64`
+    #[allow(unused_variables)]
     pub(crate) fn allocate_bars(
         &mut self,
         allocator: &Arc<Mutex<SystemAllocator>>,
-        mmio_allocator: &mut AddressAllocator,
+        mmio32_allocator: &mut AddressAllocator,
+        mmio64_allocator: &mut AddressAllocator,
         resources: Option<Vec<Resource>>,
     ) -> Result<Vec<PciBarConfiguration>, PciDeviceError> {
         let mut bars = Vec::new();
@@ -681,21 +684,15 @@ impl VfioCommon {
                 }
                 PciBarRegionType::Memory32BitRegion => {
                     // BAR allocation must be naturally aligned
-                    allocator
-                        .lock()
-                        .unwrap()
-                        .allocate_mmio_hole_addresses(
-                            restored_bar_addr,
-                            region_size,
-                            Some(region_size),
-                        )
+                    mmio32_allocator
+                        .allocate(restored_bar_addr, region_size, Some(region_size))
                         .ok_or(PciDeviceError::IoAllocationFailed(region_size))?
                 }
                 PciBarRegionType::Memory64BitRegion => {
                     // We need do some fixup to keep MMIO RW region and msix cap region page size
                     // aligned.
                     region_size = self.fixup_msix_region(bar_id, region_size);
-                    mmio_allocator
+                    mmio64_allocator
                         .allocate(
                             restored_bar_addr,
                             region_size,
@@ -742,10 +739,13 @@ impl VfioCommon {
         Ok(bars)
     }
 
+    // The `allocator` argument is unused on `aarch64`
+    #[allow(unused_variables)]
     pub(crate) fn free_bars(
         &mut self,
         allocator: &mut SystemAllocator,
-        mmio_allocator: &mut AddressAllocator,
+        mmio32_allocator: &mut AddressAllocator,
+        mmio64_allocator: &mut AddressAllocator,
     ) -> Result<(), PciDeviceError> {
         for region in self.mmio_regions.iter() {
             match region.type_ {
@@ -756,10 +756,10 @@ impl VfioCommon {
                     error!("I/O region is not supported");
                 }
                 PciBarRegionType::Memory32BitRegion => {
-                    allocator.free_mmio_hole_addresses(region.start, region.length);
+                    mmio32_allocator.free(region.start, region.length);
                 }
                 PciBarRegionType::Memory64BitRegion => {
-                    mmio_allocator.free(region.start, region.length);
+                    mmio64_allocator.free(region.start, region.length);
                 }
             }
         }
@@ -1694,19 +1694,22 @@ impl PciDevice for VfioPciDevice {
     fn allocate_bars(
         &mut self,
         allocator: &Arc<Mutex<SystemAllocator>>,
-        mmio_allocator: &mut AddressAllocator,
+        mmio32_allocator: &mut AddressAllocator,
+        mmio64_allocator: &mut AddressAllocator,
         resources: Option<Vec<Resource>>,
     ) -> Result<Vec<PciBarConfiguration>, PciDeviceError> {
         self.common
-            .allocate_bars(allocator, mmio_allocator, resources)
+            .allocate_bars(allocator, mmio32_allocator, mmio64_allocator, resources)
     }
 
     fn free_bars(
         &mut self,
         allocator: &mut SystemAllocator,
-        mmio_allocator: &mut AddressAllocator,
+        mmio32_allocator: &mut AddressAllocator,
+        mmio64_allocator: &mut AddressAllocator,
     ) -> Result<(), PciDeviceError> {
-        self.common.free_bars(allocator, mmio_allocator)
+        self.common
+            .free_bars(allocator, mmio32_allocator, mmio64_allocator)
     }
 
     fn write_config_register(
diff --git a/pci/src/vfio_user.rs b/pci/src/vfio_user.rs
index 5033b55dd..c5e64b970 100644
--- a/pci/src/vfio_user.rs
+++ b/pci/src/vfio_user.rs
@@ -397,19 +397,22 @@ impl PciDevice for VfioUserPciDevice {
     fn allocate_bars(
         &mut self,
         allocator: &Arc<Mutex<SystemAllocator>>,
-        mmio_allocator: &mut AddressAllocator,
+        mmio32_allocator: &mut AddressAllocator,
+        mmio64_allocator: &mut AddressAllocator,
         resources: Option<Vec<Resource>>,
     ) -> Result<Vec<PciBarConfiguration>, PciDeviceError> {
         self.common
-            .allocate_bars(allocator, mmio_allocator, resources)
+            .allocate_bars(allocator, mmio32_allocator, mmio64_allocator, resources)
     }
 
     fn free_bars(
         &mut self,
         allocator: &mut SystemAllocator,
-        mmio_allocator: &mut AddressAllocator,
+        mmio32_allocator: &mut AddressAllocator,
+        mmio64_allocator: &mut AddressAllocator,
     ) -> Result<(), PciDeviceError> {
-        self.common.free_bars(allocator, mmio_allocator)
+        self.common
+            .free_bars(allocator, mmio32_allocator, mmio64_allocator)
     }
 
     fn as_any(&mut self) -> &mut dyn Any {
diff --git a/virtio-devices/src/transport/pci_device.rs b/virtio-devices/src/transport/pci_device.rs
index 609795734..2f711c80a 100644
--- a/virtio-devices/src/transport/pci_device.rs
+++ b/virtio-devices/src/transport/pci_device.rs
@@ -955,8 +955,9 @@ impl PciDevice for VirtioPciDevice {
 
     fn allocate_bars(
         &mut self,
-        allocator: &Arc<Mutex<SystemAllocator>>,
-        mmio_allocator: &mut AddressAllocator,
+        _allocator: &Arc<Mutex<SystemAllocator>>,
+        mmio32_allocator: &mut AddressAllocator,
+        mmio64_allocator: &mut AddressAllocator,
         resources: Option<Vec<Resource>>,
     ) -> std::result::Result<Vec<PciBarConfiguration>, PciDeviceError> {
         let mut bars = Vec::new();
@@ -995,7 +996,7 @@ impl PciDevice for VirtioPciDevice {
         // See http://docs.oasis-open.org/virtio/virtio/v1.0/cs04/virtio-v1.0-cs04.html#x1-740004
         let (virtio_pci_bar_addr, region_type) = if use_64bit_bar {
             let region_type = PciBarRegionType::Memory64BitRegion;
-            let addr = mmio_allocator
+            let addr = mmio64_allocator
                 .allocate(
                     settings_bar_addr,
                     CAPABILITY_BAR_SIZE,
@@ -1005,10 +1006,8 @@ impl PciDevice for VirtioPciDevice {
             (addr, region_type)
         } else {
             let region_type = PciBarRegionType::Memory32BitRegion;
-            let addr = allocator
-                .lock()
-                .unwrap()
-                .allocate_mmio_hole_addresses(
+            let addr = mmio32_allocator
+                .allocate(
                     settings_bar_addr,
                     CAPABILITY_BAR_SIZE,
                     Some(CAPABILITY_BAR_SIZE),
@@ -1078,16 +1077,17 @@ impl PciDevice for VirtioPciDevice {
 
     fn free_bars(
         &mut self,
-        allocator: &mut SystemAllocator,
-        mmio_allocator: &mut AddressAllocator,
+        _allocator: &mut SystemAllocator,
+        mmio32_allocator: &mut AddressAllocator,
+        mmio64_allocator: &mut AddressAllocator,
     ) -> std::result::Result<(), PciDeviceError> {
         for bar in self.bar_regions.drain(..) {
             match bar.region_type() {
                 PciBarRegionType::Memory32BitRegion => {
-                    allocator.free_mmio_hole_addresses(GuestAddress(bar.addr()), bar.size());
+                    mmio32_allocator.free(GuestAddress(bar.addr()), bar.size());
                 }
                 PciBarRegionType::Memory64BitRegion => {
-                    mmio_allocator.free(GuestAddress(bar.addr()), bar.size());
+                    mmio64_allocator.free(GuestAddress(bar.addr()), bar.size());
                 }
                 _ => error!("Unexpected PCI bar type"),
             }
diff --git a/vm-allocator/src/system.rs b/vm-allocator/src/system.rs
index cb7770b2d..b6215e404 100644
--- a/vm-allocator/src/system.rs
+++ b/vm-allocator/src/system.rs
@@ -30,7 +30,6 @@ use crate::page_size::get_page_size;
 ///    #[cfg(target_arch = "x86_64")] GuestAddress(0x1000),
 ///    #[cfg(target_arch = "x86_64")] 0x10000,
 ///    GuestAddress(0x10000000), 0x10000000,
-///    GuestAddress(0x20000000), 0x100000,
 ///    #[cfg(target_arch = "x86_64")] vec![GsiApic::new(5, 19)]).unwrap();
 ///    #[cfg(target_arch = "x86_64")]
 /// assert_eq!(allocator.allocate_irq(), Some(5));
@@ -47,7 +46,6 @@ pub struct SystemAllocator {
     #[cfg(target_arch = "x86_64")]
     io_address_space: AddressAllocator,
     platform_mmio_address_space: AddressAllocator,
-    mmio_hole_address_space: AddressAllocator,
     gsi_allocator: GsiAllocator,
 }
 
@@ -59,8 +57,6 @@ impl SystemAllocator {
     /// * `io_size` - (X86) The size of IO memory.
    /// * `platform_mmio_base` - The starting address of platform MMIO memory.
     /// * `platform_mmio_size` - The size of platform MMIO memory.
-    /// * `mmio_hole_base` - The starting address of MMIO memory in 32-bit address space.
-    /// * `mmio_hole_size` - The size of MMIO memory in 32-bit address space.
     /// * `apics` - (X86) Vector of APIC's.
     ///
     pub fn new(
@@ -68,8 +64,6 @@ impl SystemAllocator {
         #[cfg(target_arch = "x86_64")] io_size: GuestUsize,
         platform_mmio_base: GuestAddress,
         platform_mmio_size: GuestUsize,
-        mmio_hole_base: GuestAddress,
-        mmio_hole_size: GuestUsize,
         #[cfg(target_arch = "x86_64")] apics: Vec<GsiApic>,
     ) -> Option<Self> {
         Some(SystemAllocator {
@@ -79,7 +73,6 @@ impl SystemAllocator {
                 platform_mmio_base,
                 platform_mmio_size,
             )?,
-            mmio_hole_address_space: AddressAllocator::new(mmio_hole_base, mmio_hole_size)?,
             #[cfg(target_arch = "x86_64")]
             gsi_allocator: GsiAllocator::new(apics),
             #[cfg(target_arch = "aarch64")]
@@ -123,20 +116,6 @@ impl SystemAllocator {
         )
     }
 
-    /// Reserves a section of `size` bytes of MMIO address space.
-    pub fn allocate_mmio_hole_addresses(
-        &mut self,
-        address: Option<GuestAddress>,
-        size: GuestUsize,
-        align_size: Option<GuestUsize>,
-    ) -> Option<GuestAddress> {
-        self.mmio_hole_address_space.allocate(
-            address,
-            size,
-            Some(align_size.unwrap_or_else(get_page_size)),
-        )
-    }
-
     #[cfg(target_arch = "x86_64")]
     /// Free an IO address range.
     /// We can only free a range if it matches exactly an already allocated range.
@@ -149,10 +128,4 @@ impl SystemAllocator {
     pub fn free_platform_mmio_addresses(&mut self, address: GuestAddress, size: GuestUsize) {
         self.platform_mmio_address_space.free(address, size)
     }
-
-    /// Free an MMIO address range from the 32 bits hole.
-    /// We can only free a range if it matches exactly an already allocated range.
-    pub fn free_mmio_hole_addresses(&mut self, address: GuestAddress, size: GuestUsize) {
-        self.mmio_hole_address_space.free(address, size)
-    }
 }
diff --git a/vmm/src/device_manager.rs b/vmm/src/device_manager.rs
index 24900e787..ed0f7d116 100644
--- a/vmm/src/device_manager.rs
+++ b/vmm/src/device_manager.rs
@@ -561,7 +561,8 @@ pub(crate) struct AddressManager {
     pub(crate) mmio_bus: Arc<Bus>,
     pub(crate) vm: Arc<dyn hypervisor::Vm>,
     device_tree: Arc<Mutex<DeviceTree>>,
-    pci_mmio_allocators: Vec<Arc<Mutex<AddressAllocator>>>,
+    pci_mmio32_allocators: Vec<Arc<Mutex<AddressAllocator>>>,
+    pci_mmio64_allocators: Vec<Arc<Mutex<AddressAllocator>>>,
 }
 
 impl DeviceRelocation for AddressManager {
@@ -604,56 +605,35 @@ impl DeviceRelocation for AddressManager {
                 error!("I/O region is not supported");
             }
             PciBarRegionType::Memory32BitRegion | PciBarRegionType::Memory64BitRegion => {
-                // Update system allocator
-                if region_type == PciBarRegionType::Memory32BitRegion {
-                    self.allocator
-                        .lock()
-                        .unwrap()
-                        .free_mmio_hole_addresses(GuestAddress(old_base), len as GuestUsize);
-
-                    self.allocator
-                        .lock()
-                        .unwrap()
-                        .allocate_mmio_hole_addresses(
-                            Some(GuestAddress(new_base)),
-                            len as GuestUsize,
-                            Some(len),
-                        )
-                        .ok_or_else(|| {
-                            io::Error::new(
-                                io::ErrorKind::Other,
-                                "failed allocating new 32 bits MMIO range",
-                            )
-                        })?;
+                let allocators = if region_type == PciBarRegionType::Memory32BitRegion {
+                    &self.pci_mmio32_allocators
                 } else {
-                    // Find the specific allocator that this BAR was allocated from and use it for new one
-                    for allocator in &self.pci_mmio_allocators {
-                        let allocator_base = allocator.lock().unwrap().base();
-                        let allocator_end = allocator.lock().unwrap().end();
+                    &self.pci_mmio64_allocators
+                };
 
-                        if old_base >= allocator_base.0 && old_base <= allocator_end.0 {
-                            allocator
-                                .lock()
-                                .unwrap()
-                                .free(GuestAddress(old_base), len as GuestUsize);
+                // Find the specific allocator that this BAR was allocated from and use it for new one
+                for allocator in allocators {
+                    let allocator_base = allocator.lock().unwrap().base();
+                    let allocator_end = allocator.lock().unwrap().end();
 
-                            allocator
-                                .lock()
-                                .unwrap()
-                                .allocate(
-                                    Some(GuestAddress(new_base)),
-                                    len as GuestUsize,
-                                    Some(len),
+                    if old_base >= allocator_base.0 && old_base <= allocator_end.0 {
+                        allocator
+                            .lock()
+                            .unwrap()
+                            .free(GuestAddress(old_base), len as GuestUsize);
+
+                        allocator
+                            .lock()
+                            .unwrap()
+                            .allocate(Some(GuestAddress(new_base)), len as GuestUsize, Some(len))
+                            .ok_or_else(|| {
+                                io::Error::new(
+                                    io::ErrorKind::Other,
+                                    "failed allocating new MMIO range",
                                 )
-                                .ok_or_else(|| {
-                                    io::Error::new(
-                                        io::ErrorKind::Other,
-                                        "failed allocating new 64 bits MMIO range",
-                                    )
-                                })?;
+                            })?;
 
-                            break;
-                        }
+                        break;
                     }
                 }
@@ -1007,22 +987,40 @@ impl DeviceManager {
             1
         };
 
-        let start_of_device_area = memory_manager.lock().unwrap().start_of_device_area().0;
-        let end_of_device_area = memory_manager.lock().unwrap().end_of_device_area().0;
+        let create_mmio_allocators = |start, end, num_pci_segments, alignment| {
+            // Start each PCI segment mmio range on an aligned boundary
+            let pci_segment_mmio_size =
+                (end - start + 1) / (alignment * num_pci_segments as u64) * alignment;
 
-        // Start each PCI segment range on a 4GiB boundary
-        let pci_segment_size = (end_of_device_area - start_of_device_area + 1)
-            / ((4 << 30) * num_pci_segments as u64)
-            * (4 << 30);
+            let mut mmio_allocators = vec![];
+            for i in 0..num_pci_segments as u64 {
+                let mmio_start = start + i * pci_segment_mmio_size;
+                let allocator = Arc::new(Mutex::new(
+                    AddressAllocator::new(GuestAddress(mmio_start), pci_segment_mmio_size).unwrap(),
+                ));
+                mmio_allocators.push(allocator)
+            }
 
-        let mut pci_mmio_allocators = vec![];
-        for i in 0..num_pci_segments as u64 {
-            let mmio_start = start_of_device_area + i * pci_segment_size;
-            let allocator = Arc::new(Mutex::new(
-                AddressAllocator::new(GuestAddress(mmio_start), pci_segment_size).unwrap(),
-            ));
-            pci_mmio_allocators.push(allocator)
-        }
+            mmio_allocators
+        };
+
+        let start_of_mmio32_area = layout::MEM_32BIT_DEVICES_START.0;
+        let end_of_mmio32_area = layout::MEM_32BIT_DEVICES_START.0 + layout::MEM_32BIT_DEVICES_SIZE;
+        let pci_mmio32_allocators = create_mmio_allocators(
+            start_of_mmio32_area,
+            end_of_mmio32_area,
+            num_pci_segments,
+            4 << 10,
+        );
+
+        let start_of_mmio64_area = memory_manager.lock().unwrap().start_of_device_area().0;
+        let end_of_mmio64_area = memory_manager.lock().unwrap().end_of_device_area().0;
+        let pci_mmio64_allocators = create_mmio_allocators(
+            start_of_mmio64_area,
+            end_of_mmio64_area,
+            num_pci_segments,
+            4 << 30,
+        );
 
         let address_manager = Arc::new(AddressManager {
             allocator: memory_manager.lock().unwrap().allocator(),
@@ -1031,7 +1029,8 @@ impl DeviceManager {
             mmio_bus,
             vm: vm.clone(),
             device_tree: Arc::clone(&device_tree),
-            pci_mmio_allocators,
+            pci_mmio32_allocators,
+            pci_mmio64_allocators,
         });
 
         // First we create the MSI interrupt manager, the legacy one is created
@@ -1061,7 +1060,8 @@ impl DeviceManager {
 
         let mut pci_segments = vec![PciSegment::new_default_segment(
             &address_manager,
-            Arc::clone(&address_manager.pci_mmio_allocators[0]),
+            Arc::clone(&address_manager.pci_mmio32_allocators[0]),
+            Arc::clone(&address_manager.pci_mmio64_allocators[0]),
             &pci_irq_slots,
         )?];
 
@@ -1070,7 +1070,8 @@ impl DeviceManager {
                 i as u16,
                 numa_node_id_from_pci_segment_id(&numa_nodes, i as u16),
                 &address_manager,
-                Arc::clone(&address_manager.pci_mmio_allocators[i]),
+                Arc::clone(&address_manager.pci_mmio32_allocators[i]),
+                Arc::clone(&address_manager.pci_mmio64_allocators[i]),
                 &pci_irq_slots,
             )?);
         }
@@ -2765,7 +2766,7 @@ impl DeviceManager {
                 // The memory needs to be 2MiB aligned in order to support
                 // hugepages.
                 self.pci_segments[pmem_cfg.pci_segment as usize]
-                    .allocator
+                    .mem64_allocator
                     .lock()
                     .unwrap()
                     .allocate(
@@ -2780,7 +2781,7 @@ impl DeviceManager {
                 // The memory needs to be 2MiB aligned in order to support
                 // hugepages.
                 let base = self.pci_segments[pmem_cfg.pci_segment as usize]
-                    .allocator
+                    .mem64_allocator
                    .lock()
                     .unwrap()
                     .allocate(None, size as GuestUsize, Some(0x0020_0000))
@@ -3367,7 +3368,11 @@ impl DeviceManager {
             .allocate_bars(
                 &self.address_manager.allocator,
                 &mut self.pci_segments[segment_id as usize]
-                    .allocator
+                    .mem32_allocator
+                    .lock()
+                    .unwrap(),
+                &mut self.pci_segments[segment_id as usize]
+                    .mem64_allocator
                     .lock()
                     .unwrap(),
                 resources,
@@ -4080,7 +4085,11 @@ impl DeviceManager {
             .free_bars(
                 &mut self.address_manager.allocator.lock().unwrap(),
                 &mut self.pci_segments[pci_segment_id as usize]
-                    .allocator
+                    .mem32_allocator
+                    .lock()
+                    .unwrap(),
+                &mut self.pci_segments[pci_segment_id as usize]
+                    .mem64_allocator
                     .lock()
                     .unwrap(),
             )
diff --git a/vmm/src/memory_manager.rs b/vmm/src/memory_manager.rs
index 8a1f1894a..c84cb57b2 100644
--- a/vmm/src/memory_manager.rs
+++ b/vmm/src/memory_manager.rs
@@ -16,7 +16,7 @@ use acpi_tables::{aml, Aml};
 use anyhow::anyhow;
 #[cfg(target_arch = "x86_64")]
 use arch::x86_64::{SgxEpcRegion, SgxEpcSection};
-use arch::{layout, RegionType};
+use arch::RegionType;
 #[cfg(target_arch = "x86_64")]
 use devices::ioapic;
 #[cfg(target_arch = "aarch64")]
@@ -1160,8 +1160,6 @@ impl MemoryManager {
             },
             start_of_platform_device_area,
             PLATFORM_DEVICE_AREA_SIZE,
-            layout::MEM_32BIT_DEVICES_START,
-            layout::MEM_32BIT_DEVICES_SIZE,
             #[cfg(target_arch = "x86_64")]
             vec![GsiApic::new(
                 X86_64_IRQ_BASE,
diff --git a/vmm/src/pci_segment.rs b/vmm/src/pci_segment.rs
index 3ae51f471..7736027fb 100644
--- a/vmm/src/pci_segment.rs
+++ b/vmm/src/pci_segment.rs
@@ -38,10 +38,14 @@ pub(crate) struct PciSegment {
     pub(crate) pci_irq_slots: [u8; 32],
 
     // Device memory covered by this segment
-    pub(crate) start_of_device_area: u64,
-    pub(crate) end_of_device_area: u64,
+    pub(crate) start_of_mem32_area: u64,
+    pub(crate) end_of_mem32_area: u64,
 
-    pub(crate) allocator: Arc<Mutex<AddressAllocator>>,
+    pub(crate) start_of_mem64_area: u64,
+    pub(crate) end_of_mem64_area: u64,
+
+    pub(crate) mem32_allocator: Arc<Mutex<AddressAllocator>>,
+    pub(crate) mem64_allocator: Arc<Mutex<AddressAllocator>>,
 }
 
 impl PciSegment {
@@ -49,7 +53,8 @@ impl PciSegment {
         id: u16,
         numa_node: u32,
         address_manager: &Arc<AddressManager>,
-        allocator: Arc<Mutex<AddressAllocator>>,
+        mem32_allocator: Arc<Mutex<AddressAllocator>>,
+        mem64_allocator: Arc<Mutex<AddressAllocator>>,
         pci_irq_slots: &[u8; 32],
     ) -> DeviceManagerResult<PciSegment> {
         let pci_root = PciRoot::new(None);
@@ -71,8 +76,11 @@ impl PciSegment {
             )
             .map_err(DeviceManagerError::BusError)?;
 
-        let start_of_device_area = allocator.lock().unwrap().base().0;
-        let end_of_device_area = allocator.lock().unwrap().end().0;
+        let start_of_mem32_area = mem32_allocator.lock().unwrap().base().0;
+        let end_of_mem32_area = mem32_allocator.lock().unwrap().end().0;
+
+        let start_of_mem64_area = mem64_allocator.lock().unwrap().base().0;
+        let end_of_mem64_area = mem64_allocator.lock().unwrap().end().0;
 
         let segment = PciSegment {
             id,
@@ -84,15 +92,18 @@ impl PciSegment {
             pci_devices_down: 0,
             #[cfg(target_arch = "x86_64")]
             pci_config_io: None,
-            allocator,
-            start_of_device_area,
-            end_of_device_area,
+            mem32_allocator,
+            mem64_allocator,
+            start_of_mem32_area,
+            end_of_mem32_area,
+            start_of_mem64_area,
+            end_of_mem64_area,
             pci_irq_slots: *pci_irq_slots,
         };
 
         info!(
-            "Adding PCI segment: id={}, PCI MMIO config address: 0x{:x}, device area [0x{:x}-0x{:x}",
-            segment.id, segment.mmio_config_address, segment.start_of_device_area, segment.end_of_device_area
+            "Adding PCI segment: id={}, PCI MMIO config address: 0x{:x}, mem32 area [0x{:x}-0x{:x}, mem64 area [0x{:x}-0x{:x}",
+            segment.id, segment.mmio_config_address, segment.start_of_mem32_area,
+            segment.end_of_mem32_area, segment.start_of_mem64_area, segment.end_of_mem64_area
         );
         Ok(segment)
     }
@@ -100,10 +111,18 @@ impl PciSegment {
     #[cfg(target_arch = "x86_64")]
     pub(crate) fn new_default_segment(
         address_manager: &Arc<AddressManager>,
-        allocator: Arc<Mutex<AddressAllocator>>,
+        mem32_allocator: Arc<Mutex<AddressAllocator>>,
+        mem64_allocator: Arc<Mutex<AddressAllocator>>,
         pci_irq_slots: &[u8; 32],
     ) -> DeviceManagerResult<PciSegment> {
-        let mut segment = Self::new(0, 0, address_manager, allocator, pci_irq_slots)?;
+        let mut segment = Self::new(
+            0,
+            0,
+            address_manager,
+            mem32_allocator,
+            mem64_allocator,
+            pci_irq_slots,
+        )?;
         let pci_config_io = Arc::new(Mutex::new(PciConfigIo::new(Arc::clone(&segment.pci_bus))));
 
         address_manager
@@ -123,10 +142,18 @@ impl PciSegment {
     #[cfg(target_arch = "aarch64")]
     pub(crate) fn new_default_segment(
         address_manager: &Arc<AddressManager>,
-        allocator: Arc<Mutex<AddressAllocator>>,
+        mem32_allocator: Arc<Mutex<AddressAllocator>>,
+        mem64_allocator: Arc<Mutex<AddressAllocator>>,
         pci_irq_slots: &[u8; 32],
     ) -> DeviceManagerResult<PciSegment> {
-        Self::new(0, 0, address_manager, allocator, pci_irq_slots)
+        Self::new(
+            0,
+            0,
+            address_manager,
+            mem32_allocator,
+            mem64_allocator,
+            pci_irq_slots,
+        )
     }
 
     pub(crate) fn next_device_bdf(&self) -> DeviceManagerResult<PciBdf> {
@@ -340,6 +367,7 @@ impl Aml for PciSegment {
         let pci_dsm = PciDsmMethod {};
         pci_dsdt_inner_data.push(&pci_dsm);
 
+        #[allow(clippy::if_same_then_else)]
         let crs = if self.id == 0 {
             aml::Name::new(
                 "_CRS".into(),
@@ -347,19 +375,23 @@ impl Aml for PciSegment {
                     &aml::AddressSpace::new_bus_number(0x0u16, 0x0u16),
                     #[cfg(target_arch = "x86_64")]
                     &aml::IO::new(0xcf8, 0xcf8, 1, 0x8),
+                    &aml::Memory32Fixed::new(
+                        true,
+                        self.mmio_config_address as u32,
+                        layout::PCI_MMIO_CONFIG_SIZE_PER_SEGMENT as u32,
+                    ),
                     &aml::AddressSpace::new_memory(
                         aml::AddressSpaceCacheable::NotCacheable,
                         true,
-                        layout::MEM_32BIT_DEVICES_START.0 as u32,
-                        (layout::MEM_32BIT_DEVICES_START.0 + layout::MEM_32BIT_DEVICES_SIZE - 1)
-                            as u32,
+                        self.start_of_mem32_area,
+                        self.end_of_mem32_area,
                         None,
                     ),
                     &aml::AddressSpace::new_memory(
                         aml::AddressSpaceCacheable::NotCacheable,
                         true,
-                        self.start_of_device_area,
-                        self.end_of_device_area,
+                        self.start_of_mem64_area,
+                        self.end_of_mem64_area,
                         None,
                     ),
                     #[cfg(target_arch = "x86_64")]
@@ -381,8 +413,15 @@ impl Aml for PciSegment {
                     &aml::AddressSpace::new_memory(
                         aml::AddressSpaceCacheable::NotCacheable,
                         true,
-                        self.start_of_device_area,
-                        self.end_of_device_area,
+                        self.start_of_mem32_area,
+                        self.end_of_mem32_area,
+                        None,
+                    ),
+                    &aml::AddressSpace::new_memory(
+                        aml::AddressSpaceCacheable::NotCacheable,
+                        true,
+                        self.start_of_mem64_area,
+                        self.end_of_mem64_area,
                         None,
                     ),
                 ]),
diff --git a/vmm/src/vm.rs b/vmm/src/vm.rs
index 2b1ca3aee..97ef185f7 100644
--- a/vmm/src/vm.rs
+++ b/vmm/src/vm.rs
@@ -1162,9 +1162,9 @@ impl Vm {
             let pci_space = PciSpaceInfo {
                 pci_segment_id: pci_segment.id,
                 mmio_config_address: pci_segment.mmio_config_address,
-                pci_device_space_start: pci_segment.start_of_device_area,
-                pci_device_space_size: pci_segment.end_of_device_area
-                    - pci_segment.start_of_device_area
+                pci_device_space_start: pci_segment.start_of_mem64_area,
+                pci_device_space_size: pci_segment.end_of_mem64_area
+                    - pci_segment.start_of_mem64_area
                     + 1,
             };
             pci_space_info.push(pci_space);
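
As a quick illustration of the `vmm/src/device_manager.rs` change above, here is a minimal, standalone sketch of the range-carving arithmetic that the new `create_mmio_allocators` closure applies to both the 32-bit and 64-bit device windows. The window bounds and segment count below are made-up example values (the patch derives the real ones from `layout::MEM_32BIT_DEVICES_START`/`SIZE` and the memory manager's device area), and plain `u64` pairs stand in for the `AddressAllocator` instances.

```rust
// Sketch only: split a [start, end] MMIO window into one equally sized,
// alignment-multiple slice per PCI segment, mirroring `create_mmio_allocators`.
fn carve_mmio_ranges(start: u64, end: u64, num_pci_segments: u16, alignment: u64) -> Vec<(u64, u64)> {
    // Round the per-segment size down to a multiple of `alignment` so each
    // segment's window begins on an aligned boundary (assuming `start` is aligned).
    let pci_segment_mmio_size =
        (end - start + 1) / (alignment * num_pci_segments as u64) * alignment;

    (0..num_pci_segments as u64)
        .map(|i| (start + i * pci_segment_mmio_size, pci_segment_mmio_size))
        .collect()
}

fn main() {
    // Hypothetical 32-bit device window, 4 KiB alignment (the mem32 case).
    for (base, size) in carve_mmio_ranges(0xc000_0000, 0xffff_ffff, 2, 4 << 10) {
        println!("mem32 segment window: base = {base:#x}, size = {size:#x}");
    }

    // Hypothetical 64-bit device window, 4 GiB alignment (the mem64 case).
    for (base, size) in carve_mmio_ranges(0x1_0000_0000, 0x8_ffff_ffff, 2, 4 << 30) {
        println!("mem64 segment window: base = {base:#x}, size = {size:#x}");
    }
}
```

With the example inputs above, the 1 GiB 32-bit window yields two 512 MiB per-segment windows, while the 32 GiB 64-bit window yields two 16 GiB windows, which is the same behaviour the patch relies on when handing one `mem32_allocator` and one `mem64_allocator` to each `PciSegment`.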