aarch64: Introduce struct PciSpaceInfo for FDT

Currently, a tuple containing the PCI space start address and the PCI
space size is used to pass the PCI space information to the FDT creator.
In order to support multiple PCI segments in the FDT, more information,
such as the PCI segment ID, needs to be passed to the FDT creator.
Keeping all of this information in a tuple would hurt the flexibility
and readability of the code.

To address this issue, this commit replaces the tuple holding the PCI
space information with a structure, `PciSpaceInfo`, and uses a vector of
`PciSpaceInfo` to store the PCI space information of each segment, so
that the information for multiple PCI segments can be passed to the FDT
creator together.
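
For illustration, the shape of the data handed to the FDT creator
changes roughly as follows. This is a minimal sketch: the struct is the
one introduced by this commit, the tuple values are taken from the
updated unit test, and the remaining field values are placeholders.

    // The struct as introduced by this commit (see arch/src/lib.rs below).
    struct PciSpaceInfo {
        pci_segment_id: u16,
        mmio_config_address: u64,
        pci_device_space_start: u64,
        pci_device_space_size: u64,
    }

    fn main() {
        // Before: a single anonymous (start, size) tuple for one segment.
        let _pci_space: (u64, u64) = (0x1_0000_0000, 0x1_0000);

        // After: one named entry per PCI segment, collected in a vector.
        let _pci_space_info = vec![PciSpaceInfo {
            pci_segment_id: 0,
            mmio_config_address: 0x3000_0000, // placeholder value
            pci_device_space_start: 0x1_0000_0000,
            pci_device_space_size: 0x1_0000,
        }];
    }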

Note that this commit only covers refactoring the existing code; the
actual multiple PCI segments support will come in a follow-up series,
and for now `--platform num_pci_segments` should only be 1.

Signed-off-by: Henry Wang <Henry.Wang@arm.com>
Author: Henry Wang, 2021-11-25 03:12:37 -05:00; committed by Rob Bradford
commit 07bef815cc (parent e1151482fc)
6 changed files with 130 additions and 124 deletions

diff --git a/arch/src/aarch64/fdt.rs b/arch/src/aarch64/fdt.rs

@@ -6,7 +6,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the THIRD-PARTY file.
 
-use crate::NumaNodes;
+use crate::{NumaNodes, PciSpaceInfo};
 use byteorder::{BigEndian, ByteOrder};
 use std::cmp;
 use std::collections::HashMap;
@@ -88,7 +88,7 @@ pub fn create_fdt<T: DeviceInfoForFdt + Clone + Debug, S: ::std::hash::BuildHash
     device_info: &HashMap<(DeviceType, String), T, S>,
     gic_device: &dyn GicDevice,
     initrd: &Option<InitramfsConfig>,
-    pci_space_address: &(u64, u64),
+    pci_space_info: &[PciSpaceInfo],
     numa_nodes: &NumaNodes,
     virtio_iommu_bdf: Option<u32>,
 ) -> FdtWriterResult<Vec<u8>> {
@@ -117,12 +117,7 @@ pub fn create_fdt<T: DeviceInfoForFdt + Clone + Debug, S: ::std::hash::BuildHash
     create_clock_node(&mut fdt)?;
     create_psci_node(&mut fdt)?;
     create_devices_node(&mut fdt, device_info)?;
-    create_pci_nodes(
-        &mut fdt,
-        pci_space_address.0,
-        pci_space_address.1,
-        virtio_iommu_bdf,
-    )?;
+    create_pci_nodes(&mut fdt, pci_space_info, virtio_iommu_bdf)?;
     if numa_nodes.len() > 1 {
         create_distance_map_node(&mut fdt, numa_nodes)?;
     }
@@ -547,28 +542,32 @@ fn create_devices_node<T: DeviceInfoForFdt + Clone + Debug, S: ::std::hash::Buil
 fn create_pci_nodes(
     fdt: &mut FdtWriter,
-    pci_device_base: u64,
-    pci_device_size: u64,
+    pci_device_info: &[PciSpaceInfo],
     virtio_iommu_bdf: Option<u32>,
 ) -> FdtWriterResult<()> {
     // Add node for PCIe controller.
     // See Documentation/devicetree/bindings/pci/host-generic-pci.txt in the kernel
     // and https://elinux.org/Device_Tree_Usage.
-    // EDK2 requires the PCIe high space above 4G address.
-    // The actual space in CLH follows the RAM. If the RAM space is small, the PCIe high space
-    // could fall bellow 4G.
-    // Here we put it above 512G in FDT to workaround the EDK2 check.
-    // But the address written in ACPI is not impacted.
-    let pci_device_base_64bit: u64 = if cfg!(feature = "acpi") {
-        pci_device_base + PCI_HIGH_BASE
-    } else {
-        pci_device_base
-    };
-    let pci_device_size_64bit: u64 = if cfg!(feature = "acpi") {
-        pci_device_size - PCI_HIGH_BASE
-    } else {
-        pci_device_size
-    };
+    // In multiple PCI segments setup, each PCI segment needs a PCI node.
+    for pci_device_info_elem in pci_device_info.iter() {
+        // EDK2 requires the PCIe high space above 4G address.
+        // The actual space in CLH follows the RAM. If the RAM space is small, the PCIe high space
+        // could fall below 4G.
+        // Here we cut off PCI device space below 8G in FDT to workaround the EDK2 check.
+        // But the address written in ACPI is not impacted.
+        let (pci_device_base_64bit, pci_device_size_64bit) = if cfg!(feature = "acpi")
+            && (pci_device_info_elem.pci_device_space_start < PCI_HIGH_BASE)
+        {
+            (
+                PCI_HIGH_BASE,
+                pci_device_info_elem.pci_device_space_size
+                    - (PCI_HIGH_BASE - pci_device_info_elem.pci_device_space_start),
+            )
+        } else {
+            (
+                pci_device_info_elem.pci_device_space_start,
+                pci_device_info_elem.pci_device_space_size,
+            )
+        };
 
         let ranges = [
@@ -648,6 +647,7 @@ fn create_pci_nodes(
         }
 
         fdt.end_node(pci_node)?;
+    }
 
     Ok(())
 }
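
The hunk above folds the two separate base/size adjustments into one
tuple binding, and together with the `PCI_HIGH_BASE` change below (from
512 GiB, `0x80_0000_0000`, down to 8 GiB, `0x2_0000_0000`) it changes the
EDK2 workaround from shifting the whole window up to clamping its bottom.
A minimal standalone sketch of the clamping; the helper name and example
values are hypothetical, only the `PCI_HIGH_BASE` value comes from the
layout.rs change:

    const PCI_HIGH_BASE: u64 = 0x2_0000_0000; // 8 GiB, as in layout.rs

    fn clamp_64bit_window(start: u64, size: u64) -> (u64, u64) {
        if start < PCI_HIGH_BASE {
            // Cut off everything below PCI_HIGH_BASE; the size shrinks
            // by exactly the amount that was cut away.
            (PCI_HIGH_BASE, size - (PCI_HIGH_BASE - start))
        } else {
            (start, size)
        }
    }

    fn main() {
        // A 16 GiB window starting at 4 GiB is reported as 12 GiB at 8 GiB.
        assert_eq!(
            clamp_64bit_window(0x1_0000_0000, 0x4_0000_0000),
            (0x2_0000_0000, 0x3_0000_0000)
        );
    }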

diff --git a/arch/src/aarch64/layout.rs b/arch/src/aarch64/layout.rs

@@ -104,7 +104,7 @@ pub const RSDP_POINTER: GuestAddress = GuestAddress(ACPI_START);
 pub const KERNEL_START: u64 = ACPI_START + ACPI_MAX_SIZE as u64;
 
 /// Pci high memory base
-pub const PCI_HIGH_BASE: u64 = 0x80_0000_0000_u64;
+pub const PCI_HIGH_BASE: u64 = 0x2_0000_0000_u64;
 
 // As per virt/kvm/arm/vgic/vgic-kvm-device.c we need
 // the number of interrupts our GIC will support to be:

diff --git a/arch/src/aarch64/mod.rs b/arch/src/aarch64/mod.rs

@@ -14,7 +14,7 @@ pub mod regs;
 pub mod uefi;
 
 pub use self::fdt::DeviceInfoForFdt;
-use crate::{DeviceType, GuestMemoryMmap, NumaNodes, RegionType};
+use crate::{DeviceType, GuestMemoryMmap, NumaNodes, PciSpaceInfo, RegionType};
 use gic::GicDevice;
 use log::{log_enabled, Level};
 use std::collections::HashMap;
@@ -140,7 +140,7 @@ pub fn configure_system<T: DeviceInfoForFdt + Clone + Debug, S: ::std::hash::Bui
     vcpu_topology: Option<(u8, u8, u8)>,
     device_info: &HashMap<(DeviceType, String), T, S>,
     initrd: &Option<super::InitramfsConfig>,
-    pci_space_address: &(u64, u64),
+    pci_space_info: &[PciSpaceInfo],
     virtio_iommu_bdf: Option<u32>,
     gic_device: &dyn GicDevice,
     numa_nodes: &NumaNodes,
@@ -153,7 +153,7 @@ pub fn configure_system<T: DeviceInfoForFdt + Clone + Debug, S: ::std::hash::Bui
         device_info,
         gic_device,
         initrd,
-        pci_space_address,
+        pci_space_info,
         numa_nodes,
         virtio_iommu_bdf,
     )

diff --git a/arch/src/lib.rs b/arch/src/lib.rs

@@ -162,6 +162,16 @@ pub struct MmioDeviceInfo {
     pub irq: u32,
 }
 
+/// Structure to describe PCI space information
+#[derive(Clone, Debug)]
+#[cfg(target_arch = "aarch64")]
+pub struct PciSpaceInfo {
+    pub pci_segment_id: u16,
+    pub mmio_config_address: u64,
+    pub pci_device_space_start: u64,
+    pub pci_device_space_size: u64,
+}
+
 #[cfg(target_arch = "aarch64")]
 impl DeviceInfoForFdt for MmioDeviceInfo {
     fn addr(&self) -> u64 {

diff --git a/vmm/src/device_manager.rs b/vmm/src/device_manager.rs

@@ -3407,7 +3407,7 @@ impl DeviceManager {
         Arc::clone(self.pci_segments[0].pci_config_io.as_ref().unwrap())
     }
 
-    #[cfg(feature = "acpi")]
+    #[cfg(any(target_arch = "aarch64", feature = "acpi"))]
     pub(crate) fn pci_segments(&self) -> &Vec<PciSegment> {
         &self.pci_segments
     }

diff --git a/vmm/src/vm.rs b/vmm/src/vm.rs

@@ -38,6 +38,8 @@ use arch::x86_64::tdx::TdVmmDataRegionType;
 #[cfg(feature = "tdx")]
 use arch::x86_64::tdx::{TdVmmDataRegion, TdvfSection};
 use arch::EntryPoint;
+#[cfg(target_arch = "aarch64")]
+use arch::PciSpaceInfo;
 #[cfg(any(target_arch = "aarch64", feature = "acpi"))]
 use arch::{NumaNode, NumaNodes};
 use devices::AcpiNotificationFlags;
@@ -72,7 +74,9 @@ use std::{result, str, thread};
 use vm_device::Bus;
 #[cfg(target_arch = "x86_64")]
 use vm_device::BusDevice;
-use vm_memory::{Address, Bytes, GuestAddress, GuestAddressSpace, GuestMemoryAtomic};
+#[cfg(target_arch = "x86_64")]
+use vm_memory::Address;
+use vm_memory::{Bytes, GuestAddress, GuestAddressSpace, GuestMemoryAtomic};
 #[cfg(feature = "tdx")]
 use vm_memory::{GuestMemory, GuestMemoryRegion};
 use vm_migration::{
@@ -1109,6 +1113,7 @@ impl Vm {
         let vcpu_mpidrs = self.cpu_manager.lock().unwrap().get_mpidrs();
         let vcpu_topology = self.cpu_manager.lock().unwrap().get_vcpu_topology();
         let mem = self.memory_manager.lock().unwrap().boot_guest_memory();
+        let mut pci_space_info: Vec<PciSpaceInfo> = Vec::new();
         let initramfs_config = match self.initramfs {
             Some(_) => Some(self.load_initramfs(&mem)?),
             None => None,
@@ -1121,26 +1126,17 @@ impl Vm {
             .get_device_info()
             .clone();
 
-        let pci_space_start: GuestAddress = self
-            .memory_manager
-            .lock()
-            .as_ref()
-            .unwrap()
-            .start_of_device_area();
-
-        let pci_space_end: GuestAddress = self
-            .memory_manager
-            .lock()
-            .as_ref()
-            .unwrap()
-            .end_of_device_area();
-
-        let pci_space_size = pci_space_end
-            .checked_offset_from(pci_space_start)
-            .ok_or(Error::MemOverflow)?
-            + 1;
-
-        let pci_space = (pci_space_start.0, pci_space_size);
+        for pci_segment in self.device_manager.lock().unwrap().pci_segments().iter() {
+            let pci_space = PciSpaceInfo {
+                pci_segment_id: pci_segment.id,
+                mmio_config_address: pci_segment.mmio_config_address,
+                pci_device_space_start: pci_segment.start_of_device_area,
+                pci_device_space_size: pci_segment.end_of_device_area
+                    - pci_segment.start_of_device_area
+                    + 1,
+            };
+            pci_space_info.push(pci_space);
+        }
 
         let virtio_iommu_bdf = self
             .device_manager
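
The `+ 1` survives the rewrite because the end of the device area is an
inclusive address, in both the old `GuestAddress` arithmetic and the new
per-segment fields. A quick sketch of that arithmetic, with hypothetical
bounds:

    fn main() {
        // `end` is the last valid address of the window, hence the extra 1.
        let (start, end) = (0x1_0000_0000_u64, 0x1_ffff_ffff_u64);
        let size = end - start + 1;
        assert_eq!(size, 0x1_0000_0000); // exactly 4 GiB
    }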
@@ -1165,7 +1161,7 @@ impl Vm {
             vcpu_topology,
             device_info,
             &initramfs_config,
-            &pci_space,
+            &pci_space_info,
             virtio_iommu_bdf.map(|bdf| bdf.into()),
             &*gic_device,
             &self.numa_nodes,
@@ -2765,7 +2761,7 @@ mod tests {
             &dev_info,
             &*gic,
             &None,
-            &(0x1_0000_0000, 0x1_0000),
+            &Vec::new(),
             &BTreeMap::new(),
             None,
         )