vmm: support PCI I/O regions on all architectures

While non-Intel CPU architectures don't have a separate I/O address
space, support for PCI I/O regions is still needed to handle PCI
devices that use them.

With this change, I'm able to pass through an e1000e device from QEMU
to a cloud-hypervisor VM on aarch64 and use it in the cloud-hypervisor
guest.  Previously, it would hit the unimplemented!().

Signed-off-by: Alyssa Ross <hi@alyssa.is>

Alyssa Ross authored 2024-12-13 12:55:33 +01:00; committed by Rob Bradford
parent dd8a5a7de8
commit 50bac1694f
6 changed files with 24 additions and 58 deletions
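
In short, the I/O address space plumbing that used to be gated on
target_arch = "x86_64" is now unconditional. A rough sketch of what
constructing the allocator looks like on aarch64 after this change (the
platform MMIO range below is illustrative, not what the VMM really uses):

    use vm_allocator::SystemAllocator;
    use vm_memory::GuestAddress;

    // The PCI I/O port range is passed on every architecture now; only the
    // APIC list parameter (absent on non-x86_64 targets) stays x86_64-specific.
    let mut allocator = SystemAllocator::new(
        GuestAddress(0),           // io_base, as set up by MemoryManager
        1 << 16,                   // io_size: 64 KiB of PCI I/O port space
        GuestAddress(0x1000_0000), // platform MMIO base (example value)
        0x1000_0000,               // platform MMIO size (example value)
    )
    .unwrap();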

@@ -125,19 +125,16 @@ impl PciBus {
     pub fn register_mapping(
         &self,
         dev: Arc<dyn BusDeviceSync>,
-        #[cfg(target_arch = "x86_64")] io_bus: &Bus,
+        io_bus: &Bus,
         mmio_bus: &Bus,
         bars: Vec<PciBarConfiguration>,
     ) -> Result<()> {
         for bar in bars {
             match bar.region_type() {
                 PciBarRegionType::IoRegion => {
-                    #[cfg(target_arch = "x86_64")]
                     io_bus
                         .insert(dev.clone(), bar.addr(), bar.size())
                         .map_err(PciRootError::PioInsert)?;
-                    #[cfg(not(target_arch = "x86_64"))]
-                    error!("I/O region is not supported");
                 }
                 PciBarRegionType::Memory32BitRegion | PciBarRegionType::Memory64BitRegion => {
                     mmio_bus
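
For context, callers now hand register_mapping() the I/O bus on every
architecture. A sketch of a call site (pci_bus, bus_device and bars are
placeholders; the real call site in DeviceManager appears further down):

    // Both buses always exist and are always passed; an IoRegion BAR is
    // inserted into io_bus, memory BARs into mmio_bus.
    let io_bus = Arc::new(Bus::new());
    let mmio_bus = Arc::new(Bus::new());

    pci_bus.register_mapping(
        bus_device,      // Arc<dyn BusDeviceSync>
        io_bus.as_ref(), // previously only passed on x86_64
        mmio_bus.as_ref(),
        bars,            // Vec<PciBarConfiguration>
    )?;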

@@ -713,11 +713,7 @@ impl VfioCommon {
         let bar_addr = match region_type {
             PciBarRegionType::IoRegion => {
-                #[cfg(not(target_arch = "x86_64"))]
-                unimplemented!();
                 // The address needs to be 4 bytes aligned.
-                #[cfg(target_arch = "x86_64")]
                 allocator
                     .lock()
                     .unwrap()
@@ -795,10 +791,7 @@ impl VfioCommon {
         for region in self.mmio_regions.iter() {
             match region.type_ {
                 PciBarRegionType::IoRegion => {
-                    #[cfg(target_arch = "x86_64")]
                     allocator.free_io_addresses(region.start, region.length);
-                    #[cfg(not(target_arch = "x86_64"))]
-                    error!("I/O region is not supported");
                 }
                 PciBarRegionType::Memory32BitRegion => {
                     mmio32_allocator.free(region.start, region.length);

@@ -26,8 +26,8 @@ use crate::page_size::get_page_size;
 /// # use vm_allocator::SystemAllocator;
 /// # use vm_memory::{Address, GuestAddress, GuestUsize};
 /// let mut allocator = SystemAllocator::new(
-///        #[cfg(target_arch = "x86_64")] GuestAddress(0x1000),
-///        #[cfg(target_arch = "x86_64")] 0x10000,
+///        GuestAddress(0x1000),
+///        0x10000,
 ///        GuestAddress(0x10000000), 0x10000000,
 ///        #[cfg(target_arch = "x86_64")] vec![GsiApic::new(5, 19)]).unwrap();
 /// #[cfg(target_arch = "x86_64")]
@@ -46,7 +46,6 @@ use crate::page_size::get_page_size;
 ///
 /// ```
 pub struct SystemAllocator {
-    #[cfg(target_arch = "x86_64")]
     io_address_space: AddressAllocator,
     platform_mmio_address_space: AddressAllocator,
     gsi_allocator: GsiAllocator,
@@ -63,14 +62,13 @@ impl SystemAllocator {
     /// * `apics` - (X86) Vector of APIC's.
     ///
     pub fn new(
-        #[cfg(target_arch = "x86_64")] io_base: GuestAddress,
-        #[cfg(target_arch = "x86_64")] io_size: GuestUsize,
+        io_base: GuestAddress,
+        io_size: GuestUsize,
         platform_mmio_base: GuestAddress,
         platform_mmio_size: GuestUsize,
         #[cfg(target_arch = "x86_64")] apics: Vec<GsiApic>,
     ) -> Option<Self> {
         Some(SystemAllocator {
-            #[cfg(target_arch = "x86_64")]
             io_address_space: AddressAllocator::new(io_base, io_size)?,
             platform_mmio_address_space: AddressAllocator::new(
                 platform_mmio_base,
@@ -93,7 +91,6 @@ impl SystemAllocator {
         self.gsi_allocator.allocate_gsi().ok()
     }
-    #[cfg(target_arch = "x86_64")]
     /// Reserves a section of `size` bytes of IO address space.
     pub fn allocate_io_addresses(
         &mut self,
@@ -119,7 +116,6 @@ impl SystemAllocator {
         )
     }
-    #[cfg(target_arch = "x86_64")]
     /// Free an IO address range.
     /// We can only free a range if it matches exactly an already allocated range.
     pub fn free_io_addresses(&mut self, address: GuestAddress, size: GuestUsize) {
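
Correspondingly, allocate_io_addresses() and free_io_addresses() exist on
every architecture. A sketch of the reallocation pattern DeviceRelocation
uses (old_base, new_base and len are placeholders taken from that hunk):

    // Free the old I/O range, then claim the new one; allocate_io_addresses()
    // returns Option<GuestAddress>, so a failed allocation surfaces as an error.
    allocator.free_io_addresses(GuestAddress(old_base), len as GuestUsize);
    allocator
        .allocate_io_addresses(Some(GuestAddress(new_base)), len as GuestUsize, None)
        .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "failed allocating new IO range"))?;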

@@ -536,7 +536,6 @@ impl Console {
 pub(crate) struct AddressManager {
     pub(crate) allocator: Arc<Mutex<SystemAllocator>>,
-    #[cfg(target_arch = "x86_64")]
     pub(crate) io_bus: Arc<Bus>,
     pub(crate) mmio_bus: Arc<Bus>,
     pub(crate) vm: Arc<dyn hypervisor::Vm>,
@@ -556,8 +555,6 @@ impl DeviceRelocation for AddressManager {
     ) -> std::result::Result<(), std::io::Error> {
         match region_type {
             PciBarRegionType::IoRegion => {
-                #[cfg(target_arch = "x86_64")]
-                {
                 // Update system allocator
                 self.allocator
                     .lock()
@@ -567,11 +564,7 @@ impl DeviceRelocation for AddressManager {
                 self.allocator
                     .lock()
                     .unwrap()
-                    .allocate_io_addresses(
-                        Some(GuestAddress(new_base)),
-                        len as GuestUsize,
-                        None,
-                    )
+                    .allocate_io_addresses(Some(GuestAddress(new_base)), len as GuestUsize, None)
                     .ok_or_else(|| {
                         io::Error::new(io::ErrorKind::Other, "failed allocating new IO range")
                     })?;
@@ -581,9 +574,6 @@ impl DeviceRelocation for AddressManager {
                     .update_range(old_base, len, new_base, len)
                     .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
                 }
-                #[cfg(target_arch = "aarch64")]
-                error!("I/O region is not supported");
-            }
             PciBarRegionType::Memory32BitRegion | PciBarRegionType::Memory64BitRegion => {
                 let allocators = if region_type == PciBarRegionType::Memory32BitRegion {
                     &self.pci_mmio32_allocators
@@ -992,7 +982,7 @@ fn create_mmio_allocators(
 impl DeviceManager {
     #[allow(clippy::too_many_arguments)]
     pub fn new(
-        #[cfg(target_arch = "x86_64")] io_bus: Arc<Bus>,
+        io_bus: Arc<Bus>,
         mmio_bus: Arc<Bus>,
         vm: Arc<dyn hypervisor::Vm>,
         config: Arc<Mutex<VmConfig>>,
@@ -1072,7 +1062,6 @@ impl DeviceManager {
         let address_manager = Arc::new(AddressManager {
             allocator: memory_manager.lock().unwrap().allocator(),
-            #[cfg(target_arch = "x86_64")]
             io_bus,
             mmio_bus,
             vm: vm.clone(),
@@ -3512,7 +3501,6 @@ impl DeviceManager {
             pci_bus
                 .register_mapping(
                     bus_device,
-                    #[cfg(target_arch = "x86_64")]
                     self.address_manager.io_bus.as_ref(),
                     self.address_manager.mmio_bus.as_ref(),
                     bars.clone(),

@@ -1159,14 +1159,8 @@ impl MemoryManager {
         let allocator = Arc::new(Mutex::new(
             SystemAllocator::new(
-                #[cfg(target_arch = "x86_64")]
-                {
-                    GuestAddress(0)
-                },
-                #[cfg(target_arch = "x86_64")]
-                {
-                    1 << 16
-                },
+                GuestAddress(0),
+                1 << 16,
                 start_of_platform_device_area,
                 PLATFORM_DEVICE_AREA_SIZE,
                 #[cfg(target_arch = "x86_64")]

@@ -531,7 +531,6 @@ impl Vm {
         let stop_on_boot = false;
         let memory = memory_manager.lock().unwrap().guest_memory();
-        #[cfg(target_arch = "x86_64")]
         let io_bus = Arc::new(Bus::new());
         let mmio_bus = Arc::new(Bus::new());
@@ -622,7 +621,6 @@ impl Vm {
         let dynamic = true;
         let device_manager = DeviceManager::new(
-            #[cfg(target_arch = "x86_64")]
             io_bus,
             mmio_bus,
             vm.clone(),