vmm: Extend NUMA setup to more than ACPI

The AArch64 platform provides a NUMA binding for the device tree,
which means that on AArch64 the NUMA setup can be extended beyond
the ACPI feature.

Based on the above, this commit extends the NUMA setup and data
structures to cover the following scenarios:

- All AArch64 platforms
- The x86_64 platform with the ACPI feature enabled

Signed-off-by: Henry Wang <Henry.Wang@arm.com>
Signed-off-by: Michael Zhao <Michael.Zhao@arm.com>
Author: Henry Wang <Henry.Wang@arm.com>
Date: 2021-08-05 06:01:35 -04:00
Committed by: Sebastien Boeuf
Commit: 20aa811de7 (parent: ba8d3f2c1c)
3 changed files with 20 additions and 17 deletions
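
The mechanical core of the change is visible in every hunk below: each
#[cfg(feature = "acpi")] guard around NUMA state widens to
#[cfg(any(target_arch = "aarch64", feature = "acpi"))], so the guarded
items also compile on every AArch64 build, where the device tree carries
the NUMA description. A minimal sketch of the pattern, with a hypothetical
Manager type standing in for the real structs:

    use std::collections::BTreeMap;

    // Hypothetical stand-in for the per-node data the vmm crate tracks.
    #[derive(Debug)]
    struct NumaNode;
    type NumaNodes = BTreeMap<u32, NumaNode>;

    struct Manager {
        // Before this commit the guard was just: #[cfg(feature = "acpi")]
        #[cfg(any(target_arch = "aarch64", feature = "acpi"))]
        numa_nodes: NumaNodes,
    }

    fn main() {
        // Construction sites mirror the predicate, exactly as the
        // DeviceManager initializer hunk below gates its field.
        let _manager = Manager {
            #[cfg(any(target_arch = "aarch64", feature = "acpi"))]
            numa_nodes: {
                let mut m = NumaNodes::new();
                m.insert(0, NumaNode);
                m
            },
        };
        #[cfg(any(target_arch = "aarch64", feature = "acpi"))]
        println!("guest NUMA nodes: {:?}", _manager.numa_nodes);
    }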

vmm/src/cpu.rs

@@ -17,7 +17,7 @@ use crate::memory_manager::MemoryManager;
 use crate::seccomp_filters::{get_seccomp_filter, Thread};
 #[cfg(target_arch = "x86_64")]
 use crate::vm::physical_bits;
-#[cfg(feature = "acpi")]
+#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
 use crate::vm::NumaNodes;
 use crate::GuestMemoryMmap;
 use crate::CPU_MANAGER_SNAPSHOT_ID;
@@ -531,7 +531,7 @@ impl CpuManager {
         seccomp_action: SeccompAction,
         vmmops: Arc<dyn VmmOps>,
         #[cfg(feature = "tdx")] tdx_enabled: bool,
-        #[cfg(feature = "acpi")] numa_nodes: &NumaNodes,
+        #[cfg(any(target_arch = "aarch64", feature = "acpi"))] numa_nodes: &NumaNodes,
     ) -> Result<Arc<Mutex<CpuManager>>> {
         let guest_memory = memory_manager.lock().unwrap().guest_memory();
         let mut vcpu_states = Vec::with_capacity(usize::from(config.max_vcpus));
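
This hunk leans on cfg-gated function parameters, a stable Rust feature:
when the predicate is false the parameter simply does not exist, so every
caller must gate the matching argument with an identical predicate, as the
Vm::new hunks further down do. A hedged, self-contained sketch with
hypothetical names:

    use std::collections::BTreeMap;

    // Hypothetical simplified payload; the real NumaNodes map holds
    // full per-node state.
    type NumaNodes = BTreeMap<u32, Vec<u8>>;

    fn new_cpu_manager(
        max_vcpus: u8,
        // The parameter exists only on AArch64 or ACPI-enabled builds.
        #[cfg(any(target_arch = "aarch64", feature = "acpi"))] numa_nodes: &NumaNodes,
    ) -> usize {
        // Two gated lets, echoing the virtio-mem node_id idiom below.
        #[cfg(any(target_arch = "aarch64", feature = "acpi"))]
        let node_count = numa_nodes.len();
        #[cfg(not(any(target_arch = "aarch64", feature = "acpi")))]
        let node_count = 0;
        usize::from(max_vcpus) + node_count
    }

    fn main() {
        #[cfg(any(target_arch = "aarch64", feature = "acpi"))]
        let numa_nodes: NumaNodes = BTreeMap::new();
        let hint = new_cpu_manager(
            4,
            // The call site must be gated with the identical predicate.
            #[cfg(any(target_arch = "aarch64", feature = "acpi"))]
            &numa_nodes,
        );
        println!("sizing hint: {}", hint);
    }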

vmm/src/device_manager.rs

@@ -22,7 +22,7 @@ use crate::interrupt::LegacyUserspaceInterruptManager;
 #[cfg(feature = "acpi")]
 use crate::memory_manager::MEMORY_MANAGER_ACPI_SIZE;
 use crate::memory_manager::{Error as MemoryManagerError, MemoryManager};
-#[cfg(feature = "acpi")]
+#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
 use crate::vm::NumaNodes;
 use crate::GuestRegionMmap;
 use crate::PciDeviceInfo;
@@ -922,7 +922,7 @@ pub struct DeviceManager {
     seccomp_action: SeccompAction,

     // List of guest NUMA nodes.
-    #[cfg(feature = "acpi")]
+    #[cfg(any(target_arch = "aarch64", feature = "acpi"))]
     numa_nodes: NumaNodes,

     // Possible handle to the virtio-balloon device
@@ -958,7 +958,7 @@ impl DeviceManager {
         _exit_evt: &EventFd,
         reset_evt: &EventFd,
         seccomp_action: SeccompAction,
-        #[cfg(feature = "acpi")] numa_nodes: NumaNodes,
+        #[cfg(any(target_arch = "aarch64", feature = "acpi"))] numa_nodes: NumaNodes,
         activate_evt: &EventFd,
         force_iommu: bool,
         restoring: bool,
@@ -1021,7 +1021,7 @@ impl DeviceManager {
            #[cfg(target_arch = "aarch64")]
            id_to_dev_info: HashMap::new(),
            seccomp_action,
-           #[cfg(feature = "acpi")]
+           #[cfg(any(target_arch = "aarch64", feature = "acpi"))]
            numa_nodes,
            balloon: None,
            activate_evt: activate_evt
@@ -2588,9 +2588,10 @@ impl DeviceManager {
                 if let Some(virtio_mem_zone) = memory_zone.virtio_mem_zone() {
                     let id = self.next_device_name(MEM_DEVICE_NAME_PREFIX)?;
                     info!("Creating virtio-mem device: id = {}", id);
-                    #[cfg(not(feature = "acpi"))]
+                    #[cfg(all(target_arch = "x86_64", not(feature = "acpi")))]
                     let node_id: Option<u16> = None;
-                    #[cfg(feature = "acpi")]
+                    #[cfg(any(target_arch = "aarch64", feature = "acpi"))]
                     let node_id = numa_node_id_from_memory_zone_id(&self.numa_nodes, _memory_zone_id)
                         .map(|i| i as u16);
@@ -3685,7 +3686,7 @@ impl DeviceManager {
     }
 }

-#[cfg(feature = "acpi")]
+#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
 fn numa_node_id_from_memory_zone_id(numa_nodes: &NumaNodes, memory_zone_id: &str) -> Option<u32> {
     for (numa_node_id, numa_node) in numa_nodes.iter() {
         if numa_node
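
The viewer truncates the helper's body here. As orientation only, a
plausible self-contained completion, assuming for illustration that each
NumaNode records the ids of its assigned memory zones; this lookup is what
the virtio-mem hunk above uses to turn a memory zone id into a guest NUMA
node id:

    use std::collections::BTreeMap;

    // Hypothetical simplified stand-in: the real NumaNode tracks more state.
    struct NumaNode {
        memory_zones: Vec<String>,
    }
    type NumaNodes = BTreeMap<u32, NumaNode>;

    // Return the id of the guest NUMA node owning `memory_zone_id`, if any.
    fn numa_node_id_from_memory_zone_id(numa_nodes: &NumaNodes, memory_zone_id: &str) -> Option<u32> {
        for (numa_node_id, numa_node) in numa_nodes.iter() {
            if numa_node.memory_zones.iter().any(|id| id == memory_zone_id) {
                return Some(*numa_node_id);
            }
        }
        None
    }

    fn main() {
        let mut numa_nodes: NumaNodes = BTreeMap::new();
        numa_nodes.insert(0, NumaNode { memory_zones: vec!["mem0".to_string()] });
        assert_eq!(numa_node_id_from_memory_zone_id(&numa_nodes, "mem0"), Some(0));
        assert_eq!(numa_node_id_from_memory_zone_id(&numa_nodes, "mem1"), None);
    }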

vmm/src/vm.rs

@@ -11,7 +11,7 @@
 // SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
 //

-#[cfg(feature = "acpi")]
+#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
 use crate::config::NumaConfig;
 use crate::config::{
     DeviceConfig, DiskConfig, FsConfig, HotplugMethod, NetConfig, PmemConfig, ValidationError,
@@ -51,7 +51,9 @@ use signal_hook::{
     iterator::Signals,
 };
 use std::cmp;
-use std::collections::{BTreeMap, HashMap};
+#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
+use std::collections::BTreeMap;
+use std::collections::HashMap;
 use std::convert::TryInto;
 use std::ffi::CString;
 #[cfg(target_arch = "x86_64")]
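
Splitting the use std::collections line is not cosmetic: after this commit
BTreeMap is, plausibly, referenced only from code behind the new predicate,
so an unconditional import would trip the unused-import lint on x86_64
builds without the acpi feature. A tiny sketch of the rule that a gated
import should share the predicate of its only users (function name
hypothetical):

    // The import shares the predicate of its only users; without the gate,
    // x86_64 builds without "acpi" would warn about an unused import.
    #[cfg(any(target_arch = "aarch64", feature = "acpi"))]
    use std::collections::BTreeMap;

    #[cfg(any(target_arch = "aarch64", feature = "acpi"))]
    fn ordered_node_ids(ids: &[u32]) -> BTreeMap<u32, ()> {
        ids.iter().map(|&id| (id, ())).collect()
    }

    fn main() {
        #[cfg(any(target_arch = "aarch64", feature = "acpi"))]
        println!("{:?}", ordered_node_ids(&[2, 0, 1]));
    }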
@@ -519,7 +521,7 @@ pub struct Vm {
     vm: Arc<dyn hypervisor::Vm>,
     #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
     saved_clock: Option<hypervisor::ClockData>,
-    #[cfg(feature = "acpi")]
+    #[cfg(any(target_arch = "aarch64", feature = "acpi"))]
     numa_nodes: NumaNodes,
     seccomp_action: SeccompAction,
     exit_evt: EventFd,
@@ -552,7 +554,7 @@ impl Vm {
         info!("Booting VM from config: {:?}", &config);

         // Create NUMA nodes based on NumaConfig.
-        #[cfg(feature = "acpi")]
+        #[cfg(any(target_arch = "aarch64", feature = "acpi"))]
         let numa_nodes =
             Self::create_numa_nodes(config.lock().unwrap().numa.clone(), &memory_manager)?;

@@ -568,7 +570,7 @@ impl Vm {
             &exit_evt,
             &reset_evt,
             seccomp_action.clone(),
-            #[cfg(feature = "acpi")]
+            #[cfg(any(target_arch = "aarch64", feature = "acpi"))]
             numa_nodes.clone(),
             &activate_evt,
             force_iommu,
@@ -606,7 +608,7 @@ impl Vm {
             vm_ops,
             #[cfg(feature = "tdx")]
             tdx_enabled,
-            #[cfg(feature = "acpi")]
+            #[cfg(any(target_arch = "aarch64", feature = "acpi"))]
             &numa_nodes,
         )
         .map_err(Error::CpuManager)?;
@@ -644,7 +646,7 @@ impl Vm {
             vm,
             #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
             saved_clock: _saved_clock,
-            #[cfg(feature = "acpi")]
+            #[cfg(any(target_arch = "aarch64", feature = "acpi"))]
             numa_nodes,
             seccomp_action: seccomp_action.clone(),
             exit_evt,
@@ -653,7 +655,7 @@ impl Vm {
         })
     }

-    #[cfg(feature = "acpi")]
+    #[cfg(any(target_arch = "aarch64", feature = "acpi"))]
     fn create_numa_nodes(
         configs: Option<Vec<NumaConfig>>,
         memory_manager: &Arc<Mutex<MemoryManager>>,
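
The diff ends mid-signature. For orientation, a hedged sketch of what a
create_numa_nodes with this shape plausibly does, with simplified stand-in
types and the MemoryManager validation and error handling elided: fold the
user-supplied NumaConfig entries into the BTreeMap keyed by guest NUMA node
id, which both the ACPI tables and the device tree generation can then
consume.

    use std::collections::BTreeMap;

    // Hypothetical simplified mirrors of the vmm types.
    struct NumaConfig {
        guest_numa_id: u32,
        cpus: Option<Vec<u8>>,
        memory_zones: Option<Vec<String>>,
    }

    #[derive(Default)]
    struct NumaNode {
        cpus: Vec<u8>,
        memory_zones: Vec<String>,
    }
    type NumaNodes = BTreeMap<u32, NumaNode>;

    fn create_numa_nodes(configs: Option<Vec<NumaConfig>>) -> NumaNodes {
        let mut numa_nodes = NumaNodes::new();
        if let Some(configs) = configs {
            for config in configs {
                // One entry per guest node id; the BTreeMap keeps node ids
                // ordered for whichever backend (ACPI or device tree)
                // serializes them.
                let node = numa_nodes.entry(config.guest_numa_id).or_default();
                if let Some(cpus) = config.cpus {
                    node.cpus.extend(cpus);
                }
                if let Some(zones) = config.memory_zones {
                    node.memory_zones.extend(zones);
                }
            }
        }
        numa_nodes
    }

    fn main() {
        let configs = vec![NumaConfig {
            guest_numa_id: 0,
            cpus: Some(vec![0, 1]),
            memory_zones: Some(vec!["mem0".to_string()]),
        }];
        let nodes = create_numa_nodes(Some(configs));
        let node = nodes.get(&0).unwrap();
        assert_eq!(node.cpus, vec![0, 1]);
        assert_eq!(node.memory_zones, vec!["mem0".to_string()]);
    }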