From 165364e08bffbf6b4dac31955f2fc6630e86979e Mon Sep 17 00:00:00 2001
From: Henry Wang <Henry.Wang@arm.com>
Date: Fri, 6 Aug 2021 19:28:42 -0400
Subject: [PATCH] vmm: Move NUMA node data structures to `arch`

This is to make sure the NUMA node data structures can be accessed
from both the `vmm` crate and the `arch` crate.

Signed-off-by: Henry Wang <Henry.Wang@arm.com>
---
 arch/src/lib.rs           | 18 +++++++++++++++
 vmm/src/acpi.rs           | 13 ++++++-----
 vmm/src/cpu.rs            |  6 ++---
 vmm/src/device_manager.rs |  9 +++-----
 vmm/src/vm.rs             | 46 +++------------------------------------
 5 files changed, 34 insertions(+), 58 deletions(-)

diff --git a/arch/src/lib.rs b/arch/src/lib.rs
index 727b38dda..fb5c80705 100644
--- a/arch/src/lib.rs
+++ b/arch/src/lib.rs
@@ -11,10 +11,15 @@
 #[macro_use]
 extern crate log;
 
+#[cfg(target_arch = "x86_64")]
+use crate::x86_64::SgxEpcSection;
+use std::collections::BTreeMap;
 use std::fmt;
 use std::result;
+use std::sync::Arc;
 
 type GuestMemoryMmap = vm_memory::GuestMemoryMmap;
+type GuestRegionMmap = vm_memory::GuestRegionMmap;
 
 /// Type for returning error code.
 #[derive(Debug)]
@@ -96,6 +101,19 @@ fn pagesize() -> usize {
     unsafe { libc::sysconf(libc::_SC_PAGESIZE) as usize }
 }
 
+#[derive(Clone, Default)]
+pub struct NumaNode {
+    pub memory_regions: Vec<Arc<GuestRegionMmap>>,
+    pub hotplug_regions: Vec<Arc<GuestRegionMmap>>,
+    pub cpus: Vec<u8>,
+    pub distances: BTreeMap<u32, u8>,
+    pub memory_zones: Vec<String>,
+    #[cfg(target_arch = "x86_64")]
+    pub sgx_epc_sections: Vec<SgxEpcSection>,
+}
+
+pub type NumaNodes = BTreeMap<u32, NumaNode>;
+
 /// Type for passing information about the initramfs in the guest memory.
 pub struct InitramfsConfig {
     /// Load address of initramfs in guest memory
diff --git a/vmm/src/acpi.rs b/vmm/src/acpi.rs
index 551927d42..9680cee85 100644
--- a/vmm/src/acpi.rs
+++ b/vmm/src/acpi.rs
@@ -5,7 +5,6 @@
 use crate::cpu::CpuManager;
 use crate::device_manager::DeviceManager;
 use crate::memory_manager::MemoryManager;
-use crate::vm::NumaNodes;
 use crate::{GuestMemoryMmap, GuestRegionMmap};
 use acpi_tables::sdt::GenericAddress;
 use acpi_tables::{aml::Aml, rsdp::Rsdp, sdt::Sdt};
@@ -13,6 +12,8 @@ use acpi_tables::{aml::Aml, rsdp::Rsdp, sdt::Sdt};
 #[cfg(target_arch = "aarch64")]
 use arch::aarch64::DeviceInfoForFdt;
 #[cfg(target_arch = "aarch64")]
 use arch::DeviceType;
+#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
+use arch::NumaNodes;
 use bitflags::bitflags;
 use std::sync::{Arc, Mutex};
@@ -250,7 +251,7 @@ fn create_srat_table(numa_nodes: &NumaNodes) -> Sdt {
     for (node_id, node) in numa_nodes.iter() {
         let proximity_domain = *node_id as u32;
 
-        for region in node.memory_regions() {
+        for region in &node.memory_regions {
             srat.append(MemoryAffinity::from_region(
                 region,
                 proximity_domain,
@@ -258,7 +259,7 @@ fn create_srat_table(numa_nodes: &NumaNodes) -> Sdt {
             ))
         }
 
-        for region in node.hotplug_regions() {
+        for region in &node.hotplug_regions {
             srat.append(MemoryAffinity::from_region(
                 region,
                 proximity_domain,
@@ -267,7 +268,7 @@ fn create_srat_table(numa_nodes: &NumaNodes) -> Sdt {
         }
         #[cfg(target_arch = "x86_64")]
-        for section in node.sgx_epc_sections() {
+        for section in &node.sgx_epc_sections {
             srat.append(MemoryAffinity::from_range(
                 section.start().raw_value(),
                 section.size(),
                 proximity_domain,
@@ -276,7 +277,7 @@ fn create_srat_table(numa_nodes: &NumaNodes) -> Sdt {
             ))
         }
 
-        for cpu in node.cpus() {
+        for cpu in &node.cpus {
             let x2apic_id = *cpu as u32;
 
             // Flags
@@ -315,7 +316,7 @@ fn create_slit_table(numa_nodes: &NumaNodes) -> Sdt {
     let existing_nodes: Vec<u32> = numa_nodes.keys().cloned().collect();
 
     for (node_id, node) in numa_nodes.iter() {
-        let distances = node.distances();
+        let distances = &node.distances;
         for i in existing_nodes.iter() {
             let dist: u8 = if *node_id == *i {
                 10
diff --git a/vmm/src/cpu.rs b/vmm/src/cpu.rs
index c1bb67007..ca8bb5400 100644
--- a/vmm/src/cpu.rs
+++ b/vmm/src/cpu.rs
@@ -17,14 +17,14 @@ use crate::memory_manager::MemoryManager;
 use crate::seccomp_filters::{get_seccomp_filter, Thread};
 #[cfg(target_arch = "x86_64")]
 use crate::vm::physical_bits;
-#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
-use crate::vm::NumaNodes;
 use crate::GuestMemoryMmap;
 use crate::CPU_MANAGER_SNAPSHOT_ID;
 #[cfg(feature = "acpi")]
 use acpi_tables::{aml, aml::Aml, sdt::Sdt};
 use anyhow::anyhow;
 use arch::EntryPoint;
+#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
+use arch::NumaNodes;
 use devices::interrupt_controller::InterruptController;
 #[cfg(target_arch = "aarch64")]
 use hypervisor::kvm::kvm_bindings;
@@ -579,7 +579,7 @@ impl CpuManager {
         let proximity_domain_per_cpu: BTreeMap<u8, u32> = {
             let mut cpu_list = Vec::new();
             for (proximity_domain, numa_node) in numa_nodes.iter() {
-                for cpu in numa_node.cpus().iter() {
+                for cpu in numa_node.cpus.iter() {
                     cpu_list.push((*cpu, *proximity_domain))
                 }
             }
diff --git a/vmm/src/device_manager.rs b/vmm/src/device_manager.rs
index 8eaede6df..de213767c 100644
--- a/vmm/src/device_manager.rs
+++ b/vmm/src/device_manager.rs
@@ -22,8 +22,6 @@ use crate::interrupt::LegacyUserspaceInterruptManager;
 #[cfg(feature = "acpi")]
 use crate::memory_manager::MEMORY_MANAGER_ACPI_SIZE;
 use crate::memory_manager::{Error as MemoryManagerError, MemoryManager};
-#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
-use crate::vm::NumaNodes;
 use crate::GuestRegionMmap;
 use crate::PciDeviceInfo;
 use crate::{device_node, DEVICE_MANAGER_SNAPSHOT_ID};
@@ -34,6 +32,8 @@ use anyhow::anyhow;
 use arch::layout;
 #[cfg(target_arch = "x86_64")]
 use arch::layout::{APIC_START, IOAPIC_SIZE, IOAPIC_START};
+#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
+use arch::NumaNodes;
 #[cfg(target_arch = "aarch64")]
 use arch::{DeviceType, MmioDeviceInfo};
 use block_util::{
@@ -3689,10 +3689,7 @@ impl DeviceManager {
 #[cfg(any(target_arch = "aarch64", feature = "acpi"))]
 fn numa_node_id_from_memory_zone_id(numa_nodes: &NumaNodes, memory_zone_id: &str) -> Option<u32> {
     for (numa_node_id, numa_node) in numa_nodes.iter() {
-        if numa_node
-            .memory_zones()
-            .contains(&memory_zone_id.to_owned())
-        {
+        if numa_node.memory_zones.contains(&memory_zone_id.to_owned()) {
             return Some(*numa_node_id);
         }
     }
diff --git a/vmm/src/vm.rs b/vmm/src/vm.rs
index 954096a40..127750501 100644
--- a/vmm/src/vm.rs
+++ b/vmm/src/vm.rs
@@ -25,7 +25,7 @@ use crate::device_tree::DeviceTree;
 use crate::memory_manager::{Error as MemoryManagerError, MemoryManager};
 use crate::migration::{get_vm_snapshot, url_to_path, VM_SNAPSHOT_FILE};
 use crate::seccomp_filters::{get_seccomp_filter, Thread};
-use crate::{GuestMemoryMmap, GuestRegionMmap};
+use crate::GuestMemoryMmap;
 use crate::{
     PciDeviceInfo, CPU_MANAGER_SNAPSHOT_ID, DEVICE_MANAGER_SNAPSHOT_ID, MEMORY_MANAGER_SNAPSHOT_ID,
 };
@@ -33,9 +33,9 @@ use anyhow::anyhow;
 use arch::get_host_cpu_phys_bits;
 #[cfg(feature = "tdx")]
 use arch::x86_64::tdx::TdvfSection;
-#[cfg(target_arch = "x86_64")]
-use arch::x86_64::SgxEpcSection;
 use arch::EntryPoint;
+#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
+use arch::{NumaNode, NumaNodes};
 use devices::AcpiNotificationFlags;
 use hypervisor::vm::{HypervisorVmError, VmmOps};
 use linux_loader::cmdline::Cmdline;
@@ -266,46 +266,6 @@ pub enum Error {
 }
 pub type Result<T> = result::Result<T, Error>;
 
-#[derive(Clone, Default)]
-pub struct NumaNode {
-    memory_regions: Vec<Arc<GuestRegionMmap>>,
-    hotplug_regions: Vec<Arc<GuestRegionMmap>>,
-    cpus: Vec<u8>,
-    distances: BTreeMap<u32, u8>,
-    memory_zones: Vec<String>,
-    #[cfg(target_arch = "x86_64")]
-    sgx_epc_sections: Vec<SgxEpcSection>,
-}
-
-impl NumaNode {
-    pub fn memory_regions(&self) -> &Vec<Arc<GuestRegionMmap>> {
-        &self.memory_regions
-    }
-
-    pub fn hotplug_regions(&self) -> &Vec<Arc<GuestRegionMmap>> {
-        &self.hotplug_regions
-    }
-
-    pub fn cpus(&self) -> &Vec<u8> {
-        &self.cpus
-    }
-
-    pub fn distances(&self) -> &BTreeMap<u32, u8> {
-        &self.distances
-    }
-
-    pub fn memory_zones(&self) -> &Vec<String> {
-        &self.memory_zones
-    }
-
-    #[cfg(target_arch = "x86_64")]
-    pub fn sgx_epc_sections(&self) -> &Vec<SgxEpcSection> {
-        &self.sgx_epc_sections
-    }
-}
-
-pub type NumaNodes = BTreeMap<u32, NumaNode>;
-
 #[derive(Clone, Copy, Debug, Deserialize, Serialize, PartialEq)]
 pub enum VmState {
     Created,
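
For context, and not part of the patch itself: after this change the NUMA
data lives in `arch` as a plain struct with public fields, and callers such
as create_srat_table/create_slit_table read those fields directly instead of
going through the removed accessor methods. The standalone sketch below
illustrates that usage. It is a simplification, not code from the tree: the
trimmed-down NumaNode keeps only `cpus` and `distances` (the real one also
carries memory_regions, hotplug_regions, memory_zones and, on x86_64,
sgx_epc_sections), `slit_distances` is a hypothetical helper, and the
fallback distance of 20 for unlisted node pairs is an assumption, since the
else branch of the SLIT computation falls outside the quoted hunk.

use std::collections::BTreeMap;

// Trimmed-down stand-in for `arch::NumaNode`, with public fields as in
// the patch.
#[derive(Clone, Default)]
struct NumaNode {
    cpus: Vec<u8>,
    distances: BTreeMap<u32, u8>,
}

// Same shape as `arch::NumaNodes`: node id -> node.
type NumaNodes = BTreeMap<u32, NumaNode>;

// Hypothetical helper flattening a SLIT-style distance matrix: a node is
// at distance 10 from itself; otherwise the configured distance is used,
// with 20 assumed as the default when no distance was configured.
fn slit_distances(numa_nodes: &NumaNodes) -> Vec<u8> {
    let existing_nodes: Vec<u32> = numa_nodes.keys().cloned().collect();
    let mut matrix = Vec::new();

    for (node_id, node) in numa_nodes.iter() {
        // Field access replaces the removed accessor methods, i.e.
        // `&node.distances` rather than `node.distances()`.
        let distances = &node.distances;
        for i in existing_nodes.iter() {
            let dist: u8 = if *node_id == *i {
                10
            } else {
                *distances.get(i).unwrap_or(&20)
            };
            matrix.push(dist);
        }
    }

    matrix
}

fn main() {
    // Two nodes, two vCPUs each, 15 apart in both directions.
    let mut numa_nodes: NumaNodes = BTreeMap::new();
    numa_nodes.insert(
        0,
        NumaNode {
            cpus: vec![0, 1],
            distances: BTreeMap::from([(1, 15)]),
        },
    );
    numa_nodes.insert(
        1,
        NumaNode {
            cpus: vec![2, 3],
            distances: BTreeMap::from([(0, 15)]),
        },
    );

    // Row-major 2x2 matrix: node 0 -> {0, 1}, then node 1 -> {0, 1}.
    assert_eq!(slit_distances(&numa_nodes), vec![10, 15, 15, 10]);

    // Mirrors the proximity-domain-per-cpu pass in vmm/src/cpu.rs:
    // each vCPU id maps to the id of the node that owns it.
    let mut proximity_domain_per_cpu: BTreeMap<u8, u32> = BTreeMap::new();
    for (proximity_domain, numa_node) in numa_nodes.iter() {
        for cpu in numa_node.cpus.iter() {
            proximity_domain_per_cpu.insert(*cpu, *proximity_domain);
        }
    }
    assert_eq!(proximity_domain_per_cpu.get(&2), Some(&1));
}

Making the fields public in `arch` trades the accessor boilerplate for
direct reads, which is what lets both crates consume the same structures
without `arch` depending back on `vmm`.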