Mirror of https://github.com/cloud-hypervisor/cloud-hypervisor.git
vmm: Move NUMA node data structures to arch
This is to make sure the NUMA node data structures can be accessed from both the `vmm` and `arch` crates.

Signed-off-by: Henry Wang <Henry.Wang@arm.com>
Parent: 20aa811de7
Commit: 165364e08b
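The hunks below move the `NumaNode` struct and the `NumaNodes` alias out of the `vmm` crate into `arch`, turning private fields plus accessor methods into public fields. As a rough illustration of what consumers gain (this helper is hypothetical and not part of the patch, and it assumes the in-tree `arch` crate is a dependency):

```rust
// Hypothetical helper, not part of this commit: with the types exported
// from `arch`, vmm-side code can take the shared NUMA map directly.
use arch::NumaNodes;

fn total_assigned_cpus(numa_nodes: &NumaNodes) -> usize {
    // `cpus` is now a public field rather than a `cpus()` accessor.
    numa_nodes.values().map(|node| node.cpus.len()).sum()
}
```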
@@ -11,10 +11,15 @@
 #[macro_use]
 extern crate log;
 
+#[cfg(target_arch = "x86_64")]
+use crate::x86_64::SgxEpcSection;
+use std::collections::BTreeMap;
 use std::fmt;
 use std::result;
+use std::sync::Arc;
 
 type GuestMemoryMmap = vm_memory::GuestMemoryMmap<vm_memory::bitmap::AtomicBitmap>;
+type GuestRegionMmap = vm_memory::GuestRegionMmap<vm_memory::bitmap::AtomicBitmap>;
 
 /// Type for returning error code.
 #[derive(Debug)]
@@ -96,6 +101,19 @@ fn pagesize() -> usize {
     unsafe { libc::sysconf(libc::_SC_PAGESIZE) as usize }
 }
 
+#[derive(Clone, Default)]
+pub struct NumaNode {
+    pub memory_regions: Vec<Arc<GuestRegionMmap>>,
+    pub hotplug_regions: Vec<Arc<GuestRegionMmap>>,
+    pub cpus: Vec<u8>,
+    pub distances: BTreeMap<u32, u8>,
+    pub memory_zones: Vec<String>,
+    #[cfg(target_arch = "x86_64")]
+    pub sgx_epc_sections: Vec<SgxEpcSection>,
+}
+
+pub type NumaNodes = BTreeMap<u32, NumaNode>;
+
 /// Type for passing information about the initramfs in the guest memory.
 pub struct InitramfsConfig {
     /// Load address of initramfs in guest memory
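With the struct public and `Default`-derived in `arch` (presumably the crate's lib.rs, going by the surrounding context), a topology can be assembled by filling the fields directly. A minimal sketch; the node IDs, CPU lists, and distance values are made up for illustration, and the region vectors are left empty since they are populated from the memory manager at runtime:

```rust
// Sketch: building a two-node NUMA description with the public fields.
// Assumes the in-tree `arch` crate is available as a dependency.
use std::collections::BTreeMap;

use arch::{NumaNode, NumaNodes};

fn example_topology() -> NumaNodes {
    let node0 = NumaNode {
        cpus: vec![0, 1],
        distances: BTreeMap::from([(1, 20)]),
        memory_zones: vec!["mem0".to_string()],
        ..Default::default()
    };

    let node1 = NumaNode {
        cpus: vec![2, 3],
        distances: BTreeMap::from([(0, 20)]),
        memory_zones: vec!["mem1".to_string()],
        ..Default::default()
    };

    let mut nodes: NumaNodes = BTreeMap::new();
    nodes.insert(0, node0);
    nodes.insert(1, node1);
    nodes
}
```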
@@ -5,7 +5,6 @@
 use crate::cpu::CpuManager;
 use crate::device_manager::DeviceManager;
 use crate::memory_manager::MemoryManager;
-use crate::vm::NumaNodes;
 use crate::{GuestMemoryMmap, GuestRegionMmap};
 use acpi_tables::sdt::GenericAddress;
 use acpi_tables::{aml::Aml, rsdp::Rsdp, sdt::Sdt};
@@ -13,6 +12,8 @@ use acpi_tables::{aml::Aml, rsdp::Rsdp, sdt::Sdt};
 use arch::aarch64::DeviceInfoForFdt;
 #[cfg(target_arch = "aarch64")]
 use arch::DeviceType;
+#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
+use arch::NumaNodes;
 
 use bitflags::bitflags;
 use std::sync::{Arc, Mutex};
@@ -250,7 +251,7 @@ fn create_srat_table(numa_nodes: &NumaNodes) -> Sdt {
     for (node_id, node) in numa_nodes.iter() {
         let proximity_domain = *node_id as u32;
 
-        for region in node.memory_regions() {
+        for region in &node.memory_regions {
             srat.append(MemoryAffinity::from_region(
                 region,
                 proximity_domain,
@@ -258,7 +259,7 @@ fn create_srat_table(numa_nodes: &NumaNodes) -> Sdt {
             ))
         }
 
-        for region in node.hotplug_regions() {
+        for region in &node.hotplug_regions {
             srat.append(MemoryAffinity::from_region(
                 region,
                 proximity_domain,
@@ -267,7 +268,7 @@ fn create_srat_table(numa_nodes: &NumaNodes) -> Sdt {
         }
 
         #[cfg(target_arch = "x86_64")]
-        for section in node.sgx_epc_sections() {
+        for section in &node.sgx_epc_sections {
             srat.append(MemoryAffinity::from_range(
                 section.start().raw_value(),
                 section.size(),
@@ -276,7 +277,7 @@ fn create_srat_table(numa_nodes: &NumaNodes) -> Sdt {
             ))
         }
 
-        for cpu in node.cpus() {
+        for cpu in &node.cpus {
            let x2apic_id = *cpu as u32;

            // Flags
@@ -315,7 +316,7 @@ fn create_slit_table(numa_nodes: &NumaNodes) -> Sdt {
 
     let existing_nodes: Vec<u32> = numa_nodes.keys().cloned().collect();
     for (node_id, node) in numa_nodes.iter() {
-        let distances = node.distances();
+        let distances = &node.distances;
         for i in existing_nodes.iter() {
            let dist: u8 = if *node_id == *i {
                10
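The SLIT loop above now reads the `distances` map through the public field. The overall pattern is a matrix fill where the diagonal is the canonical local distance of 10; a standalone sketch, with the fallback value of 20 for unspecified pairs being an assumption rather than something shown in this hunk:

```rust
// Sketch: the distance-matrix shape that create_slit_table fills in,
// flattened to a Vec<Vec<u8>> for illustration.
use arch::NumaNodes;

fn distance_matrix(numa_nodes: &NumaNodes) -> Vec<Vec<u8>> {
    let ids: Vec<u32> = numa_nodes.keys().cloned().collect();
    ids.iter()
        .map(|from| {
            let node = &numa_nodes[from];
            ids.iter()
                .map(|to| {
                    if from == to {
                        10 // a node is always at distance 10 from itself
                    } else {
                        // Assumed fallback when no distance was configured.
                        node.distances.get(to).copied().unwrap_or(20)
                    }
                })
                .collect()
        })
        .collect()
}
```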
@@ -17,14 +17,14 @@ use crate::memory_manager::MemoryManager;
 use crate::seccomp_filters::{get_seccomp_filter, Thread};
 #[cfg(target_arch = "x86_64")]
 use crate::vm::physical_bits;
-#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
-use crate::vm::NumaNodes;
 use crate::GuestMemoryMmap;
 use crate::CPU_MANAGER_SNAPSHOT_ID;
 #[cfg(feature = "acpi")]
 use acpi_tables::{aml, aml::Aml, sdt::Sdt};
 use anyhow::anyhow;
 use arch::EntryPoint;
+#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
+use arch::NumaNodes;
 use devices::interrupt_controller::InterruptController;
 #[cfg(target_arch = "aarch64")]
 use hypervisor::kvm::kvm_bindings;
@@ -579,7 +579,7 @@ impl CpuManager {
        let proximity_domain_per_cpu: BTreeMap<u8, u32> = {
            let mut cpu_list = Vec::new();
            for (proximity_domain, numa_node) in numa_nodes.iter() {
-                for cpu in numa_node.cpus().iter() {
+                for cpu in numa_node.cpus.iter() {
                    cpu_list.push((*cpu, *proximity_domain))
                }
            }
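The `proximity_domain_per_cpu` construction above inverts the node-to-CPUs mapping. A standalone sketch of the same inversion written over the public `cpus` field (equivalent in effect, not taken verbatim from the patch):

```rust
// Sketch: invert NumaNodes (node -> CPU list) into a per-CPU proximity map.
use std::collections::BTreeMap;

use arch::NumaNodes;

fn proximity_domain_per_cpu(numa_nodes: &NumaNodes) -> BTreeMap<u8, u32> {
    numa_nodes
        .iter()
        .flat_map(|(proximity_domain, node)| {
            node.cpus.iter().map(move |cpu| (*cpu, *proximity_domain))
        })
        .collect()
}
```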
@@ -22,8 +22,6 @@ use crate::interrupt::LegacyUserspaceInterruptManager;
 #[cfg(feature = "acpi")]
 use crate::memory_manager::MEMORY_MANAGER_ACPI_SIZE;
 use crate::memory_manager::{Error as MemoryManagerError, MemoryManager};
-#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
-use crate::vm::NumaNodes;
 use crate::GuestRegionMmap;
 use crate::PciDeviceInfo;
 use crate::{device_node, DEVICE_MANAGER_SNAPSHOT_ID};
@@ -34,6 +32,8 @@ use anyhow::anyhow;
 use arch::layout;
 #[cfg(target_arch = "x86_64")]
 use arch::layout::{APIC_START, IOAPIC_SIZE, IOAPIC_START};
+#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
+use arch::NumaNodes;
 #[cfg(target_arch = "aarch64")]
 use arch::{DeviceType, MmioDeviceInfo};
 use block_util::{
@@ -3689,10 +3689,7 @@ impl DeviceManager {
 #[cfg(any(target_arch = "aarch64", feature = "acpi"))]
 fn numa_node_id_from_memory_zone_id(numa_nodes: &NumaNodes, memory_zone_id: &str) -> Option<u32> {
     for (numa_node_id, numa_node) in numa_nodes.iter() {
-        if numa_node
-            .memory_zones()
-            .contains(&memory_zone_id.to_owned())
-        {
+        if numa_node.memory_zones.contains(&memory_zone_id.to_owned()) {
             return Some(*numa_node_id);
         }
     }
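For comparison, the same zone lookup can be written with iterator combinators over the public `memory_zones` field; this is only an equivalent sketch, not what the patch does:

```rust
// Sketch: find the NUMA node whose memory_zones list contains the given id.
use arch::NumaNodes;

fn numa_node_id_for_zone(numa_nodes: &NumaNodes, memory_zone_id: &str) -> Option<u32> {
    numa_nodes
        .iter()
        .find(|(_, node)| {
            node.memory_zones
                .iter()
                .any(|zone| zone.as_str() == memory_zone_id)
        })
        .map(|(id, _)| *id)
}
```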
@@ -25,7 +25,7 @@ use crate::device_tree::DeviceTree;
 use crate::memory_manager::{Error as MemoryManagerError, MemoryManager};
 use crate::migration::{get_vm_snapshot, url_to_path, VM_SNAPSHOT_FILE};
 use crate::seccomp_filters::{get_seccomp_filter, Thread};
-use crate::{GuestMemoryMmap, GuestRegionMmap};
+use crate::GuestMemoryMmap;
 use crate::{
     PciDeviceInfo, CPU_MANAGER_SNAPSHOT_ID, DEVICE_MANAGER_SNAPSHOT_ID, MEMORY_MANAGER_SNAPSHOT_ID,
 };
@@ -33,9 +33,9 @@ use anyhow::anyhow;
 use arch::get_host_cpu_phys_bits;
 #[cfg(feature = "tdx")]
 use arch::x86_64::tdx::TdvfSection;
-#[cfg(target_arch = "x86_64")]
-use arch::x86_64::SgxEpcSection;
 use arch::EntryPoint;
+#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
+use arch::{NumaNode, NumaNodes};
 use devices::AcpiNotificationFlags;
 use hypervisor::vm::{HypervisorVmError, VmmOps};
 use linux_loader::cmdline::Cmdline;
@@ -266,46 +266,6 @@ pub enum Error {
 }
 pub type Result<T> = result::Result<T, Error>;
 
-#[derive(Clone, Default)]
-pub struct NumaNode {
-    memory_regions: Vec<Arc<GuestRegionMmap>>,
-    hotplug_regions: Vec<Arc<GuestRegionMmap>>,
-    cpus: Vec<u8>,
-    distances: BTreeMap<u32, u8>,
-    memory_zones: Vec<String>,
-    #[cfg(target_arch = "x86_64")]
-    sgx_epc_sections: Vec<SgxEpcSection>,
-}
-
-impl NumaNode {
-    pub fn memory_regions(&self) -> &Vec<Arc<GuestRegionMmap>> {
-        &self.memory_regions
-    }
-
-    pub fn hotplug_regions(&self) -> &Vec<Arc<GuestRegionMmap>> {
-        &self.hotplug_regions
-    }
-
-    pub fn cpus(&self) -> &Vec<u8> {
-        &self.cpus
-    }
-
-    pub fn distances(&self) -> &BTreeMap<u32, u8> {
-        &self.distances
-    }
-
-    pub fn memory_zones(&self) -> &Vec<String> {
-        &self.memory_zones
-    }
-
-    #[cfg(target_arch = "x86_64")]
-    pub fn sgx_epc_sections(&self) -> &Vec<SgxEpcSection> {
-        &self.sgx_epc_sections
-    }
-}
-
-pub type NumaNodes = BTreeMap<u32, NumaNode>;
-
 #[derive(Clone, Copy, Debug, Deserialize, Serialize, PartialEq)]
 pub enum VmState {
     Created,