vmm: Switch order between DeviceManager and CpuManager creation

The CpuManager is now created before the DeviceManager. This is
preliminary work for creating the vCPUs ahead of the DeviceManager,
which is needed to ensure both x86_64 and aarch64 follow the same
initialization sequence.

It's important to note that the optimization for faster PIO accesses to
the PCI config space had to be removed, because the VmOps is now
required by the CpuManager (and, by extension, by each Vcpu). Since the
PciConfigIo is created as part of the DeviceManager, there was no clean
way to reorder the initialization so that PciConfigIo could be provided
early enough.

Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
This commit is contained in:
Sebastien Boeuf 2022-11-03 16:10:18 +01:00
parent d828fcd4ed
commit ec01062ada
3 changed files with 89 additions and 66 deletions

View File

@ -18,7 +18,6 @@ use crate::coredump::{
GuestDebuggableError, NoteDescType, X86_64ElfPrStatus, X86_64UserRegs, COREDUMP_NAME_SIZE,
NT_PRSTATUS,
};
use crate::device_manager::DeviceManager;
#[cfg(feature = "guest_debug")]
use crate::gdb::{get_raw_tid, Debuggable, DebuggableError};
use crate::memory_manager::MemoryManager;
@ -597,7 +596,6 @@ impl CpuManager {
#[allow(clippy::too_many_arguments)]
pub fn new(
config: &CpusConfig,
device_manager: &Arc<Mutex<DeviceManager>>,
memory_manager: &Arc<Mutex<MemoryManager>>,
vm: Arc<dyn hypervisor::Vm>,
exit_evt: EventFd,
@ -670,8 +668,6 @@ impl CpuManager {
}
}
let device_manager = device_manager.lock().unwrap();
let proximity_domain_per_cpu: BTreeMap<u8, u32> = {
let mut cpu_list = Vec::new();
for (proximity_domain, numa_node) in numa_nodes.iter() {
@ -698,23 +694,10 @@ impl CpuManager {
#[cfg(not(feature = "tdx"))]
let dynamic = true;
let acpi_address = if dynamic {
Some(
device_manager
.allocator()
.lock()
.unwrap()
.allocate_platform_mmio_addresses(None, CPU_MANAGER_ACPI_SIZE as u64, None)
.ok_or(Error::AllocateMmmioAddress)?,
)
} else {
None
};
let cpu_manager = Arc::new(Mutex::new(CpuManager {
Ok(Arc::new(Mutex::new(CpuManager {
hypervisor_type,
config: config.clone(),
interrupt_controller: device_manager.interrupt_controller().clone(),
interrupt_controller: None,
vm_memory: guest_memory,
#[cfg(target_arch = "x86_64")]
cpuid,
@ -730,24 +713,11 @@ impl CpuManager {
vcpus: Vec::with_capacity(usize::from(config.max_vcpus)),
seccomp_action,
vm_ops,
acpi_address,
acpi_address: None,
proximity_domain_per_cpu,
affinity,
dynamic,
}));
if let Some(acpi_address) = acpi_address {
device_manager
.mmio_bus()
.insert(
cpu_manager.clone(),
acpi_address.0,
CPU_MANAGER_ACPI_SIZE as u64,
)
.map_err(Error::BusError)?;
}
Ok(cpu_manager)
})))
}
fn create_vcpu(
@ -1676,6 +1646,17 @@ impl CpuManager {
Ok(descaddr)
}
pub(crate) fn set_acpi_address(&mut self, acpi_address: GuestAddress) {
self.acpi_address = Some(acpi_address);
}
pub(crate) fn set_interrupt_controller(
&mut self,
interrupt_controller: Arc<Mutex<dyn InterruptController>>,
) {
self.interrupt_controller = Some(interrupt_controller);
}
}
struct Cpu {

View File

@ -13,6 +13,7 @@ use crate::config::{
ConsoleOutputMode, DeviceConfig, DiskConfig, FsConfig, NetConfig, PmemConfig, UserDeviceConfig,
VdpaConfig, VhostMode, VmConfig, VsockConfig,
};
use crate::cpu::{CpuManager, CPU_MANAGER_ACPI_SIZE};
use crate::device_tree::{DeviceNode, DeviceTree};
use crate::interrupt::LegacyUserspaceInterruptManager;
use crate::interrupt::MsiInterruptManager;
@ -849,6 +850,9 @@ pub struct DeviceManager {
// Memory Manager
memory_manager: Arc<Mutex<MemoryManager>>,
// CPU Manager
cpu_manager: Arc<Mutex<CpuManager>>,
// The virtio devices on the system
virtio_devices: Vec<MetaVirtioDevice>,
@ -951,12 +955,15 @@ pub struct DeviceManager {
impl DeviceManager {
#[allow(clippy::too_many_arguments)]
pub fn new(
#[cfg(target_arch = "x86_64")] io_bus: Arc<Bus>,
mmio_bus: Arc<Bus>,
hypervisor_type: HypervisorType,
vm: Arc<dyn hypervisor::Vm>,
config: Arc<Mutex<VmConfig>>,
memory_manager: Arc<Mutex<MemoryManager>>,
exit_evt: &EventFd,
reset_evt: &EventFd,
cpu_manager: Arc<Mutex<CpuManager>>,
exit_evt: EventFd,
reset_evt: EventFd,
seccomp_action: SeccompAction,
numa_nodes: NumaNodes,
activate_evt: &EventFd,
@ -965,6 +972,7 @@ impl DeviceManager {
boot_id_list: BTreeSet<String>,
timestamp: Instant,
snapshot: Option<Snapshot>,
dynamic: bool,
) -> DeviceManagerResult<Arc<Mutex<Self>>> {
trace_scoped!("DeviceManager::new");
@ -997,8 +1005,8 @@ impl DeviceManager {
let address_manager = Arc::new(AddressManager {
allocator: memory_manager.lock().unwrap().allocator(),
#[cfg(target_arch = "x86_64")]
io_bus: Arc::new(Bus::new()),
mmio_bus: Arc::new(Bus::new()),
io_bus,
mmio_bus,
vm: vm.clone(),
device_tree: Arc::clone(&device_tree),
pci_mmio_allocators,
@ -1044,6 +1052,26 @@ impl DeviceManager {
)?);
}
if dynamic {
let acpi_address = address_manager
.allocator
.lock()
.unwrap()
.allocate_platform_mmio_addresses(None, CPU_MANAGER_ACPI_SIZE as u64, None)
.ok_or(DeviceManagerError::AllocateMmioAddress)?;
address_manager
.mmio_bus
.insert(
cpu_manager.clone(),
acpi_address.0,
CPU_MANAGER_ACPI_SIZE as u64,
)
.map_err(DeviceManagerError::BusError)?;
cpu_manager.lock().unwrap().set_acpi_address(acpi_address);
}
let device_manager = DeviceManager {
hypervisor_type,
address_manager: Arc::clone(&address_manager),
@ -1054,6 +1082,7 @@ impl DeviceManager {
ged_notification_device: None,
config,
memory_manager,
cpu_manager,
virtio_devices: Vec::new(),
bus_devices: Vec::new(),
device_id_cnt: Wrapping(0),
@ -1066,8 +1095,8 @@ impl DeviceManager {
iommu_attached_devices: None,
pci_segments,
device_tree,
exit_evt: exit_evt.try_clone().map_err(DeviceManagerError::EventFd)?,
reset_evt: reset_evt.try_clone().map_err(DeviceManagerError::EventFd)?,
exit_evt,
reset_evt,
#[cfg(target_arch = "aarch64")]
id_to_dev_info: HashMap::new(),
seccomp_action,
@ -1137,6 +1166,11 @@ impl DeviceManager {
let interrupt_controller = self.add_interrupt_controller()?;
self.cpu_manager
.lock()
.unwrap()
.set_interrupt_controller(interrupt_controller.clone());
// Now we can create the legacy interrupt manager, which needs the freshly
// formed IOAPIC device.
let legacy_interrupt_manager: Arc<

View File

@ -508,45 +508,25 @@ impl Vm {
#[cfg(not(feature = "guest_debug"))]
let stop_on_boot = false;
let device_manager = DeviceManager::new(
hypervisor.hypervisor_type(),
vm.clone(),
config.clone(),
memory_manager.clone(),
&exit_evt,
&reset_evt,
seccomp_action.clone(),
numa_nodes.clone(),
&activate_evt,
force_iommu,
restoring,
boot_id_list,
timestamp,
snapshot_from_id(snapshot, DEVICE_MANAGER_SNAPSHOT_ID),
)
.map_err(Error::DeviceManager)?;
let memory = memory_manager.lock().unwrap().guest_memory();
#[cfg(target_arch = "x86_64")]
let io_bus = Arc::clone(device_manager.lock().unwrap().io_bus());
let mmio_bus = Arc::clone(device_manager.lock().unwrap().mmio_bus());
let io_bus = Arc::new(Bus::new());
let mmio_bus = Arc::new(Bus::new());
let vm_ops: Arc<dyn VmOps> = Arc::new(VmOpsHandler {
memory,
#[cfg(target_arch = "x86_64")]
io_bus,
mmio_bus,
io_bus: io_bus.clone(),
mmio_bus: mmio_bus.clone(),
});
let exit_evt_clone = exit_evt.try_clone().map_err(Error::EventFdClone)?;
let cpus_config = { &config.lock().unwrap().cpus.clone() };
let cpu_manager = cpu::CpuManager::new(
cpus_config,
&device_manager,
&memory_manager,
vm.clone(),
exit_evt_clone,
reset_evt,
exit_evt.try_clone().map_err(Error::EventFdClone)?,
reset_evt.try_clone().map_err(Error::EventFdClone)?,
#[cfg(feature = "guest_debug")]
vm_debug_evt,
hypervisor.clone(),
@ -558,6 +538,34 @@ impl Vm {
)
.map_err(Error::CpuManager)?;
#[cfg(feature = "tdx")]
let dynamic = !tdx_enabled;
#[cfg(not(feature = "tdx"))]
let dynamic = true;
let device_manager = DeviceManager::new(
#[cfg(target_arch = "x86_64")]
io_bus,
mmio_bus,
hypervisor.hypervisor_type(),
vm.clone(),
config.clone(),
memory_manager.clone(),
cpu_manager.clone(),
exit_evt.try_clone().map_err(Error::EventFdClone)?,
reset_evt,
seccomp_action.clone(),
numa_nodes.clone(),
&activate_evt,
force_iommu,
restoring,
boot_id_list,
timestamp,
snapshot_from_id(snapshot, DEVICE_MANAGER_SNAPSHOT_ID),
dynamic,
)
.map_err(Error::DeviceManager)?;
// SAFETY: trivially safe
let on_tty = unsafe { libc::isatty(libc::STDIN_FILENO) } != 0;