From 58d8206e2b94e351d509eb108705e0d3fd854854 Mon Sep 17 00:00:00 2001
From: Sebastien Boeuf
Date: Tue, 5 Oct 2021 17:53:08 +0200
Subject: [PATCH] migration: Use MemoryManager restore code path

Instead of creating a MemoryManager from scratch, let's reuse the same
code path used by snapshot/restore, so that memory regions are created
identically to what they were on the source VM.

Signed-off-by: Sebastien Boeuf
---
 Cargo.lock                   |  1 +
 arch/Cargo.toml              |  1 +
 arch/src/lib.rs              |  4 +++-
 vm-migration/src/protocol.rs |  4 ++--
 vmm/src/lib.rs               |  7 +++++--
 vmm/src/memory_manager.rs    | 38 ++++++++++++++++++++----------------
 vmm/src/vm.rs                | 11 +++++++++--
 7 files changed, 42 insertions(+), 24 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 184046dc5..375443d69 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -74,6 +74,7 @@ dependencies = [
  "linux-loader",
  "log",
  "serde",
+ "serde_derive",
  "thiserror",
  "versionize",
  "versionize_derive",
diff --git a/arch/Cargo.toml b/arch/Cargo.toml
index 31ede43dd..6f1919b19 100644
--- a/arch/Cargo.toml
+++ b/arch/Cargo.toml
@@ -18,6 +18,7 @@ libc = "0.2.103"
 linux-loader = { version = "0.4.0", features = ["elf", "bzimage", "pe"] }
 log = "0.4.14"
 serde = { version = "1.0.130", features = ["rc"] }
+serde_derive = "1.0.130"
 thiserror = "1.0.29"
 versionize = "0.1.6"
 versionize_derive = "0.1.4"
diff --git a/arch/src/lib.rs b/arch/src/lib.rs
index a02a583a9..74c51150d 100644
--- a/arch/src/lib.rs
+++ b/arch/src/lib.rs
@@ -10,6 +10,8 @@
 
 #[macro_use]
 extern crate log;
+#[macro_use]
+extern crate serde_derive;
 
 #[cfg(target_arch = "x86_64")]
 use crate::x86_64::SgxEpcSection;
@@ -57,7 +59,7 @@ pub enum Error {
 pub type Result<T> = result::Result<T, Error>;
 
 /// Type for memory region types.
-#[derive(Clone, Copy, PartialEq, Debug, Versionize)]
+#[derive(Clone, Copy, PartialEq, Debug, Serialize, Deserialize, Versionize)]
 pub enum RegionType {
     /// RAM type
     Ram,
diff --git a/vm-migration/src/protocol.rs b/vm-migration/src/protocol.rs
index 6f078c5fe..2f17fc7ff 100644
--- a/vm-migration/src/protocol.rs
+++ b/vm-migration/src/protocol.rs
@@ -174,13 +174,13 @@ impl Response {
 unsafe impl ByteValued for Response {}
 
 #[repr(C)]
-#[derive(Clone, Versionize)]
+#[derive(Clone, Serialize, Deserialize, Versionize)]
 pub struct MemoryRange {
     pub gpa: u64,
     pub length: u64,
 }
 
-#[derive(Clone, Default, Versionize)]
+#[derive(Clone, Default, Serialize, Deserialize, Versionize)]
 pub struct MemoryRangeTable {
     data: Vec<MemoryRange>,
 }
diff --git a/vmm/src/lib.rs b/vmm/src/lib.rs
index 1443e19cb..5e0029125 100644
--- a/vmm/src/lib.rs
+++ b/vmm/src/lib.rs
@@ -28,6 +28,7 @@ use crate::seccomp_filters::{get_seccomp_filter, Thread};
 use crate::vm::{Error as VmError, Vm, VmState};
 use anyhow::anyhow;
 use libc::EFD_NONBLOCK;
+use memory_manager::MemoryManagerSnapshotData;
 use seccompiler::{apply_filter, SeccompAction};
 use serde::ser::{Serialize, SerializeStruct, Serializer};
 use std::fs::File;
@@ -299,6 +300,7 @@ struct VmMigrationConfig {
     vm_config: Arc<Mutex<VmConfig>>,
     #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
     common_cpuid: hypervisor::CpuId,
+    memory_manager_data: MemoryManagerSnapshotData,
 }
 
 pub struct Vmm {
@@ -764,7 +766,7 @@ impl Vmm {
     where
         T: Read + Write,
     {
-        // Read in config data
+        // Read in config data along with memory manager data
        let mut data = Vec::with_capacity(req.length() as usize);
         unsafe {
             data.set_len(req.length() as usize);
         }
@@ -802,6 +804,7 @@ impl Vmm {
             &self.seccomp_action,
             self.hypervisor.clone(),
             activate_evt,
+            &vm_migration_config.memory_manager_data,
         )
         .map_err(|e| {
             MigratableError::MigrateReceive(anyhow!("Error creating VM from snapshot: {:?}", e))
@@ -1057,8 +1060,8 @@ impl Vmm {
             vm_config,
             #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
             common_cpuid,
+            memory_manager_data: vm.memory_manager_data(),
         };
-
         let config_data = serde_json::to_vec(&vm_migration_config).unwrap();
         Request::config(config_data.len() as u64).write_to(&mut socket)?;
         socket
diff --git a/vmm/src/memory_manager.rs b/vmm/src/memory_manager.rs
index 35050f0af..f6ba91315 100644
--- a/vmm/src/memory_manager.rs
+++ b/vmm/src/memory_manager.rs
@@ -63,7 +63,7 @@ const MPOL_BIND: u32 = 2;
 const MPOL_MF_STRICT: u32 = 1;
 const MPOL_MF_MOVE: u32 = 1 << 1;
 
-#[derive(Clone, Default, Versionize)]
+#[derive(Clone, Default, Serialize, Deserialize, Versionize)]
 struct HotPlugState {
     base: u64,
     length: u64,
@@ -121,7 +121,7 @@ impl MemoryZone {
 
 pub type MemoryZones = HashMap<String, MemoryZone>;
 
-#[derive(Clone, Versionize)]
+#[derive(Clone, Serialize, Deserialize, Versionize)]
 struct GuestRamMapping {
     slot: u32,
     gpa: u64,
@@ -131,7 +131,7 @@ struct GuestRamMapping {
     file_offset: u64,
 }
 
-#[derive(Clone, Versionize)]
+#[derive(Clone, Serialize, Deserialize, Versionize)]
 struct ArchMemRegion {
     base: u64,
     size: usize,
@@ -1741,6 +1741,21 @@ impl MemoryManager {
 
         Ok(table)
     }
+
+    pub fn snapshot_data(&self) -> MemoryManagerSnapshotData {
+        MemoryManagerSnapshotData {
+            memory_ranges: self.snapshot_memory_ranges.clone(),
+            guest_ram_mappings: self.guest_ram_mappings.clone(),
+            start_of_device_area: self.start_of_device_area.0,
+            boot_ram: self.boot_ram,
+            current_ram: self.current_ram,
+            arch_mem_regions: self.arch_mem_regions.clone(),
+            hotplug_slots: self.hotplug_slots.clone(),
+            next_memory_slot: self.next_memory_slot,
+            selected_slot: self.selected_slot,
+            next_hotplug_slot: self.next_hotplug_slot,
+        }
+    }
 }
 
 #[cfg(feature = "acpi")]
@@ -2121,7 +2136,7 @@ impl Aml for MemoryManager {
 
 impl Pausable for MemoryManager {}
 
-#[derive(Versionize)]
+#[derive(Clone, Serialize, Deserialize, Versionize)]
 pub struct MemoryManagerSnapshotData {
     memory_ranges: MemoryRangeTable,
     guest_ram_mappings: Vec<GuestRamMapping>,
@@ -2155,22 +2170,11 @@ impl Snapshottable for MemoryManager {
         // not. This saves the 'send' step having to go through the same
         // process, and instead it can directly proceed with storing the
         // memory range content for the ranges requiring it.
-        self.snapshot_memory_ranges = memory_ranges.clone();
+        self.snapshot_memory_ranges = memory_ranges;
 
         memory_manager_snapshot.add_data_section(SnapshotDataSection::new_from_versioned_state(
             MEMORY_MANAGER_SNAPSHOT_ID,
-            &MemoryManagerSnapshotData {
-                memory_ranges,
-                guest_ram_mappings: self.guest_ram_mappings.clone(),
-                start_of_device_area: self.start_of_device_area.0,
-                boot_ram: self.boot_ram,
-                current_ram: self.current_ram,
-                arch_mem_regions: self.arch_mem_regions.clone(),
-                hotplug_slots: self.hotplug_slots.clone(),
-                next_memory_slot: self.next_memory_slot,
-                selected_slot: self.selected_slot,
-                next_hotplug_slot: self.next_hotplug_slot,
-            },
+            &self.snapshot_data(),
         )?);
 
         Ok(memory_manager_snapshot)
diff --git a/vmm/src/vm.rs b/vmm/src/vm.rs
index f6fcfb2aa..1c6fc010b 100644
--- a/vmm/src/vm.rs
+++ b/vmm/src/vm.rs
@@ -20,7 +20,9 @@ use crate::config::{
 use crate::cpu;
 use crate::device_manager::{self, Console, DeviceManager, DeviceManagerError, PtyPair};
 use crate::device_tree::DeviceTree;
-use crate::memory_manager::{Error as MemoryManagerError, MemoryManager};
+use crate::memory_manager::{
+    Error as MemoryManagerError, MemoryManager, MemoryManagerSnapshotData,
+};
 use crate::migration::{get_vm_snapshot, url_to_path, VM_SNAPSHOT_FILE};
 use crate::seccomp_filters::{get_seccomp_filter, Thread};
 use crate::GuestMemoryMmap;
@@ -853,6 +855,7 @@ impl Vm {
         seccomp_action: &SeccompAction,
         hypervisor: Arc<dyn hypervisor::Hypervisor>,
         activate_evt: EventFd,
+        memory_manager_data: &MemoryManagerSnapshotData,
     ) -> Result<Self> {
         hypervisor.check_required_extensions().unwrap();
         let vm = hypervisor.create_vm().unwrap();
@@ -867,7 +870,7 @@ impl Vm {
             phys_bits,
             #[cfg(feature = "tdx")]
             false,
-            None,
+            Some(memory_manager_data),
         )
         .map_err(Error::MemoryManager)?;
 
@@ -2269,6 +2272,10 @@ impl Vm {
             .notify_power_button()
             .map_err(Error::PowerButton)
     }
+
+    pub fn memory_manager_data(&self) -> MemoryManagerSnapshotData {
+        self.memory_manager.lock().unwrap().snapshot_data()
+    }
 }
 
 impl Pausable for Vm {
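
Note for context, not part of the patch: the config request now carries the
serialized MemoryManagerSnapshotData next to the VmConfig, so the destination
can recreate guest RAM mappings exactly as they exist on the source instead of
deriving a fresh layout from the VM config. Below is a minimal standalone
sketch of that serde round-trip; MigrationConfig and RamMapping are made-up
stand-ins for VmMigrationConfig and GuestRamMapping, and it assumes the serde
(with the "derive" feature) and serde_json crates.

// Sketch of the source -> destination round-trip used on the control channel.
use serde::{Deserialize, Serialize};

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
struct RamMapping {
    slot: u32,
    gpa: u64,
    size: u64,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
struct MigrationConfig {
    boot_ram: u64,
    ram_mappings: Vec<RamMapping>,
}

fn main() -> Result<(), serde_json::Error> {
    // Source side: capture the exact guest RAM layout and serialize it.
    let config = MigrationConfig {
        boot_ram: 1 << 30,
        ram_mappings: vec![RamMapping {
            slot: 0,
            gpa: 0,
            size: 1 << 30,
        }],
    };
    let bytes = serde_json::to_vec(&config)?;

    // Destination side: deserialize and rebuild regions from the source's
    // description rather than computing a new layout locally.
    let restored: MigrationConfig = serde_json::from_slice(&bytes)?;
    assert_eq!(restored, config);
    Ok(())
}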