migration: Use MemoryManager restore code path

Instead of creating a MemoryManager from scratch, let's reuse the same
code path used by snapshot/restore, so that memory regions are created
identically to the ones on the source VM.

Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
Author: Sebastien Boeuf <sebastien.boeuf@intel.com>, committed by Bo Chen
Date: 2021-10-05 17:53:08 +02:00
Commit: 58d8206e2b (parent: 1e1e61614c)
7 changed files with 42 additions and 24 deletions
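At a high level, the source VM now ships a description of its memory layout (MemoryManagerSnapshotData) along with the VM config, and the destination feeds that description into the same MemoryManager construction path already used when restoring a snapshot, rather than computing a fresh layout from the config. A minimal sketch of that dispatch, using invented stand-in types rather than the real cloud-hypervisor APIs:

    // Illustrative only: SavedLayout and create_regions are made-up stand-ins
    // for MemoryManagerSnapshotData and the MemoryManager constructor.
    struct SavedLayout {
        // (guest physical address, size) pairs recorded on the source VM
        regions: Vec<(u64, usize)>,
    }

    fn create_regions(ram_size: usize, saved: Option<&SavedLayout>) -> Vec<(u64, usize)> {
        match saved {
            // Snapshot restore and, with this commit, migration: replay the
            // source's layout so regions are recreated identically.
            Some(layout) => layout.regions.clone(),
            // Cold boot: derive the layout from the VM configuration.
            None => vec![(0, ram_size)],
        }
    }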

Cargo.lock

@@ -74,6 +74,7 @@ dependencies = [
"linux-loader",
"log",
"serde",
+"serde_derive",
"thiserror",
"versionize",
"versionize_derive",


@@ -18,6 +18,7 @@ libc = "0.2.103"
linux-loader = { version = "0.4.0", features = ["elf", "bzimage", "pe"] }
log = "0.4.14"
serde = { version = "1.0.130", features = ["rc"] }
+serde_derive = "1.0.130"
thiserror = "1.0.29"
versionize = "0.1.6"
versionize_derive = "0.1.4"


@@ -10,6 +10,8 @@
#[macro_use]
extern crate log;
+#[macro_use]
+extern crate serde_derive;
#[cfg(target_arch = "x86_64")]
use crate::x86_64::SgxEpcSection;
@@ -57,7 +59,7 @@ pub enum Error {
pub type Result<T> = result::Result<T, Error>;
/// Type for memory region types.
-#[derive(Clone, Copy, PartialEq, Debug, Versionize)]
+#[derive(Clone, Copy, PartialEq, Debug, Serialize, Deserialize, Versionize)]
pub enum RegionType {
/// RAM type
Ram,


@@ -174,13 +174,13 @@ impl Response {
unsafe impl ByteValued for Response {}
#[repr(C)]
-#[derive(Clone, Versionize)]
+#[derive(Clone, Serialize, Deserialize, Versionize)]
pub struct MemoryRange {
pub gpa: u64,
pub length: u64,
}
-#[derive(Clone, Default, Versionize)]
+#[derive(Clone, Default, Serialize, Deserialize, Versionize)]
pub struct MemoryRangeTable {
data: Vec<MemoryRange>,
}
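The derive changes above are needed because this state no longer ends up only in versioned snapshot files: it is also embedded in the migration config and serialized with serde_json (see the serde_json::to_vec call later in this commit), hence Serialize/Deserialize alongside Versionize. A self-contained sketch of that round trip with simplified copies of the structs above, assuming serde (with its derive feature) and serde_json as dependencies:

    use serde::{Deserialize, Serialize};

    #[derive(Clone, Serialize, Deserialize)]
    struct MemoryRange {
        gpa: u64,
        length: u64,
    }

    #[derive(Clone, Default, Serialize, Deserialize)]
    struct MemoryRangeTable {
        data: Vec<MemoryRange>,
    }

    fn main() -> Result<(), serde_json::Error> {
        let table = MemoryRangeTable {
            data: vec![MemoryRange { gpa: 0, length: 0x4000_0000 }],
        };
        // Source side: the table travels inside the migration config as JSON.
        let bytes = serde_json::to_vec(&table)?;
        // Destination side: rebuilt before the MemoryManager is created.
        let restored: MemoryRangeTable = serde_json::from_slice(&bytes)?;
        assert_eq!(restored.data.len(), 1);
        Ok(())
    }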


@@ -28,6 +28,7 @@ use crate::seccomp_filters::{get_seccomp_filter, Thread};
use crate::vm::{Error as VmError, Vm, VmState};
use anyhow::anyhow;
use libc::EFD_NONBLOCK;
+use memory_manager::MemoryManagerSnapshotData;
use seccompiler::{apply_filter, SeccompAction};
use serde::ser::{Serialize, SerializeStruct, Serializer};
use std::fs::File;
@@ -299,6 +300,7 @@ struct VmMigrationConfig {
vm_config: Arc<Mutex<VmConfig>>,
#[cfg(all(feature = "kvm", target_arch = "x86_64"))]
common_cpuid: hypervisor::CpuId,
+memory_manager_data: MemoryManagerSnapshotData,
}
pub struct Vmm {
@@ -764,7 +766,7 @@ impl Vmm {
where
T: Read + Write,
{
-// Read in config data
+// Read in config data along with memory manager data
let mut data = Vec::with_capacity(req.length() as usize);
unsafe {
data.set_len(req.length() as usize);
@@ -802,6 +804,7 @@ impl Vmm {
&self.seccomp_action,
self.hypervisor.clone(),
activate_evt,
+&vm_migration_config.memory_manager_data,
)
.map_err(|e| {
MigratableError::MigrateReceive(anyhow!("Error creating VM from snapshot: {:?}", e))
@@ -1057,8 +1060,8 @@ impl Vmm {
vm_config,
#[cfg(all(feature = "kvm", target_arch = "x86_64"))]
common_cpuid,
+memory_manager_data: vm.memory_manager_data(),
};
let config_data = serde_json::to_vec(&vm_migration_config).unwrap();
Request::config(config_data.len() as u64).write_to(&mut socket)?;
socket
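The net effect of the hunks above is that the config message exchanged at the start of a migration now carries the memory manager data, so the destination knows the exact memory layout before any guest RAM is transferred. A rough, self-contained sketch of such a length-prefixed exchange follows; the types and the framing are simplified stand-ins for the real VmMigrationConfig and Request/Response protocol:

    use serde::{Deserialize, Serialize};
    use std::io::{Read, Write};

    #[derive(Serialize, Deserialize)]
    struct MemoryManagerData {
        boot_ram: u64,
        current_ram: u64,
    }

    #[derive(Serialize, Deserialize)]
    struct MigrationConfig {
        memory_manager_data: MemoryManagerData,
    }

    // Source: serialize the config (now including the memory layout) and send
    // it with a length prefix, in the spirit of Request::config(len).
    fn send_config<W: Write>(socket: &mut W, cfg: &MigrationConfig) -> std::io::Result<()> {
        let data = serde_json::to_vec(cfg).expect("config serialization failed");
        socket.write_all(&(data.len() as u64).to_le_bytes())?;
        socket.write_all(&data)
    }

    // Destination: read the announced number of bytes and rebuild the config,
    // as the "Read in config data along with memory manager data" path does.
    fn recv_config<R: Read>(socket: &mut R) -> std::io::Result<MigrationConfig> {
        let mut len = [0u8; 8];
        socket.read_exact(&mut len)?;
        let mut data = vec![0u8; u64::from_le_bytes(len) as usize];
        socket.read_exact(&mut data)?;
        Ok(serde_json::from_slice(&data).expect("invalid config payload"))
    }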


@@ -63,7 +63,7 @@ const MPOL_BIND: u32 = 2;
const MPOL_MF_STRICT: u32 = 1;
const MPOL_MF_MOVE: u32 = 1 << 1;
-#[derive(Clone, Default, Versionize)]
+#[derive(Clone, Default, Serialize, Deserialize, Versionize)]
struct HotPlugState {
base: u64,
length: u64,
@@ -121,7 +121,7 @@ impl MemoryZone {
pub type MemoryZones = HashMap<String, MemoryZone>;
-#[derive(Clone, Versionize)]
+#[derive(Clone, Serialize, Deserialize, Versionize)]
struct GuestRamMapping {
slot: u32,
gpa: u64,
@@ -131,7 +131,7 @@ struct GuestRamMapping {
file_offset: u64,
}
-#[derive(Clone, Versionize)]
+#[derive(Clone, Serialize, Deserialize, Versionize)]
struct ArchMemRegion {
base: u64,
size: usize,
@@ -1741,6 +1741,21 @@ impl MemoryManager {
Ok(table)
}
+pub fn snapshot_data(&self) -> MemoryManagerSnapshotData {
+MemoryManagerSnapshotData {
+memory_ranges: self.snapshot_memory_ranges.clone(),
+guest_ram_mappings: self.guest_ram_mappings.clone(),
+start_of_device_area: self.start_of_device_area.0,
+boot_ram: self.boot_ram,
+current_ram: self.current_ram,
+arch_mem_regions: self.arch_mem_regions.clone(),
+hotplug_slots: self.hotplug_slots.clone(),
+next_memory_slot: self.next_memory_slot,
+selected_slot: self.selected_slot,
+next_hotplug_slot: self.next_hotplug_slot,
+}
+}
}
#[cfg(feature = "acpi")]
@@ -2121,7 +2136,7 @@ impl Aml for MemoryManager {
impl Pausable for MemoryManager {}
-#[derive(Versionize)]
+#[derive(Clone, Serialize, Deserialize, Versionize)]
pub struct MemoryManagerSnapshotData {
memory_ranges: MemoryRangeTable,
guest_ram_mappings: Vec<GuestRamMapping>,
@@ -2155,22 +2170,11 @@ impl Snapshottable for MemoryManager {
// not. This saves the 'send' step having to go through the same
// process, and instead it can directly proceed with storing the
// memory range content for the ranges requiring it.
-self.snapshot_memory_ranges = memory_ranges.clone();
+self.snapshot_memory_ranges = memory_ranges;
memory_manager_snapshot.add_data_section(SnapshotDataSection::new_from_versioned_state(
MEMORY_MANAGER_SNAPSHOT_ID,
-&MemoryManagerSnapshotData {
-memory_ranges,
-guest_ram_mappings: self.guest_ram_mappings.clone(),
-start_of_device_area: self.start_of_device_area.0,
-boot_ram: self.boot_ram,
-current_ram: self.current_ram,
-arch_mem_regions: self.arch_mem_regions.clone(),
-hotplug_slots: self.hotplug_slots.clone(),
-next_memory_slot: self.next_memory_slot,
-selected_slot: self.selected_slot,
-next_hotplug_slot: self.next_hotplug_slot,
-},
+&self.snapshot_data(),
)?);
Ok(memory_manager_snapshot)


@@ -20,7 +20,9 @@ use crate::config::{
use crate::cpu;
use crate::device_manager::{self, Console, DeviceManager, DeviceManagerError, PtyPair};
use crate::device_tree::DeviceTree;
-use crate::memory_manager::{Error as MemoryManagerError, MemoryManager};
+use crate::memory_manager::{
+Error as MemoryManagerError, MemoryManager, MemoryManagerSnapshotData,
+};
use crate::migration::{get_vm_snapshot, url_to_path, VM_SNAPSHOT_FILE};
use crate::seccomp_filters::{get_seccomp_filter, Thread};
use crate::GuestMemoryMmap;
@@ -853,6 +855,7 @@ impl Vm {
seccomp_action: &SeccompAction,
hypervisor: Arc<dyn hypervisor::Hypervisor>,
activate_evt: EventFd,
+memory_manager_data: &MemoryManagerSnapshotData,
) -> Result<Self> {
hypervisor.check_required_extensions().unwrap();
let vm = hypervisor.create_vm().unwrap();
@@ -867,7 +870,7 @@ impl Vm {
phys_bits,
#[cfg(feature = "tdx")]
false,
-None,
+Some(memory_manager_data),
)
.map_err(Error::MemoryManager)?;
@@ -2269,6 +2272,10 @@ impl Vm {
.notify_power_button()
.map_err(Error::PowerButton)
}
+pub fn memory_manager_data(&self) -> MemoryManagerSnapshotData {
+self.memory_manager.lock().unwrap().snapshot_data()
+}
}
impl Pausable for Vm {