vmm: refactor vCPU save/restore code in restoring VM

Similar to the VM boot process, on AArch64 systems,
the vCPUs should be created before the GIC is created. This
commit refactors the vCPU save/restore code to achieve the
above-mentioned restore order.

Signed-off-by: Henry Wang <Henry.Wang@arm.com>
This commit is contained in:
Henry Wang 2020-09-04 18:56:30 +08:00 committed by Rob Bradford
parent 970a5a410d
commit c6b47d39e0
3 changed files with 48 additions and 22 deletions

View File

@ -75,7 +75,7 @@ pub struct Snapshot {
pub id: String,
/// The Snapshottable component snapshots.
pub snapshots: std::collections::HashMap<String, Box<Snapshot>>,
pub snapshots: std::collections::BTreeMap<String, Box<Snapshot>>,
/// The Snapshottable component's snapshot data.
/// A map of snapshot sections, indexed by the section ids.

View File

@ -211,6 +211,9 @@ pub enum Error {
/// Cannot apply seccomp filter
ApplySeccompFilter(seccomp::Error),
/// Error starting vCPU after restore
StartRestoreVcpu(anyhow::Error),
}
pub type Result<T> = result::Result<T, Error>;
@ -801,6 +804,11 @@ impl CpuManager {
.set_cpuid2(&cpuid)
.map_err(|e| Error::SetSupportedCpusFailed(e.into()))?;
}
// AArch64 vCPUs must be initialized after being created.
#[cfg(target_arch = "aarch64")]
vcpu.lock().unwrap().init(&self.vm)?;
vcpu.lock()
.unwrap()
.restore(snapshot)
@ -1013,6 +1021,25 @@ impl CpuManager {
self.activate_vcpus(self.boot_vcpus(), false)
}
/// Start all vCPUs that were previously restored from a snapshot.
///
/// Each vCPU thread is spawned in the "paused" state (the pause flag is
/// raised before any thread starts), and all threads are released
/// together once every one of them has reached the shared barrier, so
/// no vCPU runs guest code before the others are ready.
///
/// # Errors
///
/// Returns `Error::StartRestoreVcpu` if spawning any vCPU thread fails.
pub fn start_restored_vcpus(&mut self) -> Result<()> {
    // `len()` is already `usize`; no cast needed.
    let vcpu_count = self.vcpus.len();
    // One extra slot so this (managing) thread can release all vCPU
    // threads at once by reaching the barrier last.
    let vcpu_thread_barrier = Arc::new(Barrier::new(vcpu_count + 1));

    // Restore the vCPUs in "paused" state.
    self.vcpus_pause_signalled.store(true, Ordering::SeqCst);

    // Indexed loop (rather than iterating `&self.vcpus`) so that
    // `self.start_vcpu` can still take `&mut self`.
    for vcpu_index in 0..vcpu_count {
        let vcpu = Arc::clone(&self.vcpus[vcpu_index]);
        self.start_vcpu(vcpu, vcpu_thread_barrier.clone(), false)
            .map_err(|e| {
                Error::StartRestoreVcpu(anyhow!("Failed to start restored vCPUs: {:#?}", e))
            })?;
    }

    // Unblock all restored CPU threads.
    vcpu_thread_barrier.wait();
    Ok(())
}
pub fn resize(&mut self, desired_vcpus: u8) -> Result<bool> {
match desired_vcpus.cmp(&self.present_vcpus()) {
cmp::Ordering::Greater => {
@ -1476,22 +1503,12 @@ impl Snapshottable for CpuManager {
}
fn restore(&mut self, snapshot: Snapshot) -> std::result::Result<(), MigratableError> {
let vcpu_thread_barrier = Arc::new(Barrier::new((snapshot.snapshots.len() + 1) as usize));
// Restore the vCPUs in "paused" state.
self.vcpus_pause_signalled.store(true, Ordering::SeqCst);
for (cpu_id, snapshot) in snapshot.snapshots.iter() {
debug!("Restoring VCPU {}", cpu_id);
let vcpu = self
.create_vcpu(cpu_id.parse::<u8>().unwrap(), None, Some(*snapshot.clone()))
self.create_vcpu(cpu_id.parse::<u8>().unwrap(), None, Some(*snapshot.clone()))
.map_err(|e| MigratableError::Restore(anyhow!("Could not create vCPU {:?}", e)))?;
self.start_vcpu(vcpu, vcpu_thread_barrier.clone(), false)
.map_err(|e| MigratableError::Restore(anyhow!("Could not restore vCPU {:?}", e)))?;
}
// Unblock all restored CPU threads.
vcpu_thread_barrier.wait();
Ok(())
}
}

View File

@ -1513,6 +1513,17 @@ impl Snapshottable for Vm {
)));
}
if let Some(cpu_manager_snapshot) = snapshot.snapshots.get(CPU_MANAGER_SNAPSHOT_ID) {
self.cpu_manager
.lock()
.unwrap()
.restore(*cpu_manager_snapshot.clone())?;
} else {
return Err(MigratableError::Restore(anyhow!(
"Missing CPU manager snapshot"
)));
}
if let Some(device_manager_snapshot) = snapshot.snapshots.get(DEVICE_MANAGER_SNAPSHOT_ID) {
self.device_manager
.lock()
@ -1524,16 +1535,14 @@ impl Snapshottable for Vm {
)));
}
if let Some(cpu_manager_snapshot) = snapshot.snapshots.get(CPU_MANAGER_SNAPSHOT_ID) {
self.cpu_manager
.lock()
.unwrap()
.restore(*cpu_manager_snapshot.clone())?;
} else {
return Err(MigratableError::Restore(anyhow!(
"Missing CPU manager snapshot"
)));
}
// Now we can start all vCPUs from here.
self.cpu_manager
.lock()
.unwrap()
.start_restored_vcpus()
.map_err(|e| {
MigratableError::Restore(anyhow!("Cannot start restored vCPUs: {:#?}", e))
})?;
if self
.device_manager