Mirror of https://github.com/cloud-hypervisor/cloud-hypervisor.git
vmm: Move DeviceManager to new restore design
Based on all the work that has already been merged, it is now possible to fully move DeviceManager out of the previous restore model, meaning there's no need for a dedicated restore() function to be implemented there.

Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
parent 4487c8376b
commit a6959a7469
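In short, DeviceManager moves from a construct-then-restore() model to restoring its state directly in the constructor. A minimal, self-contained Rust sketch of that pattern (the types below are simplified stand-ins for the real vmm types, not the actual API):

use std::num::Wrapping;

// Simplified stand-ins for the real vmm types; this only illustrates
// the restore-via-constructor pattern the commit adopts.
#[derive(Clone, Default)]
struct DeviceTree;

#[derive(Clone)]
struct DeviceManagerState {
    device_tree: DeviceTree,
    device_id_cnt: Wrapping<usize>,
}

struct DeviceManager {
    device_tree: DeviceTree,
    device_id_cnt: Wrapping<usize>,
}

impl DeviceManager {
    // New design: an optional saved state is consumed at construction
    // time, so no separate restore()/set_state() step is needed.
    fn new(saved: Option<&DeviceManagerState>) -> Self {
        let (device_tree, device_id_cnt) = if let Some(state) = saved {
            (state.device_tree.clone(), state.device_id_cnt)
        } else {
            (DeviceTree::default(), Wrapping(0))
        };
        DeviceManager { device_tree, device_id_cnt }
    }
}

fn main() {
    // Cold boot: no saved state.
    let fresh = DeviceManager::new(None);
    assert_eq!(fresh.device_id_cnt, Wrapping(0));

    // Restore: saved state feeds the constructor directly.
    let saved = DeviceManagerState {
        device_tree: DeviceTree,
        device_id_cnt: Wrapping(42),
    };
    let restored = DeviceManager::new(Some(&saved));
    assert_eq!(restored.device_id_cnt, Wrapping(42));
    let _ = (fresh.device_tree, restored.device_tree);
}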
vmm/src/device_manager.rs

@@ -972,7 +972,15 @@ impl DeviceManager {
     ) -> DeviceManagerResult<Arc<Mutex<Self>>> {
         trace_scoped!("DeviceManager::new");
 
-        let device_tree = Arc::new(Mutex::new(DeviceTree::new()));
+        let (device_tree, device_id_cnt) = if let Some(snapshot) = snapshot.as_ref() {
+            let state: DeviceManagerState = snapshot.to_state(DEVICE_MANAGER_SNAPSHOT_ID).unwrap();
+            (
+                Arc::new(Mutex::new(state.device_tree.clone())),
+                state.device_id_cnt,
+            )
+        } else {
+            (Arc::new(Mutex::new(DeviceTree::new())), Wrapping(0))
+        };
 
         let num_pci_segments =
             if let Some(platform_config) = config.lock().unwrap().platform.as_ref() {
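The snapshot.to_state(DEVICE_MANAGER_SNAPSHOT_ID) call above retrieves the typed DeviceManagerState saved under a well-known ID. A toy stand-in for that keyed-state shape (using std::any here rather than the real vm-migration serialization):

use std::any::Any;
use std::collections::HashMap;

// Toy stand-in (not the real vm-migration API) for keyed snapshot
// state: each component stores its state under a string ID and gets
// it back, typed, on restore.
struct Snapshot {
    states: HashMap<String, Box<dyn Any>>,
}

impl Snapshot {
    fn to_state<T: Clone + 'static>(&self, id: &str) -> Option<T> {
        self.states.get(id)?.downcast_ref::<T>().cloned()
    }
}

fn main() {
    let mut snapshot = Snapshot { states: HashMap::new() };
    snapshot
        .states
        .insert("device-manager".to_string(), Box::new(42u32));

    // Mirrors the lookup-by-ID pattern in DeviceManager::new above.
    let state: Option<u32> = snapshot.to_state("device-manager");
    assert_eq!(state, Some(42));
}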
@@ -1081,7 +1089,7 @@ impl DeviceManager {
             cpu_manager,
             virtio_devices: Vec::new(),
             bus_devices: Vec::new(),
-            device_id_cnt: Wrapping(0),
+            device_id_cnt,
             msi_interrupt_manager,
             legacy_interrupt_manager: None,
             passthrough_device: None,
@@ -1240,11 +1248,6 @@ impl DeviceManager {
         }
     }
 
-    fn set_state(&mut self, state: &DeviceManagerState) {
-        *self.device_tree.lock().unwrap() = state.device_tree.clone();
-        self.device_id_cnt = state.device_id_cnt;
-    }
-
     fn get_msi_iova_space(&mut self) -> (u64, u64) {
         #[cfg(target_arch = "aarch64")]
         {
@@ -4116,38 +4119,6 @@ impl DeviceManager {
         self.device_tree.clone()
     }
 
-    pub fn restore_devices(
-        &mut self,
-        snapshot: Snapshot,
-    ) -> std::result::Result<(), MigratableError> {
-        // Finally, restore all devices associated with the DeviceManager.
-        // It's important to restore devices in the right order, that's why
-        // the device tree is the right way to ensure we restore a child before
-        // its parent node.
-        for node in self
-            .device_tree
-            .lock()
-            .unwrap()
-            .breadth_first_traversal()
-            .rev()
-        {
-            // Restore the node
-            if let Some(migratable) = &node.migratable {
-                info!("Restoring {} from DeviceManager", node.id);
-                if let Some(snapshot) = snapshot.snapshots.get(&node.id) {
-                    migratable.lock().unwrap().restore(*snapshot.clone())?;
-                } else {
-                    return Err(MigratableError::Restore(anyhow!(
-                        "Missing device {}",
-                        node.id
-                    )));
-                }
-            }
-        }
-
-        Ok(())
-    }
-
     #[cfg(target_arch = "x86_64")]
     pub fn notify_power_button(&self) -> DeviceManagerResult<()> {
         self.ged_notification_device
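The helper removed above relied on reversing a breadth-first traversal to visit every child before its parent, per its own comment. A small sketch of why that ordering falls out (hypothetical Node type, not the real DeviceTree):

use std::collections::VecDeque;

// Sketch of the ordering the removed restore_devices() relied on:
// breadth-first traversal visits parents before children, so
// iterating it in reverse visits every child before its parent.
struct Node {
    id: &'static str,
    children: Vec<Node>,
}

fn breadth_first(root: &Node) -> Vec<&Node> {
    let mut order = Vec::new();
    let mut queue = VecDeque::from([root]);
    while let Some(node) = queue.pop_front() {
        order.push(node);
        queue.extend(node.children.iter());
    }
    order
}

fn main() {
    let tree = Node {
        id: "pci-segment",
        children: vec![
            Node { id: "virtio-net", children: vec![] },
            Node { id: "virtio-blk", children: vec![] },
        ],
    };
    // Prints the leaves first, then their parent: the child-before-
    // parent restore order the deleted helper enforced.
    for node in breadth_first(&tree).iter().rev() {
        println!("restore {}", node.id);
    }
}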
@@ -4474,19 +4445,6 @@ impl Snapshottable for DeviceManager {
 
         Ok(snapshot)
     }
-
-    fn restore(&mut self, snapshot: Snapshot) -> std::result::Result<(), MigratableError> {
-        // Let's first restore the DeviceManager.
-
-        self.set_state(&snapshot.to_state(DEVICE_MANAGER_SNAPSHOT_ID)?);
-
-        // Now that DeviceManager is updated with the right states, it's time
-        // to create the devices based on the configuration.
-        self.create_devices(None, None, None)
-            .map_err(|e| MigratableError::Restore(anyhow!("Could not create devices {:?}", e)))?;
-
-        Ok(())
-    }
 }
 
 impl Transportable for DeviceManager {}
vmm/src/vm.rs

@@ -2610,7 +2610,7 @@ impl Snapshottable for Vm {
         Ok(vm_snapshot)
     }
 
-    fn restore(&mut self, snapshot: Snapshot) -> std::result::Result<(), MigratableError> {
+    fn restore(&mut self, _snapshot: Snapshot) -> std::result::Result<(), MigratableError> {
         event!("vm", "restoring");
 
         let current_state = self
@@ -2622,33 +2622,17 @@
         })?;
 
         #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
-        self.load_clock_from_snapshot(&snapshot)
+        self.load_clock_from_snapshot(&_snapshot)
             .map_err(|e| MigratableError::Restore(anyhow!("Error restoring clock: {:?}", e)))?;
 
-        if let Some(device_manager_snapshot) = snapshot.snapshots.get(DEVICE_MANAGER_SNAPSHOT_ID) {
-            self.device_manager
-                .lock()
-                .unwrap()
-                .restore(*device_manager_snapshot.clone())?;
-        } else {
-            return Err(MigratableError::Restore(anyhow!(
-                "Missing device manager snapshot"
-            )));
-        }
+        self.device_manager
+            .lock()
+            .unwrap()
+            .create_devices(None, None, None)
+            .map_err(|e| MigratableError::Restore(anyhow!("Error restoring devices: {:#?}", e)))?;
 
         #[cfg(target_arch = "aarch64")]
-        self.restore_vgic_and_enable_interrupt(&snapshot)?;
-
-        if let Some(device_manager_snapshot) = snapshot.snapshots.get(DEVICE_MANAGER_SNAPSHOT_ID) {
-            self.device_manager
-                .lock()
-                .unwrap()
-                .restore_devices(*device_manager_snapshot.clone())?;
-        } else {
-            return Err(MigratableError::Restore(anyhow!(
-                "Missing device manager snapshot"
-            )));
-        }
+        self.restore_vgic_and_enable_interrupt(&_snapshot)?;
 
         // Now we can start all vCPUs from here.
         self.cpu_manager
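Taken together, the restore path collapses to: build DeviceManager from the snapshot, then have Vm::restore re-create the devices. A condensed, hypothetical model of that flow (simplified types, not the real vmm API, error handling reduced to String):

struct Snapshot;

struct DeviceManager {
    restored: bool,
}

impl DeviceManager {
    // State (device tree, ID counter) is applied here, up front.
    fn new(snapshot: Option<&Snapshot>) -> Self {
        DeviceManager { restored: snapshot.is_some() }
    }

    // Devices are re-created from the already-restored device tree.
    fn create_devices(&mut self) -> Result<(), String> {
        Ok(())
    }
}

struct Vm {
    device_manager: DeviceManager,
}

impl Vm {
    // No DeviceManager::restore() step anymore: just re-create the
    // devices, then (in the real code) resume the vCPUs.
    fn restore(&mut self) -> Result<(), String> {
        self.device_manager.create_devices()
    }
}

fn main() {
    let snapshot = Snapshot;
    let mut vm = Vm {
        device_manager: DeviceManager::new(Some(&snapshot)),
    };
    assert!(vm.device_manager.restored);
    vm.restore().unwrap();
}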