vmm: Refactor migration through Migratable trait

Now that Migratable provides the methods for starting, stopping and
retrieving the dirty pages, we move the existing code to these new
functions.

No functional change intended.

Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
This commit is contained in:
Sebastien Boeuf 2021-08-04 16:52:31 +02:00 committed by Bo Chen
parent e9637d3733
commit 0411064271
3 changed files with 90 additions and 95 deletions

View File

@@ -41,7 +41,7 @@ use std::sync::{Arc, Mutex};
use std::{result, thread};
use thiserror::Error;
use vm_memory::bitmap::AtomicBitmap;
use vm_migration::protocol::*;
use vm_migration::{protocol::*, Migratable};
use vm_migration::{MigratableError, Pausable, Snapshot, Snapshottable, Transportable};
use vmm_sys_util::eventfd::EventFd;
@@ -966,7 +966,7 @@ impl Vmm {
T: Read + Write,
{
// Send (dirty) memory table
let table = vm.dirty_memory_range_table()?;
let table = vm.dirty_log()?;
// But if there are no regions go straight to pause
if table.regions().is_empty() {
@@ -1067,7 +1067,7 @@ impl Vmm {
}
// Start logging dirty pages
vm.start_memory_dirty_log()?;
vm.start_dirty_log()?;
// Send memory table
let table = vm.memory_range_table()?;
@@ -1136,7 +1136,7 @@ impl Vmm {
// Stop logging dirty pages and keep the source VM paused upon successful migration
Ok(()) => {
// Stop logging dirty pages
vm.stop_memory_dirty_log()?;
vm.stop_dirty_log()?;
Ok(())
}
@@ -1145,7 +1145,7 @@ impl Vmm {
error!("Migration failed: {:?}", e);
// Stop logging dirty pages
vm.stop_memory_dirty_log()?;
vm.stop_dirty_log()?;
if vm.get_state().unwrap() == VmState::Paused {
vm.resume()?;

View File

@@ -1492,77 +1492,6 @@ impl MemoryManager {
// Accessor for the memory zones held by this manager; returns a shared
// reference so callers can inspect the zones without taking ownership.
pub fn memory_zones(&self) -> &MemoryZones {
&self.memory_zones
}
// Generate a table for the pages that are dirty. The dirty pages are collapsed
// together in the table if they are contiguous.
pub fn dirty_memory_range_table(
    &self,
) -> std::result::Result<MemoryRangeTable, MigratableError> {
    let mut table = MemoryRangeTable::default();
    for mapping in self.guest_ram_mappings.iter() {
        // Dirty bits recorded by the hypervisor for this RAM slot.
        let vm_bitmap = self
            .vm
            .get_dirty_log(mapping.slot, mapping.gpa, mapping.size)
            .map_err(|e| {
                MigratableError::MigrateSend(anyhow!("Error getting VM dirty log {}", e))
            })?;
        // Dirty bits recorded by the VMM for the guest region backing the
        // same GPA range; resetting them as we read keeps the next pass
        // incremental.
        let memory = self.guest_memory.memory();
        let vmm_bitmap = if let Some(region) = memory.find_region(GuestAddress(mapping.gpa)) {
            assert!(region.start_addr().raw_value() == mapping.gpa);
            assert!(region.len() == mapping.size);
            region.bitmap().get_and_reset()
        } else {
            return Err(MigratableError::MigrateSend(anyhow!(
                "Error finding 'guest memory region' with address {:x}",
                mapping.gpa
            )));
        };
        // Merge both views: a page is dirty if either side flagged a write.
        let merged: Vec<u64> = vm_bitmap
            .iter()
            .zip(vmm_bitmap.iter())
            .map(|(vm_bits, vmm_bits)| vm_bits | vmm_bits)
            .collect();
        let sub_table = MemoryRangeTable::from_bitmap(merged, mapping.gpa);
        if sub_table.regions().is_empty() {
            info!("Dirty Memory Range Table is empty");
        } else {
            info!("Dirty Memory Range Table:");
            for range in sub_table.regions() {
                info!("GPA: {:x} size: {} (KiB)", range.gpa, range.length / 1024);
            }
        }
        table.extend(sub_table);
    }
    Ok(table)
}
// Start the dirty log in the hypervisor (kvm/mshv).
// Also, reset the dirty bitmap logged by the vmm.
// Just before we do a bulk copy we want to start/clear the dirty log so that
// pages touched during our bulk copy are tracked.
pub fn start_memory_dirty_log(&self) -> std::result::Result<(), MigratableError> {
    // Ask the hypervisor to begin tracking dirty pages.
    if let Err(e) = self.vm.start_dirty_log() {
        return Err(MigratableError::MigrateSend(anyhow!(
            "Error starting VM dirty log {}",
            e
        )));
    }
    // Drop any stale VMM-side dirty bits so tracking starts from a clean slate.
    for region in self.guest_memory.memory().iter() {
        region.bitmap().reset();
    }
    Ok(())
}
// Stop dirty-page tracking in the hypervisor.
pub fn stop_memory_dirty_log(&self) -> std::result::Result<(), MigratableError> {
    match self.vm.stop_dirty_log() {
        Ok(()) => Ok(()),
        Err(e) => Err(MigratableError::MigrateSend(anyhow!(
            "Error stopping VM dirty log {}",
            e
        ))),
    }
}
}
#[cfg(feature = "acpi")]
@@ -2055,4 +1984,74 @@ impl Transportable for MemoryManager {
Ok(())
}
}
impl Migratable for MemoryManager {}
impl Migratable for MemoryManager {
    // Start the dirty log in the hypervisor (kvm/mshv).
    // Also, reset the dirty bitmap logged by the vmm.
    // Just before we do a bulk copy we want to start/clear the dirty log so that
    // pages touched during our bulk copy are tracked.
    fn start_dirty_log(&mut self) -> std::result::Result<(), MigratableError> {
        if let Err(e) = self.vm.start_dirty_log() {
            return Err(MigratableError::MigrateSend(anyhow!(
                "Error starting VM dirty log {}",
                e
            )));
        }
        // Clear the VMM-side bitmaps so both views start from a clean slate.
        for region in self.guest_memory.memory().iter() {
            region.bitmap().reset();
        }
        Ok(())
    }

    // Stop dirty-page tracking in the hypervisor.
    fn stop_dirty_log(&mut self) -> std::result::Result<(), MigratableError> {
        self.vm.stop_dirty_log().map_err(|e| {
            MigratableError::MigrateSend(anyhow!("Error stopping VM dirty log {}", e))
        })
    }

    // Generate a table for the pages that are dirty. The dirty pages are collapsed
    // together in the table if they are contiguous.
    fn dirty_log(&mut self) -> std::result::Result<MemoryRangeTable, MigratableError> {
        let mut table = MemoryRangeTable::default();
        for mapping in &self.guest_ram_mappings {
            // Hypervisor view of dirty pages for this RAM slot.
            let vm_dirty_bitmap = self
                .vm
                .get_dirty_log(mapping.slot, mapping.gpa, mapping.size)
                .map_err(|e| {
                    MigratableError::MigrateSend(anyhow!("Error getting VM dirty log {}", e))
                })?;
            // VMM view of dirty pages for the region backing the same GPA
            // range; reading also resets it so the next pass is incremental.
            let vmm_dirty_bitmap = match self
                .guest_memory
                .memory()
                .find_region(GuestAddress(mapping.gpa))
            {
                Some(region) => {
                    assert!(region.start_addr().raw_value() == mapping.gpa);
                    assert!(region.len() == mapping.size);
                    region.bitmap().get_and_reset()
                }
                None => {
                    return Err(MigratableError::MigrateSend(anyhow!(
                        "Error finding 'guest memory region' with address {:x}",
                        mapping.gpa
                    )))
                }
            };
            // A page is dirty if either the hypervisor or the VMM flagged it.
            let dirty_bitmap: Vec<u64> = vm_dirty_bitmap
                .iter()
                .zip(vmm_dirty_bitmap.iter())
                .map(|(hyp, vmm)| hyp | vmm)
                .collect();
            let sub_table = MemoryRangeTable::from_bitmap(dirty_bitmap, mapping.gpa);
            if sub_table.regions().is_empty() {
                info!("Dirty Memory Range Table is empty");
            } else {
                info!("Dirty Memory Range Table:");
                for range in sub_table.regions() {
                    info!("GPA: {:x} size: {} (KiB)", range.gpa, range.length / 1024);
                }
            }
            table.extend(sub_table);
        }
        Ok(table)
    }
}

View File

@@ -2156,23 +2156,6 @@ impl Vm {
Ok(table)
}
// Forward to the memory manager, which owns dirty-page tracking.
pub fn start_memory_dirty_log(&self) -> std::result::Result<(), MigratableError> {
    let memory_manager = self.memory_manager.lock().unwrap();
    memory_manager.start_memory_dirty_log()
}
// Forward to the memory manager, which owns dirty-page tracking.
pub fn stop_memory_dirty_log(&self) -> std::result::Result<(), MigratableError> {
    let memory_manager = self.memory_manager.lock().unwrap();
    memory_manager.stop_memory_dirty_log()
}
// Forward to the memory manager, which tracks the dirty bitmaps.
pub fn dirty_memory_range_table(
    &self,
) -> std::result::Result<MemoryRangeTable, MigratableError> {
    let memory_manager = self.memory_manager.lock().unwrap();
    memory_manager.dirty_memory_range_table()
}
pub fn device_tree(&self) -> Arc<Mutex<DeviceTree>> {
self.device_manager.lock().unwrap().device_tree()
}
@@ -2539,7 +2522,20 @@ impl Transportable for Vm {
Ok(())
}
}
impl Migratable for Vm {}
// The VM delegates all dirty-page tracking to its memory manager.
impl Migratable for Vm {
    fn start_dirty_log(&mut self) -> std::result::Result<(), MigratableError> {
        let mut memory_manager = self.memory_manager.lock().unwrap();
        memory_manager.start_dirty_log()
    }

    fn stop_dirty_log(&mut self) -> std::result::Result<(), MigratableError> {
        let mut memory_manager = self.memory_manager.lock().unwrap();
        memory_manager.stop_dirty_log()
    }

    fn dirty_log(&mut self) -> std::result::Result<MemoryRangeTable, MigratableError> {
        let mut memory_manager = self.memory_manager.lock().unwrap();
        memory_manager.dirty_log()
    }
}
#[cfg(all(feature = "kvm", target_arch = "x86_64"))]
#[cfg(test)]