hypervisor, vmm: Add dynamic control of logging dirty pages

This patch slightly extends the current live-migration code path with
the ability to dynamically start and stop dirty-page logging, relying
on two new methods added to the `hypervisor::vm::Vm` trait. It also
contains a complete `kvm`-based implementation of the two new methods
and placeholders for `mshv` in the `hypervisor` crate.
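
For illustration, a minimal sketch of how a caller might drive the two new
trait methods for a single guest RAM region, assuming the crate's `vm::Result`
alias; the function name and all values below are placeholders, not code from
this patch:

    // Sketch only: toggle dirty-page logging for one region through the
    // extended hypervisor::vm::Vm trait. All values are hypothetical.
    fn track_region(vm: &dyn hypervisor::vm::Vm) -> hypervisor::vm::Result<()> {
        let (slot, gpa, size, host_addr) = (0u32, 0x1_0000_0000, 0x2000_0000, 0x7f00_0000_0000u64);

        // Ask the hypervisor to start tracking writes to this region.
        vm.start_dirty_log(slot, gpa, size, host_addr)?;

        // ... bulk copy happens here; then fetch the per-page dirty bitmap ...
        let _bitmap: Vec<u64> = vm.get_dirty_log(slot, size)?;

        // Once migration converges, stop the tracking again.
        vm.stop_dirty_log(slot, gpa, size, host_addr)
    }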

Fixes: #2858

Signed-off-by: Bo Chen <chen.bo@intel.com>
Author:       Bo Chen <chen.bo@intel.com>
Date:         2021-07-21 19:16:30 -07:00
Committed by: Bo Chen
Parent:       a33280b8ad
Commit:       5e0d498582
6 changed files with 144 additions and 5 deletions

@@ -383,6 +383,59 @@ impl vm::Vm for KvmVm {
        Ok(())
    }
    ///
    /// Start logging dirty pages
    ///
    fn start_dirty_log(
        &self,
        slot: u32,
        guest_phys_addr: u64,
        memory_size: u64,
        userspace_addr: u64,
    ) -> vm::Result<()> {
        let region = self.make_user_memory_region(
            slot,
            guest_phys_addr,
            memory_size,
            userspace_addr,
            false,
            true,
        );
        // Safe because guest regions are guaranteed not to overlap.
        unsafe {
            self.fd
                .set_user_memory_region(region)
                .map_err(|e| vm::HypervisorVmError::StartDirtyLog(e.into()))
        }
    }
    ///
    /// Stop logging dirty pages
    ///
    fn stop_dirty_log(
        &self,
        slot: u32,
        guest_phys_addr: u64,
        memory_size: u64,
        userspace_addr: u64,
    ) -> vm::Result<()> {
        let region = self.make_user_memory_region(
            slot,
            guest_phys_addr,
            memory_size,
            userspace_addr,
            false,
            false,
        );
        // Safe because guest regions are guaranteed not to overlap.
        unsafe {
            self.fd
                .set_user_memory_region(region)
                .map_err(|e| vm::HypervisorVmError::StopDirtyLog(e.into()))
        }
    }
    ///
    /// Get dirty pages bitmap (one bit per page)
    ///

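Both KVM methods above delegate to make_user_memory_region (not shown in this
diff), flipping only its final boolean. A rough sketch, assuming that helper
maps its two booleans onto kvm-bindings region flags; the real implementation
may differ:

    use kvm_bindings::{kvm_userspace_memory_region, KVM_MEM_LOG_DIRTY_PAGES, KVM_MEM_READONLY};

    // Sketch only: re-registering a slot with or without KVM_MEM_LOG_DIRTY_PAGES
    // is what starts or stops dirty tracking for that region.
    fn sketch_user_memory_region(
        slot: u32,
        guest_phys_addr: u64,
        memory_size: u64,
        userspace_addr: u64,
        readonly: bool,
        log_dirty_pages: bool,
    ) -> kvm_userspace_memory_region {
        let mut flags = 0;
        if readonly {
            flags |= KVM_MEM_READONLY;
        }
        if log_dirty_pages {
            flags |= KVM_MEM_LOG_DIRTY_PAGES;
        }
        kvm_userspace_memory_region {
            slot,
            guest_phys_addr,
            memory_size,
            userspace_addr,
            flags,
        }
    }
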
@@ -877,6 +877,34 @@ impl vm::Vm for MshvVm {
        Ok(())
    }
    ///
    /// Start logging dirty pages
    ///
    fn start_dirty_log(
        &self,
        _slot: u32,
        _guest_phys_addr: u64,
        _memory_size: u64,
        _userspace_addr: u64,
    ) -> vm::Result<()> {
        Err(vm::HypervisorVmError::StartDirtyLog(anyhow!(
            "functionality not implemented"
        )))
    }
    ///
    /// Stop logging dirty pages
    ///
    fn stop_dirty_log(
        &self,
        _slot: u32,
        _guest_phys_addr: u64,
        _memory_size: u64,
        _userspace_addr: u64,
    ) -> vm::Result<()> {
        Err(vm::HypervisorVmError::StopDirtyLog(anyhow!(
            "functionality not implemented"
        )))
    }
    ///
    /// Get dirty pages bitmap (one bit per page)
    ///
    fn get_dirty_log(&self, _slot: u32, _memory_size: u64) -> vm::Result<Vec<u64>> {

@@ -168,6 +168,16 @@ pub enum HypervisorVmError {
    #[error("Failed to write to IO Bus: {0}")]
    IoBusWrite(#[source] anyhow::Error),
    ///
    /// Start dirty log error
    ///
    #[error("Failed to start dirty log: {0}")]
    StartDirtyLog(#[source] anyhow::Error),
    ///
    /// Stop dirty log error
    ///
    #[error("Failed to stop dirty log: {0}")]
    StopDirtyLog(#[source] anyhow::Error),
    ///
    /// Get dirty log error
    ///
    #[error("Failed to get dirty log: {0}")]
@@ -270,6 +280,22 @@ pub trait Vm: Send + Sync {
    fn state(&self) -> Result<VmState>;
    /// Set the VM state
    fn set_state(&self, state: VmState) -> Result<()>;
    /// Start logging dirty pages
    fn start_dirty_log(
        &self,
        slot: u32,
        guest_phys_addr: u64,
        memory_size: u64,
        userspace_addr: u64,
    ) -> Result<()>;
    /// Stop logging dirty pages
    fn stop_dirty_log(
        &self,
        slot: u32,
        guest_phys_addr: u64,
        memory_size: u64,
        userspace_addr: u64,
    ) -> Result<()>;
    /// Get dirty pages bitmap
    fn get_dirty_log(&self, slot: u32, memory_size: u64) -> Result<Vec<u64>>;
    #[cfg(feature = "tdx")]

@@ -1049,6 +1049,9 @@ impl Vmm {
        // Send last batch of dirty pages
        Self::vm_maybe_send_dirty_pages(vm, &mut socket)?;
        // Stop logging dirty pages
        vm.stop_memory_dirty_log()?;
        // Capture snapshot and send it
        let vm_snapshot = vm.snapshot()?;
        let snapshot_data = serde_json::to_vec(&vm_snapshot).unwrap();

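For context, a simplified sketch of where the start and stop calls sit in the
send-side migration flow; the trait below exists only for this sketch, while
the method names mirror the vmm calls used in these hunks:

    // Sketch only: a hypothetical trait standing in for the vmm's Vm type.
    trait MigratableVm {
        fn start_memory_dirty_log(&self) -> Result<(), String>;
        fn stop_memory_dirty_log(&self) -> Result<(), String>;
        fn send_all_ram(&self) -> Result<(), String>;
        fn send_dirty_pages(&self) -> Result<(), String>;
        fn snapshot_and_send(&self) -> Result<(), String>;
    }

    fn send_side_memory_phase(vm: &dyn MigratableVm) -> Result<(), String> {
        // Track writes from here on, so pages touched during the bulk copy
        // are re-sent afterwards.
        vm.start_memory_dirty_log()?;
        // Bulk copy of all guest RAM.
        vm.send_all_ram()?;
        // Last batch of pages dirtied while the copy was in flight.
        vm.send_dirty_pages()?;
        // Writes no longer need tracking; release the hypervisor's dirty log.
        vm.stop_memory_dirty_log()?;
        // Finally capture and send the device/vCPU snapshot.
        vm.snapshot_and_send()
    }
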
@@ -1561,14 +1561,22 @@ impl MemoryManager {
        Ok(table)
    }
    // Start the dirty log on guest RAM in the hypervisor (kvm/mshv).
    // Also, reset the dirty bitmap logged by the vmm.
    // Just before we do a bulk copy we want to start/clear the dirty log so that
    // pages touched during our bulk copy are tracked.
    pub fn start_memory_dirty_log(&self) -> std::result::Result<(), MigratableError> {
        for r in &self.guest_ram_mappings {
            let user_addr = self
                .guest_memory()
                .memory()
                .get_host_address(GuestAddress(r.gpa))
                .unwrap();
            self.vm
                .start_dirty_log(r.slot, r.gpa, r.size, user_addr as u64)
                .map_err(|e| {
                    MigratableError::MigrateSend(anyhow!("Error starting VM dirty log {}", e))
                })?;
        }
        for r in self.guest_memory.memory().iter() {
@@ -1577,6 +1585,23 @@
        Ok(())
    }
    pub fn stop_memory_dirty_log(&self) -> std::result::Result<(), MigratableError> {
        for r in &self.guest_ram_mappings {
            let user_addr = self
                .guest_memory()
                .memory()
                .get_host_address(GuestAddress(r.gpa))
                .unwrap();
            self.vm
                .stop_dirty_log(r.slot, r.gpa, r.size, user_addr as u64)
                .map_err(|e| {
                    MigratableError::MigrateSend(anyhow!("Error stopping VM dirty log {}", e))
                })?;
        }
        Ok(())
    }
}
#[cfg(feature = "acpi")]

@@ -2156,6 +2156,10 @@ impl Vm {
        self.memory_manager.lock().unwrap().start_memory_dirty_log()
    }
    pub fn stop_memory_dirty_log(&self) -> std::result::Result<(), MigratableError> {
        self.memory_manager.lock().unwrap().stop_memory_dirty_log()
    }
    pub fn dirty_memory_range_table(
        &self,
    ) -> std::result::Result<MemoryRangeTable, MigratableError> {