From 79425b6aa8e675e9baefb53b4b50a44a838a38af Mon Sep 17 00:00:00 2001
From: Sebastien Boeuf <sebastien.boeuf@intel.com>
Date: Wed, 4 Aug 2021 15:30:28 +0200
Subject: [PATCH] vm-migration, vmm: Extend methods for MemoryRangeTable

In anticipation for supporting the merge of multiple dirty pages coming
from multiple devices, this patch factorizes the creation of a
MemoryRangeTable from a bitmap, as well as providing a simple method for
merging the dirty pages regions under a single MemoryRangeTable.

Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
---
 vm-migration/src/protocol.rs | 45 ++++++++++++++++++++++++++++++++++++
 vmm/src/memory_manager.rs    | 35 ++++++----------------------
 2 files changed, 52 insertions(+), 28 deletions(-)

diff --git a/vm-migration/src/protocol.rs b/vm-migration/src/protocol.rs
index 96f2d7eac..aaffad317 100644
--- a/vm-migration/src/protocol.rs
+++ b/vm-migration/src/protocol.rs
@@ -184,6 +184,35 @@ pub struct MemoryRangeTable {
 }
 
 impl MemoryRangeTable {
+    pub fn from_bitmap(bitmap: Vec<u64>, start_addr: u64) -> Self {
+        let page_size = 4096;
+        let mut table = MemoryRangeTable::default();
+        let mut entry: Option<MemoryRange> = None;
+        for (i, block) in bitmap.iter().enumerate() {
+            for j in 0..64 {
+                let is_page_dirty = ((block >> j) & 1u64) != 0u64;
+                let page_offset = ((i * 64) + j) as u64 * page_size;
+                if is_page_dirty {
+                    if let Some(entry) = &mut entry {
+                        entry.length += page_size;
+                    } else {
+                        entry = Some(MemoryRange {
+                            gpa: start_addr + page_offset,
+                            length: page_size,
+                        });
+                    }
+                } else if let Some(entry) = entry.take() {
+                    table.push(entry);
+                }
+            }
+        }
+        if let Some(entry) = entry.take() {
+            table.push(entry);
+        }
+
+        table
+    }
+
     pub fn regions(&self) -> &[MemoryRange] {
         &self.data
     }
@@ -223,4 +252,20 @@ impl MemoryRangeTable {
             })
             .map_err(MigratableError::MigrateSocket)
     }
+
+    pub fn is_empty(&self) -> bool {
+        self.data.is_empty()
+    }
+
+    pub fn extend(&mut self, table: Self) {
+        self.data.extend(table.data)
+    }
+
+    pub fn new_from_tables(tables: Vec<MemoryRangeTable>) -> Self {
+        let mut data = Vec::new();
+        for table in tables {
+            data.extend(table.data);
+        }
+        Self { data }
+    }
 }
diff --git a/vmm/src/memory_manager.rs b/vmm/src/memory_manager.rs
index 939dff707..68f3e023b 100644
--- a/vmm/src/memory_manager.rs
+++ b/vmm/src/memory_manager.rs
@@ -41,9 +41,8 @@ use vm_memory::{
     GuestUsize, MmapRegion,
 };
 use vm_migration::{
-    protocol::{MemoryRange, MemoryRangeTable},
-    Migratable, MigratableError, Pausable, Snapshot, SnapshotDataSection, Snapshottable,
-    Transportable, VersionMapped,
+    protocol::MemoryRangeTable, Migratable, MigratableError, Pausable, Snapshot,
+    SnapshotDataSection, Snapshottable, Transportable, VersionMapped,
 };
 
 #[cfg(feature = "acpi")]
@@ -1499,7 +1498,6 @@ impl MemoryManager {
     pub fn dirty_memory_range_table(
        &self,
    ) -> std::result::Result<MemoryRangeTable, MigratableError> {
-        let page_size = 4096; // TODO: Does this need to vary?
         let mut table = MemoryRangeTable::default();
         for r in &self.guest_ram_mappings {
             let vm_dirty_bitmap = self.vm.get_dirty_log(r.slot, r.gpa, r.size).map_err(|e| {
@@ -1526,37 +1524,18 @@ impl MemoryManager {
                 .map(|(x, y)| x | y)
                 .collect();
 
-            let mut entry: Option<MemoryRange> = None;
-            for (i, block) in dirty_bitmap.iter().enumerate() {
-                for j in 0..64 {
-                    let is_page_dirty = ((block >> j) & 1u64) != 0u64;
-                    let page_offset = ((i * 64) + j) as u64 * page_size;
-                    if is_page_dirty {
-                        if let Some(entry) = &mut entry {
-                            entry.length += page_size;
-                        } else {
-                            entry = Some(MemoryRange {
-                                gpa: r.gpa + page_offset,
-                                length: page_size,
-                            });
-                        }
-                    } else if let Some(entry) = entry.take() {
-                        table.push(entry);
-                    }
-                }
-            }
-            if let Some(entry) = entry.take() {
-                table.push(entry);
-            }
+            let sub_table = MemoryRangeTable::from_bitmap(dirty_bitmap, r.gpa);
 
-            if table.regions().is_empty() {
+            if sub_table.regions().is_empty() {
                 info!("Dirty Memory Range Table is empty");
             } else {
                 info!("Dirty Memory Range Table:");
-                for range in table.regions() {
+                for range in sub_table.regions() {
                     info!("GPA: {:x} size: {} (KiB)", range.gpa, range.length / 1024);
                 }
             }
+
+            table.extend(sub_table);
         }
         Ok(table)
     }