vm-migration, vmm: Extend methods for MemoryRangeTable

In anticipation of supporting the merge of multiple dirty pages coming
from multiple devices, this patch factorizes the creation of a
MemoryRangeTable from a bitmap, as well as providing a simple method for
merging the dirty pages regions under a single MemoryRangeTable.

Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
This commit is contained in:
Sebastien Boeuf 2021-08-04 15:30:28 +02:00 committed by Bo Chen
parent 2a1e33ee35
commit 79425b6aa8
2 changed files with 52 additions and 28 deletions

View File

@@ -184,6 +184,35 @@ pub struct MemoryRangeTable {
}
impl MemoryRangeTable {
/// Build a `MemoryRangeTable` out of a dirty-page bitmap.
///
/// Each bit of `bitmap` tracks one 4 KiB guest page; bit `j` of word
/// `i` covers the page at `start_addr + ((i * 64) + j) * 4096`.
/// Consecutive dirty pages are coalesced into a single `MemoryRange`.
pub fn from_bitmap(bitmap: Vec<u64>, start_addr: u64) -> Self {
    // Page size is fixed at 4 KiB, matching the granularity of the
    // dirty-log bitmaps provided by the hypervisor.
    let page_size: u64 = 4096;
    let mut table = MemoryRangeTable::default();
    // The contiguous dirty range currently being accumulated, if any.
    let mut current: Option<MemoryRange> = None;
    for (word_idx, word) in bitmap.iter().enumerate() {
        for bit in 0..64u64 {
            let page_offset = (word_idx as u64 * 64 + bit) * page_size;
            if (word >> bit) & 1 != 0 {
                // Dirty page: grow the open range, or start a new one.
                match current.as_mut() {
                    Some(range) => range.length += page_size,
                    None => {
                        current = Some(MemoryRange {
                            gpa: start_addr + page_offset,
                            length: page_size,
                        })
                    }
                }
            } else if let Some(range) = current.take() {
                // Clean page terminates the open range.
                table.push(range);
            }
        }
    }
    // Flush a range still open when the bitmap ends on a dirty page.
    if let Some(range) = current {
        table.push(range);
    }
    table
}
/// Borrow the memory ranges currently held by this table.
pub fn regions(&self) -> &[MemoryRange] {
    self.data.as_slice()
}
@@ -223,4 +252,20 @@ impl MemoryRangeTable {
})
.map_err(MigratableError::MigrateSocket)
}
/// Report whether the table holds no memory ranges at all.
pub fn is_empty(&self) -> bool {
    self.regions().is_empty()
}
/// Move every range out of `table` and append it to this table,
/// preserving order; `table` is consumed.
pub fn extend(&mut self, table: Self) {
    let mut incoming = table.data;
    self.data.append(&mut incoming);
}
/// Build a single table by concatenating the ranges of all provided
/// tables, preserving their order.
pub fn new_from_tables(tables: Vec<Self>) -> Self {
    Self {
        // flat_map + collect replaces the manual loop-and-extend:
        // it avoids the intermediate mutable Vec and lets `collect`
        // presize the allocation from the iterator's size hints.
        data: tables.into_iter().flat_map(|t| t.data).collect(),
    }
}
}

View File

@@ -41,9 +41,8 @@ use vm_memory::{
GuestUsize, MmapRegion,
};
use vm_migration::{
protocol::{MemoryRange, MemoryRangeTable},
Migratable, MigratableError, Pausable, Snapshot, SnapshotDataSection, Snapshottable,
Transportable, VersionMapped,
protocol::MemoryRangeTable, Migratable, MigratableError, Pausable, Snapshot,
SnapshotDataSection, Snapshottable, Transportable, VersionMapped,
};
#[cfg(feature = "acpi")]
@@ -1499,7 +1498,6 @@ impl MemoryManager {
pub fn dirty_memory_range_table(
&self,
) -> std::result::Result<MemoryRangeTable, MigratableError> {
let page_size = 4096; // TODO: Does this need to vary?
let mut table = MemoryRangeTable::default();
for r in &self.guest_ram_mappings {
let vm_dirty_bitmap = self.vm.get_dirty_log(r.slot, r.gpa, r.size).map_err(|e| {
@@ -1526,37 +1524,18 @@
.map(|(x, y)| x | y)
.collect();
let mut entry: Option<MemoryRange> = None;
for (i, block) in dirty_bitmap.iter().enumerate() {
for j in 0..64 {
let is_page_dirty = ((block >> j) & 1u64) != 0u64;
let page_offset = ((i * 64) + j) as u64 * page_size;
if is_page_dirty {
if let Some(entry) = &mut entry {
entry.length += page_size;
} else {
entry = Some(MemoryRange {
gpa: r.gpa + page_offset,
length: page_size,
});
}
} else if let Some(entry) = entry.take() {
table.push(entry);
}
}
}
if let Some(entry) = entry.take() {
table.push(entry);
}
let sub_table = MemoryRangeTable::from_bitmap(dirty_bitmap, r.gpa);
if table.regions().is_empty() {
if sub_table.regions().is_empty() {
info!("Dirty Memory Range Table is empty");
} else {
info!("Dirty Memory Range Table:");
for range in table.regions() {
for range in sub_table.regions() {
info!("GPA: {:x} size: {} (KiB)", range.gpa, range.length / 1024);
}
}
table.extend(sub_table);
}
Ok(table)
}