mirror of
https://github.com/cloud-hypervisor/cloud-hypervisor.git
synced 2024-10-01 02:55:45 +00:00
vm-migration, vmm: Extend methods for MemoryRangeTable
In anticipation of supporting the merge of multiple dirty-page ranges coming from multiple devices, this patch factorizes the creation of a MemoryRangeTable from a bitmap, and provides a simple method for merging the dirty-page regions under a single MemoryRangeTable. Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
This commit is contained in:
parent
2a1e33ee35
commit
79425b6aa8
@ -184,6 +184,35 @@ pub struct MemoryRangeTable {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl MemoryRangeTable {
|
impl MemoryRangeTable {
|
||||||
|
pub fn from_bitmap(bitmap: Vec<u64>, start_addr: u64) -> Self {
|
||||||
|
let page_size = 4096;
|
||||||
|
let mut table = MemoryRangeTable::default();
|
||||||
|
let mut entry: Option<MemoryRange> = None;
|
||||||
|
for (i, block) in bitmap.iter().enumerate() {
|
||||||
|
for j in 0..64 {
|
||||||
|
let is_page_dirty = ((block >> j) & 1u64) != 0u64;
|
||||||
|
let page_offset = ((i * 64) + j) as u64 * page_size;
|
||||||
|
if is_page_dirty {
|
||||||
|
if let Some(entry) = &mut entry {
|
||||||
|
entry.length += page_size;
|
||||||
|
} else {
|
||||||
|
entry = Some(MemoryRange {
|
||||||
|
gpa: start_addr + page_offset,
|
||||||
|
length: page_size,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
} else if let Some(entry) = entry.take() {
|
||||||
|
table.push(entry);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if let Some(entry) = entry.take() {
|
||||||
|
table.push(entry);
|
||||||
|
}
|
||||||
|
|
||||||
|
table
|
||||||
|
}
|
||||||
|
|
||||||
pub fn regions(&self) -> &[MemoryRange] {
|
pub fn regions(&self) -> &[MemoryRange] {
|
||||||
&self.data
|
&self.data
|
||||||
}
|
}
|
||||||
@ -223,4 +252,20 @@ impl MemoryRangeTable {
|
|||||||
})
|
})
|
||||||
.map_err(MigratableError::MigrateSocket)
|
.map_err(MigratableError::MigrateSocket)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn is_empty(&self) -> bool {
|
||||||
|
self.data.is_empty()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn extend(&mut self, table: Self) {
|
||||||
|
self.data.extend(table.data)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn new_from_tables(tables: Vec<Self>) -> Self {
|
||||||
|
let mut data = Vec::new();
|
||||||
|
for table in tables {
|
||||||
|
data.extend(table.data);
|
||||||
|
}
|
||||||
|
Self { data }
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
@ -41,9 +41,8 @@ use vm_memory::{
|
|||||||
GuestUsize, MmapRegion,
|
GuestUsize, MmapRegion,
|
||||||
};
|
};
|
||||||
use vm_migration::{
|
use vm_migration::{
|
||||||
protocol::{MemoryRange, MemoryRangeTable},
|
protocol::MemoryRangeTable, Migratable, MigratableError, Pausable, Snapshot,
|
||||||
Migratable, MigratableError, Pausable, Snapshot, SnapshotDataSection, Snapshottable,
|
SnapshotDataSection, Snapshottable, Transportable, VersionMapped,
|
||||||
Transportable, VersionMapped,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
#[cfg(feature = "acpi")]
|
#[cfg(feature = "acpi")]
|
||||||
@ -1499,7 +1498,6 @@ impl MemoryManager {
|
|||||||
pub fn dirty_memory_range_table(
|
pub fn dirty_memory_range_table(
|
||||||
&self,
|
&self,
|
||||||
) -> std::result::Result<MemoryRangeTable, MigratableError> {
|
) -> std::result::Result<MemoryRangeTable, MigratableError> {
|
||||||
let page_size = 4096; // TODO: Does this need to vary?
|
|
||||||
let mut table = MemoryRangeTable::default();
|
let mut table = MemoryRangeTable::default();
|
||||||
for r in &self.guest_ram_mappings {
|
for r in &self.guest_ram_mappings {
|
||||||
let vm_dirty_bitmap = self.vm.get_dirty_log(r.slot, r.gpa, r.size).map_err(|e| {
|
let vm_dirty_bitmap = self.vm.get_dirty_log(r.slot, r.gpa, r.size).map_err(|e| {
|
||||||
@ -1526,37 +1524,18 @@ impl MemoryManager {
|
|||||||
.map(|(x, y)| x | y)
|
.map(|(x, y)| x | y)
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
let mut entry: Option<MemoryRange> = None;
|
let sub_table = MemoryRangeTable::from_bitmap(dirty_bitmap, r.gpa);
|
||||||
for (i, block) in dirty_bitmap.iter().enumerate() {
|
|
||||||
for j in 0..64 {
|
|
||||||
let is_page_dirty = ((block >> j) & 1u64) != 0u64;
|
|
||||||
let page_offset = ((i * 64) + j) as u64 * page_size;
|
|
||||||
if is_page_dirty {
|
|
||||||
if let Some(entry) = &mut entry {
|
|
||||||
entry.length += page_size;
|
|
||||||
} else {
|
|
||||||
entry = Some(MemoryRange {
|
|
||||||
gpa: r.gpa + page_offset,
|
|
||||||
length: page_size,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
} else if let Some(entry) = entry.take() {
|
|
||||||
table.push(entry);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if let Some(entry) = entry.take() {
|
|
||||||
table.push(entry);
|
|
||||||
}
|
|
||||||
|
|
||||||
if table.regions().is_empty() {
|
if sub_table.regions().is_empty() {
|
||||||
info!("Dirty Memory Range Table is empty");
|
info!("Dirty Memory Range Table is empty");
|
||||||
} else {
|
} else {
|
||||||
info!("Dirty Memory Range Table:");
|
info!("Dirty Memory Range Table:");
|
||||||
for range in table.regions() {
|
for range in sub_table.regions() {
|
||||||
info!("GPA: {:x} size: {} (KiB)", range.gpa, range.length / 1024);
|
info!("GPA: {:x} size: {} (KiB)", range.gpa, range.length / 1024);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
table.extend(sub_table);
|
||||||
}
|
}
|
||||||
Ok(table)
|
Ok(table)
|
||||||
}
|
}
|
||||||
|
Loading…
Reference in New Issue
Block a user