vmm: Create guest memory regions with explicit dirty-pages-log flags

As we are now using a global control to start/stop the dirty pages log from
the `hypervisor` crate, we need to explicitly tell the hypervisor (KVM)
whether a region needs dirty-page tracking when it is created.

This reverts commit f063346de3.

Signed-off-by: Bo Chen <chen.bo@intel.com>
Bo Chen, 2021-07-26 11:30:01 -07:00 (committed by Bo Chen)
commit b00a6a8519, parent e7c9954dc1
3 changed files with 30 additions and 3 deletions
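Note: the "global control" mentioned above toggles dirty-page logging for all guest RAM slots at once. A minimal sketch of what such a toggle can look like on KVM, assuming the VMM keeps the `kvm_userspace_memory_region` structs it registered; `set_user_memory_region` and the flag constant are real kvm-ioctls/kvm-bindings APIs, while `toggle_dirty_log` itself is illustrative:

use kvm_bindings::{kvm_userspace_memory_region, KVM_MEM_LOG_DIRTY_PAGES};
use kvm_ioctls::VmFd;

// Illustrative helper: re-register every RAM slot with the
// KVM_MEM_LOG_DIRTY_PAGES flag set or cleared. KVM permits changing
// the flags of an existing slot this way.
fn toggle_dirty_log(
    vm: &VmFd,
    regions: &mut [kvm_userspace_memory_region],
    enable: bool,
) -> std::io::Result<()> {
    for region in regions.iter_mut() {
        if enable {
            region.flags |= KVM_MEM_LOG_DIRTY_PAGES;
        } else {
            region.flags &= !KVM_MEM_LOG_DIRTY_PAGES;
        }
        // Unsafe because KVM will access the userspace memory the
        // region describes; it must stay mapped for the slot's lifetime.
        unsafe { vm.set_user_memory_region(*region) }
            .map_err(|e| std::io::Error::from_raw_os_error(e.errno()))?;
    }
    Ok(())
}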

vmm/src/device_manager.rs

@@ -2233,7 +2233,9 @@ impl DeviceManager {
                     .memory_manager
                     .lock()
                     .unwrap()
-                    .create_userspace_mapping(cache_base, cache_size, host_addr, false, false)
+                    .create_userspace_mapping(
+                        cache_base, cache_size, host_addr, false, false, false,
+                    )
                     .map_err(DeviceManagerError::MemoryManager)?;
 
                 let region_list = vec![VirtioSharedMemory {
@@ -2425,6 +2427,7 @@ impl DeviceManager {
                         host_addr,
                         pmem_cfg.mergeable,
                         false,
+                        false,
                     )
                     .map_err(DeviceManagerError::MemoryManager)?;

vmm/src/memory_manager.rs

@@ -139,6 +139,7 @@ pub struct MemoryManager {
     user_provided_zones: bool,
     snapshot_memory_regions: Vec<MemoryRegion>,
     memory_zones: MemoryZones,
+    log_dirty: bool, // Enable dirty logging for created RAM regions
     // Keep track of calls to create_userspace_mapping() for guest RAM.
     // This is useful for getting the dirty pages as we need to know the
@@ -509,6 +510,7 @@ impl MemoryManager {
         config: &MemoryConfig,
         prefault: bool,
         phys_bits: u8,
+        #[cfg(feature = "tdx")] tdx_enabled: bool,
     ) -> Result<Arc<Mutex<MemoryManager>>, Error> {
         let user_provided_zones = config.size == 0;
         let mut allow_mem_hotplug: bool = false;
@@ -747,6 +749,11 @@ impl MemoryManager {
             .allocate_mmio_addresses(None, MEMORY_MANAGER_ACPI_SIZE as u64, None)
             .ok_or(Error::AllocateMmioAddress)?;
 
+        #[cfg(not(feature = "tdx"))]
+        let log_dirty = true;
+        #[cfg(feature = "tdx")]
+        let log_dirty = !tdx_enabled; // Cannot log dirty pages on a TD
+
         let memory_manager = Arc::new(Mutex::new(MemoryManager {
             boot_guest_memory,
             guest_memory: guest_memory.clone(),
@@ -774,6 +781,7 @@ impl MemoryManager {
             guest_ram_mappings: Vec::new(),
             #[cfg(feature = "acpi")]
             acpi_address,
+            log_dirty,
         }));
 
         for region in guest_memory.memory().iter() {
@@ -784,6 +792,7 @@ impl MemoryManager {
                 region.as_ptr() as u64,
                 config.mergeable,
                 false,
+                log_dirty,
             )?;
             mm.guest_ram_mappings.push(GuestRamMapping {
                 gpa: region.start_addr().raw_value(),
@@ -800,6 +809,7 @@ impl MemoryManager {
                     region.as_ptr() as u64,
                     config.mergeable,
                     false,
+                    log_dirty,
                 )?;
                 mm.guest_ram_mappings.push(GuestRamMapping {
@@ -835,7 +845,14 @@ impl MemoryManager {
         prefault: bool,
         phys_bits: u8,
     ) -> Result<Arc<Mutex<MemoryManager>>, Error> {
-        let mm = MemoryManager::new(vm, config, prefault, phys_bits)?;
+        let mm = MemoryManager::new(
+            vm,
+            config,
+            prefault,
+            phys_bits,
+            #[cfg(feature = "tdx")]
+            false,
+        )?;
 
         if let Some(source_url) = source_url {
             let vm_snapshot_path = url_to_path(source_url).map_err(Error::Restore)?;
@@ -1101,6 +1118,7 @@ impl MemoryManager {
             region.as_ptr() as u64,
             self.mergeable,
             false,
+            self.log_dirty,
         )?;
         self.guest_ram_mappings.push(GuestRamMapping {
             gpa: region.start_addr().raw_value(),
@@ -1192,6 +1210,7 @@ impl MemoryManager {
         userspace_addr: u64,
         mergeable: bool,
         readonly: bool,
+        log_dirty: bool,
     ) -> Result<u32, Error> {
         let slot = self.allocate_memory_slot();
         let mem_region = self.vm.make_user_memory_region(
@@ -1200,7 +1219,7 @@
             memory_size,
             userspace_addr,
             readonly,
-            false, // Always create memory regions without dirty pages log
+            log_dirty,
         );
 
         self.vm
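The hunk above is the heart of the change: `create_userspace_mapping()` now forwards the caller's choice instead of hard-coding `false`. On KVM this kind of flag ultimately lands in `kvm_userspace_memory_region`; a sketch of the translation, assuming the constants from kvm-bindings (the actual helper in the `hypervisor` crate may differ):

use kvm_bindings::{
    kvm_userspace_memory_region, KVM_MEM_LOG_DIRTY_PAGES, KVM_MEM_READONLY,
};

// Sketch: map the boolean parameters onto KVM region flags.
fn make_user_memory_region(
    slot: u32,
    guest_phys_addr: u64,
    memory_size: u64,
    userspace_addr: u64,
    readonly: bool,
    log_dirty: bool,
) -> kvm_userspace_memory_region {
    let mut flags = 0;
    if readonly {
        flags |= KVM_MEM_READONLY;
    }
    if log_dirty {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    kvm_userspace_memory_region {
        slot,
        guest_phys_addr,
        memory_size,
        userspace_addr,
        flags,
    }
}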
@@ -1436,6 +1455,7 @@ impl MemoryManager {
             host_addr,
             false,
             false,
+            false,
         )?;
         sgx_epc_region.insert(

vmm/src/vm.rs

@@ -769,6 +769,8 @@ impl Vm {
             &config.lock().unwrap().memory.clone(),
             false,
             phys_bits,
+            #[cfg(feature = "tdx")]
+            tdx_enabled,
         )
         .map_err(Error::MemoryManager)?;
@@ -889,6 +891,8 @@ impl Vm {
             &config.lock().unwrap().memory.clone(),
             false,
             phys_bits,
+            #[cfg(feature = "tdx")]
+            false,
         )
         .map_err(Error::MemoryManager)?;
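For completeness, the point of creating RAM regions with logging enabled is being able to read per-slot dirty bitmaps later. A small illustrative consumer, assuming the VMM recorded each slot's id and size; `get_dirty_log` is the real kvm-ioctls call, the surrounding loop is a sketch:

use kvm_ioctls::VmFd;

// Illustrative: count dirty pages across all guest RAM slots.
// Each bit in a returned bitmap word marks one dirty 4 KiB page.
fn count_dirty_pages(vm: &VmFd, slots: &[(u32, usize)]) -> std::io::Result<usize> {
    let mut dirty = 0;
    for &(slot, size_bytes) in slots {
        let bitmap = vm
            .get_dirty_log(slot, size_bytes)
            .map_err(|e| std::io::Error::from_raw_os_error(e.errno()))?;
        dirty += bitmap
            .iter()
            .map(|word| word.count_ones() as usize)
            .sum::<usize>();
    }
    Ok(dirty)
}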