vmm: Retrieve new memory region when memory is extended

Whenever the memory is resized, it's important to retrieve the new
region and pass it down to the device manager so that it can decide
what to do with it.

Also, there's no need to use a boolean, as we can instead use an
Option to carry the information about the region. In the virtio-mem
case, there will be no region since the whole memory has been reserved
up front by the VMM at boot. This means only ACPI hotplug returns a
region, and it is the only method that requires the memory to be
updated from the device manager.

Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
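
For illustration only (not part of the commit): a minimal, self-contained
sketch of the calling convention described above. Region, Error,
HotplugMethod, MemoryManager and update_memory() are stand-ins for the
real Arc<GuestRegionMmap> and VMM structures; only the ACPI path produces
a region, so only that path triggers a device manager update.

// Illustrative sketch only -- stand-in types, not the cloud-hypervisor code.
use std::sync::Arc;

#[derive(Debug)]
struct Region; // stand-in for vm_memory::GuestRegionMmap

#[derive(Debug)]
struct Error;

enum HotplugMethod {
    Acpi,
    VirtioMem,
}

struct MemoryManager {
    hotplug_method: HotplugMethod,
    boot_ram: u64,
    current_ram: u64,
}

impl MemoryManager {
    // Returns Some(region) only when a brand new region was mapped (ACPI);
    // virtio-mem resizes memory reserved at boot, so it returns None.
    fn resize(&mut self, desired_ram: u64) -> Result<Option<Arc<Region>>, Error> {
        let mut region = None;
        match self.hotplug_method {
            HotplugMethod::VirtioMem => {
                if desired_ram >= self.boot_ram {
                    // The real code would resize the virtio-mem device here.
                    self.current_ram = desired_ram;
                }
            }
            HotplugMethod::Acpi => {
                if desired_ram >= self.current_ram {
                    // The real code would hotplug a new RAM region here.
                    region = Some(Arc::new(Region));
                    self.current_ram = desired_ram;
                }
            }
        }
        Ok(region)
    }
}

// Stand-in for the device manager update: only called for ACPI hotplug.
fn update_memory(_new_region: &Arc<Region>) {}

fn main() -> Result<(), Error> {
    let mut mm = MemoryManager {
        hotplug_method: HotplugMethod::Acpi,
        boot_ram: 1 << 30,
        current_ram: 1 << 30,
    };
    // A Some(region) means a region was hotplugged and devices must be told.
    if let Some(new_region) = mm.resize(2 << 30)? {
        update_memory(&new_region);
    }
    Ok(())
}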
Author: Sebastien Boeuf
Date:   2020-03-26 14:36:15 +01:00
Parent: e4a034aef9
Commit: cc67131ecc

3 changed files with 24 additions and 18 deletions

@@ -53,7 +53,9 @@ use vm_device::interrupt::{
 };
 use vm_device::{Migratable, MigratableError, Pausable, Snapshotable};
 use vm_memory::guest_memory::FileOffset;
-use vm_memory::{Address, GuestAddress, GuestAddressSpace, GuestUsize, MmapRegion};
+use vm_memory::{
+    Address, GuestAddress, GuestAddressSpace, GuestRegionMmap, GuestUsize, MmapRegion,
+};
 #[cfg(feature = "pci_support")]
 use vm_virtio::transport::VirtioPciDevice;
 use vm_virtio::transport::VirtioTransport;
@@ -1874,7 +1876,7 @@ impl DeviceManager {
         self.cmdline_additions.as_slice()
     }
 
-    pub fn update_memory(&self) -> DeviceManagerResult<()> {
+    pub fn update_memory(&self, _new_region: &Arc<GuestRegionMmap>) -> DeviceManagerResult<()> {
         let memory = self.memory_manager.lock().unwrap().guest_memory();
         for (virtio_device, _) in self.virtio_devices.iter() {
             virtio_device

@@ -379,7 +379,7 @@ impl MemoryManager {
         Ok(())
     }
 
-    fn hotplug_ram_region(&mut self, size: usize) -> Result<(), Error> {
+    fn hotplug_ram_region(&mut self, size: usize) -> Result<Arc<GuestRegionMmap>, Error> {
         info!("Hotplugging new RAM: {}", size);
 
         // Check that there is a free slot
@@ -434,9 +434,9 @@ impl MemoryManager {
 
         self.next_hotplug_slot += 1;
 
-        self.add_region(region)?;
+        self.add_region(Arc::clone(&region))?;
 
-        Ok(())
+        Ok(region)
     }
 
     pub fn guest_memory(&self) -> GuestMemoryAtomic<GuestMemoryMmap> {
@@ -526,25 +526,28 @@ impl MemoryManager {
         Ok(())
     }
 
-    pub fn resize(&mut self, desired_ram: u64) -> Result<bool, Error> {
-        let mut resized = false;
+    /// In case this function resulted in adding a new memory region to the
+    /// guest memory, the new region is returned to the caller. The virtio-mem
+    /// use case never adds a new region as the whole hotpluggable memory has
+    /// already been allocated at boot time.
+    pub fn resize(&mut self, desired_ram: u64) -> Result<Option<Arc<GuestRegionMmap>>, Error> {
+        let mut region: Option<Arc<GuestRegionMmap>> = None;
         match self.hotplug_method {
             HotplugMethod::VirtioMem => {
                 if desired_ram >= self.boot_ram {
                     self.virtiomem_resize(desired_ram - self.boot_ram)?;
                     self.current_ram = desired_ram;
-                    resized = true;
                 }
             }
             HotplugMethod::Acpi => {
                 if desired_ram >= self.current_ram {
-                    self.hotplug_ram_region((desired_ram - self.current_ram) as usize)?;
+                    region =
+                        Some(self.hotplug_ram_region((desired_ram - self.current_ram) as usize)?);
                     self.current_ram = desired_ram;
-                    resized = true;
                 }
             }
         }
 
-        Ok(resized)
+        Ok(region)
     }
 }

@@ -606,17 +606,18 @@ impl Vm {
         }
 
         if let Some(desired_memory) = desired_memory {
-            if self
+            let new_region = self
                 .memory_manager
                 .lock()
                 .unwrap()
                 .resize(desired_memory)
-                .map_err(Error::MemoryManager)?
-            {
+                .map_err(Error::MemoryManager)?;
+
+            if let Some(new_region) = &new_region {
                 self.device_manager
                     .lock()
                     .unwrap()
-                    .update_memory()
+                    .update_memory(&new_region)
                     .map_err(Error::DeviceManager)?;
 
                 let memory_config = &self.config.lock().unwrap().memory;
@@ -632,9 +633,9 @@
                 }
             }
 
-            // We update the VM config regardless of the actual guest resize operation
-            // result (true or false, happened or not), so that if the VM reboots it
-            // will be running with the last configure memory size.
+            // We update the VM config regardless of the actual guest resize
+            // operation result (happened or not), so that if the VM reboots
+            // it will be running with the last configure memory size.
             self.config.lock().unwrap().memory.size = desired_memory;
         }
         Ok(())