// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
#[cfg(target_arch = "x86_64")]
use crate::config::SgxEpcConfig;
use crate::config::{HotplugMethod, MemoryConfig, MemoryZoneConfig};
#[cfg(all(target_arch = "x86_64", feature = "guest_debug"))]
use crate::coredump::{
    CoredumpMemoryRegion, CoredumpMemoryRegions, DumpState, GuestDebuggableError,
};
use crate::migration::url_to_path;
use crate::MEMORY_MANAGER_SNAPSHOT_ID;
use crate::{GuestMemoryMmap, GuestRegionMmap};
use acpi_tables::{aml, aml::Aml};
use anyhow::anyhow;
#[cfg(target_arch = "x86_64")]
use arch::x86_64::{SgxEpcRegion, SgxEpcSection};
use arch::{layout, RegionType};
#[cfg(target_arch = "x86_64")]
use devices::ioapic;
#[cfg(target_arch = "aarch64")]
use hypervisor::HypervisorVmError;
#[cfg(target_arch = "x86_64")]
use libc::{MAP_NORESERVE, MAP_POPULATE, MAP_SHARED, PROT_READ, PROT_WRITE};
use serde::{Deserialize, Serialize};
#[cfg(all(target_arch = "x86_64", feature = "guest_debug"))]
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::convert::TryInto;
use std::ffi;
use std::fs::{File, OpenOptions};
use std::io::{self, Read};
use std::ops::Deref;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::path::PathBuf;
use std::result;
use std::sync::{Arc, Barrier, Mutex};
use tracer::trace_scoped;
use versionize::{VersionMap, Versionize, VersionizeResult};
use versionize_derive::Versionize;
use virtio_devices::BlocksState;
#[cfg(target_arch = "x86_64")]
use vm_allocator::GsiApic;
use vm_allocator::{AddressAllocator, SystemAllocator};
use vm_device::BusDevice;
use vm_memory::bitmap::AtomicBitmap;
use vm_memory::guest_memory::FileOffset;
use vm_memory::{
    mmap::MmapRegionError, Address, Bytes, Error as MmapError, GuestAddress, GuestAddressSpace,
    GuestMemory, GuestMemoryAtomic, GuestMemoryError, GuestMemoryRegion, GuestUsize, MmapRegion,
};
use vm_migration::{
    protocol::MemoryRange, protocol::MemoryRangeTable, Migratable, MigratableError, Pausable,
    Snapshot, SnapshotDataSection, Snapshottable, Transportable, VersionMapped,
};

pub const MEMORY_MANAGER_ACPI_SIZE: usize = 0x18;

const DEFAULT_MEMORY_ZONE: &str = "mem0";

const SNAPSHOT_FILENAME: &str = "memory-ranges";

#[cfg(target_arch = "x86_64")]
const X86_64_IRQ_BASE: u32 = 5;

#[cfg(target_arch = "x86_64")]
const SGX_PAGE_SIZE: u64 = 1 << 12;

const HOTPLUG_COUNT: usize = 8;

// Memory policy constants
const MPOL_BIND: u32 = 2;
const MPOL_MF_STRICT: u32 = 1;
const MPOL_MF_MOVE: u32 = 1 << 1;
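// (Note: the MPOL_* values above mirror the Linux kernel's
// <linux/mempolicy.h> definitions and are passed to the raw mbind(2)
// syscall wrapper defined further below.)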

// Reserve 1 MiB for platform MMIO devices (e.g. ACPI control devices)
const PLATFORM_DEVICE_AREA_SIZE: u64 = 1 << 20;

#[derive(Clone, Default, Serialize, Deserialize, Versionize)]
struct HotPlugState {
    base: u64,
    length: u64,
    active: bool,
    inserting: bool,
    removing: bool,
}

pub struct VirtioMemZone {
    region: Arc<GuestRegionMmap>,
    virtio_device: Option<Arc<Mutex<virtio_devices::Mem>>>,
    hotplugged_size: u64,
    hugepages: bool,
    blocks_state: Arc<Mutex<BlocksState>>,
}

impl VirtioMemZone {
    pub fn region(&self) -> &Arc<GuestRegionMmap> {
        &self.region
    }

    pub fn set_virtio_device(&mut self, virtio_device: Arc<Mutex<virtio_devices::Mem>>) {
        self.virtio_device = Some(virtio_device);
    }

    pub fn hotplugged_size(&self) -> u64 {
        self.hotplugged_size
    }

    pub fn hugepages(&self) -> bool {
        self.hugepages
    }

    pub fn blocks_state(&self) -> &Arc<Mutex<BlocksState>> {
        &self.blocks_state
    }
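
    /// Returns the guest physical ranges of this zone's virtio-mem region
    /// whose blocks are currently plugged.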
    pub fn plugged_ranges(&self) -> MemoryRangeTable {
        self.blocks_state
            .lock()
            .unwrap()
            .memory_ranges(self.region.start_addr().raw_value(), true)
    }
}

#[derive(Default)]
pub struct MemoryZone {
    regions: Vec<Arc<GuestRegionMmap>>,
    virtio_mem_zone: Option<VirtioMemZone>,
}

impl MemoryZone {
    pub fn regions(&self) -> &Vec<Arc<GuestRegionMmap>> {
        &self.regions
    }

    pub fn virtio_mem_zone(&self) -> &Option<VirtioMemZone> {
        &self.virtio_mem_zone
    }

    pub fn virtio_mem_zone_mut(&mut self) -> Option<&mut VirtioMemZone> {
        self.virtio_mem_zone.as_mut()
    }
}

pub type MemoryZones = HashMap<String, MemoryZone>;

#[derive(Clone, Serialize, Deserialize, Versionize)]
struct GuestRamMapping {
    slot: u32,
    gpa: u64,
    size: u64,
    zone_id: String,
    virtio_mem: bool,
    file_offset: u64,
}

#[derive(Clone, Serialize, Deserialize, Versionize)]
struct ArchMemRegion {
    base: u64,
    size: usize,
    r_type: RegionType,
}

pub struct MemoryManager {
    boot_guest_memory: GuestMemoryMmap,
    guest_memory: GuestMemoryAtomic<GuestMemoryMmap>,
    next_memory_slot: u32,
    start_of_device_area: GuestAddress,
    end_of_device_area: GuestAddress,
    end_of_ram_area: GuestAddress,
    pub vm: Arc<dyn hypervisor::Vm>,
    hotplug_slots: Vec<HotPlugState>,
    selected_slot: usize,
    mergeable: bool,
    allocator: Arc<Mutex<SystemAllocator>>,
    hotplug_method: HotplugMethod,
    boot_ram: u64,
    current_ram: u64,
    next_hotplug_slot: usize,
    shared: bool,
    hugepages: bool,
    hugepage_size: Option<u64>,
    prefault: bool,
    thp: bool,
    #[cfg(target_arch = "x86_64")]
    sgx_epc_region: Option<SgxEpcRegion>,
    user_provided_zones: bool,
    snapshot_memory_ranges: MemoryRangeTable,
    memory_zones: MemoryZones,
    log_dirty: bool, // Enable dirty logging for created RAM regions
    arch_mem_regions: Vec<ArchMemRegion>,
    ram_allocator: AddressAllocator,
    dynamic: bool,

    // Keep track of calls to create_userspace_mapping() for guest RAM.
    // This is useful for getting the dirty pages as we need to know the
    // slots that the mapping is created in.
    guest_ram_mappings: Vec<GuestRamMapping>,

    pub acpi_address: Option<GuestAddress>,
    #[cfg(target_arch = "aarch64")]
    uefi_flash: Option<GuestMemoryAtomic<GuestMemoryMmap>>,
}

#[derive(Debug)]
pub enum Error {
    /// Failed to create shared file.
    SharedFileCreate(io::Error),

    /// Failed to set shared file length.
    SharedFileSetLen(io::Error),

    /// Mmap backed guest memory error
    GuestMemory(MmapError),

    /// Failed to allocate a memory range.
    MemoryRangeAllocation,

    /// Error from region creation
    GuestMemoryRegion(MmapRegionError),

    /// No ACPI slot available
    NoSlotAvailable,

    /// Not enough space in the hotplug RAM region
    InsufficientHotplugRam,

    /// The requested hotplug memory addition is not a valid size
    InvalidSize,

    /// Failed to create the user memory region.
    CreateUserMemoryRegion(hypervisor::HypervisorVmError),

    /// Failed to remove the user memory region.
    RemoveUserMemoryRegion(hypervisor::HypervisorVmError),

    /// Failed to create EventFd.
    EventFdFail(io::Error),

    /// Eventfd write error
    EventfdError(io::Error),

    /// Failed to resize virtio-mem
    VirtioMemResizeFail(virtio_devices::mem::Error),

    /// Cannot restore VM
    Restore(MigratableError),

    /// Cannot restore VM because source URL is missing
    RestoreMissingSourceUrl,

    /// Cannot create the system allocator
    CreateSystemAllocator,

    /// Invalid SGX EPC section size
    #[cfg(target_arch = "x86_64")]
    EpcSectionSizeInvalid,

    /// Failed allocating SGX EPC region
    #[cfg(target_arch = "x86_64")]
    SgxEpcRangeAllocation,

    /// Failed opening SGX virtual EPC device
    #[cfg(target_arch = "x86_64")]
    SgxVirtEpcOpen(io::Error),

    /// Failed setting the SGX virtual EPC section size
    #[cfg(target_arch = "x86_64")]
    SgxVirtEpcFileSetLen(io::Error),

    /// Failed opening SGX provisioning device
    #[cfg(target_arch = "x86_64")]
    SgxProvisionOpen(io::Error),

    /// Failed enabling SGX provisioning
    #[cfg(target_arch = "x86_64")]
    SgxEnableProvisioning(hypervisor::HypervisorVmError),

    /// Failed creating a new MmapRegion instance.
    #[cfg(target_arch = "x86_64")]
    NewMmapRegion(vm_memory::mmap::MmapRegionError),

    /// No memory zones found.
    MissingMemoryZones,

    /// Memory configuration is not valid.
    InvalidMemoryParameters,

    /// Forbidden operation. Impossible to resize guest memory if it is
    /// backed by user defined memory regions.
    InvalidResizeWithMemoryZones,

    /// It's invalid to try applying a NUMA policy to a memory zone that is
    /// memory mapped with MAP_SHARED.
    InvalidSharedMemoryZoneWithHostNuma,

    /// Failed applying NUMA memory policy.
    ApplyNumaPolicy(io::Error),

    /// Memory zone identifier is not unique.
    DuplicateZoneId,

    /// No virtio-mem resizing handler found.
    MissingVirtioMemHandler,

    /// Unknown memory zone.
    UnknownMemoryZone,

    /// Invalid size for resizing. It can be any value except 0.
    InvalidHotplugSize,

    /// Invalid hotplug method associated with memory zones resizing capability.
    InvalidHotplugMethodWithMemoryZones,

    /// Could not find the specified memory zone identifier in the hash map.
    MissingZoneIdentifier,

    /// Resizing the memory zone failed.
    ResizeZone,

    /// Guest address overflow
    GuestAddressOverFlow,

    /// Error opening snapshot file
    SnapshotOpen(io::Error),

    /// Error copying snapshot into region
    SnapshotCopy(GuestMemoryError),

    /// Failed to allocate MMIO address
    AllocateMmioAddress,

    #[cfg(target_arch = "aarch64")]
    /// Failed to create UEFI flash
    CreateUefiFlash(HypervisorVmError),
}

const ENABLE_FLAG: usize = 0;
const INSERTING_FLAG: usize = 1;
const REMOVING_FLAG: usize = 2;
const EJECT_FLAG: usize = 3;

const BASE_OFFSET_LOW: u64 = 0;
const BASE_OFFSET_HIGH: u64 = 0x4;
const LENGTH_OFFSET_LOW: u64 = 0x8;
const LENGTH_OFFSET_HIGH: u64 = 0xC;
const STATUS_OFFSET: u64 = 0x14;
const SELECTION_OFFSET: u64 = 0;
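
// Layout of the hotplug-slot registers implemented by the BusDevice
// read()/write() handlers below (offsets relative to the device base):
// - 0x00: on write, selects the slot; on read, slot base address (low 32 bits)
// - 0x04: slot base address (high 32 bits)
// - 0x08: slot length (low 32 bits)
// - 0x0C: slot length (high 32 bits)
// - 0x14: on read, status bits (enabled/inserting/removing); on write,
//   acknowledges insertion/removal or requests ejection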

// The MMIO address space size is reduced by 64k. This is done for the
// following reasons:
// - Reduce the addressable space size by at least 4k to workaround a Linux
//   bug when the VMM allocates devices at the end of the addressable space
// - Windows requires the addressable space size to be 64k aligned
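//
// As an illustration: with phys_bits = 40 this yields
// 2^40 - 2^16 = 1_099_511_562_240 bytes, which is 64k aligned.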
fn mmio_address_space_size(phys_bits: u8) -> u64 {
    (1 << phys_bits) - (1 << 16)
}

impl BusDevice for MemoryManager {
    fn read(&mut self, _base: u64, offset: u64, data: &mut [u8]) {
        if self.selected_slot < self.hotplug_slots.len() {
            let state = &self.hotplug_slots[self.selected_slot];
            match offset {
                BASE_OFFSET_LOW => {
                    data.copy_from_slice(&state.base.to_le_bytes()[..4]);
                }
                BASE_OFFSET_HIGH => {
                    data.copy_from_slice(&state.base.to_le_bytes()[4..]);
                }
                LENGTH_OFFSET_LOW => {
                    data.copy_from_slice(&state.length.to_le_bytes()[..4]);
                }
                LENGTH_OFFSET_HIGH => {
                    data.copy_from_slice(&state.length.to_le_bytes()[4..]);
                }
                STATUS_OFFSET => {
                    // The Linux kernel, quite reasonably, doesn't zero the memory it gives us.
                    data.fill(0);
                    if state.active {
                        data[0] |= 1 << ENABLE_FLAG;
                    }
                    if state.inserting {
                        data[0] |= 1 << INSERTING_FLAG;
                    }
                    if state.removing {
                        data[0] |= 1 << REMOVING_FLAG;
                    }
                }
                _ => {
                    warn!(
                        "Unexpected offset for accessing memory manager device: {:#}",
                        offset
                    );
                }
            }
        } else {
            warn!("Out of range memory slot: {}", self.selected_slot);
        }
    }

    fn write(&mut self, _base: u64, offset: u64, data: &[u8]) -> Option<Arc<Barrier>> {
        match offset {
            SELECTION_OFFSET => {
                self.selected_slot = usize::from(data[0]);
            }
            STATUS_OFFSET => {
                if self.selected_slot < self.hotplug_slots.len() {
                    let state = &mut self.hotplug_slots[self.selected_slot];
                    // The ACPI code writes back a 1 to acknowledge the insertion
                    if (data[0] & (1 << INSERTING_FLAG) == 1 << INSERTING_FLAG) && state.inserting {
                        state.inserting = false;
                    }
                    // Ditto for removal
                    if (data[0] & (1 << REMOVING_FLAG) == 1 << REMOVING_FLAG) && state.removing {
                        state.removing = false;
                    }
                    // Trigger removal of "DIMM"
                    if data[0] & (1 << EJECT_FLAG) == 1 << EJECT_FLAG {
                        warn!("Ejection of memory not currently supported");
                    }
                } else {
                    warn!("Out of range memory slot: {}", self.selected_slot);
                }
            }
            _ => {
                warn!(
                    "Unexpected offset for accessing memory manager device: {:#}",
                    offset
                );
            }
        };
        None
    }
}

impl MemoryManager {
    /// Creates all memory regions based on the available RAM ranges defined
    /// by `ram_regions`, and based on the description of the memory zones.
    /// In practice, this function can perform multiple memory mappings of the
    /// same backing file if there's a hole in the address space between two
    /// RAM ranges.
    /// One example might be ram_regions containing 2 regions (0-3G and 4G-6G)
    /// and zones containing two zones (size 1G and size 4G).
    /// This function will create 3 resulting memory regions:
    /// - First one mapping entirely the first memory zone on 0-1G range
    /// - Second one mapping partially the second memory zone on 1G-3G range
    /// - Third one mapping partially the second memory zone on 4G-6G range
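    ///
    /// Expressed as (start, size) tuples, that example therefore produces
    /// (0G, 1G) for the first zone, followed by (1G, 2G) and (4G, 2G) for
    /// the second zone.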
    fn create_memory_regions_from_zones(
        ram_regions: &[(GuestAddress, usize)],
        zones: &[MemoryZoneConfig],
        prefault: Option<bool>,
        thp: bool,
    ) -> Result<(Vec<Arc<GuestRegionMmap>>, MemoryZones), Error> {
        let mut zones = zones.to_owned();
        let mut mem_regions = Vec::new();
        let mut zone = zones.remove(0);
        let mut zone_offset = 0;
        let mut memory_zones = HashMap::new();

        // Add zone id to the list of memory zones.
        memory_zones.insert(zone.id.clone(), MemoryZone::default());

        for ram_region in ram_regions.iter() {
            let mut ram_region_offset = 0;
            let mut exit = false;

            loop {
                let mut ram_region_consumed = false;
                let mut pull_next_zone = false;

                let ram_region_sub_size = ram_region.1 - ram_region_offset;
                let zone_sub_size = zone.size as usize - zone_offset;

                let file_offset = zone_offset as u64;
                let region_start = ram_region
                    .0
                    .checked_add(ram_region_offset as u64)
                    .ok_or(Error::GuestAddressOverFlow)?;
                let region_size = if zone_sub_size <= ram_region_sub_size {
                    if zone_sub_size == ram_region_sub_size {
                        ram_region_consumed = true;
                    }

                    ram_region_offset += zone_sub_size;
                    pull_next_zone = true;

                    zone_sub_size
                } else {
                    zone_offset += ram_region_sub_size;
                    ram_region_consumed = true;

                    ram_region_sub_size
                };

                let region = MemoryManager::create_ram_region(
                    &zone.file,
                    file_offset,
                    region_start,
                    region_size,
                    match prefault {
                        Some(pf) => pf,
                        None => zone.prefault,
                    },
                    zone.shared,
                    zone.hugepages,
                    zone.hugepage_size,
                    zone.host_numa_node,
                    None,
                    thp,
                )?;

                // Add region to the list of regions associated with the
                // current memory zone.
                if let Some(memory_zone) = memory_zones.get_mut(&zone.id) {
                    memory_zone.regions.push(region.clone());
                }

                mem_regions.push(region);

                if pull_next_zone {
                    // Get the next zone and reset the offset.
                    zone_offset = 0;
                    if zones.is_empty() {
                        exit = true;
                        break;
                    }
                    zone = zones.remove(0);

                    // Check if the zone id already exists. In case it does,
                    // throw an error as we need unique identifiers. Otherwise,
                    // add the new zone id to the list of memory zones.
                    if memory_zones.contains_key(&zone.id) {
                        error!(
                            "Memory zone identifier '{}' found more than once. \
                            It must be unique",
                            zone.id,
                        );
                        return Err(Error::DuplicateZoneId);
                    }
                    memory_zones.insert(zone.id.clone(), MemoryZone::default());
                }

                if ram_region_consumed {
                    break;
                }
            }

            if exit {
                break;
            }
        }

        Ok((mem_regions, memory_zones))
    }

    // Restore both the GuestMemory regions and the MemoryZone zones.
    fn restore_memory_regions_and_zones(
        guest_ram_mappings: &[GuestRamMapping],
        zones_config: &[MemoryZoneConfig],
        prefault: Option<bool>,
        mut existing_memory_files: HashMap<u32, File>,
        thp: bool,
    ) -> Result<(Vec<Arc<GuestRegionMmap>>, MemoryZones), Error> {
        let mut memory_regions = Vec::new();
        let mut memory_zones = HashMap::new();

        for zone_config in zones_config {
            memory_zones.insert(zone_config.id.clone(), MemoryZone::default());
        }

        for guest_ram_mapping in guest_ram_mappings {
            for zone_config in zones_config {
                if guest_ram_mapping.zone_id == zone_config.id {
                    let region = MemoryManager::create_ram_region(
                        &zone_config.file,
                        guest_ram_mapping.file_offset,
                        GuestAddress(guest_ram_mapping.gpa),
                        guest_ram_mapping.size as usize,
                        match prefault {
                            Some(pf) => pf,
                            None => zone_config.prefault,
                        },
                        zone_config.shared,
                        zone_config.hugepages,
                        zone_config.hugepage_size,
                        zone_config.host_numa_node,
                        existing_memory_files.remove(&guest_ram_mapping.slot),
                        thp,
                    )?;
                    memory_regions.push(Arc::clone(&region));
                    if let Some(memory_zone) = memory_zones.get_mut(&guest_ram_mapping.zone_id) {
                        if guest_ram_mapping.virtio_mem {
                            let hotplugged_size = zone_config.hotplugged_size.unwrap_or(0);
                            let region_size = region.len();
                            memory_zone.virtio_mem_zone = Some(VirtioMemZone {
                                region,
                                virtio_device: None,
                                hotplugged_size,
                                hugepages: zone_config.hugepages,
                                blocks_state: Arc::new(Mutex::new(BlocksState::new(region_size))),
                            });
                        } else {
                            memory_zone.regions.push(region);
                        }
                    }
                }
            }
        }

        memory_regions.sort_by_key(|x| x.start_addr());

        Ok((memory_regions, memory_zones))
    }

    fn fill_saved_regions(
        &mut self,
        file_path: PathBuf,
        saved_regions: MemoryRangeTable,
    ) -> Result<(), Error> {
        if saved_regions.is_empty() {
            return Ok(());
        }

        // Open (read only) the snapshot file.
        let mut memory_file = OpenOptions::new()
            .read(true)
            .open(file_path)
            .map_err(Error::SnapshotOpen)?;

        let guest_memory = self.guest_memory.memory();
        for range in saved_regions.regions() {
            let mut offset: u64 = 0;
            // Here we are manually handling the retry in case we can't write
            // the whole region at once because we can't use the implementation
            // from vm-memory::GuestMemory of read_exact_from(), as it does not
            // follow the correct behavior. For more info about this issue
            // see: https://github.com/rust-vmm/vm-memory/issues/174
            loop {
                let bytes_read = guest_memory
                    .read_from(
                        GuestAddress(range.gpa + offset),
                        &mut memory_file,
                        (range.length - offset) as usize,
                    )
                    .map_err(Error::SnapshotCopy)?;
                offset += bytes_read as u64;

                if offset == range.length {
                    break;
                }
            }
        }

        Ok(())
    }

    fn validate_memory_config(
        config: &MemoryConfig,
        user_provided_zones: bool,
    ) -> Result<(u64, Vec<MemoryZoneConfig>, bool), Error> {
        let mut allow_mem_hotplug = false;

        if !user_provided_zones {
            if config.zones.is_some() {
                error!(
                    "User defined memory regions can't be provided if the \
                    memory size is not 0"
                );
                return Err(Error::InvalidMemoryParameters);
            }

            if config.hotplug_size.is_some() {
                allow_mem_hotplug = true;
            }

            if let Some(hotplugged_size) = config.hotplugged_size {
                if let Some(hotplug_size) = config.hotplug_size {
                    if hotplugged_size > hotplug_size {
                        error!(
                            "'hotplugged_size' {} can't be bigger than \
                            'hotplug_size' {}",
                            hotplugged_size, hotplug_size,
                        );
                        return Err(Error::InvalidMemoryParameters);
                    }
                } else {
                    error!(
                        "Invalid to define 'hotplugged_size' when there is \
                        no 'hotplug_size'"
                    );
                    return Err(Error::InvalidMemoryParameters);
                }
                if config.hotplug_method == HotplugMethod::Acpi {
                    error!(
                        "Invalid to define 'hotplugged_size' with hotplug \
                        method 'acpi'"
                    );
                    return Err(Error::InvalidMemoryParameters);
                }
            }

            // Create a single zone from the global memory config. This lets
            // us reuse the codepath for user defined memory zones.
            let zones = vec![MemoryZoneConfig {
                id: String::from(DEFAULT_MEMORY_ZONE),
                size: config.size,
                file: None,
                shared: config.shared,
                hugepages: config.hugepages,
                hugepage_size: config.hugepage_size,
                host_numa_node: None,
                hotplug_size: config.hotplug_size,
                hotplugged_size: config.hotplugged_size,
                prefault: config.prefault,
            }];

            Ok((config.size, zones, allow_mem_hotplug))
        } else {
            if config.zones.is_none() {
                error!(
                    "User defined memory regions must be provided if the \
                    memory size is 0"
                );
                return Err(Error::MissingMemoryZones);
            }

            // Safe to unwrap as we checked right above there were some
            // regions.
            let zones = config.zones.clone().unwrap();
            if zones.is_empty() {
                return Err(Error::MissingMemoryZones);
            }

            let mut total_ram_size: u64 = 0;
            for zone in zones.iter() {
                total_ram_size += zone.size;

                if zone.shared && zone.file.is_some() && zone.host_numa_node.is_some() {
                    error!(
                        "Invalid to set host NUMA policy for a memory zone \
                        backed by a regular file and mapped as 'shared'"
                    );
                    return Err(Error::InvalidSharedMemoryZoneWithHostNuma);
                }

                if zone.hotplug_size.is_some() && config.hotplug_method == HotplugMethod::Acpi {
                    error!("Invalid to set ACPI hotplug method for memory zones");
                    return Err(Error::InvalidHotplugMethodWithMemoryZones);
                }

                if let Some(hotplugged_size) = zone.hotplugged_size {
                    if let Some(hotplug_size) = zone.hotplug_size {
                        if hotplugged_size > hotplug_size {
                            error!(
                                "'hotplugged_size' {} can't be bigger than \
                                'hotplug_size' {}",
                                hotplugged_size, hotplug_size,
                            );
                            return Err(Error::InvalidMemoryParameters);
                        }
                    } else {
                        error!(
                            "Invalid to define 'hotplugged_size' when there is \
                            no 'hotplug_size' for a memory zone"
                        );
                        return Err(Error::InvalidMemoryParameters);
                    }
                    if config.hotplug_method == HotplugMethod::Acpi {
                        error!(
                            "Invalid to define 'hotplugged_size' with hotplug \
                            method 'acpi'"
                        );
                        return Err(Error::InvalidMemoryParameters);
                    }
                }
            }

            Ok((total_ram_size, zones, allow_mem_hotplug))
        }
    }

    fn allocate_address_space(&mut self) -> Result<(), Error> {
        let mut list = Vec::new();

        for (zone_id, memory_zone) in self.memory_zones.iter() {
            let mut regions: Vec<(Arc<vm_memory::GuestRegionMmap<AtomicBitmap>>, bool)> =
                memory_zone
                    .regions()
                    .iter()
                    .map(|r| (r.clone(), false))
                    .collect();

            if let Some(virtio_mem_zone) = memory_zone.virtio_mem_zone() {
                regions.push((virtio_mem_zone.region().clone(), true));
            }

            list.push((zone_id.clone(), regions));
        }

        for (zone_id, regions) in list {
            for (region, virtio_mem) in regions {
                let slot = self.create_userspace_mapping(
                    region.start_addr().raw_value(),
                    region.len(),
                    region.as_ptr() as u64,
                    self.mergeable,
                    false,
                    self.log_dirty,
                )?;

                let file_offset = if let Some(file_offset) = region.file_offset() {
                    file_offset.start()
                } else {
                    0
                };

                self.guest_ram_mappings.push(GuestRamMapping {
                    gpa: region.start_addr().raw_value(),
                    size: region.len(),
                    slot,
                    zone_id: zone_id.clone(),
                    virtio_mem,
                    file_offset,
                });
                self.ram_allocator
                    .allocate(Some(region.start_addr()), region.len(), None)
                    .ok_or(Error::MemoryRangeAllocation)?;
            }
        }

        // Allocate SubRegion and Reserved address ranges.
        for region in self.arch_mem_regions.iter() {
            if region.r_type == RegionType::Ram {
                // Ignore the RAM type since ranges have already been allocated
                // based on the GuestMemory regions.
                continue;
            }
            self.ram_allocator
                .allocate(
                    Some(GuestAddress(region.base)),
                    region.size as GuestUsize,
                    None,
                )
                .ok_or(Error::MemoryRangeAllocation)?;
        }

        Ok(())
    }

    #[cfg(target_arch = "aarch64")]
    fn add_uefi_flash(&mut self) -> Result<(), Error> {
        // On AArch64, the UEFI binary requires a flash device at address 0.
        // 4 MiB memory is mapped to simulate the flash.
        let uefi_mem_slot = self.allocate_memory_slot();
        let uefi_region = GuestRegionMmap::new(
            MmapRegion::new(arch::layout::UEFI_SIZE as usize).unwrap(),
            arch::layout::UEFI_START,
        )
        .unwrap();
        let uefi_mem_region = self.vm.make_user_memory_region(
            uefi_mem_slot,
            uefi_region.start_addr().raw_value(),
            uefi_region.len(),
            uefi_region.as_ptr() as u64,
            false,
            false,
        );
        self.vm
            .create_user_memory_region(uefi_mem_region)
            .map_err(Error::CreateUefiFlash)?;

        let uefi_flash =
            GuestMemoryAtomic::new(GuestMemoryMmap::from_regions(vec![uefi_region]).unwrap());

        self.uefi_flash = Some(uefi_flash);

        Ok(())
    }

    #[allow(clippy::too_many_arguments)]
    pub fn new(
        vm: Arc<dyn hypervisor::Vm>,
        config: &MemoryConfig,
        prefault: Option<bool>,
        phys_bits: u8,
        #[cfg(feature = "tdx")] tdx_enabled: bool,
        restore_data: Option<&MemoryManagerSnapshotData>,
        existing_memory_files: Option<HashMap<u32, File>>,
        #[cfg(target_arch = "x86_64")] sgx_epc_config: Option<Vec<SgxEpcConfig>>,
    ) -> Result<Arc<Mutex<MemoryManager>>, Error> {
        trace_scoped!("MemoryManager::new");

        let user_provided_zones = config.size == 0;

        let mmio_address_space_size = mmio_address_space_size(phys_bits);
        debug_assert_eq!(
            (((mmio_address_space_size) >> 16) << 16),
            mmio_address_space_size
        );
        let start_of_platform_device_area =
            GuestAddress(mmio_address_space_size - PLATFORM_DEVICE_AREA_SIZE);
        let end_of_device_area = start_of_platform_device_area.unchecked_sub(1);
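
        // At this point the top of the guest address space is laid out as:
        //   [ ... device area ... | platform device area (1 MiB) ]
        // with end_of_device_area sitting immediately below
        // start_of_platform_device_area.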

        let (ram_size, zones, allow_mem_hotplug) =
            Self::validate_memory_config(config, user_provided_zones)?;

        let (
            start_of_device_area,
            boot_ram,
            current_ram,
            arch_mem_regions,
            memory_zones,
            guest_memory,
            boot_guest_memory,
            hotplug_slots,
            next_memory_slot,
            selected_slot,
            next_hotplug_slot,
        ) = if let Some(data) = restore_data {
            let (regions, memory_zones) = Self::restore_memory_regions_and_zones(
                &data.guest_ram_mappings,
                &zones,
                prefault,
                existing_memory_files.unwrap_or_default(),
                config.thp,
            )?;
            let guest_memory =
                GuestMemoryMmap::from_arc_regions(regions).map_err(Error::GuestMemory)?;
            let boot_guest_memory = guest_memory.clone();
            (
                GuestAddress(data.start_of_device_area),
                data.boot_ram,
                data.current_ram,
                data.arch_mem_regions.clone(),
                memory_zones,
                guest_memory,
                boot_guest_memory,
                data.hotplug_slots.clone(),
                data.next_memory_slot,
                data.selected_slot,
                data.next_hotplug_slot,
            )
        } else {
            // Init guest memory
            let arch_mem_regions = arch::arch_memory_regions(ram_size);

            let ram_regions: Vec<(GuestAddress, usize)> = arch_mem_regions
                .iter()
                .filter(|r| r.2 == RegionType::Ram)
                .map(|r| (r.0, r.1))
                .collect();

            let arch_mem_regions: Vec<ArchMemRegion> = arch_mem_regions
                .iter()
                .map(|(a, b, c)| ArchMemRegion {
                    base: a.0,
                    size: *b,
                    r_type: *c,
                })
                .collect();

            let (mem_regions, mut memory_zones) =
                Self::create_memory_regions_from_zones(&ram_regions, &zones, prefault, config.thp)?;

            let mut guest_memory =
                GuestMemoryMmap::from_arc_regions(mem_regions).map_err(Error::GuestMemory)?;

            let boot_guest_memory = guest_memory.clone();

            let mut start_of_device_area =
                MemoryManager::start_addr(guest_memory.last_addr(), allow_mem_hotplug)?;

            // Update list of memory zones for resize.
            for zone in zones.iter() {
                if let Some(memory_zone) = memory_zones.get_mut(&zone.id) {
                    if let Some(hotplug_size) = zone.hotplug_size {
                        if hotplug_size == 0 {
                            error!("'hotplug_size' can't be 0");
                            return Err(Error::InvalidHotplugSize);
                        }

                        if !user_provided_zones && config.hotplug_method == HotplugMethod::Acpi {
                            start_of_device_area = start_of_device_area
                                .checked_add(hotplug_size)
                                .ok_or(Error::GuestAddressOverFlow)?;
                        } else {
                            // Alignment must be "natural" i.e. same as size of block
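                            // This is the usual round-up-to-multiple idiom:
                            // (x + align - 1) / align * align.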
                            let start_addr = GuestAddress(
                                (start_of_device_area.0 + virtio_devices::VIRTIO_MEM_ALIGN_SIZE
                                    - 1)
                                    / virtio_devices::VIRTIO_MEM_ALIGN_SIZE
                                    * virtio_devices::VIRTIO_MEM_ALIGN_SIZE,
                            );

                            // When `prefault` is set by vm_restore, the memory
                            // manager creates the RAM region with the `prefault`
                            // option from the restore config rather than the
                            // one from the zone.
                            let region = MemoryManager::create_ram_region(
                                &None,
                                0,
                                start_addr,
                                hotplug_size as usize,
                                match prefault {
                                    Some(pf) => pf,
                                    None => zone.prefault,
                                },
                                zone.shared,
                                zone.hugepages,
                                zone.hugepage_size,
                                zone.host_numa_node,
                                None,
                                config.thp,
                            )?;

                            guest_memory = guest_memory
                                .insert_region(Arc::clone(&region))
                                .map_err(Error::GuestMemory)?;

                            let hotplugged_size = zone.hotplugged_size.unwrap_or(0);
                            let region_size = region.len();
                            memory_zone.virtio_mem_zone = Some(VirtioMemZone {
                                region,
                                virtio_device: None,
                                hotplugged_size,
                                hugepages: zone.hugepages,
                                blocks_state: Arc::new(Mutex::new(BlocksState::new(region_size))),
                            });

                            start_of_device_area = start_addr
                                .checked_add(hotplug_size)
                                .ok_or(Error::GuestAddressOverFlow)?;
                        }
                    }
                } else {
                    return Err(Error::MissingZoneIdentifier);
                }
            }

            let mut hotplug_slots = Vec::with_capacity(HOTPLUG_COUNT);
            hotplug_slots.resize_with(HOTPLUG_COUNT, HotPlugState::default);

            (
                start_of_device_area,
                ram_size,
                ram_size,
                arch_mem_regions,
                memory_zones,
                guest_memory,
                boot_guest_memory,
                hotplug_slots,
                0,
                0,
                0,
            )
        };

        let guest_memory = GuestMemoryAtomic::new(guest_memory);

        // Both MMIO and PIO address spaces start at address 0.
        let allocator = Arc::new(Mutex::new(
            SystemAllocator::new(
                #[cfg(target_arch = "x86_64")]
                {
                    GuestAddress(0)
                },
                #[cfg(target_arch = "x86_64")]
                {
                    1 << 16
                },
                start_of_platform_device_area,
                PLATFORM_DEVICE_AREA_SIZE,
                layout::MEM_32BIT_DEVICES_START,
                layout::MEM_32BIT_DEVICES_SIZE,
                #[cfg(target_arch = "x86_64")]
                vec![GsiApic::new(
                    X86_64_IRQ_BASE,
                    ioapic::NUM_IOAPIC_PINS as u32 - X86_64_IRQ_BASE,
                )],
            )
            .ok_or(Error::CreateSystemAllocator)?,
        ));

        #[cfg(not(feature = "tdx"))]
        let dynamic = true;
        #[cfg(feature = "tdx")]
        let dynamic = !tdx_enabled;

        let acpi_address = if dynamic
            && config.hotplug_method == HotplugMethod::Acpi
            && (config.hotplug_size.unwrap_or_default() > 0)
        {
            Some(
                allocator
                    .lock()
                    .unwrap()
                    .allocate_platform_mmio_addresses(None, MEMORY_MANAGER_ACPI_SIZE as u64, None)
                    .ok_or(Error::AllocateMmioAddress)?,
            )
        } else {
            None
        };

        // If running on SGX the start of device area and RAM area may diverge but
        // at this point they are next to each other.
        let end_of_ram_area = start_of_device_area.unchecked_sub(1);
        let ram_allocator = AddressAllocator::new(GuestAddress(0), start_of_device_area.0).unwrap();
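
        // The RAM allocator covers [0, start_of_device_area); guest RAM
        // regions and the reserved/sub-region ranges are later registered
        // against it in allocate_address_space().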

        let mut memory_manager = MemoryManager {
            boot_guest_memory,
            guest_memory,
            next_memory_slot,
            start_of_device_area,
            end_of_device_area,
            end_of_ram_area,
            vm,
            hotplug_slots,
            selected_slot,
            mergeable: config.mergeable,
            allocator,
            hotplug_method: config.hotplug_method,
            boot_ram,
            current_ram,
            next_hotplug_slot,
            shared: config.shared,
            hugepages: config.hugepages,
            hugepage_size: config.hugepage_size,
            prefault: config.prefault,
            #[cfg(target_arch = "x86_64")]
            sgx_epc_region: None,
            user_provided_zones,
            snapshot_memory_ranges: MemoryRangeTable::default(),
            memory_zones,
            guest_ram_mappings: Vec::new(),
            acpi_address,
            log_dirty: dynamic, // Cannot log dirty pages on a TD
            arch_mem_regions,
            ram_allocator,
            dynamic,
            #[cfg(target_arch = "aarch64")]
            uefi_flash: None,
            thp: config.thp,
        };

        memory_manager.allocate_address_space()?;

        #[cfg(target_arch = "aarch64")]
        memory_manager.add_uefi_flash()?;

        #[cfg(target_arch = "x86_64")]
        if let Some(sgx_epc_config) = sgx_epc_config {
            memory_manager.setup_sgx(sgx_epc_config)?;
        }

        Ok(Arc::new(Mutex::new(memory_manager)))
    }

    pub fn new_from_snapshot(
        snapshot: &Snapshot,
        vm: Arc<dyn hypervisor::Vm>,
        config: &MemoryConfig,
        source_url: Option<&str>,
        prefault: bool,
        phys_bits: u8,
    ) -> Result<Arc<Mutex<MemoryManager>>, Error> {
        if let Some(source_url) = source_url {
            let mut memory_file_path = url_to_path(source_url).map_err(Error::Restore)?;
            memory_file_path.push(String::from(SNAPSHOT_FILENAME));

            let mem_snapshot: MemoryManagerSnapshotData = snapshot
                .to_versioned_state(MEMORY_MANAGER_SNAPSHOT_ID)
                .map_err(Error::Restore)?;

            let mm = MemoryManager::new(
                vm,
                config,
                Some(prefault),
                phys_bits,
                #[cfg(feature = "tdx")]
                false,
                Some(&mem_snapshot),
                None,
                #[cfg(target_arch = "x86_64")]
                None,
            )?;

            mm.lock()
                .unwrap()
                .fill_saved_regions(memory_file_path, mem_snapshot.memory_ranges)?;

            Ok(mm)
        } else {
            Err(Error::RestoreMissingSourceUrl)
        }
    }

    fn memfd_create(name: &ffi::CStr, flags: u32) -> Result<RawFd, io::Error> {
        // SAFETY: FFI call with correct arguments
        let res = unsafe { libc::syscall(libc::SYS_memfd_create, name.as_ptr(), flags) };

        if res < 0 {
            Err(io::Error::last_os_error())
        } else {
            Ok(res as RawFd)
        }
    }

    fn mbind(
        addr: *mut u8,
        len: u64,
        mode: u32,
        nodemask: Vec<u64>,
        maxnode: u64,
        flags: u32,
    ) -> Result<(), io::Error> {
        // SAFETY: FFI call with correct arguments
        let res = unsafe {
            libc::syscall(
                libc::SYS_mbind,
                addr as *mut libc::c_void,
                len,
                mode,
                nodemask.as_ptr(),
                maxnode,
                flags,
            )
        };

        if res < 0 {
            Err(io::Error::last_os_error())
        } else {
            Ok(())
        }
    }

    fn create_anonymous_file(
        size: usize,
        hugepages: bool,
        hugepage_size: Option<u64>,
    ) -> Result<FileOffset, Error> {
        let fd = Self::memfd_create(
            &ffi::CString::new("ch_ram").unwrap(),
            libc::MFD_CLOEXEC
                | if hugepages {
                    libc::MFD_HUGETLB
                        | if let Some(hugepage_size) = hugepage_size {
                            /*
                             * From the Linux kernel:
                             * Several system calls take a flag to request "hugetlb" huge pages.
                             * Without further specification, these system calls will use the
                             * system's default huge page size. If a system supports multiple
                             * huge page sizes, the desired huge page size can be specified in
                             * bits [26:31] of the flag arguments. The value in these 6 bits
                             * will encode the log2 of the huge page size.
                             */
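
                            // As an illustration: a 2 MiB huge page is
                            // 1 << 21 bytes, trailing_zeros() yields 21, and
                            // the resulting flag is 21 << 26, matching the
                            // kernel's MFD_HUGE_2MB encoding.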
|
|
|
|
|
|
|
|
|
|
hugepage_size.trailing_zeros() << 26
|
|
|
|
|
} else {
|
|
|
|
|
// Use the system default huge page size
|
|
|
|
|
0
|
|
|
|
|
}
|
2020-01-06 17:12:03 +00:00
|
|
|
|
} else {
|
2022-10-30 10:25:19 +00:00
|
|
|
|
0
|
|
|
|
|
},
|
|
|
|
|
)
|
|
|
|
|
.map_err(Error::SharedFileCreate)?;
|
2020-01-06 17:12:03 +00:00
|
|
|
|
|
2022-11-16 23:23:22 +00:00
|
|
|
|
// SAFETY: fd is valid
|
2022-10-30 10:25:19 +00:00
|
|
|
|
let f = unsafe { File::from_raw_fd(fd) };
|
|
|
|
|
f.set_len(size as u64).map_err(Error::SharedFileSetLen)?;
|
2020-04-22 21:20:17 +00:00
|
|
|
|
|
2022-10-30 10:25:19 +00:00
|
|
|
|
Ok(FileOffset::new(f, 0))
|
|
|
|
|
}
|
2020-04-22 21:20:17 +00:00
|
|
|
|
|
2022-10-30 10:25:19 +00:00
|
|
|
|
fn open_backing_file(
|
|
|
|
|
backing_file: &PathBuf,
|
|
|
|
|
file_offset: u64,
|
|
|
|
|
size: usize,
|
|
|
|
|
) -> Result<FileOffset, Error> {
|
|
|
|
|
if backing_file.is_dir() {
|
|
|
|
|
// Override file offset as it does not apply in this case.
|
|
|
|
|
info!(
|
|
|
|
|
"Ignoring file offset since the backing file is a \
|
|
|
|
|
temporary file created from the specified directory."
|
|
|
|
|
);
|
|
|
|
|
let fs_str = format!("{}{}", backing_file.display(), "/tmpfile_XXXXXX");
|
|
|
|
|
let fs = ffi::CString::new(fs_str).unwrap();
|
|
|
|
|
let mut path = fs.as_bytes_with_nul().to_owned();
|
|
|
|
|
let path_ptr = path.as_mut_ptr() as *mut _;
|
2022-11-16 23:23:22 +00:00
|
|
|
|
// SAFETY: FFI call
|
2022-10-30 10:25:19 +00:00
|
|
|
|
let fd = unsafe { libc::mkstemp(path_ptr) };
|
2022-11-16 23:23:22 +00:00
|
|
|
|
if fd == -1 {
|
|
|
|
|
return Err(Error::SharedFileCreate(std::io::Error::last_os_error()));
|
|
|
|
|
}
|
|
|
|
|
// SAFETY: FFI call
|
2022-10-30 10:25:19 +00:00
|
|
|
|
unsafe { libc::unlink(path_ptr) };
            // SAFETY: fd is valid
            let f = unsafe { File::from_raw_fd(fd) };
            f.set_len(size as u64).map_err(Error::SharedFileSetLen)?;

            Ok(FileOffset::new(f, 0))
        } else {
            let f = OpenOptions::new()
                .read(true)
                .write(true)
                .open(backing_file)
                .map_err(Error::SharedFileCreate)?;

            Ok(FileOffset::new(f, file_offset))
        }
    }

    #[allow(clippy::too_many_arguments)]
    pub fn create_ram_region(
        backing_file: &Option<PathBuf>,
        file_offset: u64,
        start_addr: GuestAddress,
        size: usize,
        prefault: bool,
        shared: bool,
        hugepages: bool,
        hugepage_size: Option<u64>,
        host_numa_node: Option<u32>,
        existing_memory_file: Option<File>,
        thp: bool,
    ) -> Result<Arc<GuestRegionMmap>, Error> {
        let mut mmap_flags = libc::MAP_NORESERVE;

        // The duplication of mmap_flags ORing here is unfortunate but it also makes
        // the complexity of the handling clear.
        let fo = if let Some(f) = existing_memory_file {
            // It must be MAP_SHARED as we wouldn't already have an FD
            mmap_flags |= libc::MAP_SHARED;
            Some(FileOffset::new(f, file_offset))
        } else if let Some(backing_file) = backing_file {
            if shared {
                mmap_flags |= libc::MAP_SHARED;
            } else {
                mmap_flags |= libc::MAP_PRIVATE;
            }
            Some(Self::open_backing_file(backing_file, file_offset, size)?)
        } else if shared || hugepages {
            // For hugepages we must also use MAP_SHARED, otherwise we will
            // trigger #4805, because MAP_PRIVATE would trigger CoW against
            // the backing file with the VFIO pinning
            mmap_flags |= libc::MAP_SHARED;
            Some(Self::create_anonymous_file(size, hugepages, hugepage_size)?)
        } else {
            mmap_flags |= libc::MAP_PRIVATE | libc::MAP_ANONYMOUS;
            None
        };
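
        // To recap the four cases above:
        // - existing fd (restore/migration): MAP_SHARED, reuse the fd
        // - user-provided backing file: MAP_SHARED or MAP_PRIVATE per `shared`
        // - shared or hugepages without a file: MAP_SHARED on a fresh memfd
        // - plain RAM: private anonymous mapping with no fd at all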

        if prefault {
            mmap_flags |= libc::MAP_POPULATE;
        }

        let region = GuestRegionMmap::new(
            MmapRegion::build(fo, size, libc::PROT_READ | libc::PROT_WRITE, mmap_flags)
                .map_err(Error::GuestMemoryRegion)?,
            start_addr,
        )
        .map_err(Error::GuestMemory)?;

        if region.file_offset().is_none() && thp {
            info!(
                "Anonymous mapping at 0x{:x} (size = 0x{:x})",
                region.as_ptr() as u64,
                size
            );
            // SAFETY: FFI call with correct arguments
            let ret = unsafe { libc::madvise(region.as_ptr() as _, size, libc::MADV_HUGEPAGE) };
            if ret != 0 {
                let e = io::Error::last_os_error();
                warn!("Failed to mark pages as THP eligible: {}", e);
            }
        }

        // Apply NUMA policy if needed.
        if let Some(node) = host_numa_node {
            let addr = region.deref().as_ptr();
            let len = region.deref().size() as u64;
            let mode = MPOL_BIND;
            let mut nodemask: Vec<u64> = Vec::new();
            let flags = MPOL_MF_STRICT | MPOL_MF_MOVE;

            // Linux is kind of buggy in the way it interprets maxnode as it
            // will cut off the last node. That's why we have to add 1 to what
            // we would consider as the proper maxnode value.
            let maxnode = node as u64 + 1 + 1;
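            // The first +1 turns the zero-based node index into a node
            // count; the second +1 is the workaround described above.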

            // Allocate the right size for the vector.
            nodemask.resize((node as usize / 64) + 1, 0);

            // Fill the global bitmask through the nodemask vector.
            let idx = (node / 64) as usize;
            let shift = node % 64;
            nodemask[idx] |= 1u64 << shift;
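            // e.g. host node 68 sets bit 4 (68 % 64) of nodemask[1] (68 / 64).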

            // Policies are enforced by using the MPOL_MF_MOVE flag as it will
            // force the kernel to move all pages that might have been already
            // allocated to the proper set of NUMA nodes. MPOL_MF_STRICT is
            // used to throw an error if MPOL_MF_MOVE didn't succeed.
            // MPOL_BIND is the selected mode as it specifies a strict policy
            // that restricts memory allocation to the nodes specified in the
            // nodemask.
            Self::mbind(addr, len, mode, nodemask, maxnode, flags)
                .map_err(Error::ApplyNumaPolicy)?;
        }

        Ok(Arc::new(region))
    }

    // Update the GuestMemoryMmap with the new range
    fn add_region(&mut self, region: Arc<GuestRegionMmap>) -> Result<(), Error> {
        let guest_memory = self
            .guest_memory
            .memory()
            .insert_region(region)
            .map_err(Error::GuestMemory)?;
        self.guest_memory.lock().unwrap().replace(guest_memory);

        Ok(())
    }

    //
    // Calculate the start address of an area next to RAM.
    //
    // If memory hotplug is allowed, the start address needs to be aligned
    // (rounded up) to a 128 MiB boundary.
    // If memory hotplug is not allowed, there is no alignment required.
    // In both cases the area must also start at the 64-bit start.
    fn start_addr(mem_end: GuestAddress, allow_mem_hotplug: bool) -> Result<GuestAddress, Error> {
        let mut start_addr = if allow_mem_hotplug {
            GuestAddress(mem_end.0 | ((128 << 20) - 1))
        } else {
            mem_end
        };
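
        // e.g. with hotplug allowed, a last RAM address of 0x1000_0000
        // becomes (0x1000_0000 | 0x7FF_FFFF) + 1 = 0x1800_0000, the next
        // 128 MiB boundary, once the increment below is applied.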
        start_addr = start_addr
            .checked_add(1)
            .ok_or(Error::GuestAddressOverFlow)?;

        if mem_end < arch::layout::MEM_32BIT_RESERVED_START {
            return Ok(arch::layout::RAM_64BIT_START);
        }

        Ok(start_addr)
    }

    pub fn add_ram_region(
        &mut self,
        start_addr: GuestAddress,
        size: usize,
    ) -> Result<Arc<GuestRegionMmap>, Error> {
        // Allocate memory for the region
        let region = MemoryManager::create_ram_region(
            &None,
            0,
            start_addr,
            size,
            self.prefault,
            self.shared,
            self.hugepages,
            self.hugepage_size,
            None,
            None,
            self.thp,
        )?;

        // Map it into the guest
        let slot = self.create_userspace_mapping(
            region.start_addr().0,
            region.len(),
            region.as_ptr() as u64,
            self.mergeable,
            false,
            self.log_dirty,
        )?;
        self.guest_ram_mappings.push(GuestRamMapping {
            gpa: region.start_addr().raw_value(),
            size: region.len(),
            slot,
            zone_id: DEFAULT_MEMORY_ZONE.to_string(),
            virtio_mem: false,
            file_offset: 0,
        });

        self.add_region(Arc::clone(&region))?;

        Ok(region)
    }

    fn hotplug_ram_region(&mut self, size: usize) -> Result<Arc<GuestRegionMmap>, Error> {
        info!("Hotplugging new RAM: {}", size);

        // Check that there is a free slot
        if self.next_hotplug_slot >= HOTPLUG_COUNT {
            return Err(Error::NoSlotAvailable);
        }

        // An "inserted" DIMM must have a size that is a multiple of 128 MiB
        if size % (128 << 20) != 0 {
            return Err(Error::InvalidSize);
        }

        let start_addr = MemoryManager::start_addr(self.guest_memory.memory().last_addr(), true)?;

        if start_addr.checked_add(size.try_into().unwrap()).unwrap() >= self.end_of_ram_area {
            return Err(Error::InsufficientHotplugRam);
        }

        let region = self.add_ram_region(start_addr, size)?;

        // Add region to the list of regions associated with the default
        // memory zone.
        if let Some(memory_zone) = self.memory_zones.get_mut(DEFAULT_MEMORY_ZONE) {
            memory_zone.regions.push(Arc::clone(&region));
        }

        // Tell the allocator
        self.ram_allocator
            .allocate(Some(start_addr), size as GuestUsize, None)
            .ok_or(Error::MemoryRangeAllocation)?;

        // Update the slot so that it can be queried via the I/O port
        let mut slot = &mut self.hotplug_slots[self.next_hotplug_slot];
        slot.active = true;
        slot.inserting = true;
        slot.base = region.start_addr().0;
        slot.length = region.len();

        self.next_hotplug_slot += 1;

        Ok(region)
    }

    pub fn guest_memory(&self) -> GuestMemoryAtomic<GuestMemoryMmap> {
        self.guest_memory.clone()
    }

    pub fn boot_guest_memory(&self) -> GuestMemoryMmap {
        self.boot_guest_memory.clone()
    }

    pub fn allocator(&self) -> Arc<Mutex<SystemAllocator>> {
        self.allocator.clone()
    }

    pub fn start_of_device_area(&self) -> GuestAddress {
        self.start_of_device_area
    }

    pub fn end_of_device_area(&self) -> GuestAddress {
        self.end_of_device_area
    }

    pub fn allocate_memory_slot(&mut self) -> u32 {
        let slot_id = self.next_memory_slot;
        self.next_memory_slot += 1;
        slot_id
    }

    pub fn create_userspace_mapping(
        &mut self,
        guest_phys_addr: u64,
        memory_size: u64,
        userspace_addr: u64,
        mergeable: bool,
        readonly: bool,
        log_dirty: bool,
    ) -> Result<u32, Error> {
        let slot = self.allocate_memory_slot();
        let mem_region = self.vm.make_user_memory_region(
            slot,
            guest_phys_addr,
            memory_size,
            userspace_addr,
            readonly,
            log_dirty,
        );

        info!(
            "Creating userspace mapping: {:x} -> {:x} {:x}, slot {}",
            guest_phys_addr, userspace_addr, memory_size, slot
        );

        self.vm
            .create_user_memory_region(mem_region)
            .map_err(Error::CreateUserMemoryRegion)?;

        // Mark the pages as mergeable if explicitly asked for.
        if mergeable {
            // SAFETY: the address and size are valid since the
            // mmap succeeded.
            let ret = unsafe {
                libc::madvise(
                    userspace_addr as *mut libc::c_void,
                    memory_size as libc::size_t,
                    libc::MADV_MERGEABLE,
                )
            };
            if ret != 0 {
                let err = io::Error::last_os_error();
                // Safe to unwrap because the error is constructed with
                // last_os_error(), which ensures the output will be Some().
                let errno = err.raw_os_error().unwrap();
                if errno == libc::EINVAL {
                    warn!("kernel not configured with CONFIG_KSM");
                } else {
                    warn!("madvise error: {}", err);
                }
                warn!("failed to mark pages as mergeable");
            }
        }

        info!(
            "Created userspace mapping: {:x} -> {:x} {:x}",
            guest_phys_addr, userspace_addr, memory_size
        );

        Ok(slot)
    }

    pub fn remove_userspace_mapping(
        &mut self,
        guest_phys_addr: u64,
        memory_size: u64,
        userspace_addr: u64,
        mergeable: bool,
        slot: u32,
    ) -> Result<(), Error> {
        let mem_region = self.vm.make_user_memory_region(
            slot,
            guest_phys_addr,
            memory_size,
            userspace_addr,
            false, /* readonly -- don't care */
            false, /* log dirty */
        );

        self.vm
            .remove_user_memory_region(mem_region)
            .map_err(Error::RemoveUserMemoryRegion)?;

        // Mark the pages as unmergeable if they were previously marked as
        // mergeable.
        if mergeable {
            // SAFETY: the address and size are valid as the region was
            // previously advised.
            let ret = unsafe {
                libc::madvise(
                    userspace_addr as *mut libc::c_void,
                    memory_size as libc::size_t,
                    libc::MADV_UNMERGEABLE,
                )
            };
            if ret != 0 {
                let err = io::Error::last_os_error();
                // Safe to unwrap because the error is constructed with
                // last_os_error(), which ensures the output will be Some().
                let errno = err.raw_os_error().unwrap();
                if errno == libc::EINVAL {
                    warn!("kernel not configured with CONFIG_KSM");
                } else {
                    warn!("madvise error: {}", err);
                }
                warn!("failed to mark pages as unmergeable");
            }
        }

        info!(
            "Removed userspace mapping: {:x} -> {:x} {:x}",
            guest_phys_addr, userspace_addr, memory_size
        );

        Ok(())
    }

    pub fn virtio_mem_resize(&mut self, id: &str, size: u64) -> Result<(), Error> {
        if let Some(memory_zone) = self.memory_zones.get_mut(id) {
            if let Some(virtio_mem_zone) = &mut memory_zone.virtio_mem_zone {
                if let Some(virtio_mem_device) = virtio_mem_zone.virtio_device.as_ref() {
                    virtio_mem_device
                        .lock()
                        .unwrap()
                        .resize(size)
                        .map_err(Error::VirtioMemResizeFail)?;
                }

                // Keep the hotplugged_size up to date.
                virtio_mem_zone.hotplugged_size = size;
            } else {
                error!("Failed resizing virtio-mem region: No virtio-mem handler");
                return Err(Error::MissingVirtioMemHandler);
            }

            return Ok(());
        }

        error!("Failed resizing virtio-mem region: Unknown memory zone");
        Err(Error::UnknownMemoryZone)
    }

    /// In case this function resulted in adding a new memory region to the
    /// guest memory, the new region is returned to the caller. The virtio-mem
    /// use case never adds a new region as the whole hotpluggable memory has
    /// already been allocated at boot time.
    pub fn resize(&mut self, desired_ram: u64) -> Result<Option<Arc<GuestRegionMmap>>, Error> {
        if self.user_provided_zones {
            error!(
                "Not allowed to resize guest memory when backed with user \
                defined memory zones."
            );
            return Err(Error::InvalidResizeWithMemoryZones);
        }

        let mut region: Option<Arc<GuestRegionMmap>> = None;
        match self.hotplug_method {
            HotplugMethod::VirtioMem => {
                if desired_ram >= self.boot_ram {
                    if !self.dynamic {
                        return Ok(region);
                    }

                    self.virtio_mem_resize(DEFAULT_MEMORY_ZONE, desired_ram - self.boot_ram)?;
                    self.current_ram = desired_ram;
                }
            }
            HotplugMethod::Acpi => {
                if desired_ram > self.current_ram {
                    if !self.dynamic {
                        return Ok(region);
                    }

                    region =
                        Some(self.hotplug_ram_region((desired_ram - self.current_ram) as usize)?);
                    self.current_ram = desired_ram;
                }
            }
        }
        Ok(region)
    }

    pub fn resize_zone(&mut self, id: &str, virtio_mem_size: u64) -> Result<(), Error> {
        if !self.user_provided_zones {
            error!(
                "Not allowed to resize guest memory zone when no zone is \
                defined."
            );
            return Err(Error::ResizeZone);
        }

        self.virtio_mem_resize(id, virtio_mem_size)
    }

    #[cfg(target_arch = "x86_64")]
    pub fn setup_sgx(&mut self, sgx_epc_config: Vec<SgxEpcConfig>) -> Result<(), Error> {
        let file = OpenOptions::new()
            .read(true)
            .open("/dev/sgx_provision")
            .map_err(Error::SgxProvisionOpen)?;
        self.vm
            .enable_sgx_attribute(file)
            .map_err(Error::SgxEnableProvisioning)?;

        // Go over each EPC section and verify its size is a 4k multiple. At
        // the same time, calculate the total size needed for the contiguous
        // EPC region.
        let mut epc_region_size = 0;
        for epc_section in sgx_epc_config.iter() {
            if epc_section.size == 0 {
                return Err(Error::EpcSectionSizeInvalid);
            }
            if epc_section.size & (SGX_PAGE_SIZE - 1) != 0 {
                return Err(Error::EpcSectionSizeInvalid);
            }

            epc_region_size += epc_section.size;
        }

        // Place the SGX EPC region on a 4k boundary between the RAM and the device area
        let epc_region_start = GuestAddress(
            ((self.start_of_device_area.0 + SGX_PAGE_SIZE - 1) / SGX_PAGE_SIZE) * SGX_PAGE_SIZE,
        );
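        // e.g. a device area starting at 0x1_0000_0123 yields an EPC region
        // start of 0x1_0000_1000, rounded up to the next 4 KiB page.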

        self.start_of_device_area = epc_region_start
            .checked_add(epc_region_size)
            .ok_or(Error::GuestAddressOverFlow)?;

        let mut sgx_epc_region = SgxEpcRegion::new(epc_region_start, epc_region_size as GuestUsize);
        info!(
            "SGX EPC region: 0x{:x} (0x{:x})",
            epc_region_start.0, epc_region_size
        );

        // Each section can be memory mapped into the allocated region.
        let mut epc_section_start = epc_region_start.raw_value();
        for epc_section in sgx_epc_config.iter() {
            let file = OpenOptions::new()
                .read(true)
                .write(true)
                .open("/dev/sgx_vepc")
                .map_err(Error::SgxVirtEpcOpen)?;

            let prot = PROT_READ | PROT_WRITE;
            let mut flags = MAP_NORESERVE | MAP_SHARED;
            if epc_section.prefault {
                flags |= MAP_POPULATE;
            }

            // We can't use the vm-memory crate to perform the memory mapping
            // here as it would try to ensure the size of the backing file is
            // matching the size of the expected mapping. The /dev/sgx_vepc
            // device does not work that way, it provides a file descriptor
            // which is not matching the mapping size, as it's just a way to
            // let KVM know that an EPC section is being created for the guest.
            // SAFETY: FFI call with correct arguments
            let host_addr = unsafe {
                libc::mmap(
                    std::ptr::null_mut(),
                    epc_section.size as usize,
                    prot,
                    flags,
                    file.as_raw_fd(),
                    0,
                )
            } as u64;

            info!(
                "Adding SGX EPC section: 0x{:x} (0x{:x})",
                epc_section_start, epc_section.size
            );

            let _mem_slot = self.create_userspace_mapping(
                epc_section_start,
                epc_section.size,
                host_addr,
                false,
                false,
                false,
            )?;

            sgx_epc_region.insert(
                epc_section.id.clone(),
                SgxEpcSection::new(
                    GuestAddress(epc_section_start),
                    epc_section.size as GuestUsize,
                ),
            );

            epc_section_start += epc_section.size;
        }

        self.sgx_epc_region = Some(sgx_epc_region);

        Ok(())
    }

    #[cfg(target_arch = "x86_64")]
    pub fn sgx_epc_region(&self) -> &Option<SgxEpcRegion> {
        &self.sgx_epc_region
    }

    pub fn is_hardlink(f: &File) -> bool {
        let mut stat = std::mem::MaybeUninit::<libc::stat>::uninit();
        // SAFETY: FFI call with correct arguments
        let ret = unsafe { libc::fstat(f.as_raw_fd(), stat.as_mut_ptr()) };
        if ret != 0 {
            error!("Couldn't fstat the backing file");
            return false;
        }
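
        // A link count of zero means the file was unlinked after creation
        // (e.g. the anonymous temporary file from open_backing_file), so its
        // content is not reachable through any path on the host filesystem.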
        // SAFETY: stat is valid
        unsafe { (*stat.as_ptr()).st_nlink as usize > 0 }
    }

    pub fn memory_zones(&self) -> &MemoryZones {
        &self.memory_zones
    }

    pub fn memory_zones_mut(&mut self) -> &mut MemoryZones {
        &mut self.memory_zones
    }

    pub fn memory_range_table(
        &self,
        snapshot: bool,
    ) -> std::result::Result<MemoryRangeTable, MigratableError> {
        let mut table = MemoryRangeTable::default();

        for memory_zone in self.memory_zones.values() {
            if let Some(virtio_mem_zone) = memory_zone.virtio_mem_zone() {
                table.extend(virtio_mem_zone.plugged_ranges());
            }

            for region in memory_zone.regions() {
                if snapshot {
                    if let Some(file_offset) = region.file_offset() {
                        if (region.flags() & libc::MAP_SHARED == libc::MAP_SHARED)
                            && Self::is_hardlink(file_offset.file())
                        {
                            // In this very specific case, we know the memory
                            // region is backed by a file on the host filesystem
                            // that can be accessed by the user, and additionally
                            // the mapping is shared, which means that modifications
                            // to the content are written to the actual file.
                            // When meeting these conditions, we can skip the
                            // copy of the memory content for this specific region,
                            // as we can assume the user will have it saved through
                            // the backing file already.
                            continue;
                        }
                    }
                }

                table.push(MemoryRange {
                    gpa: region.start_addr().raw_value(),
                    length: region.len(),
                });
            }
        }

        Ok(table)
    }

    pub fn snapshot_data(&self) -> MemoryManagerSnapshotData {
        MemoryManagerSnapshotData {
            memory_ranges: self.snapshot_memory_ranges.clone(),
            guest_ram_mappings: self.guest_ram_mappings.clone(),
            start_of_device_area: self.start_of_device_area.0,
            boot_ram: self.boot_ram,
            current_ram: self.current_ram,
            arch_mem_regions: self.arch_mem_regions.clone(),
            hotplug_slots: self.hotplug_slots.clone(),
            next_memory_slot: self.next_memory_slot,
            selected_slot: self.selected_slot,
            next_hotplug_slot: self.next_hotplug_slot,
        }
    }

    pub fn memory_slot_fds(&self) -> HashMap<u32, RawFd> {
        let mut memory_slot_fds = HashMap::new();
        for guest_ram_mapping in &self.guest_ram_mappings {
            let slot = guest_ram_mapping.slot;
            let guest_memory = self.guest_memory.memory();
            let file = guest_memory
                .find_region(GuestAddress(guest_ram_mapping.gpa))
                .unwrap()
                .file_offset()
                .unwrap()
                .file();
            memory_slot_fds.insert(slot, file.as_raw_fd());
        }
        memory_slot_fds
    }

    pub fn acpi_address(&self) -> Option<GuestAddress> {
        self.acpi_address
    }

    pub fn num_guest_ram_mappings(&self) -> u32 {
        self.guest_ram_mappings.len() as u32
    }

    #[cfg(target_arch = "aarch64")]
    pub fn uefi_flash(&self) -> GuestMemoryAtomic<GuestMemoryMmap> {
        self.uefi_flash.as_ref().unwrap().clone()
    }

    #[cfg(all(target_arch = "x86_64", feature = "guest_debug"))]
    pub fn coredump_memory_regions(&self, mem_offset: u64) -> CoredumpMemoryRegions {
        let mut mapping_sorted_by_gpa = self.guest_ram_mappings.clone();
        mapping_sorted_by_gpa.sort_by_key(|m| m.gpa);

        let mut mem_offset_in_elf = mem_offset;
        let mut ram_maps = BTreeMap::new();
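        // The regions are laid out back-to-back in the ELF file, so each
        // region's file offset is the running sum of the preceding sizes.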
        for mapping in mapping_sorted_by_gpa.iter() {
            ram_maps.insert(
                mapping.gpa,
                CoredumpMemoryRegion {
                    mem_offset_in_elf,
                    mem_size: mapping.size,
                },
            );
            mem_offset_in_elf += mapping.size;
        }

        CoredumpMemoryRegions { ram_maps }
    }

    #[cfg(all(target_arch = "x86_64", feature = "guest_debug"))]
    pub fn coredump_iterate_save_mem(
        &mut self,
        dump_state: &DumpState,
    ) -> std::result::Result<(), GuestDebuggableError> {
        let snapshot_memory_ranges = self
            .memory_range_table(false)
            .map_err(|e| GuestDebuggableError::Coredump(e.into()))?;

        if snapshot_memory_ranges.is_empty() {
            return Ok(());
        }

        let mut coredump_file = dump_state.file.as_ref().unwrap();

        let guest_memory = self.guest_memory.memory();
        let mut total_bytes: u64 = 0;

        for range in snapshot_memory_ranges.regions() {
            let mut offset: u64 = 0;
            loop {
                let bytes_written = guest_memory
                    .write_to(
                        GuestAddress(range.gpa + offset),
                        &mut coredump_file,
                        (range.length - offset) as usize,
                    )
                    .map_err(|e| GuestDebuggableError::Coredump(e.into()))?;
                offset += bytes_written as u64;
                total_bytes += bytes_written as u64;

                if offset == range.length {
                    break;
                }
            }
        }

        debug!("coredump total bytes {}", total_bytes);
        Ok(())
    }

    pub fn receive_memory_regions<F>(
        &mut self,
        ranges: &MemoryRangeTable,
        fd: &mut F,
    ) -> std::result::Result<(), MigratableError>
    where
        F: Read,
    {
        let guest_memory = self.guest_memory();
        let mem = guest_memory.memory();

        for range in ranges.regions() {
            let mut offset: u64 = 0;
            // Here we are manually handling the retry in case we can't read
            // the whole region at once because we can't use the implementation
            // from vm-memory::GuestMemory of read_exact_from() as it is not
            // following the correct behavior. For more info about this issue
            // see: https://github.com/rust-vmm/vm-memory/issues/174
            loop {
                let bytes_read = mem
                    .read_from(
                        GuestAddress(range.gpa + offset),
                        fd,
                        (range.length - offset) as usize,
                    )
                    .map_err(|e| {
                        MigratableError::MigrateReceive(anyhow!(
                            "Error receiving memory from socket: {}",
                            e
                        ))
                    })?;
                offset += bytes_read as u64;

                if offset == range.length {
                    break;
                }
            }
        }

        Ok(())
    }
}

struct MemoryNotify {
    slot_id: usize,
}

impl Aml for MemoryNotify {
    fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
        let object = aml::Path::new(&format!("M{:03}", self.slot_id));
        aml::If::new(
            &aml::Equal::new(&aml::Arg(0), &self.slot_id),
            vec![&aml::Notify::new(&object, &aml::Arg(1))],
        )
        .append_aml_bytes(bytes)
    }
}

struct MemorySlot {
    slot_id: usize,
}

impl Aml for MemorySlot {
    fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
        aml::Device::new(
            format!("M{:03}", self.slot_id).as_str().into(),
            vec![
                &aml::Name::new("_HID".into(), &aml::EisaName::new("PNP0C80")),
                &aml::Name::new("_UID".into(), &self.slot_id),
                /*
                _STA return value:
                Bit [0] – Set if the device is present.
                Bit [1] – Set if the device is enabled and decoding its resources.
                Bit [2] – Set if the device should be shown in the UI.
                Bit [3] – Set if the device is functioning properly (cleared if device failed its diagnostics).
                Bit [4] – Set if the battery is present.
                Bits [31:5] – Reserved (must be cleared).
                */
                &aml::Method::new(
                    "_STA".into(),
                    0,
                    false,
                    // Call into MSTA method which will interrogate device
                    vec![&aml::Return::new(&aml::MethodCall::new(
                        "MSTA".into(),
                        vec![&self.slot_id],
                    ))],
                ),
                // Get details of memory
                &aml::Method::new(
                    "_CRS".into(),
                    0,
                    false,
                    // Call into MCRS which provides actual memory details
                    vec![&aml::Return::new(&aml::MethodCall::new(
                        "MCRS".into(),
                        vec![&self.slot_id],
                    ))],
                ),
            ],
        )
        .append_aml_bytes(bytes)
    }
}

struct MemorySlots {
    slots: usize,
}

impl Aml for MemorySlots {
    fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
        for slot_id in 0..self.slots {
            MemorySlot { slot_id }.append_aml_bytes(bytes);
        }
    }
}

struct MemoryMethods {
    slots: usize,
}

impl Aml for MemoryMethods {
    fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
        // Add "MTFY" notification method
        let mut memory_notifies = Vec::new();
        for slot_id in 0..self.slots {
            memory_notifies.push(MemoryNotify { slot_id });
        }

        let mut memory_notifies_refs: Vec<&dyn aml::Aml> = Vec::new();
        for memory_notifier in memory_notifies.iter() {
            memory_notifies_refs.push(memory_notifier);
        }

        aml::Method::new("MTFY".into(), 2, true, memory_notifies_refs).append_aml_bytes(bytes);

        // MSCN method
        aml::Method::new(
            "MSCN".into(),
            0,
            true,
            vec![
                // Take lock defined above
                &aml::Acquire::new("MLCK".into(), 0xffff),
                &aml::Store::new(&aml::Local(0), &aml::ZERO),
                &aml::While::new(
                    &aml::LessThan::new(&aml::Local(0), &self.slots),
                    vec![
                        // Write slot number (in first argument) to I/O port via field
                        &aml::Store::new(&aml::Path::new("\\_SB_.MHPC.MSEL"), &aml::Local(0)),
                        // Check if MINS bit is set (inserting)
                        &aml::If::new(
                            &aml::Equal::new(&aml::Path::new("\\_SB_.MHPC.MINS"), &aml::ONE),
                            // Notify device if it is
                            vec![
                                &aml::MethodCall::new(
                                    "MTFY".into(),
                                    vec![&aml::Local(0), &aml::ONE],
                                ),
                                // Reset MINS bit
                                &aml::Store::new(&aml::Path::new("\\_SB_.MHPC.MINS"), &aml::ONE),
                            ],
                        ),
                        // Check if MRMV bit is set
                        &aml::If::new(
                            &aml::Equal::new(&aml::Path::new("\\_SB_.MHPC.MRMV"), &aml::ONE),
                            // Notify device if it is (with the eject constant 0x3)
                            vec![
                                &aml::MethodCall::new("MTFY".into(), vec![&aml::Local(0), &3u8]),
                                // Reset MRMV bit
                                &aml::Store::new(&aml::Path::new("\\_SB_.MHPC.MRMV"), &aml::ONE),
                            ],
                        ),
                        &aml::Add::new(&aml::Local(0), &aml::Local(0), &aml::ONE),
                    ],
                ),
                // Release lock
                &aml::Release::new("MLCK".into()),
            ],
        )
        .append_aml_bytes(bytes);

        // Memory status method
        aml::Method::new(
            "MSTA".into(),
            1,
            true,
            vec![
                // Take lock defined above
                &aml::Acquire::new("MLCK".into(), 0xffff),
                // Write slot number (in first argument) to I/O port via field
                &aml::Store::new(&aml::Path::new("\\_SB_.MHPC.MSEL"), &aml::Arg(0)),
                &aml::Store::new(&aml::Local(0), &aml::ZERO),
                // Check if MEN_ bit is set, if so make the local variable 0xf (see _STA for details of meaning)
                &aml::If::new(
                    &aml::Equal::new(&aml::Path::new("\\_SB_.MHPC.MEN_"), &aml::ONE),
                    vec![&aml::Store::new(&aml::Local(0), &0xfu8)],
                ),
                // Release lock
                &aml::Release::new("MLCK".into()),
                // Return 0 or 0xf
                &aml::Return::new(&aml::Local(0)),
            ],
        )
        .append_aml_bytes(bytes);

        // Memory range method
        aml::Method::new(
            "MCRS".into(),
            1,
            true,
            vec![
                // Take lock defined above
                &aml::Acquire::new("MLCK".into(), 0xffff),
                // Write slot number (in first argument) to I/O port via field
                &aml::Store::new(&aml::Path::new("\\_SB_.MHPC.MSEL"), &aml::Arg(0)),
                &aml::Name::new(
                    "MR64".into(),
                    &aml::ResourceTemplate::new(vec![&aml::AddressSpace::new_memory(
                        aml::AddressSpaceCachable::Cacheable,
                        true,
                        0x0000_0000_0000_0000u64,
                        0xFFFF_FFFF_FFFF_FFFEu64,
                    )]),
                ),
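                // The numeric offsets below index into the raw MR64 buffer:
                // in the ACPI QWORD Address Space Descriptor the range
                // minimum starts at byte 14, the range maximum at byte 22
                // and the length at byte 38, each accessed here as low and
                // high halves.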
                &aml::CreateField::<u64>::new(&aml::Path::new("MR64"), &14usize, "MINL".into()),
                &aml::CreateField::<u32>::new(&aml::Path::new("MR64"), &18usize, "MINH".into()),
                &aml::CreateField::<u64>::new(&aml::Path::new("MR64"), &22usize, "MAXL".into()),
                &aml::CreateField::<u32>::new(&aml::Path::new("MR64"), &26usize, "MAXH".into()),
                &aml::CreateField::<u64>::new(&aml::Path::new("MR64"), &38usize, "LENL".into()),
                &aml::CreateField::<u32>::new(&aml::Path::new("MR64"), &42usize, "LENH".into()),
                &aml::Store::new(&aml::Path::new("MINL"), &aml::Path::new("\\_SB_.MHPC.MHBL")),
                &aml::Store::new(&aml::Path::new("MINH"), &aml::Path::new("\\_SB_.MHPC.MHBH")),
                &aml::Store::new(&aml::Path::new("LENL"), &aml::Path::new("\\_SB_.MHPC.MHLL")),
                &aml::Store::new(&aml::Path::new("LENH"), &aml::Path::new("\\_SB_.MHPC.MHLH")),
                &aml::Add::new(
                    &aml::Path::new("MAXL"),
                    &aml::Path::new("MINL"),
                    &aml::Path::new("LENL"),
                ),
                &aml::Add::new(
                    &aml::Path::new("MAXH"),
                    &aml::Path::new("MINH"),
                    &aml::Path::new("LENH"),
                ),
                &aml::If::new(
                    &aml::LessThan::new(&aml::Path::new("MAXL"), &aml::Path::new("MINL")),
                    vec![&aml::Add::new(
                        &aml::Path::new("MAXH"),
                        &aml::ONE,
                        &aml::Path::new("MAXH"),
                    )],
                ),
                &aml::Subtract::new(&aml::Path::new("MAXL"), &aml::Path::new("MAXL"), &aml::ONE),
                // Release lock
                &aml::Release::new("MLCK".into()),
                &aml::Return::new(&aml::Path::new("MR64")),
            ],
        )
        .append_aml_bytes(bytes)
    }
}

impl Aml for MemoryManager {
    fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
        if let Some(acpi_address) = self.acpi_address {
            // Memory Hotplug Controller
            aml::Device::new(
                "_SB_.MHPC".into(),
                vec![
                    &aml::Name::new("_HID".into(), &aml::EisaName::new("PNP0A06")),
                    &aml::Name::new("_UID".into(), &"Memory Hotplug Controller"),
                    // Mutex to protect concurrent access as we write to choose slot and then read back status
                    &aml::Mutex::new("MLCK".into(), 0),
                    &aml::Name::new(
                        "_CRS".into(),
                        &aml::ResourceTemplate::new(vec![&aml::AddressSpace::new_memory(
                            aml::AddressSpaceCachable::NotCacheable,
                            true,
                            acpi_address.0,
                            acpi_address.0 + MEMORY_MANAGER_ACPI_SIZE as u64 - 1,
                        )]),
                    ),
                    // OpRegion and Fields map MMIO range into individual field values
                    &aml::OpRegion::new(
                        "MHPR".into(),
                        aml::OpRegionSpace::SystemMemory,
                        acpi_address.0 as usize,
                        MEMORY_MANAGER_ACPI_SIZE,
                    ),
                    &aml::Field::new(
                        "MHPR".into(),
                        aml::FieldAccessType::DWord,
                        aml::FieldUpdateRule::Preserve,
                        vec![
                            aml::FieldEntry::Named(*b"MHBL", 32), // Base (low 4 bytes)
                            aml::FieldEntry::Named(*b"MHBH", 32), // Base (high 4 bytes)
                            aml::FieldEntry::Named(*b"MHLL", 32), // Length (low 4 bytes)
                            aml::FieldEntry::Named(*b"MHLH", 32), // Length (high 4 bytes)
                        ],
                    ),
                    &aml::Field::new(
                        "MHPR".into(),
                        aml::FieldAccessType::DWord,
                        aml::FieldUpdateRule::Preserve,
                        vec![
                            aml::FieldEntry::Reserved(128),
                            aml::FieldEntry::Named(*b"MHPX", 32), // PXM
                        ],
                    ),
                    &aml::Field::new(
                        "MHPR".into(),
                        aml::FieldAccessType::Byte,
                        aml::FieldUpdateRule::WriteAsZeroes,
                        vec![
                            aml::FieldEntry::Reserved(160),
                            aml::FieldEntry::Named(*b"MEN_", 1), // Enabled
                            aml::FieldEntry::Named(*b"MINS", 1), // Inserting
                            aml::FieldEntry::Named(*b"MRMV", 1), // Removing
                            aml::FieldEntry::Named(*b"MEJ0", 1), // Ejecting
                        ],
                    ),
                    &aml::Field::new(
                        "MHPR".into(),
                        aml::FieldAccessType::DWord,
                        aml::FieldUpdateRule::Preserve,
                        vec![
                            aml::FieldEntry::Named(*b"MSEL", 32), // Selector
                            aml::FieldEntry::Named(*b"MOEV", 32), // Event
                            aml::FieldEntry::Named(*b"MOSC", 32), // OSC
                        ],
                    ),
                    &MemoryMethods {
                        slots: self.hotplug_slots.len(),
                    },
                    &MemorySlots {
                        slots: self.hotplug_slots.len(),
                    },
                ],
            )
            .append_aml_bytes(bytes);
        } else {
            aml::Device::new(
                "_SB_.MHPC".into(),
                vec![
                    &aml::Name::new("_HID".into(), &aml::EisaName::new("PNP0A06")),
                    &aml::Name::new("_UID".into(), &"Memory Hotplug Controller"),
                    // Empty MSCN for GED
                    &aml::Method::new("MSCN".into(), 0, true, vec![]),
                ],
            )
            .append_aml_bytes(bytes);
        }

        #[cfg(target_arch = "x86_64")]
        {
            if let Some(sgx_epc_region) = &self.sgx_epc_region {
                let min = sgx_epc_region.start().raw_value();
                let max = min + sgx_epc_region.size() - 1;
                // SGX EPC region
                aml::Device::new(
                    "_SB_.EPC_".into(),
                    vec![
                        &aml::Name::new("_HID".into(), &aml::EisaName::new("INT0E0C")),
                        // QWORD describing the EPC region start and size
                        &aml::Name::new(
                            "_CRS".into(),
                            &aml::ResourceTemplate::new(vec![&aml::AddressSpace::new_memory(
                                aml::AddressSpaceCachable::NotCacheable,
                                true,
                                min,
                                max,
                            )]),
                        ),
                        &aml::Method::new("_STA".into(), 0, false, vec![&aml::Return::new(&0xfu8)]),
                    ],
                )
                .append_aml_bytes(bytes);
            }
        }
    }
}

impl Pausable for MemoryManager {}

#[derive(Clone, Serialize, Deserialize, Versionize)]
pub struct MemoryManagerSnapshotData {
    memory_ranges: MemoryRangeTable,
    guest_ram_mappings: Vec<GuestRamMapping>,
    start_of_device_area: u64,
    boot_ram: u64,
    current_ram: u64,
    arch_mem_regions: Vec<ArchMemRegion>,
    hotplug_slots: Vec<HotPlugState>,
    next_memory_slot: u32,
    selected_slot: usize,
    next_hotplug_slot: usize,
}

impl VersionMapped for MemoryManagerSnapshotData {}

impl Snapshottable for MemoryManager {
    fn id(&self) -> String {
        MEMORY_MANAGER_SNAPSHOT_ID.to_string()
    }

    fn snapshot(&mut self) -> result::Result<Snapshot, MigratableError> {
        let mut memory_manager_snapshot = Snapshot::new(MEMORY_MANAGER_SNAPSHOT_ID);

        let memory_ranges = self.memory_range_table(true)?;

        // Store locally this list of ranges as it will be used through the
        // Transportable::send() implementation. The point is to avoid the
        // duplication of code regarding the creation of the path for each
        // region. The 'snapshot' step creates the list of memory regions,
        // including information about the need to copy a memory region or
        // not. This saves the 'send' step having to go through the same
        // process, and instead it can directly proceed with storing the
        // memory range content for the ranges requiring it.
        self.snapshot_memory_ranges = memory_ranges;

        memory_manager_snapshot.add_data_section(SnapshotDataSection::new_from_versioned_state(
            MEMORY_MANAGER_SNAPSHOT_ID,
            &self.snapshot_data(),
        )?);

        Ok(memory_manager_snapshot)
    }
}

impl Transportable for MemoryManager {
    fn send(
        &self,
        _snapshot: &Snapshot,
        destination_url: &str,
    ) -> result::Result<(), MigratableError> {
        if self.snapshot_memory_ranges.is_empty() {
            return Ok(());
        }

        let mut memory_file_path = url_to_path(destination_url)?;
        memory_file_path.push(String::from(SNAPSHOT_FILENAME));

        // Create the snapshot file for the entire memory
        let mut memory_file = OpenOptions::new()
            .read(true)
            .write(true)
            .create_new(true)
            .open(memory_file_path)
            .map_err(|e| MigratableError::MigrateSend(e.into()))?;

        let guest_memory = self.guest_memory.memory();

        for range in self.snapshot_memory_ranges.regions() {
            let mut offset: u64 = 0;
            // Here we are manually handling the retry in case we can't read
            // the whole region at once because we can't use the implementation
            // from vm-memory::GuestMemory of write_all_to() as it is not
            // following the correct behavior. For more info about this issue
            // see: https://github.com/rust-vmm/vm-memory/issues/174
            loop {
                let bytes_written = guest_memory
                    .write_to(
                        GuestAddress(range.gpa + offset),
                        &mut memory_file,
                        (range.length - offset) as usize,
                    )
                    .map_err(|e| MigratableError::MigrateSend(e.into()))?;
                offset += bytes_written as u64;

                if offset == range.length {
                    break;
                }
            }
        }
        Ok(())
    }
}

impl Migratable for MemoryManager {
    // Start the dirty log in the hypervisor (kvm/mshv).
    // Also, reset the dirty bitmap logged by the vmm.
    // Just before we do a bulk copy we want to start/clear the dirty log so that
    // pages touched during our bulk copy are tracked.
    fn start_dirty_log(&mut self) -> std::result::Result<(), MigratableError> {
        self.vm.start_dirty_log().map_err(|e| {
            MigratableError::MigrateSend(anyhow!("Error starting VM dirty log {}", e))
        })?;

        for r in self.guest_memory.memory().iter() {
            r.bitmap().reset();
        }

        Ok(())
    }

    fn stop_dirty_log(&mut self) -> std::result::Result<(), MigratableError> {
        self.vm.stop_dirty_log().map_err(|e| {
            MigratableError::MigrateSend(anyhow!("Error stopping VM dirty log {}", e))
        })?;

        Ok(())
    }

    // Generate a table for the pages that are dirty. The dirty pages are collapsed
    // together in the table if they are contiguous.
    fn dirty_log(&mut self) -> std::result::Result<MemoryRangeTable, MigratableError> {
        let mut table = MemoryRangeTable::default();
        for r in &self.guest_ram_mappings {
            let vm_dirty_bitmap = self.vm.get_dirty_log(r.slot, r.gpa, r.size).map_err(|e| {
                MigratableError::MigrateSend(anyhow!("Error getting VM dirty log {}", e))
            })?;
            let vmm_dirty_bitmap = match self.guest_memory.memory().find_region(GuestAddress(r.gpa))
            {
                Some(region) => {
                    assert!(region.start_addr().raw_value() == r.gpa);
                    assert!(region.len() == r.size);
                    region.bitmap().get_and_reset()
                }
                None => {
                    return Err(MigratableError::MigrateSend(anyhow!(
                        "Error finding 'guest memory region' with address {:x}",
                        r.gpa
                    )))
                }
            };
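
            // Merge the hypervisor's dirty log with the VMM's own
            // write-tracking bitmap: a page is dirty if either tracker
            // saw a write to it.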
            let dirty_bitmap: Vec<u64> = vm_dirty_bitmap
                .iter()
                .zip(vmm_dirty_bitmap.iter())
                .map(|(x, y)| x | y)
                .collect();

            let sub_table = MemoryRangeTable::from_bitmap(dirty_bitmap, r.gpa, 4096);

            if sub_table.regions().is_empty() {
                info!("Dirty Memory Range Table is empty");
            } else {
                info!("Dirty Memory Range Table:");
                for range in sub_table.regions() {
                    info!("GPA: {:x} size: {} (KiB)", range.gpa, range.length / 1024);
                }
            }

            table.extend(sub_table);
        }
        Ok(table)
    }
}