vmm: Share the guest memory instead of cloning it

The VMM guest memory was cloned (copied) everywhere the code needed to
have ownership of it. In order to clean up the code, and in anticipation
of future support for modifying this guest memory instance at runtime,
it is important that every part of the code share the same instance.

Because VirtioDevice implementations need to access the guest memory
from different threads, Arc must be used in this case.

Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
This commit is contained in:
Sebastien Boeuf 2019-08-20 14:12:00 -07:00 committed by Rob Bradford
parent f4d41d600b
commit ec0b5567c8
11 changed files with 40 additions and 32 deletions

View File

@ -513,14 +513,18 @@ pub struct VfioDevice {
group: VfioGroup,
regions: Vec<VfioRegion>,
irqs: HashMap<u32, VfioIrq>,
mem: GuestMemoryMmap,
mem: Arc<GuestMemoryMmap>,
}
impl VfioDevice {
/// Create a new vfio device, then guest read/write on this device could be
/// transferred into kernel vfio.
/// sysfspath specifies the vfio device path in sys file system.
pub fn new(sysfspath: &Path, device_fd: Arc<DeviceFd>, mem: GuestMemoryMmap) -> Result<Self> {
pub fn new(
sysfspath: &Path,
device_fd: Arc<DeviceFd>,
mem: Arc<GuestMemoryMmap>,
) -> Result<Self> {
let uuid_path: PathBuf = [sysfspath, Path::new("iommu_group")].iter().collect();
let group_path = uuid_path.read_link().map_err(|_| VfioError::InvalidPath)?;
let group_osstr = group_path.file_name().ok_or(VfioError::InvalidPath)?;

View File

@ -321,7 +321,7 @@ impl Request {
struct BlockEpollHandler<T: DiskFile> {
queues: Vec<Queue>,
mem: GuestMemoryMmap,
mem: Arc<GuestMemoryMmap>,
disk_image: T,
disk_nsectors: u64,
interrupt_cb: Arc<VirtioInterrupt>,
@ -610,7 +610,7 @@ impl<T: 'static + DiskFile + Send> VirtioDevice for Block<T> {
fn activate(
&mut self,
mem: GuestMemoryMmap,
mem: Arc<GuestMemoryMmap>,
interrupt_cb: Arc<VirtioInterrupt>,
queues: Vec<Queue>,
mut queue_evts: Vec<EventFd>,

View File

@ -54,7 +54,7 @@ unsafe impl ByteValued for VirtioConsoleConfig {}
struct ConsoleEpollHandler {
queues: Vec<Queue>,
mem: GuestMemoryMmap,
mem: Arc<GuestMemoryMmap>,
interrupt_cb: Arc<VirtioInterrupt>,
in_buffer: Arc<Mutex<VecDeque<u8>>>,
out: Box<dyn io::Write + Send>,
@ -432,7 +432,7 @@ impl VirtioDevice for Console {
fn activate(
&mut self,
mem: GuestMemoryMmap,
mem: Arc<GuestMemoryMmap>,
interrupt_cb: Arc<VirtioInterrupt>,
queues: Vec<Queue>,
mut queue_evts: Vec<EventFd>,

View File

@ -67,7 +67,7 @@ pub trait VirtioDevice: Send {
/// Activates this device for real usage.
fn activate(
&mut self,
mem: GuestMemoryMmap,
mem: Arc<GuestMemoryMmap>,
interrupt_evt: Arc<VirtioInterrupt>,
queues: Vec<Queue>,
queue_evts: Vec<EventFd>,

View File

@ -368,7 +368,7 @@ impl Fs {
fn setup_vu(
&mut self,
mem: &GuestMemoryMmap,
mem: &Arc<GuestMemoryMmap>,
queues: Vec<Queue>,
queue_evts: Vec<EventFd>,
) -> Result<Vec<(EventFd, Queue)>> {
@ -527,7 +527,7 @@ impl VirtioDevice for Fs {
fn activate(
&mut self,
mem: GuestMemoryMmap,
mem: Arc<GuestMemoryMmap>,
interrupt_cb: Arc<VirtioInterrupt>,
queues: Vec<Queue>,
queue_evts: Vec<EventFd>,

View File

@ -115,7 +115,7 @@ fn vnet_hdr_len() -> usize {
}
struct NetEpollHandler {
mem: GuestMemoryMmap,
mem: Arc<GuestMemoryMmap>,
tap: Tap,
rx: RxVirtio,
tx: TxVirtio,
@ -572,7 +572,7 @@ impl VirtioDevice for Net {
fn activate(
&mut self,
mem: GuestMemoryMmap,
mem: Arc<GuestMemoryMmap>,
interrupt_cb: Arc<VirtioInterrupt>,
mut queues: Vec<Queue>,
mut queue_evts: Vec<EventFd>,

View File

@ -154,7 +154,7 @@ impl Request {
struct PmemEpollHandler {
queue: Queue,
mem: GuestMemoryMmap,
mem: Arc<GuestMemoryMmap>,
disk: File,
interrupt_cb: Arc<VirtioInterrupt>,
queue_evt: EventFd,
@ -382,7 +382,7 @@ impl VirtioDevice for Pmem {
fn activate(
&mut self,
mem: GuestMemoryMmap,
mem: Arc<GuestMemoryMmap>,
interrupt_cb: Arc<VirtioInterrupt>,
mut queues: Vec<Queue>,
mut queue_evts: Vec<EventFd>,

View File

@ -32,7 +32,7 @@ const KILL_EVENT: DeviceEventT = 1;
struct RngEpollHandler {
queues: Vec<Queue>,
mem: GuestMemoryMmap,
mem: Arc<GuestMemoryMmap>,
random_file: File,
interrupt_cb: Arc<VirtioInterrupt>,
queue_evt: EventFd,
@ -237,7 +237,7 @@ impl VirtioDevice for Rng {
fn activate(
&mut self,
mem: GuestMemoryMmap,
mem: Arc<GuestMemoryMmap>,
interrupt_cb: Arc<VirtioInterrupt>,
queues: Vec<Queue>,
mut queue_evts: Vec<EventFd>,

View File

@ -271,7 +271,7 @@ mod tests {
}
fn activate(
&mut self,
_mem: GuestMemoryMmap,
_mem: Arc<GuestMemoryMmap>,
_interrupt_evt: Arc<VirtioInterrupt>,
_queues: Vec<Queue>,
_queue_evts: Vec<EventFd>,

View File

@ -235,7 +235,7 @@ pub struct VirtioPciDevice {
queue_evts: Vec<EventFd>,
// Guest memory
memory: Option<GuestMemoryMmap>,
memory: Option<Arc<GuestMemoryMmap>>,
// Setting PCI BAR
settings_bar: u8,
@ -244,7 +244,7 @@ pub struct VirtioPciDevice {
impl VirtioPciDevice {
/// Constructs a new PCI transport for the given virtio device.
pub fn new(
memory: GuestMemoryMmap,
memory: Arc<GuestMemoryMmap>,
device: Box<dyn VirtioDevice>,
msix_num: u16,
) -> Result<Self> {

View File

@ -518,7 +518,7 @@ impl Vcpu {
}
struct VmInfo<'a> {
memory: GuestMemoryMmap,
memory: &'a Arc<GuestMemoryMmap>,
vm_fd: &'a Arc<VmFd>,
vm_cfg: &'a VmConfig<'a>,
}
@ -697,7 +697,7 @@ impl DeviceManager {
.map_err(DeviceManagerError::CreateVirtioConsole)?;
DeviceManager::add_virtio_pci_device(
Box::new(virtio_console_device),
vm_info.memory.clone(),
vm_info.memory,
allocator,
vm_info.vm_fd,
&mut pci,
@ -812,7 +812,7 @@ impl DeviceManager {
DeviceManager::add_virtio_pci_device(
block,
vm_info.memory.clone(),
vm_info.memory,
allocator,
vm_info.vm_fd,
pci,
@ -849,7 +849,7 @@ impl DeviceManager {
DeviceManager::add_virtio_pci_device(
Box::new(virtio_net_device),
vm_info.memory.clone(),
vm_info.memory,
allocator,
vm_info.vm_fd,
pci,
@ -876,7 +876,7 @@ impl DeviceManager {
DeviceManager::add_virtio_pci_device(
Box::new(virtio_rng_device),
vm_info.memory.clone(),
vm_info.memory,
allocator,
vm_info.vm_fd,
pci,
@ -965,7 +965,7 @@ impl DeviceManager {
DeviceManager::add_virtio_pci_device(
Box::new(virtio_fs_device),
vm_info.memory.clone(),
vm_info.memory,
allocator,
vm_info.vm_fd,
pci,
@ -1046,7 +1046,7 @@ impl DeviceManager {
DeviceManager::add_virtio_pci_device(
Box::new(virtio_pmem_device),
vm_info.memory.clone(),
vm_info.memory,
allocator,
vm_info.vm_fd,
pci,
@ -1113,7 +1113,7 @@ impl DeviceManager {
fn add_virtio_pci_device(
virtio_device: Box<dyn vm_virtio::VirtioDevice>,
memory: GuestMemoryMmap,
memory: &Arc<GuestMemoryMmap>,
allocator: &mut SystemAllocator,
vm_fd: &Arc<VmFd>,
pci: &mut PciConfigIo,
@ -1126,7 +1126,7 @@ impl DeviceManager {
0
};
let mut virtio_pci_device = VirtioPciDevice::new(memory, virtio_device, msix_num)
let mut virtio_pci_device = VirtioPciDevice::new(memory.clone(), virtio_device, msix_num)
.map_err(DeviceManagerError::VirtioDevice)?;
let bars = virtio_pci_device
@ -1317,7 +1317,7 @@ impl AsRawFd for EpollContext {
pub struct Vm<'a> {
fd: Arc<VmFd>,
kernel: File,
memory: GuestMemoryMmap,
memory: Arc<GuestMemoryMmap>,
vcpus: Vec<thread::JoinHandle<()>>,
devices: DeviceManager,
cpuid: CpuId,
@ -1485,8 +1485,12 @@ impl<'a> Vm<'a> {
.ok_or(Error::MemoryRangeAllocation)?;
}
// Convert the guest memory into an Arc. The point being able to use it
// anywhere in the code, no matter which thread might use it.
let guest_memory = Arc::new(guest_memory);
let vm_info = VmInfo {
memory: guest_memory.clone(),
memory: &guest_memory,
vm_fd: &fd,
vm_cfg: &config,
};
@ -1533,7 +1537,7 @@ impl<'a> Vm<'a> {
let cmdline_cstring =
CString::new(self.config.cmdline.args.clone()).map_err(|_| Error::CmdLine)?;
let entry_addr = match linux_loader::loader::Elf::load(
&self.memory,
self.memory.as_ref(),
None,
&mut self.kernel,
Some(arch::HIMEM_START),
@ -1541,7 +1545,7 @@ impl<'a> Vm<'a> {
Ok(entry_addr) => entry_addr,
Err(linux_loader::loader::Error::InvalidElfMagicNumber) => {
linux_loader::loader::BzImage::load(
&self.memory,
self.memory.as_ref(),
None,
&mut self.kernel,
Some(arch::HIMEM_START),
@ -1552,7 +1556,7 @@ impl<'a> Vm<'a> {
};
linux_loader::loader::load_cmdline(
&self.memory,
self.memory.as_ref(),
self.config.cmdline.offset,
&cmdline_cstring,
)