Mirror of https://github.com/cloud-hypervisor/cloud-hypervisor.git (synced 2025-03-20 07:58:55 +00:00)
vmm: Add RwLock to the GuestMemoryMmap
Following the refactoring of the code allowing multiple threads to access
the same instance of the guest memory, this patch goes one step further by
adding RwLock to it.

This anticipates the future need for being able to modify the content of
the guest memory at runtime. The reasons for adding regions to an existing
guest memory could be:

- Add virtio-pmem and virtio-fs regions after the guest memory was created.
- Support future hotplug of devices, memory, or anything that would require
  more memory at runtime.

Because most of the time the lock will be taken as read-only, using RwLock
instead of Mutex is the right approach.

Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
Parent: ec0b5567c8
Commit: 0b8856d148
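The locking pattern the diff below applies everywhere is the standard
Arc<RwLock<T>> idiom: each consumer clones the Arc, takes a read() guard on
its hot path, and only a (future) region-hotplug path takes the write()
guard. Here is a minimal, self-contained sketch of that idiom; the
GuestMemoryMmap type below is a hypothetical stand-in, not the real type
from the vm-memory crate.

use std::sync::{Arc, RwLock};
use std::thread;

// Hypothetical stand-in for vm-memory's GuestMemoryMmap:
// just a list of (start, len) guest memory regions.
struct GuestMemoryMmap {
    regions: Vec<(u64, u64)>,
}

fn main() {
    // Wrap the guest memory so regions can be added at runtime.
    let mem = Arc::new(RwLock::new(GuestMemoryMmap {
        regions: vec![(0, 1 << 30)],
    }));

    // A device thread clones the Arc and takes a cheap read lock on its hot path.
    let dev_mem = mem.clone();
    let handle = thread::spawn(move || {
        let guard = dev_mem.read().unwrap();
        for (start, len) in &guard.regions {
            println!("region: start={:#x} len={:#x}", start, len);
        }
    });
    handle.join().unwrap();

    // A hotplug path (e.g. virtio-pmem) takes the write lock to add a region.
    mem.write().unwrap().regions.push((1 << 30, 512 << 20));
    assert_eq!(mem.read().unwrap().regions.len(), 2);
}

RwLock rather than Mutex pays off because many device threads read guest
memory concurrently, while writes (adding regions) stay rare.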
@@ -14,7 +14,7 @@ use std::mem;
 use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
 use std::os::unix::prelude::FileExt;
 use std::path::{Path, PathBuf};
-use std::sync::Arc;
+use std::sync::{Arc, RwLock};
 use std::u32;
 use vfio_bindings::bindings::vfio::*;
 use vfio_ioctls::*;
@@ -513,7 +513,7 @@ pub struct VfioDevice {
     group: VfioGroup,
     regions: Vec<VfioRegion>,
     irqs: HashMap<u32, VfioIrq>,
-    mem: Arc<GuestMemoryMmap>,
+    mem: Arc<RwLock<GuestMemoryMmap>>,
 }
 
 impl VfioDevice {
@@ -523,7 +523,7 @@ impl VfioDevice {
     pub fn new(
         sysfspath: &Path,
         device_fd: Arc<DeviceFd>,
-        mem: Arc<GuestMemoryMmap>,
+        mem: Arc<RwLock<GuestMemoryMmap>>,
     ) -> Result<Self> {
         let uuid_path: PathBuf = [sysfspath, Path::new("iommu_group")].iter().collect();
         let group_path = uuid_path.read_link().map_err(|_| VfioError::InvalidPath)?;
@@ -773,7 +773,7 @@ impl VfioDevice {
     /// Add all guest memory regions into vfio container's iommu table,
     /// then vfio kernel driver could access guest memory from gfn
    pub fn setup_dma_map(&self) -> Result<()> {
-        self.mem.with_regions(|_index, region| {
+        self.mem.read().unwrap().with_regions(|_index, region| {
             self.vfio_dma_map(
                 region.start_addr().raw_value(),
                 region.len() as u64,
@@ -786,7 +786,7 @@ impl VfioDevice {
     /// remove all guest memory regions from vfio containers iommu table
     /// then vfio kernel driver couldn't access this guest memory
     pub fn unset_dma_map(&self) -> Result<()> {
-        self.mem.with_regions(|_index, region| {
+        self.mem.read().unwrap().with_regions(|_index, region| {
             self.vfio_dma_unmap(region.start_addr().raw_value(), region.len() as u64)
         })?;
         Ok(())
@@ -17,7 +17,7 @@ use std::os::linux::fs::MetadataExt;
 use std::os::unix::io::AsRawFd;
 use std::path::PathBuf;
 use std::result;
-use std::sync::Arc;
+use std::sync::{Arc, RwLock};
 use std::thread;
 
 use super::Error as DeviceError;
@@ -321,7 +321,7 @@ impl Request {
 
 struct BlockEpollHandler<T: DiskFile> {
     queues: Vec<Queue>,
-    mem: Arc<GuestMemoryMmap>,
+    mem: Arc<RwLock<GuestMemoryMmap>>,
     disk_image: T,
     disk_nsectors: u64,
     interrupt_cb: Arc<VirtioInterrupt>,
@@ -334,14 +334,15 @@ impl<T: DiskFile> BlockEpollHandler<T> {
 
         let mut used_desc_heads = [(0, 0); QUEUE_SIZE as usize];
         let mut used_count = 0;
-        for avail_desc in queue.iter(&self.mem) {
+        let mem = self.mem.read().unwrap();
+        for avail_desc in queue.iter(&mem) {
             let len;
-            match Request::parse(&avail_desc, &self.mem) {
+            match Request::parse(&avail_desc, &mem) {
                 Ok(request) => {
                     let status = match request.execute(
                         &mut self.disk_image,
                         self.disk_nsectors,
-                        &self.mem,
+                        &mem,
                         &self.disk_image_id,
                     ) {
                         Ok(l) => {
@@ -356,7 +357,7 @@ impl<T: DiskFile> BlockEpollHandler<T> {
                     };
                     // We use unwrap because the request parsing process already checked that the
                     // status_addr was valid.
-                    self.mem.write_obj(status, request.status_addr).unwrap();
+                    mem.write_obj(status, request.status_addr).unwrap();
                 }
                 Err(e) => {
                     error!("Failed to parse available descriptor chain: {:?}", e);
@@ -368,7 +369,7 @@ impl<T: DiskFile> BlockEpollHandler<T> {
         }
 
         for &(desc_index, len) in &used_desc_heads[..used_count] {
-            queue.add_used(&self.mem, desc_index, len);
+            queue.add_used(&mem, desc_index, len);
         }
         used_count > 0
     }
@@ -610,7 +611,7 @@ impl<T: 'static + DiskFile + Send> VirtioDevice for Block<T> {
 
     fn activate(
         &mut self,
-        mem: Arc<GuestMemoryMmap>,
+        mem: Arc<RwLock<GuestMemoryMmap>>,
         interrupt_cb: Arc<VirtioInterrupt>,
         queues: Vec<Queue>,
         mut queue_evts: Vec<EventFd>,
@@ -10,7 +10,7 @@ use std::io;
 use std::io::Write;
 use std::os::unix::io::AsRawFd;
 use std::result;
-use std::sync::{Arc, Mutex};
+use std::sync::{Arc, Mutex, RwLock};
 use std::thread;
 
 use super::Error as DeviceError;
@@ -54,7 +54,7 @@ unsafe impl ByteValued for VirtioConsoleConfig {}
 
 struct ConsoleEpollHandler {
     queues: Vec<Queue>,
-    mem: Arc<GuestMemoryMmap>,
+    mem: Arc<RwLock<GuestMemoryMmap>>,
     interrupt_cb: Arc<VirtioInterrupt>,
     in_buffer: Arc<Mutex<VecDeque<u8>>>,
     out: Box<dyn io::Write + Send>,
@@ -80,14 +80,15 @@ impl ConsoleEpollHandler {
         let mut used_count = 0;
         let mut write_count = 0;
 
-        for avail_desc in recv_queue.iter(&self.mem) {
+        let mem = self.mem.read().unwrap();
+        for avail_desc in recv_queue.iter(&mem) {
             let len;
 
             let limit = cmp::min(write_count + avail_desc.len as u32, count as u32);
             let source_slice = in_buffer
                 .drain(write_count as usize..limit as usize)
                 .collect::<Vec<u8>>();
-            let write_result = self.mem.write_slice(&source_slice[..], avail_desc.addr);
+            let write_result = mem.write_slice(&source_slice[..], avail_desc.addr);
 
             match write_result {
                 Ok(_) => {
@@ -109,7 +110,7 @@ impl ConsoleEpollHandler {
         }
 
         for &(desc_index, len) in &used_desc_heads[..used_count] {
-            recv_queue.add_used(&self.mem, desc_index, len);
+            recv_queue.add_used(&mem, desc_index, len);
         }
         used_count > 0
     }
@@ -126,11 +127,10 @@ impl ConsoleEpollHandler {
         let mut used_desc_heads = [(0, 0); QUEUE_SIZE as usize];
         let mut used_count = 0;
 
-        for avail_desc in trans_queue.iter(&self.mem) {
+        let mem = self.mem.read().unwrap();
+        for avail_desc in trans_queue.iter(&mem) {
             let len;
-            let _ = self
-                .mem
-                .write_to(avail_desc.addr, &mut self.out, avail_desc.len as usize);
+            let _ = mem.write_to(avail_desc.addr, &mut self.out, avail_desc.len as usize);
             let _ = self.out.flush();
 
             len = avail_desc.len;
@@ -139,7 +139,7 @@ impl ConsoleEpollHandler {
         }
 
         for &(desc_index, len) in &used_desc_heads[..used_count] {
-            trans_queue.add_used(&self.mem, desc_index, len);
+            trans_queue.add_used(&mem, desc_index, len);
         }
         used_count > 0
     }
@@ -432,7 +432,7 @@ impl VirtioDevice for Console {
 
     fn activate(
         &mut self,
-        mem: Arc<GuestMemoryMmap>,
+        mem: Arc<RwLock<GuestMemoryMmap>>,
         interrupt_cb: Arc<VirtioInterrupt>,
         queues: Vec<Queue>,
         mut queue_evts: Vec<EventFd>,
@@ -7,7 +7,7 @@
 // SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
 
 use super::*;
-use std::sync::Arc;
+use std::sync::{Arc, RwLock};
 use vm_memory::{GuestAddress, GuestMemoryMmap, GuestUsize};
 use vmm_sys_util::eventfd::EventFd;
 
@@ -67,7 +67,7 @@ pub trait VirtioDevice: Send {
     /// Activates this device for real usage.
     fn activate(
         &mut self,
-        mem: Arc<GuestMemoryMmap>,
+        mem: Arc<RwLock<GuestMemoryMmap>>,
         interrupt_evt: Arc<VirtioInterrupt>,
         queues: Vec<Queue>,
         queue_evts: Vec<EventFd>,
@@ -13,7 +13,7 @@ use std::io;
 use std::io::Write;
 use std::os::unix::io::{AsRawFd, RawFd};
 use std::result;
-use std::sync::{Arc, Mutex};
+use std::sync::{Arc, Mutex, RwLock};
 use std::thread;
 use vhost_rs::vhost_user::message::{
     VhostUserFSSlaveMsg, VhostUserProtocolFeatures, VhostUserVirtioFeatures,
@@ -368,7 +368,7 @@ impl Fs {
 
     fn setup_vu(
         &mut self,
-        mem: &Arc<GuestMemoryMmap>,
+        mem: &GuestMemoryMmap,
         queues: Vec<Queue>,
         queue_evts: Vec<EventFd>,
     ) -> Result<Vec<(EventFd, Queue)>> {
@@ -527,7 +527,7 @@ impl VirtioDevice for Fs {
 
     fn activate(
         &mut self,
-        mem: Arc<GuestMemoryMmap>,
+        mem: Arc<RwLock<GuestMemoryMmap>>,
         interrupt_cb: Arc<VirtioInterrupt>,
         queues: Vec<Queue>,
         queue_evts: Vec<EventFd>,
@@ -552,7 +552,7 @@ impl VirtioDevice for Fs {
         self.kill_evt = Some(self_kill_evt);
 
         let vu_call_evt_queue_list = self
-            .setup_vu(&mem, queues, queue_evts)
+            .setup_vu(&mem.read().unwrap(), queues, queue_evts)
             .map_err(ActivateError::VhostUserSetup)?;
 
         // Initialize slave communication.
@@ -15,7 +15,7 @@ use std::mem;
 use std::net::Ipv4Addr;
 use std::os::unix::io::{AsRawFd, RawFd};
 use std::result;
-use std::sync::Arc;
+use std::sync::{Arc, RwLock};
 use std::thread;
 use std::vec::Vec;
 
@@ -115,7 +115,7 @@ fn vnet_hdr_len() -> usize {
 }
 
 struct NetEpollHandler {
-    mem: Arc<GuestMemoryMmap>,
+    mem: Arc<RwLock<GuestMemoryMmap>>,
     tap: Tap,
     rx: RxVirtio,
     tx: TxVirtio,
@@ -137,7 +137,8 @@ impl NetEpollHandler {
     // if a buffer was used, and false if the frame must be deferred until a buffer
     // is made available by the driver.
     fn rx_single_frame(&mut self) -> bool {
-        let mut next_desc = self.rx.queue.iter(&self.mem).next();
+        let mem = self.mem.read().unwrap();
+        let mut next_desc = self.rx.queue.iter(&mem).next();
 
         if next_desc.is_none() {
             // Queue has no available descriptors
@@ -160,7 +161,7 @@ impl NetEpollHandler {
             }
             let limit = cmp::min(write_count + desc.len as usize, self.rx.bytes_read);
             let source_slice = &self.rx.frame_buf[write_count..limit];
-            let write_result = self.mem.write_slice(source_slice, desc.addr);
+            let write_result = mem.write_slice(source_slice, desc.addr);
 
             match write_result {
                 Ok(_) => {
@@ -184,9 +185,7 @@ impl NetEpollHandler {
             }
         }
 
-        self.rx
-            .queue
-            .add_used(&self.mem, head_index, write_count as u32);
+        self.rx.queue.add_used(&mem, head_index, write_count as u32);
 
         // Mark that we have at least one pending packet and we need to interrupt the guest.
        self.rx.deferred_irqs = true;
@@ -246,7 +245,8 @@ impl NetEpollHandler {
     }
 
     fn process_tx(&mut self) -> result::Result<(), DeviceError> {
-        while let Some(avail_desc) = self.tx.queue.iter(&self.mem).next() {
+        let mem = self.mem.read().unwrap();
+        while let Some(avail_desc) = self.tx.queue.iter(&mem).next() {
             let head_index = avail_desc.index;
             let mut read_count = 0;
             let mut next_desc = Some(avail_desc);
@@ -268,7 +268,7 @@ impl NetEpollHandler {
             for (desc_addr, desc_len) in self.tx.iovec.drain(..) {
                 let limit = cmp::min((read_count + desc_len) as usize, self.tx.frame_buf.len());
 
-                let read_result = self.mem.read_slice(
+                let read_result = mem.read_slice(
                     &mut self.tx.frame_buf[read_count..limit as usize],
                     desc_addr,
                 );
@@ -292,7 +292,7 @@ impl NetEpollHandler {
                 }
             };
 
-            self.tx.queue.add_used(&self.mem, head_index, 0);
+            self.tx.queue.add_used(&mem, head_index, 0);
         }
 
         Ok(())
@@ -572,7 +572,7 @@ impl VirtioDevice for Net {
 
     fn activate(
         &mut self,
-        mem: Arc<GuestMemoryMmap>,
+        mem: Arc<RwLock<GuestMemoryMmap>>,
         interrupt_cb: Arc<VirtioInterrupt>,
         mut queues: Vec<Queue>,
         mut queue_evts: Vec<EventFd>,
@@ -15,7 +15,7 @@ use std::io::{self, Write};
 use std::mem::size_of;
 use std::os::unix::io::AsRawFd;
 use std::result;
-use std::sync::Arc;
+use std::sync::{Arc, RwLock};
 use std::thread;
 
 use super::Error as DeviceError;
@@ -154,7 +154,7 @@ impl Request {
 
 struct PmemEpollHandler {
     queue: Queue,
-    mem: Arc<GuestMemoryMmap>,
+    mem: Arc<RwLock<GuestMemoryMmap>>,
     disk: File,
     interrupt_cb: Arc<VirtioInterrupt>,
     queue_evt: EventFd,
@@ -165,8 +165,9 @@ impl PmemEpollHandler {
     fn process_queue(&mut self) -> bool {
         let mut used_desc_heads = [(0, 0); QUEUE_SIZE as usize];
         let mut used_count = 0;
-        for avail_desc in self.queue.iter(&self.mem) {
-            let len = match Request::parse(&avail_desc, &self.mem) {
+        let mem = self.mem.read().unwrap();
+        for avail_desc in self.queue.iter(&mem) {
+            let len = match Request::parse(&avail_desc, &mem) {
                 Ok(ref req) if (req.type_ == RequestType::Flush) => {
                     let status_code = match self.disk.sync_all() {
                         Ok(()) => VIRTIO_PMEM_RESP_TYPE_OK,
@@ -177,7 +178,7 @@ impl PmemEpollHandler {
                     };
 
                     let resp = VirtioPmemResp { ret: status_code };
-                    match self.mem.write_obj(resp, req.status_addr) {
+                    match mem.write_obj(resp, req.status_addr) {
                         Ok(_) => size_of::<VirtioPmemResp>() as u32,
                         Err(e) => {
                             error!("bad guest memory address: {}", e);
@@ -201,7 +202,7 @@ impl PmemEpollHandler {
         }
 
         for &(desc_index, len) in &used_desc_heads[..used_count] {
-            self.queue.add_used(&self.mem, desc_index, len);
+            self.queue.add_used(&mem, desc_index, len);
         }
         used_count > 0
     }
@@ -382,7 +383,7 @@ impl VirtioDevice for Pmem {
 
     fn activate(
         &mut self,
-        mem: Arc<GuestMemoryMmap>,
+        mem: Arc<RwLock<GuestMemoryMmap>>,
         interrupt_cb: Arc<VirtioInterrupt>,
         mut queues: Vec<Queue>,
         mut queue_evts: Vec<EventFd>,
@@ -9,7 +9,7 @@ use std::fs::File;
 use std::io;
 use std::os::unix::io::AsRawFd;
 use std::result;
-use std::sync::Arc;
+use std::sync::{Arc, RwLock};
 use std::thread;
 
 use super::Error as DeviceError;
@@ -32,7 +32,7 @@ const KILL_EVENT: DeviceEventT = 1;
 
 struct RngEpollHandler {
     queues: Vec<Queue>,
-    mem: Arc<GuestMemoryMmap>,
+    mem: Arc<RwLock<GuestMemoryMmap>>,
     random_file: File,
     interrupt_cb: Arc<VirtioInterrupt>,
     queue_evt: EventFd,
@@ -45,14 +45,14 @@ impl RngEpollHandler {
 
         let mut used_desc_heads = [(0, 0); QUEUE_SIZE as usize];
         let mut used_count = 0;
-        for avail_desc in queue.iter(&self.mem) {
+        let mem = self.mem.read().unwrap();
+        for avail_desc in queue.iter(&mem) {
             let mut len = 0;
 
             // Drivers can only read from the random device.
             if avail_desc.is_write_only() {
                 // Fill the read with data from the random device on the host.
-                if self
-                    .mem
+                if mem
                     .read_from(
                         avail_desc.addr,
                         &mut self.random_file,
@@ -69,7 +69,7 @@ impl RngEpollHandler {
         }
 
         for &(desc_index, len) in &used_desc_heads[..used_count] {
-            queue.add_used(&self.mem, desc_index, len);
+            queue.add_used(&mem, desc_index, len);
         }
         used_count > 0
     }
@@ -237,7 +237,7 @@ impl VirtioDevice for Rng {
 
     fn activate(
         &mut self,
-        mem: Arc<GuestMemoryMmap>,
+        mem: Arc<RwLock<GuestMemoryMmap>>,
         interrupt_cb: Arc<VirtioInterrupt>,
         queues: Vec<Queue>,
         mut queue_evts: Vec<EventFd>,
@@ -254,7 +254,7 @@ mod tests {
     use super::*;
     use crate::{ActivateResult, VirtioInterrupt};
 
-    use std::sync::Arc;
+    use std::sync::{Arc, RwLock};
     use vm_memory::GuestMemoryMmap;
     use vmm_sys_util::eventfd::EventFd;
 
@@ -271,7 +271,7 @@ mod tests {
         }
         fn activate(
             &mut self,
-            _mem: Arc<GuestMemoryMmap>,
+            _mem: Arc<RwLock<GuestMemoryMmap>>,
             _interrupt_evt: Arc<VirtioInterrupt>,
             _queues: Vec<Queue>,
             _queue_evts: Vec<EventFd>,
@@ -14,8 +14,7 @@ extern crate vmm_sys_util;
 
 use libc::EFD_NONBLOCK;
 use std::sync::atomic::{AtomicU16, AtomicUsize, Ordering};
-use std::sync::Arc;
-use std::sync::Mutex;
+use std::sync::{Arc, Mutex, RwLock};
 
 use devices::BusDevice;
 use pci::{
@@ -235,7 +234,7 @@ pub struct VirtioPciDevice {
     queue_evts: Vec<EventFd>,
 
     // Guest memory
-    memory: Option<Arc<GuestMemoryMmap>>,
+    memory: Option<Arc<RwLock<GuestMemoryMmap>>>,
 
     // Setting PCI BAR
     settings_bar: u8,
@@ -244,7 +243,7 @@
 impl VirtioPciDevice {
     /// Constructs a new PCI transport for the given virtio device.
     pub fn new(
-        memory: Arc<GuestMemoryMmap>,
+        memory: Arc<RwLock<GuestMemoryMmap>>,
         device: Box<dyn VirtioDevice>,
         msix_num: u16,
     ) -> Result<Self> {
@@ -339,7 +338,7 @@ impl VirtioPciDevice {
 
     fn are_queues_valid(&self) -> bool {
         if let Some(mem) = self.memory.as_ref() {
-            self.queues.iter().all(|q| q.is_valid(mem))
+            self.queues.iter().all(|q| q.is_valid(&mem.read().unwrap()))
         } else {
             false
         }
@@ -43,10 +43,11 @@ use signal_hook::{iterator::Signals, SIGWINCH};
 use std::ffi::CString;
 use std::fs::{File, OpenOptions};
 use std::io::{self, sink, stdout};
+use std::ops::Deref;
 use std::os::unix::fs::OpenOptionsExt;
 use std::os::unix::io::{AsRawFd, RawFd};
 use std::ptr::null_mut;
-use std::sync::{Arc, Barrier, Mutex};
+use std::sync::{Arc, Barrier, Mutex, RwLock};
 use std::{fmt, result, str, thread};
 use vfio::{VfioDevice, VfioPciDevice, VfioPciError};
 use vm_allocator::{GsiApic, SystemAllocator};
@@ -450,7 +451,8 @@ impl Vcpu {
         )
         .map_err(Error::REGSConfiguration)?;
         arch::x86_64::regs::setup_fpu(&self.fd).map_err(Error::FPUConfiguration)?;
-        arch::x86_64::regs::setup_sregs(vm_memory, &self.fd).map_err(Error::SREGSConfiguration)?;
+        arch::x86_64::regs::setup_sregs(&vm_memory.read().unwrap(), &self.fd)
+            .map_err(Error::SREGSConfiguration)?;
         arch::x86_64::interrupts::set_lint(&self.fd).map_err(Error::LocalIntConfiguration)?;
         Ok(())
     }
@@ -518,7 +520,7 @@ impl Vcpu {
 }
 
 struct VmInfo<'a> {
-    memory: &'a Arc<GuestMemoryMmap>,
+    memory: &'a Arc<RwLock<GuestMemoryMmap>>,
     vm_fd: &'a Arc<VmFd>,
     vm_cfg: &'a VmConfig<'a>,
 }
@@ -1113,7 +1115,7 @@ impl DeviceManager {
 
     fn add_virtio_pci_device(
         virtio_device: Box<dyn vm_virtio::VirtioDevice>,
-        memory: &Arc<GuestMemoryMmap>,
+        memory: &Arc<RwLock<GuestMemoryMmap>>,
         allocator: &mut SystemAllocator,
         vm_fd: &Arc<VmFd>,
         pci: &mut PciConfigIo,
@@ -1317,7 +1319,7 @@ impl AsRawFd for EpollContext {
 pub struct Vm<'a> {
     fd: Arc<VmFd>,
     kernel: File,
-    memory: Arc<GuestMemoryMmap>,
+    memory: Arc<RwLock<GuestMemoryMmap>>,
     vcpus: Vec<thread::JoinHandle<()>>,
     devices: DeviceManager,
     cpuid: CpuId,
@@ -1487,7 +1489,9 @@ impl<'a> Vm<'a> {
 
-        // Convert the guest memory into an Arc. The point being able to use it
-        // anywhere in the code, no matter which thread might use it.
-        let guest_memory = Arc::new(guest_memory);
+        // Add the RwLock aspect to guest memory as we might want to perform
+        // additions to the memory during runtime.
+        let guest_memory = Arc::new(RwLock::new(guest_memory));
 
         let vm_info = VmInfo {
             memory: &guest_memory,
@@ -1536,8 +1540,9 @@ impl<'a> Vm<'a> {
     pub fn load_kernel(&mut self) -> Result<GuestAddress> {
         let cmdline_cstring =
             CString::new(self.config.cmdline.args.clone()).map_err(|_| Error::CmdLine)?;
+        let mem = self.memory.read().unwrap();
         let entry_addr = match linux_loader::loader::Elf::load(
-            self.memory.as_ref(),
+            mem.deref(),
             None,
             &mut self.kernel,
             Some(arch::HIMEM_START),
@@ -1545,7 +1550,7 @@ impl<'a> Vm<'a> {
             Ok(entry_addr) => entry_addr,
             Err(linux_loader::loader::Error::InvalidElfMagicNumber) => {
                 linux_loader::loader::BzImage::load(
-                    self.memory.as_ref(),
+                    mem.deref(),
                     None,
                     &mut self.kernel,
                     Some(arch::HIMEM_START),
@@ -1556,7 +1561,7 @@ impl<'a> Vm<'a> {
         };
 
         linux_loader::loader::load_cmdline(
-            self.memory.as_ref(),
+            mem.deref(),
             self.config.cmdline.offset,
             &cmdline_cstring,
         )
@@ -1567,7 +1572,7 @@ impl<'a> Vm<'a> {
         match entry_addr.setup_header {
             Some(hdr) => {
                 arch::configure_system(
-                    &self.memory,
+                    &mem,
                     self.config.cmdline.offset,
                     cmdline_cstring.to_bytes().len() + 1,
                     vcpu_count,
@@ -1585,7 +1590,7 @@ impl<'a> Vm<'a> {
             }
             None => {
                 arch::configure_system(
-                    &self.memory,
+                    &mem,
                     self.config.cmdline.offset,
                     cmdline_cstring.to_bytes().len() + 1,
                     vcpu_count,
@@ -1763,12 +1768,9 @@ impl<'a> Vm<'a> {
         Ok(())
     }
 
-    /// Gets a reference to the guest memory owned by this VM.
-    ///
-    /// Note that `GuestMemory` does not include any device memory that may have been added after
-    /// this VM was constructed.
-    pub fn get_memory(&self) -> &GuestMemoryMmap {
-        &self.memory
+    /// Gets an Arc to the guest memory owned by this VM.
+    pub fn get_memory(&self) -> Arc<RwLock<GuestMemoryMmap>> {
+        self.memory.clone()
     }
 }
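The final hunk changes get_memory() from returning a borrow to returning a
clone of the Arc. A hedged sketch of the caller side under the new
signature; Vm and GuestMemoryMmap are hypothetical stand-ins here:

use std::ops::Deref;
use std::sync::{Arc, RwLock};

struct GuestMemoryMmap; // hypothetical stand-in for vm-memory's type

struct Vm {
    memory: Arc<RwLock<GuestMemoryMmap>>,
}

impl Vm {
    /// Gets an Arc to the guest memory owned by this VM.
    fn get_memory(&self) -> Arc<RwLock<GuestMemoryMmap>> {
        self.memory.clone()
    }
}

fn main() {
    let vm = Vm {
        memory: Arc::new(RwLock::new(GuestMemoryMmap)),
    };
    // Callers clone the Arc once, then lock per access; read() is enough on
    // the hot path, mirroring the mem.deref() calls in load_kernel() above.
    let mem = vm.get_memory();
    let guard = mem.read().unwrap();
    let _mem_ref: &GuestMemoryMmap = guard.deref();
}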