block_util: Port synchronous QCOW file to AsyncIo trait

Based on the synchronous QCOW file implementation present in the qcow
crate, we created a new qcow_sync module in block_util that ports this
synchronous implementation to the AsyncIo trait.

The point is to reuse the virtio-blk asynchronous implementation for both
synchronous and asynchronous backends.

Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
This commit is contained in:
Sebastien Boeuf 2021-01-22 09:56:38 +01:00 committed by Rob Bradford
parent 9fc86a91e2
commit 12e20effd7
4 changed files with 207 additions and 57 deletions

View File

@ -14,6 +14,7 @@ extern crate log;
extern crate serde_derive;
pub mod async_io;
pub mod qcow_sync;
pub mod raw_async;
pub mod raw_sync;

164
block_util/src/qcow_sync.rs Normal file
View File

@ -0,0 +1,164 @@
// Copyright © 2021 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
use crate::async_io::{
AsyncIo, AsyncIoError, AsyncIoResult, DiskFile, DiskFileError, DiskFileResult,
};
use qcow::{QcowFile, RawFile};
use std::fs::File;
use std::io::{IoSlice, IoSliceMut, Read, Seek, SeekFrom, Write};
use std::sync::{Arc, Mutex};
use vmm_sys_util::eventfd::EventFd;
/// Synchronous QCOW disk backend exposed through the `DiskFile` trait.
pub struct QcowDiskSync {
// The parsed QCOW image backing this disk.
qcow_file: QcowFile,
// Mutex shared with every `QcowSync` context created from this disk,
// serializing all accesses (seek + read/write/flush) to the underlying file.
semaphore: Arc<Mutex<()>>,
}
impl QcowDiskSync {
    /// Wraps `file` as a QCOW image for synchronous access.
    ///
    /// `direct_io` is forwarded to `RawFile` to control O_DIRECT-style
    /// access on the underlying file.
    ///
    /// # Panics
    ///
    /// Panics if the file cannot be parsed as a valid QCOW image.
    pub fn new(file: File, direct_io: bool) -> Self {
        let raw_file = RawFile::new(file, direct_io);
        let qcow_file = QcowFile::from(raw_file).expect("Failed creating QcowFile");

        QcowDiskSync {
            qcow_file,
            semaphore: Arc::new(Mutex::new(())),
        }
    }
}
impl DiskFile for QcowDiskSync {
    /// Returns the virtual disk size, obtained by seeking to the end of
    /// the QCOW image.
    fn size(&mut self) -> DiskFileResult<u64> {
        // Serialize access to the underlying file: other threads may hold
        // `QcowSync` contexts over the same image.
        let _lock = self.semaphore.lock().unwrap();

        let end = self
            .qcow_file
            .seek(SeekFrom::End(0))
            .map_err(DiskFileError::Size)?;

        Ok(end as u64)
    }

    /// Creates a new pseudo-asynchronous I/O context backed by this file.
    /// The ring depth is irrelevant for a synchronous backend.
    fn new_async_io(&self, _ring_depth: u32) -> DiskFileResult<Box<dyn AsyncIo>> {
        let io = QcowSync::new(self.qcow_file.clone(), self.semaphore.clone());

        Ok(Box::new(io) as Box<dyn AsyncIo>)
    }
}
/// Pseudo-asynchronous I/O context over a QCOW file: requests are served
/// synchronously, then their completions are queued and signaled through an
/// EventFd so the backend can be driven via the `AsyncIo` interface.
pub struct QcowSync {
// Clone of the shared QCOW image handle. NOTE(review): presumably clones
// share the underlying file state — confirm against QcowFile's Clone impl.
qcow_file: QcowFile,
// Notifier written (with 1) each time a completion is queued.
eventfd: EventFd,
// Completed requests as (user_data, result) pairs, drained by complete().
completion_list: Vec<(u64, i32)>,
// Shared with the parent QcowDiskSync, serializing file access across
// all contexts cloned from the same disk.
semaphore: Arc<Mutex<()>>,
}
impl QcowSync {
    /// Builds a new synchronous I/O context over a shared QCOW image.
    ///
    /// # Panics
    ///
    /// Panics if the completion EventFd cannot be created.
    pub fn new(qcow_file: QcowFile, semaphore: Arc<Mutex<()>>) -> Self {
        let eventfd =
            EventFd::new(libc::EFD_NONBLOCK).expect("Failed creating EventFd for QcowSync");

        QcowSync {
            qcow_file,
            eventfd,
            completion_list: Vec::new(),
            semaphore,
        }
    }
}
impl AsyncIo for QcowSync {
    /// EventFd signaled every time a request completion is queued.
    fn notifier(&self) -> &EventFd {
        &self.eventfd
    }

    /// Reads from the virtual disk at `offset` into the buffers described by
    /// `iovecs`. The operation is performed synchronously under the shared
    /// semaphore; the completion is queued and the notifier signaled before
    /// returning.
    fn read_vectored(
        &mut self,
        offset: libc::off_t,
        iovecs: Vec<libc::iovec>,
        user_data: u64,
    ) -> AsyncIoResult<()> {
        // Convert each libc::iovec into an IoSliceMut. Rebuilding the slice
        // from the raw base pointer and length avoids transmuting between
        // types whose layouts are not guaranteed to match.
        let mut slices = Vec::with_capacity(iovecs.len());
        for iovec in iovecs.iter() {
            // SAFETY: the caller guarantees each iovec describes a valid,
            // writable buffer of iov_len bytes that outlives this call.
            let buf = unsafe {
                std::slice::from_raw_parts_mut(iovec.iov_base as *mut u8, iovec.iov_len)
            };
            slices.push(IoSliceMut::new(buf));
        }

        let result = {
            // Take the semaphore to ensure other threads are not interacting
            // with the underlying file.
            let _lock = self.semaphore.lock().unwrap();

            // Move the cursor to the right offset
            self.qcow_file
                .seek(SeekFrom::Start(offset as u64))
                .map_err(AsyncIoError::ReadVectored)?;

            // Read vectored
            self.qcow_file
                .read_vectored(slices.as_mut_slice())
                .map_err(AsyncIoError::ReadVectored)?
        };

        self.completion_list.push((user_data, result as i32));
        self.eventfd.write(1).unwrap();

        Ok(())
    }

    /// Writes the buffers described by `iovecs` to the virtual disk at
    /// `offset`. Same synchronous completion scheme as `read_vectored`.
    fn write_vectored(
        &mut self,
        offset: libc::off_t,
        iovecs: Vec<libc::iovec>,
        user_data: u64,
    ) -> AsyncIoResult<()> {
        // Convert each libc::iovec into an IoSlice from its raw parts (see
        // read_vectored for why transmute is avoided).
        let mut slices = Vec::with_capacity(iovecs.len());
        for iovec in iovecs.iter() {
            // SAFETY: the caller guarantees each iovec describes a valid,
            // readable buffer of iov_len bytes that outlives this call.
            let buf = unsafe {
                std::slice::from_raw_parts(iovec.iov_base as *const u8, iovec.iov_len)
            };
            slices.push(IoSlice::new(buf));
        }

        let result = {
            // Take the semaphore to ensure other threads are not interacting
            // with the underlying file.
            let _lock = self.semaphore.lock().unwrap();

            // Move the cursor to the right offset
            self.qcow_file
                .seek(SeekFrom::Start(offset as u64))
                .map_err(AsyncIoError::WriteVectored)?;

            // Write vectored
            self.qcow_file
                .write_vectored(slices.as_slice())
                .map_err(AsyncIoError::WriteVectored)?
        };

        self.completion_list.push((user_data, result as i32));
        self.eventfd.write(1).unwrap();

        Ok(())
    }

    /// Flushes the QCOW file. A completion (result 0) is queued only when
    /// `user_data` is provided.
    fn fsync(&mut self, user_data: Option<u64>) -> AsyncIoResult<()> {
        let result: i32 = {
            // Take the semaphore to ensure other threads are not interacting
            // with the underlying file.
            let _lock = self.semaphore.lock().unwrap();

            // Flush
            self.qcow_file.flush().map_err(AsyncIoError::Fsync)?;

            0
        };

        if let Some(user_data) = user_data {
            self.completion_list.push((user_data, result));
            self.eventfd.write(1).unwrap();
        }

        Ok(())
    }

    /// Drains and returns all queued (user_data, result) completions.
    fn complete(&mut self) -> Vec<(u64, i32)> {
        self.completion_list.drain(..).collect()
    }
}

View File

@ -128,11 +128,19 @@ fn virtio_blk_io_uring_thread_rules() -> Result<Vec<SyscallRuleSet>, Error> {
allow_syscall(libc::SYS_epoll_wait),
allow_syscall(libc::SYS_exit),
allow_syscall(libc::SYS_fsync),
#[cfg(target_arch = "x86_64")]
allow_syscall(libc::SYS_ftruncate),
#[cfg(target_arch = "aarch64")]
// The definition of libc::SYS_ftruncate is missing on AArch64.
// Use a hard-code number instead.
allow_syscall(46),
allow_syscall(libc::SYS_futex),
allow_syscall(SYS_IO_URING_ENTER),
allow_syscall(libc::SYS_lseek),
allow_syscall(libc::SYS_madvise),
allow_syscall(libc::SYS_mprotect),
allow_syscall(libc::SYS_munmap),
allow_syscall(libc::SYS_openat),
allow_syscall(libc::SYS_read),
allow_syscall(libc::SYS_rt_sigprocmask),
allow_syscall(libc::SYS_sigaltstack),

View File

@ -39,8 +39,8 @@ use arch::layout::{APIC_START, IOAPIC_SIZE, IOAPIC_START};
#[cfg(target_arch = "aarch64")]
use arch::DeviceType;
use block_util::{
async_io::DiskFile, block_io_uring_is_supported, raw_async::RawFileDisk,
raw_sync::RawFileDiskSync,
async_io::DiskFile, block_io_uring_is_supported, qcow_sync::QcowDiskSync,
raw_async::RawFileDisk, raw_sync::RawFileDiskSync,
};
#[cfg(target_arch = "aarch64")]
use devices::gic;
@ -62,7 +62,7 @@ use pci::{
DeviceRelocation, PciBarRegionType, PciBus, PciConfigIo, PciConfigMmio, PciDevice, PciRoot,
VfioPciDevice,
};
use qcow::{self, ImageType, QcowFile};
use qcow::{self, ImageType};
use seccomp::SeccompAction;
use std::any::Any;
use std::collections::HashMap;
@ -1639,7 +1639,7 @@ impl DeviceManager {
options.custom_flags(libc::O_DIRECT);
}
// Open block device path
let image: File = options
let file: File = options
.open(
disk_cfg
.path
@ -1649,70 +1649,47 @@ impl DeviceManager {
)
.map_err(DeviceManagerError::Disk)?;
let mut raw_img = qcow::RawFile::new(image.try_clone().unwrap(), disk_cfg.direct);
let mut raw_img = qcow::RawFile::new(file.try_clone().unwrap(), disk_cfg.direct);
let image_type = qcow::detect_image_type(&mut raw_img)
.map_err(DeviceManagerError::DetectImageType)?;
let (virtio_device, migratable_device) = match image_type {
let image = match image_type {
ImageType::Raw => {
// Use asynchronous backend relying on io_uring if the
// syscalls are supported.
let image = if block_io_uring_is_supported() && !disk_cfg.disable_io_uring {
Box::new(RawFileDisk::new(image)) as Box<dyn DiskFile>
if block_io_uring_is_supported() && !disk_cfg.disable_io_uring {
Box::new(RawFileDisk::new(file)) as Box<dyn DiskFile>
} else {
Box::new(RawFileDiskSync::new(image, disk_cfg.direct)) as Box<dyn DiskFile>
};
let dev = Arc::new(Mutex::new(
virtio_devices::BlockIoUring::new(
id.clone(),
image,
disk_cfg
.path
.as_ref()
.ok_or(DeviceManagerError::NoDiskPath)?
.clone(),
disk_cfg.readonly,
disk_cfg.iommu,
disk_cfg.num_queues,
disk_cfg.queue_size,
self.seccomp_action.clone(),
)
.map_err(DeviceManagerError::CreateVirtioBlock)?,
));
(
Arc::clone(&dev) as VirtioDeviceArc,
dev as Arc<Mutex<dyn Migratable>>,
)
Box::new(RawFileDiskSync::new(file, disk_cfg.direct)) as Box<dyn DiskFile>
}
}
ImageType::Qcow2 => {
let qcow_img =
QcowFile::from(raw_img).map_err(DeviceManagerError::QcowDeviceCreate)?;
let dev = Arc::new(Mutex::new(
virtio_devices::Block::new(
id.clone(),
qcow_img,
disk_cfg
.path
.as_ref()
.ok_or(DeviceManagerError::NoDiskPath)?
.clone(),
disk_cfg.readonly,
disk_cfg.iommu,
disk_cfg.num_queues,
disk_cfg.queue_size,
self.seccomp_action.clone(),
)
.map_err(DeviceManagerError::CreateVirtioBlock)?,
));
(
Arc::clone(&dev) as VirtioDeviceArc,
dev as Arc<Mutex<dyn Migratable>>,
)
Box::new(QcowDiskSync::new(file, disk_cfg.direct)) as Box<dyn DiskFile>
}
};
let dev = Arc::new(Mutex::new(
virtio_devices::BlockIoUring::new(
id.clone(),
image,
disk_cfg
.path
.as_ref()
.ok_or(DeviceManagerError::NoDiskPath)?
.clone(),
disk_cfg.readonly,
disk_cfg.iommu,
disk_cfg.num_queues,
disk_cfg.queue_size,
self.seccomp_action.clone(),
)
.map_err(DeviceManagerError::CreateVirtioBlock)?,
));
let virtio_device = Arc::clone(&dev) as VirtioDeviceArc;
let migratable_device = dev as Arc<Mutex<dyn Migratable>>;
// Fill the device tree with a new node. In case of restore, we
// know there is nothing to do, so we can simply override the
// existing entry.