block_util: Port synchronous RAW file to AsyncIo trait

Based on the synchronous RAW file implementation present in the qcow
crate, we created a new raw_sync module in block_util that ports this
synchronous implementation to the AsyncIo trait.

The point is to reuse the virtio-blk asynchronous implementation for both
synchronous and asynchronous backends.

Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
This commit is contained in:
Sebastien Boeuf 2021-01-20 19:03:59 +01:00 committed by Rob Bradford
parent da8ce25abf
commit 9fc86a91e2
6 changed files with 195 additions and 46 deletions

1
Cargo.lock generated
View File

@ -159,6 +159,7 @@ dependencies = [
"io-uring",
"libc",
"log 0.4.13",
"qcow",
"serde",
"serde_derive",
"serde_json",

View File

@ -12,6 +12,7 @@ io_uring = []
io-uring = ">=0.4.0"
libc = "0.2.82"
log = "0.4.13"
qcow = { path = "../qcow" }
serde = ">=1.0.27"
serde_derive = ">=1.0.27"
serde_json = ">=1.0.9"

View File

@ -15,6 +15,7 @@ extern crate serde_derive;
pub mod async_io;
pub mod raw_async;
pub mod raw_sync;
use crate::async_io::{AsyncIo, AsyncIoError};
#[cfg(feature = "io_uring")]

162
block_util/src/raw_sync.rs Normal file
View File

@ -0,0 +1,162 @@
// Copyright © 2021 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
use crate::async_io::{
AsyncIo, AsyncIoError, AsyncIoResult, DiskFile, DiskFileError, DiskFileResult,
};
use qcow::RawFile;
use std::fs::File;
use std::io::{IoSlice, IoSliceMut, Read, Seek, SeekFrom, Write};
use std::sync::{Arc, Mutex};
use vmm_sys_util::eventfd::EventFd;
/// Synchronous RAW disk backend exposed through the `DiskFile` trait so the
/// virtio-blk asynchronous code path can also drive a plain file.
pub struct RawFileDiskSync {
    // Underlying file wrapper from the qcow crate; `direct_io` handling
    // (if any) lives there — see `qcow::RawFile`.
    raw_file: RawFile,
    // Serializes access to the underlying file between this handle and every
    // `RawFileSync` cloned from it, since each I/O is a seek + read/write pair.
    semaphore: Arc<Mutex<()>>,
}
impl RawFileDiskSync {
    /// Build a synchronous disk backend around `file`.
    ///
    /// `direct_io` is forwarded verbatim to `qcow::RawFile`.
    pub fn new(file: File, direct_io: bool) -> Self {
        let raw_file = RawFile::new(file, direct_io);
        let semaphore = Arc::new(Mutex::new(()));

        Self {
            raw_file,
            semaphore,
        }
    }
}
impl DiskFile for RawFileDiskSync {
    /// Return the disk size in bytes by seeking to the end of the file.
    ///
    /// Note: this moves the file cursor, but every I/O in `RawFileSync`
    /// seeks to its own offset first, so the cursor position is never
    /// relied upon between calls.
    fn size(&mut self) -> DiskFileResult<u64> {
        // Take the semaphore to ensure other threads are not interacting with
        // the underlying file.
        let _lock = self.semaphore.lock().unwrap();

        // Seek already returns the absolute end offset as a u64; the
        // previous `as u64` cast was a no-op and has been dropped.
        self.raw_file
            .seek(SeekFrom::End(0))
            .map_err(DiskFileError::Size)
    }

    /// Create a pseudo-asynchronous I/O handle sharing the same underlying
    /// file and lock.
    ///
    /// `_ring_depth` is ignored: there is no submission queue in the
    /// synchronous implementation.
    fn new_async_io(&self, _ring_depth: u32) -> DiskFileResult<Box<dyn AsyncIo>> {
        Ok(Box::new(RawFileSync::new(
            self.raw_file.clone(),
            self.semaphore.clone(),
        )) as Box<dyn AsyncIo>)
    }
}
/// Synchronous implementation of the `AsyncIo` interface: each request is
/// executed inline, then reported through the same completion-list +
/// eventfd mechanism an io_uring backend would use.
pub struct RawFileSync {
    // Shared clone of the disk's underlying file.
    raw_file: RawFile,
    // Signaled once per queued completion so the consumer wakes up.
    eventfd: EventFd,
    // Completed requests as (user_data, result) pairs, drained by complete().
    completion_list: Vec<(u64, i32)>,
    // Shared with RawFileDiskSync to serialize seek+I/O on the same file.
    semaphore: Arc<Mutex<()>>,
}
impl RawFileSync {
    /// Create a synchronous I/O handle over a shared `RawFile` and its lock.
    pub fn new(raw_file: RawFile, semaphore: Arc<Mutex<()>>) -> Self {
        let eventfd =
            EventFd::new(libc::EFD_NONBLOCK).expect("Failed creating EventFd for RawFile");

        Self {
            raw_file,
            eventfd,
            completion_list: Vec::new(),
            semaphore,
        }
    }
}
impl AsyncIo for RawFileSync {
    /// EventFd signaled every time a completion is queued, mimicking the
    /// io_uring notification mechanism.
    fn notifier(&self) -> &EventFd {
        &self.eventfd
    }

    /// Read into the caller-provided iovecs at `offset`, then queue a
    /// completion carrying `user_data` and the number of bytes read.
    fn read_vectored(
        &mut self,
        offset: libc::off_t,
        iovecs: Vec<libc::iovec>,
        user_data: u64,
    ) -> AsyncIoResult<()> {
        // Convert each libc::iovec into an IoSliceMut through an explicit
        // slice construction. The previous transmute relied on the
        // unspecified memory layout of `&mut [u8]`; from_raw_parts_mut is
        // the sound, documented way to build the slice.
        let mut slices: Vec<IoSliceMut> = iovecs
            .iter()
            .map(|iovec| {
                // SAFETY: the caller guarantees each iovec describes a valid,
                // writable buffer of iov_len bytes that outlives this call.
                IoSliceMut::new(unsafe {
                    std::slice::from_raw_parts_mut(iovec.iov_base as *mut u8, iovec.iov_len)
                })
            })
            .collect();

        let result = {
            // Take the semaphore to ensure other threads are not interacting
            // with the underlying file.
            let _lock = self.semaphore.lock().unwrap();

            // Move the cursor to the right offset.
            self.raw_file
                .seek(SeekFrom::Start(offset as u64))
                .map_err(AsyncIoError::ReadVectored)?;

            // Read vectored.
            self.raw_file
                .read_vectored(slices.as_mut_slice())
                .map_err(AsyncIoError::ReadVectored)?
        };

        // Queue the completion and wake the consumer, as io_uring would.
        self.completion_list.push((user_data, result as i32));
        self.eventfd.write(1).unwrap();

        Ok(())
    }

    /// Write the caller-provided iovecs at `offset`, then queue a completion
    /// carrying `user_data` and the number of bytes written.
    fn write_vectored(
        &mut self,
        offset: libc::off_t,
        iovecs: Vec<libc::iovec>,
        user_data: u64,
    ) -> AsyncIoResult<()> {
        // Convert each libc::iovec into an IoSlice (see read_vectored for
        // why transmute was replaced by an explicit slice construction).
        let slices: Vec<IoSlice> = iovecs
            .iter()
            .map(|iovec| {
                // SAFETY: the caller guarantees each iovec describes a valid,
                // readable buffer of iov_len bytes that outlives this call.
                IoSlice::new(unsafe {
                    std::slice::from_raw_parts(iovec.iov_base as *const u8, iovec.iov_len)
                })
            })
            .collect();

        let result = {
            // Take the semaphore to ensure other threads are not interacting
            // with the underlying file.
            let _lock = self.semaphore.lock().unwrap();

            // Move the cursor to the right offset.
            self.raw_file
                .seek(SeekFrom::Start(offset as u64))
                .map_err(AsyncIoError::WriteVectored)?;

            // Write vectored.
            self.raw_file
                .write_vectored(slices.as_slice())
                .map_err(AsyncIoError::WriteVectored)?
        };

        // Queue the completion and wake the consumer, as io_uring would.
        self.completion_list.push((user_data, result as i32));
        self.eventfd.write(1).unwrap();

        Ok(())
    }

    /// Flush the underlying file. A completion is only queued when the
    /// caller supplied a `user_data` tag (fsyncs may be fire-and-forget).
    fn fsync(&mut self, user_data: Option<u64>) -> AsyncIoResult<()> {
        let result: i32 = {
            // Take the semaphore to ensure other threads are not interacting
            // with the underlying file.
            let _lock = self.semaphore.lock().unwrap();

            // Flush.
            self.raw_file.flush().map_err(AsyncIoError::Fsync)?;
            0
        };

        if let Some(user_data) = user_data {
            self.completion_list.push((user_data, result));
            self.eventfd.write(1).unwrap();
        }

        Ok(())
    }

    /// Drain and return all queued (user_data, result) completions.
    fn complete(&mut self) -> Vec<(u64, i32)> {
        self.completion_list.drain(..).collect()
    }
}

View File

@ -130,6 +130,7 @@ fn virtio_blk_io_uring_thread_rules() -> Result<Vec<SyscallRuleSet>, Error> {
allow_syscall(libc::SYS_fsync),
allow_syscall(libc::SYS_futex),
allow_syscall(SYS_IO_URING_ENTER),
allow_syscall(libc::SYS_lseek),
allow_syscall(libc::SYS_madvise),
allow_syscall(libc::SYS_munmap),
allow_syscall(libc::SYS_read),

View File

@ -38,7 +38,10 @@ use arch::layout;
use arch::layout::{APIC_START, IOAPIC_SIZE, IOAPIC_START};
#[cfg(target_arch = "aarch64")]
use arch::DeviceType;
use block_util::{async_io::DiskFile, block_io_uring_is_supported, raw_async::RawFileDisk};
use block_util::{
async_io::DiskFile, block_io_uring_is_supported, raw_async::RawFileDisk,
raw_sync::RawFileDiskSync,
};
#[cfg(target_arch = "aarch64")]
use devices::gic;
#[cfg(target_arch = "x86_64")]
@ -1654,54 +1657,34 @@ impl DeviceManager {
ImageType::Raw => {
// Use asynchronous backend relying on io_uring if the
// syscalls are supported.
if block_io_uring_is_supported() && !disk_cfg.disable_io_uring {
let image = Box::new(RawFileDisk::new(image)) as Box<dyn DiskFile>;
let dev = Arc::new(Mutex::new(
virtio_devices::BlockIoUring::new(
id.clone(),
image,
disk_cfg
.path
.as_ref()
.ok_or(DeviceManagerError::NoDiskPath)?
.clone(),
disk_cfg.readonly,
disk_cfg.iommu,
disk_cfg.num_queues,
disk_cfg.queue_size,
self.seccomp_action.clone(),
)
.map_err(DeviceManagerError::CreateVirtioBlock)?,
));
(
Arc::clone(&dev) as VirtioDeviceArc,
dev as Arc<Mutex<dyn Migratable>>,
)
let image = if block_io_uring_is_supported() && !disk_cfg.disable_io_uring {
Box::new(RawFileDisk::new(image)) as Box<dyn DiskFile>
} else {
let dev = Arc::new(Mutex::new(
virtio_devices::Block::new(
id.clone(),
raw_img,
disk_cfg
.path
.as_ref()
.ok_or(DeviceManagerError::NoDiskPath)?
.clone(),
disk_cfg.readonly,
disk_cfg.iommu,
disk_cfg.num_queues,
disk_cfg.queue_size,
self.seccomp_action.clone(),
)
.map_err(DeviceManagerError::CreateVirtioBlock)?,
));
Box::new(RawFileDiskSync::new(image, disk_cfg.direct)) as Box<dyn DiskFile>
};
(
Arc::clone(&dev) as VirtioDeviceArc,
dev as Arc<Mutex<dyn Migratable>>,
let dev = Arc::new(Mutex::new(
virtio_devices::BlockIoUring::new(
id.clone(),
image,
disk_cfg
.path
.as_ref()
.ok_or(DeviceManagerError::NoDiskPath)?
.clone(),
disk_cfg.readonly,
disk_cfg.iommu,
disk_cfg.num_queues,
disk_cfg.queue_size,
self.seccomp_action.clone(),
)
}
.map_err(DeviceManagerError::CreateVirtioBlock)?,
));
(
Arc::clone(&dev) as VirtioDeviceArc,
dev as Arc<Mutex<dyn Migratable>>,
)
}
ImageType::Qcow2 => {
let qcow_img =