block: Add support for user specified ID_SERIAL

Signed-off-by: Thomas Barrett <tbarrett@crusoeenergy.com>
Author: Thomas Barrett, 2023-09-08 08:18:51 -07:00
Committed by: Rob Bradford
Parent: 49e342314d
Commit: c4e8e653ac
8 changed files with 45 additions and 24 deletions
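
For context, a minimal usage sketch (not part of the commit): the new serial option rides on the DiskConfig parser and Block::new() changes in the hunks below. The disk path is illustrative, and the guest-side note assumes the usual virtio-blk behaviour.

    // Parsing mirrors the test added in this commit; the path is made up.
    let disk = DiskConfig::parse("path=/path/to/disk.img,serial=test")?;
    assert_eq!(disk.serial.as_deref(), Some("test"));
    // Block::new() turns Some("test") into the virtio-blk serial bytes via
    // serial.map(Vec::from); the guest reads them with a GetDeviceId
    // (VIRTIO_BLK_T_GET_ID) request, and udev surfaces them as ID_SERIAL.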

@@ -116,8 +116,8 @@ fn build_device_id(disk_path: &Path) -> result::Result<String, Error> {
     Ok(device_id)
 }
 
-pub fn build_disk_image_id(disk_path: &Path) -> Vec<u8> {
-    let mut default_disk_image_id = vec![0; VIRTIO_BLK_ID_BYTES as usize];
+pub fn build_serial(disk_path: &Path) -> Vec<u8> {
+    let mut default_serial = vec![0; VIRTIO_BLK_ID_BYTES as usize];
     match build_device_id(disk_path) {
         Err(_) => {
             warn!("Could not generate device id. We'll use a default.");
@@ -127,10 +127,10 @@ pub fn build_disk_image_id(disk_path: &Path) -> Vec<u8> {
             // This will also zero out any leftover bytes.
             let disk_id = m.as_bytes();
             let bytes_to_copy = cmp::min(disk_id.len(), VIRTIO_BLK_ID_BYTES as usize);
-            default_disk_image_id[..bytes_to_copy].clone_from_slice(&disk_id[..bytes_to_copy])
+            default_serial[..bytes_to_copy].clone_from_slice(&disk_id[..bytes_to_copy])
         }
     }
-    default_disk_image_id
+    default_serial
 }
 
 #[derive(Error, Debug)]
@@ -330,7 +330,7 @@ impl Request {
         disk: &mut T,
         disk_nsectors: u64,
         mem: &GuestMemoryMmap,
-        disk_id: &[u8],
+        serial: &[u8],
     ) -> result::Result<u32, ExecuteError> {
         disk.seek(SeekFrom::Start(self.sector << SECTOR_SHIFT))
             .map_err(ExecuteError::Seek)?;
@@ -362,10 +362,10 @@ impl Request {
             }
             RequestType::Flush => disk.flush().map_err(ExecuteError::Flush)?,
             RequestType::GetDeviceId => {
-                if (*data_len as usize) < disk_id.len() {
+                if (*data_len as usize) < serial.len() {
                     return Err(ExecuteError::BadRequest(Error::InvalidOffset));
                 }
-                mem.write_slice(disk_id, *data_addr)
+                mem.write_slice(serial, *data_addr)
                     .map_err(ExecuteError::Write)?;
             }
             RequestType::Unsupported(t) => return Err(ExecuteError::Unsupported(t)),
@@ -379,7 +379,7 @@ impl Request {
         mem: &GuestMemoryMmap,
         disk_nsectors: u64,
         disk_image: &mut dyn AsyncIo,
-        disk_id: &[u8],
+        serial: &[u8],
         user_data: u64,
     ) -> result::Result<bool, ExecuteError> {
         let sector = self.sector;
@@ -481,10 +481,10 @@ impl Request {
                 } else {
                     return Err(ExecuteError::BadRequest(Error::TooManyDescriptors));
                 };
-                if (data_len as usize) < disk_id.len() {
+                if (data_len as usize) < serial.len() {
                     return Err(ExecuteError::BadRequest(Error::InvalidOffset));
                 }
-                mem.write_slice(disk_id, data_addr)
+                mem.write_slice(serial, data_addr)
                     .map_err(ExecuteError::Write)?;
                 return Ok(false);
             }
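
A small sketch of the contract implied by the hunks above (assumptions noted in the comments): the default serial from build_serial() stays padded to VIRTIO_BLK_ID_BYTES, and whatever serial slice is handed to a GetDeviceId request must fit into the guest buffer or the request fails with Error::InvalidOffset.

    use std::path::Path;

    // Default path-derived serial: fixed-size, zero-padded buffer.
    let default_serial = build_serial(Path::new("/path/to/disk.img")); // illustrative path
    assert_eq!(default_serial.len(), VIRTIO_BLK_ID_BYTES as usize);

    // A user-specified serial (wired up later in this commit) is passed
    // through as raw bytes, with no padding or truncation.
    let user_serial: Vec<u8> = Vec::from(String::from("my-disk-serial"));
    assert_eq!(user_serial, b"my-disk-serial".to_vec());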

@@ -57,6 +57,7 @@ fuzz_target!(|bytes| {
         false,
         2,
         256,
+        None,
         SeccompAction::Allow,
         None,
         EventFd::new(EFD_NONBLOCK).unwrap(),

@@ -9,7 +9,7 @@
 // SPDX-License-Identifier: (Apache-2.0 AND BSD-3-Clause)
 
 use block::{
-    build_disk_image_id,
+    build_serial,
     qcow::{self, ImageType, QcowFile},
     Request, VirtioBlockConfig,
 };
@@ -90,7 +90,7 @@ impl convert::From<Error> for io::Error {
 
 struct VhostUserBlkThread {
     disk_image: Arc<Mutex<dyn DiskFile>>,
-    disk_image_id: Vec<u8>,
+    serial: Vec<u8>,
     disk_nsectors: u64,
     event_idx: bool,
     kill_evt: EventFd,
@@ -101,14 +101,14 @@ struct VhostUserBlkThread {
 impl VhostUserBlkThread {
     fn new(
         disk_image: Arc<Mutex<dyn DiskFile>>,
-        disk_image_id: Vec<u8>,
+        serial: Vec<u8>,
         disk_nsectors: u64,
         writeback: Arc<AtomicBool>,
         mem: GuestMemoryAtomic<GuestMemoryMmap>,
     ) -> Result<Self> {
         Ok(VhostUserBlkThread {
             disk_image,
-            disk_image_id,
+            serial,
             disk_nsectors,
             event_idx: false,
             kill_evt: EventFd::new(EFD_NONBLOCK).map_err(Error::CreateKillEventFd)?,
@@ -137,7 +137,7 @@ impl VhostUserBlkThread {
                 &mut self.disk_image.lock().unwrap().deref_mut(),
                 self.disk_nsectors,
                 desc_chain.memory(),
-                &self.disk_image_id,
+                &self.serial,
             ) {
                 Ok(l) => {
                     len = l;
@@ -222,7 +222,7 @@ impl VhostUserBlkBackend {
         let image: File = options.open(&image_path).unwrap();
         let mut raw_img: qcow::RawFile = qcow::RawFile::new(image, direct);
 
-        let image_id = build_disk_image_id(&PathBuf::from(&image_path));
+        let serial = build_serial(&PathBuf::from(&image_path));
         let image_type = qcow::detect_image_type(&mut raw_img).unwrap();
         let image = match image_type {
             ImageType::Raw => Arc::new(Mutex::new(raw_img)) as Arc<Mutex<dyn DiskFile>>,
@@ -250,7 +250,7 @@ impl VhostUserBlkBackend {
         for i in 0..num_queues {
             let thread = Mutex::new(VhostUserBlkThread::new(
                 image.clone(),
-                image_id.clone(),
+                serial.clone(),
                 nsectors,
                 writeback.clone(),
                 mem.clone(),

@@ -20,7 +20,7 @@ use crate::GuestMemoryMmap;
 use crate::VirtioInterrupt;
 use anyhow::anyhow;
 use block::{
-    async_io::AsyncIo, async_io::AsyncIoError, async_io::DiskFile, build_disk_image_id, Request,
+    async_io::AsyncIo, async_io::AsyncIoError, async_io::DiskFile, build_serial, Request,
     RequestType, VirtioBlockConfig,
 };
 use rate_limiter::{RateLimiter, TokenType};
@@ -124,7 +124,7 @@ struct BlockEpollHandler {
     disk_image: Box<dyn AsyncIo>,
     disk_nsectors: u64,
     interrupt_cb: Arc<dyn VirtioInterrupt>,
-    disk_image_id: Vec<u8>,
+    serial: Vec<u8>,
     kill_evt: EventFd,
     pause_evt: EventFd,
     writeback: Arc<AtomicBool>,
@@ -205,7 +205,7 @@ impl BlockEpollHandler {
                     desc_chain.memory(),
                     self.disk_nsectors,
                     self.disk_image.as_mut(),
-                    &self.disk_image_id,
+                    &self.serial,
                     desc_chain.head_index() as u64,
                 )
                 .map_err(Error::RequestExecuting)?
@@ -504,6 +504,7 @@ pub struct Block {
     rate_limiter_config: Option<RateLimiterConfig>,
     exit_evt: EventFd,
     read_only: bool,
+    serial: Vec<u8>,
 }
 
 #[derive(Versionize)]
@@ -528,6 +529,7 @@ impl Block {
         iommu: bool,
         num_queues: usize,
         queue_size: u16,
+        serial: Option<String>,
         seccomp_action: SeccompAction,
         rate_limiter_config: Option<RateLimiterConfig>,
         exit_evt: EventFd,
@@ -608,6 +610,8 @@ impl Block {
             (disk_nsectors, avail_features, 0, config, false)
         };
 
+        let serial = serial.map(Vec::from).unwrap_or(build_serial(&disk_path));
+
         Ok(Block {
             common: VirtioCommon {
                 device_type: VirtioDeviceType::Block as u32,
@@ -630,6 +634,7 @@ impl Block {
             rate_limiter_config,
             exit_evt,
             read_only,
+            serial,
         })
     }
 
@@ -726,7 +731,6 @@ impl VirtioDevice for Block {
     ) -> ActivateResult {
         self.common.activate(&queues, &interrupt_cb)?;
 
-        let disk_image_id = build_disk_image_id(&self.disk_path);
         self.update_writeback();
 
         let mut epoll_threads = Vec::new();
@@ -754,7 +758,7 @@ impl VirtioDevice for Block {
                 })?,
                 disk_nsectors: self.disk_nsectors,
                 interrupt_cb: interrupt_cb.clone(),
-                disk_image_id: disk_image_id.clone(),
+                serial: self.serial.clone(),
                 kill_evt,
                 pause_evt,
                 writeback: self.writeback.clone(),

@@ -820,6 +820,8 @@ components:
           format: int16
         id:
           type: string
+        serial:
+          type: string
 
     NetConfig:
       type: object
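
A hedged sketch of what the schema addition above corresponds to on the Rust side, assuming DiskConfig also derives serde::Serialize (the struct and its Default impl appear at the end of this diff; the path value is illustrative):

    use std::path::PathBuf;

    let disk = DiskConfig {
        path: Some(PathBuf::from("/path/to/disk.img")),
        serial: Some("test".to_string()),
        ..Default::default()
    };
    // Serializing should now emit a "serial" field matching the new
    // OpenAPI property.
    println!("{}", serde_json::to_string(&disk).unwrap());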

@@ -779,7 +779,8 @@ impl DiskConfig {
             .add("ops_refill_time")
             .add("id")
             .add("_disable_io_uring")
-            .add("pci_segment");
+            .add("pci_segment")
+            .add("serial");
         parser.parse(disk).map_err(Error::ParseDisk)?;
 
         let path = parser.get("path").map(PathBuf::from);
@@ -846,6 +847,7 @@ impl DiskConfig {
             .convert("ops_refill_time")
             .map_err(Error::ParseDisk)?
             .unwrap_or_default();
+        let serial = parser.get("serial");
         let bw_tb_config = if bw_size != 0 && bw_refill_time != 0 {
             Some(TokenBucketConfig {
                 size: bw_size,
@@ -886,6 +888,7 @@ impl DiskConfig {
             id,
             disable_io_uring,
             pci_segment,
+            serial,
         })
     }
 
@@ -2462,7 +2465,14 @@ mod tests {
                 ..Default::default()
             }
         );
+        assert_eq!(
+            DiskConfig::parse("path=/path/to_file,serial=test")?,
+            DiskConfig {
+                path: Some(PathBuf::from("/path/to_file")),
+                serial: Some(String::from("test")),
+                ..Default::default()
+            }
+        );
 
         Ok(())
     }

@@ -2315,6 +2315,7 @@ impl DeviceManager {
                 self.force_iommu | disk_cfg.iommu,
                 disk_cfg.num_queues,
                 disk_cfg.queue_size,
+                disk_cfg.serial.clone(),
                 self.seccomp_action.clone(),
                 disk_cfg.rate_limiter_config,
                 self.exit_evt

@@ -220,6 +220,8 @@ pub struct DiskConfig {
     pub disable_io_uring: bool,
     #[serde(default)]
     pub pci_segment: u16,
+    #[serde(default)]
+    pub serial: Option<String>,
 }
 
 pub const DEFAULT_DISK_NUM_QUEUES: usize = 1;
@@ -249,6 +251,7 @@ impl Default for DiskConfig {
             disable_io_uring: false,
             rate_limiter_config: None,
             pci_segment: 0,
+            serial: None,
         }
     }
 }
} }