vhdx: Fixed and dynamic VHDx block device implementation

Microsoft’s VHDx block device format specification is implemented
here as a crate. This commit includes the implementation for the
fixed and dynamic formats, where the other format is known as
differencing. The vhdx_header.rs, vhdx_bat.rs, and vhdx_metadata.rs
files implement parsers and manipulators for the VHDx header, Block
Allocation Table, and metadata, respectively, of the VHDx file.
The vhdx_io.rs implements read and write routines for the VHDx file.
The vhdx.rs implements the Vhdx structure, which provides the wrapper
functions for standard I/O operations like read, write, and seek.

Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
Signed-off-by: Fazla Mehrab <akm.fazla.mehrab@intel.com>
This commit is contained in:
Fazla Mehrab 2021-07-26 11:50:52 -04:00 committed by Sebastien Boeuf
parent fc24f39507
commit 452af9b17c
7 changed files with 1404 additions and 0 deletions

16
vhdx/Cargo.toml Normal file
View File

@ -0,0 +1,16 @@
[package]
name = "vhdx"
version = "0.1.0"
authors = ["The Cloud Hypervisor Authors"]
edition = "2018"
license = "Apache-2.0"
[dependencies]
byteorder = "1.4.3"
crc32c = "0.6.0"
libc = "0.2.98"
log = "0.4.14"
remain = "0.2.2"
thiserror = "1.0"
uuid = { version = "0.8.2", features = ["v4"] }
vmm-sys-util = ">=0.3.1"

31
vhdx/src/lib.rs Normal file
View File

@ -0,0 +1,31 @@
// Copyright © 2021 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
use byteorder::{BigEndian, ByteOrder};
use std::result::Result;
use uuid::Uuid;
/// Integer ceiling division: `$n / $d` rounded up to the next whole unit.
/// Both operands must be positive and `$n + $d` must not overflow.
macro_rules! div_round_up {
    ($n:expr,$d:expr) => {
        ($n + $d - 1) / $d
    };
}
pub mod vhdx;
mod vhdx_bat;
mod vhdx_header;
mod vhdx_io;
mod vhdx_metadata;
/// Builds a `Uuid` from a 16-byte on-disk GUID.
///
/// The first three fields are read in big-endian order and then handed to
/// `Uuid::from_fields_le`, which byte-swaps them; the double swap is
/// equivalent to reading those fields little-endian, while the trailing
/// 8 bytes are taken as-is.
pub(crate) fn uuid_from_guid(buf: &[u8]) -> Result<Uuid, uuid::Error> {
    let d1 = BigEndian::read_u32(&buf[0..4]);
    let d2 = BigEndian::read_u16(&buf[4..6]);
    let d3 = BigEndian::read_u16(&buf[6..8]);
    let d4 = &buf[8..16];
    Uuid::from_fields_le(d1, d2, d3, d4)
}

212
vhdx/src/vhdx.rs Normal file
View File

@ -0,0 +1,212 @@
// Copyright © 2021 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
use crate::vhdx_bat::{BatEntry, VhdxBatError};
use crate::vhdx_header::{self, RegionInfo, RegionTableEntry, VhdxHeader, VhdxHeaderError};
use crate::vhdx_io::{self, VhdxIoError};
use crate::vhdx_metadata::{DiskSpec, VhdxMetadataError};
use remain::sorted;
use std::collections::btree_map::BTreeMap;
use std::fs::File;
use std::io::{Read, Seek, SeekFrom, Write};
use thiserror::Error;
// Top-level errors returned by Vhdx operations; each variant wraps the
// error type of the layer (header, region table, metadata, BAT, or
// sector I/O) that failed.
#[sorted]
#[derive(Error, Debug)]
pub enum VhdxError {
    #[error("Not a VHDx file {0}")]
    NotVhdx(#[source] VhdxHeaderError),
    #[error("Failed to parse VHDx header {0}")]
    ParseVhdxHeader(#[source] VhdxHeaderError),
    #[error("Failed to parse VHDx metadata {0}")]
    ParseVhdxMetadata(#[source] VhdxMetadataError),
    #[error("Failed to parse VHDx region entries {0}")]
    ParseVhdxRegionEntry(#[source] VhdxHeaderError),
    #[error("Failed reading metadata {0}")]
    ReadBatEntry(#[source] VhdxBatError),
    #[error("Failed reading sector from disk {0}")]
    ReadFailed(#[source] VhdxIoError),
    #[error("Failed writing to sector on disk {0}")]
    WriteFailed(#[source] VhdxIoError),
}
// Result alias used by all routines in this module.
pub type Result<T> = std::result::Result<T, VhdxError>;
/// An open VHDx disk image together with the parsed on-disk structures
/// needed to service I/O on it.
#[derive(Debug)]
pub struct Vhdx {
    // Backing file holding the VHDx image
    file: File,
    // Parsed copy of both headers and both region tables
    vhdx_header: VhdxHeader,
    // Map of region start offset -> end offset for every region entry
    region_entries: BTreeMap<u64, u64>,
    // Region table entry describing the Block Allocation Table
    bat_entry: RegionTableEntry,
    // Region table entry describing the metadata region
    mdr_entry: RegionTableEntry,
    // Disk geometry and feature flags parsed from the metadata region
    disk_spec: DiskSpec,
    // In-memory copy of the BAT
    bat_entries: Vec<BatEntry>,
    // Current seek position, in bytes, within the virtual disk
    current_offset: u64,
    // True until the first write; used to refresh the headers once
    first_write: bool,
}
impl Vhdx {
    /// Parse the VHDx header, region table, metadata, and BAT from `file`
    /// and store the results in a new `Vhdx` structure.
    ///
    /// # Errors
    ///
    /// Returns a `VhdxError` variant identifying which parsing stage failed.
    pub fn new(mut file: File) -> Result<Vhdx> {
        let vhdx_header = VhdxHeader::new(&mut file).map_err(VhdxError::ParseVhdxHeader)?;
        // Walk the first region table to locate the BAT and metadata regions.
        let collected_entries = RegionInfo::new(
            &mut file,
            vhdx_header::REGION_TABLE_1_START,
            vhdx_header.region_entry_count(),
        )
        .map_err(VhdxError::ParseVhdxRegionEntry)?;
        let bat_entry = collected_entries.bat_entry;
        let mdr_entry = collected_entries.mdr_entry;
        let disk_spec =
            DiskSpec::new(&mut file, &mdr_entry).map_err(VhdxError::ParseVhdxMetadata)?;
        let bat_entries = BatEntry::collect_bat_entries(&mut file, &disk_spec, &bat_entry)
            .map_err(VhdxError::ReadBatEntry)?;
        Ok(Vhdx {
            file,
            vhdx_header,
            region_entries: collected_entries.region_entries,
            bat_entry,
            mdr_entry,
            disk_spec,
            bat_entries,
            current_offset: 0,
            first_write: true,
        })
    }
    /// Size of the virtual disk, in bytes, as reported by the metadata.
    pub fn virtual_disk_size(&self) -> u64 {
        self.disk_spec.virtual_disk_size
    }
}
impl Read for Vhdx {
    /// Wrapper function to satisfy Read trait implementation for VHDx disk.
    /// Convert the offset to sector index and buffer length to sector count.
    ///
    /// NOTE(review): `current_offset` is not advanced after a read, so
    /// sequential reads need an explicit seek in between — confirm callers
    /// always seek before reading.
    fn read(&mut self, buf: &mut [u8]) -> std::result::Result<usize, std::io::Error> {
        // Round the byte length up to whole logical sectors.
        let sector_count =
            div_round_up!(buf.len() as u64, self.disk_spec.logical_sector_size as u64);
        let sector_index = self.current_offset / self.disk_spec.logical_sector_size as u64;
        vhdx_io::read(
            &mut self.file,
            buf,
            &self.disk_spec,
            &self.bat_entries,
            sector_index,
            sector_count,
        )
        .map_err(|e| {
            std::io::Error::new(
                std::io::ErrorKind::Other,
                format!(
                    "Failed reading {} sectors from VHDx at index {}: {}",
                    sector_count, sector_index, e
                ),
            )
        })
    }
}
impl Write for Vhdx {
    /// Flushes the underlying host file.
    fn flush(&mut self) -> std::result::Result<(), std::io::Error> {
        self.file.flush()
    }
    /// Wrapper function to satisfy Write trait implementation for VHDx disk.
    /// Convert the offset to sector index and buffer length to sector count.
    ///
    /// On the very first write the on-disk headers are rewritten before any
    /// data is touched.
    ///
    /// NOTE(review): `current_offset` is not advanced after a write, so
    /// sequential writes need an explicit seek in between — confirm callers
    /// always seek before writing.
    fn write(&mut self, buf: &[u8]) -> std::result::Result<usize, std::io::Error> {
        let sector_count =
            div_round_up!(buf.len() as u64, self.disk_spec.logical_sector_size as u64);
        let sector_index = self.current_offset / self.disk_spec.logical_sector_size as u64;
        if self.first_write {
            self.first_write = false;
            self.vhdx_header.update(&mut self.file).map_err(|e| {
                std::io::Error::new(
                    std::io::ErrorKind::Other,
                    format!("Failed to update VHDx header: {}", e),
                )
            })?;
        }
        vhdx_io::write(
            &mut self.file,
            buf,
            &mut self.disk_spec,
            self.bat_entry.file_offset,
            &mut self.bat_entries,
            sector_index,
            sector_count,
        )
        .map_err(|e| {
            std::io::Error::new(
                std::io::ErrorKind::Other,
                format!(
                    "Failed writing {} sectors on VHDx at index {}: {}",
                    sector_count, sector_index, e
                ),
            )
        })
    }
}
impl Seek for Vhdx {
    /// Wrapper function to satisfy the Seek trait implementation for a VHDx
    /// disk. Only updates the cached offset; no file I/O is performed.
    ///
    /// A seek succeeds when the computed absolute offset neither overflows
    /// nor underflows and lies within `0..=virtual_disk_size()`.
    fn seek(&mut self, pos: SeekFrom) -> std::io::Result<u64> {
        // Resolve the base position the displacement applies to.
        let (base, displacement) = match pos {
            SeekFrom::Start(off) => (off, 0i64),
            SeekFrom::End(off) => (self.virtual_disk_size(), off),
            SeekFrom::Current(off) => (self.current_offset, off),
        };
        // Apply the displacement with checked arithmetic so any wraparound
        // (including negating i64::MIN) collapses to None.
        let target = if displacement < 0 {
            displacement
                .checked_neg()
                .and_then(|decrement| base.checked_sub(decrement as u64))
        } else {
            base.checked_add(displacement as u64)
        };
        match target {
            Some(offset) if offset <= self.virtual_disk_size() => {
                self.current_offset = offset;
                Ok(offset)
            }
            _ => Err(std::io::Error::new(
                std::io::ErrorKind::InvalidData,
                "Failed seek operation",
            )),
        }
    }
}
impl Clone for Vhdx {
    /// Duplicates the disk by duplicating the underlying file descriptor
    /// (`File::try_clone`); the clone shares the same open file state.
    ///
    /// # Panics
    ///
    /// Panics if the host file descriptor cannot be duplicated, since
    /// `Clone::clone` has no way to report failure.
    fn clone(&self) -> Self {
        Vhdx {
            // expect() instead of unwrap() so an fd duplication failure
            // produces an actionable panic message.
            file: self
                .file
                .try_clone()
                .expect("Failed to duplicate the VHDx backing file descriptor"),
            vhdx_header: self.vhdx_header.clone(),
            region_entries: self.region_entries.clone(),
            bat_entry: self.bat_entry,
            mdr_entry: self.mdr_entry,
            disk_spec: self.disk_spec.clone(),
            bat_entries: self.bat_entries.clone(),
            current_offset: self.current_offset,
            first_write: self.first_write,
        }
    }
}

104
vhdx/src/vhdx_bat.rs Normal file
View File

@ -0,0 +1,104 @@
// Copyright © 2021 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
use crate::vhdx_header::RegionTableEntry;
use crate::vhdx_metadata::DiskSpec;
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use remain::sorted;
use std::fs::File;
use std::io::{self, Seek, SeekFrom};
use std::mem::size_of;
use thiserror::Error;
// Payload BAT entry states: the low 3 bits of each 64-bit BAT entry encode
// the state of the corresponding payload block.
pub const PAYLOAD_BLOCK_NOT_PRESENT: u64 = 0;
pub const PAYLOAD_BLOCK_UNDEFINED: u64 = 1;
pub const PAYLOAD_BLOCK_ZERO: u64 = 2;
pub const PAYLOAD_BLOCK_UNMAPPED: u64 = 3;
pub const PAYLOAD_BLOCK_FULLY_PRESENT: u64 = 6;
pub const PAYLOAD_BLOCK_PARTIALLY_PRESENT: u64 = 7;
// Mask for the BAT state (the low 3 bits of an entry)
pub const BAT_STATE_BIT_MASK: u64 = 0x07;
// Mask for the offset within the file in units of 1 MB
pub const BAT_FILE_OFF_MASK: u64 = 0xFFFFFFFFFFF00000;
// Errors produced while reading or writing the Block Allocation Table.
#[sorted]
#[derive(Error, Debug)]
pub enum VhdxBatError {
    #[error("Invalid BAT entry")]
    InvalidBatEntry,
    #[error("Invalid BAT entry count")]
    InvalidEntryCount,
    #[error("Failed to read BAT entry {0}")]
    ReadBat(#[source] io::Error),
    #[error("Failed to write BAT entry {0}")]
    WriteBat(#[source] io::Error),
}
pub type Result<T> = std::result::Result<T, VhdxBatError>;
/// A single 64-bit Block Allocation Table entry: a 1 MiB-aligned file
/// offset in the upper bits plus a 3-bit block state in the low bits.
#[derive(Default, Clone, Debug)]
pub struct BatEntry(pub u64);
impl BatEntry {
    /// Reads all BAT entries present on the disk into a vector.
    ///
    /// The number of entries is derived from the disk geometry and
    /// validated against the size of the BAT region advertised by the
    /// region table.
    pub fn collect_bat_entries(
        f: &mut File,
        disk_spec: &DiskSpec,
        bat_entry: &RegionTableEntry,
    ) -> Result<Vec<BatEntry>> {
        let entry_count = BatEntry::calculate_entries(
            disk_spec.block_size,
            disk_spec.virtual_disk_size,
            disk_spec.chunk_ratio,
        );
        // The BAT region must be large enough to hold every computed entry.
        if entry_count as usize > (bat_entry.length as usize / size_of::<BatEntry>()) {
            return Err(VhdxBatError::InvalidEntryCount);
        }
        // Reserve room for the entries themselves, not for `length` bytes:
        // the region length is in bytes while the vector holds 8-byte
        // entries, so using it directly over-allocated by a factor of 8.
        let mut bat: Vec<BatEntry> = Vec::with_capacity(entry_count as usize);
        // Entries are laid out consecutively on disk, so seeking once to the
        // start of the region is sufficient.
        f.seek(SeekFrom::Start(bat_entry.file_offset))
            .map_err(VhdxBatError::ReadBat)?;
        for _ in 0..entry_count {
            let bat_entry = BatEntry(
                f.read_u64::<LittleEndian>()
                    .map_err(VhdxBatError::ReadBat)?,
            );
            bat.push(bat_entry);
        }
        Ok(bat)
    }
    /// Calculates the number of entries in the BAT: one payload entry per
    /// data block plus one interleaved sector-bitmap entry per full chunk.
    fn calculate_entries(block_size: u32, virtual_disk_size: u64, chunk_ratio: u64) -> u64 {
        let data_blocks_count = div_round_up!(virtual_disk_size, block_size as u64);
        // saturating_sub keeps a zero-sized disk from underflowing.
        data_blocks_count + data_blocks_count.saturating_sub(1) / chunk_ratio
    }
    /// Writes every BAT entry back to the disk at `bat_offset`, in order.
    pub fn write_bat_entries(
        f: &mut File,
        bat_offset: u64,
        bat_entries: &[BatEntry],
    ) -> Result<()> {
        // Entries are contiguous on disk; seek once and stream them out.
        f.seek(SeekFrom::Start(bat_offset))
            .map_err(VhdxBatError::WriteBat)?;
        for entry in bat_entries {
            f.write_u64::<LittleEndian>(entry.0)
                .map_err(VhdxBatError::WriteBat)?;
        }
        Ok(())
    }
}

489
vhdx/src/vhdx_header.rs Normal file
View File

@ -0,0 +1,489 @@
// Copyright © 2021 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
extern crate log;
use byteorder::{ByteOrder, LittleEndian, ReadBytesExt};
use remain::sorted;
use std::collections::btree_map::BTreeMap;
use std::convert::TryInto;
use std::fs::File;
use std::io::{self, Read, Seek, SeekFrom, Write};
use std::mem::size_of;
use thiserror::Error;
use uuid::Uuid;
const VHDX_SIGN: u64 = 0x656C_6966_7864_6876; // "vhdxfile"
const HEADER_SIGN: u32 = 0x6461_6568; // "head"
const REGION_SIGN: u32 = 0x6967_6572; // "regi"
const FILE_START: u64 = 0; // Offset of the File Type Identifier
const HEADER_1_START: u64 = 64 * 1024; // Header 1 start in Bytes
const HEADER_2_START: u64 = 128 * 1024; // Header 2 start in Bytes
pub const REGION_TABLE_1_START: u64 = 192 * 1024; // Region 1 start in Bytes
const REGION_TABLE_2_START: u64 = 256 * 1024; // Region 2 start in Bytes
const HEADER_SIZE: u64 = 4 * 1024; // Each header is 64 KiB, but only the first 4 KiB contains info
const REGION_SIZE: u64 = 64 * 1024; // Each region size is 64 KiB
const REGION_ENTRY_REQUIRED: u32 = 1; // "Required" bit of a region entry
const BAT_GUID: &str = "2DC27766-F623-4200-9D64-115E9BFD4A08"; // BAT GUID
const MDR_GUID: &str = "8B7CA206-4790-4B9A-B8FE-575F050F886E"; // Metadata GUID
#[sorted]
#[derive(Error, Debug)]
pub enum VhdxHeaderError {
#[error("Failed to calculate checksum")]
CalculateChecksum,
#[error("BAT entry is not unique")]
DuplicateBATEntry,
#[error("Metadata region entry is not unique")]
DuplicateMDREntry,
#[error("Checksum doesn't match for")]
InvalidChecksum(String),
#[error("Invalid entry count")]
InvalidEntryCount,
#[error("Not a valid VHDx header")]
InvalidHeaderSign,
#[error("Not a valid VHDx region")]
InvalidRegionSign,
#[error("Couldn't parse Uuid for region entry {0}")]
InvalidUuid(#[source] uuid::Error),
#[error("Not a VHDx file")]
InvalidVHDXSign,
#[error("No valid header found")]
NoValidHeader,
#[error("Cannot read checksum")]
ReadChecksum,
#[error("Failed to read File Type Identifier {0}")]
ReadFileTypeIdentifier(#[source] io::Error),
#[error("Failed to read headers {0}")]
ReadHeader(#[source] io::Error),
#[error("Failed to read metadata {0}")]
ReadMetadata(#[source] std::io::Error),
#[error("Failed to read region table entries {0}")]
ReadRegionTableEntries(#[source] io::Error),
#[error("Failed to read region table header {0}")]
ReadRegionTableHeader(#[source] io::Error),
#[error("Failed to read region entries")]
RegionEntryCollectionFailed,
#[error("Overlapping regions found")]
RegionOverlap,
#[error("Reserved region has non-zero value")]
ReservedIsNonZero,
#[error("Failed to seek in File Type Identifier {0}")]
SeekFileTypeIdentifier(#[source] io::Error),
#[error("Failed to seek in headers {0}")]
SeekHeader(#[source] io::Error),
#[error("Failed to seek in region table entries {0}")]
SeekRegionTableEntries(#[source] io::Error),
#[error("Failed to seek in region table header {0}")]
SeekRegionTableHeader(#[source] io::Error),
#[error("We do not recongize this entry")]
UnrecognizedRegionEntry,
#[error("Failed to write header {0}")]
WriteHeader(#[source] io::Error),
}
pub type Result<T> = std::result::Result<T, VhdxHeaderError>;
/// The 8-byte File Type Identifier located at offset 0 of a VHDx file.
#[derive(Clone, Debug)]
pub struct FileTypeIdentifier {
    // Must equal VHDX_SIGN ("vhdxfile")
    pub signature: u64,
}
impl FileTypeIdentifier {
    /// Reads the File Type Identifier structure from a reference VHDx file.
    /// Fails unless the 8 bytes at offset 0 spell "vhdxfile".
    pub fn new(f: &mut File) -> Result<FileTypeIdentifier> {
        f.seek(SeekFrom::Start(FILE_START))
            .map_err(VhdxHeaderError::SeekFileTypeIdentifier)?;
        let signature = f
            .read_u64::<LittleEndian>()
            .map_err(VhdxHeaderError::ReadFileTypeIdentifier)?;
        match signature {
            VHDX_SIGN => Ok(FileTypeIdentifier { signature }),
            _ => Err(VhdxHeaderError::InvalidVHDXSign),
        }
    }
}
/// On-disk VHDx header (two copies exist, at 64 KiB and 128 KiB).
/// `repr(packed)` so the struct matches the on-disk layout byte for byte.
#[repr(packed)]
#[derive(Clone, Copy, Debug)]
pub struct Header {
    // Must equal HEADER_SIGN ("head")
    pub signature: u32,
    // CRC32C of the 4 KiB header computed with this field zeroed
    pub checksum: u32,
    // The copy with the higher sequence number is the current header
    pub sequence_number: u64,
    pub file_write_guid: u128,
    pub data_write_guid: u128,
    pub log_guid: u128,
    pub log_version: u16,
    pub version: u16,
    pub log_length: u32,
    pub log_offset: u64,
}
impl Header {
    /// Reads the Header structure from a reference VHDx file and validates
    /// its signature and checksum.
    pub fn new(f: &mut File, start: u64) -> Result<Header> {
        // Read the whole 4 KiB header into a buffer; the raw bytes are also
        // needed for checksum validation.
        let mut buffer = [0; HEADER_SIZE as usize];
        f.seek(SeekFrom::Start(start))
            .map_err(VhdxHeaderError::SeekHeader)?;
        f.read_exact(&mut buffer)
            .map_err(VhdxHeaderError::ReadHeader)?;
        // SAFETY: the buffer holds at least size_of::<Header>() bytes and
        // Header is repr(packed) (alignment 1), so copying the struct out
        // of the start of the buffer is sound.
        let header = unsafe { *(buffer.as_ptr() as *mut Header) };
        if header.signature != HEADER_SIGN {
            return Err(VhdxHeaderError::InvalidHeaderSign);
        }
        // The checksum field sits right after the 4-byte signature.
        let new_checksum = calculate_checksum(&mut buffer, size_of::<u32>())?;
        if header.checksum != new_checksum {
            return Err(VhdxHeaderError::InvalidChecksum(String::from("Header")));
        }
        Ok(header)
    }
    /// Serializes the header into a zero-padded 4 KiB buffer.
    fn get_header_as_buffer(&self, buffer: &mut [u8; HEADER_SIZE as usize]) {
        // SAFETY: the slice covers exactly the struct's own bytes. The
        // previous implementation exposed HEADER_SIZE (4 KiB) bytes
        // starting at `self`, reading far past the end of the struct —
        // undefined behavior that also made the checksum depend on
        // whatever bytes happened to follow the header on the stack.
        let reference = unsafe {
            std::slice::from_raw_parts(self as *const Header as *const u8, size_of::<Header>())
        };
        buffer[..size_of::<Header>()].copy_from_slice(reference);
        // The remainder of the 4 KiB header block is defined to be zero;
        // clear it explicitly since the buffer is reused between calls.
        buffer[size_of::<Header>()..].fill(0);
    }
    /// Creates a new updated header from the provided current header and
    /// persists it at offset `start`.
    pub fn update_header(
        f: &mut File,
        current_header: &Header,
        change_data_guid: bool,
        mut file_write_guid: u128,
        start: u64,
    ) -> Result<Header> {
        let mut buffer = [0u8; HEADER_SIZE as usize];
        let mut data_write_guid = current_header.data_write_guid;
        if change_data_guid {
            data_write_guid = Uuid::new_v4().as_u128();
        }
        // A zero file_write_guid means "keep the current one".
        if file_write_guid == 0 {
            file_write_guid = current_header.file_write_guid;
        }
        let mut new_header = Header {
            signature: current_header.signature,
            checksum: 0,
            sequence_number: current_header.sequence_number + 1,
            file_write_guid,
            data_write_guid,
            log_guid: current_header.log_guid,
            log_version: current_header.log_version,
            version: current_header.version,
            log_length: current_header.log_length,
            log_offset: current_header.log_offset,
        };
        // The checksum is computed over the serialized header with the
        // checksum field still zero; the header is then re-serialized with
        // the final checksum in place.
        new_header.get_header_as_buffer(&mut buffer);
        new_header.checksum = crc32c::crc32c(&buffer);
        new_header.get_header_as_buffer(&mut buffer);
        f.seek(SeekFrom::Start(start))
            .map_err(VhdxHeaderError::SeekHeader)?;
        // write_all: a plain write() may be short and silently truncate the
        // on-disk header.
        f.write_all(&buffer).map_err(VhdxHeaderError::WriteHeader)?;
        Ok(new_header)
    }
}
/// Header of an on-disk region table (two copies, at 192 KiB and 256 KiB).
/// `repr(packed)` so the struct matches the on-disk layout byte for byte.
#[repr(packed)]
#[derive(Clone, Copy, Debug)]
struct RegionTableHeader {
    // Must equal REGION_SIGN ("regi")
    pub signature: u32,
    // CRC32C of the 64 KiB region table computed with this field zeroed
    pub checksum: u32,
    // Number of region entries following this header (at most 2047)
    pub entry_count: u32,
    // Must be zero
    pub reserved: u32,
}
impl RegionTableHeader {
    /// Reads the Region Table Header structure from a reference VHDx file
    /// and validates its signature, checksum, entry count, and reserved
    /// field.
    pub fn new(f: &mut File, start: u64) -> Result<RegionTableHeader> {
        // Read the whole 64 KiB region table into a buffer. We will need it
        // for calculating the checksum, which covers the entire table.
        let mut buffer = [0u8; REGION_SIZE as usize];
        f.seek(SeekFrom::Start(start))
            .map_err(VhdxHeaderError::SeekRegionTableHeader)?;
        f.read_exact(&mut buffer)
            .map_err(VhdxHeaderError::ReadRegionTableHeader)?;
        // Copy the packed header struct out of the leading bytes of the
        // buffer (alignment 1, so the cast-and-copy is sound).
        let region_table_header = unsafe { *(buffer.as_ptr() as *mut RegionTableHeader) };
        if region_table_header.signature != REGION_SIGN {
            return Err(VhdxHeaderError::InvalidRegionSign);
        }
        // The checksum field sits right after the 4-byte signature.
        let new_checksum = calculate_checksum(&mut buffer, size_of::<u32>())?;
        if region_table_header.checksum != new_checksum {
            return Err(VhdxHeaderError::InvalidChecksum(String::from("Region")));
        }
        // The region table holds at most 2047 entries.
        if region_table_header.entry_count > 2047 {
            return Err(VhdxHeaderError::InvalidEntryCount);
        }
        if region_table_header.reserved != 0 {
            return Err(VhdxHeaderError::ReservedIsNonZero);
        }
        Ok(region_table_header)
    }
}
/// Result of walking a region table: the two mandatory entries (BAT and
/// metadata) plus a map of every region's start offset -> end offset.
pub struct RegionInfo {
    pub bat_entry: RegionTableEntry,
    pub mdr_entry: RegionTableEntry,
    pub region_entries: BTreeMap<u64, u64>,
}
impl RegionInfo {
    /// Collects all entries from the Region Table into a BTreeMap and
    /// identifies the BAT and metadata regions, both of which are
    /// mandatory.
    pub fn new(f: &mut File, region_start: u64, entry_count: u32) -> Result<RegionInfo> {
        let mut bat_entry: Option<RegionTableEntry> = None;
        let mut mdr_entry: Option<RegionTableEntry> = None;
        let mut offset = 0;
        let mut region_entries = BTreeMap::new();
        let mut buffer = [0; REGION_SIZE as usize];
        // Parse the two well-known GUIDs once instead of re-parsing them
        // for every entry inside the loop.
        let bat_guid = Uuid::parse_str(BAT_GUID).map_err(VhdxHeaderError::InvalidUuid)?;
        let mdr_guid = Uuid::parse_str(MDR_GUID).map_err(VhdxHeaderError::InvalidUuid)?;
        // Seek after the Region Table Header
        f.seek(SeekFrom::Start(
            region_start + size_of::<RegionTableHeader>() as u64,
        ))
        .map_err(VhdxHeaderError::SeekRegionTableEntries)?;
        f.read_exact(&mut buffer)
            .map_err(VhdxHeaderError::ReadRegionTableEntries)?;
        for _ in 0..entry_count {
            let entry =
                RegionTableEntry::new(&buffer[offset..offset + size_of::<RegionTableEntry>()])?;
            offset += size_of::<RegionTableEntry>();
            let start = entry.file_offset;
            let end = start + entry.length as u64;
            // Two half-open ranges [start, end) and [s, e) overlap iff
            // start < e && end > s. The previous check compared start
            // against the stored starts and end against the stored ends,
            // which never rejected genuinely overlapping regions.
            for (region_ent_start, region_ent_end) in region_entries.iter() {
                if start < *region_ent_end && end > *region_ent_start {
                    return Err(VhdxHeaderError::RegionOverlap);
                }
            }
            region_entries.insert(entry.file_offset, entry.file_offset + entry.length as u64);
            if entry.guid == bat_guid {
                if bat_entry.is_none() {
                    bat_entry = Some(entry);
                    continue;
                }
                return Err(VhdxHeaderError::DuplicateBATEntry);
            }
            if entry.guid == mdr_guid {
                if mdr_entry.is_none() {
                    mdr_entry = Some(entry);
                    continue;
                }
                return Err(VhdxHeaderError::DuplicateMDREntry);
            }
            if (entry.required & REGION_ENTRY_REQUIRED) == 1 {
                // This implementation doesn't recognize this entry.
                // Therefore, according to the spec, we are throwing an
                // error.
                return Err(VhdxHeaderError::UnrecognizedRegionEntry);
            }
        }
        // Both the BAT and the metadata region are required.
        if bat_entry.is_none() || mdr_entry.is_none() {
            region_entries.clear();
            return Err(VhdxHeaderError::RegionEntryCollectionFailed);
        }
        // It's safe to unwrap as we checked both entries have been filled.
        // Otherwise, an error is already returned.
        let bat_entry = bat_entry.unwrap();
        let mdr_entry = mdr_entry.unwrap();
        Ok(RegionInfo {
            bat_entry,
            mdr_entry,
            region_entries,
        })
    }
}
/// A single 32-byte region table entry.
/// `repr(packed)` so the struct matches the on-disk layout byte for byte.
#[repr(packed)]
#[derive(Clone, Copy, Debug)]
pub struct RegionTableEntry {
    // Identifies the region object (e.g. BAT_GUID or MDR_GUID)
    pub guid: Uuid,
    // Byte offset of the region within the file
    pub file_offset: u64,
    // Length of the region in bytes
    pub length: u32,
    // Bit 0 set means the implementation must understand this region
    pub required: u32,
}
impl RegionTableEntry {
    /// Reads one Region Entry from a Region Table index that starts from 0.
    ///
    /// `buffer` must contain at least `size_of::<RegionTableEntry>()` bytes.
    pub fn new(buffer: &[u8]) -> Result<RegionTableEntry> {
        // Copy the raw packed struct out of the buffer, then overwrite the
        // guid field with the endian-corrected Uuid parsed from the same
        // bytes.
        let mut region_table_entry = unsafe { *(buffer.as_ptr() as *mut RegionTableEntry) };
        let uuid = crate::uuid_from_guid(buffer).map_err(VhdxHeaderError::InvalidUuid)?;
        region_table_entry.guid = uuid;
        Ok(region_table_entry)
    }
}
/// Start/end byte offsets of a region.
// NOTE(review): this struct is not referenced anywhere in this file —
// possibly dead code; confirm before removing.
#[derive(Clone, Debug)]
struct RegionEntry {
    start: u64,
    end: u64,
}
/// Which of the two on-disk header copies is being referred to.
enum HeaderNo {
    First,
    Second,
}
/// Contains the information from the header of a VHDx file: the file type
/// identifier, both header copies, and both region table headers.
#[derive(Clone, Debug)]
pub struct VhdxHeader {
    file_type_identifier: FileTypeIdentifier,
    header_1: Header,
    header_2: Header,
    region_table_1: RegionTableHeader,
    region_table_2: RegionTableHeader,
}
impl VhdxHeader {
    /// Creates a VhdxHeader from a reference to a file.
    pub fn new(f: &mut File) -> Result<VhdxHeader> {
        let file_type_identifier: FileTypeIdentifier = FileTypeIdentifier::new(f)?;
        let header_1 = Header::new(f, HEADER_1_START);
        let header_2 = Header::new(f, HEADER_2_START);
        // A writable file is claimed by stamping a fresh file_write_guid;
        // zero means "keep the existing GUID" (see Header::update_header).
        let mut file_write_guid: u128 = 0;
        let metadata = f.metadata().map_err(VhdxHeaderError::ReadMetadata)?;
        if !metadata.permissions().readonly() {
            file_write_guid = Uuid::new_v4().as_u128();
        }
        let (header_1, header_2) =
            VhdxHeader::update_headers(f, header_1, header_2, file_write_guid)?;
        Ok(VhdxHeader {
            file_type_identifier,
            header_1,
            header_2,
            region_table_1: RegionTableHeader::new(f, REGION_TABLE_1_START)?,
            region_table_2: RegionTableHeader::new(f, REGION_TABLE_2_START)?,
        })
    }
    /// Identifies the current header: the valid header with the highest
    /// sequence number. Fails only when neither header is valid.
    fn current_header(
        header_1: Result<Header>,
        header_2: Result<Header>,
    ) -> Result<(HeaderNo, Header)> {
        // The previous implementation defaulted the sequence number of an
        // invalid header to 0 and then unconditionally unwrapped header_1
        // when header1_seq >= header2_seq, which panicked when only
        // header_2 was valid but carried sequence number 0.
        match (header_1, header_2) {
            (Ok(h1), Err(_)) => Ok((HeaderNo::First, h1)),
            (Err(_), Ok(h2)) => Ok((HeaderNo::Second, h2)),
            (Ok(h1), Ok(h2)) => {
                if h1.sequence_number >= h2.sequence_number {
                    Ok((HeaderNo::First, h1))
                } else {
                    Ok((HeaderNo::Second, h2))
                }
            }
            (Err(_), Err(_)) => Err(VhdxHeaderError::NoValidHeader),
        }
    }
    /// This takes two headers and updates the noncurrent header with the
    /// current one. Returns both headers as a tuple sequenced the way it
    /// was received from the parameter list.
    fn update_header(
        f: &mut File,
        header_1: Result<Header>,
        header_2: Result<Header>,
        guid: u128,
    ) -> Result<(Header, Header)> {
        let (header_no, current_header) = VhdxHeader::current_header(header_1, header_2)?;
        match header_no {
            HeaderNo::First => {
                let other_header =
                    Header::update_header(f, &current_header, true, guid, HEADER_2_START)?;
                Ok((current_header, other_header))
            }
            HeaderNo::Second => {
                let other_header =
                    Header::update_header(f, &current_header, true, guid, HEADER_1_START)?;
                Ok((other_header, current_header))
            }
        }
    }
    /// Updates the provided headers according to the spec: performing the
    /// update twice rewrites both on-disk copies.
    fn update_headers(
        f: &mut File,
        header_1: Result<Header>,
        header_2: Result<Header>,
        guid: u128,
    ) -> Result<(Header, Header)> {
        let (header_1, header_2) = VhdxHeader::update_header(f, header_1, header_2, guid)?;
        VhdxHeader::update_header(f, Ok(header_1), Ok(header_2), guid)
    }
    /// Rewrites both headers (bumping sequence numbers); a guid of zero
    /// keeps the current file_write_guid.
    pub fn update(&mut self, f: &mut File) -> Result<()> {
        let headers = VhdxHeader::update_headers(f, Ok(self.header_1), Ok(self.header_2), 0)?;
        self.header_1 = headers.0;
        self.header_2 = headers.1;
        Ok(())
    }
    /// Number of entries advertised by the first region table.
    pub fn region_entry_count(&self) -> u32 {
        self.region_table_1.entry_count
    }
}
/// Calculates the checksum of a buffer that itself containts its checksum
/// Therefore, before calculating, the existing checksum is retrieved and the
/// corresponding field is made zero. After the calculation, the existing checksum
/// is put back to the buffer.
pub fn calculate_checksum(buffer: &mut [u8], csum_offset: usize) -> Result<u32> {
// Read the checksum into a mutable slice
let csum_buf = &mut buffer[csum_offset..csum_offset + 4];
// Convert the checksum chunk in to a u32 integer
let orig_csum = LittleEndian::read_u32(csum_buf);
// Zero the checksum in the buffer
LittleEndian::write_u32(csum_buf, 0);
// Calculate the checksum on the resulting buffer
let new_csum = crc32c::crc32c(buffer);
// Put back the original checksum in the buffer
LittleEndian::write_u32(&mut buffer[csum_offset..csum_offset + 4], orig_csum);
Ok(new_csum)
}

229
vhdx/src/vhdx_io.rs Normal file
View File

@ -0,0 +1,229 @@
// Copyright © 2021 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
use crate::vhdx_bat::{self, BatEntry, VhdxBatError};
use crate::vhdx_metadata::{self, DiskSpec};
use remain::sorted;
use std::fs::File;
use std::io::{self, Read, Seek, SeekFrom, Write};
use thiserror::Error;
// Sector size, in bytes, used when sizing read/write spans.
// NOTE(review): fixed at 512 while DiskSpec also carries a
// logical_sector_size — confirm the two always agree.
const SECTOR_SIZE: u64 = 512;
/// Errors produced by the sector-level read/write routines.
#[sorted]
#[derive(Error, Debug)]
pub enum VhdxIoError {
    #[error("Invalid BAT entry state")]
    InvalidBatEntryState,
    #[error("Invalid BAT entry count")]
    InvalidBatIndex,
    #[error("Invalid disk size")]
    InvalidDiskSize,
    #[error("Failed reading sector blocks from file {0}")]
    ReadSectorBlock(#[source] io::Error),
    #[error("Failed changing file length {0}")]
    ResizeFile(#[source] io::Error),
    #[error("Differencing mode is not supported yet")]
    UnsupportedMode,
    #[error("Failed writing BAT to file {0}")]
    WriteBat(#[source] VhdxBatError),
}
pub type Result<T> = std::result::Result<T, VhdxIoError>;
// Aligns `$n` up to the next multiple of `$align`. Already-aligned values
// are returned unchanged; values below `$align` round up to `$align`.
macro_rules! align {
    ($n:expr, $align:expr) => {{
        if $align > $n {
            $align
        } else {
            let rem = $n % $align;
            // Round up by exactly one unit when there is a remainder. The
            // previous formula, (($n / $align) + rem) * $align, multiplied
            // by the remainder itself and produced offsets far beyond the
            // intended boundary for any unaligned input.
            if rem == 0 {
                $n
            } else {
                (($n / $align) + 1) * $align
            }
        }
    }};
}
/// The portion of an I/O request that falls within a single payload block,
/// resolved to BAT index and absolute file offsets.
#[derive(Default)]
struct Sector {
    // Index of the BAT entry covering the request
    bat_index: u64,
    // Number of requested sectors that fit inside this block
    free_sectors: u64,
    // The same span expressed in bytes (logical-sector-size based)
    free_bytes: u64,
    // Absolute file offset of the data, or 0 if the block is unallocated
    file_offset: u64,
    // Byte offset of the data within its payload block
    block_offset: u64,
}
impl Sector {
/// Translate sector index and count of data in file to actual offsets and
/// BAT index.
pub fn new(
disk_spec: &DiskSpec,
bat: &[BatEntry],
sector_index: u64,
sector_count: u64,
) -> Result<Sector> {
let mut sector = Sector::default();
sector.bat_index = sector_index / disk_spec.sectors_per_block as u64;
sector.block_offset = sector_index % disk_spec.sectors_per_block as u64;
sector.free_sectors = disk_spec.sectors_per_block as u64 - sector.block_offset;
if sector.free_sectors > sector_count {
sector.free_sectors = sector_count;
}
sector.free_bytes = sector.free_sectors * disk_spec.logical_sector_size as u64;
sector.block_offset *= disk_spec.logical_sector_size as u64;
let bat_entry = match bat.get(sector.bat_index as usize) {
Some(entry) => entry.0,
None => {
return Err(VhdxIoError::InvalidBatIndex);
}
};
sector.file_offset = bat_entry & vhdx_bat::BAT_FILE_OFF_MASK;
if sector.file_offset != 0 {
sector.file_offset += sector.block_offset;
}
Ok(sector)
}
}
/// VHDx IO read routine: requires relative sector index and count for the
/// requested data.
///
/// Returns the number of bytes placed into `buf`. Blocks in a non-present
/// state (not present / undefined / unmapped / zero) read back as zeroes.
pub fn read(
    f: &mut File,
    buf: &mut [u8],
    disk_spec: &DiskSpec,
    bat: &[BatEntry],
    mut sector_index: u64,
    mut sector_count: u64,
) -> Result<usize> {
    let mut read_count: usize = 0;
    while sector_count > 0 {
        if disk_spec.has_parent {
            // Differencing disks would require a parent lookup here.
            return Err(VhdxIoError::UnsupportedMode);
        }
        let sector = Sector::new(disk_spec, bat, sector_index, sector_count)?;
        let bat_entry = match bat.get(sector.bat_index as usize) {
            Some(entry) => entry.0,
            None => {
                return Err(VhdxIoError::InvalidBatIndex);
            }
        };
        match bat_entry & vhdx_bat::BAT_STATE_BIT_MASK {
            vhdx_bat::PAYLOAD_BLOCK_NOT_PRESENT
            | vhdx_bat::PAYLOAD_BLOCK_UNDEFINED
            | vhdx_bat::PAYLOAD_BLOCK_UNMAPPED
            | vhdx_bat::PAYLOAD_BLOCK_ZERO => {
                // Unallocated/zero blocks must read as zeroes; previously
                // whatever stale bytes were in `buf` were left in place.
                buf[read_count..read_count + sector.free_bytes as usize].fill(0);
            }
            vhdx_bat::PAYLOAD_BLOCK_FULLY_PRESENT => {
                f.seek(SeekFrom::Start(sector.file_offset))
                    .map_err(VhdxIoError::ReadSectorBlock)?;
                // Size the span with free_bytes (logical-sector based) so it
                // matches the offset math in Sector::new; the previous code
                // hardcoded 512-byte sectors via SECTOR_SIZE.
                f.read_exact(&mut buf[read_count..read_count + sector.free_bytes as usize])
                    .map_err(VhdxIoError::ReadSectorBlock)?;
            }
            vhdx_bat::PAYLOAD_BLOCK_PARTIALLY_PRESENT => {
                return Err(VhdxIoError::UnsupportedMode);
            }
            _ => {
                return Err(VhdxIoError::InvalidBatEntryState);
            }
        }
        sector_count -= sector.free_sectors;
        sector_index += sector.free_sectors;
        // Accumulate: read_count is both the running total returned to the
        // caller and the offset of the next span in `buf`. The previous
        // plain assignment broke reads spanning more than two blocks and
        // returned only the size of the last span.
        read_count += sector.free_bytes as usize;
    }
    Ok(read_count)
}
/// VHDx IO write routine: requires relative sector index and count for the
/// requested data.
///
/// Allocates a new payload block (growing the file and persisting the BAT)
/// when the target block has no backing space yet. Returns the number of
/// bytes consumed from `buf`.
pub fn write(
    f: &mut File,
    buf: &[u8],
    disk_spec: &mut DiskSpec,
    bat_offset: u64,
    bat: &mut [BatEntry],
    mut sector_index: u64,
    mut sector_count: u64,
) -> Result<usize> {
    let mut write_count: usize = 0;
    while sector_count > 0 {
        if disk_spec.has_parent {
            // Differencing disks are not supported yet.
            return Err(VhdxIoError::UnsupportedMode);
        }
        let sector = Sector::new(disk_spec, bat, sector_index, sector_count)?;
        let bat_entry = match bat.get(sector.bat_index as usize) {
            Some(entry) => entry.0,
            None => {
                return Err(VhdxIoError::InvalidBatIndex);
            }
        };
        match bat_entry & vhdx_bat::BAT_STATE_BIT_MASK {
            vhdx_bat::PAYLOAD_BLOCK_NOT_PRESENT
            | vhdx_bat::PAYLOAD_BLOCK_UNDEFINED
            | vhdx_bat::PAYLOAD_BLOCK_UNMAPPED
            | vhdx_bat::PAYLOAD_BLOCK_ZERO => {
                // Allocate a fresh block at the 1 MiB-aligned end of the
                // file and grow the file by one block.
                let file_offset =
                    align!(disk_spec.image_size, vhdx_metadata::BLOCK_SIZE_MIN as u64);
                let new_size = file_offset
                    .checked_add(disk_spec.block_size as u64)
                    .ok_or(VhdxIoError::InvalidDiskSize)?;
                f.set_len(new_size).map_err(VhdxIoError::ResizeFile)?;
                disk_spec.image_size = new_size;
                // Record the new offset and mark the block fully present.
                let new_bat_entry = file_offset
                    | (vhdx_bat::PAYLOAD_BLOCK_FULLY_PRESENT & vhdx_bat::BAT_STATE_BIT_MASK);
                bat[sector.bat_index as usize] = BatEntry(new_bat_entry);
                // Persist the whole BAT before touching the data block.
                BatEntry::write_bat_entries(f, bat_offset, bat)
                    .map_err(VhdxIoError::WriteBat)?;
                // Defensive: never write into the header/metadata area.
                if file_offset < vhdx_metadata::BLOCK_SIZE_MIN as u64 {
                    break;
                }
                f.seek(SeekFrom::Start(file_offset))
                    .map_err(VhdxIoError::ReadSectorBlock)?;
                // NOTE(review): a freshly allocated block is written from
                // its base rather than at sector.block_offset — confirm
                // behavior for writes not aligned to a block boundary.
                f.write_all(&buf[write_count..write_count + sector.free_bytes as usize])
                    .map_err(VhdxIoError::ReadSectorBlock)?;
            }
            vhdx_bat::PAYLOAD_BLOCK_FULLY_PRESENT => {
                if sector.file_offset < vhdx_metadata::BLOCK_SIZE_MIN as u64 {
                    break;
                }
                f.seek(SeekFrom::Start(sector.file_offset))
                    .map_err(VhdxIoError::ReadSectorBlock)?;
                // Span sized with free_bytes for consistency with the
                // offset math in Sector::new (the previous code hardcoded
                // 512-byte sectors via SECTOR_SIZE).
                f.write_all(&buf[write_count..write_count + sector.free_bytes as usize])
                    .map_err(VhdxIoError::ReadSectorBlock)?;
            }
            vhdx_bat::PAYLOAD_BLOCK_PARTIALLY_PRESENT => {
                return Err(VhdxIoError::UnsupportedMode);
            }
            _ => {
                return Err(VhdxIoError::InvalidBatEntryState);
            }
        }
        sector_count -= sector.free_sectors;
        sector_index += sector.free_sectors;
        // Accumulate so multi-block writes advance through `buf` and the
        // total byte count is returned (plain assignment lost both).
        write_count += sector.free_bytes as usize;
    }
    Ok(write_count)
}

323
vhdx/src/vhdx_metadata.rs Normal file
View File

@ -0,0 +1,323 @@
// Copyright © 2021 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
use crate::vhdx_header::RegionTableEntry;
use byteorder::{LittleEndian, ReadBytesExt};
use remain::sorted;
use std::fs::File;
use std::io::{self, Read, Seek, SeekFrom};
use std::mem::size_of;
use thiserror::Error;
use uuid::Uuid;
const METADATA_SIGN: u64 = 0x6174_6164_6174_656D; // "metadata"
const METADATA_ENTRY_SIZE: usize = 32;
const METADATA_MAX_ENTRIES: u16 = 2047;
// The size including the table header and entries
const METADATA_TABLE_MAX_SIZE: usize = METADATA_ENTRY_SIZE * (METADATA_MAX_ENTRIES as usize + 1);
// Entry flag: the implementation must understand this metadata item
const METADATA_FLAGS_IS_REQUIRED: u32 = 0x04;
pub const BLOCK_SIZE_MIN: u32 = 1 << 20; // 1 MiB
const BLOCK_SIZE_MAX: u32 = 256 << 20; // 256 MiB
const MAX_SECTORS_PER_BLOCK: u64 = 1 << 23;
const BLOCK_HAS_PARENT: u32 = 0x02; // Has a parent or a backing file
// GUIDs for the known metadata items
const METADATA_FILE_PARAMETER: &str = "CAA16737-FA36-4D43-B3B6-33F0AA44E76B";
const METADATA_VIRTUAL_DISK_SIZE: &str = "2FA54224-CD1B-4876-B211-5DBED83BF4B8";
const METADATA_VIRTUAL_DISK_ID: &str = "BECA12AB-B2E6-4523-93EF-C309E000C746";
const METADATA_LOGICAL_SECTOR_SIZE: &str = "8141BF1D-A96F-4709-BA47-F233A8FAAB5F";
const METADATA_PHYSICAL_SECTOR_SIZE: &str = "CDA348C7-445D-4471-9CC9-E9885251C556";
const METADATA_PARENT_LOCATOR: &str = "A8D35F2D-B30B-454D-ABF7-D3D84834AB0C";
// Bit flags tracking which metadata items have been seen while parsing
const METADATA_FILE_PARAMETER_PRESENT: u16 = 0x01;
const METADATA_VIRTUAL_DISK_SIZE_PRESENT: u16 = 0x02;
const METADATA_VIRTUAL_DISK_ID_PRESENT: u16 = 0x04;
const METADATA_LOGICAL_SECTOR_SIZE_PRESENT: u16 = 0x08;
const METADATA_PHYSICAL_SECTOR_SIZE_PRESENT: u16 = 0x10;
const METADATA_PARENT_LOCATOR_PRESENT: u16 = 0x20;
// Every item that must be present (note the parent locator is excluded)
const METADATA_ALL_PRESENT: u16 = METADATA_FILE_PARAMETER_PRESENT
    | METADATA_VIRTUAL_DISK_SIZE_PRESENT
    | METADATA_VIRTUAL_DISK_ID_PRESENT
    | METADATA_LOGICAL_SECTOR_SIZE_PRESENT
    | METADATA_PHYSICAL_SECTOR_SIZE_PRESENT;
const METADATA_LENGTH_MAX: u32 = 1 << 20; // 1 MiB
/// Errors that can occur while parsing or validating the VHDx metadata
/// region. Variants are kept in alphabetical order (enforced by
/// `#[sorted]` from the `remain` crate).
#[sorted]
#[derive(Error, Debug)]
pub enum VhdxMetadataError {
    #[error("Invalid block size count")]
    InvalidBlockSize,
    #[error("Invalid metadata entry count")]
    InvalidEntryCount,
    #[error("Invalid logical sector size")]
    InvalidLogicalSectorSize,
    #[error("Invalid metadata ID")]
    InvalidMetadataItem,
    #[error("Invalid metadata length")]
    InvalidMetadataLength,
    #[error("Metadata sign doesn't match")]
    InvalidMetadataSign,
    #[error("Invalid physical sector size")]
    InvalidPhysicalSectorSize,
    #[error("Invalid UUID")]
    InvalidUuid(#[source] uuid::Error),
    #[error("Invalid value")]
    InvalidValue,
    #[error("Not all required metadata found")]
    MissingMetadata,
    // Wraps the underlying I/O error from seeking/reading the file.
    #[error("Failed to read metadata headers {0}")]
    ReadMetadata(#[source] io::Error),
    #[error("Reserved region has non-zero value")]
    ReservedIsNonZero,
    #[error("This implementation doesn't support this metadata flag")]
    UnsupportedFlag,
}

/// Convenience alias used throughout this module.
pub type Result<T> = std::result::Result<T, VhdxMetadataError>;
/// Aggregated disk parameters collected from the VHDx metadata region.
#[derive(Default, Clone, Debug)]
pub struct DiskSpec {
    /// Virtual disk identifier, read as a little-endian u128.
    pub disk_id: u128,
    /// Size in bytes of the VHDx file itself (from file metadata).
    pub image_size: u64,
    /// Payload block size in bytes; a power of two (expected to be in
    /// the 1 MiB..=256 MiB range).
    pub block_size: u32,
    /// True when the file-parameters flags have BLOCK_HAS_PARENT set,
    /// i.e. the image has a parent/backing file.
    pub has_parent: bool,
    /// Logical sectors per payload block (block_size / logical_sector_size).
    pub sectors_per_block: u32,
    /// Virtual disk size in bytes, as reported by the metadata item.
    pub virtual_disk_size: u64,
    /// Logical sector size in bytes; either 512 or 4096.
    pub logical_sector_size: u32,
    /// Physical sector size in bytes; either 512 or 4096.
    pub physical_sector_size: u32,
    /// Chunk ratio, computed as
    /// (MAX_SECTORS_PER_BLOCK * logical_sector_size) / block_size.
    pub chunk_ratio: u64,
    /// Total logical sectors: virtual_disk_size / logical_sector_size.
    pub total_sectors: u64,
}
impl DiskSpec {
    /// Parses all metadata from the provided file and stores the collected
    /// information in a `DiskSpec` structure.
    ///
    /// Reads the metadata table found at `metadata_region.file_offset`,
    /// walks every table entry, and returns an error if any item is
    /// malformed, unknown, optional, or if a required item is missing.
    pub fn new(f: &mut File, metadata_region: &RegionTableEntry) -> Result<DiskSpec> {
        let mut disk_spec = DiskSpec::default();
        let mut metadata_presence: u16 = 0;
        let mut offset = 0;

        let metadata = f.metadata().map_err(VhdxMetadataError::ReadMetadata)?;
        disk_spec.image_size = metadata.len();

        let mut buffer = [0u8; METADATA_TABLE_MAX_SIZE];
        f.seek(SeekFrom::Start(metadata_region.file_offset))
            .map_err(VhdxMetadataError::ReadMetadata)?;
        f.read_exact(&mut buffer)
            .map_err(VhdxMetadataError::ReadMetadata)?;

        let metadata_header =
            MetadataTableHeader::new(&buffer[0..size_of::<MetadataTableHeader>()])?;
        offset += size_of::<MetadataTableHeader>();

        // Parse the well-known item GUIDs once, instead of re-parsing all
        // of them on every iteration of the entry loop below.
        let file_parameter_id =
            Uuid::parse_str(METADATA_FILE_PARAMETER).map_err(VhdxMetadataError::InvalidUuid)?;
        let virtual_disk_size_id =
            Uuid::parse_str(METADATA_VIRTUAL_DISK_SIZE).map_err(VhdxMetadataError::InvalidUuid)?;
        let virtual_disk_id_id =
            Uuid::parse_str(METADATA_VIRTUAL_DISK_ID).map_err(VhdxMetadataError::InvalidUuid)?;
        let logical_sector_size_id = Uuid::parse_str(METADATA_LOGICAL_SECTOR_SIZE)
            .map_err(VhdxMetadataError::InvalidUuid)?;
        let physical_sector_size_id = Uuid::parse_str(METADATA_PHYSICAL_SECTOR_SIZE)
            .map_err(VhdxMetadataError::InvalidUuid)?;
        let parent_locator_id =
            Uuid::parse_str(METADATA_PARENT_LOCATOR).map_err(VhdxMetadataError::InvalidUuid)?;

        for _ in 0..metadata_header.entry_count {
            let metadata_entry =
                MetadataTableEntry::new(&buffer[offset..offset + size_of::<MetadataTableEntry>()])?;

            // Item payloads are located relative to the start of the
            // metadata region.
            f.seek(SeekFrom::Start(
                metadata_region.file_offset + metadata_entry.offset as u64,
            ))
            .map_err(VhdxMetadataError::ReadMetadata)?;

            if metadata_entry.item_id == file_parameter_id {
                disk_spec.block_size = f
                    .read_u32::<LittleEndian>()
                    .map_err(VhdxMetadataError::ReadMetadata)?;

                // MUST be at least 1 MiB and not greater than 256 MiB.
                // (Fixed: the previous `&&` made this check unsatisfiable,
                // silently accepting out-of-range block sizes.)
                if disk_spec.block_size < BLOCK_SIZE_MIN || disk_spec.block_size > BLOCK_SIZE_MAX {
                    return Err(VhdxMetadataError::InvalidBlockSize);
                }

                // MUST be power of 2
                if !disk_spec.block_size.is_power_of_two() {
                    return Err(VhdxMetadataError::InvalidBlockSize);
                }

                let bits = f
                    .read_u32::<LittleEndian>()
                    .map_err(VhdxMetadataError::ReadMetadata)?;
                disk_spec.has_parent = bits & BLOCK_HAS_PARENT != 0;

                metadata_presence |= METADATA_FILE_PARAMETER_PRESENT;
            } else if metadata_entry.item_id == virtual_disk_size_id {
                disk_spec.virtual_disk_size = f
                    .read_u64::<LittleEndian>()
                    .map_err(VhdxMetadataError::ReadMetadata)?;

                metadata_presence |= METADATA_VIRTUAL_DISK_SIZE_PRESENT;
            } else if metadata_entry.item_id == virtual_disk_id_id {
                disk_spec.disk_id = f
                    .read_u128::<LittleEndian>()
                    .map_err(VhdxMetadataError::ReadMetadata)?;

                metadata_presence |= METADATA_VIRTUAL_DISK_ID_PRESENT;
            } else if metadata_entry.item_id == logical_sector_size_id {
                disk_spec.logical_sector_size = f
                    .read_u32::<LittleEndian>()
                    .map_err(VhdxMetadataError::ReadMetadata)?;
                // Only 512- and 4096-byte logical sectors are accepted.
                if !(disk_spec.logical_sector_size == 512 || disk_spec.logical_sector_size == 4096)
                {
                    return Err(VhdxMetadataError::InvalidLogicalSectorSize);
                }

                metadata_presence |= METADATA_LOGICAL_SECTOR_SIZE_PRESENT;
            } else if metadata_entry.item_id == physical_sector_size_id {
                disk_spec.physical_sector_size = f
                    .read_u32::<LittleEndian>()
                    .map_err(VhdxMetadataError::ReadMetadata)?;
                // Only 512- and 4096-byte physical sectors are accepted.
                if !(disk_spec.physical_sector_size == 512
                    || disk_spec.physical_sector_size == 4096)
                {
                    return Err(VhdxMetadataError::InvalidPhysicalSectorSize);
                }

                metadata_presence |= METADATA_PHYSICAL_SECTOR_SIZE_PRESENT;
            } else if metadata_entry.item_id == parent_locator_id {
                // The payload is not parsed; only presence is recorded.
                metadata_presence |= METADATA_PARENT_LOCATOR_PRESENT;
            } else {
                return Err(VhdxMetadataError::InvalidMetadataItem);
            }

            // The only metadata entries this implementation supports are
            // the required ones.
            if (metadata_entry.flag_bits & METADATA_FLAGS_IS_REQUIRED) == 0 {
                return Err(VhdxMetadataError::UnsupportedFlag);
            }

            offset += size_of::<MetadataTableEntry>();
        }

        // Check if all required metadata are present
        if metadata_presence != METADATA_ALL_PRESENT {
            return Err(VhdxMetadataError::MissingMetadata);
        }

        // Check if the virtual disk size is a multiple of the logical
        // sector size. (Reports InvalidBlockSize to preserve the existing
        // error contract, although the failing quantity is the disk size.)
        if ((metadata_presence & METADATA_LOGICAL_SECTOR_SIZE_PRESENT) != 0)
            && (disk_spec.virtual_disk_size % disk_spec.logical_sector_size as u64 != 0)
        {
            return Err(VhdxMetadataError::InvalidBlockSize);
        }

        disk_spec.sectors_per_block =
            DiskSpec::sectors_per_block(disk_spec.block_size, disk_spec.logical_sector_size)?;

        disk_spec.chunk_ratio =
            DiskSpec::chunk_ratio(disk_spec.block_size, disk_spec.logical_sector_size)?;

        disk_spec.total_sectors =
            disk_spec.virtual_disk_size / disk_spec.logical_sector_size as u64;

        Ok(disk_spec)
    }

    /// Calculates the number of sectors per block; the result MUST be a
    /// power of two, otherwise `InvalidValue` is returned.
    fn sectors_per_block(block_size: u32, logical_sector_size: u32) -> Result<u32> {
        let sectors_per_block = block_size / logical_sector_size;

        if !sectors_per_block.is_power_of_two() {
            return Err(VhdxMetadataError::InvalidValue);
        }

        Ok(sectors_per_block)
    }

    /// Calculates the chunk ratio as
    /// (MAX_SECTORS_PER_BLOCK * logical_sector_size) / block_size; the
    /// result MUST be a power of two, otherwise `InvalidValue` is returned.
    fn chunk_ratio(block_size: u32, logical_sector_size: u32) -> Result<u64> {
        let chunk_ratio = (MAX_SECTORS_PER_BLOCK * logical_sector_size as u64) / block_size as u64;

        if !chunk_ratio.is_power_of_two() {
            return Err(VhdxMetadataError::InvalidValue);
        }

        Ok(chunk_ratio)
    }
}
/// On-disk layout of the metadata table header. `#[repr(packed)]` keeps
/// the fields exactly as they appear in the file so the struct can be
/// populated directly from the raw byte buffer.
#[repr(packed)]
#[derive(Default, Debug, Clone, Copy)]
struct MetadataTableHeader {
    // Must equal METADATA_SIGN ("metadata").
    signature: u64,
    // Must be zero.
    reserved: u16,
    // Number of entries following the header (at most METADATA_MAX_ENTRIES).
    entry_count: u16,
    _reserved2: [u8; 20],
}
impl MetadataTableHeader {
pub fn new(buffer: &[u8]) -> Result<MetadataTableHeader> {
let metadata_table_header = unsafe { *(buffer.as_ptr() as *mut MetadataTableHeader) };
if metadata_table_header.signature != METADATA_SIGN {
return Err(VhdxMetadataError::InvalidMetadataSign);
}
if metadata_table_header.entry_count > METADATA_MAX_ENTRIES {
return Err(VhdxMetadataError::InvalidEntryCount);
}
if metadata_table_header.reserved != 0 {
return Err(VhdxMetadataError::ReservedIsNonZero);
}
Ok(metadata_table_header)
}
}
/// On-disk layout of one metadata table entry. `#[repr(packed)]` mirrors
/// the 32-byte file layout so the struct can be populated directly from
/// the raw byte buffer.
#[repr(packed)]
#[derive(Default, Debug, Clone, Copy)]
pub struct MetadataTableEntry {
    // GUID identifying the metadata item; stored on disk in mixed
    // endianness and fixed up via uuid_from_guid() after the raw read.
    item_id: Uuid,
    // Byte offset of the item payload, relative to the metadata region.
    offset: u32,
    // Payload length in bytes (at most METADATA_LENGTH_MAX).
    length: u32,
    flag_bits: u32,
    // Must be zero.
    reserved: u32,
}
impl MetadataTableEntry {
/// Parse one metadata entry from the buffer
fn new(buffer: &[u8]) -> Result<MetadataTableEntry> {
let mut metadata_table_entry = unsafe { *(buffer.as_ptr() as *mut MetadataTableEntry) };
let uuid = crate::uuid_from_guid(buffer).map_err(VhdxMetadataError::InvalidUuid)?;
metadata_table_entry.item_id = uuid;
if metadata_table_entry.length > METADATA_LENGTH_MAX {
return Err(VhdxMetadataError::InvalidMetadataLength);
}
if metadata_table_entry.length == 0 && metadata_table_entry.offset != 0 {
return Err(VhdxMetadataError::InvalidMetadataLength);
}
if metadata_table_entry.reserved != 0 {
return Err(VhdxMetadataError::ReservedIsNonZero);
}
Ok(metadata_table_entry)
}
}