vmm: Add igvm module and loader module

Add a separate module named igvm to the vmm crate
with definitions to parse an IGVM file and load it into guest memory.

Signed-off-by: Jinank Jain <jinankjain@microsoft.com>
Signed-off-by: Muminul Islam <muislam@microsoft.com>
Author: Muminul Islam, 2023-10-10 16:08:22 -07:00, committed by Bo Chen
parent ec79820b3f
commit 7030b15e63
7 changed files with 630 additions and 2 deletions

Cargo.lock (generated)

@@ -2520,15 +2520,20 @@ dependencies = [
"futures",
"gdbstub",
"gdbstub_arch",
"hex",
"hypervisor",
"igvm",
"igvm_defs",
"libc",
"linux-loader",
"log",
"micro_http",
"mshv-bindings",
"net_util",
"once_cell",
"option_parser",
"pci",
"range_map_vec",
"seccompiler",
"serde",
"serde_json",

vmm/Cargo.toml

@@ -8,7 +8,7 @@ edition = "2021"
default = []
dbus_api = ["blocking", "futures", "zbus"]
guest_debug = ["kvm", "gdbstub", "gdbstub_arch"]
-igvm = []
+igvm = ["hex", "igvm_parser", "igvm_defs", "mshv-bindings", "range_map_vec"]
io_uring = ["block/io_uring"]
kvm = ["hypervisor/kvm", "vfio-ioctls/kvm", "vm-device/kvm", "pci/kvm"]
mshv = ["hypervisor/mshv", "vfio-ioctls/mshv", "vm-device/mshv", "pci/mshv"]
@@ -33,15 +33,20 @@ flume = "0.10.14"
futures = { version = "0.3.27", optional = true }
gdbstub = { version = "0.7.0", optional = true }
gdbstub_arch = { version = "0.3.0", optional = true }
hex = { version = "0.4.3", optional = true }
hypervisor = { path = "../hypervisor" }
igvm_defs = { git = "https://github.com/microsoft/igvm", branch = "main" , package = "igvm_defs", optional = true }
igvm_parser = { git = "https://github.com/microsoft/igvm", branch = "main" , package = "igvm", optional = true }
libc = "0.2.147"
linux-loader = { version = "0.10.0", features = ["elf", "bzimage", "pe"] }
log = "0.4.20"
micro_http = { git = "https://github.com/firecracker-microvm/micro-http", branch = "main" }
mshv-bindings = { git = "https://github.com/rust-vmm/mshv", branch = "main", features = ["with-serde", "fam-wrappers"], optional = true }
net_util = { path = "../net_util" }
once_cell = "1.18.0"
option_parser = { path = "../option_parser" }
pci = { path = "../pci" }
range_map_vec = { version = "0.1.0", optional = true }
seccompiler = "0.4.0"
serde = { version = "1.0.168", features = ["rc", "derive"] }
serde_json = "1.0.107"
@@ -63,4 +68,4 @@ vm-migration = { path = "../vm-migration" }
vm-virtio = { path = "../vm-virtio" }
vmm-sys-util = { version = "0.11.0", features = ["with-serde"] }
zbus = { version = "3.11.1", optional = true }
-zerocopy = { version = "0.7.21", features = ["derive"] }
+zerocopy = { version = "0.7.21", features = ["alloc","derive"] }
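All of the new dependencies are optional and are only pulled in through the igvm feature. A minimal sketch (not part of this diff) of feature-gated code using one of them; the helper name is illustrative only, and `ld` refers to the 48-byte launch digest from the SnpIdBlock directive handled later in this commit:

#[cfg(feature = "igvm")]
fn id_block_digest_hex(ld: &[u8; 48]) -> String {
    // `hex` is only compiled in when the igvm feature enables it.
    hex::encode(ld)
}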

vmm/src/cpu.rs

@@ -1784,6 +1784,24 @@ impl CpuManager {
pub(crate) fn vcpus_kill_signalled(&self) -> &Arc<AtomicBool> {
&self.vcpus_kill_signalled
}
#[cfg(feature = "igvm")]
pub(crate) fn get_cpuid_leaf(
&self,
cpu_id: u8,
eax: u32,
ecx: u32,
xfem: u64,
xss: u64,
) -> Result<[u32; 4]> {
let leaf_info = self.vcpus[usize::from(cpu_id)]
.lock()
.unwrap()
.vcpu
.get_cpuid_values(eax, ecx, xfem, xss)
.unwrap();
Ok(leaf_info)
}
}
struct Cpu {

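A minimal sketch (not part of this diff) of a crate-internal call to the new helper, querying CPUID leaf 0x1 on vCPU 0 with no XFEM/XSS state; the wrapper function is illustrative only:

#[cfg(feature = "igvm")]
fn query_leaf_1(cpu_manager: &CpuManager) -> [u32; 4] {
    // eax = 1, ecx = 0, xfem = 0, xss = 0; the unwrap mirrors how the IGVM
    // loader consumes the result when rewriting the guest CPUID page.
    cpu_manager.get_cpuid_leaf(0, 0x1, 0, 0, 0).unwrap()
}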
vmm/src/igvm/igvm_loader.rs (new file)

@@ -0,0 +1,374 @@
// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
//
// Copyright © 2023, Microsoft Corporation
//
use crate::cpu::CpuManager;
use zerocopy::AsBytes;
use crate::igvm::loader::Loader;
use crate::igvm::IgvmLoadedInfo;
use crate::igvm::{BootPageAcceptance, StartupMemoryType, HV_PAGE_SIZE};
use crate::memory_manager::MemoryManager;
use igvm_defs::IgvmPageDataType;
use igvm_defs::IgvmPlatformType;
use igvm_parser::IgvmDirectiveHeader;
use igvm_parser::IgvmFile;
use igvm_parser::IgvmPlatformHeader;
use igvm_parser::IsolationType;
use igvm_defs::IGVM_VHS_PARAMETER;
use igvm_defs::IGVM_VHS_PARAMETER_INSERT;
use igvm_parser::snp_defs::SevVmsa;
pub use mshv_bindings::*;
use std::collections::HashMap;
use std::ffi::CString;
use std::io::Read;
use std::io::Seek;
use std::io::SeekFrom;
use std::mem::size_of;
use std::sync::{Arc, Mutex};
use thiserror::Error;
#[derive(Debug, Error)]
pub enum Error {
#[error("command line is not a valid C string")]
InvalidCommandLine(#[source] std::ffi::NulError),
#[error("failed to read igvm file")]
Igvm(#[source] std::io::Error),
#[error("invalid igvm file")]
InvalidIgvmFile(#[source] igvm_parser::Error),
#[error("loader error")]
Loader(#[source] crate::igvm::loader::Error),
#[error("parameter too large for parameter area")]
ParameterTooLarge,
#[error("Error importing isolated pages: {0}")]
ImportIsolatedPages(#[source] hypervisor::HypervisorVmError),
#[error("Error completing importing isolated pages: {0}")]
CompleteIsolatedImport(#[source] hypervisor::HypervisorVmError),
}
#[allow(dead_code)]
#[derive(Copy, Clone)]
struct GpaPages {
pub gpa: u64,
pub page_type: u32,
pub page_size: u32,
}
#[derive(Debug)]
enum ParameterAreaState {
/// Parameter area has been declared via a ParameterArea header.
Allocated { data: Vec<u8>, max_size: u64 },
/// Parameter area inserted and invalid to use.
Inserted,
}
// Import a parameter to the given parameter area.
fn import_parameter(
parameter_areas: &mut HashMap<u32, ParameterAreaState>,
info: &IGVM_VHS_PARAMETER,
parameter: &[u8],
) -> Result<(), Error> {
let (parameter_area, max_size) = match parameter_areas
.get_mut(&info.parameter_area_index)
.expect("parameter area should be present")
{
ParameterAreaState::Allocated { data, max_size } => (data, max_size),
ParameterAreaState::Inserted => panic!("igvmfile is not valid"),
};
let offset = info.byte_offset as usize;
let end_of_parameter = offset + parameter.len();
if end_of_parameter > *max_size as usize {
// TODO: tracing for which parameter was too big?
return Err(Error::ParameterTooLarge);
}
if parameter_area.len() < end_of_parameter {
parameter_area.resize(end_of_parameter, 0);
}
parameter_area[offset..end_of_parameter].copy_from_slice(parameter);
Ok(())
}
///
/// Load the given IGVM file into guest memory.
/// Right now only SEV-SNP based isolation is supported, although a legacy
/// VM can also be booted from an IGVM file without any isolation.
///
pub fn load_igvm(
mut file: &std::fs::File,
memory_manager: Arc<Mutex<MemoryManager>>,
cpu_manager: Arc<Mutex<CpuManager>>,
cmdline: &str,
) -> Result<Box<IgvmLoadedInfo>, Error> {
let mut loaded_info: Box<IgvmLoadedInfo> = Box::default();
let command_line = CString::new(cmdline).map_err(Error::InvalidCommandLine)?;
let mut file_contents = Vec::new();
let memory = memory_manager.lock().as_ref().unwrap().guest_memory();
let mut gpas: Vec<GpaPages> = Vec::new();
let proc_count = cpu_manager.lock().unwrap().vcpus().len() as u32;
file.seek(SeekFrom::Start(0)).map_err(Error::Igvm)?;
file.read_to_end(&mut file_contents).map_err(Error::Igvm)?;
let igvm_file = IgvmFile::new_from_binary(&file_contents, Some(IsolationType::Snp))
.map_err(Error::InvalidIgvmFile)?;
let mask = match &igvm_file.platforms()[0] {
IgvmPlatformHeader::SupportedPlatform(info) => {
debug_assert!(info.platform_type == IgvmPlatformType::SEV_SNP);
info.compatibility_mask
}
};
let mut loader = Loader::new(memory);
let mut parameter_areas: HashMap<u32, ParameterAreaState> = HashMap::new();
for header in igvm_file.directives() {
debug_assert!(header.compatibility_mask().unwrap_or(mask) & mask == mask);
match header {
IgvmDirectiveHeader::PageData {
gpa,
compatibility_mask: _,
flags,
data_type,
data,
} => {
debug_assert!(data.len() as u64 % HV_PAGE_SIZE == 0);
// TODO: only 4k or empty page data supported right now
assert!(data.len() as u64 == HV_PAGE_SIZE || data.is_empty());
let acceptance = match *data_type {
IgvmPageDataType::NORMAL => {
if flags.unmeasured() {
gpas.push(GpaPages {
gpa: *gpa,
page_type: hv_isolated_page_type_HV_ISOLATED_PAGE_TYPE_UNMEASURED,
page_size: hv_isolated_page_size_HV_ISOLATED_PAGE_SIZE_4KB,
});
BootPageAcceptance::ExclusiveUnmeasured
} else {
gpas.push(GpaPages {
gpa: *gpa,
page_type: hv_isolated_page_type_HV_ISOLATED_PAGE_TYPE_NORMAL,
page_size: hv_isolated_page_size_HV_ISOLATED_PAGE_SIZE_4KB,
});
BootPageAcceptance::Exclusive
}
}
IgvmPageDataType::SECRETS => {
gpas.push(GpaPages {
gpa: *gpa,
page_type: hv_isolated_page_type_HV_ISOLATED_PAGE_TYPE_SECRETS,
page_size: hv_isolated_page_size_HV_ISOLATED_PAGE_SIZE_4KB,
});
BootPageAcceptance::SecretsPage
}
IgvmPageDataType::CPUID_DATA => {
// SAFETY: CPUID is readonly
unsafe {
let cpuid_page_p: *mut hv_psp_cpuid_page =
data.as_ptr() as *mut hv_psp_cpuid_page;
let cpuid_page: &mut hv_psp_cpuid_page = &mut *cpuid_page_p;
for i in 0..cpuid_page.count {
let leaf = cpuid_page.cpuid_leaf_info[i as usize];
let mut in_leaf = cpu_manager
.lock()
.unwrap()
.get_cpuid_leaf(
0,
leaf.eax_in,
leaf.ecx_in,
leaf.xfem_in,
leaf.xss_in,
)
.unwrap();
if leaf.eax_in == 1 {
in_leaf[2] &= 0x7FFFFFFF;
}
cpuid_page.cpuid_leaf_info[i as usize].eax_out = in_leaf[0];
cpuid_page.cpuid_leaf_info[i as usize].ebx_out = in_leaf[1];
cpuid_page.cpuid_leaf_info[i as usize].ecx_out = in_leaf[2];
cpuid_page.cpuid_leaf_info[i as usize].edx_out = in_leaf[3];
}
}
gpas.push(GpaPages {
gpa: *gpa,
page_type: hv_isolated_page_type_HV_ISOLATED_PAGE_TYPE_CPUID,
page_size: hv_isolated_page_size_HV_ISOLATED_PAGE_SIZE_4KB,
});
BootPageAcceptance::CpuidPage
}
// TODO: the remaining page data types are SNP/TDX specific and not yet supported
_ => todo!("unsupported IgvmPageDataType"),
};
loader
.import_pages(gpa / HV_PAGE_SIZE, 1, acceptance, data)
.map_err(Error::Loader)?;
}
IgvmDirectiveHeader::ParameterArea {
number_of_bytes,
parameter_area_index,
initial_data,
} => {
debug_assert!(number_of_bytes % HV_PAGE_SIZE == 0);
debug_assert!(
initial_data.is_empty() || initial_data.len() as u64 == *number_of_bytes
);
// Allocate a new parameter area. It must not be already used.
if parameter_areas
.insert(
*parameter_area_index,
ParameterAreaState::Allocated {
data: initial_data.clone(),
max_size: *number_of_bytes,
},
)
.is_some()
{
panic!("IgvmFile is not valid, invalid invariant");
}
}
IgvmDirectiveHeader::VpCount(info) => {
import_parameter(&mut parameter_areas, info, proc_count.as_bytes())?;
}
IgvmDirectiveHeader::MmioRanges(_info) => {
todo!("unsupported IgvmPageDataType");
}
IgvmDirectiveHeader::MemoryMap(_info) => {
todo!("Not implemented");
}
IgvmDirectiveHeader::CommandLine(info) => {
import_parameter(&mut parameter_areas, info, command_line.as_bytes_with_nul())?;
}
IgvmDirectiveHeader::RequiredMemory {
gpa,
compatibility_mask: _,
number_of_bytes,
vtl2_protectable: _,
} => {
let memory_type = StartupMemoryType::Ram;
loaded_info.gpas.push(*gpa);
loader
.verify_startup_memory_available(
gpa / HV_PAGE_SIZE,
*number_of_bytes as u64 / HV_PAGE_SIZE,
memory_type,
)
.map_err(Error::Loader)?;
}
IgvmDirectiveHeader::SnpVpContext {
gpa,
compatibility_mask: _,
vp_index,
vmsa,
} => {
assert_eq!(gpa % HV_PAGE_SIZE, 0);
let mut data: [u8; 4096] = [0; 4096];
let len = size_of::<SevVmsa>();
loaded_info.vmsa_gpa = *gpa;
loaded_info.vmsa = **vmsa;
// Only supported for index zero
if *vp_index == 0 {
data[..len].copy_from_slice(vmsa.as_bytes());
loader
.import_pages(gpa / HV_PAGE_SIZE, 1, BootPageAcceptance::VpContext, &data)
.map_err(Error::Loader)?;
}
gpas.push(GpaPages {
gpa: *gpa,
page_type: hv_isolated_page_type_HV_ISOLATED_PAGE_TYPE_VMSA,
page_size: hv_isolated_page_size_HV_ISOLATED_PAGE_SIZE_4KB,
});
}
IgvmDirectiveHeader::SnpIdBlock {
compatibility_mask,
author_key_enabled,
reserved,
ld,
family_id,
image_id,
version,
guest_svn,
id_key_algorithm,
author_key_algorithm,
id_key_signature,
id_public_key,
author_key_signature,
author_public_key,
} => {
loaded_info.snp_id_block.compatibility_mask = *compatibility_mask;
loaded_info.snp_id_block.author_key_enabled = *author_key_enabled;
loaded_info.snp_id_block.reserved[..3].copy_from_slice(reserved);
loaded_info.snp_id_block.ld[..48].copy_from_slice(ld);
loaded_info.snp_id_block.family_id[..16].copy_from_slice(family_id);
loaded_info.snp_id_block.image_id[..16].copy_from_slice(image_id);
loaded_info.snp_id_block.version = *version;
loaded_info.snp_id_block.guest_svn = *guest_svn;
loaded_info.snp_id_block.id_key_algorithm = *id_key_algorithm;
loaded_info.snp_id_block.author_key_algorithm = *author_key_algorithm;
loaded_info.snp_id_block.id_key_signature = **id_key_signature;
loaded_info.snp_id_block.id_public_key = **id_public_key;
loaded_info.snp_id_block.author_key_signature = **author_key_signature;
loaded_info.snp_id_block.author_public_key = **author_public_key;
}
IgvmDirectiveHeader::X64VbsVpContext {
vtl: _,
registers: _,
compatibility_mask: _,
} => {
todo!("VbsVpContext not supported");
}
IgvmDirectiveHeader::VbsMeasurement { .. } => {
todo!("VbsMeasurement not supported")
}
IgvmDirectiveHeader::ParameterInsert(IGVM_VHS_PARAMETER_INSERT {
gpa,
compatibility_mask: _,
parameter_area_index,
}) => {
debug_assert!(gpa % HV_PAGE_SIZE == 0);
let area = parameter_areas
.get_mut(parameter_area_index)
.expect("igvmfile should be valid");
match area {
ParameterAreaState::Allocated { data, max_size } => loader
.import_pages(
gpa / HV_PAGE_SIZE,
*max_size / HV_PAGE_SIZE,
BootPageAcceptance::ExclusiveUnmeasured,
data,
)
.map_err(Error::Loader)?,
ParameterAreaState::Inserted => panic!("igvmfile is invalid, multiple insert"),
}
*area = ParameterAreaState::Inserted;
gpas.push(GpaPages {
gpa: *gpa,
page_type: hv_isolated_page_type_HV_ISOLATED_PAGE_TYPE_UNMEASURED,
page_size: hv_isolated_page_size_HV_ISOLATED_PAGE_SIZE_4KB,
});
}
IgvmDirectiveHeader::ErrorRange { .. } => {
todo!("Error Range not supported")
}
_ => {
todo!("Header not supported!!")
}
}
}
debug!("Dumping the contents of VMSA page: {:x?}", loaded_info.vmsa);
Ok(loaded_info)
}
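A minimal sketch (not part of this commit) of how a boot path could invoke load_igvm; the wrapper function and the kernel command line are assumptions for illustration:

#[cfg(feature = "igvm")]
fn load_igvm_payload(
    igvm: &std::fs::File,
    memory_manager: Arc<Mutex<MemoryManager>>,
    cpu_manager: Arc<Mutex<CpuManager>>,
) -> Result<Box<IgvmLoadedInfo>, Error> {
    // Parses the IGVM directives and writes the resulting pages, parameter
    // areas and VP context into guest memory, returning the loaded state
    // (VMSA GPA, SNP ID block, imported GPAs) collected along the way.
    load_igvm(igvm, memory_manager, cpu_manager, "console=ttyS0 root=/dev/vda1")
}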

vmm/src/igvm/loader.rs (new file)

@@ -0,0 +1,143 @@
// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
//
// Copyright © 2023, Microsoft Corporation
//
use crate::igvm::{BootPageAcceptance, StartupMemoryType, HV_PAGE_SIZE};
use range_map_vec::{Entry, RangeMap};
use thiserror::Error;
use vm_memory::bitmap::AtomicBitmap;
use vm_memory::{
Bytes, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic, GuestMemoryMmap,
GuestMemoryRegion,
};
/// Structure to hold the guest memory info/layout, used to check whether
/// imported pages fall within accepted guest memory, and to track the total
/// number of bytes written to guest memory.
pub struct Loader {
memory: GuestMemoryAtomic<GuestMemoryMmap<AtomicBitmap>>,
accepted_ranges: RangeMap<u64, BootPageAcceptance>,
bytes_written: u64,
}
#[derive(Debug)]
pub struct ImportRegion {
pub page_base: u64,
pub page_count: u64,
pub acceptance: BootPageAcceptance,
}
#[derive(Debug, Error)]
pub enum Error {
#[error("overlaps with existing import region {0:?}")]
OverlapsExistingRegion(ImportRegion),
#[error("memory unavailable")]
MemoryUnavailable,
#[error("failed to import pages")]
ImportPagesFailed,
#[error("invalid vp context memory")]
InvalidVpContextMemory(&'static str),
#[error("data larger than imported region")]
DataTooLarge,
}
impl Loader {
pub fn new(memory: GuestMemoryAtomic<GuestMemoryMmap<AtomicBitmap>>) -> Loader {
Loader {
memory,
accepted_ranges: RangeMap::new(),
bytes_written: 0,
}
}
/// Accept a new page range with a given acceptance into the map of accepted ranges.
pub fn accept_new_range(
&mut self,
page_base: u64,
page_count: u64,
acceptance: BootPageAcceptance,
) -> Result<(), Error> {
let page_end = page_base + page_count - 1;
match self.accepted_ranges.entry(page_base..=page_end) {
Entry::Overlapping(entry) => {
let &(overlap_start, overlap_end, overlap_acceptance) = entry.get();
Err(Error::OverlapsExistingRegion(ImportRegion {
page_base: overlap_start,
page_count: overlap_end - overlap_start + 1,
acceptance: overlap_acceptance,
}))
}
Entry::Vacant(entry) => {
entry.insert(acceptance);
Ok(())
}
}
}
pub fn import_pages(
&mut self,
page_base: u64,
page_count: u64,
acceptance: BootPageAcceptance,
data: &[u8],
) -> Result<(), Error> {
// The destination page range must be large enough to hold the data.
if page_count * HV_PAGE_SIZE < data.len() as u64 {
return Err(Error::DataTooLarge);
}
// Track accepted ranges for duplicate imports.
self.accept_new_range(page_base, page_count, acceptance)?;
let bytes_written = self
.memory
.memory()
.write(data, GuestAddress(page_base * HV_PAGE_SIZE))
.map_err(|_e| {
debug!("Importing pages failed due to MemoryError");
Error::MemoryUnavailable
})?;
if bytes_written != (page_count * HV_PAGE_SIZE) as usize {
return Err(Error::ImportPagesFailed);
}
self.bytes_written += bytes_written as u64;
Ok(())
}
pub fn verify_startup_memory_available(
&mut self,
page_base: u64,
page_count: u64,
memory_type: StartupMemoryType,
) -> Result<(), Error> {
if memory_type != StartupMemoryType::Ram {
return Err(Error::MemoryUnavailable);
}
let mut memory_found = false;
for range in self.memory.memory().iter() {
// Today, the memory layout only describes normal ram and mmio. Thus the memory
// request must live completely within a single range, since any gaps are mmio.
let base_address = page_base * HV_PAGE_SIZE;
let end_address = base_address + (page_count * HV_PAGE_SIZE) - 1;
if base_address >= range.start_addr().0 && base_address < range.last_addr().0 {
if end_address > range.last_addr().0 {
debug!("startup memory end bigger than the current range");
return Err(Error::MemoryUnavailable);
}
memory_found = true;
}
}
if memory_found {
Ok(())
} else {
debug!("no valid memory range available for startup memory verify");
Err(Error::MemoryUnavailable)
}
}
}
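A minimal sketch (not part of this commit) showing the Loader bookkeeping: pages are addressed by 4 KiB page index, and re-importing an already accepted page fails with OverlapsExistingRegion; the demo function is illustrative only:

fn demo_loader(
    guest_memory: GuestMemoryAtomic<GuestMemoryMmap<AtomicBitmap>>,
) -> Result<(), Error> {
    let mut loader = Loader::new(guest_memory);
    let page = [0u8; HV_PAGE_SIZE as usize];

    // Import one measured page at GPA 0x1000 (page index 1).
    loader.import_pages(1, 1, BootPageAcceptance::Exclusive, &page)?;

    // A second import overlapping page index 1 is rejected by
    // accept_new_range(), regardless of the acceptance requested.
    assert!(loader
        .import_pages(1, 1, BootPageAcceptance::ExclusiveUnmeasured, &page)
        .is_err());
    Ok(())
}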

vmm/src/igvm/mod.rs (new file)

@@ -0,0 +1,81 @@
// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
//
// Copyright © 2023, Microsoft Corporation
//
/*
* The IGVM (Independent Guest Virtual Machine) file format
* is designed to encapsulate all information required to
* launch a virtual machine on any given virtualization stack,
* with support for different isolation technologies such as
* AMD SEV-SNP and Intel TDX.
* At a conceptual level, this file format is a set of commands created
* by the tool that generated the file, used by the loader to construct
* the initial guest state. The file format also contains measurement
* information that the underlying platform will use to confirm that
* the file was loaded correctly and signed by the appropriate authorities.
*
* The IGVM file is generated by the tool:
* https://github.com/microsoft/igvm-tooling
*
* The IGVM file is parsed by the following crates:
* https://github.com/microsoft/igvm
*
* This module takes the IGVM file, parses it, and loads it into guest
* memory. Currently IGVM is only supported on the Microsoft Hypervisor
* (MSHV), both for booting a legacy VM and for booting an SEV-SNP based
* isolated VM.
*/
pub mod igvm_loader;
mod loader;
use igvm_defs::IGVM_VHS_SNP_ID_BLOCK;
use igvm_parser::snp_defs::SevVmsa;
use zerocopy::FromZeroes;
#[derive(Debug, Clone)]
pub struct IgvmLoadedInfo {
pub gpas: Vec<u64>,
pub vmsa_gpa: u64,
pub snp_id_block: IGVM_VHS_SNP_ID_BLOCK,
pub vmsa: SevVmsa,
}
impl Default for IgvmLoadedInfo {
fn default() -> Self {
IgvmLoadedInfo {
gpas: Vec::new(),
vmsa_gpa: 0,
snp_id_block: IGVM_VHS_SNP_ID_BLOCK::new_zeroed(),
vmsa: SevVmsa::new_zeroed(),
}
}
}
pub const HV_PAGE_SIZE: u64 = 4096;
/// The page acceptance used for importing pages into the initial launch context of the guest.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum BootPageAcceptance {
/// The page is accepted exclusive (no host visibility) and the page data is measured.
Exclusive,
/// The page is accepted exclusive (no host visibility) and the page data is unmeasured.
ExclusiveUnmeasured,
/// The page contains hardware-specific VP context information.
VpContext,
/// This page communicates error information to the host.
ErrorPage,
/// This page communicates hardware-specific secret information and the page data is unmeasured.
SecretsPage,
/// This page includes guest-specified CPUID information.
CpuidPage,
/// This page should include the enumeration of extended state CPUID leaves.
CpuidExtendedStatePage,
}
/// The startup memory type used to notify a well behaved host that memory should be present before attempting to
/// start the guest.
#[allow(dead_code)]
#[derive(Debug, PartialEq, Eq)]
pub enum StartupMemoryType {
/// The range is normal memory.
Ram,
}
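A simplified restatement (not part of this commit) of how igvm_loader.rs maps IGVM page-data types onto these acceptance values; IgvmPageDataType comes from the igvm_defs crate:

#[cfg(feature = "igvm")]
fn acceptance_for(data_type: IgvmPageDataType, unmeasured: bool) -> BootPageAcceptance {
    match data_type {
        // Normal pages are measured unless the directive flags them unmeasured.
        IgvmPageDataType::NORMAL if unmeasured => BootPageAcceptance::ExclusiveUnmeasured,
        IgvmPageDataType::NORMAL => BootPageAcceptance::Exclusive,
        IgvmPageDataType::SECRETS => BootPageAcceptance::SecretsPage,
        IgvmPageDataType::CPUID_DATA => BootPageAcceptance::CpuidPage,
        _ => todo!("unsupported IgvmPageDataType"),
    }
}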

vmm/src/lib.rs

@@ -69,6 +69,8 @@ pub mod device_manager;
pub mod device_tree;
#[cfg(feature = "guest_debug")]
mod gdb;
#[cfg(feature = "igvm")]
mod igvm;
pub mod interrupt;
pub mod memory_manager;
pub mod migration;