arch: move test cases to vmm crate

This saves us from adding a "kvm" feature to the arch crate merely for
the purpose of running tests.

Signed-off-by: Wei Liu <liuwe@microsoft.com>

Authored by Wei Liu on 2020-07-08 12:48:46 +00:00; committed by Samuel Ortiz
parent 598eaf9f86
commit d80e383dbb
8 changed files with 277 additions and 259 deletions
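
All of the tests moved below share the same preamble: open the hypervisor, create a VM, and usually a vcpu. That preamble is what ties them to KVM, and is why keeping them in arch would have required the feature gate. A minimal sketch, using only calls that appear verbatim in the hunks below:

// Minimal sketch of the preamble every moved test shares. These calls are
// what tie the tests to KVM: the crate hosting them must already pull in
// the hypervisor crate's KVM support, which vmm does and arch does not.
#[cfg(test)]
mod preamble_sketch {
    #[test]
    fn opens_kvm() {
        let hv = hypervisor::new().unwrap();
        let vm = hv.create_vm().expect("new VM fd creation failed");
        let _vcpu = vm.create_vcpu(0).unwrap();
    }
}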

@@ -589,79 +589,3 @@ fn create_pci_nodes(fdt: &mut Vec<u8>, pci_device_base: u64, pci_device_size: u64
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use crate::aarch64::gic::create_gic;
use crate::aarch64::layout;
use std::sync::Arc;
const LEN: u64 = 4096;
#[derive(Clone, Debug)]
pub struct MMIODeviceInfo {
addr: u64,
irq: u32,
}
impl DeviceInfoForFDT for MMIODeviceInfo {
fn addr(&self) -> u64 {
self.addr
}
fn irq(&self) -> u32 {
self.irq
}
fn length(&self) -> u64 {
LEN
}
}
#[test]
fn test_create_fdt_with_devices() {
let mut regions = Vec::new();
regions.push((
GuestAddress(layout::RAM_64BIT_START),
(layout::FDT_MAX_SIZE + 0x1000) as usize,
));
let mem = GuestMemoryMmap::from_ranges(&regions).expect("Cannot initialize memory");
let dev_info: HashMap<(DeviceType, std::string::String), MMIODeviceInfo> = [
(
(DeviceType::Serial, DeviceType::Serial.to_string()),
MMIODeviceInfo { addr: 0x00, irq: 1 },
),
(
(DeviceType::Virtio(1), "virtio".to_string()),
MMIODeviceInfo {
addr: 0x00 + LEN,
irq: 2,
},
),
(
(DeviceType::RTC, "rtc".to_string()),
MMIODeviceInfo {
addr: 0x00 + 2 * LEN,
irq: 3,
},
),
]
.iter()
.cloned()
.collect();
let hv = hypervisor::new().unwrap();
let vm = hv.create_vm().unwrap();
let gic = create_gic(&vm, 1, false).unwrap();
assert!(create_fdt(
&mem,
&CString::new("console=tty0").unwrap(),
vec![0],
&dev_info,
&gic,
&None,
&None,
)
.is_ok())
}
}

@@ -170,16 +170,3 @@ pub fn create_gic(
.or_else(|_| GICv3::new(vm, vcpu_count).or_else(|_| GICv2::new(vm, vcpu_count)))
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_create_gic() {
let hv = hypervisor::new().unwrap();
let vm = hv.create_vm().unwrap();
assert!(create_gic(&vm, 1, false).is_ok());
}
}
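
The context lines above also show why create_gic makes a good smoke test: it walks a chain of Result::or_else fallbacks, trying the newest GIC model first. A standalone sketch of that pattern, with illustrative stand-ins rather than the real constructors:

// The or_else fallback pattern from create_gic, in isolation: try the
// newest interrupt-controller model first, then fall back to older ones.
// try_new and the model names are illustrative, not the real API.
fn try_new(model: &'static str, supported: &[&str]) -> Result<&'static str, ()> {
    if supported.contains(&model) { Ok(model) } else { Err(()) }
}

fn create(supported: &[&str]) -> Result<&'static str, ()> {
    try_new("GICv3-ITS", supported)
        .or_else(|_| try_new("GICv3", supported))
        .or_else(|_| try_new("GICv2", supported))
}

fn main() {
    // A host without an ITS still ends up with a working, older model.
    assert_eq!(create(&["GICv3", "GICv2"]), Ok("GICv3"));
    assert_eq!(create(&["GICv2"]), Ok("GICv2"));
}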

@@ -154,43 +154,3 @@ pub fn setup_regs(
pub fn read_mpidr(vcpu: &Arc<dyn hypervisor::Vcpu>) -> Result<u64> {
vcpu.get_one_reg(MPIDR_EL1).map_err(Error::GetSysRegister)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::aarch64::layout;
use vm_memory::{GuestAddress, GuestMemoryMmap};
#[test]
fn test_setup_regs() {
let hv = hypervisor::new().unwrap();
let vm = hv.create_vm().unwrap();
let vcpu = vm.create_vcpu(0).unwrap();
let mut regions = Vec::new();
regions.push((
GuestAddress(layout::RAM_64BIT_START),
(layout::FDT_MAX_SIZE + 0x1000) as usize,
));
let mem = GuestMemoryMmap::from_ranges(&regions).expect("Cannot initialize memory");
let mut kvi: kvm_bindings::kvm_vcpu_init = kvm_bindings::kvm_vcpu_init::default();
vm.get_preferred_target(&mut kvi).unwrap();
vcpu.vcpu_init(&kvi).unwrap();
assert!(setup_regs(&vcpu, 0, 0x0, &mem).is_ok());
}
#[test]
fn test_read_mpidr() {
let hv = hypervisor::new().unwrap();
let vm = hv.create_vm().unwrap();
let vcpu = vm.create_vcpu(0).unwrap();
let mut kvi: kvm_bindings::kvm_vcpu_init = kvm_bindings::kvm_vcpu_init::default();
vm.get_preferred_target(&mut kvi).unwrap();
// Must fail when the vcpu is not yet initialized.
assert!(read_mpidr(&vcpu).is_err());
vcpu.vcpu_init(&kvi).unwrap();
assert_eq!(read_mpidr(&vcpu).unwrap(), 0x80000000);
}
}

@@ -23,12 +23,12 @@ pub enum Error {
pub type Result<T> = result::Result<T, hypervisor::HypervisorCpuError>;
// Defines poached from apicdef.h kernel header.
-const APIC_LVT0: usize = 0x350;
-const APIC_LVT1: usize = 0x360;
-const APIC_MODE_NMI: u32 = 0x4;
-const APIC_MODE_EXTINT: u32 = 0x7;
+pub const APIC_LVT0: usize = 0x350;
+pub const APIC_LVT1: usize = 0x360;
+pub const APIC_MODE_NMI: u32 = 0x4;
+pub const APIC_MODE_EXTINT: u32 = 0x7;
-fn get_klapic_reg(klapic: &LapicState, reg_offset: usize) -> u32 {
+pub fn get_klapic_reg(klapic: &LapicState, reg_offset: usize) -> u32 {
let sliceu8 = unsafe {
// This array is only accessed as parts of a u32 word, so interpret it as a u8 array.
// Cursors are only readable on arrays of u8, not i8(c_char).
@@ -41,7 +41,7 @@ fn get_klapic_reg(klapic: &LapicState, reg_offset: usize) -> u32 {
.expect("Failed to read klapic register")
}
-fn set_klapic_reg(klapic: &mut LapicState, reg_offset: usize, value: u32) {
+pub fn set_klapic_reg(klapic: &mut LapicState, reg_offset: usize, value: u32) {
let sliceu8 = unsafe {
// This array is only accessed as parts of a u32 word, so interpret it as a u8 array.
// Cursors are only readable on arrays of u8, not i8(c_char).
@@ -54,7 +54,7 @@ fn set_klapic_reg(klapic: &mut LapicState, reg_offset: usize, value: u32) {
.expect("Failed to write klapic register")
}
-fn set_apic_delivery_mode(reg: u32, mode: u32) -> u32 {
+pub fn set_apic_delivery_mode(reg: u32, mode: u32) -> u32 {
((reg) & !0x700) | ((mode) << 8)
}
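
set_apic_delivery_mode above is a plain bit-field update: the delivery mode of an LVT entry occupies bits 8..=10, hence the 0x700 mask and the shift by 8. A standalone check of that arithmetic:

// Standalone check of set_apic_delivery_mode: bits 8..=10 (mask 0x700)
// hold the delivery mode; all other bits are preserved.
const APIC_MODE_NMI: u32 = 0x4;
const APIC_MODE_EXTINT: u32 = 0x7;

fn set_apic_delivery_mode(reg: u32, mode: u32) -> u32 {
    (reg & !0x700) | (mode << 8)
}

fn main() {
    let reg = 0x0001_0400; // LVT mask bit (16) set, delivery mode = NMI
    let ext = set_apic_delivery_mode(reg, APIC_MODE_EXTINT);
    assert_eq!(ext, 0x0001_0700); // mode changed, mask bit untouched
    assert_eq!(set_apic_delivery_mode(ext, APIC_MODE_NMI), reg);
}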
@@ -118,30 +118,4 @@ mod tests {
let after: Vec<u32> = v.iter().map(|x| ((*x & !0x700) | ((2) << 8))).collect();
assert_eq!(v, after);
}
#[test]
fn test_setlint() {
let hv = hypervisor::new().unwrap();
let vm = hv.create_vm().expect("new VM fd creation failed");
assert!(hv.check_capability(hypervisor::kvm::Cap::Irqchip));
// Calling get_lapic will fail if there is no irqchip beforehand.
assert!(vm.create_irq_chip().is_ok());
let vcpu = vm.create_vcpu(0).unwrap();
let klapic_before: LapicState = vcpu.get_lapic().unwrap();
// Compute the value that is expected to represent LVT0 and LVT1.
let lint0 = get_klapic_reg(&klapic_before, APIC_LVT0);
let lint1 = get_klapic_reg(&klapic_before, APIC_LVT1);
let lint0_mode_expected = set_apic_delivery_mode(lint0, APIC_MODE_EXTINT);
let lint1_mode_expected = set_apic_delivery_mode(lint1, APIC_MODE_NMI);
set_lint(&vcpu).unwrap();
// Compute the value that represents LVT0 and LVT1 after set_lint.
let klapic_actual: LapicState = vcpu.get_lapic().unwrap();
let lint0_mode_actual = get_klapic_reg(&klapic_actual, APIC_LVT0);
let lint1_mode_actual = get_klapic_reg(&klapic_actual, APIC_LVT1);
assert_eq!(lint0_mode_expected, lint0_mode_actual);
assert_eq!(lint1_mode_expected, lint1_mode_actual);
}
}

@@ -160,7 +160,7 @@ fn write_idt_value(val: u64, guest_mem: &GuestMemoryMmap) -> Result<()> {
.map_err(Error::WriteIDT)
}
-fn configure_segments_and_sregs(
+pub fn configure_segments_and_sregs(
mem: &GuestMemoryMmap,
sregs: &mut SpecialRegisters,
boot_prot: BootProtocol,
@@ -222,7 +222,7 @@ fn configure_segments_and_sregs(
Ok(())
}
-fn setup_page_tables(mem: &GuestMemoryMmap, sregs: &mut SpecialRegisters) -> Result<()> {
+pub fn setup_page_tables(mem: &GuestMemoryMmap, sregs: &mut SpecialRegisters) -> Result<()> {
// Puts the PML5 or PML4 table right after the zero page, aligned to 4k.
if unsafe { std::arch::x86_64::__cpuid(7).ecx } & (1 << 16) != 0 {
// Entry covering VA [0..256TB)
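
The cpuid check above probes CPUID leaf 7, sub-leaf 0, ECX bit 16 (the LA57 flag) to choose between five (PML5) and four (PML4) paging levels. Extracted as a standalone probe (x86_64 only):

// Standalone version of the 5-level-paging probe from setup_page_tables:
// CPUID.(EAX=7, ECX=0):ECX bit 16 is the LA57 flag.
fn main() {
    let la57 = unsafe { std::arch::x86_64::__cpuid(7).ecx } & (1 << 16) != 0;
    // With LA57 the guest root table is a PML5, otherwise a PML4.
    println!("top-level page table: {}", if la57 { "PML5" } else { "PML4" });
}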
@@ -362,99 +362,4 @@ mod tests {
assert_eq!(X86_CR4_PAE, sregs.cr4);
assert_eq!(X86_CR0_PG, sregs.cr0);
}
#[test]
fn test_setup_fpu() {
let hv = hypervisor::new().unwrap();
let vm = hv.create_vm().expect("new VM fd creation failed");
let vcpu = vm.create_vcpu(0).unwrap();
setup_fpu(&vcpu).unwrap();
let expected_fpu: FpuState = FpuState {
fcw: 0x37f,
mxcsr: 0x1f80,
..Default::default()
};
let actual_fpu: FpuState = vcpu.get_fpu().unwrap();
// TODO: auto-generate kvm related structures with PartialEq on.
assert_eq!(expected_fpu.fcw, actual_fpu.fcw);
// Setting the mxcsr register from FpuState inside setup_fpu does not influence anything.
// See 'kvm_arch_vcpu_ioctl_set_fpu' in arch/x86/kvm/x86.c.
// The mxcsr will stay 0 and the assert below would fail. We should decide whether to
// remove it altogether.
// assert!(expected_fpu.mxcsr == actual_fpu.mxcsr);
}
#[test]
fn test_setup_msrs() {
use hypervisor::arch::x86::msr_index;
use hypervisor::x86_64::{MsrEntries, MsrEntry};
let hv = hypervisor::new().unwrap();
let vm = hv.create_vm().expect("new VM fd creation failed");
let vcpu = vm.create_vcpu(0).unwrap();
setup_msrs(&vcpu).unwrap();
// This test will check against the last MSR entry configured (the tenth one).
// See create_msr_entries for details.
let mut msrs = MsrEntries::from_entries(&[MsrEntry {
index: msr_index::MSR_IA32_MISC_ENABLE,
..Default::default()
}]);
// get_msrs returns the number of MSRs that it succeeded in reading. We only want to read
// one in this test scenario.
let read_msrs = vcpu.get_msrs(&mut msrs).unwrap();
assert_eq!(read_msrs, 1);
// Official entries that were set up when we did setup_msrs. We need to assert that the
// tenth one (i.e. the one with index msr_index::MSR_IA32_MISC_ENABLE) has the data we
// expect.
let entry_vec = hypervisor::x86_64::boot_msr_entries();
assert_eq!(entry_vec.as_slice()[9], msrs.as_slice()[0]);
}
#[test]
fn test_setup_regs() {
let hv = hypervisor::new().unwrap();
let vm = hv.create_vm().expect("new VM fd creation failed");
let vcpu = vm.create_vcpu(0).unwrap();
let expected_regs: StandardRegisters = StandardRegisters {
rflags: 0x0000000000000002u64,
rip: 1,
rsp: 2,
rbp: 2,
rsi: 3,
..Default::default()
};
setup_regs(
&vcpu,
expected_regs.rip,
expected_regs.rsp,
expected_regs.rsi,
BootProtocol::LinuxBoot,
)
.unwrap();
let actual_regs: StandardRegisters = vcpu.get_regs().unwrap();
assert_eq!(actual_regs, expected_regs);
}
#[test]
fn test_setup_sregs() {
let hv = hypervisor::new().unwrap();
let vm = hv.create_vm().expect("new VM fd creation failed");
let vcpu = vm.create_vcpu(0).unwrap();
let mut expected_sregs: SpecialRegisters = vcpu.get_sregs().unwrap();
let gm = create_guest_mem();
configure_segments_and_sregs(&gm, &mut expected_sregs, BootProtocol::LinuxBoot).unwrap();
setup_page_tables(&gm, &mut expected_sregs).unwrap();
setup_sregs(&gm, &vcpu, BootProtocol::LinuxBoot).unwrap();
let actual_sregs: SpecialRegisters = vcpu.get_sregs().unwrap();
assert_eq!(expected_sregs, actual_sregs);
}
}

@@ -1404,3 +1404,178 @@ impl Snapshottable for CpuManager {
impl Transportable for CpuManager {}
impl Migratable for CpuManager {}
#[cfg(target_arch = "x86_64")]
#[cfg(test)]
mod tests {
use super::*;
use arch::x86_64::interrupts::*;
use arch::x86_64::regs::*;
use arch::x86_64::BootProtocol;
use hypervisor::x86_64::{FpuState, LapicState, SpecialRegisters, StandardRegisters};
#[test]
fn test_setlint() {
let hv = hypervisor::new().unwrap();
let vm = hv.create_vm().expect("new VM fd creation failed");
assert!(hv.check_capability(hypervisor::kvm::Cap::Irqchip));
// Calling get_lapic will fail if there is no irqchip beforehand.
assert!(vm.create_irq_chip().is_ok());
let vcpu = vm.create_vcpu(0).unwrap();
let klapic_before: LapicState = vcpu.get_lapic().unwrap();
// Compute the value that is expected to represent LVT0 and LVT1.
let lint0 = get_klapic_reg(&klapic_before, APIC_LVT0);
let lint1 = get_klapic_reg(&klapic_before, APIC_LVT1);
let lint0_mode_expected = set_apic_delivery_mode(lint0, APIC_MODE_EXTINT);
let lint1_mode_expected = set_apic_delivery_mode(lint1, APIC_MODE_NMI);
set_lint(&vcpu).unwrap();
// Compute the value that represents LVT0 and LVT1 after set_lint.
let klapic_actual: LapicState = vcpu.get_lapic().unwrap();
let lint0_mode_actual = get_klapic_reg(&klapic_actual, APIC_LVT0);
let lint1_mode_actual = get_klapic_reg(&klapic_actual, APIC_LVT1);
assert_eq!(lint0_mode_expected, lint0_mode_actual);
assert_eq!(lint1_mode_expected, lint1_mode_actual);
}
#[test]
fn test_setup_fpu() {
let hv = hypervisor::new().unwrap();
let vm = hv.create_vm().expect("new VM fd creation failed");
let vcpu = vm.create_vcpu(0).unwrap();
setup_fpu(&vcpu).unwrap();
let expected_fpu: FpuState = FpuState {
fcw: 0x37f,
mxcsr: 0x1f80,
..Default::default()
};
let actual_fpu: FpuState = vcpu.get_fpu().unwrap();
// TODO: auto-generate kvm related structures with PartialEq on.
assert_eq!(expected_fpu.fcw, actual_fpu.fcw);
// Setting the mxcsr register from FpuState inside setup_fpu does not influence anything.
// See 'kvm_arch_vcpu_ioctl_set_fpu' in arch/x86/kvm/x86.c.
// The mxcsr will stay 0 and the assert below would fail. We should decide whether to
// remove it altogether.
// assert!(expected_fpu.mxcsr == actual_fpu.mxcsr);
}
#[test]
fn test_setup_msrs() {
use hypervisor::arch::x86::msr_index;
use hypervisor::x86_64::{MsrEntries, MsrEntry};
let hv = hypervisor::new().unwrap();
let vm = hv.create_vm().expect("new VM fd creation failed");
let vcpu = vm.create_vcpu(0).unwrap();
setup_msrs(&vcpu).unwrap();
// This test will check against the last MSR entry configured (the tenth one).
// See create_msr_entries for details.
let mut msrs = MsrEntries::from_entries(&[MsrEntry {
index: msr_index::MSR_IA32_MISC_ENABLE,
..Default::default()
}]);
// get_msrs returns the number of MSRs that it succeeded in reading. We only want to read
// one in this test scenario.
let read_msrs = vcpu.get_msrs(&mut msrs).unwrap();
assert_eq!(read_msrs, 1);
// Official entries that were set up when we did setup_msrs. We need to assert that the
// tenth one (i.e. the one with index msr_index::MSR_IA32_MISC_ENABLE) has the data we
// expect.
let entry_vec = hypervisor::x86_64::boot_msr_entries();
assert_eq!(entry_vec.as_slice()[9], msrs.as_slice()[0]);
}
#[test]
fn test_setup_regs() {
let hv = hypervisor::new().unwrap();
let vm = hv.create_vm().expect("new VM fd creation failed");
let vcpu = vm.create_vcpu(0).unwrap();
let expected_regs: StandardRegisters = StandardRegisters {
rflags: 0x0000000000000002u64,
rip: 1,
rsp: 2,
rbp: 2,
rsi: 3,
..Default::default()
};
setup_regs(
&vcpu,
expected_regs.rip,
expected_regs.rsp,
expected_regs.rsi,
BootProtocol::LinuxBoot,
)
.unwrap();
let actual_regs: StandardRegisters = vcpu.get_regs().unwrap();
assert_eq!(actual_regs, expected_regs);
}
#[test]
fn test_setup_sregs() {
let hv = hypervisor::new().unwrap();
let vm = hv.create_vm().expect("new VM fd creation failed");
let vcpu = vm.create_vcpu(0).unwrap();
let mut expected_sregs: SpecialRegisters = vcpu.get_sregs().unwrap();
let gm = GuestMemoryMmap::from_ranges(&vec![(GuestAddress(0), 0x10000)]).unwrap();
configure_segments_and_sregs(&gm, &mut expected_sregs, BootProtocol::LinuxBoot).unwrap();
setup_page_tables(&gm, &mut expected_sregs).unwrap();
setup_sregs(&gm, &vcpu, BootProtocol::LinuxBoot).unwrap();
let actual_sregs: SpecialRegisters = vcpu.get_sregs().unwrap();
assert_eq!(expected_sregs, actual_sregs);
}
}
#[cfg(target_arch = "aarch64")]
#[cfg(test)]
mod tests {
use arch::aarch64::layout;
use arch::aarch64::regs::*;
use hypervisor::kvm::kvm_bindings;
use vm_memory::{GuestAddress, GuestMemoryMmap};
#[test]
fn test_setup_regs() {
let hv = hypervisor::new().unwrap();
let vm = hv.create_vm().unwrap();
let vcpu = vm.create_vcpu(0).unwrap();
let mut regions = Vec::new();
regions.push((
GuestAddress(layout::RAM_64BIT_START),
(layout::FDT_MAX_SIZE + 0x1000) as usize,
));
let mem = GuestMemoryMmap::from_ranges(&regions).expect("Cannot initialize memory");
let mut kvi: kvm_bindings::kvm_vcpu_init = kvm_bindings::kvm_vcpu_init::default();
vm.get_preferred_target(&mut kvi).unwrap();
vcpu.vcpu_init(&kvi).unwrap();
assert!(setup_regs(&vcpu, 0, 0x0, &mem).is_ok());
}
#[test]
fn test_read_mpidr() {
let hv = hypervisor::new().unwrap();
let vm = hv.create_vm().unwrap();
let vcpu = vm.create_vcpu(0).unwrap();
let mut kvi: kvm_bindings::kvm_vcpu_init = kvm_bindings::kvm_vcpu_init::default();
vm.get_preferred_target(&mut kvi).unwrap();
// Must fail when the vcpu is not yet initialized.
assert!(read_mpidr(&vcpu).is_err());
vcpu.vcpu_init(&kvi).unwrap();
assert_eq!(read_mpidr(&vcpu).unwrap(), 0x80000000);
}
}
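
The expected value in test_read_mpidr is not arbitrary: bit 31 of MPIDR_EL1 is RES1, and vcpu 0 has all affinity fields zero, which yields exactly 0x80000000. As arithmetic:

// Why vcpu 0 reads MPIDR_EL1 == 0x80000000: bit 31 of MPIDR_EL1 is RES1,
// and the affinity fields Aff0..Aff2 (bits 0..=23) are zero for vcpu 0.
fn main() {
    let mpidr: u64 = 0x8000_0000;
    assert_eq!(mpidr, 1 << 31); // only the RES1 bit is set
    assert_eq!(mpidr & 0x00ff_ffff, 0); // Aff0..Aff2 all zero
}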

@@ -432,3 +432,17 @@ pub mod kvm {
}
}
}
#[cfg(target_arch = "aarch64")]
#[cfg(test)]
mod tests {
use arch::aarch64::gic::create_gic;
#[test]
fn test_create_gic() {
let hv = hypervisor::new().unwrap();
let vm = hv.create_vm().unwrap();
assert!(create_gic(&vm, 1, false).is_ok());
}
}

@@ -1490,6 +1490,85 @@ mod tests {
}
}
#[cfg(target_arch = "aarch64")]
#[cfg(test)]
mod tests {
use super::*;
use arch::aarch64::fdt::create_fdt;
use arch::aarch64::gic::create_gic;
use arch::aarch64::{layout, DeviceInfoForFDT};
use arch::DeviceType;
use vm_memory::{GuestAddress, GuestMemoryMmap};
const LEN: u64 = 4096;
#[derive(Clone, Debug)]
pub struct MMIODeviceInfo {
addr: u64,
irq: u32,
}
impl DeviceInfoForFDT for MMIODeviceInfo {
fn addr(&self) -> u64 {
self.addr
}
fn irq(&self) -> u32 {
self.irq
}
fn length(&self) -> u64 {
LEN
}
}
#[test]
fn test_create_fdt_with_devices() {
let mut regions = Vec::new();
regions.push((
GuestAddress(layout::RAM_64BIT_START),
(layout::FDT_MAX_SIZE + 0x1000) as usize,
));
let mem = GuestMemoryMmap::from_ranges(&regions).expect("Cannot initialize memory");
let dev_info: HashMap<(DeviceType, std::string::String), MMIODeviceInfo> = [
(
(DeviceType::Serial, DeviceType::Serial.to_string()),
MMIODeviceInfo { addr: 0x00, irq: 1 },
),
(
(DeviceType::Virtio(1), "virtio".to_string()),
MMIODeviceInfo {
addr: 0x00 + LEN,
irq: 2,
},
),
(
(DeviceType::RTC, "rtc".to_string()),
MMIODeviceInfo {
addr: 0x00 + 2 * LEN,
irq: 3,
},
),
]
.iter()
.cloned()
.collect();
let hv = hypervisor::new().unwrap();
let vm = hv.create_vm().unwrap();
let gic = create_gic(&vm, 1, false).unwrap();
assert!(create_fdt(
&mem,
&CString::new("console=tty0").unwrap(),
vec![0],
&dev_info,
&gic,
&None,
&None,
)
.is_ok())
}
}
#[cfg(target_arch = "x86_64")]
#[test]
pub fn test_vm() {