build: Fixed build errors and warnings on AArch64

This is a preparatory commit to build and test Cloud Hypervisor on AArch64.
All build issues were fixed, but no functionality was introduced.
For x86_64, the code logic was not changed at all.
For AArch64, the architecture-specific part is still empty, and some tricks
were applied to work around lint warnings. Such code will be replaced by
later commits with real functionality.

Signed-off-by: Michael Zhao <michael.zhao@arm.com>
Author:    Michael Zhao <michael.zhao@arm.com>, 2020-05-12 17:49:12 +08:00
Committer: Rob Bradford
Parent:    0090ec2dda
Commit:    1befae872d

10 changed files with 281 additions and 88 deletions


@@ -3,16 +3,26 @@
 pub mod layout;
 
-use memory_model::{GuestAddress, GuestMemory};
+use crate::RegionType;
+use kvm_ioctls::*;
+use vm_memory::{GuestAddress, GuestMemoryMmap, GuestUsize};
 
 /// Stub function that needs to be implemented when aarch64 functionality is added.
-pub fn arch_memory_regions(size: usize) -> Vec<(GuestAddress, usize, RegionType)> {
-    vec![(GuestAddress(0), size, RegionType::Ram)]
+pub fn arch_memory_regions(size: GuestUsize) -> Vec<(GuestAddress, usize, RegionType)> {
+    vec![(GuestAddress(0), size as usize, RegionType::Ram)]
+}
+
+#[derive(Debug, Copy, Clone)]
+/// Specifies the entry point address where the guest must start
+/// executing code.
+pub struct EntryPoint {
+    /// Address in guest memory where the guest must start execution
+    pub entry_addr: GuestAddress,
 }
 
 /// Stub function that needs to be implemented when aarch64 functionality is added.
 pub fn configure_system(
-    _guest_mem: &GuestMemory,
+    _guest_mem: &GuestMemoryMmap,
     _cmdline_addr: GuestAddress,
     _cmdline_size: usize,
     _num_cpus: u8,
@@ -25,3 +35,29 @@ pub fn configure_system(
 pub fn get_reserved_mem_addr() -> usize {
     0
 }
+
+pub fn get_host_cpu_phys_bits() -> u8 {
+    // The value returned here is used to determine the physical address space size
+    // for a VM (the IPA size).
+    // In recent kernel versions, the maximum IPA size supported by the host can be
+    // discovered by querying the KVM_CAP_ARM_VM_IPA_SIZE capability, and the IPA
+    // size for a guest can be configured to a smaller value.
+    // In Cloud Hypervisor we simply use the maximum value for the VM.
+    // Reference: https://lwn.net/Articles/766767/.
+    //
+    // The correct way to query KVM_CAP_ARM_VM_IPA_SIZE is via rust-vmm/kvm-ioctls,
+    // which wraps all the ioctls and provides an easy interface for user hypervisors.
+    // For now the capability is not supported there; a separate patch will be
+    // submitted to rust-vmm to add it.
+    // Until then, a hardcoded value is used as a temporary solution.
+    // It will be replaced once rust-vmm/kvm-ioctls is ready.
+    //
+    40
+}
+
+pub fn check_required_kvm_extensions(kvm: &Kvm) -> super::Result<()> {
+    if !kvm.check_extension(Cap::SignalMsi) {
+        return Err(super::Error::CapabilityMissing(Cap::SignalMsi));
+    }
+    Ok(())
+}
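
For context, the query anticipated in the comment above could look roughly like
the sketch below once the capability is wrapped by rust-vmm/kvm-ioctls. The
Cap::ArmVmIpaSize variant and the check_extension_int() accessor are assumed
names used purely for illustration; they are not part of this commit.

    // Illustrative sketch only: it assumes kvm-ioctls gains a raw capability
    // query (called `check_extension_int` here) and a `Cap::ArmVmIpaSize`
    // variant mapping to KVM_CAP_ARM_VM_IPA_SIZE. Neither is confirmed by
    // this commit.
    use kvm_ioctls::{Cap, Kvm};

    pub fn get_host_cpu_phys_bits(kvm: &Kvm) -> u8 {
        // KVM_CAP_ARM_VM_IPA_SIZE reports the maximum IPA size in bits,
        // or 0 on kernels that only support the legacy 40-bit IPA space.
        let ipa_size = kvm.check_extension_int(Cap::ArmVmIpaSize);
        if ipa_size > 0 {
            ipa_size as u8
        } else {
            40
        }
    }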


@@ -22,6 +22,7 @@ extern crate kvm_ioctls;
 extern crate linux_loader;
 extern crate vm_memory;
 
+use kvm_ioctls::*;
 use std::result;
 
 #[derive(Debug)]
@@ -47,6 +48,8 @@ pub enum Error {
     ModlistSetup(vm_memory::GuestMemoryError),
     /// RSDP Beyond Guest Memory
     RSDPPastRamEnd,
+    /// Capability missing
+    CapabilityMissing(Cap),
 }
 
 pub type Result<T> = result::Result<T, Error>;
@@ -73,8 +76,8 @@ pub mod aarch64;
 #[cfg(target_arch = "aarch64")]
 pub use aarch64::{
-    arch_memory_regions, configure_system, get_reserved_mem_addr, layout::CMDLINE_MAX_SIZE,
-    layout::CMDLINE_START,
+    arch_memory_regions, check_required_kvm_extensions, configure_system, get_host_cpu_phys_bits,
+    get_reserved_mem_addr, layout::CMDLINE_MAX_SIZE, layout::CMDLINE_START, EntryPoint,
 };
@@ -82,11 +85,13 @@ pub mod x86_64;
 #[cfg(target_arch = "x86_64")]
 pub use x86_64::{
-    arch_memory_regions, configure_system, initramfs_load_addr, layout, layout::CMDLINE_MAX_SIZE,
-    layout::CMDLINE_START, regs, BootProtocol, EntryPoint,
+    arch_memory_regions, check_required_kvm_extensions, configure_system, get_host_cpu_phys_bits,
+    initramfs_load_addr, layout, layout::CMDLINE_MAX_SIZE, layout::CMDLINE_START, regs,
+    BootProtocol, EntryPoint,
 };
 
 /// Safe wrapper for `sysconf(_SC_PAGESIZE)`.
+#[cfg(target_arch = "x86_64")]
 #[inline(always)]
 fn pagesize() -> usize {
     // Trivially safe


@@ -13,9 +13,9 @@ pub mod layout;
 #[cfg(not(feature = "acpi"))]
 mod mptable;
 pub mod regs;
 
 use crate::InitramfsConfig;
 use crate::RegionType;
+use kvm_ioctls::*;
 use linux_loader::loader::bootparam::{boot_params, setup_header};
 use linux_loader::loader::elf::start_info::{
     hvm_memmap_table_entry, hvm_modlist_entry, hvm_start_info,
@@ -459,6 +459,47 @@ pub fn initramfs_load_addr(
     Ok(aligned_addr)
 }
 
+pub fn get_host_cpu_phys_bits() -> u8 {
+    use std::arch::x86_64;
+    unsafe {
+        let leaf = x86_64::__cpuid(0x8000_0000);
+
+        // Detect and handle AMD SME (Secure Memory Encryption) properly.
+        // Some physical address bits may become reserved when the feature is enabled.
+        // See AMD64 Architecture Programmer's Manual Volume 2, Section 7.10.1
+        let reduced = if leaf.eax >= 0x8000_001f
+            && leaf.ebx == 0x6874_7541 // Vendor ID: AuthenticAMD
+            && leaf.ecx == 0x444d_4163
+            && leaf.edx == 0x6974_6e65
+            && x86_64::__cpuid(0x8000_001f).eax & 0x1 != 0
+        {
+            (x86_64::__cpuid(0x8000_001f).ebx >> 6) & 0x3f
+        } else {
+            0
+        };
+
+        if leaf.eax >= 0x8000_0008 {
+            let leaf = x86_64::__cpuid(0x8000_0008);
+            ((leaf.eax & 0xff) - reduced) as u8
+        } else {
+            36
+        }
+    }
+}
+
+pub fn check_required_kvm_extensions(kvm: &Kvm) -> super::Result<()> {
+    if !kvm.check_extension(Cap::SignalMsi) {
+        return Err(super::Error::CapabilityMissing(Cap::SignalMsi));
+    }
+
+    if !kvm.check_extension(Cap::TscDeadlineTimer) {
+        return Err(super::Error::CapabilityMissing(Cap::TscDeadlineTimer));
+    }
+
+    if !kvm.check_extension(Cap::SplitIrqchip) {
+        return Err(super::Error::CapabilityMissing(Cap::SplitIrqchip));
+    }
+
+    Ok(())
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
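
The arithmetic in get_host_cpu_phys_bits() added above can be checked against
example CPUID values: leaf 0x8000_0008 reports the physical address width in
EAX[7:0], and on AMD parts with SME enabled leaf 0x8000_001F reports the number
of bits lost to memory encryption in EBX[11:6]. The numbers below are made up
for illustration, not read from real hardware.

    // Same bit arithmetic as get_host_cpu_phys_bits(), on example values.
    fn phys_bits(leaf_8000_0008_eax: u32, sme_reduction: u32) -> u8 {
        ((leaf_8000_0008_eax & 0xff) - sme_reduction) as u8
    }

    fn main() {
        // 48 physical address bits reported, SME not enabled.
        assert_eq!(phys_bits(0x0000_3030, 0), 48);
        // 48 bits reported, 5 bits become reserved with SME enabled.
        assert_eq!(phys_bits(0x0000_3030, 5), 43);
    }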


@@ -192,7 +192,7 @@ impl Tap {
         unsafe {
             let ifru_hwaddr = ifreq.ifr_ifru.ifru_hwaddr.as_mut();
             for (i, v) in addr.get_bytes().iter().enumerate() {
-                ifru_hwaddr.sa_data[i] = *v as i8;
+                ifru_hwaddr.sa_data[i] = *v as c_char;
             }
         }


@@ -39,10 +39,12 @@ fn vuf_filter(action: SeccompAction) -> Result<SeccompFilter, Error> {
         allow_syscall(libc::SYS_close),
         allow_syscall(libc::SYS_copy_file_range),
         allow_syscall(libc::SYS_dup),
+        #[cfg(target_arch = "x86_64")]
         allow_syscall(libc::SYS_epoll_create),
         allow_syscall(libc::SYS_epoll_create1),
         allow_syscall(libc::SYS_epoll_ctl),
         allow_syscall(libc::SYS_epoll_pwait),
+        #[cfg(target_arch = "x86_64")]
         allow_syscall(libc::SYS_epoll_wait),
         allow_syscall(libc::SYS_eventfd2),
         allow_syscall(libc::SYS_exit),
@@ -59,10 +61,13 @@ fn vuf_filter(action: SeccompAction) -> Result<SeccompFilter, Error> {
         allow_syscall(libc::SYS_fremovexattr),
         allow_syscall(libc::SYS_fsetxattr),
         allow_syscall(libc::SYS_fstat),
+        #[cfg(target_arch = "x86_64")]
         allow_syscall(libc::SYS_fstatfs),
         allow_syscall(libc::SYS_fsync),
+        #[cfg(target_arch = "x86_64")]
         allow_syscall(libc::SYS_ftruncate),
         allow_syscall(libc::SYS_futex),
+        #[cfg(target_arch = "x86_64")]
         allow_syscall(libc::SYS_getdents),
         allow_syscall(libc::SYS_getdents64),
         allow_syscall(libc::SYS_getegid),
@@ -82,6 +87,7 @@ fn vuf_filter(action: SeccompAction) -> Result<SeccompFilter, Error> {
         allow_syscall(libc::SYS_mremap),
         allow_syscall(libc::SYS_munmap),
         allow_syscall(libc::SYS_newfstatat),
+        #[cfg(target_arch = "x86_64")]
         allow_syscall(libc::SYS_open),
         allow_syscall(libc::SYS_openat),
         allow_syscall(libc::SYS_prctl), // TODO restrict to just PR_SET_NAME?
@@ -109,9 +115,11 @@ fn vuf_filter(action: SeccompAction) -> Result<SeccompFilter, Error> {
         allow_syscall(libc::SYS_sigaltstack),
         allow_syscall(libc::SYS_statx),
         allow_syscall(libc::SYS_symlinkat),
+        #[cfg(target_arch = "x86_64")]
         allow_syscall(libc::SYS_time), // Rarely needed, except on static builds
         allow_syscall(libc::SYS_tgkill),
         allow_syscall(libc::SYS_umask),
+        #[cfg(target_arch = "x86_64")]
         allow_syscall(libc::SYS_unlink),
         allow_syscall(libc::SYS_unlinkat),
         allow_syscall(libc::SYS_unshare),


@@ -21,36 +21,46 @@ use anyhow::anyhow;
 use arch::layout;
 use arch::EntryPoint;
 use devices::{ioapic, BusDevice};
+#[cfg(target_arch = "x86_64")]
 use kvm_bindings::{
     kvm_fpu, kvm_lapic_state, kvm_mp_state, kvm_regs, kvm_sregs, kvm_vcpu_events, kvm_xcrs,
     kvm_xsave, CpuId, Msrs,
 };
 use kvm_ioctls::*;
+#[cfg(target_arch = "x86_64")]
 use libc::{c_void, siginfo_t};
 use serde_derive::{Deserialize, Serialize};
-use std::cmp;
+#[cfg(target_arch = "x86_64")]
+use std::fmt;
 use std::os::unix::thread::JoinHandleExt;
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::{Arc, Barrier, Mutex};
-use std::thread;
-use std::{fmt, io, result};
-use vm_memory::{Address, GuestAddress, GuestAddressSpace, GuestMemoryAtomic, GuestMemoryMmap};
+use std::{cmp, io, result, thread};
+#[cfg(target_arch = "x86_64")]
+use vm_memory::{Address, GuestAddressSpace};
+use vm_memory::{GuestAddress, GuestMemoryAtomic, GuestMemoryMmap};
 use vm_migration::{
     Migratable, MigratableError, Pausable, Snapshot, SnapshotDataSection, Snapshottable,
     Transportable,
 };
 use vmm_sys_util::eventfd::EventFd;
-use vmm_sys_util::signal::{register_signal_handler, SIGRTMIN};
+#[cfg(target_arch = "x86_64")]
+use vmm_sys_util::signal::register_signal_handler;
+use vmm_sys_util::signal::SIGRTMIN;
 
 // CPUID feature bits
+#[cfg(target_arch = "x86_64")]
 const TSC_DEADLINE_TIMER_ECX_BIT: u8 = 24; // tsc deadline timer ecx bit.
+#[cfg(target_arch = "x86_64")]
 const HYPERVISOR_ECX_BIT: u8 = 31; // Hypervisor ecx bit.
 
 // Debug I/O port
 #[cfg(target_arch = "x86_64")]
 const DEBUG_IOPORT: u16 = 0x80;
+#[cfg(target_arch = "x86_64")]
 const DEBUG_IOPORT_PREFIX: &str = "Debug I/O port";
 
+#[cfg(target_arch = "x86_64")]
 /// Debug I/O port, see:
 /// https://www.intel.com/content/www/us/en/support/articles/000005500/boards-and-kits.html
 ///
@@ -63,7 +73,7 @@ pub enum DebugIoPortRange {
     Userspace,
     Custom,
 }
 
+#[cfg(target_arch = "x86_64")]
 impl DebugIoPortRange {
     fn from_u8(value: u8) -> DebugIoPortRange {
         match value {
@@ -76,6 +86,7 @@ impl DebugIoPortRange {
     }
 }
 
+#[cfg(target_arch = "x86_64")]
 impl fmt::Display for DebugIoPortRange {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         match self {
@@ -199,6 +210,7 @@ pub enum Error {
 }
 pub type Result<T> = result::Result<T, Error>;
 
+#[cfg(target_arch = "x86_64")]
 #[allow(dead_code)]
 #[derive(Copy, Clone)]
 enum CpuidReg {
@@ -208,6 +220,7 @@ enum CpuidReg {
     EDX,
 }
 
+#[cfg(target_arch = "x86_64")]
 pub struct CpuidPatch {
     pub function: u32,
     pub index: u32,
@@ -218,6 +231,7 @@ pub struct CpuidPatch {
     pub edx_bit: Option<u8>,
 }
 
+#[cfg(target_arch = "x86_64")]
 impl CpuidPatch {
     fn set_cpuid_reg(
         cpuid: &mut CpuId,
@@ -311,12 +325,16 @@ struct InterruptSourceOverride {
 pub struct Vcpu {
     fd: VcpuFd,
     id: u8,
+    #[cfg_attr(target_arch = "aarch64", allow(dead_code))]
     io_bus: Arc<devices::Bus>,
     mmio_bus: Arc<devices::Bus>,
+    #[cfg_attr(target_arch = "aarch64", allow(dead_code))]
     ioapic: Option<Arc<Mutex<ioapic::Ioapic>>>,
+    #[cfg_attr(target_arch = "aarch64", allow(dead_code))]
     vm_ts: std::time::Instant,
 }
 
+#[cfg(target_arch = "x86_64")]
 #[derive(Clone, Serialize, Deserialize)]
 pub struct VcpuKvmState {
     msrs: Msrs,
@@ -330,6 +348,10 @@ pub struct VcpuKvmState {
     mp_state: kvm_mp_state,
 }
 
+#[cfg(target_arch = "aarch64")]
+#[derive(Clone, Serialize, Deserialize)]
+pub struct VcpuKvmState {}
+
 impl Vcpu {
     /// Constructs a new VCPU for `vm`.
     ///
@@ -357,6 +379,7 @@ impl Vcpu {
         })))
     }
 
+    #[cfg(target_arch = "x86_64")]
     /// Configures a x86_64 specific vcpu and should be called once per vcpu from the vcpu's thread.
     ///
    /// # Arguments
@@ -406,10 +429,12 @@ impl Vcpu {
     pub fn run(&self) -> Result<bool> {
         match self.fd.run() {
             Ok(run) => match run {
+                #[cfg(target_arch = "x86_64")]
                 VcpuExit::IoIn(addr, data) => {
                     self.io_bus.read(u64::from(addr), data);
                     Ok(true)
                 }
+                #[cfg(target_arch = "x86_64")]
                 VcpuExit::IoOut(addr, data) => {
                     if addr == DEBUG_IOPORT && data.len() == 1 {
                         self.log_debug_ioport(data[0]);
@@ -425,6 +450,7 @@ impl Vcpu {
                     self.mmio_bus.write(addr as u64, data);
                     Ok(true)
                 }
+                #[cfg(target_arch = "x86_64")]
                 VcpuExit::IoapicEoi(vector) => {
                     if let Some(ioapic) = &self.ioapic {
                         ioapic.lock().unwrap().end_of_interrupt(vector);
@@ -451,6 +477,7 @@ impl Vcpu {
         }
     }
 
+    #[cfg(target_arch = "x86_64")]
     // Log debug io port codes.
     fn log_debug_ioport(&self, code: u8) {
         let ts = self.vm_ts.elapsed();
@@ -464,6 +491,7 @@ impl Vcpu {
         );
     }
 
+    #[cfg(target_arch = "x86_64")]
     fn kvm_state(&self) -> Result<VcpuKvmState> {
         let mut msrs = arch::x86_64::regs::boot_msr_entries();
         self.fd.get_msrs(&mut msrs).map_err(Error::VcpuGetMsrs)?;
@@ -493,6 +521,7 @@ impl Vcpu {
         })
     }
 
+    #[cfg(target_arch = "x86_64")]
     fn set_kvm_state(&mut self, state: &VcpuKvmState) -> Result<()> {
         self.fd.set_regs(&state.regs).map_err(Error::VcpuSetRegs)?;
@@ -520,6 +549,16 @@ impl Vcpu {
         Ok(())
     }
 
+    #[cfg(target_arch = "aarch64")]
+    fn kvm_state(&self) -> Result<VcpuKvmState> {
+        unimplemented!();
+    }
+
+    #[cfg(target_arch = "aarch64")]
+    fn set_kvm_state(&mut self, _state: &VcpuKvmState) -> Result<()> {
+        Ok(())
+    }
 }
 
 const VCPU_SNAPSHOT_ID: &str = "vcpu";
@@ -576,13 +615,19 @@ pub struct CpuManager {
     boot_vcpus: u8,
     max_vcpus: u8,
     io_bus: Arc<devices::Bus>,
+    #[cfg_attr(target_arch = "aarch64", allow(dead_code))]
     mmio_bus: Arc<devices::Bus>,
+    #[cfg_attr(target_arch = "aarch64", allow(dead_code))]
     ioapic: Option<Arc<Mutex<ioapic::Ioapic>>>,
+    #[cfg_attr(target_arch = "aarch64", allow(dead_code))]
     vm_memory: GuestMemoryAtomic<GuestMemoryMmap>,
+    #[cfg(target_arch = "x86_64")]
     cpuid: CpuId,
+    #[cfg_attr(target_arch = "aarch64", allow(dead_code))]
     fd: Arc<VmFd>,
     vcpus_kill_signalled: Arc<AtomicBool>,
     vcpus_pause_signalled: Arc<AtomicBool>,
+    #[cfg_attr(target_arch = "aarch64", allow(dead_code))]
     reset_evt: EventFd,
     vcpu_states: Vec<VcpuState>,
     selected_cpu: u8,
@@ -709,7 +754,7 @@ impl CpuManager {
         config: &CpusConfig,
         device_manager: &Arc<Mutex<DeviceManager>>,
         guest_memory: GuestMemoryAtomic<GuestMemoryMmap>,
-        kvm: &Kvm,
+        #[cfg_attr(target_arch = "aarch64", allow(unused_variables))] kvm: &Kvm,
         fd: Arc<VmFd>,
         reset_evt: EventFd,
     ) -> Result<Arc<Mutex<CpuManager>>> {
@@ -717,6 +762,7 @@ impl CpuManager {
         vcpu_states.resize_with(usize::from(config.max_vcpus), VcpuState::default);
 
         let device_manager = device_manager.lock().unwrap();
+        #[cfg(target_arch = "x86_64")]
         let cpuid = CpuManager::patch_cpuid(kvm)?;
         let cpu_manager = Arc::new(Mutex::new(CpuManager {
             boot_vcpus: config.boot_vcpus,
@@ -725,6 +771,7 @@ impl CpuManager {
             mmio_bus: device_manager.mmio_bus().clone(),
             ioapic: device_manager.ioapic().clone(),
             vm_memory: guest_memory,
+            #[cfg(target_arch = "x86_64")]
             cpuid,
             fd,
             vcpus_kill_signalled: Arc::new(AtomicBool::new(false)),
@@ -752,6 +799,7 @@ impl CpuManager {
         Ok(cpu_manager)
     }
 
+    #[cfg(target_arch = "x86_64")]
     fn patch_cpuid(kvm: &Kvm) -> Result<CpuId> {
         let mut cpuid_patches = Vec::new();
 
@@ -787,6 +835,20 @@ impl CpuManager {
         Ok(cpuid)
     }
 
+    #[cfg(target_arch = "aarch64")]
+    fn start_vcpu(
+        &mut self,
+        _cpu_id: u8,
+        _creation_ts: std::time::Instant,
+        _vcpu_thread_barrier: Arc<Barrier>,
+        _entry_point: Option<EntryPoint>,
+        _inserting: bool,
+        _snapshot: Option<Snapshot>,
+    ) -> Result<()> {
+        unimplemented!();
+    }
+
+    #[cfg(target_arch = "x86_64")]
     fn start_vcpu(
         &mut self,
         cpu_id: u8,
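
The cfg_attr annotations scattered through this file are the lint workaround
mentioned in the commit message: the attribute expands to #[allow(dead_code)]
only when compiling for AArch64, where the stubbed-out code paths do not use
the field or parameter yet, and it disappears entirely on x86_64. A minimal
illustration (hypothetical struct, not taken from the source):

    struct Example {
        // On aarch64 this expands to #[allow(dead_code)], silencing the
        // "field is never read" warning while the port is still a stub.
        // On x86_64 the attribute vanishes and the lint stays active.
        #[cfg_attr(target_arch = "aarch64", allow(dead_code))]
        unused_on_aarch64: u64,
    }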


@@ -24,6 +24,7 @@ use acpi_tables::{aml, aml::Aml};
 use anyhow::anyhow;
 #[cfg(feature = "acpi")]
 use arch::layout;
+#[cfg(target_arch = "x86_64")]
 use arch::layout::{APIC_START, IOAPIC_SIZE, IOAPIC_START};
 use devices::{ioapic, BusDevice, HotPlugNotificationFlags};
 use kvm_ioctls::*;
@@ -75,6 +76,7 @@ const MMIO_LEN: u64 = 0x1000;
 #[cfg(feature = "pci_support")]
 const VFIO_DEVICE_NAME_PREFIX: &str = "_vfio";
 
+#[cfg(target_arch = "x86_64")]
 const IOAPIC_DEVICE_NAME: &str = "_ioapic";
 
 const SERIAL_DEVICE_NAME_PREFIX: &str = "_serial";
@@ -662,6 +664,7 @@ pub struct DeviceManager {
     #[cfg(feature = "pci_support")]
     pci_bus: Option<Arc<Mutex<PciBus>>>,
 
+    #[cfg_attr(target_arch = "aarch64", allow(dead_code))]
     // MSI Interrupt Manager
     msi_interrupt_manager: Arc<dyn InterruptManager<GroupConfig = MsiIrqGroupConfig>>,
@@ -999,6 +1002,12 @@ impl DeviceManager {
         Ok(())
     }
 
+    #[cfg(target_arch = "aarch64")]
+    fn add_ioapic(&mut self) -> DeviceManagerResult<Arc<Mutex<ioapic::Ioapic>>> {
+        unimplemented!();
+    }
+
+    #[cfg(target_arch = "x86_64")]
     fn add_ioapic(&mut self) -> DeviceManagerResult<Arc<Mutex<ioapic::Ioapic>>> {
         let id = String::from(IOAPIC_DEVICE_NAME);


@@ -8,8 +8,12 @@ use crate::MEMORY_MANAGER_SNAPSHOT_ID;
 #[cfg(feature = "acpi")]
 use acpi_tables::{aml, aml::Aml};
 use anyhow::anyhow;
-use arch::{layout, RegionType};
-use devices::{ioapic, BusDevice};
+#[cfg(target_arch = "x86_64")]
+use arch::layout;
+use arch::{get_host_cpu_phys_bits, RegionType};
+#[cfg(target_arch = "x86_64")]
+use devices::ioapic;
+use devices::BusDevice;
 use kvm_bindings::{kvm_userspace_memory_region, KVM_MEM_READONLY};
 use kvm_ioctls::*;
 use std::convert::TryInto;
@@ -21,7 +25,9 @@ use std::path::PathBuf;
 use std::result;
 use std::sync::{Arc, Mutex};
 use url::Url;
-use vm_allocator::{GsiApic, SystemAllocator};
+#[cfg(target_arch = "x86_64")]
+use vm_allocator::GsiApic;
+use vm_allocator::SystemAllocator;
 use vm_memory::guest_memory::FileOffset;
 use vm_memory::{
     mmap::MmapRegionError, Address, Bytes, Error as MmapError, GuestAddress, GuestAddressSpace,
@@ -33,6 +39,7 @@ use vm_migration::{
     Transportable,
 };
 
+#[cfg(target_arch = "x86_64")]
 const X86_64_IRQ_BASE: u32 = 5;
 
 const HOTPLUG_COUNT: usize = 8;
@@ -120,34 +127,6 @@ pub enum Error {
     InvalidAmountExternalBackingFiles,
 }
 
-pub fn get_host_cpu_phys_bits() -> u8 {
-    use core::arch::x86_64;
-    unsafe {
-        let leaf = x86_64::__cpuid(0x8000_0000);
-
-        // Detect and handle AMD SME (Secure Memory Encryption) properly.
-        // Some physical address bits may become reserved when the feature is enabled.
-        // See AMD64 Architecture Programmer's Manual Volume 2, Section 7.10.1
-        let reduced = if leaf.eax >= 0x8000_001f
-            && leaf.ebx == 0x6874_7541 // Vendor ID: AuthenticAMD
-            && leaf.ecx == 0x444d_4163
-            && leaf.edx == 0x6974_6e65
-            && x86_64::__cpuid(0x8000_001f).eax & 0x1 != 0
-        {
-            (x86_64::__cpuid(0x8000_001f).ebx >> 6) & 0x3f
-        } else {
-            0
-        };
-
-        if leaf.eax >= 0x8000_0008 {
-            let leaf = x86_64::__cpuid(0x8000_0008);
-            ((leaf.eax & 0xff) - reduced) as u8
-        } else {
-            36
-        }
-    }
-}
-
 const ENABLE_FLAG: usize = 0;
 const INSERTING_FLAG: usize = 1;
 const REMOVING_FLAG: usize = 2;
@@ -279,12 +258,8 @@ impl MemoryManager {
             GuestMemoryMmap::from_arc_regions(mem_regions).map_err(Error::GuestMemory)?;
 
         let end_of_device_area = GuestAddress((1 << get_host_cpu_phys_bits()) - 1);
-        let mem_end = guest_memory.last_addr();
-        let mut start_of_device_area = if mem_end < arch::layout::MEM_32BIT_RESERVED_START {
-            arch::layout::RAM_64BIT_START
-        } else {
-            mem_end.unchecked_add(1)
-        };
+        let mut start_of_device_area = MemoryManager::start_addr(guest_memory.last_addr(), false);
 
         let mut virtiomem_region = None;
         let mut virtiomem_resize = None;
@@ -319,6 +294,7 @@ impl MemoryManager {
         let mut hotplug_slots = Vec::with_capacity(HOTPLUG_COUNT);
         hotplug_slots.resize_with(HOTPLUG_COUNT, HotPlugState::default);
 
+        #[cfg(target_arch = "x86_64")]
         // Let's allocate 64 GiB of addressable MMIO space, starting at 0.
         let allocator = Arc::new(Mutex::new(
             SystemAllocator::new(
@@ -336,6 +312,20 @@ impl MemoryManager {
             .ok_or(Error::CreateSystemAllocator)?,
         ));
 
+        #[cfg(target_arch = "aarch64")]
+        let allocator = Arc::new(Mutex::new(
+            SystemAllocator::new(
+                GuestAddress(0),
+                0,
+                GuestAddress(0),
+                0,
+                GuestAddress(0),
+                0,
+                vec![],
+            )
+            .ok_or(Error::CreateSystemAllocator)?,
+        ));
+
         let memory_manager = Arc::new(Mutex::new(MemoryManager {
             guest_memory: guest_memory.clone(),
             next_kvm_memory_slot: 0,
@@ -578,6 +568,30 @@ impl MemoryManager {
         Ok(())
     }
 
+    //
+    // Calculate the start address of an area next to RAM.
+    //
+    // If the next area is device space, there is no gap.
+    // If the next area is hotplugged RAM, the start address needs to be aligned
+    // to a 128 MiB boundary, and a gap of 256 MiB needs to be left before it.
+    // On x86_64, it must also start at the 64-bit RAM start.
+    fn start_addr(mem_end: GuestAddress, with_gap: bool) -> GuestAddress {
+        let start_addr = if with_gap {
+            GuestAddress((mem_end.0 + 1 + (256 << 20)) & !((128 << 20) - 1))
+        } else {
+            mem_end.unchecked_add(1)
+        };
+
+        #[cfg(target_arch = "x86_64")]
+        let start_addr = if mem_end < arch::layout::MEM_32BIT_RESERVED_START {
+            arch::layout::RAM_64BIT_START
+        } else {
+            start_addr
+        };
+
+        start_addr
+    }
+
     fn hotplug_ram_region(&mut self, size: usize) -> Result<Arc<GuestRegionMmap>, Error> {
         info!("Hotplugging new RAM: {}", size);
@@ -591,14 +605,7 @@ impl MemoryManager {
             return Err(Error::InvalidSize);
         }
 
-        // Start address needs to be non-contiguous with last memory added (leaving a gap of 256MiB)
-        // and also aligned to 128MiB boundary. It must also start at the 64bit start.
-        let mem_end = self.guest_memory.memory().last_addr();
-        let start_addr = if mem_end < arch::layout::MEM_32BIT_RESERVED_START {
-            arch::layout::RAM_64BIT_START
-        } else {
-            GuestAddress((mem_end.0 + 1 + (256 << 20)) & !((128 << 20) - 1))
-        };
+        let start_addr = MemoryManager::start_addr(self.guest_memory.memory().last_addr(), true);
 
         if start_addr.checked_add(size.try_into().unwrap()).unwrap() >= self.start_of_device_area()
         {
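
A worked example of the alignment performed by the new start_addr() helper
(standalone sketch with made-up addresses; the x86_64 special case for RAM
ending below the 32-bit reserved hole is omitted):

    // Same arithmetic as MemoryManager::start_addr(), on plain u64 addresses.
    fn start_addr(mem_end: u64, with_gap: bool) -> u64 {
        if with_gap {
            // Add a 256 MiB gap, then align down to a 128 MiB boundary.
            (mem_end + 1 + (256 << 20)) & !((128 << 20) - 1)
        } else {
            // Device space is placed immediately after RAM.
            mem_end + 1
        }
    }

    fn main() {
        // RAM ends at 5 GiB: hotplugged RAM starts at 5 GiB + 256 MiB,
        // which is already 128 MiB aligned.
        assert_eq!(start_addr(0x1_3FFF_FFFF, true), 0x1_5000_0000);
        // Device space starts right after RAM, with no gap.
        assert_eq!(start_addr(0x1_3FFF_FFFF, false), 0x1_4000_0000);
    }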


@@ -195,7 +195,9 @@ pub fn vmm_thread_filter() -> Result<SeccompFilter, Error> {
     Ok(SeccompFilter::new(
         vec![
             allow_syscall(libc::SYS_accept4),
+            #[cfg(target_arch = "x86_64")]
             allow_syscall(libc::SYS_access),
+            #[cfg(target_arch = "x86_64")]
             allow_syscall(libc::SYS_arch_prctl),
             allow_syscall(libc::SYS_bind),
             allow_syscall(libc::SYS_brk),
@@ -208,6 +210,7 @@ pub fn vmm_thread_filter() -> Result<SeccompFilter, Error> {
             allow_syscall(libc::SYS_epoll_create1),
             allow_syscall(libc::SYS_epoll_ctl),
             allow_syscall(libc::SYS_epoll_pwait),
+            #[cfg(target_arch = "x86_64")]
             allow_syscall(libc::SYS_epoll_wait),
             allow_syscall(libc::SYS_eventfd2),
             allow_syscall(libc::SYS_execve),
@@ -216,9 +219,11 @@ pub fn vmm_thread_filter() -> Result<SeccompFilter, Error> {
             allow_syscall(libc::SYS_fallocate),
             allow_syscall(libc::SYS_fcntl),
             allow_syscall(libc::SYS_fdatasync),
+            #[cfg(target_arch = "x86_64")]
             allow_syscall(libc::SYS_fork),
             allow_syscall(libc::SYS_fstat),
             allow_syscall(libc::SYS_fsync),
+            #[cfg(target_arch = "x86_64")]
             allow_syscall(libc::SYS_ftruncate),
             allow_syscall(libc::SYS_futex),
             allow_syscall(libc::SYS_getpid),
@@ -236,6 +241,7 @@ pub fn vmm_thread_filter() -> Result<SeccompFilter, Error> {
             allow_syscall(libc::SYS_mremap),
             allow_syscall(libc::SYS_munmap),
             allow_syscall(libc::SYS_nanosleep),
+            #[cfg(target_arch = "x86_64")]
             allow_syscall(libc::SYS_open),
             allow_syscall(libc::SYS_openat),
             allow_syscall(libc::SYS_pipe2),
@@ -244,6 +250,7 @@ pub fn vmm_thread_filter() -> Result<SeccompFilter, Error> {
             allow_syscall(libc::SYS_prlimit64),
             allow_syscall(libc::SYS_pwrite64),
             allow_syscall(libc::SYS_read),
+            #[cfg(target_arch = "x86_64")]
             allow_syscall(libc::SYS_readlink),
             allow_syscall(libc::SYS_recvfrom),
             allow_syscall(libc::SYS_recvmsg),
@@ -264,10 +271,12 @@ pub fn vmm_thread_filter() -> Result<SeccompFilter, Error> {
                 ],
             ),
             allow_syscall(libc::SYS_socketpair),
+            #[cfg(target_arch = "x86_64")]
             allow_syscall(libc::SYS_stat),
             allow_syscall(libc::SYS_statx),
             allow_syscall(libc::SYS_tgkill),
             allow_syscall(libc::SYS_tkill),
+            #[cfg(target_arch = "x86_64")]
             allow_syscall(libc::SYS_unlink),
             allow_syscall(libc::SYS_wait4),
             allow_syscall(libc::SYS_write),
@@ -290,6 +299,7 @@ pub fn api_thread_filter() -> Result<SeccompFilter, Error> {
             allow_syscall(libc::SYS_epoll_create1),
             allow_syscall(libc::SYS_epoll_ctl),
             allow_syscall(libc::SYS_epoll_pwait),
+            #[cfg(target_arch = "x86_64")]
             allow_syscall(libc::SYS_epoll_wait),
             allow_syscall(libc::SYS_exit),
             allow_syscall(libc::SYS_futex),


@@ -35,24 +35,37 @@ use crate::memory_manager::{Error as MemoryManagerError, MemoryManager};
 use crate::migration::{url_to_path, vm_config_from_snapshot, VM_SNAPSHOT_FILE};
 use crate::{CPU_MANAGER_SNAPSHOT_ID, DEVICE_MANAGER_SNAPSHOT_ID, MEMORY_MANAGER_SNAPSHOT_ID};
 use anyhow::anyhow;
-use arch::{BootProtocol, EntryPoint};
-use devices::{ioapic, HotPlugNotificationFlags};
+#[cfg(target_arch = "x86_64")]
+use arch::BootProtocol;
+use arch::{check_required_kvm_extensions, EntryPoint};
+#[cfg(target_arch = "x86_64")]
+use devices::ioapic;
+use devices::HotPlugNotificationFlags;
+#[cfg(target_arch = "x86_64")]
 use kvm_bindings::{kvm_enable_cap, kvm_userspace_memory_region, KVM_CAP_SPLIT_IRQCHIP};
 use kvm_ioctls::*;
+#[cfg(target_arch = "x86_64")]
 use linux_loader::cmdline::Cmdline;
+#[cfg(target_arch = "x86_64")]
 use linux_loader::loader::elf::Error::InvalidElfMagicNumber;
+#[cfg(target_arch = "x86_64")]
 use linux_loader::loader::KernelLoader;
 use signal_hook::{iterator::Signals, SIGINT, SIGTERM, SIGWINCH};
+#[cfg(target_arch = "x86_64")]
 use std::convert::TryInto;
+#[cfg(target_arch = "x86_64")]
 use std::ffi::CString;
 use std::fs::{File, OpenOptions};
-use std::io::Write;
-use std::io::{self, Seek, SeekFrom};
+use std::io::{self, Write};
+#[cfg(target_arch = "x86_64")]
+use std::io::{Seek, SeekFrom};
+#[cfg(target_arch = "x86_64")]
 use std::ops::Deref;
 use std::path::PathBuf;
 use std::sync::{Arc, Mutex, RwLock};
 use std::{result, str, thread};
 use url::Url;
+#[cfg(target_arch = "x86_64")]
 use vm_memory::{
     Address, Bytes, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryMmap,
     GuestMemoryRegion,
@@ -65,6 +78,7 @@ use vmm_sys_util::eventfd::EventFd;
 use vmm_sys_util::terminal::Terminal;
 
 // 64 bit direct boot entry offset for bzImage
+#[cfg(target_arch = "x86_64")]
 const KERNEL_64BIT_ENTRY_OFFSET: u64 = 0x200;
 
 /// Errors associated with VM management
@@ -153,9 +167,6 @@ pub enum Error {
     /// Error from CPU handling
     CpuManager(cpu::Error),
 
-    /// Capability missing
-    CapabilityMissing(Cap),
-
     /// Cannot pause devices
     PauseDevices(MigratableError),
@@ -246,7 +257,9 @@ impl VmState {
 }
 
 pub struct Vm {
+    #[cfg_attr(target_arch = "aarch64", allow(dead_code))]
     kernel: File,
+    #[cfg_attr(target_arch = "aarch64", allow(dead_code))]
     initramfs: Option<File>,
     threads: Vec<thread::JoinHandle<()>>,
     device_manager: Arc<Mutex<DeviceManager>>,
@@ -262,18 +275,7 @@ impl Vm {
     fn kvm_new() -> Result<(Kvm, Arc<VmFd>)> {
         let kvm = Kvm::new().map_err(Error::KvmNew)?;
 
-        // Check required capabilities:
-        if !kvm.check_extension(Cap::SignalMsi) {
-            return Err(Error::CapabilityMissing(Cap::SignalMsi));
-        }
-
-        if !kvm.check_extension(Cap::TscDeadlineTimer) {
-            return Err(Error::CapabilityMissing(Cap::TscDeadlineTimer));
-        }
-
-        if !kvm.check_extension(Cap::SplitIrqchip) {
-            return Err(Error::CapabilityMissing(Cap::SplitIrqchip));
-        }
+        check_required_kvm_extensions(&kvm).expect("Missing KVM capabilities");
 
         let fd: VmFd;
         loop {
@@ -295,16 +297,20 @@ impl Vm {
         let fd = Arc::new(fd);
 
         // Set TSS
+        #[cfg(target_arch = "x86_64")]
         fd.set_tss_address(arch::x86_64::layout::KVM_TSS_ADDRESS.raw_value() as usize)
             .map_err(Error::VmSetup)?;
 
-        // Create split irqchip
-        // Only the local APIC is emulated in kernel, both PICs and IOAPIC
-        // are not.
-        let mut cap: kvm_enable_cap = Default::default();
-        cap.cap = KVM_CAP_SPLIT_IRQCHIP;
-        cap.args[0] = ioapic::NUM_IOAPIC_PINS as u64;
-        fd.enable_cap(&cap).map_err(Error::VmSetup)?;
+        #[cfg(target_arch = "x86_64")]
+        {
+            // Create split irqchip
+            // Only the local APIC is emulated in kernel, both PICs and IOAPIC
+            // are not.
+            let mut cap: kvm_enable_cap = Default::default();
+            cap.cap = KVM_CAP_SPLIT_IRQCHIP;
+            cap.args[0] = ioapic::NUM_IOAPIC_PINS as u64;
+            fd.enable_cap(&cap).map_err(Error::VmSetup)?;
+        }
 
         Ok((kvm, fd))
     }
@@ -446,6 +452,7 @@ impl Vm {
         )
     }
 
+    #[cfg(target_arch = "x86_64")]
     fn load_initramfs(&mut self, guest_mem: &GuestMemoryMmap) -> Result<arch::InitramfsConfig> {
         let mut initramfs = self.initramfs.as_ref().unwrap();
         let size: usize = initramfs
@@ -468,6 +475,12 @@ impl Vm {
         Ok(arch::InitramfsConfig { address, size })
     }
 
+    #[cfg(target_arch = "aarch64")]
+    fn load_kernel(&mut self) -> Result<EntryPoint> {
+        unimplemented!();
+    }
+
+    #[cfg(target_arch = "x86_64")]
     fn load_kernel(&mut self) -> Result<EntryPoint> {
         let mut cmdline = Cmdline::new(arch::CMDLINE_MAX_SIZE);
         cmdline
@@ -1276,6 +1289,7 @@ impl Transportable for Vm {
 }
 impl Migratable for Vm {}
 
+#[cfg(target_arch = "x86_64")]
 #[cfg(test)]
 mod tests {
     use super::*;
@@ -1334,6 +1348,7 @@ mod tests {
     }
 }
 
+#[cfg(target_arch = "x86_64")]
 #[allow(unused)]
 pub fn test_vm() {
     // This example based on https://lwn.net/Articles/658511/