2020-02-12 03:37:33 +00:00
|
|
|
// Copyright © 2020, Oracle and/or its affiliates.
|
|
|
|
//
|
2019-05-08 10:22:53 +00:00
|
|
|
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
|
|
|
//
|
|
|
|
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
|
|
// found in the LICENSE-BSD-3-Clause file.
|
|
|
|
//
|
2019-02-28 13:16:58 +00:00
|
|
|
// Copyright © 2019 Intel Corporation
|
|
|
|
//
|
2019-05-08 10:22:53 +00:00
|
|
|
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
|
|
|
|
//
|
2019-02-28 13:16:58 +00:00
|
|
|
|
|
|
|
extern crate arch;
|
2019-03-07 13:56:43 +00:00
|
|
|
extern crate devices;
|
2019-03-18 20:59:50 +00:00
|
|
|
extern crate epoll;
|
2020-06-02 02:29:54 +00:00
|
|
|
extern crate hypervisor;
|
2019-02-28 14:26:30 +00:00
|
|
|
extern crate libc;
|
2019-02-28 13:16:58 +00:00
|
|
|
extern crate linux_loader;
|
2019-05-09 15:01:42 +00:00
|
|
|
extern crate net_util;
|
vm-virtio: Implement console size config feature
One of the features of the virtio console device is its size can be
configured and updated. Our first iteration of the console device
implementation is lack of this feature. As a result, it had a
default fixed size which could not be changed. This commit implements
the console config feature and lets us change the console size from
the vmm side.
During the activation of the device, vmm reads the current terminal
size, sets the console configuration accordinly, and lets the driver
know about this configuration by sending an interrupt. Later, if
someone changes the terminal size, the vmm detects the corresponding
event, updates the configuration, and sends interrupt as before. As a
result, the console device driver, in the guest, updates the console
size.
Signed-off-by: A K M Fazla Mehrab <fazla.mehrab.akm@intel.com>
2019-07-23 19:18:20 +00:00
|
|
|
extern crate signal_hook;
|
2019-09-11 16:07:33 +00:00
|
|
|
#[cfg(feature = "pci_support")]
|
2019-05-06 17:27:40 +00:00
|
|
|
extern crate vm_allocator;
|
2019-02-28 13:16:58 +00:00
|
|
|
extern crate vm_memory;
|
|
|
|
|
2020-04-06 15:24:46 +00:00
|
|
|
use crate::config::{
|
2020-04-14 09:21:24 +00:00
|
|
|
DeviceConfig, DiskConfig, FsConfig, HotplugMethod, NetConfig, PmemConfig, ValidationError,
|
2020-04-28 15:02:46 +00:00
|
|
|
VmConfig, VsockConfig,
|
2020-04-06 15:24:46 +00:00
|
|
|
};
|
2019-11-11 13:55:50 +00:00
|
|
|
use crate::cpu;
|
2020-06-09 10:28:02 +00:00
|
|
|
use crate::device_manager::{self, get_win_size, Console, DeviceManager, DeviceManagerError};
|
2020-03-16 17:58:23 +00:00
|
|
|
use crate::memory_manager::{Error as MemoryManagerError, MemoryManager};
|
2020-06-23 09:39:39 +00:00
|
|
|
use crate::migration::{get_vm_snapshot, url_to_path, VM_SNAPSHOT_FILE};
|
2020-06-11 15:27:46 +00:00
|
|
|
use crate::{
|
|
|
|
PciDeviceInfo, CPU_MANAGER_SNAPSHOT_ID, DEVICE_MANAGER_SNAPSHOT_ID, MEMORY_MANAGER_SNAPSHOT_ID,
|
|
|
|
};
|
2019-11-22 13:54:52 +00:00
|
|
|
use anyhow::anyhow;
|
2020-05-12 09:49:12 +00:00
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
use arch::BootProtocol;
|
2020-06-02 02:29:54 +00:00
|
|
|
use arch::EntryPoint;
|
2020-05-12 09:49:12 +00:00
|
|
|
use devices::HotPlugNotificationFlags;
|
2019-09-27 08:39:56 +00:00
|
|
|
use linux_loader::cmdline::Cmdline;
|
2020-05-12 09:49:12 +00:00
|
|
|
#[cfg(target_arch = "x86_64")]
|
2020-03-18 18:42:03 +00:00
|
|
|
use linux_loader::loader::elf::Error::InvalidElfMagicNumber;
|
2020-06-25 06:20:12 +00:00
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
use linux_loader::loader::elf::PvhBootCapability::PvhEntryPresent;
|
2019-02-28 13:16:58 +00:00
|
|
|
use linux_loader::loader::KernelLoader;
|
2019-12-05 03:27:40 +00:00
|
|
|
use signal_hook::{iterator::Signals, SIGINT, SIGTERM, SIGWINCH};
|
2020-06-24 10:20:13 +00:00
|
|
|
use std::collections::HashMap;
|
2020-05-12 09:49:12 +00:00
|
|
|
#[cfg(target_arch = "x86_64")]
|
2020-03-15 17:56:07 +00:00
|
|
|
use std::convert::TryInto;
|
2019-02-28 13:16:58 +00:00
|
|
|
use std::ffi::CString;
|
2020-02-25 00:09:54 +00:00
|
|
|
use std::fs::{File, OpenOptions};
|
2020-05-12 09:49:12 +00:00
|
|
|
use std::io::{self, Write};
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
use std::io::{Seek, SeekFrom};
|
2020-06-24 10:20:13 +00:00
|
|
|
use std::num::Wrapping;
|
2020-02-11 16:22:40 +00:00
|
|
|
use std::ops::Deref;
|
2020-02-14 09:55:19 +00:00
|
|
|
use std::path::PathBuf;
|
2019-11-11 14:31:11 +00:00
|
|
|
use std::sync::{Arc, Mutex, RwLock};
|
2019-11-11 13:55:50 +00:00
|
|
|
use std::{result, str, thread};
|
2020-02-25 00:09:54 +00:00
|
|
|
use url::Url;
|
2020-06-03 08:30:33 +00:00
|
|
|
use vm_memory::{Address, GuestAddress, GuestAddressSpace};
|
2020-05-12 09:49:12 +00:00
|
|
|
#[cfg(target_arch = "x86_64")]
|
2020-06-03 08:30:33 +00:00
|
|
|
use vm_memory::{Bytes, GuestMemoryMmap};
|
2019-05-12 11:53:47 +00:00
|
|
|
use vm_migration::{
|
|
|
|
Migratable, MigratableError, Pausable, Snapshot, SnapshotDataSection, Snapshottable,
|
|
|
|
Transportable,
|
|
|
|
};
|
2019-09-04 14:28:48 +00:00
|
|
|
use vmm_sys_util::eventfd::EventFd;
|
2019-03-18 20:59:50 +00:00
|
|
|
use vmm_sys_util::terminal::Terminal;
|
2019-02-28 13:16:58 +00:00
|
|
|
|
2019-06-10 09:14:02 +00:00
|
|
|
// 64 bit direct boot entry offset for bzImage
// Per the Linux x86 boot protocol, the 64-bit entry point lives 0x200 bytes
// past the kernel load address.
#[cfg(target_arch = "x86_64")]
const KERNEL_64BIT_ENTRY_OFFSET: u64 = 0x200;
|
|
|
|
|
2019-05-10 08:21:53 +00:00
|
|
|
/// Errors associated with VM management
#[derive(Debug)]
pub enum Error {
    /// Cannot open the kernel image
    KernelFile(io::Error),

    /// Cannot open the initramfs image
    InitramfsFile(io::Error),

    /// Cannot load the kernel in memory
    KernelLoad(linux_loader::loader::Error),

    /// Cannot load the initramfs in memory
    InitramfsLoad,

    /// Cannot load the command line in memory
    LoadCmdLine(linux_loader::loader::Error),

    /// Cannot modify the command line
    CmdLineInsertStr(linux_loader::cmdline::Error),

    /// Cannot convert command line into CString
    CmdLineCString(std::ffi::NulError),

    /// Cannot configure system
    ConfigureSystem(arch::Error),

    /// Cannot enable interrupt controller
    EnableInterruptController(device_manager::DeviceManagerError),

    /// The VM state lock is poisoned or cannot be acquired.
    PoisonedState,

    /// Cannot create a device manager.
    DeviceManager(DeviceManagerError),

    /// Write to the console failed.
    Console(vmm_sys_util::errno::Error),

    /// Cannot setup terminal in raw mode.
    SetTerminalRaw(vmm_sys_util::errno::Error),

    /// Cannot setup terminal in canonical mode.
    SetTerminalCanon(vmm_sys_util::errno::Error),

    /// Failed parsing network parameters
    ParseNetworkParameters,

    /// Memory is overflow
    MemOverflow,

    /// Failed to allocate the IOAPIC memory range.
    IoapicRangeAllocation,

    /// Cannot spawn a signal handler thread
    SignalHandlerSpawn(io::Error),

    /// Failed to join on vCPU threads
    ThreadCleanup(std::boxed::Box<dyn std::any::Any + std::marker::Send>),

    /// VM is not created
    VmNotCreated,

    /// VM is already created
    VmAlreadyCreated,

    /// VM is not running
    VmNotRunning,

    /// Cannot clone EventFd.
    EventFdClone(io::Error),

    /// Invalid VM state transition
    InvalidStateTransition(VmState, VmState),

    /// Error from CPU handling
    CpuManager(cpu::Error),

    /// Cannot pause devices
    PauseDevices(MigratableError),

    /// Cannot resume devices
    ResumeDevices(MigratableError),

    /// Cannot pause CPUs
    PauseCpus(MigratableError),

    /// Cannot resume cpus
    ResumeCpus(MigratableError),

    /// Cannot pause VM
    Pause(MigratableError),

    /// Cannot resume VM
    Resume(MigratableError),

    /// Memory manager error
    MemoryManager(MemoryManagerError),

    /// No PCI support
    NoPciSupport,

    /// Eventfd write error
    EventfdError(std::io::Error),

    /// Cannot snapshot VM
    Snapshot(MigratableError),

    /// Cannot restore VM
    Restore(MigratableError),

    /// Cannot send VM snapshot
    SnapshotSend(MigratableError),

    /// Cannot convert source URL from Path into &str
    RestoreSourceUrlPathToStr,

    /// Failed to validate config
    ConfigValidation(ValidationError),

    /// No more that one virtio-vsock device
    TooManyVsockDevices,

    /// Failed serializing into JSON
    SerializeJson(serde_json::Error),
}
|
2019-11-11 13:55:50 +00:00
|
|
|
/// Convenience alias for VM operations that fail with [`Error`].
pub type Result<T> = result::Result<T, Error>;
|
2019-02-28 14:26:30 +00:00
|
|
|
|
2019-10-11 12:47:57 +00:00
|
|
|
/// Lifecycle state of a VM. Legal transitions between states are
/// enforced by `VmState::valid_transition`.
#[derive(Clone, Copy, Debug, Deserialize, Serialize, PartialEq)]
pub enum VmState {
    /// Built but not yet booted.
    Created,
    /// vCPUs are executing.
    Running,
    /// Halted; may only transition back to `Running`.
    Shutdown,
    /// Execution suspended; may resume or shut down.
    Paused,
}
|
|
|
|
|
2019-10-11 12:47:57 +00:00
|
|
|
impl VmState {
|
|
|
|
fn valid_transition(self, new_state: VmState) -> Result<()> {
|
|
|
|
match self {
|
|
|
|
VmState::Created => match new_state {
|
2020-06-22 13:35:27 +00:00
|
|
|
VmState::Created | VmState::Shutdown => {
|
2019-10-11 12:47:57 +00:00
|
|
|
Err(Error::InvalidStateTransition(self, new_state))
|
|
|
|
}
|
2020-06-22 13:35:27 +00:00
|
|
|
VmState::Running | VmState::Paused => Ok(()),
|
2019-10-11 12:47:57 +00:00
|
|
|
},
|
|
|
|
|
|
|
|
VmState::Running => match new_state {
|
|
|
|
VmState::Created | VmState::Running => {
|
|
|
|
Err(Error::InvalidStateTransition(self, new_state))
|
|
|
|
}
|
|
|
|
VmState::Paused | VmState::Shutdown => Ok(()),
|
|
|
|
},
|
|
|
|
|
|
|
|
VmState::Shutdown => match new_state {
|
|
|
|
VmState::Paused | VmState::Created | VmState::Shutdown => {
|
|
|
|
Err(Error::InvalidStateTransition(self, new_state))
|
|
|
|
}
|
|
|
|
VmState::Running => Ok(()),
|
|
|
|
},
|
|
|
|
|
|
|
|
VmState::Paused => match new_state {
|
|
|
|
VmState::Created | VmState::Paused => {
|
|
|
|
Err(Error::InvalidStateTransition(self, new_state))
|
|
|
|
}
|
|
|
|
VmState::Running | VmState::Shutdown => Ok(()),
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-07 12:38:09 +00:00
|
|
|
/// A virtual machine instance: guest images, managers for CPUs, memory
/// and devices, and the bookkeeping needed to run, pause and shut it down.
pub struct Vm {
    // Kernel image the guest boots from.
    kernel: File,
    #[cfg_attr(target_arch = "aarch64", allow(dead_code))]
    // Optional initramfs image, loaded into guest memory before boot
    // (only consumed on x86_64, hence the aarch64 dead_code allowance).
    initramfs: Option<File>,
    // Auxiliary threads (e.g. the signal handler) joined during shutdown().
    threads: Vec<thread::JoinHandle<()>>,
    device_manager: Arc<Mutex<DeviceManager>>,
    // Full VM configuration, shared with the managers.
    config: Arc<Mutex<VmConfig>>,
    // True when stdin is a TTY; shutdown() then restores canonical mode.
    on_tty: bool,
    // Signal iterator; closed on shutdown to stop the handler thread.
    signals: Option<Signals>,
    // Current lifecycle state, guarded by valid_transition().
    state: RwLock<VmState>,
    cpu_manager: Arc<Mutex<cpu::CpuManager>>,
    memory_manager: Arc<Mutex<MemoryManager>>,
    #[cfg(target_arch = "x86_64")]
    // The hypervisor abstracted virtual machine.
    vm: Arc<dyn hypervisor::Vm>,
    #[cfg(target_arch = "x86_64")]
    // Guest clock captured at snapshot time, restored on resume.
    saved_clock: Option<hypervisor::ClockData>,
}
|
|
|
|
|
2019-09-24 14:00:00 +00:00
|
|
|
impl Vm {
|
2020-06-23 09:39:39 +00:00
|
|
|
    /// Build a `Vm` on top of an already-constructed `MemoryManager`.
    ///
    /// Shared construction path for `new` (fresh boot) and
    /// `new_from_snapshot` (restore): validates the configuration, creates
    /// the `DeviceManager` and `CpuManager`, and opens the kernel and
    /// optional initramfs images. `_saved_clock` is the guest clock taken
    /// at snapshot time (x86_64 only; unused elsewhere).
    #[allow(clippy::too_many_arguments)]
    fn new_from_memory_manager(
        config: Arc<Mutex<VmConfig>>,
        memory_manager: Arc<Mutex<MemoryManager>>,
        vm: Arc<dyn hypervisor::Vm>,
        exit_evt: EventFd,
        reset_evt: EventFd,
        vmm_path: PathBuf,
        hypervisor: Arc<dyn hypervisor::Hypervisor>,
        _saved_clock: Option<hypervisor::ClockData>,
    ) -> Result<Self> {
        // Reject inconsistent configurations before creating anything.
        config
            .lock()
            .unwrap()
            .validate()
            .map_err(Error::ConfigValidation)?;

        let device_manager = DeviceManager::new(
            vm.clone(),
            config.clone(),
            memory_manager.clone(),
            &exit_evt,
            &reset_evt,
            vmm_path,
        )
        .map_err(Error::DeviceManager)?;

        let cpu_manager = cpu::CpuManager::new(
            &config.lock().unwrap().cpus.clone(),
            &device_manager,
            &memory_manager,
            vm.clone(),
            reset_evt,
            hypervisor,
        )
        .map_err(Error::CpuManager)?;

        // Remember whether stdin is a terminal so shutdown() can restore
        // canonical mode.
        let on_tty = unsafe { libc::isatty(libc::STDIN_FILENO as i32) } != 0;
        let kernel = File::open(&config.lock().unwrap().kernel.as_ref().unwrap().path)
            .map_err(Error::KernelFile)?;

        // Open the initramfs only if one was configured.
        let initramfs = config
            .lock()
            .unwrap()
            .initramfs
            .as_ref()
            .map(|i| File::open(&i.path))
            .transpose()
            .map_err(Error::InitramfsFile)?;

        Ok(Vm {
            kernel,
            initramfs,
            device_manager,
            config,
            on_tty,
            threads: Vec::with_capacity(1),
            signals: None,
            state: RwLock::new(VmState::Created),
            cpu_manager,
            memory_manager,
            #[cfg(target_arch = "x86_64")]
            vm,
            #[cfg(target_arch = "x86_64")]
            saved_clock: _saved_clock,
        })
    }
|
|
|
|
|
2020-03-16 23:39:50 +00:00
|
|
|
    /// Create a brand new VM from `config`: set up the hypervisor VM,
    /// build the memory manager, construct the `Vm`, and instantiate all
    /// configured devices.
    pub fn new(
        config: Arc<Mutex<VmConfig>>,
        exit_evt: EventFd,
        reset_evt: EventFd,
        vmm_path: PathBuf,
        hypervisor: Arc<dyn hypervisor::Hypervisor>,
    ) -> Result<Self> {
        #[cfg(target_arch = "x86_64")]
        hypervisor.check_required_extensions().unwrap();
        let vm = hypervisor.create_vm().unwrap();
        #[cfg(target_arch = "x86_64")]
        vm.enable_split_irq().unwrap();
        let memory_manager = MemoryManager::new(
            vm.clone(),
            &config.lock().unwrap().memory.clone(),
            None,
            false,
        )
        .map_err(Error::MemoryManager)?;

        // Carve out SGX EPC sections when requested (x86_64 only).
        #[cfg(target_arch = "x86_64")]
        {
            if let Some(sgx_epc_config) = config.lock().unwrap().sgx_epc.clone() {
                memory_manager
                    .lock()
                    .unwrap()
                    .setup_sgx(sgx_epc_config)
                    .map_err(Error::MemoryManager)?;
            }
        }

        // No saved clock on a fresh boot.
        let new_vm = Vm::new_from_memory_manager(
            config,
            memory_manager,
            vm,
            exit_evt,
            reset_evt,
            vmm_path,
            hypervisor,
            None,
        )?;

        // The device manager must create the devices from here as it is part
        // of the regular code path creating everything from scratch.
        new_vm
            .device_manager
            .lock()
            .unwrap()
            .create_devices()
            .map_err(Error::DeviceManager)?;

        Ok(new_vm)
    }
|
|
|
|
|
|
|
|
    /// Restore a VM from `snapshot`, fetching guest memory from
    /// `source_url`. `prefault` controls whether restored memory is
    /// faulted in eagerly. Unlike `new`, devices are NOT created here —
    /// the restore code path recreates them from their snapshots.
    pub fn new_from_snapshot(
        snapshot: &Snapshot,
        exit_evt: EventFd,
        reset_evt: EventFd,
        vmm_path: PathBuf,
        source_url: &str,
        prefault: bool,
        hypervisor: Arc<dyn hypervisor::Hypervisor>,
    ) -> Result<Self> {
        #[cfg(target_arch = "x86_64")]
        hypervisor.check_required_extensions().unwrap();
        let vm = hypervisor.create_vm().unwrap();
        #[cfg(target_arch = "x86_64")]
        vm.enable_split_irq().unwrap();
        // The VM configuration is carried inside the snapshot itself.
        let vm_snapshot = get_vm_snapshot(snapshot).map_err(Error::Restore)?;
        let config = vm_snapshot.config.clone();

        // A restorable snapshot must contain a memory manager section.
        let memory_manager = if let Some(memory_manager_snapshot) =
            snapshot.snapshots.get(MEMORY_MANAGER_SNAPSHOT_ID)
        {
            MemoryManager::new_from_snapshot(
                memory_manager_snapshot,
                vm.clone(),
                &config.lock().unwrap().memory.clone(),
                source_url,
                prefault,
            )
            .map_err(Error::MemoryManager)?
        } else {
            return Err(Error::Restore(MigratableError::Restore(anyhow!(
                "Missing memory manager snapshot"
            ))));
        };

        Vm::new_from_memory_manager(
            config,
            memory_manager,
            vm,
            exit_evt,
            reset_evt,
            vmm_path,
            hypervisor,
            #[cfg(target_arch = "x86_64")]
            vm_snapshot.clock,
            #[cfg(target_arch = "aarch64")]
            None,
        )
    }
|
|
|
|
|
2020-05-12 09:49:12 +00:00
|
|
|
#[cfg(target_arch = "x86_64")]
|
2020-03-15 17:56:07 +00:00
|
|
|
fn load_initramfs(&mut self, guest_mem: &GuestMemoryMmap) -> Result<arch::InitramfsConfig> {
|
|
|
|
let mut initramfs = self.initramfs.as_ref().unwrap();
|
|
|
|
let size: usize = initramfs
|
|
|
|
.seek(SeekFrom::End(0))
|
|
|
|
.map_err(|_| Error::InitramfsLoad)?
|
|
|
|
.try_into()
|
|
|
|
.unwrap();
|
|
|
|
initramfs
|
|
|
|
.seek(SeekFrom::Start(0))
|
|
|
|
.map_err(|_| Error::InitramfsLoad)?;
|
|
|
|
|
|
|
|
let address =
|
|
|
|
arch::initramfs_load_addr(guest_mem, size).map_err(|_| Error::InitramfsLoad)?;
|
|
|
|
let address = GuestAddress(address);
|
|
|
|
|
|
|
|
guest_mem
|
|
|
|
.read_from(address, &mut initramfs, size)
|
|
|
|
.map_err(|_| Error::InitramfsLoad)?;
|
|
|
|
|
|
|
|
Ok(arch::InitramfsConfig { address, size })
|
|
|
|
}
|
|
|
|
|
2020-05-28 03:31:26 +00:00
|
|
|
fn get_cmdline(&mut self) -> Result<CString> {
|
2019-09-27 08:39:56 +00:00
|
|
|
let mut cmdline = Cmdline::new(arch::CMDLINE_MAX_SIZE);
|
|
|
|
cmdline
|
2019-12-05 14:50:38 +00:00
|
|
|
.insert_str(self.config.lock().unwrap().cmdline.args.clone())
|
2020-01-24 08:34:51 +00:00
|
|
|
.map_err(Error::CmdLineInsertStr)?;
|
2020-03-04 13:39:15 +00:00
|
|
|
for entry in self.device_manager.lock().unwrap().cmdline_additions() {
|
2020-01-24 08:34:51 +00:00
|
|
|
cmdline.insert_str(entry).map_err(Error::CmdLineInsertStr)?;
|
2019-09-11 15:22:00 +00:00
|
|
|
}
|
2020-05-28 03:31:26 +00:00
|
|
|
Ok(CString::new(cmdline).map_err(Error::CmdLineCString)?)
|
|
|
|
}
|
|
|
|
|
|
|
|
#[cfg(target_arch = "aarch64")]
|
|
|
|
fn load_kernel(&mut self) -> Result<EntryPoint> {
|
2020-06-09 10:28:02 +00:00
|
|
|
let guest_memory = self.memory_manager.lock().as_ref().unwrap().guest_memory();
|
|
|
|
let mem = guest_memory.memory();
|
|
|
|
let entry_addr = match linux_loader::loader::pe::PE::load(
|
|
|
|
mem.deref(),
|
|
|
|
Some(GuestAddress(arch::get_kernel_start())),
|
|
|
|
&mut self.kernel,
|
|
|
|
None,
|
|
|
|
) {
|
|
|
|
Ok(entry_addr) => entry_addr,
|
|
|
|
Err(e) => {
|
|
|
|
return Err(Error::KernelLoad(e));
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
let entry_point_addr: GuestAddress = entry_addr.kernel_load;
|
|
|
|
|
|
|
|
Ok(EntryPoint {
|
|
|
|
entry_addr: entry_point_addr,
|
|
|
|
})
|
2020-05-28 03:31:26 +00:00
|
|
|
}
|
2019-09-11 15:22:00 +00:00
|
|
|
|
2020-05-28 03:31:26 +00:00
|
|
|
    /// Load the kernel into guest memory (x86_64) and compute the entry
    /// point plus boot protocol.
    ///
    /// Tries vmlinux (ELF) first; on an invalid ELF magic it falls back
    /// to bzImage. The kernel command line is written to guest memory
    /// here as well.
    #[cfg(target_arch = "x86_64")]
    fn load_kernel(&mut self) -> Result<EntryPoint> {
        let cmdline_cstring = self.get_cmdline()?;
        let guest_memory = self.memory_manager.lock().as_ref().unwrap().guest_memory();
        let mem = guest_memory.memory();
        let entry_addr = match linux_loader::loader::elf::Elf::load(
            mem.deref(),
            None,
            &mut self.kernel,
            Some(arch::layout::HIGH_RAM_START),
        ) {
            Ok(entry_addr) => entry_addr,
            // Not an ELF image: retry as a bzImage.
            Err(linux_loader::loader::Error::Elf(InvalidElfMagicNumber)) => {
                linux_loader::loader::bzimage::BzImage::load(
                    mem.deref(),
                    None,
                    &mut self.kernel,
                    Some(arch::layout::HIGH_RAM_START),
                )
                .map_err(Error::KernelLoad)?
            }
            // Any other loader failure is fatal.
            Err(e) => {
                return Err(Error::KernelLoad(e));
            }
        };

        // Place the command line where the guest expects to find it.
        linux_loader::loader::load_cmdline(
            mem.deref(),
            arch::layout::CMDLINE_START,
            &cmdline_cstring,
        )
        .map_err(Error::LoadCmdLine)?;

        if entry_addr.setup_header.is_some() {
            // bzImage: the 64-bit entry point is a fixed offset past the
            // load address; boot via the Linux boot protocol.
            let load_addr = entry_addr
                .kernel_load
                .raw_value()
                .checked_add(KERNEL_64BIT_ENTRY_OFFSET)
                .ok_or(Error::MemOverflow)?;

            Ok(EntryPoint {
                entry_addr: GuestAddress(load_addr),
                protocol: BootProtocol::LinuxBoot,
                setup_header: entry_addr.setup_header,
            })
        } else {
            let entry_point_addr: GuestAddress;
            let boot_prot: BootProtocol;

            if let PvhEntryPresent(pvh_entry_addr) = entry_addr.pvh_boot_cap {
                // Use the PVH kernel entry point to boot the guest
                entry_point_addr = pvh_entry_addr;
                boot_prot = BootProtocol::PvhBoot;
            } else {
                // Use the Linux 64-bit boot protocol
                entry_point_addr = entry_addr.kernel_load;
                boot_prot = BootProtocol::LinuxBoot;
            }

            Ok(EntryPoint {
                entry_addr: entry_point_addr,
                protocol: boot_prot,
                setup_header: None,
            })
        }
    }
|
|
|
|
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
fn configure_system(&mut self, entry_addr: EntryPoint) -> Result<()> {
|
|
|
|
let cmdline_cstring = self.get_cmdline()?;
|
|
|
|
let guest_memory = self.memory_manager.lock().as_ref().unwrap().guest_memory();
|
|
|
|
let mem = guest_memory.memory();
|
|
|
|
|
2020-03-15 17:56:07 +00:00
|
|
|
let initramfs_config = match self.initramfs {
|
|
|
|
Some(_) => Some(self.load_initramfs(mem.deref())?),
|
|
|
|
None => None,
|
|
|
|
};
|
|
|
|
|
2019-11-25 14:16:55 +00:00
|
|
|
let boot_vcpus = self.cpu_manager.lock().unwrap().boot_vcpus();
|
2019-11-06 17:20:55 +00:00
|
|
|
|
|
|
|
#[allow(unused_mut, unused_assignments)]
|
|
|
|
let mut rsdp_addr: Option<GuestAddress> = None;
|
|
|
|
|
|
|
|
#[cfg(feature = "acpi")]
|
|
|
|
{
|
2019-12-06 16:14:32 +00:00
|
|
|
rsdp_addr = Some(crate::acpi::create_acpi_tables(
|
2020-02-11 16:22:40 +00:00
|
|
|
mem.deref(),
|
2020-03-04 13:39:15 +00:00
|
|
|
&self.device_manager,
|
2019-12-06 16:14:32 +00:00
|
|
|
&self.cpu_manager,
|
2020-01-10 16:11:32 +00:00
|
|
|
&self.memory_manager,
|
2019-12-06 16:14:32 +00:00
|
|
|
));
|
2019-11-06 17:20:55 +00:00
|
|
|
}
|
|
|
|
|
2020-07-09 08:25:37 +00:00
|
|
|
let sgx_epc_region = self
|
|
|
|
.memory_manager
|
|
|
|
.lock()
|
|
|
|
.unwrap()
|
|
|
|
.sgx_epc_region()
|
|
|
|
.as_ref()
|
|
|
|
.cloned();
|
|
|
|
|
2019-06-10 09:14:02 +00:00
|
|
|
match entry_addr.setup_header {
|
|
|
|
Some(hdr) => {
|
|
|
|
arch::configure_system(
|
2019-08-20 22:43:23 +00:00
|
|
|
&mem,
|
2019-09-27 16:06:53 +00:00
|
|
|
arch::layout::CMDLINE_START,
|
2019-06-10 09:14:02 +00:00
|
|
|
cmdline_cstring.to_bytes().len() + 1,
|
2020-03-15 17:56:07 +00:00
|
|
|
&initramfs_config,
|
2019-11-25 14:16:55 +00:00
|
|
|
boot_vcpus,
|
2019-06-10 09:14:02 +00:00
|
|
|
Some(hdr),
|
2019-11-06 17:20:55 +00:00
|
|
|
rsdp_addr,
|
2020-02-13 04:14:12 +00:00
|
|
|
BootProtocol::LinuxBoot,
|
2020-07-09 08:25:37 +00:00
|
|
|
sgx_epc_region,
|
2019-06-10 09:14:02 +00:00
|
|
|
)
|
2020-01-24 08:34:51 +00:00
|
|
|
.map_err(Error::ConfigureSystem)?;
|
2019-06-10 09:14:02 +00:00
|
|
|
}
|
|
|
|
None => {
|
|
|
|
arch::configure_system(
|
2019-08-20 22:43:23 +00:00
|
|
|
&mem,
|
2019-09-27 16:06:53 +00:00
|
|
|
arch::layout::CMDLINE_START,
|
2019-06-10 09:14:02 +00:00
|
|
|
cmdline_cstring.to_bytes().len() + 1,
|
2020-03-15 17:56:07 +00:00
|
|
|
&initramfs_config,
|
2019-11-25 14:16:55 +00:00
|
|
|
boot_vcpus,
|
2019-06-10 09:14:02 +00:00
|
|
|
None,
|
2019-11-06 17:20:55 +00:00
|
|
|
rsdp_addr,
|
2020-05-28 03:31:26 +00:00
|
|
|
entry_addr.protocol,
|
2020-07-09 08:25:37 +00:00
|
|
|
sgx_epc_region,
|
2019-06-10 09:14:02 +00:00
|
|
|
)
|
2020-01-24 08:34:51 +00:00
|
|
|
.map_err(Error::ConfigureSystem)?;
|
2019-06-10 09:14:02 +00:00
|
|
|
}
|
|
|
|
}
|
2020-05-28 03:31:26 +00:00
|
|
|
Ok(())
|
2019-02-28 13:16:58 +00:00
|
|
|
}
|
|
|
|
|
2020-05-26 07:20:22 +00:00
|
|
|
    /// Finalize guest setup (aarch64): gather vCPU MPIDRs and device
    /// info, compute the PCI window when PCI is enabled, call
    /// `arch::configure_system`, then enable the interrupt controller.
    /// The entry point is unused on this architecture.
    #[cfg(target_arch = "aarch64")]
    fn configure_system(&mut self, _entry_addr: EntryPoint) -> Result<()> {
        let cmdline_cstring = self.get_cmdline()?;
        let vcpu_mpidrs = self.cpu_manager.lock().unwrap().get_mpidrs();
        let guest_memory = self.memory_manager.lock().as_ref().unwrap().guest_memory();
        let mem = guest_memory.memory();

        let device_info = &self
            .device_manager
            .lock()
            .unwrap()
            .get_device_info()
            .clone();

        // The PCI window spans the whole device area managed by the
        // memory manager; (start, size) is only meaningful with
        // pci_support enabled.
        let pci_space: Option<(u64, u64)> = if cfg!(feature = "pci_support") {
            let pci_space_start: GuestAddress = self
                .memory_manager
                .lock()
                .as_ref()
                .unwrap()
                .start_of_device_area();

            let pci_space_end: GuestAddress = self
                .memory_manager
                .lock()
                .as_ref()
                .unwrap()
                .end_of_device_area();

            // Inclusive bounds, hence the +1 for the size.
            let pci_space_size = pci_space_end
                .checked_offset_from(pci_space_start)
                .ok_or(Error::MemOverflow)?
                + 1;

            Some((pci_space_start.0, pci_space_size))
        } else {
            None
        };

        arch::configure_system(
            &self.memory_manager.lock().as_ref().unwrap().vm,
            &mem,
            &cmdline_cstring,
            self.cpu_manager.lock().unwrap().boot_vcpus() as u64,
            vcpu_mpidrs,
            device_info,
            &None,
            &pci_space,
        )
        .map_err(Error::ConfigureSystem)?;

        self.device_manager
            .lock()
            .unwrap()
            .enable_interrupt_controller()
            .map_err(Error::EnableInterruptController)?;

        Ok(())
    }
|
|
|
|
|
2019-09-30 09:53:49 +00:00
|
|
|
    /// Shut the VM down: restore the terminal, stop the signal-handler
    /// thread, wake the device manager threads so they terminate
    /// cleanly, stop the vCPUs, and join all auxiliary threads.
    ///
    /// Fails with `InvalidStateTransition` when the current state does
    /// not allow a transition to `Shutdown`; the state is only updated
    /// after everything above succeeded.
    pub fn shutdown(&mut self) -> Result<()> {
        let mut state = self.state.try_write().map_err(|_| Error::PoisonedState)?;
        let new_state = VmState::Shutdown;

        // Validate the transition before performing any side effect.
        state.valid_transition(new_state)?;

        if self.on_tty {
            // Don't forget to set the terminal in canonical mode
            // before to exit.
            io::stdin()
                .lock()
                .set_canon_mode()
                .map_err(Error::SetTerminalCanon)?;
        }

        // Trigger the termination of the signal_handler thread
        if let Some(signals) = self.signals.take() {
            signals.close();
        }

        // Wake up the DeviceManager threads so they will get terminated cleanly
        self.device_manager
            .lock()
            .unwrap()
            .resume()
            .map_err(Error::Resume)?;

        self.cpu_manager
            .lock()
            .unwrap()
            .shutdown()
            .map_err(Error::CpuManager)?;

        // Wait for all the threads to finish
        for thread in self.threads.drain(..) {
            thread.join().map_err(Error::ThreadCleanup)?
        }
        *state = new_state;

        Ok(())
    }
|
|
|
|
|
2020-04-03 09:27:20 +00:00
|
|
|
/// Resizes the VM at runtime: vCPU count, guest memory size and/or
/// balloon size. Each argument is independent; `None` means "leave
/// unchanged". The persisted `VmConfig` is updated so a reboot keeps
/// the new values.
pub fn resize(
    &mut self,
    desired_vcpus: Option<u8>,
    desired_memory: Option<u64>,
    desired_ram_w_balloon: Option<u64>,
) -> Result<()> {
    if let Some(desired_vcpus) = desired_vcpus {
        // CpuManager::resize returns a boolean; the guest is only
        // notified when it reports that a change actually happened.
        if self
            .cpu_manager
            .lock()
            .unwrap()
            .resize(desired_vcpus)
            .map_err(Error::CpuManager)?
        {
            self.device_manager
                .lock()
                .unwrap()
                .notify_hotplug(HotPlugNotificationFlags::CPU_DEVICES_CHANGED)
                .map_err(Error::DeviceManager)?;
        }
        self.config.lock().unwrap().cpus.boot_vcpus = desired_vcpus;
    }

    if let Some(desired_memory) = desired_memory {
        // A Some(region) result means a new memory region was added and
        // the devices must be told about it.
        let new_region = self
            .memory_manager
            .lock()
            .unwrap()
            .resize(desired_memory)
            .map_err(Error::MemoryManager)?;

        if let Some(new_region) = &new_region {
            self.device_manager
                .lock()
                .unwrap()
                .update_memory(&new_region)
                .map_err(Error::DeviceManager)?;

            // ACPI hotplug needs an explicit guest notification;
            // virtio-mem handles the change through its own protocol.
            let memory_config = &self.config.lock().unwrap().memory;
            match memory_config.hotplug_method {
                HotplugMethod::Acpi => {
                    self.device_manager
                        .lock()
                        .unwrap()
                        .notify_hotplug(HotPlugNotificationFlags::MEMORY_DEVICES_CHANGED)
                        .map_err(Error::DeviceManager)?;
                }
                HotplugMethod::VirtioMem => {}
            }
        }

        // We update the VM config regardless of the actual guest resize
        // operation result (happened or not), so that if the VM reboots
        // it will be running with the last configure memory size.
        self.config.lock().unwrap().memory.size = desired_memory;
    }

    if let Some(desired_ram_w_balloon) = desired_ram_w_balloon {
        // update the configuration value for the balloon size to ensure
        // a reboot would use the right value.
        self.config.lock().unwrap().memory.balloon_size = self
            .memory_manager
            .lock()
            .unwrap()
            .balloon_resize(desired_ram_w_balloon)
            .map_err(Error::MemoryManager)?;
    }

    Ok(())
}
|
|
|
|
|
2020-06-11 15:27:46 +00:00
|
|
|
/// Device hotplug requires PCI; without the "pci_support" feature this
/// always fails with `NoPciSupport`.
#[cfg(not(feature = "pci_support"))]
pub fn add_device(&mut self, mut _device_cfg: DeviceConfig) -> Result<PciDeviceInfo> {
    Err(Error::NoPciSupport)
}
|
2020-02-27 13:00:46 +00:00
|
|
|
|
2020-06-11 15:27:46 +00:00
|
|
|
/// Hotplugs a VFIO device into the running VM.
///
/// The device is added through the `DeviceManager`, the persistent
/// `VmConfig` is updated so the device is re-created on reboot, and the
/// guest is notified of the PCI topology change.
///
/// Returns the PCI information of the newly added device, or an error
/// from the `DeviceManager`.
#[cfg(feature = "pci_support")]
pub fn add_device(&mut self, mut device_cfg: DeviceConfig) -> Result<PciDeviceInfo> {
    // NOTE: the parameter used to be named `_device_cfg` to mirror the
    // non-PCI stub, but it is used here, so the underscore prefix was
    // dropped (renaming a parameter does not affect Rust callers).
    let pci_device_info = self
        .device_manager
        .lock()
        .unwrap()
        .add_device(&mut device_cfg)
        .map_err(Error::DeviceManager)?;

    // Update VmConfig by adding the new device. This is important to
    // ensure the device would be created in case of a reboot.
    {
        let mut config = self.config.lock().unwrap();
        if let Some(devices) = config.devices.as_mut() {
            devices.push(device_cfg);
        } else {
            config.devices = Some(vec![device_cfg]);
        }
    }

    self.device_manager
        .lock()
        .unwrap()
        .notify_hotplug(HotPlugNotificationFlags::PCI_DEVICES_CHANGED)
        .map_err(Error::DeviceManager)?;

    Ok(pci_device_info)
}
|
|
|
|
|
2020-03-09 10:49:15 +00:00
|
|
|
/// Removes a hotpluggable device identified by `_id` from the VM.
///
/// Only available with the "pci_support" feature; otherwise returns
/// `NoPciSupport`. On success the device is removed from the
/// `DeviceManager`, scrubbed from every device list of the persistent
/// `VmConfig` (so a reboot will not re-create it), and the guest is
/// notified of the PCI change.
pub fn remove_device(&mut self, _id: String) -> Result<()> {
    if cfg!(feature = "pci_support") {
        #[cfg(feature = "pci_support")]
        {
            self.device_manager
                .lock()
                .unwrap()
                .remove_device(_id.clone())
                .map_err(Error::DeviceManager)?;

            // Update VmConfig by removing the device. This is important to
            // ensure the device would not be created in case of a reboot.
            {
                let mut config = self.config.lock().unwrap();

                // The id could belong to any device type, so every list
                // is filtered; retain() is a no-op for non-matching lists.

                // Remove if VFIO device
                if let Some(devices) = config.devices.as_mut() {
                    devices.retain(|dev| dev.id.as_ref() != Some(&_id));
                }

                // Remove if disk device
                if let Some(disks) = config.disks.as_mut() {
                    disks.retain(|dev| dev.id.as_ref() != Some(&_id));
                }

                // Remove if net device
                if let Some(net) = config.net.as_mut() {
                    net.retain(|dev| dev.id.as_ref() != Some(&_id));
                }

                // Remove if pmem device
                if let Some(pmem) = config.pmem.as_mut() {
                    pmem.retain(|dev| dev.id.as_ref() != Some(&_id));
                }

                // Remove if vsock device (at most one vsock device exists)
                if let Some(vsock) = config.vsock.as_ref() {
                    if vsock.id.as_ref() == Some(&_id) {
                        config.vsock = None;
                    }
                }
            }

            self.device_manager
                .lock()
                .unwrap()
                .notify_hotplug(HotPlugNotificationFlags::PCI_DEVICES_CHANGED)
                .map_err(Error::DeviceManager)?;
        }
        Ok(())
    } else {
        Err(Error::NoPciSupport)
    }
}
|
|
|
|
|
2020-06-11 15:27:46 +00:00
|
|
|
/// Disk hotplug requires PCI; without the "pci_support" feature this
/// always fails with `NoPciSupport`.
#[cfg(not(feature = "pci_support"))]
pub fn add_disk(&mut self, mut _disk_cfg: DiskConfig) -> Result<PciDeviceInfo> {
    Err(Error::NoPciSupport)
}
|
2020-03-23 16:20:57 +00:00
|
|
|
|
2020-06-11 15:27:46 +00:00
|
|
|
/// Hotplugs a block device into the running VM, persists it in the
/// `VmConfig` for reboot, and notifies the guest of the PCI change.
/// Returns the PCI information of the new device.
#[cfg(feature = "pci_support")]
pub fn add_disk(&mut self, mut _disk_cfg: DiskConfig) -> Result<PciDeviceInfo> {
    let pci_device_info = self
        .device_manager
        .lock()
        .unwrap()
        .add_disk(&mut _disk_cfg)
        .map_err(Error::DeviceManager)?;

    // Update VmConfig by adding the new device. This is important to
    // ensure the device would be created in case of a reboot.
    {
        let mut config = self.config.lock().unwrap();
        if let Some(disks) = config.disks.as_mut() {
            disks.push(_disk_cfg);
        } else {
            config.disks = Some(vec![_disk_cfg]);
        }
    }

    self.device_manager
        .lock()
        .unwrap()
        .notify_hotplug(HotPlugNotificationFlags::PCI_DEVICES_CHANGED)
        .map_err(Error::DeviceManager)?;

    Ok(pci_device_info)
}
|
|
|
|
|
2020-06-11 15:27:46 +00:00
|
|
|
/// Filesystem hotplug requires PCI; without the "pci_support" feature
/// this always fails with `NoPciSupport`.
#[cfg(not(feature = "pci_support"))]
pub fn add_fs(&mut self, mut _fs_cfg: FsConfig) -> Result<PciDeviceInfo> {
    Err(Error::NoPciSupport)
}
|
2020-04-14 09:21:24 +00:00
|
|
|
|
2020-06-11 15:27:46 +00:00
|
|
|
/// Hotplugs a virtio-fs device into the running VM, persists it in the
/// `VmConfig` for reboot, and notifies the guest of the PCI change.
/// Returns the PCI information of the new device.
#[cfg(feature = "pci_support")]
pub fn add_fs(&mut self, mut _fs_cfg: FsConfig) -> Result<PciDeviceInfo> {
    let pci_device_info = self
        .device_manager
        .lock()
        .unwrap()
        .add_fs(&mut _fs_cfg)
        .map_err(Error::DeviceManager)?;

    // Update VmConfig by adding the new device. This is important to
    // ensure the device would be created in case of a reboot.
    {
        let mut config = self.config.lock().unwrap();
        if let Some(fs_config) = config.fs.as_mut() {
            fs_config.push(_fs_cfg);
        } else {
            config.fs = Some(vec![_fs_cfg]);
        }
    }

    self.device_manager
        .lock()
        .unwrap()
        .notify_hotplug(HotPlugNotificationFlags::PCI_DEVICES_CHANGED)
        .map_err(Error::DeviceManager)?;

    Ok(pci_device_info)
}
|
|
|
|
|
2020-06-11 15:27:46 +00:00
|
|
|
/// Persistent-memory hotplug requires PCI; without the "pci_support"
/// feature this always fails with `NoPciSupport`.
#[cfg(not(feature = "pci_support"))]
pub fn add_pmem(&mut self, mut _pmem_cfg: PmemConfig) -> Result<PciDeviceInfo> {
    Err(Error::NoPciSupport)
}
|
2020-03-23 16:20:57 +00:00
|
|
|
|
2020-06-11 15:27:46 +00:00
|
|
|
/// Hotplugs a virtio-pmem device into the running VM, persists it in
/// the `VmConfig` for reboot, and notifies the guest of the PCI change.
/// Returns the PCI information of the new device.
#[cfg(feature = "pci_support")]
pub fn add_pmem(&mut self, mut _pmem_cfg: PmemConfig) -> Result<PciDeviceInfo> {
    let pci_device_info = self
        .device_manager
        .lock()
        .unwrap()
        .add_pmem(&mut _pmem_cfg)
        .map_err(Error::DeviceManager)?;

    // Update VmConfig by adding the new device. This is important to
    // ensure the device would be created in case of a reboot.
    {
        let mut config = self.config.lock().unwrap();
        if let Some(pmem) = config.pmem.as_mut() {
            pmem.push(_pmem_cfg);
        } else {
            config.pmem = Some(vec![_pmem_cfg]);
        }
    }

    self.device_manager
        .lock()
        .unwrap()
        .notify_hotplug(HotPlugNotificationFlags::PCI_DEVICES_CHANGED)
        .map_err(Error::DeviceManager)?;

    Ok(pci_device_info)
}
|
|
|
|
|
2020-06-11 15:27:46 +00:00
|
|
|
/// Network-device hotplug requires PCI; without the "pci_support"
/// feature this always fails with `NoPciSupport`.
#[cfg(not(feature = "pci_support"))]
pub fn add_net(&mut self, mut _net_cfg: NetConfig) -> Result<PciDeviceInfo> {
    Err(Error::NoPciSupport)
}
|
2020-03-23 16:20:57 +00:00
|
|
|
|
2020-06-11 15:27:46 +00:00
|
|
|
/// Hotplugs a virtio-net device into the running VM.
///
/// The device is created through the `DeviceManager`, recorded in the
/// persistent `VmConfig` (so it survives a reboot), and the guest is
/// told about the PCI topology change. Returns the new device's PCI
/// information.
#[cfg(feature = "pci_support")]
pub fn add_net(&mut self, mut _net_cfg: NetConfig) -> Result<PciDeviceInfo> {
    let info = self
        .device_manager
        .lock()
        .unwrap()
        .add_net(&mut _net_cfg)
        .map_err(Error::DeviceManager)?;

    // Persist the device in the VM configuration so a reboot re-creates
    // it; the net list is created on first use.
    {
        let mut config = self.config.lock().unwrap();
        config.net.get_or_insert_with(Vec::new).push(_net_cfg);
    }

    self.device_manager
        .lock()
        .unwrap()
        .notify_hotplug(HotPlugNotificationFlags::PCI_DEVICES_CHANGED)
        .map_err(Error::DeviceManager)?;

    Ok(info)
}
|
|
|
|
|
2020-06-11 15:27:46 +00:00
|
|
|
/// Vsock hotplug requires PCI; without the "pci_support" feature this
/// always fails with `NoPciSupport`.
#[cfg(not(feature = "pci_support"))]
pub fn add_vsock(&mut self, mut _vsock_cfg: VsockConfig) -> Result<PciDeviceInfo> {
    Err(Error::NoPciSupport)
}
|
2020-04-28 15:02:46 +00:00
|
|
|
|
2020-06-11 15:27:46 +00:00
|
|
|
/// Hotplugs a vsock device into the running VM. Only one vsock device
/// may exist, so the call fails with `TooManyVsockDevices` if one is
/// already configured. On success the device is persisted in the
/// `VmConfig` and the guest is notified of the PCI change.
#[cfg(feature = "pci_support")]
pub fn add_vsock(&mut self, mut _vsock_cfg: VsockConfig) -> Result<PciDeviceInfo> {
    if self.config.lock().unwrap().vsock.is_some() {
        return Err(Error::TooManyVsockDevices);
    }

    let pci_device_info = self
        .device_manager
        .lock()
        .unwrap()
        .add_vsock(&mut _vsock_cfg)
        .map_err(Error::DeviceManager)?;

    // Update VmConfig by adding the new device. This is important to
    // ensure the device would be created in case of a reboot.
    {
        let mut config = self.config.lock().unwrap();
        config.vsock = Some(_vsock_cfg);
    }

    self.device_manager
        .lock()
        .unwrap()
        .notify_hotplug(HotPlugNotificationFlags::PCI_DEVICES_CHANGED)
        .map_err(Error::DeviceManager)?;

    Ok(pci_device_info)
}
|
|
|
|
|
2020-06-24 10:20:13 +00:00
|
|
|
/// Collects the per-device counters from the `DeviceManager`, keyed by
/// device id and counter name.
pub fn counters(&self) -> Result<HashMap<String, HashMap<&'static str, Wrapping<u64>>>> {
    let device_counters = self.device_manager.lock().unwrap().counters();
    Ok(device_counters)
}
|
|
|
|
|
2019-12-05 03:27:40 +00:00
|
|
|
/// Blocking loop run on the "signal_handler" thread: reacts to terminal
/// resize and termination signals until `Signals::close()` ends the
/// iterator.
fn os_signal_handler(signals: Signals, console_input_clone: Arc<Console>, on_tty: bool) {
    for signal in signals.forever() {
        match signal {
            // Terminal was resized: propagate the new geometry to the
            // virtio console device.
            SIGWINCH => {
                let (col, row) = get_win_size();
                console_input_clone.update_console_size(col, row);
            }
            SIGTERM | SIGINT => {
                if on_tty {
                    io::stdin()
                        .lock()
                        .set_canon_mode()
                        .expect("failed to restore terminal mode");
                }
                // Exit code 0 for SIGTERM, 1 otherwise (i.e. SIGINT):
                // the boolean cast yields 0/1.
                std::process::exit((signal != SIGTERM) as i32);
            }
            _ => (),
        }
    }
}
|
|
|
|
|
2019-09-30 09:53:49 +00:00
|
|
|
/// Boots the VM: loads the kernel, creates and configures the boot
/// vCPUs, starts them, and sets up console signal handling.
///
/// Booting a `Paused` VM is treated as a resume. Otherwise the
/// transition to `Running` is validated first and the state is only
/// committed once everything succeeded.
pub fn boot(&mut self) -> Result<()> {
    let current_state = self.get_state()?;
    if current_state == VmState::Paused {
        // A paused VM just needs resuming, not a full boot.
        return self.resume().map_err(Error::Resume);
    }

    let new_state = VmState::Running;
    current_state.valid_transition(new_state)?;

    let entry_point = self.load_kernel()?;

    // create and configure vcpus
    self.cpu_manager
        .lock()
        .unwrap()
        .create_boot_vcpus(entry_point)
        .map_err(Error::CpuManager)?;

    self.configure_system(entry_point)?;

    self.cpu_manager
        .lock()
        .unwrap()
        .start_boot_vcpus()
        .map_err(Error::CpuManager)?;

    if self
        .device_manager
        .lock()
        .unwrap()
        .console()
        .input_enabled()
    {
        let console = self.device_manager.lock().unwrap().console().clone();
        // Watch terminal resize and termination signals on a dedicated
        // thread; the handle is kept so shutdown() can close it.
        let signals = Signals::new(&[SIGWINCH, SIGINT, SIGTERM]);
        match signals {
            Ok(signals) => {
                self.signals = Some(signals.clone());

                let on_tty = self.on_tty;
                self.threads.push(
                    thread::Builder::new()
                        .name("signal_handler".to_string())
                        .spawn(move || Vm::os_signal_handler(signals, console, on_tty))
                        .map_err(Error::SignalHandlerSpawn)?,
                );
            }
            // Failing to register signals is not fatal for the boot.
            Err(e) => error!("Signal not found {}", e),
        }

        if self.on_tty {
            // Raw mode forwards keystrokes to the guest unprocessed;
            // shutdown()/the signal handler restore canonical mode.
            io::stdin()
                .lock()
                .set_raw_mode()
                .map_err(Error::SetTerminalRaw)?;
        }
    }

    let mut state = self.state.try_write().map_err(|_| Error::PoisonedState)?;
    *state = new_state;

    Ok(())
}
|
2019-02-28 14:26:30 +00:00
|
|
|
|
2019-09-24 14:22:35 +00:00
|
|
|
/// Reads up to 64 raw bytes from the host's stdin and forwards them to
/// the guest console, if console input is enabled.
pub fn handle_stdin(&self) -> Result<()> {
    let mut out = [0u8; 64];
    let count = io::stdin()
        .lock()
        .read_raw(&mut out)
        .map_err(Error::Console)?;

    if self
        .device_manager
        .lock()
        .unwrap()
        .console()
        .input_enabled()
    {
        // Only the bytes actually read are queued to the guest.
        self.device_manager
            .lock()
            .unwrap()
            .console()
            .queue_input_bytes(&out[..count])
            .map_err(Error::Console)?;
    }

    Ok(())
}
|
2019-09-25 09:26:11 +00:00
|
|
|
|
|
|
|
/// Gets a thread-safe reference counted pointer to the VM configuration.
/// Cloning the `Arc` only bumps the refcount; the `VmConfig` itself is
/// shared, not copied.
pub fn get_config(&self) -> Arc<Mutex<VmConfig>> {
    Arc::clone(&self.config)
}
|
2019-10-01 08:14:08 +00:00
|
|
|
|
|
|
|
/// Get the VM state. Returns an error if the state is poisoned.
|
|
|
|
pub fn get_state(&self) -> Result<VmState> {
|
|
|
|
self.state
|
|
|
|
.try_read()
|
|
|
|
.map_err(|_| Error::PoisonedState)
|
2019-10-11 12:47:57 +00:00
|
|
|
.map(|state| *state)
|
2019-10-01 08:14:08 +00:00
|
|
|
}
|
2019-02-28 13:16:58 +00:00
|
|
|
}
|
|
|
|
|
2019-11-22 13:54:52 +00:00
|
|
|
impl Pausable for Vm {
    /// Pauses the VM: validates the `Paused` transition, saves the KVM
    /// clock (x86_64 only) so guest time does not jump on resume, then
    /// pauses the vCPUs followed by the devices.
    fn pause(&mut self) -> std::result::Result<(), MigratableError> {
        let mut state = self
            .state
            .try_write()
            .map_err(|e| MigratableError::Pause(anyhow!("Could not get VM state: {}", e)))?;
        let new_state = VmState::Paused;

        state
            .valid_transition(new_state)
            .map_err(|e| MigratableError::Pause(anyhow!("Invalid transition: {:?}", e)))?;

        #[cfg(target_arch = "x86_64")]
        {
            // Snapshot the clock before stopping the vCPUs so resume()
            // can restore guest time from where it left off.
            let mut clock = self
                .vm
                .get_clock()
                .map_err(|e| MigratableError::Pause(anyhow!("Could not get VM clock: {}", e)))?;
            // Reset clock flags.
            clock.flags = 0;
            self.saved_clock = Some(clock);
        }
        self.cpu_manager.lock().unwrap().pause()?;
        self.device_manager.lock().unwrap().pause()?;

        *state = new_state;

        Ok(())
    }

    /// Resumes a paused VM: validates the `Running` transition, resumes
    /// the vCPUs, restores the saved clock (x86_64 only), then resumes
    /// the devices.
    fn resume(&mut self) -> std::result::Result<(), MigratableError> {
        let mut state = self
            .state
            .try_write()
            .map_err(|e| MigratableError::Resume(anyhow!("Could not get VM state: {}", e)))?;
        let new_state = VmState::Running;

        state
            .valid_transition(new_state)
            .map_err(|e| MigratableError::Resume(anyhow!("Invalid transition: {:?}", e)))?;

        self.cpu_manager.lock().unwrap().resume()?;
        #[cfg(target_arch = "x86_64")]
        {
            // saved_clock is only Some if pause() captured it (or a
            // snapshot restore provided one).
            if let Some(clock) = &self.saved_clock {
                self.vm.set_clock(clock).map_err(|e| {
                    MigratableError::Resume(anyhow!("Could not set VM clock: {}", e))
                })?;
            }
        }
        self.device_manager.lock().unwrap().resume()?;

        // And we're back to the Running state.
        *state = new_state;

        Ok(())
    }
}
|
|
|
|
|
2019-05-12 11:53:47 +00:00
|
|
|
/// Serializable top-level data stored in a VM snapshot alongside the
/// per-component (CPU/memory/device manager) snapshots.
#[derive(Serialize, Deserialize)]
pub struct VmSnapshot {
    // Full VM configuration, needed to re-create the VM on restore.
    pub config: Arc<Mutex<VmConfig>>,
    // Saved KVM clock captured at pause time; restored on resume.
    #[cfg(target_arch = "x86_64")]
    pub clock: Option<hypervisor::ClockData>,
}
|
|
|
|
|
2020-02-25 00:09:54 +00:00
|
|
|
/// Identifier of the top-level VM component inside a snapshot.
pub const VM_SNAPSHOT_ID: &str = "vm";
|
2019-05-12 11:53:47 +00:00
|
|
|
impl Snapshottable for Vm {
|
|
|
|
fn id(&self) -> String {
|
|
|
|
VM_SNAPSHOT_ID.to_string()
|
|
|
|
}
|
|
|
|
|
|
|
|
fn snapshot(&self) -> std::result::Result<Snapshot, MigratableError> {
|
|
|
|
let current_state = self.get_state().unwrap();
|
|
|
|
if current_state != VmState::Paused {
|
|
|
|
return Err(MigratableError::Snapshot(anyhow!(
|
|
|
|
"Trying to snapshot while VM is running"
|
|
|
|
)));
|
|
|
|
}
|
|
|
|
|
|
|
|
let mut vm_snapshot = Snapshot::new(VM_SNAPSHOT_ID);
|
|
|
|
let vm_snapshot_data = serde_json::to_vec(&VmSnapshot {
|
|
|
|
config: self.get_config(),
|
2020-06-23 09:39:39 +00:00
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
clock: self.saved_clock,
|
2019-05-12 11:53:47 +00:00
|
|
|
})
|
|
|
|
.map_err(|e| MigratableError::Snapshot(e.into()))?;
|
|
|
|
|
|
|
|
vm_snapshot.add_snapshot(self.cpu_manager.lock().unwrap().snapshot()?);
|
|
|
|
vm_snapshot.add_snapshot(self.memory_manager.lock().unwrap().snapshot()?);
|
|
|
|
vm_snapshot.add_snapshot(self.device_manager.lock().unwrap().snapshot()?);
|
|
|
|
vm_snapshot.add_data_section(SnapshotDataSection {
|
|
|
|
id: format!("{}-section", VM_SNAPSHOT_ID),
|
|
|
|
snapshot: vm_snapshot_data,
|
|
|
|
});
|
|
|
|
|
|
|
|
Ok(vm_snapshot)
|
|
|
|
}
|
|
|
|
|
|
|
|
fn restore(&mut self, snapshot: Snapshot) -> std::result::Result<(), MigratableError> {
|
|
|
|
let current_state = self
|
|
|
|
.get_state()
|
|
|
|
.map_err(|e| MigratableError::Restore(anyhow!("Could not get VM state: {:#?}", e)))?;
|
2020-06-22 13:35:27 +00:00
|
|
|
let new_state = VmState::Paused;
|
2019-05-12 11:53:47 +00:00
|
|
|
current_state.valid_transition(new_state).map_err(|e| {
|
|
|
|
MigratableError::Restore(anyhow!("Could not restore VM state: {:#?}", e))
|
|
|
|
})?;
|
|
|
|
|
|
|
|
if let Some(memory_manager_snapshot) = snapshot.snapshots.get(MEMORY_MANAGER_SNAPSHOT_ID) {
|
|
|
|
self.memory_manager
|
|
|
|
.lock()
|
|
|
|
.unwrap()
|
|
|
|
.restore(*memory_manager_snapshot.clone())?;
|
|
|
|
} else {
|
|
|
|
return Err(MigratableError::Restore(anyhow!(
|
|
|
|
"Missing memory manager snapshot"
|
|
|
|
)));
|
|
|
|
}
|
|
|
|
|
|
|
|
if let Some(device_manager_snapshot) = snapshot.snapshots.get(DEVICE_MANAGER_SNAPSHOT_ID) {
|
|
|
|
self.device_manager
|
|
|
|
.lock()
|
|
|
|
.unwrap()
|
|
|
|
.restore(*device_manager_snapshot.clone())?;
|
|
|
|
} else {
|
|
|
|
return Err(MigratableError::Restore(anyhow!(
|
|
|
|
"Missing device manager snapshot"
|
|
|
|
)));
|
|
|
|
}
|
|
|
|
|
|
|
|
if let Some(cpu_manager_snapshot) = snapshot.snapshots.get(CPU_MANAGER_SNAPSHOT_ID) {
|
|
|
|
self.cpu_manager
|
|
|
|
.lock()
|
|
|
|
.unwrap()
|
|
|
|
.restore(*cpu_manager_snapshot.clone())?;
|
|
|
|
} else {
|
|
|
|
return Err(MigratableError::Restore(anyhow!(
|
|
|
|
"Missing CPU manager snapshot"
|
|
|
|
)));
|
|
|
|
}
|
|
|
|
|
|
|
|
if self
|
|
|
|
.device_manager
|
|
|
|
.lock()
|
|
|
|
.unwrap()
|
|
|
|
.console()
|
|
|
|
.input_enabled()
|
|
|
|
{
|
|
|
|
let console = self.device_manager.lock().unwrap().console().clone();
|
|
|
|
let signals = Signals::new(&[SIGWINCH, SIGINT, SIGTERM]);
|
|
|
|
match signals {
|
|
|
|
Ok(signals) => {
|
|
|
|
self.signals = Some(signals.clone());
|
|
|
|
|
|
|
|
let on_tty = self.on_tty;
|
|
|
|
self.threads.push(
|
|
|
|
thread::Builder::new()
|
|
|
|
.name("signal_handler".to_string())
|
|
|
|
.spawn(move || Vm::os_signal_handler(signals, console, on_tty))
|
|
|
|
.map_err(|e| {
|
|
|
|
MigratableError::Restore(anyhow!(
|
|
|
|
"Could not start console signal thread: {:#?}",
|
|
|
|
e
|
|
|
|
))
|
|
|
|
})?,
|
|
|
|
);
|
|
|
|
}
|
|
|
|
Err(e) => error!("Signal not found {}", e),
|
|
|
|
}
|
|
|
|
|
|
|
|
if self.on_tty {
|
|
|
|
io::stdin().lock().set_raw_mode().map_err(|e| {
|
|
|
|
MigratableError::Restore(anyhow!(
|
|
|
|
"Could not set terminal in raw mode: {:#?}",
|
|
|
|
e
|
|
|
|
))
|
|
|
|
})?;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
let mut state = self
|
|
|
|
.state
|
|
|
|
.try_write()
|
|
|
|
.map_err(|e| MigratableError::Restore(anyhow!("Could not set VM state: {:#?}", e)))?;
|
|
|
|
*state = new_state;
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-02-25 00:09:54 +00:00
|
|
|
impl Transportable for Vm {
|
|
|
|
fn send(
|
|
|
|
&self,
|
|
|
|
snapshot: &Snapshot,
|
|
|
|
destination_url: &str,
|
|
|
|
) -> std::result::Result<(), MigratableError> {
|
|
|
|
let url = Url::parse(destination_url).map_err(|e| {
|
|
|
|
MigratableError::MigrateSend(anyhow!("Could not parse destination URL: {}", e))
|
|
|
|
})?;
|
|
|
|
|
|
|
|
match url.scheme() {
|
|
|
|
"file" => {
|
|
|
|
let mut vm_snapshot_path = url_to_path(&url)?;
|
|
|
|
vm_snapshot_path.push(VM_SNAPSHOT_FILE);
|
|
|
|
|
|
|
|
// Create the snapshot file
|
|
|
|
let mut vm_snapshot_file = OpenOptions::new()
|
|
|
|
.read(true)
|
|
|
|
.write(true)
|
|
|
|
.create_new(true)
|
|
|
|
.open(vm_snapshot_path)
|
|
|
|
.map_err(|e| MigratableError::MigrateSend(e.into()))?;
|
|
|
|
|
|
|
|
// Serialize and write the snapshot
|
|
|
|
let vm_snapshot = serde_json::to_vec(snapshot)
|
|
|
|
.map_err(|e| MigratableError::MigrateSend(e.into()))?;
|
|
|
|
|
|
|
|
vm_snapshot_file
|
|
|
|
.write(&vm_snapshot)
|
|
|
|
.map_err(|e| MigratableError::MigrateSend(e.into()))?;
|
|
|
|
|
|
|
|
// Tell the memory manager to also send/write its own snapshot.
|
|
|
|
if let Some(memory_manager_snapshot) =
|
|
|
|
snapshot.snapshots.get(MEMORY_MANAGER_SNAPSHOT_ID)
|
|
|
|
{
|
|
|
|
self.memory_manager
|
|
|
|
.lock()
|
|
|
|
.unwrap()
|
|
|
|
.send(&*memory_manager_snapshot.clone(), destination_url)?;
|
|
|
|
} else {
|
|
|
|
return Err(MigratableError::Restore(anyhow!(
|
|
|
|
"Missing memory manager snapshot"
|
|
|
|
)));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
_ => {
|
|
|
|
return Err(MigratableError::MigrateSend(anyhow!(
|
|
|
|
"Unsupported VM transport URL scheme: {}",
|
|
|
|
url.scheme()
|
|
|
|
)))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
}
|
2019-11-22 13:54:52 +00:00
|
|
|
// Vm is migratable through the default-composed Pausable/Snapshottable/
// Transportable implementations above; no extra methods are needed.
impl Migratable for Vm {}
|
|
|
|
|
2020-05-12 09:49:12 +00:00
|
|
|
#[cfg(target_arch = "x86_64")]
#[cfg(test)]
mod tests {
    use super::*;

    // Exhaustively checks, for the given starting state, which target
    // states valid_transition() accepts or rejects.
    fn test_vm_state_transitions(state: VmState) {
        match state {
            VmState::Created => {
                // Check the transitions from Created
                assert!(state.valid_transition(VmState::Created).is_err());
                assert!(state.valid_transition(VmState::Running).is_ok());
                assert!(state.valid_transition(VmState::Shutdown).is_err());
                assert!(state.valid_transition(VmState::Paused).is_ok());
            }
            VmState::Running => {
                // Check the transitions from Running
                assert!(state.valid_transition(VmState::Created).is_err());
                assert!(state.valid_transition(VmState::Running).is_err());
                assert!(state.valid_transition(VmState::Shutdown).is_ok());
                assert!(state.valid_transition(VmState::Paused).is_ok());
            }
            VmState::Shutdown => {
                // Check the transitions from Shutdown
                assert!(state.valid_transition(VmState::Created).is_err());
                assert!(state.valid_transition(VmState::Running).is_ok());
                assert!(state.valid_transition(VmState::Shutdown).is_err());
                assert!(state.valid_transition(VmState::Paused).is_err());
            }
            VmState::Paused => {
                // Check the transitions from Paused
                assert!(state.valid_transition(VmState::Created).is_err());
                assert!(state.valid_transition(VmState::Running).is_ok());
                assert!(state.valid_transition(VmState::Shutdown).is_ok());
                assert!(state.valid_transition(VmState::Paused).is_err());
            }
        }
    }

    #[test]
    fn test_vm_created_transitions() {
        test_vm_state_transitions(VmState::Created);
    }

    #[test]
    fn test_vm_running_transitions() {
        test_vm_state_transitions(VmState::Running);
    }

    #[test]
    fn test_vm_shutdown_transitions() {
        test_vm_state_transitions(VmState::Shutdown);
    }

    #[test]
    fn test_vm_paused_transitions() {
        test_vm_state_transitions(VmState::Paused);
    }
}
|
|
|
|
|
2020-05-12 09:49:12 +00:00
|
|
|
#[cfg(target_arch = "x86_64")]
#[test]
pub fn test_vm() {
    use hypervisor::VmExit;
    use vm_memory::{GuestMemory, GuestMemoryRegion};

    // Smoke test for the hypervisor abstraction: run a tiny real-mode
    // guest that adds two register values and writes the resulting digit
    // to the serial port (0x3f8), then halts.
    // This example based on https://lwn.net/Articles/658511/
    let code = [
        0xba, 0xf8, 0x03, /* mov $0x3f8, %dx */
        0x00, 0xd8, /* add %bl, %al */
        0x04, b'0', /* add $'0', %al */
        0xee, /* out %al, (%dx) */
        0xb0, b'\n', /* mov $'\n', %al */
        0xee,  /* out %al, (%dx) */
        0xf4,  /* hlt */
    ];

    // One 4 KiB region, loaded at guest physical address 0x1000.
    let load_addr = GuestAddress(0x1000);
    let mem = GuestMemoryMmap::from_ranges(&[(load_addr, 0x1000)]).unwrap();

    let hyp: Arc<dyn hypervisor::Hypervisor> =
        Arc::new(hypervisor::kvm::KvmHypervisor::new().unwrap());
    let vm = hyp.create_vm().expect("new VM creation failed");

    // Hand every guest memory region over to the hypervisor.
    mem.with_regions(|idx, region| {
        let user_region = vm.make_user_memory_region(
            idx as u32,
            region.start_addr().raw_value(),
            region.len() as u64,
            region.as_ptr() as u64,
            false,
        );

        vm.set_user_memory_region(user_region)
    })
    .expect("Cannot configure guest memory");
    mem.write_slice(&code, load_addr)
        .expect("Writing code to memory failed");

    let cpu = vm.create_vcpu(0).expect("new Vcpu failed");

    // Flat real-mode code segment starting at 0.
    let mut sregs = cpu.get_sregs().expect("get sregs failed");
    sregs.cs.base = 0;
    sregs.cs.selector = 0;
    cpu.set_sregs(&sregs).expect("set sregs failed");

    // Entry point and the two addends (2 + 3 -> '5' on the serial port).
    let mut regs = cpu.get_regs().expect("get regs failed");
    regs.rip = 0x1000;
    regs.rax = 2;
    regs.rbx = 3;
    regs.rflags = 2;
    cpu.set_regs(&regs).expect("set regs failed");

    // Run until the hlt instruction; echo serial output along the way.
    loop {
        match cpu.run().expect("run failed") {
            VmExit::IoOut(addr, data) => {
                println!(
                    "IO out -- addr: {:#x} data [{:?}]",
                    addr,
                    str::from_utf8(&data).unwrap()
                );
            }
            VmExit::Reset => {
                println!("HLT");
                break;
            }
            r => panic!("unexpected exit reason: {:?}", r),
        }
    }
}
|