// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

#[macro_use]
extern crate event_monitor;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
#[macro_use]
extern crate serde_derive;

use crate::api::{
    ApiError, ApiRequest, ApiResponse, ApiResponsePayload, VmInfo, VmReceiveMigrationData,
    VmSendMigrationData, VmmPingResponse,
};
use crate::config::{
    DeviceConfig, DiskConfig, FsConfig, NetConfig, PmemConfig, RestoreConfig, UserDeviceConfig,
    VmConfig, VsockConfig,
};
use crate::migration::{get_vm_snapshot, recv_vm_snapshot};
use crate::seccomp_filters::{get_seccomp_filter, Thread};
use crate::vm::{Error as VmError, Vm, VmState};
use anyhow::anyhow;
use libc::EFD_NONBLOCK;
use memory_manager::MemoryManagerSnapshotData;
use pci::PciBdf;
use seccompiler::{apply_filter, SeccompAction};
use serde::ser::{Serialize, SerializeStruct, Serializer};
use std::fs::File;
use std::io;
use std::io::{Read, Write};
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::os::unix::net::UnixListener;
use std::os::unix::net::UnixStream;
use std::path::PathBuf;
use std::sync::mpsc::{Receiver, RecvError, SendError, Sender};
use std::sync::{Arc, Mutex};
use std::{result, thread};
use thiserror::Error;
use vm_memory::bitmap::AtomicBitmap;
use vm_migration::{protocol::*, Migratable};
use vm_migration::{MigratableError, Pausable, Snapshot, Snapshottable, Transportable};
use vmm_sys_util::eventfd::EventFd;

#[cfg(feature = "acpi")]
mod acpi;
pub mod api;
mod clone3;
pub mod config;
pub mod cpu;
pub mod device_manager;
pub mod device_tree;
pub mod interrupt;
pub mod memory_manager;
pub mod migration;
mod pci_segment;
pub mod seccomp_filters;
mod serial_buffer;
mod serial_manager;
mod sigwinch_listener;
pub mod vm;

type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>;
type GuestRegionMmap = vm_memory::GuestRegionMmap<AtomicBitmap>;

/// Errors associated with VMM management
#[derive(Debug, Error)]
#[allow(clippy::large_enum_variant)]
pub enum Error {
    /// API request receive error
    #[error("Error receiving API request: {0}")]
    ApiRequestRecv(#[source] RecvError),

    /// API response send error
    #[error("Error sending API response: {0}")]
    ApiResponseSend(#[source] SendError<ApiResponse>),

    /// Cannot bind to the UNIX domain socket path
    #[error("Error binding to UNIX domain socket: {0}")]
    Bind(#[source] io::Error),

    /// Cannot clone EventFd.
    #[error("Error cloning EventFd: {0}")]
    EventFdClone(#[source] io::Error),

    /// Cannot create EventFd.
    #[error("Error creating EventFd: {0}")]
    EventFdCreate(#[source] io::Error),

    /// Cannot read from EventFd.
    #[error("Error reading from EventFd: {0}")]
    EventFdRead(#[source] io::Error),

    /// Cannot create epoll context.
    #[error("Error creating epoll context: {0}")]
    Epoll(#[source] io::Error),

    /// Cannot create HTTP thread
    #[error("Error spawning HTTP thread: {0}")]
    HttpThreadSpawn(#[source] io::Error),

    /// Cannot handle the VM STDIN stream
    #[error("Error handling VM stdin: {0:?}")]
    Stdin(VmError),

    /// Cannot handle the VM pty stream
    #[error("Error handling VM pty: {0:?}")]
    Pty(VmError),

    /// Cannot reboot the VM
    #[error("Error rebooting VM: {0:?}")]
    VmReboot(VmError),

    /// Cannot create VMM thread
    #[error("Error spawning VMM thread: {0:?}")]
    VmmThreadSpawn(#[source] io::Error),

    /// Cannot shut the VMM down
    #[error("Error shutting down VMM: {0:?}")]
    VmmShutdown(VmError),

    /// Cannot create seccomp filter
    #[error("Error creating seccomp filter: {0}")]
    CreateSeccompFilter(seccompiler::Error),

    /// Cannot apply seccomp filter
    #[error("Error applying seccomp filter: {0}")]
    ApplySeccompFilter(seccompiler::Error),

    /// Error activating virtio devices
    #[error("Error activating virtio devices: {0:?}")]
    ActivateVirtioDevices(VmError),

    /// Error creating API server
    #[error("Error creating API server: {0:?}")]
    CreateApiServer(micro_http::ServerError),

    /// Error binding API server socket
    #[error("Error creating API server's socket: {0:?}")]
    CreateApiServerSocket(#[source] io::Error),
}

pub type Result<T> = result::Result<T, Error>;
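
// Tokens registered with the VMM epoll context. The explicit discriminants double as the
// u64 event data handed to epoll, and `From<u64>` maps the value back when an event fires.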
#[derive(Debug, Clone, Copy, PartialEq)]
#[repr(u64)]
pub enum EpollDispatch {
    Exit = 0,
    Reset = 1,
    Api = 2,
    ActivateVirtioDevices = 3,
    Unknown,
}

impl From<u64> for EpollDispatch {
    fn from(v: u64) -> Self {
        use EpollDispatch::*;
        match v {
            0 => Exit,
            1 => Reset,
            2 => Api,
            3 => ActivateVirtioDevices,
            _ => Unknown,
        }
    }
}
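
// Thin wrapper around the epoll file descriptor used by the VMM control loop. Keeping the fd
// inside a `File` ensures it is closed when the context is dropped.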
pub struct EpollContext {
    epoll_file: File,
}

impl EpollContext {
    pub fn new() -> result::Result<EpollContext, io::Error> {
        let epoll_fd = epoll::create(true)?;
        // Use 'File' to enforce closing on 'epoll_fd'
        // SAFETY: the epoll_fd returned by epoll::create is valid and owned by us.
        let epoll_file = unsafe { File::from_raw_fd(epoll_fd) };

        Ok(EpollContext { epoll_file })
    }

    fn add_event<T>(&mut self, fd: &T, token: EpollDispatch) -> result::Result<(), io::Error>
    where
        T: AsRawFd,
    {
        let dispatch_index = token as u64;
        epoll::ctl(
            self.epoll_file.as_raw_fd(),
            epoll::ControlOptions::EPOLL_CTL_ADD,
            fd.as_raw_fd(),
            epoll::Event::new(epoll::Events::EPOLLIN, dispatch_index),
        )?;

        Ok(())
    }
}

impl AsRawFd for EpollContext {
    fn as_raw_fd(&self) -> RawFd {
        self.epoll_file.as_raw_fd()
    }
}
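
/// Identifier and PCI B/D/F of a device, as reported back by the device hotplug operations
/// (serialized below with the BDF rendered as a string).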
pub struct PciDeviceInfo {
    pub id: String,
    pub bdf: PciBdf,
}

impl Serialize for PciDeviceInfo {
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let bdf_str = self.bdf.to_string();

        // Serialize the structure.
        let mut state = serializer.serialize_struct("PciDeviceInfo", 2)?;
        state.serialize_field("id", &self.id)?;
        state.serialize_field("bdf", &bdf_str)?;
        state.end()
    }
}
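
/// Spawns the "vmm" thread running the VMM control loop (with the VMM seccomp filter applied),
/// then starts the HTTP API server on either the given UNIX socket path or an existing fd.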
#[allow(clippy::too_many_arguments)]
pub fn start_vmm_thread(
    vmm_version: String,
    http_path: &Option<String>,
    http_fd: Option<RawFd>,
    api_event: EventFd,
    api_sender: Sender<ApiRequest>,
    api_receiver: Receiver<ApiRequest>,
    seccomp_action: &SeccompAction,
    hypervisor: Arc<dyn hypervisor::Hypervisor>,
) -> Result<thread::JoinHandle<Result<()>>> {
    let http_api_event = api_event.try_clone().map_err(Error::EventFdClone)?;

    // Retrieve seccomp filter
    let vmm_seccomp_filter =
        get_seccomp_filter(seccomp_action, Thread::Vmm).map_err(Error::CreateSeccompFilter)?;

    let vmm_seccomp_action = seccomp_action.clone();
    let exit_evt = EventFd::new(EFD_NONBLOCK).map_err(Error::EventFdCreate)?;
    let thread = {
        let exit_evt = exit_evt.try_clone().map_err(Error::EventFdClone)?;
        thread::Builder::new()
            .name("vmm".to_string())
            .spawn(move || {
                // Apply seccomp filter for VMM thread.
                if !vmm_seccomp_filter.is_empty() {
                    apply_filter(&vmm_seccomp_filter).map_err(Error::ApplySeccompFilter)?;
                }

                let mut vmm = Vmm::new(
                    vmm_version.to_string(),
                    api_event,
                    vmm_seccomp_action,
                    hypervisor,
                    exit_evt,
                )?;

                vmm.control_loop(Arc::new(api_receiver))
            })
            .map_err(Error::VmmThreadSpawn)?
    };

    // The VMM thread is started, we can start serving HTTP requests
    if let Some(http_path) = http_path {
        api::start_http_path_thread(
            http_path,
            http_api_event,
            api_sender,
            seccomp_action,
            exit_evt,
        )?;
    } else if let Some(http_fd) = http_fd {
        api::start_http_fd_thread(
            http_fd,
            http_api_event,
            api_sender,
            seccomp_action,
            exit_evt,
        )?;
    }
    Ok(thread)
}
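
// Data sent at the beginning of a live migration: the VM configuration, the CPUID used for
// compatibility checking (KVM on x86_64 only), and the memory manager snapshot data.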
#[derive(Clone, Deserialize, Serialize)]
struct VmMigrationConfig {
    vm_config: Arc<Mutex<VmConfig>>,
    #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
    common_cpuid: hypervisor::CpuId,
    memory_manager_data: MemoryManagerSnapshotData,
}
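
// The VMM instance: owns the control event file descriptors, the optional VM and its
// configuration, plus the handles needed to create or restore a VM (seccomp action, hypervisor).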
pub struct Vmm {
    epoll: EpollContext,
    exit_evt: EventFd,
    reset_evt: EventFd,
    api_evt: EventFd,
    version: String,
    vm: Option<Vm>,
    vm_config: Option<Arc<Mutex<VmConfig>>>,
    seccomp_action: SeccompAction,
    hypervisor: Arc<dyn hypervisor::Hypervisor>,
    activate_evt: EventFd,
}

impl Vmm {
    fn new(
        vmm_version: String,
        api_evt: EventFd,
        seccomp_action: SeccompAction,
        hypervisor: Arc<dyn hypervisor::Hypervisor>,
        exit_evt: EventFd,
    ) -> Result<Self> {
        let mut epoll = EpollContext::new().map_err(Error::Epoll)?;
        let reset_evt = EventFd::new(EFD_NONBLOCK).map_err(Error::EventFdCreate)?;
        let activate_evt = EventFd::new(EFD_NONBLOCK).map_err(Error::EventFdCreate)?;

        epoll
            .add_event(&exit_evt, EpollDispatch::Exit)
            .map_err(Error::Epoll)?;

        epoll
            .add_event(&reset_evt, EpollDispatch::Reset)
            .map_err(Error::Epoll)?;

        epoll
            .add_event(&activate_evt, EpollDispatch::ActivateVirtioDevices)
            .map_err(Error::Epoll)?;

        epoll
            .add_event(&api_evt, EpollDispatch::Api)
            .map_err(Error::Epoll)?;

        Ok(Vmm {
            epoll,
            exit_evt,
            reset_evt,
            api_evt,
            version: vmm_version,
            vm: None,
            vm_config: None,
            seccomp_action,
            hypervisor,
            activate_evt,
        })
    }

    fn vm_create(&mut self, config: Arc<Mutex<VmConfig>>) -> result::Result<(), VmError> {
        // We only store the passed VM config.
        // The VM will be created when being asked to boot it.
        if self.vm_config.is_none() {
            self.vm_config = Some(config);
            Ok(())
        } else {
            Err(VmError::VmAlreadyCreated)
        }
    }

    fn vm_boot(&mut self) -> result::Result<(), VmError> {
        // If we don't have a config, we can not boot a VM.
        if self.vm_config.is_none() {
            return Err(VmError::VmMissingConfig);
        };

        // Create a new VM if we don't have one yet.
        if self.vm.is_none() {
            let exit_evt = self.exit_evt.try_clone().map_err(VmError::EventFdClone)?;
            let reset_evt = self.reset_evt.try_clone().map_err(VmError::EventFdClone)?;
            let activate_evt = self
                .activate_evt
                .try_clone()
                .map_err(VmError::EventFdClone)?;

            if let Some(ref vm_config) = self.vm_config {
                let vm = Vm::new(
                    Arc::clone(vm_config),
                    exit_evt,
                    reset_evt,
                    &self.seccomp_action,
                    self.hypervisor.clone(),
                    activate_evt,
                    None,
                    None,
                    None,
                )?;

                self.vm = Some(vm);
            }
        }

        // Now we can boot the VM.
        if let Some(ref mut vm) = self.vm {
            vm.boot()
        } else {
            Err(VmError::VmNotCreated)
        }
    }

    fn vm_pause(&mut self) -> result::Result<(), VmError> {
        if let Some(ref mut vm) = self.vm {
            vm.pause().map_err(VmError::Pause)
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_resume(&mut self) -> result::Result<(), VmError> {
        if let Some(ref mut vm) = self.vm {
            vm.resume().map_err(VmError::Resume)
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_snapshot(&mut self, destination_url: &str) -> result::Result<(), VmError> {
        if let Some(ref mut vm) = self.vm {
            vm.snapshot()
                .map_err(VmError::Snapshot)
                .and_then(|snapshot| {
                    vm.send(&snapshot, destination_url)
                        .map_err(VmError::SnapshotSend)
                })
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_restore(&mut self, restore_cfg: RestoreConfig) -> result::Result<(), VmError> {
        if self.vm.is_some() || self.vm_config.is_some() {
            return Err(VmError::VmAlreadyCreated);
        }

        let source_url = restore_cfg.source_url.as_path().to_str();
        if source_url.is_none() {
            return Err(VmError::RestoreSourceUrlPathToStr);
        }
        // Safe to unwrap as we checked it was Some(&str).
        let source_url = source_url.unwrap();

        let snapshot = recv_vm_snapshot(source_url).map_err(VmError::Restore)?;
        let vm_snapshot = get_vm_snapshot(&snapshot).map_err(VmError::Restore)?;

        #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
        self.vm_check_cpuid_compatibility(&vm_snapshot.config, &vm_snapshot.common_cpuid)
            .map_err(VmError::Restore)?;

        self.vm_config = Some(Arc::clone(&vm_snapshot.config));

        let exit_evt = self.exit_evt.try_clone().map_err(VmError::EventFdClone)?;
        let reset_evt = self.reset_evt.try_clone().map_err(VmError::EventFdClone)?;
        let activate_evt = self
            .activate_evt
            .try_clone()
            .map_err(VmError::EventFdClone)?;

        let vm = Vm::new_from_snapshot(
            &snapshot,
            exit_evt,
            reset_evt,
            Some(source_url),
            restore_cfg.prefault,
            &self.seccomp_action,
            self.hypervisor.clone(),
            activate_evt,
        )?;
        self.vm = Some(vm);

        // Now we can restore the rest of the VM.
        if let Some(ref mut vm) = self.vm {
            vm.restore(snapshot).map_err(VmError::Restore)
        } else {
            Err(VmError::VmNotCreated)
        }
    }

    fn vm_shutdown(&mut self) -> result::Result<(), VmError> {
        if let Some(ref mut vm) = self.vm.take() {
            vm.shutdown()
        } else {
            Err(VmError::VmNotRunning)
        }
    }
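
    // Reboot by recreating the VM: keep the current config and pty handles, shut the VM down,
    // drain any spurious reset event, then build and boot a fresh Vm instance.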
    fn vm_reboot(&mut self) -> result::Result<(), VmError> {
        // Without ACPI, a reset is equivalent to a shutdown.
        // On AArch64, until ACPI is supported, we simply skip this check and continue with the reset.
        #[cfg(all(target_arch = "x86_64", not(feature = "acpi")))]
        {
            if self.vm.is_some() {
                self.exit_evt.write(1).unwrap();
                return Ok(());
            }
        }

        // First we stop the current VM and create a new one.
        if let Some(ref mut vm) = self.vm {
            let config = vm.get_config();
            let serial_pty = vm.serial_pty();
            let console_pty = vm.console_pty();
            let console_resize_pipe = vm
                .console_resize_pipe()
                .as_ref()
                .map(|pipe| pipe.try_clone().unwrap());
            self.vm_shutdown()?;

            let exit_evt = self.exit_evt.try_clone().map_err(VmError::EventFdClone)?;
            let reset_evt = self.reset_evt.try_clone().map_err(VmError::EventFdClone)?;
            let activate_evt = self
                .activate_evt
                .try_clone()
                .map_err(VmError::EventFdClone)?;

            // The Linux kernel fires off an i8042 reset after doing the ACPI reset so there may be
            // an event sitting in the shared reset_evt. Without doing this we get very early reboots
            // during the boot process.
            if self.reset_evt.read().is_ok() {
                warn!("Spurious second reset event received. Ignoring.");
            }
            self.vm = Some(Vm::new(
                config,
                exit_evt,
                reset_evt,
                &self.seccomp_action,
                self.hypervisor.clone(),
                activate_evt,
                serial_pty,
                console_pty,
                console_resize_pipe,
            )?);
        }

        // Then we start the new VM.
        if let Some(ref mut vm) = self.vm {
            vm.boot()
        } else {
            Err(VmError::VmNotCreated)
        }
    }

    fn vm_info(&self) -> result::Result<VmInfo, VmError> {
        match &self.vm_config {
            Some(config) => {
                let state = match &self.vm {
                    Some(vm) => vm.get_state()?,
                    None => VmState::Created,
                };

                let config = Arc::clone(config);

                let mut memory_actual_size = config.lock().unwrap().memory.total_size();
                if let Some(vm) = &self.vm {
                    memory_actual_size -= vm.balloon_size();
                }

                let device_tree = self.vm.as_ref().map(|vm| vm.device_tree());

                Ok(VmInfo {
                    config,
                    state,
                    memory_actual_size,
                    device_tree,
                })
            }
            None => Err(VmError::VmNotCreated),
        }
    }

    fn vmm_ping(&self) -> VmmPingResponse {
        VmmPingResponse {
            version: self.version.clone(),
        }
    }

    fn vm_delete(&mut self) -> result::Result<(), VmError> {
        if self.vm_config.is_none() {
            return Ok(());
        }

        // If a VM is booted, we first try to shut it down.
        if self.vm.is_some() {
            self.vm_shutdown()?;
        }

        self.vm_config = None;

        event!("vm", "deleted");

        Ok(())
    }

    fn vmm_shutdown(&mut self) -> result::Result<(), VmError> {
        self.vm_delete()?;
        event!("vmm", "shutdown");
        Ok(())
    }

    fn vm_resize(
        &mut self,
        desired_vcpus: Option<u8>,
        desired_ram: Option<u64>,
        desired_balloon: Option<u64>,
    ) -> result::Result<(), VmError> {
        if let Some(ref mut vm) = self.vm {
            if let Err(e) = vm.resize(desired_vcpus, desired_ram, desired_balloon) {
                error!("Error when resizing VM: {:?}", e);
                Err(e)
            } else {
                Ok(())
            }
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_resize_zone(&mut self, id: String, desired_ram: u64) -> result::Result<(), VmError> {
        if let Some(ref mut vm) = self.vm {
            if let Err(e) = vm.resize_zone(id, desired_ram) {
                error!("Error when resizing memory zone: {:?}", e);
                Err(e)
            } else {
                Ok(())
            }
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_add_device(&mut self, device_cfg: DeviceConfig) -> result::Result<Vec<u8>, VmError> {
        if let Some(ref mut vm) = self.vm {
            let info = vm.add_device(device_cfg).map_err(|e| {
                error!("Error when adding new device to the VM: {:?}", e);
                e
            })?;
            serde_json::to_vec(&info).map_err(VmError::SerializeJson)
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_add_user_device(
        &mut self,
        device_cfg: UserDeviceConfig,
    ) -> result::Result<Vec<u8>, VmError> {
        if let Some(ref mut vm) = self.vm {
            let info = vm.add_user_device(device_cfg).map_err(|e| {
                error!("Error when adding new user device to the VM: {:?}", e);
                e
            })?;
            serde_json::to_vec(&info).map_err(VmError::SerializeJson)
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_remove_device(&mut self, id: String) -> result::Result<(), VmError> {
        if let Some(ref mut vm) = self.vm {
            if let Err(e) = vm.remove_device(id) {
                error!("Error when removing device from the VM: {:?}", e);
                Err(e)
            } else {
                Ok(())
            }
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_add_disk(&mut self, disk_cfg: DiskConfig) -> result::Result<Vec<u8>, VmError> {
        if let Some(ref mut vm) = self.vm {
            let info = vm.add_disk(disk_cfg).map_err(|e| {
                error!("Error when adding new disk to the VM: {:?}", e);
                e
            })?;
            serde_json::to_vec(&info).map_err(VmError::SerializeJson)
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_add_fs(&mut self, fs_cfg: FsConfig) -> result::Result<Vec<u8>, VmError> {
        if let Some(ref mut vm) = self.vm {
            let info = vm.add_fs(fs_cfg).map_err(|e| {
                error!("Error when adding new fs to the VM: {:?}", e);
                e
            })?;
            serde_json::to_vec(&info).map_err(VmError::SerializeJson)
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_add_pmem(&mut self, pmem_cfg: PmemConfig) -> result::Result<Vec<u8>, VmError> {
        if let Some(ref mut vm) = self.vm {
            let info = vm.add_pmem(pmem_cfg).map_err(|e| {
                error!("Error when adding new pmem device to the VM: {:?}", e);
                e
            })?;
            serde_json::to_vec(&info).map_err(VmError::SerializeJson)
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_add_net(&mut self, net_cfg: NetConfig) -> result::Result<Vec<u8>, VmError> {
        if let Some(ref mut vm) = self.vm {
            let info = vm.add_net(net_cfg).map_err(|e| {
                error!("Error when adding new network device to the VM: {:?}", e);
                e
            })?;
            serde_json::to_vec(&info).map_err(VmError::SerializeJson)
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_add_vsock(&mut self, vsock_cfg: VsockConfig) -> result::Result<Vec<u8>, VmError> {
        if let Some(ref mut vm) = self.vm {
            let info = vm.add_vsock(vsock_cfg).map_err(|e| {
                error!("Error when adding new vsock device to the VM: {:?}", e);
                e
            })?;
            serde_json::to_vec(&info).map_err(VmError::SerializeJson)
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_counters(&mut self) -> result::Result<Vec<u8>, VmError> {
        if let Some(ref mut vm) = self.vm {
            let info = vm.counters().map_err(|e| {
                error!("Error when getting counters from the VM: {:?}", e);
                e
            })?;
            serde_json::to_vec(&info).map_err(VmError::SerializeJson)
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_power_button(&mut self) -> result::Result<(), VmError> {
        if let Some(ref mut vm) = self.vm {
            vm.power_button()
        } else {
            Err(VmError::VmNotRunning)
        }
    }
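
    // Receive-side handler for the Config command: reads the serialized VmMigrationConfig
    // (VM config plus memory manager data), checks CPUID compatibility where applicable, and
    // creates the destination Vm. Guest memory and device state arrive in later commands.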
    fn vm_receive_config<T>(
        &mut self,
        req: &Request,
        socket: &mut T,
    ) -> std::result::Result<Vm, MigratableError>
    where
        T: Read + Write,
    {
        // Read in config data along with memory manager data
        let mut data: Vec<u8> = Vec::new();
        data.resize_with(req.length() as usize, Default::default);
        socket
            .read_exact(&mut data)
            .map_err(MigratableError::MigrateSocket)?;

        let vm_migration_config: VmMigrationConfig =
            serde_json::from_slice(&data).map_err(|e| {
                MigratableError::MigrateReceive(anyhow!("Error deserialising config: {}", e))
            })?;

        #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
        self.vm_check_cpuid_compatibility(
            &vm_migration_config.vm_config,
            &vm_migration_config.common_cpuid,
        )?;

        let exit_evt = self.exit_evt.try_clone().map_err(|e| {
            MigratableError::MigrateReceive(anyhow!("Error cloning exit EventFd: {}", e))
        })?;
        let reset_evt = self.reset_evt.try_clone().map_err(|e| {
            MigratableError::MigrateReceive(anyhow!("Error cloning reset EventFd: {}", e))
        })?;
        let activate_evt = self.activate_evt.try_clone().map_err(|e| {
            MigratableError::MigrateReceive(anyhow!("Error cloning activate EventFd: {}", e))
        })?;

        self.vm_config = Some(vm_migration_config.vm_config);
        let vm = Vm::new_from_migration(
            self.vm_config.clone().unwrap(),
            exit_evt,
            reset_evt,
            &self.seccomp_action,
            self.hypervisor.clone(),
            activate_evt,
            &vm_migration_config.memory_manager_data,
        )
        .map_err(|e| {
            MigratableError::MigrateReceive(anyhow!("Error creating VM from snapshot: {:?}", e))
        })?;

        Response::ok().write_to(socket)?;

        Ok(vm)
    }

    fn vm_receive_state<T>(
        &mut self,
        req: &Request,
        socket: &mut T,
        mut vm: Vm,
    ) -> std::result::Result<(), MigratableError>
    where
        T: Read + Write,
    {
        // Read in state data
        let mut data: Vec<u8> = Vec::new();
        data.resize_with(req.length() as usize, Default::default);
        socket
            .read_exact(&mut data)
            .map_err(MigratableError::MigrateSocket)?;
        let snapshot: Snapshot = serde_json::from_slice(&data).map_err(|e| {
            MigratableError::MigrateReceive(anyhow!("Error deserialising snapshot: {}", e))
        })?;

        // Restore the VM from the received snapshot
        vm.restore(snapshot).map_err(|e| {
            Response::error().write_to(socket).ok();
            e
        })?;
        self.vm = Some(vm);

        Response::ok().write_to(socket)?;

        Ok(())
    }

    fn vm_receive_memory<T>(
        &mut self,
        req: &Request,
        socket: &mut T,
        vm: &mut Vm,
    ) -> std::result::Result<(), MigratableError>
    where
        T: Read + Write,
    {
        // Read table
        let table = MemoryRangeTable::read_from(socket, req.length())?;

        // And then read the memory itself
        vm.receive_memory_regions(&table, socket).map_err(|e| {
            Response::error().write_to(socket).ok();
            e
        })?;
        Response::ok().write_to(socket)?;
        Ok(())
    }

    fn socket_url_to_path(url: &str) -> result::Result<PathBuf, MigratableError> {
        url.strip_prefix("unix:")
            .ok_or_else(|| {
                MigratableError::MigrateSend(anyhow!("Could not extract path from URL: {}", url))
            })
            .map(|s| s.into())
    }
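
    // Receive side of a live migration: listens on the UNIX socket derived from the receiver
    // URL and processes the protocol commands (Start, Config, Memory, State, Complete, Abandon)
    // until the migration completes or is abandoned.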
    fn vm_receive_migration(
        &mut self,
        receive_data_migration: VmReceiveMigrationData,
    ) -> result::Result<(), MigratableError> {
        info!(
            "Receiving migration: receiver_url = {}",
            receive_data_migration.receiver_url
        );

        let path = Self::socket_url_to_path(&receive_data_migration.receiver_url)?;
        let listener = UnixListener::bind(&path).map_err(|e| {
            MigratableError::MigrateReceive(anyhow!("Error binding to UNIX socket: {}", e))
        })?;
        let (mut socket, _addr) = listener.accept().map_err(|e| {
            MigratableError::MigrateReceive(anyhow!("Error accepting on UNIX socket: {}", e))
        })?;
        std::fs::remove_file(&path).map_err(|e| {
            MigratableError::MigrateReceive(anyhow!("Error unlinking UNIX socket: {}", e))
        })?;

        let mut started = false;
        let mut vm: Option<Vm> = None;

        loop {
            let req = Request::read_from(&mut socket)?;
            match req.command() {
                Command::Invalid => info!("Invalid Command Received"),
                Command::Start => {
                    info!("Start Command Received");
                    started = true;

                    Response::ok().write_to(&mut socket)?;
                }
                Command::Config => {
                    info!("Config Command Received");

                    if !started {
                        warn!("Migration not started yet");
                        Response::error().write_to(&mut socket)?;
                        continue;
                    }
                    vm = Some(self.vm_receive_config(&req, &mut socket)?);
                }
                Command::State => {
                    info!("State Command Received");

                    if !started {
                        warn!("Migration not started yet");
                        Response::error().write_to(&mut socket)?;
                        continue;
                    }
                    if let Some(vm) = vm.take() {
                        self.vm_receive_state(&req, &mut socket, vm)?;
                    } else {
                        warn!("Configuration not sent yet");
                        Response::error().write_to(&mut socket)?;
                    }
                }
                Command::Memory => {
                    info!("Memory Command Received");

                    if !started {
                        warn!("Migration not started yet");
                        Response::error().write_to(&mut socket)?;
                        continue;
                    }
                    if let Some(ref mut vm) = vm.as_mut() {
                        self.vm_receive_memory(&req, &mut socket, vm)?;
                    } else {
                        warn!("Configuration not sent yet");
                        Response::error().write_to(&mut socket)?;
                    }
                }
                Command::Complete => {
                    info!("Complete Command Received");
                    if let Some(ref mut vm) = self.vm.as_mut() {
                        vm.resume()?;
                        Response::ok().write_to(&mut socket)?;
                    } else {
                        warn!("VM not created yet");
                        Response::error().write_to(&mut socket)?;
                    }
                    break;
                }
                Command::Abandon => {
                    info!("Abandon Command Received");
                    self.vm = None;
                    self.vm_config = None;
                    Response::ok().write_to(&mut socket).ok();
                    break;
                }
            }
        }

        Ok(())
    }

    // Returns true if there were dirty pages to send
    fn vm_maybe_send_dirty_pages<T>(
        vm: &mut Vm,
        socket: &mut T,
    ) -> result::Result<bool, MigratableError>
    where
        T: Read + Write,
    {
        // Send (dirty) memory table
        let table = vm.dirty_log()?;

        // But if there are no regions go straight to pause
        if table.regions().is_empty() {
            return Ok(false);
        }

        Request::memory(table.length()).write_to(socket).unwrap();
        table.write_to(socket)?;
        // And then the memory itself
        vm.send_memory_regions(&table, socket)?;
        let res = Response::read_from(socket)?;
        if res.status() != Status::Ok {
            warn!("Error during dirty memory migration");
            Request::abandon().write_to(socket)?;
            Response::read_from(socket).ok();
            return Err(MigratableError::MigrateSend(anyhow!(
                "Error during dirty memory migration"
            )));
        }

        Ok(true)
    }
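
    // Send side of a live migration: connects to the destination socket, then sends Start, the
    // config, a full copy of guest memory, up to MAX_DIRTY_MIGRATIONS passes of dirty pages,
    // pauses the VM, sends a final dirty pass and the device state snapshot, and completes.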
    fn send_migration(
        vm: &mut Vm,
        #[cfg(all(feature = "kvm", target_arch = "x86_64"))] hypervisor: Arc<
            dyn hypervisor::Hypervisor,
        >,
        send_data_migration: VmSendMigrationData,
    ) -> result::Result<(), MigratableError> {
        let path = Self::socket_url_to_path(&send_data_migration.destination_url)?;
        let mut socket = UnixStream::connect(&path).map_err(|e| {
            MigratableError::MigrateSend(anyhow!("Error connecting to UNIX socket: {}", e))
        })?;

        // Start the migration
        Request::start().write_to(&mut socket)?;
        let res = Response::read_from(&mut socket)?;
        if res.status() != Status::Ok {
            warn!("Error starting migration");
            Request::abandon().write_to(&mut socket)?;
            Response::read_from(&mut socket).ok();
            return Err(MigratableError::MigrateSend(anyhow!(
                "Error starting migration"
            )));
        }

        // Send config
        let vm_config = vm.get_config();
        #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
        let common_cpuid = {
            #[cfg(feature = "tdx")]
            let tdx_enabled = vm_config.lock().unwrap().tdx.is_some();
            let phys_bits = vm::physical_bits(vm_config.lock().unwrap().cpus.max_phys_bits);
            arch::generate_common_cpuid(
                hypervisor,
                None,
                None,
                phys_bits,
                vm_config.lock().unwrap().cpus.kvm_hyperv,
                #[cfg(feature = "tdx")]
                tdx_enabled,
            )
            .map_err(|e| {
                MigratableError::MigrateReceive(anyhow!("Error generating common cpuid: {:?}", e))
            })?
        };

        let vm_migration_config = VmMigrationConfig {
            vm_config,
            #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
            common_cpuid,
            memory_manager_data: vm.memory_manager_data(),
        };
        let config_data = serde_json::to_vec(&vm_migration_config).unwrap();
        Request::config(config_data.len() as u64).write_to(&mut socket)?;
        socket
            .write_all(&config_data)
            .map_err(MigratableError::MigrateSocket)?;
        let res = Response::read_from(&mut socket)?;
        if res.status() != Status::Ok {
            warn!("Error during config migration");
            Request::abandon().write_to(&mut socket)?;
            Response::read_from(&mut socket).ok();
            return Err(MigratableError::MigrateSend(anyhow!(
                "Error during config migration"
            )));
        }

        // Start logging dirty pages
        vm.start_dirty_log()?;

        // Send memory table
        let table = vm.memory_range_table()?;
        Request::memory(table.length())
            .write_to(&mut socket)
            .unwrap();
        table.write_to(&mut socket)?;
        // And then the memory itself
        vm.send_memory_regions(&table, &mut socket)?;
        let res = Response::read_from(&mut socket)?;
        if res.status() != Status::Ok {
            warn!("Error during memory migration");
            Request::abandon().write_to(&mut socket)?;
            Response::read_from(&mut socket).ok();
            return Err(MigratableError::MigrateSend(anyhow!(
                "Error during memory migration"
            )));
        }

        // Try at most 5 passes of dirty memory sending
        const MAX_DIRTY_MIGRATIONS: usize = 5;
        for i in 0..MAX_DIRTY_MIGRATIONS {
            info!("Dirty memory migration {} of {}", i, MAX_DIRTY_MIGRATIONS);
            if !Self::vm_maybe_send_dirty_pages(vm, &mut socket)? {
                break;
            }
        }

        // Now pause VM
        vm.pause()?;

        // Send last batch of dirty pages
        Self::vm_maybe_send_dirty_pages(vm, &mut socket)?;

        // Capture snapshot and send it
        let vm_snapshot = vm.snapshot()?;
        let snapshot_data = serde_json::to_vec(&vm_snapshot).unwrap();
        Request::state(snapshot_data.len() as u64).write_to(&mut socket)?;
        socket
            .write_all(&snapshot_data)
            .map_err(MigratableError::MigrateSocket)?;
        let res = Response::read_from(&mut socket)?;
        if res.status() != Status::Ok {
            warn!("Error during state migration");
            Request::abandon().write_to(&mut socket)?;
            Response::read_from(&mut socket).ok();
            return Err(MigratableError::MigrateSend(anyhow!(
                "Error during state migration"
            )));
        }

        // Complete the migration
        Request::complete().write_to(&mut socket)?;
        let res = Response::read_from(&mut socket)?;
        if res.status() != Status::Ok {
            warn!("Error completing migration");
            Request::abandon().write_to(&mut socket)?;
            Response::read_from(&mut socket).ok();
            return Err(MigratableError::MigrateSend(anyhow!(
                "Error completing migration"
            )));
        }
        info!("Migration complete");

        // Let every Migratable object know about the migration being complete
        vm.complete_migration()?;

        // Stop logging dirty pages
        vm.stop_dirty_log()?;

        Ok(())
    }

    fn vm_send_migration(
        &mut self,
        send_data_migration: VmSendMigrationData,
    ) -> result::Result<(), MigratableError> {
        info!(
            "Sending migration: destination_url = {}",
            send_data_migration.destination_url
        );
        if let Some(vm) = self.vm.as_mut() {
            Self::send_migration(
                vm,
                #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
                self.hypervisor.clone(),
                send_data_migration,
            )
            .map_err(|migration_err| {
                error!("Migration failed: {:?}", migration_err);

                // Stop logging dirty pages
                if let Err(e) = vm.stop_dirty_log() {
                    return e;
                }

                if vm.get_state().unwrap() == VmState::Paused {
                    if let Err(e) = vm.resume() {
                        return e;
                    }
                }

                migration_err
            })?;

            // Shutdown the VM after the migration succeeded
            self.exit_evt.write(1).map_err(|e| {
                MigratableError::MigrateSend(anyhow!(
                    "Failed shutting down the VM after migration: {:?}",
                    e
                ))
            })
        } else {
            Err(MigratableError::MigrateSend(anyhow!("VM is not running")))
        }
    }
|
|
|
|
|
2021-07-20 22:10:44 +00:00
|
|
|
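    // Used on the receiving side of a migration: regenerate the CPUID this host
    // would offer for the same VmConfig and compare its feature leaves with the
    // CPUID captured on the source.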
#[cfg(all(feature = "kvm", target_arch = "x86_64"))]
|
|
|
|
fn vm_check_cpuid_compatibility(
|
|
|
|
&self,
|
|
|
|
src_vm_config: &Arc<Mutex<VmConfig>>,
|
|
|
|
src_vm_cpuid: &hypervisor::CpuId,
|
|
|
|
) -> result::Result<(), MigratableError> {
|
|
|
|
// We check the `CPUID` compatibility of between the source vm and destination, which is
|
|
|
|
// mostly about feature compatibility and "topology/sgx" leaves are not relevant.
|
|
|
|
let dest_cpuid = &{
|
|
|
|
let vm_config = &src_vm_config.lock().unwrap();
|
|
|
|
|
|
|
|
#[cfg(feature = "tdx")]
|
|
|
|
let tdx_enabled = vm_config.tdx.is_some();
|
2021-10-01 09:54:27 +00:00
|
|
|
let phys_bits = vm::physical_bits(vm_config.cpus.max_phys_bits);
|
2021-07-20 22:10:44 +00:00
|
|
|
arch::generate_common_cpuid(
|
|
|
|
self.hypervisor.clone(),
|
|
|
|
None,
|
|
|
|
None,
|
|
|
|
phys_bits,
|
|
|
|
vm_config.cpus.kvm_hyperv,
|
|
|
|
#[cfg(feature = "tdx")]
|
|
|
|
tdx_enabled,
|
|
|
|
)
|
|
|
|
.map_err(|e| {
|
|
|
|
MigratableError::MigrateReceive(anyhow!("Error generating common cpuid: {:?}", e))
|
|
|
|
})?
|
|
|
|
};
|
|
|
|
arch::CpuidFeatureEntry::check_cpuid_compatibility(src_vm_cpuid, dest_cpuid).map_err(|e| {
|
|
|
|
MigratableError::MigrateReceive(anyhow!(
|
|
|
|
"Error checking cpu feature compatibility': {:?}",
|
|
|
|
e
|
|
|
|
))
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2019-10-08 13:53:39 +00:00
|
|
|
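    // Main VMM event loop: blocks on epoll and dispatches VM exit, reset and
    // virtio-activation eventfds as well as API requests arriving on the
    // api_receiver channel.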
    fn control_loop(&mut self, api_receiver: Arc<Receiver<ApiRequest>>) -> Result<()> {
        const EPOLL_EVENTS_LEN: usize = 100;

        let mut events = vec![epoll::Event::new(epoll::Events::empty(), 0); EPOLL_EVENTS_LEN];
        let epoll_fd = self.epoll.as_raw_fd();

        'outer: loop {
            let num_events = match epoll::wait(epoll_fd, -1, &mut events[..]) {
                Ok(res) => res,
                Err(e) => {
                    if e.kind() == io::ErrorKind::Interrupted {
                        // The epoll_wait() syscall documentation makes it clear
                        // that the wait can be interrupted before any of the
                        // requested events occurred or the timeout expired. In
                        // that case epoll_wait() fails with EINTR, which is not
                        // a real error condition: the appropriate reaction is
                        // simply to retry the wait.
                        continue;
                    }
                    return Err(Error::Epoll(e));
                }
            };

            for event in events.iter().take(num_events) {
                let dispatch_event: EpollDispatch = event.data.into();
                match dispatch_event {
                    EpollDispatch::Unknown => {
                        let event = event.data;
                        warn!("Unknown VMM loop event: {}", event);
                    }
                    EpollDispatch::Exit => {
                        info!("VM exit event");
                        // Consume the event.
                        self.exit_evt.read().map_err(Error::EventFdRead)?;
                        self.vmm_shutdown().map_err(Error::VmmShutdown)?;

                        break 'outer;
                    }
                    EpollDispatch::Reset => {
                        info!("VM reset event");
                        // Consume the event.
                        self.reset_evt.read().map_err(Error::EventFdRead)?;
                        self.vm_reboot().map_err(Error::VmReboot)?;
                    }
                    EpollDispatch::ActivateVirtioDevices => {
                        if let Some(ref vm) = self.vm {
                            let count = self.activate_evt.read().map_err(Error::EventFdRead)?;
                            info!(
                                "Trying to activate pending virtio devices: count = {}",
                                count
                            );
                            vm.activate_virtio_devices()
                                .map_err(Error::ActivateVirtioDevices)?;
                        }
                    }
                    EpollDispatch::Api => {
                        // Consume the event.
                        self.api_evt.read().map_err(Error::EventFdRead)?;

                        // Read from the API receiver channel
                        let api_request = api_receiver.recv().map_err(Error::ApiRequestRecv)?;

                        info!("API request event: {:?}", api_request);
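                        // Every ApiRequest carries a response sender: the handler
                        // invokes the matching Vmm method, maps the result into
                        // an ApiResponsePayload (or ApiError), and sends it back
                        // over that channel to whoever issued the request.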
                        match api_request {
                            ApiRequest::VmCreate(config, sender) => {
                                let response = self
                                    .vm_create(config)
                                    .map_err(ApiError::VmCreate)
                                    .map(|_| ApiResponsePayload::Empty);

                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmDelete(sender) => {
                                let response = self
                                    .vm_delete()
                                    .map_err(ApiError::VmDelete)
                                    .map(|_| ApiResponsePayload::Empty);

                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmBoot(sender) => {
                                let response = self
                                    .vm_boot()
                                    .map_err(ApiError::VmBoot)
                                    .map(|_| ApiResponsePayload::Empty);

                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmShutdown(sender) => {
                                let response = self
                                    .vm_shutdown()
                                    .map_err(ApiError::VmShutdown)
                                    .map(|_| ApiResponsePayload::Empty);

                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmReboot(sender) => {
                                let response = self
                                    .vm_reboot()
                                    .map_err(ApiError::VmReboot)
                                    .map(|_| ApiResponsePayload::Empty);

                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmInfo(sender) => {
                                let response = self
                                    .vm_info()
                                    .map_err(ApiError::VmInfo)
                                    .map(ApiResponsePayload::VmInfo);

                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmmPing(sender) => {
                                let response = ApiResponsePayload::VmmPing(self.vmm_ping());

                                sender.send(Ok(response)).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmPause(sender) => {
                                let response = self
                                    .vm_pause()
                                    .map_err(ApiError::VmPause)
                                    .map(|_| ApiResponsePayload::Empty);

                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmResume(sender) => {
                                let response = self
                                    .vm_resume()
                                    .map_err(ApiError::VmResume)
                                    .map(|_| ApiResponsePayload::Empty);

                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmSnapshot(snapshot_data, sender) => {
                                let response = self
                                    .vm_snapshot(&snapshot_data.destination_url)
                                    .map_err(ApiError::VmSnapshot)
                                    .map(|_| ApiResponsePayload::Empty);

                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmRestore(restore_data, sender) => {
                                let response = self
                                    .vm_restore(restore_data.as_ref().clone())
                                    .map_err(ApiError::VmRestore)
                                    .map(|_| ApiResponsePayload::Empty);

                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmmShutdown(sender) => {
                                let response = self
                                    .vmm_shutdown()
                                    .map_err(ApiError::VmmShutdown)
                                    .map(|_| ApiResponsePayload::Empty);

                                sender.send(response).map_err(Error::ApiResponseSend)?;

                                break 'outer;
                            }
                            ApiRequest::VmResize(resize_data, sender) => {
                                let response = self
                                    .vm_resize(
                                        resize_data.desired_vcpus,
                                        resize_data.desired_ram,
                                        resize_data.desired_balloon,
                                    )
                                    .map_err(ApiError::VmResize)
                                    .map(|_| ApiResponsePayload::Empty);
                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmResizeZone(resize_zone_data, sender) => {
                                let response = self
                                    .vm_resize_zone(
                                        resize_zone_data.id.clone(),
                                        resize_zone_data.desired_ram,
                                    )
                                    .map_err(ApiError::VmResizeZone)
                                    .map(|_| ApiResponsePayload::Empty);
                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmAddDevice(add_device_data, sender) => {
                                let response = self
                                    .vm_add_device(add_device_data.as_ref().clone())
                                    .map_err(ApiError::VmAddDevice)
                                    .map(ApiResponsePayload::VmAction);
                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmAddUserDevice(add_device_data, sender) => {
                                let response = self
                                    .vm_add_user_device(add_device_data.as_ref().clone())
                                    .map_err(ApiError::VmAddUserDevice)
                                    .map(ApiResponsePayload::VmAction);
                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmRemoveDevice(remove_device_data, sender) => {
                                let response = self
                                    .vm_remove_device(remove_device_data.id.clone())
                                    .map_err(ApiError::VmRemoveDevice)
                                    .map(|_| ApiResponsePayload::Empty);
                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmAddDisk(add_disk_data, sender) => {
                                let response = self
                                    .vm_add_disk(add_disk_data.as_ref().clone())
                                    .map_err(ApiError::VmAddDisk)
                                    .map(ApiResponsePayload::VmAction);
                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmAddFs(add_fs_data, sender) => {
                                let response = self
                                    .vm_add_fs(add_fs_data.as_ref().clone())
                                    .map_err(ApiError::VmAddFs)
                                    .map(ApiResponsePayload::VmAction);
                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmAddPmem(add_pmem_data, sender) => {
                                let response = self
                                    .vm_add_pmem(add_pmem_data.as_ref().clone())
                                    .map_err(ApiError::VmAddPmem)
                                    .map(ApiResponsePayload::VmAction);
                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmAddNet(add_net_data, sender) => {
                                let response = self
                                    .vm_add_net(add_net_data.as_ref().clone())
                                    .map_err(ApiError::VmAddNet)
                                    .map(ApiResponsePayload::VmAction);
                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmAddVsock(add_vsock_data, sender) => {
                                let response = self
                                    .vm_add_vsock(add_vsock_data.as_ref().clone())
                                    .map_err(ApiError::VmAddVsock)
                                    .map(ApiResponsePayload::VmAction);
                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmCounters(sender) => {
                                let response = self
                                    .vm_counters()
                                    .map_err(ApiError::VmInfo)
                                    .map(ApiResponsePayload::VmAction);

                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmReceiveMigration(receive_migration_data, sender) => {
                                let response = self
                                    .vm_receive_migration(receive_migration_data.as_ref().clone())
                                    .map_err(ApiError::VmReceiveMigration)
                                    .map(|_| ApiResponsePayload::Empty);
                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmSendMigration(send_migration_data, sender) => {
                                let response = self
                                    .vm_send_migration(send_migration_data.as_ref().clone())
                                    .map_err(ApiError::VmSendMigration)
                                    .map(|_| ApiResponsePayload::Empty);
                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmPowerButton(sender) => {
                                let response = self
                                    .vm_power_button()
                                    .map_err(ApiError::VmPowerButton)
                                    .map(|_| ApiResponsePayload::Empty);

                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                        }
                    }
                }
            }
        }

        Ok(())
    }
}

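// Section identifiers under which the CPU, memory and device managers store
// their state inside a VM snapshot; the restore path looks these same IDs up.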
const CPU_MANAGER_SNAPSHOT_ID: &str = "cpu-manager";
const MEMORY_MANAGER_SNAPSHOT_ID: &str = "memory-manager";
const DEVICE_MANAGER_SNAPSHOT_ID: &str = "device-manager";