From 4ca18c082e13268ab6e7d859dbd3e8c7b9b53793 Mon Sep 17 00:00:00 2001
From: Alyssa Ross
Date: Fri, 5 Jan 2024 15:08:53 +0100
Subject: [PATCH] vmm: use trait objects for API actions
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Uses of the old ApiRequest enum conflated two different concerns:
identifying an API request endpoint, and storing the data for an API
request. This led to ApiRequest values being passed around with junk
data just to communicate a request type, which in turn forced all API
request body types to implement Default. In some cases that doesn't
make any sense — what's the "default" path for a vhost-user socket?
These nonsensical Default values have led to tests that rely on
nonsensical data, which is an impediment to adding better validation
for these types.

Rather than representing API request types as an enum, which has to
carry the associated body data everywhere it's used, it makes more
sense to represent them as trait objects. These can have an associated
type for the request body, which makes it possible to pass API request
types and data around as siblings in a type-safe way, without forcing
them into a single value even where that doesn't make sense. Trait
objects also give us dynamic dispatch, which lets us get rid of
several large match blocks.

To keep it possible to fuzz the HTTP API, all the Vmm methods called
by the HTTP API are pulled out into a trait, so the fuzzer can provide
its own stub implementation of the VMM.
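As a sketch (mirroring the full definition in vmm/src/api/mod.rs
below), each endpoint becomes a unit struct implementing an ApiAction
trait, whose associated types describe that endpoint's request and
response bodies:

    pub trait ApiAction: Send + Sync {
        type RequestBody: Send + Sync + Sized;
        type ResponseBody: Send + Sized;

        // Package a request body and a response channel into an
        // ApiRequest (now a boxed closure run by the VMM event loop).
        fn request(
            &self,
            body: Self::RequestBody,
            response_sender: Sender<ApiResponse>,
        ) -> ApiRequest;

        // Send the request through the API channel and wait for the
        // typed response.
        fn send(
            &self,
            api_evt: EventFd,
            api_sender: Sender<ApiRequest>,
            data: Self::RequestBody,
        ) -> ApiResult<Self::ResponseBody>;
    }

So, for example, VmBoot has () as its request body while AddDisk takes
a DiskConfig, and callers write VmBoot.send(evt, sender, ()) instead of
constructing an enum variant with placeholder data.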
Signed-off-by: Alyssa Ross
---
 fuzz/Cargo.lock                   |    1 +
 fuzz/Cargo.toml                   |    1 +
 fuzz/fuzz_targets/http_api.rs     |  286 ++++--
 src/main.rs                       |   31 +-
 vmm/src/api/dbus/mod.rs           |   84 +-
 vmm/src/api/http/http_endpoint.rs |  302 +++---
 vmm/src/api/http/mod.rs           |   64 +-
 vmm/src/api/mod.rs                | 1424 +++++++++++++++++++++--------
 vmm/src/lib.rs                    | 1257 +++++++++++--------------
 9 files changed, 2041 insertions(+), 1409 deletions(-)

diff --git a/fuzz/Cargo.lock b/fuzz/Cargo.lock
index ba5e2ba76..543426798 100644
--- a/fuzz/Cargo.lock
+++ b/fuzz/Cargo.lock
@@ -252,6 +252,7 @@ dependencies = [
  "virtio-queue",
  "vm-device",
  "vm-memory",
+ "vm-migration",
  "vm-virtio",
  "vmm",
  "vmm-sys-util 0.11.2",
diff --git a/fuzz/Cargo.toml b/fuzz/Cargo.toml
index 640e79c76..0d6341588 100644
--- a/fuzz/Cargo.toml
+++ b/fuzz/Cargo.toml
@@ -27,6 +27,7 @@ virtio-queue = "0.10.0"
 vmm = { path = "../vmm" }
 vmm-sys-util = "0.11.2"
 vm-memory = "0.13.1"
+vm-migration = { path = "../vm-migration" }
 vm-device = { path = "../vm-device" }
 vm-virtio = { path = "../vm-virtio" }
diff --git a/fuzz/fuzz_targets/http_api.rs b/fuzz/fuzz_targets/http_api.rs
index f68201b6b..a485c495d 100644
--- a/fuzz/fuzz_targets/http_api.rs
+++ b/fuzz/fuzz_targets/http_api.rs
@@ -7,9 +7,18 @@ use libfuzzer_sys::fuzz_target;
 use micro_http::Request;
 use once_cell::sync::Lazy;
 use std::os::unix::io::AsRawFd;
+use std::path::PathBuf;
 use std::sync::mpsc::{channel, Receiver};
+use std::sync::{Arc, Mutex};
 use std::thread;
-use vmm::api::{http::*, ApiRequest, ApiResponsePayload};
+use vm_migration::MigratableError;
+use vmm::api::{
+    http::*, ApiRequest, RequestHandler, VmInfoResponse, VmReceiveMigrationData,
+    VmSendMigrationData, VmmPingResponse,
+};
+use vmm::config::RestoreConfig;
+use vmm::vm::{Error as VmError, VmState};
+use vmm::vm_config::*;
 use vmm::{EpollContext, EpollDispatch};
 use vmm_sys_util::eventfd::EventFd;

@@ -69,6 +78,197 @@ fn generate_request(bytes: &[u8]) -> Option<Request> {
     Request::try_from(&request, None).ok()
 }

+struct StubApiRequestHandler;
+
+impl RequestHandler for StubApiRequestHandler {
+    fn vm_create(&mut self, _: Arc<Mutex<VmConfig>>) -> Result<(), VmError> {
+        Ok(())
+    }
+
+    fn vm_boot(&mut self) -> Result<(), VmError> {
+        Ok(())
+    }
+
+    fn vm_pause(&mut self) -> Result<(), VmError> {
+        Ok(())
+    }
+
+    fn vm_resume(&mut self) -> Result<(), VmError> {
+        Ok(())
+    }
+
+    fn vm_snapshot(&mut self, _: &str) -> Result<(), VmError> {
+        Ok(())
+    }
+
+    fn vm_restore(&mut self, _: RestoreConfig) -> Result<(), VmError> {
+        Ok(())
+    }
+
+    #[cfg(all(target_arch = "x86_64", feature = "guest_debug"))]
+    fn vm_coredump(&mut self, _: &str) -> Result<(), VmError> {
+        Ok(())
+    }
+
+    fn vm_shutdown(&mut self) -> Result<(), VmError> {
+        Ok(())
+    }
+
+    fn vm_reboot(&mut self) -> Result<(), VmError> {
+        Ok(())
+    }
+
+    fn vm_info(&self) -> Result<VmInfoResponse, VmError> {
+        Ok(VmInfoResponse {
+            config: Arc::new(Mutex::new(VmConfig {
+                cpus: CpusConfig {
+                    boot_vcpus: 1,
+                    max_vcpus: 1,
+                    topology: None,
+                    kvm_hyperv: false,
+                    max_phys_bits: 46,
+                    affinity: None,
+                    features: CpuFeatures::default(),
+                },
+                memory: MemoryConfig {
+                    size: 536_870_912,
+                    mergeable: false,
+                    hotplug_method: HotplugMethod::Acpi,
+                    hotplug_size: None,
+                    hotplugged_size: None,
+                    shared: false,
+                    hugepages: false,
+                    hugepage_size: None,
+                    prefault: false,
+                    zones: None,
+                    thp: true,
+                },
+                payload: Some(PayloadConfig {
+                    kernel: Some(PathBuf::from("/path/to/kernel")),
+                    ..Default::default()
+                }),
+                rate_limit_groups: None,
+                disks: None,
+                net: None,
+                rng: RngConfig {
+                    src: PathBuf::from("/dev/urandom"),
+                    iommu: false,
+                },
+                balloon: None,
+                fs: None,
+                pmem: None,
+                serial: ConsoleConfig {
+                    file: None,
+                    mode: ConsoleOutputMode::Null,
+                    iommu: false,
+                    socket: None,
+                },
+                console: ConsoleConfig {
+                    file: None,
+                    mode: ConsoleOutputMode::Tty,
+                    iommu: false,
+                    socket: None,
+                },
+                devices: None,
+                user_devices: None,
+                vdpa: None,
+                vsock: None,
+                pvpanic: false,
+                iommu: false,
+                #[cfg(target_arch = "x86_64")]
+                sgx_epc: None,
+                numa: None,
+                watchdog: false,
+                #[cfg(feature = "guest_debug")]
+                gdb: false,
+                platform: None,
+                tpm: None,
+                preserved_fds: None,
+            })),
+            state: VmState::Running,
+            memory_actual_size: 0,
+            device_tree: None,
+        })
+    }
+
+    fn vmm_ping(&self) -> VmmPingResponse {
+        VmmPingResponse {
+            build_version: String::new(),
+            version: String::new(),
+            pid: 0,
+            features: Vec::new(),
+        }
+    }
+
+    fn vm_delete(&mut self) -> Result<(), VmError> {
+        Ok(())
+    }
+
+    fn vmm_shutdown(&mut self) -> Result<(), VmError> {
+        Ok(())
+    }
+
+    fn vm_resize(
+        &mut self,
+        _: Option<u8>,
+        _: Option<u64>,
+        _: Option<u64>,
+    ) -> Result<(), VmError> {
+        Ok(())
+    }
+
+    fn vm_resize_zone(&mut self, _: String, _: u64) -> Result<(), VmError> {
+        Ok(())
+    }
+
+    fn vm_add_device(&mut self, _: DeviceConfig) -> Result<Option<Vec<u8>>, VmError> {
+        Ok(None)
+    }
+
+    fn vm_add_user_device(&mut self, _: UserDeviceConfig) -> Result<Option<Vec<u8>>, VmError> {
+        Ok(None)
+    }
+
+    fn vm_remove_device(&mut self, _: String) -> Result<(), VmError> {
+        Ok(())
+    }
+
+    fn vm_add_disk(&mut self, _: DiskConfig) -> Result<Option<Vec<u8>>, VmError> {
+        Ok(None)
+    }
+
+    fn vm_add_fs(&mut self, _: FsConfig) -> Result<Option<Vec<u8>>, VmError> {
+        Ok(None)
+    }
+
+    fn vm_add_pmem(&mut self, _: PmemConfig) -> Result<Option<Vec<u8>>, VmError> {
+        Ok(None)
+    }
+
+    fn vm_add_net(&mut self, _: NetConfig) -> Result<Option<Vec<u8>>, VmError> {
+        Ok(None)
+    }
+
+    fn vm_add_vdpa(&mut self, _: VdpaConfig) -> Result<Option<Vec<u8>>, VmError> {
+        Ok(None)
+    }
+
+    fn vm_add_vsock(&mut self, _: VsockConfig) -> Result<Option<Vec<u8>>, VmError> {
+        Ok(None)
+    }
+
+    fn vm_counters(&mut self) -> Result<Option<Vec<u8>>, VmError> {
+        Ok(None)
+    }
+
+    fn vm_power_button(&mut self) -> Result<(), VmError> {
+        Ok(())
+    }
+
+    fn vm_receive_migration(&mut self, _: VmReceiveMigrationData) -> Result<(), MigratableError> {
+        Ok(())
+    }
+
+    fn vm_send_migration(&mut self, _: VmSendMigrationData) -> Result<(), MigratableError> {
+        Ok(())
+    }
+}
+
 fn http_receiver_stub(exit_evt: EventFd, api_evt: EventFd, api_receiver: Receiver<ApiRequest>) {
     let mut epoll = EpollContext::new().unwrap();
     epoll.add_event(&exit_evt, EpollDispatch::Exit).unwrap();
@@ -98,89 +298,7 @@ fn http_receiver_stub(exit_evt: EventFd, api_evt: EventFd, api_receiver: Receive
             EpollDispatch::Api => {
                 for _ in 0..api_evt.read().unwrap() {
                     let api_request = api_receiver.recv().unwrap();
-                    match api_request {
-                        ApiRequest::VmCreate(_, sender) => {
-                            sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
-                        }
-                        ApiRequest::VmDelete(sender) => {
-                            sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
-                        }
-                        ApiRequest::VmBoot(sender) => {
-                            sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
-                        }
-                        ApiRequest::VmShutdown(sender) => {
-                            sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
-                        }
-                        ApiRequest::VmReboot(sender) => {
-                            sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
-                        }
-                        ApiRequest::VmInfo(sender) => {
-                            sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
-                        }
-                        ApiRequest::VmmPing(sender) => {
-                            sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
-                        }
-                        ApiRequest::VmPause(sender) => {
-                            sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
-                        }
-                        ApiRequest::VmResume(sender) => {
-                            sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
-                        }
-                        ApiRequest::VmSnapshot(_, sender) => {
-                            sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
-                        }
-                        ApiRequest::VmRestore(_, sender) => {
-                            sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
-                        }
-                        ApiRequest::VmmShutdown(sender) => {
-                            sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
-                        }
-                        ApiRequest::VmResize(_, sender) => {
-                            sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
-                        }
-                        ApiRequest::VmResizeZone(_, sender) => {
-                            sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
-                        }
-                        ApiRequest::VmAddDevice(_, sender) => {
-                            sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
-                        }
-                        ApiRequest::VmAddUserDevice(_, sender) => {
-                            sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
-                        }
-                        ApiRequest::VmRemoveDevice(_, sender) => {
-                            sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
-                        }
-                        ApiRequest::VmAddDisk(_, sender) => {
-                            sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
-                        }
-                        ApiRequest::VmAddFs(_, sender) => {
-                            sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
-                        }
-                        ApiRequest::VmAddPmem(_, sender) => {
-                            sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
-                        }
-                        ApiRequest::VmAddNet(_, sender) => {
-                            sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
-                        }
-                        ApiRequest::VmAddVdpa(_, sender) => {
-                            sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
-                        }
-                        ApiRequest::VmAddVsock(_, sender) => {
-                            sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
-                        }
-                        ApiRequest::VmCounters(sender) => {
-                            sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
-                        }
-                        ApiRequest::VmReceiveMigration(_, sender) => {
-                            sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
-                        }
-                        ApiRequest::VmSendMigration(_, sender) => {
-                            sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
-                        }
-                        ApiRequest::VmPowerButton(sender) => {
-                            sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
-                        }
-                    }
+                    api_request(&mut StubApiRequestHandler).unwrap();
                 }
             }
             _ => {
diff --git a/src/main.rs b/src/main.rs
index 2fd2c91f4..a06f9a008 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -20,6 +20,7 @@ use std::sync::{Arc, Mutex};
 use thiserror::Error;
 #[cfg(feature = "dbus_api")]
 use vmm::api::dbus::{dbus_api_graceful_shutdown, DBusApiOptions};
+use vmm::api::ApiAction;
 use vmm::config;
 use vmm_sys_util::eventfd::EventFd;
 use vmm_sys_util::signal::block_signal;
@@ -695,22 +696,24 @@ fn start_vmm(cmd_arguments: ArgMatches) -> Result<Option<String>, Error> {

         // Create and boot the VM based off the VM config we just built.
         let sender = api_request_sender.clone();
-        vmm::api::vm_create(
-            api_evt.try_clone().unwrap(),
-            api_request_sender,
-            Arc::new(Mutex::new(vm_config)),
-        )
-        .map_err(Error::VmCreate)?;
-        vmm::api::vm_boot(api_evt.try_clone().unwrap(), sender).map_err(Error::VmBoot)?;
+        vmm::api::VmCreate
+            .send(
+                api_evt.try_clone().unwrap(),
+                api_request_sender,
+                Arc::new(Mutex::new(vm_config)),
+            )
+            .map_err(Error::VmCreate)?;
+        vmm::api::VmBoot
+            .send(api_evt.try_clone().unwrap(), sender, ())
+            .map_err(Error::VmBoot)?;
     } else if let Some(restore_params) = cmd_arguments.get_one::<String>("restore") {
-        vmm::api::vm_restore(
-            api_evt.try_clone().unwrap(),
-            api_request_sender,
-            Arc::new(
+        vmm::api::VmRestore
+            .send(
+                api_evt.try_clone().unwrap(),
+                api_request_sender,
                 config::RestoreConfig::parse(restore_params).map_err(Error::ParsingRestore)?,
-            ),
-        )
-        .map_err(Error::VmRestore)?;
+            )
+            .map_err(Error::VmRestore)?;
     }

     Ok(())
diff --git a/vmm/src/api/dbus/mod.rs b/vmm/src/api/dbus/mod.rs
index c063e1008..c14295afb 100644
--- a/vmm/src/api/dbus/mod.rs
+++ b/vmm/src/api/dbus/mod.rs
@@ -2,7 +2,15 @@
 //
 // SPDX-License-Identifier: Apache-2.0
 //
-use super::{ApiRequest, VmAction};
+use super::{ApiAction, ApiRequest};
+#[cfg(all(target_arch = "x86_64", feature = "guest_debug"))]
+use crate::api::VmCoredump;
+use crate::api::{
+    AddDisk, Body, VmAddDevice, VmAddFs, VmAddNet, VmAddPmem, VmAddUserDevice, VmAddVdpa,
+    VmAddVsock, VmBoot, VmCounters, VmCreate, VmDelete, VmInfo, VmPause, VmPowerButton, VmReboot,
+    VmReceiveMigration, VmRemoveDevice, VmResize, VmResizeZone, VmRestore, VmResume,
+    VmSendMigration, VmShutdown, VmSnapshot, VmmPing, VmmShutdown,
+};
 use crate::seccomp_filters::{get_seccomp_filter, Thread};
 use crate::{Error as VmmError, Result as VmmResult};
 use crate::{NetConfig, VmConfig};
@@ -78,11 +86,15 @@ impl DBusApi {
             .map_err(|err| fdo::Error::IOError(format!("{err:?}")))
     }

-    async fn vm_action(&self, action: VmAction) -> Result<Option<String>> {
+    async fn vm_action<Action: ApiAction<ResponseBody = Option<Body>>>(
+        &self,
+        action: &'static Action,
+        body: Action::RequestBody,
+    ) -> Result<Option<String>> {
         let api_sender = self.clone_api_sender().await;
         let api_notifier = self.clone_api_notifier()?;

-        let result = blocking::unblock(move || super::vm_action(api_notifier, api_sender, action))
+        let result = blocking::unblock(move || action.send(api_notifier, api_sender, body))
             .await
             .map_err(api_error)?
             // We're using `from_utf8_lossy` here to not deal with the
@@ -99,7 +111,7 @@ impl DBusApi {
         let api_sender = self.clone_api_sender().await;
         let api_notifier = self.clone_api_notifier()?;

-        let result = blocking::unblock(move || super::vmm_ping(api_notifier, api_sender))
+        let result = blocking::unblock(move || VmmPing.send(api_notifier, api_sender, ()))
             .await
             .map_err(api_error)?;
         serde_json::to_string(&result).map_err(api_error)
@@ -109,26 +121,24 @@ impl DBusApi {
         let api_sender = self.clone_api_sender().await;
         let api_notifier = self.clone_api_notifier()?;

-        blocking::unblock(move || super::vmm_shutdown(api_notifier, api_sender))
+        blocking::unblock(move || VmmShutdown.send(api_notifier, api_sender, ()))
             .await
             .map_err(api_error)
     }

     async fn vm_add_device(&self, device_config: String) -> Result<Option<String>> {
         let device_config = serde_json::from_str(&device_config).map_err(api_error)?;
-        self.vm_action(VmAction::AddDevice(Arc::new(device_config)))
-            .await
+        self.vm_action(&VmAddDevice, device_config).await
     }

     async fn vm_add_disk(&self, disk_config: String) -> Result<Option<String>> {
         let disk_config = serde_json::from_str(&disk_config).map_err(api_error)?;
-        self.vm_action(VmAction::AddDisk(Arc::new(disk_config)))
-            .await
+        self.vm_action(&AddDisk, disk_config).await
     }

     async fn vm_add_fs(&self, fs_config: String) -> Result<Option<String>> {
         let fs_config = serde_json::from_str(&fs_config).map_err(api_error)?;
-        self.vm_action(VmAction::AddFs(Arc::new(fs_config))).await
+        self.vm_action(&VmAddFs, fs_config).await
     }

     async fn vm_add_net(&self, net_config: String) -> Result<Option<String>> {
@@ -137,35 +147,31 @@ impl DBusApi {
             warn!("Ignoring FDs sent via the D-Bus request body");
             net_config.fds = None;
         }
-        self.vm_action(VmAction::AddNet(Arc::new(net_config))).await
+        self.vm_action(&VmAddNet, net_config).await
     }

     async fn vm_add_pmem(&self, pmem_config: String) -> Result<Option<String>> {
         let pmem_config = serde_json::from_str(&pmem_config).map_err(api_error)?;
-        self.vm_action(VmAction::AddPmem(Arc::new(pmem_config)))
-            .await
+        self.vm_action(&VmAddPmem, pmem_config).await
     }

     async fn vm_add_user_device(&self, vm_add_user_device: String) -> Result<Option<String>> {
         let vm_add_user_device = serde_json::from_str(&vm_add_user_device).map_err(api_error)?;
-        self.vm_action(VmAction::AddUserDevice(Arc::new(vm_add_user_device)))
-            .await
+        self.vm_action(&VmAddUserDevice, vm_add_user_device).await
     }

     async fn vm_add_vdpa(&self, vdpa_config: String) -> Result<Option<String>> {
         let vdpa_config = serde_json::from_str(&vdpa_config).map_err(api_error)?;
-        self.vm_action(VmAction::AddVdpa(Arc::new(vdpa_config)))
-            .await
+        self.vm_action(&VmAddVdpa, vdpa_config).await
     }

     async fn vm_add_vsock(&self, vsock_config: String) -> Result<Option<String>> {
         let vsock_config = serde_json::from_str(&vsock_config).map_err(api_error)?;
-        self.vm_action(VmAction::AddVsock(Arc::new(vsock_config)))
-            .await
+        self.vm_action(&VmAddVsock, vsock_config).await
     }

     async fn vm_boot(&self) -> Result<()> {
-        self.vm_action(VmAction::Boot).await.map(|_| ())
+        self.vm_action(&VmBoot, ()).await.map(|_| ())
     }

     #[allow(unused_variables)]
@@ -176,7 +182,7 @@ impl DBusApi {
         #[cfg(all(target_arch = "x86_64", feature = "guest_debug"))]
         {
             let vm_coredump_data = serde_json::from_str(&vm_coredump_data).map_err(api_error)?;
-            self.vm_action(VmAction::Coredump(Arc::new(vm_coredump_data)))
+            self.vm_action(&VmCoredump, vm_coredump_data)
                 .await
                 .map(|_| ())
         }
@@ -188,7 +194,7 @@ impl DBusApi {
     }

     async fn vm_counters(&self) -> Result<Option<String>> {
-        self.vm_action(VmAction::Counters).await
+        self.vm_action(&VmCounters, ()).await
     }

     async fn vm_create(&self, vm_config: String) -> Result<()> {
@@ -207,7 +213,7 @@ impl DBusApi {
         }

         blocking::unblock(move || {
-            super::vm_create(api_notifier, api_sender, Arc::new(Mutex::new(vm_config)))
+            VmCreate.send(api_notifier, api_sender, Arc::new(Mutex::new(vm_config)))
         })
         .await
         .map_err(api_error)?;
@@ -216,85 +222,81 @@ impl DBusApi {
     }

     async fn vm_delete(&self) -> Result<()> {
-        self.vm_action(VmAction::Delete).await.map(|_| ())
+        self.vm_action(&VmDelete, ()).await.map(|_| ())
     }

     async fn vm_info(&self) -> Result<String> {
         let api_sender = self.clone_api_sender().await;
         let api_notifier = self.clone_api_notifier()?;

-        let result = blocking::unblock(move || super::vm_info(api_notifier, api_sender))
+        let result = blocking::unblock(move || VmInfo.send(api_notifier, api_sender, ()))
             .await
             .map_err(api_error)?;
         serde_json::to_string(&result).map_err(api_error)
     }

     async fn vm_pause(&self) -> Result<()> {
-        self.vm_action(VmAction::Pause).await.map(|_| ())
+        self.vm_action(&VmPause, ()).await.map(|_| ())
     }

     async fn vm_power_button(&self) -> Result<()> {
-        self.vm_action(VmAction::PowerButton).await.map(|_| ())
+        self.vm_action(&VmPowerButton, ()).await.map(|_| ())
     }

     async fn vm_reboot(&self) -> Result<()> {
-        self.vm_action(VmAction::Reboot).await.map(|_| ())
+        self.vm_action(&VmReboot, ()).await.map(|_| ())
     }

     async fn vm_remove_device(&self, vm_remove_device: String) -> Result<()> {
         let vm_remove_device = serde_json::from_str(&vm_remove_device).map_err(api_error)?;
-        self.vm_action(VmAction::RemoveDevice(Arc::new(vm_remove_device)))
+        self.vm_action(&VmRemoveDevice, vm_remove_device)
             .await
             .map(|_| ())
     }

     async fn vm_resize(&self, vm_resize: String) -> Result<()> {
         let vm_resize = serde_json::from_str(&vm_resize).map_err(api_error)?;
-        self.vm_action(VmAction::Resize(Arc::new(vm_resize)))
-            .await
-            .map(|_| ())
+        self.vm_action(&VmResize, vm_resize).await.map(|_| ())
     }

     async fn vm_resize_zone(&self, vm_resize_zone: String) -> Result<()> {
         let vm_resize_zone = serde_json::from_str(&vm_resize_zone).map_err(api_error)?;
-        self.vm_action(VmAction::ResizeZone(Arc::new(vm_resize_zone)))
+        self.vm_action(&VmResizeZone, vm_resize_zone)
             .await
             .map(|_| ())
     }

     async fn vm_restore(&self, restore_config: String) -> Result<()> {
         let restore_config = serde_json::from_str(&restore_config).map_err(api_error)?;
-        self.vm_action(VmAction::Restore(Arc::new(restore_config)))
-            .await
-            .map(|_| ())
+        self.vm_action(&VmRestore, restore_config).await.map(|_| ())
     }

     async fn vm_receive_migration(&self, receive_migration_data: String) -> Result<()> {
         let receive_migration_data =
             serde_json::from_str(&receive_migration_data).map_err(api_error)?;
-        self.vm_action(VmAction::ReceiveMigration(Arc::new(receive_migration_data)))
+        self.vm_action(&VmReceiveMigration, receive_migration_data)
             .await
             .map(|_| ())
     }

     async fn vm_send_migration(&self, send_migration_data: String) -> Result<()> {
         let send_migration_data = serde_json::from_str(&send_migration_data).map_err(api_error)?;
-        self.vm_action(VmAction::SendMigration(Arc::new(send_migration_data)))
+        self.vm_action(&VmSendMigration, send_migration_data)
             .await
             .map(|_| ())
     }

     async fn vm_resume(&self) -> Result<()> {
-        self.vm_action(VmAction::Resume).await.map(|_| ())
+        self.vm_action(&VmResume, ()).await.map(|_| ())
     }

     async fn vm_shutdown(&self) -> Result<()> {
-        self.vm_action(VmAction::Shutdown).await.map(|_| ())
+        self.vm_action(&VmShutdown, ()).await.map(|_| ())
     }

     async fn vm_snapshot(&self, vm_snapshot_config: String) -> Result<()> {
         let vm_snapshot_config =
             serde_json::from_str(&vm_snapshot_config).map_err(api_error)?;
-        self.vm_action(VmAction::Snapshot(Arc::new(vm_snapshot_config)))
+        self.vm_action(&VmSnapshot, vm_snapshot_config)
             .await
             .map(|_| ())
     }
diff --git a/vmm/src/api/http/http_endpoint.rs b/vmm/src/api/http/http_endpoint.rs
index 727939dde..141cc1d0f 100644
--- a/vmm/src/api/http/http_endpoint.rs
+++ b/vmm/src/api/http/http_endpoint.rs
@@ -1,17 +1,17 @@
 // Copyright © 2019 Intel Corporation
+// Copyright 2024 Alyssa Ross
 //
 // SPDX-License-Identifier: Apache-2.0
 //
 use crate::api::http::{error_response, EndpointHandler, HttpError};
 #[cfg(all(target_arch = "x86_64", feature = "guest_debug"))]
-use crate::api::vm_coredump;
+use crate::api::VmCoredump;
 use crate::api::{
-    vm_add_device, vm_add_disk, vm_add_fs, vm_add_net, vm_add_pmem, vm_add_user_device,
-    vm_add_vdpa, vm_add_vsock, vm_boot, vm_counters, vm_create, vm_delete, vm_info, vm_pause,
-    vm_power_button, vm_reboot, vm_receive_migration, vm_remove_device, vm_resize, vm_resize_zone,
-    vm_restore, vm_resume, vm_send_migration, vm_shutdown, vm_snapshot, vmm_ping, vmm_shutdown,
-    ApiRequest, VmAction, VmConfig,
+    AddDisk, ApiAction, ApiRequest, VmAddDevice, VmAddFs, VmAddNet, VmAddPmem, VmAddUserDevice,
+    VmAddVdpa, VmAddVsock, VmBoot, VmConfig, VmCounters, VmDelete, VmPause, VmPowerButton,
+    VmReboot, VmReceiveMigration, VmRemoveDevice, VmResize, VmResizeZone, VmRestore, VmResume,
+    VmSendMigration, VmShutdown, VmSnapshot,
 };
 use crate::config::NetConfig;
 use micro_http::{Body, Method, Request, Response, StatusCode, Version};
@@ -52,8 +52,8 @@ impl EndpointHandler for VmCreate {
                     }
                 }

-                // Call vm_create()
-                match vm_create(api_notifier, api_sender, Arc::new(Mutex::new(vm_config)))
+                match crate::api::VmCreate
+                    .send(api_notifier, api_sender, Arc::new(Mutex::new(vm_config)))
                     .map_err(HttpError::ApiError)
                 {
                     Ok(_) => Response::new(Version::Http11, StatusCode::NoContent),
@@ -70,13 +70,162 @@ impl EndpointHandler for VmCreate {
     }
 }

+pub trait GetHandler {
+    fn handle_request(
+        &'static self,
+        _api_notifier: EventFd,
+        _api_sender: Sender<ApiRequest>,
+    ) -> std::result::Result<Option<Body>, HttpError> {
+        Err(HttpError::BadRequest)
+    }
+}
+
+pub trait PutHandler {
+    fn handle_request(
+        &'static self,
+        _api_notifier: EventFd,
+        _api_sender: Sender<ApiRequest>,
+        _body: &Option<Body>,
+        _files: Vec<File>,
+    ) -> std::result::Result<Option<Body>, HttpError> {
+        Err(HttpError::BadRequest)
+    }
+}
+
+pub trait HttpVmAction: GetHandler + PutHandler + Sync {}
+
+impl<T: GetHandler + PutHandler + Sync> HttpVmAction for T {}
+
+macro_rules! vm_action_get_handler {
+    ($action:ty) => {
+        impl GetHandler for $action {
+            fn handle_request(
+                &'static self,
+                api_notifier: EventFd,
+                api_sender: Sender<ApiRequest>,
+            ) -> std::result::Result<Option<Body>, HttpError> {
+                self.send(api_notifier, api_sender, ())
+                    .map_err(HttpError::ApiError)
+            }
+        }
+
+        impl PutHandler for $action {}
+    };
+}
+
+macro_rules! vm_action_put_handler {
+    ($action:ty) => {
+        impl PutHandler for $action {
+            fn handle_request(
+                &'static self,
+                api_notifier: EventFd,
+                api_sender: Sender<ApiRequest>,
+                body: &Option<Body>,
+                _files: Vec<File>,
+            ) -> std::result::Result<Option<Body>, HttpError> {
+                if body.is_some() {
+                    Err(HttpError::BadRequest)
+                } else {
+                    self.send(api_notifier, api_sender, ())
+                        .map_err(HttpError::ApiError)
+                }
+            }
+        }
+
+        impl GetHandler for $action {}
+    };
+}
+
+macro_rules! vm_action_put_handler_body {
+    ($action:ty) => {
+        impl PutHandler for $action {
+            fn handle_request(
+                &'static self,
+                api_notifier: EventFd,
+                api_sender: Sender<ApiRequest>,
+                body: &Option<Body>,
+                _files: Vec<File>,
+            ) -> std::result::Result<Option<Body>, HttpError> {
+                if let Some(body) = body {
+                    self.send(
+                        api_notifier,
+                        api_sender,
+                        serde_json::from_slice(body.raw())?,
+                    )
+                    .map_err(HttpError::ApiError)
+                } else {
+                    Err(HttpError::BadRequest)
+                }
+            }
+        }
+
+        impl GetHandler for $action {}
+    };
+}
+
+vm_action_get_handler!(VmCounters);
+
+vm_action_put_handler!(VmBoot);
+vm_action_put_handler!(VmDelete);
+vm_action_put_handler!(VmShutdown);
+vm_action_put_handler!(VmReboot);
+vm_action_put_handler!(VmPause);
+vm_action_put_handler!(VmResume);
+vm_action_put_handler!(VmPowerButton);
+
+vm_action_put_handler_body!(VmAddDevice);
+vm_action_put_handler_body!(AddDisk);
+vm_action_put_handler_body!(VmAddFs);
+vm_action_put_handler_body!(VmAddPmem);
+vm_action_put_handler_body!(VmAddVdpa);
+vm_action_put_handler_body!(VmAddVsock);
+vm_action_put_handler_body!(VmAddUserDevice);
+vm_action_put_handler_body!(VmRemoveDevice);
+vm_action_put_handler_body!(VmResize);
+vm_action_put_handler_body!(VmResizeZone);
+vm_action_put_handler_body!(VmRestore);
+vm_action_put_handler_body!(VmSnapshot);
+vm_action_put_handler_body!(VmReceiveMigration);
+vm_action_put_handler_body!(VmSendMigration);
+
+#[cfg(all(target_arch = "x86_64", feature = "guest_debug"))]
+vm_action_put_handler_body!(VmCoredump);
+
+impl PutHandler for VmAddNet {
+    fn handle_request(
+        &'static self,
+        api_notifier: EventFd,
+        api_sender: Sender<ApiRequest>,
+        body: &Option<Body>,
+        mut files: Vec<File>,
+    ) -> std::result::Result<Option<Body>, HttpError> {
+        if let Some(body) = body {
+            let mut net_cfg: NetConfig = serde_json::from_slice(body.raw())?;
+            if net_cfg.fds.is_some() {
+                warn!("Ignoring FDs sent via the HTTP request body");
+                net_cfg.fds = None;
+            }
+            if !files.is_empty() {
+                let fds = files.drain(..).map(|f| f.into_raw_fd()).collect();
+                net_cfg.fds = Some(fds);
+            }
+            self.send(api_notifier, api_sender, net_cfg)
+                .map_err(HttpError::ApiError)
+        } else {
+            Err(HttpError::BadRequest)
+        }
+    }
+}
+
+impl GetHandler for VmAddNet {}
+
 // Common handler for boot, shutdown and reboot
 pub struct VmActionHandler {
-    action: VmAction,
+    action: &'static dyn HttpVmAction,
 }

 impl VmActionHandler {
-    pub fn new(action: VmAction) -> Self {
+    pub fn new(action: &'static dyn HttpVmAction) -> Self {
         VmActionHandler { action }
     }
 }
@@ -87,117 +236,9 @@ impl EndpointHandler for VmActionHandler {
         api_notifier: EventFd,
         api_sender: Sender<ApiRequest>,
         body: &Option<Body>,
-        mut files: Vec<File>,
+        files: Vec<File>,
     ) -> std::result::Result<Option<Body>, HttpError> {
-        use VmAction::*;
-        if let Some(body) = body {
-            match self.action {
-                AddDevice(_) => vm_add_device(
-                    api_notifier,
-                    api_sender,
-                    Arc::new(serde_json::from_slice(body.raw())?),
-                ),
-                AddDisk(_) => vm_add_disk(
-                    api_notifier,
-                    api_sender,
-                    Arc::new(serde_json::from_slice(body.raw())?),
-                ),
-                AddFs(_) => vm_add_fs(
-                    api_notifier,
-                    api_sender,
-                    Arc::new(serde_json::from_slice(body.raw())?),
-                ),
-                AddPmem(_) => vm_add_pmem(
-                    api_notifier,
-                    api_sender,
-                    Arc::new(serde_json::from_slice(body.raw())?),
-                ),
-                AddNet(_) => {
-                    let mut net_cfg: NetConfig = serde_json::from_slice(body.raw())?;
-                    if net_cfg.fds.is_some() {
-                        warn!("Ignoring FDs sent via the HTTP request body");
-                        net_cfg.fds = None;
-                    }
-                    // Update network config with optional files that might have
-                    // been sent through control message.
-                    if !files.is_empty() {
-                        let fds = files.drain(..).map(|f| f.into_raw_fd()).collect();
-                        net_cfg.fds = Some(fds);
-                    }
-                    vm_add_net(api_notifier, api_sender, Arc::new(net_cfg))
-                }
-                AddVdpa(_) => vm_add_vdpa(
-                    api_notifier,
-                    api_sender,
-                    Arc::new(serde_json::from_slice(body.raw())?),
-                ),
-                AddVsock(_) => vm_add_vsock(
-                    api_notifier,
-                    api_sender,
-                    Arc::new(serde_json::from_slice(body.raw())?),
-                ),
-                AddUserDevice(_) => vm_add_user_device(
-                    api_notifier,
-                    api_sender,
-                    Arc::new(serde_json::from_slice(body.raw())?),
-                ),
-                RemoveDevice(_) => vm_remove_device(
-                    api_notifier,
-                    api_sender,
-                    Arc::new(serde_json::from_slice(body.raw())?),
-                ),
-                Resize(_) => vm_resize(
-                    api_notifier,
-                    api_sender,
-                    Arc::new(serde_json::from_slice(body.raw())?),
-                ),
-                ResizeZone(_) => vm_resize_zone(
-                    api_notifier,
-                    api_sender,
-                    Arc::new(serde_json::from_slice(body.raw())?),
-                ),
-                Restore(_) => vm_restore(
-                    api_notifier,
-                    api_sender,
-                    Arc::new(serde_json::from_slice(body.raw())?),
-                ),
-                Snapshot(_) => vm_snapshot(
-                    api_notifier,
-                    api_sender,
-                    Arc::new(serde_json::from_slice(body.raw())?),
-                ),
-                #[cfg(all(target_arch = "x86_64", feature = "guest_debug"))]
-                Coredump(_) => vm_coredump(
-                    api_notifier,
-                    api_sender,
-                    Arc::new(serde_json::from_slice(body.raw())?),
-                ),
-                ReceiveMigration(_) => vm_receive_migration(
-                    api_notifier,
-                    api_sender,
-                    Arc::new(serde_json::from_slice(body.raw())?),
-                ),
-                SendMigration(_) => vm_send_migration(
-                    api_notifier,
-                    api_sender,
-                    Arc::new(serde_json::from_slice(body.raw())?),
-                ),
-
-                _ => return Err(HttpError::BadRequest),
-            }
-        } else {
-            match self.action {
-                Boot => vm_boot(api_notifier, api_sender),
-                Delete => vm_delete(api_notifier, api_sender),
-                Shutdown => vm_shutdown(api_notifier, api_sender),
-                Reboot => vm_reboot(api_notifier, api_sender),
-                Pause => vm_pause(api_notifier, api_sender),
-                Resume => vm_resume(api_notifier, api_sender),
-                PowerButton => vm_power_button(api_notifier, api_sender),
-                _ => return Err(HttpError::BadRequest),
-            }
-        }
-        .map_err(HttpError::ApiError)
+        PutHandler::handle_request(self.action, api_notifier, api_sender, body, files)
     }

     fn get_handler(
@@ -206,11 +247,7 @@ impl EndpointHandler for VmActionHandler {
         api_sender: Sender<ApiRequest>,
         _body: &Option<Body>,
     ) -> std::result::Result<Option<Body>, HttpError> {
-        use VmAction::*;
-        match self.action {
-            Counters => vm_counters(api_notifier, api_sender).map_err(HttpError::ApiError),
-            _ => Err(HttpError::BadRequest),
-        }
+        GetHandler::handle_request(self.action, api_notifier, api_sender)
     }
 }

@@ -225,7 +262,10 @@ impl EndpointHandler for VmInfo {
         api_sender: Sender<ApiRequest>,
     ) -> Response {
         match req.method() {
-            Method::Get => match vm_info(api_notifier, api_sender).map_err(HttpError::ApiError) {
+            Method::Get => match crate::api::VmInfo
+                .send(api_notifier, api_sender, ())
+                .map_err(HttpError::ApiError)
+            {
                 Ok(info) => {
                     let mut response = Response::new(Version::Http11, StatusCode::OK);
                     let info_serialized = serde_json::to_string(&info).unwrap();
@@ -251,7 +291,10 @@ impl EndpointHandler for VmmPing {
         api_sender: Sender<ApiRequest>,
     ) -> Response {
         match req.method() {
-            Method::Get => match vmm_ping(api_notifier, api_sender).map_err(HttpError::ApiError) {
+            Method::Get => match crate::api::VmmPing
+                .send(api_notifier, api_sender, ())
+                .map_err(HttpError::ApiError)
+            {
                 Ok(pong) => {
                     let mut response = Response::new(Version::Http11, StatusCode::OK);
                     let info_serialized = serde_json::to_string(&pong).unwrap();
@@ -279,7 +322,10 @@ impl EndpointHandler for VmmShutdown {
     ) -> Response {
         match req.method() {
             Method::Put => {
-                match vmm_shutdown(api_notifier, api_sender).map_err(HttpError::ApiError) {
+                match crate::api::VmmShutdown
+                    .send(api_notifier, api_sender, ())
+                    .map_err(HttpError::ApiError)
+                {
                     Ok(_) => Response::new(Version::Http11, StatusCode::OK),
                     Err(e) => error_response(e, StatusCode::InternalServerError),
                 }
diff --git a/vmm/src/api/http/mod.rs b/vmm/src/api/http/mod.rs
index 2f4b0283c..2a40d249a 100644
--- a/vmm/src/api/http/mod.rs
+++ b/vmm/src/api/http/mod.rs
@@ -4,7 +4,14 @@
 //
 use self::http_endpoint::{VmActionHandler, VmCreate, VmInfo, VmmPing, VmmShutdown};
-use crate::api::{ApiError, ApiRequest, VmAction};
+#[cfg(all(target_arch = "x86_64", feature = "guest_debug"))]
+use crate::api::VmCoredump;
+use crate::api::{
+    AddDisk, ApiError, ApiRequest, VmAddDevice, VmAddFs, VmAddNet, VmAddPmem, VmAddUserDevice,
+    VmAddVdpa, VmAddVsock, VmBoot, VmCounters, VmDelete, VmPause, VmPowerButton, VmReboot,
+    VmReceiveMigration, VmRemoveDevice, VmResize, VmResizeZone, VmRestore, VmResume,
+    VmSendMigration, VmShutdown, VmSnapshot,
+};
 use crate::seccomp_filters::{get_seccomp_filter, Thread};
 use crate::{Error as VmmError, Result};
 use hypervisor::HypervisorType;
@@ -19,7 +26,6 @@ use std::os::unix::net::UnixListener;
 use std::panic::AssertUnwindSafe;
 use std::path::PathBuf;
 use std::sync::mpsc::Sender;
-use std::sync::Arc;
 use std::thread;
 use vmm_sys_util::eventfd::EventFd;
@@ -141,109 +147,103 @@ pub static HTTP_ROUTES: Lazy<HttpRoutes> = Lazy::new(|| {
     r.routes.insert(
         endpoint!("/vm.add-device"),
-        Box::new(VmActionHandler::new(VmAction::AddDevice(Arc::default()))),
+        Box::new(VmActionHandler::new(&VmAddDevice)),
     );
     r.routes.insert(
         endpoint!("/vm.add-user-device"),
-        Box::new(VmActionHandler::new(
-            VmAction::AddUserDevice(Arc::default()),
-        )),
+        Box::new(VmActionHandler::new(&VmAddUserDevice)),
     );
     r.routes.insert(
         endpoint!("/vm.add-disk"),
-        Box::new(VmActionHandler::new(VmAction::AddDisk(Arc::default()))),
+        Box::new(VmActionHandler::new(&AddDisk)),
     );
     r.routes.insert(
         endpoint!("/vm.add-fs"),
-        Box::new(VmActionHandler::new(VmAction::AddFs(Arc::default()))),
+        Box::new(VmActionHandler::new(&VmAddFs)),
     );
     r.routes.insert(
         endpoint!("/vm.add-net"),
-        Box::new(VmActionHandler::new(VmAction::AddNet(Arc::default()))),
+        Box::new(VmActionHandler::new(&VmAddNet)),
     );
     r.routes.insert(
         endpoint!("/vm.add-pmem"),
-        Box::new(VmActionHandler::new(VmAction::AddPmem(Arc::default()))),
+        Box::new(VmActionHandler::new(&VmAddPmem)),
     );
     r.routes.insert(
         endpoint!("/vm.add-vdpa"),
-        Box::new(VmActionHandler::new(VmAction::AddVdpa(Arc::default()))),
+        Box::new(VmActionHandler::new(&VmAddVdpa)),
     );
     r.routes.insert(
         endpoint!("/vm.add-vsock"),
-        Box::new(VmActionHandler::new(VmAction::AddVsock(Arc::default()))),
+        Box::new(VmActionHandler::new(&VmAddVsock)),
     );
     r.routes.insert(
         endpoint!("/vm.boot"),
-        Box::new(VmActionHandler::new(VmAction::Boot)),
+        Box::new(VmActionHandler::new(&VmBoot)),
     );
     r.routes.insert(
         endpoint!("/vm.counters"),
-        Box::new(VmActionHandler::new(VmAction::Counters)),
+        Box::new(VmActionHandler::new(&VmCounters)),
     );
     r.routes
         .insert(endpoint!("/vm.create"), Box::new(VmCreate {}));
     r.routes.insert(
         endpoint!("/vm.delete"),
-        Box::new(VmActionHandler::new(VmAction::Delete)),
+        Box::new(VmActionHandler::new(&VmDelete)),
     );
     r.routes.insert(endpoint!("/vm.info"), Box::new(VmInfo {}));
     r.routes.insert(
         endpoint!("/vm.pause"),
-        Box::new(VmActionHandler::new(VmAction::Pause)),
+        Box::new(VmActionHandler::new(&VmPause)),
     );
     r.routes.insert(
         endpoint!("/vm.power-button"),
-        Box::new(VmActionHandler::new(VmAction::PowerButton)),
+        Box::new(VmActionHandler::new(&VmPowerButton)),
     );
     r.routes.insert(
         endpoint!("/vm.reboot"),
-        Box::new(VmActionHandler::new(VmAction::Reboot)),
+        Box::new(VmActionHandler::new(&VmReboot)),
     );
     r.routes.insert(
         endpoint!("/vm.receive-migration"),
-        Box::new(VmActionHandler::new(VmAction::ReceiveMigration(
-            Arc::default(),
-        ))),
+        Box::new(VmActionHandler::new(&VmReceiveMigration)),
     );
     r.routes.insert(
         endpoint!("/vm.remove-device"),
-        Box::new(VmActionHandler::new(VmAction::RemoveDevice(Arc::default()))),
+        Box::new(VmActionHandler::new(&VmRemoveDevice)),
     );
     r.routes.insert(
         endpoint!("/vm.resize"),
-        Box::new(VmActionHandler::new(VmAction::Resize(Arc::default()))),
+        Box::new(VmActionHandler::new(&VmResize)),
     );
     r.routes.insert(
         endpoint!("/vm.resize-zone"),
-        Box::new(VmActionHandler::new(VmAction::ResizeZone(Arc::default()))),
+        Box::new(VmActionHandler::new(&VmResizeZone)),
     );
     r.routes.insert(
         endpoint!("/vm.restore"),
-        Box::new(VmActionHandler::new(VmAction::Restore(Arc::default()))),
+        Box::new(VmActionHandler::new(&VmRestore)),
     );
     r.routes.insert(
         endpoint!("/vm.resume"),
-        Box::new(VmActionHandler::new(VmAction::Resume)),
+        Box::new(VmActionHandler::new(&VmResume)),
     );
     r.routes.insert(
         endpoint!("/vm.send-migration"),
-        Box::new(VmActionHandler::new(
-            VmAction::SendMigration(Arc::default()),
-        )),
+        Box::new(VmActionHandler::new(&VmSendMigration)),
     );
     r.routes.insert(
         endpoint!("/vm.shutdown"),
-        Box::new(VmActionHandler::new(VmAction::Shutdown)),
+        Box::new(VmActionHandler::new(&VmShutdown)),
     );
     r.routes.insert(
         endpoint!("/vm.snapshot"),
-        Box::new(VmActionHandler::new(VmAction::Snapshot(Arc::default()))),
+        Box::new(VmActionHandler::new(&VmSnapshot)),
     );
     #[cfg(all(target_arch = "x86_64", feature = "guest_debug"))]
     r.routes.insert(
         endpoint!("/vm.coredump"),
-        Box::new(VmActionHandler::new(VmAction::Coredump(Arc::default()))),
+        Box::new(VmActionHandler::new(&VmCoredump)),
     );
     r.routes
         .insert(endpoint!("/vmm.ping"), Box::new(VmmPing {}));
diff --git a/vmm/src/api/mod.rs b/vmm/src/api/mod.rs
index aaae8ee34..7989bd463 100644
--- a/vmm/src/api/mod.rs
+++ b/vmm/src/api/mod.rs
@@ -1,4 +1,5 @@
 // Copyright © 2019 Intel Corporation
+// Copyright 2024 Alyssa Ross
 //
 // SPDX-License-Identifier: Apache-2.0
 //
@@ -20,8 +21,8 @@
 //!
 //! 1. The thread creates an mpsc channel for receiving the command response.
 //! 2. The thread sends an ApiRequest to the Sender endpoint. The ApiRequest
-//!    contains the response channel Sender, for the VMM API server to be able
-//!    to send the response back.
+//!    encapsulates the response channel Sender, for the VMM API server to be
+//!    able to send the response back.
 //! 3. The thread writes to the API event file descriptor to notify the VMM
 //!    API server about a pending command.
 //! 4. The thread reads the response back from the VMM API server, from the
@@ -43,6 +44,7 @@ use crate::config::{
 };
 use crate::device_tree::DeviceTree;
 use crate::vm::{Error as VmError, VmState};
+use crate::Error as VmmError;
 use micro_http::Body;
 use serde::{Deserialize, Serialize};
 use std::io;
@@ -156,10 +158,10 @@ pub enum ApiError {
     /// Error triggering power button
     VmPowerButton(VmError),
 }
-pub type ApiResult<T> = std::result::Result<T, ApiError>;
+pub type ApiResult<T> = Result<T, ApiError>;

 #[derive(Clone, Deserialize, Serialize)]
-pub struct VmInfo {
+pub struct VmInfoResponse {
     pub config: Arc<Mutex<VmConfig>>,
     pub state: VmState,
     pub memory_actual_size: u64,
@@ -224,7 +226,7 @@ pub enum ApiResponsePayload {
     Empty,

     /// Virtual machine information
-    VmInfo(VmInfo),
+    VmInfo(VmInfoResponse),

     /// Vmm ping response
     VmmPing(VmmPingResponse),
@@ -234,247 +236,120 @@ pub enum ApiResponsePayload {
 }

 /// This is the response sent by the VMM API server through the mpsc channel.
-pub type ApiResponse = std::result::Result<ApiResponsePayload, ApiError>;
+pub type ApiResponse = Result<ApiResponsePayload, ApiError>;

-#[derive(Debug)]
-pub enum ApiRequest {
-    /// Create the virtual machine. This request payload is a VM configuration
-    /// (VmConfig).
-    /// If the VMM API server could not create the VM, it will send a VmCreate
-    /// error back.
-    VmCreate(Arc<Mutex<VmConfig>>, Sender<ApiResponse>),
+pub trait RequestHandler {
+    fn vm_create(&mut self, config: Arc<Mutex<VmConfig>>) -> Result<(), VmError>;

-    /// Boot the previously created virtual machine.
-    /// If the VM was not previously created, the VMM API server will send a
-    /// VmBoot error back.
-    VmBoot(Sender<ApiResponse>),
+    fn vm_boot(&mut self) -> Result<(), VmError>;

-    /// Delete the previously created virtual machine.
-    /// If the VM was not previously created, the VMM API server will send a
-    /// VmDelete error back.
-    /// If the VM is booted, we shut it down first.
-    VmDelete(Sender<ApiResponse>),
+    fn vm_pause(&mut self) -> Result<(), VmError>;

-    /// Request the VM information.
-    VmInfo(Sender<ApiResponse>),
+    fn vm_resume(&mut self) -> Result<(), VmError>;

-    /// Request the VMM API server status
-    VmmPing(Sender<ApiResponse>),
+    fn vm_snapshot(&mut self, destination_url: &str) -> Result<(), VmError>;

-    /// Pause a VM.
-    VmPause(Sender<ApiResponse>),
+    fn vm_restore(&mut self, restore_cfg: RestoreConfig) -> Result<(), VmError>;

-    /// Resume a VM.
-    VmResume(Sender<ApiResponse>),
-
-    /// Get counters for a VM.
-    VmCounters(Sender<ApiResponse>),
-
-    /// Shut the previously booted virtual machine down.
-    /// If the VM was not previously booted or created, the VMM API server
-    /// will send a VmShutdown error back.
-    VmShutdown(Sender<ApiResponse>),
-
-    /// Reboot the previously booted virtual machine.
-    /// If the VM was not previously booted or created, the VMM API server
-    /// will send a VmReboot error back.
-    VmReboot(Sender<ApiResponse>),
-
-    /// Shut the VMM down.
-    /// This will shutdown and delete the current VM, if any, and then exit the
-    /// VMM process.
-    VmmShutdown(Sender<ApiResponse>),
-
-    /// Resize the VM.
-    VmResize(Arc<VmResizeData>, Sender<ApiResponse>),
-
-    /// Resize the memory zone.
-    VmResizeZone(Arc<VmResizeZoneData>, Sender<ApiResponse>),
-
-    /// Add a device to the VM.
-    VmAddDevice(Arc<DeviceConfig>, Sender<ApiResponse>),
-
-    /// Add a user device to the VM.
-    VmAddUserDevice(Arc<UserDeviceConfig>, Sender<ApiResponse>),
-
-    /// Remove a device from the VM.
-    VmRemoveDevice(Arc<VmRemoveDeviceData>, Sender<ApiResponse>),
-
-    /// Add a disk to the VM.
-    VmAddDisk(Arc<DiskConfig>, Sender<ApiResponse>),
-
-    /// Add a fs to the VM.
-    VmAddFs(Arc<FsConfig>, Sender<ApiResponse>),
-
-    /// Add a pmem device to the VM.
-    VmAddPmem(Arc<PmemConfig>, Sender<ApiResponse>),
-
-    /// Add a network device to the VM.
-    VmAddNet(Arc<NetConfig>, Sender<ApiResponse>),
-
-    /// Add a vDPA device to the VM.
-    VmAddVdpa(Arc<VdpaConfig>, Sender<ApiResponse>),
-
-    /// Add a vsock device to the VM.
-    VmAddVsock(Arc<VsockConfig>, Sender<ApiResponse>),
-
-    /// Take a VM snapshot
-    VmSnapshot(Arc<VmSnapshotConfig>, Sender<ApiResponse>),
-
-    /// Restore from a VM snapshot
-    VmRestore(Arc<RestoreConfig>, Sender<ApiResponse>),
-
-    /// Take a VM coredump
     #[cfg(all(target_arch = "x86_64", feature = "guest_debug"))]
-    VmCoredump(Arc<VmCoredumpData>, Sender<ApiResponse>),
+    fn vm_coredump(&mut self, destination_url: &str) -> Result<(), VmError>;

-    /// Incoming migration
-    VmReceiveMigration(Arc<VmReceiveMigrationData>, Sender<ApiResponse>),
+    fn vm_shutdown(&mut self) -> Result<(), VmError>;

-    /// Outgoing migration
-    VmSendMigration(Arc<VmSendMigrationData>, Sender<ApiResponse>),
+    fn vm_reboot(&mut self) -> Result<(), VmError>;

-    // Trigger power button
-    VmPowerButton(Sender<ApiResponse>),
+    fn vm_info(&self) -> Result<VmInfoResponse, VmError>;
+
+    fn vmm_ping(&self) -> VmmPingResponse;
+
+    fn vm_delete(&mut self) -> Result<(), VmError>;
+
+    fn vmm_shutdown(&mut self) -> Result<(), VmError>;
+
+    fn vm_resize(
+        &mut self,
+        desired_vcpus: Option<u8>,
+        desired_ram: Option<u64>,
+        desired_balloon: Option<u64>,
+    ) -> Result<(), VmError>;
+
+    fn vm_resize_zone(&mut self, id: String, desired_ram: u64) -> Result<(), VmError>;
+
+    fn vm_add_device(&mut self, device_cfg: DeviceConfig) -> Result<Option<Vec<u8>>, VmError>;
+
+    fn vm_add_user_device(
+        &mut self,
+        device_cfg: UserDeviceConfig,
+    ) -> Result<Option<Vec<u8>>, VmError>;
+
+    fn vm_remove_device(&mut self, id: String) -> Result<(), VmError>;
+
+    fn vm_add_disk(&mut self, disk_cfg: DiskConfig) -> Result<Option<Vec<u8>>, VmError>;
+
+    fn vm_add_fs(&mut self, fs_cfg: FsConfig) -> Result<Option<Vec<u8>>, VmError>;
+
+    fn vm_add_pmem(&mut self, pmem_cfg: PmemConfig) -> Result<Option<Vec<u8>>, VmError>;
+
+    fn vm_add_net(&mut self, net_cfg: NetConfig) -> Result<Option<Vec<u8>>, VmError>;
+
+    fn vm_add_vdpa(&mut self, vdpa_cfg: VdpaConfig) -> Result<Option<Vec<u8>>, VmError>;
+
+    fn vm_add_vsock(&mut self, vsock_cfg: VsockConfig) -> Result<Option<Vec<u8>>, VmError>;
+
+    fn vm_counters(&mut self) -> Result<Option<Vec<u8>>, VmError>;
+
+    fn vm_power_button(&mut self) -> Result<(), VmError>;
+
+    fn vm_receive_migration(
+        &mut self,
+        receive_data_migration: VmReceiveMigrationData,
+    ) -> Result<(), MigratableError>;
+
+    fn vm_send_migration(
+        &mut self,
+        send_data_migration: VmSendMigrationData,
+    ) -> Result<(), MigratableError>;
 }

-pub fn vm_create(
+/// It would be nice if we could pass around an object like this:
+///
+/// ```
+/// # use vmm::api::ApiAction;
+/// struct ApiRequest<Action: ApiAction> {
+///     action: &'static Action,
+///     body: Action::RequestBody,
+/// }
+/// ```
+///
+/// Unfortunately, it's not possible to use such a type in a trait object,
+/// so as a workaround, we instead encapsulate that data in a closure, and have
+/// the event loop call that closure to process a request.
+pub type ApiRequest =
+    Box<dyn FnOnce(&mut dyn RequestHandler) -> Result<bool, VmmError> + Send + 'static>;
+
+fn get_response<Action: ApiAction>(
+    action: &Action,
     api_evt: EventFd,
     api_sender: Sender<ApiRequest>,
-    config: Arc<Mutex<VmConfig>>,
-) -> ApiResult<()> {
+    data: Action::RequestBody,
+) -> ApiResult<ApiResponsePayload> {
     let (response_sender, response_receiver) = channel();

-    // Send the VM creation request.
-    api_sender
-        .send(ApiRequest::VmCreate(config, response_sender))
-        .map_err(ApiError::RequestSend)?;
-    api_evt.write(1).map_err(ApiError::EventFdWrite)?;
-
-    response_receiver.recv().map_err(ApiError::ResponseRecv)??;
-
-    Ok(())
-}
-
-/// Represents a VM related action.
-/// This is mostly used to factorize code between VM routines
-/// that only differ by the IPC command they send.
-pub enum VmAction {
-    /// Boot a VM
-    Boot,
-
-    /// Delete a VM
-    Delete,
-
-    /// Shut a VM down
-    Shutdown,
-
-    /// Reboot a VM
-    Reboot,
-
-    /// Pause a VM
-    Pause,
-
-    /// Resume a VM
-    Resume,
-
-    /// Return VM counters
-    Counters,
-
-    /// Add VFIO device
-    AddDevice(Arc<DeviceConfig>),
-
-    /// Add disk
-    AddDisk(Arc<DiskConfig>),
-
-    /// Add filesystem
-    AddFs(Arc<FsConfig>),
-
-    /// Add pmem
-    AddPmem(Arc<PmemConfig>),
-
-    /// Add network
-    AddNet(Arc<NetConfig>),
-
-    /// Add vdpa
-    AddVdpa(Arc<VdpaConfig>),
-
-    /// Add vsock
-    AddVsock(Arc<VsockConfig>),
-
-    /// Add user device
-    AddUserDevice(Arc<UserDeviceConfig>),
-
-    /// Remove VFIO device
-    RemoveDevice(Arc<VmRemoveDeviceData>),
-
-    /// Resize VM
-    Resize(Arc<VmResizeData>),
-
-    /// Resize memory zone
-    ResizeZone(Arc<VmResizeZoneData>),
-
-    /// Restore VM
-    Restore(Arc<RestoreConfig>),
-
-    /// Snapshot VM
-    Snapshot(Arc<VmSnapshotConfig>),
-
-    /// Coredump VM
-    #[cfg(all(target_arch = "x86_64", feature = "guest_debug"))]
-    Coredump(Arc<VmCoredumpData>),
-
-    /// Incoming migration
-    ReceiveMigration(Arc<VmReceiveMigrationData>),
-
-    /// Outgoing migration
-    SendMigration(Arc<VmSendMigrationData>),
-
-    /// Power Button for clean shutdown
-    PowerButton,
-}
-
-fn vm_action(
-    api_evt: EventFd,
-    api_sender: Sender<ApiRequest>,
-    action: VmAction,
-) -> ApiResult<Option<Body>> {
-    let (response_sender, response_receiver) = channel();
-
-    use VmAction::*;
-    let request = match action {
-        Boot => ApiRequest::VmBoot(response_sender),
-        Delete => ApiRequest::VmDelete(response_sender),
-        Shutdown => ApiRequest::VmShutdown(response_sender),
-        Reboot => ApiRequest::VmReboot(response_sender),
-        Pause => ApiRequest::VmPause(response_sender),
-        Resume => ApiRequest::VmResume(response_sender),
-        Counters => ApiRequest::VmCounters(response_sender),
-        AddDevice(v) => ApiRequest::VmAddDevice(v, response_sender),
-        AddDisk(v) => ApiRequest::VmAddDisk(v, response_sender),
-        AddFs(v) => ApiRequest::VmAddFs(v, response_sender),
-        AddPmem(v) => ApiRequest::VmAddPmem(v, response_sender),
-        AddNet(v) => ApiRequest::VmAddNet(v, response_sender),
-        AddVdpa(v) => ApiRequest::VmAddVdpa(v, response_sender),
-        AddVsock(v) => ApiRequest::VmAddVsock(v, response_sender),
-        AddUserDevice(v) => ApiRequest::VmAddUserDevice(v, response_sender),
-        RemoveDevice(v) => ApiRequest::VmRemoveDevice(v, response_sender),
-        Resize(v) => ApiRequest::VmResize(v, response_sender),
-        ResizeZone(v) => ApiRequest::VmResizeZone(v, response_sender),
-        Restore(v) => ApiRequest::VmRestore(v, response_sender),
-        Snapshot(v) => ApiRequest::VmSnapshot(v, response_sender),
-        #[cfg(all(target_arch = "x86_64", feature = "guest_debug"))]
-        Coredump(v) => ApiRequest::VmCoredump(v, response_sender),
-        ReceiveMigration(v) => ApiRequest::VmReceiveMigration(v, response_sender),
-        SendMigration(v) => ApiRequest::VmSendMigration(v, response_sender),
-        PowerButton => ApiRequest::VmPowerButton(response_sender),
-    };
-
+    let request = action.request(data, response_sender);
     // Send the VM request.
     api_sender.send(request).map_err(ApiError::RequestSend)?;
     api_evt.write(1).map_err(ApiError::EventFdWrite)?;

-    let body = match response_receiver.recv().map_err(ApiError::ResponseRecv)?? {
+    response_receiver.recv().map_err(ApiError::ResponseRecv)?
+}
+
+fn get_response_body<Action: ApiAction<ResponseBody = Option<Body>>>(
+    action: &Action,
+    api_evt: EventFd,
+    api_sender: Sender<ApiRequest>,
+    data: Action::RequestBody,
+) -> ApiResult<Option<Body>> {
+    let body = match get_response(action, api_evt, api_sender, data)? {
         ApiResponsePayload::VmAction(response) => response.map(Body::new),
         ApiResponsePayload::Empty => None,
         _ => return Err(ApiError::ResponsePayloadType),
@@ -483,213 +358,1016 @@ fn vm_action(
     Ok(body)
 }

-pub fn vm_boot(api_evt: EventFd, api_sender: Sender<ApiRequest>) -> ApiResult<Option<Body>> {
-    vm_action(api_evt, api_sender, VmAction::Boot)
+pub trait ApiAction: Send + Sync {
+    type RequestBody: Send + Sync + Sized;
+    type ResponseBody: Send + Sized;
+
+    fn request(&self, body: Self::RequestBody, response_sender: Sender<ApiResponse>) -> ApiRequest;
+
+    fn send(
+        &self,
+        api_evt: EventFd,
+        api_sender: Sender<ApiRequest>,
+        data: Self::RequestBody,
+    ) -> ApiResult<Self::ResponseBody>;
 }

-pub fn vm_delete(api_evt: EventFd, api_sender: Sender<ApiRequest>) -> ApiResult<Option<Body>> {
-    vm_action(api_evt, api_sender, VmAction::Delete)
+pub struct VmAddDevice;
+
+impl ApiAction for VmAddDevice {
+    type RequestBody = DeviceConfig;
+    type ResponseBody = Option<Body>;
+
+    fn request(
+        &self,
+        config: Self::RequestBody,
+        response_sender: Sender<ApiResponse>,
+    ) -> ApiRequest {
+        Box::new(move |vmm| {
+            info!("API request event: VmAddDevice {:?}", config);
+
+            let response = vmm
+                .vm_add_device(config)
+                .map_err(ApiError::VmAddDevice)
+                .map(ApiResponsePayload::VmAction);
+
+            response_sender
+                .send(response)
+                .map_err(VmmError::ApiResponseSend)?;
+
+            Ok(false)
+        })
+    }
+
+    fn send(
+        &self,
+        api_evt: EventFd,
+        api_sender: Sender<ApiRequest>,
+        data: Self::RequestBody,
+    ) -> ApiResult<Self::ResponseBody> {
+        get_response_body(self, api_evt, api_sender, data)
+    }
 }

-pub fn vm_shutdown(api_evt: EventFd, api_sender: Sender<ApiRequest>) -> ApiResult<Option<Body>> {
-    vm_action(api_evt, api_sender, VmAction::Shutdown)
+pub struct AddDisk;
+
+impl ApiAction for AddDisk {
+    type RequestBody = DiskConfig;
+    type ResponseBody = Option<Body>;
+
+    fn request(
+        &self,
+        config: Self::RequestBody,
+        response_sender: Sender<ApiResponse>,
+    ) -> ApiRequest {
+        Box::new(move |vmm| {
+            info!("API request event: AddDisk {:?}", config);
+
+            let response = vmm
+                .vm_add_disk(config)
+                .map_err(ApiError::VmAddDisk)
+                .map(ApiResponsePayload::VmAction);
+
+            response_sender
+                .send(response)
+                .map_err(VmmError::ApiResponseSend)?;
+
+            Ok(false)
+        })
+    }
+
+    fn send(
+        &self,
+        api_evt: EventFd,
+        api_sender: Sender<ApiRequest>,
+        data: Self::RequestBody,
+    ) -> ApiResult<Self::ResponseBody> {
+        get_response_body(self, api_evt, api_sender, data)
+    }
 }

-pub fn vm_reboot(api_evt: EventFd, api_sender: Sender<ApiRequest>) -> ApiResult<Option<Body>> {
-    vm_action(api_evt, api_sender, VmAction::Reboot)
+pub struct VmAddFs;
+
+impl ApiAction for VmAddFs {
+    type RequestBody = FsConfig;
+    type ResponseBody = Option<Body>;
+
+    fn request(
+        &self,
+        config: Self::RequestBody,
+        response_sender: Sender<ApiResponse>,
+    ) -> ApiRequest {
+        Box::new(move |vmm| {
+            info!("API request event: VmAddFs {:?}", config);
+
+            let response = vmm
+                .vm_add_fs(config)
+                .map_err(ApiError::VmAddFs)
+                .map(ApiResponsePayload::VmAction);
+
+            response_sender
+                .send(response)
+                .map_err(VmmError::ApiResponseSend)?;
+
+            Ok(false)
+        })
+    }
+
+    fn send(
+        &self,
+        api_evt: EventFd,
+        api_sender: Sender<ApiRequest>,
+        data: Self::RequestBody,
+    ) -> ApiResult<Self::ResponseBody> {
+        get_response_body(self, api_evt, api_sender, data)
+    }
 }

-pub fn vm_pause(api_evt: EventFd, api_sender: Sender<ApiRequest>) -> ApiResult<Option<Body>> {
-    vm_action(api_evt, api_sender, VmAction::Pause)
+pub struct VmAddPmem;
+
+impl ApiAction for VmAddPmem {
+    type RequestBody = PmemConfig;
+    type ResponseBody = Option<Body>;
+
+    fn request(
+        &self,
+        config: Self::RequestBody,
+        response_sender: Sender<ApiResponse>,
+    ) -> ApiRequest {
+        Box::new(move |vmm| {
+            info!("API request event: VmAddPmem {:?}", config);
+
+            let response = vmm
+                .vm_add_pmem(config)
+                .map_err(ApiError::VmAddPmem)
+                .map(ApiResponsePayload::VmAction);
+
+            response_sender
+                .send(response)
+                .map_err(VmmError::ApiResponseSend)?;
+
+            Ok(false)
+        })
+    }
+
+    fn send(
+        &self,
+        api_evt: EventFd,
+        api_sender: Sender<ApiRequest>,
+        data: Self::RequestBody,
+    ) -> ApiResult<Self::ResponseBody> {
+        get_response_body(self, api_evt, api_sender, data)
+    }
 }

-pub fn vm_resume(api_evt: EventFd, api_sender: Sender<ApiRequest>) -> ApiResult<Option<Body>> {
-    vm_action(api_evt, api_sender, VmAction::Resume)
+pub struct VmAddNet;
+
+impl ApiAction for VmAddNet {
+    type RequestBody = NetConfig;
+    type ResponseBody = Option<Body>;
+
+    fn request(
+        &self,
+        config: Self::RequestBody,
+        response_sender: Sender<ApiResponse>,
+    ) -> ApiRequest {
+        Box::new(move |vmm| {
+            info!("API request event: VmAddNet {:?}", config);
+
+            let response = vmm
+                .vm_add_net(config)
+                .map_err(ApiError::VmAddNet)
+                .map(ApiResponsePayload::VmAction);
+
+            response_sender
+                .send(response)
+                .map_err(VmmError::ApiResponseSend)?;
+
+            Ok(false)
+        })
+    }
+
+    fn send(
+        &self,
+        api_evt: EventFd,
+        api_sender: Sender<ApiRequest>,
+        data: Self::RequestBody,
+    ) -> ApiResult<Self::ResponseBody> {
+        get_response_body(self, api_evt, api_sender, data)
+    }
 }

-pub fn vm_counters(api_evt: EventFd, api_sender: Sender<ApiRequest>) -> ApiResult<Option<Body>> {
-    vm_action(api_evt, api_sender, VmAction::Counters)
+pub struct VmAddVdpa;
+
+impl ApiAction for VmAddVdpa {
+    type RequestBody = VdpaConfig;
+    type ResponseBody = Option<Body>;
+
+    fn request(
+        &self,
+        config: Self::RequestBody,
+        response_sender: Sender<ApiResponse>,
+    ) -> ApiRequest {
+        Box::new(move |vmm| {
+            info!("API request event: VmAddVdpa {:?}", config);
+
+            let response = vmm
+                .vm_add_vdpa(config)
+                .map_err(ApiError::VmAddVdpa)
+                .map(ApiResponsePayload::VmAction);
+
+            response_sender
+                .send(response)
+                .map_err(VmmError::ApiResponseSend)?;
+
+            Ok(false)
+        })
+    }
+
+    fn send(
+        &self,
+        api_evt: EventFd,
+        api_sender: Sender<ApiRequest>,
+        data: Self::RequestBody,
+    ) -> ApiResult<Self::ResponseBody> {
+        get_response_body(self, api_evt, api_sender, data)
+    }
 }

-pub fn vm_power_button(
-    api_evt: EventFd,
-    api_sender: Sender<ApiRequest>,
-) -> ApiResult<Option<Body>> {
-    vm_action(api_evt, api_sender, VmAction::PowerButton)
+pub struct VmAddVsock;
+
+impl ApiAction for VmAddVsock {
+    type RequestBody = VsockConfig;
+    type ResponseBody = Option<Body>;
+
+    fn request(
+        &self,
+        config: Self::RequestBody,
+        response_sender: Sender<ApiResponse>,
+    ) -> ApiRequest {
+        Box::new(move |vmm| {
+            info!("API request event: VmAddVsock {:?}", config);
+
+            let response = vmm
+                .vm_add_vsock(config)
+                .map_err(ApiError::VmAddVsock)
+                .map(ApiResponsePayload::VmAction);
+
+            response_sender
+                .send(response)
+                .map_err(VmmError::ApiResponseSend)?;
+
+            Ok(false)
+        })
+    }
+
+    fn send(
+        &self,
+        api_evt: EventFd,
+        api_sender: Sender<ApiRequest>,
+        data: Self::RequestBody,
+    ) -> ApiResult<Self::ResponseBody> {
+        get_response_body(self, api_evt, api_sender, data)
+    }
 }

-pub fn vm_receive_migration(
-    api_evt: EventFd,
-    api_sender: Sender<ApiRequest>,
-    data: Arc<VmReceiveMigrationData>,
-) -> ApiResult<Option<Body>> {
-    vm_action(api_evt, api_sender, VmAction::ReceiveMigration(data))
+pub struct VmAddUserDevice;
+
+impl ApiAction for VmAddUserDevice {
+    type RequestBody = UserDeviceConfig;
+    type ResponseBody = Option<Body>;
+
+    fn request(
+        &self,
+        config: Self::RequestBody,
+        response_sender: Sender<ApiResponse>,
+    ) -> ApiRequest {
+        Box::new(move |vmm| {
+            info!("API request event: VmAddUserDevice {:?}", config);
+
+            let response = vmm
+                .vm_add_user_device(config)
+                .map_err(ApiError::VmAddUserDevice)
+                .map(ApiResponsePayload::VmAction);
+
+            response_sender
+                .send(response)
+                .map_err(VmmError::ApiResponseSend)?;
+
+            Ok(false)
+        })
+    }
+
+    fn send(
+        &self,
+        api_evt: EventFd,
+        api_sender: Sender<ApiRequest>,
+        data: Self::RequestBody,
+    ) -> ApiResult<Self::ResponseBody> {
+        get_response_body(self, api_evt, api_sender, data)
+    }
 }

-pub fn vm_send_migration(
-    api_evt: EventFd,
-    api_sender: Sender<ApiRequest>,
-    data: Arc<VmSendMigrationData>,
-) -> ApiResult<Option<Body>> {
-    vm_action(api_evt, api_sender, VmAction::SendMigration(data))
-}
+pub struct VmBoot;

-pub fn vm_snapshot(
-    api_evt: EventFd,
-    api_sender: Sender<ApiRequest>,
-    data: Arc<VmSnapshotConfig>,
-) -> ApiResult<Option<Body>> {
-    vm_action(api_evt, api_sender, VmAction::Snapshot(data))
-}
+impl ApiAction for VmBoot {
+    type RequestBody = ();
+    type ResponseBody = Option<Body>;

-pub fn vm_restore(
-    api_evt: EventFd,
-    api_sender: Sender<ApiRequest>,
-    data: Arc<RestoreConfig>,
-) -> ApiResult<Option<Body>> {
-    vm_action(api_evt, api_sender, VmAction::Restore(data))
+    fn request(&self, _: Self::RequestBody, response_sender: Sender<ApiResponse>) -> ApiRequest {
+        Box::new(move |vmm| {
+            info!("API request event: VmBoot");
+
+            let response = vmm
+                .vm_boot()
+                .map_err(ApiError::VmBoot)
+                .map(|_| ApiResponsePayload::Empty);
+
+            response_sender
+                .send(response)
+                .map_err(VmmError::ApiResponseSend)?;
+
+            Ok(false)
+        })
+    }
+
+    fn send(
+        &self,
+        api_evt: EventFd,
+        api_sender: Sender<ApiRequest>,
+        data: Self::RequestBody,
+    ) -> ApiResult<Self::ResponseBody> {
+        get_response_body(self, api_evt, api_sender, data)
+    }
 }

 #[cfg(all(target_arch = "x86_64", feature = "guest_debug"))]
-pub fn vm_coredump(
-    api_evt: EventFd,
-    api_sender: Sender<ApiRequest>,
-    data: Arc<VmCoredumpData>,
-) -> ApiResult<Option<Body>> {
-    vm_action(api_evt, api_sender, VmAction::Coredump(data))
-}
+pub struct VmCoredump;

-pub fn vm_info(api_evt: EventFd, api_sender: Sender<ApiRequest>) -> ApiResult<VmInfo> {
-    let (response_sender, response_receiver) = channel();
+#[cfg(all(target_arch = "x86_64", feature = "guest_debug"))]
+impl ApiAction for VmCoredump {
+    type RequestBody = VmCoredumpData;
+    type ResponseBody = Option<Body>;

-    // Send the VM request.
-    api_sender
-        .send(ApiRequest::VmInfo(response_sender))
-        .map_err(ApiError::RequestSend)?;
-    api_evt.write(1).map_err(ApiError::EventFdWrite)?;
+    fn request(
+        &self,
+        coredump_data: Self::RequestBody,
+        response_sender: Sender<ApiResponse>,
+    ) -> ApiRequest {
+        Box::new(move |vmm| {
+            info!("API request event: VmCoredump {:?}", coredump_data);

-    let vm_info = response_receiver.recv().map_err(ApiError::ResponseRecv)??;
+            let response = vmm
+                .vm_coredump(&coredump_data.destination_url)
+                .map_err(ApiError::VmCoredump)
+                .map(|_| ApiResponsePayload::Empty);

-    match vm_info {
-        ApiResponsePayload::VmInfo(info) => Ok(info),
-        _ => Err(ApiError::ResponsePayloadType),
+            response_sender
+                .send(response)
+                .map_err(VmmError::ApiResponseSend)?;
+
+            Ok(false)
+        })
+    }
+
+    fn send(
+        &self,
+        api_evt: EventFd,
+        api_sender: Sender<ApiRequest>,
+        data: Self::RequestBody,
+    ) -> ApiResult<Self::ResponseBody> {
+        get_response_body(self, api_evt, api_sender, data)
     }
 }

-pub fn vmm_ping(api_evt: EventFd, api_sender: Sender<ApiRequest>) -> ApiResult<VmmPingResponse> {
-    let (response_sender, response_receiver) = channel();
+pub struct VmCounters;

-    api_sender
-        .send(ApiRequest::VmmPing(response_sender))
-        .map_err(ApiError::RequestSend)?;
-    api_evt.write(1).map_err(ApiError::EventFdWrite)?;
+impl ApiAction for VmCounters {
+    type RequestBody = ();
+    type ResponseBody = Option<Body>;

-    let vmm_pong = response_receiver.recv().map_err(ApiError::ResponseRecv)??;
+    fn request(&self, _: Self::RequestBody, response_sender: Sender<ApiResponse>) -> ApiRequest {
+        Box::new(move |vmm| {
+            info!("API request event: VmCounters");

-    match vmm_pong {
-        ApiResponsePayload::VmmPing(pong) => Ok(pong),
-        _ => Err(ApiError::ResponsePayloadType),
+            let response = vmm
+                .vm_counters()
+                .map_err(ApiError::VmInfo)
+                .map(ApiResponsePayload::VmAction);
+
+            response_sender
+                .send(response)
.map_err(VmmError::ApiResponseSend)?; + + Ok(false) + }) + } + + fn send( + &self, + api_evt: EventFd, + api_sender: Sender, + data: Self::RequestBody, + ) -> ApiResult { + get_response_body(self, api_evt, api_sender, data) } } -pub fn vmm_shutdown(api_evt: EventFd, api_sender: Sender) -> ApiResult<()> { - let (response_sender, response_receiver) = channel(); +pub struct VmCreate; - // Send the VMM shutdown request. - api_sender - .send(ApiRequest::VmmShutdown(response_sender)) - .map_err(ApiError::RequestSend)?; - api_evt.write(1).map_err(ApiError::EventFdWrite)?; +impl ApiAction for VmCreate { + type RequestBody = Arc>; + type ResponseBody = (); - response_receiver.recv().map_err(ApiError::ResponseRecv)??; + fn request( + &self, + config: Self::RequestBody, + response_sender: Sender, + ) -> ApiRequest { + Box::new(move |vmm| { + info!("API request event: VmCreate {:?}", config); - Ok(()) + let response = vmm + .vm_create(config) + .map_err(ApiError::VmCreate) + .map(|_| ApiResponsePayload::Empty); + + response_sender + .send(response) + .map_err(VmmError::ApiResponseSend)?; + + Ok(false) + }) + } + + fn send( + &self, + api_evt: EventFd, + api_sender: Sender, + data: Self::RequestBody, + ) -> ApiResult<()> { + get_response(self, api_evt, api_sender, data)?; + + Ok(()) + } } -pub fn vm_resize( - api_evt: EventFd, - api_sender: Sender, - data: Arc, -) -> ApiResult> { - vm_action(api_evt, api_sender, VmAction::Resize(data)) +pub struct VmDelete; + +impl ApiAction for VmDelete { + type RequestBody = (); + type ResponseBody = Option; + + fn request(&self, _: Self::RequestBody, response_sender: Sender) -> ApiRequest { + Box::new(move |vmm| { + info!("API request event: VmDelete"); + + let response = vmm + .vm_delete() + .map_err(ApiError::VmDelete) + .map(|_| ApiResponsePayload::Empty); + + response_sender + .send(response) + .map_err(VmmError::ApiResponseSend)?; + + Ok(false) + }) + } + + fn send( + &self, + api_evt: EventFd, + api_sender: Sender, + data: Self::RequestBody, + ) -> ApiResult { + get_response_body(self, api_evt, api_sender, data) + } } -pub fn vm_resize_zone( - api_evt: EventFd, - api_sender: Sender, - data: Arc, -) -> ApiResult> { - vm_action(api_evt, api_sender, VmAction::ResizeZone(data)) +pub struct VmInfo; + +impl ApiAction for VmInfo { + type RequestBody = (); + type ResponseBody = VmInfoResponse; + + fn request(&self, _: Self::RequestBody, response_sender: Sender) -> ApiRequest { + Box::new(move |vmm| { + info!("API request event: VmInfo"); + + let response = vmm + .vm_info() + .map_err(ApiError::VmInfo) + .map(ApiResponsePayload::VmInfo); + + response_sender + .send(response) + .map_err(VmmError::ApiResponseSend)?; + + Ok(false) + }) + } + + fn send( + &self, + api_evt: EventFd, + api_sender: Sender, + data: (), + ) -> ApiResult { + let vm_info = get_response(self, api_evt, api_sender, data)?; + + match vm_info { + ApiResponsePayload::VmInfo(info) => Ok(info), + _ => Err(ApiError::ResponsePayloadType), + } + } } -pub fn vm_add_device( - api_evt: EventFd, - api_sender: Sender, - data: Arc, -) -> ApiResult> { - vm_action(api_evt, api_sender, VmAction::AddDevice(data)) +pub struct VmPause; + +impl ApiAction for VmPause { + type RequestBody = (); + type ResponseBody = Option; + + fn request(&self, _: Self::RequestBody, response_sender: Sender) -> ApiRequest { + Box::new(move |vmm| { + info!("API request event: VmPause"); + + let response = vmm + .vm_pause() + .map_err(ApiError::VmPause) + .map(|_| ApiResponsePayload::Empty); + + response_sender + .send(response) + 
.map_err(VmmError::ApiResponseSend)?; + + Ok(false) + }) + } + + fn send( + &self, + api_evt: EventFd, + api_sender: Sender, + data: Self::RequestBody, + ) -> ApiResult { + get_response_body(self, api_evt, api_sender, data) + } } -pub fn vm_add_user_device( - api_evt: EventFd, - api_sender: Sender, - data: Arc, -) -> ApiResult> { - vm_action(api_evt, api_sender, VmAction::AddUserDevice(data)) +pub struct VmPowerButton; + +impl ApiAction for VmPowerButton { + type RequestBody = (); + type ResponseBody = Option; + + fn request(&self, _: Self::RequestBody, response_sender: Sender) -> ApiRequest { + Box::new(move |vmm| { + info!("API request event: VmPowerButton"); + + let response = vmm + .vm_power_button() + .map_err(ApiError::VmPowerButton) + .map(|_| ApiResponsePayload::Empty); + + response_sender + .send(response) + .map_err(VmmError::ApiResponseSend)?; + + Ok(false) + }) + } + + fn send( + &self, + api_evt: EventFd, + api_sender: Sender, + data: Self::RequestBody, + ) -> ApiResult { + get_response_body(self, api_evt, api_sender, data) + } } -pub fn vm_remove_device( - api_evt: EventFd, - api_sender: Sender, - data: Arc, -) -> ApiResult> { - vm_action(api_evt, api_sender, VmAction::RemoveDevice(data)) +pub struct VmReboot; + +impl ApiAction for VmReboot { + type RequestBody = (); + type ResponseBody = Option; + + fn request(&self, _: Self::RequestBody, response_sender: Sender) -> ApiRequest { + Box::new(move |vmm| { + info!("API request event: VmReboot"); + + let response = vmm + .vm_reboot() + .map_err(ApiError::VmReboot) + .map(|_| ApiResponsePayload::Empty); + + response_sender + .send(response) + .map_err(VmmError::ApiResponseSend)?; + + Ok(false) + }) + } + + fn send( + &self, + api_evt: EventFd, + api_sender: Sender, + data: Self::RequestBody, + ) -> ApiResult { + get_response_body(self, api_evt, api_sender, data) + } } -pub fn vm_add_disk( - api_evt: EventFd, - api_sender: Sender, - data: Arc, -) -> ApiResult> { - vm_action(api_evt, api_sender, VmAction::AddDisk(data)) +pub struct VmReceiveMigration; + +impl ApiAction for VmReceiveMigration { + type RequestBody = VmReceiveMigrationData; + type ResponseBody = Option; + + fn request(&self, data: Self::RequestBody, response_sender: Sender) -> ApiRequest { + Box::new(move |vmm| { + info!("API request event: VmReceiveMigration {:?}", data); + + let response = vmm + .vm_receive_migration(data) + .map_err(ApiError::VmReceiveMigration) + .map(|_| ApiResponsePayload::Empty); + + response_sender + .send(response) + .map_err(VmmError::ApiResponseSend)?; + + Ok(false) + }) + } + + fn send( + &self, + api_evt: EventFd, + api_sender: Sender, + data: Self::RequestBody, + ) -> ApiResult { + get_response_body(self, api_evt, api_sender, data) + } } -pub fn vm_add_fs( - api_evt: EventFd, - api_sender: Sender, - data: Arc, -) -> ApiResult> { - vm_action(api_evt, api_sender, VmAction::AddFs(data)) +pub struct VmRemoveDevice; + +impl ApiAction for VmRemoveDevice { + type RequestBody = VmRemoveDeviceData; + type ResponseBody = Option; + + fn request( + &self, + remove_device_data: Self::RequestBody, + response_sender: Sender, + ) -> ApiRequest { + Box::new(move |vmm| { + info!("API request event: VmRemoveDevice {:?}", remove_device_data); + + let response = vmm + .vm_remove_device(remove_device_data.id) + .map_err(ApiError::VmRemoveDevice) + .map(|_| ApiResponsePayload::Empty); + + response_sender + .send(response) + .map_err(VmmError::ApiResponseSend)?; + + Ok(false) + }) + } + + fn send( + &self, + api_evt: EventFd, + api_sender: Sender, + data: 
Self::RequestBody, + ) -> ApiResult { + get_response_body(self, api_evt, api_sender, data) + } } -pub fn vm_add_pmem( - api_evt: EventFd, - api_sender: Sender, - data: Arc, -) -> ApiResult> { - vm_action(api_evt, api_sender, VmAction::AddPmem(data)) +pub struct VmResize; + +impl ApiAction for VmResize { + type RequestBody = VmResizeData; + type ResponseBody = Option; + + fn request( + &self, + resize_data: Self::RequestBody, + response_sender: Sender, + ) -> ApiRequest { + Box::new(move |vmm| { + info!("API request event: VmResize {:?}", resize_data); + + let response = vmm + .vm_resize( + resize_data.desired_vcpus, + resize_data.desired_ram, + resize_data.desired_balloon, + ) + .map_err(ApiError::VmResize) + .map(|_| ApiResponsePayload::Empty); + + response_sender + .send(response) + .map_err(VmmError::ApiResponseSend)?; + + Ok(false) + }) + } + + fn send( + &self, + api_evt: EventFd, + api_sender: Sender, + data: Self::RequestBody, + ) -> ApiResult { + get_response_body(self, api_evt, api_sender, data) + } } -pub fn vm_add_net( - api_evt: EventFd, - api_sender: Sender, - data: Arc, -) -> ApiResult> { - vm_action(api_evt, api_sender, VmAction::AddNet(data)) +pub struct VmResizeZone; + +impl ApiAction for VmResizeZone { + type RequestBody = VmResizeZoneData; + type ResponseBody = Option; + + fn request( + &self, + resize_zone_data: Self::RequestBody, + response_sender: Sender, + ) -> ApiRequest { + Box::new(move |vmm| { + info!("API request event: VmResizeZone {:?}", resize_zone_data); + + let response = vmm + .vm_resize_zone(resize_zone_data.id, resize_zone_data.desired_ram) + .map_err(ApiError::VmResizeZone) + .map(|_| ApiResponsePayload::Empty); + + response_sender + .send(response) + .map_err(VmmError::ApiResponseSend)?; + + Ok(false) + }) + } + + fn send( + &self, + api_evt: EventFd, + api_sender: Sender, + data: Self::RequestBody, + ) -> ApiResult { + get_response_body(self, api_evt, api_sender, data) + } } -pub fn vm_add_vdpa( - api_evt: EventFd, - api_sender: Sender, - data: Arc, -) -> ApiResult> { - vm_action(api_evt, api_sender, VmAction::AddVdpa(data)) +pub struct VmRestore; + +impl ApiAction for VmRestore { + type RequestBody = RestoreConfig; + type ResponseBody = Option; + + fn request( + &self, + config: Self::RequestBody, + response_sender: Sender, + ) -> ApiRequest { + Box::new(move |vmm| { + info!("API request event: VmRestore {:?}", config); + + let response = vmm + .vm_restore(config) + .map_err(ApiError::VmRestore) + .map(|_| ApiResponsePayload::Empty); + + response_sender + .send(response) + .map_err(VmmError::ApiResponseSend)?; + + Ok(false) + }) + } + + fn send( + &self, + api_evt: EventFd, + api_sender: Sender, + data: Self::RequestBody, + ) -> ApiResult { + get_response_body(self, api_evt, api_sender, data) + } } -pub fn vm_add_vsock( - api_evt: EventFd, - api_sender: Sender, - data: Arc, -) -> ApiResult> { - vm_action(api_evt, api_sender, VmAction::AddVsock(data)) +pub struct VmResume; + +impl ApiAction for VmResume { + type RequestBody = (); + type ResponseBody = Option; + + fn request(&self, _: Self::RequestBody, response_sender: Sender) -> ApiRequest { + Box::new(move |vmm| { + info!("API request event: VmResume"); + + let response = vmm + .vm_resume() + .map_err(ApiError::VmResume) + .map(|_| ApiResponsePayload::Empty); + + response_sender + .send(response) + .map_err(VmmError::ApiResponseSend)?; + + Ok(false) + }) + } + + fn send( + &self, + api_evt: EventFd, + api_sender: Sender, + data: Self::RequestBody, + ) -> ApiResult { + get_response_body(self, 
api_evt, api_sender, data) + } +} + +pub struct VmSendMigration; + +impl ApiAction for VmSendMigration { + type RequestBody = VmSendMigrationData; + type ResponseBody = Option; + + fn request(&self, data: Self::RequestBody, response_sender: Sender) -> ApiRequest { + Box::new(move |vmm| { + info!("API request event: VmSendMigration {:?}", data); + + let response = vmm + .vm_send_migration(data) + .map_err(ApiError::VmSendMigration) + .map(|_| ApiResponsePayload::Empty); + + response_sender + .send(response) + .map_err(VmmError::ApiResponseSend)?; + + Ok(false) + }) + } + + fn send( + &self, + api_evt: EventFd, + api_sender: Sender, + data: Self::RequestBody, + ) -> ApiResult { + get_response_body(self, api_evt, api_sender, data) + } +} + +pub struct VmShutdown; + +impl ApiAction for VmShutdown { + type RequestBody = (); + type ResponseBody = Option; + + fn request( + &self, + config: Self::RequestBody, + response_sender: Sender, + ) -> ApiRequest { + Box::new(move |vmm| { + info!("API request event: VmShutdown {:?}", config); + + let response = vmm + .vm_shutdown() + .map_err(ApiError::VmShutdown) + .map(|_| ApiResponsePayload::Empty); + + response_sender + .send(response) + .map_err(VmmError::ApiResponseSend)?; + + Ok(false) + }) + } + + fn send( + &self, + api_evt: EventFd, + api_sender: Sender, + data: Self::RequestBody, + ) -> ApiResult { + get_response_body(self, api_evt, api_sender, data) + } +} + +pub struct VmSnapshot; + +impl ApiAction for VmSnapshot { + type RequestBody = VmSnapshotConfig; + type ResponseBody = Option; + + fn request( + &self, + config: Self::RequestBody, + response_sender: Sender, + ) -> ApiRequest { + Box::new(move |vmm| { + info!("API request event: VmSnapshot {:?}", config); + + let response = vmm + .vm_snapshot(&config.destination_url) + .map_err(ApiError::VmSnapshot) + .map(|_| ApiResponsePayload::Empty); + + response_sender + .send(response) + .map_err(VmmError::ApiResponseSend)?; + + Ok(false) + }) + } + + fn send( + &self, + api_evt: EventFd, + api_sender: Sender, + data: Self::RequestBody, + ) -> ApiResult { + get_response_body(self, api_evt, api_sender, data) + } +} + +pub struct VmmPing; + +impl ApiAction for VmmPing { + type RequestBody = (); + type ResponseBody = VmmPingResponse; + + fn request(&self, _: Self::RequestBody, response_sender: Sender) -> ApiRequest { + Box::new(move |vmm| { + info!("API request event: VmmPing"); + + let response = ApiResponsePayload::VmmPing(vmm.vmm_ping()); + + response_sender + .send(Ok(response)) + .map_err(VmmError::ApiResponseSend)?; + + Ok(false) + }) + } + + fn send( + &self, + api_evt: EventFd, + api_sender: Sender, + data: (), + ) -> ApiResult { + let vmm_pong = get_response(self, api_evt, api_sender, data)?; + + match vmm_pong { + ApiResponsePayload::VmmPing(pong) => Ok(pong), + _ => Err(ApiError::ResponsePayloadType), + } + } +} + +pub struct VmmShutdown; + +impl ApiAction for VmmShutdown { + type RequestBody = (); + type ResponseBody = (); + + fn request(&self, _: Self::RequestBody, response_sender: Sender) -> ApiRequest { + Box::new(move |vmm| { + info!("API request event: VmmShutdown"); + + let response = vmm + .vmm_shutdown() + .map_err(ApiError::VmmShutdown) + .map(|_| ApiResponsePayload::Empty); + + response_sender + .send(response) + .map_err(VmmError::ApiResponseSend)?; + + Ok(true) + }) + } + + fn send(&self, api_evt: EventFd, api_sender: Sender, data: ()) -> ApiResult<()> { + get_response(self, api_evt, api_sender, data)?; + + Ok(()) + } } diff --git a/vmm/src/lib.rs b/vmm/src/lib.rs index 
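For review context, a minimal caller-side sketch (not part of the diff; it assumes only the VmAddNet action and the send() signature introduced above): the action value names the endpoint, and the strongly typed body travels alongside it, so no Default-filled placeholder data is needed.

    // Hypothetical helper, for illustration only.
    fn add_net(
        api_evt: EventFd,
        api_sender: Sender<ApiRequest>,
        cfg: NetConfig,
    ) -> ApiResult<Option<Body>> {
        // VmAddNet identifies the request type; `cfg` is its body.
        VmAddNet.send(api_evt, api_sender, cfg)
    }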
diff --git a/vmm/src/lib.rs b/vmm/src/lib.rs
index 8c6e409ed..d39e80b75 100644
--- a/vmm/src/lib.rs
+++ b/vmm/src/lib.rs
@@ -9,7 +9,7 @@ extern crate event_monitor;
 extern crate log;
 
 use crate::api::{
-    ApiError, ApiRequest, ApiResponse, ApiResponsePayload, VmInfo, VmReceiveMigrationData,
+    ApiRequest, ApiResponse, RequestHandler, VmInfoResponse, VmReceiveMigrationData,
     VmSendMigrationData, VmmPingResponse,
 };
 use crate::config::{
@@ -680,6 +680,523 @@ impl Vmm {
         })
     }
 
+    fn vm_receive_config<T>(
+        &mut self,
+        req: &Request,
+        socket: &mut T,
+        existing_memory_files: Option<HashMap<u32, File>>,
+    ) -> std::result::Result<Arc<Mutex<MemoryManager>>, MigratableError>
+    where
+        T: Read + Write,
+    {
+        // Read in config data along with memory manager data
+        let mut data: Vec<u8> = Vec::new();
+        data.resize_with(req.length() as usize, Default::default);
+        socket
+            .read_exact(&mut data)
+            .map_err(MigratableError::MigrateSocket)?;
+
+        let vm_migration_config: VmMigrationConfig =
+            serde_json::from_slice(&data).map_err(|e| {
+                MigratableError::MigrateReceive(anyhow!("Error deserialising config: {}", e))
+            })?;
+
+        #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
+        self.vm_check_cpuid_compatibility(
+            &vm_migration_config.vm_config,
+            &vm_migration_config.common_cpuid,
+        )?;
+
+        let config = vm_migration_config.vm_config.clone();
+        self.vm_config = Some(vm_migration_config.vm_config);
+
+        let vm = Vm::create_hypervisor_vm(
+            &self.hypervisor,
+            #[cfg(feature = "tdx")]
+            false,
+            #[cfg(feature = "sev_snp")]
+            false,
+        )
+        .map_err(|e| {
+            MigratableError::MigrateReceive(anyhow!(
+                "Error creating hypervisor VM from snapshot: {:?}",
+                e
+            ))
+        })?;
+
+        let phys_bits =
+            vm::physical_bits(&self.hypervisor, config.lock().unwrap().cpus.max_phys_bits);
+
+        let memory_manager = MemoryManager::new(
+            vm,
+            &config.lock().unwrap().memory.clone(),
+            None,
+            phys_bits,
+            #[cfg(feature = "tdx")]
+            false,
+            Some(&vm_migration_config.memory_manager_data),
+            existing_memory_files,
+            #[cfg(target_arch = "x86_64")]
+            None,
+        )
+        .map_err(|e| {
+            MigratableError::MigrateReceive(anyhow!(
+                "Error creating MemoryManager from snapshot: {:?}",
+                e
+            ))
+        })?;
+
+        Response::ok().write_to(socket)?;
+
+        Ok(memory_manager)
+    }
+
+    fn vm_receive_state<T>(
+        &mut self,
+        req: &Request,
+        socket: &mut T,
+        mm: Arc<Mutex<MemoryManager>>,
+    ) -> std::result::Result<(), MigratableError>
+    where
+        T: Read + Write,
+    {
+        // Read in state data
+        let mut data: Vec<u8> = Vec::new();
+        data.resize_with(req.length() as usize, Default::default);
+        socket
+            .read_exact(&mut data)
+            .map_err(MigratableError::MigrateSocket)?;
+        let snapshot: Snapshot = serde_json::from_slice(&data).map_err(|e| {
+            MigratableError::MigrateReceive(anyhow!("Error deserialising snapshot: {}", e))
+        })?;
+
+        let exit_evt = self.exit_evt.try_clone().map_err(|e| {
+            MigratableError::MigrateReceive(anyhow!("Error cloning exit EventFd: {}", e))
+        })?;
+        let reset_evt = self.reset_evt.try_clone().map_err(|e| {
+            MigratableError::MigrateReceive(anyhow!("Error cloning reset EventFd: {}", e))
+        })?;
+        #[cfg(feature = "guest_debug")]
+        let debug_evt = self.vm_debug_evt.try_clone().map_err(|e| {
+            MigratableError::MigrateReceive(anyhow!("Error cloning debug EventFd: {}", e))
+        })?;
+        let activate_evt = self.activate_evt.try_clone().map_err(|e| {
+            MigratableError::MigrateReceive(anyhow!("Error cloning activate EventFd: {}", e))
+        })?;
+
+        let timestamp = Instant::now();
+        let hypervisor_vm = mm.lock().unwrap().vm.clone();
+        let mut vm = Vm::new_from_memory_manager(
+            self.vm_config.clone().unwrap(),
+            mm,
+            hypervisor_vm,
+            exit_evt,
+            reset_evt,
+            #[cfg(feature = "guest_debug")]
+            debug_evt,
+            &self.seccomp_action,
+            self.hypervisor.clone(),
+            activate_evt,
+            timestamp,
+            None,
+            None,
+            None,
+            Arc::clone(&self.original_termios_opt),
+            Some(snapshot),
+        )
+        .map_err(|e| {
+            MigratableError::MigrateReceive(anyhow!("Error creating VM from snapshot: {:?}", e))
+        })?;
+
+        // Create VM
+        vm.restore().map_err(|e| {
+            Response::error().write_to(socket).ok();
+            MigratableError::MigrateReceive(anyhow!("Failed restoring the Vm: {}", e))
+        })?;
+        self.vm = Some(vm);
+
+        Response::ok().write_to(socket)?;
+
+        Ok(())
+    }
+
+    fn vm_receive_memory<T>(
+        &mut self,
+        req: &Request,
+        socket: &mut T,
+        memory_manager: &mut MemoryManager,
+    ) -> std::result::Result<(), MigratableError>
+    where
+        T: Read + ReadVolatile + Write,
+    {
+        // Read table
+        let table = MemoryRangeTable::read_from(socket, req.length())?;
+
+        // And then read the memory itself
+        memory_manager
+            .receive_memory_regions(&table, socket)
+            .map_err(|e| {
+                Response::error().write_to(socket).ok();
+                e
+            })?;
+        Response::ok().write_to(socket)?;
+        Ok(())
+    }
+
+    fn socket_url_to_path(url: &str) -> result::Result<PathBuf, MigratableError> {
+        url.strip_prefix("unix:")
+            .ok_or_else(|| {
+                MigratableError::MigrateSend(anyhow!("Could not extract path from URL: {}", url))
+            })
+            .map(|s| s.into())
+    }
+
+    // Returns true if there were dirty pages to send
+    fn vm_maybe_send_dirty_pages<T>(
+        vm: &mut Vm,
+        socket: &mut T,
+    ) -> result::Result<bool, MigratableError>
+    where
+        T: Read + Write + WriteVolatile,
+    {
+        // Send (dirty) memory table
+        let table = vm.dirty_log()?;
+
+        // But if there are no regions go straight to pause
+        if table.regions().is_empty() {
+            return Ok(false);
+        }
+
+        Request::memory(table.length()).write_to(socket).unwrap();
+        table.write_to(socket)?;
+        // And then the memory itself
+        vm.send_memory_regions(&table, socket)?;
+        let res = Response::read_from(socket)?;
+        if res.status() != Status::Ok {
+            warn!("Error during dirty memory migration");
+            Request::abandon().write_to(socket)?;
+            Response::read_from(socket).ok();
+            return Err(MigratableError::MigrateSend(anyhow!(
+                "Error during dirty memory migration"
+            )));
+        }
+
+        Ok(true)
+    }
+
+    fn send_migration(
+        vm: &mut Vm,
+        #[cfg(all(feature = "kvm", target_arch = "x86_64"))] hypervisor: Arc<
+            dyn hypervisor::Hypervisor,
+        >,
+        send_data_migration: VmSendMigrationData,
+    ) -> result::Result<(), MigratableError> {
+        let path = Self::socket_url_to_path(&send_data_migration.destination_url)?;
+        let mut socket = UnixStream::connect(path).map_err(|e| {
+            MigratableError::MigrateSend(anyhow!("Error connecting to UNIX socket: {}", e))
+        })?;
+
+        // Start the migration
+        Request::start().write_to(&mut socket)?;
+        let res = Response::read_from(&mut socket)?;
+        if res.status() != Status::Ok {
+            warn!("Error starting migration");
+            Request::abandon().write_to(&mut socket)?;
+            Response::read_from(&mut socket).ok();
+            return Err(MigratableError::MigrateSend(anyhow!(
+                "Error starting migration"
+            )));
+        }
+
+        // Send config
+        let vm_config = vm.get_config();
+        #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
+        let common_cpuid = {
+            #[cfg(feature = "tdx")]
+            if vm_config.lock().unwrap().is_tdx_enabled() {
+                return Err(MigratableError::MigrateSend(anyhow!(
+                    "Live Migration is not supported when TDX is enabled"
+                )));
+            };
+
+            let amx = vm_config.lock().unwrap().cpus.features.amx;
+            let phys_bits =
+                vm::physical_bits(&hypervisor, vm_config.lock().unwrap().cpus.max_phys_bits);
+            arch::generate_common_cpuid(
+                &hypervisor,
+                &arch::CpuidConfig {
+                    sgx_epc_sections: None,
+                    phys_bits,
+                    kvm_hyperv: vm_config.lock().unwrap().cpus.kvm_hyperv,
+                    #[cfg(feature = "tdx")]
+                    tdx: false,
+                    amx,
+                },
+            )
+            .map_err(|e| {
+                MigratableError::MigrateSend(anyhow!("Error generating common cpuid: {:?}", e))
+            })?
+        };
+
+        if send_data_migration.local {
+            vm.send_memory_fds(&mut socket)?;
+        }
+
+        let vm_migration_config = VmMigrationConfig {
+            vm_config,
+            #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
+            common_cpuid,
+            memory_manager_data: vm.memory_manager_data(),
+        };
+        let config_data = serde_json::to_vec(&vm_migration_config).unwrap();
+        Request::config(config_data.len() as u64).write_to(&mut socket)?;
+        socket
+            .write_all(&config_data)
+            .map_err(MigratableError::MigrateSocket)?;
+        let res = Response::read_from(&mut socket)?;
+        if res.status() != Status::Ok {
+            warn!("Error during config migration");
+            Request::abandon().write_to(&mut socket)?;
+            Response::read_from(&mut socket).ok();
+            return Err(MigratableError::MigrateSend(anyhow!(
+                "Error during config migration"
+            )));
+        }
+
+        // Let every Migratable object know about the migration being started.
+        vm.start_migration()?;
+
+        if send_data_migration.local {
+            // Now pause VM
+            vm.pause()?;
+        } else {
+            // Start logging dirty pages
+            vm.start_dirty_log()?;
+
+            // Send memory table
+            let table = vm.memory_range_table()?;
+            Request::memory(table.length())
+                .write_to(&mut socket)
+                .unwrap();
+            table.write_to(&mut socket)?;
+            // And then the memory itself
+            vm.send_memory_regions(&table, &mut socket)?;
+            let res = Response::read_from(&mut socket)?;
+            if res.status() != Status::Ok {
+                warn!("Error during memory migration");
+                Request::abandon().write_to(&mut socket)?;
+                Response::read_from(&mut socket).ok();
+                return Err(MigratableError::MigrateSend(anyhow!(
+                    "Error during memory migration"
+                )));
+            }
+
+            // Try at most 5 passes of dirty memory sending
+            const MAX_DIRTY_MIGRATIONS: usize = 5;
+            for i in 0..MAX_DIRTY_MIGRATIONS {
+                info!("Dirty memory migration {} of {}", i, MAX_DIRTY_MIGRATIONS);
+                if !Self::vm_maybe_send_dirty_pages(vm, &mut socket)? {
+                    break;
+                }
+            }
+
+            // Now pause VM
+            vm.pause()?;
+
+            // Send last batch of dirty pages
+            Self::vm_maybe_send_dirty_pages(vm, &mut socket)?;
+
+            // Stop logging dirty pages
+            vm.stop_dirty_log()?;
+        }
+        // Capture snapshot and send it
+        let vm_snapshot = vm.snapshot()?;
+        let snapshot_data = serde_json::to_vec(&vm_snapshot).unwrap();
+        Request::state(snapshot_data.len() as u64).write_to(&mut socket)?;
+        socket
+            .write_all(&snapshot_data)
+            .map_err(MigratableError::MigrateSocket)?;
+        let res = Response::read_from(&mut socket)?;
+        if res.status() != Status::Ok {
+            warn!("Error during state migration");
+            Request::abandon().write_to(&mut socket)?;
+            Response::read_from(&mut socket).ok();
+            return Err(MigratableError::MigrateSend(anyhow!(
+                "Error during state migration"
+            )));
+        }
+
+        // Complete the migration
+        Request::complete().write_to(&mut socket)?;
+        let res = Response::read_from(&mut socket)?;
+        if res.status() != Status::Ok {
+            warn!("Error completing migration");
+            Request::abandon().write_to(&mut socket)?;
+            Response::read_from(&mut socket).ok();
+            return Err(MigratableError::MigrateSend(anyhow!(
+                "Error completing migration"
+            )));
+        }
+        info!("Migration complete");
+
+        // Let every Migratable object know about the migration being complete
+        vm.complete_migration()
+    }
+
+    #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
+    fn vm_check_cpuid_compatibility(
+        &self,
+        src_vm_config: &Arc<Mutex<VmConfig>>,
+        src_vm_cpuid: &[hypervisor::arch::x86::CpuIdEntry],
+    ) -> result::Result<(), MigratableError> {
+        #[cfg(feature = "tdx")]
+        if src_vm_config.lock().unwrap().is_tdx_enabled() {
+            return Err(MigratableError::MigrateReceive(anyhow!(
+                "Live Migration is not supported when TDX is enabled"
+            )));
+        };
+
+        // We check the `CPUID` compatibility between the source VM and the
+        // destination, which is mostly about feature compatibility; the
+        // "topology/sgx" leaves are not relevant.
+        let dest_cpuid = &{
+            let vm_config = &src_vm_config.lock().unwrap();
+
+            let phys_bits = vm::physical_bits(&self.hypervisor, vm_config.cpus.max_phys_bits);
+            arch::generate_common_cpuid(
+                &self.hypervisor.clone(),
+                &arch::CpuidConfig {
+                    sgx_epc_sections: None,
+                    phys_bits,
+                    kvm_hyperv: vm_config.cpus.kvm_hyperv,
+                    #[cfg(feature = "tdx")]
+                    tdx: false,
+                    amx: vm_config.cpus.features.amx,
+                },
+            )
+            .map_err(|e| {
+                MigratableError::MigrateReceive(anyhow!("Error generating common cpuid: {:?}", e))
+            })?
+        };
+        arch::CpuidFeatureEntry::check_cpuid_compatibility(src_vm_cpuid, dest_cpuid).map_err(|e| {
+            MigratableError::MigrateReceive(anyhow!(
+                "Error checking cpu feature compatibility: {:?}",
+                e
+            ))
+        })
+    }
+
+    fn control_loop(
+        &mut self,
+        api_receiver: Rc<Receiver<ApiRequest>>,
+        #[cfg(feature = "guest_debug")] gdb_receiver: Rc<Receiver<gdb::GdbRequest>>,
+    ) -> Result<()> {
+        const EPOLL_EVENTS_LEN: usize = 100;
+
+        let mut events = vec![epoll::Event::new(epoll::Events::empty(), 0); EPOLL_EVENTS_LEN];
+        let epoll_fd = self.epoll.as_raw_fd();
+
+        'outer: loop {
+            let num_events = match epoll::wait(epoll_fd, -1, &mut events[..]) {
+                Ok(res) => res,
+                Err(e) => {
+                    if e.kind() == io::ErrorKind::Interrupted {
+                        // It's well defined from the epoll_wait() syscall
+                        // documentation that the epoll loop can be interrupted
+                        // before any of the requested events occurred or the
+                        // timeout expired. In both those cases, epoll_wait()
+                        // returns an error of type EINTR, but this should not
+                        // be considered as a regular error. Instead it is more
+                        // appropriate to retry, by calling into epoll_wait().
+                        continue;
+                    }
+                    return Err(Error::Epoll(e));
+                }
+            };
+
+            for event in events.iter().take(num_events) {
+                let dispatch_event: EpollDispatch = event.data.into();
+                match dispatch_event {
+                    EpollDispatch::Unknown => {
+                        let event = event.data;
+                        warn!("Unknown VMM loop event: {}", event);
+                    }
+                    EpollDispatch::Exit => {
+                        info!("VM exit event");
+                        // Consume the event.
+                        self.exit_evt.read().map_err(Error::EventFdRead)?;
+                        self.vmm_shutdown().map_err(Error::VmmShutdown)?;
+
+                        break 'outer;
+                    }
+                    EpollDispatch::Reset => {
+                        info!("VM reset event");
+                        // Consume the event.
+                        self.reset_evt.read().map_err(Error::EventFdRead)?;
+                        self.vm_reboot().map_err(Error::VmReboot)?;
+                    }
+                    EpollDispatch::ActivateVirtioDevices => {
+                        if let Some(ref vm) = self.vm {
+                            let count = self.activate_evt.read().map_err(Error::EventFdRead)?;
+                            info!(
+                                "Trying to activate pending virtio devices: count = {}",
+                                count
+                            );
+                            vm.activate_virtio_devices()
+                                .map_err(Error::ActivateVirtioDevices)?;
+                        }
+                    }
+                    EpollDispatch::Api => {
+                        // Consume the events.
+                        for _ in 0..self.api_evt.read().map_err(Error::EventFdRead)? {
+                            // Read from the API receiver channel
+                            let api_request = api_receiver.recv().map_err(Error::ApiRequestRecv)?;
+
+                            if api_request(self)? {
+                                break 'outer;
+                            }
+                        }
+                    }
+                    #[cfg(feature = "guest_debug")]
+                    EpollDispatch::Debug => {
+                        // Consume the events.
+                        for _ in 0..self.debug_evt.read().map_err(Error::EventFdRead)? {
+                            // Read from the API receiver channel
+                            let gdb_request = gdb_receiver.recv().map_err(Error::GdbRequestRecv)?;
+
+                            let response = if let Some(ref mut vm) = self.vm {
+                                vm.debug_request(&gdb_request.payload, gdb_request.cpu_id)
+                            } else {
+                                Err(VmError::VmNotRunning)
+                            }
+                            .map_err(gdb::Error::Vm);
+
+                            gdb_request
+                                .sender
+                                .send(response)
+                                .map_err(Error::GdbResponseSend)?;
+                        }
+                    }
+                    #[cfg(not(feature = "guest_debug"))]
+                    EpollDispatch::Debug => {}
+                }
+            }
+        }
+
+        // Trigger the termination of the signal_handler thread
+        if let Some(signals) = self.signals.take() {
+            signals.close();
+        }
+
+        // Wait for all the threads to finish
+        for thread in self.threads.drain(..) {
+            thread.join().map_err(Error::ThreadCleanup)?
+        }
+
+        Ok(())
+    }
+}
+
+impl RequestHandler for Vmm {
     fn vm_create(&mut self, config: Arc<Mutex<VmConfig>>) -> result::Result<(), VmError> {
         // We only store the passed VM config.
         // The VM will be created when being asked to boot it.
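For review context, the shape this dispatch implies (a sketch, inferred from the boxed closures built in request() above and the `api_request(self)?` call in control_loop(); the exact alias lives in vmm/src/api/mod.rs and may differ in detail):

    // ApiRequest is now, roughly, a boxed one-shot closure over the
    // RequestHandler trait. The returned bool tells control_loop()
    // whether to exit; only VmmShutdown returns Ok(true).
    pub type ApiRequest =
        Box<dyn FnOnce(&mut dyn RequestHandler) -> Result<bool, VmmError> + Send + 'static>;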
@@ -921,7 +1438,7 @@ impl Vmm { Ok(()) } - fn vm_info(&self) -> result::Result { + fn vm_info(&self) -> result::Result { match &self.vm_config { Some(config) => { let state = match &self.vm { @@ -938,7 +1455,7 @@ impl Vmm { let device_tree = self.vm.as_ref().map(|vm| vm.device_tree()); - Ok(VmInfo { + Ok(VmInfoResponse { config, state, memory_actual_size, @@ -1307,177 +1824,6 @@ impl Vmm { } } - fn vm_receive_config( - &mut self, - req: &Request, - socket: &mut T, - existing_memory_files: Option>, - ) -> std::result::Result>, MigratableError> - where - T: Read + Write, - { - // Read in config data along with memory manager data - let mut data: Vec = Vec::new(); - data.resize_with(req.length() as usize, Default::default); - socket - .read_exact(&mut data) - .map_err(MigratableError::MigrateSocket)?; - - let vm_migration_config: VmMigrationConfig = - serde_json::from_slice(&data).map_err(|e| { - MigratableError::MigrateReceive(anyhow!("Error deserialising config: {}", e)) - })?; - - #[cfg(all(feature = "kvm", target_arch = "x86_64"))] - self.vm_check_cpuid_compatibility( - &vm_migration_config.vm_config, - &vm_migration_config.common_cpuid, - )?; - - let config = vm_migration_config.vm_config.clone(); - self.vm_config = Some(vm_migration_config.vm_config); - - let vm = Vm::create_hypervisor_vm( - &self.hypervisor, - #[cfg(feature = "tdx")] - false, - #[cfg(feature = "sev_snp")] - false, - ) - .map_err(|e| { - MigratableError::MigrateReceive(anyhow!( - "Error creating hypervisor VM from snapshot: {:?}", - e - )) - })?; - - let phys_bits = - vm::physical_bits(&self.hypervisor, config.lock().unwrap().cpus.max_phys_bits); - - let memory_manager = MemoryManager::new( - vm, - &config.lock().unwrap().memory.clone(), - None, - phys_bits, - #[cfg(feature = "tdx")] - false, - Some(&vm_migration_config.memory_manager_data), - existing_memory_files, - #[cfg(target_arch = "x86_64")] - None, - ) - .map_err(|e| { - MigratableError::MigrateReceive(anyhow!( - "Error creating MemoryManager from snapshot: {:?}", - e - )) - })?; - - Response::ok().write_to(socket)?; - - Ok(memory_manager) - } - - fn vm_receive_state( - &mut self, - req: &Request, - socket: &mut T, - mm: Arc>, - ) -> std::result::Result<(), MigratableError> - where - T: Read + Write, - { - // Read in state data - let mut data: Vec = Vec::new(); - data.resize_with(req.length() as usize, Default::default); - socket - .read_exact(&mut data) - .map_err(MigratableError::MigrateSocket)?; - let snapshot: Snapshot = serde_json::from_slice(&data).map_err(|e| { - MigratableError::MigrateReceive(anyhow!("Error deserialising snapshot: {}", e)) - })?; - - let exit_evt = self.exit_evt.try_clone().map_err(|e| { - MigratableError::MigrateReceive(anyhow!("Error cloning exit EventFd: {}", e)) - })?; - let reset_evt = self.reset_evt.try_clone().map_err(|e| { - MigratableError::MigrateReceive(anyhow!("Error cloning reset EventFd: {}", e)) - })?; - #[cfg(feature = "guest_debug")] - let debug_evt = self.vm_debug_evt.try_clone().map_err(|e| { - MigratableError::MigrateReceive(anyhow!("Error cloning debug EventFd: {}", e)) - })?; - let activate_evt = self.activate_evt.try_clone().map_err(|e| { - MigratableError::MigrateReceive(anyhow!("Error cloning activate EventFd: {}", e)) - })?; - - let timestamp = Instant::now(); - let hypervisor_vm = mm.lock().unwrap().vm.clone(); - let mut vm = Vm::new_from_memory_manager( - self.vm_config.clone().unwrap(), - mm, - hypervisor_vm, - exit_evt, - reset_evt, - #[cfg(feature = "guest_debug")] - debug_evt, - &self.seccomp_action, - 
self.hypervisor.clone(), - activate_evt, - timestamp, - None, - None, - None, - Arc::clone(&self.original_termios_opt), - Some(snapshot), - ) - .map_err(|e| { - MigratableError::MigrateReceive(anyhow!("Error creating VM from snapshot: {:?}", e)) - })?; - - // Create VM - vm.restore().map_err(|e| { - Response::error().write_to(socket).ok(); - MigratableError::MigrateReceive(anyhow!("Failed restoring the Vm: {}", e)) - })?; - self.vm = Some(vm); - - Response::ok().write_to(socket)?; - - Ok(()) - } - - fn vm_receive_memory( - &mut self, - req: &Request, - socket: &mut T, - memory_manager: &mut MemoryManager, - ) -> std::result::Result<(), MigratableError> - where - T: Read + ReadVolatile + Write, - { - // Read table - let table = MemoryRangeTable::read_from(socket, req.length())?; - - // And then read the memory itself - memory_manager - .receive_memory_regions(&table, socket) - .map_err(|e| { - Response::error().write_to(socket).ok(); - e - })?; - Response::ok().write_to(socket)?; - Ok(()) - } - - fn socket_url_to_path(url: &str) -> result::Result { - url.strip_prefix("unix:") - .ok_or_else(|| { - MigratableError::MigrateSend(anyhow!("Could not extract path from URL: {}", url)) - }) - .map(|s| s.into()) - } - fn vm_receive_migration( &mut self, receive_data_migration: VmReceiveMigrationData, @@ -1607,198 +1953,6 @@ impl Vmm { Ok(()) } - // Returns true if there were dirty pages to send - fn vm_maybe_send_dirty_pages( - vm: &mut Vm, - socket: &mut T, - ) -> result::Result - where - T: Read + Write + WriteVolatile, - { - // Send (dirty) memory table - let table = vm.dirty_log()?; - - // But if there are no regions go straight to pause - if table.regions().is_empty() { - return Ok(false); - } - - Request::memory(table.length()).write_to(socket).unwrap(); - table.write_to(socket)?; - // And then the memory itself - vm.send_memory_regions(&table, socket)?; - let res = Response::read_from(socket)?; - if res.status() != Status::Ok { - warn!("Error during dirty memory migration"); - Request::abandon().write_to(socket)?; - Response::read_from(socket).ok(); - return Err(MigratableError::MigrateSend(anyhow!( - "Error during dirty memory migration" - ))); - } - - Ok(true) - } - - fn send_migration( - vm: &mut Vm, - #[cfg(all(feature = "kvm", target_arch = "x86_64"))] hypervisor: Arc< - dyn hypervisor::Hypervisor, - >, - send_data_migration: VmSendMigrationData, - ) -> result::Result<(), MigratableError> { - let path = Self::socket_url_to_path(&send_data_migration.destination_url)?; - let mut socket = UnixStream::connect(path).map_err(|e| { - MigratableError::MigrateSend(anyhow!("Error connecting to UNIX socket: {}", e)) - })?; - - // Start the migration - Request::start().write_to(&mut socket)?; - let res = Response::read_from(&mut socket)?; - if res.status() != Status::Ok { - warn!("Error starting migration"); - Request::abandon().write_to(&mut socket)?; - Response::read_from(&mut socket).ok(); - return Err(MigratableError::MigrateSend(anyhow!( - "Error starting migration" - ))); - } - - // Send config - let vm_config = vm.get_config(); - #[cfg(all(feature = "kvm", target_arch = "x86_64"))] - let common_cpuid = { - #[cfg(feature = "tdx")] - if vm_config.lock().unwrap().is_tdx_enabled() { - return Err(MigratableError::MigrateSend(anyhow!( - "Live Migration is not supported when TDX is enabled" - ))); - }; - - let amx = vm_config.lock().unwrap().cpus.features.amx; - let phys_bits = - vm::physical_bits(&hypervisor, vm_config.lock().unwrap().cpus.max_phys_bits); - arch::generate_common_cpuid( - 
&hypervisor, - &arch::CpuidConfig { - sgx_epc_sections: None, - phys_bits, - kvm_hyperv: vm_config.lock().unwrap().cpus.kvm_hyperv, - #[cfg(feature = "tdx")] - tdx: false, - amx, - }, - ) - .map_err(|e| { - MigratableError::MigrateSend(anyhow!("Error generating common cpuid': {:?}", e)) - })? - }; - - if send_data_migration.local { - vm.send_memory_fds(&mut socket)?; - } - - let vm_migration_config = VmMigrationConfig { - vm_config, - #[cfg(all(feature = "kvm", target_arch = "x86_64"))] - common_cpuid, - memory_manager_data: vm.memory_manager_data(), - }; - let config_data = serde_json::to_vec(&vm_migration_config).unwrap(); - Request::config(config_data.len() as u64).write_to(&mut socket)?; - socket - .write_all(&config_data) - .map_err(MigratableError::MigrateSocket)?; - let res = Response::read_from(&mut socket)?; - if res.status() != Status::Ok { - warn!("Error during config migration"); - Request::abandon().write_to(&mut socket)?; - Response::read_from(&mut socket).ok(); - return Err(MigratableError::MigrateSend(anyhow!( - "Error during config migration" - ))); - } - - // Let every Migratable object know about the migration being started. - vm.start_migration()?; - - if send_data_migration.local { - // Now pause VM - vm.pause()?; - } else { - // Start logging dirty pages - vm.start_dirty_log()?; - - // Send memory table - let table = vm.memory_range_table()?; - Request::memory(table.length()) - .write_to(&mut socket) - .unwrap(); - table.write_to(&mut socket)?; - // And then the memory itself - vm.send_memory_regions(&table, &mut socket)?; - let res = Response::read_from(&mut socket)?; - if res.status() != Status::Ok { - warn!("Error during memory migration"); - Request::abandon().write_to(&mut socket)?; - Response::read_from(&mut socket).ok(); - return Err(MigratableError::MigrateSend(anyhow!( - "Error during memory migration" - ))); - } - - // Try at most 5 passes of dirty memory sending - const MAX_DIRTY_MIGRATIONS: usize = 5; - for i in 0..MAX_DIRTY_MIGRATIONS { - info!("Dirty memory migration {} of {}", i, MAX_DIRTY_MIGRATIONS); - if !Self::vm_maybe_send_dirty_pages(vm, &mut socket)? 
{ - break; - } - } - - // Now pause VM - vm.pause()?; - - // Send last batch of dirty pages - Self::vm_maybe_send_dirty_pages(vm, &mut socket)?; - - // Stop logging dirty pages - vm.stop_dirty_log()?; - } - // Capture snapshot and send it - let vm_snapshot = vm.snapshot()?; - let snapshot_data = serde_json::to_vec(&vm_snapshot).unwrap(); - Request::state(snapshot_data.len() as u64).write_to(&mut socket)?; - socket - .write_all(&snapshot_data) - .map_err(MigratableError::MigrateSocket)?; - let res = Response::read_from(&mut socket)?; - if res.status() != Status::Ok { - warn!("Error during state migration"); - Request::abandon().write_to(&mut socket)?; - Response::read_from(&mut socket).ok(); - return Err(MigratableError::MigrateSend(anyhow!( - "Error during state migration" - ))); - } - - // Complete the migration - Request::complete().write_to(&mut socket)?; - let res = Response::read_from(&mut socket)?; - if res.status() != Status::Ok { - warn!("Error completing migration"); - Request::abandon().write_to(&mut socket)?; - Response::read_from(&mut socket).ok(); - return Err(MigratableError::MigrateSend(anyhow!( - "Error completing migration" - ))); - } - info!("Migration complete"); - - // Let every Migratable object know about the migration being complete - vm.complete_migration() - } - fn vm_send_migration( &mut self, send_data_migration: VmSendMigrationData, @@ -1857,377 +2011,6 @@ impl Vmm { Err(MigratableError::MigrateSend(anyhow!("VM is not running"))) } } - - #[cfg(all(feature = "kvm", target_arch = "x86_64"))] - fn vm_check_cpuid_compatibility( - &self, - src_vm_config: &Arc>, - src_vm_cpuid: &[hypervisor::arch::x86::CpuIdEntry], - ) -> result::Result<(), MigratableError> { - #[cfg(feature = "tdx")] - if src_vm_config.lock().unwrap().is_tdx_enabled() { - return Err(MigratableError::MigrateReceive(anyhow!( - "Live Migration is not supported when TDX is enabled" - ))); - }; - - // We check the `CPUID` compatibility of between the source vm and destination, which is - // mostly about feature compatibility and "topology/sgx" leaves are not relevant. - let dest_cpuid = &{ - let vm_config = &src_vm_config.lock().unwrap(); - - let phys_bits = vm::physical_bits(&self.hypervisor, vm_config.cpus.max_phys_bits); - arch::generate_common_cpuid( - &self.hypervisor.clone(), - &arch::CpuidConfig { - sgx_epc_sections: None, - phys_bits, - kvm_hyperv: vm_config.cpus.kvm_hyperv, - #[cfg(feature = "tdx")] - tdx: false, - amx: vm_config.cpus.features.amx, - }, - ) - .map_err(|e| { - MigratableError::MigrateReceive(anyhow!("Error generating common cpuid: {:?}", e)) - })? - }; - arch::CpuidFeatureEntry::check_cpuid_compatibility(src_vm_cpuid, dest_cpuid).map_err(|e| { - MigratableError::MigrateReceive(anyhow!( - "Error checking cpu feature compatibility': {:?}", - e - )) - }) - } - - fn control_loop( - &mut self, - api_receiver: Rc>, - #[cfg(feature = "guest_debug")] gdb_receiver: Rc>, - ) -> Result<()> { - const EPOLL_EVENTS_LEN: usize = 100; - - let mut events = vec![epoll::Event::new(epoll::Events::empty(), 0); EPOLL_EVENTS_LEN]; - let epoll_fd = self.epoll.as_raw_fd(); - - 'outer: loop { - let num_events = match epoll::wait(epoll_fd, -1, &mut events[..]) { - Ok(res) => res, - Err(e) => { - if e.kind() == io::ErrorKind::Interrupted { - // It's well defined from the epoll_wait() syscall - // documentation that the epoll loop can be interrupted - // before any of the requested events occurred or the - // timeout expired. 
In both those cases, epoll_wait() - // returns an error of type EINTR, but this should not - // be considered as a regular error. Instead it is more - // appropriate to retry, by calling into epoll_wait(). - continue; - } - return Err(Error::Epoll(e)); - } - }; - - for event in events.iter().take(num_events) { - let dispatch_event: EpollDispatch = event.data.into(); - match dispatch_event { - EpollDispatch::Unknown => { - let event = event.data; - warn!("Unknown VMM loop event: {}", event); - } - EpollDispatch::Exit => { - info!("VM exit event"); - // Consume the event. - self.exit_evt.read().map_err(Error::EventFdRead)?; - self.vmm_shutdown().map_err(Error::VmmShutdown)?; - - break 'outer; - } - EpollDispatch::Reset => { - info!("VM reset event"); - // Consume the event. - self.reset_evt.read().map_err(Error::EventFdRead)?; - self.vm_reboot().map_err(Error::VmReboot)?; - } - EpollDispatch::ActivateVirtioDevices => { - if let Some(ref vm) = self.vm { - let count = self.activate_evt.read().map_err(Error::EventFdRead)?; - info!( - "Trying to activate pending virtio devices: count = {}", - count - ); - vm.activate_virtio_devices() - .map_err(Error::ActivateVirtioDevices)?; - } - } - EpollDispatch::Api => { - // Consume the events. - for _ in 0..self.api_evt.read().map_err(Error::EventFdRead)? { - // Read from the API receiver channel - let api_request = api_receiver.recv().map_err(Error::ApiRequestRecv)?; - - info!("API request event: {:?}", api_request); - match api_request { - ApiRequest::VmCreate(config, sender) => { - let response = self - .vm_create(config) - .map_err(ApiError::VmCreate) - .map(|_| ApiResponsePayload::Empty); - - sender.send(response).map_err(Error::ApiResponseSend)?; - } - ApiRequest::VmDelete(sender) => { - let response = self - .vm_delete() - .map_err(ApiError::VmDelete) - .map(|_| ApiResponsePayload::Empty); - - sender.send(response).map_err(Error::ApiResponseSend)?; - } - ApiRequest::VmBoot(sender) => { - let response = self - .vm_boot() - .map_err(ApiError::VmBoot) - .map(|_| ApiResponsePayload::Empty); - - sender.send(response).map_err(Error::ApiResponseSend)?; - } - ApiRequest::VmShutdown(sender) => { - let response = self - .vm_shutdown() - .map_err(ApiError::VmShutdown) - .map(|_| ApiResponsePayload::Empty); - - sender.send(response).map_err(Error::ApiResponseSend)?; - } - ApiRequest::VmReboot(sender) => { - let response = self - .vm_reboot() - .map_err(ApiError::VmReboot) - .map(|_| ApiResponsePayload::Empty); - - sender.send(response).map_err(Error::ApiResponseSend)?; - } - ApiRequest::VmInfo(sender) => { - let response = self - .vm_info() - .map_err(ApiError::VmInfo) - .map(ApiResponsePayload::VmInfo); - - sender.send(response).map_err(Error::ApiResponseSend)?; - } - ApiRequest::VmmPing(sender) => { - let response = ApiResponsePayload::VmmPing(self.vmm_ping()); - - sender.send(Ok(response)).map_err(Error::ApiResponseSend)?; - } - ApiRequest::VmPause(sender) => { - let response = self - .vm_pause() - .map_err(ApiError::VmPause) - .map(|_| ApiResponsePayload::Empty); - - sender.send(response).map_err(Error::ApiResponseSend)?; - } - ApiRequest::VmResume(sender) => { - let response = self - .vm_resume() - .map_err(ApiError::VmResume) - .map(|_| ApiResponsePayload::Empty); - - sender.send(response).map_err(Error::ApiResponseSend)?; - } - ApiRequest::VmSnapshot(snapshot_data, sender) => { - let response = self - .vm_snapshot(&snapshot_data.destination_url) - .map_err(ApiError::VmSnapshot) - .map(|_| ApiResponsePayload::Empty); - - 
sender.send(response).map_err(Error::ApiResponseSend)?; - } - ApiRequest::VmRestore(restore_data, sender) => { - let response = self - .vm_restore(restore_data.as_ref().clone()) - .map_err(ApiError::VmRestore) - .map(|_| ApiResponsePayload::Empty); - - sender.send(response).map_err(Error::ApiResponseSend)?; - } - #[cfg(all(target_arch = "x86_64", feature = "guest_debug"))] - ApiRequest::VmCoredump(coredump_data, sender) => { - let response = self - .vm_coredump(&coredump_data.destination_url) - .map_err(ApiError::VmCoredump) - .map(|_| ApiResponsePayload::Empty); - - sender.send(response).map_err(Error::ApiResponseSend)?; - } - ApiRequest::VmmShutdown(sender) => { - let response = self - .vmm_shutdown() - .map_err(ApiError::VmmShutdown) - .map(|_| ApiResponsePayload::Empty); - - sender.send(response).map_err(Error::ApiResponseSend)?; - - break 'outer; - } - ApiRequest::VmResize(resize_data, sender) => { - let response = self - .vm_resize( - resize_data.desired_vcpus, - resize_data.desired_ram, - resize_data.desired_balloon, - ) - .map_err(ApiError::VmResize) - .map(|_| ApiResponsePayload::Empty); - sender.send(response).map_err(Error::ApiResponseSend)?; - } - ApiRequest::VmResizeZone(resize_zone_data, sender) => { - let response = self - .vm_resize_zone( - resize_zone_data.id.clone(), - resize_zone_data.desired_ram, - ) - .map_err(ApiError::VmResizeZone) - .map(|_| ApiResponsePayload::Empty); - sender.send(response).map_err(Error::ApiResponseSend)?; - } - ApiRequest::VmAddDevice(add_device_data, sender) => { - let response = self - .vm_add_device(add_device_data.as_ref().clone()) - .map_err(ApiError::VmAddDevice) - .map(ApiResponsePayload::VmAction); - sender.send(response).map_err(Error::ApiResponseSend)?; - } - ApiRequest::VmAddUserDevice(add_device_data, sender) => { - let response = self - .vm_add_user_device(add_device_data.as_ref().clone()) - .map_err(ApiError::VmAddUserDevice) - .map(ApiResponsePayload::VmAction); - sender.send(response).map_err(Error::ApiResponseSend)?; - } - ApiRequest::VmRemoveDevice(remove_device_data, sender) => { - let response = self - .vm_remove_device(remove_device_data.id.clone()) - .map_err(ApiError::VmRemoveDevice) - .map(|_| ApiResponsePayload::Empty); - sender.send(response).map_err(Error::ApiResponseSend)?; - } - ApiRequest::VmAddDisk(add_disk_data, sender) => { - let response = self - .vm_add_disk(add_disk_data.as_ref().clone()) - .map_err(ApiError::VmAddDisk) - .map(ApiResponsePayload::VmAction); - sender.send(response).map_err(Error::ApiResponseSend)?; - } - ApiRequest::VmAddFs(add_fs_data, sender) => { - let response = self - .vm_add_fs(add_fs_data.as_ref().clone()) - .map_err(ApiError::VmAddFs) - .map(ApiResponsePayload::VmAction); - sender.send(response).map_err(Error::ApiResponseSend)?; - } - ApiRequest::VmAddPmem(add_pmem_data, sender) => { - let response = self - .vm_add_pmem(add_pmem_data.as_ref().clone()) - .map_err(ApiError::VmAddPmem) - .map(ApiResponsePayload::VmAction); - sender.send(response).map_err(Error::ApiResponseSend)?; - } - ApiRequest::VmAddNet(add_net_data, sender) => { - let response = self - .vm_add_net(add_net_data.as_ref().clone()) - .map_err(ApiError::VmAddNet) - .map(ApiResponsePayload::VmAction); - sender.send(response).map_err(Error::ApiResponseSend)?; - } - ApiRequest::VmAddVdpa(add_vdpa_data, sender) => { - let response = self - .vm_add_vdpa(add_vdpa_data.as_ref().clone()) - .map_err(ApiError::VmAddVdpa) - .map(ApiResponsePayload::VmAction); - sender.send(response).map_err(Error::ApiResponseSend)?; - } - 
ApiRequest::VmAddVsock(add_vsock_data, sender) => { - let response = self - .vm_add_vsock(add_vsock_data.as_ref().clone()) - .map_err(ApiError::VmAddVsock) - .map(ApiResponsePayload::VmAction); - sender.send(response).map_err(Error::ApiResponseSend)?; - } - ApiRequest::VmCounters(sender) => { - let response = self - .vm_counters() - .map_err(ApiError::VmInfo) - .map(ApiResponsePayload::VmAction); - sender.send(response).map_err(Error::ApiResponseSend)?; - } - ApiRequest::VmReceiveMigration(receive_migration_data, sender) => { - let response = self - .vm_receive_migration( - receive_migration_data.as_ref().clone(), - ) - .map_err(ApiError::VmReceiveMigration) - .map(|_| ApiResponsePayload::Empty); - sender.send(response).map_err(Error::ApiResponseSend)?; - } - ApiRequest::VmSendMigration(send_migration_data, sender) => { - let response = self - .vm_send_migration(send_migration_data.as_ref().clone()) - .map_err(ApiError::VmSendMigration) - .map(|_| ApiResponsePayload::Empty); - sender.send(response).map_err(Error::ApiResponseSend)?; - } - ApiRequest::VmPowerButton(sender) => { - let response = self - .vm_power_button() - .map_err(ApiError::VmPowerButton) - .map(|_| ApiResponsePayload::Empty); - - sender.send(response).map_err(Error::ApiResponseSend)?; - } - } - } - } - #[cfg(feature = "guest_debug")] - EpollDispatch::Debug => { - // Consume the events. - for _ in 0..self.debug_evt.read().map_err(Error::EventFdRead)? { - // Read from the API receiver channel - let gdb_request = gdb_receiver.recv().map_err(Error::GdbRequestRecv)?; - - let response = if let Some(ref mut vm) = self.vm { - vm.debug_request(&gdb_request.payload, gdb_request.cpu_id) - } else { - Err(VmError::VmNotRunning) - } - .map_err(gdb::Error::Vm); - - gdb_request - .sender - .send(response) - .map_err(Error::GdbResponseSend)?; - } - } - #[cfg(not(feature = "guest_debug"))] - EpollDispatch::Debug => {} - } - } - } - - // Trigger the termination of the signal_handler thread - if let Some(signals) = self.signals.take() { - signals.close(); - } - - // Wait for all the threads to finish - for thread in self.threads.drain(..) { - thread.join().map_err(Error::ThreadCleanup)? - } - - Ok(()) - } } const CPU_MANAGER_SNAPSHOT_ID: &str = "cpu-manager";
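For review context, extending the API under this scheme needs no new enum variant and no new match arm in control_loop(); a new endpoint is just another unit struct with an ApiAction impl. A sketch, with a hypothetical action name (illustrative only), following the exact pattern of the impls added above:

    pub struct VmDoNothing; // hypothetical endpoint, for illustration

    impl ApiAction for VmDoNothing {
        type RequestBody = ();
        type ResponseBody = Option<Body>;

        fn request(&self, _: Self::RequestBody, response_sender: Sender<ApiResponse>) -> ApiRequest {
            Box::new(move |_vmm| {
                // No VMM work to do; just acknowledge the request.
                response_sender
                    .send(Ok(ApiResponsePayload::Empty))
                    .map_err(VmmError::ApiResponseSend)?;

                Ok(false) // do not exit the control loop
            })
        }

        fn send(
            &self,
            api_evt: EventFd,
            api_sender: Sender<ApiRequest>,
            data: Self::RequestBody,
        ) -> ApiResult<Self::ResponseBody> {
            get_response_body(self, api_evt, api_sender, data)
        }
    }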