vmm: use trait objects for API actions

Uses of the old ApiRequest enum conflated two different concerns:
identifying an API request endpoint, and storing data for an API
request.  This led to ApiRequest values being passed around with junk
data just to communicate a request type, which forced all API request
body types to implement Default, which in some cases doesn't make any
sense — what's the "default" path for a vhost-user socket?  The
nonsensical Default values have led to tests relying on being able to
use nonsensical data, which is an impediment to adding better
validation for these types.

Rather than having API request types be represented by an enum, which
has to carry associated body data everywhere it's used, it makes more
sense to represent API request types as trait objects.  These can have
an associated type for the type of the request body, and this makes it
possible to pass API request types and data around as siblings in a
type-safe way without forcing them into a single value even where it
doesn't make sense.  Trait objects also give us dynamic dispatch,
which lets us get rid of several large match blocks.

To keep it possible to fuzz the HTTP API, all the Vmm methods called
by the HTTP API are pulled out into a trait, so the fuzzer can provide
its own stub implementation of the VMM.

Signed-off-by: Alyssa Ross <hi@alyssa.is>
This commit is contained in:
Alyssa Ross 2024-01-05 15:08:53 +01:00 committed by Rob Bradford
parent 6aa7afbb6f
commit 4ca18c082e
9 changed files with 2041 additions and 1409 deletions

1
fuzz/Cargo.lock generated
View File

@ -252,6 +252,7 @@ dependencies = [
"virtio-queue", "virtio-queue",
"vm-device", "vm-device",
"vm-memory", "vm-memory",
"vm-migration",
"vm-virtio", "vm-virtio",
"vmm", "vmm",
"vmm-sys-util 0.11.2", "vmm-sys-util 0.11.2",

View File

@ -27,6 +27,7 @@ virtio-queue = "0.10.0"
vmm = { path = "../vmm" } vmm = { path = "../vmm" }
vmm-sys-util = "0.11.2" vmm-sys-util = "0.11.2"
vm-memory = "0.13.1" vm-memory = "0.13.1"
vm-migration = { path = "../vm-migration" }
vm-device = { path = "../vm-device" } vm-device = { path = "../vm-device" }
vm-virtio = { path = "../vm-virtio" } vm-virtio = { path = "../vm-virtio" }

View File

@ -7,9 +7,18 @@ use libfuzzer_sys::fuzz_target;
use micro_http::Request; use micro_http::Request;
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use std::os::unix::io::AsRawFd; use std::os::unix::io::AsRawFd;
use std::path::PathBuf;
use std::sync::mpsc::{channel, Receiver}; use std::sync::mpsc::{channel, Receiver};
use std::sync::{Arc, Mutex};
use std::thread; use std::thread;
use vmm::api::{http::*, ApiRequest, ApiResponsePayload}; use vm_migration::MigratableError;
use vmm::api::{
http::*, ApiRequest, RequestHandler, VmInfoResponse, VmReceiveMigrationData,
VmSendMigrationData, VmmPingResponse,
};
use vmm::config::RestoreConfig;
use vmm::vm::{Error as VmError, VmState};
use vmm::vm_config::*;
use vmm::{EpollContext, EpollDispatch}; use vmm::{EpollContext, EpollDispatch};
use vmm_sys_util::eventfd::EventFd; use vmm_sys_util::eventfd::EventFd;
@ -69,6 +78,197 @@ fn generate_request(bytes: &[u8]) -> Option<Request> {
Request::try_from(&request, None).ok() Request::try_from(&request, None).ok()
} }
/// A no-op VMM backend for fuzzing the HTTP API.
///
/// Every `RequestHandler` method succeeds immediately without touching any
/// real VM state, so the fuzzer exercises only HTTP parsing and request
/// dispatch, never actual VMM behavior.
struct StubApiRequestHandler;

impl RequestHandler for StubApiRequestHandler {
    fn vm_create(&mut self, _: Arc<Mutex<VmConfig>>) -> Result<(), VmError> {
        Ok(())
    }

    fn vm_boot(&mut self) -> Result<(), VmError> {
        Ok(())
    }

    fn vm_pause(&mut self) -> Result<(), VmError> {
        Ok(())
    }

    fn vm_resume(&mut self) -> Result<(), VmError> {
        Ok(())
    }

    fn vm_snapshot(&mut self, _: &str) -> Result<(), VmError> {
        Ok(())
    }

    fn vm_restore(&mut self, _: RestoreConfig) -> Result<(), VmError> {
        Ok(())
    }

    // Coredump support only exists on x86_64 with the guest_debug feature,
    // mirroring the cfg gate on the real handler.
    #[cfg(all(target_arch = "x86_64", feature = "guest_debug"))]
    fn vm_coredump(&mut self, _: &str) -> Result<(), VmError> {
        Ok(())
    }

    fn vm_shutdown(&mut self) -> Result<(), VmError> {
        Ok(())
    }

    fn vm_reboot(&mut self) -> Result<(), VmError> {
        Ok(())
    }

    /// Returns a fixed, syntactically valid `VmInfoResponse` (1 vCPU,
    /// 512 MiB, placeholder kernel path) so callers that serialize the
    /// response have concrete data to work with.
    fn vm_info(&self) -> Result<VmInfoResponse, VmError> {
        Ok(VmInfoResponse {
            config: Arc::new(Mutex::new(VmConfig {
                cpus: CpusConfig {
                    boot_vcpus: 1,
                    max_vcpus: 1,
                    topology: None,
                    kvm_hyperv: false,
                    max_phys_bits: 46,
                    affinity: None,
                    features: CpuFeatures::default(),
                },
                memory: MemoryConfig {
                    size: 536_870_912, // 512 MiB
                    mergeable: false,
                    hotplug_method: HotplugMethod::Acpi,
                    hotplug_size: None,
                    hotplugged_size: None,
                    shared: false,
                    hugepages: false,
                    hugepage_size: None,
                    prefault: false,
                    zones: None,
                    thp: true,
                },
                payload: Some(PayloadConfig {
                    kernel: Some(PathBuf::from("/path/to/kernel")),
                    ..Default::default()
                }),
                rate_limit_groups: None,
                disks: None,
                net: None,
                rng: RngConfig {
                    src: PathBuf::from("/dev/urandom"),
                    iommu: false,
                },
                balloon: None,
                fs: None,
                pmem: None,
                serial: ConsoleConfig {
                    file: None,
                    mode: ConsoleOutputMode::Null,
                    iommu: false,
                    socket: None,
                },
                console: ConsoleConfig {
                    file: None,
                    mode: ConsoleOutputMode::Tty,
                    iommu: false,
                    socket: None,
                },
                devices: None,
                user_devices: None,
                vdpa: None,
                vsock: None,
                pvpanic: false,
                iommu: false,
                #[cfg(target_arch = "x86_64")]
                sgx_epc: None,
                numa: None,
                watchdog: false,
                #[cfg(feature = "guest_debug")]
                gdb: false,
                platform: None,
                tpm: None,
                preserved_fds: None,
            })),
            state: VmState::Running,
            memory_actual_size: 0,
            device_tree: None,
        })
    }

    /// Empty version strings and pid 0 — just enough to satisfy the
    /// response type.
    fn vmm_ping(&self) -> VmmPingResponse {
        VmmPingResponse {
            build_version: String::new(),
            version: String::new(),
            pid: 0,
            features: Vec::new(),
        }
    }

    fn vm_delete(&mut self) -> Result<(), VmError> {
        Ok(())
    }

    fn vmm_shutdown(&mut self) -> Result<(), VmError> {
        Ok(())
    }

    fn vm_resize(&mut self, _: Option<u8>, _: Option<u64>, _: Option<u64>) -> Result<(), VmError> {
        Ok(())
    }

    fn vm_resize_zone(&mut self, _: String, _: u64) -> Result<(), VmError> {
        Ok(())
    }

    // The hotplug-style methods return `Ok(None)`: success, with no
    // device-info body in the response.
    fn vm_add_device(&mut self, _: DeviceConfig) -> Result<Option<Vec<u8>>, VmError> {
        Ok(None)
    }

    fn vm_add_user_device(&mut self, _: UserDeviceConfig) -> Result<Option<Vec<u8>>, VmError> {
        Ok(None)
    }

    fn vm_remove_device(&mut self, _: String) -> Result<(), VmError> {
        Ok(())
    }

    fn vm_add_disk(&mut self, _: DiskConfig) -> Result<Option<Vec<u8>>, VmError> {
        Ok(None)
    }

    fn vm_add_fs(&mut self, _: FsConfig) -> Result<Option<Vec<u8>>, VmError> {
        Ok(None)
    }

    fn vm_add_pmem(&mut self, _: PmemConfig) -> Result<Option<Vec<u8>>, VmError> {
        Ok(None)
    }

    fn vm_add_net(&mut self, _: NetConfig) -> Result<Option<Vec<u8>>, VmError> {
        Ok(None)
    }

    fn vm_add_vdpa(&mut self, _: VdpaConfig) -> Result<Option<Vec<u8>>, VmError> {
        Ok(None)
    }

    fn vm_add_vsock(&mut self, _: VsockConfig) -> Result<Option<Vec<u8>>, VmError> {
        Ok(None)
    }

    fn vm_counters(&mut self) -> Result<Option<Vec<u8>>, VmError> {
        Ok(None)
    }

    fn vm_power_button(&mut self) -> Result<(), VmError> {
        Ok(())
    }

    fn vm_receive_migration(&mut self, _: VmReceiveMigrationData) -> Result<(), MigratableError> {
        Ok(())
    }

    fn vm_send_migration(&mut self, _: VmSendMigrationData) -> Result<(), MigratableError> {
        Ok(())
    }
}
fn http_receiver_stub(exit_evt: EventFd, api_evt: EventFd, api_receiver: Receiver<ApiRequest>) { fn http_receiver_stub(exit_evt: EventFd, api_evt: EventFd, api_receiver: Receiver<ApiRequest>) {
let mut epoll = EpollContext::new().unwrap(); let mut epoll = EpollContext::new().unwrap();
epoll.add_event(&exit_evt, EpollDispatch::Exit).unwrap(); epoll.add_event(&exit_evt, EpollDispatch::Exit).unwrap();
@ -98,89 +298,7 @@ fn http_receiver_stub(exit_evt: EventFd, api_evt: EventFd, api_receiver: Receive
EpollDispatch::Api => { EpollDispatch::Api => {
for _ in 0..api_evt.read().unwrap() { for _ in 0..api_evt.read().unwrap() {
let api_request = api_receiver.recv().unwrap(); let api_request = api_receiver.recv().unwrap();
match api_request { api_request(&mut StubApiRequestHandler).unwrap();
ApiRequest::VmCreate(_, sender) => {
sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
}
ApiRequest::VmDelete(sender) => {
sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
}
ApiRequest::VmBoot(sender) => {
sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
}
ApiRequest::VmShutdown(sender) => {
sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
}
ApiRequest::VmReboot(sender) => {
sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
}
ApiRequest::VmInfo(sender) => {
sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
}
ApiRequest::VmmPing(sender) => {
sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
}
ApiRequest::VmPause(sender) => {
sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
}
ApiRequest::VmResume(sender) => {
sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
}
ApiRequest::VmSnapshot(_, sender) => {
sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
}
ApiRequest::VmRestore(_, sender) => {
sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
}
ApiRequest::VmmShutdown(sender) => {
sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
}
ApiRequest::VmResize(_, sender) => {
sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
}
ApiRequest::VmResizeZone(_, sender) => {
sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
}
ApiRequest::VmAddDevice(_, sender) => {
sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
}
ApiRequest::VmAddUserDevice(_, sender) => {
sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
}
ApiRequest::VmRemoveDevice(_, sender) => {
sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
}
ApiRequest::VmAddDisk(_, sender) => {
sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
}
ApiRequest::VmAddFs(_, sender) => {
sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
}
ApiRequest::VmAddPmem(_, sender) => {
sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
}
ApiRequest::VmAddNet(_, sender) => {
sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
}
ApiRequest::VmAddVdpa(_, sender) => {
sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
}
ApiRequest::VmAddVsock(_, sender) => {
sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
}
ApiRequest::VmCounters(sender) => {
sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
}
ApiRequest::VmReceiveMigration(_, sender) => {
sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
}
ApiRequest::VmSendMigration(_, sender) => {
sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
}
ApiRequest::VmPowerButton(sender) => {
sender.send(Ok(ApiResponsePayload::Empty)).unwrap();
}
}
} }
} }
_ => { _ => {

View File

@ -20,6 +20,7 @@ use std::sync::{Arc, Mutex};
use thiserror::Error; use thiserror::Error;
#[cfg(feature = "dbus_api")] #[cfg(feature = "dbus_api")]
use vmm::api::dbus::{dbus_api_graceful_shutdown, DBusApiOptions}; use vmm::api::dbus::{dbus_api_graceful_shutdown, DBusApiOptions};
use vmm::api::ApiAction;
use vmm::config; use vmm::config;
use vmm_sys_util::eventfd::EventFd; use vmm_sys_util::eventfd::EventFd;
use vmm_sys_util::signal::block_signal; use vmm_sys_util::signal::block_signal;
@ -695,22 +696,24 @@ fn start_vmm(cmd_arguments: ArgMatches) -> Result<Option<String>, Error> {
// Create and boot the VM based off the VM config we just built. // Create and boot the VM based off the VM config we just built.
let sender = api_request_sender.clone(); let sender = api_request_sender.clone();
vmm::api::vm_create( vmm::api::VmCreate
api_evt.try_clone().unwrap(), .send(
api_request_sender, api_evt.try_clone().unwrap(),
Arc::new(Mutex::new(vm_config)), api_request_sender,
) Arc::new(Mutex::new(vm_config)),
.map_err(Error::VmCreate)?; )
vmm::api::vm_boot(api_evt.try_clone().unwrap(), sender).map_err(Error::VmBoot)?; .map_err(Error::VmCreate)?;
vmm::api::VmBoot
.send(api_evt.try_clone().unwrap(), sender, ())
.map_err(Error::VmBoot)?;
} else if let Some(restore_params) = cmd_arguments.get_one::<String>("restore") { } else if let Some(restore_params) = cmd_arguments.get_one::<String>("restore") {
vmm::api::vm_restore( vmm::api::VmRestore
api_evt.try_clone().unwrap(), .send(
api_request_sender, api_evt.try_clone().unwrap(),
Arc::new( api_request_sender,
config::RestoreConfig::parse(restore_params).map_err(Error::ParsingRestore)?, config::RestoreConfig::parse(restore_params).map_err(Error::ParsingRestore)?,
), )
) .map_err(Error::VmRestore)?;
.map_err(Error::VmRestore)?;
} }
Ok(()) Ok(())

View File

@ -2,7 +2,15 @@
// //
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
use super::{ApiRequest, VmAction}; use super::{ApiAction, ApiRequest};
#[cfg(all(target_arch = "x86_64", feature = "guest_debug"))]
use crate::api::VmCoredump;
use crate::api::{
AddDisk, Body, VmAddDevice, VmAddFs, VmAddNet, VmAddPmem, VmAddUserDevice, VmAddVdpa,
VmAddVsock, VmBoot, VmCounters, VmCreate, VmDelete, VmInfo, VmPause, VmPowerButton, VmReboot,
VmReceiveMigration, VmRemoveDevice, VmResize, VmResizeZone, VmRestore, VmResume,
VmSendMigration, VmShutdown, VmSnapshot, VmmPing, VmmShutdown,
};
use crate::seccomp_filters::{get_seccomp_filter, Thread}; use crate::seccomp_filters::{get_seccomp_filter, Thread};
use crate::{Error as VmmError, Result as VmmResult}; use crate::{Error as VmmError, Result as VmmResult};
use crate::{NetConfig, VmConfig}; use crate::{NetConfig, VmConfig};
@ -78,11 +86,15 @@ impl DBusApi {
.map_err(|err| fdo::Error::IOError(format!("{err:?}"))) .map_err(|err| fdo::Error::IOError(format!("{err:?}")))
} }
async fn vm_action(&self, action: VmAction) -> Result<Optional<String>> { async fn vm_action<Action: ApiAction<ResponseBody = Option<Body>>>(
&self,
action: &'static Action,
body: Action::RequestBody,
) -> Result<Optional<String>> {
let api_sender = self.clone_api_sender().await; let api_sender = self.clone_api_sender().await;
let api_notifier = self.clone_api_notifier()?; let api_notifier = self.clone_api_notifier()?;
let result = blocking::unblock(move || super::vm_action(api_notifier, api_sender, action)) let result = blocking::unblock(move || action.send(api_notifier, api_sender, body))
.await .await
.map_err(api_error)? .map_err(api_error)?
// We're using `from_utf8_lossy` here to not deal with the // We're using `from_utf8_lossy` here to not deal with the
@ -99,7 +111,7 @@ impl DBusApi {
let api_sender = self.clone_api_sender().await; let api_sender = self.clone_api_sender().await;
let api_notifier = self.clone_api_notifier()?; let api_notifier = self.clone_api_notifier()?;
let result = blocking::unblock(move || super::vmm_ping(api_notifier, api_sender)) let result = blocking::unblock(move || VmmPing.send(api_notifier, api_sender, ()))
.await .await
.map_err(api_error)?; .map_err(api_error)?;
serde_json::to_string(&result).map_err(api_error) serde_json::to_string(&result).map_err(api_error)
@ -109,26 +121,24 @@ impl DBusApi {
let api_sender = self.clone_api_sender().await; let api_sender = self.clone_api_sender().await;
let api_notifier = self.clone_api_notifier()?; let api_notifier = self.clone_api_notifier()?;
blocking::unblock(move || super::vmm_shutdown(api_notifier, api_sender)) blocking::unblock(move || VmmShutdown.send(api_notifier, api_sender, ()))
.await .await
.map_err(api_error) .map_err(api_error)
} }
async fn vm_add_device(&self, device_config: String) -> Result<Optional<String>> { async fn vm_add_device(&self, device_config: String) -> Result<Optional<String>> {
let device_config = serde_json::from_str(&device_config).map_err(api_error)?; let device_config = serde_json::from_str(&device_config).map_err(api_error)?;
self.vm_action(VmAction::AddDevice(Arc::new(device_config))) self.vm_action(&VmAddDevice, device_config).await
.await
} }
async fn vm_add_disk(&self, disk_config: String) -> Result<Optional<String>> { async fn vm_add_disk(&self, disk_config: String) -> Result<Optional<String>> {
let disk_config = serde_json::from_str(&disk_config).map_err(api_error)?; let disk_config = serde_json::from_str(&disk_config).map_err(api_error)?;
self.vm_action(VmAction::AddDisk(Arc::new(disk_config))) self.vm_action(&AddDisk, disk_config).await
.await
} }
async fn vm_add_fs(&self, fs_config: String) -> Result<Optional<String>> { async fn vm_add_fs(&self, fs_config: String) -> Result<Optional<String>> {
let fs_config = serde_json::from_str(&fs_config).map_err(api_error)?; let fs_config = serde_json::from_str(&fs_config).map_err(api_error)?;
self.vm_action(VmAction::AddFs(Arc::new(fs_config))).await self.vm_action(&VmAddFs, fs_config).await
} }
async fn vm_add_net(&self, net_config: String) -> Result<Optional<String>> { async fn vm_add_net(&self, net_config: String) -> Result<Optional<String>> {
@ -137,35 +147,31 @@ impl DBusApi {
warn!("Ignoring FDs sent via the D-Bus request body"); warn!("Ignoring FDs sent via the D-Bus request body");
net_config.fds = None; net_config.fds = None;
} }
self.vm_action(VmAction::AddNet(Arc::new(net_config))).await self.vm_action(&VmAddNet, net_config).await
} }
async fn vm_add_pmem(&self, pmem_config: String) -> Result<Optional<String>> { async fn vm_add_pmem(&self, pmem_config: String) -> Result<Optional<String>> {
let pmem_config = serde_json::from_str(&pmem_config).map_err(api_error)?; let pmem_config = serde_json::from_str(&pmem_config).map_err(api_error)?;
self.vm_action(VmAction::AddPmem(Arc::new(pmem_config))) self.vm_action(&VmAddPmem, pmem_config).await
.await
} }
async fn vm_add_user_device(&self, vm_add_user_device: String) -> Result<Optional<String>> { async fn vm_add_user_device(&self, vm_add_user_device: String) -> Result<Optional<String>> {
let vm_add_user_device = serde_json::from_str(&vm_add_user_device).map_err(api_error)?; let vm_add_user_device = serde_json::from_str(&vm_add_user_device).map_err(api_error)?;
self.vm_action(VmAction::AddUserDevice(Arc::new(vm_add_user_device))) self.vm_action(&VmAddUserDevice, vm_add_user_device).await
.await
} }
async fn vm_add_vdpa(&self, vdpa_config: String) -> Result<Optional<String>> { async fn vm_add_vdpa(&self, vdpa_config: String) -> Result<Optional<String>> {
let vdpa_config = serde_json::from_str(&vdpa_config).map_err(api_error)?; let vdpa_config = serde_json::from_str(&vdpa_config).map_err(api_error)?;
self.vm_action(VmAction::AddVdpa(Arc::new(vdpa_config))) self.vm_action(&VmAddVdpa, vdpa_config).await
.await
} }
async fn vm_add_vsock(&self, vsock_config: String) -> Result<Optional<String>> { async fn vm_add_vsock(&self, vsock_config: String) -> Result<Optional<String>> {
let vsock_config = serde_json::from_str(&vsock_config).map_err(api_error)?; let vsock_config = serde_json::from_str(&vsock_config).map_err(api_error)?;
self.vm_action(VmAction::AddVsock(Arc::new(vsock_config))) self.vm_action(&VmAddVsock, vsock_config).await
.await
} }
async fn vm_boot(&self) -> Result<()> { async fn vm_boot(&self) -> Result<()> {
self.vm_action(VmAction::Boot).await.map(|_| ()) self.vm_action(&VmBoot, ()).await.map(|_| ())
} }
#[allow(unused_variables)] #[allow(unused_variables)]
@ -176,7 +182,7 @@ impl DBusApi {
#[cfg(all(target_arch = "x86_64", feature = "guest_debug"))] #[cfg(all(target_arch = "x86_64", feature = "guest_debug"))]
{ {
let vm_coredump_data = serde_json::from_str(&vm_coredump_data).map_err(api_error)?; let vm_coredump_data = serde_json::from_str(&vm_coredump_data).map_err(api_error)?;
self.vm_action(VmAction::Coredump(Arc::new(vm_coredump_data))) self.vm_action(&VmCoredump, vm_coredump_data)
.await .await
.map(|_| ()) .map(|_| ())
} }
@ -188,7 +194,7 @@ impl DBusApi {
} }
async fn vm_counters(&self) -> Result<Optional<String>> { async fn vm_counters(&self) -> Result<Optional<String>> {
self.vm_action(VmAction::Counters).await self.vm_action(&VmCounters, ()).await
} }
async fn vm_create(&self, vm_config: String) -> Result<()> { async fn vm_create(&self, vm_config: String) -> Result<()> {
@ -207,7 +213,7 @@ impl DBusApi {
} }
blocking::unblock(move || { blocking::unblock(move || {
super::vm_create(api_notifier, api_sender, Arc::new(Mutex::new(vm_config))) VmCreate.send(api_notifier, api_sender, Arc::new(Mutex::new(vm_config)))
}) })
.await .await
.map_err(api_error)?; .map_err(api_error)?;
@ -216,85 +222,81 @@ impl DBusApi {
} }
async fn vm_delete(&self) -> Result<()> { async fn vm_delete(&self) -> Result<()> {
self.vm_action(VmAction::Delete).await.map(|_| ()) self.vm_action(&VmDelete, ()).await.map(|_| ())
} }
async fn vm_info(&self) -> Result<String> { async fn vm_info(&self) -> Result<String> {
let api_sender = self.clone_api_sender().await; let api_sender = self.clone_api_sender().await;
let api_notifier = self.clone_api_notifier()?; let api_notifier = self.clone_api_notifier()?;
let result = blocking::unblock(move || super::vm_info(api_notifier, api_sender)) let result = blocking::unblock(move || VmInfo.send(api_notifier, api_sender, ()))
.await .await
.map_err(api_error)?; .map_err(api_error)?;
serde_json::to_string(&result).map_err(api_error) serde_json::to_string(&result).map_err(api_error)
} }
async fn vm_pause(&self) -> Result<()> { async fn vm_pause(&self) -> Result<()> {
self.vm_action(VmAction::Pause).await.map(|_| ()) self.vm_action(&VmPause, ()).await.map(|_| ())
} }
async fn vm_power_button(&self) -> Result<()> { async fn vm_power_button(&self) -> Result<()> {
self.vm_action(VmAction::PowerButton).await.map(|_| ()) self.vm_action(&VmPowerButton, ()).await.map(|_| ())
} }
async fn vm_reboot(&self) -> Result<()> { async fn vm_reboot(&self) -> Result<()> {
self.vm_action(VmAction::Reboot).await.map(|_| ()) self.vm_action(&VmReboot, ()).await.map(|_| ())
} }
async fn vm_remove_device(&self, vm_remove_device: String) -> Result<()> { async fn vm_remove_device(&self, vm_remove_device: String) -> Result<()> {
let vm_remove_device = serde_json::from_str(&vm_remove_device).map_err(api_error)?; let vm_remove_device = serde_json::from_str(&vm_remove_device).map_err(api_error)?;
self.vm_action(VmAction::RemoveDevice(Arc::new(vm_remove_device))) self.vm_action(&VmRemoveDevice, vm_remove_device)
.await .await
.map(|_| ()) .map(|_| ())
} }
async fn vm_resize(&self, vm_resize: String) -> Result<()> { async fn vm_resize(&self, vm_resize: String) -> Result<()> {
let vm_resize = serde_json::from_str(&vm_resize).map_err(api_error)?; let vm_resize = serde_json::from_str(&vm_resize).map_err(api_error)?;
self.vm_action(VmAction::Resize(Arc::new(vm_resize))) self.vm_action(&VmResize, vm_resize).await.map(|_| ())
.await
.map(|_| ())
} }
async fn vm_resize_zone(&self, vm_resize_zone: String) -> Result<()> { async fn vm_resize_zone(&self, vm_resize_zone: String) -> Result<()> {
let vm_resize_zone = serde_json::from_str(&vm_resize_zone).map_err(api_error)?; let vm_resize_zone = serde_json::from_str(&vm_resize_zone).map_err(api_error)?;
self.vm_action(VmAction::ResizeZone(Arc::new(vm_resize_zone))) self.vm_action(&VmResizeZone, vm_resize_zone)
.await .await
.map(|_| ()) .map(|_| ())
} }
async fn vm_restore(&self, restore_config: String) -> Result<()> { async fn vm_restore(&self, restore_config: String) -> Result<()> {
let restore_config = serde_json::from_str(&restore_config).map_err(api_error)?; let restore_config = serde_json::from_str(&restore_config).map_err(api_error)?;
self.vm_action(VmAction::Restore(Arc::new(restore_config))) self.vm_action(&VmRestore, restore_config).await.map(|_| ())
.await
.map(|_| ())
} }
async fn vm_receive_migration(&self, receive_migration_data: String) -> Result<()> { async fn vm_receive_migration(&self, receive_migration_data: String) -> Result<()> {
let receive_migration_data = let receive_migration_data =
serde_json::from_str(&receive_migration_data).map_err(api_error)?; serde_json::from_str(&receive_migration_data).map_err(api_error)?;
self.vm_action(VmAction::ReceiveMigration(Arc::new(receive_migration_data))) self.vm_action(&VmReceiveMigration, receive_migration_data)
.await .await
.map(|_| ()) .map(|_| ())
} }
async fn vm_send_migration(&self, send_migration_data: String) -> Result<()> { async fn vm_send_migration(&self, send_migration_data: String) -> Result<()> {
let send_migration_data = serde_json::from_str(&send_migration_data).map_err(api_error)?; let send_migration_data = serde_json::from_str(&send_migration_data).map_err(api_error)?;
self.vm_action(VmAction::SendMigration(Arc::new(send_migration_data))) self.vm_action(&VmSendMigration, send_migration_data)
.await .await
.map(|_| ()) .map(|_| ())
} }
async fn vm_resume(&self) -> Result<()> { async fn vm_resume(&self) -> Result<()> {
self.vm_action(VmAction::Resume).await.map(|_| ()) self.vm_action(&VmResume, ()).await.map(|_| ())
} }
async fn vm_shutdown(&self) -> Result<()> { async fn vm_shutdown(&self) -> Result<()> {
self.vm_action(VmAction::Shutdown).await.map(|_| ()) self.vm_action(&VmShutdown, ()).await.map(|_| ())
} }
async fn vm_snapshot(&self, vm_snapshot_config: String) -> Result<()> { async fn vm_snapshot(&self, vm_snapshot_config: String) -> Result<()> {
let vm_snapshot_config = serde_json::from_str(&vm_snapshot_config).map_err(api_error)?; let vm_snapshot_config = serde_json::from_str(&vm_snapshot_config).map_err(api_error)?;
self.vm_action(VmAction::Snapshot(Arc::new(vm_snapshot_config))) self.vm_action(&VmSnapshot, vm_snapshot_config)
.await .await
.map(|_| ()) .map(|_| ())
} }

View File

@ -1,17 +1,17 @@
// Copyright © 2019 Intel Corporation // Copyright © 2019 Intel Corporation
// Copyright 2024 Alyssa Ross <hi@alyssa.is>
// //
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
use crate::api::http::{error_response, EndpointHandler, HttpError}; use crate::api::http::{error_response, EndpointHandler, HttpError};
#[cfg(all(target_arch = "x86_64", feature = "guest_debug"))] #[cfg(all(target_arch = "x86_64", feature = "guest_debug"))]
use crate::api::vm_coredump; use crate::api::VmCoredump;
use crate::api::{ use crate::api::{
vm_add_device, vm_add_disk, vm_add_fs, vm_add_net, vm_add_pmem, vm_add_user_device, AddDisk, ApiAction, ApiRequest, VmAddDevice, VmAddFs, VmAddNet, VmAddPmem, VmAddUserDevice,
vm_add_vdpa, vm_add_vsock, vm_boot, vm_counters, vm_create, vm_delete, vm_info, vm_pause, VmAddVdpa, VmAddVsock, VmBoot, VmConfig, VmCounters, VmDelete, VmPause, VmPowerButton,
vm_power_button, vm_reboot, vm_receive_migration, vm_remove_device, vm_resize, vm_resize_zone, VmReboot, VmReceiveMigration, VmRemoveDevice, VmResize, VmResizeZone, VmRestore, VmResume,
vm_restore, vm_resume, vm_send_migration, vm_shutdown, vm_snapshot, vmm_ping, vmm_shutdown, VmSendMigration, VmShutdown, VmSnapshot,
ApiRequest, VmAction, VmConfig,
}; };
use crate::config::NetConfig; use crate::config::NetConfig;
use micro_http::{Body, Method, Request, Response, StatusCode, Version}; use micro_http::{Body, Method, Request, Response, StatusCode, Version};
@ -52,8 +52,8 @@ impl EndpointHandler for VmCreate {
} }
} }
// Call vm_create() match crate::api::VmCreate
match vm_create(api_notifier, api_sender, Arc::new(Mutex::new(vm_config))) .send(api_notifier, api_sender, Arc::new(Mutex::new(vm_config)))
.map_err(HttpError::ApiError) .map_err(HttpError::ApiError)
{ {
Ok(_) => Response::new(Version::Http11, StatusCode::NoContent), Ok(_) => Response::new(Version::Http11, StatusCode::NoContent),
@ -70,13 +70,162 @@ impl EndpointHandler for VmCreate {
} }
} }
/// Handles an HTTP `GET` for an API action endpoint.
///
/// The default implementation rejects the request with `BadRequest`, so
/// only actions that explicitly implement `handle_request` accept GET.
pub trait GetHandler {
    fn handle_request(
        &'static self,
        _api_notifier: EventFd,
        _api_sender: Sender<ApiRequest>,
    ) -> std::result::Result<Option<Body>, HttpError> {
        Err(HttpError::BadRequest)
    }
}

/// Handles an HTTP `PUT` for an API action endpoint.
///
/// As with `GetHandler`, the default implementation rejects the request
/// with `BadRequest`; actions opt in by overriding `handle_request`.
/// `_files` carries any file descriptors received alongside the request.
pub trait PutHandler {
    fn handle_request(
        &'static self,
        _api_notifier: EventFd,
        _api_sender: Sender<ApiRequest>,
        _body: &Option<Body>,
        _files: Vec<File>,
    ) -> std::result::Result<Option<Body>, HttpError> {
        Err(HttpError::BadRequest)
    }
}

/// Object-safe combination of both HTTP method handlers, usable as a
/// `&'static dyn HttpVmAction` trait object.  The blanket impl below makes
/// every `GetHandler + PutHandler + Sync` type an `HttpVmAction` for free.
pub trait HttpVmAction: GetHandler + PutHandler + Sync {}

impl<T: GetHandler + PutHandler + Sync> HttpVmAction for T {}
/// Implements `GetHandler` for an action that is queried via GET with no
/// request body: the action is sent with a unit `()` body.  `PutHandler`
/// gets the default (rejecting) implementation so PUT is refused.
macro_rules! vm_action_get_handler {
    ($action:ty) => {
        impl GetHandler for $action {
            fn handle_request(
                &'static self,
                api_notifier: EventFd,
                api_sender: Sender<ApiRequest>,
            ) -> std::result::Result<Option<Body>, HttpError> {
                self.send(api_notifier, api_sender, ())
                    .map_err(HttpError::ApiError)
            }
        }

        impl PutHandler for $action {}
    };
}
/// Implements `PutHandler` for an action triggered by a body-less PUT
/// (e.g. boot/shutdown).  A request that carries a body is rejected with
/// `BadRequest`.  `GetHandler` gets the default (rejecting) implementation.
macro_rules! vm_action_put_handler {
    ($action:ty) => {
        impl PutHandler for $action {
            fn handle_request(
                &'static self,
                api_notifier: EventFd,
                api_sender: Sender<ApiRequest>,
                body: &Option<Body>,
                _files: Vec<File>,
            ) -> std::result::Result<Option<Body>, HttpError> {
                if body.is_some() {
                    Err(HttpError::BadRequest)
                } else {
                    self.send(api_notifier, api_sender, ())
                        .map_err(HttpError::ApiError)
                }
            }
        }

        impl GetHandler for $action {}
    };
}
/// Implements `PutHandler` for an action that requires a JSON request
/// body: the body is deserialized with `serde_json::from_slice` into the
/// action's request type and sent along.  A missing body is rejected with
/// `BadRequest`; a malformed one surfaces as a serde error via `?`.
/// `GetHandler` gets the default (rejecting) implementation.
macro_rules! vm_action_put_handler_body {
    ($action:ty) => {
        impl PutHandler for $action {
            fn handle_request(
                &'static self,
                api_notifier: EventFd,
                api_sender: Sender<ApiRequest>,
                body: &Option<Body>,
                _files: Vec<File>,
            ) -> std::result::Result<Option<Body>, HttpError> {
                if let Some(body) = body {
                    self.send(
                        api_notifier,
                        api_sender,
                        serde_json::from_slice(body.raw())?,
                    )
                    .map_err(HttpError::ApiError)
                } else {
                    Err(HttpError::BadRequest)
                }
            }
        }

        impl GetHandler for $action {}
    };
}
// Read-only action exposed via GET.
vm_action_get_handler!(VmCounters);

// Actions triggered by a body-less PUT.
vm_action_put_handler!(VmBoot);
vm_action_put_handler!(VmDelete);
vm_action_put_handler!(VmShutdown);
vm_action_put_handler!(VmReboot);
vm_action_put_handler!(VmPause);
vm_action_put_handler!(VmResume);
vm_action_put_handler!(VmPowerButton);

// Actions whose PUT carries a JSON request body.
vm_action_put_handler_body!(VmAddDevice);
vm_action_put_handler_body!(AddDisk);
vm_action_put_handler_body!(VmAddFs);
vm_action_put_handler_body!(VmAddPmem);
vm_action_put_handler_body!(VmAddVdpa);
vm_action_put_handler_body!(VmAddVsock);
vm_action_put_handler_body!(VmAddUserDevice);
vm_action_put_handler_body!(VmRemoveDevice);
vm_action_put_handler_body!(VmResize);
vm_action_put_handler_body!(VmResizeZone);
vm_action_put_handler_body!(VmRestore);
vm_action_put_handler_body!(VmSnapshot);
vm_action_put_handler_body!(VmReceiveMigration);
vm_action_put_handler_body!(VmSendMigration);

#[cfg(all(target_arch = "x86_64", feature = "guest_debug"))]
vm_action_put_handler_body!(VmCoredump);
/// `VmAddNet` needs a hand-written `PutHandler` (rather than
/// `vm_action_put_handler_body!`) because network FDs must arrive via
/// control message, not inside the JSON body.
impl PutHandler for VmAddNet {
    fn handle_request(
        &'static self,
        api_notifier: EventFd,
        api_sender: Sender<ApiRequest>,
        body: &Option<Body>,
        mut files: Vec<File>,
    ) -> std::result::Result<Option<Body>, HttpError> {
        if let Some(body) = body {
            let mut net_cfg: NetConfig = serde_json::from_slice(body.raw())?;
            // FDs embedded in the JSON body are meaningless across the
            // HTTP boundary; drop them with a warning.
            if net_cfg.fds.is_some() {
                warn!("Ignoring FDs sent via the HTTP request body");
                net_cfg.fds = None;
            }
            // FDs received through the control message replace the config's.
            if !files.is_empty() {
                let fds = files.drain(..).map(|f| f.into_raw_fd()).collect();
                net_cfg.fds = Some(fds);
            }
            self.send(api_notifier, api_sender, net_cfg)
                .map_err(HttpError::ApiError)
        } else {
            Err(HttpError::BadRequest)
        }
    }
}

// GET is not supported for this endpoint (default rejecting impl).
impl GetHandler for VmAddNet {}
// Common handler for boot, shutdown and reboot // Common handler for boot, shutdown and reboot
pub struct VmActionHandler { pub struct VmActionHandler {
action: VmAction, action: &'static dyn HttpVmAction,
} }
impl VmActionHandler { impl VmActionHandler {
pub fn new(action: VmAction) -> Self { pub fn new(action: &'static dyn HttpVmAction) -> Self {
VmActionHandler { action } VmActionHandler { action }
} }
} }
@ -87,117 +236,9 @@ impl EndpointHandler for VmActionHandler {
api_notifier: EventFd, api_notifier: EventFd,
api_sender: Sender<ApiRequest>, api_sender: Sender<ApiRequest>,
body: &Option<Body>, body: &Option<Body>,
mut files: Vec<File>, files: Vec<File>,
) -> std::result::Result<Option<Body>, HttpError> { ) -> std::result::Result<Option<Body>, HttpError> {
use VmAction::*; PutHandler::handle_request(self.action, api_notifier, api_sender, body, files)
if let Some(body) = body {
match self.action {
AddDevice(_) => vm_add_device(
api_notifier,
api_sender,
Arc::new(serde_json::from_slice(body.raw())?),
),
AddDisk(_) => vm_add_disk(
api_notifier,
api_sender,
Arc::new(serde_json::from_slice(body.raw())?),
),
AddFs(_) => vm_add_fs(
api_notifier,
api_sender,
Arc::new(serde_json::from_slice(body.raw())?),
),
AddPmem(_) => vm_add_pmem(
api_notifier,
api_sender,
Arc::new(serde_json::from_slice(body.raw())?),
),
AddNet(_) => {
let mut net_cfg: NetConfig = serde_json::from_slice(body.raw())?;
if net_cfg.fds.is_some() {
warn!("Ignoring FDs sent via the HTTP request body");
net_cfg.fds = None;
}
// Update network config with optional files that might have
// been sent through control message.
if !files.is_empty() {
let fds = files.drain(..).map(|f| f.into_raw_fd()).collect();
net_cfg.fds = Some(fds);
}
vm_add_net(api_notifier, api_sender, Arc::new(net_cfg))
}
AddVdpa(_) => vm_add_vdpa(
api_notifier,
api_sender,
Arc::new(serde_json::from_slice(body.raw())?),
),
AddVsock(_) => vm_add_vsock(
api_notifier,
api_sender,
Arc::new(serde_json::from_slice(body.raw())?),
),
AddUserDevice(_) => vm_add_user_device(
api_notifier,
api_sender,
Arc::new(serde_json::from_slice(body.raw())?),
),
RemoveDevice(_) => vm_remove_device(
api_notifier,
api_sender,
Arc::new(serde_json::from_slice(body.raw())?),
),
Resize(_) => vm_resize(
api_notifier,
api_sender,
Arc::new(serde_json::from_slice(body.raw())?),
),
ResizeZone(_) => vm_resize_zone(
api_notifier,
api_sender,
Arc::new(serde_json::from_slice(body.raw())?),
),
Restore(_) => vm_restore(
api_notifier,
api_sender,
Arc::new(serde_json::from_slice(body.raw())?),
),
Snapshot(_) => vm_snapshot(
api_notifier,
api_sender,
Arc::new(serde_json::from_slice(body.raw())?),
),
#[cfg(all(target_arch = "x86_64", feature = "guest_debug"))]
Coredump(_) => vm_coredump(
api_notifier,
api_sender,
Arc::new(serde_json::from_slice(body.raw())?),
),
ReceiveMigration(_) => vm_receive_migration(
api_notifier,
api_sender,
Arc::new(serde_json::from_slice(body.raw())?),
),
SendMigration(_) => vm_send_migration(
api_notifier,
api_sender,
Arc::new(serde_json::from_slice(body.raw())?),
),
_ => return Err(HttpError::BadRequest),
}
} else {
match self.action {
Boot => vm_boot(api_notifier, api_sender),
Delete => vm_delete(api_notifier, api_sender),
Shutdown => vm_shutdown(api_notifier, api_sender),
Reboot => vm_reboot(api_notifier, api_sender),
Pause => vm_pause(api_notifier, api_sender),
Resume => vm_resume(api_notifier, api_sender),
PowerButton => vm_power_button(api_notifier, api_sender),
_ => return Err(HttpError::BadRequest),
}
}
.map_err(HttpError::ApiError)
} }
fn get_handler( fn get_handler(
@ -206,11 +247,7 @@ impl EndpointHandler for VmActionHandler {
api_sender: Sender<ApiRequest>, api_sender: Sender<ApiRequest>,
_body: &Option<Body>, _body: &Option<Body>,
) -> std::result::Result<Option<Body>, HttpError> { ) -> std::result::Result<Option<Body>, HttpError> {
use VmAction::*; GetHandler::handle_request(self.action, api_notifier, api_sender)
match self.action {
Counters => vm_counters(api_notifier, api_sender).map_err(HttpError::ApiError),
_ => Err(HttpError::BadRequest),
}
} }
} }
@ -225,7 +262,10 @@ impl EndpointHandler for VmInfo {
api_sender: Sender<ApiRequest>, api_sender: Sender<ApiRequest>,
) -> Response { ) -> Response {
match req.method() { match req.method() {
Method::Get => match vm_info(api_notifier, api_sender).map_err(HttpError::ApiError) { Method::Get => match crate::api::VmInfo
.send(api_notifier, api_sender, ())
.map_err(HttpError::ApiError)
{
Ok(info) => { Ok(info) => {
let mut response = Response::new(Version::Http11, StatusCode::OK); let mut response = Response::new(Version::Http11, StatusCode::OK);
let info_serialized = serde_json::to_string(&info).unwrap(); let info_serialized = serde_json::to_string(&info).unwrap();
@ -251,7 +291,10 @@ impl EndpointHandler for VmmPing {
api_sender: Sender<ApiRequest>, api_sender: Sender<ApiRequest>,
) -> Response { ) -> Response {
match req.method() { match req.method() {
Method::Get => match vmm_ping(api_notifier, api_sender).map_err(HttpError::ApiError) { Method::Get => match crate::api::VmmPing
.send(api_notifier, api_sender, ())
.map_err(HttpError::ApiError)
{
Ok(pong) => { Ok(pong) => {
let mut response = Response::new(Version::Http11, StatusCode::OK); let mut response = Response::new(Version::Http11, StatusCode::OK);
let info_serialized = serde_json::to_string(&pong).unwrap(); let info_serialized = serde_json::to_string(&pong).unwrap();
@ -279,7 +322,10 @@ impl EndpointHandler for VmmShutdown {
) -> Response { ) -> Response {
match req.method() { match req.method() {
Method::Put => { Method::Put => {
match vmm_shutdown(api_notifier, api_sender).map_err(HttpError::ApiError) { match crate::api::VmmShutdown
.send(api_notifier, api_sender, ())
.map_err(HttpError::ApiError)
{
Ok(_) => Response::new(Version::Http11, StatusCode::OK), Ok(_) => Response::new(Version::Http11, StatusCode::OK),
Err(e) => error_response(e, StatusCode::InternalServerError), Err(e) => error_response(e, StatusCode::InternalServerError),
} }

View File

@ -4,7 +4,14 @@
// //
use self::http_endpoint::{VmActionHandler, VmCreate, VmInfo, VmmPing, VmmShutdown}; use self::http_endpoint::{VmActionHandler, VmCreate, VmInfo, VmmPing, VmmShutdown};
use crate::api::{ApiError, ApiRequest, VmAction}; #[cfg(all(target_arch = "x86_64", feature = "guest_debug"))]
use crate::api::VmCoredump;
use crate::api::{
AddDisk, ApiError, ApiRequest, VmAddDevice, VmAddFs, VmAddNet, VmAddPmem, VmAddUserDevice,
VmAddVdpa, VmAddVsock, VmBoot, VmCounters, VmDelete, VmPause, VmPowerButton, VmReboot,
VmReceiveMigration, VmRemoveDevice, VmResize, VmResizeZone, VmRestore, VmResume,
VmSendMigration, VmShutdown, VmSnapshot,
};
use crate::seccomp_filters::{get_seccomp_filter, Thread}; use crate::seccomp_filters::{get_seccomp_filter, Thread};
use crate::{Error as VmmError, Result}; use crate::{Error as VmmError, Result};
use hypervisor::HypervisorType; use hypervisor::HypervisorType;
@ -19,7 +26,6 @@ use std::os::unix::net::UnixListener;
use std::panic::AssertUnwindSafe; use std::panic::AssertUnwindSafe;
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::mpsc::Sender; use std::sync::mpsc::Sender;
use std::sync::Arc;
use std::thread; use std::thread;
use vmm_sys_util::eventfd::EventFd; use vmm_sys_util::eventfd::EventFd;
@ -141,109 +147,103 @@ pub static HTTP_ROUTES: Lazy<HttpRoutes> = Lazy::new(|| {
r.routes.insert( r.routes.insert(
endpoint!("/vm.add-device"), endpoint!("/vm.add-device"),
Box::new(VmActionHandler::new(VmAction::AddDevice(Arc::default()))), Box::new(VmActionHandler::new(&VmAddDevice)),
); );
r.routes.insert( r.routes.insert(
endpoint!("/vm.add-user-device"), endpoint!("/vm.add-user-device"),
Box::new(VmActionHandler::new( Box::new(VmActionHandler::new(&VmAddUserDevice)),
VmAction::AddUserDevice(Arc::default()),
)),
); );
r.routes.insert( r.routes.insert(
endpoint!("/vm.add-disk"), endpoint!("/vm.add-disk"),
Box::new(VmActionHandler::new(VmAction::AddDisk(Arc::default()))), Box::new(VmActionHandler::new(&AddDisk)),
); );
r.routes.insert( r.routes.insert(
endpoint!("/vm.add-fs"), endpoint!("/vm.add-fs"),
Box::new(VmActionHandler::new(VmAction::AddFs(Arc::default()))), Box::new(VmActionHandler::new(&VmAddFs)),
); );
r.routes.insert( r.routes.insert(
endpoint!("/vm.add-net"), endpoint!("/vm.add-net"),
Box::new(VmActionHandler::new(VmAction::AddNet(Arc::default()))), Box::new(VmActionHandler::new(&VmAddNet)),
); );
r.routes.insert( r.routes.insert(
endpoint!("/vm.add-pmem"), endpoint!("/vm.add-pmem"),
Box::new(VmActionHandler::new(VmAction::AddPmem(Arc::default()))), Box::new(VmActionHandler::new(&VmAddPmem)),
); );
r.routes.insert( r.routes.insert(
endpoint!("/vm.add-vdpa"), endpoint!("/vm.add-vdpa"),
Box::new(VmActionHandler::new(VmAction::AddVdpa(Arc::default()))), Box::new(VmActionHandler::new(&VmAddVdpa)),
); );
r.routes.insert( r.routes.insert(
endpoint!("/vm.add-vsock"), endpoint!("/vm.add-vsock"),
Box::new(VmActionHandler::new(VmAction::AddVsock(Arc::default()))), Box::new(VmActionHandler::new(&VmAddVsock)),
); );
r.routes.insert( r.routes.insert(
endpoint!("/vm.boot"), endpoint!("/vm.boot"),
Box::new(VmActionHandler::new(VmAction::Boot)), Box::new(VmActionHandler::new(&VmBoot)),
); );
r.routes.insert( r.routes.insert(
endpoint!("/vm.counters"), endpoint!("/vm.counters"),
Box::new(VmActionHandler::new(VmAction::Counters)), Box::new(VmActionHandler::new(&VmCounters)),
); );
r.routes r.routes
.insert(endpoint!("/vm.create"), Box::new(VmCreate {})); .insert(endpoint!("/vm.create"), Box::new(VmCreate {}));
r.routes.insert( r.routes.insert(
endpoint!("/vm.delete"), endpoint!("/vm.delete"),
Box::new(VmActionHandler::new(VmAction::Delete)), Box::new(VmActionHandler::new(&VmDelete)),
); );
r.routes.insert(endpoint!("/vm.info"), Box::new(VmInfo {})); r.routes.insert(endpoint!("/vm.info"), Box::new(VmInfo {}));
r.routes.insert( r.routes.insert(
endpoint!("/vm.pause"), endpoint!("/vm.pause"),
Box::new(VmActionHandler::new(VmAction::Pause)), Box::new(VmActionHandler::new(&VmPause)),
); );
r.routes.insert( r.routes.insert(
endpoint!("/vm.power-button"), endpoint!("/vm.power-button"),
Box::new(VmActionHandler::new(VmAction::PowerButton)), Box::new(VmActionHandler::new(&VmPowerButton)),
); );
r.routes.insert( r.routes.insert(
endpoint!("/vm.reboot"), endpoint!("/vm.reboot"),
Box::new(VmActionHandler::new(VmAction::Reboot)), Box::new(VmActionHandler::new(&VmReboot)),
); );
r.routes.insert( r.routes.insert(
endpoint!("/vm.receive-migration"), endpoint!("/vm.receive-migration"),
Box::new(VmActionHandler::new(VmAction::ReceiveMigration( Box::new(VmActionHandler::new(&VmReceiveMigration)),
Arc::default(),
))),
); );
r.routes.insert( r.routes.insert(
endpoint!("/vm.remove-device"), endpoint!("/vm.remove-device"),
Box::new(VmActionHandler::new(VmAction::RemoveDevice(Arc::default()))), Box::new(VmActionHandler::new(&VmRemoveDevice)),
); );
r.routes.insert( r.routes.insert(
endpoint!("/vm.resize"), endpoint!("/vm.resize"),
Box::new(VmActionHandler::new(VmAction::Resize(Arc::default()))), Box::new(VmActionHandler::new(&VmResize)),
); );
r.routes.insert( r.routes.insert(
endpoint!("/vm.resize-zone"), endpoint!("/vm.resize-zone"),
Box::new(VmActionHandler::new(VmAction::ResizeZone(Arc::default()))), Box::new(VmActionHandler::new(&VmResizeZone)),
); );
r.routes.insert( r.routes.insert(
endpoint!("/vm.restore"), endpoint!("/vm.restore"),
Box::new(VmActionHandler::new(VmAction::Restore(Arc::default()))), Box::new(VmActionHandler::new(&VmRestore)),
); );
r.routes.insert( r.routes.insert(
endpoint!("/vm.resume"), endpoint!("/vm.resume"),
Box::new(VmActionHandler::new(VmAction::Resume)), Box::new(VmActionHandler::new(&VmResume)),
); );
r.routes.insert( r.routes.insert(
endpoint!("/vm.send-migration"), endpoint!("/vm.send-migration"),
Box::new(VmActionHandler::new( Box::new(VmActionHandler::new(&VmSendMigration)),
VmAction::SendMigration(Arc::default()),
)),
); );
r.routes.insert( r.routes.insert(
endpoint!("/vm.shutdown"), endpoint!("/vm.shutdown"),
Box::new(VmActionHandler::new(VmAction::Shutdown)), Box::new(VmActionHandler::new(&VmShutdown)),
); );
r.routes.insert( r.routes.insert(
endpoint!("/vm.snapshot"), endpoint!("/vm.snapshot"),
Box::new(VmActionHandler::new(VmAction::Snapshot(Arc::default()))), Box::new(VmActionHandler::new(&VmSnapshot)),
); );
#[cfg(all(target_arch = "x86_64", feature = "guest_debug"))] #[cfg(all(target_arch = "x86_64", feature = "guest_debug"))]
r.routes.insert( r.routes.insert(
endpoint!("/vm.coredump"), endpoint!("/vm.coredump"),
Box::new(VmActionHandler::new(VmAction::Coredump(Arc::default()))), Box::new(VmActionHandler::new(&VmCoredump)),
); );
r.routes r.routes
.insert(endpoint!("/vmm.ping"), Box::new(VmmPing {})); .insert(endpoint!("/vmm.ping"), Box::new(VmmPing {}));

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff