// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

#![allow(clippy::undocumented_unsafe_blocks)]
// When enabling the `mshv` feature, we skip quite some tests and
// hence have known dead-code. This annotation silences dead-code
// related warnings for our quality workflow to pass.
#![allow(dead_code)]

extern crate test_infra;

use net_util::MacAddr;
use std::collections::HashMap;
use std::fs;
use std::io;
use std::io::BufRead;
use std::io::Read;
use std::io::Seek;
use std::io::Write;
use std::os::unix::io::AsRawFd;
use std::path::PathBuf;
use std::process::{Child, Command, Stdio};
use std::string::String;
use std::sync::mpsc;
use std::sync::mpsc::Receiver;
use std::sync::Mutex;
use std::thread;
use test_infra::*;
use vmm_sys_util::{tempdir::TempDir, tempfile::TempFile};
use wait_timeout::ChildExt;

// Constant taken from the VMM crate.
const MAX_NUM_PCI_SEGMENTS: u16 = 96;

#[cfg(target_arch = "x86_64")]
mod x86_64 {
    pub const FOCAL_IMAGE_NAME: &str = "focal-server-cloudimg-amd64-custom-20210609-0.raw";
    pub const JAMMY_NVIDIA_IMAGE_NAME: &str = "jammy-server-cloudimg-amd64-nvidia.raw";
    pub const FOCAL_IMAGE_NAME_QCOW2: &str = "focal-server-cloudimg-amd64-custom-20210609-0.qcow2";
    pub const FOCAL_IMAGE_NAME_QCOW2_BACKING_FILE: &str =
        "focal-server-cloudimg-amd64-custom-20210609-0-backing.qcow2";
    pub const FOCAL_IMAGE_NAME_VHD: &str = "focal-server-cloudimg-amd64-custom-20210609-0.vhd";
    pub const FOCAL_IMAGE_NAME_VHDX: &str = "focal-server-cloudimg-amd64-custom-20210609-0.vhdx";
    pub const JAMMY_IMAGE_NAME: &str = "jammy-server-cloudimg-amd64-custom-20230119-0.raw";
    pub const WINDOWS_IMAGE_NAME: &str = "windows-server-2022-amd64-2.raw";
    pub const OVMF_NAME: &str = "CLOUDHV.fd";
    pub const GREP_SERIAL_IRQ_CMD: &str = "grep -c 'IO-APIC.*ttyS0' /proc/interrupts || true";
}

#[cfg(target_arch = "x86_64")]
use x86_64::*;

#[cfg(target_arch = "aarch64")]
mod aarch64 {
    pub const FOCAL_IMAGE_NAME: &str = "focal-server-cloudimg-arm64-custom-20210929-0.raw";
    pub const FOCAL_IMAGE_UPDATE_KERNEL_NAME: &str =
        "focal-server-cloudimg-arm64-custom-20210929-0-update-kernel.raw";
    pub const FOCAL_IMAGE_NAME_QCOW2: &str = "focal-server-cloudimg-arm64-custom-20210929-0.qcow2";
    pub const FOCAL_IMAGE_NAME_QCOW2_BACKING_FILE: &str =
        "focal-server-cloudimg-arm64-custom-20210929-0-backing.qcow2";
    pub const FOCAL_IMAGE_NAME_VHD: &str = "focal-server-cloudimg-arm64-custom-20210929-0.vhd";
    pub const FOCAL_IMAGE_NAME_VHDX: &str = "focal-server-cloudimg-arm64-custom-20210929-0.vhdx";
    pub const JAMMY_IMAGE_NAME: &str = "jammy-server-cloudimg-arm64-custom-20220329-0.raw";
    pub const WINDOWS_IMAGE_NAME: &str = "windows-11-iot-enterprise-aarch64.raw";
    pub const OVMF_NAME: &str = "CLOUDHV_EFI.fd";
    pub const GREP_SERIAL_IRQ_CMD: &str = "grep -c 'GICv3.*uart-pl011' /proc/interrupts || true";
    pub const GREP_PMU_IRQ_CMD: &str = "grep -c 'GICv3.*arm-pmu' /proc/interrupts || true";
}

#[cfg(target_arch = "aarch64")]
use aarch64::*;

const DIRECT_KERNEL_BOOT_CMDLINE: &str =
    "root=/dev/vda1 console=hvc0 rw systemd.journald.forward_to_console=1";

const CONSOLE_TEST_STRING: &str = "Started OpenBSD Secure Shell server";

// This enum exists to make it more convenient to
// implement tests for both the D-Bus and REST APIs.
enum TargetApi {
    // API socket path
    HttpApi(String),
    // Well-known service name, object path
    DBusApi(String, String),
}

impl TargetApi {
    fn new_http_api(tmp_dir: &TempDir) -> Self {
        Self::HttpApi(temp_api_path(tmp_dir))
    }

    fn new_dbus_api(tmp_dir: &TempDir) -> Self {
        // `tmp_dir` is in the form of "/tmp/chXXXXXX",
        // and we take the `chXXXXXX` part as a unique identifier for the guest
        let id = tmp_dir.as_path().file_name().unwrap().to_str().unwrap();

        Self::DBusApi(
            format!("org.cloudhypervisor.{id}"),
            format!("/org/cloudhypervisor/{id}"),
        )
    }

    fn guest_args(&self) -> Vec<String> {
        match self {
            TargetApi::HttpApi(api_socket) => {
                vec![format!("--api-socket={}", api_socket.as_str())]
            }
            TargetApi::DBusApi(service_name, object_path) => {
                vec![
                    format!("--dbus-service-name={}", service_name.as_str()),
                    format!("--dbus-object-path={}", object_path.as_str()),
                ]
            }
        }
    }

    fn remote_args(&self) -> Vec<String> {
        // `guest_args` and `remote_args` are consistent with each other
        self.guest_args()
    }

    fn remote_command(&self, command: &str, arg: Option<&str>) -> bool {
        let mut cmd = Command::new(clh_command("ch-remote"));
        cmd.args(self.remote_args());
        cmd.arg(command);

        if let Some(arg) = arg {
            cmd.arg(arg);
        }

        let output = cmd.output().unwrap();
        if output.status.success() {
            true
        } else {
            eprintln!("Error running ch-remote command: {:?}", &cmd);
            let stderr = String::from_utf8_lossy(&output.stderr);
            eprintln!("stderr: {stderr}");
            false
        }
    }
}

// Start cloud-hypervisor with no VM parameters, only the API server running.
// From the API: Create a VM, boot it and check that it looks as expected.
fn _test_api_create_boot(target_api: TargetApi, guest: Guest) {
    let mut child = GuestCommand::new(&guest)
        .args(target_api.guest_args())
        .capture_output()
        .spawn()
        .unwrap();

    thread::sleep(std::time::Duration::new(1, 0));

    // Verify API server is running
    assert!(target_api.remote_command("ping", None));

    // Create the VM first
    let cpu_count: u8 = 4;
    let request_body = guest.api_create_body(
        cpu_count,
        direct_kernel_boot_path().to_str().unwrap(),
        DIRECT_KERNEL_BOOT_CMDLINE,
    );

    let temp_config_path = guest.tmp_dir.as_path().join("config");
    std::fs::write(&temp_config_path, request_body).unwrap();
    let create_config = temp_config_path.as_os_str().to_str().unwrap();

    assert!(target_api.remote_command("create", Some(create_config),));

    // Then boot it
    assert!(target_api.remote_command("boot", None));
    thread::sleep(std::time::Duration::new(20, 0));

    let r = std::panic::catch_unwind(|| {
        // Check that the VM booted as expected
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

// Start cloud-hypervisor with no VM parameters, only the API server running.
// From the API: Create a VM, boot it and check it can be shut down and then
// booted again
fn _test_api_shutdown(target_api: TargetApi, guest: Guest) {
    let mut child = GuestCommand::new(&guest)
        .args(target_api.guest_args())
        .capture_output()
        .spawn()
        .unwrap();

    thread::sleep(std::time::Duration::new(1, 0));

    // Verify API server is running
    assert!(target_api.remote_command("ping", None));

    // Create the VM first
    let cpu_count: u8 = 4;
    let request_body = guest.api_create_body(
        cpu_count,
        direct_kernel_boot_path().to_str().unwrap(),
        DIRECT_KERNEL_BOOT_CMDLINE,
    );

    let temp_config_path = guest.tmp_dir.as_path().join("config");
    std::fs::write(&temp_config_path, request_body).unwrap();
    let create_config = temp_config_path.as_os_str().to_str().unwrap();

    let r = std::panic::catch_unwind(|| {
        assert!(target_api.remote_command("create", Some(create_config)));

        // Then boot it
        assert!(target_api.remote_command("boot", None));

        guest.wait_vm_boot(None).unwrap();

        // Check that the VM booted as expected
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);

        // Sync and shutdown without powering off to prevent filesystem
        // corruption.
        guest.ssh_command("sync").unwrap();
        guest.ssh_command("sudo shutdown -H now").unwrap();

        // Wait for the guest to be fully shutdown
        thread::sleep(std::time::Duration::new(20, 0));

        // Then shut it down
        assert!(target_api.remote_command("shutdown", None));

        // Then boot it again
        assert!(target_api.remote_command("boot", None));

        guest.wait_vm_boot(None).unwrap();

        // Check that the VM booted as expected
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

// Start cloud-hypervisor with no VM parameters, only the API server running.
// From the API: Create a VM, boot it and check it can be deleted and then
// recreated and booted again.
fn _test_api_delete(target_api: TargetApi, guest: Guest) {
    let mut child = GuestCommand::new(&guest)
        .args(target_api.guest_args())
        .capture_output()
        .spawn()
        .unwrap();

    thread::sleep(std::time::Duration::new(1, 0));

    // Verify API server is running
    assert!(target_api.remote_command("ping", None));

    // Create the VM first
    let cpu_count: u8 = 4;
    let request_body = guest.api_create_body(
        cpu_count,
        direct_kernel_boot_path().to_str().unwrap(),
        DIRECT_KERNEL_BOOT_CMDLINE,
    );
    let temp_config_path = guest.tmp_dir.as_path().join("config");
    std::fs::write(&temp_config_path, request_body).unwrap();
    let create_config = temp_config_path.as_os_str().to_str().unwrap();

    let r = std::panic::catch_unwind(|| {
        assert!(target_api.remote_command("create", Some(create_config)));

        // Then boot it
        assert!(target_api.remote_command("boot", None));

        guest.wait_vm_boot(None).unwrap();

        // Check that the VM booted as expected
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);

        // Sync and shutdown without powering off to prevent filesystem
        // corruption.
        guest.ssh_command("sync").unwrap();
        guest.ssh_command("sudo shutdown -H now").unwrap();

        // Wait for the guest to be fully shutdown
        thread::sleep(std::time::Duration::new(20, 0));

        // Then delete it
        assert!(target_api.remote_command("delete", None));

        assert!(target_api.remote_command("create", Some(create_config)));

        // Then boot it again
        assert!(target_api.remote_command("boot", None));

        guest.wait_vm_boot(None).unwrap();

        // Check that the VM booted as expected
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

// Start cloud-hypervisor with no VM parameters, only the API server running.
// From the API: Create a VM, boot it and check that it looks as expected.
// Then we pause the VM, check that it's no longer available.
// Finally we resume the VM and check that it's available.
fn _test_api_pause_resume(target_api: TargetApi, guest: Guest) {
    let mut child = GuestCommand::new(&guest)
        .args(target_api.guest_args())
        .capture_output()
        .spawn()
        .unwrap();

    thread::sleep(std::time::Duration::new(1, 0));

    // Verify API server is running
    assert!(target_api.remote_command("ping", None));

    // Create the VM first
    let cpu_count: u8 = 4;
    let request_body = guest.api_create_body(
        cpu_count,
        direct_kernel_boot_path().to_str().unwrap(),
        DIRECT_KERNEL_BOOT_CMDLINE,
    );

    let temp_config_path = guest.tmp_dir.as_path().join("config");
    std::fs::write(&temp_config_path, request_body).unwrap();
    let create_config = temp_config_path.as_os_str().to_str().unwrap();

    assert!(target_api.remote_command("create", Some(create_config)));

    // Then boot it
    assert!(target_api.remote_command("boot", None));
    thread::sleep(std::time::Duration::new(20, 0));

    let r = std::panic::catch_unwind(|| {
        // Check that the VM booted as expected
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);

        // We now pause the VM
        assert!(target_api.remote_command("pause", None));

        // Check pausing again fails
        assert!(!target_api.remote_command("pause", None));

        thread::sleep(std::time::Duration::new(2, 0));

        // SSH into the VM should fail
        assert!(ssh_command_ip(
            "grep -c processor /proc/cpuinfo",
            &guest.network.guest_ip,
            2,
            5
        )
        .is_err());

        // Resume the VM
        assert!(target_api.remote_command("resume", None));

        // Check resuming again fails
        assert!(!target_api.remote_command("resume", None));

        thread::sleep(std::time::Duration::new(2, 0));

        // Now we should be able to SSH back in and get the right number of CPUs
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}
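
// Logs into the guest over the PTY-backed console, runs a marker command
// and checks that the expected output comes back on the same PTY.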
fn _test_pty_interaction(pty_path: PathBuf) {
    let mut cf = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .open(pty_path)
        .unwrap();

    // Some dumb sleeps but we don't want to write
    // before the console is up and we don't want
    // to try and write the next line before the
    // login process is ready.
    thread::sleep(std::time::Duration::new(5, 0));
    assert_eq!(cf.write(b"cloud\n").unwrap(), 6);
    thread::sleep(std::time::Duration::new(2, 0));
    assert_eq!(cf.write(b"cloud123\n").unwrap(), 9);
    thread::sleep(std::time::Duration::new(2, 0));
    assert_eq!(cf.write(b"echo test_pty_console\n").unwrap(), 22);
    thread::sleep(std::time::Duration::new(2, 0));

    // read pty and ensure they have a login shell
    // some fairly hacky workarounds to avoid looping
    // forever in case the channel is blocked getting output
    let ptyc = pty_read(cf);
    let mut empty = 0;
    let mut prev = String::new();
    loop {
        thread::sleep(std::time::Duration::new(2, 0));
        match ptyc.try_recv() {
            Ok(line) => {
                empty = 0;
                prev = prev + &line;
                if prev.contains("test_pty_console") {
                    break;
                }
            }
            Err(mpsc::TryRecvError::Empty) => {
                empty += 1;
                assert!(empty <= 5, "No login on pty");
            }
            _ => {
                panic!("No login on pty")
            }
        }
    }
}
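
// Spawns a virtiofsd daemon exporting `shared_dir` and returns the child
// process together with the path of the vhost-user socket it listens on.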
fn prepare_virtiofsd(tmp_dir: &TempDir, shared_dir: &str) -> (std::process::Child, String) {
    let mut workload_path = dirs::home_dir().unwrap();
    workload_path.push("workloads");

    let mut virtiofsd_path = workload_path;
    virtiofsd_path.push("virtiofsd");
    let virtiofsd_path = String::from(virtiofsd_path.to_str().unwrap());

    let virtiofsd_socket_path =
        String::from(tmp_dir.as_path().join("virtiofs.sock").to_str().unwrap());

    // Start the daemon
    let child = Command::new(virtiofsd_path.as_str())
        .args(["--shared-dir", shared_dir])
        .args(["--socket-path", virtiofsd_socket_path.as_str()])
        .args(["--cache", "never"])
        .spawn()
        .unwrap();

    thread::sleep(std::time::Duration::new(10, 0));

    (child, virtiofsd_socket_path)
}
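
// Spawns the vhost-user-block backend for the given disk image and returns
// the child process along with the vhost-user socket path it serves.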
fn prepare_vubd(
    tmp_dir: &TempDir,
    blk_img: &str,
    num_queues: usize,
    rdonly: bool,
    direct: bool,
) -> (std::process::Child, String) {
    let mut workload_path = dirs::home_dir().unwrap();
    workload_path.push("workloads");

    let mut blk_file_path = workload_path;
    blk_file_path.push(blk_img);
    let blk_file_path = String::from(blk_file_path.to_str().unwrap());

    let vubd_socket_path = String::from(tmp_dir.as_path().join("vub.sock").to_str().unwrap());

    // Start the daemon
    let child = Command::new(clh_command("vhost_user_block"))
        .args([
            "--block-backend",
            format!(
                "path={blk_file_path},socket={vubd_socket_path},num_queues={num_queues},readonly={rdonly},direct={direct}"
            )
            .as_str(),
        ])
        .spawn()
        .unwrap();

    thread::sleep(std::time::Duration::new(10, 0));

    (child, vubd_socket_path)
}
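
// The following helpers build per-test file paths (vsock socket, API socket,
// event-monitor file, snapshot directory, vmcore file) under the test's
// temporary directory.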
fn temp_vsock_path(tmp_dir: &TempDir) -> String {
    String::from(tmp_dir.as_path().join("vsock").to_str().unwrap())
}

fn temp_api_path(tmp_dir: &TempDir) -> String {
    String::from(
        tmp_dir
            .as_path()
            .join("cloud-hypervisor.sock")
            .to_str()
            .unwrap(),
    )
}

fn temp_event_monitor_path(tmp_dir: &TempDir) -> String {
    String::from(tmp_dir.as_path().join("event.json").to_str().unwrap())
}

// Creates the directory and returns the path.
fn temp_snapshot_dir_path(tmp_dir: &TempDir) -> String {
    let snapshot_dir = String::from(tmp_dir.as_path().join("snapshot").to_str().unwrap());
    std::fs::create_dir(&snapshot_dir).unwrap();
    snapshot_dir
}

fn temp_vmcore_file_path(tmp_dir: &TempDir) -> String {
    let vmcore_file = String::from(tmp_dir.as_path().join("vmcore").to_str().unwrap());
    vmcore_file
}

// Creates the path for direct kernel boot and returns the path.
// For x86_64, this function returns the vmlinux kernel path.
// For AArch64, this function returns the PE kernel path.
fn direct_kernel_boot_path() -> PathBuf {
    let mut workload_path = dirs::home_dir().unwrap();
    workload_path.push("workloads");

    let mut kernel_path = workload_path;
    #[cfg(target_arch = "x86_64")]
    kernel_path.push("vmlinux");
    #[cfg(target_arch = "aarch64")]
    kernel_path.push("Image");

    kernel_path
}
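
// Returns the path of the EDK2 (OVMF) firmware image in the workloads directory.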
fn edk2_path() -> PathBuf {
    let mut workload_path = dirs::home_dir().unwrap();
    workload_path.push("workloads");
    let mut edk2_path = workload_path;
    edk2_path.push(OVMF_NAME);

    edk2_path
}
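
// Returns the path of the statically linked cloud-hypervisor release binary
// stored in the workloads directory.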
fn cloud_hypervisor_release_path() -> String {
    let mut workload_path = dirs::home_dir().unwrap();
    workload_path.push("workloads");

    let mut ch_release_path = workload_path;
    #[cfg(target_arch = "x86_64")]
    ch_release_path.push("cloud-hypervisor-static");
    #[cfg(target_arch = "aarch64")]
    ch_release_path.push("cloud-hypervisor-static-aarch64");

    ch_release_path.into_os_string().into_string().unwrap()
}
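
// Builds the command for the vhost-user-net backend with the requested IP,
// optional tap device and MTU, queue count and client/server mode, and
// returns it together with the vhost-user socket path.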
fn prepare_vhost_user_net_daemon(
    tmp_dir: &TempDir,
    ip: &str,
    tap: Option<&str>,
    mtu: Option<u16>,
    num_queues: usize,
    client_mode: bool,
) -> (std::process::Command, String) {
    let vunet_socket_path = String::from(tmp_dir.as_path().join("vunet.sock").to_str().unwrap());

    // Start the daemon
    let mut net_params = format!(
        "ip={ip},mask=255.255.255.0,socket={vunet_socket_path},num_queues={num_queues},queue_size=1024,client={client_mode}"
    );

    if let Some(tap) = tap {
        net_params.push_str(format!(",tap={tap}").as_str());
    }

    if let Some(mtu) = mtu {
        net_params.push_str(format!(",mtu={mtu}").as_str());
    }

    let mut command = Command::new(clh_command("vhost_user_net"));
    command.args(["--net-backend", net_params.as_str()]);

    (command, vunet_socket_path)
}
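
// Builds the swtpm command serving a TPM 2.0 device over a unixio control
// socket in the test's temporary directory, and returns the command together
// with the socket path.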
fn prepare_swtpm_daemon(tmp_dir: &TempDir) -> (std::process::Command, String) {
    let swtpm_tpm_dir = String::from(tmp_dir.as_path().join("swtpm").to_str().unwrap());
    let swtpm_socket_path = String::from(
        tmp_dir
            .as_path()
            .join("swtpm")
            .join("swtpm.sock")
            .to_str()
            .unwrap(),
    );
    std::fs::create_dir(&swtpm_tpm_dir).unwrap();

    let mut swtpm_command = Command::new("swtpm");
    let swtpm_args = [
        "socket",
        "--tpmstate",
        &format!("dir={swtpm_tpm_dir}"),
        "--ctrl",
        &format!("type=unixio,path={swtpm_socket_path}"),
        "--flags",
        "startup-clear",
        "--tpm2",
    ];
    swtpm_command.args(swtpm_args);

    (swtpm_command, swtpm_socket_path)
}
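
// Runs a `ch-remote` command against the given API socket and reports whether
// it succeeded; the `_w_output` variant below also returns the captured stdout.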
fn remote_command(api_socket: &str, command: &str, arg: Option<&str>) -> bool {
    let mut cmd = Command::new(clh_command("ch-remote"));
    cmd.args([&format!("--api-socket={api_socket}"), command]);

    if let Some(arg) = arg {
        cmd.arg(arg);
    }
    let output = cmd.output().unwrap();
    if output.status.success() {
        true
    } else {
        eprintln!("Error running ch-remote command: {:?}", &cmd);
        let stderr = String::from_utf8_lossy(&output.stderr);
        eprintln!("stderr: {stderr}");
        false
    }
}

fn remote_command_w_output(api_socket: &str, command: &str, arg: Option<&str>) -> (bool, Vec<u8>) {
    let mut cmd = Command::new(clh_command("ch-remote"));
    cmd.args([&format!("--api-socket={api_socket}"), command]);

    if let Some(arg) = arg {
        cmd.arg(arg);
    }

    let output = cmd.output().expect("Failed to launch ch-remote");

    (output.status.success(), output.stdout)
}
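
// Issues a `ch-remote resize` request for vCPUs, RAM and/or balloon size and,
// when an event file is provided, checks that the "resizing"/"resized" events
// were emitted last.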
fn resize_command(
    api_socket: &str,
    desired_vcpus: Option<u8>,
    desired_ram: Option<usize>,
    desired_balloon: Option<usize>,
    event_file: Option<&str>,
) -> bool {
    let mut cmd = Command::new(clh_command("ch-remote"));
    cmd.args([&format!("--api-socket={api_socket}"), "resize"]);

    if let Some(desired_vcpus) = desired_vcpus {
        cmd.arg(format!("--cpus={desired_vcpus}"));
    }

    if let Some(desired_ram) = desired_ram {
        cmd.arg(format!("--memory={desired_ram}"));
    }

    if let Some(desired_balloon) = desired_balloon {
        cmd.arg(format!("--balloon={desired_balloon}"));
    }

    let ret = cmd.status().expect("Failed to launch ch-remote").success();

    if let Some(event_path) = event_file {
        let latest_events = [
            &MetaEvent {
                event: "resizing".to_string(),
                device_id: None,
            },
            &MetaEvent {
                event: "resized".to_string(),
                device_id: None,
            },
        ];
        // See: #5938
        thread::sleep(std::time::Duration::new(1, 0));
        assert!(check_latest_events_exact(&latest_events, event_path));
    }

    ret
}

fn resize_zone_command(api_socket: &str, id: &str, desired_size: &str) -> bool {
    let mut cmd = Command::new(clh_command("ch-remote"));
    cmd.args([
        &format!("--api-socket={api_socket}"),
        "resize-zone",
        &format!("--id={id}"),
        &format!("--size={desired_size}"),
    ]);

    cmd.status().expect("Failed to launch ch-remote").success()
}

// setup OVS-DPDK bridge and ports
fn setup_ovs_dpdk() {
    // setup OVS-DPDK
    assert!(exec_host_command_status("service openvswitch-switch start").success());
    assert!(exec_host_command_status("ovs-vsctl init").success());
    assert!(
        exec_host_command_status("ovs-vsctl set Open_vSwitch . other_config:dpdk-init=true")
            .success()
    );
    assert!(exec_host_command_status("service openvswitch-switch restart").success());

    // Create OVS-DPDK bridge and ports
    assert!(exec_host_command_status(
        "ovs-vsctl add-br ovsbr0 -- set bridge ovsbr0 datapath_type=netdev",
    )
    .success());
    assert!(exec_host_command_status("ovs-vsctl add-port ovsbr0 vhost-user1 -- set Interface vhost-user1 type=dpdkvhostuserclient options:vhost-server-path=/tmp/dpdkvhostclient1").success());
    assert!(exec_host_command_status("ovs-vsctl add-port ovsbr0 vhost-user2 -- set Interface vhost-user2 type=dpdkvhostuserclient options:vhost-server-path=/tmp/dpdkvhostclient2").success());
    assert!(exec_host_command_status("ip link set up dev ovsbr0").success());
    assert!(exec_host_command_status("service openvswitch-switch restart").success());
}

fn cleanup_ovs_dpdk() {
    assert!(exec_host_command_status("ovs-vsctl del-br ovsbr0").success());
    exec_host_command_status("rm -f ovs-vsctl /tmp/dpdkvhostclient1 /tmp/dpdkvhostclient2");
}

// Setup two guests and ensure they are connected through ovs-dpdk
fn setup_ovs_dpdk_guests(
    guest1: &Guest,
    guest2: &Guest,
    api_socket: &str,
    release_binary: bool,
) -> (Child, Child) {
    setup_ovs_dpdk();

    let clh_path = if !release_binary {
        clh_command("cloud-hypervisor")
    } else {
        cloud_hypervisor_release_path()
    };

    let mut child1 = GuestCommand::new_with_binary_path(guest1, &clh_path)
        .args(["--cpus", "boot=2"])
        .args(["--memory", "size=0,shared=on"])
        .args(["--memory-zone", "id=mem0,size=1G,shared=on,host_numa_node=0"])
        .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .args(["--net", guest1.default_net_string().as_str(), "vhost_user=true,socket=/tmp/dpdkvhostclient1,num_queues=2,queue_size=256,vhost_mode=server"])
        .capture_output()
        .spawn()
        .unwrap();

    #[cfg(target_arch = "x86_64")]
    let guest_net_iface = "ens5";
    #[cfg(target_arch = "aarch64")]
    let guest_net_iface = "enp0s5";

    let r = std::panic::catch_unwind(|| {
        guest1.wait_vm_boot(None).unwrap();

        guest1
            .ssh_command(&format!(
                "sudo ip addr add 172.100.0.1/24 dev {guest_net_iface}"
            ))
            .unwrap();
        guest1
            .ssh_command(&format!("sudo ip link set up dev {guest_net_iface}"))
            .unwrap();

        let guest_ip = guest1.network.guest_ip.clone();
        thread::spawn(move || {
            ssh_command_ip(
                "nc -l 12345",
                &guest_ip,
                DEFAULT_SSH_RETRIES,
                DEFAULT_SSH_TIMEOUT,
            )
            .unwrap();
        });
    });
    if r.is_err() {
        cleanup_ovs_dpdk();

        let _ = child1.kill();
        let output = child1.wait_with_output().unwrap();
        handle_child_output(r, &output);
        panic!("Test should already be failed/panicked"); // To explicitly mark that this block never returns
    }

    let mut child2 = GuestCommand::new_with_binary_path(guest2, &clh_path)
        .args(["--api-socket", api_socket])
        .args(["--cpus", "boot=2"])
        .args(["--memory", "size=0,shared=on"])
        .args(["--memory-zone", "id=mem0,size=1G,shared=on,host_numa_node=0"])
        .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .args(["--net", guest2.default_net_string().as_str(), "vhost_user=true,socket=/tmp/dpdkvhostclient2,num_queues=2,queue_size=256,vhost_mode=server"])
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest2.wait_vm_boot(None).unwrap();

        guest2
            .ssh_command(&format!(
                "sudo ip addr add 172.100.0.2/24 dev {guest_net_iface}"
            ))
            .unwrap();
        guest2
            .ssh_command(&format!("sudo ip link set up dev {guest_net_iface}"))
            .unwrap();

        // Check the connection works properly between the two VMs
        guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap();
    });
    if r.is_err() {
        cleanup_ovs_dpdk();

        let _ = child1.kill();
        let _ = child2.kill();
        let output = child2.wait_with_output().unwrap();
        handle_child_output(r, &output);
        panic!("Test should already be failed/panicked"); // To explicitly mark that this block never returns
    }

    (child1, child2)
}
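
// Selects which firmware image to use (OVMF or rust-hypervisor-firmware) and
// resolves its path inside the workloads directory.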
enum FwType {
    Ovmf,
    RustHypervisorFirmware,
}

fn fw_path(_fw_type: FwType) -> String {
    let mut workload_path = dirs::home_dir().unwrap();
    workload_path.push("workloads");

    let mut fw_path = workload_path;
    #[cfg(target_arch = "aarch64")]
    fw_path.push("CLOUDHV_EFI.fd");
    #[cfg(target_arch = "x86_64")]
    {
        match _fw_type {
            FwType::Ovmf => fw_path.push(OVMF_NAME),
            FwType::RustHypervisorFirmware => fw_path.push("hypervisor-fw"),
        }
    }

    fw_path.to_str().unwrap().to_string()
}
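
// Expected event as read back from the event-monitor file: an event name and,
// optionally, the device id it must refer to.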
#[derive(Debug)]
struct MetaEvent {
    event: String,
    device_id: Option<String>,
}

impl MetaEvent {
    pub fn match_with_json_event(&self, v: &serde_json::Value) -> bool {
        let mut matched = false;
        if v["event"].as_str().unwrap() == self.event {
            if let Some(device_id) = &self.device_id {
                if v["properties"]["id"].as_str().unwrap() == device_id {
                    matched = true
                }
            } else {
                matched = true;
            }
        }
        matched
    }
}

// Parse the event_monitor file based on the format that each event
// is followed by a double newline
fn parse_event_file(event_file: &str) -> Vec<serde_json::Value> {
    let content = fs::read(event_file).unwrap();
    let mut ret = Vec::new();
    for entry in String::from_utf8_lossy(&content)
        .trim()
        .split("\n\n")
        .collect::<Vec<&str>>()
    {
        ret.push(serde_json::from_str(entry).unwrap());
    }

    ret
}

// Return true if all events from the input 'expected_events' are matched sequentially
// with events from the 'event_file'
fn check_sequential_events(expected_events: &[&MetaEvent], event_file: &str) -> bool {
    let json_events = parse_event_file(event_file);
    let len = expected_events.len();
    let mut idx = 0;
    for e in &json_events {
        if idx == len {
            break;
        }
        if expected_events[idx].match_with_json_event(e) {
            idx += 1;
        }
    }

    let ret = idx == len;

    if !ret {
        eprintln!(
            "\n\n==== Start 'check_sequential_events' failed ==== \
            \n\nexpected_events={:?}\nactual_events={:?} \
            \n\n==== End 'check_sequential_events' failed ====",
            expected_events, json_events,
        );
    }

    ret
}

// Return true if all events from the input 'expected_events' are matched exactly
// with events from the 'event_file'
fn check_sequential_events_exact(expected_events: &[&MetaEvent], event_file: &str) -> bool {
    let json_events = parse_event_file(event_file);
    assert!(expected_events.len() <= json_events.len());
    let json_events = &json_events[..expected_events.len()];

    for (idx, e) in json_events.iter().enumerate() {
        if !expected_events[idx].match_with_json_event(e) {
            eprintln!(
                "\n\n==== Start 'check_sequential_events_exact' failed ==== \
                \n\nexpected_events={:?}\nactual_events={:?} \
                \n\n==== End 'check_sequential_events_exact' failed ====",
                expected_events, json_events,
            );

            return false;
        }
    }

    true
}

// Return true if events from the input 'latest_events' are matched exactly
// with the most recent events from the 'event_file'
fn check_latest_events_exact(latest_events: &[&MetaEvent], event_file: &str) -> bool {
    let json_events = parse_event_file(event_file);
    assert!(latest_events.len() <= json_events.len());
    let json_events = &json_events[(json_events.len() - latest_events.len())..];

    for (idx, e) in json_events.iter().enumerate() {
        if !latest_events[idx].match_with_json_event(e) {
            eprintln!(
                "\n\n==== Start 'check_latest_events_exact' failed ==== \
                \n\nexpected_events={:?}\nactual_events={:?} \
                \n\n==== End 'check_latest_events_exact' failed ====",
                latest_events, json_events,
            );

            return false;
        }
    }

    true
}
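
// Boots a guest with the given threads/cores/packages topology (optionally
// via firmware) and verifies the topology reported by the guest matches.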
fn test_cpu_topology(threads_per_core: u8, cores_per_package: u8, packages: u8, use_fw: bool) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let total_vcpus = threads_per_core * cores_per_package * packages;
    let direct_kernel_boot_path = direct_kernel_boot_path();
    let mut kernel_path = direct_kernel_boot_path.to_str().unwrap();
    let fw_path = fw_path(FwType::RustHypervisorFirmware);
    if use_fw {
        kernel_path = fw_path.as_str();
    }

    let mut child = GuestCommand::new(&guest)
        .args([
            "--cpus",
            &format!(
                "boot={total_vcpus},topology={threads_per_core}:{cores_per_package}:1:{packages}"
            ),
        ])
        .args(["--memory", "size=512M"])
        .args(["--kernel", kernel_path])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .default_net()
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();
        assert_eq!(
            guest.get_cpu_count().unwrap_or_default(),
            u32::from(total_vcpus)
        );
        assert_eq!(
            guest
                .ssh_command("lscpu | grep \"per core\" | cut -f 2 -d \":\" | sed \"s# *##\"")
                .unwrap()
                .trim()
                .parse::<u8>()
                .unwrap_or(0),
            threads_per_core
        );

        assert_eq!(
            guest
                .ssh_command("lscpu | grep \"per socket\" | cut -f 2 -d \":\" | sed \"s# *##\"")
                .unwrap()
                .trim()
                .parse::<u8>()
                .unwrap_or(0),
            cores_per_package
        );

        assert_eq!(
            guest
                .ssh_command("lscpu | grep \"Socket\" | cut -f 2 -d \":\" | sed \"s# *##\"")
                .unwrap()
                .trim()
                .parse::<u8>()
                .unwrap_or(0),
            packages
        );

        #[cfg(target_arch = "x86_64")]
        {
            let mut cpu_id = 0;
            for package_id in 0..packages {
                for core_id in 0..cores_per_package {
                    for _ in 0..threads_per_core {
                        assert_eq!(
                            guest
                                .ssh_command(&format!("cat /sys/devices/system/cpu/cpu{cpu_id}/topology/physical_package_id"))
                                .unwrap()
                                .trim()
                                .parse::<u8>()
                                .unwrap_or(0),
                            package_id
                        );

                        assert_eq!(
                            guest
                                .ssh_command(&format!(
                                    "cat /sys/devices/system/cpu/cpu{cpu_id}/topology/core_id"
                                ))
                                .unwrap()
                                .trim()
                                .parse::<u8>()
                                .unwrap_or(0),
                            core_id
                        );

                        cpu_id += 1;
                    }
                }
            }
        }
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}
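
// Boots a guest with three NUMA nodes (memory zones, CPU sets and distances)
// and verifies the guest view; on x86_64 it also exercises memory and vCPU
// hotplug per node.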
#[allow(unused_variables)]
fn _test_guest_numa_nodes(acpi: bool) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let api_socket = temp_api_path(&guest.tmp_dir);
    #[cfg(target_arch = "x86_64")]
    let kernel_path = direct_kernel_boot_path();
    #[cfg(target_arch = "aarch64")]
    let kernel_path = if acpi {
        edk2_path()
    } else {
        direct_kernel_boot_path()
    };

    let mut child = GuestCommand::new(&guest)
        .args(["--cpus", "boot=6,max=12"])
        .args(["--memory", "size=0,hotplug_method=virtio-mem"])
        .args([
            "--memory-zone",
            "id=mem0,size=1G,hotplug_size=3G",
            "id=mem1,size=2G,hotplug_size=3G",
            "id=mem2,size=3G,hotplug_size=3G",
        ])
        .args([
            "--numa",
            "guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0",
            "guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1",
            "guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2",
        ])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .args(["--api-socket", &api_socket])
        .capture_output()
        .default_disks()
        .default_net()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        guest.check_numa_common(
            Some(&[960_000, 1_920_000, 2_880_000]),
            Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]),
            Some(&["10 15 20", "20 10 25", "25 30 10"]),
        );

        // AArch64 currently does not support hotplug, and therefore we only
        // test hotplug-related function on x86_64 here.
        #[cfg(target_arch = "x86_64")]
        {
            guest.enable_memory_hotplug();

            // Resize every memory zone and check each associated NUMA node
            // has been assigned the right amount of memory.
            resize_zone_command(&api_socket, "mem0", "4G");
            resize_zone_command(&api_socket, "mem1", "4G");
            resize_zone_command(&api_socket, "mem2", "4G");
            // Resize to the maximum amount of CPUs and check each NUMA
            // node has been assigned the right CPUs set.
            resize_command(&api_socket, Some(12), None, None, None);
            thread::sleep(std::time::Duration::new(5, 0));

            guest.check_numa_common(
                Some(&[3_840_000, 3_840_000, 3_840_000]),
                Some(&[vec![0, 1, 2, 9], vec![3, 4, 6, 7, 8], vec![5, 10, 11]]),
                None,
            );
        }
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}
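
// Boots a guest and checks it powers down cleanly when the ACPI power button
// is triggered through the API.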
#[allow(unused_variables)]
fn _test_power_button(acpi: bool) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let mut cmd = GuestCommand::new(&guest);
    let api_socket = temp_api_path(&guest.tmp_dir);

    #[cfg(target_arch = "x86_64")]
    let kernel_path = direct_kernel_boot_path();
    #[cfg(target_arch = "aarch64")]
    let kernel_path = if acpi {
        edk2_path()
    } else {
        direct_kernel_boot_path()
    };

    cmd.args(["--cpus", "boot=1"])
        .args(["--memory", "size=512M"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .capture_output()
        .default_disks()
        .default_net()
        .args(["--api-socket", &api_socket]);

    let child = cmd.spawn().unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();
        assert!(remote_command(&api_socket, "power-button", None));
    });

    let output = child.wait_with_output().unwrap();
    assert!(output.status.success());
    handle_child_output(r, &output);
}
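
// Signature shared by the vhost-user-net backend launchers: temporary
// directory, host IP, optional tap name, optional MTU, queue count and
// client-mode flag, returning the backend command and its socket path.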
type PrepareNetDaemon = dyn Fn(
|
|
|
|
&TempDir,
|
|
|
|
&str,
|
|
|
|
Option<&str>,
|
|
|
|
Option<u16>,
|
|
|
|
usize,
|
|
|
|
bool,
|
|
|
|
) -> (std::process::Command, String);
|
2022-01-06 22:24:38 +00:00
|
|
|
|
|
|
|
fn test_vhost_user_net(
|
|
|
|
tap: Option<&str>,
|
|
|
|
num_queues: usize,
|
|
|
|
prepare_daemon: &PrepareNetDaemon,
|
|
|
|
generate_host_mac: bool,
|
|
|
|
client_mode_daemon: bool,
|
|
|
|
) {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
let api_socket = temp_api_path(&guest.tmp_dir);
|
|
|
|
|
|
|
|
let kernel_path = direct_kernel_boot_path();
|
|
|
|
|
|
|
|
let host_mac = if generate_host_mac {
|
|
|
|
Some(MacAddr::local_random())
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
};
|
|
|
|
|
2022-09-26 14:28:40 +00:00
|
|
|
let mtu = Some(3000);
|
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let (mut daemon_command, vunet_socket_path) = prepare_daemon(
|
|
|
|
&guest.tmp_dir,
|
|
|
|
&guest.network.host_ip,
|
|
|
|
tap,
|
2022-09-26 14:28:40 +00:00
|
|
|
mtu,
|
2022-01-06 22:24:38 +00:00
|
|
|
num_queues,
|
|
|
|
client_mode_daemon,
|
|
|
|
);
|
|
|
|
|
|
|
|
let net_params = format!(
|
2022-09-26 14:28:40 +00:00
|
|
|
"vhost_user=true,mac={},socket={},num_queues={},queue_size=1024{},vhost_mode={},mtu=3000",
|
2022-01-06 22:24:38 +00:00
|
|
|
guest.network.guest_mac,
|
|
|
|
vunet_socket_path,
|
|
|
|
num_queues,
|
|
|
|
if let Some(host_mac) = host_mac {
|
2022-12-14 11:41:15 +00:00
|
|
|
format!(",host_mac={host_mac}")
|
2022-01-06 22:24:38 +00:00
|
|
|
} else {
|
|
|
|
"".to_owned()
|
|
|
|
},
|
|
|
|
if client_mode_daemon {
|
|
|
|
"server"
|
|
|
|
} else {
|
|
|
|
"client"
|
|
|
|
},
|
|
|
|
);
|
|
|
|
|
|
|
|
let mut ch_command = GuestCommand::new(&guest);
|
|
|
|
ch_command
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", format!("boot={}", num_queues / 2).as_str()])
|
|
|
|
.args(["--memory", "size=512M,hotplug_size=2048M,shared=on"])
|
|
|
|
.args(["--kernel", kernel_path.to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--net", net_params.as_str()])
|
|
|
|
.args(["--api-socket", &api_socket])
|
2022-01-06 22:24:38 +00:00
|
|
|
.capture_output();
|
|
|
|
|
|
|
|
let mut daemon_child: std::process::Child;
|
|
|
|
let mut child: std::process::Child;
|
|
|
|
|
|
|
|
if client_mode_daemon {
|
|
|
|
child = ch_command.spawn().unwrap();
|
|
|
|
// Make sure the VMM is waiting for the backend to connect
|
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
|
|
|
daemon_child = daemon_command.spawn().unwrap();
|
|
|
|
} else {
|
|
|
|
daemon_child = daemon_command.spawn().unwrap();
|
|
|
|
// Make sure the backend is waiting for the VMM to connect
|
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
|
|
|
child = ch_command.spawn().unwrap();
|
2021-08-08 03:49:39 +00:00
|
|
|
}
|
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
|
|
|
|
if let Some(tap_name) = tap {
|
2022-12-14 11:41:15 +00:00
|
|
|
let tap_count = exec_host_command_output(&format!("ip link | grep -c {tap_name}"));
|
2022-01-06 22:24:38 +00:00
|
|
|
assert_eq!(String::from_utf8_lossy(&tap_count.stdout).trim(), "1");
|
|
|
|
}
|
|
|
|
|
|
|
|
if let Some(host_mac) = tap {
|
2022-12-14 11:41:15 +00:00
|
|
|
let mac_count = exec_host_command_output(&format!("ip link | grep -c {host_mac}"));
|
2022-01-06 22:24:38 +00:00
|
|
|
assert_eq!(String::from_utf8_lossy(&mac_count.stdout).trim(), "1");
|
|
|
|
}
|
|
|
|
|
2022-09-26 14:28:40 +00:00
|
|
|
#[cfg(target_arch = "aarch64")]
|
|
|
|
let iface = "enp0s4";
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
let iface = "ens4";
|
|
|
|
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
2022-12-14 11:41:15 +00:00
|
|
|
.ssh_command(format!("cat /sys/class/net/{iface}/mtu").as_str())
|
2022-09-26 14:28:40 +00:00
|
|
|
.unwrap()
|
|
|
|
.trim(),
|
|
|
|
"3000"
|
|
|
|
);
|
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// 1 network interface + default localhost ==> 2 interfaces
|
|
|
|
// It's important to note that this test is fully exercising the
|
|
|
|
// vhost-user-net implementation and the associated backend since
|
|
|
|
// it does not define any --net network interface. That means all
|
|
|
|
// the ssh communication in that test happens through the network
|
|
|
|
// interface backed by vhost-user-net.
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("ip -o link | wc -l")
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or_default(),
|
|
|
|
2
|
|
|
|
);
|
2021-09-02 04:59:11 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// The following pci devices will appear on guest with PCI-MSI
|
|
|
|
// interrupt vectors assigned.
|
|
|
|
// 1 virtio-console with 3 vectors: config, Rx, Tx
|
|
|
|
// 1 virtio-blk with 2 vectors: config, Request
|
|
|
|
// 1 virtio-blk with 2 vectors: config, Request
|
|
|
|
// 1 virtio-rng with 2 vectors: config, Request
|
|
|
|
// Since virtio-net has 2 queue pairs, its vectors is as follows:
|
|
|
|
// 1 virtio-net with 5 vectors: config, Rx (2), Tx (2)
|
|
|
|
// Based on the above, the total vectors should 14.
|
2021-09-02 04:59:11 +00:00
|
|
|
#[cfg(target_arch = "x86_64")]
|
2022-01-06 22:24:38 +00:00
|
|
|
let grep_cmd = "grep -c PCI-MSI /proc/interrupts";
|
2021-09-02 04:59:11 +00:00
|
|
|
#[cfg(target_arch = "aarch64")]
|
2022-01-06 22:24:38 +00:00
|
|
|
let grep_cmd = "grep -c ITS-MSI /proc/interrupts";
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command(grep_cmd)
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or_default(),
|
|
|
|
10 + (num_queues as u32)
|
|
|
|
);
|
2021-09-02 04:59:11 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// ACPI feature is needed.
|
2022-03-28 10:53:22 +00:00
|
|
|
#[cfg(target_arch = "x86_64")]
|
2022-01-06 22:24:38 +00:00
|
|
|
{
|
|
|
|
guest.enable_memory_hotplug();
|
2021-09-02 04:59:11 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Add RAM to the VM
|
|
|
|
let desired_ram = 1024 << 20; // 1 GiB, in bytes
|
2022-05-12 19:51:13 +00:00
|
|
|
resize_command(&api_socket, None, Some(desired_ram), None, None);
|
2021-09-02 04:59:11 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
2021-09-02 04:59:11 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Here, by simply checking the size (through SSH), we validate that
|
|
|
|
// the connection is still working, which means vhost-user-net
|
|
|
|
// keeps working after the resize.
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 960_000);
|
|
|
|
}
|
|
|
|
});
|
2021-09-02 04:59:11 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
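// Give the vhost-user backend a few seconds to notice the VMM going away
// before killing it; the 5s value below is a somewhat arbitrary grace period.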
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
thread::sleep(std::time::Duration::new(5, 0));
|
|
|
|
let _ = daemon_child.kill();
|
|
|
|
let _ = daemon_child.wait();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
type PrepareBlkDaemon = dyn Fn(&TempDir, &str, usize, bool, bool) -> (std::process::Child, String);
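// The closure takes (temp dir, block image path, num_queues, readonly, direct)
// and returns the spawned backend process together with its vhost-user socket path.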
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
fn test_vhost_user_blk(
|
|
|
|
num_queues: usize,
|
|
|
|
readonly: bool,
|
|
|
|
direct: bool,
|
|
|
|
prepare_vhost_user_blk_daemon: Option<&PrepareBlkDaemon>,
|
|
|
|
) {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
let api_socket = temp_api_path(&guest.tmp_dir);
|
|
|
|
|
|
|
|
let kernel_path = direct_kernel_boot_path();
|
|
|
|
|
|
|
|
let (blk_params, daemon_child) = {
|
|
|
|
let prepare_daemon = prepare_vhost_user_blk_daemon.unwrap();
|
|
|
|
// Start the daemon
|
|
|
|
let (daemon_child, vubd_socket_path) =
|
|
|
|
prepare_daemon(&guest.tmp_dir, "blk.img", num_queues, readonly, direct);
|
|
|
|
|
|
|
|
(
|
|
|
|
format!(
|
2022-12-14 11:41:15 +00:00
|
|
|
"vhost_user=true,socket={vubd_socket_path},num_queues={num_queues},queue_size=128",
|
2022-01-06 22:24:38 +00:00
|
|
|
),
|
|
|
|
Some(daemon_child),
|
|
|
|
)
|
|
|
|
};
|
|
|
|
|
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-12-14 11:41:15 +00:00
|
|
|
.args(["--cpus", format!("boot={num_queues}").as_str()])
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--memory", "size=512M,hotplug_size=2048M,shared=on"])
|
|
|
|
.args(["--kernel", kernel_path.to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
|
|
|
.args([
|
2022-01-06 22:24:38 +00:00
|
|
|
"--disk",
|
|
|
|
format!(
|
|
|
|
"path={}",
|
|
|
|
guest.disk_config.disk(DiskType::OperatingSystem).unwrap()
|
|
|
|
)
|
|
|
|
.as_str(),
|
|
|
|
format!(
|
|
|
|
"path={}",
|
|
|
|
guest.disk_config.disk(DiskType::CloudInit).unwrap()
|
|
|
|
)
|
|
|
|
.as_str(),
|
|
|
|
blk_params.as_str(),
|
|
|
|
])
|
|
|
|
.default_net()
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--api-socket", &api_socket])
|
2022-01-06 22:24:38 +00:00
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
|
|
|
|
// Check both if /dev/vdc exists and if the block size is 16M.
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("lsblk | grep vdc | grep -c 16M")
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or_default(),
|
|
|
|
1
|
2021-05-04 21:48:35 +00:00
|
|
|
);
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check if this block is RO or RW.
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("lsblk | grep vdc | awk '{print $5}'")
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or_default(),
|
|
|
|
readonly as u32
|
2021-05-04 21:48:35 +00:00
|
|
|
);
|
2020-03-11 05:11:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check if the number of queues in /sys/block/vdc/mq matches the
|
|
|
|
// expected num_queues.
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("ls -ll /sys/block/vdc/mq | grep ^d | wc -l")
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or_default(),
|
|
|
|
num_queues as u32
|
|
|
|
);
|
2021-05-04 21:48:35 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Mount the device
|
|
|
|
let mount_ro_rw_flag = if readonly { "ro,noload" } else { "rw" };
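// For a read-only device, ext4 also needs 'noload' so the kernel does not
// try to replay the journal on a device it cannot write to.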
|
|
|
|
guest.ssh_command("mkdir mount_image").unwrap();
|
|
|
|
guest
|
|
|
|
.ssh_command(
|
2022-12-14 11:41:15 +00:00
|
|
|
format!("sudo mount -o {mount_ro_rw_flag} -t ext4 /dev/vdc mount_image/").as_str(),
|
2022-01-06 22:24:38 +00:00
|
|
|
)
|
|
|
|
.unwrap();
|
2021-05-04 21:48:35 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check the content of the block device. The file "foo" should
|
|
|
|
// contain "bar".
|
|
|
|
assert_eq!(
|
|
|
|
guest.ssh_command("cat mount_image/foo").unwrap().trim(),
|
|
|
|
"bar"
|
|
|
|
);
|
2020-03-11 05:11:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// ACPI feature is needed.
|
2022-03-28 10:53:22 +00:00
|
|
|
#[cfg(target_arch = "x86_64")]
|
2022-01-06 22:24:38 +00:00
|
|
|
{
|
|
|
|
guest.enable_memory_hotplug();
|
2020-10-27 05:37:25 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Add RAM to the VM
|
|
|
|
let desired_ram = 1024 << 20; // 1 GiB, in bytes
|
2022-05-12 19:51:13 +00:00
|
|
|
resize_command(&api_socket, None, Some(desired_ram), None, None);
|
2020-03-11 05:11:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 960_000);
|
2020-03-11 05:11:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check again the content of the block device after the resize
|
|
|
|
// has been performed.
|
2020-10-15 12:37:37 +00:00
|
|
|
assert_eq!(
|
2022-01-06 22:24:38 +00:00
|
|
|
guest.ssh_command("cat mount_image/foo").unwrap().trim(),
|
|
|
|
"bar"
|
2020-10-15 12:37:37 +00:00
|
|
|
);
|
2022-01-06 22:24:38 +00:00
|
|
|
}
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Unmount the device
|
|
|
|
guest.ssh_command("sudo umount /dev/vdc").unwrap();
|
|
|
|
guest.ssh_command("rm -r mount_image").unwrap();
|
|
|
|
});
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
if let Some(mut daemon_child) = daemon_child {
|
|
|
|
thread::sleep(std::time::Duration::new(5, 0));
|
|
|
|
let _ = daemon_child.kill();
|
|
|
|
let _ = daemon_child.wait();
|
|
|
|
}
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
fn test_boot_from_vhost_user_blk(
|
|
|
|
num_queues: usize,
|
|
|
|
readonly: bool,
|
|
|
|
direct: bool,
|
|
|
|
prepare_vhost_user_blk_daemon: Option<&PrepareBlkDaemon>,
|
|
|
|
) {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
|
|
|
|
let kernel_path = direct_kernel_boot_path();
|
|
|
|
|
|
|
|
let disk_path = guest.disk_config.disk(DiskType::OperatingSystem).unwrap();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let (blk_boot_params, daemon_child) = {
|
|
|
|
let prepare_daemon = prepare_vhost_user_blk_daemon.unwrap();
|
|
|
|
// Start the daemon
|
|
|
|
let (daemon_child, vubd_socket_path) = prepare_daemon(
|
|
|
|
&guest.tmp_dir,
|
|
|
|
disk_path.as_str(),
|
|
|
|
num_queues,
|
|
|
|
readonly,
|
|
|
|
direct,
|
|
|
|
);
|
|
|
|
|
|
|
|
(
|
|
|
|
format!(
|
2022-12-14 11:41:15 +00:00
|
|
|
"vhost_user=true,socket={vubd_socket_path},num_queues={num_queues},queue_size=128",
|
2022-01-06 22:24:38 +00:00
|
|
|
),
|
|
|
|
Some(daemon_child),
|
|
|
|
)
|
|
|
|
};
|
|
|
|
|
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-12-14 11:41:15 +00:00
|
|
|
.args(["--cpus", format!("boot={num_queues}").as_str()])
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--memory", "size=512M,shared=on"])
|
|
|
|
.args(["--kernel", kernel_path.to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
|
|
|
.args([
|
2022-01-06 22:24:38 +00:00
|
|
|
"--disk",
|
|
|
|
blk_boot_params.as_str(),
|
|
|
|
format!(
|
|
|
|
"path={}",
|
|
|
|
guest.disk_config.disk(DiskType::CloudInit).unwrap()
|
|
|
|
)
|
|
|
|
.as_str(),
|
|
|
|
])
|
|
|
|
.default_net()
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
|
|
|
|
// Just check the VM booted correctly.
|
|
|
|
assert_eq!(guest.get_cpu_count().unwrap_or_default(), num_queues as u32);
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
|
|
|
|
});
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
|
|
|
|
|
|
|
if let Some(mut daemon_child) = daemon_child {
|
2021-05-04 21:48:35 +00:00
|
|
|
thread::sleep(std::time::Duration::new(5, 0));
|
|
|
|
let _ = daemon_child.kill();
|
|
|
|
let _ = daemon_child.wait();
|
2020-02-25 09:42:15 +00:00
|
|
|
}
|
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2022-05-26 11:32:57 +00:00
|
|
|
fn _test_virtio_fs(
|
2022-05-26 11:30:16 +00:00
|
|
|
prepare_daemon: &dyn Fn(&TempDir, &str) -> (std::process::Child, String),
|
2022-01-06 22:24:38 +00:00
|
|
|
hotplug: bool,
|
|
|
|
pci_segment: Option<u16>,
|
|
|
|
) {
|
|
|
|
#[cfg(target_arch = "aarch64")]
|
|
|
|
let focal_image = if hotplug {
|
|
|
|
FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string()
|
|
|
|
} else {
|
|
|
|
FOCAL_IMAGE_NAME.to_string()
|
|
|
|
};
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
let focal_image = FOCAL_IMAGE_NAME.to_string();
|
|
|
|
let focal = UbuntuDiskConfig::new(focal_image);
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
let api_socket = temp_api_path(&guest.tmp_dir);
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut workload_path = dirs::home_dir().unwrap();
|
|
|
|
workload_path.push("workloads");
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut shared_dir = workload_path;
|
|
|
|
shared_dir.push("shared_dir");
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
let kernel_path = direct_kernel_boot_path();
|
|
|
|
#[cfg(target_arch = "aarch64")]
|
|
|
|
let kernel_path = if hotplug {
|
|
|
|
edk2_path()
|
|
|
|
} else {
|
|
|
|
direct_kernel_boot_path()
|
|
|
|
};
|
|
|
|
|
2022-05-26 11:30:16 +00:00
|
|
|
let (mut daemon_child, virtiofsd_socket_path) =
|
|
|
|
prepare_daemon(&guest.tmp_dir, shared_dir.to_str().unwrap());
|
2022-01-06 22:24:38 +00:00
|
|
|
|
|
|
|
let mut guest_command = GuestCommand::new(&guest);
|
|
|
|
guest_command
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=512M,hotplug_size=2048M,shared=on"])
|
|
|
|
.args(["--kernel", kernel_path.to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--api-socket", &api_socket]);
|
2022-01-06 22:24:38 +00:00
|
|
|
if pci_segment.is_some() {
|
2023-04-26 01:20:18 +00:00
|
|
|
guest_command.args([
|
|
|
|
"--platform",
|
|
|
|
&format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"),
|
|
|
|
]);
|
2022-01-06 22:24:38 +00:00
|
|
|
}
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let fs_params = format!(
|
2022-05-26 11:47:15 +00:00
|
|
|
"id=myfs0,tag=myfs,socket={},num_queues=1,queue_size=1024{}",
|
2022-01-06 22:24:38 +00:00
|
|
|
virtiofsd_socket_path,
|
|
|
|
if let Some(pci_segment) = pci_segment {
|
2022-12-14 11:41:15 +00:00
|
|
|
format!(",pci_segment={pci_segment}")
|
2022-01-06 22:24:38 +00:00
|
|
|
} else {
|
|
|
|
"".to_owned()
|
|
|
|
}
|
|
|
|
);
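// The 'tag' value (myfs) is what the guest later passes to
// 'mount -t virtiofs myfs ...' to identify this filesystem.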
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
if !hotplug {
|
2022-09-20 08:46:19 +00:00
|
|
|
guest_command.args(["--fs", fs_params.as_str()]);
|
2022-01-06 22:24:38 +00:00
|
|
|
}
|
2020-10-27 05:37:25 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = guest_command.capture_output().spawn().unwrap();
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
if hotplug {
|
|
|
|
// Add fs to the VM
|
|
|
|
let (cmd_success, cmd_output) =
|
|
|
|
remote_command_w_output(&api_socket, "add-fs", Some(&fs_params));
|
|
|
|
assert!(cmd_success);
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
if let Some(pci_segment) = pci_segment {
|
|
|
|
assert!(String::from_utf8_lossy(&cmd_output).contains(&format!(
|
2022-12-14 11:41:15 +00:00
|
|
|
"{{\"id\":\"myfs0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}"
|
2022-01-06 22:24:38 +00:00
|
|
|
)));
|
|
|
|
} else {
|
|
|
|
assert!(String::from_utf8_lossy(&cmd_output)
|
|
|
|
.contains("{\"id\":\"myfs0\",\"bdf\":\"0000:00:06.0\"}"));
|
2020-07-03 08:00:12 +00:00
|
|
|
}
|
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
2020-08-12 11:20:14 +00:00
|
|
|
}
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Mount shared directory through virtio_fs filesystem
|
2022-05-26 11:47:15 +00:00
|
|
|
guest
|
|
|
|
.ssh_command("mkdir -p mount_dir && sudo mount -t virtiofs myfs mount_dir/")
|
|
|
|
.unwrap();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check file1 exists and its content is "foo"
|
|
|
|
assert_eq!(
|
|
|
|
guest.ssh_command("cat mount_dir/file1").unwrap().trim(),
|
|
|
|
"foo"
|
|
|
|
);
|
|
|
|
// Check file2 does not exist
|
|
|
|
guest
|
|
|
|
.ssh_command("[ ! -f 'mount_dir/file2' ] || true")
|
2020-08-12 11:20:14 +00:00
|
|
|
.unwrap();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check file3 exists and its content is "bar"
|
|
|
|
assert_eq!(
|
|
|
|
guest.ssh_command("cat mount_dir/file3").unwrap().trim(),
|
|
|
|
"bar"
|
|
|
|
);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// ACPI feature is needed.
|
2022-03-28 10:53:22 +00:00
|
|
|
#[cfg(target_arch = "x86_64")]
|
2022-01-06 22:24:38 +00:00
|
|
|
{
|
|
|
|
guest.enable_memory_hotplug();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Add RAM to the VM
|
|
|
|
let desired_ram = 1024 << 20; // 1 GiB, in bytes
|
2022-05-12 19:51:13 +00:00
|
|
|
resize_command(&api_socket, None, Some(desired_ram), None, None);
|
2020-03-24 08:16:32 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
thread::sleep(std::time::Duration::new(30, 0));
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 960_000);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// After the resize, check again that file1 exists and its
|
|
|
|
// content is "foo".
|
|
|
|
assert_eq!(
|
|
|
|
guest.ssh_command("cat mount_dir/file1").unwrap().trim(),
|
|
|
|
"foo"
|
|
|
|
);
|
|
|
|
}
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
if hotplug {
|
|
|
|
// Remove the filesystem device from the VM
|
|
|
|
guest.ssh_command("sudo umount mount_dir").unwrap();
|
|
|
|
assert!(remote_command(&api_socket, "remove-device", Some("myfs0")));
|
|
|
|
}
|
|
|
|
});
|
2020-03-23 15:54:43 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let (r, hotplug_daemon_child) = if r.is_ok() && hotplug {
|
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
2022-05-26 11:30:16 +00:00
|
|
|
let (daemon_child, virtiofsd_socket_path) =
|
|
|
|
prepare_daemon(&guest.tmp_dir, shared_dir.to_str().unwrap());
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2020-08-12 11:20:14 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
2022-01-06 22:24:38 +00:00
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
|
|
|
let fs_params = format!(
|
2022-05-26 11:47:15 +00:00
|
|
|
"id=myfs0,tag=myfs,socket={},num_queues=1,queue_size=1024{}",
|
2022-01-06 22:24:38 +00:00
|
|
|
virtiofsd_socket_path,
|
2021-10-25 15:17:33 +00:00
|
|
|
if let Some(pci_segment) = pci_segment {
|
2022-12-14 11:41:15 +00:00
|
|
|
format!(",pci_segment={pci_segment}")
|
2021-10-25 15:17:33 +00:00
|
|
|
} else {
|
2022-01-06 22:24:38 +00:00
|
|
|
"".to_owned()
|
2021-10-25 15:17:33 +00:00
|
|
|
}
|
2022-01-06 22:24:38 +00:00
|
|
|
);
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Add back and check it works
|
|
|
|
let (cmd_success, cmd_output) =
|
|
|
|
remote_command_w_output(&api_socket, "add-fs", Some(&fs_params));
|
|
|
|
assert!(cmd_success);
|
|
|
|
if let Some(pci_segment) = pci_segment {
|
|
|
|
assert!(String::from_utf8_lossy(&cmd_output).contains(&format!(
|
2022-12-14 11:41:15 +00:00
|
|
|
"{{\"id\":\"myfs0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}"
|
2022-01-06 22:24:38 +00:00
|
|
|
)));
|
|
|
|
} else {
|
|
|
|
assert!(String::from_utf8_lossy(&cmd_output)
|
|
|
|
.contains("{\"id\":\"myfs0\",\"bdf\":\"0000:00:06.0\"}"));
|
2020-07-03 08:00:12 +00:00
|
|
|
}
|
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
2020-07-03 08:00:12 +00:00
|
|
|
// Mount shared directory through virtio_fs filesystem
|
2022-05-26 11:47:15 +00:00
|
|
|
guest
|
|
|
|
.ssh_command("mkdir -p mount_dir && sudo mount -t virtiofs myfs mount_dir/")
|
|
|
|
.unwrap();
|
|
|
|
|
2020-07-03 08:00:12 +00:00
|
|
|
// Check file1 exists and its content is "foo"
|
2020-08-12 11:20:14 +00:00
|
|
|
assert_eq!(
|
2021-01-06 10:56:20 +00:00
|
|
|
guest.ssh_command("cat mount_dir/file1").unwrap().trim(),
|
2020-07-03 08:00:12 +00:00
|
|
|
"foo"
|
2020-02-25 09:42:15 +00:00
|
|
|
);
|
2022-01-06 22:24:38 +00:00
|
|
|
});
|
2021-06-03 10:39:57 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
(r, Some(daemon_child))
|
|
|
|
} else {
|
|
|
|
(r, None)
|
|
|
|
};
|
2020-03-23 15:54:43 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2020-03-24 09:57:45 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = daemon_child.kill();
|
|
|
|
let _ = daemon_child.wait();
|
2020-03-24 09:57:45 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
if let Some(mut daemon_child) = hotplug_daemon_child {
|
|
|
|
let _ = daemon_child.kill();
|
|
|
|
let _ = daemon_child.wait();
|
|
|
|
}
|
2020-03-24 09:57:45 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2020-03-24 09:57:45 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
fn test_virtio_pmem(discard_writes: bool, specify_size: bool) {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
|
|
|
|
let kernel_path = direct_kernel_boot_path();
|
|
|
|
|
|
|
|
let pmem_temp_file = TempFile::new().unwrap();
|
|
|
|
pmem_temp_file.as_file().set_len(128 << 20).unwrap(); // 128 MiB backing file
|
|
|
|
|
|
|
|
std::process::Command::new("mkfs.ext4")
|
|
|
|
.arg(pmem_temp_file.as_path())
|
|
|
|
.output()
|
|
|
|
.expect("Expect creating disk image to succeed");
|
|
|
|
|
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", kernel_path.to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
2022-09-20 08:46:19 +00:00
|
|
|
.args([
|
2022-01-06 22:24:38 +00:00
|
|
|
"--pmem",
|
|
|
|
format!(
|
|
|
|
"file={}{}{}",
|
|
|
|
pmem_temp_file.as_path().to_str().unwrap(),
|
|
|
|
if specify_size { ",size=128M" } else { "" },
|
|
|
|
if discard_writes {
|
|
|
|
",discard_writes=on"
|
|
|
|
} else {
|
|
|
|
""
|
|
|
|
}
|
|
|
|
)
|
|
|
|
.as_str(),
|
|
|
|
])
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
|
|
|
|
// Check for the presence of /dev/pmem0
|
|
|
|
assert_eq!(
|
|
|
|
guest.ssh_command("ls /dev/pmem0").unwrap().trim(),
|
|
|
|
"/dev/pmem0"
|
|
|
|
);
|
2020-03-23 15:54:43 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check changes persist after reboot
|
|
|
|
assert_eq!(guest.ssh_command("sudo mount /dev/pmem0 /mnt").unwrap(), "");
|
|
|
|
assert_eq!(guest.ssh_command("ls /mnt").unwrap(), "lost+found\n");
|
|
|
|
guest
|
|
|
|
.ssh_command("echo test123 | sudo tee /mnt/test")
|
|
|
|
.unwrap();
|
|
|
|
assert_eq!(guest.ssh_command("sudo umount /mnt").unwrap(), "");
|
|
|
|
assert_eq!(guest.ssh_command("ls /mnt").unwrap(), "");
|
2020-08-12 11:20:14 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
guest.reboot_linux(0, None);
|
|
|
|
assert_eq!(guest.ssh_command("sudo mount /dev/pmem0 /mnt").unwrap(), "");
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("sudo cat /mnt/test || true")
|
|
|
|
.unwrap()
|
|
|
|
.trim(),
|
|
|
|
if discard_writes { "" } else { "test123" }
|
|
|
|
);
|
|
|
|
});
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2021-10-25 15:17:33 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2020-08-12 11:20:14 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
fn get_fd_count(pid: u32) -> usize {
|
2022-12-14 11:41:15 +00:00
|
|
|
fs::read_dir(format!("/proc/{pid}/fd")).unwrap().count()
|
2022-01-06 22:24:38 +00:00
|
|
|
}
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
fn _test_virtio_vsock(hotplug: bool) {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
2020-08-12 11:20:14 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
let kernel_path = direct_kernel_boot_path();
|
|
|
|
#[cfg(target_arch = "aarch64")]
|
|
|
|
let kernel_path = if hotplug {
|
|
|
|
edk2_path()
|
|
|
|
} else {
|
|
|
|
direct_kernel_boot_path()
|
|
|
|
};
|
|
|
|
|
|
|
|
let socket = temp_vsock_path(&guest.tmp_dir);
|
|
|
|
let api_socket = temp_api_path(&guest.tmp_dir);
|
|
|
|
|
|
|
|
let mut cmd = GuestCommand::new(&guest);
|
2022-09-20 08:46:19 +00:00
|
|
|
cmd.args(["--api-socket", &api_socket]);
|
|
|
|
cmd.args(["--cpus", "boot=1"]);
|
|
|
|
cmd.args(["--memory", "size=512M"]);
|
|
|
|
cmd.args(["--kernel", kernel_path.to_str().unwrap()]);
|
|
|
|
cmd.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]);
|
2022-01-06 22:24:38 +00:00
|
|
|
cmd.default_disks();
|
|
|
|
cmd.default_net();
|
|
|
|
|
|
|
|
if !hotplug {
|
2022-12-14 11:41:15 +00:00
|
|
|
cmd.args(["--vsock", format!("cid=3,socket={socket}").as_str()]);
|
2022-01-06 22:24:38 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
let mut child = cmd.capture_output().spawn().unwrap();
|
|
|
|
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
|
|
|
|
if hotplug {
|
|
|
|
let (cmd_success, cmd_output) = remote_command_w_output(
|
|
|
|
&api_socket,
|
|
|
|
"add-vsock",
|
2022-12-14 11:41:15 +00:00
|
|
|
Some(format!("cid=3,socket={socket},id=test0").as_str()),
|
2022-01-06 22:24:38 +00:00
|
|
|
);
|
|
|
|
assert!(cmd_success);
|
|
|
|
assert!(String::from_utf8_lossy(&cmd_output)
|
|
|
|
.contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}"));
|
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
|
|
|
// Check adding a second one fails
|
|
|
|
assert!(!remote_command(
|
|
|
|
&api_socket,
|
|
|
|
"add-vsock",
|
|
|
|
Some("cid=1234,socket=/tmp/fail")
|
|
|
|
));
|
|
|
|
}
|
2020-08-12 11:20:14 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Validate vsock works as expected.
|
|
|
|
guest.check_vsock(socket.as_str());
|
|
|
|
guest.reboot_linux(0, None);
|
|
|
|
// Validate vsock still works after a reboot.
|
|
|
|
guest.check_vsock(socket.as_str());
|
|
|
|
|
|
|
|
if hotplug {
|
|
|
|
assert!(remote_command(&api_socket, "remove-device", Some("test0")));
|
2020-08-12 11:20:14 +00:00
|
|
|
}
|
2022-01-06 22:24:38 +00:00
|
|
|
});
|
|
|
|
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
|
|
|
|
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
|
|
|
|
|
|
|
fn get_ksm_pages_shared() -> u32 {
|
|
|
|
fs::read_to_string("/sys/kernel/mm/ksm/pages_shared")
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap()
|
|
|
|
}
|
2020-08-12 11:20:14 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
fn test_memory_mergeable(mergeable: bool) {
|
|
|
|
let memory_param = if mergeable {
|
|
|
|
"mergeable=on"
|
|
|
|
} else {
|
|
|
|
"mergeable=off"
|
|
|
|
};
|
|
|
|
|
2023-08-31 13:00:19 +00:00
|
|
|
// We are assuming the rest of the system in our CI is not using mergeable memory
|
2022-01-06 22:24:38 +00:00
|
|
|
let ksm_ps_init = get_ksm_pages_shared();
|
|
|
|
assert!(ksm_ps_init == 0);
|
|
|
|
|
|
|
|
let focal1 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest1 = Guest::new(Box::new(focal1));
|
|
|
|
let mut child1 = GuestCommand::new(&guest1)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=1"])
|
2022-12-14 11:41:15 +00:00
|
|
|
.args(["--memory", format!("size=512M,{memory_param}").as_str()])
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--net", guest1.default_net_string().as_str()])
|
|
|
|
.args(["--serial", "tty", "--console", "off"])
|
2022-01-06 22:24:38 +00:00
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest1.wait_vm_boot(None).unwrap();
|
|
|
|
});
|
|
|
|
if r.is_err() {
|
|
|
|
let _ = child1.kill();
|
|
|
|
let output = child1.wait_with_output().unwrap();
|
2020-08-12 11:20:14 +00:00
|
|
|
handle_child_output(r, &output);
|
2022-01-06 22:24:38 +00:00
|
|
|
panic!("Test should already be failed/panicked"); // To explicitly mark this block never return
|
2020-02-25 09:42:15 +00:00
|
|
|
}
|
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let ksm_ps_guest1 = get_ksm_pages_shared();
|
|
|
|
|
|
|
|
let focal2 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest2 = Guest::new(Box::new(focal2));
|
|
|
|
let mut child2 = GuestCommand::new(&guest2)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=1"])
|
2022-12-14 11:41:15 +00:00
|
|
|
.args(["--memory", format!("size=512M,{memory_param}").as_str()])
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--net", guest2.default_net_string().as_str()])
|
|
|
|
.args(["--serial", "tty", "--console", "off"])
|
2022-01-06 22:24:38 +00:00
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest2.wait_vm_boot(None).unwrap();
|
|
|
|
let ksm_ps_guest2 = get_ksm_pages_shared();
|
|
|
|
|
|
|
|
if mergeable {
|
|
|
|
println!(
|
2022-12-14 11:41:15 +00:00
|
|
|
"ksm pages_shared after vm1 booted '{ksm_ps_guest1}', ksm pages_shared after vm2 booted '{ksm_ps_guest2}'"
|
2022-01-06 22:24:38 +00:00
|
|
|
);
|
|
|
|
// We are expecting the number of shared pages to increase as the number of VMs increases
|
|
|
|
assert!(ksm_ps_guest1 < ksm_ps_guest2);
|
|
|
|
} else {
|
|
|
|
assert!(ksm_ps_guest1 == 0);
|
|
|
|
assert!(ksm_ps_guest2 == 0);
|
|
|
|
}
|
|
|
|
});
|
2020-03-24 08:16:32 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child1.kill();
|
|
|
|
let _ = child2.kill();
|
2020-03-23 17:59:12 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let output = child1.wait_with_output().unwrap();
|
|
|
|
child2.wait().unwrap();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
fn _get_vmm_overhead(pid: u32, guest_memory_size: u32) -> HashMap<String, u32> {
|
2022-12-14 11:41:15 +00:00
|
|
|
let smaps = fs::File::open(format!("/proc/{pid}/smaps")).unwrap();
|
2022-01-06 22:24:38 +00:00
|
|
|
let reader = io::BufReader::new(smaps);
|
|
|
|
|
|
|
|
let mut skip_map: bool = false;
|
|
|
|
let mut region_name: String = "".to_string();
|
|
|
|
let mut region_maps = HashMap::new();
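// Illustrative /proc/<pid>/smaps excerpt (values are made up):
//   7f2c8e200000-7f2c8e421000 rw-p 00000000 00:00 0    [heap]
//   Size:               2184 kB
//   Rss:                 124 kB
// The header line of each map contains a '-' in the address range, which is
// how a new region (and its name, the last field) is detected below.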
|
|
|
|
for line in reader.lines() {
|
|
|
|
let l = line.unwrap();
|
|
|
|
|
|
|
|
if l.contains('-') {
|
|
|
|
let values: Vec<&str> = l.split_whitespace().collect();
|
|
|
|
region_name = values.last().unwrap().trim().to_string();
|
|
|
|
if region_name == "0" {
|
|
|
|
region_name = "anonymous".to_string()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Each section begins with something that looks like:
|
|
|
|
// Size: 2184 kB
|
|
|
|
if l.starts_with("Size:") {
|
|
|
|
let values: Vec<&str> = l.split_whitespace().collect();
|
|
|
|
let map_size = values[1].parse::<u32>().unwrap();
|
|
|
|
// We skip the assigned guest RAM map, as its RSS is only
|
|
|
|
// dependent on the guest's actual memory usage.
|
|
|
|
// Everything else can be added to the VMM overhead.
|
|
|
|
skip_map = map_size >= guest_memory_size;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
// If this is a map we're taking into account, then we only
|
|
|
|
// count the RSS. The sum of all counted RSS is the VMM overhead.
|
|
|
|
if !skip_map && l.starts_with("Rss:") {
|
|
|
|
let values: Vec<&str> = l.split_whitespace().collect();
|
|
|
|
let value = values[1].trim().parse::<u32>().unwrap();
|
|
|
|
*region_maps.entry(region_name.clone()).or_insert(0) += value;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
region_maps
|
|
|
|
}
|
|
|
|
|
|
|
|
fn get_vmm_overhead(pid: u32, guest_memory_size: u32) -> u32 {
|
|
|
|
let mut total = 0;
|
|
|
|
|
|
|
|
for (region_name, value) in &_get_vmm_overhead(pid, guest_memory_size) {
|
2022-12-14 11:41:15 +00:00
|
|
|
eprintln!("{region_name}: {value}");
|
2022-01-06 22:24:38 +00:00
|
|
|
total += value;
|
|
|
|
}
|
|
|
|
|
|
|
|
total
|
|
|
|
}
|
|
|
|
|
2022-02-10 16:12:08 +00:00
|
|
|
fn process_rss_kib(pid: u32) -> usize {
|
2022-12-14 11:41:15 +00:00
|
|
|
let command = format!("ps -q {pid} -o rss=");
|
2022-02-10 16:12:08 +00:00
|
|
|
let rss = exec_host_command_output(&command);
|
|
|
|
String::from_utf8_lossy(&rss.stdout).trim().parse().unwrap()
|
|
|
|
}
|
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// 10MB is our maximum accepted overhead.
|
|
|
|
const MAXIMUM_VMM_OVERHEAD_KB: u32 = 10 * 1024;
|
|
|
|
|
2022-06-30 15:41:46 +00:00
|
|
|
#[derive(PartialEq, Eq, PartialOrd)]
|
2022-01-06 22:24:38 +00:00
|
|
|
struct Counters {
|
|
|
|
rx_bytes: u64,
|
|
|
|
rx_frames: u64,
|
|
|
|
tx_bytes: u64,
|
|
|
|
tx_frames: u64,
|
|
|
|
read_bytes: u64,
|
|
|
|
write_bytes: u64,
|
|
|
|
read_ops: u64,
|
|
|
|
write_ops: u64,
|
|
|
|
}
|
|
|
|
|
|
|
|
fn get_counters(api_socket: &str) -> Counters {
|
|
|
|
// Get counters
|
|
|
|
let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "counters", None);
|
|
|
|
assert!(cmd_success);
|
|
|
|
|
|
|
|
let counters: HashMap<&str, HashMap<&str, u64>> =
|
|
|
|
serde_json::from_slice(&cmd_output).unwrap_or_default();
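// The returned payload maps device ids to their counters, e.g. (illustrative):
// { "_net2": { "rx_bytes": 1234, "rx_frames": 5, ... }, "_disk0": { "read_bytes": 42, ... } }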
|
|
|
|
|
|
|
|
let rx_bytes = *counters.get("_net2").unwrap().get("rx_bytes").unwrap();
|
|
|
|
let rx_frames = *counters.get("_net2").unwrap().get("rx_frames").unwrap();
|
|
|
|
let tx_bytes = *counters.get("_net2").unwrap().get("tx_bytes").unwrap();
|
|
|
|
let tx_frames = *counters.get("_net2").unwrap().get("tx_frames").unwrap();
|
|
|
|
|
|
|
|
let read_bytes = *counters.get("_disk0").unwrap().get("read_bytes").unwrap();
|
|
|
|
let write_bytes = *counters.get("_disk0").unwrap().get("write_bytes").unwrap();
|
|
|
|
let read_ops = *counters.get("_disk0").unwrap().get("read_ops").unwrap();
|
|
|
|
let write_ops = *counters.get("_disk0").unwrap().get("write_ops").unwrap();
|
|
|
|
|
|
|
|
Counters {
|
|
|
|
rx_bytes,
|
|
|
|
rx_frames,
|
|
|
|
tx_bytes,
|
|
|
|
tx_frames,
|
|
|
|
read_bytes,
|
|
|
|
write_bytes,
|
|
|
|
read_ops,
|
|
|
|
write_ops,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fn pty_read(mut pty: std::fs::File) -> Receiver<String> {
|
|
|
|
let (tx, rx) = mpsc::channel::<String>();
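// Spawn a reader thread that polls the PTY once per second, converting whatever
// is in the 512-byte buffer to a String and forwarding it over the channel;
// the loop ends when either the PTY read or the channel send fails.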
|
|
|
|
thread::spawn(move || loop {
|
|
|
|
thread::sleep(std::time::Duration::new(1, 0));
|
|
|
|
let mut buf = [0; 512];
|
|
|
|
match pty.read(&mut buf) {
|
2024-02-06 22:46:10 +00:00
|
|
|
Ok(_bytes) => {
|
2022-01-06 22:24:38 +00:00
|
|
|
let output = std::str::from_utf8(&buf).unwrap().to_string();
|
|
|
|
match tx.send(output) {
|
|
|
|
Ok(_) => (),
|
|
|
|
Err(_) => break,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
Err(_) => break,
|
|
|
|
}
|
|
|
|
});
|
|
|
|
rx
|
|
|
|
}
|
|
|
|
|
|
|
|
fn get_pty_path(api_socket: &str, pty_type: &str) -> PathBuf {
|
|
|
|
let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "info", None);
|
|
|
|
assert!(cmd_success);
|
|
|
|
let info: serde_json::Value = serde_json::from_slice(&cmd_output).unwrap_or_default();
|
|
|
|
assert_eq!("Pty", info["config"][pty_type]["mode"]);
|
|
|
|
PathBuf::from(
|
|
|
|
info["config"][pty_type]["file"]
|
|
|
|
.as_str()
|
|
|
|
.expect("Missing pty path"),
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
|
|
|
// VFIO test network setup.
|
|
|
|
// We reserve a dedicated subnet for it: 172.18.0.0/24.
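// Topology set up below: a Linux bridge 'vfio-br0' at 172.18.0.1/24 with four
// tap devices (vfio-tap0..vfio-tap3) enslaved to it.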
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
fn setup_vfio_network_interfaces() {
|
|
|
|
// 'vfio-br0'
|
|
|
|
assert!(exec_host_command_status("sudo ip link add name vfio-br0 type bridge").success());
|
|
|
|
assert!(exec_host_command_status("sudo ip link set vfio-br0 up").success());
|
|
|
|
assert!(exec_host_command_status("sudo ip addr add 172.18.0.1/24 dev vfio-br0").success());
|
|
|
|
// 'vfio-tap0'
|
|
|
|
assert!(exec_host_command_status("sudo ip tuntap add vfio-tap0 mode tap").success());
|
|
|
|
assert!(exec_host_command_status("sudo ip link set vfio-tap0 master vfio-br0").success());
|
|
|
|
assert!(exec_host_command_status("sudo ip link set vfio-tap0 up").success());
|
|
|
|
// 'vfio-tap1'
|
|
|
|
assert!(exec_host_command_status("sudo ip tuntap add vfio-tap1 mode tap").success());
|
|
|
|
assert!(exec_host_command_status("sudo ip link set vfio-tap1 master vfio-br0").success());
|
|
|
|
assert!(exec_host_command_status("sudo ip link set vfio-tap1 up").success());
|
|
|
|
// 'vfio-tap2'
|
|
|
|
assert!(exec_host_command_status("sudo ip tuntap add vfio-tap2 mode tap").success());
|
|
|
|
assert!(exec_host_command_status("sudo ip link set vfio-tap2 master vfio-br0").success());
|
|
|
|
assert!(exec_host_command_status("sudo ip link set vfio-tap2 up").success());
|
|
|
|
// 'vfio-tap3'
|
|
|
|
assert!(exec_host_command_status("sudo ip tuntap add vfio-tap3 mode tap").success());
|
|
|
|
assert!(exec_host_command_status("sudo ip link set vfio-tap3 master vfio-br0").success());
|
|
|
|
assert!(exec_host_command_status("sudo ip link set vfio-tap3 up").success());
|
|
|
|
}
|
|
|
|
|
|
|
|
// Tear the VFIO test network down
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
fn cleanup_vfio_network_interfaces() {
|
|
|
|
assert!(exec_host_command_status("sudo ip link del vfio-br0").success());
|
|
|
|
assert!(exec_host_command_status("sudo ip link del vfio-tap0").success());
|
|
|
|
assert!(exec_host_command_status("sudo ip link del vfio-tap1").success());
|
|
|
|
assert!(exec_host_command_status("sudo ip link del vfio-tap2").success());
|
|
|
|
assert!(exec_host_command_status("sudo ip link del vfio-tap3").success());
|
|
|
|
}
|
|
|
|
|
|
|
|
fn balloon_size(api_socket: &str) -> u64 {
|
|
|
|
let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "info", None);
|
|
|
|
assert!(cmd_success);
|
|
|
|
|
|
|
|
let info: serde_json::Value = serde_json::from_slice(&cmd_output).unwrap_or_default();
|
|
|
|
let total_mem = &info["config"]["memory"]["size"]
|
|
|
|
.to_string()
|
|
|
|
.parse::<u64>()
|
|
|
|
.unwrap();
|
|
|
|
let actual_mem = &info["memory_actual_size"]
|
|
|
|
.to_string()
|
|
|
|
.parse::<u64>()
|
|
|
|
.unwrap();
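// The current balloon size is the configured guest memory minus the memory
// actually exposed to the guest right now.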
|
|
|
|
total_mem - actual_mem
|
|
|
|
}
|
|
|
|
|
2023-07-17 13:57:59 +00:00
|
|
|
fn vm_state(api_socket: &str) -> String {
|
|
|
|
let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "info", None);
|
|
|
|
assert!(cmd_success);
|
|
|
|
|
|
|
|
let info: serde_json::Value = serde_json::from_slice(&cmd_output).unwrap_or_default();
|
|
|
|
let state = &info["state"].as_str().unwrap();
|
|
|
|
|
|
|
|
state.to_string()
|
|
|
|
}
|
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// This test first validates that the guest can find the virtio-iommu device.
|
|
|
|
// It also verifies that both disks and the network card are attached to
|
|
|
|
// the virtual IOMMU by looking at /sys/kernel/iommu_groups directory.
|
|
|
|
// The last interesting part of this test is that it exercises the network
|
|
|
|
// interface attached to the virtual IOMMU since this is the one used to
|
|
|
|
// send all commands through SSH.
|
|
|
|
fn _test_virtio_iommu(acpi: bool) {
|
|
|
|
// Virtio-iommu support is ready in recent kernels (v5.14). However, the kernel in
|
|
|
|
// the Focal image is still old.
|
|
|
|
// So if ACPI is enabled on AArch64, we use a modified Focal image in which
|
|
|
|
// the kernel binary has been updated.
|
|
|
|
#[cfg(target_arch = "aarch64")]
|
|
|
|
let focal_image = FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string();
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
let focal_image = FOCAL_IMAGE_NAME.to_string();
|
|
|
|
let focal = UbuntuDiskConfig::new(focal_image);
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
let kernel_path = direct_kernel_boot_path();
|
|
|
|
#[cfg(target_arch = "aarch64")]
|
|
|
|
let kernel_path = if acpi {
|
|
|
|
edk2_path()
|
|
|
|
} else {
|
|
|
|
direct_kernel_boot_path()
|
|
|
|
};
|
|
|
|
|
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", kernel_path.to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
|
|
|
.args([
|
2022-01-06 22:24:38 +00:00
|
|
|
"--disk",
|
|
|
|
format!(
|
|
|
|
"path={},iommu=on",
|
|
|
|
guest.disk_config.disk(DiskType::OperatingSystem).unwrap()
|
|
|
|
)
|
|
|
|
.as_str(),
|
|
|
|
format!(
|
|
|
|
"path={},iommu=on",
|
|
|
|
guest.disk_config.disk(DiskType::CloudInit).unwrap()
|
|
|
|
)
|
|
|
|
.as_str(),
|
|
|
|
])
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--net", guest.default_net_string_w_iommu().as_str()])
|
2022-01-06 22:24:38 +00:00
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
|
|
|
|
// Verify the virtio-iommu device is present.
|
|
|
|
assert!(guest
|
|
|
|
.does_device_vendor_pair_match("0x1057", "0x1af4")
|
|
|
|
.unwrap_or_default());
|
|
|
|
|
|
|
|
// On AArch64, if the guest system boots from FDT, the IOMMU behavior is a bit
|
|
|
|
// different from that with ACPI.
|
|
|
|
// All devices on the PCI bus will be attached to the virtual IOMMU, except the
|
|
|
|
// virtio-iommu device itself. So these devices will all be added to IOMMU groups,
|
|
|
|
// and appear under folder '/sys/kernel/iommu_groups/'.
|
|
|
|
// The result is, in the case of FDT, IOMMU group '0' contains "0000:00:01.0"
|
|
|
|
// which is the console. The first disk "0000:00:02.0" is in group '1'.
|
|
|
|
// With ACPI, the console device is not attached to the IOMMU, so IOMMU group '0'
|
|
|
|
// contains "0000:00:02.0" which is the first disk.
|
|
|
|
//
|
|
|
|
// Verify the iommu group of the first disk.
|
2022-11-01 21:52:40 +00:00
|
|
|
let iommu_group = !acpi as i32;
|
2022-01-06 22:24:38 +00:00
|
|
|
assert_eq!(
|
|
|
|
guest
|
2022-12-14 11:41:15 +00:00
|
|
|
.ssh_command(format!("ls /sys/kernel/iommu_groups/{iommu_group}/devices").as_str())
|
2022-01-06 22:24:38 +00:00
|
|
|
.unwrap()
|
|
|
|
.trim(),
|
|
|
|
"0000:00:02.0"
|
|
|
|
);
|
|
|
|
|
|
|
|
// Verify the iommu group of the second disk.
|
|
|
|
let iommu_group = if acpi { 1 } else { 2 };
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
2022-12-14 11:41:15 +00:00
|
|
|
.ssh_command(format!("ls /sys/kernel/iommu_groups/{iommu_group}/devices").as_str())
|
2022-01-06 22:24:38 +00:00
|
|
|
.unwrap()
|
|
|
|
.trim(),
|
|
|
|
"0000:00:03.0"
|
|
|
|
);
|
|
|
|
|
|
|
|
// Verify the iommu group of the network card.
|
|
|
|
let iommu_group = if acpi { 2 } else { 3 };
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
2022-12-14 11:41:15 +00:00
|
|
|
.ssh_command(format!("ls /sys/kernel/iommu_groups/{iommu_group}/devices").as_str())
|
2022-01-06 22:24:38 +00:00
|
|
|
.unwrap()
|
|
|
|
.trim(),
|
|
|
|
"0000:00:04.0"
|
|
|
|
);
|
|
|
|
});
|
|
|
|
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
|
|
|
|
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
|
|
|
|
2022-09-06 22:44:13 +00:00
|
|
|
fn get_reboot_count(guest: &Guest) -> u32 {
|
|
|
|
guest
|
2022-10-12 23:39:51 +00:00
|
|
|
.ssh_command("sudo last | grep -c reboot")
|
2022-04-26 04:17:20 +00:00
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
2022-09-06 22:44:13 +00:00
|
|
|
.unwrap_or_default()
|
2022-04-26 04:17:20 +00:00
|
|
|
}
|
|
|
|
|
2022-09-06 22:44:13 +00:00
|
|
|
fn enable_guest_watchdog(guest: &Guest, watchdog_sec: u32) {
|
|
|
|
// Check for PCI device
|
|
|
|
assert!(guest
|
|
|
|
.does_device_vendor_pair_match("0x1063", "0x1af4")
|
|
|
|
.unwrap_or_default());
|
2022-04-26 04:17:20 +00:00
|
|
|
|
2022-09-06 22:44:13 +00:00
|
|
|
// Enable systemd watchdog
|
|
|
|
guest
|
|
|
|
.ssh_command(&format!(
|
2022-12-14 11:41:15 +00:00
|
|
|
"echo RuntimeWatchdogSec={watchdog_sec}s | sudo tee -a /etc/systemd/system.conf"
|
2022-09-06 22:44:13 +00:00
|
|
|
))
|
|
|
|
.unwrap();
|
2022-04-26 04:17:20 +00:00
|
|
|
}
|
|
|
|
|
2023-06-15 13:00:57 +00:00
|
|
|
fn make_guest_panic(guest: &Guest) {
|
|
|
|
// Check for pvpanic device
|
|
|
|
assert!(guest
|
|
|
|
.does_device_vendor_pair_match("0x0011", "0x1b36")
|
|
|
|
.unwrap_or_default());
|
|
|
|
|
|
|
|
// Trigger a panic in the guest
|
|
|
|
guest.ssh_command("screen -dmS reboot sh -c \"sleep 5; echo s | tee /proc/sysrq-trigger; echo c | sudo tee /proc/sysrq-trigger\"").unwrap();
|
|
|
|
}
|
|
|
|
|
2022-08-16 18:42:28 +00:00
|
|
|
mod common_parallel {
|
2023-03-30 20:47:21 +00:00
|
|
|
use std::{fs::OpenOptions, io::SeekFrom};
|
2022-01-06 22:24:38 +00:00
|
|
|
|
|
|
|
use crate::*;
|
|
|
|
|
2022-03-29 13:09:38 +00:00
|
|
|
#[test]
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
fn test_focal_hypervisor_fw() {
|
|
|
|
test_simple_launch(fw_path(FwType::RustHypervisorFirmware), FOCAL_IMAGE_NAME)
|
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
fn test_focal_ovmf() {
|
|
|
|
test_simple_launch(fw_path(FwType::Ovmf), FOCAL_IMAGE_NAME)
|
|
|
|
}
|
|
|
|
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
fn test_simple_launch(fw_path: String, disk_path: &str) {
|
|
|
|
let disk_config = Box::new(UbuntuDiskConfig::new(disk_path.to_string()));
|
|
|
|
let guest = Guest::new(disk_config);
|
2022-05-12 03:14:06 +00:00
|
|
|
let event_path = temp_event_monitor_path(&guest.tmp_dir);
|
2022-03-29 13:09:38 +00:00
|
|
|
|
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", fw_path.as_str()])
|
2022-03-29 13:09:38 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--serial", "tty", "--console", "off"])
|
2022-12-14 11:41:15 +00:00
|
|
|
.args(["--event-monitor", format!("path={event_path}").as_str()])
|
2022-03-29 13:09:38 +00:00
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(Some(120)).unwrap();
|
|
|
|
|
|
|
|
assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1);
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
|
|
|
|
assert_eq!(guest.get_pci_bridge_class().unwrap_or_default(), "0x060000");
|
2022-05-12 03:14:06 +00:00
|
|
|
|
|
|
|
let expected_sequential_events = [
|
|
|
|
&MetaEvent {
|
|
|
|
event: "starting".to_string(),
|
|
|
|
device_id: None,
|
|
|
|
},
|
|
|
|
&MetaEvent {
|
|
|
|
event: "booting".to_string(),
|
|
|
|
device_id: None,
|
|
|
|
},
|
|
|
|
&MetaEvent {
|
|
|
|
event: "booted".to_string(),
|
|
|
|
device_id: None,
|
|
|
|
},
|
|
|
|
&MetaEvent {
|
|
|
|
event: "activated".to_string(),
|
|
|
|
device_id: Some("_disk0".to_string()),
|
|
|
|
},
|
|
|
|
&MetaEvent {
|
|
|
|
event: "reset".to_string(),
|
|
|
|
device_id: Some("_disk0".to_string()),
|
|
|
|
},
|
|
|
|
];
|
|
|
|
assert!(check_sequential_events(
|
|
|
|
&expected_sequential_events,
|
|
|
|
&event_path
|
|
|
|
));
|
|
|
|
|
2022-08-05 08:57:41 +00:00
|
|
|
// It's been observed on the Bionic image that udev and snapd
|
|
|
|
// services can cause some delay in the VM's shutdown. Disabling
|
|
|
|
// them improves the reliability of this test.
|
|
|
|
let _ = guest.ssh_command("sudo systemctl disable udev");
|
|
|
|
let _ = guest.ssh_command("sudo systemctl stop udev");
|
|
|
|
let _ = guest.ssh_command("sudo systemctl disable snapd");
|
|
|
|
let _ = guest.ssh_command("sudo systemctl stop snapd");
|
|
|
|
|
2022-05-12 03:14:06 +00:00
|
|
|
guest.ssh_command("sudo poweroff").unwrap();
|
2022-11-23 15:57:23 +00:00
|
|
|
thread::sleep(std::time::Duration::new(20, 0));
|
2022-05-12 03:14:06 +00:00
|
|
|
let latest_events = [
|
|
|
|
&MetaEvent {
|
|
|
|
event: "shutdown".to_string(),
|
|
|
|
device_id: None,
|
|
|
|
},
|
|
|
|
&MetaEvent {
|
|
|
|
event: "deleted".to_string(),
|
|
|
|
device_id: None,
|
|
|
|
},
|
|
|
|
&MetaEvent {
|
|
|
|
event: "shutdown".to_string(),
|
|
|
|
device_id: None,
|
|
|
|
},
|
|
|
|
];
|
|
|
|
assert!(check_latest_events_exact(&latest_events, &event_path));
|
2022-03-29 13:09:38 +00:00
|
|
|
});
|
|
|
|
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
|
|
|
|
|
|
|
handle_child_output(r, &output);
|
2022-01-06 22:24:38 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
fn test_multi_cpu() {
|
2022-11-23 17:14:01 +00:00
|
|
|
let jammy_image = JAMMY_IMAGE_NAME.to_string();
|
|
|
|
let jammy = UbuntuDiskConfig::new(jammy_image);
|
|
|
|
let guest = Guest::new(Box::new(jammy));
|
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut cmd = GuestCommand::new(&guest);
|
2022-09-20 08:46:19 +00:00
|
|
|
cmd.args(["--cpus", "boot=2,max=4"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
2020-08-12 11:20:14 +00:00
|
|
|
.capture_output()
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net();
|
|
|
|
|
|
|
|
let mut child = cmd.spawn().unwrap();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2020-08-12 11:20:14 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
2022-01-06 22:24:38 +00:00
|
|
|
guest.wait_vm_boot(Some(120)).unwrap();
|
|
|
|
|
|
|
|
assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2);
|
2020-10-27 05:37:25 +00:00
|
|
|
|
2020-08-12 11:20:14 +00:00
|
|
|
assert_eq!(
|
2021-06-03 10:39:57 +00:00
|
|
|
guest
|
2022-11-23 17:14:01 +00:00
|
|
|
.ssh_command(
|
|
|
|
r#"sudo dmesg | grep "smp: Brought up" | sed "s/\[\ *[0-9.]*\] //""#
|
|
|
|
)
|
2021-06-03 10:39:57 +00:00
|
|
|
.unwrap()
|
|
|
|
.trim(),
|
2022-01-06 22:24:38 +00:00
|
|
|
"smp: Brought up 1 node, 2 CPUs"
|
2020-07-03 08:00:12 +00:00
|
|
|
);
|
2020-08-12 11:20:14 +00:00
|
|
|
});
|
2020-03-23 17:59:12 +00:00
|
|
|
|
2020-08-12 11:20:14 +00:00
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2020-03-23 17:59:12 +00:00
|
|
|
|
2020-08-12 11:20:14 +00:00
|
|
|
handle_child_output(r, &output);
|
2020-07-03 08:00:12 +00:00
|
|
|
}
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_cpu_topology_421() {
|
|
|
|
test_cpu_topology(4, 2, 1, false);
|
2020-02-25 09:42:15 +00:00
|
|
|
}
|
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_cpu_topology_142() {
|
|
|
|
test_cpu_topology(1, 4, 2, false);
|
|
|
|
}
|
2020-04-20 12:54:59 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_cpu_topology_262() {
|
|
|
|
test_cpu_topology(2, 6, 2, false);
|
|
|
|
}
|
2020-04-20 12:54:59 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
#[cfg(not(feature = "mshv"))]
|
|
|
|
fn test_cpu_physical_bits() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
let max_phys_bits: u8 = 36;
|
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-12-14 11:41:15 +00:00
|
|
|
.args(["--cpus", &format!("max_phys_bits={max_phys_bits}")])
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2020-08-12 11:20:14 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
2020-10-27 05:37:25 +00:00
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
assert!(
|
|
|
|
guest
|
|
|
|
.ssh_command("lscpu | grep \"Address sizes:\" | cut -f 2 -d \":\" | sed \"s# *##\" | cut -f 1 -d \" \"")
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u8>()
|
|
|
|
.unwrap_or(max_phys_bits + 1) <= max_phys_bits,
|
2020-07-03 08:00:12 +00:00
|
|
|
);
|
2020-08-12 11:20:14 +00:00
|
|
|
});
|
2020-05-12 11:14:03 +00:00
|
|
|
|
2020-08-12 11:20:14 +00:00
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2020-05-12 11:14:03 +00:00
|
|
|
|
2020-08-12 11:20:14 +00:00
|
|
|
handle_child_output(r, &output);
|
2020-02-25 09:42:15 +00:00
|
|
|
}
|
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_cpu_affinity() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// We need the host to have at least 4 CPUs if we want to be able
|
|
|
|
// to run this test.
|
|
|
|
let host_cpus_count = exec_host_command_output("nproc");
|
|
|
|
assert!(
|
|
|
|
String::from_utf8_lossy(&host_cpus_count.stdout)
|
|
|
|
.trim()
|
2023-07-06 08:41:05 +00:00
|
|
|
.parse::<u16>()
|
2022-01-06 22:24:38 +00:00
|
|
|
.unwrap_or(0)
|
|
|
|
>= 4
|
|
|
|
);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=2,affinity=[0@[0,2],1@[1,3]]"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
2020-08-12 11:20:14 +00:00
|
|
|
.default_disks()
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_net()
|
2020-09-25 03:44:52 +00:00
|
|
|
.capture_output()
|
2020-08-12 11:20:14 +00:00
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2020-04-15 08:16:17 +00:00
|
|
|
|
2021-04-08 22:10:12 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
2022-01-06 22:24:38 +00:00
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
let pid = child.id();
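// The shell pipeline below lists the VMM's threads (ps -T), picks the one named
// 'vcpuN', extracts its TID and asks taskset for that thread's CPU affinity list,
// which should match the affinity requested on the command line above.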
|
2022-12-14 11:41:15 +00:00
|
|
|
let taskset_vcpu0 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep vcpu0 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str());
|
2022-01-06 22:24:38 +00:00
|
|
|
assert_eq!(String::from_utf8_lossy(&taskset_vcpu0.stdout).trim(), "0,2");
|
2022-12-14 11:41:15 +00:00
|
|
|
let taskset_vcpu1 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep vcpu1 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str());
|
2022-01-06 22:24:38 +00:00
|
|
|
assert_eq!(String::from_utf8_lossy(&taskset_vcpu1.stdout).trim(), "1,3");
|
2021-04-08 22:10:12 +00:00
|
|
|
});
|
2020-04-15 08:16:17 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2024-01-25 00:37:47 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
fn test_virtio_queue_affinity() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
|
|
|
|
// We need the host to have at least 4 CPUs if we want to be able
|
|
|
|
// to run this test.
|
|
|
|
let host_cpus_count = exec_host_command_output("nproc");
|
|
|
|
assert!(
|
|
|
|
String::from_utf8_lossy(&host_cpus_count.stdout)
|
|
|
|
.trim()
|
|
|
|
.parse::<u16>()
|
|
|
|
.unwrap_or(0)
|
|
|
|
>= 4
|
|
|
|
);
|
2020-04-15 08:16:17 +00:00
|
|
|
|
2024-01-25 00:37:47 +00:00
|
|
|
let mut child = GuestCommand::new(&guest)
|
|
|
|
.args(["--cpus", "boot=4"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
|
|
|
.args([
|
|
|
|
"--disk",
|
|
|
|
format!(
|
|
|
|
"path={}",
|
|
|
|
guest.disk_config.disk(DiskType::OperatingSystem).unwrap()
|
|
|
|
)
|
|
|
|
.as_str(),
|
|
|
|
format!(
|
|
|
|
"path={},num_queues=4,queue_affinity=[0@[0,2],1@[1,3],2@[1],3@[3]]",
|
|
|
|
guest.disk_config.disk(DiskType::CloudInit).unwrap()
|
|
|
|
)
|
|
|
|
.as_str(),
|
|
|
|
])
|
|
|
|
.default_net()
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
let pid = child.id();
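// Same approach as in test_cpu_affinity, but for the per-queue worker threads
// (named 'disk1_qN'): their affinity should match the queue_affinity mapping above.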
|
|
|
|
let taskset_q0 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep disk1_q0 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str());
|
|
|
|
assert_eq!(String::from_utf8_lossy(&taskset_q0.stdout).trim(), "0,2");
|
|
|
|
let taskset_q1 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep disk1_q1 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str());
|
|
|
|
assert_eq!(String::from_utf8_lossy(&taskset_q1.stdout).trim(), "1,3");
|
|
|
|
let taskset_q2 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep disk1_q2 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str());
|
|
|
|
assert_eq!(String::from_utf8_lossy(&taskset_q2.stdout).trim(), "1");
|
|
|
|
let taskset_q3 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep disk1_q3 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str());
|
|
|
|
assert_eq!(String::from_utf8_lossy(&taskset_q3.stdout).trim(), "3");
|
|
|
|
});
|
|
|
|
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
#[cfg(not(feature = "mshv"))]
|
|
|
|
fn test_large_vm() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
let mut cmd = GuestCommand::new(&guest);
|
2022-09-20 08:46:19 +00:00
|
|
|
cmd.args(["--cpus", "boot=48"])
|
|
|
|
.args(["--memory", "size=5120M"])
|
|
|
|
.args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
|
|
|
.args(["--serial", "tty"])
|
|
|
|
.args(["--console", "off"])
|
2020-08-12 11:20:14 +00:00
|
|
|
.capture_output()
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net();
|
|
|
|
|
|
|
|
let mut child = cmd.spawn().unwrap();
|
|
|
|
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
2020-03-19 10:48:17 +00:00
|
|
|
|
2020-08-12 11:20:14 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
2022-01-06 22:24:38 +00:00
|
|
|
assert_eq!(guest.get_cpu_count().unwrap_or_default(), 48);
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("lscpu | grep \"On-line\" | cut -f 2 -d \":\" | sed \"s# *##\"")
|
|
|
|
.unwrap()
|
|
|
|
.trim(),
|
|
|
|
"0-47"
|
|
|
|
);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 5_000_000);
|
2020-07-03 08:00:12 +00:00
|
|
|
});
|
2020-08-12 11:20:14 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2020-08-12 11:20:14 +00:00
|
|
|
|
|
|
|
handle_child_output(r, &output);
|
2020-07-03 08:00:12 +00:00
|
|
|
}
|
2020-06-30 16:59:25 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
#[cfg(not(feature = "mshv"))]
|
|
|
|
fn test_huge_memory() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
let mut cmd = GuestCommand::new(&guest);
|
2022-09-20 08:46:19 +00:00
|
|
|
cmd.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=128G"])
|
|
|
|
.args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
2022-01-06 22:24:38 +00:00
|
|
|
.capture_output()
|
|
|
|
.default_disks()
|
|
|
|
.default_net();
|
2020-03-19 10:48:17 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = cmd.spawn().unwrap();
|
2020-03-19 10:48:17 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
guest.wait_vm_boot(Some(120)).unwrap();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 128_000_000);
|
|
|
|
});
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
2020-07-03 08:00:12 +00:00
|
|
|
}
|
2020-03-19 10:48:17 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_power_button() {
|
|
|
|
_test_power_button(false);
|
2020-02-25 09:42:15 +00:00
|
|
|
}
|
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
#[cfg(not(feature = "mshv"))]
|
|
|
|
fn test_user_defined_memory_regions() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
let api_socket = temp_api_path(&guest.tmp_dir);
|
|
|
|
|
|
|
|
let kernel_path = direct_kernel_boot_path();
|
|
|
|
|
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=0,hotplug_method=virtio-mem"])
|
|
|
|
.args([
|
2022-01-06 22:24:38 +00:00
|
|
|
"--memory-zone",
|
|
|
|
"id=mem0,size=1G,hotplug_size=2G",
|
2023-03-31 16:52:41 +00:00
|
|
|
"id=mem1,size=1G,shared=on",
|
2022-01-06 22:24:38 +00:00
|
|
|
"id=mem2,size=1G,host_numa_node=0,hotplug_size=2G",
|
|
|
|
])
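// Three zones are defined: mem0 is hotpluggable up to 2G, mem1 is backed by
// shared memory, and mem2 is bound to host NUMA node 0 and also hotpluggable.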
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--kernel", kernel_path.to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
|
|
|
.args(["--api-socket", &api_socket])
|
2022-01-06 22:24:38 +00:00
|
|
|
.capture_output()
|
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 2_880_000);
|
|
|
|
|
|
|
|
guest.enable_memory_hotplug();
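// resize_zone_command() sends a resize-zone request over the API socket so
// that virtio-mem grows or shrinks the given zone; the total-memory
// assertions below use conservative bounds rather than exact values.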
|
|
|
|
|
|
|
|
resize_zone_command(&api_socket, "mem0", "3G");
|
|
|
|
thread::sleep(std::time::Duration::new(5, 0));
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 4_800_000);
|
|
|
|
resize_zone_command(&api_socket, "mem2", "3G");
|
|
|
|
thread::sleep(std::time::Duration::new(5, 0));
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 6_720_000);
|
|
|
|
resize_zone_command(&api_socket, "mem0", "2G");
|
|
|
|
thread::sleep(std::time::Duration::new(5, 0));
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000);
|
|
|
|
resize_zone_command(&api_socket, "mem2", "2G");
|
|
|
|
thread::sleep(std::time::Duration::new(5, 0));
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 4_800_000);
|
|
|
|
|
|
|
|
guest.reboot_linux(0, None);
|
|
|
|
|
|
|
|
// Check the amount of RAM after reboot
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 4_800_000);
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() < 5_760_000);
|
|
|
|
|
|
|
|
// Check if we can still resize down to the initial 'boot' size
|
|
|
|
resize_zone_command(&api_socket, "mem0", "1G");
|
|
|
|
thread::sleep(std::time::Duration::new(5, 0));
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() < 4_800_000);
|
|
|
|
resize_zone_command(&api_socket, "mem2", "1G");
|
|
|
|
thread::sleep(std::time::Duration::new(5, 0));
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() < 3_840_000);
|
2021-01-14 03:03:53 +00:00
|
|
|
});
|
2022-01-06 22:24:38 +00:00
|
|
|
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
|
|
|
|
|
|
|
handle_child_output(r, &output);
|
2021-01-14 03:03:53 +00:00
|
|
|
}
|
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
#[cfg(not(feature = "mshv"))]
|
|
|
|
fn test_guest_numa_nodes() {
|
|
|
|
_test_guest_numa_nodes(false);
|
|
|
|
}
|
|
|
|
|
2022-02-11 10:26:15 +00:00
|
|
|
#[test]
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
fn test_iommu_segments() {
|
|
|
|
let focal_image = FOCAL_IMAGE_NAME.to_string();
|
|
|
|
let focal = UbuntuDiskConfig::new(focal_image);
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
|
|
|
|
// Prepare another disk file for the virtio-disk device
|
|
|
|
let test_disk_path = String::from(
|
|
|
|
guest
|
|
|
|
.tmp_dir
|
|
|
|
.as_path()
|
|
|
|
.join("test-disk.raw")
|
|
|
|
.to_str()
|
|
|
|
.unwrap(),
|
|
|
|
);
|
|
|
|
assert!(
|
2022-12-14 11:41:15 +00:00
|
|
|
exec_host_command_status(format!("truncate {test_disk_path} -s 4M").as_str()).success()
|
2022-02-11 10:26:15 +00:00
|
|
|
);
|
2022-12-14 11:41:15 +00:00
|
|
|
assert!(exec_host_command_status(format!("mkfs.ext4 {test_disk_path}").as_str()).success());
|
2022-02-11 10:26:15 +00:00
|
|
|
|
|
|
|
let api_socket = temp_api_path(&guest.tmp_dir);
|
|
|
|
let mut cmd = GuestCommand::new(&guest);
|
|
|
|
|
2022-09-20 08:46:19 +00:00
|
|
|
cmd.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--api-socket", &api_socket])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
2023-04-26 01:20:18 +00:00
|
|
|
.args([
|
|
|
|
"--platform",
|
|
|
|
&format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS},iommu_segments=[1]"),
|
|
|
|
])
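// iommu_segments=[1] places PCI segment 1 behind the virtual IOMMU, so the
// disk hot-added below with pci_segment=1,iommu=on should end up in an
// IOMMU group.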
|
2022-02-11 10:26:15 +00:00
|
|
|
.default_disks()
|
|
|
|
.capture_output()
|
|
|
|
.default_net();
|
|
|
|
|
|
|
|
let mut child = cmd.spawn().unwrap();
|
|
|
|
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
let (cmd_success, cmd_output) = remote_command_w_output(
|
|
|
|
&api_socket,
|
|
|
|
"add-disk",
|
2022-03-18 16:33:08 +00:00
|
|
|
Some(
|
|
|
|
format!(
|
|
|
|
"path={},id=test0,pci_segment=1,iommu=on",
|
|
|
|
test_disk_path.as_str()
|
|
|
|
)
|
|
|
|
.as_str(),
|
|
|
|
),
|
2022-02-11 10:26:15 +00:00
|
|
|
);
|
|
|
|
assert!(cmd_success);
|
|
|
|
assert!(String::from_utf8_lossy(&cmd_output)
|
|
|
|
.contains("{\"id\":\"test0\",\"bdf\":\"0001:00:01.0\"}"));
|
|
|
|
|
|
|
|
// Check IOMMU setup
|
|
|
|
assert!(guest
|
|
|
|
.does_device_vendor_pair_match("0x1057", "0x1af4")
|
|
|
|
.unwrap_or_default());
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("ls /sys/kernel/iommu_groups/0/devices")
|
|
|
|
.unwrap()
|
|
|
|
.trim(),
|
|
|
|
"0001:00:01.0"
|
|
|
|
);
|
|
|
|
});
|
|
|
|
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
|
|
|
|
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_pci_msi() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
2021-09-23 01:04:08 +00:00
|
|
|
let guest = Guest::new(Box::new(focal));
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut cmd = GuestCommand::new(&guest);
|
2022-09-20 08:46:19 +00:00
|
|
|
cmd.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
2022-01-06 22:24:38 +00:00
|
|
|
.capture_output()
|
|
|
|
.default_disks()
|
|
|
|
.default_net();
|
|
|
|
|
|
|
|
let mut child = cmd.spawn().unwrap();
|
|
|
|
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
2021-09-23 01:04:08 +00:00
|
|
|
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
2022-01-06 22:24:38 +00:00
|
|
|
let grep_cmd = "grep -c PCI-MSI /proc/interrupts";
|
2021-09-23 01:04:08 +00:00
|
|
|
#[cfg(target_arch = "aarch64")]
|
2022-01-06 22:24:38 +00:00
|
|
|
let grep_cmd = "grep -c ITS-MSI /proc/interrupts";
|
2021-09-23 01:04:08 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command(grep_cmd)
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or_default(),
|
|
|
|
12
|
|
|
|
);
|
|
|
|
});
|
|
|
|
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
|
|
|
|
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
|
|
|
|
2022-03-10 09:09:00 +00:00
|
|
|
#[test]
|
|
|
|
fn test_virtio_net_ctrl_queue() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
let mut cmd = GuestCommand::new(&guest);
|
2022-09-20 08:46:19 +00:00
|
|
|
cmd.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
2022-09-21 09:56:24 +00:00
|
|
|
.args(["--net", guest.default_net_string_w_mtu(3000).as_str()])
|
2022-03-10 09:09:00 +00:00
|
|
|
.capture_output()
|
2022-09-21 09:56:24 +00:00
|
|
|
.default_disks();
|
2022-03-10 09:09:00 +00:00
|
|
|
|
|
|
|
let mut child = cmd.spawn().unwrap();
|
|
|
|
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
|
|
|
|
#[cfg(target_arch = "aarch64")]
|
|
|
|
let iface = "enp0s4";
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
let iface = "ens4";
|
|
|
|
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command(
|
2022-12-14 11:41:15 +00:00
|
|
|
format!("sudo ethtool -K {iface} rx-gro-hw off && echo success").as_str()
|
2022-03-10 09:09:00 +00:00
|
|
|
)
|
|
|
|
.unwrap()
|
|
|
|
.trim(),
|
|
|
|
"success"
|
|
|
|
);
|
2022-09-21 09:56:24 +00:00
|
|
|
assert_eq!(
|
|
|
|
guest
|
2022-12-14 11:41:15 +00:00
|
|
|
.ssh_command(format!("cat /sys/class/net/{iface}/mtu").as_str())
|
2022-09-21 09:56:24 +00:00
|
|
|
.unwrap()
|
|
|
|
.trim(),
|
|
|
|
"3000"
|
|
|
|
);
|
2022-03-10 09:09:00 +00:00
|
|
|
});
|
|
|
|
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
|
|
|
|
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_pci_multiple_segments() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
|
|
|
|
// Prepare another disk file for the virtio-disk device
|
|
|
|
let test_disk_path = String::from(
|
|
|
|
guest
|
|
|
|
.tmp_dir
|
|
|
|
.as_path()
|
|
|
|
.join("test-disk.raw")
|
|
|
|
.to_str()
|
|
|
|
.unwrap(),
|
|
|
|
);
|
|
|
|
assert!(
|
2022-12-14 11:41:15 +00:00
|
|
|
exec_host_command_status(format!("truncate {test_disk_path} -s 4M").as_str()).success()
|
2022-01-06 22:24:38 +00:00
|
|
|
);
|
2022-12-14 11:41:15 +00:00
|
|
|
assert!(exec_host_command_status(format!("mkfs.ext4 {test_disk_path}").as_str()).success());
|
2022-01-06 22:24:38 +00:00
|
|
|
|
|
|
|
let mut cmd = GuestCommand::new(&guest);
|
2022-09-20 08:46:19 +00:00
|
|
|
cmd.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
2023-04-26 01:20:18 +00:00
|
|
|
.args([
|
|
|
|
"--platform",
|
|
|
|
&format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"),
|
|
|
|
])
|
2022-09-20 08:46:19 +00:00
|
|
|
.args([
|
2021-09-23 01:04:08 +00:00
|
|
|
"--disk",
|
|
|
|
format!(
|
2022-01-06 22:24:38 +00:00
|
|
|
"path={}",
|
2021-09-23 01:04:08 +00:00
|
|
|
guest.disk_config.disk(DiskType::OperatingSystem).unwrap()
|
|
|
|
)
|
|
|
|
.as_str(),
|
|
|
|
format!(
|
2022-01-06 22:24:38 +00:00
|
|
|
"path={}",
|
2021-09-23 01:04:08 +00:00
|
|
|
guest.disk_config.disk(DiskType::CloudInit).unwrap()
|
|
|
|
)
|
|
|
|
.as_str(),
|
2022-12-14 11:41:15 +00:00
|
|
|
format!("path={test_disk_path},pci_segment=15").as_str(),
|
2021-09-23 01:04:08 +00:00
|
|
|
])
|
|
|
|
.capture_output()
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_net();
|
2021-09-23 01:04:08 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = cmd.spawn().unwrap();
|
2021-09-23 01:04:08 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
|
|
|
|
let grep_cmd = "lspci | grep \"Host bridge\" | wc -l";
|
2021-09-23 01:04:08 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
2023-04-26 01:20:18 +00:00
|
|
|
// There should be MAX_NUM_PCI_SEGMENTS PCI host bridges in the guest.
|
2021-09-23 01:04:08 +00:00
|
|
|
assert_eq!(
|
|
|
|
guest
|
2022-01-06 22:24:38 +00:00
|
|
|
.ssh_command(grep_cmd)
|
2021-09-23 01:04:08 +00:00
|
|
|
.unwrap()
|
2022-01-06 22:24:38 +00:00
|
|
|
.trim()
|
2023-04-26 01:20:18 +00:00
|
|
|
.parse::<u16>()
|
2022-01-06 22:24:38 +00:00
|
|
|
.unwrap_or_default(),
|
2023-04-26 01:20:18 +00:00
|
|
|
MAX_NUM_PCI_SEGMENTS
|
2021-09-23 01:04:08 +00:00
|
|
|
);
|
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check both that /dev/vdc exists and that the block size is 4M.
|
2021-09-23 01:04:08 +00:00
|
|
|
assert_eq!(
|
|
|
|
guest
|
2022-01-06 22:24:38 +00:00
|
|
|
.ssh_command("lsblk | grep vdc | grep -c 4M")
|
2021-09-23 01:04:08 +00:00
|
|
|
.unwrap()
|
2022-01-06 22:24:38 +00:00
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or_default(),
|
|
|
|
1
|
2021-09-23 01:04:08 +00:00
|
|
|
);
|
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Mount the device.
|
|
|
|
guest.ssh_command("mkdir mount_image").unwrap();
|
|
|
|
guest
|
|
|
|
.ssh_command("sudo mount -o rw -t ext4 /dev/vdc mount_image/")
|
|
|
|
.unwrap();
|
|
|
|
// Grant all users write permission.
|
|
|
|
guest.ssh_command("sudo chmod a+w mount_image/").unwrap();
|
|
|
|
|
|
|
|
// Write something to the device.
|
|
|
|
guest
|
|
|
|
.ssh_command("sudo echo \"bar\" >> mount_image/foo")
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
// Check the content of the block device. The file "foo" should
|
|
|
|
// contain "bar".
|
2021-09-23 01:04:08 +00:00
|
|
|
assert_eq!(
|
|
|
|
guest
|
2022-01-06 22:24:38 +00:00
|
|
|
.ssh_command("sudo cat mount_image/foo")
|
2021-09-23 01:04:08 +00:00
|
|
|
.unwrap()
|
|
|
|
.trim(),
|
2022-01-06 22:24:38 +00:00
|
|
|
"bar"
|
2021-09-23 01:04:08 +00:00
|
|
|
);
|
|
|
|
});
|
|
|
|
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
|
|
|
|
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
|
|
|
|
2023-10-21 00:45:53 +00:00
|
|
|
#[test]
|
2023-10-16 02:36:34 +00:00
|
|
|
fn test_pci_multiple_segments_numa_node() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
let api_socket = temp_api_path(&guest.tmp_dir);
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
let kernel_path = direct_kernel_boot_path();
|
|
|
|
#[cfg(target_arch = "aarch64")]
|
|
|
|
let kernel_path = edk2_path();
|
|
|
|
|
|
|
|
// Prepare another disk file for the virtio-disk device
|
|
|
|
let test_disk_path = String::from(
|
|
|
|
guest
|
|
|
|
.tmp_dir
|
|
|
|
.as_path()
|
|
|
|
.join("test-disk.raw")
|
|
|
|
.to_str()
|
|
|
|
.unwrap(),
|
|
|
|
);
|
|
|
|
assert!(
|
|
|
|
exec_host_command_status(format!("truncate {test_disk_path} -s 4M").as_str()).success()
|
|
|
|
);
|
|
|
|
assert!(exec_host_command_status(format!("mkfs.ext4 {test_disk_path}").as_str()).success());
|
|
|
|
const TEST_DISK_NODE: u16 = 1;
|
|
|
|
|
|
|
|
let mut child = GuestCommand::new(&guest)
|
|
|
|
.args(["--platform", "num_pci_segments=2"])
|
|
|
|
.args(["--cpus", "boot=2"])
|
|
|
|
.args(["--memory", "size=0"])
|
2023-10-21 00:45:53 +00:00
|
|
|
.args(["--memory-zone", "id=mem0,size=256M", "id=mem1,size=256M"])
|
2023-10-16 02:36:34 +00:00
|
|
|
.args([
|
|
|
|
"--numa",
|
2023-10-21 00:45:53 +00:00
|
|
|
"guest_numa_id=0,cpus=[0],distances=[1@20],memory_zones=mem0,pci_segments=[0]",
|
|
|
|
"guest_numa_id=1,cpus=[1],distances=[0@20],memory_zones=mem1,pci_segments=[1]",
|
2023-10-16 02:36:34 +00:00
|
|
|
])
|
|
|
|
.args(["--kernel", kernel_path.to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
|
|
|
.args(["--api-socket", &api_socket])
|
|
|
|
.capture_output()
|
|
|
|
.args([
|
|
|
|
"--disk",
|
|
|
|
format!(
|
|
|
|
"path={}",
|
|
|
|
guest.disk_config.disk(DiskType::OperatingSystem).unwrap()
|
|
|
|
)
|
|
|
|
.as_str(),
|
|
|
|
format!(
|
|
|
|
"path={}",
|
|
|
|
guest.disk_config.disk(DiskType::CloudInit).unwrap()
|
|
|
|
)
|
|
|
|
.as_str(),
|
|
|
|
format!("path={test_disk_path},pci_segment={TEST_DISK_NODE}").as_str(),
|
|
|
|
])
|
|
|
|
.default_net()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
let cmd = "cat /sys/block/vdc/device/../numa_node";
|
|
|
|
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command(cmd)
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u16>()
|
|
|
|
.unwrap_or_default(),
|
|
|
|
TEST_DISK_NODE
|
|
|
|
);
|
|
|
|
});
|
|
|
|
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
|
|
|
|
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_direct_kernel_boot() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
2021-07-26 15:51:14 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let kernel_path = direct_kernel_boot_path();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", kernel_path.to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2021-05-06 09:17:51 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
2020-06-05 13:54:37 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1);
|
2024-02-15 14:08:31 +00:00
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
|
|
|
|
|
|
|
|
let grep_cmd = if cfg!(target_arch = "x86_64") {
|
|
|
|
"grep -c PCI-MSI /proc/interrupts"
|
|
|
|
} else {
|
|
|
|
"grep -c ITS-MSI /proc/interrupts"
|
|
|
|
};
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command(grep_cmd)
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or_default(),
|
|
|
|
12
|
|
|
|
);
|
|
|
|
});
|
|
|
|
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
|
|
|
|
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
fn test_direct_kernel_boot_bzimage() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
|
|
|
|
let mut kernel_path = direct_kernel_boot_path();
|
|
|
|
// Replace the default kernel with the bzImage.
|
|
|
|
kernel_path.pop();
|
|
|
|
kernel_path.push("bzImage");
|
|
|
|
|
|
|
|
let mut child = GuestCommand::new(&guest)
|
|
|
|
.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", kernel_path.to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
|
|
|
|
assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1);
|
2022-01-06 22:24:38 +00:00
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
|
2020-08-26 14:10:47 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let grep_cmd = if cfg!(target_arch = "x86_64") {
|
|
|
|
"grep -c PCI-MSI /proc/interrupts"
|
|
|
|
} else {
|
|
|
|
"grep -c ITS-MSI /proc/interrupts"
|
|
|
|
};
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command(grep_cmd)
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or_default(),
|
|
|
|
12
|
|
|
|
);
|
|
|
|
});
|
2020-06-05 13:54:37 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2020-10-27 05:37:25 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2020-06-05 13:54:37 +00:00
|
|
|
|
2023-10-19 02:59:22 +00:00
|
|
|
fn _test_virtio_block(image_name: &str, disable_io_uring: bool, disable_aio: bool) {
|
2022-01-06 22:24:38 +00:00
|
|
|
let focal = UbuntuDiskConfig::new(image_name.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
2020-09-01 20:03:49 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut workload_path = dirs::home_dir().unwrap();
|
|
|
|
workload_path.push("workloads");
|
2020-09-01 20:03:49 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut blk_file_path = workload_path;
|
|
|
|
blk_file_path.push("blk.img");
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let kernel_path = direct_kernel_boot_path();
|
2020-05-19 06:44:01 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut cloud_child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=4"])
|
|
|
|
.args(["--memory", "size=512M,shared=on"])
|
|
|
|
.args(["--kernel", kernel_path.to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
|
|
|
.args([
|
2022-01-06 22:24:38 +00:00
|
|
|
"--disk",
|
|
|
|
format!(
|
|
|
|
"path={}",
|
|
|
|
guest.disk_config.disk(DiskType::OperatingSystem).unwrap()
|
|
|
|
)
|
|
|
|
.as_str(),
|
|
|
|
format!(
|
|
|
|
"path={}",
|
|
|
|
guest.disk_config.disk(DiskType::CloudInit).unwrap()
|
|
|
|
)
|
|
|
|
.as_str(),
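// The underscore-prefixed _disable_io_uring/_disable_aio options appear to be
// test-only knobs forcing the block backend to skip io_uring and/or AIO; the
// io_uring/aio/sync test variants below differ only in these two flags.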
|
|
|
|
format!(
|
2023-10-19 02:59:22 +00:00
|
|
|
"path={},readonly=on,direct=on,num_queues=4,_disable_io_uring={},_disable_aio={}",
|
2022-01-06 22:24:38 +00:00
|
|
|
blk_file_path.to_str().unwrap(),
|
2023-10-19 02:59:22 +00:00
|
|
|
disable_io_uring,
|
|
|
|
disable_aio,
|
2022-01-06 22:24:38 +00:00
|
|
|
)
|
|
|
|
.as_str(),
|
|
|
|
])
|
|
|
|
.default_net()
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2020-05-19 06:44:01 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
2021-08-05 05:54:43 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check both that /dev/vdc exists and that the block size is 16M.
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("lsblk | grep vdc | grep -c 16M")
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or_default(),
|
|
|
|
1
|
|
|
|
);
|
2020-10-13 08:55:47 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check both that /dev/vdc exists and that the block device is read-only.
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("lsblk | grep vdc | awk '{print $5}'")
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or_default(),
|
|
|
|
1
|
|
|
|
);
|
2020-10-27 05:37:25 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check if the number of queues is 4.
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("ls -ll /sys/block/vdc/mq | grep ^d | wc -l")
|
|
|
|
.unwrap()
|
2021-11-10 09:45:40 +00:00
|
|
|
.trim()
|
2022-01-06 22:24:38 +00:00
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or_default(),
|
|
|
|
4
|
2021-11-10 09:45:40 +00:00
|
|
|
);
|
2022-01-06 22:24:38 +00:00
|
|
|
});
|
2021-11-10 09:45:40 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = cloud_child.kill();
|
|
|
|
let output = cloud_child.wait_with_output().unwrap();
|
2021-11-10 09:45:40 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2021-11-10 09:45:40 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
2023-10-19 02:59:22 +00:00
|
|
|
fn test_virtio_block_io_uring() {
|
|
|
|
_test_virtio_block(FOCAL_IMAGE_NAME, false, true)
|
2022-01-06 22:24:38 +00:00
|
|
|
}
|
2021-11-10 09:45:40 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
2023-10-19 02:59:22 +00:00
|
|
|
fn test_virtio_block_aio() {
|
|
|
|
_test_virtio_block(FOCAL_IMAGE_NAME, true, false)
|
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
fn test_virtio_block_sync() {
|
|
|
|
_test_virtio_block(FOCAL_IMAGE_NAME, true, true)
|
2022-01-06 22:24:38 +00:00
|
|
|
}
|
2021-11-10 09:45:40 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_virtio_block_qcow2() {
|
2023-10-19 02:59:22 +00:00
|
|
|
_test_virtio_block(FOCAL_IMAGE_NAME_QCOW2, false, false)
|
2023-07-05 07:14:38 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
fn test_virtio_block_qcow2_backing_file() {
|
2023-10-19 02:59:22 +00:00
|
|
|
_test_virtio_block(FOCAL_IMAGE_NAME_QCOW2_BACKING_FILE, false, false)
|
2022-01-06 22:24:38 +00:00
|
|
|
}
|
2020-08-05 04:42:03 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_virtio_block_vhd() {
|
|
|
|
let mut workload_path = dirs::home_dir().unwrap();
|
|
|
|
workload_path.push("workloads");
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut raw_file_path = workload_path.clone();
|
|
|
|
let mut vhd_file_path = workload_path;
|
|
|
|
raw_file_path.push(FOCAL_IMAGE_NAME);
|
|
|
|
vhd_file_path.push(FOCAL_IMAGE_NAME_VHD);
|
|
|
|
|
|
|
|
// Generate VHD file from RAW file
|
|
|
|
std::process::Command::new("qemu-img")
|
|
|
|
.arg("convert")
|
|
|
|
.arg("-p")
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["-f", "raw"])
|
|
|
|
.args(["-O", "vpc"])
|
|
|
|
.args(["-o", "subformat=fixed"])
|
2022-01-06 22:24:38 +00:00
|
|
|
.arg(raw_file_path.to_str().unwrap())
|
|
|
|
.arg(vhd_file_path.to_str().unwrap())
|
|
|
|
.output()
|
|
|
|
.expect("Expect generating VHD image from RAW image");
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2023-10-19 02:59:22 +00:00
|
|
|
_test_virtio_block(FOCAL_IMAGE_NAME_VHD, false, false)
|
2022-01-06 22:24:38 +00:00
|
|
|
}
|
2021-06-16 08:02:55 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_virtio_block_vhdx() {
|
|
|
|
let mut workload_path = dirs::home_dir().unwrap();
|
|
|
|
workload_path.push("workloads");
|
2020-09-01 20:03:49 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut raw_file_path = workload_path.clone();
|
|
|
|
let mut vhdx_file_path = workload_path;
|
|
|
|
raw_file_path.push(FOCAL_IMAGE_NAME);
|
|
|
|
vhdx_file_path.push(FOCAL_IMAGE_NAME_VHDX);
|
|
|
|
|
|
|
|
// Generate dynamic VHDX file from RAW file
|
|
|
|
std::process::Command::new("qemu-img")
|
|
|
|
.arg("convert")
|
|
|
|
.arg("-p")
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["-f", "raw"])
|
|
|
|
.args(["-O", "vhdx"])
|
2022-01-06 22:24:38 +00:00
|
|
|
.arg(raw_file_path.to_str().unwrap())
|
|
|
|
.arg(vhdx_file_path.to_str().unwrap())
|
|
|
|
.output()
|
|
|
|
.expect("Expect generating dynamic VHDx image from RAW image");
|
2020-09-01 20:03:49 +00:00
|
|
|
|
2023-10-19 02:59:22 +00:00
|
|
|
_test_virtio_block(FOCAL_IMAGE_NAME_VHDX, false, false)
|
2022-01-06 22:24:38 +00:00
|
|
|
}
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_virtio_block_dynamic_vhdx_expand() {
|
|
|
|
const VIRTUAL_DISK_SIZE: u64 = 100 << 20;
|
|
|
|
const EMPTY_VHDX_FILE_SIZE: u64 = 8 << 20;
|
|
|
|
const FULL_VHDX_FILE_SIZE: u64 = 112 << 20;
|
|
|
|
const DYNAMIC_VHDX_NAME: &str = "dynamic.vhdx";
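// A freshly created dynamic VHDx only contains metadata (8 MiB here); after
// the guest writes the full 100 MiB of data, the file is expected to grow to
// the data size plus metadata overhead (112 MiB).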
|
2020-08-05 04:42:03 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut workload_path = dirs::home_dir().unwrap();
|
|
|
|
workload_path.push("workloads");
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut vhdx_file_path = workload_path;
|
|
|
|
vhdx_file_path.push(DYNAMIC_VHDX_NAME);
|
|
|
|
let vhdx_path = vhdx_file_path.to_str().unwrap();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Generate a 100 MiB dynamic VHDX file
|
|
|
|
std::process::Command::new("qemu-img")
|
|
|
|
.arg("create")
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["-f", "vhdx"])
|
2022-01-06 22:24:38 +00:00
|
|
|
.arg(vhdx_path)
|
|
|
|
.arg(VIRTUAL_DISK_SIZE.to_string())
|
|
|
|
.output()
|
|
|
|
.expect("Expect generating dynamic VHDx image from RAW image");
|
2020-09-01 20:03:49 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check that the file size matches the empty VHDx file size
|
|
|
|
assert_eq!(vhdx_image_size(vhdx_path), EMPTY_VHDX_FILE_SIZE);
|
2020-09-01 20:03:49 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
let kernel_path = direct_kernel_boot_path();
|
2020-06-30 16:59:25 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut cloud_child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", kernel_path.to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
|
|
|
.args([
|
2022-01-06 22:24:38 +00:00
|
|
|
"--disk",
|
|
|
|
format!(
|
|
|
|
"path={}",
|
|
|
|
guest.disk_config.disk(DiskType::OperatingSystem).unwrap()
|
|
|
|
)
|
|
|
|
.as_str(),
|
|
|
|
format!(
|
|
|
|
"path={}",
|
|
|
|
guest.disk_config.disk(DiskType::CloudInit).unwrap()
|
|
|
|
)
|
|
|
|
.as_str(),
|
2022-12-14 11:41:15 +00:00
|
|
|
format!("path={vhdx_path}").as_str(),
|
2022-01-06 22:24:38 +00:00
|
|
|
])
|
|
|
|
.default_net()
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2021-01-13 10:20:03 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
2020-09-11 08:41:23 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check both that /dev/vdc exists and that the block size is 100 MiB.
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("lsblk | grep vdc | grep -c 100M")
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or_default(),
|
|
|
|
1
|
|
|
|
);
|
2020-09-11 08:41:23 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Write 100 MB of data to the VHDx disk
|
|
|
|
guest
|
|
|
|
.ssh_command("sudo dd if=/dev/urandom of=/dev/vdc bs=1M count=100")
|
2020-09-11 08:41:23 +00:00
|
|
|
.unwrap();
|
2022-01-06 22:24:38 +00:00
|
|
|
});
|
2020-08-19 15:40:45 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check that the file size matches the expected expanded VHDx file size
|
|
|
|
assert_eq!(vhdx_image_size(vhdx_path), FULL_VHDX_FILE_SIZE);
|
2020-10-27 05:37:25 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = cloud_child.kill();
|
|
|
|
let output = cloud_child.wait_with_output().unwrap();
|
2020-09-11 08:41:23 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2020-09-11 08:41:23 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
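// Returns the current on-disk size of a VHDx file by seeking to its end;
// used above to observe how a dynamic VHDx grows as data is written.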
fn vhdx_image_size(disk_name: &str) -> u64 {
|
|
|
|
std::fs::File::open(disk_name)
|
|
|
|
.unwrap()
|
|
|
|
.seek(SeekFrom::End(0))
|
|
|
|
.unwrap()
|
|
|
|
}
|
2020-09-15 08:20:29 +00:00
|
|
|
|
2022-01-19 16:21:54 +00:00
|
|
|
#[test]
|
|
|
|
fn test_virtio_block_direct_and_firmware() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
|
|
|
|
// The OS disk must be copied to a location that is not backed by
|
|
|
|
// tmpfs, otherwise the syscall openat(2) with O_DIRECT simply fails
|
|
|
|
// with EINVAL because tmpfs doesn't support this flag.
|
|
|
|
let mut workloads_path = dirs::home_dir().unwrap();
|
|
|
|
workloads_path.push("workloads");
|
|
|
|
let os_dir = TempDir::new_in(workloads_path.as_path()).unwrap();
|
|
|
|
let mut os_path = os_dir.as_path().to_path_buf();
|
|
|
|
os_path.push("osdisk.img");
|
|
|
|
rate_limited_copy(
|
2022-11-01 21:52:40 +00:00
|
|
|
guest.disk_config.disk(DiskType::OperatingSystem).unwrap(),
|
2022-01-19 16:21:54 +00:00
|
|
|
os_path.as_path(),
|
|
|
|
)
|
|
|
|
.expect("copying of OS disk failed");
|
|
|
|
|
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()])
|
|
|
|
.args([
|
2022-01-19 16:21:54 +00:00
|
|
|
"--disk",
|
|
|
|
format!("path={},direct=on", os_path.as_path().to_str().unwrap()).as_str(),
|
|
|
|
format!(
|
|
|
|
"path={}",
|
|
|
|
guest.disk_config.disk(DiskType::CloudInit).unwrap()
|
|
|
|
)
|
|
|
|
.as_str(),
|
|
|
|
])
|
|
|
|
.default_net()
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
2022-01-21 09:04:25 +00:00
|
|
|
guest.wait_vm_boot(Some(120)).unwrap();
|
2022-01-19 16:21:54 +00:00
|
|
|
});
|
|
|
|
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
|
|
|
|
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_vhost_user_net_default() {
|
|
|
|
test_vhost_user_net(None, 2, &prepare_vhost_user_net_daemon, false, false)
|
|
|
|
}
|
2020-09-15 08:20:29 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_vhost_user_net_named_tap() {
|
|
|
|
test_vhost_user_net(
|
|
|
|
Some("mytap0"),
|
|
|
|
2,
|
|
|
|
&prepare_vhost_user_net_daemon,
|
|
|
|
false,
|
|
|
|
false,
|
|
|
|
)
|
|
|
|
}
|
2020-09-15 08:20:29 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_vhost_user_net_existing_tap() {
|
|
|
|
test_vhost_user_net(
|
|
|
|
Some("vunet-tap0"),
|
|
|
|
2,
|
|
|
|
&prepare_vhost_user_net_daemon,
|
|
|
|
false,
|
|
|
|
false,
|
|
|
|
)
|
|
|
|
}
|
2020-09-01 20:03:49 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_vhost_user_net_multiple_queues() {
|
|
|
|
test_vhost_user_net(None, 4, &prepare_vhost_user_net_daemon, false, false)
|
|
|
|
}
|
2020-09-01 20:03:49 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_vhost_user_net_tap_multiple_queues() {
|
|
|
|
test_vhost_user_net(
|
|
|
|
Some("vunet-tap1"),
|
|
|
|
4,
|
|
|
|
&prepare_vhost_user_net_daemon,
|
|
|
|
false,
|
|
|
|
false,
|
|
|
|
)
|
|
|
|
}
|
2020-08-19 15:40:45 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_vhost_user_net_host_mac() {
|
|
|
|
test_vhost_user_net(None, 2, &prepare_vhost_user_net_daemon, true, false)
|
|
|
|
}
|
2020-08-28 13:54:50 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_vhost_user_net_client_mode() {
|
|
|
|
test_vhost_user_net(None, 2, &prepare_vhost_user_net_daemon, false, true)
|
|
|
|
}
|
2020-08-28 08:36:43 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
2023-11-09 04:56:58 +00:00
|
|
|
#[cfg(not(target_arch = "aarch64"))]
|
2022-01-06 22:24:38 +00:00
|
|
|
fn test_vhost_user_blk_default() {
|
|
|
|
test_vhost_user_blk(2, false, false, Some(&prepare_vubd))
|
|
|
|
}
|
2020-06-30 16:59:25 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
2023-11-09 04:56:58 +00:00
|
|
|
#[cfg(not(target_arch = "aarch64"))]
|
2022-01-06 22:24:38 +00:00
|
|
|
fn test_vhost_user_blk_readonly() {
|
|
|
|
test_vhost_user_blk(1, true, false, Some(&prepare_vubd))
|
|
|
|
}
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
2023-11-09 04:56:58 +00:00
|
|
|
#[cfg(not(target_arch = "aarch64"))]
|
2022-01-06 22:24:38 +00:00
|
|
|
fn test_vhost_user_blk_direct() {
|
|
|
|
test_vhost_user_blk(1, false, true, Some(&prepare_vubd))
|
|
|
|
}
|
2020-09-01 20:03:49 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_boot_from_vhost_user_blk_default() {
|
|
|
|
test_boot_from_vhost_user_blk(1, false, false, Some(&prepare_vubd))
|
|
|
|
}
|
2020-09-01 20:03:49 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
fn test_split_irqchip() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
2020-09-01 20:03:49 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
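// With a split IRQ chip, the legacy timer and cascade interrupts are not
// expected to be routed through the IO-APIC, so both counts below should be 0.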
|
2021-11-29 05:15:59 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
assert_eq!(
|
2021-11-29 05:15:59 +00:00
|
|
|
guest
|
2022-01-06 22:24:38 +00:00
|
|
|
.ssh_command("grep -c IO-APIC.*timer /proc/interrupts || true")
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or(1),
|
|
|
|
0
|
2021-11-29 05:15:59 +00:00
|
|
|
);
|
2022-01-06 22:24:38 +00:00
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("grep -c IO-APIC.*cascade /proc/interrupts || true")
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or(1),
|
|
|
|
0
|
2021-11-29 05:15:59 +00:00
|
|
|
);
|
2022-01-06 22:24:38 +00:00
|
|
|
});
|
2021-11-29 05:15:59 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2022-04-21 13:53:36 +00:00
|
|
|
|
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
fn test_dmi_serial_number() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
|
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
|
|
|
.args(["--platform", "serial_number=a=b;c=d"])
|
2022-04-21 13:53:36 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("sudo cat /sys/class/dmi/id/product_serial")
|
|
|
|
.unwrap()
|
|
|
|
.trim(),
|
|
|
|
"a=b;c=d"
|
|
|
|
);
|
|
|
|
});
|
|
|
|
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2021-11-29 05:15:59 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2021-11-29 05:15:59 +00:00
|
|
|
|
2022-08-04 03:36:17 +00:00
|
|
|
#[test]
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
fn test_dmi_uuid() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
|
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
|
|
|
.args(["--platform", "uuid=1e8aa28a-435d-4027-87f4-40dceff1fa0a"])
|
2022-08-04 03:36:17 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("sudo cat /sys/class/dmi/id/product_uuid")
|
|
|
|
.unwrap()
|
|
|
|
.trim(),
|
|
|
|
"1e8aa28a-435d-4027-87f4-40dceff1fa0a"
|
|
|
|
);
|
|
|
|
});
|
|
|
|
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
|
|
|
|
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
|
|
|
|
2022-08-06 16:09:07 +00:00
|
|
|
#[test]
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
fn test_dmi_oem_strings() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
|
|
|
|
let s1 = "io.systemd.credential:xx=yy";
|
|
|
|
let s2 = "This is a test string";
|
|
|
|
|
2022-12-14 11:41:15 +00:00
|
|
|
let oem_strings = format!("oem_strings=[{s1},{s2}]");
|
2022-08-06 16:09:07 +00:00
|
|
|
|
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
|
|
|
.args(["--platform", &oem_strings])
|
2022-08-06 16:09:07 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("sudo dmidecode --oem-string count")
|
|
|
|
.unwrap()
|
|
|
|
.trim(),
|
|
|
|
"2"
|
|
|
|
);
|
|
|
|
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("sudo dmidecode --oem-string 1")
|
|
|
|
.unwrap()
|
|
|
|
.trim(),
|
|
|
|
s1
|
|
|
|
);
|
|
|
|
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("sudo dmidecode --oem-string 2")
|
|
|
|
.unwrap()
|
|
|
|
.trim(),
|
|
|
|
s2
|
|
|
|
);
|
|
|
|
});
|
|
|
|
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
|
|
|
|
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
2022-05-26 11:47:15 +00:00
|
|
|
fn test_virtio_fs() {
|
|
|
|
_test_virtio_fs(&prepare_virtiofsd, false, None)
|
2022-01-06 22:24:38 +00:00
|
|
|
}
|
2021-11-29 05:15:59 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
2022-05-26 11:47:15 +00:00
|
|
|
fn test_virtio_fs_hotplug() {
|
|
|
|
_test_virtio_fs(&prepare_virtiofsd, true, None)
|
2022-01-06 22:24:38 +00:00
|
|
|
}
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
#[cfg(not(feature = "mshv"))]
|
|
|
|
fn test_virtio_fs_multi_segment_hotplug() {
|
2022-05-26 11:47:15 +00:00
|
|
|
_test_virtio_fs(&prepare_virtiofsd, true, Some(15))
|
2022-01-06 22:24:38 +00:00
|
|
|
}
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
#[cfg(not(feature = "mshv"))]
|
|
|
|
fn test_virtio_fs_multi_segment() {
|
2022-05-26 11:47:15 +00:00
|
|
|
_test_virtio_fs(&prepare_virtiofsd, false, Some(15))
|
2022-01-06 22:24:38 +00:00
|
|
|
}
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_virtio_pmem_persist_writes() {
|
|
|
|
test_virtio_pmem(false, false)
|
|
|
|
}
|
2020-09-01 20:03:49 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_virtio_pmem_discard_writes() {
|
|
|
|
test_virtio_pmem(true, false)
|
|
|
|
}
|
2020-09-01 20:03:49 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_virtio_pmem_with_size() {
|
|
|
|
test_virtio_pmem(true, true)
|
|
|
|
}
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_boot_from_virtio_pmem() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
2020-07-08 14:14:10 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let kernel_path = direct_kernel_boot_path();
|
2020-07-08 14:14:10 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", kernel_path.to_str().unwrap()])
|
|
|
|
.args([
|
2022-01-06 22:24:38 +00:00
|
|
|
"--disk",
|
|
|
|
format!(
|
|
|
|
"path={}",
|
|
|
|
guest.disk_config.disk(DiskType::CloudInit).unwrap()
|
|
|
|
)
|
|
|
|
.as_str(),
|
|
|
|
])
|
|
|
|
.default_net()
|
2022-09-20 08:46:19 +00:00
|
|
|
.args([
|
2022-01-06 22:24:38 +00:00
|
|
|
"--pmem",
|
|
|
|
format!(
|
|
|
|
"file={},size={}",
|
|
|
|
guest.disk_config.disk(DiskType::OperatingSystem).unwrap(),
|
2022-11-01 21:52:40 +00:00
|
|
|
fs::metadata(guest.disk_config.disk(DiskType::OperatingSystem).unwrap())
|
2022-01-06 22:24:38 +00:00
|
|
|
.unwrap()
|
|
|
|
.len()
|
|
|
|
)
|
|
|
|
.as_str(),
|
|
|
|
])
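// The OS image is exposed as a virtio-pmem device rather than a virtio-block
// disk, so the root device on the kernel command line is switched from vda1
// to pmem0p1.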
|
2022-09-20 08:46:19 +00:00
|
|
|
.args([
|
2022-01-06 22:24:38 +00:00
|
|
|
"--cmdline",
|
|
|
|
DIRECT_KERNEL_BOOT_CMDLINE
|
|
|
|
.replace("vda1", "pmem0p1")
|
|
|
|
.as_str(),
|
|
|
|
])
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
2020-07-08 14:14:10 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Simple checks to validate the VM booted properly
|
|
|
|
assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1);
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
|
|
|
|
});
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2020-10-27 05:37:25 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_multiple_network_interfaces() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let kernel_path = direct_kernel_boot_path();
|
2020-09-01 20:03:49 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", kernel_path.to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
2022-09-20 08:46:19 +00:00
|
|
|
.args([
|
2022-01-06 22:24:38 +00:00
|
|
|
"--net",
|
|
|
|
guest.default_net_string().as_str(),
|
|
|
|
"tap=,mac=8a:6b:6f:5a:de:ac,ip=192.168.3.1,mask=255.255.255.0",
|
|
|
|
"tap=mytap1,mac=fe:1f:9e:e1:60:f2,ip=192.168.4.1,mask=255.255.255.0",
|
|
|
|
])
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2020-09-01 20:03:49 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let tap_count = exec_host_command_output("ip link | grep -c mytap1");
|
|
|
|
assert_eq!(String::from_utf8_lossy(&tap_count.stdout).trim(), "1");
|
2020-11-17 17:13:43 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// 3 network interfaces + default localhost ==> 4 interfaces
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("ip -o link | wc -l")
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or_default(),
|
|
|
|
4
|
|
|
|
);
|
|
|
|
});
|
2021-01-06 17:03:42 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2020-11-17 17:13:43 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2021-01-29 11:43:07 +00:00
|
|
|
|
2022-01-20 03:48:21 +00:00
|
|
|
#[test]
|
|
|
|
#[cfg(target_arch = "aarch64")]
|
|
|
|
fn test_pmu_on() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 09:19:00 +00:00
|
|
|
.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
2022-01-20 03:48:21 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
|
|
|
|
// Test that PMU exists.
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command(GREP_PMU_IRQ_CMD)
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or_default(),
|
|
|
|
1
|
|
|
|
);
|
|
|
|
});
|
|
|
|
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
|
|
|
|
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_serial_off() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--serial", "off"])
|
2022-01-06 22:24:38 +00:00
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2021-01-29 11:43:07 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
2021-07-26 15:51:14 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Test that there is no ttyS0
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command(GREP_SERIAL_IRQ_CMD)
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or(1),
|
|
|
|
0
|
|
|
|
);
|
|
|
|
});
|
2021-07-26 15:51:14 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2021-07-26 15:51:14 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2021-07-26 15:51:14 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_serial_null() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
let mut cmd = GuestCommand::new(&guest);
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
let console_str: &str = "console=ttyS0";
|
|
|
|
#[cfg(target_arch = "aarch64")]
|
|
|
|
let console_str: &str = "console=ttyAMA0";
|
2021-07-26 15:51:14 +00:00
|
|
|
|
2022-09-20 08:46:19 +00:00
|
|
|
cmd.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
|
|
|
|
.args([
|
2022-01-06 22:24:38 +00:00
|
|
|
"--cmdline",
|
|
|
|
DIRECT_KERNEL_BOOT_CMDLINE
|
|
|
|
.replace("console=hvc0 ", console_str)
|
2021-07-26 15:51:14 +00:00
|
|
|
.as_str(),
|
2022-01-06 22:24:38 +00:00
|
|
|
])
|
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--serial", "null"])
|
|
|
|
.args(["--console", "off"])
|
2022-01-06 22:24:38 +00:00
|
|
|
.capture_output();
|
2021-07-26 15:51:14 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = cmd.spawn().unwrap();
|
2021-07-26 15:51:14 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
2021-07-26 15:51:14 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Test that there is a ttyS0
|
|
|
|
assert_eq!(
|
2021-07-26 15:51:14 +00:00
|
|
|
guest
|
2022-01-06 22:24:38 +00:00
|
|
|
.ssh_command(GREP_SERIAL_IRQ_CMD)
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or_default(),
|
|
|
|
1
|
|
|
|
);
|
|
|
|
});
|
2021-07-26 15:51:14 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
|
|
|
handle_child_output(r, &output);
|
2021-07-26 15:51:14 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
assert!(!String::from_utf8_lossy(&output.stdout).contains(CONSOLE_TEST_STRING));
|
|
|
|
});
|
2021-07-26 15:51:14 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2021-07-26 15:51:14 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_serial_tty() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let kernel_path = direct_kernel_boot_path();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
let console_str: &str = "console=ttyS0";
|
|
|
|
#[cfg(target_arch = "aarch64")]
|
|
|
|
let console_str: &str = "console=ttyAMA0";
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", kernel_path.to_str().unwrap()])
|
|
|
|
.args([
|
2022-01-06 22:24:38 +00:00
|
|
|
"--cmdline",
|
|
|
|
DIRECT_KERNEL_BOOT_CMDLINE
|
|
|
|
.replace("console=hvc0 ", console_str)
|
|
|
|
.as_str(),
|
|
|
|
])
|
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--serial", "tty"])
|
|
|
|
.args(["--console", "off"])
|
2022-01-06 22:24:38 +00:00
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Test that there is a ttyS0
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command(GREP_SERIAL_IRQ_CMD)
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or_default(),
|
|
|
|
1
|
|
|
|
);
|
|
|
|
});
|
2021-05-04 21:48:35 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// This sleep is needed to wait for the login prompt
|
|
|
|
thread::sleep(std::time::Duration::new(2, 0));
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
|
|
|
handle_child_output(r, &output);
|
2020-03-09 17:32:55 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
assert!(String::from_utf8_lossy(&output.stdout).contains(CONSOLE_TEST_STRING));
|
|
|
|
});
|
2020-03-12 11:05:19 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2020-03-03 07:42:13 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_serial_file() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
2020-03-09 17:32:55 +00:00
|
|
|
|
2023-12-26 07:27:45 +00:00
|
|
|
let serial_path = guest.tmp_dir.as_path().join("serial-output");
|
2020-07-23 06:20:41 +00:00
|
|
|
#[cfg(target_arch = "x86_64")]
|
2022-01-06 22:24:38 +00:00
|
|
|
let console_str: &str = "console=ttyS0";
|
|
|
|
#[cfg(target_arch = "aarch64")]
|
|
|
|
let console_str: &str = "console=ttyAMA0";
|
2020-03-09 17:32:55 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
|
|
|
|
.args([
|
2022-01-06 22:24:38 +00:00
|
|
|
"--cmdline",
|
|
|
|
DIRECT_KERNEL_BOOT_CMDLINE
|
|
|
|
.replace("console=hvc0 ", console_str)
|
|
|
|
.as_str(),
|
|
|
|
])
|
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
2022-09-20 08:46:19 +00:00
|
|
|
.args([
|
2022-01-06 22:24:38 +00:00
|
|
|
"--serial",
|
|
|
|
format!("file={}", serial_path.to_str().unwrap()).as_str(),
|
|
|
|
])
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2020-03-09 17:32:55 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
2020-10-27 05:37:25 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Test that there is a ttyS0
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command(GREP_SERIAL_IRQ_CMD)
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or_default(),
|
|
|
|
1
|
|
|
|
);
|
2020-09-01 20:03:49 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
guest.ssh_command("sudo shutdown -h now").unwrap();
|
|
|
|
});
|
2020-09-01 20:03:49 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.wait_timeout(std::time::Duration::from_secs(20));
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
|
|
|
handle_child_output(r, &output);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
// Check that the cloud-hypervisor binary actually terminated
|
|
|
|
assert!(output.status.success());
|
|
|
|
|
|
|
|
// Do this check after shutdown of the VM as an easy way to ensure
|
|
|
|
// all writes are flushed to disk
|
|
|
|
let mut f = std::fs::File::open(serial_path).unwrap();
|
|
|
|
let mut buf = String::new();
|
|
|
|
f.read_to_string(&mut buf).unwrap();
|
|
|
|
assert!(buf.contains(CONSOLE_TEST_STRING));
|
|
|
|
});
|
2021-06-04 16:14:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2021-06-04 16:14:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_pty_interaction() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
let api_socket = temp_api_path(&guest.tmp_dir);
|
|
|
|
let serial_option = if cfg!(target_arch = "x86_64") {
|
|
|
|
" console=ttyS0"
|
|
|
|
} else {
|
|
|
|
" console=ttyAMA0"
|
|
|
|
};
|
|
|
|
let cmdline = DIRECT_KERNEL_BOOT_CMDLINE.to_owned() + serial_option;
|
2021-06-04 16:14:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
|
|
|
|
.args(["--cmdline", &cmdline])
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--serial", "null"])
|
|
|
|
.args(["--console", "pty"])
|
|
|
|
.args(["--api-socket", &api_socket])
|
2022-01-06 22:24:38 +00:00
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
// Get pty fd for console
|
|
|
|
let console_path = get_pty_path(&api_socket, "console");
|
2023-08-30 00:18:04 +00:00
|
|
|
_test_pty_interaction(console_path);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2023-08-30 00:18:04 +00:00
|
|
|
guest.ssh_command("sudo shutdown -h now").unwrap();
|
|
|
|
});
|
|
|
|
|
|
|
|
let _ = child.wait_timeout(std::time::Duration::from_secs(20));
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
|
|
|
handle_child_output(r, &output);
|
|
|
|
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
// Check that the cloud-hypervisor binary actually terminated
|
|
|
|
assert!(output.status.success())
|
|
|
|
});
|
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
fn test_serial_socket_interaction() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
2023-12-26 07:27:45 +00:00
|
|
|
let serial_socket = guest.tmp_dir.as_path().join("serial.socket");
|
|
|
|
let serial_socket_pty = guest.tmp_dir.as_path().join("serial.pty");
|
2023-08-30 00:18:04 +00:00
|
|
|
let serial_option = if cfg!(target_arch = "x86_64") {
|
|
|
|
" console=ttyS0"
|
|
|
|
} else {
|
|
|
|
" console=ttyAMA0"
|
|
|
|
};
|
|
|
|
let cmdline = DIRECT_KERNEL_BOOT_CMDLINE.to_owned() + serial_option;
|
|
|
|
|
|
|
|
let mut child = GuestCommand::new(&guest)
|
|
|
|
.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
|
|
|
|
.args(["--cmdline", &cmdline])
|
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
|
|
|
.args(["--console", "null"])
|
|
|
|
.args([
|
|
|
|
"--serial",
|
|
|
|
format!("socket={}", serial_socket.to_str().unwrap()).as_str(),
|
|
|
|
])
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
let _ = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
});
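// Bridge the serial UNIX socket to a pty on the host with socat, so the
// same pty interaction helper used for the console test can drive the
// serial port through the socket backend.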
|
2021-06-04 16:14:05 +00:00
|
|
|
|
2023-08-30 00:18:04 +00:00
|
|
|
let mut socat_command = Command::new("socat");
|
|
|
|
let socat_args = [
|
|
|
|
&format!("pty,link={},raw", serial_socket_pty.display()),
|
|
|
|
&format!("UNIX-CONNECT:{}", serial_socket.display()),
|
|
|
|
];
|
|
|
|
socat_command.args(socat_args);
|
|
|
|
|
|
|
|
let mut socat_child = socat_command.spawn().unwrap();
|
|
|
|
thread::sleep(std::time::Duration::new(1, 0));
|
|
|
|
|
|
|
|
let _ = std::panic::catch_unwind(|| {
|
|
|
|
_test_pty_interaction(serial_socket_pty);
|
|
|
|
});
|
|
|
|
|
|
|
|
let _ = socat_child.kill();
|
|
|
|
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
2022-01-06 22:24:38 +00:00
|
|
|
guest.ssh_command("sudo shutdown -h now").unwrap();
|
|
|
|
});
|
2021-06-04 16:14:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.wait_timeout(std::time::Duration::from_secs(20));
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
|
|
|
handle_child_output(r, &output);
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
// Check that the cloud-hypervisor binary actually terminated
|
2023-12-05 18:18:32 +00:00
|
|
|
if !output.status.success() {
|
|
|
|
panic!(
|
|
|
|
"Cloud Hypervisor process failed to terminate gracefully: {:?}",
|
|
|
|
output.status
|
|
|
|
);
|
|
|
|
}
|
2022-01-06 22:24:38 +00:00
|
|
|
});
|
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2021-10-25 15:17:33 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_virtio_console() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
2021-10-25 15:17:33 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let kernel_path = direct_kernel_boot_path();
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", kernel_path.to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--console", "tty"])
|
|
|
|
.args(["--serial", "null"])
|
2022-01-06 22:24:38 +00:00
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let text = String::from("On a branch floating down river a cricket, singing.");
|
2022-12-14 11:41:15 +00:00
|
|
|
let cmd = format!("echo {text} | sudo tee /dev/hvc0");
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
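// Device ID 0x1043 (0x1040 + 3, the virtio console device type) together
// with vendor ID 0x1af4 identifies the modern virtio-pci console device,
// so this confirms the guest actually enumerated the console over PCI.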
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
assert!(guest
|
|
|
|
.does_device_vendor_pair_match("0x1043", "0x1af4")
|
|
|
|
.unwrap_or_default());
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
guest.ssh_command(&cmd).unwrap();
|
|
|
|
});
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
|
|
|
handle_child_output(r, &output);
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
assert!(String::from_utf8_lossy(&output.stdout).contains(&text));
|
|
|
|
});
|
2020-10-27 05:37:25 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_console_file() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2023-12-26 07:27:45 +00:00
|
|
|
let console_path = guest.tmp_dir.as_path().join("console-output");
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
2022-09-20 08:46:19 +00:00
|
|
|
.args([
|
2022-01-06 22:24:38 +00:00
|
|
|
"--console",
|
|
|
|
format!("file={}", console_path.to_str().unwrap()).as_str(),
|
|
|
|
])
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
guest.wait_vm_boot(None).unwrap();
|
2020-07-08 15:55:31 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
guest.ssh_command("sudo shutdown -h now").unwrap();
|
2020-09-01 20:03:49 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.wait_timeout(std::time::Duration::from_secs(20));
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
// Check that the cloud-hypervisor binary actually terminated
|
|
|
|
assert!(output.status.success());
|
2020-10-27 05:37:25 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Do this check after shutdown of the VM as an easy way to ensure
|
|
|
|
// all writes are flushed to disk
|
|
|
|
let mut f = std::fs::File::open(console_path).unwrap();
|
|
|
|
let mut buf = String::new();
|
|
|
|
f.read_to_string(&mut buf).unwrap();
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
if !buf.contains(CONSOLE_TEST_STRING) {
|
|
|
|
eprintln!(
|
2022-12-14 11:41:15 +00:00
|
|
|
"\n\n==== Console file output ====\n\n{buf}\n\n==== End console file output ===="
|
2020-07-03 08:00:12 +00:00
|
|
|
);
|
2022-01-06 22:24:38 +00:00
|
|
|
}
|
|
|
|
assert!(buf.contains(CONSOLE_TEST_STRING));
|
|
|
|
});
|
2020-09-01 20:03:49 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2020-09-01 20:03:49 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
#[cfg(not(feature = "mshv"))]
|
|
|
|
// The VFIO integration test starts a cloud-hypervisor guest with 3 TAP
// backed networking interfaces, bound through a simple bridge on the host.
// So if the nested cloud-hypervisor succeeds in getting a directly
// assigned interface from its cloud-hypervisor host, we should be able to
// ssh into it, and verify that it's running with the right kernel command
// line (we tag the command line from cloud-hypervisor for that purpose).
// The third device is added to validate that hotplug works correctly since
// it is added to the L2 VM through the hotplug mechanism.
// Also, we pass through a virtio-blk device to the L2 VM to test the
// 32-bit vfio device support.
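//
// A rough sketch of the nesting exercised below (the bridge/tap naming is
// illustrative; the actual plumbing is done by setup_vfio_network_interfaces()):
//
//   host bridge
//     ├── vfio-tap0 ── L1 cloud-hypervisor guest (started by this test)
//     ├── vfio-tap1..3 ── virtio-net devices with iommu=on, assigned via
//     │                   VFIO to the nested (L2) cloud-hypervisor guest
//     └── (a virtio-blk device with iommu=on is assigned to L2 as well)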
|
|
|
|
fn test_vfio() {
|
|
|
|
setup_vfio_network_interfaces();
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2023-10-23 12:54:13 +00:00
|
|
|
let jammy = UbuntuDiskConfig::new(JAMMY_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new_from_ip_range(Box::new(jammy), "172.18", 0);
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut workload_path = dirs::home_dir().unwrap();
|
|
|
|
workload_path.push("workloads");
|
2020-10-26 23:46:04 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let kernel_path = direct_kernel_boot_path();
|
2020-09-01 20:03:49 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut vfio_path = workload_path.clone();
|
|
|
|
vfio_path.push("vfio");
|
2020-09-01 20:03:49 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut cloud_init_vfio_base_path = vfio_path.clone();
|
|
|
|
cloud_init_vfio_base_path.push("cloudinit.img");
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// We copy our cloudinit into the vfio mount point, for the nested
|
|
|
|
// cloud-hypervisor guest to use.
|
|
|
|
rate_limited_copy(
|
2022-11-01 21:52:40 +00:00
|
|
|
guest.disk_config.disk(DiskType::CloudInit).unwrap(),
|
2022-01-06 22:24:38 +00:00
|
|
|
&cloud_init_vfio_base_path,
|
|
|
|
)
|
|
|
|
.expect("copying of cloud-init disk failed");
|
2021-03-15 03:04:41 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut vfio_disk_path = workload_path.clone();
|
|
|
|
vfio_disk_path.push("vfio.img");
|
2020-08-05 04:42:03 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Create the vfio disk image
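// mkfs.ext4 -d pre-populates the 2g image with the contents of the vfio
// workload directory (the cloud-init image copied above, the ch-remote
// binary used later via /mnt/ch-remote, and whatever else the nested
// guest setup needs); the L1 guest mounts this image at /mnt.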
|
|
|
|
let output = Command::new("mkfs.ext4")
|
|
|
|
.arg("-d")
|
|
|
|
.arg(vfio_path.to_str().unwrap())
|
|
|
|
.arg(vfio_disk_path.to_str().unwrap())
|
|
|
|
.arg("2g")
|
|
|
|
.output()
|
|
|
|
.unwrap();
|
|
|
|
if !output.status.success() {
|
|
|
|
eprintln!("{}", String::from_utf8_lossy(&output.stderr));
|
|
|
|
panic!("mkfs.ext4 command generated an error");
|
|
|
|
}
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut blk_file_path = workload_path;
|
|
|
|
blk_file_path.push("blk.img");
|
2020-10-26 23:46:04 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let vfio_tap0 = "vfio-tap0";
|
|
|
|
let vfio_tap1 = "vfio-tap1";
|
|
|
|
let vfio_tap2 = "vfio-tap2";
|
|
|
|
let vfio_tap3 = "vfio-tap3";
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=4"])
|
|
|
|
.args(["--memory", "size=2G,hugepages=on,shared=on"])
|
|
|
|
.args(["--kernel", kernel_path.to_str().unwrap()])
|
|
|
|
.args([
|
2022-01-06 22:24:38 +00:00
|
|
|
"--disk",
|
|
|
|
format!(
|
|
|
|
"path={}",
|
|
|
|
guest.disk_config.disk(DiskType::OperatingSystem).unwrap()
|
|
|
|
)
|
|
|
|
.as_str(),
|
|
|
|
format!(
|
|
|
|
"path={}",
|
|
|
|
guest.disk_config.disk(DiskType::CloudInit).unwrap()
|
|
|
|
)
|
|
|
|
.as_str(),
|
|
|
|
format!("path={}", vfio_disk_path.to_str().unwrap()).as_str(),
|
|
|
|
format!("path={},iommu=on", blk_file_path.to_str().unwrap()).as_str(),
|
|
|
|
])
|
2022-09-20 08:46:19 +00:00
|
|
|
.args([
|
2022-01-06 22:24:38 +00:00
|
|
|
"--cmdline",
|
|
|
|
format!(
|
2022-12-14 11:41:15 +00:00
|
|
|
"{DIRECT_KERNEL_BOOT_CMDLINE} kvm-intel.nested=1 vfio_iommu_type1.allow_unsafe_interrupts"
|
2022-01-06 22:24:38 +00:00
|
|
|
)
|
|
|
|
.as_str(),
|
|
|
|
])
|
2022-09-20 08:46:19 +00:00
|
|
|
.args([
|
2022-01-06 22:24:38 +00:00
|
|
|
"--net",
|
|
|
|
format!("tap={},mac={}", vfio_tap0, guest.network.guest_mac).as_str(),
|
|
|
|
format!(
|
|
|
|
"tap={},mac={},iommu=on",
|
|
|
|
vfio_tap1, guest.network.l2_guest_mac1
|
|
|
|
)
|
|
|
|
.as_str(),
|
|
|
|
format!(
|
|
|
|
"tap={},mac={},iommu=on",
|
|
|
|
vfio_tap2, guest.network.l2_guest_mac2
|
|
|
|
)
|
|
|
|
.as_str(),
|
|
|
|
format!(
|
|
|
|
"tap={},mac={},iommu=on",
|
|
|
|
vfio_tap3, guest.network.l2_guest_mac3
|
|
|
|
)
|
|
|
|
.as_str(),
|
|
|
|
])
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2020-09-02 21:41:23 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
thread::sleep(std::time::Duration::new(30, 0));
|
2020-09-02 21:41:23 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.ssh_command_l1("sudo systemctl start vfio").unwrap();
|
|
|
|
thread::sleep(std::time::Duration::new(120, 0));
|
|
|
|
|
|
|
|
// We booted our cloud hypervisor L2 guest with a "VFIOTAG" tag
|
|
|
|
// added to its kernel command line.
|
|
|
|
// Let's ssh into it and verify that it's there. If it is it means
|
|
|
|
// we're in the right guest (The L2 one) because the QEMU L1 guest
|
|
|
|
// does not have this command line tag.
|
2023-11-03 22:02:00 +00:00
|
|
|
assert!(check_matched_lines_count(
|
|
|
|
guest.ssh_command_l2_1("cat /proc/cmdline").unwrap().trim(),
|
|
|
|
vec!["VFIOTAG"],
|
2022-01-06 22:24:38 +00:00
|
|
|
1
|
2023-11-03 22:02:00 +00:00
|
|
|
));
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Let's also verify from the second virtio-net device passed to
|
|
|
|
// the L2 VM.
|
2023-11-03 22:02:00 +00:00
|
|
|
assert!(check_matched_lines_count(
|
|
|
|
guest.ssh_command_l2_2("cat /proc/cmdline").unwrap().trim(),
|
|
|
|
vec!["VFIOTAG"],
|
2022-01-06 22:24:38 +00:00
|
|
|
1
|
2023-11-03 22:02:00 +00:00
|
|
|
));
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check the number of PCI devices appearing in the L2 VM.
|
2023-11-03 22:02:00 +00:00
|
|
|
assert!(check_lines_count(
|
2022-01-06 22:24:38 +00:00
|
|
|
guest
|
2023-11-03 22:02:00 +00:00
|
|
|
.ssh_command_l2_1("ls /sys/bus/pci/devices")
|
2022-01-06 22:24:38 +00:00
|
|
|
.unwrap()
|
2023-11-03 22:02:00 +00:00
|
|
|
.trim(),
|
|
|
|
8
|
|
|
|
));
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check both that /dev/vdc exists and that its size is 16M in the L2 VM
|
2023-11-03 22:02:00 +00:00
|
|
|
assert!(check_matched_lines_count(
|
|
|
|
guest.ssh_command_l2_1("lsblk").unwrap().trim(),
|
|
|
|
vec!["vdc", "16M"],
|
2022-01-06 22:24:38 +00:00
|
|
|
1
|
2023-11-03 22:02:00 +00:00
|
|
|
));
|
2021-03-15 03:04:41 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Hotplug an extra virtio-net device into the L2 VM.
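// The device first has to be unbound from its current driver and bound to
// vfio-pci inside the L1 guest before ch-remote can hand it to the L2 VM.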
|
|
|
|
guest
|
|
|
|
.ssh_command_l1(
|
|
|
|
"echo 0000:00:09.0 | sudo tee /sys/bus/pci/devices/0000:00:09.0/driver/unbind",
|
|
|
|
)
|
2020-09-02 21:52:44 +00:00
|
|
|
.unwrap();
|
2022-01-06 22:24:38 +00:00
|
|
|
guest
|
|
|
|
.ssh_command_l1("echo 0000:00:09.0 | sudo tee /sys/bus/pci/drivers/vfio-pci/bind")
|
|
|
|
.unwrap();
|
|
|
|
let vfio_hotplug_output = guest
|
|
|
|
.ssh_command_l1(
|
|
|
|
"sudo /mnt/ch-remote \
|
2023-07-08 01:38:51 +00:00
|
|
|
--api-socket=/tmp/ch_api.sock \
|
2022-01-06 22:24:38 +00:00
|
|
|
add-device path=/sys/bus/pci/devices/0000:00:09.0,id=vfio123",
|
|
|
|
)
|
|
|
|
.unwrap();
|
2023-11-03 22:02:00 +00:00
|
|
|
assert!(check_matched_lines_count(
|
|
|
|
vfio_hotplug_output.trim(),
|
|
|
|
vec!["{\"id\":\"vfio123\",\"bdf\":\"0000:00:08.0\"}"],
|
|
|
|
1
|
|
|
|
));
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
2020-09-02 21:52:44 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Let's also verify from the third virtio-net device passed to
// the L2 VM. This third device has been hotplugged into the L2
// VM, so this is our way to validate that hotplug works for VFIO PCI.
|
2023-11-03 22:02:00 +00:00
|
|
|
assert!(check_matched_lines_count(
|
|
|
|
guest.ssh_command_l2_3("cat /proc/cmdline").unwrap().trim(),
|
|
|
|
vec!["VFIOTAG"],
|
2022-01-06 22:24:38 +00:00
|
|
|
1
|
2023-11-03 22:02:00 +00:00
|
|
|
));
|
2020-09-02 21:52:44 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check the number of PCI devices appearing in the L2 VM.
// There should be one more device than before, raising the count
// up to 9 PCI devices.
|
2023-11-03 22:02:00 +00:00
|
|
|
assert!(check_lines_count(
|
2022-01-06 22:24:38 +00:00
|
|
|
guest
|
2023-11-03 22:02:00 +00:00
|
|
|
.ssh_command_l2_1("ls /sys/bus/pci/devices")
|
2022-01-06 22:24:38 +00:00
|
|
|
.unwrap()
|
2023-11-03 22:02:00 +00:00
|
|
|
.trim(),
|
|
|
|
9
|
|
|
|
));
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Let's now verify that we can correctly remove the virtio-net
|
|
|
|
// device through the "remove-device" command responsible for
|
|
|
|
// unplugging VFIO devices.
|
|
|
|
guest
|
|
|
|
.ssh_command_l1(
|
|
|
|
"sudo /mnt/ch-remote \
|
2023-07-08 01:38:51 +00:00
|
|
|
--api-socket=/tmp/ch_api.sock \
|
2022-01-06 22:24:38 +00:00
|
|
|
remove-device vfio123",
|
|
|
|
)
|
|
|
|
.unwrap();
|
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check that the number of PCI devices in the L2 VM is back down
// to 8 devices.
|
2023-11-03 22:02:00 +00:00
|
|
|
assert!(check_lines_count(
|
2022-01-06 22:24:38 +00:00
|
|
|
guest
|
2023-11-03 22:02:00 +00:00
|
|
|
.ssh_command_l2_1("ls /sys/bus/pci/devices")
|
2022-01-06 22:24:38 +00:00
|
|
|
.unwrap()
|
2023-11-03 22:02:00 +00:00
|
|
|
.trim(),
|
|
|
|
8
|
|
|
|
));
|
2021-03-15 03:04:41 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Perform memory hotplug in L2 and validate the memory is showing
// up as expected. In order to check, we will use the virtio-net
// device already passed through to L2 as a VFIO device; this
// verifies that VFIO devices remain functional with memory hotplug.
|
|
|
|
assert!(guest.get_total_memory_l2().unwrap_or_default() > 480_000);
|
|
|
|
guest
|
|
|
|
.ssh_command_l2_1(
|
|
|
|
"sudo bash -c 'echo online > /sys/devices/system/memory/auto_online_blocks'",
|
|
|
|
)
|
2020-09-02 22:59:47 +00:00
|
|
|
.unwrap();
|
2022-01-06 22:24:38 +00:00
|
|
|
guest
|
|
|
|
.ssh_command_l1(
|
|
|
|
"sudo /mnt/ch-remote \
|
2023-07-08 01:38:51 +00:00
|
|
|
--api-socket=/tmp/ch_api.sock \
|
|
|
|
resize --memory=1073741824",
|
2022-01-06 22:24:38 +00:00
|
|
|
)
|
|
|
|
.unwrap();
|
|
|
|
assert!(guest.get_total_memory_l2().unwrap_or_default() > 960_000);
|
|
|
|
});
|
2020-04-29 10:25:16 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2020-06-30 16:59:25 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
cleanup_vfio_network_interfaces();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2020-09-02 22:59:47 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_direct_kernel_boot_noacpi() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
2020-09-02 22:59:47 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let kernel_path = direct_kernel_boot_path();
|
2020-04-29 10:25:16 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", kernel_path.to_str().unwrap()])
|
|
|
|
.args([
|
2022-01-06 22:24:38 +00:00
|
|
|
"--cmdline",
|
2022-12-14 11:41:15 +00:00
|
|
|
format!("{DIRECT_KERNEL_BOOT_CMDLINE} acpi=off").as_str(),
|
2022-01-06 22:24:38 +00:00
|
|
|
])
|
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2021-01-14 03:03:53 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
2021-01-14 03:03:53 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1);
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
|
|
|
|
});
|
2021-01-14 03:03:53 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2021-01-14 03:03:53 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2021-01-14 03:03:53 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_virtio_vsock() {
|
|
|
|
_test_virtio_vsock(false)
|
|
|
|
}
|
2020-05-05 10:09:14 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_virtio_vsock_hotplug() {
|
|
|
|
_test_virtio_vsock(true);
|
|
|
|
}
|
2020-09-02 23:16:38 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
2023-06-06 19:36:36 +00:00
|
|
|
fn test_api_http_shutdown() {
|
2022-01-06 22:24:38 +00:00
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
2023-03-30 20:47:21 +00:00
|
|
|
|
2023-06-06 19:36:36 +00:00
|
|
|
_test_api_shutdown(TargetApi::new_http_api(&guest.tmp_dir), guest)
|
|
|
|
}
|
2023-03-30 20:47:21 +00:00
|
|
|
|
2023-06-06 19:36:36 +00:00
|
|
|
#[test]
|
|
|
|
fn test_api_http_delete() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
2020-04-29 10:25:16 +00:00
|
|
|
|
2023-06-06 19:36:36 +00:00
|
|
|
_test_api_delete(TargetApi::new_http_api(&guest.tmp_dir), guest);
|
2023-03-30 20:47:21 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
2023-06-06 19:36:36 +00:00
|
|
|
fn test_api_http_pause_resume() {
|
2023-03-30 20:47:21 +00:00
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
|
2023-06-06 19:36:36 +00:00
|
|
|
_test_api_pause_resume(TargetApi::new_http_api(&guest.tmp_dir), guest)
|
2023-03-30 20:47:21 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
fn test_api_http_create_boot() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
|
|
|
|
_test_api_create_boot(TargetApi::new_http_api(&guest.tmp_dir), guest)
|
|
|
|
}
|
|
|
|
|
2023-06-06 19:36:36 +00:00
|
|
|
#[test]
|
|
|
|
fn test_virtio_iommu() {
|
|
|
|
_test_virtio_iommu(cfg!(target_arch = "x86_64"))
|
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
// We cannot force the software running in the guest to reprogram the BAR
// with some different addresses, but we have a reliable way of testing it
// with a standard Linux kernel.
// By removing a device from the PCI tree, and then rescanning the tree,
// Linux consistently chooses to reorganize the PCI device BARs to other
// locations in the guest address space.
// This test creates a dedicated PCI network device, checks that it is
// properly probed first, then removes it, and adds it again by doing a
// rescan.
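// From inside the guest the sequence boils down to the following commands
// (the BDF 0000:00:05.0 matches where the extra virtio-net device lands in
// this configuration):
//
//   cat /sys/bus/pci/devices/0000:00:05.0/resource   # record initial BAR
//   echo 1 > /sys/bus/pci/devices/0000:00:05.0/remove
//   echo 1 > /sys/bus/pci/rescan
//   cat /sys/bus/pci/devices/0000:00:05.0/resource   # address should differ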
|
|
|
|
fn test_pci_bar_reprogramming() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
let kernel_path = direct_kernel_boot_path();
|
|
|
|
#[cfg(target_arch = "aarch64")]
|
|
|
|
let kernel_path = edk2_path();
|
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = GuestCommand::new(&guest)
|
2023-06-06 19:36:36 +00:00
|
|
|
.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", kernel_path.to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
|
|
|
.default_disks()
|
|
|
|
.args([
|
|
|
|
"--net",
|
|
|
|
guest.default_net_string().as_str(),
|
|
|
|
"tap=,mac=8a:6b:6f:5a:de:ac,ip=192.168.3.1,mask=255.255.255.0",
|
|
|
|
])
|
2022-01-06 22:24:38 +00:00
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2023-06-06 19:36:36 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
2022-01-06 22:24:38 +00:00
|
|
|
|
|
|
|
// 2 network interfaces + default localhost ==> 3 interfaces
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("ip -o link | wc -l")
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or_default(),
|
|
|
|
3
|
|
|
|
);
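// Each line of the PCI "resource" file is "<start> <end> <flags>" for one
// BAR; keep only the start address of the first BAR so it can be compared
// after the remove/rescan cycle.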
|
|
|
|
|
|
|
|
let init_bar_addr = guest
|
|
|
|
.ssh_command(
|
|
|
|
"sudo awk '{print $1; exit}' /sys/bus/pci/devices/0000:00:05.0/resource",
|
|
|
|
)
|
2020-09-01 20:03:49 +00:00
|
|
|
.unwrap();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Remove the PCI device
|
|
|
|
guest
|
|
|
|
.ssh_command("echo 1 | sudo tee /sys/bus/pci/devices/0000:00:05.0/remove")
|
|
|
|
.unwrap();
|
2020-10-27 05:37:25 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Only 1 network interface left + default localhost ==> 2 interfaces
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("ip -o link | wc -l")
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or_default(),
|
|
|
|
2
|
|
|
|
);
|
2020-09-01 20:03:49 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Rescan the PCI bus to rediscover the device
|
|
|
|
guest
|
|
|
|
.ssh_command("echo 1 | sudo tee /sys/bus/pci/rescan")
|
|
|
|
.unwrap();
|
2020-09-01 20:03:49 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Back to 2 network interfaces + default localhost ==> 3 interfaces
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("ip -o link | wc -l")
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or_default(),
|
|
|
|
3
|
|
|
|
);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let new_bar_addr = guest
|
|
|
|
.ssh_command(
|
|
|
|
"sudo awk '{print $1; exit}' /sys/bus/pci/devices/0000:00:05.0/resource",
|
|
|
|
)
|
|
|
|
.unwrap();
|
2021-05-06 09:17:51 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Let's compare the BAR addresses for our virtio-net device.
|
|
|
|
// They should be different as we expect the BAR reprogramming
|
|
|
|
// to have happened.
|
|
|
|
assert_ne!(init_bar_addr, new_bar_addr);
|
|
|
|
});
|
2021-05-06 09:17:51 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2021-05-06 09:17:51 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2021-05-06 09:17:51 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_memory_mergeable_off() {
|
|
|
|
test_memory_mergeable(false)
|
|
|
|
}
|
2021-05-06 09:17:51 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
fn test_cpu_hotplug() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
let api_socket = temp_api_path(&guest.tmp_dir);
|
2021-05-06 09:17:51 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let kernel_path = direct_kernel_boot_path();
|
2021-05-06 09:17:51 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=2,max=4"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", kernel_path.to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--api-socket", &api_socket])
|
2022-01-06 22:24:38 +00:00
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2021-05-06 09:17:51 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
2021-05-06 09:17:51 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Resize the VM
|
|
|
|
let desired_vcpus = 4;
|
2022-05-12 19:51:13 +00:00
|
|
|
resize_command(&api_socket, Some(desired_vcpus), None, None, None);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
guest
|
|
|
|
.ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu2/online")
|
|
|
|
.unwrap();
|
|
|
|
guest
|
|
|
|
.ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu3/online")
|
|
|
|
.unwrap();
|
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
|
|
|
assert_eq!(
|
|
|
|
guest.get_cpu_count().unwrap_or_default(),
|
|
|
|
u32::from(desired_vcpus)
|
|
|
|
);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
guest.reboot_linux(0, None);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
assert_eq!(
|
|
|
|
guest.get_cpu_count().unwrap_or_default(),
|
|
|
|
u32::from(desired_vcpus)
|
|
|
|
);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Resize the VM
|
|
|
|
let desired_vcpus = 2;
|
2022-05-12 19:51:13 +00:00
|
|
|
resize_command(&api_socket, Some(desired_vcpus), None, None, None);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
|
|
|
assert_eq!(
|
|
|
|
guest.get_cpu_count().unwrap_or_default(),
|
|
|
|
u32::from(desired_vcpus)
|
|
|
|
);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Resize the VM back up to 4
|
|
|
|
let desired_vcpus = 4;
|
2022-05-12 19:51:13 +00:00
|
|
|
resize_command(&api_socket, Some(desired_vcpus), None, None, None);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
guest
|
|
|
|
.ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu2/online")
|
|
|
|
.unwrap();
|
|
|
|
guest
|
|
|
|
.ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu3/online")
|
|
|
|
.unwrap();
|
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
|
|
|
assert_eq!(
|
|
|
|
guest.get_cpu_count().unwrap_or_default(),
|
|
|
|
u32::from(desired_vcpus)
|
2020-09-01 20:03:49 +00:00
|
|
|
);
|
2022-01-06 22:24:38 +00:00
|
|
|
});
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_memory_hotplug() {
|
|
|
|
#[cfg(target_arch = "aarch64")]
|
|
|
|
let focal_image = FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string();
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
let focal_image = FOCAL_IMAGE_NAME.to_string();
|
|
|
|
let focal = UbuntuDiskConfig::new(focal_image);
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
let api_socket = temp_api_path(&guest.tmp_dir);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[cfg(target_arch = "aarch64")]
|
|
|
|
let kernel_path = edk2_path();
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
let kernel_path = direct_kernel_boot_path();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=2,max=4"])
|
|
|
|
.args(["--memory", "size=512M,hotplug_size=8192M"])
|
|
|
|
.args(["--kernel", kernel_path.to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--balloon", "size=0"])
|
|
|
|
.args(["--api-socket", &api_socket])
|
2022-01-06 22:24:38 +00:00
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
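// The helper below is expected to make the guest auto-online hotplugged
// memory blocks (e.g. via /sys/devices/system/memory/auto_online_blocks),
// so RAM added through the resize API becomes visible without manual steps.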
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
guest.enable_memory_hotplug();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Add RAM to the VM
|
|
|
|
let desired_ram = 1024 << 20;
|
2022-05-12 19:51:13 +00:00
|
|
|
resize_command(&api_socket, None, Some(desired_ram), None, None);
|
2020-06-30 16:59:25 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 960_000);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Use balloon to remove RAM from the VM
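// The balloon size is the amount of guest RAM reclaimed by the host, so
// inflating it to 512M out of the 1G guest should leave the reported total
// somewhere between the two assertions below.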
|
|
|
|
let desired_balloon = 512 << 20;
|
2022-05-12 19:51:13 +00:00
|
|
|
resize_command(&api_socket, None, None, Some(desired_balloon), None);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() < 960_000);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
guest.reboot_linux(0, None);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() < 960_000);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Use the balloon to add the RAM back to the VM
|
|
|
|
let desired_balloon = 0;
|
2022-05-12 19:51:13 +00:00
|
|
|
resize_command(&api_socket, None, None, Some(desired_balloon), None);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 960_000);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
guest.enable_memory_hotplug();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Add RAM to the VM
|
|
|
|
let desired_ram = 2048 << 20;
|
2022-05-12 19:51:13 +00:00
|
|
|
resize_command(&api_socket, None, Some(desired_ram), None, None);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 1_920_000);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Remove RAM from the VM (only applies after reboot)
|
|
|
|
let desired_ram = 1024 << 20;
|
2022-05-12 19:51:13 +00:00
|
|
|
resize_command(&api_socket, None, Some(desired_ram), None, None);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
guest.reboot_linux(1, None);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 960_000);
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() < 1_920_000);
|
|
|
|
});
|
2021-09-08 14:36:51 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2021-09-08 14:36:51 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
#[cfg(not(feature = "mshv"))]
|
|
|
|
fn test_virtio_mem() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
let api_socket = temp_api_path(&guest.tmp_dir);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let kernel_path = direct_kernel_boot_path();
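// With hotplug_method=virtio-mem the hotplug area is managed by a
// virtio-mem device instead of ACPI DIMMs, so guest RAM can be both grown
// and shrunk at runtime without requiring a reboot.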
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=2,max=4"])
|
|
|
|
.args([
|
2022-01-06 22:24:38 +00:00
|
|
|
"--memory",
|
|
|
|
"size=512M,hotplug_method=virtio-mem,hotplug_size=8192M",
|
|
|
|
])
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--kernel", kernel_path.to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--api-socket", &api_socket])
|
2022-01-06 22:24:38 +00:00
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
|
2020-09-01 20:03:49 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
guest.enable_memory_hotplug();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Add RAM to the VM
|
|
|
|
let desired_ram = 1024 << 20;
|
2022-05-12 19:51:13 +00:00
|
|
|
resize_command(&api_socket, None, Some(desired_ram), None, None);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 960_000);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Add RAM to the VM
|
|
|
|
let desired_ram = 2048 << 20;
|
2022-05-12 19:51:13 +00:00
|
|
|
resize_command(&api_socket, None, Some(desired_ram), None, None);
|
2020-09-01 20:03:49 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 1_920_000);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Remove RAM from the VM
|
|
|
|
let desired_ram = 1024 << 20;
|
2022-05-12 19:51:13 +00:00
|
|
|
resize_command(&api_socket, None, Some(desired_ram), None, None);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 960_000);
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() < 1_920_000);
|
2020-07-13 05:05:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
guest.reboot_linux(0, None);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check the amount of memory after reboot is 1GiB
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 960_000);
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() < 1_920_000);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check we can still resize to 512MiB
|
|
|
|
let desired_ram = 512 << 20;
|
2022-05-12 19:51:13 +00:00
|
|
|
resize_command(&api_socket, None, Some(desired_ram), None, None);
|
2022-01-06 22:24:38 +00:00
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() < 960_000);
|
|
|
|
});
|
2020-10-27 05:37:25 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
#[cfg(not(feature = "mshv"))]
|
|
|
|
// Test both vCPU and memory resizing together
|
|
|
|
fn test_resize() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
let api_socket = temp_api_path(&guest.tmp_dir);
|
2020-03-17 05:59:58 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let kernel_path = direct_kernel_boot_path();
|
2020-03-17 05:59:58 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=2,max=4"])
|
|
|
|
.args(["--memory", "size=512M,hotplug_size=8192M"])
|
|
|
|
.args(["--kernel", kernel_path.to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--api-socket", &api_socket])
|
2022-01-06 22:24:38 +00:00
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2020-03-17 05:59:58 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
2020-03-17 05:59:58 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2);
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
|
2020-03-17 05:59:58 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
guest.enable_memory_hotplug();
|
2020-03-17 05:59:58 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Resize the VM
|
|
|
|
let desired_vcpus = 4;
|
|
|
|
let desired_ram = 1024 << 20;
|
2022-05-12 19:51:13 +00:00
|
|
|
resize_command(
|
|
|
|
&api_socket,
|
|
|
|
Some(desired_vcpus),
|
|
|
|
Some(desired_ram),
|
|
|
|
None,
|
|
|
|
None,
|
|
|
|
);
|
2020-09-01 20:03:49 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
guest
|
|
|
|
.ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu2/online")
|
|
|
|
.unwrap();
|
|
|
|
guest
|
|
|
|
.ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu3/online")
|
|
|
|
.unwrap();
|
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
|
|
|
assert_eq!(
|
|
|
|
guest.get_cpu_count().unwrap_or_default(),
|
|
|
|
u32::from(desired_vcpus)
|
|
|
|
);
|
2020-09-01 20:03:49 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 960_000);
|
|
|
|
});
|
2020-03-17 05:59:58 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2020-07-13 05:05:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2020-03-17 05:59:58 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_memory_overhead() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
2020-03-17 05:59:58 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let kernel_path = direct_kernel_boot_path();
|
2020-10-27 05:37:25 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let guest_memory_size_kb = 512 * 1024;
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=1"])
|
2022-12-14 11:41:15 +00:00
|
|
|
.args(["--memory", format!("size={guest_memory_size_kb}K").as_str()])
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--kernel", kernel_path.to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
2023-09-07 14:53:15 +00:00
|
|
|
.default_net()
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2023-09-07 14:53:15 +00:00
|
|
|
guest.wait_vm_boot(None).unwrap();
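// get_vmm_overhead() measures (roughly) how much memory the VMM process
// consumes on top of the configured guest RAM; the result must stay under
// the fixed MAXIMUM_VMM_OVERHEAD_KB budget.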
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
let overhead = get_vmm_overhead(child.id(), guest_memory_size_kb);
|
2022-12-14 11:41:15 +00:00
|
|
|
eprintln!("Guest memory overhead: {overhead} vs {MAXIMUM_VMM_OVERHEAD_KB}");
|
2022-01-06 22:24:38 +00:00
|
|
|
assert!(overhead <= MAXIMUM_VMM_OVERHEAD_KB);
|
|
|
|
});
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2020-03-31 10:13:01 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2020-03-31 10:13:01 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_disk_hotplug() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
let kernel_path = direct_kernel_boot_path();
|
|
|
|
#[cfg(target_arch = "aarch64")]
|
|
|
|
let kernel_path = edk2_path();
|
2020-03-31 10:13:01 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let api_socket = temp_api_path(&guest.tmp_dir);
|
2020-03-31 10:13:01 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--api-socket", &api_socket])
|
|
|
|
.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", kernel_path.to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2020-03-31 10:13:01 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check /dev/vdc is not there
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("lsblk | grep -c vdc.*16M || true")
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or(1),
|
|
|
|
0
|
|
|
|
);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Now let's add the extra disk.
|
|
|
|
let mut blk_file_path = dirs::home_dir().unwrap();
|
|
|
|
blk_file_path.push("workloads");
|
|
|
|
blk_file_path.push("blk.img");
|
|
|
|
let (cmd_success, cmd_output) = remote_command_w_output(
|
|
|
|
&api_socket,
|
|
|
|
"add-disk",
|
|
|
|
Some(format!("path={},id=test0", blk_file_path.to_str().unwrap()).as_str()),
|
|
|
|
);
|
|
|
|
assert!(cmd_success);
|
|
|
|
assert!(String::from_utf8_lossy(&cmd_output)
|
|
|
|
.contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}"));
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
2020-05-07 13:04:23 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check that /dev/vdc exists and the block size is 16M.
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("lsblk | grep vdc | grep -c 16M")
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or_default(),
|
|
|
|
1
|
|
|
|
);
|
|
|
|
// And check the block device can be read.
|
|
|
|
guest
|
|
|
|
.ssh_command("sudo dd if=/dev/vdc of=/dev/null bs=1M iflag=direct count=16")
|
|
|
|
.unwrap();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Let's remove the extra disk.
|
|
|
|
assert!(remote_command(&api_socket, "remove-device", Some("test0")));
|
|
|
|
thread::sleep(std::time::Duration::new(5, 0));
|
|
|
|
// And check /dev/vdc is not there
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("lsblk | grep -c vdc.*16M || true")
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or(1),
|
|
|
|
0
|
|
|
|
);
|
2020-05-07 13:04:23 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// And add it back to validate that the unplug worked correctly.
|
|
|
|
let (cmd_success, cmd_output) = remote_command_w_output(
|
|
|
|
&api_socket,
|
|
|
|
"add-disk",
|
|
|
|
Some(format!("path={},id=test0", blk_file_path.to_str().unwrap()).as_str()),
|
|
|
|
);
|
|
|
|
assert!(cmd_success);
|
|
|
|
assert!(String::from_utf8_lossy(&cmd_output)
|
|
|
|
.contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}"));
|
2020-09-01 20:03:49 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
2020-09-01 20:03:49 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check that /dev/vdc exists and the block size is 16M.
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("lsblk | grep vdc | grep -c 16M")
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or_default(),
|
|
|
|
1
|
|
|
|
);
|
|
|
|
// And check the block device can be read.
|
|
|
|
guest
|
|
|
|
.ssh_command("sudo dd if=/dev/vdc of=/dev/null bs=1M iflag=direct count=16")
|
|
|
|
.unwrap();
|
2020-05-07 13:04:23 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Reboot the VM.
|
|
|
|
guest.reboot_linux(0, None);
|
2020-07-13 05:05:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check still there after reboot
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("lsblk | grep vdc | grep -c 16M")
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or_default(),
|
|
|
|
1
|
|
|
|
);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
assert!(remote_command(&api_socket, "remove-device", Some("test0")));
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
thread::sleep(std::time::Duration::new(20, 0));
|
2020-10-27 05:37:25 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check device has gone away
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("lsblk | grep -c vdc.*16M || true")
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or(1),
|
|
|
|
0
|
|
|
|
);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
guest.reboot_linux(1, None);
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check device still absent
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("lsblk | grep -c vdc.*16M || true")
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or(1),
|
|
|
|
0
|
|
|
|
);
|
|
|
|
});
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2020-02-25 09:42:15 +00:00
|
|
|
|
2022-08-09 16:06:38 +00:00
|
|
|
fn create_loop_device(backing_file_path: &str, block_size: u32, num_retries: usize) -> String {
|
|
|
|
const LOOP_CONFIGURE: u64 = 0x4c0a;
|
|
|
|
const LOOP_CTL_GET_FREE: u64 = 0x4c82;
|
|
|
|
const LOOP_CTL_PATH: &str = "/dev/loop-control";
|
|
|
|
const LOOP_DEVICE_PREFIX: &str = "/dev/loop";
|
|
|
|
|
|
|
|
#[repr(C)]
|
|
|
|
struct LoopInfo64 {
|
|
|
|
lo_device: u64,
|
|
|
|
lo_inode: u64,
|
|
|
|
lo_rdevice: u64,
|
|
|
|
lo_offset: u64,
|
|
|
|
lo_sizelimit: u64,
|
|
|
|
lo_number: u32,
|
|
|
|
lo_encrypt_type: u32,
|
|
|
|
lo_encrypt_key_size: u32,
|
|
|
|
lo_flags: u32,
|
|
|
|
lo_file_name: [u8; 64],
|
|
|
|
lo_crypt_name: [u8; 64],
|
|
|
|
lo_encrypt_key: [u8; 32],
|
|
|
|
lo_init: [u64; 2],
|
|
|
|
}
|
|
|
|
|
|
|
|
impl Default for LoopInfo64 {
|
|
|
|
fn default() -> Self {
|
|
|
|
LoopInfo64 {
|
|
|
|
lo_device: 0,
|
|
|
|
lo_inode: 0,
|
|
|
|
lo_rdevice: 0,
|
|
|
|
lo_offset: 0,
|
|
|
|
lo_sizelimit: 0,
|
|
|
|
lo_number: 0,
|
|
|
|
lo_encrypt_type: 0,
|
|
|
|
lo_encrypt_key_size: 0,
|
|
|
|
lo_flags: 0,
|
|
|
|
lo_file_name: [0; 64],
|
|
|
|
lo_crypt_name: [0; 64],
|
|
|
|
lo_encrypt_key: [0; 32],
|
|
|
|
lo_init: [0; 2],
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#[derive(Default)]
|
|
|
|
#[repr(C)]
|
|
|
|
struct LoopConfig {
|
|
|
|
fd: u32,
|
|
|
|
block_size: u32,
|
|
|
|
info: LoopInfo64,
|
|
|
|
_reserved: [u64; 8],
|
|
|
|
}
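// The flow below mirrors what losetup does under the hood: ask
// /dev/loop-control for a free device number (LOOP_CTL_GET_FREE), open the
// resulting /dev/loopN node, then attach the backing file and set the
// logical block size in a single LOOP_CONFIGURE ioctl, retrying a few
// times since the ioctl can fail transiently.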
|
|
|
|
|
|
|
|
// Open loop-control device
|
|
|
|
let loop_ctl_file = OpenOptions::new()
|
|
|
|
.read(true)
|
|
|
|
.write(true)
|
|
|
|
.open(LOOP_CTL_PATH)
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
// Request a free loop device
|
2023-06-27 14:26:12 +00:00
|
|
|
let loop_device_number =
|
|
|
|
unsafe { libc::ioctl(loop_ctl_file.as_raw_fd(), LOOP_CTL_GET_FREE as _) };
|
|
|
|
|
2022-08-09 16:06:38 +00:00
|
|
|
if loop_device_number < 0 {
|
|
|
|
panic!("Couldn't find a free loop device");
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create loop device path
|
2022-12-14 11:41:15 +00:00
|
|
|
let loop_device_path = format!("{LOOP_DEVICE_PREFIX}{loop_device_number}");
|
2022-08-09 16:06:38 +00:00
|
|
|
|
|
|
|
// Open loop device
|
|
|
|
let loop_device_file = OpenOptions::new()
|
|
|
|
.read(true)
|
|
|
|
.write(true)
|
|
|
|
.open(&loop_device_path)
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
// Open backing file
|
|
|
|
let backing_file = OpenOptions::new()
|
|
|
|
.read(true)
|
|
|
|
.write(true)
|
|
|
|
.open(backing_file_path)
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
let loop_config = LoopConfig {
|
|
|
|
fd: backing_file.as_raw_fd() as u32,
|
|
|
|
block_size,
|
|
|
|
..Default::default()
|
|
|
|
};
|
|
|
|
|
|
|
|
for i in 0..num_retries {
|
|
|
|
let ret = unsafe {
|
|
|
|
libc::ioctl(
|
|
|
|
loop_device_file.as_raw_fd(),
|
2023-06-27 14:26:12 +00:00
|
|
|
LOOP_CONFIGURE as _,
|
2022-08-09 16:06:38 +00:00
|
|
|
&loop_config,
|
|
|
|
)
|
|
|
|
};
|
|
|
|
if ret != 0 {
|
|
|
|
if i < num_retries - 1 {
|
|
|
|
println!(
|
|
|
|
"Iteration {}: Failed to configure the loop device {}: {}",
|
|
|
|
i,
|
|
|
|
loop_device_path,
|
|
|
|
std::io::Error::last_os_error()
|
|
|
|
);
|
|
|
|
} else {
|
|
|
|
panic!(
|
|
|
|
"Failed {} times trying to configure the loop device {}: {}",
|
|
|
|
num_retries,
|
|
|
|
loop_device_path,
|
|
|
|
std::io::Error::last_os_error()
|
|
|
|
);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Wait for a bit before retrying
|
|
|
|
thread::sleep(std::time::Duration::new(5, 0));
|
|
|
|
}
|
|
|
|
|
|
|
|
loop_device_path
|
|
|
|
}
|
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
fn test_virtio_block_topology() {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));

    let kernel_path = direct_kernel_boot_path();
    let test_disk_path = guest.tmp_dir.as_path().join("test.img");

    let output = exec_host_command_output(
        format!(
            "qemu-img create -f raw {} 16M",
            test_disk_path.to_str().unwrap()
        )
        .as_str(),
    );
    if !output.status.success() {
        let stdout = String::from_utf8_lossy(&output.stdout);
        let stderr = String::from_utf8_lossy(&output.stderr);
        panic!("qemu-img command failed\nstdout\n{stdout}\nstderr\n{stderr}");
    }

    let loop_dev = create_loop_device(test_disk_path.to_str().unwrap(), 4096, 5);

    let mut child = GuestCommand::new(&guest)
        .args(["--cpus", "boot=1"])
        .args(["--memory", "size=512M"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .args([
            "--disk",
            format!(
                "path={}",
                guest.disk_config.disk(DiskType::OperatingSystem).unwrap()
            )
            .as_str(),
            format!(
                "path={}",
                guest.disk_config.disk(DiskType::CloudInit).unwrap()
            )
            .as_str(),
            format!("path={}", &loop_dev).as_str(),
        ])
        .default_net()
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        // MIN-IO column
        assert_eq!(
            guest
                .ssh_command("lsblk -t| grep vdc | awk '{print $3}'")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            4096
        );
        // PHY-SEC column
        assert_eq!(
            guest
                .ssh_command("lsblk -t| grep vdc | awk '{print $5}'")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            4096
        );
        // LOG-SEC column
        assert_eq!(
            guest
                .ssh_command("lsblk -t| grep vdc | awk '{print $6}'")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            4096
        );
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);

    Command::new("losetup")
        .args(["-d", &loop_dev])
        .output()
        .expect("loop device not found");
}

#[test]
fn test_virtio_balloon_deflate_on_oom() {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));

    let kernel_path = direct_kernel_boot_path();

    let api_socket = temp_api_path(&guest.tmp_dir);

    // Let's start a 4G guest with the balloon occupying 2G of memory
    let mut child = GuestCommand::new(&guest)
        .args(["--api-socket", &api_socket])
        .args(["--cpus", "boot=1"])
        .args(["--memory", "size=4G"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .args(["--balloon", "size=2G,deflate_on_oom=on"])
        .default_disks()
        .default_net()
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        // Wait for the balloon memory to initialize and check its size.
        // The virtio-balloon driver might take a few seconds to report the
        // balloon effective size back to the VMM.
        thread::sleep(std::time::Duration::new(20, 0));

        let orig_balloon = balloon_size(&api_socket);
        println!("The original balloon memory size is {orig_balloon} bytes");
        assert!(orig_balloon == 2147483648);

        // Two steps to verify if the 'deflate_on_oom' parameter works.
        // 1st: run a command to trigger an OOM in the guest.
        guest
            .ssh_command("echo f | sudo tee /proc/sysrq-trigger")
            .unwrap();

        // Give some time for the OOM to happen in the guest and be reported
        // back to the host.
        thread::sleep(std::time::Duration::new(20, 0));

        // 2nd: check the balloon size to verify the balloon has been automatically deflated
        let deflated_balloon = balloon_size(&api_socket);
        println!("After deflating, balloon memory size is {deflated_balloon} bytes");
        // Verify the balloon has deflated
        assert!(deflated_balloon < 2147483648);
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

#[test]
#[cfg(not(feature = "mshv"))]
fn test_virtio_balloon_free_page_reporting() {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));

    // Let's start a 4G guest with free page reporting enabled on the balloon device
    let mut child = GuestCommand::new(&guest)
        .args(["--cpus", "boot=1"])
        .args(["--memory", "size=4G"])
        .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .args(["--balloon", "size=0,free_page_reporting=on"])
        .default_disks()
        .default_net()
        .capture_output()
        .spawn()
        .unwrap();

    let pid = child.id();
    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        // Check the initial RSS is less than 1GiB
        let rss = process_rss_kib(pid);
        println!("RSS {rss} < 1048576");
        assert!(rss < 1048576);

        // Spawn a command inside the guest to consume 2GiB of RAM for 60
        // seconds
        let guest_ip = guest.network.guest_ip.clone();
        thread::spawn(move || {
            ssh_command_ip(
                "stress --vm 1 --vm-bytes 2G --vm-keep --timeout 60",
                &guest_ip,
                DEFAULT_SSH_RETRIES,
                DEFAULT_SSH_TIMEOUT,
            )
            .unwrap();
        });

        // Wait for 50 seconds to make sure the stress command is consuming
        // the expected amount of memory.
        thread::sleep(std::time::Duration::new(50, 0));
        let rss = process_rss_kib(pid);
        println!("RSS {rss} >= 2097152");
        assert!(rss >= 2097152);

        // Wait for an extra minute to make sure the stress command has
        // completed and that the guest reported the free pages to the VMM
        // through the virtio-balloon device. We expect the RSS to be under
        // 2GiB.
        thread::sleep(std::time::Duration::new(60, 0));
        let rss = process_rss_kib(pid);
        println!("RSS {rss} < 2097152");
        assert!(rss < 2097152);
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

#[test]
fn test_pmem_hotplug() {
    _test_pmem_hotplug(None)
}

#[test]
fn test_pmem_multi_segment_hotplug() {
    _test_pmem_hotplug(Some(15))
}

fn _test_pmem_hotplug(pci_segment: Option<u16>) {
    #[cfg(target_arch = "aarch64")]
    let focal_image = FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string();
    #[cfg(target_arch = "x86_64")]
    let focal_image = FOCAL_IMAGE_NAME.to_string();
    let focal = UbuntuDiskConfig::new(focal_image);
    let guest = Guest::new(Box::new(focal));

    #[cfg(target_arch = "x86_64")]
    let kernel_path = direct_kernel_boot_path();
    #[cfg(target_arch = "aarch64")]
    let kernel_path = edk2_path();

    let api_socket = temp_api_path(&guest.tmp_dir);

    let mut cmd = GuestCommand::new(&guest);

    cmd.args(["--api-socket", &api_socket])
        .args(["--cpus", "boot=1"])
        .args(["--memory", "size=512M"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .default_net()
        .capture_output();

    if pci_segment.is_some() {
        cmd.args([
            "--platform",
            &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"),
        ]);
    }

    let mut child = cmd.spawn().unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        // Check /dev/pmem0 is not there
        assert_eq!(
            guest
                .ssh_command("lsblk | grep -c pmem0 || true")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or(1),
            0
        );

        let pmem_temp_file = TempFile::new().unwrap();
        pmem_temp_file.as_file().set_len(128 << 20).unwrap();
        let (cmd_success, cmd_output) = remote_command_w_output(
            &api_socket,
            "add-pmem",
            Some(&format!(
                "file={},id=test0{}",
                pmem_temp_file.as_path().to_str().unwrap(),
                if let Some(pci_segment) = pci_segment {
                    format!(",pci_segment={pci_segment}")
                } else {
                    "".to_owned()
                }
            )),
        );
        assert!(cmd_success);
        if let Some(pci_segment) = pci_segment {
            assert!(String::from_utf8_lossy(&cmd_output).contains(&format!(
                "{{\"id\":\"test0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}"
            )));
        } else {
            assert!(String::from_utf8_lossy(&cmd_output)
                .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}"));
        }

        // Check that /dev/pmem0 exists and the block size is 128M
        assert_eq!(
            guest
                .ssh_command("lsblk | grep pmem0 | grep -c 128M")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            1
        );

        guest.reboot_linux(0, None);

        // Check still there after reboot
        assert_eq!(
            guest
                .ssh_command("lsblk | grep pmem0 | grep -c 128M")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            1
        );

        assert!(remote_command(&api_socket, "remove-device", Some("test0")));

        thread::sleep(std::time::Duration::new(20, 0));

        // Check device has gone away
        assert_eq!(
            guest
                .ssh_command("lsblk | grep -c pmem0.*128M || true")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or(1),
            0
        );

        guest.reboot_linux(1, None);

        // Check still absent after reboot
        assert_eq!(
            guest
                .ssh_command("lsblk | grep -c pmem0.*128M || true")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or(1),
            0
        );
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

#[test]
fn test_net_hotplug() {
    _test_net_hotplug(None)
}

#[test]
fn test_net_multi_segment_hotplug() {
    _test_net_hotplug(Some(15))
}

fn _test_net_hotplug(pci_segment: Option<u16>) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));

    #[cfg(target_arch = "x86_64")]
    let kernel_path = direct_kernel_boot_path();
    #[cfg(target_arch = "aarch64")]
    let kernel_path = edk2_path();

    let api_socket = temp_api_path(&guest.tmp_dir);

    // Boot without network
    let mut cmd = GuestCommand::new(&guest);

    cmd.args(["--api-socket", &api_socket])
        .args(["--cpus", "boot=1"])
        .args(["--memory", "size=512M"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .capture_output();

    if pci_segment.is_some() {
        cmd.args([
            "--platform",
            &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"),
        ]);
    }

    let mut child = cmd.spawn().unwrap();

    thread::sleep(std::time::Duration::new(20, 0));

    let r = std::panic::catch_unwind(|| {
        // Add network
        let (cmd_success, cmd_output) = remote_command_w_output(
            &api_socket,
            "add-net",
            Some(
                format!(
                    "{}{},id=test0",
                    guest.default_net_string(),
                    if let Some(pci_segment) = pci_segment {
                        format!(",pci_segment={pci_segment}")
                    } else {
                        "".to_owned()
                    }
                )
                .as_str(),
            ),
        );
        assert!(cmd_success);

        if let Some(pci_segment) = pci_segment {
            assert!(String::from_utf8_lossy(&cmd_output).contains(&format!(
                "{{\"id\":\"test0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}"
            )));
        } else {
            assert!(String::from_utf8_lossy(&cmd_output)
                .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:05.0\"}"));
        }

        thread::sleep(std::time::Duration::new(5, 0));

        // 1 network interface + the default loopback ==> 2 interfaces
        assert_eq!(
            guest
                .ssh_command("ip -o link | wc -l")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            2
        );

        // Remove network
        assert!(remote_command(&api_socket, "remove-device", Some("test0"),));
        thread::sleep(std::time::Duration::new(5, 0));

        let (cmd_success, cmd_output) = remote_command_w_output(
            &api_socket,
            "add-net",
            Some(
                format!(
                    "{}{},id=test1",
                    guest.default_net_string(),
                    if let Some(pci_segment) = pci_segment {
                        format!(",pci_segment={pci_segment}")
                    } else {
                        "".to_owned()
                    }
                )
                .as_str(),
            ),
        );
        assert!(cmd_success);

        if let Some(pci_segment) = pci_segment {
            assert!(String::from_utf8_lossy(&cmd_output).contains(&format!(
                "{{\"id\":\"test1\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}"
            )));
        } else {
            assert!(String::from_utf8_lossy(&cmd_output)
                .contains("{\"id\":\"test1\",\"bdf\":\"0000:00:05.0\"}"));
        }

        thread::sleep(std::time::Duration::new(5, 0));

        // 1 network interface + the default loopback ==> 2 interfaces
        assert_eq!(
            guest
                .ssh_command("ip -o link | wc -l")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            2
        );

        guest.reboot_linux(0, None);

        // Check still there after reboot
        // 1 network interface + the default loopback ==> 2 interfaces
        assert_eq!(
            guest
                .ssh_command("ip -o link | wc -l")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            2
        );
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

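// The initramfs test below boots each available kernel with an Alpine initramfs
// and verifies that the TEST_STRING passed on the kernel command line is echoed
// back on the guest console.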
#[test]
fn test_initramfs() {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let mut workload_path = dirs::home_dir().unwrap();
    workload_path.push("workloads");

    #[cfg(target_arch = "x86_64")]
    let mut kernels = vec![direct_kernel_boot_path()];
    #[cfg(target_arch = "aarch64")]
    let kernels = [direct_kernel_boot_path()];

    #[cfg(target_arch = "x86_64")]
    {
        let mut pvh_kernel_path = workload_path.clone();
        pvh_kernel_path.push("vmlinux");
        kernels.push(pvh_kernel_path);
    }

    let mut initramfs_path = workload_path;
    initramfs_path.push("alpine_initramfs.img");

    let test_string = String::from("axz34i9rylotd8n50wbv6kcj7f2qushme1pg");
    let cmdline = format!("console=hvc0 quiet TEST_STRING={test_string}");

    kernels.iter().for_each(|k_path| {
        let mut child = GuestCommand::new(&guest)
            .args(["--kernel", k_path.to_str().unwrap()])
            .args(["--initramfs", initramfs_path.to_str().unwrap()])
            .args(["--cmdline", &cmdline])
            .capture_output()
            .spawn()
            .unwrap();

        thread::sleep(std::time::Duration::new(20, 0));

        let _ = child.kill();
        let output = child.wait_with_output().unwrap();

        let r = std::panic::catch_unwind(|| {
            let s = String::from_utf8_lossy(&output.stdout);

            assert_ne!(s.lines().position(|line| line == test_string), None);
        });

        handle_child_output(r, &output);
    });
}

// One thing to note about this test: the virtio-net device is heavily exercised
// through each SSH command, so there is no need for a dedicated test to verify
// that the migration went well for virtio-net.
#[test]
#[cfg(not(feature = "mshv"))]
fn test_snapshot_restore_hotplug_virtiomem() {
    _test_snapshot_restore(true);
}

#[test]
fn test_snapshot_restore_basic() {
    _test_snapshot_restore(false);
}

fn _test_snapshot_restore(use_hotplug: bool) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let kernel_path = direct_kernel_boot_path();

    let api_socket_source = format!("{}.1", temp_api_path(&guest.tmp_dir));

    let net_id = "net123";
    let net_params = format!(
        "id={},tap=,mac={},ip={},mask=255.255.255.0",
        net_id, guest.network.guest_mac, guest.network.host_ip
    );
    let mut mem_params = "size=4G";

    if use_hotplug {
        mem_params = "size=4G,hotplug_method=virtio-mem,hotplug_size=32G"
    }

    let cloudinit_params = format!(
        "path={},iommu=on",
        guest.disk_config.disk(DiskType::CloudInit).unwrap()
    );

    let socket = temp_vsock_path(&guest.tmp_dir);
    let event_path = temp_event_monitor_path(&guest.tmp_dir);

    let mut child = GuestCommand::new(&guest)
        .args(["--api-socket", &api_socket_source])
        .args(["--event-monitor", format!("path={event_path}").as_str()])
        .args(["--cpus", "boot=4"])
        .args(["--memory", mem_params])
        .args(["--balloon", "size=0"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args([
            "--disk",
            format!(
                "path={}",
                guest.disk_config.disk(DiskType::OperatingSystem).unwrap()
            )
            .as_str(),
            cloudinit_params.as_str(),
        ])
        .args(["--net", net_params.as_str()])
        .args(["--vsock", format!("cid=3,socket={socket}").as_str()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .capture_output()
        .spawn()
        .unwrap();

    let console_text = String::from("On a branch floating down river a cricket, singing.");
    // Create the snapshot directory
    let snapshot_dir = temp_snapshot_dir_path(&guest.tmp_dir);

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        // Check the number of vCPUs
        assert_eq!(guest.get_cpu_count().unwrap_or_default(), 4);
        // Check the guest RAM
        assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);
        if use_hotplug {
            // Increase guest RAM with virtio-mem
            resize_command(
                &api_socket_source,
                None,
                Some(6 << 30),
                None,
                Some(&event_path),
            );
            thread::sleep(std::time::Duration::new(5, 0));
            assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000);
            // Use balloon to remove RAM from the VM
            resize_command(
                &api_socket_source,
                None,
                None,
                Some(1 << 30),
                Some(&event_path),
            );
            thread::sleep(std::time::Duration::new(5, 0));
            let total_memory = guest.get_total_memory().unwrap_or_default();
            assert!(total_memory > 4_800_000);
            assert!(total_memory < 5_760_000);
        }
        // Check the guest virtio-devices, e.g. block, rng, vsock, console, and net
        guest.check_devices_common(Some(&socket), Some(&console_text), None);

        // x86_64: We check that removing and adding back the virtio-net device
        // does not break the snapshot/restore support for virtio-pci.
        // This is an important thing to test as the hotplug will
        // trigger a PCI BAR reprogramming, which is a good way of
        // checking if the stored resources are correctly restored.
        // Unplug the virtio-net device
        // AArch64: Device hotplug is currently not supported, skipping here.
        #[cfg(target_arch = "x86_64")]
        {
            assert!(remote_command(
                &api_socket_source,
                "remove-device",
                Some(net_id),
            ));
            thread::sleep(std::time::Duration::new(10, 0));
            let latest_events = [&MetaEvent {
                event: "device-removed".to_string(),
                device_id: Some(net_id.to_string()),
            }];
            // See: #5938
            thread::sleep(std::time::Duration::new(1, 0));
            assert!(check_latest_events_exact(&latest_events, &event_path));

            // Plug the virtio-net device again
            assert!(remote_command(
                &api_socket_source,
                "add-net",
                Some(net_params.as_str()),
            ));
            thread::sleep(std::time::Duration::new(10, 0));
        }

        // Pause the VM
        assert!(remote_command(&api_socket_source, "pause", None));
        let latest_events = [
            &MetaEvent {
                event: "pausing".to_string(),
                device_id: None,
            },
            &MetaEvent {
                event: "paused".to_string(),
                device_id: None,
            },
        ];
        // See: #5938
        thread::sleep(std::time::Duration::new(1, 0));
        assert!(check_latest_events_exact(&latest_events, &event_path));

        // Take a snapshot from the VM
        assert!(remote_command(
            &api_socket_source,
            "snapshot",
            Some(format!("file://{snapshot_dir}").as_str()),
        ));

        // Wait to make sure the snapshot is completed
        thread::sleep(std::time::Duration::new(10, 0));

        let latest_events = [
            &MetaEvent {
                event: "snapshotting".to_string(),
                device_id: None,
            },
            &MetaEvent {
                event: "snapshotted".to_string(),
                device_id: None,
            },
        ];
        // See: #5938
        thread::sleep(std::time::Duration::new(1, 0));
        assert!(check_latest_events_exact(&latest_events, &event_path));
    });

    // Shutdown the source VM and check console output
    let _ = child.kill();
    let output = child.wait_with_output().unwrap();
    handle_child_output(r, &output);

    let r = std::panic::catch_unwind(|| {
        assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text));
    });

    handle_child_output(r, &output);

    // Remove the vsock socket file.
    Command::new("rm")
        .arg("-f")
        .arg(socket.as_str())
        .output()
        .unwrap();

    let api_socket_restored = format!("{}.2", temp_api_path(&guest.tmp_dir));
    let event_path_restored = format!("{}.2", temp_event_monitor_path(&guest.tmp_dir));

    // Restore the VM from the snapshot
    let mut child = GuestCommand::new(&guest)
        .args(["--api-socket", &api_socket_restored])
        .args([
            "--event-monitor",
            format!("path={event_path_restored}").as_str(),
        ])
        .args([
            "--restore",
            format!("source_url=file://{snapshot_dir}").as_str(),
        ])
        .capture_output()
        .spawn()
        .unwrap();

    // Wait for the VM to be restored
    thread::sleep(std::time::Duration::new(20, 0));
    let expected_events = [
        &MetaEvent {
            event: "starting".to_string(),
            device_id: None,
        },
        &MetaEvent {
            event: "activated".to_string(),
            device_id: Some("__console".to_string()),
        },
        &MetaEvent {
            event: "activated".to_string(),
            device_id: Some("__rng".to_string()),
        },
        &MetaEvent {
            event: "restoring".to_string(),
            device_id: None,
        },
    ];
    assert!(check_sequential_events(
        &expected_events,
        &event_path_restored
    ));
    let latest_events = [&MetaEvent {
        event: "restored".to_string(),
        device_id: None,
    }];
    assert!(check_latest_events_exact(
        &latest_events,
        &event_path_restored
    ));

    let r = std::panic::catch_unwind(|| {
        // Resume the VM
        assert!(remote_command(&api_socket_restored, "resume", None));
        // There is no way that we can ensure the 'write()' to the
        // event file is completed when the 'resume' request is
        // returned successfully, because the 'write()' was done
        // asynchronously from a different thread of Cloud
        // Hypervisor (e.g. the event-monitor thread).
        thread::sleep(std::time::Duration::new(1, 0));
        let latest_events = [
            &MetaEvent {
                event: "resuming".to_string(),
                device_id: None,
            },
            &MetaEvent {
                event: "resumed".to_string(),
                device_id: None,
            },
        ];
        assert!(check_latest_events_exact(
            &latest_events,
            &event_path_restored
        ));

        // Perform same checks to validate VM has been properly restored
        assert_eq!(guest.get_cpu_count().unwrap_or_default(), 4);
        let total_memory = guest.get_total_memory().unwrap_or_default();
        if !use_hotplug {
            assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);
        } else {
            assert!(total_memory > 4_800_000);
            assert!(total_memory < 5_760_000);
            // Deflate balloon to restore entire RAM to the VM
            resize_command(&api_socket_restored, None, None, Some(0), None);
            thread::sleep(std::time::Duration::new(5, 0));
            assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000);
            // Decrease guest RAM with virtio-mem
            resize_command(&api_socket_restored, None, Some(5 << 30), None, None);
            thread::sleep(std::time::Duration::new(5, 0));
            let total_memory = guest.get_total_memory().unwrap_or_default();
            assert!(total_memory > 4_800_000);
            assert!(total_memory < 5_760_000);
        }

        guest.check_devices_common(Some(&socket), Some(&console_text), None);
    });
    // Shutdown the target VM and check console output
    let _ = child.kill();
    let output = child.wait_with_output().unwrap();
    handle_child_output(r, &output);

    let r = std::panic::catch_unwind(|| {
        assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text));
    });

    handle_child_output(r, &output);
}

#[test]
fn test_counters() {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let api_socket = temp_api_path(&guest.tmp_dir);

    let mut cmd = GuestCommand::new(&guest);
    cmd.args(["--cpus", "boot=1"])
        .args(["--memory", "size=512M"])
        .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .args(["--net", guest.default_net_string().as_str()])
        .args(["--api-socket", &api_socket])
        .capture_output();

    let mut child = cmd.spawn().unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        let orig_counters = get_counters(&api_socket);
        guest
            .ssh_command("dd if=/dev/zero of=test count=8 bs=1M")
            .unwrap();

        let new_counters = get_counters(&api_socket);

        // Check that all the counters have increased
        assert!(new_counters > orig_counters);
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

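// Guest coredump test: pause the VM, request a coredump through the API socket,
// then run readelf on the generated vmcore to confirm there is one CORE note and
// one QEMU note per vCPU.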
#[test]
#[cfg(feature = "guest_debug")]
fn test_coredump() {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let api_socket = temp_api_path(&guest.tmp_dir);

    let mut cmd = GuestCommand::new(&guest);
    cmd.args(["--cpus", "boot=4"])
        .args(["--memory", "size=4G"])
        .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()])
        .default_disks()
        .args(["--net", guest.default_net_string().as_str()])
        .args(["--api-socket", &api_socket])
        .capture_output();

    let mut child = cmd.spawn().unwrap();
    let vmcore_file = temp_vmcore_file_path(&guest.tmp_dir);

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        assert!(remote_command(&api_socket, "pause", None));

        assert!(remote_command(
            &api_socket,
            "coredump",
            Some(format!("file://{vmcore_file}").as_str()),
        ));

        // The number of CORE notes should equal the number of vCPUs
        let readelf_core_num_cmd =
            format!("readelf --all {vmcore_file} |grep CORE |grep -v Type |wc -l");
        let core_num_in_elf = exec_host_command_output(&readelf_core_num_cmd);
        assert_eq!(String::from_utf8_lossy(&core_num_in_elf.stdout).trim(), "4");

        // The number of QEMU notes should equal the number of vCPUs
        let readelf_vmm_num_cmd = format!("readelf --all {vmcore_file} |grep QEMU |wc -l");
        let vmm_num_in_elf = exec_host_command_output(&readelf_vmm_num_cmd);
        assert_eq!(String::from_utf8_lossy(&vmm_num_in_elf.stdout).trim(), "4");
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

#[test]
#[cfg(feature = "guest_debug")]
fn test_coredump_no_pause() {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let api_socket = temp_api_path(&guest.tmp_dir);

    let mut cmd = GuestCommand::new(&guest);
    cmd.args(["--cpus", "boot=4"])
        .args(["--memory", "size=4G"])
        .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()])
        .default_disks()
        .args(["--net", guest.default_net_string().as_str()])
        .args(["--api-socket", &api_socket])
        .capture_output();

    let mut child = cmd.spawn().unwrap();
    let vmcore_file = temp_vmcore_file_path(&guest.tmp_dir);

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        assert!(remote_command(
            &api_socket,
            "coredump",
            Some(format!("file://{vmcore_file}").as_str()),
        ));

        assert_eq!(vm_state(&api_socket), "Running");
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

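// Watchdog test: enable the guest watchdog, verify that a normal reboot and an
// idle period do not trigger it, then force a guest panic and check that the
// watchdog reboots the VM.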
#[test]
fn test_watchdog() {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let api_socket = temp_api_path(&guest.tmp_dir);

    let kernel_path = direct_kernel_boot_path();
    let event_path = temp_event_monitor_path(&guest.tmp_dir);

    let mut cmd = GuestCommand::new(&guest);
    cmd.args(["--cpus", "boot=1"])
        .args(["--memory", "size=512M"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .args(["--net", guest.default_net_string().as_str()])
        .args(["--watchdog"])
        .args(["--api-socket", &api_socket])
        .args(["--event-monitor", format!("path={event_path}").as_str()])
        .capture_output();

    let mut child = cmd.spawn().unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        let mut expected_reboot_count = 1;

        // Enable the watchdog with a 15s timeout
        enable_guest_watchdog(&guest, 15);

        // Reboot and check that systemd has activated the watchdog
        guest.ssh_command("sudo reboot").unwrap();
        guest.wait_vm_boot(None).unwrap();
        expected_reboot_count += 1;
        assert_eq!(get_reboot_count(&guest), expected_reboot_count);
        assert_eq!(
            guest
                .ssh_command("sudo journalctl | grep -c -- \"Watchdog started\"")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            2
        );

        // Allow some normal time to elapse to check we don't get spurious reboots
        thread::sleep(std::time::Duration::new(40, 0));
        // Check no reboot
        assert_eq!(get_reboot_count(&guest), expected_reboot_count);

        // Trigger a panic (sync first). We need to do this inside a screen with a delay so the SSH command returns.
        guest.ssh_command("screen -dmS reboot sh -c \"sleep 5; echo s | tee /proc/sysrq-trigger; echo c | sudo tee /proc/sysrq-trigger\"").unwrap();
        // Allow some time for the watchdog to trigger (max 30s) and reboot to happen
        guest.wait_vm_boot(Some(50)).unwrap();
        // Check a reboot is triggered by the watchdog
        expected_reboot_count += 1;
        assert_eq!(get_reboot_count(&guest), expected_reboot_count);

        #[cfg(target_arch = "x86_64")]
        {
            // Now pause the VM and remain offline for 30s
            assert!(remote_command(&api_socket, "pause", None));
            let latest_events = [
                &MetaEvent {
                    event: "pausing".to_string(),
                    device_id: None,
                },
                &MetaEvent {
                    event: "paused".to_string(),
                    device_id: None,
                },
            ];
            assert!(check_latest_events_exact(&latest_events, &event_path));
            assert!(remote_command(&api_socket, "resume", None));

            // Check no reboot
            assert_eq!(get_reboot_count(&guest), expected_reboot_count);
        }
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

#[test]
fn test_pvpanic() {
    let jammy = UbuntuDiskConfig::new(JAMMY_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(jammy));
    let api_socket = temp_api_path(&guest.tmp_dir);
    let event_path = temp_event_monitor_path(&guest.tmp_dir);

    let kernel_path = direct_kernel_boot_path();

    let mut cmd = GuestCommand::new(&guest);
    cmd.args(["--cpus", "boot=1"])
        .args(["--memory", "size=512M"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .args(["--net", guest.default_net_string().as_str()])
        .args(["--pvpanic"])
        .args(["--api-socket", &api_socket])
        .args(["--event-monitor", format!("path={event_path}").as_str()])
        .capture_output();

    let mut child = cmd.spawn().unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        // Trigger a guest panic
        make_guest_panic(&guest);

        // Wait a while for the guest
        thread::sleep(std::time::Duration::new(10, 0));

        let expected_sequential_events = [&MetaEvent {
            event: "panic".to_string(),
            device_id: None,
        }];
        assert!(check_latest_events_exact(
            &expected_sequential_events,
            &event_path
        ));
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

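// The test below pre-opens a multi-queue TAP device on the host and passes the
// file descriptors to the VMM with `fd=[...]`, then checks the interface shows
// up in the guest and survives a reboot.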
#[test]
fn test_tap_from_fd() {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let kernel_path = direct_kernel_boot_path();

    // Create a TAP interface with multi-queue enabled
    let num_queue_pairs: usize = 2;

    use std::str::FromStr;
    let taps = net_util::open_tap(
        Some("chtap0"),
        Some(std::net::Ipv4Addr::from_str(&guest.network.host_ip).unwrap()),
        None,
        &mut None,
        None,
        num_queue_pairs,
        Some(libc::O_RDWR | libc::O_NONBLOCK),
    )
    .unwrap();

    let mut child = GuestCommand::new(&guest)
        .args(["--cpus", &format!("boot={num_queue_pairs}")])
        .args(["--memory", "size=512M"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .args([
            "--net",
            &format!(
                "fd=[{},{}],mac={},num_queues={}",
                taps[0].as_raw_fd(),
                taps[1].as_raw_fd(),
                guest.network.guest_mac,
                num_queue_pairs * 2
            ),
        ])
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        assert_eq!(
            guest
                .ssh_command("ip -o link | wc -l")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            2
        );

        guest.reboot_linux(0, None);

        assert_eq!(
            guest
                .ssh_command("ip -o link | wc -l")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            2
        );
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

// By design, a guest VM won't be able to connect to the host
// machine when using a macvtap network interface (while it can
// communicate externally). As a workaround, this integration
// test creates two macvtap interfaces in 'bridge' mode on the
// same physical net interface, one for the guest and one for
// the host. With additional setup on the IP address and the
// routing table, it enables communication between the
// guest VM and the host machine.
// Details: https://wiki.libvirt.org/page/TroubleshootMacvtapHostFail
fn _test_macvtap(hotplug: bool, guest_macvtap_name: &str, host_macvtap_name: &str) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let api_socket = temp_api_path(&guest.tmp_dir);

    #[cfg(target_arch = "x86_64")]
    let kernel_path = direct_kernel_boot_path();
    #[cfg(target_arch = "aarch64")]
    let kernel_path = edk2_path();

    let phy_net = "eth0";

    // Create a macvtap interface for the guest VM to use
    assert!(exec_host_command_status(&format!(
        "sudo ip link add link {phy_net} name {guest_macvtap_name} type macvtap mode bridge"
    ))
    .success());
    assert!(exec_host_command_status(&format!(
        "sudo ip link set {} address {} up",
        guest_macvtap_name, guest.network.guest_mac
    ))
    .success());
    assert!(
        exec_host_command_status(&format!("sudo ip link show {guest_macvtap_name}")).success()
    );

    let tap_index =
        fs::read_to_string(format!("/sys/class/net/{guest_macvtap_name}/ifindex")).unwrap();
    let tap_device = format!("/dev/tap{}", tap_index.trim());

    assert!(exec_host_command_status(&format!("sudo chown $UID.$UID {tap_device}")).success());

    let cstr_tap_device = std::ffi::CString::new(tap_device).unwrap();
    let tap_fd1 = unsafe { libc::open(cstr_tap_device.as_ptr(), libc::O_RDWR) };
    assert!(tap_fd1 > 0);
    let tap_fd2 = unsafe { libc::open(cstr_tap_device.as_ptr(), libc::O_RDWR) };
    assert!(tap_fd2 > 0);

    // Create a macvtap on the same physical net interface for
    // the host machine to use
    assert!(exec_host_command_status(&format!(
        "sudo ip link add link {phy_net} name {host_macvtap_name} type macvtap mode bridge"
    ))
    .success());
    // Use default mask "255.255.255.0"
    assert!(exec_host_command_status(&format!(
        "sudo ip address add {}/24 dev {}",
        guest.network.host_ip, host_macvtap_name
    ))
    .success());
    assert!(
        exec_host_command_status(&format!("sudo ip link set dev {host_macvtap_name} up"))
            .success()
    );

    let mut guest_command = GuestCommand::new(&guest);
    guest_command
        .args(["--cpus", "boot=2"])
        .args(["--memory", "size=512M"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .args(["--api-socket", &api_socket]);

    let net_params = format!(
        "fd=[{},{}],mac={},num_queues=4",
        tap_fd1, tap_fd2, guest.network.guest_mac
    );

    if !hotplug {
        guest_command.args(["--net", &net_params]);
    }

    let mut child = guest_command.capture_output().spawn().unwrap();

    if hotplug {
        // Give some time to the VMM process to listen to the API
        // socket. This is the only requirement to avoid the following
        // call to ch-remote from failing.
        thread::sleep(std::time::Duration::new(10, 0));
        // Hotplug the virtio-net device
        let (cmd_success, cmd_output) =
            remote_command_w_output(&api_socket, "add-net", Some(&net_params));
        assert!(cmd_success);
        #[cfg(target_arch = "x86_64")]
        assert!(String::from_utf8_lossy(&cmd_output)
            .contains("{\"id\":\"_net2\",\"bdf\":\"0000:00:05.0\"}"));
        #[cfg(target_arch = "aarch64")]
        assert!(String::from_utf8_lossy(&cmd_output)
            .contains("{\"id\":\"_net0\",\"bdf\":\"0000:00:05.0\"}"));
    }

    // The functional connectivity provided by the virtio-net device
    // gets tested through wait_vm_boot() as it expects to receive a
    // HTTP request, and through the SSH command as well.
    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        assert_eq!(
            guest
                .ssh_command("ip -o link | wc -l")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            2
        );

        guest.reboot_linux(0, None);

        assert_eq!(
            guest
                .ssh_command("ip -o link | wc -l")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            2
        );
    });

    let _ = child.kill();

    exec_host_command_status(&format!("sudo ip link del {guest_macvtap_name}"));
    exec_host_command_status(&format!("sudo ip link del {host_macvtap_name}"));

    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

#[test]
#[cfg_attr(target_arch = "aarch64", ignore = "See #5443")]
fn test_macvtap() {
    _test_macvtap(false, "guestmacvtap0", "hostmacvtap0")
}

#[test]
#[cfg_attr(target_arch = "aarch64", ignore = "See #5443")]
fn test_macvtap_hotplug() {
    _test_macvtap(true, "guestmacvtap1", "hostmacvtap1")
}

#[test]
|
|
|
|
#[cfg(not(feature = "mshv"))]
|
|
|
|
fn test_ovs_dpdk() {
|
|
|
|
let focal1 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest1 = Guest::new(Box::new(focal1));
|
2020-09-25 09:46:50 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let focal2 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest2 = Guest::new(Box::new(focal2));
|
2022-04-29 16:50:18 +00:00
|
|
|
let api_socket_source = format!("{}.1", temp_api_path(&guest2.tmp_dir));
|
2020-09-25 09:46:50 +00:00
|
|
|
|
2022-04-29 16:50:18 +00:00
|
|
|
let (mut child1, mut child2) =
|
|
|
|
setup_ovs_dpdk_guests(&guest1, &guest2, &api_socket_source, false);
|
2020-10-27 05:37:25 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Create the snapshot directory
|
|
|
|
let snapshot_dir = temp_snapshot_dir_path(&guest2.tmp_dir);
|
2020-09-25 09:46:50 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
// Remove one of the two ports from the OVS bridge
|
|
|
|
assert!(exec_host_command_status("ovs-vsctl del-port vhost-user1").success());
|
2020-09-25 09:46:50 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Spawn a new netcat listener in the first VM
|
|
|
|
let guest_ip = guest1.network.guest_ip.clone();
|
|
|
|
thread::spawn(move || {
|
|
|
|
ssh_command_ip(
|
|
|
|
"nc -l 12345",
|
|
|
|
&guest_ip,
|
|
|
|
DEFAULT_SSH_RETRIES,
|
|
|
|
DEFAULT_SSH_TIMEOUT,
|
|
|
|
)
|
|
|
|
.unwrap();
|
|
|
|
});
|
2020-09-25 09:46:50 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Wait for the server to be listening
|
|
|
|
thread::sleep(std::time::Duration::new(5, 0));
|
2020-09-25 09:46:50 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check the connection fails this time
|
|
|
|
assert!(guest2.ssh_command("nc -vz 172.100.0.1 12345").is_err());
|
2020-09-25 09:46:50 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Add the OVS port back
|
|
|
|
assert!(exec_host_command_status("ovs-vsctl add-port ovsbr0 vhost-user1 -- set Interface vhost-user1 type=dpdkvhostuserclient options:vhost-server-path=/tmp/dpdkvhostclient1").success());
|
2020-09-25 09:46:50 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// And finally check the connection is functional again
|
|
|
|
guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap();
|
2020-09-25 09:46:50 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Pause the VM
|
2022-04-29 16:50:18 +00:00
|
|
|
assert!(remote_command(&api_socket_source, "pause", None));
|
2020-09-25 09:46:50 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Take a snapshot from the VM
|
|
|
|
assert!(remote_command(
|
2022-04-29 16:50:18 +00:00
|
|
|
&api_socket_source,
|
2022-01-06 22:24:38 +00:00
|
|
|
"snapshot",
|
2022-12-14 11:41:15 +00:00
|
|
|
Some(format!("file://{snapshot_dir}").as_str()),
|
2022-01-06 22:24:38 +00:00
|
|
|
));
|
2020-09-25 09:46:50 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Wait to make sure the snapshot is completed
|
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
|
|
|
});
|
2020-09-25 09:46:50 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Shutdown the source VM
|
|
|
|
let _ = child2.kill();
|
|
|
|
let output = child2.wait_with_output().unwrap();
|
|
|
|
handle_child_output(r, &output);
|
2020-09-25 09:46:50 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Remove the vhost-user socket file.
|
|
|
|
Command::new("rm")
|
|
|
|
.arg("-f")
|
|
|
|
.arg("/tmp/dpdkvhostclient2")
|
|
|
|
.output()
|
|
|
|
.unwrap();
|
2020-12-16 17:34:07 +00:00
|
|
|
|
2022-04-29 16:50:18 +00:00
|
|
|
let api_socket_restored = format!("{}.2", temp_api_path(&guest2.tmp_dir));
|
2022-01-06 22:24:38 +00:00
|
|
|
// Restore the VM from the snapshot
|
|
|
|
let mut child2 = GuestCommand::new(&guest2)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--api-socket", &api_socket_restored])
|
|
|
|
.args([
|
2022-01-06 22:24:38 +00:00
|
|
|
"--restore",
|
2022-12-14 11:41:15 +00:00
|
|
|
format!("source_url=file://{snapshot_dir}").as_str(),
|
2022-01-06 22:24:38 +00:00
|
|
|
])
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2020-12-16 17:34:07 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Wait for the VM to be restored
|
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
2021-01-27 05:13:58 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
// Resume the VM
|
2022-04-29 16:50:18 +00:00
|
|
|
assert!(remote_command(&api_socket_restored, "resume", None));
|
2020-12-16 17:34:07 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Spawn a new netcat listener in the first VM
|
|
|
|
let guest_ip = guest1.network.guest_ip.clone();
|
|
|
|
thread::spawn(move || {
|
|
|
|
ssh_command_ip(
|
|
|
|
"nc -l 12345",
|
|
|
|
&guest_ip,
|
|
|
|
DEFAULT_SSH_RETRIES,
|
|
|
|
DEFAULT_SSH_TIMEOUT,
|
|
|
|
)
|
2020-12-16 17:34:07 +00:00
|
|
|
.unwrap();
|
2022-01-06 22:24:38 +00:00
|
|
|
});
|
2020-12-16 17:34:07 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Wait for the server to be listening
|
|
|
|
thread::sleep(std::time::Duration::new(5, 0));
|
2020-12-16 17:34:07 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// And check the connection is still functional after restore
|
|
|
|
guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap();
|
|
|
|
});
|
2021-04-28 09:30:52 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child1.kill();
|
|
|
|
let _ = child2.kill();
|
2020-12-16 17:34:07 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let output = child1.wait_with_output().unwrap();
|
|
|
|
child2.wait().unwrap();
|
2020-12-16 17:34:07 +00:00
|
|
|
|
2022-08-31 18:33:06 +00:00
|
|
|
cleanup_ovs_dpdk();
|
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2021-02-03 01:57:30 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
fn setup_spdk_nvme(nvme_dir: &std::path::Path) {
|
|
|
|
cleanup_spdk_nvme();
|
2021-09-08 14:36:51 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
assert!(exec_host_command_status(&format!(
|
|
|
|
"mkdir -p {}",
|
|
|
|
nvme_dir.join("nvme-vfio-user").to_str().unwrap()
|
|
|
|
))
|
|
|
|
.success());
|
|
|
|
assert!(exec_host_command_status(&format!(
|
|
|
|
"truncate {} -s 128M",
|
|
|
|
nvme_dir.join("test-disk.raw").to_str().unwrap()
|
|
|
|
))
|
|
|
|
.success());
|
|
|
|
assert!(exec_host_command_status(&format!(
|
|
|
|
"mkfs.ext4 {}",
|
|
|
|
nvme_dir.join("test-disk.raw").to_str().unwrap()
|
|
|
|
))
|
|
|
|
.success());
|
2021-09-08 14:36:51 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Start the SPDK nvmf_tgt daemon to expose an NVMe device as a vfio-user device
|
|
|
|
Command::new("/usr/local/bin/spdk-nvme/nvmf_tgt")
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["-i", "0", "-m", "0x1"])
|
2022-01-06 22:24:38 +00:00
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
|
|
|
thread::sleep(std::time::Duration::new(2, 0));
|
2021-02-03 01:57:30 +00:00
|
|
|
|
2023-12-13 19:25:39 +00:00
|
|
|
assert!(exec_host_command_with_retries(
|
|
|
|
"/usr/local/bin/spdk-nvme/rpc.py nvmf_create_transport -t VFIOUSER",
|
|
|
|
3,
|
|
|
|
std::time::Duration::new(5, 0),
|
|
|
|
));
|
2022-01-06 22:24:38 +00:00
|
|
|
assert!(exec_host_command_status(&format!(
|
|
|
|
"/usr/local/bin/spdk-nvme/rpc.py bdev_aio_create {} test 512",
|
|
|
|
nvme_dir.join("test-disk.raw").to_str().unwrap()
|
|
|
|
))
|
|
|
|
.success());
|
|
|
|
assert!(exec_host_command_status(
|
|
|
|
"/usr/local/bin/spdk-nvme/rpc.py nvmf_create_subsystem nqn.2019-07.io.spdk:cnode -a -s test"
|
|
|
|
)
|
2021-07-21 08:18:01 +00:00
|
|
|
.success());
|
2022-01-06 22:24:38 +00:00
|
|
|
assert!(exec_host_command_status(
|
|
|
|
"/usr/local/bin/spdk-nvme/rpc.py nvmf_subsystem_add_ns nqn.2019-07.io.spdk:cnode test"
|
|
|
|
)
|
|
|
|
.success());
|
|
|
|
assert!(exec_host_command_status(&format!(
|
|
|
|
"/usr/local/bin/spdk-nvme/rpc.py nvmf_subsystem_add_listener nqn.2019-07.io.spdk:cnode -t VFIOUSER -a {} -s 0",
|
|
|
|
nvme_dir.join("nvme-vfio-user").to_str().unwrap()
|
2021-07-21 08:18:01 +00:00
|
|
|
))
|
|
|
|
.success());
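// The VFIOUSER listener is what exposes the NVMe subsystem as a vfio-user
// socket under the nvme-vfio-user directory; the test below points
// `add-user-device` at <nvme_dir>/nvme-vfio-user/cntrl.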
|
2022-01-06 22:24:38 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
fn cleanup_spdk_nvme() {
|
|
|
|
exec_host_command_status("pkill -f nvmf_tgt");
|
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
fn test_vfio_user() {
|
2022-03-28 21:17:43 +00:00
|
|
|
let jammy_image = JAMMY_IMAGE_NAME.to_string();
|
|
|
|
let jammy = UbuntuDiskConfig::new(jammy_image);
|
|
|
|
let guest = Guest::new(Box::new(jammy));
|
2022-01-06 22:24:38 +00:00
|
|
|
|
|
|
|
let spdk_nvme_dir = guest.tmp_dir.as_path().join("test-vfio-user");
|
|
|
|
setup_spdk_nvme(spdk_nvme_dir.as_path());
|
|
|
|
|
|
|
|
let api_socket = temp_api_path(&guest.tmp_dir);
|
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--api-socket", &api_socket])
|
|
|
|
.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=512M,shared=on,hugepages=on"])
|
|
|
|
.args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()])
|
|
|
|
.args(["--serial", "tty", "--console", "off"])
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
|
|
|
|
// Hotplug the SPDK-NVMe device to the VM
|
|
|
|
let (cmd_success, cmd_output) = remote_command_w_output(
|
|
|
|
&api_socket,
|
|
|
|
"add-user-device",
|
|
|
|
Some(&format!(
|
|
|
|
"socket={},id=vfio_user0",
|
|
|
|
spdk_nvme_dir
|
|
|
|
.as_path()
|
|
|
|
.join("nvme-vfio-user/cntrl")
|
|
|
|
.to_str()
|
|
|
|
.unwrap(),
|
|
|
|
)),
|
2021-07-21 08:18:01 +00:00
|
|
|
);
|
2022-01-06 22:24:38 +00:00
|
|
|
assert!(cmd_success);
|
|
|
|
assert!(String::from_utf8_lossy(&cmd_output)
|
2022-03-18 04:06:41 +00:00
|
|
|
.contains("{\"id\":\"vfio_user0\",\"bdf\":\"0000:00:05.0\"}"));
|
2021-02-03 01:57:30 +00:00
|
|
|
|
2022-03-28 16:28:31 +00:00
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
2021-02-03 01:57:30 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check both that /dev/nvme0n1 exists and that its size is 128M.
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("lsblk | grep nvme0n1 | grep -c 128M")
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or_default(),
|
|
|
|
1
|
2021-07-21 08:18:01 +00:00
|
|
|
);
|
2021-02-03 01:57:30 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Write some data and check the changes persist after reboot
|
|
|
|
assert_eq!(
|
|
|
|
guest.ssh_command("sudo mount /dev/nvme0n1 /mnt").unwrap(),
|
|
|
|
""
|
|
|
|
);
|
|
|
|
assert_eq!(guest.ssh_command("ls /mnt").unwrap(), "lost+found\n");
|
|
|
|
guest
|
|
|
|
.ssh_command("echo test123 | sudo tee /mnt/test")
|
|
|
|
.unwrap();
|
|
|
|
assert_eq!(guest.ssh_command("sudo umount /mnt").unwrap(), "");
|
|
|
|
assert_eq!(guest.ssh_command("ls /mnt").unwrap(), "");
|
2021-02-03 01:57:30 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
guest.reboot_linux(0, None);
|
|
|
|
assert_eq!(
|
|
|
|
guest.ssh_command("sudo mount /dev/nvme0n1 /mnt").unwrap(),
|
|
|
|
""
|
|
|
|
);
|
|
|
|
assert_eq!(
|
|
|
|
guest.ssh_command("sudo cat /mnt/test").unwrap().trim(),
|
|
|
|
"test123"
|
|
|
|
);
|
|
|
|
});
|
2021-02-03 01:57:30 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
cleanup_spdk_nvme();
|
2021-07-21 09:55:16 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.kill();
|
2022-03-15 17:02:11 +00:00
|
|
|
let output = child.wait_with_output().unwrap();
|
|
|
|
|
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
fn test_vdpa_block() {
|
|
|
|
// Before trying to run the test, verify the vdpa_sim_blk module is correctly loaded.
|
2023-11-24 16:43:02 +00:00
|
|
|
assert!(exec_host_command_status("lsmod | grep vdpa_sim_blk").success());
|
2022-03-15 17:02:11 +00:00
|
|
|
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
let api_socket = temp_api_path(&guest.tmp_dir);
|
|
|
|
|
|
|
|
let kernel_path = direct_kernel_boot_path();
|
|
|
|
|
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=2"])
|
|
|
|
.args(["--memory", "size=512M,hugepages=on"])
|
|
|
|
.args(["--kernel", kernel_path.to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
2022-03-15 17:02:11 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--vdpa", "path=/dev/vhost-vdpa-0,num_queues=1"])
|
|
|
|
.args(["--platform", "num_pci_segments=2,iommu_segments=1"])
|
|
|
|
.args(["--api-socket", &api_socket])
|
2022-03-15 17:02:11 +00:00
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
|
|
|
|
// Check both that /dev/vdc exists and that its size is 128M.
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("lsblk | grep vdc | grep -c 128M")
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or_default(),
|
|
|
|
1
|
|
|
|
);
|
|
|
|
|
|
|
|
// Check the content of the block device after we wrote to it.
|
|
|
|
// The vdpa-sim-blk should let us read what we previously wrote.
|
|
|
|
guest
|
|
|
|
.ssh_command("sudo bash -c 'echo foobar > /dev/vdc'")
|
|
|
|
.unwrap();
|
|
|
|
assert_eq!(
|
|
|
|
guest.ssh_command("sudo head -1 /dev/vdc").unwrap().trim(),
|
|
|
|
"foobar"
|
|
|
|
);
|
|
|
|
|
2022-04-04 14:47:04 +00:00
|
|
|
// Hotplug an extra vDPA block device behind the vIOMMU
|
2022-03-15 17:02:11 +00:00
|
|
|
// Add a new vDPA device to the VM
|
|
|
|
let (cmd_success, cmd_output) = remote_command_w_output(
|
|
|
|
&api_socket,
|
|
|
|
"add-vdpa",
|
2022-04-04 14:47:04 +00:00
|
|
|
Some("id=myvdpa0,path=/dev/vhost-vdpa-1,num_queues=1,pci_segment=1,iommu=on"),
|
2022-03-15 17:02:11 +00:00
|
|
|
);
|
|
|
|
assert!(cmd_success);
|
|
|
|
assert!(String::from_utf8_lossy(&cmd_output)
|
2022-04-04 14:47:04 +00:00
|
|
|
.contains("{\"id\":\"myvdpa0\",\"bdf\":\"0001:00:01.0\"}"));
|
2022-03-15 17:02:11 +00:00
|
|
|
|
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
|
|
|
|
2022-04-04 14:47:04 +00:00
|
|
|
// Check IOMMU setup
|
|
|
|
assert!(guest
|
|
|
|
.does_device_vendor_pair_match("0x1057", "0x1af4")
|
|
|
|
.unwrap_or_default());
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("ls /sys/kernel/iommu_groups/0/devices")
|
|
|
|
.unwrap()
|
|
|
|
.trim(),
|
|
|
|
"0001:00:01.0"
|
|
|
|
);
|
|
|
|
|
2022-03-15 17:02:11 +00:00
|
|
|
// Check both that /dev/vdd exists and that its size is 128M.
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("lsblk | grep vdd | grep -c 128M")
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or_default(),
|
|
|
|
1
|
|
|
|
);
|
|
|
|
|
|
|
|
// Write some content to the block device we've just plugged.
|
|
|
|
guest
|
|
|
|
.ssh_command("sudo bash -c 'echo foobar > /dev/vdd'")
|
|
|
|
.unwrap();
|
|
|
|
|
2022-03-28 07:57:39 +00:00
|
|
|
// Check we can read the content back.
|
|
|
|
assert_eq!(
|
|
|
|
guest.ssh_command("sudo head -1 /dev/vdd").unwrap().trim(),
|
|
|
|
"foobar"
|
|
|
|
);
|
|
|
|
|
2022-03-15 17:02:11 +00:00
|
|
|
// Unplug the device
|
|
|
|
let cmd_success = remote_command(&api_socket, "remove-device", Some("myvdpa0"));
|
|
|
|
assert!(cmd_success);
|
2022-03-28 07:57:39 +00:00
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
2022-03-15 17:02:11 +00:00
|
|
|
|
|
|
|
// Check /dev/vdd doesn't exist anymore
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("lsblk | grep -c vdd || true")
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or(1),
|
|
|
|
0
|
|
|
|
);
|
|
|
|
});
|
|
|
|
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
|
|
|
|
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
2023-09-09 06:57:16 +00:00
|
|
|
#[ignore = "See #5756"]
|
2022-03-15 17:02:11 +00:00
|
|
|
fn test_vdpa_net() {
|
|
|
|
// Before trying to run the test, verify the vdpa_sim_net module is correctly loaded.
|
|
|
|
if !exec_host_command_status("lsmod | grep vdpa_sim_net").success() {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
|
|
|
|
let kernel_path = direct_kernel_boot_path();
|
|
|
|
|
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=2"])
|
|
|
|
.args(["--memory", "size=512M,hugepages=on"])
|
|
|
|
.args(["--kernel", kernel_path.to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
2022-03-15 17:02:11 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--vdpa", "path=/dev/vhost-vdpa-2,num_queues=2"])
|
2022-03-15 17:02:11 +00:00
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
|
|
|
|
// Check we can find the network interface related to the vDPA device
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("ip -o link | grep -c ens6")
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or(0),
|
|
|
|
1
|
|
|
|
);
|
|
|
|
|
|
|
|
guest
|
|
|
|
.ssh_command("sudo ip addr add 172.16.1.2/24 dev ens6")
|
|
|
|
.unwrap();
|
|
|
|
guest.ssh_command("sudo ip link set up dev ens6").unwrap();
|
|
|
|
|
|
|
|
// Check there are no packets yet on either TX or RX of the network interface
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("ip -j -p -s link show ens6 | grep -c '\"packets\": 0'")
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or(0),
|
|
|
|
2
|
|
|
|
);
|
|
|
|
|
|
|
|
// Send 6 packets with ping command
|
|
|
|
guest.ssh_command("ping 172.16.1.10 -c 6 || true").unwrap();
|
|
|
|
|
|
|
|
// Check 6 packets are counted on both TX and RX of the network interface
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("ip -j -p -s link show ens6 | grep -c '\"packets\": 6'")
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or(0),
|
|
|
|
2
|
|
|
|
);
|
|
|
|
|
|
|
|
// No need to check for hotplug as we already tested it through
|
|
|
|
// test_vdpa_block()
|
|
|
|
});
|
|
|
|
|
|
|
|
let _ = child.kill();
|
2022-01-06 22:24:38 +00:00
|
|
|
let output = child.wait_with_output().unwrap();
|
2021-07-21 09:55:16 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2023-01-12 21:06:43 +00:00
|
|
|
|
|
|
|
#[test]
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
fn test_tpm() {
|
|
|
|
let focal = UbuntuDiskConfig::new(JAMMY_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
|
|
|
|
let (mut swtpm_command, swtpm_socket_path) = prepare_swtpm_daemon(&guest.tmp_dir);
|
|
|
|
|
|
|
|
let mut guest_cmd = GuestCommand::new(&guest);
|
|
|
|
guest_cmd
|
|
|
|
.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()])
|
|
|
|
.args(["--tpm", &format!("socket={swtpm_socket_path}")])
|
|
|
|
.capture_output()
|
|
|
|
.default_disks()
|
|
|
|
.default_net();
|
|
|
|
|
|
|
|
// Start swtpm daemon
|
|
|
|
let mut swtpm_child = swtpm_command.spawn().unwrap();
|
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
|
|
|
let mut child = guest_cmd.spawn().unwrap();
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
assert_eq!(
|
|
|
|
guest.ssh_command("ls /dev/tpm0").unwrap().trim(),
|
|
|
|
"/dev/tpm0"
|
|
|
|
);
|
|
|
|
guest.ssh_command("sudo tpm2_selftest -f").unwrap();
|
|
|
|
guest
|
|
|
|
.ssh_command("echo 'hello' > /tmp/checksum_test; ")
|
|
|
|
.unwrap();
|
|
|
|
guest.ssh_command("cmp <(sudo tpm2_pcrevent /tmp/checksum_test | grep sha256 | awk '{print $2}') <(sha256sum /tmp/checksum_test| awk '{print $1}')").unwrap();
|
|
|
|
});
|
|
|
|
|
|
|
|
let _ = swtpm_child.kill();
|
|
|
|
let _d_out = swtpm_child.wait_with_output().unwrap();
|
|
|
|
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
|
|
|
|
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2023-10-31 09:51:13 +00:00
|
|
|
|
|
|
|
#[test]
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
fn test_double_tty() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
let mut cmd = GuestCommand::new(&guest);
|
|
|
|
let api_socket = temp_api_path(&guest.tmp_dir);
|
|
|
|
let tty_str: &str = "console=hvc0 earlyprintk=ttyS0 ";
|
|
|
|
// Log line emitted by the Linux printk code when the hvc0 console is enabled.
|
|
|
|
let con_dis_str: &str = "console [hvc0] enabled";
|
|
|
|
// Log line emitted by the Linux printk code when the early boot console is disabled.
|
|
|
|
let con_enb_str: &str = "bootconsole [earlyser0] disabled";
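// Both the legacy serial (ttyS0, via earlyprintk) and the virtio console
// (hvc0) are wired to the host tty here, so the captured output is expected
// to contain the cmdline fragment above as well as both printk lines.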
|
|
|
|
|
|
|
|
let kernel_path = direct_kernel_boot_path();
|
|
|
|
|
|
|
|
cmd.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", kernel_path.to_str().unwrap()])
|
|
|
|
.args([
|
|
|
|
"--cmdline",
|
|
|
|
DIRECT_KERNEL_BOOT_CMDLINE
|
|
|
|
.replace("console=hvc0 ", tty_str)
|
|
|
|
.as_str(),
|
|
|
|
])
|
|
|
|
.capture_output()
|
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
|
|
|
.args(["--serial", "tty"])
|
|
|
|
.args(["--console", "tty"])
|
|
|
|
.args(["--api-socket", &api_socket]);
|
|
|
|
|
|
|
|
let mut child = cmd.spawn().unwrap();
|
|
|
|
|
|
|
|
let mut r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
});
|
|
|
|
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
|
|
|
|
|
|
|
if r.is_ok() {
|
|
|
|
r = std::panic::catch_unwind(|| {
|
|
|
|
let s = String::from_utf8_lossy(&output.stdout);
|
|
|
|
assert!(s.contains(tty_str));
|
|
|
|
assert!(s.contains(con_dis_str));
|
|
|
|
assert!(s.contains(con_enb_str));
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2024-02-26 12:29:13 +00:00
|
|
|
|
|
|
|
#[test]
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
fn test_nmi() {
|
|
|
|
let jammy = UbuntuDiskConfig::new(JAMMY_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(jammy));
|
|
|
|
let api_socket = temp_api_path(&guest.tmp_dir);
|
|
|
|
let event_path = temp_event_monitor_path(&guest.tmp_dir);
|
|
|
|
|
|
|
|
let kernel_path = direct_kernel_boot_path();
|
|
|
|
let cmd_line = format!("{} {}", DIRECT_KERNEL_BOOT_CMDLINE, "unknown_nmi_panic=1");
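// unknown_nmi_panic=1 makes the guest kernel panic when it receives the NMI
// injected through the "nmi" API below; with --pvpanic the panic is reported
// back to the VMM, which surfaces it as the "panic" event checked at the end.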
|
|
|
|
|
|
|
|
let mut cmd = GuestCommand::new(&guest);
|
|
|
|
cmd.args(["--cpus", "boot=4"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", kernel_path.to_str().unwrap()])
|
|
|
|
.args(["--cmdline", cmd_line.as_str()])
|
|
|
|
.default_disks()
|
|
|
|
.args(["--net", guest.default_net_string().as_str()])
|
|
|
|
.args(["--pvpanic"])
|
|
|
|
.args(["--api-socket", &api_socket])
|
|
|
|
.args(["--event-monitor", format!("path={event_path}").as_str()])
|
|
|
|
.capture_output();
|
|
|
|
|
|
|
|
let mut child = cmd.spawn().unwrap();
|
|
|
|
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
|
|
|
|
assert!(remote_command(&api_socket, "nmi", None));
|
|
|
|
|
|
|
|
// Wait a while for guest
|
|
|
|
thread::sleep(std::time::Duration::new(3, 0));
|
|
|
|
|
|
|
|
let expected_sequential_events = [&MetaEvent {
|
|
|
|
event: "panic".to_string(),
|
|
|
|
device_id: None,
|
|
|
|
}];
|
|
|
|
assert!(check_latest_events_exact(
|
|
|
|
&expected_sequential_events,
|
|
|
|
&event_path
|
|
|
|
));
|
|
|
|
});
|
|
|
|
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
|
|
|
|
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2022-01-06 22:24:38 +00:00
|
|
|
}
|
2021-07-21 09:55:16 +00:00
|
|
|
|
2023-06-06 19:36:36 +00:00
|
|
|
mod dbus_api {
|
|
|
|
use crate::*;
|
|
|
|
|
|
|
|
// Start cloud-hypervisor with no VM parameters, running both the HTTP
|
|
|
|
// and DBus APIs. Alternate calls to the external APIs (HTTP and DBus)
|
|
|
|
// to create a VM, boot it, and verify that it can be shut down and then
|
|
|
|
// booted again.
|
|
|
|
#[test]
|
|
|
|
fn test_api_dbus_and_http_interleaved() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
let dbus_api = TargetApi::new_dbus_api(&guest.tmp_dir);
|
|
|
|
let http_api = TargetApi::new_http_api(&guest.tmp_dir);
|
|
|
|
|
|
|
|
let mut child = GuestCommand::new(&guest)
|
|
|
|
.args(dbus_api.guest_args())
|
|
|
|
.args(http_api.guest_args())
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
thread::sleep(std::time::Duration::new(1, 0));
|
|
|
|
|
|
|
|
// Verify API servers are running
|
|
|
|
assert!(dbus_api.remote_command("ping", None));
|
|
|
|
assert!(http_api.remote_command("ping", None));
|
|
|
|
|
|
|
|
// Create the VM first
|
|
|
|
let cpu_count: u8 = 4;
|
|
|
|
let request_body = guest.api_create_body(
|
|
|
|
cpu_count,
|
|
|
|
direct_kernel_boot_path().to_str().unwrap(),
|
|
|
|
DIRECT_KERNEL_BOOT_CMDLINE,
|
|
|
|
);
|
|
|
|
|
|
|
|
let temp_config_path = guest.tmp_dir.as_path().join("config");
|
|
|
|
std::fs::write(&temp_config_path, request_body).unwrap();
|
|
|
|
let create_config = temp_config_path.as_os_str().to_str().unwrap();
|
|
|
|
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
// Create the VM
|
|
|
|
assert!(dbus_api.remote_command("create", Some(create_config),));
|
|
|
|
|
|
|
|
// Then boot it
|
|
|
|
assert!(http_api.remote_command("boot", None));
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
|
|
|
|
// Check that the VM booted as expected
|
|
|
|
assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
|
|
|
|
|
|
|
|
// Sync and shutdown without powering off to prevent filesystem
|
|
|
|
// corruption.
|
|
|
|
guest.ssh_command("sync").unwrap();
|
|
|
|
guest.ssh_command("sudo shutdown -H now").unwrap();
|
|
|
|
|
|
|
|
// Wait for the guest to be fully shutdown
|
|
|
|
thread::sleep(std::time::Duration::new(20, 0));
|
|
|
|
|
|
|
|
// Then shutdown the VM
|
|
|
|
assert!(dbus_api.remote_command("shutdown", None));
|
|
|
|
|
|
|
|
// Then boot it again
|
|
|
|
assert!(http_api.remote_command("boot", None));
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
|
|
|
|
// Check that the VM booted as expected
|
|
|
|
assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
|
|
|
|
});
|
|
|
|
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
|
|
|
|
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
fn test_api_dbus_create_boot() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
|
|
|
|
_test_api_create_boot(TargetApi::new_dbus_api(&guest.tmp_dir), guest)
|
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
fn test_api_dbus_shutdown() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
|
|
|
|
_test_api_shutdown(TargetApi::new_dbus_api(&guest.tmp_dir), guest)
|
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
fn test_api_dbus_delete() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
|
|
|
|
_test_api_delete(TargetApi::new_dbus_api(&guest.tmp_dir), guest);
|
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
fn test_api_dbus_pause_resume() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
|
|
|
|
_test_api_pause_resume(TargetApi::new_dbus_api(&guest.tmp_dir), guest)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-08-16 18:42:28 +00:00
|
|
|
mod common_sequential {
|
2023-01-18 15:35:03 +00:00
|
|
|
#[cfg(not(feature = "mshv"))]
|
2022-01-06 22:24:38 +00:00
|
|
|
use crate::*;
|
2021-02-03 01:57:30 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
2023-01-18 15:35:03 +00:00
|
|
|
#[cfg(not(feature = "mshv"))]
|
2022-01-06 22:24:38 +00:00
|
|
|
fn test_memory_mergeable_on() {
|
|
|
|
test_memory_mergeable(true)
|
|
|
|
}
|
|
|
|
}
|
2021-07-21 09:55:16 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
mod windows {
|
|
|
|
use crate::*;
|
2022-06-20 14:05:17 +00:00
|
|
|
use once_cell::sync::Lazy;
|
2021-02-03 01:57:30 +00:00
|
|
|
|
2022-06-20 14:05:17 +00:00
|
|
|
static NEXT_DISK_ID: Lazy<Mutex<u8>> = Lazy::new(|| Mutex::new(1));
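// Monotonic counter used by disk_new() below so every hotplugged test disk
// gets its own /tmp/test-hotplug-<id>.raw path, presumably to avoid clashes
// when several Windows tests run in the same environment.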
|
2021-02-03 01:57:30 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
struct WindowsGuest {
|
|
|
|
guest: Guest,
|
|
|
|
auth: PasswordAuth,
|
|
|
|
}
|
2021-02-03 01:57:30 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
trait FsType {
|
|
|
|
const FS_FAT: u8;
|
|
|
|
const FS_NTFS: u8;
|
|
|
|
}
|
|
|
|
impl FsType for WindowsGuest {
|
|
|
|
const FS_FAT: u8 = 0;
|
|
|
|
const FS_NTFS: u8 = 1;
|
|
|
|
}
|
2021-02-03 01:57:30 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
impl WindowsGuest {
|
|
|
|
fn new() -> Self {
|
|
|
|
let disk = WindowsDiskConfig::new(WINDOWS_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(disk));
|
|
|
|
let auth = PasswordAuth {
|
|
|
|
username: String::from("administrator"),
|
|
|
|
password: String::from("Admin123"),
|
|
|
|
};
|
2021-02-03 01:57:30 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
WindowsGuest { guest, auth }
|
2020-12-16 17:34:07 +00:00
|
|
|
}
|
2021-05-27 12:55:57 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
fn guest(&self) -> &Guest {
|
|
|
|
&self.guest
|
2021-07-21 09:55:16 +00:00
|
|
|
}
|
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
fn ssh_cmd(&self, cmd: &str) -> String {
|
|
|
|
ssh_command_ip_with_auth(
|
|
|
|
cmd,
|
|
|
|
&self.auth,
|
|
|
|
&self.guest.network.guest_ip,
|
|
|
|
DEFAULT_SSH_RETRIES,
|
|
|
|
DEFAULT_SSH_TIMEOUT,
|
|
|
|
)
|
|
|
|
.unwrap()
|
2021-07-21 09:55:16 +00:00
|
|
|
}
|
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
fn cpu_count(&self) -> u8 {
|
|
|
|
self.ssh_cmd("powershell -Command \"(Get-CimInstance win32_computersystem).NumberOfLogicalProcessors\"")
|
|
|
|
.trim()
|
|
|
|
.parse::<u8>()
|
|
|
|
.unwrap_or(0)
|
|
|
|
}
|
2021-09-09 03:39:03 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
fn ram_size(&self) -> usize {
|
|
|
|
self.ssh_cmd("powershell -Command \"(Get-CimInstance win32_computersystem).TotalPhysicalMemory\"")
|
|
|
|
.trim()
|
|
|
|
.parse::<usize>()
|
|
|
|
.unwrap_or(0)
|
|
|
|
}
|
2021-07-29 08:31:02 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
fn netdev_count(&self) -> u8 {
|
|
|
|
self.ssh_cmd("powershell -Command \"netsh int ipv4 show interfaces | Select-String ethernet | Measure-Object -Line | Format-Table -HideTableHeaders\"")
|
|
|
|
.trim()
|
|
|
|
.parse::<u8>()
|
|
|
|
.unwrap_or(0)
|
|
|
|
}
|
2021-06-03 22:35:36 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
fn disk_count(&self) -> u8 {
|
|
|
|
self.ssh_cmd("powershell -Command \"Get-Disk | Measure-Object -Line | Format-Table -HideTableHeaders\"")
|
|
|
|
.trim()
|
|
|
|
.parse::<u8>()
|
|
|
|
.unwrap_or(0)
|
|
|
|
}
|
2021-07-29 08:31:02 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
fn reboot(&self) {
|
|
|
|
let _ = self.ssh_cmd("shutdown /r /t 0");
|
|
|
|
}
|
2021-06-03 22:35:36 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
fn shutdown(&self) {
|
|
|
|
let _ = self.ssh_cmd("shutdown /s /t 0");
|
|
|
|
}
|
2021-06-03 22:35:36 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
fn run_dnsmasq(&self) -> std::process::Child {
|
|
|
|
let listen_address = format!("--listen-address={}", self.guest.network.host_ip);
|
|
|
|
let dhcp_host = format!(
|
|
|
|
"--dhcp-host={},{}",
|
|
|
|
self.guest.network.guest_mac, self.guest.network.guest_ip
|
|
|
|
);
|
|
|
|
let dhcp_range = format!(
|
|
|
|
"--dhcp-range=eth,{},{}",
|
|
|
|
self.guest.network.guest_ip, self.guest.network.guest_ip
|
|
|
|
);
|
2021-07-29 08:31:02 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
Command::new("dnsmasq")
|
|
|
|
.arg("--no-daemon")
|
|
|
|
.arg("--log-queries")
|
|
|
|
.arg(listen_address.as_str())
|
|
|
|
.arg("--except-interface=lo")
|
|
|
|
.arg("--bind-dynamic") // Allow listening to host_ip while the interface is not ready yet.
|
|
|
|
.arg("--conf-file=/dev/null")
|
|
|
|
.arg(dhcp_host.as_str())
|
|
|
|
.arg(dhcp_range.as_str())
|
|
|
|
.spawn()
|
|
|
|
.unwrap()
|
|
|
|
}
|
2021-07-29 08:31:02 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// TODO: Clean up the image file explicitly after the test if disk space becomes an issue.
|
|
|
|
fn disk_new(&self, fs: u8, sz: usize) -> String {
|
|
|
|
let mut guard = NEXT_DISK_ID.lock().unwrap();
|
|
|
|
let id = *guard;
|
|
|
|
*guard = id + 1;
|
2021-07-29 08:31:02 +00:00
|
|
|
|
2022-12-14 11:41:15 +00:00
|
|
|
let img = PathBuf::from(format!("/tmp/test-hotplug-{id}.raw"));
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = fs::remove_file(&img);
|
2021-07-29 08:31:02 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Create an image file
|
|
|
|
let out = Command::new("qemu-img")
|
2022-09-20 08:46:19 +00:00
|
|
|
.args([
|
2022-01-06 22:24:38 +00:00
|
|
|
"create",
|
|
|
|
"-f",
|
|
|
|
"raw",
|
|
|
|
img.to_str().unwrap(),
|
2022-12-14 11:41:15 +00:00
|
|
|
format!("{sz}m").as_str(),
|
2022-01-06 22:24:38 +00:00
|
|
|
])
|
|
|
|
.output()
|
|
|
|
.expect("qemu-img command failed")
|
|
|
|
.stdout;
|
2022-12-14 11:41:15 +00:00
|
|
|
println!("{out:?}");
|
2021-07-29 08:31:02 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Associate image to a loop device
|
|
|
|
let out = Command::new("losetup")
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--show", "-f", img.to_str().unwrap()])
|
2021-07-29 08:31:02 +00:00
|
|
|
.output()
|
2022-01-06 22:24:38 +00:00
|
|
|
.expect("failed to create loop device")
|
|
|
|
.stdout;
|
|
|
|
let _tmp = String::from_utf8_lossy(&out);
|
|
|
|
let loop_dev = _tmp.trim();
|
2022-12-14 11:41:15 +00:00
|
|
|
println!("{out:?}");
|
2021-07-29 08:31:02 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Create a partition table
|
|
|
|
// echo 'type=7' | sudo sfdisk "${LOOP}"
|
|
|
|
let mut child = Command::new("sfdisk")
|
2022-09-20 08:46:19 +00:00
|
|
|
.args([loop_dev])
|
2022-01-06 22:24:38 +00:00
|
|
|
.stdin(Stdio::piped())
|
2021-07-29 08:31:02 +00:00
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2022-01-06 22:24:38 +00:00
|
|
|
let stdin = child.stdin.as_mut().expect("failed to open stdin");
|
2022-05-20 07:57:46 +00:00
|
|
|
stdin
|
2022-01-06 22:24:38 +00:00
|
|
|
.write_all("type=7".as_bytes())
|
|
|
|
.expect("failed to write stdin");
|
|
|
|
let out = child.wait_with_output().expect("sfdisk failed").stdout;
|
2022-12-14 11:41:15 +00:00
|
|
|
println!("{out:?}");
|
2022-01-06 22:24:38 +00:00
|
|
|
|
|
|
|
// Disengage the loop device
|
|
|
|
let out = Command::new("losetup")
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["-d", loop_dev])
|
2022-01-06 22:24:38 +00:00
|
|
|
.output()
|
|
|
|
.expect("loop device not found")
|
|
|
|
.stdout;
|
2022-12-14 11:41:15 +00:00
|
|
|
println!("{out:?}");
|
2021-07-29 08:31:02 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Re-associate loop device pointing to the partition only
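// sfdisk puts the first partition at sector 2048 by default, so with
// 512-byte sectors the partition data starts 1 MiB into the image; that is
// the 512 * 2048 byte offset passed to losetup below.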
|
|
|
|
let out = Command::new("losetup")
|
2022-09-20 08:46:19 +00:00
|
|
|
.args([
|
2022-01-06 22:24:38 +00:00
|
|
|
"--show",
|
|
|
|
"--offset",
|
|
|
|
(512 * 2048).to_string().as_str(),
|
|
|
|
"-f",
|
|
|
|
img.to_str().unwrap(),
|
|
|
|
])
|
|
|
|
.output()
|
|
|
|
.expect("failed to create loop device")
|
|
|
|
.stdout;
|
|
|
|
let _tmp = String::from_utf8_lossy(&out);
|
|
|
|
let loop_dev = _tmp.trim();
|
2022-12-14 11:41:15 +00:00
|
|
|
println!("{out:?}");
|
2021-07-29 08:31:02 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Create filesystem.
|
|
|
|
let fs_cmd = match fs {
|
|
|
|
WindowsGuest::FS_FAT => "mkfs.msdos",
|
|
|
|
WindowsGuest::FS_NTFS => "mkfs.ntfs",
|
2022-12-14 11:41:15 +00:00
|
|
|
_ => panic!("Unknown filesystem type '{fs}'"),
|
2022-01-06 22:24:38 +00:00
|
|
|
};
|
|
|
|
let out = Command::new(fs_cmd)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args([&loop_dev])
|
2022-01-06 22:24:38 +00:00
|
|
|
.output()
|
2022-12-14 11:41:15 +00:00
|
|
|
.unwrap_or_else(|_| panic!("{fs_cmd} failed"))
|
2022-01-06 22:24:38 +00:00
|
|
|
.stdout;
|
2022-12-14 11:41:15 +00:00
|
|
|
println!("{out:?}");
|
2021-05-27 12:55:57 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Disengage the loop device
|
|
|
|
let out = Command::new("losetup")
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["-d", loop_dev])
|
2022-01-06 22:24:38 +00:00
|
|
|
.output()
|
2022-12-14 11:41:15 +00:00
|
|
|
.unwrap_or_else(|_| panic!("loop device '{loop_dev}' not found"))
|
2022-01-06 22:24:38 +00:00
|
|
|
.stdout;
|
2022-12-14 11:41:15 +00:00
|
|
|
println!("{out:?}");
|
2021-09-09 23:23:03 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
img.to_str().unwrap().to_string()
|
|
|
|
}
|
2021-05-27 12:55:57 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
fn disks_set_rw(&self) {
|
|
|
|
let _ = self.ssh_cmd("powershell -Command \"Get-Disk | Where-Object IsOffline -eq $True | Set-Disk -IsReadOnly $False\"");
|
|
|
|
}
|
2021-05-27 12:55:57 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
fn disks_online(&self) {
|
|
|
|
let _ = self.ssh_cmd("powershell -Command \"Get-Disk | Where-Object IsOffline -eq $True | Set-Disk -IsOffline $False\"");
|
2021-05-27 12:55:57 +00:00
|
|
|
}
|
2021-11-25 05:03:02 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
fn disk_file_put(&self, fname: &str, data: &str) {
|
|
|
|
let _ = self.ssh_cmd(&format!(
|
2022-12-14 11:41:15 +00:00
|
|
|
"powershell -Command \"'{data}' | Set-Content -Path {fname}\""
|
2022-01-06 22:24:38 +00:00
|
|
|
));
|
|
|
|
}
|
2021-11-25 05:03:02 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
fn disk_file_read(&self, fname: &str) -> String {
|
|
|
|
self.ssh_cmd(&format!(
|
2022-12-14 11:41:15 +00:00
|
|
|
"powershell -Command \"Get-Content -Path {fname}\""
|
2021-11-25 05:03:02 +00:00
|
|
|
))
|
2022-01-06 22:24:38 +00:00
|
|
|
}
|
2021-11-25 05:03:02 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
fn wait_for_boot(&self) -> bool {
|
|
|
|
let cmd = "dir /b c:\\ | find \"Windows\"";
|
|
|
|
let tmo_max = 180;
|
|
|
|
// The timeout increases by n*1 + n*2 + n*3 + ..., therefore the initial
|
|
|
|
// interval must be small.
|
|
|
|
let tmo_int = 2;
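// With tmo_int = 2 and tmo_max = 180, the computation below settles on 13
// retries, since 2 * (1 + 2 + ... + 13) = 182 >= 180.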
|
|
|
|
let out = ssh_command_ip_with_auth(
|
|
|
|
cmd,
|
|
|
|
&self.auth,
|
|
|
|
&self.guest.network.guest_ip,
|
|
|
|
{
|
|
|
|
let mut ret = 1;
|
|
|
|
let mut tmo_acc = 0;
|
|
|
|
loop {
|
|
|
|
tmo_acc += tmo_int * ret;
|
|
|
|
if tmo_acc >= tmo_max {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
ret += 1;
|
|
|
|
}
|
|
|
|
ret
|
|
|
|
},
|
|
|
|
tmo_int,
|
2021-11-25 05:03:02 +00:00
|
|
|
)
|
2022-01-06 22:24:38 +00:00
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
if "Windows" == out.trim() {
|
|
|
|
return true;
|
|
|
|
}
|
2021-11-25 05:03:02 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
false
|
2021-11-25 05:03:02 +00:00
|
|
|
}
|
2022-01-06 22:24:38 +00:00
|
|
|
}
|
2021-11-25 05:03:02 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
fn vcpu_threads_count(pid: u32) -> u8 {
|
|
|
|
// ps -T -p 12345 | grep vcpu | wc -l
|
|
|
|
let out = Command::new("ps")
|
2022-12-14 11:41:15 +00:00
|
|
|
.args(["-T", "-p", format!("{pid}").as_str()])
|
2022-01-06 22:24:38 +00:00
|
|
|
.output()
|
|
|
|
.expect("ps command failed")
|
|
|
|
.stdout;
|
|
|
|
return String::from_utf8_lossy(&out).matches("vcpu").count() as u8;
|
|
|
|
}
|
2021-11-25 05:03:02 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
fn netdev_ctrl_threads_count(pid: u32) -> u8 {
|
|
|
|
// ps -T -p 12345 | grep "_net[0-9]*_ctrl" | wc -l
|
|
|
|
let out = Command::new("ps")
|
2022-12-14 11:41:15 +00:00
|
|
|
.args(["-T", "-p", format!("{pid}").as_str()])
|
2022-01-06 22:24:38 +00:00
|
|
|
.output()
|
|
|
|
.expect("ps command failed")
|
|
|
|
.stdout;
|
|
|
|
let mut n = 0;
|
|
|
|
String::from_utf8_lossy(&out)
|
|
|
|
.split_whitespace()
|
|
|
|
.for_each(|s| n += (s.starts_with("_net") && s.ends_with("_ctrl")) as u8); // _net1_ctrl
|
|
|
|
n
|
|
|
|
}
|
2021-11-25 05:03:02 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
fn disk_ctrl_threads_count(pid: u32) -> u8 {
|
|
|
|
// ps -T -p 15782 | grep "_disk[0-9]*_q0" | wc -l
|
|
|
|
let out = Command::new("ps")
|
2022-12-14 11:41:15 +00:00
|
|
|
.args(["-T", "-p", format!("{pid}").as_str()])
|
2022-01-06 22:24:38 +00:00
|
|
|
.output()
|
|
|
|
.expect("ps command failed")
|
|
|
|
.stdout;
|
|
|
|
let mut n = 0;
|
|
|
|
String::from_utf8_lossy(&out)
|
|
|
|
.split_whitespace()
|
|
|
|
.for_each(|s| n += (s.starts_with("_disk") && s.ends_with("_q0")) as u8); // _disk0_q0, don't care about multiple queues as they're related to the same hdd
|
|
|
|
n
|
|
|
|
}
|
2021-11-25 05:03:02 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_windows_guest() {
|
|
|
|
let windows_guest = WindowsGuest::new();
|
2021-11-25 05:03:02 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = GuestCommand::new(windows_guest.guest())
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=2,kvm_hyperv=on"])
|
|
|
|
.args(["--memory", "size=4G"])
|
|
|
|
.args(["--kernel", edk2_path().to_str().unwrap()])
|
|
|
|
.args(["--serial", "tty"])
|
|
|
|
.args(["--console", "off"])
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2021-11-25 05:03:02 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let fd = child.stdout.as_ref().unwrap().as_raw_fd();
|
|
|
|
let pipesize = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) };
|
|
|
|
let fd = child.stderr.as_ref().unwrap().as_raw_fd();
|
|
|
|
let pipesize1 = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) };
|
2021-11-25 05:03:02 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
assert!(pipesize >= PIPE_SIZE && pipesize1 >= PIPE_SIZE);
|
2021-11-25 05:03:02 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child_dnsmasq = windows_guest.run_dnsmasq();
|
2021-11-25 05:03:02 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
// Wait to make sure Windows boots up
|
|
|
|
assert!(windows_guest.wait_for_boot());
|
2021-11-25 05:03:02 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
windows_guest.shutdown();
|
|
|
|
});
|
2021-11-25 05:03:02 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.wait_timeout(std::time::Duration::from_secs(60));
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child_dnsmasq.kill();
|
|
|
|
let _ = child_dnsmasq.wait();
|
2020-07-03 08:00:12 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
2020-06-24 13:47:14 +00:00
|
|
|
}
|
2020-09-08 12:20:26 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_windows_guest_multiple_queues() {
|
|
|
|
let windows_guest = WindowsGuest::new();
|
2021-05-06 08:47:38 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut ovmf_path = dirs::home_dir().unwrap();
|
|
|
|
ovmf_path.push("workloads");
|
|
|
|
ovmf_path.push(OVMF_NAME);
|
2021-05-06 08:47:38 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = GuestCommand::new(windows_guest.guest())
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=4,kvm_hyperv=on"])
|
|
|
|
.args(["--memory", "size=4G"])
|
|
|
|
.args(["--kernel", ovmf_path.to_str().unwrap()])
|
|
|
|
.args(["--serial", "tty"])
|
|
|
|
.args(["--console", "off"])
|
|
|
|
.args([
|
2022-01-06 22:24:38 +00:00
|
|
|
"--disk",
|
|
|
|
format!(
|
|
|
|
"path={},num_queues=4",
|
|
|
|
windows_guest
|
|
|
|
.guest()
|
|
|
|
.disk_config
|
|
|
|
.disk(DiskType::OperatingSystem)
|
|
|
|
.unwrap()
|
2021-05-19 14:44:34 +00:00
|
|
|
)
|
2022-01-06 22:24:38 +00:00
|
|
|
.as_str(),
|
|
|
|
])
|
2022-09-20 08:46:19 +00:00
|
|
|
.args([
|
2022-01-06 22:24:38 +00:00
|
|
|
"--net",
|
|
|
|
format!(
|
|
|
|
"tap=,mac={},ip={},mask=255.255.255.0,num_queues=8",
|
|
|
|
windows_guest.guest().network.guest_mac,
|
|
|
|
windows_guest.guest().network.host_ip
|
|
|
|
)
|
|
|
|
.as_str(),
|
|
|
|
])
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2021-03-29 20:10:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let fd = child.stdout.as_ref().unwrap().as_raw_fd();
|
|
|
|
let pipesize = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) };
|
|
|
|
let fd = child.stderr.as_ref().unwrap().as_raw_fd();
|
|
|
|
let pipesize1 = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) };
|
2021-05-06 08:47:38 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
assert!(pipesize >= PIPE_SIZE && pipesize1 >= PIPE_SIZE);
|
2021-05-13 15:25:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child_dnsmasq = windows_guest.run_dnsmasq();
|
2021-05-06 08:47:38 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
// Wait to make sure Windows boots up
|
|
|
|
assert!(windows_guest.wait_for_boot());
|
2021-05-06 13:18:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
windows_guest.shutdown();
|
|
|
|
});
|
2021-05-06 13:18:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.wait_timeout(std::time::Duration::from_secs(60));
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2021-05-13 15:25:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child_dnsmasq.kill();
|
|
|
|
let _ = child_dnsmasq.wait();
|
2021-05-13 15:25:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2021-05-13 15:25:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
#[cfg(not(feature = "mshv"))]
|
2022-07-18 15:18:23 +00:00
|
|
|
#[ignore = "See #4327"]
|
2022-01-06 22:24:38 +00:00
|
|
|
fn test_windows_guest_snapshot_restore() {
|
|
|
|
let windows_guest = WindowsGuest::new();
|
2021-05-13 15:25:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut ovmf_path = dirs::home_dir().unwrap();
|
|
|
|
ovmf_path.push("workloads");
|
|
|
|
ovmf_path.push(OVMF_NAME);
|
2021-05-13 15:25:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap();
|
2022-04-29 16:50:18 +00:00
|
|
|
let api_socket_source = format!("{}.1", temp_api_path(&tmp_dir));
|
2021-06-10 15:03:57 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = GuestCommand::new(windows_guest.guest())
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--api-socket", &api_socket_source])
|
|
|
|
.args(["--cpus", "boot=2,kvm_hyperv=on"])
|
|
|
|
.args(["--memory", "size=4G"])
|
|
|
|
.args(["--kernel", ovmf_path.to_str().unwrap()])
|
|
|
|
.args(["--serial", "tty"])
|
|
|
|
.args(["--console", "off"])
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2021-06-10 15:03:57 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let fd = child.stdout.as_ref().unwrap().as_raw_fd();
|
|
|
|
let pipesize = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) };
|
|
|
|
let fd = child.stderr.as_ref().unwrap().as_raw_fd();
|
|
|
|
let pipesize1 = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) };
|
2021-06-10 15:03:57 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
assert!(pipesize >= PIPE_SIZE && pipesize1 >= PIPE_SIZE);
|
2021-05-06 08:47:38 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child_dnsmasq = windows_guest.run_dnsmasq();
|
2021-04-22 15:26:47 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Wait to make sure Windows boots up
|
|
|
|
assert!(windows_guest.wait_for_boot());
|
2021-04-22 15:26:47 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let snapshot_dir = temp_snapshot_dir_path(&tmp_dir);
|
2021-05-13 15:25:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Pause the VM
|
2022-04-29 16:50:18 +00:00
|
|
|
assert!(remote_command(&api_socket_source, "pause", None));
|
2021-01-12 10:01:20 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Take a snapshot from the VM
|
|
|
|
assert!(remote_command(
|
2022-04-29 16:50:18 +00:00
|
|
|
&api_socket_source,
|
2022-01-06 22:24:38 +00:00
|
|
|
"snapshot",
|
2022-12-14 11:41:15 +00:00
|
|
|
Some(format!("file://{snapshot_dir}").as_str()),
|
2022-01-06 22:24:38 +00:00
|
|
|
));
|
2020-10-08 14:10:33 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Wait to make sure the snapshot is completed
|
|
|
|
thread::sleep(std::time::Duration::new(30, 0));
|
2020-10-08 14:10:33 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.kill();
|
|
|
|
child.wait().unwrap();
|
2021-05-06 09:19:27 +00:00
|
|
|
|
2022-04-29 16:50:18 +00:00
|
|
|
let api_socket_restored = format!("{}.2", temp_api_path(&tmp_dir));
|
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Restore the VM from the snapshot
|
|
|
|
let mut child = GuestCommand::new(windows_guest.guest())
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--api-socket", &api_socket_restored])
|
|
|
|
.args([
|
2022-01-06 22:24:38 +00:00
|
|
|
"--restore",
|
2022-12-14 11:41:15 +00:00
|
|
|
format!("source_url=file://{snapshot_dir}").as_str(),
|
2022-01-06 22:24:38 +00:00
|
|
|
])
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2021-05-06 09:19:27 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Wait for the VM to be restored
|
|
|
|
thread::sleep(std::time::Duration::new(20, 0));
|
2021-05-06 09:19:27 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
// Resume the VM
|
2022-04-29 16:50:18 +00:00
|
|
|
assert!(remote_command(&api_socket_restored, "resume", None));
|
2021-06-10 15:03:57 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
windows_guest.shutdown();
|
|
|
|
});
|
2021-05-06 09:19:27 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.wait_timeout(std::time::Duration::from_secs(60));
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2021-05-06 09:19:27 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child_dnsmasq.kill();
|
|
|
|
let _ = child_dnsmasq.wait();
|
2021-05-06 09:19:27 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2021-05-06 09:19:27 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
#[cfg(not(feature = "mshv"))]
|
2022-08-03 21:52:53 +00:00
|
|
|
#[cfg(not(target_arch = "aarch64"))]
|
2022-01-06 22:24:38 +00:00
|
|
|
fn test_windows_guest_cpu_hotplug() {
|
|
|
|
let windows_guest = WindowsGuest::new();
|
2021-05-06 09:19:27 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut ovmf_path = dirs::home_dir().unwrap();
|
|
|
|
ovmf_path.push("workloads");
|
|
|
|
ovmf_path.push(OVMF_NAME);
|
2021-05-06 09:19:27 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap();
|
|
|
|
let api_socket = temp_api_path(&tmp_dir);
|
2021-05-06 09:19:27 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = GuestCommand::new(windows_guest.guest())
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--api-socket", &api_socket])
|
|
|
|
.args(["--cpus", "boot=2,max=8,kvm_hyperv=on"])
|
|
|
|
.args(["--memory", "size=4G"])
|
|
|
|
.args(["--kernel", ovmf_path.to_str().unwrap()])
|
|
|
|
.args(["--serial", "tty"])
|
|
|
|
.args(["--console", "off"])
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2020-10-08 14:10:33 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child_dnsmasq = windows_guest.run_dnsmasq();
|
2020-10-08 14:10:33 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
// Wait to make sure Windows boots up
|
|
|
|
assert!(windows_guest.wait_for_boot());
|
2021-05-06 13:18:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let vcpu_num = 2;
|
|
|
|
// Check the initial number of CPUs the guest sees
|
|
|
|
assert_eq!(windows_guest.cpu_count(), vcpu_num);
|
|
|
|
// Check the initial number of vcpu threads in the CH process
|
|
|
|
assert_eq!(vcpu_threads_count(child.id()), vcpu_num);
|
2021-06-10 15:03:57 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let vcpu_num = 6;
|
|
|
|
// Hotplug some CPUs
|
2022-05-12 19:51:13 +00:00
|
|
|
resize_command(&api_socket, Some(vcpu_num), None, None, None);
|
2022-01-06 22:24:38 +00:00
|
|
|
// Wait to make sure CPUs are added
|
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
|
|
|
// Check the guest sees the correct number
|
|
|
|
assert_eq!(windows_guest.cpu_count(), vcpu_num);
|
|
|
|
// Check the CH process has the correct number of vcpu threads
|
|
|
|
assert_eq!(vcpu_threads_count(child.id()), vcpu_num);
|
|
|
|
|
|
|
|
let vcpu_num = 4;
|
|
|
|
// Remove some CPUs. Note that Windows doesn't support hot-remove.
|
2022-05-12 19:51:13 +00:00
|
|
|
resize_command(&api_socket, Some(vcpu_num), None, None, None);
|
2022-01-06 22:24:38 +00:00
|
|
|
// Wait to make sure CPUs are removed
|
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
|
|
|
// Reboot to let Windows catch up
|
|
|
|
windows_guest.reboot();
|
|
|
|
// Wait to make sure Windows completely rebooted
|
|
|
|
thread::sleep(std::time::Duration::new(60, 0));
|
|
|
|
// Check the guest sees the correct number
|
|
|
|
assert_eq!(windows_guest.cpu_count(), vcpu_num);
|
|
|
|
// Check the CH process has the correct number of vcpu threads
|
|
|
|
assert_eq!(vcpu_threads_count(child.id()), vcpu_num);
|
|
|
|
|
|
|
|
windows_guest.shutdown();
|
|
|
|
});
|
2020-10-08 14:10:33 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.wait_timeout(std::time::Duration::from_secs(60));
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2020-10-08 14:10:33 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child_dnsmasq.kill();
|
|
|
|
let _ = child_dnsmasq.wait();
|
2021-05-06 13:18:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2020-10-21 22:31:01 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
#[cfg(not(feature = "mshv"))]
|
2022-08-03 21:52:53 +00:00
|
|
|
#[cfg(not(target_arch = "aarch64"))]
|
2022-01-06 22:24:38 +00:00
|
|
|
fn test_windows_guest_ram_hotplug() {
|
|
|
|
let windows_guest = WindowsGuest::new();
|
2020-10-21 22:31:01 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut ovmf_path = dirs::home_dir().unwrap();
|
|
|
|
ovmf_path.push("workloads");
|
|
|
|
ovmf_path.push(OVMF_NAME);
|
|
|
|
|
|
|
|
let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap();
|
|
|
|
let api_socket = temp_api_path(&tmp_dir);
|
2020-10-21 22:31:01 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = GuestCommand::new(windows_guest.guest())
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--api-socket", &api_socket])
|
|
|
|
.args(["--cpus", "boot=2,kvm_hyperv=on"])
|
|
|
|
.args(["--memory", "size=2G,hotplug_size=5G"])
|
|
|
|
.args(["--kernel", ovmf_path.to_str().unwrap()])
|
|
|
|
.args(["--serial", "tty"])
|
|
|
|
.args(["--console", "off"])
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2020-10-21 22:31:01 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child_dnsmasq = windows_guest.run_dnsmasq();
|
2021-06-10 15:03:57 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
2021-06-10 15:03:57 +00:00
|
|
|
// Wait to make sure Windows boots up
|
|
|
|
assert!(windows_guest.wait_for_boot());
|
2021-05-06 13:18:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let ram_size = 2 * 1024 * 1024 * 1024;
|
|
|
|
// Check the initial amount of RAM the guest sees
|
|
|
|
let current_ram_size = windows_guest.ram_size();
|
|
|
|
// This size seems to be reserved by the system and thus the
|
|
|
|
// reported amount differs by this constant value.
|
|
|
|
let reserved_ram_size = ram_size - current_ram_size;
|
|
|
|
// Verify that there's not more than a 4 MiB constant diff wasted
|
|
|
|
// by the reserved ram.
|
|
|
|
assert!(reserved_ram_size < 4 * 1024 * 1024);
|
|
|
|
|
|
|
|
let ram_size = 4 * 1024 * 1024 * 1024;
|
|
|
|
// Hotplug some RAM
|
2022-05-12 19:51:13 +00:00
|
|
|
resize_command(&api_socket, None, Some(ram_size), None, None);
|
2022-01-06 22:24:38 +00:00
|
|
|
// Wait to make sure RAM has been added
|
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
|
|
|
// Check the guest sees the correct amount
|
|
|
|
assert_eq!(windows_guest.ram_size(), ram_size - reserved_ram_size);
|
2020-10-21 22:31:01 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let ram_size = 3 * 1024 * 1024 * 1024;
|
|
|
|
// Unplug some RAM. Note that hot-remove most likely won't work.
|
2022-05-12 19:51:13 +00:00
|
|
|
resize_command(&api_socket, None, Some(ram_size), None, None);
|
2022-01-06 22:24:38 +00:00
|
|
|
// Wait to make sure RAM has been removed
|
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
|
|
|
// Reboot to let Windows catch up
|
|
|
|
windows_guest.reboot();
|
|
|
|
// Wait to make sure guest completely rebooted
|
|
|
|
thread::sleep(std::time::Duration::new(60, 0));
|
|
|
|
// Check the guest sees the correct amount
|
|
|
|
assert_eq!(windows_guest.ram_size(), ram_size - reserved_ram_size);
|
|
|
|
|
|
|
|
windows_guest.shutdown();
|
|
|
|
});
|
2020-10-21 22:31:01 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.wait_timeout(std::time::Duration::from_secs(60));
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2020-10-21 22:31:01 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child_dnsmasq.kill();
|
|
|
|
let _ = child_dnsmasq.wait();
|
2020-10-21 22:31:01 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2020-10-21 22:31:01 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
#[cfg(not(feature = "mshv"))]
|
|
|
|
fn test_windows_guest_netdev_hotplug() {
|
|
|
|
let windows_guest = WindowsGuest::new();
|
2020-10-21 22:31:01 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut ovmf_path = dirs::home_dir().unwrap();
|
|
|
|
ovmf_path.push("workloads");
|
|
|
|
ovmf_path.push(OVMF_NAME);
|
2020-10-21 22:31:01 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap();
|
|
|
|
let api_socket = temp_api_path(&tmp_dir);
|
2020-10-21 22:31:01 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = GuestCommand::new(windows_guest.guest())
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--api-socket", &api_socket])
|
|
|
|
.args(["--cpus", "boot=2,kvm_hyperv=on"])
|
|
|
|
.args(["--memory", "size=4G"])
|
|
|
|
.args(["--kernel", ovmf_path.to_str().unwrap()])
|
|
|
|
.args(["--serial", "tty"])
|
|
|
|
.args(["--console", "off"])
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2020-10-21 22:31:01 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child_dnsmasq = windows_guest.run_dnsmasq();
|
2021-05-06 13:18:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
// Wait to make sure Windows boots up
|
|
|
|
assert!(windows_guest.wait_for_boot());
|
2021-03-29 20:10:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Initially present network device
|
|
|
|
let netdev_num = 1;
|
|
|
|
assert_eq!(windows_guest.netdev_count(), netdev_num);
|
|
|
|
assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num);
|
2021-03-29 20:10:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Hotplug network device
|
|
|
|
let (cmd_success, cmd_output) = remote_command_w_output(
|
|
|
|
&api_socket,
|
|
|
|
"add-net",
|
|
|
|
Some(windows_guest.guest().default_net_string().as_str()),
|
|
|
|
);
|
|
|
|
assert!(cmd_success);
|
|
|
|
assert!(String::from_utf8_lossy(&cmd_output).contains("\"id\":\"_net2\""));
|
|
|
|
thread::sleep(std::time::Duration::new(5, 0));
|
|
|
|
// Verify the device is on the system
|
|
|
|
let netdev_num = 2;
|
|
|
|
assert_eq!(windows_guest.netdev_count(), netdev_num);
|
|
|
|
assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num);
|
|
|
|
|
|
|
|
// Remove network device
|
|
|
|
let cmd_success = remote_command(&api_socket, "remove-device", Some("_net2"));
|
|
|
|
assert!(cmd_success);
|
|
|
|
thread::sleep(std::time::Duration::new(5, 0));
|
|
|
|
// Verify the device has been removed
|
|
|
|
let netdev_num = 1;
|
|
|
|
assert_eq!(windows_guest.netdev_count(), netdev_num);
|
|
|
|
assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num);
|
2021-05-06 13:18:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
windows_guest.shutdown();
|
|
|
|
});
|
2021-03-29 20:10:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.wait_timeout(std::time::Duration::from_secs(60));
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2021-03-29 20:10:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child_dnsmasq.kill();
|
|
|
|
let _ = child_dnsmasq.wait();
|
2021-05-06 13:18:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
memory_manager: Fix address range calculation in MemorySlot
The MCRS method returns a 64-bit memory range descriptor. The
calculation is supposed to be done as follows:
max = min + len - 1
However, every operand is represented not as a QWORD but as a combination
of two DWORDs for the high and low parts. Until now, the calculation was
done this way (see also the inline comments):
max.lo = min.lo + len.lo // this may overflow, need to carry over to high
max.hi = min.hi + len.hi
max.hi = max.hi - 1 // subtraction needs to happen on the low part
The calculation has been corrected in the following way:
max.lo = min.lo + len.lo
max.hi = min.hi + len.hi + (max.lo < min.lo) // check for overflow
max.lo = max.lo - 1 // subtract from the low part
The relevant part of the generated ASL for the MCRS method (a small Rust
sketch of the same carry handling follows the listing):
```
Method (MCRS, 1, Serialized)
{
Acquire (MLCK, 0xFFFF)
\_SB.MHPC.MSEL = Arg0
Name (MR64, ResourceTemplate ()
{
QWordMemory (ResourceProducer, PosDecode, MinFixed, MaxFixed, Cacheable, ReadWrite,
0x0000000000000000, // Granularity
0x0000000000000000, // Range Minimum
0xFFFFFFFFFFFFFFFE, // Range Maximum
0x0000000000000000, // Translation Offset
0xFFFFFFFFFFFFFFFF, // Length
,, _Y00, AddressRangeMemory, TypeStatic)
})
CreateQWordField (MR64, \_SB.MHPC.MCRS._Y00._MIN, MINL) // _MIN: Minimum Base Address
CreateDWordField (MR64, 0x12, MINH)
CreateQWordField (MR64, \_SB.MHPC.MCRS._Y00._MAX, MAXL) // _MAX: Maximum Base Address
CreateDWordField (MR64, 0x1A, MAXH)
CreateQWordField (MR64, \_SB.MHPC.MCRS._Y00._LEN, LENL) // _LEN: Length
CreateDWordField (MR64, 0x2A, LENH)
MINL = \_SB.MHPC.MHBL
MINH = \_SB.MHPC.MHBH
LENL = \_SB.MHPC.MHLL
LENH = \_SB.MHPC.MHLH
MAXL = (MINL + LENL) /* \_SB_.MHPC.MCRS.LENL */
MAXH = (MINH + LENH) /* \_SB_.MHPC.MCRS.LENH */
If ((MAXL < MINL))
{
MAXH += One /* \_SB_.MHPC.MCRS.MAXH */
}
MAXL -= One
Release (MLCK)
Return (MR64) /* \_SB_.MHPC.MCRS.MR64 */
}
```
Fixes #1800.
Signed-off-by: Anatol Belski <anbelski@linux.microsoft.com>
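The following is a minimal, illustrative Rust sketch (not part of this test file; the helper name `mcrs_max` and the example values are hypothetical) of the corrected split-DWORD arithmetic described above. Like the generated ASL, it assumes the final decrement of the low part never needs to borrow from the high part:
```
// max = min + len - 1, computed on (lo, hi) DWORD pairs, with the carry of
// the low addition propagated into the high part before decrementing.
fn mcrs_max(min: (u32, u32), len: (u32, u32)) -> (u32, u32) {
    let (min_lo, min_hi) = min;
    let (len_lo, len_hi) = len;
    let max_lo = min_lo.wrapping_add(len_lo);
    let carry = (max_lo < min_lo) as u32; // the low addition overflowed
    let max_hi = min_hi.wrapping_add(len_hi).wrapping_add(carry);
    // As in the ASL, subtract 1 from the low part only.
    (max_lo.wrapping_sub(1), max_hi)
}

fn main() {
    // min = 0x0000_0000_FFFF_FFF0, len = 0x20 => max = 0x0000_0001_0000_000F
    assert_eq!(mcrs_max((0xFFFF_FFF0, 0), (0x20, 0)), (0x0F, 1));
}
```
The `(max.lo < min.lo)` test works because an unsigned addition that wraps always produces a result smaller than either operand.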
2021-04-08 18:05:38 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
2023-10-23 12:54:13 +00:00
|
|
|
#[ignore = "See #6037"]
|
2022-01-06 22:24:38 +00:00
|
|
|
#[cfg(not(feature = "mshv"))]
|
2022-09-05 14:29:19 +00:00
|
|
|
#[cfg(not(target_arch = "aarch64"))]
|
2022-01-06 22:24:38 +00:00
|
|
|
fn test_windows_guest_disk_hotplug() {
|
|
|
|
let windows_guest = WindowsGuest::new();
|
2021-04-08 18:05:38 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut ovmf_path = dirs::home_dir().unwrap();
|
|
|
|
ovmf_path.push("workloads");
|
|
|
|
ovmf_path.push(OVMF_NAME);
|
2021-05-06 13:18:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap();
|
|
|
|
let api_socket = temp_api_path(&tmp_dir);
|
2021-04-08 18:05:38 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = GuestCommand::new(windows_guest.guest())
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--api-socket", &api_socket])
|
|
|
|
.args(["--cpus", "boot=2,kvm_hyperv=on"])
|
|
|
|
.args(["--memory", "size=4G"])
|
|
|
|
.args(["--kernel", ovmf_path.to_str().unwrap()])
|
|
|
|
.args(["--serial", "tty"])
|
|
|
|
.args(["--console", "off"])
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2021-04-08 18:05:38 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child_dnsmasq = windows_guest.run_dnsmasq();
|
2021-04-08 18:05:38 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let disk = windows_guest.disk_new(WindowsGuest::FS_FAT, 100);
|
2021-05-06 13:18:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
// Wait to make sure Windows boots up
|
|
|
|
assert!(windows_guest.wait_for_boot());
|
2021-04-22 15:26:47 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Initially present disk device
|
|
|
|
let disk_num = 1;
|
|
|
|
assert_eq!(windows_guest.disk_count(), disk_num);
|
|
|
|
assert_eq!(disk_ctrl_threads_count(child.id()), disk_num);
|
2021-04-22 15:26:47 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Hotplug disk device
|
|
|
|
let (cmd_success, cmd_output) = remote_command_w_output(
|
|
|
|
&api_socket,
|
|
|
|
"add-disk",
|
2022-12-14 11:41:15 +00:00
|
|
|
Some(format!("path={disk},readonly=off").as_str()),
|
2022-01-06 22:24:38 +00:00
|
|
|
);
|
|
|
|
assert!(cmd_success);
|
|
|
|
assert!(String::from_utf8_lossy(&cmd_output).contains("\"id\":\"_disk2\""));
|
|
|
|
thread::sleep(std::time::Duration::new(5, 0));
|
|
|
|
// Online disk device
|
|
|
|
windows_guest.disks_set_rw();
|
|
|
|
windows_guest.disks_online();
|
|
|
|
// Verify the device is on the system
|
|
|
|
let disk_num = 2;
|
|
|
|
assert_eq!(windows_guest.disk_count(), disk_num);
|
|
|
|
assert_eq!(disk_ctrl_threads_count(child.id()), disk_num);
|
|
|
|
|
|
|
|
let data = "hello";
|
|
|
|
let fname = "d:\\world";
|
|
|
|
windows_guest.disk_file_put(fname, data);
|
|
|
|
|
|
|
|
// Unmount disk device
|
|
|
|
let cmd_success = remote_command(&api_socket, "remove-device", Some("_disk2"));
|
|
|
|
assert!(cmd_success);
|
|
|
|
thread::sleep(std::time::Duration::new(5, 0));
|
|
|
|
// Verify the device has been removed
|
|
|
|
let disk_num = 1;
|
|
|
|
assert_eq!(windows_guest.disk_count(), disk_num);
|
|
|
|
assert_eq!(disk_ctrl_threads_count(child.id()), disk_num);
|
2021-05-06 13:18:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Remount and check the file exists with the expected contents
|
|
|
|
let (cmd_success, _cmd_output) = remote_command_w_output(
|
|
|
|
&api_socket,
|
|
|
|
"add-disk",
|
2022-12-14 11:41:15 +00:00
|
|
|
Some(format!("path={disk},readonly=off").as_str()),
|
2022-01-06 22:24:38 +00:00
|
|
|
);
|
|
|
|
assert!(cmd_success);
|
|
|
|
thread::sleep(std::time::Duration::new(5, 0));
|
|
|
|
let out = windows_guest.disk_file_read(fname);
|
|
|
|
assert_eq!(data, out.trim());
|
2021-06-10 15:03:57 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Intentionally no unmount, it'll happen at shutdown.
|
2021-04-22 15:26:47 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
windows_guest.shutdown();
|
|
|
|
});
|
2021-04-22 15:26:47 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.wait_timeout(std::time::Duration::from_secs(60));
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2021-04-22 15:26:47 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child_dnsmasq.kill();
|
|
|
|
let _ = child_dnsmasq.wait();
|
2021-04-22 15:26:47 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2021-04-22 15:26:47 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
2023-10-23 12:54:13 +00:00
|
|
|
#[ignore = "See #6037"]
|
2022-01-06 22:24:38 +00:00
|
|
|
#[cfg(not(feature = "mshv"))]
|
2022-08-03 21:52:53 +00:00
|
|
|
#[cfg(not(target_arch = "aarch64"))]
|
2022-01-06 22:24:38 +00:00
|
|
|
fn test_windows_guest_disk_hotplug_multi() {
|
|
|
|
let windows_guest = WindowsGuest::new();
|
2021-05-06 13:18:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut ovmf_path = dirs::home_dir().unwrap();
|
|
|
|
ovmf_path.push("workloads");
|
|
|
|
ovmf_path.push(OVMF_NAME);
|
2021-05-13 15:25:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap();
|
|
|
|
let api_socket = temp_api_path(&tmp_dir);
|
2021-05-13 15:25:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = GuestCommand::new(windows_guest.guest())
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--api-socket", &api_socket])
|
|
|
|
.args(["--cpus", "boot=2,kvm_hyperv=on"])
|
|
|
|
.args(["--memory", "size=2G"])
|
|
|
|
.args(["--kernel", ovmf_path.to_str().unwrap()])
|
|
|
|
.args(["--serial", "tty"])
|
|
|
|
.args(["--console", "off"])
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2021-05-13 15:25:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child_dnsmasq = windows_guest.run_dnsmasq();
|
|
|
|
|
|
|
|
// Predefined data to be used at various test stages
|
|
|
|
let disk_test_data: [[String; 4]; 2] = [
|
|
|
|
[
|
|
|
|
"_disk2".to_string(),
|
|
|
|
windows_guest.disk_new(WindowsGuest::FS_FAT, 123),
|
|
|
|
"d:\\world".to_string(),
|
|
|
|
"hello".to_string(),
|
|
|
|
],
|
|
|
|
[
|
|
|
|
"_disk3".to_string(),
|
|
|
|
windows_guest.disk_new(WindowsGuest::FS_NTFS, 333),
|
|
|
|
"e:\\hello".to_string(),
|
|
|
|
"world".to_string(),
|
|
|
|
],
|
|
|
|
];
|
2021-05-13 15:25:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
// Wait to make sure Windows boots up
|
|
|
|
assert!(windows_guest.wait_for_boot());
|
2021-06-10 15:03:57 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Initially present disk device
|
|
|
|
let disk_num = 1;
|
|
|
|
assert_eq!(windows_guest.disk_count(), disk_num);
|
|
|
|
assert_eq!(disk_ctrl_threads_count(child.id()), disk_num);
|
2021-05-13 15:25:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
for it in &disk_test_data {
|
|
|
|
let disk_id = it[0].as_str();
|
|
|
|
let disk = it[1].as_str();
|
2021-05-13 15:25:05 +00:00
|
|
|
// Hotplug disk device
|
|
|
|
let (cmd_success, cmd_output) = remote_command_w_output(
|
|
|
|
&api_socket,
|
|
|
|
"add-disk",
|
2022-12-14 11:41:15 +00:00
|
|
|
Some(format!("path={disk},readonly=off").as_str()),
|
2021-05-13 15:25:05 +00:00
|
|
|
);
|
|
|
|
assert!(cmd_success);
|
2022-01-06 22:24:38 +00:00
|
|
|
assert!(String::from_utf8_lossy(&cmd_output)
|
2022-12-14 11:41:15 +00:00
|
|
|
.contains(format!("\"id\":\"{disk_id}\"").as_str()));
|
2021-05-13 15:25:05 +00:00
|
|
|
thread::sleep(std::time::Duration::new(5, 0));
|
2022-01-06 22:24:38 +00:00
|
|
|
// Online disk devices
|
2021-05-13 15:25:05 +00:00
|
|
|
windows_guest.disks_set_rw();
|
|
|
|
windows_guest.disks_online();
|
2022-01-06 22:24:38 +00:00
|
|
|
}
|
|
|
|
// Verify the devices are on the system
|
|
|
|
let disk_num = (disk_test_data.len() + 1) as u8;
|
|
|
|
assert_eq!(windows_guest.disk_count(), disk_num);
|
|
|
|
assert_eq!(disk_ctrl_threads_count(child.id()), disk_num);
|
|
|
|
|
|
|
|
// Put test data
|
|
|
|
for it in &disk_test_data {
|
|
|
|
let fname = it[2].as_str();
|
|
|
|
let data = it[3].as_str();
|
2021-05-13 15:25:05 +00:00
|
|
|
windows_guest.disk_file_put(fname, data);
|
2022-01-06 22:24:38 +00:00
|
|
|
}
|
2021-05-13 15:25:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Unmount disk devices
|
|
|
|
for it in &disk_test_data {
|
|
|
|
let disk_id = it[0].as_str();
|
|
|
|
let cmd_success = remote_command(&api_socket, "remove-device", Some(disk_id));
|
2021-05-13 15:25:05 +00:00
|
|
|
assert!(cmd_success);
|
|
|
|
thread::sleep(std::time::Duration::new(5, 0));
|
2022-01-06 22:24:38 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Verify the devices have been removed
|
|
|
|
let disk_num = 1;
|
|
|
|
assert_eq!(windows_guest.disk_count(), disk_num);
|
|
|
|
assert_eq!(disk_ctrl_threads_count(child.id()), disk_num);
|
2021-05-13 15:25:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Remount
|
|
|
|
for it in &disk_test_data {
|
|
|
|
let disk = it[1].as_str();
|
2021-05-13 15:25:05 +00:00
|
|
|
let (cmd_success, _cmd_output) = remote_command_w_output(
|
|
|
|
&api_socket,
|
|
|
|
"add-disk",
|
2022-12-14 11:41:15 +00:00
|
|
|
Some(format!("path={disk},readonly=off").as_str()),
|
2021-05-13 15:25:05 +00:00
|
|
|
);
|
|
|
|
assert!(cmd_success);
|
|
|
|
thread::sleep(std::time::Duration::new(5, 0));
|
2022-01-06 22:24:38 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Check the files exist with the expected contents
|
|
|
|
for it in &disk_test_data {
|
|
|
|
let fname = it[2].as_str();
|
|
|
|
let data = it[3].as_str();
|
2021-05-13 15:25:05 +00:00
|
|
|
let out = windows_guest.disk_file_read(fname);
|
|
|
|
assert_eq!(data, out.trim());
|
2022-01-06 22:24:38 +00:00
|
|
|
}
|
2021-05-13 15:25:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Intentionally no unmount, it'll happen at shutdown.
|
2021-05-13 15:25:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
windows_guest.shutdown();
|
|
|
|
});
|
2021-05-13 15:25:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.wait_timeout(std::time::Duration::from_secs(60));
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2021-05-13 15:25:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child_dnsmasq.kill();
|
|
|
|
let _ = child_dnsmasq.wait();
|
2021-05-13 15:25:05 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2021-06-16 16:57:04 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
#[cfg(not(feature = "mshv"))]
|
2022-08-03 21:52:53 +00:00
|
|
|
#[cfg(not(target_arch = "aarch64"))]
|
2022-01-06 22:24:38 +00:00
|
|
|
fn test_windows_guest_netdev_multi() {
|
|
|
|
let windows_guest = WindowsGuest::new();
|
2021-06-16 16:57:04 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut ovmf_path = dirs::home_dir().unwrap();
|
|
|
|
ovmf_path.push("workloads");
|
|
|
|
ovmf_path.push(OVMF_NAME);
|
2021-06-16 16:57:04 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap();
|
|
|
|
let api_socket = temp_api_path(&tmp_dir);
|
2021-06-16 16:57:04 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = GuestCommand::new(windows_guest.guest())
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--api-socket", &api_socket])
|
|
|
|
.args(["--cpus", "boot=2,kvm_hyperv=on"])
|
|
|
|
.args(["--memory", "size=4G"])
|
|
|
|
.args(["--kernel", ovmf_path.to_str().unwrap()])
|
|
|
|
.args(["--serial", "tty"])
|
|
|
|
.args(["--console", "off"])
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
|
|
|
// The multi net dev config is borrowed from test_multiple_network_interfaces
|
2022-09-20 08:46:19 +00:00
|
|
|
.args([
|
2022-01-06 22:24:38 +00:00
|
|
|
"--net",
|
|
|
|
windows_guest.guest().default_net_string().as_str(),
|
|
|
|
"tap=,mac=8a:6b:6f:5a:de:ac,ip=192.168.3.1,mask=255.255.255.0",
|
|
|
|
"tap=mytap42,mac=fe:1f:9e:e1:60:f2,ip=192.168.4.1,mask=255.255.255.0",
|
|
|
|
])
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2021-06-16 16:57:04 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child_dnsmasq = windows_guest.run_dnsmasq();
|
|
|
|
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
// Wait to make sure Windows boots up
|
|
|
|
assert!(windows_guest.wait_for_boot());
|
|
|
|
|
|
|
|
let netdev_num = 3;
|
|
|
|
assert_eq!(windows_guest.netdev_count(), netdev_num);
|
|
|
|
assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num);
|
2021-06-16 16:57:04 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let tap_count = exec_host_command_output("ip link | grep -c mytap42");
|
|
|
|
assert_eq!(String::from_utf8_lossy(&tap_count.stdout).trim(), "1");
|
2021-06-16 16:57:04 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
windows_guest.shutdown();
|
|
|
|
});
|
2021-06-16 16:57:04 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.wait_timeout(std::time::Duration::from_secs(60));
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2021-06-16 16:57:04 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child_dnsmasq.kill();
|
|
|
|
let _ = child_dnsmasq.wait();
|
2021-06-16 16:57:04 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
|
|
|
}
|
2021-06-16 16:57:04 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
mod sgx {
|
|
|
|
use crate::*;
|
2021-06-24 14:08:14 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_sgx() {
|
2022-11-18 15:54:33 +00:00
|
|
|
let jammy_image = JAMMY_IMAGE_NAME.to_string();
|
|
|
|
let jammy = UbuntuDiskConfig::new(jammy_image);
|
|
|
|
let guest = Guest::new(Box::new(jammy));
|
2021-06-24 14:08:14 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=512M"])
|
2022-11-18 15:54:33 +00:00
|
|
|
.args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()])
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--sgx-epc", "id=epc0,size=64M"])
|
2022-01-06 22:24:38 +00:00
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2021-06-24 14:08:14 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
2021-06-24 14:08:14 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check if SGX is correctly detected in the guest.
|
|
|
|
guest.check_sgx_support().unwrap();
|
2021-06-24 14:08:14 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Validate the SGX EPC section is 64MiB.
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("cpuid -l 0x12 -s 2 | grep 'section size' | cut -d '=' -f 2")
|
|
|
|
.unwrap()
|
|
|
|
.trim(),
|
|
|
|
"0x0000000004000000"
|
|
|
|
);
|
|
|
|
});
|
2021-06-24 14:08:14 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2021-06-24 14:08:14 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
2020-10-08 14:10:33 +00:00
|
|
|
}
|
2022-01-06 22:24:38 +00:00
|
|
|
}
|
2020-10-08 14:10:33 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
mod vfio {
|
|
|
|
use crate::*;
|
2020-09-08 12:20:26 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
fn test_nvidia_card_memory_hotplug(hotplug_method: &str) {
|
2022-11-24 16:27:20 +00:00
|
|
|
let jammy = UbuntuDiskConfig::new(JAMMY_NVIDIA_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(jammy));
|
2022-01-06 22:24:38 +00:00
|
|
|
let api_socket = temp_api_path(&guest.tmp_dir);
|
2020-09-08 12:20:26 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=4"])
|
|
|
|
.args([
|
2022-01-06 22:24:38 +00:00
|
|
|
"--memory",
|
2022-12-14 11:41:15 +00:00
|
|
|
format!("size=4G,hotplug_size=4G,hotplug_method={hotplug_method}").as_str(),
|
2022-01-06 22:24:38 +00:00
|
|
|
])
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()])
|
|
|
|
.args(["--device", "path=/sys/bus/pci/devices/0000:31:00.0/"])
|
|
|
|
.args(["--api-socket", &api_socket])
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2020-09-08 12:20:26 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
2020-09-08 12:20:26 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);
|
2020-10-27 05:37:25 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
guest.enable_memory_hotplug();
|
2020-09-08 12:20:26 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Add RAM to the VM
|
|
|
|
let desired_ram = 6 << 30;
|
2022-05-12 19:51:13 +00:00
|
|
|
resize_command(&api_socket, None, Some(desired_ram), None, None);
|
2022-01-06 22:24:38 +00:00
|
|
|
thread::sleep(std::time::Duration::new(30, 0));
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000);
|
2020-09-08 12:20:26 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check the VFIO device works when RAM is increased to 6GiB
|
|
|
|
guest.check_nvidia_gpu();
|
|
|
|
});
|
2020-09-08 12:20:26 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2020-09-08 12:20:26 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
2020-09-08 12:20:26 +00:00
|
|
|
}
|
2021-03-16 15:11:00 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_nvidia_card_memory_hotplug_acpi() {
|
|
|
|
test_nvidia_card_memory_hotplug("acpi")
|
|
|
|
}
|
2021-03-16 15:11:00 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_nvidia_card_memory_hotplug_virtio_mem() {
|
|
|
|
test_nvidia_card_memory_hotplug("virtio-mem")
|
|
|
|
}
|
2021-04-28 10:25:57 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_nvidia_card_pci_hotplug() {
|
2022-11-24 16:27:20 +00:00
|
|
|
let jammy = UbuntuDiskConfig::new(JAMMY_NVIDIA_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(jammy));
|
2022-01-06 22:24:38 +00:00
|
|
|
let api_socket = temp_api_path(&guest.tmp_dir);
|
2021-04-28 10:25:57 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=4"])
|
|
|
|
.args(["--memory", "size=4G"])
|
|
|
|
.args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()])
|
|
|
|
.args(["--api-socket", &api_socket])
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2021-04-28 10:25:57 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
2021-04-28 10:25:57 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Hotplug the card to the VM
|
|
|
|
let (cmd_success, cmd_output) = remote_command_w_output(
|
|
|
|
&api_socket,
|
|
|
|
"add-device",
|
|
|
|
Some("id=vfio0,path=/sys/bus/pci/devices/0000:31:00.0/"),
|
|
|
|
);
|
|
|
|
assert!(cmd_success);
|
|
|
|
assert!(String::from_utf8_lossy(&cmd_output)
|
|
|
|
.contains("{\"id\":\"vfio0\",\"bdf\":\"0000:00:06.0\"}"));
|
2021-04-28 10:25:57 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
2021-04-28 10:25:57 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check the VFIO device works after hotplug
|
|
|
|
guest.check_nvidia_gpu();
|
|
|
|
});
|
2021-04-28 10:25:57 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2021-04-28 10:25:57 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
2021-04-28 10:25:57 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_nvidia_card_reboot() {
|
2022-11-24 16:27:20 +00:00
|
|
|
let jammy = UbuntuDiskConfig::new(JAMMY_NVIDIA_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(jammy));
|
2022-01-06 22:24:38 +00:00
|
|
|
let api_socket = temp_api_path(&guest.tmp_dir);
|
2021-04-28 10:25:57 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", "boot=4"])
|
|
|
|
.args(["--memory", "size=4G"])
|
|
|
|
.args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()])
|
|
|
|
.args(["--device", "path=/sys/bus/pci/devices/0000:31:00.0/"])
|
|
|
|
.args(["--api-socket", &api_socket])
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2021-04-28 10:25:57 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
2021-03-16 15:11:00 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check the VFIO device works after boot
|
|
|
|
guest.check_nvidia_gpu();
|
2021-03-16 15:11:00 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
guest.reboot_linux(0, None);
|
2021-03-16 15:11:00 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check the VFIO device works after reboot
|
|
|
|
guest.check_nvidia_gpu();
|
|
|
|
});
|
2021-04-28 09:30:52 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2021-04-28 10:25:57 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
|
|
|
}
|
2021-04-28 09:34:23 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
mod live_migration {
|
|
|
|
use crate::*;
|
|
|
|
|
2022-09-07 20:55:00 +00:00
|
|
|
fn start_live_migration(
|
|
|
|
migration_socket: &str,
|
|
|
|
src_api_socket: &str,
|
|
|
|
dest_api_socket: &str,
|
|
|
|
local: bool,
|
|
|
|
) -> bool {
|
|
|
|
// Start to receive the migration on the destination VM
|
|
|
|
let mut receive_migration = Command::new(clh_command("ch-remote"))
|
2022-09-20 08:46:19 +00:00
|
|
|
.args([
|
2023-07-08 01:38:51 +00:00
|
|
|
&format!("--api-socket={dest_api_socket}"),
|
2022-09-07 20:55:00 +00:00
|
|
|
"receive-migration",
|
2022-12-14 11:41:15 +00:00
|
|
|
&format! {"unix:{migration_socket}"},
|
2022-09-07 20:55:00 +00:00
|
|
|
])
|
|
|
|
.stderr(Stdio::piped())
|
|
|
|
.stdout(Stdio::piped())
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
|
|
|
// Give it '1s' to make sure the 'migration_socket' file is properly created
|
|
|
|
thread::sleep(std::time::Duration::new(1, 0));
|
|
|
|
// Start to send migration from the source VM
|
|
|
|
|
|
|
|
let mut args = [
|
2023-07-08 01:38:51 +00:00
|
|
|
format!("--api-socket={}", &src_api_socket),
|
2022-09-07 20:55:00 +00:00
|
|
|
"send-migration".to_string(),
|
2022-12-14 11:41:15 +00:00
|
|
|
format! {"unix:{migration_socket}"},
|
2022-09-07 20:55:00 +00:00
|
|
|
]
|
|
|
|
.to_vec();
|
|
|
|
|
|
|
|
if local {
|
2023-07-08 01:38:51 +00:00
|
|
|
args.insert(2, "--local".to_string());
|
2022-09-07 20:55:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
let mut send_migration = Command::new(clh_command("ch-remote"))
|
|
|
|
.args(&args)
|
|
|
|
.stderr(Stdio::piped())
|
|
|
|
.stdout(Stdio::piped())
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
// The 'send-migration' command should be executed successfully within the given timeout
|
|
|
|
let send_success = if let Some(status) = send_migration
|
|
|
|
.wait_timeout(std::time::Duration::from_secs(30))
|
|
|
|
.unwrap()
|
|
|
|
{
|
|
|
|
status.success()
|
|
|
|
} else {
|
|
|
|
false
|
|
|
|
};
|
|
|
|
|
|
|
|
if !send_success {
|
|
|
|
let _ = send_migration.kill();
|
|
|
|
let output = send_migration.wait_with_output().unwrap();
|
2023-11-03 18:45:46 +00:00
|
|
|
eprintln!(
|
|
|
|
"\n\n==== Start 'send_migration' output ==== \
|
|
|
|
\n\n---stdout---\n{}\n\n---stderr---\n{} \
|
|
|
|
\n\n==== End 'send_migration' output ====\n\n",
|
|
|
|
String::from_utf8_lossy(&output.stdout),
|
|
|
|
String::from_utf8_lossy(&output.stderr)
|
|
|
|
);
|
2022-09-07 20:55:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// The 'receive-migration' command should be executed successfully within the given timeout
|
|
|
|
let receive_success = if let Some(status) = receive_migration
|
|
|
|
.wait_timeout(std::time::Duration::from_secs(30))
|
|
|
|
.unwrap()
|
|
|
|
{
|
|
|
|
status.success()
|
|
|
|
} else {
|
|
|
|
false
|
|
|
|
};
|
|
|
|
|
|
|
|
if !receive_success {
|
|
|
|
let _ = receive_migration.kill();
|
|
|
|
let output = receive_migration.wait_with_output().unwrap();
|
2023-11-03 18:45:46 +00:00
|
|
|
eprintln!(
|
|
|
|
"\n\n==== Start 'receive_migration' output ==== \
|
|
|
|
\n\n---stdout---\n{}\n\n---stderr---\n{} \
|
|
|
|
\n\n==== End 'receive_migration' output ====\n\n",
|
|
|
|
String::from_utf8_lossy(&output.stdout),
|
|
|
|
String::from_utf8_lossy(&output.stderr)
|
|
|
|
);
|
2022-09-07 20:55:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
send_success && receive_success
|
|
|
|
}
|
|
|
|
|
2022-09-07 21:23:10 +00:00
|
|
|
fn print_and_panic(src_vm: Child, dest_vm: Child, ovs_vm: Option<Child>, message: &str) -> ! {
|
|
|
|
let mut src_vm = src_vm;
|
|
|
|
let mut dest_vm = dest_vm;
|
|
|
|
|
|
|
|
let _ = src_vm.kill();
|
|
|
|
let src_output = src_vm.wait_with_output().unwrap();
|
|
|
|
eprintln!(
|
|
|
|
"\n\n==== Start 'source_vm' stdout ====\n\n{}\n\n==== End 'source_vm' stdout ====",
|
|
|
|
String::from_utf8_lossy(&src_output.stdout)
|
|
|
|
);
|
|
|
|
eprintln!(
|
|
|
|
"\n\n==== Start 'source_vm' stderr ====\n\n{}\n\n==== End 'source_vm' stderr ====",
|
|
|
|
String::from_utf8_lossy(&src_output.stderr)
|
|
|
|
);
|
|
|
|
let _ = dest_vm.kill();
|
|
|
|
let dest_output = dest_vm.wait_with_output().unwrap();
|
|
|
|
eprintln!(
|
|
|
|
"\n\n==== Start 'destination_vm' stdout ====\n\n{}\n\n==== End 'destination_vm' stdout ====",
|
|
|
|
String::from_utf8_lossy(&dest_output.stdout)
|
|
|
|
);
|
|
|
|
eprintln!(
|
|
|
|
"\n\n==== Start 'destination_vm' stderr ====\n\n{}\n\n==== End 'destination_vm' stderr ====",
|
|
|
|
String::from_utf8_lossy(&dest_output.stderr)
|
|
|
|
);
|
|
|
|
|
|
|
|
if let Some(ovs_vm) = ovs_vm {
|
|
|
|
let mut ovs_vm = ovs_vm;
|
|
|
|
let _ = ovs_vm.kill();
|
|
|
|
let ovs_output = ovs_vm.wait_with_output().unwrap();
|
|
|
|
eprintln!(
|
|
|
|
"\n\n==== Start 'ovs_vm' stdout ====\n\n{}\n\n==== End 'ovs_vm' stdout ====",
|
|
|
|
String::from_utf8_lossy(&ovs_output.stdout)
|
|
|
|
);
|
|
|
|
eprintln!(
|
|
|
|
"\n\n==== Start 'ovs_vm' stderr ====\n\n{}\n\n==== End 'ovs_vm' stderr ====",
|
|
|
|
String::from_utf8_lossy(&ovs_output.stderr)
|
|
|
|
);
|
|
|
|
|
|
|
|
cleanup_ovs_dpdk();
|
|
|
|
}
|
|
|
|
|
2022-12-14 11:41:15 +00:00
|
|
|
panic!("Test failed: {message}")
|
2022-09-07 21:23:10 +00:00
|
|
|
}
|
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// This test exercises the local live-migration between two Cloud Hypervisor VMs on the
|
|
|
|
// same host. It ensures the following behaviors:
|
|
|
|
// 1. The source VM is up and functional (including that various virtio-devices are working properly);
|
|
|
|
// 2. The 'send-migration' and 'receive-migration' commands finish successfully;
|
|
|
|
// 3. The source VM terminates gracefully after live migration;
|
|
|
|
// 4. The destination VM is functional (including that various virtio-devices are working properly) after
|
|
|
|
// live migration;
|
|
|
|
// Note: This test does not use vsock as we can't create two identical vsock devices on the same host.
|
2022-09-08 00:20:25 +00:00
|
|
|
fn _test_live_migration(upgrade_test: bool, local: bool) {
|
2022-01-06 22:24:38 +00:00
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
let kernel_path = direct_kernel_boot_path();
|
|
|
|
let console_text = String::from("On a branch floating down river a cricket, singing.");
|
|
|
|
let net_id = "net123";
|
|
|
|
let net_params = format!(
|
|
|
|
"id={},tap=,mac={},ip={},mask=255.255.255.0",
|
|
|
|
net_id, guest.network.guest_mac, guest.network.host_ip
|
|
|
|
);
|
2021-04-28 09:34:23 +00:00
|
|
|
|
2022-09-08 00:20:25 +00:00
|
|
|
let memory_param: &[&str] = if local {
|
|
|
|
&["--memory", "size=4G,shared=on"]
|
|
|
|
} else {
|
|
|
|
&["--memory", "size=4G"]
|
2022-01-06 22:24:38 +00:00
|
|
|
};
|
2021-04-28 10:25:57 +00:00
|
|
|
|
2022-09-08 00:20:25 +00:00
|
|
|
let boot_vcpus = 2;
|
|
|
|
let max_vcpus = 4;
|
2022-08-03 14:17:19 +00:00
|
|
|
|
2022-04-26 02:24:45 +00:00
|
|
|
let pmem_temp_file = TempFile::new().unwrap();
|
|
|
|
pmem_temp_file.as_file().set_len(128 << 20).unwrap();
|
|
|
|
std::process::Command::new("mkfs.ext4")
|
|
|
|
.arg(pmem_temp_file.as_path())
|
|
|
|
.output()
|
|
|
|
.expect("Expect creating disk image to succeed");
|
|
|
|
let pmem_path = String::from("/dev/pmem0");
|
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Start the source VM
|
2022-04-13 18:33:22 +00:00
|
|
|
let src_vm_path = if !upgrade_test {
|
|
|
|
clh_command("cloud-hypervisor")
|
|
|
|
} else {
|
|
|
|
cloud_hypervisor_release_path()
|
|
|
|
};
|
2022-01-06 22:24:38 +00:00
|
|
|
let src_api_socket = temp_api_path(&guest.tmp_dir);
|
2022-04-26 04:35:37 +00:00
|
|
|
let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path);
|
|
|
|
src_vm_cmd
|
2022-09-20 08:46:19 +00:00
|
|
|
.args([
|
2022-08-03 14:17:19 +00:00
|
|
|
"--cpus",
|
2022-12-14 11:41:15 +00:00
|
|
|
format!("boot={boot_vcpus},max={max_vcpus}").as_str(),
|
2022-08-03 14:17:19 +00:00
|
|
|
])
|
2022-01-06 22:24:38 +00:00
|
|
|
.args(memory_param)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--kernel", kernel_path.to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--net", net_params.as_str()])
|
|
|
|
.args(["--api-socket", &src_api_socket])
|
|
|
|
.args([
|
2022-04-26 02:24:45 +00:00
|
|
|
"--pmem",
|
|
|
|
format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(),
|
2022-04-26 04:35:37 +00:00
|
|
|
]);
|
|
|
|
let mut src_child = src_vm_cmd.capture_output().spawn().unwrap();
|
2021-04-28 10:25:57 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Start the destination VM
|
|
|
|
let mut dest_api_socket = temp_api_path(&guest.tmp_dir);
|
|
|
|
dest_api_socket.push_str(".dest");
|
|
|
|
let mut dest_child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--api-socket", &dest_api_socket])
|
2022-01-06 22:24:38 +00:00
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2021-04-28 10:25:57 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
2022-05-25 10:55:34 +00:00
|
|
|
guest.wait_vm_boot(None).unwrap();
|
2022-01-06 22:24:38 +00:00
|
|
|
|
|
|
|
// Make sure the source VM is functional
|
|
|
|
// Check the number of vCPUs
|
2022-08-03 14:17:19 +00:00
|
|
|
assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
|
2022-04-26 05:34:58 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check the guest RAM
|
2022-09-08 00:20:25 +00:00
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);
|
2022-04-26 05:34:58 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check the guest virtio-devices, e.g. block, rng, console, and net
|
2022-04-26 02:24:45 +00:00
|
|
|
guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));
|
2022-01-06 22:24:38 +00:00
|
|
|
|
|
|
|
// x86_64: Following what's done in the `test_snapshot_restore`, we need
|
|
|
|
// to make sure that removing and adding back the virtio-net device does
|
|
|
|
// not break the live-migration support for virtio-pci.
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
{
|
|
|
|
assert!(remote_command(
|
|
|
|
&src_api_socket,
|
|
|
|
"remove-device",
|
|
|
|
Some(net_id),
|
|
|
|
));
|
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
2021-03-16 15:11:00 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Plug the virtio-net device again
|
|
|
|
assert!(remote_command(
|
|
|
|
&src_api_socket,
|
|
|
|
"add-net",
|
|
|
|
Some(net_params.as_str()),
|
|
|
|
));
|
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Start the live-migration
|
|
|
|
let migration_socket = String::from(
|
|
|
|
guest
|
|
|
|
.tmp_dir
|
|
|
|
.as_path()
|
|
|
|
.join("live-migration.sock")
|
|
|
|
.to_str()
|
|
|
|
.unwrap(),
|
|
|
|
);
|
2022-01-17 15:24:36 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
assert!(
|
2022-09-07 20:55:00 +00:00
|
|
|
start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local),
|
2022-01-06 22:24:38 +00:00
|
|
|
"Unsuccessful command: 'send-migration' or 'receive-migration'."
|
|
|
|
);
|
|
|
|
});
|
2021-08-24 22:42:37 +00:00
|
|
|
|
2023-08-31 13:00:19 +00:00
|
|
|
// Check and report any errors that occurred during the live-migration
|
2022-01-06 22:24:38 +00:00
|
|
|
if r.is_err() {
|
2022-09-07 21:23:10 +00:00
|
|
|
print_and_panic(
|
|
|
|
src_child,
|
|
|
|
dest_child,
|
|
|
|
None,
|
2023-08-31 13:00:19 +00:00
|
|
|
"Error occurred during live-migration",
|
2022-09-07 21:23:10 +00:00
|
|
|
);
|
2022-01-06 22:24:38 +00:00
|
|
|
}
|
2021-08-24 22:42:37 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check the source VM has been terminated successfully (give it '3s' to settle)
|
|
|
|
thread::sleep(std::time::Duration::new(3, 0));
|
|
|
|
if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) {
|
|
|
|
print_and_panic(
|
|
|
|
src_child,
|
|
|
|
dest_child,
|
2022-09-07 21:23:10 +00:00
|
|
|
None,
|
2022-01-06 22:24:38 +00:00
|
|
|
"source VM was not terminated successfully.",
|
|
|
|
);
|
|
|
|
};
|
2021-08-24 22:42:37 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Post live-migration check to make sure the destination VM is functional
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
// Perform the same checks to validate the VM has been properly migrated
|
2022-08-03 14:17:19 +00:00
|
|
|
assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
|
2022-04-13 11:38:14 +00:00
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);
|
|
|
|
|
2022-04-26 02:24:45 +00:00
|
|
|
guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));
|
2022-09-07 23:51:37 +00:00
|
|
|
});
|
2022-04-26 04:35:37 +00:00
|
|
|
|
2022-09-07 23:51:37 +00:00
|
|
|
// Clean-up the destination VM and make sure it terminated correctly
|
|
|
|
let _ = dest_child.kill();
|
|
|
|
let dest_output = dest_child.wait_with_output().unwrap();
|
|
|
|
handle_child_output(r, &dest_output);
|
|
|
|
|
|
|
|
// Check the destination VM has the expected 'console_text' in its output
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text));
|
|
|
|
});
|
|
|
|
handle_child_output(r, &dest_output);
|
|
|
|
}
|
|
|
|
|
|
|
|
fn _test_live_migration_balloon(upgrade_test: bool, local: bool) {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
let kernel_path = direct_kernel_boot_path();
|
|
|
|
let console_text = String::from("On a branch floating down river a cricket, singing.");
|
|
|
|
let net_id = "net123";
|
|
|
|
let net_params = format!(
|
|
|
|
"id={},tap=,mac={},ip={},mask=255.255.255.0",
|
|
|
|
net_id, guest.network.guest_mac, guest.network.host_ip
|
|
|
|
);
|
|
|
|
|
|
|
|
let memory_param: &[&str] = if local {
|
|
|
|
&[
|
|
|
|
"--memory",
|
|
|
|
"size=4G,hotplug_method=virtio-mem,hotplug_size=8G,shared=on",
|
|
|
|
"--balloon",
|
|
|
|
"size=0",
|
|
|
|
]
|
|
|
|
} else {
|
|
|
|
&[
|
|
|
|
"--memory",
|
|
|
|
"size=4G,hotplug_method=virtio-mem,hotplug_size=8G",
|
|
|
|
"--balloon",
|
|
|
|
"size=0",
|
|
|
|
]
|
|
|
|
};
|
|
|
|
|
|
|
|
let boot_vcpus = 2;
|
|
|
|
let max_vcpus = 4;
|
|
|
|
|
|
|
|
let pmem_temp_file = TempFile::new().unwrap();
|
|
|
|
pmem_temp_file.as_file().set_len(128 << 20).unwrap();
|
|
|
|
std::process::Command::new("mkfs.ext4")
|
|
|
|
.arg(pmem_temp_file.as_path())
|
|
|
|
.output()
|
|
|
|
.expect("Expect creating disk image to succeed");
|
|
|
|
let pmem_path = String::from("/dev/pmem0");
|
|
|
|
|
|
|
|
// Start the source VM
|
|
|
|
let src_vm_path = if !upgrade_test {
|
|
|
|
clh_command("cloud-hypervisor")
|
|
|
|
} else {
|
|
|
|
cloud_hypervisor_release_path()
|
|
|
|
};
|
|
|
|
let src_api_socket = temp_api_path(&guest.tmp_dir);
|
|
|
|
let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path);
|
|
|
|
src_vm_cmd
|
2022-09-20 08:46:19 +00:00
|
|
|
.args([
|
2022-09-07 23:51:37 +00:00
|
|
|
"--cpus",
|
2022-12-14 11:41:15 +00:00
|
|
|
format!("boot={boot_vcpus},max={max_vcpus}").as_str(),
|
2022-09-07 23:51:37 +00:00
|
|
|
])
|
|
|
|
.args(memory_param)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--kernel", kernel_path.to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
2022-09-07 23:51:37 +00:00
|
|
|
.default_disks()
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--net", net_params.as_str()])
|
|
|
|
.args(["--api-socket", &src_api_socket])
|
|
|
|
.args([
|
2022-09-07 23:51:37 +00:00
|
|
|
"--pmem",
|
|
|
|
format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(),
|
|
|
|
]);
|
|
|
|
let mut src_child = src_vm_cmd.capture_output().spawn().unwrap();
|
|
|
|
|
|
|
|
// Start the destination VM
|
|
|
|
let mut dest_api_socket = temp_api_path(&guest.tmp_dir);
|
|
|
|
dest_api_socket.push_str(".dest");
|
|
|
|
let mut dest_child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--api-socket", &dest_api_socket])
|
2022-09-07 23:51:37 +00:00
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
|
|
|
|
// Make sure the source VM is functional
|
|
|
|
// Check the number of vCPUs
|
|
|
|
assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
|
|
|
|
|
|
|
|
// Check the guest RAM
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);
|
|
|
|
// Increase the guest RAM
|
|
|
|
resize_command(&src_api_socket, None, Some(6 << 30), None, None);
|
|
|
|
thread::sleep(std::time::Duration::new(5, 0));
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000);
|
|
|
|
// Use balloon to remove RAM from the VM
|
|
|
|
resize_command(&src_api_socket, None, None, Some(1 << 30), None);
|
|
|
|
thread::sleep(std::time::Duration::new(5, 0));
|
|
|
|
let total_memory = guest.get_total_memory().unwrap_or_default();
|
|
|
|
assert!(total_memory > 4_800_000);
|
|
|
|
assert!(total_memory < 5_760_000);
|
|
|
|
|
|
|
|
// Check the guest virtio-devices, e.g. block, rng, console, and net
|
|
|
|
guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));
|
|
|
|
|
|
|
|
// x86_64: Following what's done in the `test_snapshot_restore`, we need
|
|
|
|
// to make sure that removing and adding back the virtio-net device does
|
|
|
|
// not break the live-migration support for virtio-pci.
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
{
|
|
|
|
assert!(remote_command(
|
|
|
|
&src_api_socket,
|
|
|
|
"remove-device",
|
|
|
|
Some(net_id),
|
|
|
|
));
|
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
|
|
|
|
|
|
|
// Plug the virtio-net device again
|
|
|
|
assert!(remote_command(
|
|
|
|
&src_api_socket,
|
|
|
|
"add-net",
|
|
|
|
Some(net_params.as_str()),
|
|
|
|
));
|
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
2022-04-26 05:34:58 +00:00
|
|
|
}
|
2022-09-07 23:51:37 +00:00
|
|
|
|
|
|
|
// Start the live-migration
|
|
|
|
let migration_socket = String::from(
|
|
|
|
guest
|
|
|
|
.tmp_dir
|
|
|
|
.as_path()
|
|
|
|
.join("live-migration.sock")
|
|
|
|
.to_str()
|
|
|
|
.unwrap(),
|
|
|
|
);
|
|
|
|
|
|
|
|
assert!(
|
|
|
|
start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local),
|
|
|
|
"Unsuccessful command: 'send-migration' or 'receive-migration'."
|
|
|
|
);
|
|
|
|
});
|
|
|
|
|
2023-08-31 13:00:19 +00:00
|
|
|
// Check and report any errors that occurred during the live-migration
|
2022-09-07 23:51:37 +00:00
|
|
|
if r.is_err() {
|
|
|
|
print_and_panic(
|
|
|
|
src_child,
|
|
|
|
dest_child,
|
|
|
|
None,
|
2023-08-31 13:00:19 +00:00
|
|
|
"Error occurred during live-migration",
|
2022-09-07 23:51:37 +00:00
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check the source VM has been terminated successfully (give it '3s' to settle)
|
|
|
|
thread::sleep(std::time::Duration::new(3, 0));
|
|
|
|
if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) {
|
|
|
|
print_and_panic(
|
|
|
|
src_child,
|
|
|
|
dest_child,
|
|
|
|
None,
|
|
|
|
"source VM was not terminated successfully.",
|
|
|
|
);
|
|
|
|
};
|
|
|
|
|
|
|
|
// Post live-migration check to make sure the destination VM is functional
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
// Perform the same checks to validate the VM has been properly migrated
|
|
|
|
assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);
|
|
|
|
|
|
|
|
guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));
|
|
|
|
|
|
|
|
// Perform checks on guest RAM using balloon
|
|
|
|
let total_memory = guest.get_total_memory().unwrap_or_default();
|
|
|
|
assert!(total_memory > 4_800_000);
|
|
|
|
assert!(total_memory < 5_760_000);
|
|
|
|
// Deflate balloon to restore entire RAM to the VM
|
|
|
|
resize_command(&dest_api_socket, None, None, Some(0), None);
|
|
|
|
thread::sleep(std::time::Duration::new(5, 0));
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000);
|
|
|
|
// Decrease guest RAM with virtio-mem
|
|
|
|
resize_command(&dest_api_socket, None, Some(5 << 30), None, None);
|
|
|
|
thread::sleep(std::time::Duration::new(5, 0));
|
|
|
|
let total_memory = guest.get_total_memory().unwrap_or_default();
|
|
|
|
assert!(total_memory > 4_800_000);
|
|
|
|
assert!(total_memory < 5_760_000);
|
2022-01-06 22:24:38 +00:00
|
|
|
});
|
2021-08-24 22:42:37 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Clean up the destination VM and make sure it terminated correctly
|
|
|
|
let _ = dest_child.kill();
|
|
|
|
let dest_output = dest_child.wait_with_output().unwrap();
|
|
|
|
handle_child_output(r, &dest_output);
|
2021-09-01 22:13:48 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check the destination VM has the expected 'console_text' from its output
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text));
|
|
|
|
});
|
|
|
|
handle_child_output(r, &dest_output);
|
|
|
|
}
|
2021-08-24 22:42:37 +00:00
|
|
|
|
2022-09-08 00:20:25 +00:00
|
|
|
fn _test_live_migration_numa(upgrade_test: bool, local: bool) {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
let kernel_path = direct_kernel_boot_path();
|
|
|
|
let console_text = String::from("On a branch floating down river a cricket, singing.");
|
|
|
|
let net_id = "net123";
|
|
|
|
let net_params = format!(
|
|
|
|
"id={},tap=,mac={},ip={},mask=255.255.255.0",
|
|
|
|
net_id, guest.network.guest_mac, guest.network.host_ip
|
|
|
|
);
|
|
|
|
|
|
|
|
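// A local migration requires the guest memory to be mapped as shared (shared=on) so the
// destination process can take over the same memory regions; the non-local variant transfers
// the memory over the migration socket instead.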
let memory_param: &[&str] = if local {
|
|
|
|
&[
|
|
|
|
"--memory",
|
|
|
|
"size=0,hotplug_method=virtio-mem,shared=on",
|
|
|
|
"--memory-zone",
|
|
|
|
"id=mem0,size=1G,hotplug_size=4G,shared=on",
|
|
|
|
"id=mem1,size=1G,hotplug_size=4G,shared=on",
|
|
|
|
"id=mem2,size=2G,hotplug_size=4G,shared=on",
|
|
|
|
"--numa",
|
|
|
|
"guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0",
|
|
|
|
"guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1",
|
|
|
|
"guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2",
|
|
|
|
]
|
|
|
|
} else {
|
|
|
|
&[
|
|
|
|
"--memory",
|
|
|
|
"size=0,hotplug_method=virtio-mem",
|
|
|
|
"--memory-zone",
|
|
|
|
"id=mem0,size=1G,hotplug_size=4G",
|
|
|
|
"id=mem1,size=1G,hotplug_size=4G",
|
|
|
|
"id=mem2,size=2G,hotplug_size=4G",
|
|
|
|
"--numa",
|
|
|
|
"guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0",
|
|
|
|
"guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1",
|
|
|
|
"guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2",
|
|
|
|
]
|
|
|
|
};
|
|
|
|
|
|
|
|
let boot_vcpus = 6;
|
|
|
|
let max_vcpus = 12;
|
|
|
|
|
|
|
|
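// Back a virtio-pmem device with a 128 MiB ext4-formatted temporary file; the guest sees it
// as /dev/pmem0.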
let pmem_temp_file = TempFile::new().unwrap();
|
|
|
|
pmem_temp_file.as_file().set_len(128 << 20).unwrap();
|
|
|
|
std::process::Command::new("mkfs.ext4")
|
|
|
|
.arg(pmem_temp_file.as_path())
|
|
|
|
.output()
|
|
|
|
.expect("Expect creating disk image to succeed");
|
|
|
|
let pmem_path = String::from("/dev/pmem0");
|
|
|
|
|
|
|
|
// Start the source VM
|
|
|
|
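// For a live-upgrade test the source VM runs the latest released cloud-hypervisor binary
// while the destination uses the freshly built one; otherwise both sides use the current build.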
let src_vm_path = if !upgrade_test {
|
|
|
|
clh_command("cloud-hypervisor")
|
|
|
|
} else {
|
|
|
|
cloud_hypervisor_release_path()
|
|
|
|
};
|
|
|
|
let src_api_socket = temp_api_path(&guest.tmp_dir);
|
|
|
|
let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path);
|
|
|
|
src_vm_cmd
|
2022-09-20 08:46:19 +00:00
|
|
|
.args([
|
2022-09-08 00:20:25 +00:00
|
|
|
"--cpus",
|
2022-12-14 11:41:15 +00:00
|
|
|
format!("boot={boot_vcpus},max={max_vcpus}").as_str(),
|
2022-09-08 00:20:25 +00:00
|
|
|
])
|
|
|
|
.args(memory_param)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--kernel", kernel_path.to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
2022-09-08 00:20:25 +00:00
|
|
|
.default_disks()
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--net", net_params.as_str()])
|
|
|
|
.args(["--api-socket", &src_api_socket])
|
|
|
|
.args([
|
2022-09-08 00:20:25 +00:00
|
|
|
"--pmem",
|
|
|
|
format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(),
|
|
|
|
]);
|
|
|
|
let mut src_child = src_vm_cmd.capture_output().spawn().unwrap();
|
|
|
|
|
|
|
|
// Start the destination VM
|
|
|
|
let mut dest_api_socket = temp_api_path(&guest.tmp_dir);
|
|
|
|
dest_api_socket.push_str(".dest");
|
|
|
|
let mut dest_child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--api-socket", &dest_api_socket])
|
2022-09-08 00:20:25 +00:00
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
|
|
|
|
// Make sure the source VM is functional
|
|
|
|
// Check the number of vCPUs
|
|
|
|
assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
|
|
|
|
|
|
|
|
// Check the guest RAM
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 2_880_000);
|
|
|
|
|
|
|
|
// Check the guest virtio-devices, e.g. block, rng, console, and net
|
|
|
|
guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));
|
|
|
|
|
|
|
|
// Check the NUMA parameters are applied correctly and resize
|
|
|
|
// each zone to test the case where we migrate a VM with the
|
|
|
|
// virtio-mem regions being used.
|
|
|
|
{
|
|
|
|
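// check_numa_common() takes the expected per-node memory sizes (in KiB), the expected CPU
// sets per node, and the expected rows of the guest's NUMA distance matrix.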
guest.check_numa_common(
|
|
|
|
Some(&[960_000, 960_000, 1_920_000]),
|
|
|
|
Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]),
|
|
|
|
Some(&["10 15 20", "20 10 25", "25 30 10"]),
|
|
|
|
);
|
|
|
|
|
|
|
|
// AArch64 currently does not support hotplug, and therefore we only
|
|
|
|
// test hotplug-related functionality on x86_64 here.
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
{
|
|
|
|
guest.enable_memory_hotplug();
|
|
|
|
|
|
|
|
// Resize every memory zone and check each associated NUMA node
|
|
|
|
// has been assigned the right amount of memory.
|
|
|
|
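// resize_zone_command() issues the 'resize-zone' API request to grow a virtio-mem backed
// memory zone; the sleep below gives the guest time to plug in the added memory.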
resize_zone_command(&src_api_socket, "mem0", "2G");
|
|
|
|
resize_zone_command(&src_api_socket, "mem1", "2G");
|
|
|
|
resize_zone_command(&src_api_socket, "mem2", "3G");
|
|
|
|
thread::sleep(std::time::Duration::new(5, 0));
|
|
|
|
|
|
|
|
guest.check_numa_common(Some(&[1_920_000, 1_920_000, 1_920_000]), None, None);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// x86_64: Following what's done in the `test_snapshot_restore`, we need
|
|
|
|
// to make sure that removing and adding back the virtio-net device does
|
|
|
|
// not break the live-migration support for virtio-pci.
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
{
|
|
|
|
assert!(remote_command(
|
|
|
|
&src_api_socket,
|
|
|
|
"remove-device",
|
|
|
|
Some(net_id),
|
|
|
|
));
|
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
|
|
|
|
|
|
|
// Plug the virtio-net device again
|
|
|
|
assert!(remote_command(
|
|
|
|
&src_api_socket,
|
|
|
|
"add-net",
|
|
|
|
Some(net_params.as_str()),
|
|
|
|
));
|
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Start the live-migration
|
|
|
|
let migration_socket = String::from(
|
|
|
|
guest
|
|
|
|
.tmp_dir
|
|
|
|
.as_path()
|
|
|
|
.join("live-migration.sock")
|
|
|
|
.to_str()
|
|
|
|
.unwrap(),
|
|
|
|
);
|
|
|
|
|
|
|
|
assert!(
|
|
|
|
start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local),
|
|
|
|
"Unsuccessful command: 'send-migration' or 'receive-migration'."
|
|
|
|
);
|
|
|
|
});
|
|
|
|
|
2023-08-31 13:00:19 +00:00
|
|
|
// Check and report any errors that occurred during the live-migration
|
2022-09-08 00:20:25 +00:00
|
|
|
if r.is_err() {
|
|
|
|
print_and_panic(
|
|
|
|
src_child,
|
|
|
|
dest_child,
|
|
|
|
None,
|
2023-08-31 13:00:19 +00:00
|
|
|
"Error occurred during live-migration",
|
2022-09-08 00:20:25 +00:00
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check the source VM has been terminated successfully (give it '3s' to settle)
|
|
|
|
thread::sleep(std::time::Duration::new(3, 0));
|
|
|
|
if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) {
|
|
|
|
print_and_panic(
|
|
|
|
src_child,
|
|
|
|
dest_child,
|
|
|
|
None,
|
|
|
|
"source VM was not terminated successfully.",
|
|
|
|
);
|
|
|
|
};
|
|
|
|
|
|
|
|
// Post live-migration check to make sure the destination VM is functional
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
// Perform the same checks to validate the VM has been properly migrated
|
|
|
|
assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 6_720_000);
|
|
|
|
#[cfg(target_arch = "aarch64")]
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);
|
|
|
|
|
|
|
|
guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));
|
|
|
|
|
|
|
|
// Perform NUMA related checks
|
|
|
|
{
|
|
|
|
#[cfg(target_arch = "aarch64")]
|
|
|
|
{
|
|
|
|
guest.check_numa_common(
|
|
|
|
Some(&[960_000, 960_000, 1_920_000]),
|
|
|
|
Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]),
|
|
|
|
Some(&["10 15 20", "20 10 25", "25 30 10"]),
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
// AArch64 currently does not support hotplug, and therefore we only
|
|
|
|
// test hotplug-related functionality on x86_64 here.
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
{
|
|
|
|
guest.check_numa_common(
|
|
|
|
Some(&[1_920_000, 1_920_000, 2_880_000]),
|
|
|
|
Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]),
|
|
|
|
Some(&["10 15 20", "20 10 25", "25 30 10"]),
|
|
|
|
);
|
|
|
|
|
|
|
|
guest.enable_memory_hotplug();
|
|
|
|
|
|
|
|
// Resize every memory zone and check each associated NUMA node
|
|
|
|
// has been assigned the right amount of memory.
|
|
|
|
resize_zone_command(&dest_api_socket, "mem0", "4G");
|
|
|
|
resize_zone_command(&dest_api_socket, "mem1", "4G");
|
|
|
|
resize_zone_command(&dest_api_socket, "mem2", "4G");
|
|
|
|
// Resize to the maximum amount of CPUs and check each NUMA
|
|
|
|
// node has been assigned the right CPUs set.
|
|
|
|
resize_command(&dest_api_socket, Some(max_vcpus), None, None, None);
|
|
|
|
thread::sleep(std::time::Duration::new(5, 0));
|
|
|
|
|
|
|
|
guest.check_numa_common(
|
|
|
|
Some(&[3_840_000, 3_840_000, 3_840_000]),
|
|
|
|
Some(&[vec![0, 1, 2, 9], vec![3, 4, 6, 7, 8], vec![5, 10, 11]]),
|
|
|
|
None,
|
|
|
|
);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
});
|
|
|
|
|
|
|
|
// Clean up the destination VM and make sure it terminated correctly
|
|
|
|
let _ = dest_child.kill();
|
|
|
|
let dest_output = dest_child.wait_with_output().unwrap();
|
|
|
|
handle_child_output(r, &dest_output);
|
|
|
|
|
|
|
|
// Check the destination VM has the expected 'console_text' from its output
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text));
|
|
|
|
});
|
|
|
|
handle_child_output(r, &dest_output);
|
|
|
|
}
|
|
|
|
|
2022-09-07 22:13:57 +00:00
|
|
|
fn _test_live_migration_watchdog(upgrade_test: bool, local: bool) {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
let kernel_path = direct_kernel_boot_path();
|
|
|
|
let console_text = String::from("On a branch floating down river a cricket, singing.");
|
|
|
|
let net_id = "net123";
|
|
|
|
let net_params = format!(
|
|
|
|
"id={},tap=,mac={},ip={},mask=255.255.255.0",
|
|
|
|
net_id, guest.network.guest_mac, guest.network.host_ip
|
|
|
|
);
|
|
|
|
|
|
|
|
let memory_param: &[&str] = if local {
|
|
|
|
&["--memory", "size=4G,shared=on"]
|
|
|
|
} else {
|
|
|
|
&["--memory", "size=4G"]
|
|
|
|
};
|
|
|
|
|
|
|
|
let boot_vcpus = 2;
|
|
|
|
let max_vcpus = 4;
|
|
|
|
|
|
|
|
let pmem_temp_file = TempFile::new().unwrap();
|
|
|
|
pmem_temp_file.as_file().set_len(128 << 20).unwrap();
|
|
|
|
std::process::Command::new("mkfs.ext4")
|
|
|
|
.arg(pmem_temp_file.as_path())
|
|
|
|
.output()
|
|
|
|
.expect("Expect creating disk image to succeed");
|
|
|
|
let pmem_path = String::from("/dev/pmem0");
|
|
|
|
|
|
|
|
// Start the source VM
|
|
|
|
let src_vm_path = if !upgrade_test {
|
|
|
|
clh_command("cloud-hypervisor")
|
|
|
|
} else {
|
|
|
|
cloud_hypervisor_release_path()
|
|
|
|
};
|
|
|
|
let src_api_socket = temp_api_path(&guest.tmp_dir);
|
|
|
|
let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path);
|
|
|
|
src_vm_cmd
|
2022-09-20 08:46:19 +00:00
|
|
|
.args([
|
2022-09-07 22:13:57 +00:00
|
|
|
"--cpus",
|
2022-12-14 11:41:15 +00:00
|
|
|
format!("boot={boot_vcpus},max={max_vcpus}").as_str(),
|
2022-09-07 22:13:57 +00:00
|
|
|
])
|
|
|
|
.args(memory_param)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--kernel", kernel_path.to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
2022-09-07 22:13:57 +00:00
|
|
|
.default_disks()
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--net", net_params.as_str()])
|
|
|
|
.args(["--api-socket", &src_api_socket])
|
|
|
|
.args([
|
2022-09-07 22:13:57 +00:00
|
|
|
"--pmem",
|
|
|
|
format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(),
|
|
|
|
])
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--watchdog"]);
|
2022-09-07 22:13:57 +00:00
|
|
|
let mut src_child = src_vm_cmd.capture_output().spawn().unwrap();
|
|
|
|
|
|
|
|
// Start the destination VM
|
|
|
|
let mut dest_api_socket = temp_api_path(&guest.tmp_dir);
|
|
|
|
dest_api_socket.push_str(".dest");
|
|
|
|
let mut dest_child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--api-socket", &dest_api_socket])
|
2022-09-07 22:13:57 +00:00
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
|
|
|
|
// Make sure the source VM is functional
|
|
|
|
// Check the number of vCPUs
|
|
|
|
assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
|
|
|
|
// Check the guest RAM
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);
|
|
|
|
// Check the guest virtio-devices, e.g. block, rng, console, and net
|
|
|
|
guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));
|
|
|
|
// x86_64: Following what's done in the `test_snapshot_restore`, we need
|
|
|
|
// to make sure that removing and adding back the virtio-net device does
|
|
|
|
// not break the live-migration support for virtio-pci.
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
{
|
|
|
|
assert!(remote_command(
|
|
|
|
&src_api_socket,
|
|
|
|
"remove-device",
|
|
|
|
Some(net_id),
|
|
|
|
));
|
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
|
|
|
|
|
|
|
// Plug the virtio-net device again
|
|
|
|
assert!(remote_command(
|
|
|
|
&src_api_socket,
|
|
|
|
"add-net",
|
|
|
|
Some(net_params.as_str()),
|
|
|
|
));
|
|
|
|
thread::sleep(std::time::Duration::new(10, 0));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Enable the watchdog and ensure it's functional
|
|
|
|
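// The test assumes get_reboot_count() reports 1 right after the initial boot, so each
// subsequent guest reboot bumps the expected value by one.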
let mut expected_reboot_count = 1;
|
|
|
|
// Enable the watchdog with a 15s timeout
|
|
|
|
enable_guest_watchdog(&guest, 15);
|
|
|
|
// Reboot and check that systemd has activated the watchdog
|
|
|
|
guest.ssh_command("sudo reboot").unwrap();
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
expected_reboot_count += 1;
|
|
|
|
assert_eq!(get_reboot_count(&guest), expected_reboot_count);
|
|
|
|
assert_eq!(
|
|
|
|
guest
|
|
|
|
.ssh_command("sudo journalctl | grep -c -- \"Watchdog started\"")
|
|
|
|
.unwrap()
|
|
|
|
.trim()
|
|
|
|
.parse::<u32>()
|
|
|
|
.unwrap_or_default(),
|
|
|
|
2
|
|
|
|
);
|
|
|
|
// Allow some normal time to elapse to check we don't get spurious reboots
|
|
|
|
thread::sleep(std::time::Duration::new(40, 0));
|
|
|
|
// Check no reboot
|
|
|
|
assert_eq!(get_reboot_count(&guest), expected_reboot_count);
|
|
|
|
|
|
|
|
// Start the live-migration
|
|
|
|
let migration_socket = String::from(
|
|
|
|
guest
|
|
|
|
.tmp_dir
|
|
|
|
.as_path()
|
|
|
|
.join("live-migration.sock")
|
|
|
|
.to_str()
|
|
|
|
.unwrap(),
|
|
|
|
);
|
|
|
|
|
|
|
|
assert!(
|
|
|
|
start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local),
|
|
|
|
"Unsuccessful command: 'send-migration' or 'receive-migration'."
|
|
|
|
);
|
|
|
|
});
|
|
|
|
|
2023-08-31 13:00:19 +00:00
|
|
|
// Check and report any errors that occurred during the live-migration
|
2022-09-07 22:13:57 +00:00
|
|
|
if r.is_err() {
|
|
|
|
print_and_panic(
|
|
|
|
src_child,
|
|
|
|
dest_child,
|
|
|
|
None,
|
2023-08-31 13:00:19 +00:00
|
|
|
"Error occurred during live-migration",
|
2022-09-07 22:13:57 +00:00
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check the source VM has been terminated successfully (give it '3s' to settle)
|
|
|
|
thread::sleep(std::time::Duration::new(3, 0));
|
|
|
|
if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) {
|
|
|
|
print_and_panic(
|
|
|
|
src_child,
|
|
|
|
dest_child,
|
|
|
|
None,
|
|
|
|
"source VM was not terminated successfully.",
|
|
|
|
);
|
|
|
|
};
|
|
|
|
|
|
|
|
// Post live-migration check to make sure the destination VM is functional
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
// Perform the same checks to validate the VM has been properly migrated
|
|
|
|
assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);
|
|
|
|
|
|
|
|
guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));
|
|
|
|
|
|
|
|
// Perform checks on watchdog
|
|
|
|
let mut expected_reboot_count = 2;
|
|
|
|
|
|
|
|
// Allow some normal time to elapse to check we don't get spurious reboots
|
|
|
|
thread::sleep(std::time::Duration::new(40, 0));
|
|
|
|
// Check no reboot
|
|
|
|
assert_eq!(get_reboot_count(&guest), expected_reboot_count);
|
|
|
|
|
|
|
|
// Trigger a panic (sync first). We need to do this inside a screen with a delay so the SSH command returns.
|
|
|
|
guest.ssh_command("screen -dmS reboot sh -c \"sleep 5; echo s | tee /proc/sysrq-trigger; echo c | sudo tee /proc/sysrq-trigger\"").unwrap();
|
|
|
|
// Allow some time for the watchdog to trigger (max 30s) and reboot to happen
|
|
|
|
guest.wait_vm_boot(Some(50)).unwrap();
|
2023-08-31 13:00:19 +00:00
|
|
|
// Check a reboot is triggered by the watchdog
|
2022-09-07 22:13:57 +00:00
|
|
|
expected_reboot_count += 1;
|
|
|
|
assert_eq!(get_reboot_count(&guest), expected_reboot_count);
|
|
|
|
|
|
|
|
#[cfg(target_arch = "x86_64")]
|
|
|
|
{
|
|
|
|
// Now pause the VM and remain offline for 30s
|
|
|
|
assert!(remote_command(&dest_api_socket, "pause", None));
|
|
|
|
thread::sleep(std::time::Duration::new(30, 0));
|
|
|
|
assert!(remote_command(&dest_api_socket, "resume", None));
|
|
|
|
|
|
|
|
// Check no reboot
|
|
|
|
assert_eq!(get_reboot_count(&guest), expected_reboot_count);
|
|
|
|
}
|
|
|
|
});
|
|
|
|
|
|
|
|
// Clean up the destination VM and make sure it terminated correctly
|
|
|
|
let _ = dest_child.kill();
|
|
|
|
let dest_output = dest_child.wait_with_output().unwrap();
|
|
|
|
handle_child_output(r, &dest_output);
|
|
|
|
|
|
|
|
// Check the destination VM has the expected 'console_text' from its output
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text));
|
|
|
|
});
|
|
|
|
handle_child_output(r, &dest_output);
|
|
|
|
}
|
|
|
|
|
2022-04-13 18:33:22 +00:00
|
|
|
fn _test_live_migration_ovs_dpdk(upgrade_test: bool, local: bool) {
|
2022-01-06 22:24:38 +00:00
|
|
|
let ovs_focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let ovs_guest = Guest::new(Box::new(ovs_focal));
|
|
|
|
|
|
|
|
let migration_focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let migration_guest = Guest::new(Box::new(migration_focal));
|
|
|
|
let src_api_socket = temp_api_path(&migration_guest.tmp_dir);
|
|
|
|
|
|
|
|
// Start two VMs connected through ovs-dpdk; one of them is the source VM for the live-migration
|
|
|
|
let (mut ovs_child, mut src_child) =
|
2022-04-13 18:33:22 +00:00
|
|
|
setup_ovs_dpdk_guests(&ovs_guest, &migration_guest, &src_api_socket, upgrade_test);
|
2022-01-06 22:24:38 +00:00
|
|
|
|
|
|
|
// Start the destination VM
|
|
|
|
let mut dest_api_socket = temp_api_path(&migration_guest.tmp_dir);
|
|
|
|
dest_api_socket.push_str(".dest");
|
|
|
|
let mut dest_child = GuestCommand::new(&migration_guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--api-socket", &dest_api_socket])
|
2022-01-06 22:24:38 +00:00
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2021-09-01 22:13:48 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
// Give it '1s' to make sure the 'dest_api_socket' file is properly created
|
|
|
|
thread::sleep(std::time::Duration::new(1, 0));
|
2021-09-01 22:13:48 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Start the live-migration
|
|
|
|
let migration_socket = String::from(
|
|
|
|
migration_guest
|
|
|
|
.tmp_dir
|
|
|
|
.as_path()
|
|
|
|
.join("live-migration.sock")
|
|
|
|
.to_str()
|
|
|
|
.unwrap(),
|
|
|
|
);
|
2022-01-14 13:12:40 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
assert!(
|
2022-09-07 20:55:00 +00:00
|
|
|
start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local),
|
2022-01-06 22:24:38 +00:00
|
|
|
"Unsuccessful command: 'send-migration' or 'receive-migration'."
|
|
|
|
);
|
|
|
|
});
|
2021-09-09 04:44:27 +00:00
|
|
|
|
2023-08-31 13:00:19 +00:00
|
|
|
// Check and report any errors that occurred during the live-migration
|
2022-01-06 22:24:38 +00:00
|
|
|
if r.is_err() {
|
|
|
|
print_and_panic(
|
|
|
|
src_child,
|
|
|
|
dest_child,
|
2022-09-07 21:23:10 +00:00
|
|
|
Some(ovs_child),
|
2023-08-31 13:00:19 +00:00
|
|
|
"Error occurred during live-migration",
|
2022-01-06 22:24:38 +00:00
|
|
|
);
|
|
|
|
}
|
2021-09-09 04:44:27 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Check the source VM has been terminated successfully (give it '3s' to settle)
|
|
|
|
thread::sleep(std::time::Duration::new(3, 0));
|
|
|
|
if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) {
|
|
|
|
print_and_panic(
|
|
|
|
src_child,
|
|
|
|
dest_child,
|
2022-09-07 21:23:10 +00:00
|
|
|
Some(ovs_child),
|
2022-01-06 22:24:38 +00:00
|
|
|
"source VM was not terminated successfully.",
|
|
|
|
);
|
|
|
|
};
|
2021-09-09 04:44:27 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Post live-migration check to make sure the destination VM is functional
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
// Perform the same checks to validate the VM has been properly migrated
|
|
|
|
// Spawn a new netcat listener in the OVS VM
|
|
|
|
let guest_ip = ovs_guest.network.guest_ip.clone();
|
|
|
|
thread::spawn(move || {
|
|
|
|
ssh_command_ip(
|
|
|
|
"nc -l 12345",
|
|
|
|
&guest_ip,
|
|
|
|
DEFAULT_SSH_RETRIES,
|
|
|
|
DEFAULT_SSH_TIMEOUT,
|
|
|
|
)
|
|
|
|
.unwrap();
|
|
|
|
});
|
2021-09-09 04:44:27 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Wait for the server to be listening
|
|
|
|
thread::sleep(std::time::Duration::new(5, 0));
|
2021-09-09 04:44:27 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// And check the connection is still functional after live-migration
|
|
|
|
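// 172.100.0.1 is the OVS VM's address on the ovs-dpdk (vhost-user) network set up by
// setup_ovs_dpdk_guests().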
migration_guest
|
|
|
|
.ssh_command("nc -vz 172.100.0.1 12345")
|
|
|
|
.unwrap();
|
|
|
|
});
|
2021-09-09 04:44:27 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
// Clean up the destination VM and the OVS VM, and make sure they terminated correctly
|
|
|
|
let _ = dest_child.kill();
|
|
|
|
let _ = ovs_child.kill();
|
|
|
|
let dest_output = dest_child.wait_with_output().unwrap();
|
|
|
|
let ovs_output = ovs_child.wait_with_output().unwrap();
|
2022-08-15 22:36:35 +00:00
|
|
|
|
|
|
|
cleanup_ovs_dpdk();
|
2022-08-31 18:30:41 +00:00
|
|
|
|
|
|
|
handle_child_output(r, &dest_output);
|
|
|
|
handle_child_output(Ok(()), &ovs_output);
|
2021-08-24 22:42:37 +00:00
|
|
|
}
|
2022-02-18 20:21:28 +00:00
|
|
|
|
2022-08-16 19:03:45 +00:00
|
|
|
mod live_migration_parallel {
|
|
|
|
use super::*;
|
|
|
|
#[test]
|
|
|
|
fn test_live_migration_basic() {
|
2022-09-08 00:20:25 +00:00
|
|
|
_test_live_migration(false, false)
|
2022-08-16 19:03:45 +00:00
|
|
|
}
|
2022-02-18 20:21:28 +00:00
|
|
|
|
2022-08-16 19:03:45 +00:00
|
|
|
#[test]
|
|
|
|
fn test_live_migration_local() {
|
2022-09-08 00:20:25 +00:00
|
|
|
_test_live_migration(false, true)
|
2022-08-16 19:03:45 +00:00
|
|
|
}
|
2022-04-13 18:33:22 +00:00
|
|
|
|
2022-08-16 19:03:45 +00:00
|
|
|
#[test]
|
|
|
|
#[cfg(not(feature = "mshv"))]
|
|
|
|
fn test_live_migration_numa() {
|
2022-09-08 00:20:25 +00:00
|
|
|
_test_live_migration_numa(false, false)
|
2022-08-16 19:03:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
#[cfg(not(feature = "mshv"))]
|
|
|
|
fn test_live_migration_numa_local() {
|
2022-09-08 00:20:25 +00:00
|
|
|
_test_live_migration_numa(false, true)
|
2022-08-16 19:03:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
fn test_live_migration_watchdog() {
|
2022-09-07 22:13:57 +00:00
|
|
|
_test_live_migration_watchdog(false, false)
|
2022-08-16 19:03:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
fn test_live_migration_watchdog_local() {
|
2022-09-07 22:13:57 +00:00
|
|
|
_test_live_migration_watchdog(false, true)
|
2022-08-16 19:03:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
fn test_live_migration_balloon() {
|
2022-09-07 23:51:37 +00:00
|
|
|
_test_live_migration_balloon(false, false)
|
2022-08-16 19:03:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
fn test_live_migration_balloon_local() {
|
2022-09-07 23:51:37 +00:00
|
|
|
_test_live_migration_balloon(false, true)
|
2022-08-16 19:03:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
2024-01-23 17:29:40 +00:00
|
|
|
#[ignore = "See #6134"]
|
2022-08-16 19:03:45 +00:00
|
|
|
fn test_live_upgrade_basic() {
|
2022-09-08 00:20:25 +00:00
|
|
|
_test_live_migration(true, false)
|
2022-08-16 19:03:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
2024-01-23 17:29:40 +00:00
|
|
|
#[ignore = "See #6134"]
|
2022-08-16 19:03:45 +00:00
|
|
|
fn test_live_upgrade_local() {
|
2022-09-08 00:20:25 +00:00
|
|
|
_test_live_migration(true, true)
|
2022-08-16 19:03:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
2024-01-23 17:29:40 +00:00
|
|
|
#[ignore = "See #6134"]
|
2022-08-16 19:03:45 +00:00
|
|
|
#[cfg(not(feature = "mshv"))]
|
|
|
|
fn test_live_upgrade_numa() {
|
2022-09-08 00:20:25 +00:00
|
|
|
_test_live_migration_numa(true, false)
|
2022-08-16 19:03:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
2024-01-23 17:29:40 +00:00
|
|
|
#[ignore = "See #6134"]
|
2022-08-16 19:03:45 +00:00
|
|
|
#[cfg(not(feature = "mshv"))]
|
|
|
|
fn test_live_upgrade_numa_local() {
|
2022-09-08 00:20:25 +00:00
|
|
|
_test_live_migration_numa(true, true)
|
2022-08-16 19:03:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
2024-01-23 17:29:40 +00:00
|
|
|
#[ignore = "See #6134"]
|
2022-08-16 19:03:45 +00:00
|
|
|
fn test_live_upgrade_watchdog() {
|
2022-09-07 22:13:57 +00:00
|
|
|
_test_live_migration_watchdog(true, false)
|
2022-08-16 19:03:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
2024-01-23 17:29:40 +00:00
|
|
|
#[ignore = "See #6134"]
|
2022-08-16 19:03:45 +00:00
|
|
|
fn test_live_upgrade_watchdog_local() {
|
2022-09-07 22:13:57 +00:00
|
|
|
_test_live_migration_watchdog(true, true)
|
2022-08-16 19:03:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
2024-01-23 17:29:40 +00:00
|
|
|
#[ignore = "See #6134"]
|
2022-08-16 19:03:45 +00:00
|
|
|
fn test_live_upgrade_balloon() {
|
2022-09-07 23:51:37 +00:00
|
|
|
_test_live_migration_balloon(true, false)
|
2022-08-16 19:03:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
2024-01-23 17:29:40 +00:00
|
|
|
#[ignore = "See #6134"]
|
2022-08-16 19:03:45 +00:00
|
|
|
fn test_live_upgrade_balloon_local() {
|
2022-09-07 23:51:37 +00:00
|
|
|
_test_live_migration_balloon(true, true)
|
2022-08-16 19:03:45 +00:00
|
|
|
}
|
2022-04-13 18:33:22 +00:00
|
|
|
}
|
|
|
|
|
2022-08-16 19:03:45 +00:00
|
|
|
mod live_migration_sequential {
|
2022-08-18 11:26:26 +00:00
|
|
|
#[cfg(target_arch = "x86_64")]
|
2022-08-16 19:03:45 +00:00
|
|
|
#[cfg(not(feature = "mshv"))]
|
|
|
|
use super::*;
|
|
|
|
|
|
|
|
// The ovs-dpdk tests must run sequentially because they rely on the same ovs-dpdk setup
|
|
|
|
#[test]
|
2023-10-23 12:54:13 +00:00
|
|
|
#[ignore = "See #5532"]
|
2022-08-18 11:26:26 +00:00
|
|
|
#[cfg(target_arch = "x86_64")]
|
2022-08-16 19:03:45 +00:00
|
|
|
#[cfg(not(feature = "mshv"))]
|
|
|
|
fn test_live_migration_ovs_dpdk() {
|
|
|
|
_test_live_migration_ovs_dpdk(false, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
2022-08-18 11:26:26 +00:00
|
|
|
#[cfg(target_arch = "x86_64")]
|
2022-08-16 19:03:45 +00:00
|
|
|
#[cfg(not(feature = "mshv"))]
|
|
|
|
fn test_live_migration_ovs_dpdk_local() {
|
|
|
|
_test_live_migration_ovs_dpdk(false, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
2023-10-23 12:54:13 +00:00
|
|
|
#[ignore = "See #5532"]
|
2022-08-18 11:26:26 +00:00
|
|
|
#[cfg(target_arch = "x86_64")]
|
2022-08-16 19:03:45 +00:00
|
|
|
#[cfg(not(feature = "mshv"))]
|
|
|
|
fn test_live_upgrade_ovs_dpdk() {
|
|
|
|
_test_live_migration_ovs_dpdk(true, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
2024-01-23 17:29:40 +00:00
|
|
|
#[ignore = "See #5532"]
|
2022-08-18 11:26:26 +00:00
|
|
|
#[cfg(target_arch = "x86_64")]
|
2022-08-16 19:03:45 +00:00
|
|
|
#[cfg(not(feature = "mshv"))]
|
|
|
|
fn test_live_upgrade_ovs_dpdk_local() {
|
|
|
|
_test_live_migration_ovs_dpdk(true, true);
|
|
|
|
}
|
2022-02-18 20:21:28 +00:00
|
|
|
}
|
2022-01-06 22:24:38 +00:00
|
|
|
}
|
2021-09-07 02:21:26 +00:00
|
|
|
|
2022-03-28 10:53:22 +00:00
|
|
|
#[cfg(target_arch = "aarch64")]
|
2022-01-06 22:24:38 +00:00
|
|
|
mod aarch64_acpi {
|
|
|
|
use crate::*;
|
2021-09-07 02:21:26 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_simple_launch_acpi() {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
2021-09-07 02:21:26 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
vec![Box::new(focal)].drain(..).for_each(|disk_config| {
|
|
|
|
let guest = Guest::new(disk_config);
|
2021-09-07 02:21:26 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 09:19:00 +00:00
|
|
|
.args(["--cpus", "boot=1"])
|
|
|
|
.args(["--memory", "size=512M"])
|
|
|
|
.args(["--kernel", edk2_path().to_str().unwrap()])
|
2022-01-06 22:24:38 +00:00
|
|
|
.default_disks()
|
|
|
|
.default_net()
|
2022-09-20 09:19:00 +00:00
|
|
|
.args(["--serial", "tty", "--console", "off"])
|
2022-01-06 22:24:38 +00:00
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
2021-09-07 02:21:26 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(Some(120)).unwrap();
|
2021-09-07 02:21:26 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1);
|
|
|
|
assert!(guest.get_total_memory().unwrap_or_default() > 400_000);
|
|
|
|
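// 0x060000 is the PCI class code of a host bridge (base class 0x06, sub-class 0x00).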
assert_eq!(guest.get_pci_bridge_class().unwrap_or_default(), "0x060000");
|
|
|
|
});
|
2021-09-07 02:21:26 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
2021-09-07 02:21:26 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
handle_child_output(r, &output);
|
|
|
|
});
|
|
|
|
}
|
2021-09-07 02:21:26 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_guest_numa_nodes_acpi() {
|
|
|
|
_test_guest_numa_nodes(true);
|
|
|
|
}
|
2021-09-07 02:21:26 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_cpu_topology_421_acpi() {
|
|
|
|
test_cpu_topology(4, 2, 1, true);
|
|
|
|
}
|
2021-09-07 02:21:26 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_cpu_topology_142_acpi() {
|
|
|
|
test_cpu_topology(1, 4, 2, true);
|
|
|
|
}
|
2021-09-07 02:21:26 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_cpu_topology_262_acpi() {
|
|
|
|
test_cpu_topology(2, 6, 2, true);
|
|
|
|
}
|
2021-09-07 02:21:26 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_power_button_acpi() {
|
|
|
|
_test_power_button(true);
|
|
|
|
}
|
2021-09-23 01:04:08 +00:00
|
|
|
|
2022-01-06 22:24:38 +00:00
|
|
|
#[test]
|
|
|
|
fn test_virtio_iommu() {
|
|
|
|
_test_virtio_iommu(true)
|
2021-09-07 02:21:26 +00:00
|
|
|
}
|
2020-02-25 09:42:15 +00:00
|
|
|
}
|
2022-08-24 22:07:44 +00:00
|
|
|
|
|
|
|
mod rate_limiter {
|
|
|
|
use super::*;
|
|
|
|
|
|
|
|
// Check if the 'measured' rate is within the expected 'difference' (in percentage)
|
|
|
|
// compared to the given 'limit' rate.
|
|
|
|
fn check_rate_limit(measured: f64, limit: f64, difference: f64) -> bool {
|
|
|
|
let upper_limit = limit * (1_f64 + difference);
|
|
|
|
let lower_limit = limit * (1_f64 - difference);
|
|
|
|
|
|
|
|
if measured > lower_limit && measured < upper_limit {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
eprintln!(
|
2023-11-03 18:45:46 +00:00
|
|
|
"\n\n==== Start 'check_rate_limit' failed ==== \
|
|
|
|
\n\nmeasured={measured}, lower_limit={lower_limit}, upper_limit={upper_limit} \
|
|
|
|
\n\n==== End 'check_rate_limit' failed ====\n\n"
|
2022-08-24 22:07:44 +00:00
|
|
|
);
|
|
|
|
|
|
|
|
false
|
|
|
|
}
|
|
|
|
|
|
|
|
fn _test_rate_limiter_net(rx: bool) {
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
|
|
|
|
let test_timeout = 10;
|
|
|
|
let num_queues = 2;
|
|
|
|
let queue_size = 256;
|
|
|
|
let bw_size = 10485760_u64; // bytes
|
|
|
|
let bw_refill_time = 100; // ms
|
|
|
|
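// The rate limiter's token bucket refills 'bw_size' bytes every 'bw_refill_time' ms, so the
// sustained limit is bw_size * 8 * (1000 / bw_refill_time) bits per second (~838 Mbps here).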
let limit_bps = (bw_size * 8 * 1000) as f64 / bw_refill_time as f64;
|
|
|
|
|
|
|
|
let net_params = format!(
|
|
|
|
"tap=,mac={},ip={},mask=255.255.255.0,num_queues={},queue_size={},bw_size={},bw_refill_time={}",
|
|
|
|
guest.network.guest_mac,
|
|
|
|
guest.network.host_ip,
|
|
|
|
num_queues,
|
|
|
|
queue_size,
|
|
|
|
bw_size,
|
|
|
|
bw_refill_time,
|
|
|
|
);
|
|
|
|
|
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--cpus", &format!("boot={}", num_queues / 2)])
|
|
|
|
.args(["--memory", "size=4G"])
|
|
|
|
.args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
2022-08-24 22:07:44 +00:00
|
|
|
.default_disks()
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--net", net_params.as_str()])
|
2022-08-24 22:07:44 +00:00
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
let measured_bps =
|
2023-01-05 20:52:41 +00:00
|
|
|
measure_virtio_net_throughput(test_timeout, num_queues / 2, &guest, rx, true)
|
|
|
|
.unwrap();
|
2022-08-24 22:07:44 +00:00
|
|
|
assert!(check_rate_limit(measured_bps, limit_bps, 0.1));
|
|
|
|
});
|
|
|
|
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
fn test_rate_limiter_net_rx() {
|
|
|
|
_test_rate_limiter_net(true);
|
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
fn test_rate_limiter_net_tx() {
|
|
|
|
_test_rate_limiter_net(false);
|
|
|
|
}
|
2022-08-30 00:43:50 +00:00
|
|
|
|
2023-12-13 23:19:27 +00:00
|
|
|
fn _test_rate_limiter_block(bandwidth: bool, num_queues: u32) {
|
2022-08-30 00:43:50 +00:00
|
|
|
let test_timeout = 10;
|
|
|
|
let fio_ops = FioOps::RandRW;
|
|
|
|
|
|
|
|
let bw_size = if bandwidth {
|
|
|
|
10485760_u64 // bytes
|
|
|
|
} else {
|
|
|
|
100_u64 // I/O
|
|
|
|
};
|
|
|
|
let bw_refill_time = 100; // ms
|
|
|
|
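// The limit is 'bw_size' bytes (bandwidth) or operations (IOPS) per 'bw_refill_time' ms,
// i.e. bw_size * 1000 / bw_refill_time per second.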
let limit_rate = (bw_size * 1000) as f64 / bw_refill_time as f64;
|
|
|
|
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
let api_socket = temp_api_path(&guest.tmp_dir);
|
|
|
|
let test_img_dir = TempDir::new_with_prefix("/var/tmp/ch").unwrap();
|
|
|
|
let blk_rate_limiter_test_img =
|
|
|
|
String::from(test_img_dir.as_path().join("blk.img").to_str().unwrap());
|
|
|
|
|
|
|
|
// Create the test block image
|
|
|
|
assert!(exec_host_command_output(&format!(
|
2022-12-14 11:41:15 +00:00
|
|
|
"dd if=/dev/zero of={blk_rate_limiter_test_img} bs=1M count=1024"
|
2022-08-30 00:43:50 +00:00
|
|
|
))
|
|
|
|
.status
|
|
|
|
.success());
|
|
|
|
|
|
|
|
let test_blk_params = if bandwidth {
|
|
|
|
format!(
|
2023-12-13 23:19:27 +00:00
|
|
|
"path={blk_rate_limiter_test_img},num_queues={num_queues},bw_size={bw_size},bw_refill_time={bw_refill_time}"
|
2022-08-30 00:43:50 +00:00
|
|
|
)
|
|
|
|
} else {
|
|
|
|
format!(
|
2023-12-13 23:19:27 +00:00
|
|
|
"path={blk_rate_limiter_test_img},num_queues={num_queues},ops_size={bw_size},ops_refill_time={bw_refill_time}"
|
2022-08-30 00:43:50 +00:00
|
|
|
)
|
|
|
|
};
|
|
|
|
|
|
|
|
let mut child = GuestCommand::new(&guest)
|
2022-12-14 11:41:15 +00:00
|
|
|
.args(["--cpus", &format!("boot={num_queues}")])
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--memory", "size=4G"])
|
|
|
|
.args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
|
|
|
.args([
|
2022-08-30 00:43:50 +00:00
|
|
|
"--disk",
|
|
|
|
format!(
|
|
|
|
"path={}",
|
|
|
|
guest.disk_config.disk(DiskType::OperatingSystem).unwrap()
|
|
|
|
)
|
|
|
|
.as_str(),
|
|
|
|
format!(
|
|
|
|
"path={}",
|
|
|
|
guest.disk_config.disk(DiskType::CloudInit).unwrap()
|
|
|
|
)
|
|
|
|
.as_str(),
|
|
|
|
test_blk_params.as_str(),
|
|
|
|
])
|
|
|
|
.default_net()
|
2022-09-20 08:46:19 +00:00
|
|
|
.args(["--api-socket", &api_socket])
|
2022-08-30 00:43:50 +00:00
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
|
|
|
|
let fio_command = format!(
|
|
|
|
"sudo fio --filename=/dev/vdc --name=test --output-format=json \
|
|
|
|
--direct=1 --bs=4k --ioengine=io_uring --iodepth=64 \
|
2022-12-14 11:41:15 +00:00
|
|
|
--rw={fio_ops} --runtime={test_timeout} --numjobs={num_queues}"
|
2022-08-30 00:43:50 +00:00
|
|
|
);
|
|
|
|
let output = guest.ssh_command(&fio_command).unwrap();
|
|
|
|
|
|
|
|
// Parse fio output
|
|
|
|
let measured_rate = if bandwidth {
|
|
|
|
parse_fio_output(&output, &fio_ops, num_queues).unwrap()
|
|
|
|
} else {
|
|
|
|
parse_fio_output_iops(&output, &fio_ops, num_queues).unwrap()
|
|
|
|
};
|
|
|
|
assert!(check_rate_limit(measured_rate, limit_rate, 0.1));
|
|
|
|
});
|
|
|
|
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
|
|
|
|
2023-12-13 23:19:27 +00:00
|
|
|
fn _test_rate_limiter_group_block(bandwidth: bool, num_queues: u32, num_disks: u32) {
|
|
|
|
let test_timeout = 10;
|
|
|
|
let fio_ops = FioOps::RandRW;
|
|
|
|
|
|
|
|
let bw_size = if bandwidth {
|
|
|
|
10485760_u64 // bytes
|
|
|
|
} else {
|
|
|
|
100_u64 // I/O
|
|
|
|
};
|
|
|
|
let bw_refill_time = 100; // ms
|
|
|
|
let limit_rate = (bw_size * 1000) as f64 / bw_refill_time as f64;
|
|
|
|
|
|
|
|
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
|
|
|
|
let guest = Guest::new(Box::new(focal));
|
|
|
|
let api_socket = temp_api_path(&guest.tmp_dir);
|
|
|
|
let test_img_dir = TempDir::new_with_prefix("/var/tmp/ch").unwrap();
|
|
|
|
|
|
|
|
let rate_limit_group_arg = if bandwidth {
|
|
|
|
format!("id=group0,bw_size={bw_size},bw_refill_time={bw_refill_time}")
|
|
|
|
} else {
|
|
|
|
format!("id=group0,ops_size={bw_size},ops_refill_time={bw_refill_time}")
|
|
|
|
};
|
|
|
|
|
|
|
|
let mut disk_args = vec![
|
|
|
|
"--disk".to_string(),
|
|
|
|
format!(
|
|
|
|
"path={}",
|
|
|
|
guest.disk_config.disk(DiskType::OperatingSystem).unwrap()
|
|
|
|
),
|
|
|
|
format!(
|
|
|
|
"path={}",
|
|
|
|
guest.disk_config.disk(DiskType::CloudInit).unwrap()
|
|
|
|
),
|
|
|
|
];
|
|
|
|
|
|
|
|
for i in 0..num_disks {
|
|
|
|
let test_img_path = String::from(
|
|
|
|
test_img_dir
|
|
|
|
.as_path()
|
|
|
|
.join(format!("blk{}.img", i))
|
|
|
|
.to_str()
|
|
|
|
.unwrap(),
|
|
|
|
);
|
|
|
|
|
|
|
|
assert!(exec_host_command_output(&format!(
|
|
|
|
"dd if=/dev/zero of={test_img_path} bs=1M count=1024"
|
|
|
|
))
|
|
|
|
.status
|
|
|
|
.success());
|
|
|
|
|
|
|
|
disk_args.push(format!(
|
|
|
|
"path={test_img_path},num_queues={num_queues},rate_limit_group=group0"
|
|
|
|
));
|
|
|
|
}
|
|
|
|
|
|
|
|
let mut child = GuestCommand::new(&guest)
|
|
|
|
.args(["--cpus", &format!("boot={}", num_queues * num_disks)])
|
|
|
|
.args(["--memory", "size=4G"])
|
|
|
|
.args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
|
|
|
|
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
|
|
|
|
.args(["--rate-limit-group", &rate_limit_group_arg])
|
|
|
|
.args(disk_args)
|
|
|
|
.default_net()
|
|
|
|
.args(["--api-socket", &api_socket])
|
|
|
|
.capture_output()
|
|
|
|
.spawn()
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
let r = std::panic::catch_unwind(|| {
|
|
|
|
guest.wait_vm_boot(None).unwrap();
|
|
|
|
|
|
|
|
let mut fio_command = format!(
|
|
|
|
"sudo fio --name=global --output-format=json \
|
|
|
|
--direct=1 --bs=4k --ioengine=io_uring --iodepth=64 \
|
|
|
|
--rw={fio_ops} --runtime={test_timeout} --numjobs={num_queues}"
|
|
|
|
);
|
|
|
|
|
|
|
|
// Generate an additional argument for each disk:
|
|
|
|
// --name=job0 --filename=/dev/vdc \
|
|
|
|
// --name=job1 --filename=/dev/vdd \
|
|
|
|
// --name=job2 --filename=/dev/vde \
|
|
|
|
// ...
|
|
|
|
for i in 0..num_disks {
|
|
|
|
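// The extra data disks show up after the OS and cloud-init disks, so the first one is
// /dev/vdc; derive each device letter by offsetting from 'c'.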
let c: char = 'c';
|
|
|
|
let arg = format!(
|
|
|
|
" --name=job{i} --filename=/dev/vd{}",
|
|
|
|
char::from_u32((c as u32) + i).unwrap()
|
|
|
|
);
|
|
|
|
fio_command += &arg;
|
|
|
|
}
|
|
|
|
let output = guest.ssh_command(&fio_command).unwrap();
|
|
|
|
|
|
|
|
// Parse fio output
|
|
|
|
let measured_rate = if bandwidth {
|
|
|
|
parse_fio_output(&output, &fio_ops, num_queues * num_disks).unwrap()
|
|
|
|
} else {
|
|
|
|
parse_fio_output_iops(&output, &fio_ops, num_queues * num_disks).unwrap()
|
|
|
|
};
|
|
|
|
assert!(check_rate_limit(measured_rate, limit_rate, 0.1));
|
|
|
|
});
|
|
|
|
|
|
|
|
let _ = child.kill();
|
|
|
|
let output = child.wait_with_output().unwrap();
|
|
|
|
handle_child_output(r, &output);
|
|
|
|
}
|
|
|
|
|
2022-08-30 00:43:50 +00:00
|
|
|
#[test]
|
|
|
|
fn test_rate_limiter_block_bandwidth() {
|
2023-12-13 23:19:27 +00:00
|
|
|
_test_rate_limiter_block(true, 1);
|
|
|
|
_test_rate_limiter_block(true, 2)
|
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
fn test_rate_limiter_group_block_bandwidth() {
|
|
|
|
_test_rate_limiter_group_block(true, 1, 1);
|
|
|
|
_test_rate_limiter_group_block(true, 2, 1);
|
|
|
|
_test_rate_limiter_group_block(true, 1, 2);
|
|
|
|
_test_rate_limiter_group_block(true, 2, 2);
|
2022-08-30 00:43:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
fn test_rate_limiter_block_iops() {
|
2023-12-13 23:19:27 +00:00
|
|
|
_test_rate_limiter_block(false, 1);
|
|
|
|
_test_rate_limiter_block(false, 2);
|
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
fn test_rate_limiter_group_block_iops() {
|
|
|
|
_test_rate_limiter_group_block(false, 1, 1);
|
|
|
|
_test_rate_limiter_group_block(false, 2, 1);
|
|
|
|
_test_rate_limiter_group_block(false, 1, 2);
|
|
|
|
_test_rate_limiter_group_block(false, 2, 2);
|
2022-08-30 00:43:50 +00:00
|
|
|
}
|
2022-08-24 22:07:44 +00:00
|
|
|
}
|