tests: Remove unnecessary "integration_tests" and "test" annotations

Given that integration tests are placed in a dedicated directory, they
don't need annotations (e.g. `#[cfg(feature = "integration_tests")]` and
`#[cfg(test)]`) or a wrapping `mod tests` to exclude themselves from the
normal compilation process.
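For context: Cargo compiles each file at the top level of the `tests/`
directory as its own test-only crate, built and run by `cargo test` alone,
so nothing in it leaks into a regular `cargo build`. A minimal sketch of
the resulting layout (the test name and body here are illustrative, not
part of this commit):

    // tests/integration.rs -- compiled as a separate test crate by
    // `cargo test` only, so no #[cfg(test)]/feature guard and no
    // `mod tests { ... }` wrapper are needed.
    #[macro_use]
    extern crate lazy_static;
    extern crate test_infra;

    use net_util::MacAddr;

    #[test]
    fn boots_with_defaults() {
        // hypothetical test body, elided
        let _mac: Option<MacAddr> = None;
    }

One knock-on effect visible in the diff below: with the `mod tests`
wrapper gone, items live at the crate root, so inner modules switch from
`use crate::tests::*;` to `use crate::*;`.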

Signed-off-by: Bo Chen <chen.bo@intel.com>
Bo Chen 2022-01-06 14:24:38 -08:00 committed by Sebastien Boeuf
parent 430f72f96c
commit e7a2a715e0

@@ -3,18 +3,11 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#[cfg(test)]
-#[cfg(feature = "integration_tests")]
 #[macro_use]
 extern crate lazy_static;
-#[cfg(test)]
-#[cfg(feature = "integration_tests")]
 extern crate test_infra;
-#[cfg(test)]
-#[cfg(feature = "integration_tests")]
-mod tests {
 use net_util::MacAddr;
 use std::collections::HashMap;
 use std::env;
@@ -57,11 +50,9 @@ mod tests {
 pub const FOCAL_IMAGE_NAME: &str = "focal-server-cloudimg-amd64-custom-20210609-0.raw";
 pub const FOCAL_SGX_IMAGE_NAME: &str = "focal-server-cloudimg-amd64-sgx.raw";
 pub const HIRSUTE_NVIDIA_IMAGE_NAME: &str = "hirsute-server-cloudimg-amd64-nvidia.raw";
-pub const FOCAL_IMAGE_NAME_QCOW2: &str =
-"focal-server-cloudimg-amd64-custom-20210609-0.qcow2";
+pub const FOCAL_IMAGE_NAME_QCOW2: &str = "focal-server-cloudimg-amd64-custom-20210609-0.qcow2";
 pub const FOCAL_IMAGE_NAME_VHD: &str = "focal-server-cloudimg-amd64-custom-20210609-0.vhd";
-pub const FOCAL_IMAGE_NAME_VHDX: &str =
-"focal-server-cloudimg-amd64-custom-20210609-0.vhdx";
+pub const FOCAL_IMAGE_NAME_VHDX: &str = "focal-server-cloudimg-amd64-custom-20210609-0.vhdx";
 pub const WINDOWS_IMAGE_NAME: &str = "windows-server-2019.raw";
 pub const OVMF_NAME: &str = "OVMF.fd";
 pub const GREP_SERIAL_IRQ_CMD: &str = "grep -c 'IO-APIC.*ttyS0' /proc/interrupts || true";
@@ -76,13 +67,10 @@ mod tests {
 pub const FOCAL_IMAGE_NAME: &str = "focal-server-cloudimg-arm64-custom-20210929-0.raw";
 pub const FOCAL_IMAGE_UPDATE_KERNEL_NAME: &str =
 "focal-server-cloudimg-arm64-custom-20210929-0-update-kernel.raw";
-pub const FOCAL_IMAGE_NAME_QCOW2: &str =
-"focal-server-cloudimg-arm64-custom-20210929-0.qcow2";
+pub const FOCAL_IMAGE_NAME_QCOW2: &str = "focal-server-cloudimg-arm64-custom-20210929-0.qcow2";
 pub const FOCAL_IMAGE_NAME_VHD: &str = "focal-server-cloudimg-arm64-custom-20210929-0.vhd";
-pub const FOCAL_IMAGE_NAME_VHDX: &str =
-"focal-server-cloudimg-arm64-custom-20210929-0.vhdx";
-pub const GREP_SERIAL_IRQ_CMD: &str =
-"grep -c 'GICv3.*uart-pl011' /proc/interrupts || true";
+pub const FOCAL_IMAGE_NAME_VHDX: &str = "focal-server-cloudimg-arm64-custom-20210929-0.vhdx";
+pub const GREP_SERIAL_IRQ_CMD: &str = "grep -c 'GICv3.*uart-pl011' /proc/interrupts || true";
 }
 #[cfg(target_arch = "aarch64")]
@@ -239,8 +227,7 @@ mod tests {
 num_queues: usize,
 client_mode: bool,
 ) -> (std::process::Command, String) {
-let vunet_socket_path =
-String::from(tmp_dir.as_path().join("vunet.sock").to_str().unwrap());
+let vunet_socket_path = String::from(tmp_dir.as_path().join("vunet.sock").to_str().unwrap());
 // Start the daemon
 let net_params = if let Some(tap_str) = tap {
@@ -262,8 +249,7 @@ mod tests {
 }
 fn curl_command(api_socket: &str, method: &str, url: &str, http_body: Option<&str>) {
-let mut curl_args: Vec<&str> =
-["--unix-socket", api_socket, "-i", "-X", method, url].to_vec();
+let mut curl_args: Vec<&str> = ["--unix-socket", api_socket, "-i", "-X", method, url].to_vec();
 if let Some(body) = http_body {
 curl_args.push("-H");
@@ -293,11 +279,7 @@ mod tests {
 cmd.status().expect("Failed to launch ch-remote").success()
 }
-fn remote_command_w_output(
-api_socket: &str,
-command: &str,
-arg: Option<&str>,
-) -> (bool, Vec<u8>) {
+fn remote_command_w_output(api_socket: &str, command: &str, arg: Option<&str>) -> (bool, Vec<u8>) {
 let mut cmd = Command::new(clh_command("ch-remote"));
 cmd.args(&[&format!("--api-socket={}", api_socket), command]);
@@ -351,10 +333,10 @@ mod tests {
 // setup OVS-DPDK
 assert!(exec_host_command_status("service openvswitch-switch start").success());
 assert!(exec_host_command_status("ovs-vsctl init").success());
-assert!(exec_host_command_status(
-"ovs-vsctl set Open_vSwitch . other_config:dpdk-init=true"
-)
-.success());
+assert!(
+exec_host_command_status("ovs-vsctl set Open_vSwitch . other_config:dpdk-init=true")
+.success()
+);
 assert!(exec_host_command_status("service openvswitch-switch restart").success());
 // Create OVS-DPDK bridge and ports
@@ -688,11 +670,7 @@ mod tests {
 Ok(())
 }
-fn check_numa_node_distances(
-&self,
-node_id: usize,
-distances: &str,
-) -> Result<bool, Error> {
+fn check_numa_node_distances(&self, node_id: usize, distances: &str) -> Result<bool, Error> {
 let cmd = format!("cat /sys/devices/system/node/node{}/distance", node_id);
 if self.ssh_command(cmd.as_str())?.trim() == distances {
 Ok(true)
@@ -875,9 +853,7 @@ mod tests {
 fn check_nvidia_gpu(&self) {
 // Run CUDA sample to validate it can find the device
 let device_query_result = self
-.ssh_command(
-"sudo /root/NVIDIA_CUDA-11.3_Samples/bin/x86_64/linux/release/deviceQuery",
-)
+.ssh_command("sudo /root/NVIDIA_CUDA-11.3_Samples/bin/x86_64/linux/release/deviceQuery")
 .unwrap();
 assert!(device_query_result.contains("Detected 1 CUDA Capable device"));
 assert!(device_query_result.contains("Device 0: \"NVIDIA Tesla T4\""));
@@ -921,9 +897,7 @@ mod tests {
 }
 fn enable_memory_hotplug(&self) {
-self.ssh_command(
-"echo online | sudo tee /sys/devices/system/memory/auto_online_blocks",
-)
+self.ssh_command("echo online | sudo tee /sys/devices/system/memory/auto_online_blocks")
 .unwrap();
 }
@@ -1316,14 +1290,12 @@ mod tests {
 guest.wait_vm_boot(None).unwrap();
 if let Some(tap_name) = tap {
-let tap_count =
-exec_host_command_output(&format!("ip link | grep -c {}", tap_name));
+let tap_count = exec_host_command_output(&format!("ip link | grep -c {}", tap_name));
 assert_eq!(String::from_utf8_lossy(&tap_count.stdout).trim(), "1");
 }
 if let Some(host_mac) = tap {
-let mac_count =
-exec_host_command_output(&format!("ip link | grep -c {}", host_mac));
+let mac_count = exec_host_command_output(&format!("ip link | grep -c {}", host_mac));
 assert_eq!(String::from_utf8_lossy(&mac_count.stdout).trim(), "1");
 }
@@ -1394,8 +1366,7 @@ mod tests {
 handle_child_output(r, &output);
 }
-type PrepareBlkDaemon =
-dyn Fn(&TempDir, &str, usize, bool, bool) -> (std::process::Child, String);
+type PrepareBlkDaemon = dyn Fn(&TempDir, &str, usize, bool, bool) -> (std::process::Child, String);
 fn test_vhost_user_blk(
 num_queues: usize,
@@ -2368,7 +2339,7 @@ mod tests {
 mod parallel {
 use std::io::SeekFrom;
-use crate::tests::*;
+use crate::*;
 #[test]
 #[cfg(target_arch = "x86_64")]
@@ -2405,10 +2376,7 @@ mod tests {
 assert_eq!(guest.get_initial_apicid().unwrap_or(1), 0);
 assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
 assert!(guest.get_entropy().unwrap_or_default() >= 900);
-assert_eq!(
-guest.get_pci_bridge_class().unwrap_or_default(),
-"0x060000"
-);
+assert_eq!(guest.get_pci_bridge_class().unwrap_or_default(), "0x060000");
 });
 let _ = child.kill();
@@ -2442,9 +2410,7 @@ mod tests {
 #[cfg(target_arch = "x86_64")]
 assert_eq!(
 guest
-.ssh_command(
-r#"dmesg | grep "smpboot: Allowing" | sed "s/\[\ *[0-9.]*\] //""#
-)
+.ssh_command(r#"dmesg | grep "smpboot: Allowing" | sed "s/\[\ *[0-9.]*\] //""#)
 .unwrap()
 .trim(),
 "smpboot: Allowing 4 CPUs, 2 hotplug CPUs"
@@ -2452,9 +2418,7 @@ mod tests {
 #[cfg(target_arch = "aarch64")]
 assert_eq!(
 guest
-.ssh_command(
-r#"dmesg | grep "smp: Brought up" | sed "s/\[\ *[0-9.]*\] //""#
-)
+.ssh_command(r#"dmesg | grep "smp: Brought up" | sed "s/\[\ *[0-9.]*\] //""#)
 .unwrap()
 .trim(),
 "smp: Brought up 1 node, 2 CPUs"
@@ -2588,9 +2552,7 @@ mod tests {
 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 48);
 assert_eq!(
 guest
-.ssh_command(
-"lscpu | grep \"On-line\" | cut -f 2 -d \":\" | sed \"s# *##\""
-)
+.ssh_command("lscpu | grep \"On-line\" | cut -f 2 -d \":\" | sed \"s# *##\"")
 .unwrap()
 .trim(),
 "0-47"
@@ -2767,14 +2729,13 @@ mod tests {
 .to_str()
 .unwrap(),
 );
-assert!(exec_host_command_status(
-format!("truncate {} -s 4M", test_disk_path).as_str()
-)
-.success());
 assert!(
-exec_host_command_status(format!("mkfs.ext4 {}", test_disk_path).as_str())
+exec_host_command_status(format!("truncate {} -s 4M", test_disk_path).as_str())
 .success()
 );
+assert!(
+exec_host_command_status(format!("mkfs.ext4 {}", test_disk_path).as_str()).success()
+);
 let mut cmd = GuestCommand::new(&guest);
 cmd.args(&["--cpus", "boot=1"])
@@ -4071,14 +4032,14 @@ mod tests {
 );
 // Hotplug an extra virtio-net device through L2 VM.
-guest.ssh_command_l1(
-"echo 0000:00:09.0 | sudo tee /sys/bus/pci/devices/0000:00:09.0/driver/unbind",
-).unwrap();
 guest
 .ssh_command_l1(
-"echo 0000:00:09.0 | sudo tee /sys/bus/pci/drivers/vfio-pci/bind",
+"echo 0000:00:09.0 | sudo tee /sys/bus/pci/devices/0000:00:09.0/driver/unbind",
 )
 .unwrap();
+guest
+.ssh_command_l1("echo 0000:00:09.0 | sudo tee /sys/bus/pci/drivers/vfio-pci/bind")
+.unwrap();
 let vfio_hotplug_output = guest
 .ssh_command_l1(
 "sudo /mnt/ch-remote \
@@ -4086,9 +4047,7 @@ mod tests {
 add-device path=/sys/bus/pci/devices/0000:00:09.0,id=vfio123",
 )
 .unwrap();
-assert!(
-vfio_hotplug_output.contains("{\"id\":\"vfio123\",\"bdf\":\"0000:00:08.0\"}")
-);
+assert!(vfio_hotplug_output.contains("{\"id\":\"vfio123\",\"bdf\":\"0000:00:08.0\"}"));
 thread::sleep(std::time::Duration::new(10, 0));
@@ -4147,9 +4106,11 @@ mod tests {
 // device already passed through L2 as a VFIO device, this will
 // verify that VFIO devices are functional with memory hotplug.
 assert!(guest.get_total_memory_l2().unwrap_or_default() > 480_000);
-guest.ssh_command_l2_1(
+guest
+.ssh_command_l2_1(
 "sudo bash -c 'echo online > /sys/devices/system/memory/auto_online_blocks'",
-).unwrap();
+)
+.unwrap();
 guest
 .ssh_command_l1(
 "sudo /mnt/ch-remote \
@@ -4422,8 +4383,7 @@ mod tests {
 .spawn()
 .unwrap();
-let r =
-std::panic::catch_unwind(|| {
+let r = std::panic::catch_unwind(|| {
 guest.wait_vm_boot(None).unwrap();
 // 2 network interfaces + default localhost ==> 3 interfaces
@@ -4437,9 +4397,11 @@ mod tests {
 3
 );
-let init_bar_addr = guest.ssh_command(
+let init_bar_addr = guest
+.ssh_command(
 "sudo awk '{print $1; exit}' /sys/bus/pci/devices/0000:00:05.0/resource",
-).unwrap();
+)
+.unwrap();
 // Remove the PCI device
 guest
@@ -4473,9 +4435,11 @@ mod tests {
 3
 );
-let new_bar_addr = guest.ssh_command(
+let new_bar_addr = guest
+.ssh_command(
 "sudo awk '{print $1; exit}' /sys/bus/pci/devices/0000:00:05.0/resource",
-).unwrap();
+)
+.unwrap();
 // Let's compare the BAR addresses for our virtio-net device.
 // They should be different as we expect the BAR reprogramming
@@ -5739,9 +5703,7 @@ mod tests {
 // Enable systemd watchdog
 guest
-.ssh_command(
-"echo RuntimeWatchdogSec=15s | sudo tee -a /etc/systemd/system.conf",
-)
+.ssh_command("echo RuntimeWatchdogSec=15s | sudo tee -a /etc/systemd/system.conf")
 .unwrap();
 guest.ssh_command("sudo reboot").unwrap();
@@ -5933,8 +5895,7 @@ mod tests {
 );
 let tap_index =
-fs::read_to_string(&format!("/sys/class/net/{}/ifindex", guest_macvtap_name))
-.unwrap();
+fs::read_to_string(&format!("/sys/class/net/{}/ifindex", guest_macvtap_name)).unwrap();
 let tap_device = format!("/dev/tap{}", tap_index.trim());
 assert!(
@@ -6294,7 +6255,7 @@ mod tests {
 }
 mod sequential {
-use crate::tests::*;
+use crate::*;
 #[test]
 fn test_memory_mergeable_on() {
@@ -6304,7 +6265,7 @@ mod tests {
 #[cfg(target_arch = "x86_64")]
 mod windows {
-use crate::tests::*;
+use crate::*;
 lazy_static! {
 static ref NEXT_DISK_ID: Mutex<u8> = Mutex::new(1);
@@ -7300,7 +7261,7 @@ mod tests {
 #[cfg(target_arch = "x86_64")]
 mod sgx {
-use crate::tests::*;
+use crate::*;
 #[test]
 fn test_sgx() {
@@ -7361,7 +7322,7 @@ mod tests {
 #[cfg(target_arch = "x86_64")]
 mod vfio {
-use crate::tests::*;
+use crate::*;
 fn test_nvidia_card_memory_hotplug(hotplug_method: &str) {
 let hirsute = UbuntuDiskConfig::new(HIRSUTE_NVIDIA_IMAGE_NAME.to_string());
@@ -7496,7 +7457,7 @@ mod tests {
 }
 mod live_migration {
-use crate::tests::*;
+use crate::*;
 // This test exercises the local live-migration between two Cloud Hypervisor VMs on the
 // same host. It ensures the following behaviors:
@@ -7591,11 +7552,7 @@ mod tests {
 resize_zone_command(&src_api_socket, "mem2", "2G");
 thread::sleep(std::time::Duration::new(5, 0));
-guest.check_numa_common(
-Some(&[1_920_000, 1_920_000, 1_920_000]),
-None,
-None,
-);
+guest.check_numa_common(Some(&[1_920_000, 1_920_000, 1_920_000]), None, None);
 }
 }
@@ -7910,11 +7867,7 @@ mod tests {
 );
 });
-let print_and_panic = |src_vm: Child,
-dest_vm: Child,
-ovs_vm: Child,
-message: &str|
--> ! {
+let print_and_panic = |src_vm: Child, dest_vm: Child, ovs_vm: Child, message: &str| -> ! {
 let mut src_vm = src_vm;
 let mut dest_vm = dest_vm;
 let mut ovs_vm = ovs_vm;
@@ -8014,7 +7967,7 @@ mod tests {
 #[cfg(all(target_arch = "aarch64", feature = "acpi"))]
 mod aarch64_acpi {
-use crate::tests::*;
+use crate::*;
 #[test]
 fn test_simple_launch_acpi() {
@@ -8080,4 +8033,3 @@ mod tests {
 _test_virtio_iommu(true)
 }
 }
-}