tests: Add test_guest_numa_nodes_dt test case

This commit adds an AArch64-only integration test case called
`test_guest_numa_nodes_dt` so that the NUMA configuration
generated in the FDT can be tested on the AArch64 platform.

Signed-off-by: Henry Wang <Henry.Wang@arm.com>
This commit is contained in:
Henry Wang 2021-08-07 23:49:39 -04:00 committed by Sebastien Boeuf
parent c9cc97e9a0
commit 1d403ea5d6

View File

@@ -929,6 +929,96 @@ mod tests {
handle_child_output(r, &output);
}
// Common implementation shared by the `test_guest_numa_nodes_*` cases.
//
// Boots a guest with 6 vCPUs (max 12), three virtio-mem memory zones and
// a three-node NUMA configuration (CPU sets, inter-node distances and one
// memory zone per node), then checks from inside the guest that each node
// was assigned the expected memory, CPUs and distances. On x86_64 it
// additionally resizes each memory zone and the vCPU count and re-checks
// the per-node assignments.
//
// `acpi` only matters on AArch64, where it selects the boot method:
// EDK2 (ACPI tables) when true, direct kernel boot (FDT) when false.
// On x86_64 direct kernel boot is always used and `acpi` is unused,
// hence the target-scoped lint allowance below (narrower than a blanket
// `#[allow(unused_variables)]`, which would also hide genuine unused
// variables on AArch64).
#[cfg_attr(target_arch = "x86_64", allow(unused_variables))]
fn test_guest_numa_nodes(acpi: bool) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let api_socket = temp_api_path(&guest.tmp_dir);

    #[cfg(target_arch = "x86_64")]
    let kernel_path = direct_kernel_boot_path();
    #[cfg(target_arch = "aarch64")]
    let kernel_path = if acpi {
        edk2_path()
    } else {
        direct_kernel_boot_path()
    };

    let mut child = GuestCommand::new(&guest)
        .args(&["--cpus", "boot=6,max=12"])
        .args(&["--memory", "size=0,hotplug_method=virtio-mem"])
        .args(&[
            "--memory-zone",
            "id=mem0,size=1G,hotplug_size=3G",
            "id=mem1,size=2G,hotplug_size=3G",
            "id=mem2,size=3G,hotplug_size=3G",
        ])
        .args(&[
            "--numa",
            "guest_numa_id=0,cpus=0-2:9,distances=1@15:2@20,memory_zones=mem0",
            "guest_numa_id=1,cpus=3-4:6-8,distances=0@20:2@25,memory_zones=mem1",
            "guest_numa_id=2,cpus=5:10-11,distances=0@25:1@30,memory_zones=mem2",
        ])
        .args(&["--kernel", kernel_path.to_str().unwrap()])
        .args(&["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .args(&["--api-socket", &api_socket])
        .capture_output()
        .default_disks()
        .default_net()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        // Check each NUMA node has been assigned the right amount of
        // memory.
        assert!(guest.get_numa_node_memory(0).unwrap_or_default() > 960_000);
        assert!(guest.get_numa_node_memory(1).unwrap_or_default() > 1_920_000);
        assert!(guest.get_numa_node_memory(2).unwrap_or_default() > 2_880_000);

        // Check each NUMA node has been assigned the right CPUs set.
        // Only 6 vCPUs are booted, so the hotpluggable ones (9, 6-8,
        // 10-11) must not show up yet.
        guest.check_numa_node_cpus(0, vec![0, 1, 2]).unwrap();
        guest.check_numa_node_cpus(1, vec![3, 4]).unwrap();
        guest.check_numa_node_cpus(2, vec![5]).unwrap();

        // Check each NUMA node has been assigned the right distances.
        assert!(guest.check_numa_node_distances(0, "10 15 20").unwrap());
        assert!(guest.check_numa_node_distances(1, "20 10 25").unwrap());
        assert!(guest.check_numa_node_distances(2, "25 30 10").unwrap());

        // AArch64 currently does not support hotplug, and therefore we only
        // test hotplug-related function on x86_64 here.
        #[cfg(target_arch = "x86_64")]
        {
            guest.enable_memory_hotplug();

            // Resize every memory zone and check each associated NUMA node
            // has been assigned the right amount of memory.
            resize_zone_command(&api_socket, "mem0", "4G");
            thread::sleep(std::time::Duration::new(5, 0));
            assert!(guest.get_numa_node_memory(0).unwrap_or_default() > 3_840_000);
            resize_zone_command(&api_socket, "mem1", "4G");
            thread::sleep(std::time::Duration::new(5, 0));
            assert!(guest.get_numa_node_memory(1).unwrap_or_default() > 3_840_000);
            resize_zone_command(&api_socket, "mem2", "4G");
            thread::sleep(std::time::Duration::new(5, 0));
            assert!(guest.get_numa_node_memory(2).unwrap_or_default() > 3_840_000);

            // Resize to the maximum amount of CPUs and check each NUMA
            // node has been assigned the right CPUs set.
            resize_command(&api_socket, Some(12), None, None);
            guest.check_numa_node_cpus(0, vec![0, 1, 2, 9]).unwrap();
            guest.check_numa_node_cpus(1, vec![3, 4, 6, 7, 8]).unwrap();
            guest.check_numa_node_cpus(2, vec![5, 10, 11]).unwrap();
        }
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}
// Callback type used by tests to prepare a network backend daemon before
// launching the guest; returns the daemon's `Command` plus a `String`.
// NOTE(review): parameter semantics are not visible from this chunk —
// presumably (test tmp dir, socket/address string, optional extra arg,
// queue count, flag) and the returned `String` is likely the backend
// socket path — confirm against the call sites.
type PrepareNetDaemon =
dyn Fn(&TempDir, &str, Option<&str>, usize, bool) -> (std::process::Command, String);
@@ -2251,90 +2341,14 @@ mod tests {
#[test]
#[cfg(feature = "acpi")]
fn test_guest_numa_nodes() {
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
let guest = Guest::new(Box::new(focal));
let api_socket = temp_api_path(&guest.tmp_dir);
// Exercises the guest NUMA topology with `acpi = true`: on AArch64 this
// selects the EDK2/ACPI boot path; on x86_64 the flag is unused and
// direct kernel boot is used regardless.
fn test_guest_numa_nodes_acpi() {
test_guest_numa_nodes(true);
}
#[cfg(target_arch = "x86_64")]
let kernel_path = direct_kernel_boot_path();
#[cfg(target_arch = "aarch64")]
let kernel_path = edk2_path();
let mut child = GuestCommand::new(&guest)
.args(&["--cpus", "boot=6,max=12"])
.args(&["--memory", "size=0,hotplug_method=virtio-mem"])
.args(&[
"--memory-zone",
"id=mem0,size=1G,hotplug_size=3G",
"id=mem1,size=2G,hotplug_size=3G",
"id=mem2,size=3G,hotplug_size=3G",
])
.args(&[
"--numa",
"guest_numa_id=0,cpus=0-2:9,distances=1@15:2@20,memory_zones=mem0",
"guest_numa_id=1,cpus=3-4:6-8,distances=0@20:2@25,memory_zones=mem1",
"guest_numa_id=2,cpus=5:10-11,distances=0@25:1@30,memory_zones=mem2",
])
.args(&["--kernel", kernel_path.to_str().unwrap()])
.args(&["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
.args(&["--api-socket", &api_socket])
.capture_output()
.default_disks()
.default_net()
.spawn()
.unwrap();
let r = std::panic::catch_unwind(|| {
guest.wait_vm_boot(None).unwrap();
// Check each NUMA node has been assigned the right amount of
// memory.
assert!(guest.get_numa_node_memory(0).unwrap_or_default() > 960_000);
assert!(guest.get_numa_node_memory(1).unwrap_or_default() > 1_920_000);
assert!(guest.get_numa_node_memory(2).unwrap_or_default() > 2_880_000);
// Check each NUMA node has been assigned the right CPUs set.
guest.check_numa_node_cpus(0, vec![0, 1, 2]).unwrap();
guest.check_numa_node_cpus(1, vec![3, 4]).unwrap();
guest.check_numa_node_cpus(2, vec![5]).unwrap();
// Check each NUMA node has been assigned the right distances.
assert!(guest.check_numa_node_distances(0, "10 15 20").unwrap());
assert!(guest.check_numa_node_distances(1, "20 10 25").unwrap());
assert!(guest.check_numa_node_distances(2, "25 30 10").unwrap());
// AArch64 currently does not support hotplug, and therefore we only
// test hotplug-related function on x86_64 here.
#[cfg(target_arch = "x86_64")]
{
guest.enable_memory_hotplug();
// Resize every memory zone and check each associated NUMA node
// has been assigned the right amount of memory.
resize_zone_command(&api_socket, "mem0", "4G");
thread::sleep(std::time::Duration::new(5, 0));
assert!(guest.get_numa_node_memory(0).unwrap_or_default() > 3_840_000);
resize_zone_command(&api_socket, "mem1", "4G");
thread::sleep(std::time::Duration::new(5, 0));
assert!(guest.get_numa_node_memory(1).unwrap_or_default() > 3_840_000);
resize_zone_command(&api_socket, "mem2", "4G");
thread::sleep(std::time::Duration::new(5, 0));
assert!(guest.get_numa_node_memory(2).unwrap_or_default() > 3_840_000);
// Resize to the maximum amount of CPUs and check each NUMA
// node has been assigned the right CPUs set.
resize_command(&api_socket, Some(12), None, None);
guest.check_numa_node_cpus(0, vec![0, 1, 2, 9]).unwrap();
guest.check_numa_node_cpus(1, vec![3, 4, 6, 7, 8]).unwrap();
guest.check_numa_node_cpus(2, vec![5, 10, 11]).unwrap();
}
});
let _ = child.kill();
let output = child.wait_with_output().unwrap();
handle_child_output(r, &output);
#[test]
#[cfg(target_arch = "aarch64")]
// AArch64-only: `acpi = false` selects direct kernel boot, so the NUMA
// topology is described to the guest through the FDT rather than ACPI.
fn test_guest_numa_nodes_dt() {
test_guest_numa_nodes(false);
}
#[test]