tests: live-migration: Split numa tests into their own function

Signed-off-by: Bo Chen <chen.bo@intel.com>
This commit is contained in:
Bo Chen 2022-09-07 17:20:25 -07:00 committed by Sebastien Boeuf
parent e145b823a5
commit 60f3bd43f5

View File

@ -8358,7 +8358,7 @@ mod live_migration {
// 4. The destination VM is functional (including various virtio-devices are working properly) after
// live migration;
// Note: This test does not use vsock as we can't create two identical vsock on the same host.
fn _test_live_migration(upgrade_test: bool, numa: bool, local: bool) {
fn _test_live_migration(upgrade_test: bool, local: bool) {
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
let guest = Guest::new(Box::new(focal));
let kernel_path = direct_kernel_boot_path();
@ -8369,37 +8369,14 @@ mod live_migration {
net_id, guest.network.guest_mac, guest.network.host_ip
);
let memory_param: &[&str] = match (local, numa) {
(false, false) => &["--memory", "size=4G"],
(false, true) => &[
"--memory",
"size=0,hotplug_method=virtio-mem",
"--memory-zone",
"id=mem0,size=1G,hotplug_size=4G",
"id=mem1,size=1G,hotplug_size=4G",
"id=mem2,size=2G,hotplug_size=4G",
"--numa",
"guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0",
"guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1",
"guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2",
],
(true, false) => &["--memory", "size=4G,shared=on"],
(true, true) => &[
"--memory",
"size=0,hotplug_method=virtio-mem,shared=on",
"--memory-zone",
"id=mem0,size=1G,hotplug_size=4G,shared=on",
"id=mem1,size=1G,hotplug_size=4G,shared=on",
"id=mem2,size=2G,hotplug_size=4G,shared=on",
"--numa",
"guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0",
"guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1",
"guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2",
],
let memory_param: &[&str] = if local {
&["--memory", "size=4G,shared=on"]
} else {
&["--memory", "size=4G"]
};
let boot_vcpus = if numa { 6 } else { 2 };
let max_vcpus = if numa { 12 } else { 4 };
let boot_vcpus = 2;
let max_vcpus = 4;
let pmem_temp_file = TempFile::new().unwrap();
pmem_temp_file.as_file().set_len(128 << 20).unwrap();
@ -8451,42 +8428,11 @@ mod live_migration {
assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
// Check the guest RAM
if numa {
assert!(guest.get_total_memory().unwrap_or_default() > 2_880_000);
} else {
assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);
}
assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);
// Check the guest virtio-devices, e.g. block, rng, console, and net
guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));
// Check the NUMA parameters are applied correctly and resize
// each zone to test the case where we migrate a VM with the
// virtio-mem regions being used.
if numa {
guest.check_numa_common(
Some(&[960_000, 960_000, 1_920_000]),
Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]),
Some(&["10 15 20", "20 10 25", "25 30 10"]),
);
// AArch64 currently does not support hotplug, and therefore we only
// test hotplug-related function on x86_64 here.
#[cfg(target_arch = "x86_64")]
{
guest.enable_memory_hotplug();
// Resize every memory zone and check each associated NUMA node
// has been assigned the right amount of memory.
resize_zone_command(&src_api_socket, "mem0", "2G");
resize_zone_command(&src_api_socket, "mem1", "2G");
resize_zone_command(&src_api_socket, "mem2", "3G");
thread::sleep(std::time::Duration::new(5, 0));
guest.check_numa_common(Some(&[1_920_000, 1_920_000, 1_920_000]), None, None);
}
}
// x86_64: Following what's done in the `test_snapshot_restore`, we need
// to make sure that removing and adding back the virtio-net device does
// not break the live-migration support for virtio-pci.
@ -8549,57 +8495,9 @@ mod live_migration {
let r = std::panic::catch_unwind(|| {
// Perform same checks to validate VM has been properly migrated
assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
#[cfg(target_arch = "x86_64")]
if numa {
assert!(guest.get_total_memory().unwrap_or_default() > 6_720_000);
} else {
assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);
}
#[cfg(target_arch = "aarch64")]
assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);
guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));
// Perform NUMA related checks
if numa {
#[cfg(target_arch = "aarch64")]
{
guest.check_numa_common(
Some(&[960_000, 960_000, 1_920_000]),
Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]),
Some(&["10 15 20", "20 10 25", "25 30 10"]),
);
}
// AArch64 currently does not support hotplug, and therefore we only
// test hotplug-related function on x86_64 here.
#[cfg(target_arch = "x86_64")]
{
guest.check_numa_common(
Some(&[1_920_000, 1_920_000, 2_880_000]),
Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]),
Some(&["10 15 20", "20 10 25", "25 30 10"]),
);
guest.enable_memory_hotplug();
// Resize every memory zone and check each associated NUMA node
// has been assigned the right amount of memory.
resize_zone_command(&dest_api_socket, "mem0", "4G");
resize_zone_command(&dest_api_socket, "mem1", "4G");
resize_zone_command(&dest_api_socket, "mem2", "4G");
// Resize to the maximum amount of CPUs and check each NUMA
// node has been assigned the right CPUs set.
resize_command(&dest_api_socket, Some(max_vcpus), None, None, None);
thread::sleep(std::time::Duration::new(5, 0));
guest.check_numa_common(
Some(&[3_840_000, 3_840_000, 3_840_000]),
Some(&[vec![0, 1, 2, 9], vec![3, 4, 6, 7, 8], vec![5, 10, 11]]),
None,
);
}
}
});
// Clean-up the destination VM and make sure it terminated correctly
@ -8803,6 +8701,253 @@ mod live_migration {
handle_child_output(r, &dest_output);
}
/// Live-migration test for a NUMA-enabled guest.
///
/// Boots a source VM configured with three virtio-mem backed memory zones and
/// three guest NUMA nodes, validates the NUMA layout (and, on x86_64, resizes
/// the zones so migration happens with virtio-mem regions in use), live-migrates
/// it to a destination VM, and re-validates the layout afterwards.
///
/// * `upgrade_test` - when `true`, the source VM runs the released
///   cloud-hypervisor binary so the migration doubles as a live-upgrade test.
/// * `local` - when `true`, memory is created with `shared=on` and the
///   migration is performed locally over shared memory.
fn _test_live_migration_numa(upgrade_test: bool, local: bool) {
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
let guest = Guest::new(Box::new(focal));
let kernel_path = direct_kernel_boot_path();
let console_text = String::from("On a branch floating down river a cricket, singing.");
let net_id = "net123";
let net_params = format!(
"id={},tap=,mac={},ip={},mask=255.255.255.0",
net_id, guest.network.guest_mac, guest.network.host_ip
);
// Three hotpluggable virtio-mem zones (1G/1G/2G booted, 4G hotplug each),
// each bound to one guest NUMA node with explicit CPU sets and distances.
// Local migration requires the memory to be `shared=on`.
let memory_param: &[&str] = if local {
&[
"--memory",
"size=0,hotplug_method=virtio-mem,shared=on",
"--memory-zone",
"id=mem0,size=1G,hotplug_size=4G,shared=on",
"id=mem1,size=1G,hotplug_size=4G,shared=on",
"id=mem2,size=2G,hotplug_size=4G,shared=on",
"--numa",
"guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0",
"guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1",
"guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2",
]
} else {
&[
"--memory",
"size=0,hotplug_method=virtio-mem",
"--memory-zone",
"id=mem0,size=1G,hotplug_size=4G",
"id=mem1,size=1G,hotplug_size=4G",
"id=mem2,size=2G,hotplug_size=4G",
"--numa",
"guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0",
"guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1",
"guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2",
]
};
// Boot with half the CPUs; `max_vcpus` leaves room for the x86_64 CPU
// hotplug check after migration (the NUMA config above references vCPUs 0-11).
let boot_vcpus = 6;
let max_vcpus = 12;
// Back the guest's pmem device with a small ext4 image on the host.
let pmem_temp_file = TempFile::new().unwrap();
pmem_temp_file.as_file().set_len(128 << 20).unwrap();
std::process::Command::new("mkfs.ext4")
.arg(pmem_temp_file.as_path())
.output()
.expect("Expect creating disk image to succeed");
let pmem_path = String::from("/dev/pmem0");
// Start the source VM
let src_vm_path = if !upgrade_test {
clh_command("cloud-hypervisor")
} else {
// Live-upgrade: boot the source from the latest release binary.
cloud_hypervisor_release_path()
};
let src_api_socket = temp_api_path(&guest.tmp_dir);
let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path);
src_vm_cmd
.args(&[
"--cpus",
format!("boot={},max={}", boot_vcpus, max_vcpus).as_str(),
])
.args(memory_param)
.args(&["--kernel", kernel_path.to_str().unwrap()])
.args(&["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
.default_disks()
.args(&["--net", net_params.as_str()])
.args(&["--api-socket", &src_api_socket])
.args(&[
"--pmem",
format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(),
]);
let mut src_child = src_vm_cmd.capture_output().spawn().unwrap();
// Start the destination VM (no config: it waits to receive the migration)
let mut dest_api_socket = temp_api_path(&guest.tmp_dir);
dest_api_socket.push_str(".dest");
let mut dest_child = GuestCommand::new(&guest)
.args(&["--api-socket", &dest_api_socket])
.capture_output()
.spawn()
.unwrap();
let r = std::panic::catch_unwind(|| {
guest.wait_vm_boot(None).unwrap();
// Make sure the source VM is functional
// Check the number of vCPUs
assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
// Check the guest RAM (booted zones total 4G; threshold allows kernel overhead)
assert!(guest.get_total_memory().unwrap_or_default() > 2_880_000);
// Check the guest virtio-devices, e.g. block, rng, console, and net
guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));
// Check the NUMA parameters are applied correctly and resize
// each zone to test the case where we migrate a VM with the
// virtio-mem regions being used.
{
guest.check_numa_common(
Some(&[960_000, 960_000, 1_920_000]),
Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]),
Some(&["10 15 20", "20 10 25", "25 30 10"]),
);
// AArch64 currently does not support hotplug, and therefore we only
// test hotplug-related function on x86_64 here.
#[cfg(target_arch = "x86_64")]
{
guest.enable_memory_hotplug();
// Resize every memory zone and check each associated NUMA node
// has been assigned the right amount of memory.
resize_zone_command(&src_api_socket, "mem0", "2G");
resize_zone_command(&src_api_socket, "mem1", "2G");
resize_zone_command(&src_api_socket, "mem2", "3G");
thread::sleep(std::time::Duration::new(5, 0));
guest.check_numa_common(Some(&[1_920_000, 1_920_000, 1_920_000]), None, None);
}
}
// x86_64: Following what's done in the `test_snapshot_restore`, we need
// to make sure that removing and adding back the virtio-net device does
// not break the live-migration support for virtio-pci.
#[cfg(target_arch = "x86_64")]
{
assert!(remote_command(
&src_api_socket,
"remove-device",
Some(net_id),
));
thread::sleep(std::time::Duration::new(10, 0));
// Plug the virtio-net device again
assert!(remote_command(
&src_api_socket,
"add-net",
Some(net_params.as_str()),
));
thread::sleep(std::time::Duration::new(10, 0));
}
// Start the live-migration
let migration_socket = String::from(
guest
.tmp_dir
.as_path()
.join("live-migration.sock")
.to_str()
.unwrap(),
);
assert!(
start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local),
"Unsuccessful command: 'send-migration' or 'receive-migration'."
);
});
// Check and report any errors that occurred during the live-migration
if r.is_err() {
print_and_panic(
src_child,
dest_child,
None,
"Error occured during live-migration",
);
}
// Check the source VM has terminated successfully (give it '3s' to settle)
thread::sleep(std::time::Duration::new(3, 0));
if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) {
print_and_panic(
src_child,
dest_child,
None,
"source VM was not terminated successfully.",
);
};
// Post live-migration check to make sure the destination VM is functional
let r = std::panic::catch_unwind(|| {
// Perform same checks to validate VM has been properly migrated
assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
#[cfg(target_arch = "x86_64")]
assert!(guest.get_total_memory().unwrap_or_default() > 6_720_000);
#[cfg(target_arch = "aarch64")]
assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);
guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));
// Perform NUMA related checks
{
#[cfg(target_arch = "aarch64")]
{
guest.check_numa_common(
Some(&[960_000, 960_000, 1_920_000]),
Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]),
Some(&["10 15 20", "20 10 25", "25 30 10"]),
);
}
// AArch64 currently does not support hotplug, and therefore we only
// test hotplug-related function on x86_64 here.
#[cfg(target_arch = "x86_64")]
{
guest.check_numa_common(
Some(&[1_920_000, 1_920_000, 2_880_000]),
Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]),
Some(&["10 15 20", "20 10 25", "25 30 10"]),
);
guest.enable_memory_hotplug();
// Resize every memory zone and check each associated NUMA node
// has been assigned the right amount of memory.
resize_zone_command(&dest_api_socket, "mem0", "4G");
resize_zone_command(&dest_api_socket, "mem1", "4G");
resize_zone_command(&dest_api_socket, "mem2", "4G");
// Resize to the maximum amount of CPUs and check each NUMA
// node has been assigned the right CPUs set.
resize_command(&dest_api_socket, Some(max_vcpus), None, None, None);
thread::sleep(std::time::Duration::new(5, 0));
guest.check_numa_common(
Some(&[3_840_000, 3_840_000, 3_840_000]),
Some(&[vec![0, 1, 2, 9], vec![3, 4, 6, 7, 8], vec![5, 10, 11]]),
None,
);
}
}
});
// Clean-up the destination VM and make sure it terminated correctly
let _ = dest_child.kill();
let dest_output = dest_child.wait_with_output().unwrap();
handle_child_output(r, &dest_output);
// Check the destination VM has the expected 'console_text' from its output
let r = std::panic::catch_unwind(|| {
assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text));
});
handle_child_output(r, &dest_output);
}
fn _test_live_migration_watchdog(upgrade_test: bool, local: bool) {
let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
let guest = Guest::new(Box::new(focal));
@ -9107,24 +9252,24 @@ mod live_migration {
use super::*;
#[test]
fn test_live_migration_basic() {
_test_live_migration(false, false, false)
_test_live_migration(false, false)
}
#[test]
fn test_live_migration_local() {
_test_live_migration(false, false, true)
_test_live_migration(false, true)
}
#[test]
#[cfg(not(feature = "mshv"))]
fn test_live_migration_numa() {
_test_live_migration(false, true, false)
_test_live_migration_numa(false, false)
}
#[test]
#[cfg(not(feature = "mshv"))]
fn test_live_migration_numa_local() {
_test_live_migration(false, true, true)
_test_live_migration_numa(false, true)
}
#[test]
@ -9149,24 +9294,24 @@ mod live_migration {
#[test]
fn test_live_upgrade_basic() {
_test_live_migration(true, false, false)
_test_live_migration(true, false)
}
#[test]
fn test_live_upgrade_local() {
_test_live_migration(true, false, true)
_test_live_migration(true, true)
}
#[test]
#[cfg(not(feature = "mshv"))]
fn test_live_upgrade_numa() {
_test_live_migration(true, true, false)
_test_live_migration_numa(true, false)
}
#[test]
#[cfg(not(feature = "mshv"))]
fn test_live_upgrade_numa_local() {
_test_live_migration(true, true, true)
_test_live_migration_numa(true, true)
}
#[test]