tests: Improve live migration tests reliability

Move the live migration tests to a 'jammy' worker rather than
'jammy-small'. This type of worker has more CPUs (64 vs 16) and more
RAM (256G vs 64G), which should reduce the time it takes to run each
test. With this improvement, the tests should no longer fail because
the timeout is reached.

A second improvement is to reduce the number of vCPUs created for each
VM. The point is simply to check that a VM with multiple vCPUs can be
migrated, so booting 2 vCPUs instead of 6 is enough whenever possible.
When testing NUMA, the number of vCPUs can't be lowered because the
test expects a fairly complex topology.
Similarly, the maximum number of vCPUs is reduced from 12 to 4 (again,
only when not testing with NUMA).

Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
Authored by Sebastien Boeuf on 2022-08-03 16:17:19 +02:00, committed by Bo Chen
commit 103494f441 (parent 1125fd2667)
2 changed files with 11 additions and 5 deletions

Jenkinsfile (vendored)

@@ -295,7 +295,7 @@ pipeline{
             }
         }
         stage ('Worker build - Live Migration') {
-            agent { node { label 'jammy-small' } }
+            agent { node { label 'jammy' } }
             when {
                 beforeAgent true
                 expression {

tests/integration.rs

@@ -7851,6 +7851,9 @@ mod live_migration {
             ],
         };
 
+        let boot_vcpus = if numa { 6 } else { 2 };
+        let max_vcpus = if numa { 12 } else { 4 };
+
         let pmem_temp_file = TempFile::new().unwrap();
         pmem_temp_file.as_file().set_len(128 << 20).unwrap();
         std::process::Command::new("mkfs.ext4")
@@ -7868,7 +7871,10 @@ mod live_migration {
         let src_api_socket = temp_api_path(&guest.tmp_dir);
         let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path);
         src_vm_cmd
-            .args(&["--cpus", "boot=6,max=12"])
+            .args(&[
+                "--cpus",
+                format!("boot={},max={}", boot_vcpus, max_vcpus).as_str(),
+            ])
             .args(memory_param)
             .args(&["--kernel", kernel_path.to_str().unwrap()])
             .args(&["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
@@ -7898,7 +7904,7 @@ mod live_migration {
 
         // Make sure the source VM is functaionl
         // Check the number of vCPUs
-        assert_eq!(guest.get_cpu_count().unwrap_or_default(), 6);
+        assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
 
         // Check the guest RAM
         if balloon {
@@ -8103,7 +8109,7 @@ mod live_migration {
         // Post live-migration check to make sure the destination VM is funcational
         let r = std::panic::catch_unwind(|| {
             // Perform same checks to validate VM has been properly migrated
-            assert_eq!(guest.get_cpu_count().unwrap_or_default(), 6);
+            assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
             #[cfg(target_arch = "x86_64")]
             if numa {
                 assert!(guest.get_total_memory().unwrap_or_default() > 6_720_000);
@@ -8145,7 +8151,7 @@ mod live_migration {
                 resize_zone_command(&dest_api_socket, "mem2", "4G");
                 // Resize to the maximum amount of CPUs and check each NUMA
                 // node has been assigned the right CPUs set.
-                resize_command(&dest_api_socket, Some(12), None, None, None);
+                resize_command(&dest_api_socket, Some(max_vcpus), None, None, None);
                 thread::sleep(std::time::Duration::new(5, 0));
 
                 guest.check_numa_common(