From 103494f441a974c834702ee1b06c3a79235d7fcf Mon Sep 17 00:00:00 2001
From: Sebastien Boeuf
Date: Wed, 3 Aug 2022 16:17:19 +0200
Subject: [PATCH] tests: Improve live migration tests reliability

Move the live migration tests to a 'jammy' worker rather than
'jammy-small'. This type of worker has more CPUs (64 vs 16) and more
RAM (256G vs 64G), which should reduce the time it takes to run each
test. With this improvement, the tests should no longer fail because
the timeout is reached.

A second improvement is to reduce the number of vCPUs created for each
VM. The point is simply to check that we can migrate a VM with multiple
vCPUs, so booting 2 instead of 6 is enough when possible. When testing
NUMA, we can't lower the number of vCPUs since a fairly complex
topology is expected there. Also, the maximum number of vCPUs is
reduced from 12 to 4 (again, when not testing NUMA).

Signed-off-by: Sebastien Boeuf
---
 Jenkinsfile          |  2 +-
 tests/integration.rs | 14 ++++++++++----
 2 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/Jenkinsfile b/Jenkinsfile
index 5493cad56..ce5d19b86 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -295,7 +295,7 @@ pipeline{
                 }
             }
             stage ('Worker build - Live Migration') {
-                agent { node { label 'jammy-small' } }
+                agent { node { label 'jammy' } }
                 when {
                     beforeAgent true
                     expression {
diff --git a/tests/integration.rs b/tests/integration.rs
index 799937d79..47b3bfec0 100644
--- a/tests/integration.rs
+++ b/tests/integration.rs
@@ -7851,6 +7851,9 @@ mod live_migration {
             ],
         };
 
+        let boot_vcpus = if numa { 6 } else { 2 };
+        let max_vcpus = if numa { 12 } else { 4 };
+
         let pmem_temp_file = TempFile::new().unwrap();
         pmem_temp_file.as_file().set_len(128 << 20).unwrap();
         std::process::Command::new("mkfs.ext4")
@@ -7868,7 +7871,10 @@
         let src_api_socket = temp_api_path(&guest.tmp_dir);
         let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path);
         src_vm_cmd
-            .args(&["--cpus", "boot=6,max=12"])
+            .args(&[
+                "--cpus",
+                format!("boot={},max={}", boot_vcpus, max_vcpus).as_str(),
+            ])
             .args(memory_param)
             .args(&["--kernel", kernel_path.to_str().unwrap()])
             .args(&["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
@@ -7898,7 +7904,7 @@
 
         // Make sure the source VM is functaionl
         // Check the number of vCPUs
-        assert_eq!(guest.get_cpu_count().unwrap_or_default(), 6);
+        assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
 
         // Check the guest RAM
         if balloon {
@@ -8103,7 +8109,7 @@
         // Post live-migration check to make sure the destination VM is funcational
         let r = std::panic::catch_unwind(|| {
            // Perform same checks to validate VM has been properly migrated
-            assert_eq!(guest.get_cpu_count().unwrap_or_default(), 6);
+            assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
             #[cfg(target_arch = "x86_64")]
             if numa {
                 assert!(guest.get_total_memory().unwrap_or_default() > 6_720_000);
@@ -8145,7 +8151,7 @@
                 resize_zone_command(&dest_api_socket, "mem2", "4G");
                 // Resize to the maximum amount of CPUs and check each NUMA
                 // node has been assigned the right CPUs set.
-                resize_command(&dest_api_socket, Some(12), None, None, None);
+                resize_command(&dest_api_socket, Some(max_vcpus), None, None, None);
                 thread::sleep(std::time::Duration::new(5, 0));
 
                 guest.check_numa_common(
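
For reference, a minimal standalone Rust sketch (not part of the patch; the
cpus_arg helper name is hypothetical) of how the conditional vCPU counts
introduced above map onto the --cpus argument passed to the source and
destination VMs:

// Illustration of the vCPU parameterization used in the patch: NUMA runs
// keep the original 6/12 topology, while non-NUMA runs only need 2 boot
// vCPUs and a maximum of 4 to exercise multi-vCPU live migration.
fn cpus_arg(numa: bool) -> String {
    let boot_vcpus = if numa { 6 } else { 2 };
    let max_vcpus = if numa { 12 } else { 4 };
    format!("boot={},max={}", boot_vcpus, max_vcpus)
}

fn main() {
    assert_eq!(cpus_arg(true), "boot=6,max=12");
    assert_eq!(cpus_arg(false), "boot=2,max=4");
    println!("--cpus {}", cpus_arg(false));
}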