mirror of
https://github.com/cloud-hypervisor/cloud-hypervisor.git
synced 2024-12-22 13:45:20 +00:00
vmm: cpu: Reuse already allocated vCPUs if available
When a request is made to increase the number of vCPUs in the VM, attempt to reuse any previously removed (and hence inactive) vCPUs before creating new ones. This ensures that the APIC ID is not reused for a different KVM vCPU (which is not allowed) and that the APIC IDs are also sequential.

The two key changes to support this are:

* Clearing the "kill" bit on the old vCPU state so that it does not immediately exit upon thread recreation.

* Using the length of the vcpus vector (the number of allocated vcpus) rather than the number of active vCPUs (.present_vcpus()) to determine how many should be created.

This change also introduced some new info!() debugging on the vCPU creation/removal path to aid further development in the future.

TEST=Expanded test_cpu_hotplug test.

Fixes: #1338

Signed-off-by: Rob Bradford <robert.bradford@intel.com>
This commit is contained in:
parent
9dcd0c37f3
commit
4b64f2a027
@ -3775,6 +3775,19 @@ mod tests {
|
||||
u32::from(desired_vcpus)
|
||||
);
|
||||
|
||||
// Resize the VM back up to 4
|
||||
let desired_vcpus = 4;
|
||||
resize_command(&api_socket, Some(desired_vcpus), None);
|
||||
|
||||
guest.ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu2/online")?;
|
||||
guest.ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu3/online")?;
|
||||
thread::sleep(std::time::Duration::new(10, 0));
|
||||
aver_eq!(
|
||||
tb,
|
||||
guest.get_cpu_count().unwrap_or_default(),
|
||||
u32::from(desired_vcpus)
|
||||
);
|
||||
|
||||
let _ = child.kill();
|
||||
let _ = child.wait();
|
||||
Ok(())
|
||||
|
@ -750,6 +750,7 @@ impl CpuManager {
|
||||
Ok(vcpu_clone)
|
||||
}
|
||||
|
||||
/// Only create new vCPUs if there aren't any inactive ones to reuse
|
||||
fn create_vcpus(&mut self, desired_vcpus: u8, entry_point: Option<EntryPoint>) -> Result<()> {
|
||||
info!(
|
||||
"Request to create new vCPUs: desired = {}, max = {}, allocated = {}, present = {}",
|
||||
@ -763,7 +764,8 @@ impl CpuManager {
|
||||
return Err(Error::DesiredVCPUCountExceedsMax);
|
||||
}
|
||||
|
||||
for cpu_id in self.present_vcpus()..desired_vcpus {
|
||||
// Only create vCPUs in excess of all the allocated vCPUs.
|
||||
for cpu_id in self.vcpus.len() as u8..desired_vcpus {
|
||||
let vcpu = self.create_vcpu(cpu_id, entry_point, None)?;
|
||||
self.vcpus.push(vcpu);
|
||||
}
|
||||
@ -859,6 +861,7 @@ impl CpuManager {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Start up as many vCPUs threads as needed to reach `desired_vcpus`
|
||||
fn activate_vcpus(&mut self, desired_vcpus: u8, inserting: bool) -> Result<()> {
|
||||
if desired_vcpus > self.config.max_vcpus {
|
||||
return Err(Error::DesiredVCPUCountExceedsMax);
|
||||
@ -875,6 +878,7 @@ impl CpuManager {
|
||||
self.present_vcpus()
|
||||
);
|
||||
|
||||
// This reuses any inactive vCPUs as well as any that were newly created
|
||||
for cpu_id in self.present_vcpus()..desired_vcpus {
|
||||
let vcpu = Arc::clone(&self.vcpus[cpu_id as usize]);
|
||||
self.start_vcpu(vcpu, vcpu_thread_barrier.clone(), inserting)?;
|
||||
|
Loading…
Reference in New Issue
Block a user