mirror of
https://github.com/cloud-hypervisor/cloud-hypervisor.git
synced 2024-10-01 02:55:45 +00:00
vmm: Send FDs across unix socket for migration when in local mode
When in local migration mode send the FDs for the guest memory over the socket along with the slot that the FD is associated with. This removes the requirement for copying the guest RAM and gives significantly faster live migration performance (of the order of 3s to 60ms). Fixes: #3566 Signed-off-by: Rob Bradford <robert.bradford@intel.com>
This commit is contained in:
parent
715a7d9065
commit
88952cc500
@ -45,6 +45,7 @@ use vm_memory::bitmap::AtomicBitmap;
|
||||
use vm_migration::{protocol::*, Migratable};
|
||||
use vm_migration::{MigratableError, Pausable, Snapshot, Snapshottable, Transportable};
|
||||
use vmm_sys_util::eventfd::EventFd;
|
||||
use vmm_sys_util::sock_ctrl_msg::ScmSocket;
|
||||
|
||||
#[cfg(feature = "acpi")]
|
||||
mod acpi;
|
||||
@ -945,7 +946,32 @@ impl Vmm {
|
||||
}
|
||||
}
|
||||
Command::MemoryFd => {
    // NOTE: the previous stray `unimplemented!()` here panicked
    // unconditionally, making this whole arm dead code. Removed so the
    // fd-receive path below actually runs.
    info!("MemoryFd Command Received");

    // Memory fds are only valid once the migration handshake has begun.
    if !started {
        warn!("Migration not started yet");
        Response::error().write_to(&mut socket)?;
        continue;
    }

    // Receive the 4-byte little-endian slot id in the data bytes, with
    // the guest-memory file descriptor attached as SCM_RIGHTS ancillary
    // data on the same message.
    let mut buf = [0u8; 4];
    let (_, file) = socket.recv_with_fd(&mut buf).map_err(|e| {
        MigratableError::MigrateReceive(anyhow!(
            "Error receiving slot from socket: {}",
            e
        ))
    })?;

    // Lazily create the slot -> File map on the first MemoryFd command.
    if existing_memory_files.is_none() {
        existing_memory_files = Some(HashMap::default())
    }

    if let Some(ref mut existing_memory_files) = existing_memory_files {
        let slot = u32::from_le_bytes(buf);
        // recv_with_fd returned Ok, so a descriptor is expected to be
        // present; a missing fd here is a protocol violation.
        existing_memory_files.insert(slot, file.unwrap());
    }

    // Acknowledge so the source can send the next slot.
    Response::ok().write_to(&mut socket)?;
}
|
||||
Command::Complete => {
|
||||
info!("Complete Command Received");
|
||||
@ -1049,6 +1075,10 @@ impl Vmm {
|
||||
})?
|
||||
};
|
||||
|
||||
if send_data_migration.local {
|
||||
vm.send_memory_fds(&mut socket)?;
|
||||
}
|
||||
|
||||
let vm_migration_config = VmMigrationConfig {
|
||||
vm_config,
|
||||
#[cfg(all(feature = "kvm", target_arch = "x86_64"))]
|
||||
@ -1070,6 +1100,10 @@ impl Vmm {
|
||||
)));
|
||||
}
|
||||
|
||||
if send_data_migration.local {
|
||||
// Now pause VM
|
||||
vm.pause()?;
|
||||
} else {
|
||||
// Start logging dirty pages
|
||||
vm.start_dirty_log()?;
|
||||
|
||||
@ -1105,7 +1139,7 @@ impl Vmm {
|
||||
|
||||
// Send last batch of dirty pages
|
||||
Self::vm_maybe_send_dirty_pages(vm, &mut socket)?;
|
||||
|
||||
}
|
||||
// Capture snapshot and send it
|
||||
let vm_snapshot = vm.snapshot()?;
|
||||
let snapshot_data = serde_json::to_vec(&vm_snapshot).unwrap();
|
||||
|
@ -68,6 +68,7 @@ use std::io::{self, Read, Write};
|
||||
use std::io::{Seek, SeekFrom};
|
||||
use std::num::Wrapping;
|
||||
use std::ops::Deref;
|
||||
use std::os::unix::net::UnixStream;
|
||||
use std::panic::AssertUnwindSafe;
|
||||
use std::sync::{Arc, Mutex, RwLock};
|
||||
use std::{result, str, thread};
|
||||
@ -79,12 +80,14 @@ use vm_memory::Address;
|
||||
use vm_memory::{Bytes, GuestAddress, GuestAddressSpace, GuestMemoryAtomic};
|
||||
#[cfg(feature = "tdx")]
|
||||
use vm_memory::{GuestMemory, GuestMemoryRegion};
|
||||
use vm_migration::protocol::{Request, Response, Status};
|
||||
use vm_migration::{
|
||||
protocol::MemoryRangeTable, Migratable, MigratableError, Pausable, Snapshot,
|
||||
SnapshotDataSection, Snapshottable, Transportable,
|
||||
};
|
||||
use vmm_sys_util::eventfd::EventFd;
|
||||
use vmm_sys_util::signal::unblock_signal;
|
||||
use vmm_sys_util::sock_ctrl_msg::ScmSocket;
|
||||
use vmm_sys_util::terminal::Terminal;
|
||||
|
||||
#[cfg(target_arch = "aarch64")]
|
||||
@ -2261,6 +2264,42 @@ impl Vm {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Send the guest-memory backing file descriptors to the destination
/// VMM over `socket` (used for local migration, avoiding a RAM copy).
///
/// Per-slot wire sequence (order matters):
/// 1. write a `MemoryFd` request announcing the payload length,
/// 2. send the slot id (4 LE bytes) with the fd attached as SCM_RIGHTS
///    ancillary data,
/// 3. wait for the destination's `Ok` response before the next slot.
///
/// On a non-`Ok` response the migration is abandoned and a
/// `MigratableError::MigrateSend` is returned.
pub fn send_memory_fds(
    &mut self,
    socket: &mut UnixStream,
) -> std::result::Result<(), MigratableError> {
    // Drain the slot -> fd map from the memory manager; each entry is
    // one guest RAM region backed by a shareable descriptor.
    for (slot, fd) in self
        .memory_manager
        .lock()
        .unwrap()
        .memory_slot_fds()
        .drain()
    {
        // Payload length is just the slot id; the fd itself travels
        // out-of-band as ancillary data.
        Request::memory_fd(std::mem::size_of_val(&slot) as u64)
            .write_to(socket)
            .map_err(|e| {
                MigratableError::MigrateSend(anyhow!("Error sending memory fd request: {}", e))
            })?;
        // Slot id in the data bytes, fd via SCM_RIGHTS.
        socket
            .send_with_fd(&slot.to_le_bytes()[..], fd)
            .map_err(|e| {
                MigratableError::MigrateSend(anyhow!("Error sending memory fd: {}", e))
            })?;

        // Block until the destination acknowledges this slot.
        let res = Response::read_from(socket)?;
        if res.status() != Status::Ok {
            warn!("Error during memory fd migration");
            // Best-effort abandon: ignore any error reading the reply.
            Request::abandon().write_to(socket)?;
            Response::read_from(socket).ok();
            return Err(MigratableError::MigrateSend(anyhow!(
                "Error during memory fd migration"
            )));
        }
    }

    Ok(())
}
|
||||
|
||||
pub fn send_memory_regions<F>(
|
||||
&mut self,
|
||||
ranges: &MemoryRangeTable,
|
||||
|
Loading…
Reference in New Issue
Block a user