diff --git a/vmm/src/api/mod.rs b/vmm/src/api/mod.rs
index 0109a7bf5..1eceafe01 100644
--- a/vmm/src/api/mod.rs
+++ b/vmm/src/api/mod.rs
@@ -163,37 +163,37 @@ pub struct VmmPingResponse {
     pub version: String,
 }
 
-#[derive(Clone, Deserialize, Serialize, Default)]
+#[derive(Clone, Deserialize, Serialize, Default, Debug)]
 pub struct VmResizeData {
     pub desired_vcpus: Option<u8>,
     pub desired_ram: Option<u64>,
     pub desired_balloon: Option<u64>,
 }
 
-#[derive(Clone, Deserialize, Serialize, Default)]
+#[derive(Clone, Deserialize, Serialize, Default, Debug)]
 pub struct VmResizeZoneData {
     pub id: String,
     pub desired_ram: u64,
 }
 
-#[derive(Clone, Deserialize, Serialize, Default)]
+#[derive(Clone, Deserialize, Serialize, Default, Debug)]
 pub struct VmRemoveDeviceData {
     pub id: String,
 }
 
-#[derive(Clone, Deserialize, Serialize, Default)]
+#[derive(Clone, Deserialize, Serialize, Default, Debug)]
 pub struct VmSnapshotConfig {
     /// The snapshot destination URL
     pub destination_url: String,
 }
 
-#[derive(Clone, Deserialize, Serialize, Default)]
+#[derive(Clone, Deserialize, Serialize, Default, Debug)]
 pub struct VmReceiveMigrationData {
     /// URL for the reception of migration state
     pub receiver_url: String,
 }
 
-#[derive(Clone, Deserialize, Serialize, Default)]
+#[derive(Clone, Deserialize, Serialize, Default, Debug)]
 pub struct VmSendMigrationData {
     /// URL to migrate the VM to
     pub destination_url: String,
@@ -217,6 +217,7 @@ pub enum ApiResponsePayload {
 pub type ApiResponse = std::result::Result<ApiResponsePayload, ApiError>;
 
 #[allow(clippy::large_enum_variant)]
+#[derive(Debug)]
 pub enum ApiRequest {
     /// Create the virtual machine. This request payload is a VM configuration
     /// (VmConfig).
diff --git a/vmm/src/cpu.rs b/vmm/src/cpu.rs
index ea443d332..fa6eac059 100644
--- a/vmm/src/cpu.rs
+++ b/vmm/src/cpu.rs
@@ -221,7 +221,7 @@ impl Vcpu {
             self.mpidr = arch::configure_vcpu(&self.vcpu, self.id, kernel_entry_point, vm_memory)
                 .map_err(Error::VcpuConfiguration)?;
         }
-
+        info!("Configuring vCPU: cpu_id = {}", self.id);
         #[cfg(target_arch = "x86_64")]
         arch::configure_vcpu(
             &self.vcpu,
diff --git a/vmm/src/device_manager.rs b/vmm/src/device_manager.rs
index 6aa90b0ac..c7b3e81f1 100644
--- a/vmm/src/device_manager.rs
+++ b/vmm/src/device_manager.rs
@@ -1878,6 +1878,8 @@ impl DeviceManager {
             id
         };
 
+        info!("Creating virtio-block device: {:?}", disk_cfg);
+
         if disk_cfg.vhost_user {
             let socket = disk_cfg.vhost_socket.as_ref().unwrap().clone();
             let vu_cfg = VhostUserConfig {
@@ -1928,7 +1930,6 @@ impl DeviceManager {
                         .clone(),
                 )
                 .map_err(DeviceManagerError::Disk)?;
-
             let image_type =
                 detect_image_type(&mut file).map_err(DeviceManagerError::DetectImageType)?;
 
@@ -2028,6 +2029,7 @@ impl DeviceManager {
             net_cfg.id = Some(id.clone());
             id
         };
+        info!("Creating virtio-net device: {:?}", net_cfg);
 
         if net_cfg.vhost_user {
             let socket = net_cfg.vhost_socket.as_ref().unwrap().clone();
@@ -2158,6 +2160,7 @@ impl DeviceManager {
         // Add virtio-rng if required
         let rng_config = self.config.lock().unwrap().rng.clone();
         if let Some(rng_path) = rng_config.src.to_str() {
+            info!("Creating virtio-rng device: {:?}", rng_config);
             let id = String::from(RNG_DEVICE_NAME);
 
             let virtio_rng_device = Arc::new(Mutex::new(
@@ -2199,6 +2202,8 @@ impl DeviceManager {
             id
         };
 
+        info!("Creating virtio-fs device: {:?}", fs_cfg);
+
         let mut node = device_node!(id);
 
         // Look for the id in the device tree. If it can be found, that means
@@ -2357,6 +2362,8 @@ impl DeviceManager {
             id
         };
 
+        info!("Creating virtio-pmem device: {:?}", pmem_cfg);
+
         let mut node = device_node!(id);
 
         // Look for the id in the device tree. If it can be found, that means
@@ -2543,6 +2550,8 @@ impl DeviceManager {
             id
         };
 
+        info!("Creating virtio-vsock device: {:?}", vsock_cfg);
+
         let socket_path = vsock_cfg
             .socket
             .to_str()
@@ -2602,7 +2611,7 @@ impl DeviceManager {
         for (_memory_zone_id, memory_zone) in mm.memory_zones().iter() {
             if let Some(virtio_mem_zone) = memory_zone.virtio_mem_zone() {
                 let id = self.next_device_name(MEM_DEVICE_NAME_PREFIX)?;
-
+                info!("Creating virtio-mem device: id = {}", id);
                 #[cfg(not(feature = "acpi"))]
                 let node_id: Option<u16> = None;
                 #[cfg(feature = "acpi")]
@@ -2653,6 +2662,7 @@ impl DeviceManager {
 
         if let Some(balloon_config) = &self.config.lock().unwrap().balloon {
             let id = String::from(BALLOON_DEVICE_NAME);
+            info!("Creating virtio-balloon device: id = {}", id);
 
             let virtio_balloon_device = Arc::new(Mutex::new(
                 virtio_devices::Balloon::new(
@@ -2690,6 +2700,7 @@ impl DeviceManager {
         }
 
         let id = String::from(WATCHDOG_DEVICE_NAME);
+        info!("Creating virtio-watchdog device: id = {}", id);
 
         let virtio_watchdog_device = Arc::new(Mutex::new(
             virtio_devices::Watchdog::new(
diff --git a/vmm/src/lib.rs b/vmm/src/lib.rs
index f7bbcf615..296ab68df 100644
--- a/vmm/src/lib.rs
+++ b/vmm/src/lib.rs
@@ -1090,6 +1090,7 @@ impl Vmm {
                 if let Some(dispatch_type) = self.epoll.dispatch_table[dispatch_idx] {
                     match dispatch_type {
                         EpollDispatch::Exit => {
+                            info!("VM exit event");
                             // Consume the event.
                             self.exit_evt.read().map_err(Error::EventFdRead)?;
                             self.vmm_shutdown().map_err(Error::VmmShutdown)?;
@@ -1097,6 +1098,7 @@ impl Vmm {
                             break 'outer;
                         }
                         EpollDispatch::Reset => {
+                            info!("VM reset event");
                             // Consume the event.
                             self.reset_evt.read().map_err(Error::EventFdRead)?;
                             self.vm_reboot().map_err(Error::VmReboot)?;
@@ -1129,6 +1131,7 @@ impl Vmm {
                             // Read from the API receiver channel
                             let api_request = api_receiver.recv().map_err(Error::ApiRequestRecv)?;
 
+                            info!("API request event: {:?}", api_request);
                             match api_request {
                                 ApiRequest::VmCreate(config, sender) => {
                                     // We only store the passed VM config.
diff --git a/vmm/src/vm.rs b/vmm/src/vm.rs
index 6820f9c10..f0b6a5802 100644
--- a/vmm/src/vm.rs
+++ b/vmm/src/vm.rs
@@ -849,6 +849,7 @@ impl Vm {
             .read_from(address, &mut initramfs, size)
             .map_err(|_| Error::InitramfsLoad)?;
 
+        info!("Initramfs loaded: address = 0x{:x}", address.0);
         Ok(arch::InitramfsConfig { address, size })
     }
 
@@ -889,6 +890,7 @@ impl Vm {
 
     #[cfg(target_arch = "x86_64")]
     fn load_kernel(&mut self) -> Result<EntryPoint> {
+        info!("Loading kernel");
         let cmdline_cstring = self.get_cmdline()?;
         let guest_memory = self.memory_manager.lock().as_ref().unwrap().guest_memory();
         let mem = guest_memory.memory();
@@ -912,13 +914,10 @@ impl Vm {
         )
         .map_err(Error::LoadCmdLine)?;
 
-        if let PvhEntryPresent(pvh_entry_addr) = entry_addr.pvh_boot_cap {
+        if let PvhEntryPresent(entry_addr) = entry_addr.pvh_boot_cap {
             // Use the PVH kernel entry point to boot the guest
-            let entry_point_addr: GuestAddress;
-            entry_point_addr = pvh_entry_addr;
-            Ok(EntryPoint {
-                entry_addr: entry_point_addr,
-            })
+            info!("Kernel loaded: entry_addr = 0x{:x}", entry_addr.0);
+            Ok(EntryPoint { entry_addr })
         } else {
             Err(Error::KernelMissingPvhHeader)
         }
@@ -926,6 +925,7 @@ impl Vm {
 
     #[cfg(target_arch = "x86_64")]
     fn configure_system(&mut self) -> Result<()> {
+        info!("Configuring system");
         let mem = self.memory_manager.lock().unwrap().boot_guest_memory();
 
         let initramfs_config = match self.initramfs {
@@ -947,6 +947,10 @@ impl Vm {
                 &self.memory_manager,
                 &self.numa_nodes,
             ));
+            info!(
+                "Created ACPI tables: rsdp_addr = 0x{:x}",
+                rsdp_addr.unwrap().0
+            );
         }
 
         let sgx_epc_region = self
@@ -1645,6 +1649,7 @@ impl Vm {
     }
 
     pub fn boot(&mut self) -> Result<()> {
+        info!("Booting VM");
        event!("vm", "booting");
        let current_state = self.get_state()?;
        if current_state == VmState::Paused {