main: Start the VMM thread

We now start the main VMM thread, which listens for VM- and IPC-related
events. To start the configured VM, we no longer call the VM API
directly; instead we go through the IPC to first create and then start
the VM.

Fixes: #303

Signed-off-by: Samuel Ortiz <sameo@linux.intel.com>
Samuel Ortiz 2019-09-25 15:01:49 +02:00
parent e235c6de4f
commit 8188074300
5 changed files with 46 additions and 115 deletions
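For reference, the new boot flow driven from main() boils down to the sketch below. It is condensed from the hunks that follow (same imports: channel, Arc, EventFd, EFD_NONBLOCK), with the error handling simplified rather than copied verbatim from the commit.

// Channel and eventfd used as the IPC between main() and the VMM thread.
let (api_request_sender, api_request_receiver) = channel();
let api_evt = EventFd::new(EFD_NONBLOCK).expect("Cannot create API EventFd");

// Spawn the VMM thread; it waits on api_evt and services API requests.
let vmm_thread = vmm::start_vmm_thread(api_evt.try_clone().unwrap(), api_request_receiver)
    .expect("Failed spawning the VMM thread");

// Ask the VMM thread, over the IPC, to first create and then boot the VM.
let sender = api_request_sender.clone();
vmm::vm_create(api_evt.try_clone().unwrap(), api_request_sender, Arc::new(vm_config))
    .expect("Could not create the VM");
vmm::vm_start(api_evt.try_clone().unwrap(), sender).expect("Could not start the VM");

// Block until the VMM thread exits.
vmm_thread.join().expect("Could not join VMM thread").expect("VMM thread failed");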

Cargo.lock generated

@@ -173,10 +173,12 @@ dependencies = [
"credibility 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"dirs 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
"ssh2 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"vmm 0.1.0",
"vmm-sys-util 0.1.0 (git+https://github.com/rust-vmm/vmm-sys-util)",
]
[[package]]


@@ -7,8 +7,10 @@ edition = "2018"
[dependencies]
clap = "2.33.0"
lazy_static = "1.4.0"
libc = "0.2.62"
log = { version = "0.4.8", features = ["std"] }
vmm = { path = "vmm" }
vmm-sys-util = { git = "https://github.com/rust-vmm/vmm-sys-util" }
[dev-dependencies]
ssh2 = "0.4.0"


@@ -4,15 +4,19 @@
//
extern crate vmm;
extern crate vmm_sys_util;
#[macro_use(crate_version, crate_authors)]
extern crate clap;
use clap::{App, Arg};
use libc::EFD_NONBLOCK;
use log::LevelFilter;
use std::process;
use std::sync::mpsc::channel;
use std::sync::{Arc, Mutex};
use vmm::config;
use vmm_sys_util::eventfd::EventFd;
struct Logger {
output: Mutex<Box<dyn std::io::Write + Send>>,
@@ -285,9 +289,40 @@ fn main() {
vm_config.disks,
);
if let Err(e) = vmm::start_vm_loop(Arc::new(vm_config)) {
println!("Guest boot failed: {:?}", e);
process::exit(1);
let (api_request_sender, api_request_receiver) = channel();
let api_evt = EventFd::new(EFD_NONBLOCK).expect("Cannot create API EventFd");
let vmm_thread = match vmm::start_vmm_thread(api_evt.try_clone().unwrap(), api_request_receiver)
{
Ok(t) => t,
Err(e) => {
println!("Failed spawning the VMM thread {:?}", e);
process::exit(1);
}
};
// Create and start the VM based off the VM config we just built.
let sender = api_request_sender.clone();
vmm::vm_create(
api_evt.try_clone().unwrap(),
api_request_sender,
Arc::new(vm_config),
)
.expect("Could not create the VM");
vmm::vm_start(api_evt.try_clone().unwrap(), sender).expect("Could not start the VM");
match vmm_thread.join() {
Ok(res) => match res {
Ok(_) => (),
Err(e) => {
println!("VMM thread failed {:?}", e);
process::exit(1);
}
},
Err(e) => {
println!("Could not joing VMM thread {:?}", e);
process::exit(1);
}
}
}


@@ -383,29 +383,3 @@ impl Vmm {
Ok(exit_behaviour)
}
}
pub fn start_vm_loop(config: Arc<VmConfig>) -> Result<()> {
let exit_evt = EventFd::new(EFD_NONBLOCK).map_err(Error::EventFdCreate)?;
let reset_evt = EventFd::new(EFD_NONBLOCK).map_err(Error::EventFdCreate)?;
loop {
let mut vm = Vm::new(
config.clone(),
exit_evt.try_clone().unwrap(),
reset_evt.try_clone().unwrap(),
)
.expect("Could not create VM");
if vm.start().expect("Could not start VM") == ExitBehaviour::Shutdown {
vm.stop().expect("Could not stop VM");
break;
}
vm.stop().expect("Could not stop VM");
#[cfg(not(feature = "acpi"))]
break;
}
Ok(())
}


@@ -25,7 +25,6 @@ extern crate vm_virtio;
use crate::config::{ConsoleOutputMode, VmConfig};
use crate::device_manager::{get_win_size, Console, DeviceManager, DeviceManagerError};
use crate::{EpollContext, EpollDispatch};
use arch::RegionType;
use devices::ioapic;
use kvm_bindings::{
@@ -40,7 +39,7 @@ use std::ffi::CString;
use std::fs::{File, OpenOptions};
use std::io;
use std::ops::Deref;
use std::os::unix::io::{AsRawFd, FromRawFd};
use std::os::unix::io::FromRawFd;
use std::os::unix::thread::JoinHandleExt;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Barrier, Mutex, RwLock};
@@ -166,12 +165,6 @@ pub enum Error {
/// Cannot create a device manager.
DeviceManager(DeviceManagerError),
/// Cannot create EventFd.
EventFd(io::Error),
/// Cannot create epoll context.
EpollError(io::Error),
/// Write to the console failed.
Console(vmm_sys_util::errno::Error),
@@ -445,12 +438,10 @@ pub struct Vm {
devices: DeviceManager,
cpuid: CpuId,
config: Arc<VmConfig>,
epoll: EpollContext,
on_tty: bool,
creation_ts: std::time::Instant,
vcpus_kill_signalled: Arc<AtomicBool>,
// Shutdown (exit) and reboot (reset) control
exit_evt: EventFd,
// Reboot (reset) control
reset_evt: EventFd,
signals: Option<Signals>,
}
@@ -665,22 +656,7 @@ impl Vm {
)
.map_err(Error::DeviceManager)?;
// Let's add our STDIN fd.
let mut epoll = EpollContext::new().map_err(Error::EpollError)?;
let on_tty = unsafe { libc::isatty(libc::STDIN_FILENO as i32) } != 0;
if on_tty {
epoll.add_stdin().map_err(Error::EpollError)?;
}
// Let's add an exit event.
epoll
.add_event(&exit_evt, EpollDispatch::Exit)
.map_err(Error::EpollError)?;
epoll
.add_event(&reset_evt, EpollDispatch::Reset)
.map_err(Error::EpollError)?;
let threads = Vec::with_capacity(u8::from(&config.cpus) as usize + 1);
Ok(Vm {
@@ -691,11 +667,9 @@ impl Vm {
devices: device_manager,
cpuid,
config,
epoll,
on_tty,
creation_ts,
vcpus_kill_signalled: Arc::new(AtomicBool::new(false)),
exit_evt,
reset_evt,
signals: None,
})
@@ -775,62 +749,6 @@ impl Vm {
}
}
pub fn control_loop(&mut self) -> Result<ExitBehaviour> {
// Let's start the STDIN polling thread.
const EPOLL_EVENTS_LEN: usize = 100;
let mut events = vec![epoll::Event::new(epoll::Events::empty(), 0); EPOLL_EVENTS_LEN];
let epoll_fd = self.epoll.as_raw_fd();
let exit_behaviour;
'outer: loop {
let num_events = match epoll::wait(epoll_fd, -1, &mut events[..]) {
Ok(res) => res,
Err(e) => {
if e.kind() == io::ErrorKind::Interrupted {
// It's well defined from the epoll_wait() syscall
// documentation that the epoll loop can be interrupted
// before any of the requested events occurred or the
// timeout expired. In both those cases, epoll_wait()
// returns an error of type EINTR, but this should not
// be considered as a regular error. Instead it is more
// appropriate to retry, by calling into epoll_wait().
continue;
}
return Err(Error::EpollError(e));
}
};
for event in events.iter().take(num_events) {
let dispatch_idx = event.data as usize;
if let Some(dispatch_type) = self.epoll.dispatch_table[dispatch_idx] {
match dispatch_type {
EpollDispatch::Exit => {
// Consume the event.
self.exit_evt.read().map_err(Error::EventFd)?;
exit_behaviour = ExitBehaviour::Shutdown;
break 'outer;
}
EpollDispatch::Reset => {
// Consume the event.
self.reset_evt.read().map_err(Error::EventFd)?;
exit_behaviour = ExitBehaviour::Reset;
break 'outer;
}
EpollDispatch::Stdin => self.handle_stdin()?,
EpollDispatch::Api => {}
}
}
}
}
Ok(exit_behaviour)
}
pub fn stop(&mut self) -> Result<()> {
if self.on_tty {
// Don't forget to set the terminal in canonical mode
@@ -876,7 +794,7 @@ impl Vm {
}
}
pub fn start(&mut self) -> Result<ExitBehaviour> {
pub fn start(&mut self) -> Result<()> {
let entry_addr = self.load_kernel()?;
let vcpu_count = u8::from(&self.config.cpus);
let vcpu_thread_barrier = Arc::new(Barrier::new((vcpu_count + 1) as usize));
@@ -969,7 +887,7 @@ impl Vm {
}
}
self.control_loop()
Ok(())
}
/// Gets an Arc to the guest memory owned by this VM.