diff --git a/vm-virtio/src/lib.rs b/vm-virtio/src/lib.rs
index e7278c0fa..d6ce1f348 100755
--- a/vm-virtio/src/lib.rs
+++ b/vm-virtio/src/lib.rs
@@ -130,6 +130,8 @@ pub enum ActivateError {
     VhostUserSetup(vhost_user::Error),
     /// Failed to setup vhost-user daemon.
     VhostUserNetSetup(vhost_user::Error),
+    /// Failed to setup vhost-user daemon.
+    VhostUserBlkSetup(vhost_user::Error),
 }
 
 pub type ActivateResult = std::result::Result<(), ActivateError>;
diff --git a/vm-virtio/src/vhost_user/blk.rs b/vm-virtio/src/vhost_user/blk.rs
new file mode 100644
index 000000000..9e2ba2ac5
--- /dev/null
+++ b/vm-virtio/src/vhost_user/blk.rs
@@ -0,0 +1,226 @@
+// Copyright 2019 Intel Corporation. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+use libc;
+use libc::EFD_NONBLOCK;
+use std::cmp;
+use std::io::Write;
+use std::ptr::null;
+use std::sync::{Arc, RwLock};
+use std::thread;
+use std::vec::Vec;
+
+use crate::VirtioInterrupt;
+
+use vm_memory::GuestMemoryMmap;
+use vmm_sys_util::eventfd::EventFd;
+
+use super::super::{ActivateError, ActivateResult, Queue, VirtioDevice, VirtioDeviceType};
+use super::handler::*;
+use super::vu_common_ctrl::*;
+use super::{Error, Result};
+use std::mem;
+use vhost_rs::vhost_user::message::VhostUserConfigFlags;
+use vhost_rs::vhost_user::message::{VhostUserProtocolFeatures, VhostUserVirtioFeatures};
+use vhost_rs::vhost_user::{Master, VhostUserMaster, VhostUserMasterReqHandler};
+use vhost_rs::VhostBackend;
+use virtio_bindings::bindings::virtio_blk::*;
+
+macro_rules! offset_of {
+    ($ty:ty, $field:ident) => {
+        unsafe { &(*(null() as *const $ty)).$field as *const _ as usize }
+    };
+}
+
+struct SlaveReqHandler {}
+impl VhostUserMasterReqHandler for SlaveReqHandler {}
+
+pub struct Blk {
+    vhost_user_blk: Master,
+    kill_evt: EventFd,
+    avail_features: u64,
+    acked_features: u64,
+    config_space: Vec<u8>,
+    queue_sizes: Vec<u16>,
+}
+
+impl<'a> Blk {
+    /// Create a new vhost-user-blk device.
+    pub fn new(wce: bool, vu_cfg: VhostUserConfig<'a>) -> Result<Blk> {
+        let mut vhost_user_blk = Master::connect(vu_cfg.sock, vu_cfg.num_queues as u64)
+            .map_err(Error::VhostUserCreateMaster)?;
+
+        let kill_evt = EventFd::new(EFD_NONBLOCK).map_err(Error::CreateKillEventFd)?;
+
+        // Fill in the device and vring features the VMM supports.
+        let mut avail_features = 1 << VIRTIO_BLK_F_SEG_MAX
+            | 1 << VIRTIO_BLK_F_RO
+            | 1 << VIRTIO_BLK_F_BLK_SIZE
+            | 1 << VIRTIO_BLK_F_FLUSH
+            | 1 << VIRTIO_BLK_F_TOPOLOGY
+            | 1 << VIRTIO_F_VERSION_1
+            | VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits();
+
+        // Set vhost-user owner.
+        vhost_user_blk
+            .set_owner()
+            .map_err(Error::VhostUserSetOwner)?;
+
+        // Get the features from the backend and negotiate the set of features
+        // supported by both the VMM and the backend.
+        let backend_features = vhost_user_blk
+            .get_features()
+            .map_err(Error::VhostUserGetFeatures)?;
+        avail_features &= backend_features;
+        // Setting the features back is required by the vhost crate mechanism,
+        // since later vhost calls check that features have been filled in on
+        // the master before executing.
+        vhost_user_blk
+            .set_features(avail_features)
+            .map_err(Error::VhostUserSetFeatures)?;
+
+        // Identify if protocol features are supported by the slave.
+        let mut acked_features = 0;
+        if avail_features & VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits() != 0 {
+            acked_features |= VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits();
+
+            let mut protocol_features = vhost_user_blk
+                .get_protocol_features()
+                .map_err(Error::VhostUserGetProtocolFeatures)?;
+            protocol_features |= VhostUserProtocolFeatures::MQ;
+            protocol_features &= !VhostUserProtocolFeatures::INFLIGHT_SHMFD;
+            vhost_user_blk
+                .set_protocol_features(protocol_features)
+                .map_err(Error::VhostUserSetProtocolFeatures)?;
+        }
+
+        let config_len = mem::size_of::<virtio_blk_config>();
+        let mut config_space: Vec<u8> = vec![0u8; config_len as usize];
+
+        let queue_num_offset = offset_of!(virtio_blk_config, num_queues);
+        // Only set the num_queues value.
+        config_space[queue_num_offset] = vu_cfg.num_queues as u8;
+
+        Ok(Blk {
+            vhost_user_blk,
+            kill_evt,
+            avail_features,
+            acked_features,
+            config_space,
+            queue_sizes: vec![vu_cfg.queue_size; vu_cfg.num_queues],
+        })
+    }
+}
+
+impl Drop for Blk {
+    fn drop(&mut self) {
+        if let Err(_e) = self.kill_evt.write(1) {
+            error!("failed to kill vhost-user-blk with error {}", _e);
+        }
+    }
+}
+
+impl VirtioDevice for Blk {
+    fn device_type(&self) -> u32 {
+        VirtioDeviceType::TYPE_BLOCK as u32
+    }
+
+    fn queue_max_sizes(&self) -> &[u16] {
+        &self.queue_sizes
+    }
+
+    fn features(&self, page: u32) -> u32 {
+        match page {
+            0 => self.avail_features as u32,
+            1 => (self.avail_features >> 32) as u32,
+            _ => {
+                warn!("Received request for unknown features page: {}", page);
+                0u32
+            }
+        }
+    }
+
+    fn ack_features(&mut self, page: u32, value: u32) {
+        let mut v = match page {
+            0 => u64::from(value),
+            1 => u64::from(value) << 32,
+            _ => {
+                warn!("Cannot acknowledge unknown features page: {}", page);
+                0u64
+            }
+        };
+
+        // Check if the guest is ACK'ing a feature that we didn't claim to have.
+        let unrequested_features = v & !self.avail_features;
+        if unrequested_features != 0 {
+            warn!("Received acknowledge request for unknown feature: {:x}", v);
+            // Don't count these features as acked.
+            v &= !unrequested_features;
+        }
+        self.acked_features |= v;
+    }
+
+    fn read_config(&self, offset: u64, mut data: &mut [u8]) {
+        let config_len = self.config_space.len() as u64;
+        if offset >= config_len {
+            error!("Failed to read config space");
+            return;
+        }
+        if let Some(end) = offset.checked_add(data.len() as u64) {
+            // This write can't fail, offset and end are checked against config_len.
+            data.write_all(&self.config_space[offset as usize..cmp::min(end, config_len) as usize])
+                .unwrap();
+        }
+    }
+
+    fn write_config(&mut self, offset: u64, data: &[u8]) {
+        let data_len = data.len() as u64;
+        let config_len = self.config_space.len() as u64;
+        if offset + data_len > config_len {
+            error!("Failed to write config space");
+            return;
+        }
+        let (_, right) = self.config_space.split_at_mut(offset as usize);
+        right.copy_from_slice(&data[..]);
+    }
+
+    fn activate(
+        &mut self,
+        mem: Arc<RwLock<GuestMemoryMmap>>,
+        interrupt_cb: Arc<VirtioInterrupt>,
+        queues: Vec<Queue>,
+        queue_evts: Vec<EventFd>,
+    ) -> ActivateResult {
+        let handler_kill_evt = self
+            .kill_evt
+            .try_clone()
+            .map_err(|_| ActivateError::CloneKillEventFd)?;
+
+        let vu_interrupt_list = setup_vhost_user(
+            &mut self.vhost_user_blk,
+            &mem.read().unwrap(),
+            queues,
+            queue_evts,
+            self.acked_features,
+        )
+        .map_err(ActivateError::VhostUserBlkSetup)?;
+
+        let mut handler = VhostUserEpollHandler::<SlaveReqHandler>::new(VhostUserEpollConfig {
+            interrupt_cb,
+            kill_evt: handler_kill_evt,
+            vu_interrupt_list,
+            slave_req_handler: None,
+        });
+
+        let handler_result = thread::Builder::new()
+            .name("vhost_user_blk".to_string())
+            .spawn(move || {
+                if let Err(e) = handler.run() {
+                    error!("vhost-user-blk worker thread exited with error {:?}!", e);
+                }
+            });
+        if let Err(e) = handler_result {
+            error!("vhost-user-blk thread create failed with error {:?}", e);
+        }
+        Ok(())
+    }
+}
diff --git a/vm-virtio/src/vhost_user/mod.rs b/vm-virtio/src/vhost_user/mod.rs
index b458a2b91..26fce84e3 100644
--- a/vm-virtio/src/vhost_user/mod.rs
+++ b/vm-virtio/src/vhost_user/mod.rs
@@ -12,11 +12,13 @@ use std::io;
 use vhost_rs::Error as VhostError;
 use vm_memory::Error as MmapError;
 
+pub mod blk;
 pub mod fs;
 mod handler;
 pub mod net;
 pub mod vu_common_ctrl;
 
+pub use self::blk::Blk;
 pub use self::fs::*;
 pub use self::net::Net;
 pub use self::vu_common_ctrl::VhostUserConfig;
@@ -91,5 +93,7 @@ pub enum Error {
     VhostUserSetSlaveRequestFd(vhost_rs::Error),
     /// Invalid used address.
     UsedAddress,
+    /// Invalid features provided by the vhost-user backend.
+    InvalidFeatures,
 }
 type Result<T> = std::result::Result<T, Error>;
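A note on the config space setup in Blk::new(): the hand-rolled offset_of! macro recovers the byte offset of num_queues inside virtio_blk_config via the classic null-pointer trick, and only that single byte is patched into an otherwise zeroed config space. The standalone sketch below shows the same idea with a made-up DemoBlkConfig struct (not the real bindgen layout) and std::mem::offset_of! from current toolchains, which yields the same value the macro computes.

use std::mem;

// Hypothetical stand-in for the bindgen-generated virtio_blk_config; the
// field names and layout here are illustrative only.
#[repr(C)]
struct DemoBlkConfig {
    capacity: u64,
    size_max: u32,
    seg_max: u32,
    num_queues: u16,
}

fn main() {
    // Same byte offset the null-pointer macro in blk.rs derives.
    let queue_num_offset = mem::offset_of!(DemoBlkConfig, num_queues);

    // Mirror Blk::new(): zero the config space, then poke only the low byte
    // of num_queues (fine for a little-endian guest and small queue counts).
    let mut config_space = vec![0u8; mem::size_of::<DemoBlkConfig>()];
    config_space[queue_num_offset] = 4u8;

    assert_eq!(queue_num_offset, 16); // 8 (u64) + 4 + 4 with #[repr(C)]
    println!("num_queues byte lives at offset {}", queue_num_offset);
}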
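The masking in ack_features() guards against a guest acknowledging feature bits the device never offered. A self-contained sketch of that check, using illustrative DEMO_* constants rather than the real virtio-blk feature bits:

// Bits the guest tries to ack but the device never advertised are dropped
// before being recorded.
const DEMO_F_RO: u64 = 1 << 5;
const DEMO_F_FLUSH: u64 = 1 << 9;

fn ack_features(avail_features: u64, acked_features: &mut u64, mut v: u64) {
    let unrequested_features = v & !avail_features;
    if unrequested_features != 0 {
        // Don't count unadvertised features as acked.
        v &= !unrequested_features;
    }
    *acked_features |= v;
}

fn main() {
    let avail = DEMO_F_RO | DEMO_F_FLUSH;
    let mut acked = 0u64;
    // The guest acks RO plus a bit the device never advertised.
    ack_features(avail, &mut acked, DEMO_F_RO | (1 << 20));
    assert_eq!(acked, DEMO_F_RO);
}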
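Similarly, read_config() clamps out-of-range reads instead of panicking: the end of the requested range is capped at the config space length, so the source slice never exceeds the destination buffer and write_all() cannot fail. A minimal sketch of that bounds handling, with a stand-in CONFIG array in place of the device's config_space:

use std::cmp;
use std::io::Write;

// Illustrative config space contents; not a real virtio-blk layout.
const CONFIG: [u8; 8] = [1, 2, 3, 4, 5, 6, 7, 8];

fn read_config(offset: u64, mut data: &mut [u8]) {
    let config_len = CONFIG.len() as u64;
    if offset >= config_len {
        eprintln!("Failed to read config space");
        return;
    }
    if let Some(end) = offset.checked_add(data.len() as u64) {
        // The slice end is clamped to config_len, so its length never exceeds
        // data.len() and write_all() always succeeds.
        data.write_all(&CONFIG[offset as usize..cmp::min(end, config_len) as usize])
            .unwrap();
    }
}

fn main() {
    let mut buf = [0u8; 4];
    // Only 2 bytes are available past offset 6; the rest stays zeroed.
    read_config(6, &mut buf);
    assert_eq!(buf, [7, 8, 0, 0]);
}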