interrupt: Reorganize all interrupt management with InterruptManager

Based on all the previous changes, we can at this point replace the
entire interrupt management with implementations of the
InterruptManager and InterruptSourceGroup traits.

By using KvmInterruptManager from the DeviceManager, we can provide both
VirtioPciDevice and VfioPciDevice a way to pick the kind of
InterruptSourceGroup they want to create. Because they choose the type
of interrupt to be MSI/MSI-X, they will be given a MsiInterruptGroup.

Both MsixConfig and MsiConfig are responsible for the update of the GSI
routes, which is why, by passing the MsiInterruptGroup to them, they can
still perform the GSI route management without knowing implementation
details. That's where the InterruptSourceGroup is powerful, as it
provides a generic way to manage interrupts, no matter the type of
interrupt and no matter which hypervisor might be in use.

Once the full replacement has been achieved, both SystemAllocator and
KVM specific dependencies can be removed.

Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
This commit is contained in:
Sebastien Boeuf 2020-01-14 23:47:41 +01:00 committed by Samuel Ortiz
parent 92082ad439
commit 4bb12a2d8d
9 changed files with 207 additions and 208 deletions

1
Cargo.lock generated
View File

@ -478,6 +478,7 @@ dependencies = [
"libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)",
"vm-allocator 0.1.0",
"vm-device 0.1.0",
"vm-memory 0.1.0 (git+https://github.com/rust-vmm/vm-memory)",
"vmm-sys-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
]

View File

@ -12,5 +12,6 @@ kvm-bindings = "0.2.0"
kvm-ioctls = "0.4.0"
libc = "0.2.60"
log = "0.4.8"
vm-device = { path = "../vm-device" }
vm-memory = { git = "https://github.com/rust-vmm/vm-memory" }
vmm-sys-util = ">=0.3.1"

View File

@ -25,7 +25,7 @@ pub use self::device::{
BarReprogrammingParams, DeviceRelocation, Error as PciDeviceError, InterruptDelivery,
InterruptParameters, PciDevice,
};
pub use self::msi::{MsiCap, MsiConfig};
pub use self::msi::{msi_num_enabled_vectors, MsiCap, MsiConfig};
pub use self::msix::{MsixCap, MsixConfig, MsixTableEntry, MSIX_TABLE_ENTRY_SIZE};
use kvm_bindings::{kvm_irq_routing, kvm_irq_routing_entry};
use kvm_ioctls::*;

View File

@ -6,13 +6,11 @@
extern crate byteorder;
extern crate vm_memory;
use crate::{set_kvm_routes, InterruptRoute};
use byteorder::{ByteOrder, LittleEndian};
use kvm_bindings::{kvm_irq_routing_entry, KVM_IRQ_ROUTING_MSI};
use kvm_ioctls::VmFd;
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use vm_allocator::SystemAllocator;
use std::sync::Arc;
use vm_device::interrupt::{
InterruptIndex, InterruptSourceConfig, InterruptSourceGroup, MsiIrqSourceConfig,
};
// MSI control masks
const MSI_CTL_ENABLE: u16 = 0x1;
@ -27,6 +25,16 @@ const MSI_MSG_ADDR_LO_OFFSET: u64 = 0x4;
// MSI message masks
const MSI_MSG_ADDR_LO_MASK: u32 = 0xffff_fffc;
/// Decodes the number of enabled vectors from an MSI message control
/// value.
///
/// Reads the 3-bit field at bits 6:4 of `msg_ctl` (per the MSI
/// capability layout, the Multiple Message Enable field) and expands it
/// to a power-of-two vector count. Encodings above 5 are reserved, so
/// they yield 0 vectors.
pub fn msi_num_enabled_vectors(msg_ctl: u16) -> usize {
    match (msg_ctl >> 4) & 0x7 {
        // Valid encodings 0..=5 map to 1, 2, 4, 8, 16, 32 vectors.
        field @ 0..=5 => 1 << field,
        // Reserved encodings (6, 7) mean no vectors are enabled.
        _ => 0,
    }
}
#[derive(Clone, Copy, Default)]
pub struct MsiCap {
// Message Control Register
@ -69,13 +77,7 @@ impl MsiCap {
}
fn num_enabled_vectors(&self) -> usize {
let field = (self.msg_ctl >> 4) & 0x7;
if field > 5 {
return 0;
}
1 << field
msi_num_enabled_vectors(self.msg_ctl)
}
fn vector_masked(&self, vector: usize) -> bool {
@ -160,33 +162,19 @@ impl MsiCap {
pub struct MsiConfig {
cap: MsiCap,
pub irq_routes: Vec<InterruptRoute>,
vm_fd: Arc<VmFd>,
gsi_msi_routes: Arc<Mutex<HashMap<u32, kvm_irq_routing_entry>>>,
interrupt_source_group: Arc<Box<dyn InterruptSourceGroup>>,
}
impl MsiConfig {
pub fn new(
msg_ctl: u16,
allocator: &mut SystemAllocator,
vm_fd: Arc<VmFd>,
gsi_msi_routes: Arc<Mutex<HashMap<u32, kvm_irq_routing_entry>>>,
) -> Self {
pub fn new(msg_ctl: u16, interrupt_source_group: Arc<Box<dyn InterruptSourceGroup>>) -> Self {
let cap = MsiCap {
msg_ctl,
..Default::default()
};
let mut irq_routes: Vec<InterruptRoute> = Vec::new();
for _ in 0..cap.num_enabled_vectors() {
irq_routes.push(InterruptRoute::new(allocator).unwrap());
}
MsiConfig {
cap,
irq_routes,
vm_fd,
gsi_msi_routes,
interrupt_source_group,
}
}
@ -198,52 +186,46 @@ impl MsiConfig {
self.cap.size()
}
pub fn num_enabled_vectors(&self) -> usize {
self.cap.num_enabled_vectors()
}
pub fn update(&mut self, offset: u64, data: &[u8]) {
let old_enabled = self.cap.enabled();
self.cap.update(offset, data);
let mut gsi_msi_routes = self.gsi_msi_routes.lock().unwrap();
if self.cap.enabled() {
for (idx, route) in self.irq_routes.iter().enumerate() {
if !old_enabled {
if let Err(e) = self.irq_routes[idx].enable(&self.vm_fd) {
error!("Failed enabling irq_fd: {:?}", e);
}
}
// Ignore MSI vector if masked.
if self.cap.vector_masked(idx) {
continue;
}
let mut entry = kvm_irq_routing_entry {
gsi: route.gsi,
type_: KVM_IRQ_ROUTING_MSI,
..Default::default()
for idx in 0..self.num_enabled_vectors() {
let config = MsiIrqSourceConfig {
high_addr: self.cap.msg_addr_hi,
low_addr: self.cap.msg_addr_lo,
data: self.cap.msg_data as u32,
};
entry.u.msi.address_lo = self.cap.msg_addr_lo;
entry.u.msi.address_hi = self.cap.msg_addr_hi;
entry.u.msi.data = u32::from(self.cap.msg_data) | (idx as u32);
gsi_msi_routes.insert(route.gsi, entry);
}
} else {
for route in self.irq_routes.iter() {
if old_enabled {
if let Err(e) = route.disable(&self.vm_fd) {
error!("Failed disabling irq_fd: {:?}", e);
}
if let Err(e) = self
.interrupt_source_group
.update(idx as InterruptIndex, InterruptSourceConfig::MsiIrq(config))
{
error!("Failed updating vector: {:?}", e);
}
gsi_msi_routes.remove(&route.gsi);
if self.cap.vector_masked(idx) {
if let Err(e) = self.interrupt_source_group.mask(idx as InterruptIndex) {
error!("Failed masking vector: {:?}", e);
}
}
}
}
if let Err(e) = set_kvm_routes(&self.vm_fd, &gsi_msi_routes) {
error!("Failed updating KVM routes: {:?}", e);
if !old_enabled {
if let Err(e) = self.interrupt_source_group.enable() {
error!("Failed enabling irq_fd: {:?}", e);
}
}
} else if old_enabled {
if let Err(e) = self.interrupt_source_group.disable() {
error!("Failed disabling irq_fd: {:?}", e);
}
}
}
}

View File

@ -8,13 +8,11 @@ extern crate vm_memory;
use std::sync::Arc;
use crate::{set_kvm_routes, InterruptRoute, PciCapability, PciCapabilityID};
use crate::{PciCapability, PciCapabilityID};
use byteorder::{ByteOrder, LittleEndian};
use kvm_bindings::{kvm_irq_routing_entry, KVM_IRQ_ROUTING_MSI};
use kvm_ioctls::VmFd;
use std::collections::HashMap;
use std::sync::Mutex;
use vm_allocator::SystemAllocator;
use vm_device::interrupt::{
InterruptIndex, InterruptSourceConfig, InterruptSourceGroup, MsiIrqSourceConfig,
};
use vm_memory::ByteValued;
const MAX_MSIX_VECTORS_PER_DEVICE: u16 = 2048;
@ -55,9 +53,7 @@ impl Default for MsixTableEntry {
pub struct MsixConfig {
pub table_entries: Vec<MsixTableEntry>,
pub pba_entries: Vec<u64>,
pub irq_routes: Vec<InterruptRoute>,
vm_fd: Arc<VmFd>,
gsi_msi_routes: Arc<Mutex<HashMap<u32, kvm_irq_routing_entry>>>,
interrupt_source_group: Arc<Box<dyn InterruptSourceGroup>>,
masked: bool,
enabled: bool,
}
@ -65,9 +61,7 @@ pub struct MsixConfig {
impl MsixConfig {
pub fn new(
msix_vectors: u16,
allocator: &mut SystemAllocator,
vm_fd: Arc<VmFd>,
gsi_msi_routes: Arc<Mutex<HashMap<u32, kvm_irq_routing_entry>>>,
interrupt_source_group: Arc<Box<dyn InterruptSourceGroup>>,
) -> Self {
assert!(msix_vectors <= MAX_MSIX_VECTORS_PER_DEVICE);
@ -77,17 +71,10 @@ impl MsixConfig {
let num_pba_entries: usize = ((msix_vectors as usize) / BITS_PER_PBA_ENTRY) + 1;
pba_entries.resize_with(num_pba_entries, Default::default);
let mut irq_routes: Vec<InterruptRoute> = Vec::new();
for _ in 0..msix_vectors {
irq_routes.push(InterruptRoute::new(allocator).unwrap());
}
MsixConfig {
table_entries,
pba_entries,
irq_routes,
vm_fd,
gsi_msi_routes,
interrupt_source_group,
masked: false,
enabled: false,
}
@ -110,47 +97,37 @@ impl MsixConfig {
// Update KVM routes
if old_masked != self.masked || old_enabled != self.enabled {
let mut gsi_msi_routes = self.gsi_msi_routes.lock().unwrap();
if self.enabled && !self.masked {
for (idx, table_entry) in self.table_entries.iter().enumerate() {
if !old_enabled || old_masked {
if let Err(e) = self.irq_routes[idx].enable(&self.vm_fd) {
error!("Failed enabling irq_fd: {:?}", e);
}
}
// Ignore MSI-X vector if masked.
if table_entry.masked() {
continue;
}
let gsi = self.irq_routes[idx].gsi;
let mut entry = kvm_irq_routing_entry {
gsi,
type_: KVM_IRQ_ROUTING_MSI,
..Default::default()
let config = MsiIrqSourceConfig {
high_addr: table_entry.msg_addr_hi,
low_addr: table_entry.msg_addr_lo,
data: table_entry.msg_data,
};
entry.u.msi.address_lo = table_entry.msg_addr_lo;
entry.u.msi.address_hi = table_entry.msg_addr_hi;
entry.u.msi.data = table_entry.msg_data;
gsi_msi_routes.insert(gsi, entry);
}
} else {
for route in self.irq_routes.iter() {
if old_enabled || !old_masked {
if let Err(e) = route.disable(&self.vm_fd) {
error!("Failed disabling irq_fd: {:?}", e);
}
if let Err(e) = self
.interrupt_source_group
.update(idx as InterruptIndex, InterruptSourceConfig::MsiIrq(config))
{
error!("Failed updating vector: {:?}", e);
}
gsi_msi_routes.remove(&route.gsi);
if table_entry.masked() {
if let Err(e) = self.interrupt_source_group.mask(idx as InterruptIndex) {
error!("Failed masking vector: {:?}", e);
}
}
}
if !old_enabled || old_masked {
if let Err(e) = self.interrupt_source_group.enable() {
error!("Failed enabling irq_fd: {:?}", e);
}
}
} else if old_enabled || !old_masked {
if let Err(e) = self.interrupt_source_group.disable() {
error!("Failed disabling irq_fd: {:?}", e);
}
}
if let Err(e) = set_kvm_routes(&self.vm_fd, &gsi_msi_routes) {
error!("Failed updating KVM routes: {:?}", e);
}
}
@ -261,26 +238,27 @@ impl MsixConfig {
// Update interrupt routes
if self.enabled && !self.masked {
let mut gsi_msi_routes = self.gsi_msi_routes.lock().unwrap();
let table_entry = &self.table_entries[index];
let gsi = self.irq_routes[index].gsi;
if !table_entry.masked() {
let mut entry = kvm_irq_routing_entry {
gsi,
type_: KVM_IRQ_ROUTING_MSI,
..Default::default()
};
entry.u.msi.address_lo = table_entry.msg_addr_lo;
entry.u.msi.address_hi = table_entry.msg_addr_hi;
entry.u.msi.data = table_entry.msg_data;
let config = MsiIrqSourceConfig {
high_addr: table_entry.msg_addr_hi,
low_addr: table_entry.msg_addr_lo,
data: table_entry.msg_data,
};
gsi_msi_routes.insert(gsi, entry);
} else {
gsi_msi_routes.remove(&gsi);
if let Err(e) = self.interrupt_source_group.update(
index as InterruptIndex,
InterruptSourceConfig::MsiIrq(config),
) {
error!("Failed updating vector: {:?}", e);
}
if let Err(e) = set_kvm_routes(&self.vm_fd, &gsi_msi_routes) {
error!("Failed updating KVM routes: {:?}", e);
if table_entry.masked() {
if let Err(e) = self.interrupt_source_group.mask(index as InterruptIndex) {
error!("Failed masking vector: {:?}", e);
}
} else if let Err(e) = self.interrupt_source_group.unmask(index as InterruptIndex) {
error!("Failed unmasking vector: {:?}", e);
}
}
@ -371,7 +349,10 @@ impl MsixConfig {
fn inject_msix_and_clear_pba(&mut self, vector: usize) {
// Inject the MSI message
match self.irq_routes[vector].irq_fd.write(1) {
match self
.interrupt_source_group
.trigger(vector as InterruptIndex)
{
Ok(_) => debug!("MSI-X injected on vector control flip"),
Err(e) => error!("failed to inject MSI-X: {}", e),
}

View File

@ -10,21 +10,21 @@ extern crate vm_allocator;
use crate::vfio_device::VfioDevice;
use byteorder::{ByteOrder, LittleEndian};
use devices::BusDevice;
use kvm_bindings::{kvm_irq_routing_entry, kvm_userspace_memory_region};
use kvm_bindings::kvm_userspace_memory_region;
use kvm_ioctls::*;
use pci::{
BarReprogrammingParams, MsiConfig, MsixCap, MsixConfig, PciBarConfiguration, PciBarRegionType,
PciCapabilityID, PciClassCode, PciConfiguration, PciDevice, PciDeviceError, PciHeaderType,
PciSubclass, MSIX_TABLE_ENTRY_SIZE,
msi_num_enabled_vectors, BarReprogrammingParams, MsiConfig, MsixCap, MsixConfig,
PciBarConfiguration, PciBarRegionType, PciCapabilityID, PciClassCode, PciConfiguration,
PciDevice, PciDeviceError, PciHeaderType, PciSubclass, MSIX_TABLE_ENTRY_SIZE,
};
use std::any::Any;
use std::collections::HashMap;
use std::os::unix::io::AsRawFd;
use std::ptr::null_mut;
use std::sync::{Arc, Mutex};
use std::sync::Arc;
use std::{fmt, io, result};
use vfio_bindings::bindings::vfio::*;
use vm_allocator::SystemAllocator;
use vm_device::interrupt::{InterruptIndex, InterruptManager, InterruptSourceGroup, PCI_MSI_IRQ};
use vm_memory::{Address, GuestAddress, GuestUsize};
use vmm_sys_util::eventfd::EventFd;
@ -32,12 +32,15 @@ use vmm_sys_util::eventfd::EventFd;
pub enum VfioPciError {
AllocateGsi,
EventFd(io::Error),
InterruptSourceGroupCreate(io::Error),
IrqFd(kvm_ioctls::Error),
NewVfioPciDevice,
MapRegionGuest(kvm_ioctls::Error),
SetGsiRouting(kvm_ioctls::Error),
MsiNotConfigured,
MsixNotConfigured,
UpdateMsiEventFd,
UpdateMsixEventFd,
}
pub type Result<T> = std::result::Result<T, VfioPciError>;
@ -46,6 +49,9 @@ impl fmt::Display for VfioPciError {
match self {
VfioPciError::AllocateGsi => write!(f, "failed to allocate GSI"),
VfioPciError::EventFd(e) => write!(f, "failed to create eventfd: {}", e),
VfioPciError::InterruptSourceGroupCreate(e) => {
write!(f, "failed to create interrupt source group: {}", e)
}
VfioPciError::IrqFd(e) => write!(f, "failed to register irqfd: {}", e),
VfioPciError::NewVfioPciDevice => write!(f, "failed to create VFIO PCI device"),
VfioPciError::MapRegionGuest(e) => {
@ -54,6 +60,8 @@ impl fmt::Display for VfioPciError {
VfioPciError::SetGsiRouting(e) => write!(f, "failed to set GSI routes for KVM: {}", e),
VfioPciError::MsiNotConfigured => write!(f, "MSI interrupt not yet configured"),
VfioPciError::MsixNotConfigured => write!(f, "MSI-X interrupt not yet configured"),
VfioPciError::UpdateMsiEventFd => write!(f, "failed to update MSI eventfd"),
VfioPciError::UpdateMsixEventFd => write!(f, "failed to update MSI-X eventfd"),
}
}
}
@ -79,6 +87,7 @@ enum InterruptUpdateAction {
struct VfioMsi {
cfg: MsiConfig,
cap_offset: u32,
interrupt_source_group: Arc<Box<dyn InterruptSourceGroup>>,
}
impl VfioMsi {
@ -105,6 +114,7 @@ struct VfioMsix {
bar: MsixConfig,
cap: MsixCap,
cap_offset: u32,
interrupt_source_group: Arc<Box<dyn InterruptSourceGroup>>,
}
impl VfioMsix {
@ -269,16 +279,14 @@ pub struct VfioPciDevice {
configuration: PciConfiguration,
mmio_regions: Vec<MmioRegion>,
interrupt: Interrupt,
gsi_msi_routes: Arc<Mutex<HashMap<u32, kvm_irq_routing_entry>>>,
}
impl VfioPciDevice {
/// Constructs a new Vfio Pci device for the given Vfio device
pub fn new(
vm_fd: &Arc<VmFd>,
allocator: &mut SystemAllocator,
device: VfioDevice,
gsi_msi_routes: Arc<Mutex<HashMap<u32, kvm_irq_routing_entry>>>,
interrupt_manager: &Arc<dyn InterruptManager>,
) -> Result<Self> {
let device = Arc::new(device);
device.reset();
@ -307,15 +315,14 @@ impl VfioPciDevice {
msi: None,
msix: None,
},
gsi_msi_routes,
};
vfio_pci_device.parse_capabilities(allocator);
vfio_pci_device.parse_capabilities(interrupt_manager);
Ok(vfio_pci_device)
}
fn parse_msix_capabilities(&mut self, cap: u8, allocator: &mut SystemAllocator) {
fn parse_msix_capabilities(&mut self, cap: u8, interrupt_manager: &Arc<dyn InterruptManager>) {
let msg_ctl = self
.vfio_pci_configuration
.read_config_word((cap + 2).into());
@ -333,37 +340,44 @@ impl VfioPciDevice {
table,
pba,
};
let msix_config = MsixConfig::new(
msix_cap.table_size(),
allocator,
self.vm_fd.clone(),
self.gsi_msi_routes.clone(),
);
let interrupt_source_group = interrupt_manager
.create_group(PCI_MSI_IRQ, 0, msix_cap.table_size() as InterruptIndex)
.unwrap();
let msix_config = MsixConfig::new(msix_cap.table_size(), interrupt_source_group.clone());
self.interrupt.msix = Some(VfioMsix {
bar: msix_config,
cap: msix_cap,
cap_offset: cap.into(),
interrupt_source_group,
});
}
fn parse_msi_capabilities(&mut self, cap: u8, allocator: &mut SystemAllocator) {
fn parse_msi_capabilities(&mut self, cap: u8, interrupt_manager: &Arc<dyn InterruptManager>) {
let msg_ctl = self
.vfio_pci_configuration
.read_config_word((cap + 2).into());
let interrupt_source_group = interrupt_manager
.create_group(
PCI_MSI_IRQ,
0,
msi_num_enabled_vectors(msg_ctl) as InterruptIndex,
)
.unwrap();
let msi_config = MsiConfig::new(msg_ctl, interrupt_source_group.clone());
self.interrupt.msi = Some(VfioMsi {
cfg: MsiConfig::new(
msg_ctl,
allocator,
self.vm_fd.clone(),
self.gsi_msi_routes.clone(),
),
cfg: msi_config,
cap_offset: cap.into(),
interrupt_source_group,
});
}
fn parse_capabilities(&mut self, allocator: &mut SystemAllocator) {
fn parse_capabilities(&mut self, interrupt_manager: &Arc<dyn InterruptManager>) {
let mut cap_next = self
.vfio_pci_configuration
.read_config_byte(PCI_CONFIG_CAPABILITY_OFFSET);
@ -375,10 +389,10 @@ impl VfioPciDevice {
match PciCapabilityID::from(cap_id) {
PciCapabilityID::MessageSignalledInterrupts => {
self.parse_msi_capabilities(cap_next, allocator);
self.parse_msi_capabilities(cap_next, interrupt_manager);
}
PciCapabilityID::MSIX => {
self.parse_msix_capabilities(cap_next, allocator);
self.parse_msix_capabilities(cap_next, interrupt_manager);
}
_ => {}
};
@ -394,8 +408,14 @@ impl VfioPciDevice {
Some(InterruptUpdateAction::EnableMsi) => {
if let Some(msi) = &self.interrupt.msi {
let mut irq_fds: Vec<&EventFd> = Vec::new();
for r in msi.cfg.irq_routes.iter() {
irq_fds.push(&r.irq_fd);
for i in 0..msi.cfg.num_enabled_vectors() {
if let Some(eventfd) =
msi.interrupt_source_group.notifier(i as InterruptIndex)
{
irq_fds.push(eventfd);
} else {
return Err(VfioPciError::UpdateMsiEventFd);
}
}
if let Err(e) = self.device.enable_msi(irq_fds) {
@ -419,8 +439,14 @@ impl VfioPciDevice {
Some(InterruptUpdateAction::EnableMsix) => {
if let Some(msix) = &self.interrupt.msix {
let mut irq_fds: Vec<&EventFd> = Vec::new();
for r in msix.bar.irq_routes.iter() {
irq_fds.push(&r.irq_fd);
for i in 0..msix.bar.table_entries.len() {
if let Some(eventfd) =
msix.interrupt_source_group.notifier(i as InterruptIndex)
{
irq_fds.push(eventfd);
} else {
return Err(VfioPciError::UpdateMsiEventFd);
}
}
if let Err(e) = self.device.enable_msix(irq_fds) {

View File

@ -23,7 +23,11 @@ pub trait VirtioInterrupt: Send + Sync {
int_type: &VirtioInterruptType,
queue: Option<&Queue>,
) -> std::result::Result<(), std::io::Error>;
fn notifier(&self, _int_type: &VirtioInterruptType, _queue: Option<&Queue>) -> Option<EventFd> {
fn notifier(
&self,
_int_type: &VirtioInterruptType,
_queue: Option<&Queue>,
) -> Option<&EventFd> {
None
}
}

View File

@ -22,8 +22,6 @@ use crate::{
};
use arc_swap::ArcSwap;
use devices::BusDevice;
use kvm_bindings::kvm_irq_routing_entry;
use kvm_ioctls::VmFd;
use libc::EFD_NONBLOCK;
use pci::{
BarReprogrammingParams, InterruptDelivery, MsixCap, MsixConfig, PciBarConfiguration,
@ -32,11 +30,11 @@ use pci::{
PciNetworkControllerSubclass, PciSubclass,
};
use std::any::Any;
use std::collections::HashMap;
use std::result;
use std::sync::atomic::{AtomicU16, AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
use vm_allocator::SystemAllocator;
use vm_device::interrupt::{InterruptIndex, InterruptManager, InterruptSourceGroup, PCI_MSI_IRQ};
use vm_device::{Migratable, MigratableError, Pausable, Snapshotable};
use vm_memory::{Address, ByteValued, GuestAddress, GuestMemoryMmap, GuestUsize, Le32};
use vmm_sys_util::{errno::Result, eventfd::EventFd};
@ -235,6 +233,7 @@ pub struct VirtioPciDevice {
// PCI interrupts.
interrupt_status: Arc<AtomicUsize>,
virtio_interrupt: Option<Arc<dyn VirtioInterrupt>>,
interrupt_source_group: Arc<Box<dyn InterruptSourceGroup>>,
// virtio queues
queues: Vec<Queue>,
@ -257,9 +256,7 @@ impl VirtioPciDevice {
device: Arc<Mutex<dyn VirtioDevice>>,
msix_num: u16,
iommu_mapping_cb: Option<Arc<VirtioIommuRemapping>>,
allocator: &mut SystemAllocator,
vm_fd: &Arc<VmFd>,
gsi_msi_routes: Arc<Mutex<HashMap<u32, kvm_irq_routing_entry>>>,
interrupt_manager: &Arc<dyn InterruptManager>,
) -> Result<Self> {
let device_clone = device.clone();
let locked_device = device_clone.lock().unwrap();
@ -279,12 +276,13 @@ impl VirtioPciDevice {
let pci_device_id = VIRTIO_PCI_DEVICE_ID_BASE + locked_device.device_type() as u16;
let interrupt_source_group =
interrupt_manager.create_group(PCI_MSI_IRQ, 0, msix_num as InterruptIndex)?;
let (msix_config, msix_config_clone) = if msix_num > 0 {
let msix_config = Arc::new(Mutex::new(MsixConfig::new(
msix_num,
allocator,
vm_fd.clone(),
gsi_msi_routes,
interrupt_source_group.clone(),
)));
let msix_config_clone = msix_config.clone();
(Some(msix_config), Some(msix_config_clone))
@ -347,6 +345,7 @@ impl VirtioPciDevice {
memory: Some(memory),
settings_bar: 0,
use_64bit_bar,
interrupt_source_group,
})
}
@ -471,13 +470,19 @@ impl VirtioTransport for VirtioPciDevice {
pub struct VirtioInterruptMsix {
msix_config: Arc<Mutex<MsixConfig>>,
config_vector: Arc<AtomicU16>,
interrupt_source_group: Arc<Box<dyn InterruptSourceGroup>>,
}
impl VirtioInterruptMsix {
pub fn new(msix_config: Arc<Mutex<MsixConfig>>, config_vector: Arc<AtomicU16>) -> Self {
pub fn new(
msix_config: Arc<Mutex<MsixConfig>>,
config_vector: Arc<AtomicU16>,
interrupt_source_group: Arc<Box<dyn InterruptSourceGroup>>,
) -> Self {
VirtioInterruptMsix {
msix_config,
config_vector,
interrupt_source_group,
}
}
}
@ -515,10 +520,11 @@ impl VirtioInterrupt for VirtioInterruptMsix {
return Ok(());
}
config.irq_routes[vector as usize].irq_fd.write(1)
self.interrupt_source_group
.trigger(vector as InterruptIndex)
}
fn notifier(&self, int_type: &VirtioInterruptType, queue: Option<&Queue>) -> Option<EventFd> {
fn notifier(&self, int_type: &VirtioInterruptType, queue: Option<&Queue>) -> Option<&EventFd> {
let vector = match int_type {
VirtioInterruptType::Config => self.config_vector.load(Ordering::SeqCst),
VirtioInterruptType::Queue => {
@ -530,12 +536,8 @@ impl VirtioInterrupt for VirtioInterruptMsix {
}
};
Some(
self.msix_config.lock().unwrap().irq_routes[vector as usize]
.irq_fd
.try_clone()
.unwrap(),
)
self.interrupt_source_group
.notifier(vector as InterruptIndex)
}
}
@ -553,6 +555,7 @@ impl PciDevice for VirtioPciDevice {
let virtio_interrupt_msix = Arc::new(VirtioInterruptMsix::new(
msix_config.clone(),
self.common_config.msix_config.clone(),
self.interrupt_source_group.clone(),
));
self.virtio_interrupt = Some(virtio_interrupt_msix);

View File

@ -12,6 +12,8 @@
extern crate vm_device;
use crate::config::{ConsoleOutputMode, VmConfig};
#[cfg(feature = "pci_support")]
use crate::interrupt::{KvmInterruptManager, KvmRoutingEntry};
use crate::memory_manager::{Error as MemoryManagerError, MemoryManager};
use crate::vm::VmInfo;
#[cfg(feature = "acpi")]
@ -20,7 +22,6 @@ use arc_swap::ArcSwap;
use arch::layout;
use arch::layout::{APIC_START, IOAPIC_SIZE, IOAPIC_START};
use devices::{ioapic, HotPlugNotificationFlags};
use kvm_bindings::kvm_irq_routing_entry;
use kvm_ioctls::*;
use libc::O_TMPFILE;
use libc::TIOCGWINSZ;
@ -43,6 +44,8 @@ use std::sync::{Arc, Mutex};
#[cfg(feature = "pci_support")]
use vfio::{VfioDevice, VfioDmaMapping, VfioPciDevice, VfioPciError};
use vm_allocator::SystemAllocator;
#[cfg(feature = "pci_support")]
use vm_device::interrupt::InterruptManager;
use vm_device::{Migratable, MigratableError, Pausable, Snapshotable};
use vm_memory::GuestAddress;
use vm_memory::{Address, GuestMemoryMmap, GuestUsize};
@ -547,9 +550,15 @@ impl DeviceManager {
// devices. This way, we can maintain the full list of used GSI,
// preventing one device from overriding interrupts setting from
// another one.
let gsi_msi_routes: Arc<Mutex<HashMap<u32, kvm_irq_routing_entry>>> =
let kvm_gsi_msi_routes: Arc<Mutex<HashMap<u32, KvmRoutingEntry>>> =
Arc::new(Mutex::new(HashMap::new()));
let interrupt_manager: Arc<dyn InterruptManager> = Arc::new(KvmInterruptManager::new(
address_manager.allocator.clone(),
vm_info.vm_fd.clone(),
kvm_gsi_msi_routes,
));
let (mut iommu_device, iommu_mapping) = if vm_info.vm_cfg.lock().unwrap().iommu {
let (device, mapping) =
vm_virtio::Iommu::new().map_err(DeviceManagerError::CreateVirtioIommu)?;
@ -575,7 +584,7 @@ impl DeviceManager {
&mut pci_bus,
mapping,
migratable_devices,
&gsi_msi_routes,
&interrupt_manager,
)?;
if let Some(dev_id) = virtio_iommu_attach_dev {
@ -589,7 +598,7 @@ impl DeviceManager {
&mut pci_bus,
memory_manager,
&mut iommu_device,
&gsi_msi_routes,
&interrupt_manager,
)?;
iommu_attached_devices.append(&mut vfio_iommu_device_ids);
@ -613,7 +622,7 @@ impl DeviceManager {
&mut pci_bus,
&None,
migratable_devices,
&gsi_msi_routes,
&interrupt_manager,
)?;
*virt_iommu = Some((iommu_id, iommu_attached_devices));
@ -1350,11 +1359,10 @@ impl DeviceManager {
pci: &mut PciBus,
memory_manager: &Arc<Mutex<MemoryManager>>,
iommu_device: &mut Option<vm_virtio::Iommu>,
gsi_msi_routes: &Arc<Mutex<HashMap<u32, kvm_irq_routing_entry>>>,
interrupt_manager: &Arc<dyn InterruptManager>,
) -> DeviceManagerResult<Vec<u32>> {
let mut mem_slot = memory_manager.lock().unwrap().allocate_kvm_memory_slot();
let mut iommu_attached_device_ids = Vec::new();
let mut allocator = address_manager.allocator.lock().unwrap();
if let Some(device_list_cfg) = &vm_info.vm_cfg.lock().unwrap().devices {
// Create the KVM VFIO device
@ -1389,16 +1397,12 @@ impl DeviceManager {
}
}
let mut vfio_pci_device = VfioPciDevice::new(
vm_info.vm_fd,
&mut allocator,
vfio_device,
gsi_msi_routes.clone(),
)
.map_err(DeviceManagerError::VfioPciCreate)?;
let mut vfio_pci_device =
VfioPciDevice::new(vm_info.vm_fd, vfio_device, &interrupt_manager)
.map_err(DeviceManagerError::VfioPciCreate)?;
let bars = vfio_pci_device
.allocate_bars(&mut allocator)
.allocate_bars(&mut address_manager.allocator.lock().unwrap())
.map_err(DeviceManagerError::AllocateBars)?;
mem_slot = vfio_pci_device
@ -1432,7 +1436,7 @@ impl DeviceManager {
pci: &mut PciBus,
iommu_mapping: &Option<Arc<IommuMapping>>,
migratable_devices: &mut Vec<Arc<Mutex<dyn Migratable>>>,
gsi_msi_routes: &Arc<Mutex<HashMap<u32, kvm_irq_routing_entry>>>,
interrupt_manager: &Arc<dyn InterruptManager>,
) -> DeviceManagerResult<Option<u32>> {
// Allows support for one MSI-X vector per queue. It also adds 1
// as we need to take into account the dedicated vector to notify
@ -1466,19 +1470,16 @@ impl DeviceManager {
None
};
let mut allocator = address_manager.allocator.lock().unwrap();
let mut virtio_pci_device = VirtioPciDevice::new(
memory.clone(),
virtio_device,
msix_num,
iommu_mapping_cb,
&mut allocator,
vm_fd,
gsi_msi_routes.clone(),
interrupt_manager,
)
.map_err(DeviceManagerError::VirtioDevice)?;
let mut allocator = address_manager.allocator.lock().unwrap();
let bars = virtio_pci_device
.allocate_bars(&mut allocator)
.map_err(DeviceManagerError::AllocateBars)?;