msi: Create MsiConfig to embed MsiCap

In the same way that MsixConfig is in charge of everything related to
MSI-X vectors, we need an MsiConfig structure to manage MSI vectors.
The MsiCap structure is still needed as a low-level API, but it is now
embedded in MsiConfig, which oversees everything related to MSI.

Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
Author: Sebastien Boeuf <sebastien.boeuf@intel.com>
Date: 2020-01-10 09:33:01 +01:00
Committed by: Samuel Ortiz
parent 1e5e02801f
commit f3c3870159
3 changed files with 85 additions and 25 deletions
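
For orientation, here is a small, self-contained sketch of the layering this commit introduces: MsiCap remains a register-level view of the MSI capability, while MsiConfig owns it along with one interrupt route per enabled vector. The InterruptRoute stand-in and the hard-coded GSI base below are illustrative only; the real MsiConfig, as the diff shows, also takes a SystemAllocator, a VmFd and the shared GSI routing table.

// Simplified, standalone model of the new layering (stand-in names and
// values; not the crate's actual API).

struct MsiCap {
    msg_ctl: u16,
}

impl MsiCap {
    // Bit 0 of Message Control: MSI enable.
    fn enabled(&self) -> bool {
        self.msg_ctl & 0x1 == 0x1
    }

    // Bits [6:4] of Message Control: "Multiple Message Enable",
    // log2 of the number of vectors enabled by the guest.
    fn num_enabled_vectors(&self) -> usize {
        let field = (self.msg_ctl >> 4) & 0x7;
        if field > 5 {
            return 0;
        }
        1 << field
    }
}

// Illustrative stand-in for the project's InterruptRoute (GSI + irqfd).
struct InterruptRoute {
    gsi: u32,
}

// MsiConfig owns the low-level capability plus one route per vector.
struct MsiConfig {
    cap: MsiCap,
    irq_routes: Vec<InterruptRoute>,
}

impl MsiConfig {
    fn new(msg_ctl: u16) -> Self {
        let cap = MsiCap { msg_ctl };
        // One route per vector advertised through "Multiple Message Enable";
        // the GSI base of 32 is arbitrary here (the real code asks the
        // SystemAllocator for GSIs).
        let irq_routes = (0..cap.num_enabled_vectors())
            .map(|i| InterruptRoute { gsi: 32 + i as u32 })
            .collect();
        MsiConfig { cap, irq_routes }
    }

    // Callers only talk to MsiConfig; the MsiCap accessors go private.
    fn enabled(&self) -> bool {
        self.cap.enabled()
    }
}

fn main() {
    // Enable bit set, "Multiple Message Enable" = 2 -> 4 vectors.
    let cfg = MsiConfig::new(0x1 | (2 << 4));
    assert!(cfg.enabled());
    assert_eq!(cfg.irq_routes.len(), 4);
    println!("first GSI: {}", cfg.irq_routes[0].gsi);
}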


@@ -25,7 +25,7 @@ pub use self::device::{
     BarReprogrammingParams, DeviceRelocation, Error as PciDeviceError, InterruptDelivery,
     InterruptParameters, PciDevice,
 };
-pub use self::msi::MsiCap;
+pub use self::msi::{MsiCap, MsiConfig};
 pub use self::msix::{MsixCap, MsixConfig, MsixTableEntry, MSIX_TABLE_ENTRY_SIZE};
 use kvm_bindings::{kvm_irq_routing, kvm_irq_routing_entry};
 use kvm_ioctls::*;


@@ -6,7 +6,13 @@
 extern crate byteorder;
 extern crate vm_memory;
 
+use crate::InterruptRoute;
 use byteorder::{ByteOrder, LittleEndian};
+use kvm_bindings::kvm_irq_routing_entry;
+use kvm_ioctls::VmFd;
+use std::collections::HashMap;
+use std::sync::{Arc, Mutex};
+use vm_allocator::SystemAllocator;
 
 // MSI control masks
 const MSI_CTL_ENABLE: u16 = 0x1;
@@ -58,11 +64,11 @@ impl MsiCap {
         self.msg_ctl & MSI_CTL_PER_VECTOR == MSI_CTL_PER_VECTOR
     }
 
-    pub fn enabled(&self) -> bool {
+    fn enabled(&self) -> bool {
         self.msg_ctl & MSI_CTL_ENABLE == MSI_CTL_ENABLE
     }
 
-    pub fn num_enabled_vectors(&self) -> usize {
+    fn num_enabled_vectors(&self) -> usize {
         let field = (self.msg_ctl >> 4) & 0x7;
 
         if field > 5 {
@@ -72,7 +78,7 @@ impl MsiCap {
         1 << field
     }
 
-    pub fn vector_masked(&self, vector: usize) -> bool {
+    fn vector_masked(&self, vector: usize) -> bool {
         if !self.per_vector_mask() {
             return false;
         }
@@ -80,7 +86,7 @@ impl MsiCap {
         (self.mask_bits >> vector) & 0x1 == 0x1
     }
 
-    pub fn size(&self) -> u64 {
+    fn size(&self) -> u64 {
         let mut size: u64 = 0xa;
 
         if self.addr_64_bits() {
@@ -93,7 +99,7 @@ impl MsiCap {
         size
     }
 
-    pub fn update(&mut self, offset: u64, data: &[u8]) {
+    fn update(&mut self, offset: u64, data: &[u8]) {
         // Calculate message data offset depending on the address being 32 or
         // 64 bits.
         // Calculate upper address offset if the address is 64 bits.
@@ -151,3 +157,56 @@ impl MsiCap {
         }
     }
 }
+
+pub struct MsiConfig {
+    pub cap: MsiCap,
+    pub irq_routes: Vec<InterruptRoute>,
+    _vm_fd: Arc<VmFd>,
+    _gsi_msi_routes: Arc<Mutex<HashMap<u32, kvm_irq_routing_entry>>>,
+}
+
+impl MsiConfig {
+    pub fn new(
+        msg_ctl: u16,
+        allocator: &mut SystemAllocator,
+        vm_fd: Arc<VmFd>,
+        gsi_msi_routes: Arc<Mutex<HashMap<u32, kvm_irq_routing_entry>>>,
+    ) -> Self {
+        let cap = MsiCap {
+            msg_ctl,
+            ..Default::default()
+        };
+
+        let mut irq_routes: Vec<InterruptRoute> = Vec::new();
+        for _ in 0..cap.num_enabled_vectors() {
+            irq_routes.push(InterruptRoute::new(allocator).unwrap());
+        }
+
+        MsiConfig {
+            cap,
+            irq_routes,
+            _vm_fd: vm_fd,
+            _gsi_msi_routes: gsi_msi_routes,
+        }
+    }
+
+    pub fn enabled(&self) -> bool {
+        self.cap.enabled()
+    }
+
+    pub fn update(&mut self, offset: u64, data: &[u8]) {
+        self.cap.update(offset, data)
+    }
+
+    pub fn size(&self) -> u64 {
+        self.cap.size()
+    }
+
+    pub fn num_enabled_vectors(&self) -> usize {
+        self.cap.num_enabled_vectors()
+    }
+
+    pub fn vector_masked(&self, vector: usize) -> bool {
+        self.cap.vector_masked(vector)
+    }
+}
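
A quick aside on the vector_masked() check that MsiConfig now simply delegates: a vector is only considered masked when the capability supports per-vector masking and the vector's bit is set in the mask register. A toy standalone version follows (the 0x100 value for the per-vector-masking bit in Message Control is stated here as an assumption):

// Toy version of the masking check: per-vector masking must be supported
// (Message Control bit 8, assumed value 0x100) and the vector's bit set.
const MSI_CTL_PER_VECTOR: u16 = 0x100;

fn vector_masked(msg_ctl: u16, mask_bits: u32, vector: usize) -> bool {
    if msg_ctl & MSI_CTL_PER_VECTOR != MSI_CTL_PER_VECTOR {
        return false;
    }
    (mask_bits >> vector) & 0x1 == 0x1
}

fn main() {
    // mask_bits = 0b0101: vectors 0 and 2 are masked, 1 and 3 are not.
    assert!(vector_masked(MSI_CTL_PER_VECTOR, 0b0101, 0));
    assert!(!vector_masked(MSI_CTL_PER_VECTOR, 0b0101, 1));
    // Without per-vector masking support, nothing is reported as masked.
    assert!(!vector_masked(0, 0b0101, 2));
}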


@@ -16,7 +16,7 @@ use kvm_bindings::{
 };
 use kvm_ioctls::*;
 use pci::{
-    BarReprogrammingParams, MsiCap, MsixCap, MsixConfig, PciBarConfiguration, PciBarRegionType,
+    BarReprogrammingParams, MsiConfig, MsixCap, MsixConfig, PciBarConfiguration, PciBarRegionType,
     PciCapabilityID, PciClassCode, PciConfiguration, PciDevice, PciDeviceError, PciHeaderType,
     PciSubclass, MSIX_TABLE_ENTRY_SIZE,
 };
@@ -79,19 +79,18 @@ enum InterruptUpdateAction {
     DisableMsix,
 }
 
-#[derive(Copy, Clone)]
 struct VfioMsi {
-    cap: MsiCap,
+    cfg: MsiConfig,
     cap_offset: u32,
 }
 
 impl VfioMsi {
     fn update(&mut self, offset: u64, data: &[u8]) -> Option<InterruptUpdateAction> {
-        let old_enabled = self.cap.enabled();
+        let old_enabled = self.cfg.enabled();
 
-        self.cap.update(offset, data);
+        self.cfg.update(offset, data);
 
-        let new_enabled = self.cap.enabled();
+        let new_enabled = self.cfg.enabled();
 
         if !old_enabled && new_enabled {
             return Some(InterruptUpdateAction::EnableMsi);
@@ -169,7 +168,7 @@ impl Interrupt {
     fn accessed(&self, offset: u64) -> Option<(PciCapabilityID, u64)> {
         if let Some(msi) = &self.msi {
             if offset >= u64::from(msi.cap_offset)
-                && offset < u64::from(msi.cap_offset) + msi.cap.size()
+                && offset < u64::from(msi.cap_offset) + msi.cfg.size()
             {
                 return Some((
                     PciCapabilityID::MessageSignalledInterrupts,
@@ -426,16 +425,18 @@ impl VfioPciDevice {
         });
     }
 
-    fn parse_msi_capabilities(&mut self, cap: u8) {
+    fn parse_msi_capabilities(&mut self, cap: u8, allocator: &mut SystemAllocator) {
         let msg_ctl = self
             .vfio_pci_configuration
             .read_config_word((cap + 2).into());
 
         self.interrupt.msi = Some(VfioMsi {
-            cap: MsiCap {
+            cfg: MsiConfig::new(
                 msg_ctl,
-                ..Default::default()
-            },
+                allocator,
+                self.vm_fd.clone(),
+                self.gsi_msi_routes.clone(),
+            ),
             cap_offset: cap.into(),
         });
     }
@@ -452,7 +453,7 @@ impl VfioPciDevice {
             match PciCapabilityID::from(cap_id) {
                 PciCapabilityID::MessageSignalledInterrupts => {
-                    self.parse_msi_capabilities(cap_next);
+                    self.parse_msi_capabilities(cap_next, allocator);
                 }
                 PciCapabilityID::MSIX => {
                     self.parse_msix_capabilities(cap_next, allocator);
@@ -467,7 +468,7 @@ impl VfioPciDevice {
     }
 
     fn update_msi_interrupt_routes(&self, msi: &VfioMsi) -> Result<()> {
-        if msi.cap.enabled() {
+        if msi.cfg.enabled() {
             let mut gsi_msi_routes = self.gsi_msi_routes.lock().unwrap();
 
             for (idx, route) in self.interrupt_routes.iter().enumerate() {
@@ -475,12 +476,12 @@ impl VfioPciDevice {
                 // guest OS does not match the expected amount. This is related
                 // to "Multiple Message Capable" and "Multiple Message Enable"
                 // fields from the "Message Control" register.
-                if idx >= msi.cap.num_enabled_vectors() {
+                if idx >= msi.cfg.num_enabled_vectors() {
                     continue;
                 }
 
                 // Ignore MSI vector if masked.
-                if msi.cap.vector_masked(idx) {
+                if msi.cfg.vector_masked(idx) {
                     continue;
                 }
@@ -490,9 +491,9 @@ impl VfioPciDevice {
                     ..Default::default()
                 };
 
-                entry.u.msi.address_lo = msi.cap.msg_addr_lo;
-                entry.u.msi.address_hi = msi.cap.msg_addr_hi;
-                entry.u.msi.data = u32::from(msi.cap.msg_data) | (idx as u32);
+                entry.u.msi.address_lo = msi.cfg.cap.msg_addr_lo;
+                entry.u.msi.address_hi = msi.cfg.cap.msg_addr_hi;
+                entry.u.msi.data = u32::from(msi.cfg.cap.msg_data) | (idx as u32);
 
                 gsi_msi_routes.insert(route.gsi, entry);
             }
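
A note on the route programming above: plain MSI selects a vector by modifying the low bits of the message data, which is why each routing entry uses the capability's msg_data OR-ed with the vector index (this assumes the guest-programmed base data keeps those low bits clear when several vectors are enabled). A trivial standalone illustration with made-up values:

fn main() {
    // Base message data as programmed by the guest; with 4 vectors enabled,
    // the low two bits carry the vector index (example value only).
    let msg_data: u16 = 0x4140;
    for idx in 0..4u32 {
        let data = u32::from(msg_data) | idx;
        println!("vector {}: MSI data {:#06x}", idx, data);
    }
    // Prints 0x4140, 0x4141, 0x4142, 0x4143.
}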
@@ -693,7 +694,7 @@ impl Drop for VfioPciDevice {
         }
 
         if let Some(msi) = &self.interrupt.msi {
-            if msi.cap.enabled() && self.device.disable_msi().is_err() {
+            if msi.cfg.enabled() && self.device.disable_msi().is_err() {
                 error!("Could not disable MSI");
             }
         }