mirror of
https://github.com/cloud-hypervisor/cloud-hypervisor.git
synced 2025-01-20 11:35:21 +00:00
virtio-devices: iommu: allow limiting maximum address width in bits
Currently, Cloud Hypervisor does not set a VIRTIO_IOMMU_F_INPUT_RANGE feature bit for the VirtIO IOMMU device, which, according to spec[1], means that the guest may use the whole 64-bit address space for IOMMU purposes: >If the feature is not offered, virtual mappings span over the whole >64-bit address space (start = 0, end = 0xffffffff ffffffff) As far as I am aware, there are currently no host platforms on the market capable of addressing the whole 64-bit address space. For example, I am currently working with a host platform that reports a 39-bit address space for IOMMU purposes: >DMAR: Host address width 39 When running a VFIO pass-through guest on such a platform, the NVIDIA driver in the guest gets DMA mapping failures when working with large data, and this results in Cloud Hypervisor exiting with the following error: >cloud-hypervisor: 1501.220535s: <__iommu> >ERROR:virtio-devices/src/thread_helper.rs:53 -- Error running worker: >HandleEvent(Failed to process request queue : ExternalMapping(Custom >{ kind: Other, error: "failed to map memory for VFIO container, iova >0x7fff00000000, gpa 0x24ce25000, size 0x1000: IommuDmaMap(Error(22))" >})) Passing "--platform iommu_address_width=39" to Cloud Hypervisor built with this change fixes this. [1]: https://docs.oasis-open.org/virtio/virtio/v1.3/csd01/ virtio-v1.3-csd01.html#x1-5420006 Signed-off-by: Nikolay Edigaryev <edigaryev@gmail.com>
This commit is contained in:
parent
74ca38f7a9
commit
27fda753e1
@ -66,6 +66,7 @@ fuzz_target!(|bytes: &[u8]| -> Corpus {
|
|||||||
SeccompAction::Allow,
|
SeccompAction::Allow,
|
||||||
EventFd::new(EFD_NONBLOCK).unwrap(),
|
EventFd::new(EFD_NONBLOCK).unwrap(),
|
||||||
((MEM_SIZE - IOVA_SPACE_SIZE) as u64, (MEM_SIZE - 1) as u64),
|
((MEM_SIZE - IOVA_SPACE_SIZE) as u64, (MEM_SIZE - 1) as u64),
|
||||||
|
64,
|
||||||
None,
|
None,
|
||||||
)
|
)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
@ -906,9 +906,10 @@ impl Iommu {
|
|||||||
seccomp_action: SeccompAction,
|
seccomp_action: SeccompAction,
|
||||||
exit_evt: EventFd,
|
exit_evt: EventFd,
|
||||||
msi_iova_space: (u64, u64),
|
msi_iova_space: (u64, u64),
|
||||||
|
address_width_bits: u8,
|
||||||
state: Option<IommuState>,
|
state: Option<IommuState>,
|
||||||
) -> io::Result<(Self, Arc<IommuMapping>)> {
|
) -> io::Result<(Self, Arc<IommuMapping>)> {
|
||||||
let (avail_features, acked_features, endpoints, domains, paused) =
|
let (mut avail_features, acked_features, endpoints, domains, paused) =
|
||||||
if let Some(state) = state {
|
if let Some(state) = state {
|
||||||
info!("Restoring virtio-iommu {}", id);
|
info!("Restoring virtio-iommu {}", id);
|
||||||
(
|
(
|
||||||
@ -939,12 +940,20 @@ impl Iommu {
|
|||||||
(avail_features, 0, BTreeMap::new(), BTreeMap::new(), false)
|
(avail_features, 0, BTreeMap::new(), BTreeMap::new(), false)
|
||||||
};
|
};
|
||||||
|
|
||||||
let config = VirtioIommuConfig {
|
let mut config = VirtioIommuConfig {
|
||||||
page_size_mask: VIRTIO_IOMMU_PAGE_SIZE_MASK,
|
page_size_mask: VIRTIO_IOMMU_PAGE_SIZE_MASK,
|
||||||
probe_size: PROBE_PROP_SIZE,
|
probe_size: PROBE_PROP_SIZE,
|
||||||
..Default::default()
|
..Default::default()
|
||||||
};
|
};
|
||||||
|
|
||||||
|
if address_width_bits < 64 {
|
||||||
|
avail_features |= 1u64 << VIRTIO_IOMMU_F_INPUT_RANGE;
|
||||||
|
config.input_range = VirtioIommuRange64 {
|
||||||
|
start: 0,
|
||||||
|
end: (1u64 << address_width_bits) - 1,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
let mapping = Arc::new(IommuMapping {
|
let mapping = Arc::new(IommuMapping {
|
||||||
endpoints: Arc::new(RwLock::new(endpoints)),
|
endpoints: Arc::new(RwLock::new(endpoints)),
|
||||||
domains: Arc::new(RwLock::new(domains)),
|
domains: Arc::new(RwLock::new(domains)),
|
||||||
|
@ -100,7 +100,8 @@ use crate::pci_segment::PciSegment;
|
|||||||
use crate::serial_manager::{Error as SerialManagerError, SerialManager};
|
use crate::serial_manager::{Error as SerialManagerError, SerialManager};
|
||||||
use crate::vm_config::{
|
use crate::vm_config::{
|
||||||
ConsoleOutputMode, DeviceConfig, DiskConfig, FsConfig, NetConfig, PmemConfig, UserDeviceConfig,
|
ConsoleOutputMode, DeviceConfig, DiskConfig, FsConfig, NetConfig, PmemConfig, UserDeviceConfig,
|
||||||
VdpaConfig, VhostMode, VmConfig, VsockConfig, DEFAULT_PCI_SEGMENT_APERTURE_WEIGHT,
|
VdpaConfig, VhostMode, VmConfig, VsockConfig, DEFAULT_IOMMU_ADDRESS_WIDTH_BITS,
|
||||||
|
DEFAULT_PCI_SEGMENT_APERTURE_WEIGHT,
|
||||||
};
|
};
|
||||||
use crate::{device_node, GuestRegionMmap, PciDeviceInfo, DEVICE_MANAGER_SNAPSHOT_ID};
|
use crate::{device_node, GuestRegionMmap, PciDeviceInfo, DEVICE_MANAGER_SNAPSHOT_ID};
|
||||||
|
|
||||||
@ -1365,6 +1366,13 @@ impl DeviceManager {
|
|||||||
) -> DeviceManagerResult<()> {
|
) -> DeviceManagerResult<()> {
|
||||||
let iommu_id = String::from(IOMMU_DEVICE_NAME);
|
let iommu_id = String::from(IOMMU_DEVICE_NAME);
|
||||||
|
|
||||||
|
let iommu_address_width_bits =
|
||||||
|
if let Some(ref platform) = self.config.lock().unwrap().platform {
|
||||||
|
platform.iommu_address_width_bits
|
||||||
|
} else {
|
||||||
|
DEFAULT_IOMMU_ADDRESS_WIDTH_BITS
|
||||||
|
};
|
||||||
|
|
||||||
let iommu_device = if self.config.lock().unwrap().iommu {
|
let iommu_device = if self.config.lock().unwrap().iommu {
|
||||||
let (device, mapping) = virtio_devices::Iommu::new(
|
let (device, mapping) = virtio_devices::Iommu::new(
|
||||||
iommu_id.clone(),
|
iommu_id.clone(),
|
||||||
@ -1373,6 +1381,7 @@ impl DeviceManager {
|
|||||||
.try_clone()
|
.try_clone()
|
||||||
.map_err(DeviceManagerError::EventFd)?,
|
.map_err(DeviceManagerError::EventFd)?,
|
||||||
self.get_msi_iova_space(),
|
self.get_msi_iova_space(),
|
||||||
|
iommu_address_width_bits,
|
||||||
state_from_id(self.snapshot.as_ref(), iommu_id.as_str())
|
state_from_id(self.snapshot.as_ref(), iommu_id.as_str())
|
||||||
.map_err(DeviceManagerError::RestoreGetState)?,
|
.map_err(DeviceManagerError::RestoreGetState)?,
|
||||||
)
|
)
|
||||||
|
Loading…
x
Reference in New Issue
Block a user