vmm: Force VIRTIO_F_IOMMU_PLATFORM when running TDX

When running a TDX guest, we need the virtio drivers to use the DMA API
to share specific memory pages with the VMM on the host. The point is to
give the VMM access to the pages backing the buffers pointed to by the
virtqueues.

The way to force the virtio drivers to use the DMA API is to expose the
virtio devices with the VIRTIO_F_IOMMU_PLATFORM feature. This feature
indicates that the device requires some address translation, as it will
not deal directly with physical addresses.
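
For reference, VIRTIO_F_IOMMU_PLATFORM is feature bit 33 in the virtio
specification (later renamed VIRTIO_F_ACCESS_PLATFORM). The sketch below
is only an illustration of that mechanism, not Cloud Hypervisor's actual
device code: a device asked to sit behind an IOMMU advertises the bit,
and a guest driver that negotiates it must obtain addresses through the
DMA API instead of handing the device raw guest physical addresses.

// Minimal sketch (names are illustrative, not Cloud Hypervisor's API).
// VIRTIO_F_IOMMU_PLATFORM / VIRTIO_F_ACCESS_PLATFORM is feature bit 33.
const VIRTIO_F_VERSION_1: u32 = 32;
const VIRTIO_F_IOMMU_PLATFORM: u32 = 33;

fn device_features(iommu: bool) -> u64 {
    // Base feature set every modern virtio device offers.
    let mut avail_features = 1u64 << VIRTIO_F_VERSION_1;
    if iommu {
        // Advertising this bit tells the guest driver that buffer addresses
        // need translation, which forces it onto the DMA API path.
        avail_features |= 1u64 << VIRTIO_F_IOMMU_PLATFORM;
    }
    avail_features
}

fn main() {
    // With iommu forced, bit 33 is set in the advertised features.
    assert_ne!(device_features(true) & (1u64 << VIRTIO_F_IOMMU_PLATFORM), 0);
}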

Cloud Hypervisor takes care of this requirement by adding a generic
parameter called "force_iommu". The value of this parameter is decided
based on the "tdx" feature gate and then passed to the DeviceManager.
The DeviceManager applies it to every virtio device it creates, which
implies setting the VIRTIO_F_IOMMU_PLATFORM feature.

Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
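
Condensed, the flow implemented by the diff below looks roughly like the
following sketch. The names and signatures are simplified stand-ins for
the real DeviceManager and Vm code, not the actual interfaces:

// Condensed sketch of the force_iommu flow; simplified stand-ins only.
struct DiskConfig {
    iommu: bool,
}

struct DeviceManager {
    // Decided once, when the VM is created: true for a TDX guest.
    force_iommu: bool,
}

impl DeviceManager {
    fn iommu_for(&self, cfg: &DiskConfig) -> bool {
        // Every virtio device creation ORs the global flag with the
        // per-device setting, so a TDX guest gets VIRTIO_F_IOMMU_PLATFORM
        // even if the user did not request iommu for that device.
        self.force_iommu | cfg.iommu
    }
}

fn main() {
    // Stands in for: let force_iommu = config.lock().unwrap().tdx.is_some();
    let force_iommu = true;
    let dm = DeviceManager { force_iommu };
    assert!(dm.iommu_for(&DiskConfig { iommu: false }));
}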
Sebastien Boeuf 2021-07-20 09:59:32 +02:00
parent 569be6e706
commit 05f7651cf5
2 changed files with 19 additions and 8 deletions

vmm/src/device_manager.rs

@@ -919,6 +919,9 @@ pub struct DeviceManager {
#[cfg(target_arch = "aarch64")]
// GPIO device for AArch64
gpio_device: Option<Arc<Mutex<devices::legacy::Gpio>>>,
// Flag to force setting the iommu on virtio devices
force_iommu: bool,
}
impl DeviceManager {
@@ -932,6 +935,7 @@ impl DeviceManager {
seccomp_action: SeccompAction,
#[cfg(feature = "acpi")] numa_nodes: NumaNodes,
activate_evt: &EventFd,
force_iommu: bool,
) -> DeviceManagerResult<Arc<Mutex<Self>>> {
let device_tree = Arc::new(Mutex::new(DeviceTree::new()));
@@ -1004,6 +1008,7 @@ impl DeviceManager {
virtio_mem_devices: Vec::new(),
#[cfg(target_arch = "aarch64")]
gpio_device: None,
force_iommu,
};
let device_manager = Arc::new(Mutex::new(device_manager));
@@ -1744,7 +1749,7 @@ impl DeviceManager {
writer,
col,
row,
console_config.iommu,
self.force_iommu | console_config.iommu,
self.seccomp_action.clone(),
)
.map_err(DeviceManagerError::CreateVirtioConsole)?;
@@ -1919,7 +1924,7 @@ impl DeviceManager {
.ok_or(DeviceManagerError::NoDiskPath)?
.clone(),
disk_cfg.readonly,
disk_cfg.iommu,
self.force_iommu | disk_cfg.iommu,
disk_cfg.num_queues,
disk_cfg.queue_size,
self.seccomp_action.clone(),
@@ -2021,7 +2026,7 @@ impl DeviceManager {
None,
Some(net_cfg.mac),
&mut net_cfg.host_mac,
net_cfg.iommu,
self.force_iommu | net_cfg.iommu,
net_cfg.num_queues,
net_cfg.queue_size,
self.seccomp_action.clone(),
@@ -2035,7 +2040,7 @@ impl DeviceManager {
id.clone(),
fds,
Some(net_cfg.mac),
net_cfg.iommu,
self.force_iommu | net_cfg.iommu,
net_cfg.queue_size,
self.seccomp_action.clone(),
net_cfg.rate_limiter_config,
@@ -2051,7 +2056,7 @@ impl DeviceManager {
Some(net_cfg.mask),
Some(net_cfg.mac),
&mut net_cfg.host_mac,
net_cfg.iommu,
self.force_iommu | net_cfg.iommu,
net_cfg.num_queues,
net_cfg.queue_size,
self.seccomp_action.clone(),
@@ -2108,7 +2113,7 @@ impl DeviceManager {
virtio_devices::Rng::new(
id.clone(),
rng_path,
rng_config.iommu,
self.force_iommu | rng_config.iommu,
self.seccomp_action.clone(),
)
.map_err(DeviceManagerError::CreateVirtioRng)?,
@@ -2441,7 +2446,7 @@ impl DeviceManager {
GuestAddress(region_base),
mapping,
mmap_region,
pmem_cfg.iommu,
self.force_iommu | pmem_cfg.iommu,
self.seccomp_action.clone(),
)
.map_err(DeviceManagerError::CreateVirtioPmem)?,
@@ -2507,7 +2512,7 @@ impl DeviceManager {
vsock_cfg.cid,
vsock_cfg.socket.clone(),
backend,
vsock_cfg.iommu,
self.force_iommu | vsock_cfg.iommu,
self.seccomp_action.clone(),
)
.map_err(DeviceManagerError::CreateVirtioVsock)?,

vmm/src/vm.rs

@@ -538,6 +538,11 @@ impl Vm {
let numa_nodes =
Self::create_numa_nodes(config.lock().unwrap().numa.clone(), &memory_manager)?;
#[cfg(feature = "tdx")]
let force_iommu = config.lock().unwrap().tdx.is_some();
#[cfg(not(feature = "tdx"))]
let force_iommu = false;
let device_manager = DeviceManager::new(
vm.clone(),
config.clone(),
@@ -548,6 +553,7 @@ impl Vm {
#[cfg(feature = "acpi")]
numa_nodes.clone(),
&activate_evt,
force_iommu,
)
.map_err(Error::DeviceManager)?;