mirror of https://github.com/cloud-hypervisor/cloud-hypervisor.git
synced 2024-11-04 19:11:11 +00:00
virtio-devices: Attach and detach endpoints from domain properly
Properly detach a device from a domain if that device is already attached to another domain when an attach request arrives (following section 5.13.6.3.2 of the virtio-iommu spec). This resolves a reboot issue with nested virtualization.

Signed-off-by: Andrew Carp <acarp@crusoeenergy.com>
This commit is contained in:
parent 5668f02eb6
commit 3fa02b34ca
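The attach-time rule the message cites (virtio-iommu spec §5.13.6.3.2: an endpoint already attached to a different domain must be detached from it first) is easiest to see in isolation. Below is a minimal, self-contained sketch of that bookkeeping, using plain HashMaps and a hypothetical `Model` type in place of the device's RwLock-protected `IommuMapping`, and omitting the external DMA handling the real code performs:

```rust
use std::collections::{HashMap, HashSet};

/// Hypothetical stand-in for the device's endpoint/domain bookkeeping.
/// The real IommuMapping uses RwLock-protected maps and also triggers
/// external DMA unmapping on detach; both are omitted here.
#[derive(Default)]
struct Model {
    endpoints: HashMap<u32, u32>,        // endpoint -> domain
    domains: HashMap<u32, HashSet<u32>>, // domain -> attached endpoints
}

impl Model {
    fn detach(&mut self, endpoint: u32, domain: u32) {
        self.endpoints.remove(&endpoint);
        // Once the last endpoint leaves, the domain itself is removed.
        let now_empty = self.domains.get_mut(&domain).map_or(false, |eps| {
            eps.remove(&endpoint);
            eps.is_empty()
        });
        if now_empty {
            self.domains.remove(&domain);
        }
    }

    fn attach(&mut self, endpoint: u32, domain: u32) {
        // Section 5.13.6.3.2: an endpoint already attached to a different
        // domain must first be detached from that domain.
        let old = self.endpoints.get(&endpoint).copied();
        if let Some(old) = old {
            if old != domain {
                self.detach(endpoint, old);
            }
        }
        self.endpoints.insert(endpoint, domain);
        self.domains.entry(domain).or_default().insert(endpoint);
    }
}

fn main() {
    let mut m = Model::default();
    m.attach(1, 10);
    m.attach(2, 10);
    m.attach(1, 20); // implicitly detaches endpoint 1 from domain 10
    assert_eq!(m.endpoints[&1], 20);
    m.attach(2, 20); // domain 10 is now empty and gets dropped
    assert!(!m.domains.contains_key(&10));
}
```

Per the commit message, it is this reattach path that a guest reboot under nested virtualization exercises: without the implicit detach, stale endpoint-to-domain state survives the reboot.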
```diff
@@ -407,6 +407,15 @@ impl Request {
                     let bypass =
                         (req.flags & VIRTIO_IOMMU_ATTACH_F_BYPASS) == VIRTIO_IOMMU_ATTACH_F_BYPASS;
 
+                    let mut old_domain_id = domain_id;
+                    if let Some(&id) = mapping.endpoints.read().unwrap().get(&endpoint) {
+                        old_domain_id = id;
+                    }
+
+                    if old_domain_id != domain_id {
+                        detach_endpoint_from_domain(endpoint, old_domain_id, mapping, ext_mapping)?;
+                    }
+
                     // Add endpoint associated with specific domain
                     mapping
                         .endpoints
```
```diff
@@ -452,22 +461,7 @@ impl Request {
                     let endpoint = req.endpoint;
 
-                    // Remove endpoint associated with specific domain
-                    mapping.endpoints.write().unwrap().remove(&endpoint);
-
-                    // After all endpoints have been successfully detached from a
-                    // domain, the domain can be removed. This means we must remove
-                    // the mappings associated with this domain.
-                    if mapping
-                        .endpoints
-                        .write()
-                        .unwrap()
-                        .iter()
-                        .filter(|(_, &d)| d == domain_id)
-                        .count()
-                        == 0
-                    {
-                        mapping.domains.write().unwrap().remove(&domain_id);
-                    }
+                    detach_endpoint_from_domain(endpoint, domain_id, mapping, ext_mapping)?;
                 }
                 VIRTIO_IOMMU_T_MAP => {
                     if desc_size_left != size_of::<VirtioIommuReqMap>() {
```
```diff
@@ -504,7 +498,9 @@ impl Request {
                         .map(|(&e, _)| e)
                         .collect();
 
                     // Trigger external mapping if necessary.
+                    // For viommu all endpoints receive their own VFIO container, as a result
+                    // each endpoint within the domain needs to be separately mapped, as the
+                    // mapping is done on a per-container level, not a per-domain level
                     for endpoint in endpoints {
                         if let Some(ext_map) = ext_mapping.get(&endpoint) {
                             let size = req.virt_end - req.virt_start + 1;
```
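The comment added in this hunk is the crux: with viommu, every endpoint gets its own VFIO container, so a single MAP request on a domain has to be replayed once per attached endpoint. A sketch of that per-container fan-out, assuming a hypothetical `DmaMap` trait in place of `ExternalDmaMapping` (this diff only confirms unmap's `(iova, size)` shape, so the `map` signature below is an assumption):

```rust
use std::collections::BTreeMap;
use std::sync::Arc;

/// Hypothetical stand-in for ExternalDmaMapping; the real trait's exact
/// signature is not shown in this hunk.
trait DmaMap {
    fn map(&self, iova: u64, gpa: u64, size: u64) -> std::io::Result<()>;
}

/// Replay one domain-level MAP request onto every attached endpoint,
/// since the mapping is per-container (per-endpoint), not per-domain.
fn replay_map_per_endpoint(
    endpoints: &[u32],
    ext_mapping: &BTreeMap<u32, Arc<dyn DmaMap>>,
    virt_start: u64,
    virt_end: u64,
    phys_start: u64,
) -> std::io::Result<()> {
    let size = virt_end - virt_start + 1;
    for endpoint in endpoints {
        // Endpoints without an external mapping (no VFIO container) are
        // simply skipped, matching the if-let in the hunk above.
        if let Some(ext_map) = ext_mapping.get(endpoint) {
            ext_map.map(virt_start, phys_start, size)?;
        }
    }
    Ok(())
}
```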
```diff
@@ -653,6 +649,41 @@ impl Request {
     }
 }
 
+fn detach_endpoint_from_domain(
+    endpoint: u32,
+    domain_id: u32,
+    mapping: &Arc<IommuMapping>,
+    ext_mapping: &BTreeMap<u32, Arc<dyn ExternalDmaMapping>>,
+) -> result::Result<(), Error> {
+    // Remove endpoint associated with specific domain
+    mapping.endpoints.write().unwrap().remove(&endpoint);
+
+    // Trigger external unmapping for the endpoint if necessary.
+    if let Some(domain_mappings) = &mapping.domains.read().unwrap().get(&domain_id) {
+        if let Some(ext_map) = ext_mapping.get(&endpoint) {
+            for (virt_start, addr_map) in &domain_mappings.mappings {
+                ext_map
+                    .unmap(*virt_start, addr_map.size)
+                    .map_err(Error::ExternalUnmapping)?;
+            }
+        }
+    }
+
+    if mapping
+        .endpoints
+        .write()
+        .unwrap()
+        .iter()
+        .filter(|(_, &d)| d == domain_id)
+        .count()
+        == 0
+    {
+        mapping.domains.write().unwrap().remove(&domain_id);
+    }
+
+    Ok(())
+}
+
 struct IommuEpollHandler {
     mem: GuestMemoryAtomic<GuestMemoryMmap>,
     request_queue: Queue,
```