iommu/vt-d: Make iotlb_sync_map a static property of dmar_domain
author    Lu Baolu <baolu.lu@linux.intel.com>
          Mon, 21 Jul 2025 05:16:57 +0000 (13:16 +0800)
committer Will Deacon <will@kernel.org>
          Mon, 21 Jul 2025 13:25:46 +0000 (14:25 +0100)
Commit 12724ce3fe1a ("iommu/vt-d: Optimize iotlb_sync_map for
non-caching/non-RWBF modes") set iotlb_sync_map dynamically at attach
time. Nothing serializes the flag between the map and attach paths, so
the two can race, for example with concurrent iommufd userspace
operations.
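
For illustration, the racy pattern looked roughly like this (a
simplified sketch, not verbatim driver code; the attach-path write is
the line this patch removes, and the map-path read is shown in the
shape introduced by 12724ce3fe1a):

    /*
     * CPU 0: dmar_domain_attach_device()    CPU 1: map path
     * ----------------------------------    ------------------------------
     * domain->iotlb_sync_map |=
     *     domain_need_iotlb_sync_map(...);
     *                                       if (!domain->iotlb_sync_map)
     *                                               return 0; /* no flush */
     *
     * With no locking, CPU 1 may read the flag before CPU 0's update is
     * visible and skip a flush that the newly attached IOMMU requires.
     */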

Changes to the invalidation policy must precede device attachment, so
that all flushes complete before the hardware starts walking the page
tables; otherwise coherence issues can result.

Make domain->iotlb_sync_map a static property that is set once at domain
allocation. If an IOMMU requires iotlb_sync_map but the domain was not
allocated with it, the attach is rejected. This does not meaningfully
reduce domain sharing: RWBF and shadow page-table caching are legacy
features found only on legacy hardware, and mixed configurations (some
IOMMUs in caching mode, others not) are unlikely in real-world
scenarios.
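
For context, the flag is consumed on the map path roughly as follows (a
sketch assuming the shape introduced by 12724ce3fe1a, not part of this
diff; cache_tag_flush_range_np() is the driver's flush helper for
previously non-present entries):

    static int intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
                                          unsigned long iova, size_t size)
    {
            struct dmar_domain *dmar_domain = to_dmar_domain(domain);

            /* Only RWBF/caching-mode domains need a flush after map. */
            if (dmar_domain->iotlb_sync_map)
                    cache_tag_flush_range_np(dmar_domain, iova,
                                             iova + size - 1);

            return 0;
    }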

Fixes: 12724ce3fe1a ("iommu/vt-d: Optimize iotlb_sync_map for non-caching/non-RWBF modes")
Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Link: https://lore.kernel.org/r/20250721051657.1695788-1-baolu.lu@linux.intel.com
Signed-off-by: Will Deacon <will@kernel.org>
drivers/iommu/intel/iommu.c

index 3e774e3bb7355e8a55bbb9ec8a022be6fd59c647..d1791b50f79115210fa0543756926be0e24e3658 100644
@@ -57,6 +57,8 @@
 static void __init check_tylersburg_isoch(void);
 static int rwbf_quirk;
 
+#define rwbf_required(iommu)   (rwbf_quirk || cap_rwbf((iommu)->cap))
+
 /*
  * set to 1 to panic kernel if can't successfully enable VT-d
  * (used when kernel is launched w/ TXT)
@@ -1780,18 +1782,6 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
                                          __pa(pgd), flags, old);
 }
 
-static bool domain_need_iotlb_sync_map(struct dmar_domain *domain,
-                                      struct intel_iommu *iommu)
-{
-       if (cap_caching_mode(iommu->cap) && intel_domain_is_ss_paging(domain))
-               return true;
-
-       if (rwbf_quirk || cap_rwbf(iommu->cap))
-               return true;
-
-       return false;
-}
-
 static int dmar_domain_attach_device(struct dmar_domain *domain,
                                     struct device *dev)
 {
@@ -1831,8 +1821,6 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
        if (ret)
                goto out_block_translation;
 
-       domain->iotlb_sync_map |= domain_need_iotlb_sync_map(domain, iommu);
-
        return 0;
 
 out_block_translation:
@@ -3352,6 +3340,14 @@ intel_iommu_domain_alloc_first_stage(struct device *dev,
                return ERR_CAST(dmar_domain);
 
        dmar_domain->domain.ops = &intel_fs_paging_domain_ops;
+       /*
+        * iotlb sync for map is only needed for legacy implementations that
+        * explicitly require flushing internal write buffers to ensure memory
+        * coherence.
+        */
+       if (rwbf_required(iommu))
+               dmar_domain->iotlb_sync_map = true;
+
        return &dmar_domain->domain;
 }
 
@@ -3386,6 +3382,14 @@ intel_iommu_domain_alloc_second_stage(struct device *dev,
        if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING)
                dmar_domain->domain.dirty_ops = &intel_dirty_ops;
 
+       /*
+        * Besides the internal write buffer flush, the caching mode used for
+        * legacy nested translation (which utilizes shadowing page tables)
+        * also requires iotlb sync on map.
+        */
+       if (rwbf_required(iommu) || cap_caching_mode(iommu->cap))
+               dmar_domain->iotlb_sync_map = true;
+
        return &dmar_domain->domain;
 }
 
@@ -3446,6 +3450,11 @@ static int paging_domain_compatible_first_stage(struct dmar_domain *dmar_domain,
        if (!cap_fl1gp_support(iommu->cap) &&
            (dmar_domain->domain.pgsize_bitmap & SZ_1G))
                return -EINVAL;
+
+       /* iotlb sync on map requirement */
+       if ((rwbf_required(iommu)) && !dmar_domain->iotlb_sync_map)
+               return -EINVAL;
+
        return 0;
 }
 
@@ -3469,6 +3478,12 @@ paging_domain_compatible_second_stage(struct dmar_domain *dmar_domain,
                return -EINVAL;
        if (!(sslps & BIT(1)) && (dmar_domain->domain.pgsize_bitmap & SZ_1G))
                return -EINVAL;
+
+       /* iotlb sync on map requirement */
+       if ((rwbf_required(iommu) || cap_caching_mode(iommu->cap)) &&
+           !dmar_domain->iotlb_sync_map)
+               return -EINVAL;
+
        return 0;
 }
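
Note the asymmetry between the two stages: a first-stage domain only
needs iotlb_sync_map for the write-buffer-flush cases (rwbf_required()),
while a second-stage domain must also account for caching mode, since
shadow page-table caching applies only to second-stage translation. Each
compatibility check mirrors its allocation-time condition exactly, so an
IOMMU that would have set the flag at allocation refuses to attach to a
domain that lacks it.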