git.ipfire.org Git - thirdparty/linux.git/commitdiff
iommu/vt-d: Fold domain_exit() into intel_iommu_domain_free()
author: Jason Gunthorpe <jgg@nvidia.com>
Mon, 14 Jul 2025 04:50:21 +0000 (12:50 +0800)
committer: Will Deacon <will@kernel.org>
Mon, 14 Jul 2025 10:18:03 +0000 (11:18 +0100)
It has only one caller, no need for two functions.

Correct the WARN_ON() error handling to leak the entire page table if the
HW is still referencing it so we don't UAF during WARN_ON recovery.

Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/2-v3-dbbe6f7e7ae3+124ffe-vtd_prep_jgg@nvidia.com
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Link: https://lore.kernel.org/r/20250714045028.958850-5-baolu.lu@linux.intel.com
Signed-off-by: Will Deacon <will@kernel.org>
drivers/iommu/intel/iommu.c

index bb29c4a635ea4d2fe31b6f7c96b5447ae8047642..8521566ccf9b71dfcc778d2ac8408b96ae91606b 100644 (file)
@@ -1396,23 +1396,6 @@ void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
        }
 }
 
-static void domain_exit(struct dmar_domain *domain)
-{
-       if (domain->pgd) {
-               struct iommu_pages_list freelist =
-                       IOMMU_PAGES_LIST_INIT(freelist);
-
-               domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw), &freelist);
-               iommu_put_pages_list(&freelist);
-       }
-
-       if (WARN_ON(!list_empty(&domain->devices)))
-               return;
-
-       kfree(domain->qi_batch);
-       kfree(domain);
-}
-
 /*
  * For kdump cases, old valid entries may be cached due to the
  * in-flight DMA and copied pgtable, but there is no unmapping
@@ -3406,9 +3389,24 @@ static void intel_iommu_domain_free(struct iommu_domain *domain)
 {
        struct dmar_domain *dmar_domain = to_dmar_domain(domain);
 
-       WARN_ON(dmar_domain->nested_parent &&
-               !list_empty(&dmar_domain->s1_domains));
-       domain_exit(dmar_domain);
+       if (WARN_ON(dmar_domain->nested_parent &&
+                   !list_empty(&dmar_domain->s1_domains)))
+               return;
+
+       if (WARN_ON(!list_empty(&dmar_domain->devices)))
+               return;
+
+       if (dmar_domain->pgd) {
+               struct iommu_pages_list freelist =
+                       IOMMU_PAGES_LIST_INIT(freelist);
+
+               domain_unmap(dmar_domain, 0, DOMAIN_MAX_PFN(dmar_domain->gaw),
+                            &freelist);
+               iommu_put_pages_list(&freelist);
+       }
+
+       kfree(dmar_domain->qi_batch);
+       kfree(dmar_domain);
 }
 
 int paging_domain_compatible(struct iommu_domain *domain, struct device *dev)