git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
iommu/vt-d: Cleanup intel_context_flush_present()
authorLu Baolu <baolu.lu@linux.intel.com>
Mon, 10 Mar 2025 02:47:49 +0000 (10:47 +0800)
committerJoerg Roedel <jroedel@suse.de>
Mon, 10 Mar 2025 08:31:05 +0000 (09:31 +0100)
The intel_context_flush_present() is called in places where either the
scalable mode is disabled, or scalable mode is enabled but all PASID
entries are known to be non-present. In these cases, the flush_domains
path within intel_context_flush_present() will never execute. This dead
code is therefore removed.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Tested-by: Zhangfei Gao <zhangfei.gao@linaro.org>
Link: https://lore.kernel.org/r/20250228092631.3425464-7-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/intel/iommu.c
drivers/iommu/intel/iommu.h
drivers/iommu/intel/pasid.c

index 9ed8bdb3e9da6ac945d6e39b72385995fef9a7d4..072223fe6fe765d2563255ad7ccab296b98d96d1 100644 (file)
@@ -1783,7 +1783,7 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8
        context_clear_entry(context);
        __iommu_flush_cache(iommu, context, sizeof(*context));
        spin_unlock(&iommu->lock);
-       intel_context_flush_present(info, context, did, true);
+       intel_context_flush_no_pasid(info, context, did);
 }
 
 int __domain_setup_first_level(struct intel_iommu *iommu,
index 42b4e500989b248f2f0bbed58402dec395896040..c4916886da5a08039d8039d6fe70776031e810e5 100644 (file)
@@ -1286,9 +1286,8 @@ void cache_tag_flush_all(struct dmar_domain *domain);
 void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start,
                              unsigned long end);
 
-void intel_context_flush_present(struct device_domain_info *info,
-                                struct context_entry *context,
-                                u16 did, bool affect_domains);
+void intel_context_flush_no_pasid(struct device_domain_info *info,
+                                 struct context_entry *context, u16 did);
 
 int intel_iommu_enable_prq(struct intel_iommu *iommu);
 int intel_iommu_finish_prq(struct intel_iommu *iommu);
index c2742e256552ac6700ae430d613984bb977342a8..7ee18bb48bd469b82de6a425e3aa4c5a5c1a70f6 100644 (file)
@@ -932,7 +932,7 @@ static void device_pasid_table_teardown(struct device *dev, u8 bus, u8 devfn)
        context_clear_entry(context);
        __iommu_flush_cache(iommu, context, sizeof(*context));
        spin_unlock(&iommu->lock);
-       intel_context_flush_present(info, context, did, false);
+       intel_context_flush_no_pasid(info, context, did);
 }
 
 static int pci_pasid_table_teardown(struct pci_dev *pdev, u16 alias, void *data)
@@ -1119,17 +1119,15 @@ static void __context_flush_dev_iotlb(struct device_domain_info *info)
 
 /*
  * Cache invalidations after change in a context table entry that was present
- * according to the Spec 6.5.3.3 (Guidance to Software for Invalidations). If
- * IOMMU is in scalable mode and all PASID table entries of the device were
- * non-present, set flush_domains to false. Otherwise, true.
+ * according to the Spec 6.5.3.3 (Guidance to Software for Invalidations).
+ * This helper can only be used when IOMMU is working in the legacy mode or
+ * IOMMU is in scalable mode but all PASID table entries of the device are
+ * non-present.
  */
-void intel_context_flush_present(struct device_domain_info *info,
-                                struct context_entry *context,
-                                u16 did, bool flush_domains)
+void intel_context_flush_no_pasid(struct device_domain_info *info,
+                                 struct context_entry *context, u16 did)
 {
        struct intel_iommu *iommu = info->iommu;
-       struct pasid_entry *pte;
-       int i;
 
        /*
         * Device-selective context-cache invalidation. The Domain-ID field
@@ -1152,30 +1150,5 @@ void intel_context_flush_present(struct device_domain_info *info,
                return;
        }
 
-       /*
-        * For scalable mode:
-        * - Domain-selective PASID-cache invalidation to affected domains
-        * - Domain-selective IOTLB invalidation to affected domains
-        * - Global Device-TLB invalidation to affected functions
-        */
-       if (flush_domains) {
-               /*
-                * If the IOMMU is running in scalable mode and there might
-                * be potential PASID translations, the caller should hold
-                * the lock to ensure that context changes and cache flushes
-                * are atomic.
-                */
-               assert_spin_locked(&iommu->lock);
-               for (i = 0; i < info->pasid_table->max_pasid; i++) {
-                       pte = intel_pasid_get_entry(info->dev, i);
-                       if (!pte || !pasid_pte_is_present(pte))
-                               continue;
-
-                       did = pasid_get_domain_id(pte);
-                       qi_flush_pasid_cache(iommu, did, QI_PC_ALL_PASIDS, 0);
-                       iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
-               }
-       }
-
        __context_flush_dev_iotlb(info);
 }