iommu/vt-d: Deduplicate cache_tag_flush_all by reusing flush_range
author    Ethan Milon <ethan.milon@eviden.com>
          Mon, 14 Jul 2025 04:50:28 +0000 (12:50 +0800)
committer Will Deacon <will@kernel.org>
          Mon, 14 Jul 2025 10:18:04 +0000 (11:18 +0100)
The logic in cache_tag_flush_all() to iterate over cache tags and issue
TLB invalidations is largely duplicated in cache_tag_flush_range(), with
the only difference being the range parameters.

Extend cache_tag_flush_range() to handle a full address space flush when
called with start = 0 and end = ULONG_MAX. This allows
cache_tag_flush_all() to simply delegate to cache_tag_flush_range().
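
In effect, the two entry points collapse to the following shape (a
simplified sketch of the code in the diff below; the cache-tag walk and
the per-IOMMU qi_batch_flush_descs() batching are elided):

	void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
				   unsigned long end, int ih)
	{
		unsigned long addr, pages, mask;

		if (start == 0 && end == ULONG_MAX) {
			/* Full flush: pages == -1 requests a domain-wide
			 * IOTLB invalidation, and MAX_AGAW_PFN_WIDTH covers
			 * the whole address width for the device-TLB PSI. */
			addr = 0;
			pages = -1;
			mask = MAX_AGAW_PFN_WIDTH;
		} else {
			addr = calculate_psi_aligned_address(start, end, &pages, &mask);
		}
		/* ... walk domain->cache_tags and invalidate as before ... */
	}

	void cache_tag_flush_all(struct dmar_domain *domain)
	{
		cache_tag_flush_range(domain, 0, ULONG_MAX, 0);
	}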

Signed-off-by: Ethan Milon <ethan.milon@eviden.com>
Link: https://lore.kernel.org/r/20250708214821.30967-2-ethan.milon@eviden.com
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Link: https://lore.kernel.org/r/20250714045028.958850-12-baolu.lu@linux.intel.com
Signed-off-by: Will Deacon <will@kernel.org>
drivers/iommu/intel/cache.c
drivers/iommu/intel/trace.h

diff --git a/drivers/iommu/intel/cache.c b/drivers/iommu/intel/cache.c
index ff45c4c9609d2cbccbaf715be05307df602ddb2d..a0111e45576252bd62ce6a1e55bde5a11e4a4983 100644
--- a/drivers/iommu/intel/cache.c
+++ b/drivers/iommu/intel/cache.c
@@ -435,7 +435,13 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
        struct cache_tag *tag;
        unsigned long flags;
 
-       addr = calculate_psi_aligned_address(start, end, &pages, &mask);
+       if (start == 0 && end == ULONG_MAX) {
+               addr = 0;
+               pages = -1;
+               mask = MAX_AGAW_PFN_WIDTH;
+       } else {
+               addr = calculate_psi_aligned_address(start, end, &pages, &mask);
+       }
 
        spin_lock_irqsave(&domain->cache_lock, flags);
        list_for_each_entry(tag, &domain->cache_tags, node) {
@@ -476,31 +482,7 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
  */
 void cache_tag_flush_all(struct dmar_domain *domain)
 {
-       struct intel_iommu *iommu = NULL;
-       struct cache_tag *tag;
-       unsigned long flags;
-
-       spin_lock_irqsave(&domain->cache_lock, flags);
-       list_for_each_entry(tag, &domain->cache_tags, node) {
-               if (iommu && iommu != tag->iommu)
-                       qi_batch_flush_descs(iommu, domain->qi_batch);
-               iommu = tag->iommu;
-
-               switch (tag->type) {
-               case CACHE_TAG_IOTLB:
-               case CACHE_TAG_NESTING_IOTLB:
-                       cache_tag_flush_iotlb(domain, tag, 0, -1, 0, 0);
-                       break;
-               case CACHE_TAG_DEVTLB:
-               case CACHE_TAG_NESTING_DEVTLB:
-                       cache_tag_flush_devtlb_psi(domain, tag, 0, MAX_AGAW_PFN_WIDTH);
-                       break;
-               }
-
-               trace_cache_tag_flush_all(tag);
-       }
-       qi_batch_flush_descs(iommu, domain->qi_batch);
-       spin_unlock_irqrestore(&domain->cache_lock, flags);
+       cache_tag_flush_range(domain, 0, ULONG_MAX, 0);
 }
 
 /*
diff --git a/drivers/iommu/intel/trace.h b/drivers/iommu/intel/trace.h
index 9defdae6ebae8f08a0210690d4f2c3f1981a59d6..6311ba3f1691dd3b714a689b7441abbf8c1e0efb 100644
--- a/drivers/iommu/intel/trace.h
+++ b/drivers/iommu/intel/trace.h
@@ -130,11 +130,6 @@ DEFINE_EVENT(cache_tag_log, cache_tag_unassign,
        TP_ARGS(tag)
 );
 
-DEFINE_EVENT(cache_tag_log, cache_tag_flush_all,
-       TP_PROTO(struct cache_tag *tag),
-       TP_ARGS(tag)
-);
-
 DECLARE_EVENT_CLASS(cache_tag_flush,
        TP_PROTO(struct cache_tag *tag, unsigned long start, unsigned long end,
                 unsigned long addr, unsigned long pages, unsigned long mask),