iommu/vt-d: Add a helper to flush cache for updating present pasid entry
author Yi Liu <yi.l.liu@intel.com>
Fri, 8 Nov 2024 02:13:53 +0000 (10:13 +0800)
committer Joerg Roedel <jroedel@suse.de>
Fri, 8 Nov 2024 13:04:50 +0000 (14:04 +0100)
Generalize the logic for flushing the PASID-related caches upon changes to
bits other than SSADE and P, which require a different invalidation flow
according to the VT-d spec.

No functional change is intended.

Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Yi Liu <yi.l.liu@intel.com>
Link: https://lore.kernel.org/r/20241107122234.7424-3-yi.l.liu@intel.com
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
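
For illustration, a minimal caller sketch (not part of this commit) of the intended pattern: update a pasid table entry field other than SSADE or P under iommu->lock, then invoke the new helper to perform the invalidation sequence. It mirrors the converted caller intel_pasid_setup_page_snoop_control in the second hunk below; the function name example_update_pasid_field and the choice of pasid_set_pgsnp() as the updated field are illustrative assumptions only.

/* Sketch only: modeled on intel_pasid_setup_page_snoop_control below. */
static void example_update_pasid_field(struct intel_iommu *iommu,
                                       struct device *dev, u32 pasid)
{
        struct pasid_entry *pte;
        u16 did;

        spin_lock(&iommu->lock);
        pte = intel_pasid_get_entry(dev, pasid);
        if (!pte || !pasid_pte_is_present(pte)) {
                spin_unlock(&iommu->lock);
                return;
        }

        /* Update a field other than SSADE/P, e.g. the PGSNP bit. */
        pasid_set_pgsnp(pte);
        did = pasid_get_domain_id(pte);
        spin_unlock(&iommu->lock);

        /*
         * Cacheline flush (if !ecap_coherent) plus PASID-cache, IOTLB and
         * Device-TLB invalidation per Table 28 of the VT-d spec.
         */
        intel_pasid_flush_present(iommu, dev, pasid, did, pte);
}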
drivers/iommu/intel/pasid.c

index 31665fb62e1cc870e3168410258a342e27016eab..8d11701c2e76a519dfba1701d9ae39c2e014249c 100644
@@ -287,6 +287,39 @@ static void pasid_flush_caches(struct intel_iommu *iommu,
        }
 }
 
+/*
+ * This function is supposed to be used after the caller updates fields of
+ * a pasid table entry other than the SSADE and P bits. It does the
+ * following:
+ * - Flush the cacheline if needed
+ * - Flush the caches per Table 28 "Guidance to Software for Invalidations"
+ *   of VT-d spec 5.0.
+ */
+static void intel_pasid_flush_present(struct intel_iommu *iommu,
+                                     struct device *dev,
+                                     u32 pasid, u16 did,
+                                     struct pasid_entry *pte)
+{
+       if (!ecap_coherent(iommu->ecap))
+               clflush_cache_range(pte, sizeof(*pte));
+
+       /*
+        * VT-d spec 5.0, Table 28 gives this guidance for cache invalidation:
+        *
+        * - PASID-selective-within-Domain PASID-cache invalidation
+        * - PASID-selective PASID-based IOTLB invalidation
+        * - If (pasid is RID_PASID)
+        *    - Global Device-TLB invalidation to affected functions
+        *   Else
+        *    - PASID-based Device-TLB invalidation (with S=1 and
+        *      Addr[63:12]=0x7FFFFFFF_FFFFF) to affected functions
+        */
+       pasid_cache_invalidation_with_pasid(iommu, did, pasid);
+       qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
+
+       devtlb_invalidation_with_pasid(iommu, dev, pasid);
+}
+
 /*
  * Set up the scalable mode pasid table entry for first only
  * translation type.
@@ -526,24 +559,7 @@ void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
        did = pasid_get_domain_id(pte);
        spin_unlock(&iommu->lock);
 
-       if (!ecap_coherent(iommu->ecap))
-               clflush_cache_range(pte, sizeof(*pte));
-
-       /*
-        * VT-d spec 3.4 table23 states guides for cache invalidation:
-        *
-        * - PASID-selective-within-Domain PASID-cache invalidation
-        * - PASID-selective PASID-based IOTLB invalidation
-        * - If (pasid is RID_PASID)
-        *    - Global Device-TLB invalidation to affected functions
-        *   Else
-        *    - PASID-based Device-TLB invalidation (with S=1 and
-        *      Addr[63:12]=0x7FFFFFFF_FFFFF) to affected functions
-        */
-       pasid_cache_invalidation_with_pasid(iommu, did, pasid);
-       qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
-
-       devtlb_invalidation_with_pasid(iommu, dev, pasid);
+       intel_pasid_flush_present(iommu, dev, pasid, did, pte);
 }
 
 /**