git.ipfire.org Git - thirdparty/linux.git/commitdiff
iommu/vt-d: Split piotlb invalidation into range and all
author Jason Gunthorpe <jgg@nvidia.com>
Thu, 2 Apr 2026 06:57:30 +0000 (14:57 +0800)
committer Joerg Roedel <joerg.roedel@amd.com>
Thu, 2 Apr 2026 07:26:07 +0000 (09:26 +0200)
Currently these call chains are muddled up by using npages=-1, but only
one caller has the possibility to do both options.

Simplify qi_flush_piotlb() to qi_flush_piotlb_all() since all callers
pass npages=-1.

Split qi_batch_add_piotlb() into qi_batch_add_piotlb_all() and related
helpers.

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/1-v1-f175e27af136+11647-iommupt_inv_vtd_jgg@nvidia.com
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
drivers/iommu/intel/cache.c
drivers/iommu/intel/dmar.c
drivers/iommu/intel/iommu.h
drivers/iommu/intel/pasid.c
drivers/iommu/intel/prq.c

index 249ab5886c739f4fd73e14b4e22f123a1ef340f0..3ae0d21ecb9f840fa044b392e2f98bcfa8f2b2f5 100644 (file)
@@ -330,15 +330,17 @@ static void qi_batch_add_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid
        qi_batch_increment_index(iommu, batch);
 }
 
+static void qi_batch_add_piotlb_all(struct intel_iommu *iommu, u16 did,
+                                   u32 pasid, struct qi_batch *batch)
+{
+       qi_desc_piotlb_all(did, pasid, &batch->descs[batch->index]);
+       qi_batch_increment_index(iommu, batch);
+}
+
 static void qi_batch_add_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid,
                                u64 addr, unsigned long npages, bool ih,
                                struct qi_batch *batch)
 {
-       /*
-        * npages == -1 means a PASID-selective invalidation, otherwise,
-        * a positive value for Page-selective-within-PASID invalidation.
-        * 0 is not a valid input.
-        */
        if (!npages)
                return;
 
@@ -378,8 +380,12 @@ static void cache_tag_flush_iotlb(struct dmar_domain *domain, struct cache_tag *
        u64 type = DMA_TLB_PSI_FLUSH;
 
        if (intel_domain_use_piotlb(domain)) {
-               qi_batch_add_piotlb(iommu, tag->domain_id, tag->pasid, addr,
-                                   pages, ih, domain->qi_batch);
+               if (pages == -1)
+                       qi_batch_add_piotlb_all(iommu, tag->domain_id,
+                                               tag->pasid, domain->qi_batch);
+               else
+                       qi_batch_add_piotlb(iommu, tag->domain_id, tag->pasid,
+                                           addr, pages, ih, domain->qi_batch);
                return;
        }
 
index b958f2e6042ba90b528fa0b17848d86b0d83c211..b6015f3dc6db7d41938b605135f0a8259491fa6b 100644 (file)
@@ -1551,23 +1551,12 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
        qi_submit_sync(iommu, &desc, 1, 0);
 }
 
-/* PASID-based IOTLB invalidation */
-void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
-                    unsigned long npages, bool ih)
+/* PASID-selective IOTLB invalidation */
+void qi_flush_piotlb_all(struct intel_iommu *iommu, u16 did, u32 pasid)
 {
-       struct qi_desc desc = {.qw2 = 0, .qw3 = 0};
+       struct qi_desc desc = {};
 
-       /*
-        * npages == -1 means a PASID-selective invalidation, otherwise,
-        * a positive value for Page-selective-within-PASID invalidation.
-        * 0 is not a valid input.
-        */
-       if (WARN_ON(!npages)) {
-               pr_err("Invalid input npages = %ld\n", npages);
-               return;
-       }
-
-       qi_desc_piotlb(did, pasid, addr, npages, ih, &desc);
+       qi_desc_piotlb_all(did, pasid, &desc);
        qi_submit_sync(iommu, &desc, 1, 0);
 }
 
index 10331364c0ef3ba16ab39ff7ebfdd05c548d6711..9b193bbcfd5844920bb81c76bdc013330fafefba 100644 (file)
@@ -1077,31 +1077,29 @@ static inline void qi_desc_dev_iotlb(u16 sid, u16 pfsid, u16 qdep, u64 addr,
        desc->qw3 = 0;
 }
 
+/* PASID-selective IOTLB invalidation */
+static inline void qi_desc_piotlb_all(u16 did, u32 pasid, struct qi_desc *desc)
+{
+       desc->qw0 = QI_EIOTLB_PASID(pasid) | QI_EIOTLB_DID(did) |
+                   QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | QI_EIOTLB_TYPE;
+       desc->qw1 = 0;
+}
+
+/* Page-selective-within-PASID IOTLB invalidation */
 static inline void qi_desc_piotlb(u16 did, u32 pasid, u64 addr,
                                  unsigned long npages, bool ih,
                                  struct qi_desc *desc)
 {
-       if (npages == -1) {
-               desc->qw0 = QI_EIOTLB_PASID(pasid) |
-                               QI_EIOTLB_DID(did) |
-                               QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
-                               QI_EIOTLB_TYPE;
-               desc->qw1 = 0;
-       } else {
-               int mask = ilog2(__roundup_pow_of_two(npages));
-               unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
-
-               if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
-                       addr = ALIGN_DOWN(addr, align);
-
-               desc->qw0 = QI_EIOTLB_PASID(pasid) |
-                               QI_EIOTLB_DID(did) |
-                               QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
-                               QI_EIOTLB_TYPE;
-               desc->qw1 = QI_EIOTLB_ADDR(addr) |
-                               QI_EIOTLB_IH(ih) |
-                               QI_EIOTLB_AM(mask);
-       }
+       int mask = ilog2(__roundup_pow_of_two(npages));
+       unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
+
+       if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
+               addr = ALIGN_DOWN(addr, align);
+
+       desc->qw0 = QI_EIOTLB_PASID(pasid) | QI_EIOTLB_DID(did) |
+                   QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) | QI_EIOTLB_TYPE;
+       desc->qw1 = QI_EIOTLB_ADDR(addr) | QI_EIOTLB_IH(ih) |
+                   QI_EIOTLB_AM(mask);
 }
 
 static inline void qi_desc_dev_iotlb_pasid(u16 sid, u16 pfsid, u32 pasid,
@@ -1163,8 +1161,7 @@ void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
                        u16 qdep, u64 addr, unsigned mask);
 
-void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
-                    unsigned long npages, bool ih);
+void qi_flush_piotlb_all(struct intel_iommu *iommu, u16 did, u32 pasid);
 
 void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
                              u32 pasid, u16 qdep, u64 addr,
index 9d30015b894057d0773e85bd6b1215f7106e5eb4..89541b74ab8ca3c2ebab0b5f60c43743b45c14a8 100644 (file)
@@ -282,7 +282,7 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
        pasid_cache_invalidation_with_pasid(iommu, did, pasid);
 
        if (pgtt == PASID_ENTRY_PGTT_PT || pgtt == PASID_ENTRY_PGTT_FL_ONLY)
-               qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
+               qi_flush_piotlb_all(iommu, did, pasid);
        else
                iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
 
@@ -308,7 +308,7 @@ static void pasid_flush_caches(struct intel_iommu *iommu,
 
        if (cap_caching_mode(iommu->cap)) {
                pasid_cache_invalidation_with_pasid(iommu, did, pasid);
-               qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
+               qi_flush_piotlb_all(iommu, did, pasid);
        } else {
                iommu_flush_write_buffer(iommu);
        }
@@ -342,7 +342,7 @@ static void intel_pasid_flush_present(struct intel_iommu *iommu,
         *      Addr[63:12]=0x7FFFFFFF_FFFFF) to affected functions
         */
        pasid_cache_invalidation_with_pasid(iommu, did, pasid);
-       qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
+       qi_flush_piotlb_all(iommu, did, pasid);
 
        devtlb_invalidation_with_pasid(iommu, dev, pasid);
 }
index 1460b57db1299979076d9dd2a40533387c1992e3..586055e51bb258c69484a66d2bd3d093e61d39c3 100644 (file)
@@ -113,7 +113,7 @@ prq_retry:
                qi_desc_dev_iotlb(sid, info->pfsid, info->ats_qdep, 0,
                                  MAX_AGAW_PFN_WIDTH, &desc[2]);
        } else {
-               qi_desc_piotlb(did, pasid, 0, -1, 0, &desc[1]);
+               qi_desc_piotlb_all(did, pasid, &desc[1]);
                qi_desc_dev_iotlb_pasid(sid, info->pfsid, pasid, info->ats_qdep,
                                        0, MAX_AGAW_PFN_WIDTH, &desc[2]);
        }