From: Jason Gunthorpe
Date: Thu, 2 Apr 2026 06:57:30 +0000 (+0800)
Subject: iommu/vt-d: Split piotlb invalidation into range and all
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=b6fd468a052e43fa4e3a00837fbf44a05cc1ca11;p=thirdparty%2Flinux.git

iommu/vt-d: Split piotlb invalidation into range and all

Currently these call chains are muddled up by using npages=-1, but only
one caller has the possibility to do both options.

Simplify qi_flush_piotlb() to qi_flush_piotlb_all() since all callers
pass npages=-1.

Split qi_batch_add_piotlb() into qi_batch_add_piotlb_all() and related
helpers.

Signed-off-by: Jason Gunthorpe
Link: https://lore.kernel.org/r/1-v1-f175e27af136+11647-iommupt_inv_vtd_jgg@nvidia.com
Signed-off-by: Lu Baolu
Signed-off-by: Joerg Roedel
---

diff --git a/drivers/iommu/intel/cache.c b/drivers/iommu/intel/cache.c
index 249ab5886c739..3ae0d21ecb9f8 100644
--- a/drivers/iommu/intel/cache.c
+++ b/drivers/iommu/intel/cache.c
@@ -330,15 +330,17 @@ static void qi_batch_add_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid
 	qi_batch_increment_index(iommu, batch);
 }
 
+static void qi_batch_add_piotlb_all(struct intel_iommu *iommu, u16 did,
+				    u32 pasid, struct qi_batch *batch)
+{
+	qi_desc_piotlb_all(did, pasid, &batch->descs[batch->index]);
+	qi_batch_increment_index(iommu, batch);
+}
+
 static void qi_batch_add_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid,
 				u64 addr, unsigned long npages, bool ih,
 				struct qi_batch *batch)
 {
-	/*
-	 * npages == -1 means a PASID-selective invalidation, otherwise,
-	 * a positive value for Page-selective-within-PASID invalidation.
-	 * 0 is not a valid input.
-	 */
 	if (!npages)
 		return;
 
@@ -378,8 +380,12 @@ static void cache_tag_flush_iotlb(struct dmar_domain *domain, struct cache_tag *
 	u64 type = DMA_TLB_PSI_FLUSH;
 
 	if (intel_domain_use_piotlb(domain)) {
-		qi_batch_add_piotlb(iommu, tag->domain_id, tag->pasid, addr,
-				    pages, ih, domain->qi_batch);
+		if (pages == -1)
+			qi_batch_add_piotlb_all(iommu, tag->domain_id,
+						tag->pasid, domain->qi_batch);
+		else
+			qi_batch_add_piotlb(iommu, tag->domain_id, tag->pasid,
+					    addr, pages, ih, domain->qi_batch);
 		return;
 	}
 
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index b958f2e6042ba..b6015f3dc6db7 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -1551,23 +1551,12 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
 	qi_submit_sync(iommu, &desc, 1, 0);
 }
 
-/* PASID-based IOTLB invalidation */
-void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
-		     unsigned long npages, bool ih)
+/* PASID-selective IOTLB invalidation */
+void qi_flush_piotlb_all(struct intel_iommu *iommu, u16 did, u32 pasid)
 {
-	struct qi_desc desc = {.qw2 = 0, .qw3 = 0};
+	struct qi_desc desc = {};
 
-	/*
-	 * npages == -1 means a PASID-selective invalidation, otherwise,
-	 * a positive value for Page-selective-within-PASID invalidation.
-	 * 0 is not a valid input.
-	 */
-	if (WARN_ON(!npages)) {
-		pr_err("Invalid input npages = %ld\n", npages);
-		return;
-	}
-
-	qi_desc_piotlb(did, pasid, addr, npages, ih, &desc);
+	qi_desc_piotlb_all(did, pasid, &desc);
 	qi_submit_sync(iommu, &desc, 1, 0);
 }
 
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index 10331364c0ef3..9b193bbcfd584 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -1077,31 +1077,29 @@ static inline void qi_desc_dev_iotlb(u16 sid, u16 pfsid, u16 qdep, u64 addr,
 	desc->qw3 = 0;
 }
 
+/* PASID-selective IOTLB invalidation */
+static inline void qi_desc_piotlb_all(u16 did, u32 pasid, struct qi_desc *desc)
+{
+	desc->qw0 = QI_EIOTLB_PASID(pasid) | QI_EIOTLB_DID(did) |
+		    QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | QI_EIOTLB_TYPE;
+	desc->qw1 = 0;
+}
+
+/* Page-selective-within-PASID IOTLB invalidation */
 static inline void qi_desc_piotlb(u16 did, u32 pasid, u64 addr,
 				  unsigned long npages, bool ih,
 				  struct qi_desc *desc)
 {
-	if (npages == -1) {
-		desc->qw0 = QI_EIOTLB_PASID(pasid) |
-			    QI_EIOTLB_DID(did) |
-			    QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
-			    QI_EIOTLB_TYPE;
-		desc->qw1 = 0;
-	} else {
-		int mask = ilog2(__roundup_pow_of_two(npages));
-		unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
-
-		if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
-			addr = ALIGN_DOWN(addr, align);
-
-		desc->qw0 = QI_EIOTLB_PASID(pasid) |
-			    QI_EIOTLB_DID(did) |
-			    QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
-			    QI_EIOTLB_TYPE;
-		desc->qw1 = QI_EIOTLB_ADDR(addr) |
-			    QI_EIOTLB_IH(ih) |
-			    QI_EIOTLB_AM(mask);
-	}
+	int mask = ilog2(__roundup_pow_of_two(npages));
+	unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
+
+	if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
+		addr = ALIGN_DOWN(addr, align);
+
+	desc->qw0 = QI_EIOTLB_PASID(pasid) | QI_EIOTLB_DID(did) |
+		    QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) | QI_EIOTLB_TYPE;
+	desc->qw1 = QI_EIOTLB_ADDR(addr) | QI_EIOTLB_IH(ih) |
+		    QI_EIOTLB_AM(mask);
 }
 
 static inline void qi_desc_dev_iotlb_pasid(u16 sid, u16 pfsid, u32 pasid,
@@ -1163,8 +1161,7 @@ void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
 			u16 qdep, u64 addr, unsigned mask);
 
-void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
-		     unsigned long npages, bool ih);
+void qi_flush_piotlb_all(struct intel_iommu *iommu, u16 did, u32 pasid);
 
 void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
 			      u32 pasid, u16 qdep, u64 addr,
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 9d30015b89405..89541b74ab8ca 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -282,7 +282,7 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
 	pasid_cache_invalidation_with_pasid(iommu, did, pasid);
 
 	if (pgtt == PASID_ENTRY_PGTT_PT || pgtt == PASID_ENTRY_PGTT_FL_ONLY)
-		qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
+		qi_flush_piotlb_all(iommu, did, pasid);
 	else
 		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
 
@@ -308,7 +308,7 @@ static void pasid_flush_caches(struct intel_iommu *iommu,
 
 	if (cap_caching_mode(iommu->cap)) {
 		pasid_cache_invalidation_with_pasid(iommu, did, pasid);
-		qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
+		qi_flush_piotlb_all(iommu, did, pasid);
 	} else {
 		iommu_flush_write_buffer(iommu);
 	}
@@ -342,7 +342,7 @@ static void intel_pasid_flush_present(struct intel_iommu *iommu,
 	 * Addr[63:12]=0x7FFFFFFF_FFFFF) to affected functions
 	 */
 	pasid_cache_invalidation_with_pasid(iommu, did, pasid);
-	qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
+	qi_flush_piotlb_all(iommu, did, pasid);
 
 	devtlb_invalidation_with_pasid(iommu, dev, pasid);
 }
diff --git a/drivers/iommu/intel/prq.c b/drivers/iommu/intel/prq.c
index 1460b57db1299..586055e51bb25 100644
--- a/drivers/iommu/intel/prq.c
+++ b/drivers/iommu/intel/prq.c
@@ -113,7 +113,7 @@ prq_retry:
 		qi_desc_dev_iotlb(sid, info->pfsid, info->ats_qdep, 0,
 				  MAX_AGAW_PFN_WIDTH, &desc[2]);
 	} else {
-		qi_desc_piotlb(did, pasid, 0, -1, 0, &desc[1]);
+		qi_desc_piotlb_all(did, pasid, &desc[1]);
 		qi_desc_dev_iotlb_pasid(sid, info->pfsid, pasid,
 					info->ats_qdep, 0,
 					MAX_AGAW_PFN_WIDTH, &desc[2]);
 	}