From 04b1b069f151e793767755f58b51670bff00cbc1 Mon Sep 17 00:00:00 2001
From: Yi Liu
Date: Thu, 22 Jan 2026 09:48:53 +0800
Subject: [PATCH] iommu/vt-d: Flush piotlb for SVM and Nested domain

Besides the paging domains that use FS, SVM and Nested domains need to
use the piotlb invalidation descriptor as well.

Fixes: b33125296b50 ("iommu/vt-d: Create unique domain ops for each stage")
Cc: stable@vger.kernel.org
Signed-off-by: Yi Liu
Reviewed-by: Kevin Tian
Link: https://lore.kernel.org/r/20251223065824.6164-1-yi.l.liu@intel.com
Signed-off-by: Lu Baolu
Signed-off-by: Joerg Roedel
---
 drivers/iommu/intel/cache.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/drivers/iommu/intel/cache.c b/drivers/iommu/intel/cache.c
index 265e7290256b5..385ae5cfb30d4 100644
--- a/drivers/iommu/intel/cache.c
+++ b/drivers/iommu/intel/cache.c
@@ -363,6 +363,13 @@ static void qi_batch_add_pasid_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16
 	qi_batch_increment_index(iommu, batch);
 }
 
+static bool intel_domain_use_piotlb(struct dmar_domain *domain)
+{
+	return domain->domain.type == IOMMU_DOMAIN_SVA ||
+	       domain->domain.type == IOMMU_DOMAIN_NESTED ||
+	       intel_domain_is_fs_paging(domain);
+}
+
 static void cache_tag_flush_iotlb(struct dmar_domain *domain, struct cache_tag *tag,
 				  unsigned long addr, unsigned long pages,
 				  unsigned long mask, int ih)
@@ -370,7 +377,7 @@ static void cache_tag_flush_iotlb(struct dmar_domain *domain, struct cache_tag *
 	struct intel_iommu *iommu = tag->iommu;
 	u64 type = DMA_TLB_PSI_FLUSH;
 
-	if (intel_domain_is_fs_paging(domain)) {
+	if (intel_domain_use_piotlb(domain)) {
 		qi_batch_add_piotlb(iommu, tag->domain_id, tag->pasid, addr,
 				    pages, ih, domain->qi_batch);
 		return;
-- 
2.47.3