]> git.ipfire.org Git - thirdparty/linux.git/commitdiff
iommu/dma: Always allow DMA-FQ when iommupt provides the iommu_domain
authorJason Gunthorpe <jgg@nvidia.com>
Thu, 26 Mar 2026 19:30:32 +0000 (16:30 -0300)
committerJoerg Roedel <joerg.roedel@amd.com>
Wed, 1 Apr 2026 07:50:20 +0000 (09:50 +0200)
iommupt always supports the semantics required for DMA-FQ, when drivers
are converted to use it they automatically get support.

Detect iommupt directly instead of using IOMMU_CAP_DEFERRED_FLUSH and
remove IOMMU_CAP_DEFERRED_FLUSH from converted drivers.

This will also enable DMA-FQ on RISC-V.

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
drivers/iommu/amd/iommu.c
drivers/iommu/dma-iommu.c
drivers/iommu/intel/iommu.c

index f1814fee518239b691918076c01042e211ada30f..2e553e2051aa0e2114dacace90980d9112464470 100644 (file)
@@ -2978,8 +2978,6 @@ static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap)
                return amdr_ivrs_remap_support;
        case IOMMU_CAP_ENFORCE_CACHE_COHERENCY:
                return true;
-       case IOMMU_CAP_DEFERRED_FLUSH:
-               return true;
        case IOMMU_CAP_DIRTY_TRACKING: {
                struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
 
index 5dac64be61bb272a2b93f12f3e6d287b0164ed53..fbed93f8bf0adb0c73b5bb5e961ce04226fe6031 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/device.h>
 #include <linux/dma-direct.h>
 #include <linux/dma-map-ops.h>
+#include <linux/generic_pt/iommu.h>
 #include <linux/gfp.h>
 #include <linux/huge_mm.h>
 #include <linux/iommu.h>
@@ -648,6 +649,15 @@ static void iommu_dma_init_options(struct iommu_dma_options *options,
        }
 }
 
+static bool iommu_domain_supports_fq(struct device *dev,
+                                    struct iommu_domain *domain)
+{
+       /* iommupt always supports DMA-FQ */
+       if (iommupt_from_domain(domain))
+               return true;
+       return device_iommu_capable(dev, IOMMU_CAP_DEFERRED_FLUSH);
+}
+
 /**
  * iommu_dma_init_domain - Initialise a DMA mapping domain
  * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
@@ -706,7 +716,8 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, struct device *dev
 
        /* If the FQ fails we can simply fall back to strict mode */
        if (domain->type == IOMMU_DOMAIN_DMA_FQ &&
-           (!device_iommu_capable(dev, IOMMU_CAP_DEFERRED_FLUSH) || iommu_dma_init_fq(domain)))
+           (!iommu_domain_supports_fq(dev, domain) ||
+            iommu_dma_init_fq(domain)))
                domain->type = IOMMU_DOMAIN_DMA;
 
        return iova_reserve_iommu_regions(dev, domain);
index 5dca8e525c73c890e8acc1882118bc06279363c7..80b183e207e59b400e97768ba01c613fccd6b9cd 100644 (file)
@@ -3212,8 +3212,6 @@ static bool intel_iommu_capable(struct device *dev, enum iommu_cap cap)
 
        switch (cap) {
        case IOMMU_CAP_CACHE_COHERENCY:
-       case IOMMU_CAP_DEFERRED_FLUSH:
-               return true;
        case IOMMU_CAP_PRE_BOOT_PROTECTION:
                return dmar_platform_optin();
        case IOMMU_CAP_ENFORCE_CACHE_COHERENCY: