git.ipfire.org Git - thirdparty/linux.git/commitdiff
iommu: Add a capability for flush queue support
author: Robin Murphy <robin.murphy@arm.com>
Thu, 4 May 2023 21:10:55 +0000 (22:10 +0100)
committer: Joerg Roedel <jroedel@suse.de>
Mon, 22 May 2023 15:38:44 +0000 (17:38 +0200)
Passing a special type to domain_alloc to indirectly query whether flush
queues are a worthwhile optimisation with the given driver is a bit
clunky, and looking increasingly anachronistic. Let's put that into an
explicit capability instead.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Tested-by: Jerry Snitselaar <jsnitsel@redhat.com> # amd, intel, smmu-v3
Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/f0086a93dbccb92622e1ace775846d81c1c4b174.1683233867.git.robin.murphy@arm.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/amd/iommu.c
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
drivers/iommu/arm/arm-smmu/arm-smmu.c
drivers/iommu/intel/iommu.c
include/linux/iommu.h

index 4a314647d1f792e3ef74538baedb654b8e8a2f42..9b7bd6bed6648239c842a83ed96fbfeddbb6ef0c 100644 (file)
@@ -2293,6 +2293,8 @@ static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap)
                return amdr_ivrs_remap_support;
        case IOMMU_CAP_ENFORCE_CACHE_COHERENCY:
                return true;
+       case IOMMU_CAP_DEFERRED_FLUSH:
+               return true;
        default:
                break;
        }
index 3fd83fb75722713eaed891db9226ab76061eb898..6d65a7e81df46011a27c6b36eb757d55e7f347e2 100644 (file)
@@ -2008,6 +2008,7 @@ static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
                /* Assume that a coherent TCU implies coherent TBUs */
                return master->smmu->features & ARM_SMMU_FEAT_COHERENCY;
        case IOMMU_CAP_NOEXEC:
+       case IOMMU_CAP_DEFERRED_FLUSH:
                return true;
        default:
                return false;
index 6e0813b26fb6980234be6632ed99210203d08a12..7f4ee365912cf1350ad4a692991d327de7815fdd 100644 (file)
@@ -1325,6 +1325,7 @@ static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
                return cfg->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK ||
                        device_get_dma_attr(dev) == DEV_DMA_COHERENT;
        case IOMMU_CAP_NOEXEC:
+       case IOMMU_CAP_DEFERRED_FLUSH:
                return true;
        default:
                return false;
index b871a6afd803211a419b63b2c19b628df70fa24c..ff923298f8edd2870cf85741e90d2c9459399921 100644 (file)
@@ -4369,6 +4369,7 @@ static bool intel_iommu_capable(struct device *dev, enum iommu_cap cap)
 
        switch (cap) {
        case IOMMU_CAP_CACHE_COHERENCY:
+       case IOMMU_CAP_DEFERRED_FLUSH:
                return true;
        case IOMMU_CAP_PRE_BOOT_PROTECTION:
                return dmar_platform_optin();
index e8c9a7da1060969a0c9ab1f9311e223bf9df3c96..1b7180d6edae3584c8735ff479da1f0975be2729 100644 (file)
@@ -127,6 +127,11 @@ enum iommu_cap {
         * this device.
         */
        IOMMU_CAP_ENFORCE_CACHE_COHERENCY,
+       /*
+        * IOMMU driver does not issue TLB maintenance during .unmap, so can
+        * usefully support the non-strict DMA flush queue.
+        */
+       IOMMU_CAP_DEFERRED_FLUSH,
 };
 
 /* These are the possible reserved region types */