iommu/dma: Resurrect the "forcedac" option
author		Robin Murphy <robin.murphy@arm.com>
		Fri, 5 Mar 2021 16:32:34 +0000 (16:32 +0000)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
		Fri, 14 May 2021 08:50:00 +0000 (10:50 +0200)
[ Upstream commit 3542dcb15cef66c0b9e6c3b33168eb657e0d9520 ]

In converting intel-iommu over to the common IOMMU DMA ops, it quietly
lost the functionality of its "forcedac" option. Since this is a handy
thing both for testing and for performance optimisation on certain
platforms, reimplement it under the common IOMMU parameter namespace.

For the sake of fixing the inadvertent breakage of the Intel-specific
parameter, remove the dmar_forcedac remnants and hook it up as an alias
while documenting the transition to the new common parameter.

Fixes: c588072bba6b ("iommu/vt-d: Convert intel iommu driver to the iommu ops")
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Acked-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: John Garry <john.garry@huawei.com>
Link: https://lore.kernel.org/r/7eece8e0ea7bfbe2cd0e30789e0d46df573af9b0.1614961776.git.robin.murphy@arm.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Sasha Levin <sashal@kernel.org>
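
[Editor's note: for reference, based on the documentation hunk and the intel_iommu_setup() change below, the new parameter and its deprecated alias would be passed on the kernel command line roughly as follows — a usage sketch, not text from the commit itself:]

	iommu.forcedac=1	allocate IOVAs straight from the full usable range,
				forcing Dual Address Cycle on capable PCI cards
	intel_iommu=forcedac	deprecated Intel-specific spelling; now prints a
				warning and sets iommu.forcedac instead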
Documentation/admin-guide/kernel-parameters.txt
drivers/iommu/dma-iommu.c
drivers/iommu/intel/iommu.c
include/linux/dma-iommu.h

index a10b545c2070a54455c3e7181cc43135dc68f611..b537a960889582dadd7d5cd3412a5252d8ddfa6e 100644 (file)
                        bypassed by not enabling DMAR with this option. In
                        this case, gfx device will use physical address for
                        DMA.
-               forcedac [X86-64]
-                       With this option iommu will not optimize to look
-                       for io virtual address below 32-bit forcing dual
-                       address cycle on pci bus for cards supporting greater
-                       than 32-bit addressing. The default is to look
-                       for translation below 32-bit and if not available
-                       then look in the higher range.
                strict [Default Off]
                        With this option on every unmap_single operation will
                        result in a hardware IOTLB flush operation as opposed
                nobypass        [PPC/POWERNV]
                        Disable IOMMU bypass, using IOMMU for PCI devices.
 
+       iommu.forcedac= [ARM64, X86] Control IOVA allocation for PCI devices.
+                       Format: { "0" | "1" }
+                       0 - Try to allocate a 32-bit DMA address first, before
+                         falling back to the full range if needed.
+                       1 - Allocate directly from the full usable range,
+                         forcing Dual Address Cycle for PCI cards supporting
+                         greater than 32-bit addressing.
+
        iommu.strict=   [ARM64] Configure TLB invalidation behaviour
                        Format: { "0" | "1" }
                        0 - Lazy mode.
index 07e7b2f3ba27f65aa4621efbc8eaa05c40394f38..9d4a29796fe4671bc53cef196dedfeaff03eecb1 100644 (file)
@@ -52,6 +52,17 @@ struct iommu_dma_cookie {
 };
 
 static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
+bool iommu_dma_forcedac __read_mostly;
+
+static int __init iommu_dma_forcedac_setup(char *str)
+{
+       int ret = kstrtobool(str, &iommu_dma_forcedac);
+
+       if (!ret && iommu_dma_forcedac)
+               pr_info("Forcing DAC for PCI devices\n");
+       return ret;
+}
+early_param("iommu.forcedac", iommu_dma_forcedac_setup);
 
 void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
                struct iommu_domain *domain)
@@ -456,7 +467,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
                dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);
 
        /* Try to get PCI devices a SAC address */
-       if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
+       if (dma_limit > DMA_BIT_MASK(32) && !iommu_dma_forcedac && dev_is_pci(dev))
                iova = alloc_iova_fast(iovad, iova_len,
                                       DMA_BIT_MASK(32) >> shift, false);
 
index e49a79322c53fe2efb3497b884ae400d325efbf4..005daf50107df4ba4ea2fb5fe73feccc0692032c 100644 (file)
@@ -350,7 +350,6 @@ int intel_iommu_enabled = 0;
 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
 
 static int dmar_map_gfx = 1;
-static int dmar_forcedac;
 static int intel_iommu_strict;
 static int intel_iommu_superpage = 1;
 static int iommu_identity_mapping;
@@ -441,8 +440,8 @@ static int __init intel_iommu_setup(char *str)
                        dmar_map_gfx = 0;
                        pr_info("Disable GFX device mapping\n");
                } else if (!strncmp(str, "forcedac", 8)) {
-                       pr_info("Forcing DAC for PCI devices\n");
-                       dmar_forcedac = 1;
+                       pr_warn("intel_iommu=forcedac deprecated; use iommu.forcedac instead\n");
+                       iommu_dma_forcedac = true;
                } else if (!strncmp(str, "strict", 6)) {
                        pr_info("Disable batched IOTLB flush\n");
                        intel_iommu_strict = 1;
index 706b68d1359beda8d0bebae3eb718e2bf418c26a..13d1f4c14d7ba98e90f3999cc8622fe9ad0356d0 100644 (file)
@@ -40,6 +40,8 @@ void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
 void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
                struct iommu_domain *domain);
 
+extern bool iommu_dma_forcedac;
+
 #else /* CONFIG_IOMMU_DMA */
 
 struct iommu_domain;