git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
dma-mapping: Simplify arch_setup_dma_ops()
Author: Robin Murphy <robin.murphy@arm.com>
Fri, 19 Apr 2024 16:54:46 +0000 (17:54 +0100)
Committer: Joerg Roedel <jroedel@suse.de>
Fri, 26 Apr 2024 10:07:28 +0000 (12:07 +0200)
The dma_base, size and iommu arguments are only used by ARM, and can
now easily be deduced from the device itself, so there's no need to pass
them through the callchain as well.

Acked-by: Rob Herring <robh@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Michael Kelley <mhklinux@outlook.com> # For Hyper-V
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Tested-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/5291c2326eab405b1aa7693aa964e8d3cb7193de.1713523152.git.robin.murphy@arm.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
arch/arc/mm/dma.c
arch/arm/mm/dma-mapping-nommu.c
arch/arm/mm/dma-mapping.c
arch/arm64/mm/dma-mapping.c
arch/mips/mm/dma-noncoherent.c
arch/riscv/mm/dma-noncoherent.c
drivers/acpi/scan.c
drivers/hv/hv_common.c
drivers/of/device.c
include/linux/dma-map-ops.h

index 197707bc7658898843d78bee7c0f02a10f7d26b9..6b85e94f3275995bc2a6f8e8624f08198cf665f9 100644 (file)
@@ -90,8 +90,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 /*
  * Plug in direct dma map ops.
  */
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-                       bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
 {
        /*
         * IOC hardware snoops all DMA traffic keeping the caches consistent
index b94850b579952aefacbd1710bc3c317b4c4b77c9..97db5397c320bd93a8a9148fdfd6f9255a5abfba 100644 (file)
@@ -33,8 +33,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
        }
 }
 
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-                       bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
 {
        if (IS_ENABLED(CONFIG_CPU_V7M)) {
                /*
index f68db05eba29fdaebb7d7cb8d9ec071e4f5f2910..5adf1769eee4245c383883a1b62ebd2e96eabda3 100644 (file)
@@ -1709,11 +1709,15 @@ void arm_iommu_detach_device(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
 
-static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
-                                   bool coherent)
+static void arm_setup_iommu_dma_ops(struct device *dev)
 {
        struct dma_iommu_mapping *mapping;
+       u64 dma_base = 0, size = 1ULL << 32;
 
+       if (dev->dma_range_map) {
+               dma_base = dma_range_map_min(dev->dma_range_map);
+               size = dma_range_map_max(dev->dma_range_map) - dma_base;
+       }
        mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
        if (IS_ERR(mapping)) {
                pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
@@ -1744,8 +1748,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
 
 #else
 
-static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
-                                   bool coherent)
+static void arm_setup_iommu_dma_ops(struct device *dev)
 {
 }
 
@@ -1753,8 +1756,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev) { }
 
 #endif /* CONFIG_ARM_DMA_USE_IOMMU */
 
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-                       bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
 {
        /*
         * Due to legacy code that sets the ->dma_coherent flag from a bus
@@ -1774,7 +1776,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                return;
 
        if (device_iommu_mapped(dev))
-               arm_setup_iommu_dma_ops(dev, dma_base, size, coherent);
+               arm_setup_iommu_dma_ops(dev);
 
        xen_setup_dma_ops(dev);
        dev->archdata.dma_ops_setup = true;
index 313d8938a2f03a2fa5e0eef9e0765ceaf2347dea..0b320a25a471e0ccf583c3d2a44823bf6cf25fe1 100644 (file)
@@ -46,8 +46,7 @@ void arch_teardown_dma_ops(struct device *dev)
 }
 #endif
 
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-                       bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
 {
        int cls = cache_line_size_of_cpu();
 
index 0f3cec663a12cd51498157c390f974213cb5a658..ab4f2a75a7d0116c31628b4c18c7e5f9d754a28e 100644 (file)
@@ -137,8 +137,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 #endif
 
 #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-               bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
 {
        dev->dma_coherent = coherent;
 }
index 843107f834b231a032c6853b5d58382ed6165a37..cb89d7e0ba88ad4cbc7d11614314814b34ef01cd 100644 (file)
@@ -128,8 +128,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
        ALT_CMO_OP(FLUSH, flush_addr, size, riscv_cbom_block_size);
 }
 
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-                       bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
 {
        WARN_TAINT(!coherent && riscv_cbom_block_size > ARCH_DMA_MINALIGN,
                   TAINT_CPU_OUT_OF_SPEC,
index 7c157bf926956be5cabd6db7c708ff87759c7879..b1a88992c1a9385eb26118ad071bc5cb04f8a25c 100644 (file)
@@ -1675,12 +1675,7 @@ int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr,
        if (ret == -EPROBE_DEFER)
                return -EPROBE_DEFER;
 
-       /*
-        * Historically this routine doesn't fail driver probing due to errors
-        * in acpi_iommu_configure_id()
-        */
-
-       arch_setup_dma_ops(dev, 0, U64_MAX, attr == DEV_DMA_COHERENT);
+       arch_setup_dma_ops(dev, attr == DEV_DMA_COHERENT);
 
        return 0;
 }
index dde3f9b6871af9eb68f1cd8931651a0250943fe0..9c452bfbd5719d78bda1b3c47d0e41676b1deef6 100644 (file)
@@ -561,11 +561,7 @@ EXPORT_SYMBOL_GPL(hv_query_ext_cap);
 
 void hv_setup_dma_ops(struct device *dev, bool coherent)
 {
-       /*
-        * Hyper-V does not offer a vIOMMU in the guest
-        * VM, so pass 0/NULL for the IOMMU settings
-        */
-       arch_setup_dma_ops(dev, 0, 0, coherent);
+       arch_setup_dma_ops(dev, coherent);
 }
 EXPORT_SYMBOL_GPL(hv_setup_dma_ops);
 
index 9e7963972fa7503ed7b343cebd4b9af3c4d3528a..312c6336121120f0c700e0e26b27006df7e8bee8 100644 (file)
@@ -95,7 +95,6 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
 {
        const struct bus_dma_region *map = NULL;
        struct device_node *bus_np;
-       u64 dma_start = 0;
        u64 mask, end = 0;
        bool coherent;
        int iommu_ret;
@@ -118,7 +117,6 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
                        return ret == -ENODEV ? 0 : ret;
        } else {
                /* Determine the overall bounds of all DMA regions */
-               dma_start = dma_range_map_min(map);
                end = dma_range_map_max(map);
        }
 
@@ -175,7 +173,7 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
        } else
                dev_dbg(dev, "device is behind an iommu\n");
 
-       arch_setup_dma_ops(dev, dma_start, end - dma_start + 1, coherent);
+       arch_setup_dma_ops(dev, coherent);
 
        if (iommu_ret)
                of_dma_set_restricted_buffer(dev, np);
index 4abc60f04209281bf8af6905c4ec3d3bb6b531b5..ed89e1ce0114de6589e5c39734f9d893c259b566 100644 (file)
@@ -426,11 +426,9 @@ bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
 #endif
 
 #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-               bool coherent);
+void arch_setup_dma_ops(struct device *dev, bool coherent);
 #else
-static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
-               u64 size, bool coherent)
+static inline void arch_setup_dma_ops(struct device *dev, bool coherent)
 {
 }
 #endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */