]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
dma-mapping: Introduce DMA require coherency attribute
authorLeon Romanovsky <leonro@nvidia.com>
Mon, 16 Mar 2026 19:06:48 +0000 (21:06 +0200)
committerMarek Szyprowski <m.szyprowski@samsung.com>
Fri, 20 Mar 2026 11:05:36 +0000 (12:05 +0100)
Mapping buffers that carry this attribute require a DMA-coherent system.
This means that they can't take the SWIOTLB path, may overlap with other
mappings in a CPU cache line, and don't perform cache flushing.

Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/20260316-dma-debug-overlap-v3-4-1dde90a7f08b@nvidia.com
Documentation/core-api/dma-attributes.rst
include/linux/dma-mapping.h
include/trace/events/dma.h
kernel/dma/debug.c
kernel/dma/mapping.c

index 48cfe86cc06d75e3bb1feeabf5a6c0c3420351f7..123c8468d58f21af00cd691095ce03a270e64db8 100644 (file)
@@ -163,3 +163,19 @@ data corruption.
 
 All mappings that share a cache line must set this attribute to suppress DMA
 debug warnings about overlapping mappings.
+
+DMA_ATTR_REQUIRE_COHERENT
+-------------------------
+
+DMA mapping requests with the DMA_ATTR_REQUIRE_COHERENT attribute fail on
+any system where SWIOTLB or cache management is required. This should only
+be used to support uAPI designs that require continuous HW DMA
+coherence with userspace processes, for example RDMA and DRM. At a
+minimum the memory being mapped must be userspace memory from
+pin_user_pages() or similar.
+
+Drivers should consider using dma_mmap_pages() instead of this
+interface when building their uAPIs, when possible.
+
+It must never be used in an in-kernel driver that only works with
+kernel memory.
index da44394b3a1a747846fe28e5cf188f96dca1f645..482b919f040f7019a696160502fde91787c3747b 100644 (file)
  */
 #define DMA_ATTR_DEBUGGING_IGNORE_CACHELINES   (1UL << 11)
 
+/*
+ * DMA_ATTR_REQUIRE_COHERENT: Indicates that DMA coherency is required.
+ * Mappings that carry this attribute can't work with SWIOTLB or with
+ * cache flushing.
+ */
+#define DMA_ATTR_REQUIRE_COHERENT      (1UL << 12)
+
 /*
  * A dma_addr_t can hold any valid DMA or bus address for the platform.  It can
  * be given to a device to use as a DMA source or target.  It is specific to a
index 8c64bc0721fe4d81a131d620347cc1524e348515..63597b004424714793e81772262a27ae0a12ea68 100644 (file)
@@ -33,7 +33,8 @@ TRACE_DEFINE_ENUM(DMA_NONE);
                { DMA_ATTR_NO_WARN, "NO_WARN" }, \
                { DMA_ATTR_PRIVILEGED, "PRIVILEGED" }, \
                { DMA_ATTR_MMIO, "MMIO" }, \
-               { DMA_ATTR_DEBUGGING_IGNORE_CACHELINES, "CACHELINES_OVERLAP" })
+               { DMA_ATTR_DEBUGGING_IGNORE_CACHELINES, "CACHELINES_OVERLAP" }, \
+               { DMA_ATTR_REQUIRE_COHERENT, "REQUIRE_COHERENT" })
 
 DECLARE_EVENT_CLASS(dma_map,
        TP_PROTO(struct device *dev, phys_addr_t phys_addr, dma_addr_t dma_addr,
index 83e1cfe05f08d73e68b7ecf6b3d37b0c9e4055a2..0677918f06a80ccea832fe27cc4fe5cce8eff620 100644 (file)
@@ -601,7 +601,8 @@ static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs)
        unsigned long flags;
        int rc;
 
-       entry->is_cache_clean = attrs & DMA_ATTR_DEBUGGING_IGNORE_CACHELINES;
+       entry->is_cache_clean = attrs & (DMA_ATTR_DEBUGGING_IGNORE_CACHELINES |
+                                        DMA_ATTR_REQUIRE_COHERENT);
 
        bucket = get_hash_bucket(entry, &flags);
        hash_bucket_add(bucket, entry);
index 3928a509c44c285e7b45874d876e6e56809c212b..6d3dd0bd3a88627f8b5925baa35b0845b5cb5a81 100644 (file)
@@ -164,6 +164,9 @@ dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
        if (WARN_ON_ONCE(!dev->dma_mask))
                return DMA_MAPPING_ERROR;
 
+       if (!dev_is_dma_coherent(dev) && (attrs & DMA_ATTR_REQUIRE_COHERENT))
+               return DMA_MAPPING_ERROR;
+
        if (dma_map_direct(dev, ops) ||
            (!is_mmio && arch_dma_map_phys_direct(dev, phys + size)))
                addr = dma_direct_map_phys(dev, phys, size, dir, attrs);
@@ -235,6 +238,9 @@ static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
 
        BUG_ON(!valid_dma_direction(dir));
 
+       if (!dev_is_dma_coherent(dev) && (attrs & DMA_ATTR_REQUIRE_COHERENT))
+               return -EOPNOTSUPP;
+
        if (WARN_ON_ONCE(!dev->dma_mask))
                return 0;