iommu/dma: implement DMA_ATTR_MMIO for dma_iova_link().
author Leon Romanovsky <leonro@nvidia.com>
Tue, 9 Sep 2025 13:27:30 +0000 (16:27 +0300)
committer Marek Szyprowski <m.szyprowski@samsung.com>
Thu, 11 Sep 2025 22:08:07 +0000 (00:08 +0200)
This will replace the hacky use of DMA_ATTR_SKIP_CPU_SYNC to avoid
touching the possibly non-KVA MMIO memory.
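
For callers this makes the intent explicit. A minimal sketch of the
call-site change (dev, state, bar_phys, offset and len below are
hypothetical):

	/* Before: abuse DMA_ATTR_SKIP_CPU_SYNC so the DMA core does not
	 * attempt cache maintenance on memory that has no kernel virtual
	 * address.
	 */
	ret = dma_iova_link(dev, &state, bar_phys, offset, len,
			DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);

	/* After: say what the memory actually is; the core then skips the
	 * CPU sync and maps the IOVA with IOMMU_MMIO instead of
	 * IOMMU_CACHE.
	 */
	ret = dma_iova_link(dev, &state, bar_phys, offset, len,
			DMA_TO_DEVICE, DMA_ATTR_MMIO);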

Also correct the incorrect caching attribute for the IOMMU: MMIO
memory should not be cacheable inside the IOMMU mapping, or it can
cause system problems. Set IOMMU_MMIO for DMA_ATTR_MMIO.
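
Spelled out, the resulting base prot selection is (a stand-alone
illustrative sketch, not kernel code; the flag values only mirror the
kernel's IOMMU_* bits symbolically):

	#include <stdbool.h>

	enum { IOMMU_CACHE = 1 << 2, IOMMU_MMIO = 1 << 4 };

	/* DMA_ATTR_MMIO takes precedence: MMIO is never mapped cacheable,
	 * even for a DMA-coherent device.
	 */
	static int base_prot(bool mmio, bool coherent)
	{
		if (mmio)
			return IOMMU_MMIO;
		return coherent ? IOMMU_CACHE : 0;
	}

The direction bits (IOMMU_READ/IOMMU_WRITE) are then OR'ed in as
before.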

Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/17ba63991aeaf8a80d5aca9ba5d028f1daa58f62.1757423202.git.leonro@nvidia.com
drivers/iommu/dma-iommu.c

index ea2ef53bd4fef0d78403cf81b6b4a777884e8a49..e1185ba73e23afd30bc7d4c77e31536fbd49dbfd 100644
@@ -724,7 +724,12 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, struct device *dev
 static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
                     unsigned long attrs)
 {
-       int prot = coherent ? IOMMU_CACHE : 0;
+       int prot;
+
+       if (attrs & DMA_ATTR_MMIO)
+               prot = IOMMU_MMIO;
+       else
+               prot = coherent ? IOMMU_CACHE : 0;
 
        if (attrs & DMA_ATTR_PRIVILEGED)
                prot |= IOMMU_PRIV;
@@ -1838,12 +1843,13 @@ static int __dma_iova_link(struct device *dev, dma_addr_t addr,
                unsigned long attrs)
 {
        bool coherent = dev_is_dma_coherent(dev);
+       int prot = dma_info_to_prot(dir, coherent, attrs);
 
-       if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+       if (!coherent && !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
                arch_sync_dma_for_device(phys, size, dir);
 
        return iommu_map_nosync(iommu_get_dma_domain(dev), addr, phys, size,
-                       dma_info_to_prot(dir, coherent, attrs), GFP_ATOMIC);
+                       prot, GFP_ATOMIC);
 }
 
 static int iommu_dma_iova_bounce_and_link(struct device *dev, dma_addr_t addr,
@@ -1949,9 +1955,13 @@ int dma_iova_link(struct device *dev, struct dma_iova_state *state,
                return -EIO;
 
        if (dev_use_swiotlb(dev, size, dir) &&
-           iova_unaligned(iovad, phys, size))
+           iova_unaligned(iovad, phys, size)) {
+               if (attrs & DMA_ATTR_MMIO)
+                       return -EPERM;
+
                return iommu_dma_iova_link_swiotlb(dev, state, phys, offset,
                                size, dir, attrs);
+       }
 
        return __dma_iova_link(dev, state->addr + offset - iova_start_pad,
                        phys - iova_start_pad,
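
From the caller's side, the last hunk means an MMIO range that is not
aligned to the IOVA granule can no longer be linked on devices that
need swiotlb: bouncing implies CPU copies, which is impossible for
memory without a KVA. A hypothetical caller would see:

	ret = dma_iova_link(dev, &state, bar_phys, offset, len, dir,
			DMA_ATTR_MMIO);
	if (ret == -EPERM) {
		/* Unaligned MMIO would have required a swiotlb bounce;
		 * fall back, e.g. to a granule-aligned window, or skip
		 * P2P for this device.
		 */
	}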