mm/hmm: properly take MMIO path
author    Leon Romanovsky <leonro@nvidia.com>
          Tue, 9 Sep 2025 13:27:41 +0000 (16:27 +0300)
committer Marek Szyprowski <m.szyprowski@samsung.com>
          Thu, 11 Sep 2025 22:18:21 +0000 (00:18 +0200)
When a peer-to-peer transaction traverses the host bridge, the IOMMU
mapping needs the IOMMU_MMIO flag, together with skipping the CPU
cache sync.

The latter was already handled by passing the DMA_ATTR_SKIP_CPU_SYNC
flag, but the IOMMU flag was missed, due to the assumption that such
memory can be treated as regular memory.

Reuse the newly introduced DMA_ATTR_MMIO attribute to properly take
the MMIO path.

Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/998251caf3f9d1a3f6f8205f1f494c707fb4d8fa.1757423202.git.leonro@nvidia.com
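
As an illustration of the map-side pattern this patch applies, below is a
minimal sketch; the helper name sketch_map_p2p_pfn() and its surrounding code
are hypothetical, and only the attribute selection and the dma_map_phys() call
mirror the hunks that follow.

#include <linux/dma-mapping.h>
#include <linux/pci-p2pdma.h>

/*
 * Hypothetical helper: map one physical range for DMA, picking the
 * attributes the same way hmm_dma_map_pfn() does after this patch.
 */
static dma_addr_t sketch_map_p2p_pfn(struct device *dev, phys_addr_t paddr,
				     size_t size,
				     enum pci_p2pdma_map_type type)
{
	unsigned long attrs = 0;

	/*
	 * P2P memory reached through the host bridge must be mapped as
	 * MMIO by the IOMMU and must skip CPU cache maintenance;
	 * DMA_ATTR_MMIO covers both, unlike DMA_ATTR_SKIP_CPU_SYNC,
	 * which only covers the latter.
	 */
	if (type == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE)
		attrs |= DMA_ATTR_MMIO;

	return dma_map_phys(dev, paddr, size, DMA_BIDIRECTIONAL, attrs);
}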
mm/hmm.c

index 015ab243f08136e44bc0b67c91789166c301d251..6556c0e074ba80ef07f62cdcd5eece800c7f3183 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -746,7 +746,7 @@ dma_addr_t hmm_dma_map_pfn(struct device *dev, struct hmm_dma_map *map,
        case PCI_P2PDMA_MAP_NONE:
                break;
        case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
-               attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+               attrs |= DMA_ATTR_MMIO;
                pfns[idx] |= HMM_PFN_P2PDMA;
                break;
        case PCI_P2PDMA_MAP_BUS_ADDR:
@@ -776,7 +776,7 @@ dma_addr_t hmm_dma_map_pfn(struct device *dev, struct hmm_dma_map *map,
                        goto error;
 
                dma_addr = dma_map_phys(dev, paddr, map->dma_entry_size,
-                                       DMA_BIDIRECTIONAL, 0);
+                                       DMA_BIDIRECTIONAL, attrs);
                if (dma_mapping_error(dev, dma_addr))
                        goto error;
 
@@ -811,16 +811,17 @@ bool hmm_dma_unmap_pfn(struct device *dev, struct hmm_dma_map *map, size_t idx)
        if ((pfns[idx] & valid_dma) != valid_dma)
                return false;
 
+       if (pfns[idx] & HMM_PFN_P2PDMA)
+               attrs |= DMA_ATTR_MMIO;
+
        if (pfns[idx] & HMM_PFN_P2PDMA_BUS)
                ; /* no need to unmap bus address P2P mappings */
-       else if (dma_use_iova(state)) {
-               if (pfns[idx] & HMM_PFN_P2PDMA)
-                       attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+       else if (dma_use_iova(state))
                dma_iova_unlink(dev, state, idx * map->dma_entry_size,
                                map->dma_entry_size, DMA_BIDIRECTIONAL, attrs);
-       } else if (dma_need_unmap(dev))
+       else if (dma_need_unmap(dev))
                dma_unmap_phys(dev, dma_addrs[idx], map->dma_entry_size,
-                              DMA_BIDIRECTIONAL, 0);
+                              DMA_BIDIRECTIONAL, attrs);
 
        pfns[idx] &=
                ~(HMM_PFN_DMA_MAPPED | HMM_PFN_P2PDMA | HMM_PFN_P2PDMA_BUS);
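
Note the symmetry: the attributes passed to dma_unmap_phys()/dma_iova_unlink()
must match those used at map time, and since attrs is not stored anywhere, the
unmap path recomputes it from the HMM_PFN_P2PDMA flag recorded in the pfns
array. A compressed sketch of that recomputation, reusing the field names from
the diff above:

	/* Rebuild the map-time attrs from the recorded per-pfn flags. */
	unsigned long attrs = 0;

	if (pfns[idx] & HMM_PFN_P2PDMA)
		attrs |= DMA_ATTR_MMIO;

	dma_unmap_phys(dev, dma_addrs[idx], map->dma_entry_size,
		       DMA_BIDIRECTIONAL, attrs);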