Leverage the ARCH_HAS_DMA_MAP_DIRECT config option for coherent
allocations as well. This bypasses DMA ops for allocations whose
backing memory has been pre-mapped.

Always set the device's bus_dma_limit when memory is pre-mapped. On
some architectures, such as PowerPC, persistent memory can be
converted to regular memory via the daxctl command. Setting the limit
gates coherent allocations to pre-mapped RAM only, via
dma_coherent_ok().
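
For reference, the gating check in kernel/dma/direct.c has roughly
this shape (paraphrased; see the tree for the exact definition):

	static bool dma_coherent_ok(struct device *dev, phys_addr_t phys,
			size_t size)
	{
		dma_addr_t dma_addr = phys_to_dma_direct(dev, phys);

		if (dma_addr == DMA_MAPPING_ERROR)
			return false;
		/* bus_dma_limit, when non-zero, caps the usable range */
		return dma_addr + size - 1 <=
			min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
	}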
Signed-off-by: Gaurav Batra <gbatra@linux.ibm.com>
Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com>
Link: https://patch.msgid.link/20251107161105.85999-1-gbatra@linux.ibm.com
return true;
}
+/* Allocate from the direct map when the device bypasses DMA ops. */
+bool arch_dma_alloc_direct(struct device *dev)
+{
+	return dev->dma_ops_bypass;
+}
+
+/*
+ * Free via the direct path only when the device bypasses DMA ops and
+ * the handle falls within the direct-mapped range.
+ */
+bool arch_dma_free_direct(struct device *dev, dma_addr_t dma_handle)
+{
+	if (!dev->dma_ops_bypass)
+		return false;
+
+	return is_direct_handle(dev, dma_handle);
+}
#endif /* CONFIG_ARCH_HAS_DMA_MAP_DIRECT */
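
is_direct_handle() is not shown in this hunk. Assuming it checks that
the handle falls inside the device's direct-mapped window, a minimal
sketch could look like the following (illustrative only; the in-tree
helper may differ):

	/* Illustrative sketch: the bounds are assumptions, not the patch's code */
	static bool is_direct_handle(struct device *dev, dma_addr_t dma_handle)
	{
		/* assume direct handles start at the device's DMA offset */
		return dma_handle >= dev->archdata.dma_offset &&
		       dma_handle <= dev->bus_dma_limit;
	}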
/*
if (dev_is_pci(dev) && dma_iommu_bypass_supported(dev, mask)) {
/*
- * dma_iommu_bypass_supported() sets dma_max when there is
- * 1:1 mapping but it is somehow limited.
- * ibm,pmemory is one example.
+		 * Fixed (direct) ops will be used for RAM, bounded by
+		 * bus_dma_limit, which is set whenever RAM is pre-mapped.
*/
- dev->dma_ops_bypass = dev->bus_dma_limit == 0;
- if (!dev->dma_ops_bypass)
- dev_warn(dev,
- "iommu: 64-bit OK but direct DMA is limited by %llx\n",
- dev->bus_dma_limit);
- else
- dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
+ dev->dma_ops_bypass = true;
+ dev_info(dev, "iommu: 64-bit OK but direct DMA is limited by %llx\n",
+ dev->bus_dma_limit);
return 1;
}
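
With dma_ops_bypass set and bus_dma_limit non-zero, the generic
streaming paths already decide per mapping whether to go direct,
roughly as follows (paraphrasing dma_go_direct() in
kernel/dma/mapping.c under CONFIG_DMA_OPS_BYPASS):

	if (dev->dma_ops_bypass)
		return min_not_zero(mask, dev->bus_dma_limit) >=
			dma_direct_get_required_mask(dev);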
out_unlock:
mutex_unlock(&dma_win_init_mutex);
- /* If we have persistent memory and the window size is not big enough
- * to directly map both RAM and vPMEM, then we need to set DMA limit.
- */
- if (pmem_present && direct_mapping && len != MAX_PHYSMEM_BITS)
+	/* When memory is pre-mapped, cap bus_dma_limit at the top of RAM */
+ if (direct_mapping)
dev->dev.bus_dma_limit = dev->dev.archdata.dma_offset +
(1ULL << max_ram_len);
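
As a hypothetical example of the computation above, with
archdata.dma_offset = 0x0800000000000000 and max_ram_len = 40 (a
1 TiB direct window):

	bus_dma_limit = 0x0800000000000000 + (1ULL << 40)
	              = 0x0800010000000000

so dma_coherent_ok() rejects any coherent buffer whose bus address
would land beyond the pre-mapped RAM.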
int nents);
bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
int nents);
+bool arch_dma_alloc_direct(struct device *dev);
+bool arch_dma_free_direct(struct device *dev, dma_addr_t dma_handle);
#else
#define arch_dma_map_phys_direct(d, a) (false)
#define arch_dma_unmap_phys_direct(d, a) (false)
#define arch_dma_map_sg_direct(d, s, n) (false)
#define arch_dma_unmap_sg_direct(d, s, n) (false)
+#define arch_dma_alloc_direct(d) (false)
+#define arch_dma_free_direct(d, a) (false)
#endif
#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
/* let the implementation decide on the zone to allocate from: */
flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
- if (dma_alloc_direct(dev, ops)) {
+ if (dma_alloc_direct(dev, ops) || arch_dma_alloc_direct(dev)) {
cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
} else if (use_dma_iommu(dev)) {
cpu_addr = iommu_dma_alloc(dev, size, dma_handle, flag, attrs);
return;
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
- if (dma_alloc_direct(dev, ops))
+ if (dma_alloc_direct(dev, ops) || arch_dma_free_direct(dev, dma_handle))
dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
else if (use_dma_iommu(dev))
iommu_dma_free(dev, size, cpu_addr, dma_handle, attrs);
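
Driver-facing usage is unchanged; the core now routes bypass-capable
devices through the direct allocator. A minimal round trip using only
the standard API (a sketch; dev is any DMA-capable struct device):

	#include <linux/dma-mapping.h>
	#include <linux/sizes.h>

	static int example_coherent_roundtrip(struct device *dev)
	{
		dma_addr_t dma_handle;
		void *buf;

		/* lands in dma_direct_alloc() when arch_dma_alloc_direct()
		 * reports the device bypasses DMA ops */
		buf = dma_alloc_coherent(dev, SZ_4K, &dma_handle, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* ... device DMA to/from buf ... */

		/* freed via dma_direct_free() when arch_dma_free_direct()
		 * recognizes the handle as direct-mapped */
		dma_free_coherent(dev, SZ_4K, buf, dma_handle);
		return 0;
	}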