 			void *cpu_addr, dma_addr_t dma_addr, size_t size,
 			unsigned long attrs);
-	dma_addr_t (*map_page)(struct device *dev, struct page *page,
-			unsigned long offset, size_t size,
-			enum dma_data_direction dir, unsigned long attrs);
-	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
-			size_t size, enum dma_data_direction dir,
-			unsigned long attrs);
-
 	dma_addr_t (*map_phys)(struct device *dev, phys_addr_t phys,
 			size_t size, enum dma_data_direction dir,
 			unsigned long attrs);
 		addr = iommu_dma_map_phys(dev, phys, size, dir, attrs);
 	else if (ops->map_phys)
 		addr = ops->map_phys(dev, phys, size, dir, attrs);
-	else if (!is_mmio && ops->map_page) {
-		struct page *page = phys_to_page(phys);
-		size_t offset = offset_in_page(phys);
-
-		/*
-		 * The dma_ops API contract for ops->map_page() requires
-		 * kmappable memory.
-		 */
-		addr = ops->map_page(dev, page, offset, size, dir, attrs);
-	}
 	if (!is_mmio)
 		kmsan_handle_dma(phys, size, dir);
 		iommu_dma_unmap_phys(dev, addr, size, dir, attrs);
 	else if (ops->unmap_phys)
 		ops->unmap_phys(dev, addr, size, dir, attrs);
-	else
-		ops->unmap_page(dev, addr, size, dir, attrs);
 	trace_dma_unmap_phys(dev, addr, size, dir, attrs);
 	debug_dma_unmap_phys(dev, addr, size, dir);
 }
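The fallback removed above exists because, for kmappable memory, a (page, offset) pair and a physical address name the same bytes: phys == page_to_phys(page) + offset. Going the other direction, an ops implementation that still thinks in struct page terms can provide ->map_phys() with a thin wrapper. A minimal sketch with hypothetical foo_* names, not code from this patch:

/*
 * Hypothetical wrapper: recover the old (page, offset) arguments from the
 * physical address that ->map_phys() receives.  foo_map_page_internal()
 * stands in for an existing page-based implementation.
 */
static dma_addr_t foo_map_phys(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	struct page *page = phys_to_page(phys);
	unsigned long offset = offset_in_page(phys);

	return foo_map_page_internal(dev, page, offset, size, dir, attrs);
}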
 	if (use_dma_iommu(dev))
 		*dma_handle = iommu_dma_map_phys(dev, phys, size, dir,
 						 DMA_ATTR_SKIP_CPU_SYNC);
-	else if (ops->map_phys)
-		*dma_handle = ops->map_phys(dev, phys, size, dir,
-					    DMA_ATTR_SKIP_CPU_SYNC);
 	else
-		*dma_handle = ops->map_page(dev, page, 0, size, dir,
+		*dma_handle = ops->map_phys(dev, phys, size, dir,
 					    DMA_ATTR_SKIP_CPU_SYNC);
 	if (*dma_handle == DMA_MAPPING_ERROR) {
 		dma_free_contiguous(dev, page, size);
 	else if (ops->unmap_phys)
 		ops->unmap_phys(dev, dma_handle, size, dir,
 				DMA_ATTR_SKIP_CPU_SYNC);
-	else if (ops->unmap_page)
-		ops->unmap_page(dev, dma_handle, size, dir,
-				DMA_ATTR_SKIP_CPU_SYNC);
 	dma_free_contiguous(dev, page, size);
 }
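With ->map_page()/->unmap_page() gone from struct dma_map_ops, a converted ops instance only wires up the phys-based callbacks. A rough sketch, again with hypothetical foo_* names and the other callbacks elided:

static void foo_unmap_phys(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	/* hypothetical teardown of the mapping created by foo_map_phys() */
}

static const struct dma_map_ops foo_dma_ops = {
	/* ...alloc/free, sync and sg callbacks unchanged... */
	.map_phys	= foo_map_phys,
	.unmap_phys	= foo_unmap_phys,
};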