}
/* Map a single area into the IOMMU */
-static dma_addr_t gart_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
+static dma_addr_t gart_map_phys(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
unsigned long bus;
- phys_addr_t paddr = page_to_phys(page) + offset;
+
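+ /*
+  * DMA_ATTR_MMIO means @paddr points at MMIO (e.g. PCI P2PDMA)
+  * rather than page-backed RAM; GART aperture mappings assume
+  * normal memory, so such requests are rejected here.
+  */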
+ if (unlikely(attrs & DMA_ATTR_MMIO))
+ return DMA_MAPPING_ERROR;
if (!need_iommu(dev, paddr, size))
return paddr;
[...]
/*
* Free a DMA mapping.
*/
-static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
+static void gart_unmap_phys(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
[...]
for_each_sg(sg, s, nents, i) {
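+ /* gart_map_sg() zeroes dma_length past the last mapped entry,
+  * so an empty slot marks the end of the mapping. */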
if (!s->dma_length || !s->length)
break;
- gart_unmap_page(dev, s->dma_address, s->dma_length, dir, 0);
+ gart_unmap_phys(dev, s->dma_address, s->dma_length, dir, 0);
}
}
[...]
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_addr, unsigned long attrs)
{
- gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0);
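+ /* Drop the GART mapping before freeing the backing memory. */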
+ gart_unmap_phys(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0);
dma_direct_free(dev, size, vaddr, dma_addr, attrs);
}
[...]
static const struct dma_map_ops gart_dma_ops = {
.map_sg = gart_map_sg,
.unmap_sg = gart_unmap_sg,
- .map_page = gart_map_page,
- .unmap_page = gart_unmap_page,
+ .map_phys = gart_map_phys,
+ .unmap_phys = gart_unmap_phys,
.alloc = gart_alloc_coherent,
.free = gart_free_coherent,
.mmap = dma_common_mmap,