kfree(buf);
}
-static void dma_cache_maint_page(struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction dir,
+static void dma_cache_maint_page(phys_addr_t phys, size_t size,
+ enum dma_data_direction dir,
void (*op)(const void *, size_t, int))
{
- unsigned long pfn;
+ unsigned long offset = offset_in_page(phys);
+ unsigned long pfn = __phys_to_pfn(phys);
size_t left = size;
- pfn = page_to_pfn(page) + offset / PAGE_SIZE;
- offset %= PAGE_SIZE;
-
/*
* A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	do {
size_t len = left;
void *vaddr;
- page = pfn_to_page(pfn);
-
- if (PageHighMem(page)) {
+ phys = __pfn_to_phys(pfn);
+ if (PhysHighMem(phys)) {
if (len + offset > PAGE_SIZE)
len = PAGE_SIZE - offset;
if (cache_is_vipt_nonaliasing()) {
- vaddr = kmap_atomic(page);
+ vaddr = kmap_atomic_pfn(pfn);
op(vaddr + offset, len, dir);
kunmap_atomic(vaddr);
} else {
+ struct page *page = phys_to_page(phys);
+
vaddr = kmap_high_get(page);
if (vaddr) {
				op(vaddr + offset, len, dir);
				kunmap_high(page);
			}
}
} else {
- vaddr = page_address(page) + offset;
+ phys += offset;
+ vaddr = phys_to_virt(phys);
op(vaddr, len, dir);
}
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}
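
The loop above is the heart of the conversion: the range is now described purely by a physical address, and the walk clamps each step to a page boundary so highmem pages can still be kmapped one at a time (the lowmem branch may take the whole remainder in one call through the linear map). A minimal standalone sketch of that per-page chunking, with PAGE_SHIFT, maint_range() and op() as stand-ins rather than kernel code:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/*
 * Walk a physical range page by page: clamp each chunk to the end of
 * the current page, zero the offset after the first page, advance pfn.
 */
static void maint_range(uint64_t phys, size_t size,
			void (*op)(uint64_t chunk, size_t len))
{
	size_t offset = phys & (PAGE_SIZE - 1);	/* offset_in_page(phys) */
	uint64_t pfn = phys >> PAGE_SHIFT;	/* __phys_to_pfn(phys) */
	size_t left = size;

	do {
		size_t len = left;

		if (len + offset > PAGE_SIZE)
			len = PAGE_SIZE - offset;
		op((pfn << PAGE_SHIFT) + offset, len);
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}

static void show(uint64_t chunk, size_t len)
{
	printf("op(%#llx, %zu)\n", (unsigned long long)chunk, len);
}

int main(void)
{
	/* 10000 bytes starting 100 bytes into a page: 3996 + 4096 + 1908. */
	maint_range((1ULL << 20) + 100, 10000, show);
	return 0;
}
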
/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly.
* Use the driver DMA support - see dma-mapping.h (dma_sync_*)
*/
-static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
- phys_addr_t paddr;
-
- dma_cache_maint_page(page, off, size, dir, dmac_map_area);
+ dma_cache_maint_page(paddr, size, dir, dmac_map_area);
- paddr = page_to_phys(page) + off;
if (dir == DMA_FROM_DEVICE) {
outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}
-static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
- phys_addr_t paddr = page_to_phys(page) + off;
-
/* FIXME: non-speculating: not required */
/* in any case, don't bother invalidating if DMA to device */
if (dir != DMA_TO_DEVICE) {
outer_inv_range(paddr, paddr + size);
- dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
+ dma_cache_maint_page(paddr, size, dir, dmac_unmap_area);
}
	/*
	 * Mark the D-cache clean for these pages to avoid extra flushing.
	 */
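
Taken together, the two hooks now implement the usual streaming-DMA contract directly on a phys_addr_t: arch_sync_dma_for_device() runs before the device touches the buffer, arch_sync_dma_for_cpu() before the CPU reads DMA'd data. A hedged sketch of the ordering for a device-to-memory transfer (rx_dma_example() and its parameters are illustrative, not part of this patch; real drivers reach these hooks through dma_map_single()/dma_sync_single_*(), as the comment above says):

/* Hypothetical receive path: the device writes the buffer, so both
 * fences use DMA_FROM_DEVICE. */
static void rx_dma_example(phys_addr_t paddr, size_t len)
{
	/* Before DMA: invalidate so dirty lines cannot be evicted on
	 * top of the data the device is about to write. */
	arch_sync_dma_for_device(paddr, len, DMA_FROM_DEVICE);

	/* ... device DMAs into [paddr, paddr + len) ... */

	/* After DMA: invalidate again so the CPU reads the device's
	 * bytes rather than stale cache contents. */
	arch_sync_dma_for_cpu(paddr, len, DMA_FROM_DEVICE);
}
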
unsigned int len = PAGE_ALIGN(s->offset + s->length);
if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+ arch_sync_dma_for_device(sg_phys(s), s->length, dir);
prot = __dma_info_to_prot(dir, attrs);
__iommu_remove_mapping(dev, sg_dma_address(s),
sg_dma_len(s));
if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- __dma_page_dev_to_cpu(sg_page(s), s->offset,
- s->length, dir);
+ arch_sync_dma_for_cpu(sg_phys(s), s->length, dir);
}
}
return;
for_each_sg(sg, s, nents, i)
- __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
+ arch_sync_dma_for_cpu(sg_phys(s), s->length, dir);
}
return;
for_each_sg(sg, s, nents, i)
- __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+ arch_sync_dma_for_device(sg_phys(s), s->length, dir);
}
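
The scatterlist conversions above are mechanical because sg_phys() already folds the intra-page offset into the returned address, so each sg_page(s)/s->offset pair collapses into a single argument. For reference, sg_phys() is defined in include/linux/scatterlist.h essentially as:

static inline dma_addr_t sg_phys(struct scatterlist *sg)
{
	return page_to_phys(sg_page(sg)) + sg->offset;
}
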
/**
int ret, prot, len = PAGE_ALIGN(size + offset);
if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- __dma_page_cpu_to_dev(page, offset, size, dir);
+		arch_sync_dma_for_device(page_to_phys(page) + offset, size, dir);
dma_addr = __alloc_iova(mapping, len);
if (dma_addr == DMA_MAPPING_ERROR)
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t iova = handle & PAGE_MASK;
- struct page *page;
int offset = handle & ~PAGE_MASK;
int len = PAGE_ALIGN(size + offset);
return;
if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
- page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
- __dma_page_dev_to_cpu(page, offset, size, dir);
+ phys_addr_t phys = iommu_iova_to_phys(mapping->domain, iova);
+
+ arch_sync_dma_for_cpu(phys + offset, size, dir);
}
iommu_unmap(mapping->domain, iova, len);
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t iova = handle & PAGE_MASK;
- struct page *page;
unsigned int offset = handle & ~PAGE_MASK;
+ phys_addr_t phys;
if (dev->dma_coherent || !iova)
return;
- page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
- __dma_page_dev_to_cpu(page, offset, size, dir);
+ phys = iommu_iova_to_phys(mapping->domain, iova);
+ arch_sync_dma_for_cpu(phys + offset, size, dir);
}
static void arm_iommu_sync_single_for_device(struct device *dev,
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t iova = handle & PAGE_MASK;
- struct page *page;
unsigned int offset = handle & ~PAGE_MASK;
+ phys_addr_t phys;
if (dev->dma_coherent || !iova)
return;
- page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
- __dma_page_cpu_to_dev(page, offset, size, dir);
+ phys = iommu_iova_to_phys(mapping->domain, iova);
+ arch_sync_dma_for_device(phys + offset, size, dir);
}
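
All three IOMMU sync/unmap paths above now share one pattern: split the handle into its page-aligned IOVA and sub-page offset, translate the IOVA back to a physical address, and re-apply the offset. Expressed as a helper (illustrative only; the example_ prefix marks it as not being part of the patch):

/* The handle's low bits carry the sub-page offset; the rest is an
 * IOVA page the IOMMU can translate back to a physical address. */
static phys_addr_t example_handle_to_phys(struct dma_iommu_mapping *mapping,
					  dma_addr_t handle)
{
	dma_addr_t iova = handle & PAGE_MASK;
	unsigned int offset = handle & ~PAGE_MASK;

	return iommu_iova_to_phys(mapping->domain, iova) + offset;
}
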
static const struct dma_map_ops iommu_ops = {
set_dma_ops(dev, NULL);
}
-void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
- enum dma_data_direction dir)
-{
- __dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
- size, dir);
-}
-
-void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
- enum dma_data_direction dir)
-{
- __dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
- size, dir);
-}
-
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs)
{