select ARCH_HAS_STRICT_MODULE_RWX
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_HAS_SYNC_DMA_FOR_CPU
+ select ARCH_HAS_BATCHED_DMA_SYNC
select ARCH_HAS_SYSCALL_WRAPPER
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_ZONE_DMA_SET if EXPERT
#define dma_get_cache_alignment cache_line_size
+/*
+ * Complete any batched, deferred DMA cache maintenance.
+ *
+ * The streaming-DMA sync helpers above use the *_nosync cache routines,
+ * which omit the trailing barrier; callers batch several ranges and then
+ * issue this single full-system data synchronization barrier to make all
+ * outstanding cache maintenance visible before the DMA transfer proceeds.
+ */
+static inline void arch_sync_dma_flush(void)
+{
+ dsb(sy);
+}
+
/* Compress a u64 MPIDR value into 32 bits. */
static inline u64 arch_compact_of_hwid(u64 id)
{
{
unsigned long start = (unsigned long)phys_to_virt(paddr);
- dcache_clean_poc(start, start + size);
+ dcache_clean_poc_nosync(start, start + size);
}
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
if (dir == DMA_TO_DEVICE)
return;
- dcache_inval_poc(start, start + size);
+ dcache_inval_poc_nosync(start, start + size);
}
void arch_dma_prep_coherent(struct page *page, size_t size)
return;
phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
- if (!dev_is_dma_coherent(dev))
+ if (!dev_is_dma_coherent(dev)) {
arch_sync_dma_for_cpu(phys, size, dir);
+ arch_sync_dma_flush();
+ }
swiotlb_sync_single_for_cpu(dev, phys, size, dir);
}
phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
swiotlb_sync_single_for_device(dev, phys, size, dir);
- if (!dev_is_dma_coherent(dev))
+ if (!dev_is_dma_coherent(dev)) {
arch_sync_dma_for_device(phys, size, dir);
+ arch_sync_dma_flush();
+ }
}
void iommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
struct scatterlist *sg;
int i;
- if (sg_dma_is_swiotlb(sgl))
+ if (sg_dma_is_swiotlb(sgl)) {
for_each_sg(sgl, sg, nelems, i)
iommu_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
sg->length, dir);
- else if (!dev_is_dma_coherent(dev))
+ } else if (!dev_is_dma_coherent(dev)) {
for_each_sg(sgl, sg, nelems, i)
arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
+ arch_sync_dma_flush();
+ }
}
void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
struct scatterlist *sg;
int i;
- if (sg_dma_is_swiotlb(sgl))
+ if (sg_dma_is_swiotlb(sgl)) {
for_each_sg(sgl, sg, nelems, i)
iommu_dma_sync_single_for_device(dev,
sg_dma_address(sg),
sg->length, dir);
- else if (!dev_is_dma_coherent(dev))
+ } else if (!dev_is_dma_coherent(dev)) {
for_each_sg(sgl, sg, nelems, i)
arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
+ arch_sync_dma_flush();
+ }
}
static phys_addr_t iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
return DMA_MAPPING_ERROR;
}
- if (!coherent && !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
+ if (!coherent && !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO))) {
arch_sync_dma_for_device(phys, size, dir);
+ arch_sync_dma_flush();
+ }
iova = __iommu_dma_map(dev, phys, size, prot, dma_mask);
if (iova == DMA_MAPPING_ERROR && !(attrs & DMA_ATTR_MMIO))
if (WARN_ON(!phys))
return;
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev)) {
arch_sync_dma_for_cpu(phys, size, dir);
+ arch_sync_dma_flush();
+ }
__iommu_dma_unmap(dev, dma_handle, size);
dma_addr_t addr = state->addr + offset;
size_t iova_start_pad = iova_offset(iovad, addr);
+ if (!dev_is_dma_coherent(dev))
+ arch_sync_dma_flush();
return iommu_sync_map(domain, addr - iova_start_pad,
iova_align(iovad, size + iova_start_pad));
}
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
size_t iova_start_pad = iova_offset(iovad, addr);
+ bool need_sync_dma = !dev_is_dma_coherent(dev) &&
+ !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO));
dma_addr_t end = addr + size;
do {
addr += len;
iova_start_pad = 0;
} while (addr < end);
+
+ if (need_sync_dma)
+ arch_sync_dma_flush();
}
static void __iommu_dma_iova_unlink(struct device *dev,
done:
if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
- if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dev_addr))))
+ if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dev_addr)))) {
arch_sync_dma_for_device(phys, size, dir);
- else
+ arch_sync_dma_flush();
+ } else {
xen_dma_sync_for_device(dev, dev_addr, size, dir);
+ }
}
return dev_addr;
}
BUG_ON(dir == DMA_NONE);
if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
- if (pfn_valid(PFN_DOWN(dma_to_phys(hwdev, dev_addr))))
+ if (pfn_valid(PFN_DOWN(dma_to_phys(hwdev, dev_addr)))) {
arch_sync_dma_for_cpu(paddr, size, dir);
- else
+ arch_sync_dma_flush();
+ } else {
xen_dma_sync_for_cpu(hwdev, dev_addr, size, dir);
+ }
}
/* NOTE: We use dev_addr here, not paddr! */
struct io_tlb_pool *pool;
if (!dev_is_dma_coherent(dev)) {
- if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
+ if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr)))) {
arch_sync_dma_for_cpu(paddr, size, dir);
- else
+ arch_sync_dma_flush();
+ } else {
xen_dma_sync_for_cpu(dev, dma_addr, size, dir);
+ }
}
pool = xen_swiotlb_find_pool(dev, dma_addr);
__swiotlb_sync_single_for_device(dev, paddr, size, dir, pool);
if (!dev_is_dma_coherent(dev)) {
- if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
+ if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr)))) {
arch_sync_dma_for_device(paddr, size, dir);
- else
+ arch_sync_dma_flush();
+ } else {
xen_dma_sync_for_device(dev, dma_addr, size, dir);
+ }
}
}
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */
+#ifndef CONFIG_ARCH_HAS_BATCHED_DMA_SYNC
+/*
+ * No-op fallback: architectures that do not select
+ * ARCH_HAS_BATCHED_DMA_SYNC complete their cache maintenance inside
+ * arch_sync_dma_for_{cpu,device}(), so there is nothing left to flush.
+ */
+static inline void arch_sync_dma_flush(void)
+{
+}
+#endif
+
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
void arch_sync_dma_for_cpu_all(void);
#else
config ARCH_HAS_FORCE_DMA_UNENCRYPTED
bool
+#
+# Select if the architecture defers the barrier for streaming-DMA cache
+# maintenance and provides arch_sync_dma_flush() to complete a batch of
+# arch_sync_dma_for_{cpu,device}() calls with a single barrier.
+#
+config ARCH_HAS_BATCHED_DMA_SYNC
+	bool
+
#
# Select this option if the architecture assumes DMA devices are coherent
# by default.
arch_sync_dma_for_device(paddr, sg->length,
dir);
}
+ if (!dev_is_dma_coherent(dev))
+ arch_sync_dma_flush();
}
#endif
swiotlb_sync_single_for_cpu(dev, paddr, sg->length, dir);
}
- if (!dev_is_dma_coherent(dev))
+ if (!dev_is_dma_coherent(dev)) {
+ arch_sync_dma_flush();
arch_sync_dma_for_cpu_all();
+ }
}
/*
swiotlb_sync_single_for_device(dev, paddr, size, dir);
- if (!dev_is_dma_coherent(dev))
+ if (!dev_is_dma_coherent(dev)) {
arch_sync_dma_for_device(paddr, size, dir);
+ arch_sync_dma_flush();
+ }
}
static inline void dma_direct_sync_single_for_cpu(struct device *dev,
if (!dev_is_dma_coherent(dev)) {
arch_sync_dma_for_cpu(paddr, size, dir);
+ arch_sync_dma_flush();
arch_sync_dma_for_cpu_all();
}
}
if (!dev_is_dma_coherent(dev) &&
- !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
+ !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO))) {
arch_sync_dma_for_device(phys, size, dir);
+ arch_sync_dma_flush();
+ }
return dma_addr;
err_overflow:
if (orig_addr == INVALID_PHYS_ADDR)
return;
+ if (dir == DMA_FROM_DEVICE && !dev_is_dma_coherent(dev))
+ arch_sync_dma_flush();
+
/*
* It's valid for tlb_offset to be negative. This can happen when the
* "offset" returned by swiotlb_align_offset() is non-zero, and the
return DMA_MAPPING_ERROR;
}
- if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
arch_sync_dma_for_device(swiotlb_addr, size, dir);
+ arch_sync_dma_flush();
+ }
return dma_addr;
}