1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * linux/arch/arm/mm/dma-mapping.c
4 *
5 * Copyright (C) 2000-2004 Russell King
6 *
7 * DMA uncached mapping support.
8 */
9 #include <linux/module.h>
10 #include <linux/mm.h>
11 #include <linux/genalloc.h>
12 #include <linux/gfp.h>
13 #include <linux/errno.h>
14 #include <linux/list.h>
15 #include <linux/init.h>
16 #include <linux/device.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/dma-noncoherent.h>
19 #include <linux/dma-contiguous.h>
20 #include <linux/highmem.h>
21 #include <linux/memblock.h>
22 #include <linux/slab.h>
23 #include <linux/iommu.h>
24 #include <linux/io.h>
25 #include <linux/vmalloc.h>
26 #include <linux/sizes.h>
27 #include <linux/cma.h>
28
29 #include <asm/memory.h>
30 #include <asm/highmem.h>
31 #include <asm/cacheflush.h>
32 #include <asm/tlbflush.h>
33 #include <asm/mach/arch.h>
34 #include <asm/dma-iommu.h>
35 #include <asm/mach/map.h>
36 #include <asm/system_info.h>
37 #include <asm/dma-contiguous.h>
38
39 #include "dma.h"
40 #include "mm.h"
41
42 struct arm_dma_alloc_args {
43 struct device *dev;
44 size_t size;
45 gfp_t gfp;
46 pgprot_t prot;
47 const void *caller;
48 bool want_vaddr;
49 int coherent_flag;
50 };
51
52 struct arm_dma_free_args {
53 struct device *dev;
54 size_t size;
55 void *cpu_addr;
56 struct page *page;
57 bool want_vaddr;
58 };
59
60 #define NORMAL 0
61 #define COHERENT 1
62
63 struct arm_dma_allocator {
64 void *(*alloc)(struct arm_dma_alloc_args *args,
65 struct page **ret_page);
66 void (*free)(struct arm_dma_free_args *args);
67 };
68
69 struct arm_dma_buffer {
70 struct list_head list;
71 void *virt;
72 struct arm_dma_allocator *allocator;
73 };
74
75 static LIST_HEAD(arm_dma_bufs);
76 static DEFINE_SPINLOCK(arm_dma_bufs_lock);
77
78 static struct arm_dma_buffer *arm_dma_buffer_find(void *virt)
79 {
80 struct arm_dma_buffer *buf, *found = NULL;
81 unsigned long flags;
82
83 spin_lock_irqsave(&arm_dma_bufs_lock, flags);
84 list_for_each_entry(buf, &arm_dma_bufs, list) {
85 if (buf->virt == virt) {
86 list_del(&buf->list);
87 found = buf;
88 break;
89 }
90 }
91 spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
92 return found;
93 }
94
95 /*
96 * The DMA API is built upon the notion of "buffer ownership". A buffer
97 * is either exclusively owned by the CPU (and therefore may be accessed
98 * by it) or exclusively owned by the DMA device. These helper functions
99 * represent the transitions between these two ownership states.
100 *
101 * Note, however, that on later ARMs, this notion does not work due to
102 * speculative prefetches. We model our approach on the assumption that
103 * the CPU does do speculative prefetches, which means we clean caches
104 * before transfers and delay cache invalidation until transfer completion.
105 *
106 */
107 static void __dma_page_cpu_to_dev(struct page *, unsigned long,
108 size_t, enum dma_data_direction);
109 static void __dma_page_dev_to_cpu(struct page *, unsigned long,
110 size_t, enum dma_data_direction);
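/*
 * Illustrative sketch, assuming a hypothetical driver context (the function
 * and parameter names below are not part of this file): how a buffer is
 * handed back and forth under the ownership model described above.
 * dma_map_page()/dma_unmap_page() are the generic DMA API entry points
 * that reach arm_dma_map_page()/arm_dma_unmap_page() on this platform.
 */
static inline int example_rx_transfer(struct device *dev, struct page *page,
				      size_t len)
{
	dma_addr_t handle;

	/* CPU -> device: caches are cleaned, the device now owns the buffer */
	handle = dma_map_page(dev, page, 0, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program the device with "handle" and wait for completion ... */

	/* device -> CPU: caches are invalidated, the CPU owns the buffer again */
	dma_unmap_page(dev, handle, len, DMA_FROM_DEVICE);
	return 0;
}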
111
112 /**
113 * arm_dma_map_page - map a portion of a page for streaming DMA
114 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
115 * @page: page that buffer resides in
116 * @offset: offset into page for start of buffer
117 * @size: size of buffer to map
118 * @dir: DMA transfer direction
119 *
120 * Ensure that any data held in the cache is appropriately discarded
121 * or written back.
122 *
123 * The device owns this memory once this call has completed. The CPU
124 * can regain ownership by calling dma_unmap_page().
125 */
126 static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
127 unsigned long offset, size_t size, enum dma_data_direction dir,
128 unsigned long attrs)
129 {
130 if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
131 __dma_page_cpu_to_dev(page, offset, size, dir);
132 return pfn_to_dma(dev, page_to_pfn(page)) + offset;
133 }
134
135 static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
136 unsigned long offset, size_t size, enum dma_data_direction dir,
137 unsigned long attrs)
138 {
139 return pfn_to_dma(dev, page_to_pfn(page)) + offset;
140 }
141
142 /**
143 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
144 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
145 * @handle: DMA address of buffer
146 * @size: size of buffer (same as passed to dma_map_page)
147 * @dir: DMA transfer direction (same as passed to dma_map_page)
148 *
149 * Unmap a page streaming mode DMA translation. The handle and size
150 * must match what was provided in the previous dma_map_page() call.
151 * All other usages are undefined.
152 *
153 * After this call, reads by the CPU to the buffer are guaranteed to see
154 * whatever the device wrote there.
155 */
156 static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
157 size_t size, enum dma_data_direction dir, unsigned long attrs)
158 {
159 if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
160 __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
161 handle & ~PAGE_MASK, size, dir);
162 }
163
164 static void arm_dma_sync_single_for_cpu(struct device *dev,
165 dma_addr_t handle, size_t size, enum dma_data_direction dir)
166 {
167 unsigned int offset = handle & (PAGE_SIZE - 1);
168 struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
169 __dma_page_dev_to_cpu(page, offset, size, dir);
170 }
171
172 static void arm_dma_sync_single_for_device(struct device *dev,
173 dma_addr_t handle, size_t size, enum dma_data_direction dir)
174 {
175 unsigned int offset = handle & (PAGE_SIZE - 1);
176 struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
177 __dma_page_cpu_to_dev(page, offset, size, dir);
178 }
179
180 const struct dma_map_ops arm_dma_ops = {
181 .alloc = arm_dma_alloc,
182 .free = arm_dma_free,
183 .mmap = arm_dma_mmap,
184 .get_sgtable = arm_dma_get_sgtable,
185 .map_page = arm_dma_map_page,
186 .unmap_page = arm_dma_unmap_page,
187 .map_sg = arm_dma_map_sg,
188 .unmap_sg = arm_dma_unmap_sg,
189 .map_resource = dma_direct_map_resource,
190 .sync_single_for_cpu = arm_dma_sync_single_for_cpu,
191 .sync_single_for_device = arm_dma_sync_single_for_device,
192 .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
193 .sync_sg_for_device = arm_dma_sync_sg_for_device,
194 .dma_supported = arm_dma_supported,
195 };
196 EXPORT_SYMBOL(arm_dma_ops);
197
198 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
199 dma_addr_t *handle, gfp_t gfp, unsigned long attrs);
200 static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
201 dma_addr_t handle, unsigned long attrs);
202 static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
203 void *cpu_addr, dma_addr_t dma_addr, size_t size,
204 unsigned long attrs);
205
206 const struct dma_map_ops arm_coherent_dma_ops = {
207 .alloc = arm_coherent_dma_alloc,
208 .free = arm_coherent_dma_free,
209 .mmap = arm_coherent_dma_mmap,
210 .get_sgtable = arm_dma_get_sgtable,
211 .map_page = arm_coherent_dma_map_page,
212 .map_sg = arm_dma_map_sg,
213 .map_resource = dma_direct_map_resource,
214 .dma_supported = arm_dma_supported,
215 };
216 EXPORT_SYMBOL(arm_coherent_dma_ops);
217
218 static int __dma_supported(struct device *dev, u64 mask, bool warn)
219 {
220 unsigned long max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
221
222 /*
223 * Translate the device's DMA mask to a PFN limit. This
224 * PFN number includes the page which we can DMA to.
225 */
226 if (dma_to_pfn(dev, mask) < max_dma_pfn) {
227 if (warn)
228 dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
229 mask,
230 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
231 max_dma_pfn + 1);
232 return 0;
233 }
234
235 return 1;
236 }
237
238 static u64 get_coherent_dma_mask(struct device *dev)
239 {
240 u64 mask = (u64)DMA_BIT_MASK(32);
241
242 if (dev) {
243 mask = dev->coherent_dma_mask;
244
245 /*
246 * Sanity check the DMA mask - it must be non-zero, and
247 * must be able to be satisfied by a DMA allocation.
248 */
249 if (mask == 0) {
250 dev_warn(dev, "coherent DMA mask is unset\n");
251 return 0;
252 }
253
254 if (!__dma_supported(dev, mask, true))
255 return 0;
256 }
257
258 return mask;
259 }
260
261 static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
262 {
263 /*
264 * Ensure that the allocated pages are zeroed, and that any data
265 * lurking in the kernel direct-mapped region is invalidated.
266 */
267 if (PageHighMem(page)) {
268 phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
269 phys_addr_t end = base + size;
270 while (size > 0) {
271 void *ptr = kmap_atomic(page);
272 memset(ptr, 0, PAGE_SIZE);
273 if (coherent_flag != COHERENT)
274 dmac_flush_range(ptr, ptr + PAGE_SIZE);
275 kunmap_atomic(ptr);
276 page++;
277 size -= PAGE_SIZE;
278 }
279 if (coherent_flag != COHERENT)
280 outer_flush_range(base, end);
281 } else {
282 void *ptr = page_address(page);
283 memset(ptr, 0, size);
284 if (coherent_flag != COHERENT) {
285 dmac_flush_range(ptr, ptr + size);
286 outer_flush_range(__pa(ptr), __pa(ptr) + size);
287 }
288 }
289 }
290
291 /*
292 * Allocate a DMA buffer for 'dev' of size 'size' using the
293 * specified gfp mask. Note that 'size' must be page aligned.
294 */
295 static struct page *__dma_alloc_buffer(struct device *dev, size_t size,
296 gfp_t gfp, int coherent_flag)
297 {
298 unsigned long order = get_order(size);
299 struct page *page, *p, *e;
300
301 page = alloc_pages(gfp, order);
302 if (!page)
303 return NULL;
304
305 /*
306 * Now split the huge page and free the excess pages
307 */
308 split_page(page, order);
309 for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
310 __free_page(p);
311
312 __dma_clear_buffer(page, size, coherent_flag);
313
314 return page;
315 }
316
317 /*
318 * Free a DMA buffer. 'size' must be page aligned.
319 */
320 static void __dma_free_buffer(struct page *page, size_t size)
321 {
322 struct page *e = page + (size >> PAGE_SHIFT);
323
324 while (page < e) {
325 __free_page(page);
326 page++;
327 }
328 }
329
330 static void *__alloc_from_contiguous(struct device *dev, size_t size,
331 pgprot_t prot, struct page **ret_page,
332 const void *caller, bool want_vaddr,
333 int coherent_flag, gfp_t gfp);
334
335 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
336 pgprot_t prot, struct page **ret_page,
337 const void *caller, bool want_vaddr);
338
339 static void *
340 __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
341 const void *caller)
342 {
343 /*
344 * DMA allocation can be mapped to user space, so let's
345 * set VM_USERMAP flags too.
346 */
347 return dma_common_contiguous_remap(page, size,
348 VM_ARM_DMA_CONSISTENT | VM_USERMAP,
349 prot, caller);
350 }
351
352 static void __dma_free_remap(void *cpu_addr, size_t size)
353 {
354 dma_common_free_remap(cpu_addr, size,
355 VM_ARM_DMA_CONSISTENT | VM_USERMAP);
356 }
357
358 #define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
359 static struct gen_pool *atomic_pool __ro_after_init;
360
361 static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;
362
363 static int __init early_coherent_pool(char *p)
364 {
365 atomic_pool_size = memparse(p, &p);
366 return 0;
367 }
368 early_param("coherent_pool", early_coherent_pool);
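/*
 * For illustration: the pool size above can be overridden on the kernel
 * command line, e.g. "coherent_pool=4M" reserves a 4 MiB atomic pool
 * instead of the default DEFAULT_DMA_COHERENT_POOL_SIZE (256 KiB).
 */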
369
370 /*
371 * Initialise the coherent pool for atomic allocations.
372 */
373 static int __init atomic_pool_init(void)
374 {
375 pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
376 gfp_t gfp = GFP_KERNEL | GFP_DMA;
377 struct page *page;
378 void *ptr;
379
380 atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
381 if (!atomic_pool)
382 goto out;
383 /*
384 * The atomic pool is only used for non-coherent allocations
385 * so we must pass NORMAL for coherent_flag.
386 */
387 if (dev_get_cma_area(NULL))
388 ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
389 &page, atomic_pool_init, true, NORMAL,
390 GFP_KERNEL);
391 else
392 ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
393 &page, atomic_pool_init, true);
394 if (ptr) {
395 int ret;
396
397 ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
398 page_to_phys(page),
399 atomic_pool_size, -1);
400 if (ret)
401 goto destroy_genpool;
402
403 gen_pool_set_algo(atomic_pool,
404 gen_pool_first_fit_order_align,
405 NULL);
406 pr_info("DMA: preallocated %zu KiB pool for atomic coherent allocations\n",
407 atomic_pool_size / 1024);
408 return 0;
409 }
410
411 destroy_genpool:
412 gen_pool_destroy(atomic_pool);
413 atomic_pool = NULL;
414 out:
415 pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
416 atomic_pool_size / 1024);
417 return -ENOMEM;
418 }
419 /*
420 * CMA is activated by core_initcall, so we must be called after it.
421 */
422 postcore_initcall(atomic_pool_init);
423
424 struct dma_contig_early_reserve {
425 phys_addr_t base;
426 unsigned long size;
427 };
428
429 static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;
430
431 static int dma_mmu_remap_num __initdata;
432
433 void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
434 {
435 dma_mmu_remap[dma_mmu_remap_num].base = base;
436 dma_mmu_remap[dma_mmu_remap_num].size = size;
437 dma_mmu_remap_num++;
438 }
439
440 void __init dma_contiguous_remap(void)
441 {
442 int i;
443 for (i = 0; i < dma_mmu_remap_num; i++) {
444 phys_addr_t start = dma_mmu_remap[i].base;
445 phys_addr_t end = start + dma_mmu_remap[i].size;
446 struct map_desc map;
447 unsigned long addr;
448
449 if (end > arm_lowmem_limit)
450 end = arm_lowmem_limit;
451 if (start >= end)
452 continue;
453
454 map.pfn = __phys_to_pfn(start);
455 map.virtual = __phys_to_virt(start);
456 map.length = end - start;
457 map.type = MT_MEMORY_DMA_READY;
458
459 /*
460 * Clear previous low-memory mapping to ensure that the
461 * TLB does not see any conflicting entries, then flush
462 * the TLB of the old entries before creating new mappings.
463 *
464 * This ensures that any speculatively loaded TLB entries
465 * (even though they may be rare) can not cause any problems,
466 * and ensures that this code is architecturally compliant.
467 */
468 for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
469 addr += PMD_SIZE)
470 pmd_clear(pmd_off_k(addr));
471
472 flush_tlb_kernel_range(__phys_to_virt(start),
473 __phys_to_virt(end));
474
475 iotable_init(&map, 1);
476 }
477 }
478
479 static int __dma_update_pte(pte_t *pte, unsigned long addr, void *data)
480 {
481 struct page *page = virt_to_page(addr);
482 pgprot_t prot = *(pgprot_t *)data;
483
484 set_pte_ext(pte, mk_pte(page, prot), 0);
485 return 0;
486 }
487
488 static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
489 {
490 unsigned long start = (unsigned long) page_address(page);
491 unsigned end = start + size;
492
493 apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
494 flush_tlb_kernel_range(start, end);
495 }
496
497 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
498 pgprot_t prot, struct page **ret_page,
499 const void *caller, bool want_vaddr)
500 {
501 struct page *page;
502 void *ptr = NULL;
503 /*
504 * __alloc_remap_buffer is only called when the device is
505 * non-coherent
506 */
507 page = __dma_alloc_buffer(dev, size, gfp, NORMAL);
508 if (!page)
509 return NULL;
510 if (!want_vaddr)
511 goto out;
512
513 ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
514 if (!ptr) {
515 __dma_free_buffer(page, size);
516 return NULL;
517 }
518
519 out:
520 *ret_page = page;
521 return ptr;
522 }
523
524 static void *__alloc_from_pool(size_t size, struct page **ret_page)
525 {
526 unsigned long val;
527 void *ptr = NULL;
528
529 if (!atomic_pool) {
530 WARN(1, "coherent pool not initialised!\n");
531 return NULL;
532 }
533
534 val = gen_pool_alloc(atomic_pool, size);
535 if (val) {
536 phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
537
538 *ret_page = phys_to_page(phys);
539 ptr = (void *)val;
540 }
541
542 return ptr;
543 }
544
545 static bool __in_atomic_pool(void *start, size_t size)
546 {
547 return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
548 }
549
550 static int __free_from_pool(void *start, size_t size)
551 {
552 if (!__in_atomic_pool(start, size))
553 return 0;
554
555 gen_pool_free(atomic_pool, (unsigned long)start, size);
556
557 return 1;
558 }
559
560 static void *__alloc_from_contiguous(struct device *dev, size_t size,
561 pgprot_t prot, struct page **ret_page,
562 const void *caller, bool want_vaddr,
563 int coherent_flag, gfp_t gfp)
564 {
565 unsigned long order = get_order(size);
566 size_t count = size >> PAGE_SHIFT;
567 struct page *page;
568 void *ptr = NULL;
569
570 page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN);
571 if (!page)
572 return NULL;
573
574 __dma_clear_buffer(page, size, coherent_flag);
575
576 if (!want_vaddr)
577 goto out;
578
579 if (PageHighMem(page)) {
580 ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
581 if (!ptr) {
582 dma_release_from_contiguous(dev, page, count);
583 return NULL;
584 }
585 } else {
586 __dma_remap(page, size, prot);
587 ptr = page_address(page);
588 }
589
590 out:
591 *ret_page = page;
592 return ptr;
593 }
594
595 static void __free_from_contiguous(struct device *dev, struct page *page,
596 void *cpu_addr, size_t size, bool want_vaddr)
597 {
598 if (want_vaddr) {
599 if (PageHighMem(page))
600 __dma_free_remap(cpu_addr, size);
601 else
602 __dma_remap(page, size, PAGE_KERNEL);
603 }
604 dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
605 }
606
607 static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
608 {
609 prot = (attrs & DMA_ATTR_WRITE_COMBINE) ?
610 pgprot_writecombine(prot) :
611 pgprot_dmacoherent(prot);
612 return prot;
613 }
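/*
 * A minimal caller-side sketch (names are illustrative only): requesting a
 * write-combined buffer ends up in __get_dma_pgprot() above via
 * arm_dma_alloc()/arm_dma_mmap(); without DMA_ATTR_WRITE_COMBINE the
 * default pgprot_dmacoherent() (uncached) protection is used instead.
 */
static inline void *example_alloc_wc(struct device *dev, size_t size,
				     dma_addr_t *handle)
{
	return dma_alloc_attrs(dev, size, handle, GFP_KERNEL,
			       DMA_ATTR_WRITE_COMBINE);
}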
614
615 static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
616 struct page **ret_page)
617 {
618 struct page *page;
619 /* __alloc_simple_buffer is only called when the device is coherent */
620 page = __dma_alloc_buffer(dev, size, gfp, COHERENT);
621 if (!page)
622 return NULL;
623
624 *ret_page = page;
625 return page_address(page);
626 }
627
628 static void *simple_allocator_alloc(struct arm_dma_alloc_args *args,
629 struct page **ret_page)
630 {
631 return __alloc_simple_buffer(args->dev, args->size, args->gfp,
632 ret_page);
633 }
634
635 static void simple_allocator_free(struct arm_dma_free_args *args)
636 {
637 __dma_free_buffer(args->page, args->size);
638 }
639
640 static struct arm_dma_allocator simple_allocator = {
641 .alloc = simple_allocator_alloc,
642 .free = simple_allocator_free,
643 };
644
645 static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
646 struct page **ret_page)
647 {
648 return __alloc_from_contiguous(args->dev, args->size, args->prot,
649 ret_page, args->caller,
650 args->want_vaddr, args->coherent_flag,
651 args->gfp);
652 }
653
654 static void cma_allocator_free(struct arm_dma_free_args *args)
655 {
656 __free_from_contiguous(args->dev, args->page, args->cpu_addr,
657 args->size, args->want_vaddr);
658 }
659
660 static struct arm_dma_allocator cma_allocator = {
661 .alloc = cma_allocator_alloc,
662 .free = cma_allocator_free,
663 };
664
665 static void *pool_allocator_alloc(struct arm_dma_alloc_args *args,
666 struct page **ret_page)
667 {
668 return __alloc_from_pool(args->size, ret_page);
669 }
670
671 static void pool_allocator_free(struct arm_dma_free_args *args)
672 {
673 __free_from_pool(args->cpu_addr, args->size);
674 }
675
676 static struct arm_dma_allocator pool_allocator = {
677 .alloc = pool_allocator_alloc,
678 .free = pool_allocator_free,
679 };
680
681 static void *remap_allocator_alloc(struct arm_dma_alloc_args *args,
682 struct page **ret_page)
683 {
684 return __alloc_remap_buffer(args->dev, args->size, args->gfp,
685 args->prot, ret_page, args->caller,
686 args->want_vaddr);
687 }
688
689 static void remap_allocator_free(struct arm_dma_free_args *args)
690 {
691 if (args->want_vaddr)
692 __dma_free_remap(args->cpu_addr, args->size);
693
694 __dma_free_buffer(args->page, args->size);
695 }
696
697 static struct arm_dma_allocator remap_allocator = {
698 .alloc = remap_allocator_alloc,
699 .free = remap_allocator_free,
700 };
701
702 static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
703 gfp_t gfp, pgprot_t prot, bool is_coherent,
704 unsigned long attrs, const void *caller)
705 {
706 u64 mask = get_coherent_dma_mask(dev);
707 struct page *page = NULL;
708 void *addr;
709 bool allowblock, cma;
710 struct arm_dma_buffer *buf;
711 struct arm_dma_alloc_args args = {
712 .dev = dev,
713 .size = PAGE_ALIGN(size),
714 .gfp = gfp,
715 .prot = prot,
716 .caller = caller,
717 .want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
718 .coherent_flag = is_coherent ? COHERENT : NORMAL,
719 };
720
721 #ifdef CONFIG_DMA_API_DEBUG
722 u64 limit = (mask + 1) & ~mask;
723 if (limit && size >= limit) {
724 dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
725 size, mask);
726 return NULL;
727 }
728 #endif
729
730 if (!mask)
731 return NULL;
732
733 buf = kzalloc(sizeof(*buf),
734 gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
735 if (!buf)
736 return NULL;
737
738 if (mask < 0xffffffffULL)
739 gfp |= GFP_DMA;
740
741 /*
742 * Following is a work-around (a.k.a. hack) to prevent pages
743 * with __GFP_COMP being passed to split_page() which cannot
744 * handle them. The real problem is that this flag probably
745 * should be 0 on ARM as it is not supported on this
746 * platform; see CONFIG_HUGETLBFS.
747 */
748 gfp &= ~(__GFP_COMP);
749 args.gfp = gfp;
750
751 *handle = DMA_MAPPING_ERROR;
752 allowblock = gfpflags_allow_blocking(gfp);
753 cma = allowblock ? dev_get_cma_area(dev) : false;
754
755 if (cma)
756 buf->allocator = &cma_allocator;
757 else if (is_coherent)
758 buf->allocator = &simple_allocator;
759 else if (allowblock)
760 buf->allocator = &remap_allocator;
761 else
762 buf->allocator = &pool_allocator;
763
764 addr = buf->allocator->alloc(&args, &page);
765
766 if (page) {
767 unsigned long flags;
768
769 *handle = pfn_to_dma(dev, page_to_pfn(page));
770 buf->virt = args.want_vaddr ? addr : page;
771
772 spin_lock_irqsave(&arm_dma_bufs_lock, flags);
773 list_add(&buf->list, &arm_dma_bufs);
774 spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
775 } else {
776 kfree(buf);
777 }
778
779 return args.want_vaddr ? addr : page;
780 }
781
782 /*
783 * Allocate DMA-coherent memory space and return both the kernel remapped
784 * virtual and bus address for that space.
785 */
786 void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
787 gfp_t gfp, unsigned long attrs)
788 {
789 pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
790
791 return __dma_alloc(dev, size, handle, gfp, prot, false,
792 attrs, __builtin_return_address(0));
793 }
794
795 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
796 dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
797 {
798 return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
799 attrs, __builtin_return_address(0));
800 }
801
802 static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
803 void *cpu_addr, dma_addr_t dma_addr, size_t size,
804 unsigned long attrs)
805 {
806 int ret = -ENXIO;
807 unsigned long nr_vma_pages = vma_pages(vma);
808 unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
809 unsigned long pfn = dma_to_pfn(dev, dma_addr);
810 unsigned long off = vma->vm_pgoff;
811
812 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
813 return ret;
814
815 if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
816 ret = remap_pfn_range(vma, vma->vm_start,
817 pfn + off,
818 vma->vm_end - vma->vm_start,
819 vma->vm_page_prot);
820 }
821
822 return ret;
823 }
824
825 /*
826 * Create userspace mapping for the DMA-coherent memory.
827 */
828 static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
829 void *cpu_addr, dma_addr_t dma_addr, size_t size,
830 unsigned long attrs)
831 {
832 return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
833 }
834
835 int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
836 void *cpu_addr, dma_addr_t dma_addr, size_t size,
837 unsigned long attrs)
838 {
839 vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
840 return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
841 }
842
843 /*
844 * Free a buffer as defined by the above mapping.
845 */
846 static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
847 dma_addr_t handle, unsigned long attrs,
848 bool is_coherent)
849 {
850 struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
851 struct arm_dma_buffer *buf;
852 struct arm_dma_free_args args = {
853 .dev = dev,
854 .size = PAGE_ALIGN(size),
855 .cpu_addr = cpu_addr,
856 .page = page,
857 .want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
858 };
859
860 buf = arm_dma_buffer_find(cpu_addr);
861 if (WARN(!buf, "Freeing invalid buffer %p\n", cpu_addr))
862 return;
863
864 buf->allocator->free(&args);
865 kfree(buf);
866 }
867
868 void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
869 dma_addr_t handle, unsigned long attrs)
870 {
871 __arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
872 }
873
874 static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
875 dma_addr_t handle, unsigned long attrs)
876 {
877 __arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
878 }
879
880 /*
881 * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
882 * that the intention is to allow exporting memory allocated via the
883 * coherent DMA APIs through the dma_buf API, which only accepts a
884 * scattertable. This presents a couple of problems:
885 * 1. Not all memory allocated via the coherent DMA APIs is backed by
886 * a struct page
887 * 2. Passing coherent DMA memory into the streaming APIs is not allowed
888 * as we will try to flush the memory through a different alias to that
889 * actually being used (and the flushes are redundant.)
890 */
891 int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
892 void *cpu_addr, dma_addr_t handle, size_t size,
893 unsigned long attrs)
894 {
895 unsigned long pfn = dma_to_pfn(dev, handle);
896 struct page *page;
897 int ret;
898
899 /* If the PFN is not valid, we do not have a struct page */
900 if (!pfn_valid(pfn))
901 return -ENXIO;
902
903 page = pfn_to_page(pfn);
904
905 ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
906 if (unlikely(ret))
907 return ret;
908
909 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
910 return 0;
911 }
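/*
 * Hypothetical caller side, for illustration only: exporting a coherent
 * buffer as an sg_table (e.g. towards dma-buf), subject to the caveats in
 * the comment above arm_dma_get_sgtable().
 */
static inline int example_export_sgt(struct device *dev, struct sg_table *sgt,
				     void *cpu_addr, dma_addr_t handle,
				     size_t size)
{
	int ret = dma_get_sgtable(dev, sgt, cpu_addr, handle, size);

	if (ret)
		return ret;
	/* ... hand sgt->sgl to the importer; sg_free_table(sgt) when done ... */
	return 0;
}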
912
913 static void dma_cache_maint_page(struct page *page, unsigned long offset,
914 size_t size, enum dma_data_direction dir,
915 void (*op)(const void *, size_t, int))
916 {
917 unsigned long pfn;
918 size_t left = size;
919
920 pfn = page_to_pfn(page) + offset / PAGE_SIZE;
921 offset %= PAGE_SIZE;
922
923 /*
924 * A single sg entry may refer to multiple physically contiguous
925 * pages. But we still need to process highmem pages individually.
926 * If highmem is not configured then the bulk of this loop gets
927 * optimized out.
928 */
929 do {
930 size_t len = left;
931 void *vaddr;
932
933 page = pfn_to_page(pfn);
934
935 if (PageHighMem(page)) {
936 if (len + offset > PAGE_SIZE)
937 len = PAGE_SIZE - offset;
938
939 if (cache_is_vipt_nonaliasing()) {
940 vaddr = kmap_atomic(page);
941 op(vaddr + offset, len, dir);
942 kunmap_atomic(vaddr);
943 } else {
944 vaddr = kmap_high_get(page);
945 if (vaddr) {
946 op(vaddr + offset, len, dir);
947 kunmap_high(page);
948 }
949 }
950 } else {
951 vaddr = page_address(page) + offset;
952 op(vaddr, len, dir);
953 }
954 offset = 0;
955 pfn++;
956 left -= len;
957 } while (left);
958 }
959
960 /*
961 * Make an area consistent for devices.
962 * Note: Drivers should NOT use this function directly, as it will break
963 * platforms with CONFIG_DMABOUNCE.
964 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
965 */
966 static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
967 size_t size, enum dma_data_direction dir)
968 {
969 phys_addr_t paddr;
970
971 dma_cache_maint_page(page, off, size, dir, dmac_map_area);
972
973 paddr = page_to_phys(page) + off;
974 if (dir == DMA_FROM_DEVICE) {
975 outer_inv_range(paddr, paddr + size);
976 } else {
977 outer_clean_range(paddr, paddr + size);
978 }
979 /* FIXME: non-speculating: flush on bidirectional mappings? */
980 }
981
982 static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
983 size_t size, enum dma_data_direction dir)
984 {
985 phys_addr_t paddr = page_to_phys(page) + off;
986
987 /* FIXME: non-speculating: not required */
988 /* in any case, don't bother invalidating if DMA to device */
989 if (dir != DMA_TO_DEVICE) {
990 outer_inv_range(paddr, paddr + size);
991
992 dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
993 }
994
995 /*
996 * Mark the D-cache clean for these pages to avoid extra flushing.
997 */
998 if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
999 unsigned long pfn;
1000 size_t left = size;
1001
1002 pfn = page_to_pfn(page) + off / PAGE_SIZE;
1003 off %= PAGE_SIZE;
1004 if (off) {
1005 pfn++;
1006 left -= PAGE_SIZE - off;
1007 }
1008 while (left >= PAGE_SIZE) {
1009 page = pfn_to_page(pfn++);
1010 set_bit(PG_dcache_clean, &page->flags);
1011 left -= PAGE_SIZE;
1012 }
1013 }
1014 }
1015
1016 /**
1017 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
1018 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
1019 * @sg: list of buffers
1020 * @nents: number of buffers to map
1021 * @dir: DMA transfer direction
1022 *
1023 * Map a set of buffers described by scatterlist in streaming mode for DMA.
1024 * This is the scatter-gather version of the dma_map_single interface.
1025 * Here the scatter gather list elements are each tagged with the
1026 * appropriate dma address and length. They are obtained via
1027 * sg_dma_{address,length}.
1028 *
1029 * Device ownership issues as mentioned for dma_map_single are the same
1030 * here.
1031 */
1032 int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
1033 enum dma_data_direction dir, unsigned long attrs)
1034 {
1035 const struct dma_map_ops *ops = get_dma_ops(dev);
1036 struct scatterlist *s;
1037 int i, j;
1038
1039 for_each_sg(sg, s, nents, i) {
1040 #ifdef CONFIG_NEED_SG_DMA_LENGTH
1041 s->dma_length = s->length;
1042 #endif
1043 s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
1044 s->length, dir, attrs);
1045 if (dma_mapping_error(dev, s->dma_address))
1046 goto bad_mapping;
1047 }
1048 return nents;
1049
1050 bad_mapping:
1051 for_each_sg(sg, s, i, j)
1052 ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
1053 return 0;
1054 }
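/*
 * Minimal illustration (assumed driver context, hypothetical names) of
 * consuming the result of dma_map_sg(), which lands in arm_dma_map_sg()
 * above: each mapped element carries the bus address and length to program
 * into the device via sg_dma_address()/sg_dma_len().
 */
static inline int example_map_sglist(struct device *dev,
				     struct scatterlist *sgl, int nents)
{
	struct scatterlist *s;
	int i, count;

	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (!count)
		return -ENOMEM;

	for_each_sg(sgl, s, count, i) {
		/* program sg_dma_address(s) / sg_dma_len(s) into the device */
	}

	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}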
1055
1056 /**
1057 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1058 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
1059 * @sg: list of buffers
1060 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
1061 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1062 *
1063 * Unmap a set of streaming mode DMA translations. Again, CPU access
1064 * rules concerning calls here are the same as for dma_unmap_single().
1065 */
1066 void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
1067 enum dma_data_direction dir, unsigned long attrs)
1068 {
1069 const struct dma_map_ops *ops = get_dma_ops(dev);
1070 struct scatterlist *s;
1071
1072 int i;
1073
1074 for_each_sg(sg, s, nents, i)
1075 ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
1076 }
1077
1078 /**
1079 * arm_dma_sync_sg_for_cpu
1080 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
1081 * @sg: list of buffers
1082 * @nents: number of buffers to map (returned from dma_map_sg)
1083 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1084 */
1085 void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
1086 int nents, enum dma_data_direction dir)
1087 {
1088 const struct dma_map_ops *ops = get_dma_ops(dev);
1089 struct scatterlist *s;
1090 int i;
1091
1092 for_each_sg(sg, s, nents, i)
1093 ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
1094 dir);
1095 }
1096
1097 /**
1098 * arm_dma_sync_sg_for_device
1099 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
1100 * @sg: list of buffers
1101 * @nents: number of buffers to map (returned from dma_map_sg)
1102 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1103 */
1104 void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1105 int nents, enum dma_data_direction dir)
1106 {
1107 const struct dma_map_ops *ops = get_dma_ops(dev);
1108 struct scatterlist *s;
1109 int i;
1110
1111 for_each_sg(sg, s, nents, i)
1112 ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
1113 dir);
1114 }
1115
1116 /*
1117 * Return whether the given device DMA address mask can be supported
1118 * properly. For example, if your device can only drive the low 24-bits
1119 * during bus mastering, then you would pass 0x00ffffff as the mask
1120 * to this function.
1121 */
1122 int arm_dma_supported(struct device *dev, u64 mask)
1123 {
1124 return __dma_supported(dev, mask, false);
1125 }
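/*
 * Illustrative only: a driver whose device can drive 24 address bits on the
 * bus would negotiate its masks like this; with arm_dma_ops installed the
 * check ends up in arm_dma_supported()/__dma_supported() above.
 */
static inline int example_set_24bit_mask(struct device *dev)
{
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(24));
}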
1126
1127 static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
1128 {
1129 /*
1130 * When CONFIG_ARM_LPAE is set, physical address can extend above
1131 * 32-bits, which then can't be addressed by devices that only support
1132 * 32-bit DMA.
1133 * Use the generic dma-direct / swiotlb ops code in that case, as that
1134 * handles bounce buffering for us.
1135 *
1136 * Note: this checks CONFIG_ARM_LPAE instead of CONFIG_SWIOTLB as the
1137 * latter is also selected by the Xen code, but that code for now relies
1138 * on non-NULL dev_dma_ops. To be cleaned up later.
1139 */
1140 if (IS_ENABLED(CONFIG_ARM_LPAE))
1141 return NULL;
1142 return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
1143 }
1144
1145 #ifdef CONFIG_ARM_DMA_USE_IOMMU
1146
1147 static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
1148 {
1149 int prot = 0;
1150
1151 if (attrs & DMA_ATTR_PRIVILEGED)
1152 prot |= IOMMU_PRIV;
1153
1154 switch (dir) {
1155 case DMA_BIDIRECTIONAL:
1156 return prot | IOMMU_READ | IOMMU_WRITE;
1157 case DMA_TO_DEVICE:
1158 return prot | IOMMU_READ;
1159 case DMA_FROM_DEVICE:
1160 return prot | IOMMU_WRITE;
1161 default:
1162 return prot;
1163 }
1164 }
1165
1166 /* IOMMU */
1167
1168 static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
1169
1170 static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
1171 size_t size)
1172 {
1173 unsigned int order = get_order(size);
1174 unsigned int align = 0;
1175 unsigned int count, start;
1176 size_t mapping_size = mapping->bits << PAGE_SHIFT;
1177 unsigned long flags;
1178 dma_addr_t iova;
1179 int i;
1180
1181 if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
1182 order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;
1183
1184 count = PAGE_ALIGN(size) >> PAGE_SHIFT;
1185 align = (1 << order) - 1;
1186
1187 spin_lock_irqsave(&mapping->lock, flags);
1188 for (i = 0; i < mapping->nr_bitmaps; i++) {
1189 start = bitmap_find_next_zero_area(mapping->bitmaps[i],
1190 mapping->bits, 0, count, align);
1191
1192 if (start > mapping->bits)
1193 continue;
1194
1195 bitmap_set(mapping->bitmaps[i], start, count);
1196 break;
1197 }
1198
1199 /*
1200 * No unused range found. Try to extend the existing mapping
1201 * and perform a second attempt to reserve an IO virtual
1202 * address range of size bytes.
1203 */
1204 if (i == mapping->nr_bitmaps) {
1205 if (extend_iommu_mapping(mapping)) {
1206 spin_unlock_irqrestore(&mapping->lock, flags);
1207 return DMA_MAPPING_ERROR;
1208 }
1209
1210 start = bitmap_find_next_zero_area(mapping->bitmaps[i],
1211 mapping->bits, 0, count, align);
1212
1213 if (start > mapping->bits) {
1214 spin_unlock_irqrestore(&mapping->lock, flags);
1215 return DMA_MAPPING_ERROR;
1216 }
1217
1218 bitmap_set(mapping->bitmaps[i], start, count);
1219 }
1220 spin_unlock_irqrestore(&mapping->lock, flags);
1221
1222 iova = mapping->base + (mapping_size * i);
1223 iova += start << PAGE_SHIFT;
1224
1225 return iova;
1226 }
1227
1228 static inline void __free_iova(struct dma_iommu_mapping *mapping,
1229 dma_addr_t addr, size_t size)
1230 {
1231 unsigned int start, count;
1232 size_t mapping_size = mapping->bits << PAGE_SHIFT;
1233 unsigned long flags;
1234 dma_addr_t bitmap_base;
1235 u32 bitmap_index;
1236
1237 if (!size)
1238 return;
1239
1240 bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size;
1241 BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);
1242
1243 bitmap_base = mapping->base + mapping_size * bitmap_index;
1244
1245 start = (addr - bitmap_base) >> PAGE_SHIFT;
1246
1247 if (addr + size > bitmap_base + mapping_size) {
1248 /*
1249 * The address range to be freed reaches into the iova
1250 * range of the next bitmap. This should not happen as
1251 * we don't allow this in __alloc_iova (at the
1252 * moment).
1253 */
1254 BUG();
1255 } else
1256 count = size >> PAGE_SHIFT;
1257
1258 spin_lock_irqsave(&mapping->lock, flags);
1259 bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
1260 spin_unlock_irqrestore(&mapping->lock, flags);
1261 }
1262
1263 /* We'll try 2M, 1M, 64K, and finally 4K; array must end with 0! */
1264 static const int iommu_order_array[] = { 9, 8, 4, 0 };
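/*
 * With 4 KiB pages the orders above correspond to 2^9 = 512 pages (2 MiB),
 * 2^8 = 256 pages (1 MiB), 2^4 = 16 pages (64 KiB) and 2^0 = 1 page (4 KiB).
 */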
1265
1266 static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
1267 gfp_t gfp, unsigned long attrs,
1268 int coherent_flag)
1269 {
1270 struct page **pages;
1271 int count = size >> PAGE_SHIFT;
1272 int array_size = count * sizeof(struct page *);
1273 int i = 0;
1274 int order_idx = 0;
1275
1276 if (array_size <= PAGE_SIZE)
1277 pages = kzalloc(array_size, GFP_KERNEL);
1278 else
1279 pages = vzalloc(array_size);
1280 if (!pages)
1281 return NULL;
1282
1283 if (attrs & DMA_ATTR_FORCE_CONTIGUOUS)
1284 {
1285 unsigned long order = get_order(size);
1286 struct page *page;
1287
1288 page = dma_alloc_from_contiguous(dev, count, order,
1289 gfp & __GFP_NOWARN);
1290 if (!page)
1291 goto error;
1292
1293 __dma_clear_buffer(page, size, coherent_flag);
1294
1295 for (i = 0; i < count; i++)
1296 pages[i] = page + i;
1297
1298 return pages;
1299 }
1300
1301 /* Go straight to 4K chunks if caller says it's OK. */
1302 if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
1303 order_idx = ARRAY_SIZE(iommu_order_array) - 1;
1304
1305 /*
1306 * IOMMU can map any pages, so highmem can also be used here
1307 */
1308 gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
1309
1310 while (count) {
1311 int j, order;
1312
1313 order = iommu_order_array[order_idx];
1314
1315 /* Drop down when we get small */
1316 if (__fls(count) < order) {
1317 order_idx++;
1318 continue;
1319 }
1320
1321 if (order) {
1322 /* See if it's easy to allocate a high-order chunk */
1323 pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);
1324
1325 /* Go down a notch at first sign of pressure */
1326 if (!pages[i]) {
1327 order_idx++;
1328 continue;
1329 }
1330 } else {
1331 pages[i] = alloc_pages(gfp, 0);
1332 if (!pages[i])
1333 goto error;
1334 }
1335
1336 if (order) {
1337 split_page(pages[i], order);
1338 j = 1 << order;
1339 while (--j)
1340 pages[i + j] = pages[i] + j;
1341 }
1342
1343 __dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag);
1344 i += 1 << order;
1345 count -= 1 << order;
1346 }
1347
1348 return pages;
1349 error:
1350 while (i--)
1351 if (pages[i])
1352 __free_pages(pages[i], 0);
1353 kvfree(pages);
1354 return NULL;
1355 }
1356
1357 static int __iommu_free_buffer(struct device *dev, struct page **pages,
1358 size_t size, unsigned long attrs)
1359 {
1360 int count = size >> PAGE_SHIFT;
1361 int i;
1362
1363 if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
1364 dma_release_from_contiguous(dev, pages[0], count);
1365 } else {
1366 for (i = 0; i < count; i++)
1367 if (pages[i])
1368 __free_pages(pages[i], 0);
1369 }
1370
1371 kvfree(pages);
1372 return 0;
1373 }
1374
1375 /*
1376 * Create a CPU mapping for the specified pages
1377 */
1378 static void *
1379 __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
1380 const void *caller)
1381 {
1382 return dma_common_pages_remap(pages, size,
1383 VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller);
1384 }
1385
1386 /*
1387 * Create a mapping in device IO address space for the specified pages
1388 */
1389 static dma_addr_t
1390 __iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
1391 unsigned long attrs)
1392 {
1393 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1394 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
1395 dma_addr_t dma_addr, iova;
1396 int i;
1397
1398 dma_addr = __alloc_iova(mapping, size);
1399 if (dma_addr == DMA_MAPPING_ERROR)
1400 return dma_addr;
1401
1402 iova = dma_addr;
1403 for (i = 0; i < count; ) {
1404 int ret;
1405
1406 unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
1407 phys_addr_t phys = page_to_phys(pages[i]);
1408 unsigned int len, j;
1409
1410 for (j = i + 1; j < count; j++, next_pfn++)
1411 if (page_to_pfn(pages[j]) != next_pfn)
1412 break;
1413
1414 len = (j - i) << PAGE_SHIFT;
1415 ret = iommu_map(mapping->domain, iova, phys, len,
1416 __dma_info_to_prot(DMA_BIDIRECTIONAL, attrs));
1417 if (ret < 0)
1418 goto fail;
1419 iova += len;
1420 i = j;
1421 }
1422 return dma_addr;
1423 fail:
1424 iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
1425 __free_iova(mapping, dma_addr, size);
1426 return DMA_MAPPING_ERROR;
1427 }
1428
1429 static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
1430 {
1431 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1432
1433 /*
1434 * add optional in-page offset from iova to size and align
1435 * result to page size
1436 */
1437 size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
1438 iova &= PAGE_MASK;
1439
1440 iommu_unmap(mapping->domain, iova, size);
1441 __free_iova(mapping, iova, size);
1442 return 0;
1443 }
1444
1445 static struct page **__atomic_get_pages(void *addr)
1446 {
1447 struct page *page;
1448 phys_addr_t phys;
1449
1450 phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
1451 page = phys_to_page(phys);
1452
1453 return (struct page **)page;
1454 }
1455
1456 static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
1457 {
1458 struct vm_struct *area;
1459
1460 if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
1461 return __atomic_get_pages(cpu_addr);
1462
1463 if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
1464 return cpu_addr;
1465
1466 area = find_vm_area(cpu_addr);
1467 if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
1468 return area->pages;
1469 return NULL;
1470 }
1471
1472 static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
1473 dma_addr_t *handle, int coherent_flag,
1474 unsigned long attrs)
1475 {
1476 struct page *page;
1477 void *addr;
1478
1479 if (coherent_flag == COHERENT)
1480 addr = __alloc_simple_buffer(dev, size, gfp, &page);
1481 else
1482 addr = __alloc_from_pool(size, &page);
1483 if (!addr)
1484 return NULL;
1485
1486 *handle = __iommu_create_mapping(dev, &page, size, attrs);
1487 if (*handle == DMA_MAPPING_ERROR)
1488 goto err_mapping;
1489
1490 return addr;
1491
1492 err_mapping:
1493 __free_from_pool(addr, size);
1494 return NULL;
1495 }
1496
1497 static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
1498 dma_addr_t handle, size_t size, int coherent_flag)
1499 {
1500 __iommu_remove_mapping(dev, handle, size);
1501 if (coherent_flag == COHERENT)
1502 __dma_free_buffer(virt_to_page(cpu_addr), size);
1503 else
1504 __free_from_pool(cpu_addr, size);
1505 }
1506
1507 static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
1508 dma_addr_t *handle, gfp_t gfp, unsigned long attrs,
1509 int coherent_flag)
1510 {
1511 pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
1512 struct page **pages;
1513 void *addr = NULL;
1514
1515 *handle = DMA_MAPPING_ERROR;
1516 size = PAGE_ALIGN(size);
1517
1518 if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
1519 return __iommu_alloc_simple(dev, size, gfp, handle,
1520 coherent_flag, attrs);
1521
1522 /*
1523 * Following is a work-around (a.k.a. hack) to prevent pages
1524 * with __GFP_COMP being passed to split_page() which cannot
1525 * handle them. The real problem is that this flag probably
1526 * should be 0 on ARM as it is not supported on this
1527 * platform; see CONFIG_HUGETLBFS.
1528 */
1529 gfp &= ~(__GFP_COMP);
1530
1531 pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag);
1532 if (!pages)
1533 return NULL;
1534
1535 *handle = __iommu_create_mapping(dev, pages, size, attrs);
1536 if (*handle == DMA_MAPPING_ERROR)
1537 goto err_buffer;
1538
1539 if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
1540 return pages;
1541
1542 addr = __iommu_alloc_remap(pages, size, gfp, prot,
1543 __builtin_return_address(0));
1544 if (!addr)
1545 goto err_mapping;
1546
1547 return addr;
1548
1549 err_mapping:
1550 __iommu_remove_mapping(dev, *handle, size);
1551 err_buffer:
1552 __iommu_free_buffer(dev, pages, size, attrs);
1553 return NULL;
1554 }
1555
1556 static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
1557 dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
1558 {
1559 return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL);
1560 }
1561
1562 static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size,
1563 dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
1564 {
1565 return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT);
1566 }
1567
1568 static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
1569 void *cpu_addr, dma_addr_t dma_addr, size_t size,
1570 unsigned long attrs)
1571 {
1572 struct page **pages = __iommu_get_pages(cpu_addr, attrs);
1573 unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
1574 int err;
1575
1576 if (!pages)
1577 return -ENXIO;
1578
1579 if (vma->vm_pgoff >= nr_pages)
1580 return -ENXIO;
1581
1582 err = vm_map_pages(vma, pages, nr_pages);
1583 if (err)
1584 pr_err("Remapping memory failed: %d\n", err);
1585
1586 return err;
1587 }
1588 static int arm_iommu_mmap_attrs(struct device *dev,
1589 struct vm_area_struct *vma, void *cpu_addr,
1590 dma_addr_t dma_addr, size_t size, unsigned long attrs)
1591 {
1592 vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
1593
1594 return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
1595 }
1596
1597 static int arm_coherent_iommu_mmap_attrs(struct device *dev,
1598 struct vm_area_struct *vma, void *cpu_addr,
1599 dma_addr_t dma_addr, size_t size, unsigned long attrs)
1600 {
1601 return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
1602 }
1603
1604 /*
1605 * free a page as defined by the above mapping.
1606 * Must not be called with IRQs disabled.
1607 */
1608 void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
1609 dma_addr_t handle, unsigned long attrs, int coherent_flag)
1610 {
1611 struct page **pages;
1612 size = PAGE_ALIGN(size);
1613
1614 if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) {
1615 __iommu_free_atomic(dev, cpu_addr, handle, size, coherent_flag);
1616 return;
1617 }
1618
1619 pages = __iommu_get_pages(cpu_addr, attrs);
1620 if (!pages) {
1621 WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
1622 return;
1623 }
1624
1625 if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0) {
1626 dma_common_free_remap(cpu_addr, size,
1627 VM_ARM_DMA_CONSISTENT | VM_USERMAP);
1628 }
1629
1630 __iommu_remove_mapping(dev, handle, size);
1631 __iommu_free_buffer(dev, pages, size, attrs);
1632 }
1633
1634 void arm_iommu_free_attrs(struct device *dev, size_t size,
1635 void *cpu_addr, dma_addr_t handle, unsigned long attrs)
1636 {
1637 __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
1638 }
1639
1640 void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
1641 void *cpu_addr, dma_addr_t handle, unsigned long attrs)
1642 {
1643 __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT);
1644 }
1645
1646 static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
1647 void *cpu_addr, dma_addr_t dma_addr,
1648 size_t size, unsigned long attrs)
1649 {
1650 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
1651 struct page **pages = __iommu_get_pages(cpu_addr, attrs);
1652
1653 if (!pages)
1654 return -ENXIO;
1655
1656 return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
1657 GFP_KERNEL);
1658 }
1659
1660 /*
1661 * Map a part of the scatter-gather list into contiguous io address space
1662 */
1663 static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
1664 size_t size, dma_addr_t *handle,
1665 enum dma_data_direction dir, unsigned long attrs,
1666 bool is_coherent)
1667 {
1668 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1669 dma_addr_t iova, iova_base;
1670 int ret = 0;
1671 unsigned int count;
1672 struct scatterlist *s;
1673 int prot;
1674
1675 size = PAGE_ALIGN(size);
1676 *handle = DMA_MAPPING_ERROR;
1677
1678 iova_base = iova = __alloc_iova(mapping, size);
1679 if (iova == DMA_MAPPING_ERROR)
1680 return -ENOMEM;
1681
1682 for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
1683 phys_addr_t phys = page_to_phys(sg_page(s));
1684 unsigned int len = PAGE_ALIGN(s->offset + s->length);
1685
1686 if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
1687 __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
1688
1689 prot = __dma_info_to_prot(dir, attrs);
1690
1691 ret = iommu_map(mapping->domain, iova, phys, len, prot);
1692 if (ret < 0)
1693 goto fail;
1694 count += len >> PAGE_SHIFT;
1695 iova += len;
1696 }
1697 *handle = iova_base;
1698
1699 return 0;
1700 fail:
1701 iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
1702 __free_iova(mapping, iova_base, size);
1703 return ret;
1704 }
1705
1706 static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
1707 enum dma_data_direction dir, unsigned long attrs,
1708 bool is_coherent)
1709 {
1710 struct scatterlist *s = sg, *dma = sg, *start = sg;
1711 int i, count = 0;
1712 unsigned int offset = s->offset;
1713 unsigned int size = s->offset + s->length;
1714 unsigned int max = dma_get_max_seg_size(dev);
1715
1716 for (i = 1; i < nents; i++) {
1717 s = sg_next(s);
1718
1719 s->dma_address = DMA_MAPPING_ERROR;
1720 s->dma_length = 0;
1721
1722 if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
1723 if (__map_sg_chunk(dev, start, size, &dma->dma_address,
1724 dir, attrs, is_coherent) < 0)
1725 goto bad_mapping;
1726
1727 dma->dma_address += offset;
1728 dma->dma_length = size - offset;
1729
1730 size = offset = s->offset;
1731 start = s;
1732 dma = sg_next(dma);
1733 count += 1;
1734 }
1735 size += s->length;
1736 }
1737 if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
1738 is_coherent) < 0)
1739 goto bad_mapping;
1740
1741 dma->dma_address += offset;
1742 dma->dma_length = size - offset;
1743
1744 return count+1;
1745
1746 bad_mapping:
1747 for_each_sg(sg, s, count, i)
1748 __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
1749 return 0;
1750 }
1751
1752 /**
1753 * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
1754 * @dev: valid struct device pointer
1755 * @sg: list of buffers
1756 * @nents: number of buffers to map
1757 * @dir: DMA transfer direction
1758 *
1759 * Map a set of i/o coherent buffers described by scatterlist in streaming
1760 * mode for DMA. The scatter gather list elements are merged together (if
1761 * possible) and tagged with the appropriate dma address and length. They are
1762 * obtained via sg_dma_{address,length}.
1763 */
1764 int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
1765 int nents, enum dma_data_direction dir, unsigned long attrs)
1766 {
1767 return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
1768 }
1769
1770 /**
1771 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
1772 * @dev: valid struct device pointer
1773 * @sg: list of buffers
1774 * @nents: number of buffers to map
1775 * @dir: DMA transfer direction
1776 *
1777 * Map a set of buffers described by scatterlist in streaming mode for DMA.
1778 * The scatter gather list elements are merged together (if possible) and
1779 * tagged with the appropriate dma address and length. They are obtained via
1780 * sg_dma_{address,length}.
1781 */
1782 int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
1783 int nents, enum dma_data_direction dir, unsigned long attrs)
1784 {
1785 return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
1786 }
1787
1788 static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
1789 int nents, enum dma_data_direction dir,
1790 unsigned long attrs, bool is_coherent)
1791 {
1792 struct scatterlist *s;
1793 int i;
1794
1795 for_each_sg(sg, s, nents, i) {
1796 if (sg_dma_len(s))
1797 __iommu_remove_mapping(dev, sg_dma_address(s),
1798 sg_dma_len(s));
1799 if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
1800 __dma_page_dev_to_cpu(sg_page(s), s->offset,
1801 s->length, dir);
1802 }
1803 }
1804
1805 /**
1806 * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1807 * @dev: valid struct device pointer
1808 * @sg: list of buffers
1809 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
1810 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1811 *
1812 * Unmap a set of streaming mode DMA translations. Again, CPU access
1813 * rules concerning calls here are the same as for dma_unmap_single().
1814 */
1815 void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
1816 int nents, enum dma_data_direction dir,
1817 unsigned long attrs)
1818 {
1819 __iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
1820 }
1821
1822 /**
1823 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1824 * @dev: valid struct device pointer
1825 * @sg: list of buffers
1826 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
1827 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1828 *
1829 * Unmap a set of streaming mode DMA translations. Again, CPU access
1830 * rules concerning calls here are the same as for dma_unmap_single().
1831 */
1832 void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
1833 enum dma_data_direction dir,
1834 unsigned long attrs)
1835 {
1836 __iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
1837 }
1838
1839 /**
1840 * arm_iommu_sync_sg_for_cpu
1841 * @dev: valid struct device pointer
1842 * @sg: list of buffers
1843 * @nents: number of buffers to map (returned from dma_map_sg)
1844 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1845 */
1846 void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
1847 int nents, enum dma_data_direction dir)
1848 {
1849 struct scatterlist *s;
1850 int i;
1851
1852 for_each_sg(sg, s, nents, i)
1853 __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
1854
1855 }
1856
1857 /**
1858 * arm_iommu_sync_sg_for_device
1859 * @dev: valid struct device pointer
1860 * @sg: list of buffers
1861 * @nents: number of buffers to map (returned from dma_map_sg)
1862 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1863 */
1864 void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1865 int nents, enum dma_data_direction dir)
1866 {
1867 struct scatterlist *s;
1868 int i;
1869
1870 for_each_sg(sg, s, nents, i)
1871 __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
1872 }
1873
1874
1875 /**
1876 * arm_coherent_iommu_map_page
1877 * @dev: valid struct device pointer
1878 * @page: page that buffer resides in
1879 * @offset: offset into page for start of buffer
1880 * @size: size of buffer to map
1881 * @dir: DMA transfer direction
1882 *
1883 * Coherent IOMMU aware version of arm_dma_map_page()
1884 */
1885 static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
1886 unsigned long offset, size_t size, enum dma_data_direction dir,
1887 unsigned long attrs)
1888 {
1889 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1890 dma_addr_t dma_addr;
1891 int ret, prot, len = PAGE_ALIGN(size + offset);
1892
1893 dma_addr = __alloc_iova(mapping, len);
1894 if (dma_addr == DMA_MAPPING_ERROR)
1895 return dma_addr;
1896
1897 prot = __dma_info_to_prot(dir, attrs);
1898
1899 ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
1900 if (ret < 0)
1901 goto fail;
1902
1903 return dma_addr + offset;
1904 fail:
1905 __free_iova(mapping, dma_addr, len);
1906 return DMA_MAPPING_ERROR;
1907 }
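/*
 * Worked example of the length/offset arithmetic above (illustrative
 * numbers only, assuming 4 KiB pages): mapping 0x300 bytes starting at
 * offset 0x100 into a page gives len = PAGE_ALIGN(0x300 + 0x100) = 0x1000,
 * so a single page worth of IOVA space is allocated and mapped, and the
 * caller receives dma_addr + 0x100 so that the returned DMA address
 * preserves the sub-page offset of the original buffer.
 */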
1908
1909 /**
1910 * arm_iommu_map_page
1911 * @dev: valid struct device pointer
1912 * @page: page that buffer resides in
1913 * @offset: offset into page for start of buffer
1914 * @size: size of buffer to map
1915 * @dir: DMA transfer direction
1916 *
1917 * IOMMU aware version of arm_dma_map_page()
1918 */
1919 static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
1920 unsigned long offset, size_t size, enum dma_data_direction dir,
1921 unsigned long attrs)
1922 {
1923 if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
1924 __dma_page_cpu_to_dev(page, offset, size, dir);
1925
1926 return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
1927 }
1928
1929 /**
1930 * arm_coherent_iommu_unmap_page
1931 * @dev: valid struct device pointer
1932 * @handle: DMA address of buffer
1933 * @size: size of buffer (same as passed to dma_map_page)
1934 * @dir: DMA transfer direction (same as passed to dma_map_page)
1935 *
1936 * Coherent IOMMU aware version of arm_dma_unmap_page()
1937 */
1938 static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
1939 size_t size, enum dma_data_direction dir, unsigned long attrs)
1940 {
1941 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1942 dma_addr_t iova = handle & PAGE_MASK;
1943 int offset = handle & ~PAGE_MASK;
1944 int len = PAGE_ALIGN(size + offset);
1945
1946 if (!iova)
1947 return;
1948
1949 iommu_unmap(mapping->domain, iova, len);
1950 __free_iova(mapping, iova, len);
1951 }
1952
1953 /**
1954 * arm_iommu_unmap_page
1955 * @dev: valid struct device pointer
1956 * @handle: DMA address of buffer
1957 * @size: size of buffer (same as passed to dma_map_page)
1958 * @dir: DMA transfer direction (same as passed to dma_map_page)
1959 *
1960 * IOMMU aware version of arm_dma_unmap_page()
1961 */
1962 static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
1963 size_t size, enum dma_data_direction dir, unsigned long attrs)
1964 {
1965 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1966 dma_addr_t iova = handle & PAGE_MASK;
1967 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
1968 int offset = handle & ~PAGE_MASK;
1969 int len = PAGE_ALIGN(size + offset);
1970
1971 if (!iova)
1972 return;
1973
1974 if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
1975 __dma_page_dev_to_cpu(page, offset, size, dir);
1976
1977 iommu_unmap(mapping->domain, iova, len);
1978 __free_iova(mapping, iova, len);
1979 }
1980
1981 /**
1982 * arm_iommu_map_resource - map a device resource for DMA
1983 * @dev: valid struct device pointer
1984 * @phys_addr: physical address of resource
1985 * @size: size of resource to map
1986 * @dir: DMA transfer direction
1987 */
1988 static dma_addr_t arm_iommu_map_resource(struct device *dev,
1989 phys_addr_t phys_addr, size_t size,
1990 enum dma_data_direction dir, unsigned long attrs)
1991 {
1992 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1993 dma_addr_t dma_addr;
1994 int ret, prot;
1995 phys_addr_t addr = phys_addr & PAGE_MASK;
1996 unsigned int offset = phys_addr & ~PAGE_MASK;
1997 size_t len = PAGE_ALIGN(size + offset);
1998
1999 dma_addr = __alloc_iova(mapping, len);
2000 if (dma_addr == DMA_MAPPING_ERROR)
2001 return dma_addr;
2002
2003 prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
2004
2005 ret = iommu_map(mapping->domain, dma_addr, addr, len, prot);
2006 if (ret < 0)
2007 goto fail;
2008
2009 return dma_addr + offset;
2010 fail:
2011 __free_iova(mapping, dma_addr, len);
2012 return DMA_MAPPING_ERROR;
2013 }
2014
2015 /**
2016 * arm_iommu_unmap_resource - unmap a device DMA resource
2017 * @dev: valid struct device pointer
2018 * @dma_handle: DMA address of resource
2019 * @size: size of resource to unmap (same as was passed when mapping)
2020 * @dir: DMA transfer direction
2021 */
2022 static void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle,
2023 size_t size, enum dma_data_direction dir,
2024 unsigned long attrs)
2025 {
2026 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
2027 dma_addr_t iova = dma_handle & PAGE_MASK;
2028 unsigned int offset = dma_handle & ~PAGE_MASK;
2029 size_t len = PAGE_ALIGN(size + offset);
2030
2031 if (!iova)
2032 return;
2033
2034 iommu_unmap(mapping->domain, iova, len);
2035 __free_iova(mapping, iova, len);
2036 }
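/*
 * The two resource helpers above back dma_map_resource()/dma_unmap_resource()
 * for attached devices; this is typically used to make another device's MMIO
 * region (e.g. a peripheral FIFO register) visible behind the IOMMU.  A
 * minimal, illustrative sketch (the physical address and length below are
 * made up, not taken from this file):
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_resource(dev, 0x4a000000, SZ_4K,
 *			       DMA_BIDIRECTIONAL, 0);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	// ... program the peripheral's DMA engine with 'dma' ...
 *	dma_unmap_resource(dev, dma, SZ_4K, DMA_BIDIRECTIONAL, 0);
 */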
2037
2038 static void arm_iommu_sync_single_for_cpu(struct device *dev,
2039 dma_addr_t handle, size_t size, enum dma_data_direction dir)
2040 {
2041 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
2042 dma_addr_t iova = handle & PAGE_MASK;
2043 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
2044 unsigned int offset = handle & ~PAGE_MASK;
2045
2046 if (!iova)
2047 return;
2048
2049 __dma_page_dev_to_cpu(page, offset, size, dir);
2050 }
2051
2052 static void arm_iommu_sync_single_for_device(struct device *dev,
2053 dma_addr_t handle, size_t size, enum dma_data_direction dir)
2054 {
2055 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
2056 dma_addr_t iova = handle & PAGE_MASK;
2057 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
2058 unsigned int offset = handle & ~PAGE_MASK;
2059
2060 if (!iova)
2061 return;
2062
2063 __dma_page_cpu_to_dev(page, offset, size, dir);
2064 }
2065
2066 const struct dma_map_ops iommu_ops = {
2067 .alloc = arm_iommu_alloc_attrs,
2068 .free = arm_iommu_free_attrs,
2069 .mmap = arm_iommu_mmap_attrs,
2070 .get_sgtable = arm_iommu_get_sgtable,
2071
2072 .map_page = arm_iommu_map_page,
2073 .unmap_page = arm_iommu_unmap_page,
2074 .sync_single_for_cpu = arm_iommu_sync_single_for_cpu,
2075 .sync_single_for_device = arm_iommu_sync_single_for_device,
2076
2077 .map_sg = arm_iommu_map_sg,
2078 .unmap_sg = arm_iommu_unmap_sg,
2079 .sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu,
2080 .sync_sg_for_device = arm_iommu_sync_sg_for_device,
2081
2082 .map_resource = arm_iommu_map_resource,
2083 .unmap_resource = arm_iommu_unmap_resource,
2084
2085 .dma_supported = arm_dma_supported,
2086 };
2087
2088 const struct dma_map_ops iommu_coherent_ops = {
2089 .alloc = arm_coherent_iommu_alloc_attrs,
2090 .free = arm_coherent_iommu_free_attrs,
2091 .mmap = arm_coherent_iommu_mmap_attrs,
2092 .get_sgtable = arm_iommu_get_sgtable,
2093
2094 .map_page = arm_coherent_iommu_map_page,
2095 .unmap_page = arm_coherent_iommu_unmap_page,
2096
2097 .map_sg = arm_coherent_iommu_map_sg,
2098 .unmap_sg = arm_coherent_iommu_unmap_sg,
2099
2100 .map_resource = arm_iommu_map_resource,
2101 .unmap_resource = arm_iommu_unmap_resource,
2102
2103 .dma_supported = arm_dma_supported,
2104 };
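/*
 * Drivers do not call these ops tables directly.  Once a device has been
 * attached to an IOMMU mapping (see arm_iommu_attach_device() below), the
 * generic DMA API dispatches through the device's dma_map_ops, so ordinary
 * streaming mappings end up in the functions above.  A minimal sketch,
 * assuming a hypothetical driver with an already-attached 'dev':
 *
 *	void *buf = kmalloc(SZ_4K, GFP_KERNEL);
 *	dma_addr_t dma;
 *
 *	dma = dma_map_single(dev, buf, SZ_4K, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		goto err;		// arm_iommu_map_page() failed
 *	// ... start the transfer using 'dma' ...
 *	dma_unmap_single(dev, dma, SZ_4K, DMA_TO_DEVICE);
 */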
2105
2106 /**
2107 * arm_iommu_create_mapping
2108 * @bus: pointer to the bus holding the client device (for IOMMU calls)
2109 * @base: start address of the valid IO address space
2110 * @size: maximum size of the valid IO address space
2111 *
2112 * Creates a mapping structure which holds information about used/unused
2113 * IO address ranges; this is required to perform memory allocation and
2114 * mapping with the IOMMU aware functions.
2115 *
2116 * The client device needs to be attached to the mapping with the
2117 * arm_iommu_attach_device() function.
2118 */
2119 struct dma_iommu_mapping *
2120 arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size)
2121 {
2122 unsigned int bits = size >> PAGE_SHIFT;
2123 unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
2124 struct dma_iommu_mapping *mapping;
2125 int extensions = 1;
2126 int err = -ENOMEM;
2127
2128 /* currently only 32-bit DMA address space is supported */
2129 if (size > DMA_BIT_MASK(32) + 1)
2130 return ERR_PTR(-ERANGE);
2131
2132 if (!bitmap_size)
2133 return ERR_PTR(-EINVAL);
2134
2135 if (bitmap_size > PAGE_SIZE) {
2136 extensions = bitmap_size / PAGE_SIZE;
2137 bitmap_size = PAGE_SIZE;
2138 }
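/*
 * Worked example of the sizing above (illustrative, assuming 4 KiB pages
 * and 32-bit longs): a 1 GiB IO space needs 0x40000000 >> 12 = 262144 bits,
 * i.e. a 32 KiB bitmap.  That exceeds PAGE_SIZE, so it is split into
 * extensions = 8 single-page bitmaps; only the first one is allocated
 * below, the rest are allocated on demand by extend_iommu_mapping().
 */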
2139
2140 mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
2141 if (!mapping)
2142 goto err;
2143
2144 mapping->bitmap_size = bitmap_size;
2145 mapping->bitmaps = kcalloc(extensions, sizeof(unsigned long *),
2146 GFP_KERNEL);
2147 if (!mapping->bitmaps)
2148 goto err2;
2149
2150 mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
2151 if (!mapping->bitmaps[0])
2152 goto err3;
2153
2154 mapping->nr_bitmaps = 1;
2155 mapping->extensions = extensions;
2156 mapping->base = base;
2157 mapping->bits = BITS_PER_BYTE * bitmap_size;
2158
2159 spin_lock_init(&mapping->lock);
2160
2161 mapping->domain = iommu_domain_alloc(bus);
2162 if (!mapping->domain)
2163 goto err4;
2164
2165 kref_init(&mapping->kref);
2166 return mapping;
2167 err4:
2168 kfree(mapping->bitmaps[0]);
2169 err3:
2170 kfree(mapping->bitmaps);
2171 err2:
2172 kfree(mapping);
2173 err:
2174 return ERR_PTR(err);
2175 }
2176 EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
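/*
 * A minimal usage sketch for this exported interface (the bus type, base
 * address, size and 'dev' below are illustrative, not taken from this file):
 *
 *	struct dma_iommu_mapping *mapping;
 *	int ret;
 *
 *	mapping = arm_iommu_create_mapping(&platform_bus_type,
 *					   0x10000000, SZ_256M);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *
 *	ret = arm_iommu_attach_device(dev, mapping);
 *	if (ret) {
 *		arm_iommu_release_mapping(mapping);
 *		return ret;
 *	}
 *	// From here on, the generic DMA API for 'dev' goes through iommu_ops.
 */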
2177
2178 static void release_iommu_mapping(struct kref *kref)
2179 {
2180 int i;
2181 struct dma_iommu_mapping *mapping =
2182 container_of(kref, struct dma_iommu_mapping, kref);
2183
2184 iommu_domain_free(mapping->domain);
2185 for (i = 0; i < mapping->nr_bitmaps; i++)
2186 kfree(mapping->bitmaps[i]);
2187 kfree(mapping->bitmaps);
2188 kfree(mapping);
2189 }
2190
2191 static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
2192 {
2193 int next_bitmap;
2194
2195 if (mapping->nr_bitmaps >= mapping->extensions)
2196 return -EINVAL;
2197
2198 next_bitmap = mapping->nr_bitmaps;
2199 mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
2200 GFP_ATOMIC);
2201 if (!mapping->bitmaps[next_bitmap])
2202 return -ENOMEM;
2203
2204 mapping->nr_bitmaps++;
2205
2206 return 0;
2207 }
2208
2209 void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
2210 {
2211 if (mapping)
2212 kref_put(&mapping->kref, release_iommu_mapping);
2213 }
2214 EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
2215
2216 static int __arm_iommu_attach_device(struct device *dev,
2217 struct dma_iommu_mapping *mapping)
2218 {
2219 int err;
2220
2221 err = iommu_attach_device(mapping->domain, dev);
2222 if (err)
2223 return err;
2224
2225 kref_get(&mapping->kref);
2226 to_dma_iommu_mapping(dev) = mapping;
2227
2228 pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
2229 return 0;
2230 }
2231
2232 /**
2233 * arm_iommu_attach_device
2234 * @dev: valid struct device pointer
2235 * @mapping: io address space mapping structure (returned from
2236 * arm_iommu_create_mapping)
2237 *
2238 * Attaches the specified IO address space mapping to the provided device.
2239 * This replaces the DMA operations (the dma_map_ops pointer) with the
2240 * IOMMU aware version.
2241 *
2242 * More than one client might be attached to the same io address space
2243 * mapping.
2244 */
2245 int arm_iommu_attach_device(struct device *dev,
2246 struct dma_iommu_mapping *mapping)
2247 {
2248 int err;
2249
2250 err = __arm_iommu_attach_device(dev, mapping);
2251 if (err)
2252 return err;
2253
2254 set_dma_ops(dev, &iommu_ops);
2255 return 0;
2256 }
2257 EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
2258
2259 /**
2260 * arm_iommu_detach_device
2261 * @dev: valid struct device pointer
2262 *
2263 * Detaches the provided device from a previously attached mapping.
2264 * This restores the appropriate non-IOMMU dma_ops pointer for the device.
2265 */
2266 void arm_iommu_detach_device(struct device *dev)
2267 {
2268 struct dma_iommu_mapping *mapping;
2269
2270 mapping = to_dma_iommu_mapping(dev);
2271 if (!mapping) {
2272 dev_warn(dev, "Not attached\n");
2273 return;
2274 }
2275
2276 iommu_detach_device(mapping->domain, dev);
2277 kref_put(&mapping->kref, release_iommu_mapping);
2278 to_dma_iommu_mapping(dev) = NULL;
2279 set_dma_ops(dev, arm_get_dma_map_ops(dev->archdata.dma_coherent));
2280
2281 pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
2282 }
2283 EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
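/*
 * Teardown is the mirror image of the attach sequence above: detach the
 * device first, then drop the creator's mapping reference, e.g. (sketch
 * only, for a mapping the caller created itself):
 *
 *	arm_iommu_detach_device(dev);
 *	arm_iommu_release_mapping(mapping);
 *
 * This is the same order used by arm_teardown_iommu_dma_ops() below for
 * mappings set up through arch_setup_dma_ops().
 */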
2284
2285 static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
2286 {
2287 return coherent ? &iommu_coherent_ops : &iommu_ops;
2288 }
2289
2290 static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
2291 const struct iommu_ops *iommu)
2292 {
2293 struct dma_iommu_mapping *mapping;
2294
2295 if (!iommu)
2296 return false;
2297
2298 mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
2299 if (IS_ERR(mapping)) {
2300 pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
2301 size, dev_name(dev));
2302 return false;
2303 }
2304
2305 if (__arm_iommu_attach_device(dev, mapping)) {
2306 pr_warn("Failed to attach device %s to IOMMU mapping\n",
2307 dev_name(dev));
2308 arm_iommu_release_mapping(mapping);
2309 return false;
2310 }
2311
2312 return true;
2313 }
2314
2315 static void arm_teardown_iommu_dma_ops(struct device *dev)
2316 {
2317 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
2318
2319 if (!mapping)
2320 return;
2321
2322 arm_iommu_detach_device(dev);
2323 arm_iommu_release_mapping(mapping);
2324 }
2325
2326 #else
2327
2328 static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
2329 const struct iommu_ops *iommu)
2330 {
2331 return false;
2332 }
2333
2334 static void arm_teardown_iommu_dma_ops(struct device *dev) { }
2335
2336 #define arm_get_iommu_dma_map_ops arm_get_dma_map_ops
2337
2338 #endif /* CONFIG_ARM_DMA_USE_IOMMU */
2339
2340 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
2341 const struct iommu_ops *iommu, bool coherent)
2342 {
2343 const struct dma_map_ops *dma_ops;
2344
2345 dev->archdata.dma_coherent = coherent;
2346 #ifdef CONFIG_SWIOTLB
2347 dev->dma_coherent = coherent;
2348 #endif
2349
2350 /*
2351 * Don't override the dma_ops if they have already been set. Ideally
2352 * this should be the only location where dma_ops are set; remove this
2353 * check once all other callers of set_dma_ops() have disappeared.
2354 */
2355 if (dev->dma_ops)
2356 return;
2357
2358 if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
2359 dma_ops = arm_get_iommu_dma_map_ops(coherent);
2360 else
2361 dma_ops = arm_get_dma_map_ops(coherent);
2362
2363 set_dma_ops(dev, dma_ops);
2364
2365 #ifdef CONFIG_XEN
2366 if (xen_initial_domain()) {
2367 dev->archdata.dev_dma_ops = dev->dma_ops;
2368 dev->dma_ops = xen_dma_ops;
2369 }
2370 #endif
2371 dev->archdata.dma_ops_setup = true;
2372 }
2373
2374 void arch_teardown_dma_ops(struct device *dev)
2375 {
2376 if (!dev->archdata.dma_ops_setup)
2377 return;
2378
2379 arm_teardown_iommu_dma_ops(dev);
2380 /* Let arch_setup_dma_ops() start again from scratch upon re-probe */
2381 set_dma_ops(dev, NULL);
2382 }
2383
2384 #ifdef CONFIG_SWIOTLB
2385 void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
2386 size_t size, enum dma_data_direction dir)
2387 {
2388 __dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
2389 size, dir);
2390 }
2391
2392 void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
2393 size_t size, enum dma_data_direction dir)
2394 {
2395 __dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
2396 size, dir);
2397 }
2398
2399 long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
2400 dma_addr_t dma_addr)
2401 {
2402 return dma_to_pfn(dev, dma_addr);
2403 }
2404
2405 pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
2406 unsigned long attrs)
2407 {
2408 if (!dev_is_dma_coherent(dev))
2409 return __get_dma_pgprot(attrs, prot);
2410 return prot;
2411 }
2412
2413 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
2414 gfp_t gfp, unsigned long attrs)
2415 {
2416 return __dma_alloc(dev, size, dma_handle, gfp,
2417 __get_dma_pgprot(attrs, PAGE_KERNEL), false,
2418 attrs, __builtin_return_address(0));
2419 }
2420
2421 void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
2422 dma_addr_t dma_handle, unsigned long attrs)
2423 {
2424 __arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false);
2425 }
2426 #endif /* CONFIG_SWIOTLB */