1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * linux/arch/arm/mm/dma-mapping.c
4 *
5 * Copyright (C) 2000-2004 Russell King
6 *
7 * DMA uncached mapping support.
8 */
9 #include <linux/module.h>
10 #include <linux/mm.h>
11 #include <linux/genalloc.h>
12 #include <linux/gfp.h>
13 #include <linux/errno.h>
14 #include <linux/list.h>
15 #include <linux/init.h>
16 #include <linux/device.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/dma-contiguous.h>
19 #include <linux/highmem.h>
20 #include <linux/memblock.h>
21 #include <linux/slab.h>
22 #include <linux/iommu.h>
23 #include <linux/io.h>
24 #include <linux/vmalloc.h>
25 #include <linux/sizes.h>
26 #include <linux/cma.h>
27
28 #include <asm/memory.h>
29 #include <asm/highmem.h>
30 #include <asm/cacheflush.h>
31 #include <asm/tlbflush.h>
32 #include <asm/mach/arch.h>
33 #include <asm/dma-iommu.h>
34 #include <asm/mach/map.h>
35 #include <asm/system_info.h>
36 #include <asm/dma-contiguous.h>
37
38 #include "dma.h"
39 #include "mm.h"
40
41 struct arm_dma_alloc_args {
42 struct device *dev;
43 size_t size;
44 gfp_t gfp;
45 pgprot_t prot;
46 const void *caller;
47 bool want_vaddr;
48 int coherent_flag;
49 };
50
51 struct arm_dma_free_args {
52 struct device *dev;
53 size_t size;
54 void *cpu_addr;
55 struct page *page;
56 bool want_vaddr;
57 };
58
59 #define NORMAL 0
60 #define COHERENT 1
61
62 struct arm_dma_allocator {
63 void *(*alloc)(struct arm_dma_alloc_args *args,
64 struct page **ret_page);
65 void (*free)(struct arm_dma_free_args *args);
66 };
67
68 struct arm_dma_buffer {
69 struct list_head list;
70 void *virt;
71 struct arm_dma_allocator *allocator;
72 };
73
74 static LIST_HEAD(arm_dma_bufs);
75 static DEFINE_SPINLOCK(arm_dma_bufs_lock);
76
77 static struct arm_dma_buffer *arm_dma_buffer_find(void *virt)
78 {
79 struct arm_dma_buffer *buf, *found = NULL;
80 unsigned long flags;
81
82 spin_lock_irqsave(&arm_dma_bufs_lock, flags);
83 list_for_each_entry(buf, &arm_dma_bufs, list) {
84 if (buf->virt == virt) {
85 list_del(&buf->list);
86 found = buf;
87 break;
88 }
89 }
90 spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
91 return found;
92 }
93
94 /*
95 * The DMA API is built upon the notion of "buffer ownership". A buffer
96 * is either exclusively owned by the CPU (and therefore may be accessed
97 * by it) or exclusively owned by the DMA device. These helper functions
98 * represent the transitions between these two ownership states.
99 *
100 * Note, however, that on later ARMs, this notion does not work due to
101 * speculative prefetches. We model our approach on the assumption that
102 * the CPU does do speculative prefetches, which means we clean caches
103 * before transfers and delay cache invalidation until transfer completion.
104 *
105 */
106 static void __dma_page_cpu_to_dev(struct page *, unsigned long,
107 size_t, enum dma_data_direction);
108 static void __dma_page_dev_to_cpu(struct page *, unsigned long,
109 size_t, enum dma_data_direction);
110
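/*
 * Illustrative sketch (not part of the original file): how a driver sees
 * the ownership hand-off described above. "dev" and "page" stand for a
 * hypothetical device and buffer page.
 *
 * dma_addr_t dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
 * if (dma_mapping_error(dev, dma))
 * return -ENOMEM;
 * (the device now owns the buffer; start the transfer)
 * dma_unmap_page(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
 * (the CPU owns the buffer again and may read the data)
 */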
111 /**
112 * arm_dma_map_page - map a portion of a page for streaming DMA
113 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
114 * @page: page that buffer resides in
115 * @offset: offset into page for start of buffer
116 * @size: size of buffer to map
117 * @dir: DMA transfer direction
118 *
119 * Ensure that any data held in the cache is appropriately discarded
120 * or written back.
121 *
122 * The device owns this memory once this call has completed. The CPU
123 * can regain ownership by calling dma_unmap_page().
124 */
125 static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
126 unsigned long offset, size_t size, enum dma_data_direction dir,
127 unsigned long attrs)
128 {
129 if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
130 __dma_page_cpu_to_dev(page, offset, size, dir);
131 return pfn_to_dma(dev, page_to_pfn(page)) + offset;
132 }
133
134 static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
135 unsigned long offset, size_t size, enum dma_data_direction dir,
136 unsigned long attrs)
137 {
138 return pfn_to_dma(dev, page_to_pfn(page)) + offset;
139 }
140
141 /**
142 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
143 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
144 * @handle: DMA address of buffer
145 * @size: size of buffer (same as passed to dma_map_page)
146 * @dir: DMA transfer direction (same as passed to dma_map_page)
147 *
148 * Unmap a single page's streaming mode DMA translation. The handle and size
149 * must match what was provided in the previous dma_map_page() call.
150 * All other usages are undefined.
151 *
152 * After this call, reads by the CPU to the buffer are guaranteed to see
153 * whatever the device wrote there.
154 */
155 static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
156 size_t size, enum dma_data_direction dir, unsigned long attrs)
157 {
158 if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
159 __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
160 handle & ~PAGE_MASK, size, dir);
161 }
162
163 static void arm_dma_sync_single_for_cpu(struct device *dev,
164 dma_addr_t handle, size_t size, enum dma_data_direction dir)
165 {
166 unsigned int offset = handle & (PAGE_SIZE - 1);
167 struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
168 __dma_page_dev_to_cpu(page, offset, size, dir);
169 }
170
171 static void arm_dma_sync_single_for_device(struct device *dev,
172 dma_addr_t handle, size_t size, enum dma_data_direction dir)
173 {
174 unsigned int offset = handle & (PAGE_SIZE - 1);
175 struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
176 __dma_page_cpu_to_dev(page, offset, size, dir);
177 }
178
179 const struct dma_map_ops arm_dma_ops = {
180 .alloc = arm_dma_alloc,
181 .free = arm_dma_free,
182 .mmap = arm_dma_mmap,
183 .get_sgtable = arm_dma_get_sgtable,
184 .map_page = arm_dma_map_page,
185 .unmap_page = arm_dma_unmap_page,
186 .map_sg = arm_dma_map_sg,
187 .unmap_sg = arm_dma_unmap_sg,
188 .map_resource = dma_direct_map_resource,
189 .sync_single_for_cpu = arm_dma_sync_single_for_cpu,
190 .sync_single_for_device = arm_dma_sync_single_for_device,
191 .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
192 .sync_sg_for_device = arm_dma_sync_sg_for_device,
193 .dma_supported = arm_dma_supported,
194 };
195 EXPORT_SYMBOL(arm_dma_ops);
196
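/*
 * Hedged sketch of how this ops table is reached: for a device using
 * arm_dma_ops, a streaming mapping request from a driver is dispatched
 * through the generic DMA API (include/linux/dma-mapping.h) roughly as:
 *
 * const struct dma_map_ops *ops = get_dma_ops(dev);
 * dma_addr_t dma = ops->map_page(dev, page, offset, size, dir, attrs);
 *
 * i.e. it lands in arm_dma_map_page() above.
 */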
197 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
198 dma_addr_t *handle, gfp_t gfp, unsigned long attrs);
199 static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
200 dma_addr_t handle, unsigned long attrs);
201 static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
202 void *cpu_addr, dma_addr_t dma_addr, size_t size,
203 unsigned long attrs);
204
205 const struct dma_map_ops arm_coherent_dma_ops = {
206 .alloc = arm_coherent_dma_alloc,
207 .free = arm_coherent_dma_free,
208 .mmap = arm_coherent_dma_mmap,
209 .get_sgtable = arm_dma_get_sgtable,
210 .map_page = arm_coherent_dma_map_page,
211 .map_sg = arm_dma_map_sg,
212 .map_resource = dma_direct_map_resource,
213 .dma_supported = arm_dma_supported,
214 };
215 EXPORT_SYMBOL(arm_coherent_dma_ops);
216
217 static int __dma_supported(struct device *dev, u64 mask, bool warn)
218 {
219 unsigned long max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
220
221 /*
222 * Translate the device's DMA mask to a PFN limit. This
223 * PFN limit is inclusive of the highest page we can DMA to.
224 */
225 if (dma_to_pfn(dev, mask) < max_dma_pfn) {
226 if (warn)
227 dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
228 mask,
229 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
230 max_dma_pfn + 1);
231 return 0;
232 }
233
234 return 1;
235 }
236
237 static u64 get_coherent_dma_mask(struct device *dev)
238 {
239 u64 mask = (u64)DMA_BIT_MASK(32);
240
241 if (dev) {
242 mask = dev->coherent_dma_mask;
243
244 /*
245 * Sanity check the DMA mask - it must be non-zero, and
246 * it must be satisfiable by a DMA allocation.
247 */
248 if (mask == 0) {
249 dev_warn(dev, "coherent DMA mask is unset\n");
250 return 0;
251 }
252
253 if (!__dma_supported(dev, mask, true))
254 return 0;
255 }
256
257 return mask;
258 }
259
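/*
 * Illustrative sketch (assumption, not from this file): the coherent mask
 * checked above is normally set by the bus code or by the driver itself,
 * e.g. during probe:
 *
 * if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 * return -EIO;
 *
 * An unset or unsatisfiable mask makes get_coherent_dma_mask() return 0,
 * and the allocation paths below bail out.
 */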
260 static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
261 {
262 /*
263 * Ensure that the allocated pages are zeroed, and that any data
264 * lurking in the kernel direct-mapped region is invalidated.
265 */
266 if (PageHighMem(page)) {
267 phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
268 phys_addr_t end = base + size;
269 while (size > 0) {
270 void *ptr = kmap_atomic(page);
271 memset(ptr, 0, PAGE_SIZE);
272 if (coherent_flag != COHERENT)
273 dmac_flush_range(ptr, ptr + PAGE_SIZE);
274 kunmap_atomic(ptr);
275 page++;
276 size -= PAGE_SIZE;
277 }
278 if (coherent_flag != COHERENT)
279 outer_flush_range(base, end);
280 } else {
281 void *ptr = page_address(page);
282 memset(ptr, 0, size);
283 if (coherent_flag != COHERENT) {
284 dmac_flush_range(ptr, ptr + size);
285 outer_flush_range(__pa(ptr), __pa(ptr) + size);
286 }
287 }
288 }
289
290 /*
291 * Allocate a DMA buffer for 'dev' of size 'size' using the
292 * specified gfp mask. Note that 'size' must be page aligned.
293 */
294 static struct page *__dma_alloc_buffer(struct device *dev, size_t size,
295 gfp_t gfp, int coherent_flag)
296 {
297 unsigned long order = get_order(size);
298 struct page *page, *p, *e;
299
300 page = alloc_pages(gfp, order);
301 if (!page)
302 return NULL;
303
304 /*
305 * Now split the huge page and free the excess pages
306 */
307 split_page(page, order);
308 for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
309 __free_page(p);
310
311 __dma_clear_buffer(page, size, coherent_flag);
312
313 return page;
314 }
315
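/*
 * Worked example of the split_page() trick above (assuming 4 KiB pages):
 * a 12 KiB request gives get_order(12K) == 2, so alloc_pages() returns a
 * 16 KiB (4-page) block; split_page() turns it into four order-0 pages and
 * the loop frees the fourth, leaving exactly the three pages we need.
 */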
316 /*
317 * Free a DMA buffer. 'size' must be page aligned.
318 */
319 static void __dma_free_buffer(struct page *page, size_t size)
320 {
321 struct page *e = page + (size >> PAGE_SHIFT);
322
323 while (page < e) {
324 __free_page(page);
325 page++;
326 }
327 }
328
329 static void *__alloc_from_contiguous(struct device *dev, size_t size,
330 pgprot_t prot, struct page **ret_page,
331 const void *caller, bool want_vaddr,
332 int coherent_flag, gfp_t gfp);
333
334 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
335 pgprot_t prot, struct page **ret_page,
336 const void *caller, bool want_vaddr);
337
338 static void *
339 __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
340 const void *caller)
341 {
342 /*
343 * DMA allocations can be mapped to user space, so let's
344 * set the VM_USERMAP flag too.
345 */
346 return dma_common_contiguous_remap(page, size,
347 VM_ARM_DMA_CONSISTENT | VM_USERMAP,
348 prot, caller);
349 }
350
351 static void __dma_free_remap(void *cpu_addr, size_t size)
352 {
353 dma_common_free_remap(cpu_addr, size,
354 VM_ARM_DMA_CONSISTENT | VM_USERMAP);
355 }
356
357 #define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
358 static struct gen_pool *atomic_pool __ro_after_init;
359
360 static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;
361
362 static int __init early_coherent_pool(char *p)
363 {
364 atomic_pool_size = memparse(p, &p);
365 return 0;
366 }
367 early_param("coherent_pool", early_coherent_pool);
368
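/*
 * Example (command-line usage, not code from this file): the pool size can
 * be overridden at boot with e.g. "coherent_pool=1M"; memparse() accepts
 * the usual K/M/G suffixes.
 */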
369 /*
370 * Initialise the coherent pool for atomic allocations.
371 */
372 static int __init atomic_pool_init(void)
373 {
374 pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
375 gfp_t gfp = GFP_KERNEL | GFP_DMA;
376 struct page *page;
377 void *ptr;
378
379 atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
380 if (!atomic_pool)
381 goto out;
382 /*
383 * The atomic pool is only used for non-coherent allocations
384 * so we must pass NORMAL for coherent_flag.
385 */
386 if (dev_get_cma_area(NULL))
387 ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
388 &page, atomic_pool_init, true, NORMAL,
389 GFP_KERNEL);
390 else
391 ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
392 &page, atomic_pool_init, true);
393 if (ptr) {
394 int ret;
395
396 ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
397 page_to_phys(page),
398 atomic_pool_size, -1);
399 if (ret)
400 goto destroy_genpool;
401
402 gen_pool_set_algo(atomic_pool,
403 gen_pool_first_fit_order_align,
404 NULL);
405 pr_info("DMA: preallocated %zu KiB pool for atomic coherent allocations\n",
406 atomic_pool_size / 1024);
407 return 0;
408 }
409
410 destroy_genpool:
411 gen_pool_destroy(atomic_pool);
412 atomic_pool = NULL;
413 out:
414 pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
415 atomic_pool_size / 1024);
416 return -ENOMEM;
417 }
418 /*
419 * CMA is activated by core_initcall, so we must be called after it.
420 */
421 postcore_initcall(atomic_pool_init);
422
423 struct dma_contig_early_reserve {
424 phys_addr_t base;
425 unsigned long size;
426 };
427
428 static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;
429
430 static int dma_mmu_remap_num __initdata;
431
432 void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
433 {
434 dma_mmu_remap[dma_mmu_remap_num].base = base;
435 dma_mmu_remap[dma_mmu_remap_num].size = size;
436 dma_mmu_remap_num++;
437 }
438
439 void __init dma_contiguous_remap(void)
440 {
441 int i;
442 for (i = 0; i < dma_mmu_remap_num; i++) {
443 phys_addr_t start = dma_mmu_remap[i].base;
444 phys_addr_t end = start + dma_mmu_remap[i].size;
445 struct map_desc map;
446 unsigned long addr;
447
448 if (end > arm_lowmem_limit)
449 end = arm_lowmem_limit;
450 if (start >= end)
451 continue;
452
453 map.pfn = __phys_to_pfn(start);
454 map.virtual = __phys_to_virt(start);
455 map.length = end - start;
456 map.type = MT_MEMORY_DMA_READY;
457
458 /*
459 * Clear previous low-memory mapping to ensure that the
460 * TLB does not see any conflicting entries, then flush
461 * the TLB of the old entries before creating new mappings.
462 *
463 * This ensures that any speculatively loaded TLB entries
464 * (even though they may be rare) cannot cause any problems,
465 * and ensures that this code is architecturally compliant.
466 */
467 for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
468 addr += PMD_SIZE)
469 pmd_clear(pmd_off_k(addr));
470
471 flush_tlb_kernel_range(__phys_to_virt(start),
472 __phys_to_virt(end));
473
474 iotable_init(&map, 1);
475 }
476 }
477
478 static int __dma_update_pte(pte_t *pte, unsigned long addr, void *data)
479 {
480 struct page *page = virt_to_page(addr);
481 pgprot_t prot = *(pgprot_t *)data;
482
483 set_pte_ext(pte, mk_pte(page, prot), 0);
484 return 0;
485 }
486
487 static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
488 {
489 unsigned long start = (unsigned long) page_address(page);
490 unsigned long end = start + size;
491
492 apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
493 flush_tlb_kernel_range(start, end);
494 }
495
496 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
497 pgprot_t prot, struct page **ret_page,
498 const void *caller, bool want_vaddr)
499 {
500 struct page *page;
501 void *ptr = NULL;
502 /*
503 * __alloc_remap_buffer is only called when the device is
504 * non-coherent
505 */
506 page = __dma_alloc_buffer(dev, size, gfp, NORMAL);
507 if (!page)
508 return NULL;
509 if (!want_vaddr)
510 goto out;
511
512 ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
513 if (!ptr) {
514 __dma_free_buffer(page, size);
515 return NULL;
516 }
517
518 out:
519 *ret_page = page;
520 return ptr;
521 }
522
523 static void *__alloc_from_pool(size_t size, struct page **ret_page)
524 {
525 unsigned long val;
526 void *ptr = NULL;
527
528 if (!atomic_pool) {
529 WARN(1, "coherent pool not initialised!\n");
530 return NULL;
531 }
532
533 val = gen_pool_alloc(atomic_pool, size);
534 if (val) {
535 phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
536
537 *ret_page = phys_to_page(phys);
538 ptr = (void *)val;
539 }
540
541 return ptr;
542 }
543
544 static bool __in_atomic_pool(void *start, size_t size)
545 {
546 return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
547 }
548
549 static int __free_from_pool(void *start, size_t size)
550 {
551 if (!__in_atomic_pool(start, size))
552 return 0;
553
554 gen_pool_free(atomic_pool, (unsigned long)start, size);
555
556 return 1;
557 }
558
559 static void *__alloc_from_contiguous(struct device *dev, size_t size,
560 pgprot_t prot, struct page **ret_page,
561 const void *caller, bool want_vaddr,
562 int coherent_flag, gfp_t gfp)
563 {
564 unsigned long order = get_order(size);
565 size_t count = size >> PAGE_SHIFT;
566 struct page *page;
567 void *ptr = NULL;
568
569 page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN);
570 if (!page)
571 return NULL;
572
573 __dma_clear_buffer(page, size, coherent_flag);
574
575 if (!want_vaddr)
576 goto out;
577
578 if (PageHighMem(page)) {
579 ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
580 if (!ptr) {
581 dma_release_from_contiguous(dev, page, count);
582 return NULL;
583 }
584 } else {
585 __dma_remap(page, size, prot);
586 ptr = page_address(page);
587 }
588
589 out:
590 *ret_page = page;
591 return ptr;
592 }
593
594 static void __free_from_contiguous(struct device *dev, struct page *page,
595 void *cpu_addr, size_t size, bool want_vaddr)
596 {
597 if (want_vaddr) {
598 if (PageHighMem(page))
599 __dma_free_remap(cpu_addr, size);
600 else
601 __dma_remap(page, size, PAGE_KERNEL);
602 }
603 dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
604 }
605
606 static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
607 {
608 prot = (attrs & DMA_ATTR_WRITE_COMBINE) ?
609 pgprot_writecombine(prot) :
610 pgprot_dmacoherent(prot);
611 return prot;
612 }
613
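/*
 * Hedged usage sketch: a driver that prefers a write-combined mapping over
 * the default DMA-coherent one requests it through the attrs argument,
 * which is what __get_dma_pgprot() above inspects:
 *
 * void *buf = dma_alloc_attrs(dev, size, &dma, GFP_KERNEL,
 * DMA_ATTR_WRITE_COMBINE);
 */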
614 static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
615 struct page **ret_page)
616 {
617 struct page *page;
618 /* __alloc_simple_buffer is only called when the device is coherent */
619 page = __dma_alloc_buffer(dev, size, gfp, COHERENT);
620 if (!page)
621 return NULL;
622
623 *ret_page = page;
624 return page_address(page);
625 }
626
627 static void *simple_allocator_alloc(struct arm_dma_alloc_args *args,
628 struct page **ret_page)
629 {
630 return __alloc_simple_buffer(args->dev, args->size, args->gfp,
631 ret_page);
632 }
633
634 static void simple_allocator_free(struct arm_dma_free_args *args)
635 {
636 __dma_free_buffer(args->page, args->size);
637 }
638
639 static struct arm_dma_allocator simple_allocator = {
640 .alloc = simple_allocator_alloc,
641 .free = simple_allocator_free,
642 };
643
644 static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
645 struct page **ret_page)
646 {
647 return __alloc_from_contiguous(args->dev, args->size, args->prot,
648 ret_page, args->caller,
649 args->want_vaddr, args->coherent_flag,
650 args->gfp);
651 }
652
653 static void cma_allocator_free(struct arm_dma_free_args *args)
654 {
655 __free_from_contiguous(args->dev, args->page, args->cpu_addr,
656 args->size, args->want_vaddr);
657 }
658
659 static struct arm_dma_allocator cma_allocator = {
660 .alloc = cma_allocator_alloc,
661 .free = cma_allocator_free,
662 };
663
664 static void *pool_allocator_alloc(struct arm_dma_alloc_args *args,
665 struct page **ret_page)
666 {
667 return __alloc_from_pool(args->size, ret_page);
668 }
669
670 static void pool_allocator_free(struct arm_dma_free_args *args)
671 {
672 __free_from_pool(args->cpu_addr, args->size);
673 }
674
675 static struct arm_dma_allocator pool_allocator = {
676 .alloc = pool_allocator_alloc,
677 .free = pool_allocator_free,
678 };
679
680 static void *remap_allocator_alloc(struct arm_dma_alloc_args *args,
681 struct page **ret_page)
682 {
683 return __alloc_remap_buffer(args->dev, args->size, args->gfp,
684 args->prot, ret_page, args->caller,
685 args->want_vaddr);
686 }
687
688 static void remap_allocator_free(struct arm_dma_free_args *args)
689 {
690 if (args->want_vaddr)
691 __dma_free_remap(args->cpu_addr, args->size);
692
693 __dma_free_buffer(args->page, args->size);
694 }
695
696 static struct arm_dma_allocator remap_allocator = {
697 .alloc = remap_allocator_alloc,
698 .free = remap_allocator_free,
699 };
700
701 static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
702 gfp_t gfp, pgprot_t prot, bool is_coherent,
703 unsigned long attrs, const void *caller)
704 {
705 u64 mask = get_coherent_dma_mask(dev);
706 struct page *page = NULL;
707 void *addr;
708 bool allowblock, cma;
709 struct arm_dma_buffer *buf;
710 struct arm_dma_alloc_args args = {
711 .dev = dev,
712 .size = PAGE_ALIGN(size),
713 .gfp = gfp,
714 .prot = prot,
715 .caller = caller,
716 .want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
717 .coherent_flag = is_coherent ? COHERENT : NORMAL,
718 };
719
720 #ifdef CONFIG_DMA_API_DEBUG
721 u64 limit = (mask + 1) & ~mask;
722 if (limit && size >= limit) {
723 dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
724 size, mask);
725 return NULL;
726 }
727 #endif
728
729 if (!mask)
730 return NULL;
731
732 buf = kzalloc(sizeof(*buf),
733 gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
734 if (!buf)
735 return NULL;
736
737 if (mask < 0xffffffffULL)
738 gfp |= GFP_DMA;
739
740 /*
741 * The following is a work-around (a.k.a. hack) to prevent pages
742 * with __GFP_COMP from being passed to split_page(), which cannot
743 * handle them. The real problem is that this flag probably
744 * should be 0 on ARM, as it is not supported on this
745 * platform; see CONFIG_HUGETLBFS.
746 */
747 gfp &= ~(__GFP_COMP);
748 args.gfp = gfp;
749
750 *handle = DMA_MAPPING_ERROR;
751 allowblock = gfpflags_allow_blocking(gfp);
752 cma = allowblock ? dev_get_cma_area(dev) : false;
753
754 if (cma)
755 buf->allocator = &cma_allocator;
756 else if (is_coherent)
757 buf->allocator = &simple_allocator;
758 else if (allowblock)
759 buf->allocator = &remap_allocator;
760 else
761 buf->allocator = &pool_allocator;
762
763 addr = buf->allocator->alloc(&args, &page);
764
765 if (page) {
766 unsigned long flags;
767
768 *handle = pfn_to_dma(dev, page_to_pfn(page));
769 buf->virt = args.want_vaddr ? addr : page;
770
771 spin_lock_irqsave(&arm_dma_bufs_lock, flags);
772 list_add(&buf->list, &arm_dma_bufs);
773 spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
774 } else {
775 kfree(buf);
776 }
777
778 return args.want_vaddr ? addr : page;
779 }
780
781 /*
782 * Allocate DMA-coherent memory space and return both the kernel remapped
783 * virtual and bus address for that space.
784 */
785 void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
786 gfp_t gfp, unsigned long attrs)
787 {
788 pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
789
790 return __dma_alloc(dev, size, handle, gfp, prot, false,
791 attrs, __builtin_return_address(0));
792 }
793
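/*
 * Typical driver-side usage (sketch; "dev" is a hypothetical device bound
 * to this implementation):
 *
 * dma_addr_t dma;
 * void *cpu = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);
 * if (!cpu)
 * return -ENOMEM;
 * ...
 * dma_free_coherent(dev, SZ_4K, cpu, dma);
 */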
794 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
795 dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
796 {
797 return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
798 attrs, __builtin_return_address(0));
799 }
800
801 static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
802 void *cpu_addr, dma_addr_t dma_addr, size_t size,
803 unsigned long attrs)
804 {
805 int ret = -ENXIO;
806 unsigned long nr_vma_pages = vma_pages(vma);
807 unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
808 unsigned long pfn = dma_to_pfn(dev, dma_addr);
809 unsigned long off = vma->vm_pgoff;
810
811 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
812 return ret;
813
814 if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
815 ret = remap_pfn_range(vma, vma->vm_start,
816 pfn + off,
817 vma->vm_end - vma->vm_start,
818 vma->vm_page_prot);
819 }
820
821 return ret;
822 }
823
824 /*
825 * Create userspace mapping for the DMA-coherent memory.
826 */
827 static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
828 void *cpu_addr, dma_addr_t dma_addr, size_t size,
829 unsigned long attrs)
830 {
831 return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
832 }
833
834 int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
835 void *cpu_addr, dma_addr_t dma_addr, size_t size,
836 unsigned long attrs)
837 {
838 vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
839 return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
840 }
841
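/*
 * Sketch of how a driver typically reaches arm_dma_mmap() (assumption:
 * "priv" holds the cpu address, dma handle and size from the allocation,
 * and foo_mmap is a hypothetical file operation):
 *
 * static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 * {
 * return dma_mmap_coherent(priv->dev, vma, priv->cpu,
 * priv->dma, priv->size);
 * }
 */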
842 /*
843 * Free a buffer as defined by the above mapping.
844 */
845 static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
846 dma_addr_t handle, unsigned long attrs,
847 bool is_coherent)
848 {
849 struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
850 struct arm_dma_buffer *buf;
851 struct arm_dma_free_args args = {
852 .dev = dev,
853 .size = PAGE_ALIGN(size),
854 .cpu_addr = cpu_addr,
855 .page = page,
856 .want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
857 };
858
859 buf = arm_dma_buffer_find(cpu_addr);
860 if (WARN(!buf, "Freeing invalid buffer %p\n", cpu_addr))
861 return;
862
863 buf->allocator->free(&args);
864 kfree(buf);
865 }
866
867 void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
868 dma_addr_t handle, unsigned long attrs)
869 {
870 __arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
871 }
872
873 static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
874 dma_addr_t handle, unsigned long attrs)
875 {
876 __arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
877 }
878
879 /*
880 * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
881 * that the intention is to allow exporting memory allocated via the
882 * coherent DMA APIs through the dma_buf API, which only accepts a
883 * scatterlist table (struct sg_table). This presents a couple of problems:
884 * 1. Not all memory allocated via the coherent DMA APIs is backed by
885 *    a struct page.
886 * 2. Passing coherent DMA memory into the streaming APIs is not allowed,
887 *    as we will try to flush the memory through a different alias from the
888 *    one actually being used (and the flushes are redundant).
889 */
890 int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
891 void *cpu_addr, dma_addr_t handle, size_t size,
892 unsigned long attrs)
893 {
894 unsigned long pfn = dma_to_pfn(dev, handle);
895 struct page *page;
896 int ret;
897
898 /* If the PFN is not valid, we do not have a struct page */
899 if (!pfn_valid(pfn))
900 return -ENXIO;
901
902 page = pfn_to_page(pfn);
903
904 ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
905 if (unlikely(ret))
906 return ret;
907
908 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
909 return 0;
910 }
911
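/*
 * Hedged example of the exporter-side call that lands here (names other
 * than the DMA API ones are hypothetical):
 *
 * struct sg_table sgt;
 * int ret = dma_get_sgtable(dev, &sgt, cpu_addr, dma_handle, size);
 * if (!ret) {
 * (hand sgt to the dma-buf importer)
 * sg_free_table(&sgt);
 * }
 */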
912 static void dma_cache_maint_page(struct page *page, unsigned long offset,
913 size_t size, enum dma_data_direction dir,
914 void (*op)(const void *, size_t, int))
915 {
916 unsigned long pfn;
917 size_t left = size;
918
919 pfn = page_to_pfn(page) + offset / PAGE_SIZE;
920 offset %= PAGE_SIZE;
921
922 /*
923 * A single sg entry may refer to multiple physically contiguous
924 * pages. But we still need to process highmem pages individually.
925 * If highmem is not configured then the bulk of this loop gets
926 * optimized out.
927 */
928 do {
929 size_t len = left;
930 void *vaddr;
931
932 page = pfn_to_page(pfn);
933
934 if (PageHighMem(page)) {
935 if (len + offset > PAGE_SIZE)
936 len = PAGE_SIZE - offset;
937
938 if (cache_is_vipt_nonaliasing()) {
939 vaddr = kmap_atomic(page);
940 op(vaddr + offset, len, dir);
941 kunmap_atomic(vaddr);
942 } else {
943 vaddr = kmap_high_get(page);
944 if (vaddr) {
945 op(vaddr + offset, len, dir);
946 kunmap_high(page);
947 }
948 }
949 } else {
950 vaddr = page_address(page) + offset;
951 op(vaddr, len, dir);
952 }
953 offset = 0;
954 pfn++;
955 left -= len;
956 } while (left);
957 }
958
959 /*
960 * Make an area consistent for devices.
961 * Note: Drivers should NOT use this function directly, as it will break
962 * platforms with CONFIG_DMABOUNCE.
963 * Use the DMA API instead - see dma-mapping.h (dma_sync_*).
964 */
965 static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
966 size_t size, enum dma_data_direction dir)
967 {
968 phys_addr_t paddr;
969
970 dma_cache_maint_page(page, off, size, dir, dmac_map_area);
971
972 paddr = page_to_phys(page) + off;
973 if (dir == DMA_FROM_DEVICE) {
974 outer_inv_range(paddr, paddr + size);
975 } else {
976 outer_clean_range(paddr, paddr + size);
977 }
978 /* FIXME: non-speculating: flush on bidirectional mappings? */
979 }
980
981 static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
982 size_t size, enum dma_data_direction dir)
983 {
984 phys_addr_t paddr = page_to_phys(page) + off;
985
986 /* FIXME: non-speculating: not required */
987 /* in any case, don't bother invalidating if DMA to device */
988 if (dir != DMA_TO_DEVICE) {
989 outer_inv_range(paddr, paddr + size);
990
991 dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
992 }
993
994 /*
995 * Mark the D-cache clean for these pages to avoid extra flushing.
996 */
997 if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
998 unsigned long pfn;
999 size_t left = size;
1000
1001 pfn = page_to_pfn(page) + off / PAGE_SIZE;
1002 off %= PAGE_SIZE;
1003 if (off) {
1004 pfn++;
1005 left -= PAGE_SIZE - off;
1006 }
1007 while (left >= PAGE_SIZE) {
1008 page = pfn_to_page(pfn++);
1009 set_bit(PG_dcache_clean, &page->flags);
1010 left -= PAGE_SIZE;
1011 }
1012 }
1013 }
1014
1015 /**
1016 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
1017 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
1018 * @sg: list of buffers
1019 * @nents: number of buffers to map
1020 * @dir: DMA transfer direction
1021 *
1022 * Map a set of buffers described by scatterlist in streaming mode for DMA.
1023 * This is the scatter-gather version of the dma_map_single interface.
1024 * Here the scatter gather list elements are each tagged with the
1025 * appropriate dma address and length. They are obtained via
1026 * sg_dma_{address,length}.
1027 *
1028 * Device ownership issues as mentioned for dma_map_single are the same
1029 * here.
1030 */
1031 int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
1032 enum dma_data_direction dir, unsigned long attrs)
1033 {
1034 const struct dma_map_ops *ops = get_dma_ops(dev);
1035 struct scatterlist *s;
1036 int i, j;
1037
1038 for_each_sg(sg, s, nents, i) {
1039 #ifdef CONFIG_NEED_SG_DMA_LENGTH
1040 s->dma_length = s->length;
1041 #endif
1042 s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
1043 s->length, dir, attrs);
1044 if (dma_mapping_error(dev, s->dma_address))
1045 goto bad_mapping;
1046 }
1047 return nents;
1048
1049 bad_mapping:
1050 for_each_sg(sg, s, i, j)
1051 ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
1052 return 0;
1053 }
1054
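/*
 * Driver-side sketch (hedged; "dev", "sgl", "n" and program_hw_descriptor()
 * are placeholders): the mapped addresses and lengths are read back per
 * element after mapping.
 *
 * int i, count = dma_map_sg(dev, sgl, n, DMA_TO_DEVICE);
 * struct scatterlist *s;
 *
 * if (!count)
 * return -EIO;
 * for_each_sg(sgl, s, count, i)
 * program_hw_descriptor(sg_dma_address(s), sg_dma_len(s));
 * ...
 * dma_unmap_sg(dev, sgl, n, DMA_TO_DEVICE);
 */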
1055 /**
1056 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1057 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
1058 * @sg: list of buffers
1059 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
1060 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1061 *
1062 * Unmap a set of streaming mode DMA translations. Again, CPU access
1063 * rules concerning calls here are the same as for dma_unmap_single().
1064 */
1065 void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
1066 enum dma_data_direction dir, unsigned long attrs)
1067 {
1068 const struct dma_map_ops *ops = get_dma_ops(dev);
1069 struct scatterlist *s;
1070
1071 int i;
1072
1073 for_each_sg(sg, s, nents, i)
1074 ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
1075 }
1076
1077 /**
1078 * arm_dma_sync_sg_for_cpu
1079 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
1080 * @sg: list of buffers
1081 * @nents: number of buffers to map (returned from dma_map_sg)
1082 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1083 */
1084 void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
1085 int nents, enum dma_data_direction dir)
1086 {
1087 const struct dma_map_ops *ops = get_dma_ops(dev);
1088 struct scatterlist *s;
1089 int i;
1090
1091 for_each_sg(sg, s, nents, i)
1092 ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
1093 dir);
1094 }
1095
1096 /**
1097 * arm_dma_sync_sg_for_device
1098 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
1099 * @sg: list of buffers
1100 * @nents: number of buffers to map (returned from dma_map_sg)
1101 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1102 */
1103 void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1104 int nents, enum dma_data_direction dir)
1105 {
1106 const struct dma_map_ops *ops = get_dma_ops(dev);
1107 struct scatterlist *s;
1108 int i;
1109
1110 for_each_sg(sg, s, nents, i)
1111 ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
1112 dir);
1113 }
1114
1115 /*
1116 * Return whether the given device DMA address mask can be supported
1117 * properly. For example, if your device can only drive the low 24 bits
1118 * during bus mastering, then you would pass 0x00ffffff as the mask
1119 * to this function.
1120 */
1121 int arm_dma_supported(struct device *dev, u64 mask)
1122 {
1123 return __dma_supported(dev, mask, false);
1124 }
1125
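/*
 * Example for the 24-bit case mentioned above (sketch): the driver would
 * request such a mask with
 *
 * dma_set_mask_and_coherent(dev, DMA_BIT_MASK(24));
 *
 * and the core would consult arm_dma_supported() to accept or reject it.
 */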
1126 static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
1127 {
1128 return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
1129 }
1130
1131 #ifdef CONFIG_ARM_DMA_USE_IOMMU
1132
1133 static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
1134 {
1135 int prot = 0;
1136
1137 if (attrs & DMA_ATTR_PRIVILEGED)
1138 prot |= IOMMU_PRIV;
1139
1140 switch (dir) {
1141 case DMA_BIDIRECTIONAL:
1142 return prot | IOMMU_READ | IOMMU_WRITE;
1143 case DMA_TO_DEVICE:
1144 return prot | IOMMU_READ;
1145 case DMA_FROM_DEVICE:
1146 return prot | IOMMU_WRITE;
1147 default:
1148 return prot;
1149 }
1150 }
1151
1152 /* IOMMU */
1153
1154 static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
1155
1156 static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
1157 size_t size)
1158 {
1159 unsigned int order = get_order(size);
1160 unsigned int align = 0;
1161 unsigned int count, start;
1162 size_t mapping_size = mapping->bits << PAGE_SHIFT;
1163 unsigned long flags;
1164 dma_addr_t iova;
1165 int i;
1166
1167 if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
1168 order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;
1169
1170 count = PAGE_ALIGN(size) >> PAGE_SHIFT;
1171 align = (1 << order) - 1;
1172
1173 spin_lock_irqsave(&mapping->lock, flags);
1174 for (i = 0; i < mapping->nr_bitmaps; i++) {
1175 start = bitmap_find_next_zero_area(mapping->bitmaps[i],
1176 mapping->bits, 0, count, align);
1177
1178 if (start > mapping->bits)
1179 continue;
1180
1181 bitmap_set(mapping->bitmaps[i], start, count);
1182 break;
1183 }
1184
1185 /*
1186 * No unused range found. Try to extend the existing mapping
1187 * and perform a second attempt to reserve an I/O virtual
1188 * address range of 'size' bytes.
1189 */
1190 if (i == mapping->nr_bitmaps) {
1191 if (extend_iommu_mapping(mapping)) {
1192 spin_unlock_irqrestore(&mapping->lock, flags);
1193 return DMA_MAPPING_ERROR;
1194 }
1195
1196 start = bitmap_find_next_zero_area(mapping->bitmaps[i],
1197 mapping->bits, 0, count, align);
1198
1199 if (start > mapping->bits) {
1200 spin_unlock_irqrestore(&mapping->lock, flags);
1201 return DMA_MAPPING_ERROR;
1202 }
1203
1204 bitmap_set(mapping->bitmaps[i], start, count);
1205 }
1206 spin_unlock_irqrestore(&mapping->lock, flags);
1207
1208 iova = mapping->base + (mapping_size * i);
1209 iova += start << PAGE_SHIFT;
1210
1211 return iova;
1212 }
1213
1214 static inline void __free_iova(struct dma_iommu_mapping *mapping,
1215 dma_addr_t addr, size_t size)
1216 {
1217 unsigned int start, count;
1218 size_t mapping_size = mapping->bits << PAGE_SHIFT;
1219 unsigned long flags;
1220 dma_addr_t bitmap_base;
1221 u32 bitmap_index;
1222
1223 if (!size)
1224 return;
1225
1226 bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size;
1227 BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);
1228
1229 bitmap_base = mapping->base + mapping_size * bitmap_index;
1230
1231 start = (addr - bitmap_base) >> PAGE_SHIFT;
1232
1233 if (addr + size > bitmap_base + mapping_size) {
1234 /*
1235 * The address range to be freed reaches into the iova
1236 * range of the next bitmap. This should not happen as
1237 * we don't allow this in __alloc_iova (at the
1238 * moment).
1239 */
1240 BUG();
1241 } else
1242 count = size >> PAGE_SHIFT;
1243
1244 spin_lock_irqsave(&mapping->lock, flags);
1245 bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
1246 spin_unlock_irqrestore(&mapping->lock, flags);
1247 }
1248
1249 /* We'll try 2M, 1M, 64K, and finally 4K; array must end with 0! */
1250 static const int iommu_order_array[] = { 9, 8, 4, 0 };
1251
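/*
 * For reference (assuming 4 KiB pages): order 9 = 2 MiB, order 8 = 1 MiB,
 * order 4 = 64 KiB and order 0 = 4 KiB, matching the 2M/1M/64K/4K comment
 * above.
 */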
1252 static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
1253 gfp_t gfp, unsigned long attrs,
1254 int coherent_flag)
1255 {
1256 struct page **pages;
1257 int count = size >> PAGE_SHIFT;
1258 int array_size = count * sizeof(struct page *);
1259 int i = 0;
1260 int order_idx = 0;
1261
1262 if (array_size <= PAGE_SIZE)
1263 pages = kzalloc(array_size, GFP_KERNEL);
1264 else
1265 pages = vzalloc(array_size);
1266 if (!pages)
1267 return NULL;
1268
1269 if (attrs & DMA_ATTR_FORCE_CONTIGUOUS)
1270 {
1271 unsigned long order = get_order(size);
1272 struct page *page;
1273
1274 page = dma_alloc_from_contiguous(dev, count, order,
1275 gfp & __GFP_NOWARN);
1276 if (!page)
1277 goto error;
1278
1279 __dma_clear_buffer(page, size, coherent_flag);
1280
1281 for (i = 0; i < count; i++)
1282 pages[i] = page + i;
1283
1284 return pages;
1285 }
1286
1287 /* Go straight to 4K chunks if caller says it's OK. */
1288 if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
1289 order_idx = ARRAY_SIZE(iommu_order_array) - 1;
1290
1291 /*
1292 * The IOMMU can map any pages, so highmem can also be used here.
1293 */
1294 gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
1295
1296 while (count) {
1297 int j, order;
1298
1299 order = iommu_order_array[order_idx];
1300
1301 /* Drop down when we get small */
1302 if (__fls(count) < order) {
1303 order_idx++;
1304 continue;
1305 }
1306
1307 if (order) {
1308 /* See if it's easy to allocate a high-order chunk */
1309 pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);
1310
1311 /* Go down a notch at first sign of pressure */
1312 if (!pages[i]) {
1313 order_idx++;
1314 continue;
1315 }
1316 } else {
1317 pages[i] = alloc_pages(gfp, 0);
1318 if (!pages[i])
1319 goto error;
1320 }
1321
1322 if (order) {
1323 split_page(pages[i], order);
1324 j = 1 << order;
1325 while (--j)
1326 pages[i + j] = pages[i] + j;
1327 }
1328
1329 __dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag);
1330 i += 1 << order;
1331 count -= 1 << order;
1332 }
1333
1334 return pages;
1335 error:
1336 while (i--)
1337 if (pages[i])
1338 __free_pages(pages[i], 0);
1339 kvfree(pages);
1340 return NULL;
1341 }
1342
1343 static int __iommu_free_buffer(struct device *dev, struct page **pages,
1344 size_t size, unsigned long attrs)
1345 {
1346 int count = size >> PAGE_SHIFT;
1347 int i;
1348
1349 if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
1350 dma_release_from_contiguous(dev, pages[0], count);
1351 } else {
1352 for (i = 0; i < count; i++)
1353 if (pages[i])
1354 __free_pages(pages[i], 0);
1355 }
1356
1357 kvfree(pages);
1358 return 0;
1359 }
1360
1361 /*
1362 * Create a CPU mapping for the specified pages.
1363 */
1364 static void *
1365 __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
1366 const void *caller)
1367 {
1368 return dma_common_pages_remap(pages, size,
1369 VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller);
1370 }
1371
1372 /*
1373 * Create a mapping in device I/O address space for the specified pages.
1374 */
1375 static dma_addr_t
1376 __iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
1377 unsigned long attrs)
1378 {
1379 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1380 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
1381 dma_addr_t dma_addr, iova;
1382 int i;
1383
1384 dma_addr = __alloc_iova(mapping, size);
1385 if (dma_addr == DMA_MAPPING_ERROR)
1386 return dma_addr;
1387
1388 iova = dma_addr;
1389 for (i = 0; i < count; ) {
1390 int ret;
1391
1392 unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
1393 phys_addr_t phys = page_to_phys(pages[i]);
1394 unsigned int len, j;
1395
1396 for (j = i + 1; j < count; j++, next_pfn++)
1397 if (page_to_pfn(pages[j]) != next_pfn)
1398 break;
1399
1400 len = (j - i) << PAGE_SHIFT;
1401 ret = iommu_map(mapping->domain, iova, phys, len,
1402 __dma_info_to_prot(DMA_BIDIRECTIONAL, attrs));
1403 if (ret < 0)
1404 goto fail;
1405 iova += len;
1406 i = j;
1407 }
1408 return dma_addr;
1409 fail:
1410 iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
1411 __free_iova(mapping, dma_addr, size);
1412 return DMA_MAPPING_ERROR;
1413 }
1414
1415 static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
1416 {
1417 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1418
1419 /*
1420 * add optional in-page offset from iova to size and align
1421 * result to page size
1422 */
1423 size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
1424 iova &= PAGE_MASK;
1425
1426 iommu_unmap(mapping->domain, iova, size);
1427 __free_iova(mapping, iova, size);
1428 return 0;
1429 }
1430
1431 static struct page **__atomic_get_pages(void *addr)
1432 {
1433 struct page *page;
1434 phys_addr_t phys;
1435
1436 phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
1437 page = phys_to_page(phys);
1438
1439 return (struct page **)page;
1440 }
1441
1442 static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
1443 {
1444 struct vm_struct *area;
1445
1446 if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
1447 return __atomic_get_pages(cpu_addr);
1448
1449 if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
1450 return cpu_addr;
1451
1452 area = find_vm_area(cpu_addr);
1453 if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
1454 return area->pages;
1455 return NULL;
1456 }
1457
1458 static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
1459 dma_addr_t *handle, int coherent_flag,
1460 unsigned long attrs)
1461 {
1462 struct page *page;
1463 void *addr;
1464
1465 if (coherent_flag == COHERENT)
1466 addr = __alloc_simple_buffer(dev, size, gfp, &page);
1467 else
1468 addr = __alloc_from_pool(size, &page);
1469 if (!addr)
1470 return NULL;
1471
1472 *handle = __iommu_create_mapping(dev, &page, size, attrs);
1473 if (*handle == DMA_MAPPING_ERROR)
1474 goto err_mapping;
1475
1476 return addr;
1477
1478 err_mapping:
1479 __free_from_pool(addr, size);
1480 return NULL;
1481 }
1482
1483 static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
1484 dma_addr_t handle, size_t size, int coherent_flag)
1485 {
1486 __iommu_remove_mapping(dev, handle, size);
1487 if (coherent_flag == COHERENT)
1488 __dma_free_buffer(virt_to_page(cpu_addr), size);
1489 else
1490 __free_from_pool(cpu_addr, size);
1491 }
1492
1493 static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
1494 dma_addr_t *handle, gfp_t gfp, unsigned long attrs,
1495 int coherent_flag)
1496 {
1497 pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
1498 struct page **pages;
1499 void *addr = NULL;
1500
1501 *handle = DMA_MAPPING_ERROR;
1502 size = PAGE_ALIGN(size);
1503
1504 if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
1505 return __iommu_alloc_simple(dev, size, gfp, handle,
1506 coherent_flag, attrs);
1507
1508 /*
1509 * The following is a work-around (a.k.a. hack) to prevent pages
1510 * with __GFP_COMP from being passed to split_page(), which cannot
1511 * handle them. The real problem is that this flag probably
1512 * should be 0 on ARM, as it is not supported on this
1513 * platform; see CONFIG_HUGETLBFS.
1514 */
1515 gfp &= ~(__GFP_COMP);
1516
1517 pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag);
1518 if (!pages)
1519 return NULL;
1520
1521 *handle = __iommu_create_mapping(dev, pages, size, attrs);
1522 if (*handle == DMA_MAPPING_ERROR)
1523 goto err_buffer;
1524
1525 if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
1526 return pages;
1527
1528 addr = __iommu_alloc_remap(pages, size, gfp, prot,
1529 __builtin_return_address(0));
1530 if (!addr)
1531 goto err_mapping;
1532
1533 return addr;
1534
1535 err_mapping:
1536 __iommu_remove_mapping(dev, *handle, size);
1537 err_buffer:
1538 __iommu_free_buffer(dev, pages, size, attrs);
1539 return NULL;
1540 }
1541
1542 static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
1543 dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
1544 {
1545 return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL);
1546 }
1547
1548 static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size,
1549 dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
1550 {
1551 return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT);
1552 }
1553
1554 static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
1555 void *cpu_addr, dma_addr_t dma_addr, size_t size,
1556 unsigned long attrs)
1557 {
1558 struct page **pages = __iommu_get_pages(cpu_addr, attrs);
1559 unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
1560 int err;
1561
1562 if (!pages)
1563 return -ENXIO;
1564
1565 if (vma->vm_pgoff >= nr_pages)
1566 return -ENXIO;
1567
1568 err = vm_map_pages(vma, pages, nr_pages);
1569 if (err)
1570 pr_err("Remapping memory failed: %d\n", err);
1571
1572 return err;
1573 }
1574 static int arm_iommu_mmap_attrs(struct device *dev,
1575 struct vm_area_struct *vma, void *cpu_addr,
1576 dma_addr_t dma_addr, size_t size, unsigned long attrs)
1577 {
1578 vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
1579
1580 return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
1581 }
1582
1583 static int arm_coherent_iommu_mmap_attrs(struct device *dev,
1584 struct vm_area_struct *vma, void *cpu_addr,
1585 dma_addr_t dma_addr, size_t size, unsigned long attrs)
1586 {
1587 return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
1588 }
1589
1590 /*
1591 * Free a buffer as defined by the above mapping.
1592 * Must not be called with IRQs disabled.
1593 */
1594 void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
1595 dma_addr_t handle, unsigned long attrs, int coherent_flag)
1596 {
1597 struct page **pages;
1598 size = PAGE_ALIGN(size);
1599
1600 if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) {
1601 __iommu_free_atomic(dev, cpu_addr, handle, size, coherent_flag);
1602 return;
1603 }
1604
1605 pages = __iommu_get_pages(cpu_addr, attrs);
1606 if (!pages) {
1607 WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
1608 return;
1609 }
1610
1611 if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0) {
1612 dma_common_free_remap(cpu_addr, size,
1613 VM_ARM_DMA_CONSISTENT | VM_USERMAP);
1614 }
1615
1616 __iommu_remove_mapping(dev, handle, size);
1617 __iommu_free_buffer(dev, pages, size, attrs);
1618 }
1619
1620 void arm_iommu_free_attrs(struct device *dev, size_t size,
1621 void *cpu_addr, dma_addr_t handle, unsigned long attrs)
1622 {
1623 __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
1624 }
1625
1626 void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
1627 void *cpu_addr, dma_addr_t handle, unsigned long attrs)
1628 {
1629 __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT);
1630 }
1631
1632 static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
1633 void *cpu_addr, dma_addr_t dma_addr,
1634 size_t size, unsigned long attrs)
1635 {
1636 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
1637 struct page **pages = __iommu_get_pages(cpu_addr, attrs);
1638
1639 if (!pages)
1640 return -ENXIO;
1641
1642 return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
1643 GFP_KERNEL);
1644 }
1645
1646 /*
1647 * Map a part of the scatter-gather list into a contiguous I/O address range.
1648 */
1649 static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
1650 size_t size, dma_addr_t *handle,
1651 enum dma_data_direction dir, unsigned long attrs,
1652 bool is_coherent)
1653 {
1654 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1655 dma_addr_t iova, iova_base;
1656 int ret = 0;
1657 unsigned int count;
1658 struct scatterlist *s;
1659 int prot;
1660
1661 size = PAGE_ALIGN(size);
1662 *handle = DMA_MAPPING_ERROR;
1663
1664 iova_base = iova = __alloc_iova(mapping, size);
1665 if (iova == DMA_MAPPING_ERROR)
1666 return -ENOMEM;
1667
1668 for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
1669 phys_addr_t phys = page_to_phys(sg_page(s));
1670 unsigned int len = PAGE_ALIGN(s->offset + s->length);
1671
1672 if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
1673 __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
1674
1675 prot = __dma_info_to_prot(dir, attrs);
1676
1677 ret = iommu_map(mapping->domain, iova, phys, len, prot);
1678 if (ret < 0)
1679 goto fail;
1680 count += len >> PAGE_SHIFT;
1681 iova += len;
1682 }
1683 *handle = iova_base;
1684
1685 return 0;
1686 fail:
1687 iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
1688 __free_iova(mapping, iova_base, size);
1689 return ret;
1690 }
1691
1692 static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
1693 enum dma_data_direction dir, unsigned long attrs,
1694 bool is_coherent)
1695 {
1696 struct scatterlist *s = sg, *dma = sg, *start = sg;
1697 int i, count = 0;
1698 unsigned int offset = s->offset;
1699 unsigned int size = s->offset + s->length;
1700 unsigned int max = dma_get_max_seg_size(dev);
1701
1702 for (i = 1; i < nents; i++) {
1703 s = sg_next(s);
1704
1705 s->dma_address = DMA_MAPPING_ERROR;
1706 s->dma_length = 0;
1707
1708 if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
1709 if (__map_sg_chunk(dev, start, size, &dma->dma_address,
1710 dir, attrs, is_coherent) < 0)
1711 goto bad_mapping;
1712
1713 dma->dma_address += offset;
1714 dma->dma_length = size - offset;
1715
1716 size = offset = s->offset;
1717 start = s;
1718 dma = sg_next(dma);
1719 count += 1;
1720 }
1721 size += s->length;
1722 }
1723 if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
1724 is_coherent) < 0)
1725 goto bad_mapping;
1726
1727 dma->dma_address += offset;
1728 dma->dma_length = size - offset;
1729
1730 return count+1;
1731
1732 bad_mapping:
1733 for_each_sg(sg, s, count, i)
1734 __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
1735 return 0;
1736 }
1737
1738 /**
1739 * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
1740 * @dev: valid struct device pointer
1741 * @sg: list of buffers
1742 * @nents: number of buffers to map
1743 * @dir: DMA transfer direction
1744 *
1745 * Map a set of I/O-coherent buffers described by a scatterlist in streaming
1746 * mode for DMA. The scatter gather list elements are merged together (if
1747 * possible) and tagged with the appropriate dma address and length. They are
1748 * obtained via sg_dma_{address,length}.
1749 */
1750 int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
1751 int nents, enum dma_data_direction dir, unsigned long attrs)
1752 {
1753 return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
1754 }
1755
1756 /**
1757 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
1758 * @dev: valid struct device pointer
1759 * @sg: list of buffers
1760 * @nents: number of buffers to map
1761 * @dir: DMA transfer direction
1762 *
1763 * Map a set of buffers described by scatterlist in streaming mode for DMA.
1764 * The scatter gather list elements are merged together (if possible) and
1765 * tagged with the appropriate dma address and length. They are obtained via
1766 * sg_dma_{address,length}.
1767 */
1768 int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
1769 int nents, enum dma_data_direction dir, unsigned long attrs)
1770 {
1771 return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
1772 }
1773
1774 static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
1775 int nents, enum dma_data_direction dir,
1776 unsigned long attrs, bool is_coherent)
1777 {
1778 struct scatterlist *s;
1779 int i;
1780
1781 for_each_sg(sg, s, nents, i) {
1782 if (sg_dma_len(s))
1783 __iommu_remove_mapping(dev, sg_dma_address(s),
1784 sg_dma_len(s));
1785 if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
1786 __dma_page_dev_to_cpu(sg_page(s), s->offset,
1787 s->length, dir);
1788 }
1789 }
1790
1791 /**
1792 * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1793 * @dev: valid struct device pointer
1794 * @sg: list of buffers
1795 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
1796 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1797 *
1798 * Unmap a set of streaming mode DMA translations. Again, CPU access
1799 * rules concerning calls here are the same as for dma_unmap_single().
1800 */
1801 void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
1802 int nents, enum dma_data_direction dir,
1803 unsigned long attrs)
1804 {
1805 __iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
1806 }
1807
1808 /**
1809 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1810 * @dev: valid struct device pointer
1811 * @sg: list of buffers
1812 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
1813 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1814 *
1815 * Unmap a set of streaming mode DMA translations. Again, CPU access
1816 * rules concerning calls here are the same as for dma_unmap_single().
1817 */
1818 void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
1819 enum dma_data_direction dir,
1820 unsigned long attrs)
1821 {
1822 __iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
1823 }
1824
1825 /**
1826 * arm_iommu_sync_sg_for_cpu
1827 * @dev: valid struct device pointer
1828 * @sg: list of buffers
1829 * @nents: number of buffers to map (returned from dma_map_sg)
1830 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1831 */
1832 void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
1833 int nents, enum dma_data_direction dir)
1834 {
1835 struct scatterlist *s;
1836 int i;
1837
1838 for_each_sg(sg, s, nents, i)
1839 __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
1840
1841 }
1842
1843 /**
1844 * arm_iommu_sync_sg_for_device
1845 * @dev: valid struct device pointer
1846 * @sg: list of buffers
1847 * @nents: number of buffers to map (returned from dma_map_sg)
1848 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1849 */
1850 void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1851 int nents, enum dma_data_direction dir)
1852 {
1853 struct scatterlist *s;
1854 int i;
1855
1856 for_each_sg(sg, s, nents, i)
1857 __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
1858 }
1859
1860
1861 /**
1862 * arm_coherent_iommu_map_page
1863 * @dev: valid struct device pointer
1864 * @page: page that buffer resides in
1865 * @offset: offset into page for start of buffer
1866 * @size: size of buffer to map
1867 * @dir: DMA transfer direction
1868 *
1869 * Coherent IOMMU aware version of arm_dma_map_page()
1870 */
1871 static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
1872 unsigned long offset, size_t size, enum dma_data_direction dir,
1873 unsigned long attrs)
1874 {
1875 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1876 dma_addr_t dma_addr;
1877 int ret, prot, len = PAGE_ALIGN(size + offset);
1878
1879 dma_addr = __alloc_iova(mapping, len);
1880 if (dma_addr == DMA_MAPPING_ERROR)
1881 return dma_addr;
1882
1883 prot = __dma_info_to_prot(dir, attrs);
1884
1885 ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
1886 if (ret < 0)
1887 goto fail;
1888
1889 return dma_addr + offset;
1890 fail:
1891 __free_iova(mapping, dma_addr, len);
1892 return DMA_MAPPING_ERROR;
1893 }
1894
1895 /**
1896 * arm_iommu_map_page
1897 * @dev: valid struct device pointer
1898 * @page: page that buffer resides in
1899 * @offset: offset into page for start of buffer
1900 * @size: size of buffer to map
1901 * @dir: DMA transfer direction
1902 *
1903 * IOMMU aware version of arm_dma_map_page()
1904 */
1905 static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
1906 unsigned long offset, size_t size, enum dma_data_direction dir,
1907 unsigned long attrs)
1908 {
1909 if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
1910 __dma_page_cpu_to_dev(page, offset, size, dir);
1911
1912 return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
1913 }
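
/*
 * Usage sketch (not part of this file): a driver that performs its own cache
 * maintenance can skip the __dma_page_cpu_to_dev() step above by passing
 * DMA_ATTR_SKIP_CPU_SYNC through the generic API.  The helper name and
 * parameters below are hypothetical.
 */
static dma_addr_t example_map_page_no_sync(struct device *dev,
					   struct page *page, size_t len)
{
	dma_addr_t handle;

	/* Dispatches to arm_iommu_map_page(); cache maintenance is skipped. */
	handle = dma_map_page_attrs(dev, page, 0, len, DMA_TO_DEVICE,
				    DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, handle))
		return DMA_MAPPING_ERROR;

	return handle;
}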
1914
1915 /**
1916 * arm_coherent_iommu_unmap_page
1917 * @dev: valid struct device pointer
1918 * @handle: DMA address of buffer
1919 * @size: size of buffer (same as passed to dma_map_page)
1920 * @dir: DMA transfer direction (same as passed to dma_map_page)
1921 *
1922 * Coherent IOMMU aware version of arm_dma_unmap_page()
1923 */
1924 static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
1925 size_t size, enum dma_data_direction dir, unsigned long attrs)
1926 {
1927 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1928 dma_addr_t iova = handle & PAGE_MASK;
1929 int offset = handle & ~PAGE_MASK;
1930 int len = PAGE_ALIGN(size + offset);
1931
1932 if (!iova)
1933 return;
1934
1935 iommu_unmap(mapping->domain, iova, len);
1936 __free_iova(mapping, iova, len);
1937 }
1938
1939 /**
1940 * arm_iommu_unmap_page
1941 * @dev: valid struct device pointer
1942 * @handle: DMA address of buffer
1943 * @size: size of buffer (same as passed to dma_map_page)
1944 * @dir: DMA transfer direction (same as passed to dma_map_page)
1945 *
1946 * IOMMU aware version of arm_dma_unmap_page()
1947 */
1948 static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
1949 size_t size, enum dma_data_direction dir, unsigned long attrs)
1950 {
1951 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1952 dma_addr_t iova = handle & PAGE_MASK;
1953 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
1954 int offset = handle & ~PAGE_MASK;
1955 int len = PAGE_ALIGN(size + offset);
1956
1957 if (!iova)
1958 return;
1959
1960 if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
1961 __dma_page_dev_to_cpu(page, offset, size, dir);
1962
1963 iommu_unmap(mapping->domain, iova, len);
1964 __free_iova(mapping, iova, len);
1965 }
1966
1967 /**
1968 * arm_iommu_map_resource - map a device resource for DMA
1969 * @dev: valid struct device pointer
1970 * @phys_addr: physical address of resource
1971 * @size: size of resource to map
1972 * @dir: DMA transfer direction
1973 */
1974 static dma_addr_t arm_iommu_map_resource(struct device *dev,
1975 phys_addr_t phys_addr, size_t size,
1976 enum dma_data_direction dir, unsigned long attrs)
1977 {
1978 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1979 dma_addr_t dma_addr;
1980 int ret, prot;
1981 phys_addr_t addr = phys_addr & PAGE_MASK;
1982 unsigned int offset = phys_addr & ~PAGE_MASK;
1983 size_t len = PAGE_ALIGN(size + offset);
1984
1985 dma_addr = __alloc_iova(mapping, len);
1986 if (dma_addr == DMA_MAPPING_ERROR)
1987 return dma_addr;
1988
1989 prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
1990
1991 ret = iommu_map(mapping->domain, dma_addr, addr, len, prot);
1992 if (ret < 0)
1993 goto fail;
1994
1995 return dma_addr + offset;
1996 fail:
1997 __free_iova(mapping, dma_addr, len);
1998 return DMA_MAPPING_ERROR;
1999 }
2000
2001 /**
2002 * arm_iommu_unmap_resource - unmap a device DMA resource
2003 * @dev: valid struct device pointer
2004 * @dma_handle: DMA address of the resource
2005 * @size: size of resource to unmap (same as was passed to dma_map_resource)
2006 * @dir: DMA transfer direction
2007 */
2008 static void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle,
2009 size_t size, enum dma_data_direction dir,
2010 unsigned long attrs)
2011 {
2012 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
2013 dma_addr_t iova = dma_handle & PAGE_MASK;
2014 unsigned int offset = dma_handle & ~PAGE_MASK;
2015 size_t len = PAGE_ALIGN(size + offset);
2016
2017 if (!iova)
2018 return;
2019
2020 iommu_unmap(mapping->domain, iova, len);
2021 __free_iova(mapping, iova, len);
2022 }
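
/*
 * Usage sketch (not part of this file): the resource mapping ops above are
 * reached through dma_map_resource()/dma_unmap_resource(), typically to make
 * another device's MMIO FIFO visible to a DMA master behind the IOMMU.  The
 * helper, the physical address argument and the SZ_4K length are hypothetical.
 */
static int example_map_fifo(struct device *dma_dev, phys_addr_t fifo_phys)
{
	dma_addr_t fifo_dma;

	fifo_dma = dma_map_resource(dma_dev, fifo_phys, SZ_4K,
				    DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dma_dev, fifo_dma))
		return -ENOMEM;

	/* ... program the DMA master with fifo_dma and run the transfer ... */

	dma_unmap_resource(dma_dev, fifo_dma, SZ_4K, DMA_BIDIRECTIONAL, 0);
	return 0;
}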
2023
2024 static void arm_iommu_sync_single_for_cpu(struct device *dev,
2025 dma_addr_t handle, size_t size, enum dma_data_direction dir)
2026 {
2027 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
2028 dma_addr_t iova = handle & PAGE_MASK;
2029 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
2030 unsigned int offset = handle & ~PAGE_MASK;
2031
2032 if (!iova)
2033 return;
2034
2035 __dma_page_dev_to_cpu(page, offset, size, dir);
2036 }
2037
2038 static void arm_iommu_sync_single_for_device(struct device *dev,
2039 dma_addr_t handle, size_t size, enum dma_data_direction dir)
2040 {
2041 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
2042 dma_addr_t iova = handle & PAGE_MASK;
2043 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
2044 unsigned int offset = handle & ~PAGE_MASK;
2045
2046 if (!iova)
2047 return;
2048
2049 __dma_page_cpu_to_dev(page, offset, size, dir);
2050 }
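
/*
 * Usage sketch (not part of this file): a driver that reuses a streaming
 * mapping across transfers brackets its CPU accesses with the single-buffer
 * sync calls, which land in the two helpers above.  'handle' and 'len' are
 * whatever dma_map_single()/dma_map_page() returned and mapped.
 */
static void example_single_cpu_touch(struct device *dev, dma_addr_t handle,
				     size_t len)
{
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
	/* ... CPU reads the received data ... */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}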
2051
2052 const struct dma_map_ops iommu_ops = {
2053 .alloc = arm_iommu_alloc_attrs,
2054 .free = arm_iommu_free_attrs,
2055 .mmap = arm_iommu_mmap_attrs,
2056 .get_sgtable = arm_iommu_get_sgtable,
2057
2058 .map_page = arm_iommu_map_page,
2059 .unmap_page = arm_iommu_unmap_page,
2060 .sync_single_for_cpu = arm_iommu_sync_single_for_cpu,
2061 .sync_single_for_device = arm_iommu_sync_single_for_device,
2062
2063 .map_sg = arm_iommu_map_sg,
2064 .unmap_sg = arm_iommu_unmap_sg,
2065 .sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu,
2066 .sync_sg_for_device = arm_iommu_sync_sg_for_device,
2067
2068 .map_resource = arm_iommu_map_resource,
2069 .unmap_resource = arm_iommu_unmap_resource,
2070
2071 .dma_supported = arm_dma_supported,
2072 };
2073
2074 const struct dma_map_ops iommu_coherent_ops = {
2075 .alloc = arm_coherent_iommu_alloc_attrs,
2076 .free = arm_coherent_iommu_free_attrs,
2077 .mmap = arm_coherent_iommu_mmap_attrs,
2078 .get_sgtable = arm_iommu_get_sgtable,
2079
2080 .map_page = arm_coherent_iommu_map_page,
2081 .unmap_page = arm_coherent_iommu_unmap_page,
2082
2083 .map_sg = arm_coherent_iommu_map_sg,
2084 .unmap_sg = arm_coherent_iommu_unmap_sg,
2085
2086 .map_resource = arm_iommu_map_resource,
2087 .unmap_resource = arm_iommu_unmap_resource,
2088
2089 .dma_supported = arm_dma_supported,
2090 };
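
/*
 * Usage sketch (not part of this file): once one of the two ops tables above
 * has been installed with set_dma_ops(), drivers keep using the generic DMA
 * API and the calls are dispatched through get_dma_ops(dev).  The helper and
 * the SZ_64K buffer size are hypothetical.
 */
static void *example_alloc_dma_buffer(struct device *dev, dma_addr_t *iova)
{
	/* Ends up in arm_iommu_alloc_attrs() or arm_coherent_iommu_alloc_attrs(). */
	return dma_alloc_coherent(dev, SZ_64K, iova, GFP_KERNEL);
}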
2091
2092 /**
2093 * arm_iommu_create_mapping - create an IO address space mapping for a bus
2094 * @bus: pointer to the bus holding the client device (for IOMMU calls)
2095 * @base: start address of the valid IO address space
2096 * @size: maximum size of the valid IO address space
2097 *
2098 * Creates a mapping structure which holds information about used/unused
2099 * IO address ranges; this is required to perform memory allocation and
2100 * mapping with the IOMMU aware functions.
2101 *
2102 * The client device needs to be attached to the mapping with the
2103 * arm_iommu_attach_device() function.
2104 */
2105 struct dma_iommu_mapping *
2106 arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size)
2107 {
2108 unsigned int bits = size >> PAGE_SHIFT;
2109 unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
2110 struct dma_iommu_mapping *mapping;
2111 int extensions = 1;
2112 int err = -ENOMEM;
2113
2114 /* currently only 32-bit DMA address space is supported */
2115 if (size > DMA_BIT_MASK(32) + 1)
2116 return ERR_PTR(-ERANGE);
2117
2118 if (!bitmap_size)
2119 return ERR_PTR(-EINVAL);
2120
2121 if (bitmap_size > PAGE_SIZE) {
2122 extensions = bitmap_size / PAGE_SIZE;
2123 bitmap_size = PAGE_SIZE;
2124 }
2125
2126 mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
2127 if (!mapping)
2128 goto err;
2129
2130 mapping->bitmap_size = bitmap_size;
2131 mapping->bitmaps = kcalloc(extensions, sizeof(unsigned long *),
2132 GFP_KERNEL);
2133 if (!mapping->bitmaps)
2134 goto err2;
2135
2136 mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
2137 if (!mapping->bitmaps[0])
2138 goto err3;
2139
2140 mapping->nr_bitmaps = 1;
2141 mapping->extensions = extensions;
2142 mapping->base = base;
2143 mapping->bits = BITS_PER_BYTE * bitmap_size;
2144
2145 spin_lock_init(&mapping->lock);
2146
2147 mapping->domain = iommu_domain_alloc(bus);
2148 if (!mapping->domain)
2149 goto err4;
2150
2151 kref_init(&mapping->kref);
2152 return mapping;
2153 err4:
2154 kfree(mapping->bitmaps[0]);
2155 err3:
2156 kfree(mapping->bitmaps);
2157 err2:
2158 kfree(mapping);
2159 err:
2160 return ERR_PTR(err);
2161 }
2162 EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
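
/*
 * Usage sketch (not part of this file): a bus or platform driver typically
 * creates one mapping covering the IOVA window it wants to hand out.  The
 * helper name and the 1 GiB window starting at 0x10000000 are purely
 * illustrative.
 */
static struct dma_iommu_mapping *example_create_mapping(struct device *dev)
{
	struct dma_iommu_mapping *mapping;

	mapping = arm_iommu_create_mapping(dev->bus, 0x10000000, SZ_1G);
	if (IS_ERR(mapping))
		dev_err(dev, "failed to create IOMMU mapping: %ld\n",
			PTR_ERR(mapping));

	return mapping;
}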
2163
2164 static void release_iommu_mapping(struct kref *kref)
2165 {
2166 int i;
2167 struct dma_iommu_mapping *mapping =
2168 container_of(kref, struct dma_iommu_mapping, kref);
2169
2170 iommu_domain_free(mapping->domain);
2171 for (i = 0; i < mapping->nr_bitmaps; i++)
2172 kfree(mapping->bitmaps[i]);
2173 kfree(mapping->bitmaps);
2174 kfree(mapping);
2175 }
2176
2177 static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
2178 {
2179 int next_bitmap;
2180
2181 if (mapping->nr_bitmaps >= mapping->extensions)
2182 return -EINVAL;
2183
2184 next_bitmap = mapping->nr_bitmaps;
2185 mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
2186 GFP_ATOMIC);
2187 if (!mapping->bitmaps[next_bitmap])
2188 return -ENOMEM;
2189
2190 mapping->nr_bitmaps++;
2191
2192 return 0;
2193 }
2194
2195 void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
2196 {
2197 if (mapping)
2198 kref_put(&mapping->kref, release_iommu_mapping);
2199 }
2200 EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
2201
2202 static int __arm_iommu_attach_device(struct device *dev,
2203 struct dma_iommu_mapping *mapping)
2204 {
2205 int err;
2206
2207 err = iommu_attach_device(mapping->domain, dev);
2208 if (err)
2209 return err;
2210
2211 kref_get(&mapping->kref);
2212 to_dma_iommu_mapping(dev) = mapping;
2213
2214 pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
2215 return 0;
2216 }
2217
2218 /**
2219 * arm_iommu_attach_device - attach a device to an IO address space mapping
2220 * @dev: valid struct device pointer
2221 * @mapping: io address space mapping structure (returned from
2222 * arm_iommu_create_mapping)
2223 *
2224 * Attaches the specified IO address space mapping to the provided device.
2225 * This replaces the DMA operations (dma_map_ops pointer) with the
2226 * IOMMU aware version.
2227 *
2228 * More than one client might be attached to the same io address space
2229 * mapping.
2230 */
2231 int arm_iommu_attach_device(struct device *dev,
2232 struct dma_iommu_mapping *mapping)
2233 {
2234 int err;
2235
2236 err = __arm_iommu_attach_device(dev, mapping);
2237 if (err)
2238 return err;
2239
2240 set_dma_ops(dev, &iommu_ops);
2241 return 0;
2242 }
2243 EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
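
/*
 * Usage sketch (not part of this file): attaching during probe, using the
 * hypothetical example_create_mapping() helper from the sketch above.  On
 * failure the caller's reference on the mapping is dropped again.
 */
static int example_probe_attach(struct device *dev)
{
	struct dma_iommu_mapping *mapping;
	int ret;

	mapping = example_create_mapping(dev);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	/* From here on, dev uses iommu_ops for all DMA API calls. */
	ret = arm_iommu_attach_device(dev, mapping);
	if (ret)
		arm_iommu_release_mapping(mapping);

	return ret;
}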
2244
2245 /**
2246 * arm_iommu_detach_device - detach a device from its IO address space mapping
2247 * @dev: valid struct device pointer
2248 *
2249 * Detaches the provided device from a previously attached mapping.
2250 * This overwrites the dma_ops pointer with the appropriate non-IOMMU ops.
2251 */
2252 void arm_iommu_detach_device(struct device *dev)
2253 {
2254 struct dma_iommu_mapping *mapping;
2255
2256 mapping = to_dma_iommu_mapping(dev);
2257 if (!mapping) {
2258 dev_warn(dev, "Not attached\n");
2259 return;
2260 }
2261
2262 iommu_detach_device(mapping->domain, dev);
2263 kref_put(&mapping->kref, release_iommu_mapping);
2264 to_dma_iommu_mapping(dev) = NULL;
2265 set_dma_ops(dev, arm_get_dma_map_ops(dev->archdata.dma_coherent));
2266
2267 pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
2268 }
2269 EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
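
/*
 * Usage sketch (not part of this file): the tear-down counterpart at remove
 * time.  Detaching drops the reference taken by attach; releasing drops the
 * caller's own reference taken at creation time, which may free the mapping
 * and its IOMMU domain.
 */
static void example_remove_detach(struct device *dev,
				  struct dma_iommu_mapping *mapping)
{
	arm_iommu_detach_device(dev);		/* restores the non-IOMMU dma_map_ops */
	arm_iommu_release_mapping(mapping);	/* drops the final reference */
}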
2270
2271 static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
2272 {
2273 return coherent ? &iommu_coherent_ops : &iommu_ops;
2274 }
2275
2276 static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
2277 const struct iommu_ops *iommu)
2278 {
2279 struct dma_iommu_mapping *mapping;
2280
2281 if (!iommu)
2282 return false;
2283
2284 mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
2285 if (IS_ERR(mapping)) {
2286 pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
2287 size, dev_name(dev));
2288 return false;
2289 }
2290
2291 if (__arm_iommu_attach_device(dev, mapping)) {
2292 pr_warn("Failed to attached device %s to IOMMU_mapping\n",
2293 dev_name(dev));
2294 arm_iommu_release_mapping(mapping);
2295 return false;
2296 }
2297
2298 return true;
2299 }
2300
2301 static void arm_teardown_iommu_dma_ops(struct device *dev)
2302 {
2303 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
2304
2305 if (!mapping)
2306 return;
2307
2308 arm_iommu_detach_device(dev);
2309 arm_iommu_release_mapping(mapping);
2310 }
2311
2312 #else
2313
2314 static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
2315 const struct iommu_ops *iommu)
2316 {
2317 return false;
2318 }
2319
2320 static void arm_teardown_iommu_dma_ops(struct device *dev) { }
2321
2322 #define arm_get_iommu_dma_map_ops arm_get_dma_map_ops
2323
2324 #endif /* CONFIG_ARM_DMA_USE_IOMMU */
2325
2326 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
2327 const struct iommu_ops *iommu, bool coherent)
2328 {
2329 const struct dma_map_ops *dma_ops;
2330
2331 dev->archdata.dma_coherent = coherent;
2332
2333 /*
2334 * Don't override the dma_ops if they have already been set. Ideally
2335 * this should be the only location where dma_ops are set; this check
2336 * can be removed once all other callers of set_dma_ops have disappeared.
2337 */
2338 if (dev->dma_ops)
2339 return;
2340
2341 if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
2342 dma_ops = arm_get_iommu_dma_map_ops(coherent);
2343 else
2344 dma_ops = arm_get_dma_map_ops(coherent);
2345
2346 set_dma_ops(dev, dma_ops);
2347
2348 #ifdef CONFIG_XEN
2349 if (xen_initial_domain()) {
2350 dev->archdata.dev_dma_ops = dev->dma_ops;
2351 dev->dma_ops = xen_dma_ops;
2352 }
2353 #endif
2354 dev->archdata.dma_ops_setup = true;
2355 }
2356
2357 void arch_teardown_dma_ops(struct device *dev)
2358 {
2359 if (!dev->archdata.dma_ops_setup)
2360 return;
2361
2362 arm_teardown_iommu_dma_ops(dev);
2363 /* Let arch_setup_dma_ops() start again from scratch upon re-probe */
2364 set_dma_ops(dev, NULL);
2365 }