1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Dynamic DMA mapping support.
4 *
5 * This implementation is a fallback for platforms that do not support
6 * I/O TLBs (aka DMA address translation hardware).
7 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
8 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
9 * Copyright (C) 2000, 2003 Hewlett-Packard Co
10 * David Mosberger-Tang <davidm@hpl.hp.com>
11 *
12 * 03/05/07 davidm Switch from PCI-DMA to generic device DMA API.
13 * 00/12/13 davidm Rename to swiotlb.c and add mark_clean() to avoid
14 * unnecessary i-cache flushing.
15 * 04/07/.. ak Better overflow handling. Assorted fixes.
16 * 05/09/10 linville Add support for syncing ranges, support syncing for
17 * DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
18 * 08/12/11 beckyb Add highmem support
19 */
20
21 #define pr_fmt(fmt) "software IO TLB: " fmt
22
23 #include <linux/cache.h>
24 #include <linux/cc_platform.h>
25 #include <linux/ctype.h>
26 #include <linux/debugfs.h>
27 #include <linux/dma-direct.h>
28 #include <linux/dma-map-ops.h>
29 #include <linux/export.h>
30 #include <linux/gfp.h>
31 #include <linux/highmem.h>
32 #include <linux/io.h>
33 #include <linux/iommu-helper.h>
34 #include <linux/init.h>
35 #include <linux/memblock.h>
36 #include <linux/mm.h>
37 #include <linux/pfn.h>
38 #include <linux/rculist.h>
39 #include <linux/scatterlist.h>
40 #include <linux/set_memory.h>
41 #include <linux/spinlock.h>
42 #include <linux/string.h>
43 #include <linux/swiotlb.h>
44 #include <linux/types.h>
45 #ifdef CONFIG_DMA_RESTRICTED_POOL
46 #include <linux/of.h>
47 #include <linux/of_fdt.h>
48 #include <linux/of_reserved_mem.h>
49 #include <linux/slab.h>
50 #endif
51
52 #define CREATE_TRACE_POINTS
53 #include <trace/events/swiotlb.h>
54
55 #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
56
57 /*
58 * Minimum IO TLB size to bother booting with. Systems with mainly
59 * 64bit capable cards will only lightly use the swiotlb. If we can't
60 * allocate a contiguous 1MB, we're probably in trouble anyway.
61 */
62 #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
63
64 #define INVALID_PHYS_ADDR (~(phys_addr_t)0)
65
66 /**
67 * struct io_tlb_slot - IO TLB slot descriptor
68 * @orig_addr: The original address corresponding to a mapped entry.
69 * @alloc_size: Size of the allocated buffer.
70 * @list: The free list describing the number of free entries available
71 * from each index.
72 * @pad_slots: Number of preceding padding slots. Valid only in the first
73 * allocated non-padding slot.
74 */
75 struct io_tlb_slot {
76 phys_addr_t orig_addr;
77 size_t alloc_size;
78 unsigned short list;
79 unsigned short pad_slots;
80 };
81
82 static bool swiotlb_force_bounce;
83 static bool swiotlb_force_disable;
84
85 #ifdef CONFIG_SWIOTLB_DYNAMIC
86
87 static void swiotlb_dyn_alloc(struct work_struct *work);
88
89 static struct io_tlb_mem io_tlb_default_mem = {
90 .lock = __SPIN_LOCK_UNLOCKED(io_tlb_default_mem.lock),
91 .pools = LIST_HEAD_INIT(io_tlb_default_mem.pools),
92 .dyn_alloc = __WORK_INITIALIZER(io_tlb_default_mem.dyn_alloc,
93 swiotlb_dyn_alloc),
94 };
95
96 #else /* !CONFIG_SWIOTLB_DYNAMIC */
97
98 static struct io_tlb_mem io_tlb_default_mem;
99
100 #endif /* CONFIG_SWIOTLB_DYNAMIC */
101
102 static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;
103 static unsigned long default_nareas;
104
105 /**
106 * struct io_tlb_area - IO TLB memory area descriptor
107 *
108 * This is a single area with a single lock.
109 *
110  * @used: The number of used IO TLB slots in this area.
111 * @index: The slot index to start searching in this area for next round.
112 * @lock: The lock to protect the above data structures in the map and
113 * unmap calls.
114 */
115 struct io_tlb_area {
116 unsigned long used;
117 unsigned int index;
118 spinlock_t lock;
119 };
120
121 /*
122  * Round up the number of slabs to the next power of 2. The last area is
123  * going to be smaller than the rest if default_nslabs is not a power of two.
124  * The number of slots in an area should be a multiple of IO_TLB_SEGSIZE,
125  * otherwise a segment may span two or more areas. That conflicts with the
126  * tracking of free contiguous slots: free slots are treated as contiguous
127  * no matter whether they cross an area boundary.
128 *
129 * Return true if default_nslabs is rounded up.
130 */
131 static bool round_up_default_nslabs(void)
132 {
133 if (!default_nareas)
134 return false;
135
136 if (default_nslabs < IO_TLB_SEGSIZE * default_nareas)
137 default_nslabs = IO_TLB_SEGSIZE * default_nareas;
138 else if (is_power_of_2(default_nslabs))
139 return false;
140 default_nslabs = roundup_pow_of_two(default_nslabs);
141 return true;
142 }
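/*
 * Worked example of the rounding above: with default_nareas == 8, a request
 * for 40000 slabs clears the IO_TLB_SEGSIZE * nareas minimum (1024) but is
 * not a power of two, so it is rounded up to 65536 and the function returns
 * true; 32768 slabs would already be a power of two and be left untouched.
 */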
143
144 /**
145 * swiotlb_adjust_nareas() - adjust the number of areas and slots
146 * @nareas: Desired number of areas. Zero is treated as 1.
147 *
148 * Adjust the default number of areas in a memory pool.
149 * The default size of the memory pool may also change to meet minimum area
150 * size requirements.
151 */
152 static void swiotlb_adjust_nareas(unsigned int nareas)
153 {
154 if (!nareas)
155 nareas = 1;
156 else if (!is_power_of_2(nareas))
157 nareas = roundup_pow_of_two(nareas);
158
159 default_nareas = nareas;
160
161 pr_info("area num %d.\n", nareas);
162 if (round_up_default_nslabs())
163 pr_info("SWIOTLB bounce buffer size rounded up to %luMB",
164 (default_nslabs << IO_TLB_SHIFT) >> 20);
165 }
166
167 /**
168 * limit_nareas() - get the maximum number of areas for a given memory pool size
169 * @nareas: Desired number of areas.
170 * @nslots: Total number of slots in the memory pool.
171 *
172 * Limit the number of areas to the maximum possible number of areas in
173 * a memory pool of the given size.
174 *
175 * Return: Maximum possible number of areas.
176 */
177 static unsigned int limit_nareas(unsigned int nareas, unsigned long nslots)
178 {
179 if (nslots < nareas * IO_TLB_SEGSIZE)
180 return nslots / IO_TLB_SEGSIZE;
181 return nareas;
182 }
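/*
 * A quick example of the limit above: a pool of 2048 slots can hold at most
 * 2048 / IO_TLB_SEGSIZE == 16 areas, so limit_nareas(32, 2048) returns 16,
 * while limit_nareas(8, 2048) returns 8 unchanged.
 */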
183
184 static int __init
185 setup_io_tlb_npages(char *str)
186 {
187 if (isdigit(*str)) {
188 /* avoid tail segment of size < IO_TLB_SEGSIZE */
189 default_nslabs =
190 ALIGN(simple_strtoul(str, &str, 0), IO_TLB_SEGSIZE);
191 }
192 if (*str == ',')
193 ++str;
194 if (isdigit(*str))
195 swiotlb_adjust_nareas(simple_strtoul(str, &str, 0));
196 if (*str == ',')
197 ++str;
198 if (!strcmp(str, "force"))
199 swiotlb_force_bounce = true;
200 else if (!strcmp(str, "noforce"))
201 swiotlb_force_disable = true;
202
203 return 0;
204 }
205 early_param("swiotlb", setup_io_tlb_npages);
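/*
 * Example of the early parameter parsed above. The format is
 * swiotlb=<nslabs>[,<nareas>][,force|noforce]; assuming the usual
 * IO_TLB_SHIFT of 11 (2 KiB slots), a command line such as
 *
 *	swiotlb=32768,4,force
 *
 * reserves 32768 slabs (64 MiB) split into 4 areas and forces all DMA
 * through the bounce buffers, while "swiotlb=noforce" disables the SWIOTLB
 * entirely.
 */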
206
207 unsigned long swiotlb_size_or_default(void)
208 {
209 return default_nslabs << IO_TLB_SHIFT;
210 }
211
212 void __init swiotlb_adjust_size(unsigned long size)
213 {
214 /*
215 * If swiotlb parameter has not been specified, give a chance to
216 * architectures such as those supporting memory encryption to
217 * adjust/expand SWIOTLB size for their use.
218 */
219 if (default_nslabs != IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT)
220 return;
221
222 size = ALIGN(size, IO_TLB_SIZE);
223 default_nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
224 if (round_up_default_nslabs())
225 size = default_nslabs << IO_TLB_SHIFT;
226 pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20);
227 }
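/*
 * Illustrative sketch only: an architecture whose devices must bounce all
 * DMA (e.g. with guest memory encryption) could scale the pool from its
 * early setup code along these lines. The 1/16 factor and the
 * need_big_bounce_buffers() check are made-up placeholders, not kernel APIs:
 *
 *	if (need_big_bounce_buffers())
 *		swiotlb_adjust_size(memblock_phys_mem_size() / 16);
 *
 * The call is a no-op once "swiotlb=" has set an explicit slab count,
 * because default_nslabs then no longer equals the built-in default.
 */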
228
229 void swiotlb_print_info(void)
230 {
231 struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
232
233 if (!mem->nslabs) {
234 pr_warn("No low mem\n");
235 return;
236 }
237
238 pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end,
239 (mem->nslabs << IO_TLB_SHIFT) >> 20);
240 }
241
242 static inline unsigned long io_tlb_offset(unsigned long val)
243 {
244 return val & (IO_TLB_SEGSIZE - 1);
245 }
246
247 static inline unsigned long nr_slots(u64 val)
248 {
249 return DIV_ROUND_UP(val, IO_TLB_SIZE);
250 }
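/*
 * With the usual IO_TLB_SHIFT of 11 these helpers work in 2 KiB units:
 * nr_slots(4097) == 3 (three slots are needed to cover 4097 bytes), and
 * io_tlb_offset(130) == 2, i.e. slot 130 is the third slot of its
 * IO_TLB_SEGSIZE-slot segment.
 */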
251
252 /*
253 * Early SWIOTLB allocation may be too early to allow an architecture to
254 * perform the desired operations. This function allows the architecture to
255 * call SWIOTLB when the operations are possible. It needs to be called
256 * before the SWIOTLB memory is used.
257 */
258 void __init swiotlb_update_mem_attributes(void)
259 {
260 struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
261 unsigned long bytes;
262
263 if (!mem->nslabs || mem->late_alloc)
264 return;
265 bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT);
266 set_memory_decrypted((unsigned long)mem->vaddr, bytes >> PAGE_SHIFT);
267 }
268
269 static void swiotlb_init_io_tlb_pool(struct io_tlb_pool *mem, phys_addr_t start,
270 unsigned long nslabs, bool late_alloc, unsigned int nareas)
271 {
272 void *vaddr = phys_to_virt(start);
273 unsigned long bytes = nslabs << IO_TLB_SHIFT, i;
274
275 mem->nslabs = nslabs;
276 mem->start = start;
277 mem->end = mem->start + bytes;
278 mem->late_alloc = late_alloc;
279 mem->nareas = nareas;
280 mem->area_nslabs = nslabs / mem->nareas;
281
282 for (i = 0; i < mem->nareas; i++) {
283 spin_lock_init(&mem->areas[i].lock);
284 mem->areas[i].index = 0;
285 mem->areas[i].used = 0;
286 }
287
288 for (i = 0; i < mem->nslabs; i++) {
289 mem->slots[i].list = min(IO_TLB_SEGSIZE - io_tlb_offset(i),
290 mem->nslabs - i);
291 mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
292 mem->slots[i].alloc_size = 0;
293 mem->slots[i].pad_slots = 0;
294 }
295
296 memset(vaddr, 0, bytes);
297 mem->vaddr = vaddr;
298 return;
299 }
300
301 /**
302 * add_mem_pool() - add a memory pool to the allocator
303 * @mem: Software IO TLB allocator.
304 * @pool: Memory pool to be added.
305 */
306 static void add_mem_pool(struct io_tlb_mem *mem, struct io_tlb_pool *pool)
307 {
308 #ifdef CONFIG_SWIOTLB_DYNAMIC
309 spin_lock(&mem->lock);
310 list_add_rcu(&pool->node, &mem->pools);
311 mem->nslabs += pool->nslabs;
312 spin_unlock(&mem->lock);
313 #else
314 mem->nslabs = pool->nslabs;
315 #endif
316 }
317
318 static void __init *swiotlb_memblock_alloc(unsigned long nslabs,
319 unsigned int flags,
320 int (*remap)(void *tlb, unsigned long nslabs))
321 {
322 size_t bytes = PAGE_ALIGN(nslabs << IO_TLB_SHIFT);
323 void *tlb;
324
325 /*
326 * By default allocate the bounce buffer memory from low memory, but
327  * allow it to be allocated from anywhere for hypervisors with guest
328 * memory encryption.
329 */
330 if (flags & SWIOTLB_ANY)
331 tlb = memblock_alloc(bytes, PAGE_SIZE);
332 else
333 tlb = memblock_alloc_low(bytes, PAGE_SIZE);
334
335 if (!tlb) {
336 pr_warn("%s: Failed to allocate %zu bytes tlb structure\n",
337 __func__, bytes);
338 return NULL;
339 }
340
341 if (remap && remap(tlb, nslabs) < 0) {
342 memblock_free(tlb, PAGE_ALIGN(bytes));
343 pr_warn("%s: Failed to remap %zu bytes\n", __func__, bytes);
344 return NULL;
345 }
346
347 return tlb;
348 }
349
350 /*
351 * Statically reserve bounce buffer space and initialize bounce buffer data
352 * structures for the software IO TLB used to implement the DMA API.
353 */
354 void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
355 int (*remap)(void *tlb, unsigned long nslabs))
356 {
357 struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
358 unsigned long nslabs;
359 unsigned int nareas;
360 size_t alloc_size;
361 void *tlb;
362
363 if (!addressing_limit && !swiotlb_force_bounce)
364 return;
365 if (swiotlb_force_disable)
366 return;
367
368 io_tlb_default_mem.force_bounce =
369 swiotlb_force_bounce || (flags & SWIOTLB_FORCE);
370
371 #ifdef CONFIG_SWIOTLB_DYNAMIC
372 if (!remap)
373 io_tlb_default_mem.can_grow = true;
374 if (flags & SWIOTLB_ANY)
375 io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1);
376 else
377 io_tlb_default_mem.phys_limit = ARCH_LOW_ADDRESS_LIMIT;
378 #endif
379
380 if (!default_nareas)
381 swiotlb_adjust_nareas(num_possible_cpus());
382
383 nslabs = default_nslabs;
384 nareas = limit_nareas(default_nareas, nslabs);
385 while ((tlb = swiotlb_memblock_alloc(nslabs, flags, remap)) == NULL) {
386 if (nslabs <= IO_TLB_MIN_SLABS)
387 return;
388 nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
389 nareas = limit_nareas(nareas, nslabs);
390 }
391
392 if (default_nslabs != nslabs) {
393 pr_info("SWIOTLB bounce buffer size adjusted %lu -> %lu slabs",
394 default_nslabs, nslabs);
395 default_nslabs = nslabs;
396 }
397
398 alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
399 mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
400 if (!mem->slots) {
401 pr_warn("%s: Failed to allocate %zu bytes align=0x%lx\n",
402 __func__, alloc_size, PAGE_SIZE);
403 return;
404 }
405
406 mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area),
407 nareas), SMP_CACHE_BYTES);
408 if (!mem->areas) {
409 pr_warn("%s: Failed to allocate mem->areas.\n", __func__);
410 return;
411 }
412
413 swiotlb_init_io_tlb_pool(mem, __pa(tlb), nslabs, false, nareas);
414 add_mem_pool(&io_tlb_default_mem, mem);
415
416 if (flags & SWIOTLB_VERBOSE)
417 swiotlb_print_info();
418 }
419
420 void __init swiotlb_init(bool addressing_limit, unsigned int flags)
421 {
422 swiotlb_init_remap(addressing_limit, flags, NULL);
423 }
424
425 /*
426 * Systems with larger DMA zones (those that don't support ISA) can
427  * initialize the swiotlb later using the page allocator if needed.
428 * This should be just like above, but with some error catching.
429 */
430 int swiotlb_init_late(size_t size, gfp_t gfp_mask,
431 int (*remap)(void *tlb, unsigned long nslabs))
432 {
433 struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
434 unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
435 unsigned int nareas;
436 unsigned char *vstart = NULL;
437 unsigned int order, area_order;
438 bool retried = false;
439 int rc = 0;
440
441 if (io_tlb_default_mem.nslabs)
442 return 0;
443
444 if (swiotlb_force_disable)
445 return 0;
446
447 io_tlb_default_mem.force_bounce = swiotlb_force_bounce;
448
449 #ifdef CONFIG_SWIOTLB_DYNAMIC
450 if (!remap)
451 io_tlb_default_mem.can_grow = true;
452 if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp_mask & __GFP_DMA))
453 io_tlb_default_mem.phys_limit = DMA_BIT_MASK(zone_dma_bits);
454 else if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp_mask & __GFP_DMA32))
455 io_tlb_default_mem.phys_limit = DMA_BIT_MASK(32);
456 else
457 io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1);
458 #endif
459
460 if (!default_nareas)
461 swiotlb_adjust_nareas(num_possible_cpus());
462
463 retry:
464 order = get_order(nslabs << IO_TLB_SHIFT);
465 nslabs = SLABS_PER_PAGE << order;
466
467 while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
468 vstart = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
469 order);
470 if (vstart)
471 break;
472 order--;
473 nslabs = SLABS_PER_PAGE << order;
474 retried = true;
475 }
476
477 if (!vstart)
478 return -ENOMEM;
479
480 if (remap)
481 rc = remap(vstart, nslabs);
482 if (rc) {
483 free_pages((unsigned long)vstart, order);
484
485 nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
486 if (nslabs < IO_TLB_MIN_SLABS)
487 return rc;
488 retried = true;
489 goto retry;
490 }
491
492 if (retried) {
493 pr_warn("only able to allocate %ld MB\n",
494 (PAGE_SIZE << order) >> 20);
495 }
496
497 nareas = limit_nareas(default_nareas, nslabs);
498 area_order = get_order(array_size(sizeof(*mem->areas), nareas));
499 mem->areas = (struct io_tlb_area *)
500 __get_free_pages(GFP_KERNEL | __GFP_ZERO, area_order);
501 if (!mem->areas)
502 goto error_area;
503
504 mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
505 get_order(array_size(sizeof(*mem->slots), nslabs)));
506 if (!mem->slots)
507 goto error_slots;
508
509 set_memory_decrypted((unsigned long)vstart,
510 (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
511 swiotlb_init_io_tlb_pool(mem, virt_to_phys(vstart), nslabs, true,
512 nareas);
513 add_mem_pool(&io_tlb_default_mem, mem);
514
515 swiotlb_print_info();
516 return 0;
517
518 error_slots:
519 free_pages((unsigned long)mem->areas, area_order);
520 error_area:
521 free_pages((unsigned long)vstart, order);
522 return -ENOMEM;
523 }
524
525 void __init swiotlb_exit(void)
526 {
527 struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
528 unsigned long tbl_vaddr;
529 size_t tbl_size, slots_size;
530 unsigned int area_order;
531
532 if (swiotlb_force_bounce)
533 return;
534
535 if (!mem->nslabs)
536 return;
537
538 pr_info("tearing down default memory pool\n");
539 tbl_vaddr = (unsigned long)phys_to_virt(mem->start);
540 tbl_size = PAGE_ALIGN(mem->end - mem->start);
541 slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs));
542
543 set_memory_encrypted(tbl_vaddr, tbl_size >> PAGE_SHIFT);
544 if (mem->late_alloc) {
545 area_order = get_order(array_size(sizeof(*mem->areas),
546 mem->nareas));
547 free_pages((unsigned long)mem->areas, area_order);
548 free_pages(tbl_vaddr, get_order(tbl_size));
549 free_pages((unsigned long)mem->slots, get_order(slots_size));
550 } else {
551 memblock_free_late(__pa(mem->areas),
552 array_size(sizeof(*mem->areas), mem->nareas));
553 memblock_free_late(mem->start, tbl_size);
554 memblock_free_late(__pa(mem->slots), slots_size);
555 }
556
557 memset(mem, 0, sizeof(*mem));
558 }
559
560 #ifdef CONFIG_SWIOTLB_DYNAMIC
561
562 /**
563 * alloc_dma_pages() - allocate pages to be used for DMA
564 * @gfp: GFP flags for the allocation.
565 * @bytes: Size of the buffer.
566 * @phys_limit: Maximum allowed physical address of the buffer.
567 *
568 * Allocate pages from the buddy allocator. If successful, make the allocated
569  * pages decrypted so that they can be used for DMA.
570 *
571 * Return: Decrypted pages, %NULL on allocation failure, or ERR_PTR(-EAGAIN)
572 * if the allocated physical address was above @phys_limit.
573 */
574 static struct page *alloc_dma_pages(gfp_t gfp, size_t bytes, u64 phys_limit)
575 {
576 unsigned int order = get_order(bytes);
577 struct page *page;
578 phys_addr_t paddr;
579 void *vaddr;
580
581 page = alloc_pages(gfp, order);
582 if (!page)
583 return NULL;
584
585 paddr = page_to_phys(page);
586 if (paddr + bytes - 1 > phys_limit) {
587 __free_pages(page, order);
588 return ERR_PTR(-EAGAIN);
589 }
590
591 vaddr = phys_to_virt(paddr);
592 if (set_memory_decrypted((unsigned long)vaddr, PFN_UP(bytes)))
593 goto error;
594 return page;
595
596 error:
597 /* Intentional leak if pages cannot be encrypted again. */
598 if (!set_memory_encrypted((unsigned long)vaddr, PFN_UP(bytes)))
599 __free_pages(page, order);
600 return NULL;
601 }
602
603 /**
604 * swiotlb_alloc_tlb() - allocate a dynamic IO TLB buffer
605 * @dev: Device for which a memory pool is allocated.
606 * @bytes: Size of the buffer.
607 * @phys_limit: Maximum allowed physical address of the buffer.
608 * @gfp: GFP flags for the allocation.
609 *
610 * Return: Allocated pages, or %NULL on allocation failure.
611 */
612 static struct page *swiotlb_alloc_tlb(struct device *dev, size_t bytes,
613 u64 phys_limit, gfp_t gfp)
614 {
615 struct page *page;
616
617 /*
618 * Allocate from the atomic pools if memory is encrypted and
619 * the allocation is atomic, because decrypting may block.
620 */
621 if (!gfpflags_allow_blocking(gfp) && dev && force_dma_unencrypted(dev)) {
622 void *vaddr;
623
624 if (!IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
625 return NULL;
626
627 return dma_alloc_from_pool(dev, bytes, &vaddr, gfp,
628 dma_coherent_ok);
629 }
630
631 gfp &= ~GFP_ZONEMASK;
632 if (phys_limit <= DMA_BIT_MASK(zone_dma_bits))
633 gfp |= __GFP_DMA;
634 else if (phys_limit <= DMA_BIT_MASK(32))
635 gfp |= __GFP_DMA32;
636
637 while (IS_ERR(page = alloc_dma_pages(gfp, bytes, phys_limit))) {
638 if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
639 phys_limit < DMA_BIT_MASK(64) &&
640 !(gfp & (__GFP_DMA32 | __GFP_DMA)))
641 gfp |= __GFP_DMA32;
642 else if (IS_ENABLED(CONFIG_ZONE_DMA) &&
643 !(gfp & __GFP_DMA))
644 gfp = (gfp & ~__GFP_DMA32) | __GFP_DMA;
645 else
646 return NULL;
647 }
648
649 return page;
650 }
651
652 /**
653 * swiotlb_free_tlb() - free a dynamically allocated IO TLB buffer
654 * @vaddr: Virtual address of the buffer.
655 * @bytes: Size of the buffer.
656 */
657 static void swiotlb_free_tlb(void *vaddr, size_t bytes)
658 {
659 if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
660 dma_free_from_pool(NULL, vaddr, bytes))
661 return;
662
663 /* Intentional leak if pages cannot be encrypted again. */
664 if (!set_memory_encrypted((unsigned long)vaddr, PFN_UP(bytes)))
665 __free_pages(virt_to_page(vaddr), get_order(bytes));
666 }
667
668 /**
669 * swiotlb_alloc_pool() - allocate a new IO TLB memory pool
670 * @dev: Device for which a memory pool is allocated.
671 * @minslabs: Minimum number of slabs.
672 * @nslabs: Desired (maximum) number of slabs.
673 * @nareas: Number of areas.
674 * @phys_limit: Maximum DMA buffer physical address.
675 * @gfp: GFP flags for the allocations.
676 *
677 * Allocate and initialize a new IO TLB memory pool. The actual number of
678 * slabs may be reduced if allocation of @nslabs fails. If even
679 * @minslabs cannot be allocated, this function fails.
680 *
681 * Return: New memory pool, or %NULL on allocation failure.
682 */
683 static struct io_tlb_pool *swiotlb_alloc_pool(struct device *dev,
684 unsigned long minslabs, unsigned long nslabs,
685 unsigned int nareas, u64 phys_limit, gfp_t gfp)
686 {
687 struct io_tlb_pool *pool;
688 unsigned int slot_order;
689 struct page *tlb;
690 size_t pool_size;
691 size_t tlb_size;
692
693 if (nslabs > SLABS_PER_PAGE << MAX_PAGE_ORDER) {
694 nslabs = SLABS_PER_PAGE << MAX_PAGE_ORDER;
695 nareas = limit_nareas(nareas, nslabs);
696 }
697
698 pool_size = sizeof(*pool) + array_size(sizeof(*pool->areas), nareas);
699 pool = kzalloc(pool_size, gfp);
700 if (!pool)
701 goto error;
702 pool->areas = (void *)pool + sizeof(*pool);
703
704 tlb_size = nslabs << IO_TLB_SHIFT;
705 while (!(tlb = swiotlb_alloc_tlb(dev, tlb_size, phys_limit, gfp))) {
706 if (nslabs <= minslabs)
707 goto error_tlb;
708 nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
709 nareas = limit_nareas(nareas, nslabs);
710 tlb_size = nslabs << IO_TLB_SHIFT;
711 }
712
713 slot_order = get_order(array_size(sizeof(*pool->slots), nslabs));
714 pool->slots = (struct io_tlb_slot *)
715 __get_free_pages(gfp, slot_order);
716 if (!pool->slots)
717 goto error_slots;
718
719 swiotlb_init_io_tlb_pool(pool, page_to_phys(tlb), nslabs, true, nareas);
720 return pool;
721
722 error_slots:
723 swiotlb_free_tlb(page_address(tlb), tlb_size);
724 error_tlb:
725 kfree(pool);
726 error:
727 return NULL;
728 }
729
730 /**
731 * swiotlb_dyn_alloc() - dynamic memory pool allocation worker
732 * @work: Pointer to dyn_alloc in struct io_tlb_mem.
733 */
734 static void swiotlb_dyn_alloc(struct work_struct *work)
735 {
736 struct io_tlb_mem *mem =
737 container_of(work, struct io_tlb_mem, dyn_alloc);
738 struct io_tlb_pool *pool;
739
740 pool = swiotlb_alloc_pool(NULL, IO_TLB_MIN_SLABS, default_nslabs,
741 default_nareas, mem->phys_limit, GFP_KERNEL);
742 if (!pool) {
743 pr_warn_ratelimited("Failed to allocate new pool");
744 return;
745 }
746
747 add_mem_pool(mem, pool);
748 }
749
750 /**
751 * swiotlb_dyn_free() - RCU callback to free a memory pool
752 * @rcu: RCU head in the corresponding struct io_tlb_pool.
753 */
754 static void swiotlb_dyn_free(struct rcu_head *rcu)
755 {
756 struct io_tlb_pool *pool = container_of(rcu, struct io_tlb_pool, rcu);
757 size_t slots_size = array_size(sizeof(*pool->slots), pool->nslabs);
758 size_t tlb_size = pool->end - pool->start;
759
760 free_pages((unsigned long)pool->slots, get_order(slots_size));
761 swiotlb_free_tlb(pool->vaddr, tlb_size);
762 kfree(pool);
763 }
764
765 /**
766 * swiotlb_find_pool() - find the IO TLB pool for a physical address
767 * @dev: Device which has mapped the DMA buffer.
768 * @paddr: Physical address within the DMA buffer.
769 *
770 * Find the IO TLB memory pool descriptor which contains the given physical
771 * address, if any.
772 *
773 * Return: Memory pool which contains @paddr, or %NULL if none.
774 */
775 struct io_tlb_pool *swiotlb_find_pool(struct device *dev, phys_addr_t paddr)
776 {
777 struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
778 struct io_tlb_pool *pool;
779
780 rcu_read_lock();
781 list_for_each_entry_rcu(pool, &mem->pools, node) {
782 if (paddr >= pool->start && paddr < pool->end)
783 goto out;
784 }
785
786 list_for_each_entry_rcu(pool, &dev->dma_io_tlb_pools, node) {
787 if (paddr >= pool->start && paddr < pool->end)
788 goto out;
789 }
790 pool = NULL;
791 out:
792 rcu_read_unlock();
793 return pool;
794 }
795
796 /**
797 * swiotlb_del_pool() - remove an IO TLB pool from a device
798 * @dev: Owning device.
799 * @pool: Memory pool to be removed.
800 */
801 static void swiotlb_del_pool(struct device *dev, struct io_tlb_pool *pool)
802 {
803 unsigned long flags;
804
805 spin_lock_irqsave(&dev->dma_io_tlb_lock, flags);
806 list_del_rcu(&pool->node);
807 spin_unlock_irqrestore(&dev->dma_io_tlb_lock, flags);
808
809 call_rcu(&pool->rcu, swiotlb_dyn_free);
810 }
811
812 #endif /* CONFIG_SWIOTLB_DYNAMIC */
813
814 /**
815 * swiotlb_dev_init() - initialize swiotlb fields in &struct device
816 * @dev: Device to be initialized.
817 */
818 void swiotlb_dev_init(struct device *dev)
819 {
820 dev->dma_io_tlb_mem = &io_tlb_default_mem;
821 #ifdef CONFIG_SWIOTLB_DYNAMIC
822 INIT_LIST_HEAD(&dev->dma_io_tlb_pools);
823 spin_lock_init(&dev->dma_io_tlb_lock);
824 dev->dma_uses_io_tlb = false;
825 #endif
826 }
827
828 /**
829 * swiotlb_align_offset() - Get required offset into an IO TLB allocation.
830 * @dev: Owning device.
831 * @align_mask: Allocation alignment mask.
832 * @addr: DMA address.
833 *
834 * Return the minimum offset from the start of an IO TLB allocation which is
835 * required for a given buffer address and allocation alignment to keep the
836 * device happy.
837 *
838 * First, the address bits covered by min_align_mask must be identical in the
839 * original address and the bounce buffer address. High bits are preserved by
840 * choosing a suitable IO TLB slot, but bits below IO_TLB_SHIFT require extra
841 * padding bytes before the bounce buffer.
842 *
843 * Second, @align_mask specifies which bits of the first allocated slot must
844 * be zero. This may require allocating additional padding slots, and then the
845 * offset (in bytes) from the first such padding slot is returned.
846 */
847 static unsigned int swiotlb_align_offset(struct device *dev,
848 unsigned int align_mask, u64 addr)
849 {
850 return addr & dma_get_min_align_mask(dev) &
851 (align_mask | (IO_TLB_SIZE - 1));
852 }
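/*
 * Worked example: for a device with a min_align_mask of 0xFFF (4 KiB
 * alignment, as e.g. NVMe requests), an orig_addr whose low bits are 0x1A35
 * and align_mask == 0, the result is 0x1A35 & 0xFFF & 0x7FF == 0x235. The
 * bounce buffer must therefore start 0x235 bytes into its first slot so that
 * the address bits below IO_TLB_SHIFT match the original buffer.
 */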
853
854 /*
855 * Bounce: copy the swiotlb buffer from or back to the original dma location
856 */
857 static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size,
858 enum dma_data_direction dir)
859 {
860 struct io_tlb_pool *mem = swiotlb_find_pool(dev, tlb_addr);
861 int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT;
862 phys_addr_t orig_addr = mem->slots[index].orig_addr;
863 size_t alloc_size = mem->slots[index].alloc_size;
864 unsigned long pfn = PFN_DOWN(orig_addr);
865 unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start;
866 int tlb_offset;
867
868 if (orig_addr == INVALID_PHYS_ADDR)
869 return;
870
871 /*
872 * It's valid for tlb_offset to be negative. This can happen when the
873 * "offset" returned by swiotlb_align_offset() is non-zero, and the
874 * tlb_addr is pointing within the first "offset" bytes of the second
875 * or subsequent slots of the allocated swiotlb area. While it's not
876 * valid for tlb_addr to be pointing within the first "offset" bytes
877 * of the first slot, there's no way to check for such an error since
878 * this function can't distinguish the first slot from the second and
879 * subsequent slots.
880 */
881 tlb_offset = (tlb_addr & (IO_TLB_SIZE - 1)) -
882 swiotlb_align_offset(dev, 0, orig_addr);
883
884 orig_addr += tlb_offset;
885 alloc_size -= tlb_offset;
886
887 if (size > alloc_size) {
888 dev_WARN_ONCE(dev, 1,
889 "Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n",
890 alloc_size, size);
891 size = alloc_size;
892 }
893
894 if (PageHighMem(pfn_to_page(pfn))) {
895 unsigned int offset = orig_addr & ~PAGE_MASK;
896 struct page *page;
897 unsigned int sz = 0;
898 unsigned long flags;
899
900 while (size) {
901 sz = min_t(size_t, PAGE_SIZE - offset, size);
902
903 local_irq_save(flags);
904 page = pfn_to_page(pfn);
905 if (dir == DMA_TO_DEVICE)
906 memcpy_from_page(vaddr, page, offset, sz);
907 else
908 memcpy_to_page(page, offset, vaddr, sz);
909 local_irq_restore(flags);
910
911 size -= sz;
912 pfn++;
913 vaddr += sz;
914 offset = 0;
915 }
916 } else if (dir == DMA_TO_DEVICE) {
917 memcpy(vaddr, phys_to_virt(orig_addr), size);
918 } else {
919 memcpy(phys_to_virt(orig_addr), vaddr, size);
920 }
921 }
922
923 static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx)
924 {
925 return start + (idx << IO_TLB_SHIFT);
926 }
927
928 /*
929 * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
930 */
931 static inline unsigned long get_max_slots(unsigned long boundary_mask)
932 {
933 return (boundary_mask >> IO_TLB_SHIFT) + 1;
934 }
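/*
 * E.g. a 4 GiB segment boundary (boundary_mask == 0xFFFFFFFF) allows
 * (0xFFFFFFFF >> 11) + 1 == 0x200000 slots per segment. Shifting before
 * adding 1 is what avoids the overflow mentioned above when
 * boundary_mask == ~0UL.
 */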
935
936 static unsigned int wrap_area_index(struct io_tlb_pool *mem, unsigned int index)
937 {
938 if (index >= mem->area_nslabs)
939 return 0;
940 return index;
941 }
942
943 /*
944 * Track the total used slots with a global atomic value in order to have
945 * correct information to determine the high water mark. The mem_used()
946 * function gives imprecise results because there's no locking across
947 * multiple areas.
948 */
949 #ifdef CONFIG_DEBUG_FS
950 static void inc_used_and_hiwater(struct io_tlb_mem *mem, unsigned int nslots)
951 {
952 unsigned long old_hiwater, new_used;
953
954 new_used = atomic_long_add_return(nslots, &mem->total_used);
955 old_hiwater = atomic_long_read(&mem->used_hiwater);
956 do {
957 if (new_used <= old_hiwater)
958 break;
959 } while (!atomic_long_try_cmpxchg(&mem->used_hiwater,
960 &old_hiwater, new_used));
961 }
962
963 static void dec_used(struct io_tlb_mem *mem, unsigned int nslots)
964 {
965 atomic_long_sub(nslots, &mem->total_used);
966 }
967
968 #else /* !CONFIG_DEBUG_FS */
969 static void inc_used_and_hiwater(struct io_tlb_mem *mem, unsigned int nslots)
970 {
971 }
972 static void dec_used(struct io_tlb_mem *mem, unsigned int nslots)
973 {
974 }
975 #endif /* CONFIG_DEBUG_FS */
976
977 #ifdef CONFIG_SWIOTLB_DYNAMIC
978 #ifdef CONFIG_DEBUG_FS
979 static void inc_transient_used(struct io_tlb_mem *mem, unsigned int nslots)
980 {
981 atomic_long_add(nslots, &mem->transient_nslabs);
982 }
983
984 static void dec_transient_used(struct io_tlb_mem *mem, unsigned int nslots)
985 {
986 atomic_long_sub(nslots, &mem->transient_nslabs);
987 }
988
989 #else /* !CONFIG_DEBUG_FS */
990 static void inc_transient_used(struct io_tlb_mem *mem, unsigned int nslots)
991 {
992 }
993 static void dec_transient_used(struct io_tlb_mem *mem, unsigned int nslots)
994 {
995 }
996 #endif /* CONFIG_DEBUG_FS */
997 #endif /* CONFIG_SWIOTLB_DYNAMIC */
998
999 /**
1000 * swiotlb_search_pool_area() - search one memory area in one pool
1001 * @dev: Device which maps the buffer.
1002 * @pool: Memory pool to be searched.
1003 * @area_index: Index of the IO TLB memory area to be searched.
1004 * @orig_addr: Original (non-bounced) IO buffer address.
1005 * @alloc_size: Total requested size of the bounce buffer,
1006 * including initial alignment padding.
1007 * @alloc_align_mask: Required alignment of the allocated buffer.
1008 *
1009 * Find a suitable sequence of IO TLB entries for the request and allocate
1010 * a buffer from the given IO TLB memory area.
1011 * This function takes care of locking.
1012 *
1013 * Return: Index of the first allocated slot, or -1 on error.
1014 */
1015 static int swiotlb_search_pool_area(struct device *dev, struct io_tlb_pool *pool,
1016 int area_index, phys_addr_t orig_addr, size_t alloc_size,
1017 unsigned int alloc_align_mask)
1018 {
1019 struct io_tlb_area *area = pool->areas + area_index;
1020 unsigned long boundary_mask = dma_get_seg_boundary(dev);
1021 dma_addr_t tbl_dma_addr =
1022 phys_to_dma_unencrypted(dev, pool->start) & boundary_mask;
1023 unsigned long max_slots = get_max_slots(boundary_mask);
1024 unsigned int iotlb_align_mask = dma_get_min_align_mask(dev);
1025 unsigned int nslots = nr_slots(alloc_size), stride;
1026 unsigned int offset = swiotlb_align_offset(dev, 0, orig_addr);
1027 unsigned int index, slots_checked, count = 0, i;
1028 unsigned long flags;
1029 unsigned int slot_base;
1030 unsigned int slot_index;
1031
1032 BUG_ON(!nslots);
1033 BUG_ON(area_index >= pool->nareas);
1034
1035 /*
1036 * Historically, swiotlb allocations >= PAGE_SIZE were guaranteed to be
1037 * page-aligned in the absence of any other alignment requirements.
1038 * 'alloc_align_mask' was later introduced to specify the alignment
1039 * explicitly, however this is passed as zero for streaming mappings
1040 * and so we preserve the old behaviour there in case any drivers are
1041 * relying on it.
1042 */
1043 if (!alloc_align_mask && !iotlb_align_mask && alloc_size >= PAGE_SIZE)
1044 alloc_align_mask = PAGE_SIZE - 1;
1045
1046 /*
1047 * Ensure that the allocation is at least slot-aligned and update
1048 * 'iotlb_align_mask' to ignore bits that will be preserved when
1049 * offsetting into the allocation.
1050 */
1051 alloc_align_mask |= (IO_TLB_SIZE - 1);
1052 iotlb_align_mask &= ~alloc_align_mask;
1053
1054 /*
1055  * For mappings with an alignment requirement, don't bother looping to
1056  * unaligned slots once we have found an aligned one.
1057 */
1058 stride = get_max_slots(max(alloc_align_mask, iotlb_align_mask));
1059
1060 spin_lock_irqsave(&area->lock, flags);
1061 if (unlikely(nslots > pool->area_nslabs - area->used))
1062 goto not_found;
1063
1064 slot_base = area_index * pool->area_nslabs;
1065 index = area->index;
1066
1067 for (slots_checked = 0; slots_checked < pool->area_nslabs; ) {
1068 phys_addr_t tlb_addr;
1069
1070 slot_index = slot_base + index;
1071 tlb_addr = slot_addr(tbl_dma_addr, slot_index);
1072
1073 if ((tlb_addr & alloc_align_mask) ||
1074 (orig_addr && (tlb_addr & iotlb_align_mask) !=
1075 (orig_addr & iotlb_align_mask))) {
1076 index = wrap_area_index(pool, index + 1);
1077 slots_checked++;
1078 continue;
1079 }
1080
1081 if (!iommu_is_span_boundary(slot_index, nslots,
1082 nr_slots(tbl_dma_addr),
1083 max_slots)) {
1084 if (pool->slots[slot_index].list >= nslots)
1085 goto found;
1086 }
1087 index = wrap_area_index(pool, index + stride);
1088 slots_checked += stride;
1089 }
1090
1091 not_found:
1092 spin_unlock_irqrestore(&area->lock, flags);
1093 return -1;
1094
1095 found:
1096 /*
1097 * If we find a slot that indicates we have 'nslots' number of
1098 * contiguous buffers, we allocate the buffers from that slot onwards
1099 * and set the list of free entries to '0' indicating unavailable.
1100 */
1101 for (i = slot_index; i < slot_index + nslots; i++) {
1102 pool->slots[i].list = 0;
1103 pool->slots[i].alloc_size = alloc_size - (offset +
1104 ((i - slot_index) << IO_TLB_SHIFT));
1105 }
1106 for (i = slot_index - 1;
1107 io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 &&
1108 pool->slots[i].list; i--)
1109 pool->slots[i].list = ++count;
1110
1111 /*
1112 * Update the indices to avoid searching in the next round.
1113 */
1114 area->index = wrap_area_index(pool, index + nslots);
1115 area->used += nslots;
1116 spin_unlock_irqrestore(&area->lock, flags);
1117
1118 inc_used_and_hiwater(dev->dma_io_tlb_mem, nslots);
1119 return slot_index;
1120 }
1121
1122 #ifdef CONFIG_SWIOTLB_DYNAMIC
1123
1124 /**
1125 * swiotlb_search_area() - search one memory area in all pools
1126 * @dev: Device which maps the buffer.
1127 * @start_cpu: Start CPU number.
1128 * @cpu_offset: Offset from @start_cpu.
1129 * @orig_addr: Original (non-bounced) IO buffer address.
1130 * @alloc_size: Total requested size of the bounce buffer,
1131 * including initial alignment padding.
1132 * @alloc_align_mask: Required alignment of the allocated buffer.
1133 * @retpool: Used memory pool, updated on return.
1134 *
1135 * Search one memory area in all pools for a sequence of slots that match the
1136 * allocation constraints.
1137 *
1138 * Return: Index of the first allocated slot, or -1 on error.
1139 */
1140 static int swiotlb_search_area(struct device *dev, int start_cpu,
1141 int cpu_offset, phys_addr_t orig_addr, size_t alloc_size,
1142 unsigned int alloc_align_mask, struct io_tlb_pool **retpool)
1143 {
1144 struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
1145 struct io_tlb_pool *pool;
1146 int area_index;
1147 int index = -1;
1148
1149 rcu_read_lock();
1150 list_for_each_entry_rcu(pool, &mem->pools, node) {
1151 if (cpu_offset >= pool->nareas)
1152 continue;
1153 area_index = (start_cpu + cpu_offset) & (pool->nareas - 1);
1154 index = swiotlb_search_pool_area(dev, pool, area_index,
1155 orig_addr, alloc_size,
1156 alloc_align_mask);
1157 if (index >= 0) {
1158 *retpool = pool;
1159 break;
1160 }
1161 }
1162 rcu_read_unlock();
1163 return index;
1164 }
1165
1166 /**
1167 * swiotlb_find_slots() - search for slots in the whole swiotlb
1168 * @dev: Device which maps the buffer.
1169 * @orig_addr: Original (non-bounced) IO buffer address.
1170 * @alloc_size: Total requested size of the bounce buffer,
1171 * including initial alignment padding.
1172 * @alloc_align_mask: Required alignment of the allocated buffer.
1173 * @retpool: Used memory pool, updated on return.
1174 *
1175 * Search through the whole software IO TLB to find a sequence of slots that
1176 * match the allocation constraints.
1177 *
1178 * Return: Index of the first allocated slot, or -1 on error.
1179 */
1180 static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
1181 size_t alloc_size, unsigned int alloc_align_mask,
1182 struct io_tlb_pool **retpool)
1183 {
1184 struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
1185 struct io_tlb_pool *pool;
1186 unsigned long nslabs;
1187 unsigned long flags;
1188 u64 phys_limit;
1189 int cpu, i;
1190 int index;
1191
1192 if (alloc_size > IO_TLB_SEGSIZE * IO_TLB_SIZE)
1193 return -1;
1194
1195 cpu = raw_smp_processor_id();
1196 for (i = 0; i < default_nareas; ++i) {
1197 index = swiotlb_search_area(dev, cpu, i, orig_addr, alloc_size,
1198 alloc_align_mask, &pool);
1199 if (index >= 0)
1200 goto found;
1201 }
1202
1203 if (!mem->can_grow)
1204 return -1;
1205
1206 schedule_work(&mem->dyn_alloc);
1207
1208 nslabs = nr_slots(alloc_size);
1209 phys_limit = min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
1210 pool = swiotlb_alloc_pool(dev, nslabs, nslabs, 1, phys_limit,
1211 GFP_NOWAIT | __GFP_NOWARN);
1212 if (!pool)
1213 return -1;
1214
1215 index = swiotlb_search_pool_area(dev, pool, 0, orig_addr,
1216 alloc_size, alloc_align_mask);
1217 if (index < 0) {
1218 swiotlb_dyn_free(&pool->rcu);
1219 return -1;
1220 }
1221
1222 pool->transient = true;
1223 spin_lock_irqsave(&dev->dma_io_tlb_lock, flags);
1224 list_add_rcu(&pool->node, &dev->dma_io_tlb_pools);
1225 spin_unlock_irqrestore(&dev->dma_io_tlb_lock, flags);
1226 inc_transient_used(mem, pool->nslabs);
1227
1228 found:
1229 WRITE_ONCE(dev->dma_uses_io_tlb, true);
1230
1231 /*
1232 * The general barrier orders reads and writes against a presumed store
1233 * of the SWIOTLB buffer address by a device driver (to a driver private
1234 * data structure). It serves two purposes.
1235 *
1236 * First, the store to dev->dma_uses_io_tlb must be ordered before the
1237 * presumed store. This guarantees that the returned buffer address
1238 * cannot be passed to another CPU before updating dev->dma_uses_io_tlb.
1239 *
1240 * Second, the load from mem->pools must be ordered before the same
1241 * presumed store. This guarantees that the returned buffer address
1242 * cannot be observed by another CPU before an update of the RCU list
1243 * that was made by swiotlb_dyn_alloc() on a third CPU (cf. multicopy
1244 * atomicity).
1245 *
1246 * See also the comment in is_swiotlb_buffer().
1247 */
1248 smp_mb();
1249
1250 *retpool = pool;
1251 return index;
1252 }
1253
1254 #else /* !CONFIG_SWIOTLB_DYNAMIC */
1255
1256 static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
1257 size_t alloc_size, unsigned int alloc_align_mask,
1258 struct io_tlb_pool **retpool)
1259 {
1260 struct io_tlb_pool *pool;
1261 int start, i;
1262 int index;
1263
1264 *retpool = pool = &dev->dma_io_tlb_mem->defpool;
1265 i = start = raw_smp_processor_id() & (pool->nareas - 1);
1266 do {
1267 index = swiotlb_search_pool_area(dev, pool, i, orig_addr,
1268 alloc_size, alloc_align_mask);
1269 if (index >= 0)
1270 return index;
1271 if (++i >= pool->nareas)
1272 i = 0;
1273 } while (i != start);
1274 return -1;
1275 }
1276
1277 #endif /* CONFIG_SWIOTLB_DYNAMIC */
1278
1279 #ifdef CONFIG_DEBUG_FS
1280
1281 /**
1282 * mem_used() - get number of used slots in an allocator
1283 * @mem: Software IO TLB allocator.
1284 *
1285 * The result is accurate in this version of the function, because an atomic
1286 * counter is available if CONFIG_DEBUG_FS is set.
1287 *
1288 * Return: Number of used slots.
1289 */
1290 static unsigned long mem_used(struct io_tlb_mem *mem)
1291 {
1292 return atomic_long_read(&mem->total_used);
1293 }
1294
1295 #else /* !CONFIG_DEBUG_FS */
1296
1297 /**
1298 * mem_pool_used() - get number of used slots in a memory pool
1299 * @pool: Software IO TLB memory pool.
1300 *
1301 * The result is not accurate, see mem_used().
1302 *
1303 * Return: Approximate number of used slots.
1304 */
1305 static unsigned long mem_pool_used(struct io_tlb_pool *pool)
1306 {
1307 int i;
1308 unsigned long used = 0;
1309
1310 for (i = 0; i < pool->nareas; i++)
1311 used += pool->areas[i].used;
1312 return used;
1313 }
1314
1315 /**
1316 * mem_used() - get number of used slots in an allocator
1317 * @mem: Software IO TLB allocator.
1318 *
1319 * The result is not accurate, because there is no locking of individual
1320 * areas.
1321 *
1322 * Return: Approximate number of used slots.
1323 */
1324 static unsigned long mem_used(struct io_tlb_mem *mem)
1325 {
1326 #ifdef CONFIG_SWIOTLB_DYNAMIC
1327 struct io_tlb_pool *pool;
1328 unsigned long used = 0;
1329
1330 rcu_read_lock();
1331 list_for_each_entry_rcu(pool, &mem->pools, node)
1332 used += mem_pool_used(pool);
1333 rcu_read_unlock();
1334
1335 return used;
1336 #else
1337 return mem_pool_used(&mem->defpool);
1338 #endif
1339 }
1340
1341 #endif /* CONFIG_DEBUG_FS */
1342
1343 /**
1344 * swiotlb_tbl_map_single() - bounce buffer map a single contiguous physical area
1345 * @dev: Device which maps the buffer.
1346 * @orig_addr: Original (non-bounced) physical IO buffer address
1347 * @mapping_size: Requested size of the actual bounce buffer, excluding
1348 * any pre- or post-padding for alignment
1349 * @alloc_align_mask: Required start and end alignment of the allocated buffer
1350 * @dir: DMA direction
1351 * @attrs: Optional DMA attributes for the map operation
1352 *
1353 * Find and allocate a suitable sequence of IO TLB slots for the request.
1354 * The allocated space starts at an alignment specified by alloc_align_mask,
1355 * and the size of the allocated space is rounded up so that the total amount
1356 * of allocated space is a multiple of (alloc_align_mask + 1). If
1357 * alloc_align_mask is zero, the allocated space may be at any alignment and
1358 * the size is not rounded up.
1359 *
1360 * The returned address is within the allocated space and matches the bits
1361 * of orig_addr that are specified in the DMA min_align_mask for the device. As
1362 * such, this returned address may be offset from the beginning of the allocated
1363 * space. The bounce buffer space starting at the returned address for
1364 * mapping_size bytes is initialized to the contents of the original IO buffer
1365 * area. Any pre-padding (due to an offset) and any post-padding (due to
1366 * rounding-up the size) is not initialized.
1367 */
1368 phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
1369 size_t mapping_size, unsigned int alloc_align_mask,
1370 enum dma_data_direction dir, unsigned long attrs)
1371 {
1372 struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
1373 unsigned int offset;
1374 struct io_tlb_pool *pool;
1375 unsigned int i;
1376 size_t size;
1377 int index;
1378 phys_addr_t tlb_addr;
1379 unsigned short pad_slots;
1380
1381 if (!mem || !mem->nslabs) {
1382 dev_warn_ratelimited(dev,
1383 "Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
1384 return (phys_addr_t)DMA_MAPPING_ERROR;
1385 }
1386
1387 if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
1388 pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");
1389
1390 /*
1391 * The default swiotlb memory pool is allocated with PAGE_SIZE
1392 * alignment. If a mapping is requested with larger alignment,
1393 * the mapping may be unable to use the initial slot(s) in all
1394 * sets of IO_TLB_SEGSIZE slots. In such case, a mapping request
1395  * sets of IO_TLB_SEGSIZE slots. In such a case, a mapping request
1396 */
1397 dev_WARN_ONCE(dev, alloc_align_mask > ~PAGE_MASK,
1398 "Alloc alignment may prevent fulfilling requests with max mapping_size\n");
1399
1400 offset = swiotlb_align_offset(dev, alloc_align_mask, orig_addr);
1401 size = ALIGN(mapping_size + offset, alloc_align_mask + 1);
1402 index = swiotlb_find_slots(dev, orig_addr, size, alloc_align_mask, &pool);
1403 if (index == -1) {
1404 if (!(attrs & DMA_ATTR_NO_WARN))
1405 dev_warn_ratelimited(dev,
1406 "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
1407 size, mem->nslabs, mem_used(mem));
1408 return (phys_addr_t)DMA_MAPPING_ERROR;
1409 }
1410
1411 /*
1412 * If dma_skip_sync was set, reset it on first SWIOTLB buffer
1413 * mapping to always sync SWIOTLB buffers.
1414 */
1415 dma_reset_need_sync(dev);
1416
1417 /*
1418 * Save away the mapping from the original address to the DMA address.
1419 * This is needed when we sync the memory. Then we sync the buffer if
1420 * needed.
1421 */
1422 pad_slots = offset >> IO_TLB_SHIFT;
1423 offset &= (IO_TLB_SIZE - 1);
1424 index += pad_slots;
1425 pool->slots[index].pad_slots = pad_slots;
1426 for (i = 0; i < (nr_slots(size) - pad_slots); i++)
1427 pool->slots[index + i].orig_addr = slot_addr(orig_addr, i);
1428 tlb_addr = slot_addr(pool->start, index) + offset;
1429 /*
1430 * When the device is writing memory, i.e. dir == DMA_FROM_DEVICE, copy
1431 * the original buffer to the TLB buffer before initiating DMA in order
1432 * to preserve the original's data if the device does a partial write,
1433 * i.e. if the device doesn't overwrite the entire buffer. Preserving
1434 * the original data, even if it's garbage, is necessary to match
1435 * hardware behavior. Use of swiotlb is supposed to be transparent,
1436 * i.e. swiotlb must not corrupt memory by clobbering unwritten bytes.
1437 */
1438 swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE);
1439 return tlb_addr;
1440 }
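/*
 * Worked example of the padding math above (the values are hypothetical):
 * with min_align_mask == 0xFFF, alloc_align_mask == 0xFFF and an orig_addr
 * whose low 12 bits are 0x8A4, swiotlb_align_offset() returns 0x8A4, so
 * pad_slots == 1 and the in-slot offset becomes 0xA4. The first slot of the
 * 4 KiB-aligned allocation is left as padding and the returned tlb_addr
 * points 0x8A4 bytes into the allocation, reproducing the low address bits
 * of the original buffer.
 */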
1441
1442 static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
1443 {
1444 struct io_tlb_pool *mem = swiotlb_find_pool(dev, tlb_addr);
1445 unsigned long flags;
1446 unsigned int offset = swiotlb_align_offset(dev, 0, tlb_addr);
1447 int index, nslots, aindex;
1448 struct io_tlb_area *area;
1449 int count, i;
1450
1451 index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
1452 index -= mem->slots[index].pad_slots;
1453 nslots = nr_slots(mem->slots[index].alloc_size + offset);
1454 aindex = index / mem->area_nslabs;
1455 area = &mem->areas[aindex];
1456
1457 /*
1458 * Return the buffer to the free list by setting the corresponding
1459 * entries to indicate the number of contiguous entries available.
1460 * While returning the entries to the free list, we merge the entries
1461  * with the free slots below and above the range being returned.
1462 */
1463 BUG_ON(aindex >= mem->nareas);
1464
1465 spin_lock_irqsave(&area->lock, flags);
1466 if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE))
1467 count = mem->slots[index + nslots].list;
1468 else
1469 count = 0;
1470
1471 /*
1472 * Step 1: return the slots to the free list, merging the slots with
1473  * succeeding slots
1474 */
1475 for (i = index + nslots - 1; i >= index; i--) {
1476 mem->slots[i].list = ++count;
1477 mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
1478 mem->slots[i].alloc_size = 0;
1479 mem->slots[i].pad_slots = 0;
1480 }
1481
1482 /*
1483 * Step 2: merge the returned slots with the preceding slots, if
1484  * available (non-zero)
1485 */
1486 for (i = index - 1;
1487 io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list;
1488 i--)
1489 mem->slots[i].list = ++count;
1490 area->used -= nslots;
1491 spin_unlock_irqrestore(&area->lock, flags);
1492
1493 dec_used(dev->dma_io_tlb_mem, nslots);
1494 }
1495
1496 #ifdef CONFIG_SWIOTLB_DYNAMIC
1497
1498 /**
1499 * swiotlb_del_transient() - delete a transient memory pool
1500 * @dev: Device which mapped the buffer.
1501 * @tlb_addr: Physical address within a bounce buffer.
1502 *
1503 * Check whether the address belongs to a transient SWIOTLB memory pool.
1504 * If yes, then delete the pool.
1505 *
1506 * Return: %true if @tlb_addr belonged to a transient pool that was released.
1507 */
1508 static bool swiotlb_del_transient(struct device *dev, phys_addr_t tlb_addr)
1509 {
1510 struct io_tlb_pool *pool;
1511
1512 pool = swiotlb_find_pool(dev, tlb_addr);
1513 if (!pool->transient)
1514 return false;
1515
1516 dec_used(dev->dma_io_tlb_mem, pool->nslabs);
1517 swiotlb_del_pool(dev, pool);
1518 dec_transient_used(dev->dma_io_tlb_mem, pool->nslabs);
1519 return true;
1520 }
1521
1522 #else /* !CONFIG_SWIOTLB_DYNAMIC */
1523
1524 static inline bool swiotlb_del_transient(struct device *dev,
1525 phys_addr_t tlb_addr)
1526 {
1527 return false;
1528 }
1529
1530 #endif /* CONFIG_SWIOTLB_DYNAMIC */
1531
1532 /*
1533 * tlb_addr is the physical address of the bounce buffer to unmap.
1534 */
1535 void swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t tlb_addr,
1536 size_t mapping_size, enum dma_data_direction dir,
1537 unsigned long attrs)
1538 {
1539 /*
1540 * First, sync the memory before unmapping the entry
1541 */
1542 if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
1543 (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
1544 swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_FROM_DEVICE);
1545
1546 if (swiotlb_del_transient(dev, tlb_addr))
1547 return;
1548 swiotlb_release_slots(dev, tlb_addr);
1549 }
1550
1551 void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
1552 size_t size, enum dma_data_direction dir)
1553 {
1554 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
1555 swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE);
1556 else
1557 BUG_ON(dir != DMA_FROM_DEVICE);
1558 }
1559
1560 void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
1561 size_t size, enum dma_data_direction dir)
1562 {
1563 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
1564 swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE);
1565 else
1566 BUG_ON(dir != DMA_TO_DEVICE);
1567 }
1568
1569 /*
1570 * Create a swiotlb mapping for the buffer at @paddr, and in case of DMAing
1571 * to the device copy the data into it as well.
1572 */
1573 dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
1574 enum dma_data_direction dir, unsigned long attrs)
1575 {
1576 phys_addr_t swiotlb_addr;
1577 dma_addr_t dma_addr;
1578
1579 trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size);
1580
1581 swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, 0, dir, attrs);
1582 if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
1583 return DMA_MAPPING_ERROR;
1584
1585 /* Ensure that the address returned is DMA'ble */
1586 dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr);
1587 if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
1588 swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, dir,
1589 attrs | DMA_ATTR_SKIP_CPU_SYNC);
1590 dev_WARN_ONCE(dev, 1,
1591 "swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
1592 &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
1593 return DMA_MAPPING_ERROR;
1594 }
1595
1596 if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
1597 arch_sync_dma_for_device(swiotlb_addr, size, dir);
1598 return dma_addr;
1599 }
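/*
 * For context, this is roughly how the dma-direct path ends up here for a
 * streaming mapping (a heavily simplified sketch, not the exact upstream
 * code, which also handles P2PDMA and cache maintenance details):
 *
 *	dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
 *			unsigned long offset, size_t size,
 *			enum dma_data_direction dir, unsigned long attrs)
 *	{
 *		phys_addr_t phys = page_to_phys(page) + offset;
 *
 *		if (is_swiotlb_force_bounce(dev) ||
 *		    !dma_capable(dev, phys_to_dma(dev, phys), size, true))
 *			return swiotlb_map(dev, phys, size, dir, attrs);
 *
 *		return phys_to_dma(dev, phys);
 *	}
 *
 * The matching unmap path calls swiotlb_tbl_unmap_single() when the DMA
 * address resolves to a bounce buffer.
 */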
1600
1601 size_t swiotlb_max_mapping_size(struct device *dev)
1602 {
1603 int min_align_mask = dma_get_min_align_mask(dev);
1604 int min_align = 0;
1605
1606 /*
1607  * swiotlb_find_slots() skips slots according to the
1608  * min_align_mask. This affects the maximum mapping size.
1609  * Take it into account here.
1610 */
1611 if (min_align_mask)
1612 min_align = roundup(min_align_mask, IO_TLB_SIZE);
1613
1614 return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE - min_align;
1615 }
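/*
 * Numerically: with 2 KiB slots and IO_TLB_SEGSIZE == 128 the hard cap is
 * 256 KiB per mapping; a device with min_align_mask == 0xFFF additionally
 * loses roundup(0xFFF, IO_TLB_SIZE) == 4 KiB to the alignment reserve,
 * leaving 252 KiB.
 */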
1616
1617 /**
1618 * is_swiotlb_allocated() - check if the default software IO TLB is initialized
1619 */
1620 bool is_swiotlb_allocated(void)
1621 {
1622 return io_tlb_default_mem.nslabs;
1623 }
1624
1625 bool is_swiotlb_active(struct device *dev)
1626 {
1627 struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
1628
1629 return mem && mem->nslabs;
1630 }
1631
1632 /**
1633 * default_swiotlb_base() - get the base address of the default SWIOTLB
1634 *
1635 * Get the lowest physical address used by the default software IO TLB pool.
1636 */
1637 phys_addr_t default_swiotlb_base(void)
1638 {
1639 #ifdef CONFIG_SWIOTLB_DYNAMIC
1640 io_tlb_default_mem.can_grow = false;
1641 #endif
1642 return io_tlb_default_mem.defpool.start;
1643 }
1644
1645 /**
1646 * default_swiotlb_limit() - get the address limit of the default SWIOTLB
1647 *
1648 * Get the highest physical address used by the default software IO TLB pool.
1649 */
1650 phys_addr_t default_swiotlb_limit(void)
1651 {
1652 #ifdef CONFIG_SWIOTLB_DYNAMIC
1653 return io_tlb_default_mem.phys_limit;
1654 #else
1655 return io_tlb_default_mem.defpool.end - 1;
1656 #endif
1657 }
1658
1659 #ifdef CONFIG_DEBUG_FS
1660 #ifdef CONFIG_SWIOTLB_DYNAMIC
1661 static unsigned long mem_transient_used(struct io_tlb_mem *mem)
1662 {
1663 return atomic_long_read(&mem->transient_nslabs);
1664 }
1665
1666 static int io_tlb_transient_used_get(void *data, u64 *val)
1667 {
1668 struct io_tlb_mem *mem = data;
1669
1670 *val = mem_transient_used(mem);
1671 return 0;
1672 }
1673
1674 DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_transient_used, io_tlb_transient_used_get,
1675 NULL, "%llu\n");
1676 #endif /* CONFIG_SWIOTLB_DYNAMIC */
1677
1678 static int io_tlb_used_get(void *data, u64 *val)
1679 {
1680 struct io_tlb_mem *mem = data;
1681
1682 *val = mem_used(mem);
1683 return 0;
1684 }
1685
1686 static int io_tlb_hiwater_get(void *data, u64 *val)
1687 {
1688 struct io_tlb_mem *mem = data;
1689
1690 *val = atomic_long_read(&mem->used_hiwater);
1691 return 0;
1692 }
1693
1694 static int io_tlb_hiwater_set(void *data, u64 val)
1695 {
1696 struct io_tlb_mem *mem = data;
1697
1698 /* Only allow setting to zero */
1699 if (val != 0)
1700 return -EINVAL;
1701
1702 atomic_long_set(&mem->used_hiwater, val);
1703 return 0;
1704 }
1705
1706 DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_used, io_tlb_used_get, NULL, "%llu\n");
1707 DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_hiwater, io_tlb_hiwater_get,
1708 io_tlb_hiwater_set, "%llu\n");
1709
1710 static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
1711 const char *dirname)
1712 {
1713 mem->debugfs = debugfs_create_dir(dirname, io_tlb_default_mem.debugfs);
1714 if (!mem->nslabs)
1715 return;
1716
1717 debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs);
1718 debugfs_create_file("io_tlb_used", 0400, mem->debugfs, mem,
1719 &fops_io_tlb_used);
1720 debugfs_create_file("io_tlb_used_hiwater", 0600, mem->debugfs, mem,
1721 &fops_io_tlb_hiwater);
1722 #ifdef CONFIG_SWIOTLB_DYNAMIC
1723 debugfs_create_file("io_tlb_transient_nslabs", 0400, mem->debugfs,
1724 mem, &fops_io_tlb_transient_used);
1725 #endif
1726 }
1727
1728 static int __init swiotlb_create_default_debugfs(void)
1729 {
1730 swiotlb_create_debugfs_files(&io_tlb_default_mem, "swiotlb");
1731 return 0;
1732 }
1733
1734 late_initcall(swiotlb_create_default_debugfs);
1735
1736 #else /* !CONFIG_DEBUG_FS */
1737
1738 static inline void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
1739 const char *dirname)
1740 {
1741 }
1742
1743 #endif /* CONFIG_DEBUG_FS */
1744
1745 #ifdef CONFIG_DMA_RESTRICTED_POOL
1746
1747 struct page *swiotlb_alloc(struct device *dev, size_t size)
1748 {
1749 struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
1750 struct io_tlb_pool *pool;
1751 phys_addr_t tlb_addr;
1752 unsigned int align;
1753 int index;
1754
1755 if (!mem)
1756 return NULL;
1757
1758 align = (1 << (get_order(size) + PAGE_SHIFT)) - 1;
1759 index = swiotlb_find_slots(dev, 0, size, align, &pool);
1760 if (index == -1)
1761 return NULL;
1762
1763 tlb_addr = slot_addr(pool->start, index);
1764 if (unlikely(!PAGE_ALIGNED(tlb_addr))) {
1765 dev_WARN_ONCE(dev, 1, "Cannot allocate pages from non page-aligned swiotlb addr 0x%pa.\n",
1766 &tlb_addr);
1767 swiotlb_release_slots(dev, tlb_addr);
1768 return NULL;
1769 }
1770
1771 return pfn_to_page(PFN_DOWN(tlb_addr));
1772 }
1773
1774 bool swiotlb_free(struct device *dev, struct page *page, size_t size)
1775 {
1776 phys_addr_t tlb_addr = page_to_phys(page);
1777
1778 if (!is_swiotlb_buffer(dev, tlb_addr))
1779 return false;
1780
1781 swiotlb_release_slots(dev, tlb_addr);
1782
1783 return true;
1784 }
1785
1786 static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
1787 struct device *dev)
1788 {
1789 struct io_tlb_mem *mem = rmem->priv;
1790 unsigned long nslabs = rmem->size >> IO_TLB_SHIFT;
1791
1792 /* Use a single IO TLB area for the per-device pool */
1793 unsigned int nareas = 1;
1794
1795 if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
1796 dev_err(dev, "Restricted DMA pool must be accessible within the linear mapping.");
1797 return -EINVAL;
1798 }
1799
1800 /*
1801 * Since multiple devices can share the same pool, the private data,
1802 * io_tlb_mem struct, will be initialized by the first device attached
1803 * to it.
1804 */
1805 if (!mem) {
1806 struct io_tlb_pool *pool;
1807
1808 mem = kzalloc(sizeof(*mem), GFP_KERNEL);
1809 if (!mem)
1810 return -ENOMEM;
1811 pool = &mem->defpool;
1812
1813 pool->slots = kcalloc(nslabs, sizeof(*pool->slots), GFP_KERNEL);
1814 if (!pool->slots) {
1815 kfree(mem);
1816 return -ENOMEM;
1817 }
1818
1819 pool->areas = kcalloc(nareas, sizeof(*pool->areas),
1820 GFP_KERNEL);
1821 if (!pool->areas) {
1822 kfree(pool->slots);
1823 kfree(mem);
1824 return -ENOMEM;
1825 }
1826
1827 set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
1828 rmem->size >> PAGE_SHIFT);
1829 swiotlb_init_io_tlb_pool(pool, rmem->base, nslabs,
1830 false, nareas);
1831 mem->force_bounce = true;
1832 mem->for_alloc = true;
1833 #ifdef CONFIG_SWIOTLB_DYNAMIC
1834 spin_lock_init(&mem->lock);
1835 INIT_LIST_HEAD_RCU(&mem->pools);
1836 #endif
1837 add_mem_pool(mem, pool);
1838
1839 rmem->priv = mem;
1840
1841 swiotlb_create_debugfs_files(mem, rmem->name);
1842 }
1843
1844 dev->dma_io_tlb_mem = mem;
1845
1846 return 0;
1847 }
1848
1849 static void rmem_swiotlb_device_release(struct reserved_mem *rmem,
1850 struct device *dev)
1851 {
1852 dev->dma_io_tlb_mem = &io_tlb_default_mem;
1853 }
1854
1855 static const struct reserved_mem_ops rmem_swiotlb_ops = {
1856 .device_init = rmem_swiotlb_device_init,
1857 .device_release = rmem_swiotlb_device_release,
1858 };
1859
1860 static int __init rmem_swiotlb_setup(struct reserved_mem *rmem)
1861 {
1862 unsigned long node = rmem->fdt_node;
1863
1864 if (of_get_flat_dt_prop(node, "reusable", NULL) ||
1865 of_get_flat_dt_prop(node, "linux,cma-default", NULL) ||
1866 of_get_flat_dt_prop(node, "linux,dma-default", NULL) ||
1867 of_get_flat_dt_prop(node, "no-map", NULL))
1868 return -EINVAL;
1869
1870 rmem->ops = &rmem_swiotlb_ops;
1871 pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n",
1872 &rmem->base, (unsigned long)rmem->size / SZ_1M);
1873 return 0;
1874 }
1875
1876 RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);
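/*
 * A restricted DMA pool is wired up from the device tree. A minimal,
 * illustrative fragment (addresses, sizes and node names are placeholders)
 * looks like this:
 *
 *	reserved-memory {
 *		#address-cells = <2>;
 *		#size-cells = <2>;
 *		ranges;
 *
 *		restricted_dma: restricted-dma-pool@50000000 {
 *			compatible = "restricted-dma-pool";
 *			reg = <0x0 0x50000000 0x0 0x4000000>;
 *		};
 *	};
 *
 *	ethernet@a0000000 {
 *		memory-region = <&restricted_dma>;
 *	};
 *
 * Every device referencing the node through "memory-region" then has its
 * dma_io_tlb_mem pointed at the shared pool by rmem_swiotlb_device_init().
 */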
1877 #endif /* CONFIG_DMA_RESTRICTED_POOL */