// SPDX-License-Identifier: GPL-2.0-only
/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 * 05/09/10 linville	Add support for syncing ranges, support syncing for
 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 * 08/12/11 beckyb	Add highmem support
 */

#define pr_fmt(fmt) "software IO TLB: " fmt

#include <linux/cache.h>
#include <linux/cc_platform.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/iommu-helper.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/rculist.h>
#include <linux/scatterlist.h>
#include <linux/set_memory.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/types.h>
#ifdef CONFIG_DMA_RESTRICTED_POOL
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
#include <linux/slab.h>
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/swiotlb.h>

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)

#define INVALID_PHYS_ADDR (~(phys_addr_t)0)

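/*
 * For a quick sanity check of the arithmetic above, assuming the common
 * configuration of 4 KiB pages (PAGE_SHIFT == 12) and 2 KiB slots
 * (IO_TLB_SHIFT == 11):
 *
 *	SLABS_PER_PAGE   == 1 << (12 - 11)  == 2 slots per page
 *	IO_TLB_MIN_SLABS == (1 << 20) >> 11 == 512 slots == 1 MiB
 */
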
/**
 * struct io_tlb_slot - IO TLB slot descriptor
 * @orig_addr:	The original address corresponding to a mapped entry.
 * @alloc_size:	Size of the allocated buffer.
 * @list:	The free list describing the number of free entries available
 *		from each index.
 */
struct io_tlb_slot {
	phys_addr_t orig_addr;
	size_t alloc_size;
	unsigned int list;
};

static bool swiotlb_force_bounce;
static bool swiotlb_force_disable;

#ifdef CONFIG_SWIOTLB_DYNAMIC

static void swiotlb_dyn_alloc(struct work_struct *work);

static struct io_tlb_mem io_tlb_default_mem = {
	.lock = __SPIN_LOCK_UNLOCKED(io_tlb_default_mem.lock),
	.pools = LIST_HEAD_INIT(io_tlb_default_mem.pools),
	.dyn_alloc = __WORK_INITIALIZER(io_tlb_default_mem.dyn_alloc,
					swiotlb_dyn_alloc),
};

#else	/* !CONFIG_SWIOTLB_DYNAMIC */

static struct io_tlb_mem io_tlb_default_mem;

#endif	/* CONFIG_SWIOTLB_DYNAMIC */

static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;
static unsigned long default_nareas;

/**
 * struct io_tlb_area - IO TLB memory area descriptor
 *
 * This is a single area with a single lock.
 *
 * @used:	The number of used IO TLB blocks.
 * @index:	The slot index to start searching in this area for next round.
 * @lock:	The lock to protect the above data structures in the map and
 *		unmap calls.
 */
struct io_tlb_area {
	unsigned long used;
	unsigned int index;
	spinlock_t lock;
};

/*
 * Round up the number of slabs to the next power of 2. The last area is
 * going to be smaller than the rest if default_nslabs is not a power of two.
 * The number of slots in an area should be a multiple of IO_TLB_SEGSIZE,
 * otherwise a segment may span two or more areas. That would conflict with
 * the tracking of free contiguous slots: free slots are treated as
 * contiguous no matter whether they cross an area boundary.
 *
 * Return true if default_nslabs is rounded up.
 */
static bool round_up_default_nslabs(void)
{
	if (!default_nareas)
		return false;

	if (default_nslabs < IO_TLB_SEGSIZE * default_nareas)
		default_nslabs = IO_TLB_SEGSIZE * default_nareas;
	else if (is_power_of_2(default_nslabs))
		return false;
	default_nslabs = roundup_pow_of_two(default_nslabs);
	return true;
}

/**
 * swiotlb_adjust_nareas() - adjust the number of areas and slots
 * @nareas:	Desired number of areas. Zero is treated as 1.
 *
 * Adjust the default number of areas in a memory pool.
 * The default size of the memory pool may also change to meet minimum area
 * size requirements.
 */
static void swiotlb_adjust_nareas(unsigned int nareas)
{
	if (!nareas)
		nareas = 1;
	else if (!is_power_of_2(nareas))
		nareas = roundup_pow_of_two(nareas);

	default_nareas = nareas;

	pr_info("area num %d.\n", nareas);
	if (round_up_default_nslabs())
		pr_info("SWIOTLB bounce buffer size roundup to %luMB",
			(default_nslabs << IO_TLB_SHIFT) >> 20);
}

/**
 * limit_nareas() - get the maximum number of areas for a given memory pool size
 * @nareas:	Desired number of areas.
 * @nslots:	Total number of slots in the memory pool.
 *
 * Limit the number of areas to the maximum possible number of areas in
 * a memory pool of the given size.
 *
 * Return: Maximum possible number of areas.
 */
static unsigned int limit_nareas(unsigned int nareas, unsigned long nslots)
{
	if (nslots < nareas * IO_TLB_SEGSIZE)
		return nslots / IO_TLB_SEGSIZE;
	return nareas;
}

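/*
 * For example, assuming IO_TLB_SEGSIZE == 128: a pool of 1024 slots can
 * hold at most 1024 / 128 == 8 areas, so a request for 16 areas is limited
 * to 8, while a request for 4 areas is returned unchanged.
 */
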
static int __init
setup_io_tlb_npages(char *str)
{
	if (isdigit(*str)) {
		/* avoid tail segment of size < IO_TLB_SEGSIZE */
		default_nslabs =
			ALIGN(simple_strtoul(str, &str, 0), IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (isdigit(*str))
		swiotlb_adjust_nareas(simple_strtoul(str, &str, 0));
	if (*str == ',')
		++str;
	if (!strcmp(str, "force"))
		swiotlb_force_bounce = true;
	else if (!strcmp(str, "noforce"))
		swiotlb_force_disable = true;

	return 0;
}
early_param("swiotlb", setup_io_tlb_npages);

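/*
 * The parser above accepts up to three comma-separated fields; e.g. a
 * hypothetical "swiotlb=65536,4,force" on the kernel command line asks for
 * 65536 slabs (128 MiB with 2 KiB slots), 4 areas, and bouncing forced even
 * for devices that could DMA directly. Trailing fields may be omitted, so
 * plain "swiotlb=noforce" is also accepted.
 */
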
unsigned long swiotlb_size_or_default(void)
{
	return default_nslabs << IO_TLB_SHIFT;
}

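/*
 * With the default IO_TLB_DEFAULT_SIZE of 64 MiB and 2 KiB slots, this
 * reports 32768 << IO_TLB_SHIFT == 64 MiB, unless a boot parameter or
 * swiotlb_adjust_size() has changed default_nslabs.
 */
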
void __init swiotlb_adjust_size(unsigned long size)
{
	/*
	 * If the swiotlb parameter has not been specified, give a chance to
	 * architectures such as those supporting memory encryption to
	 * adjust/expand SWIOTLB size for their use.
	 */
	if (default_nslabs != IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT)
		return;

	size = ALIGN(size, IO_TLB_SIZE);
	default_nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
	if (round_up_default_nslabs())
		size = default_nslabs << IO_TLB_SHIFT;
	pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20);
}

void swiotlb_print_info(void)
{
	struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;

	if (!mem->nslabs) {
		pr_warn("No low mem\n");
		return;
	}

	pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end,
		(mem->nslabs << IO_TLB_SHIFT) >> 20);
}

static inline unsigned long io_tlb_offset(unsigned long val)
{
	return val & (IO_TLB_SEGSIZE - 1);
}

static inline unsigned long nr_slots(u64 val)
{
	return DIV_ROUND_UP(val, IO_TLB_SIZE);
}

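/*
 * With IO_TLB_SIZE == 2 KiB, nr_slots() rounds a byte count up to whole
 * slots: nr_slots(1) == 1, nr_slots(2048) == 1, nr_slots(2049) == 2, and a
 * 5000-byte mapping needs nr_slots(5000) == 3 slots.
 */
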
/*
 * Early SWIOTLB allocation may be too early to allow an architecture to
 * perform the desired operations.  This function allows the architecture to
 * call SWIOTLB when the operations are possible.  It needs to be called
 * before the SWIOTLB memory is used.
 */
void __init swiotlb_update_mem_attributes(void)
{
	struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
	unsigned long bytes;

	if (!mem->nslabs || mem->late_alloc)
		return;
	bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT);
	set_memory_decrypted((unsigned long)mem->vaddr, bytes >> PAGE_SHIFT);
}

static void swiotlb_init_io_tlb_pool(struct io_tlb_pool *mem, phys_addr_t start,
		unsigned long nslabs, bool late_alloc, unsigned int nareas)
{
	void *vaddr = phys_to_virt(start);
	unsigned long bytes = nslabs << IO_TLB_SHIFT, i;

	mem->nslabs = nslabs;
	mem->start = start;
	mem->end = mem->start + bytes;
	mem->late_alloc = late_alloc;
	mem->nareas = nareas;
	mem->area_nslabs = nslabs / mem->nareas;

	for (i = 0; i < mem->nareas; i++) {
		spin_lock_init(&mem->areas[i].lock);
		mem->areas[i].index = 0;
		mem->areas[i].used = 0;
	}

	for (i = 0; i < mem->nslabs; i++) {
		mem->slots[i].list = min(IO_TLB_SEGSIZE - io_tlb_offset(i),
					 mem->nslabs - i);
		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
		mem->slots[i].alloc_size = 0;
	}

	memset(vaddr, 0, bytes);
	mem->vaddr = vaddr;
}

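/*
 * After the initialization above, with IO_TLB_SEGSIZE == 128, the free list
 * of the first segment reads 128, 127, ..., 2, 1: each slot stores how many
 * contiguous free slots remain from its index to the end of its segment,
 * which is what the allocator's "list >= nslots" test relies on.
 */
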
/**
 * add_mem_pool() - add a memory pool to the allocator
 * @mem:	Software IO TLB allocator.
 * @pool:	Memory pool to be added.
 */
static void add_mem_pool(struct io_tlb_mem *mem, struct io_tlb_pool *pool)
{
#ifdef CONFIG_SWIOTLB_DYNAMIC
	spin_lock(&mem->lock);
	list_add_rcu(&pool->node, &mem->pools);
	mem->nslabs += pool->nslabs;
	spin_unlock(&mem->lock);
#else
	mem->nslabs = pool->nslabs;
#endif
}

static void __init *swiotlb_memblock_alloc(unsigned long nslabs,
		unsigned int flags,
		int (*remap)(void *tlb, unsigned long nslabs))
{
	size_t bytes = PAGE_ALIGN(nslabs << IO_TLB_SHIFT);
	void *tlb;

	/*
	 * By default allocate the bounce buffer memory from low memory, but
	 * allow picking a location anywhere for hypervisors with guest
	 * memory encryption.
	 */
	if (flags & SWIOTLB_ANY)
		tlb = memblock_alloc(bytes, PAGE_SIZE);
	else
		tlb = memblock_alloc_low(bytes, PAGE_SIZE);

	if (!tlb) {
		pr_warn("%s: Failed to allocate %zu bytes tlb structure\n",
			__func__, bytes);
		return NULL;
	}

	if (remap && remap(tlb, nslabs) < 0) {
		memblock_free(tlb, PAGE_ALIGN(bytes));
		pr_warn("%s: Failed to remap %zu bytes\n", __func__, bytes);
		return NULL;
	}

	return tlb;
}

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
		int (*remap)(void *tlb, unsigned long nslabs))
{
	struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
	unsigned long nslabs;
	unsigned int nareas;
	size_t alloc_size;
	void *tlb;

	if (!addressing_limit && !swiotlb_force_bounce)
		return;
	if (swiotlb_force_disable)
		return;

	io_tlb_default_mem.force_bounce =
		swiotlb_force_bounce || (flags & SWIOTLB_FORCE);

#ifdef CONFIG_SWIOTLB_DYNAMIC
	if (!remap)
		io_tlb_default_mem.can_grow = true;
	if (flags & SWIOTLB_ANY)
		io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1);
	else
		io_tlb_default_mem.phys_limit = ARCH_LOW_ADDRESS_LIMIT;
#endif

	if (!default_nareas)
		swiotlb_adjust_nareas(num_possible_cpus());

	nslabs = default_nslabs;
	nareas = limit_nareas(default_nareas, nslabs);
	while ((tlb = swiotlb_memblock_alloc(nslabs, flags, remap)) == NULL) {
		if (nslabs <= IO_TLB_MIN_SLABS)
			return;
		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
		nareas = limit_nareas(nareas, nslabs);
	}

	if (default_nslabs != nslabs) {
		pr_info("SWIOTLB bounce buffer size adjusted %lu -> %lu slabs",
			default_nslabs, nslabs);
		default_nslabs = nslabs;
	}

	alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
	mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
	if (!mem->slots) {
		pr_warn("%s: Failed to allocate %zu bytes align=0x%lx\n",
			__func__, alloc_size, PAGE_SIZE);
		return;
	}

	mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area),
		nareas), SMP_CACHE_BYTES);
	if (!mem->areas) {
		pr_warn("%s: Failed to allocate mem->areas.\n", __func__);
		return;
	}

	swiotlb_init_io_tlb_pool(mem, __pa(tlb), nslabs, false, nareas);
	add_mem_pool(&io_tlb_default_mem, mem);

	if (flags & SWIOTLB_VERBOSE)
		swiotlb_print_info();
}

void __init swiotlb_init(bool addressing_limit, unsigned int flags)
{
	swiotlb_init_remap(addressing_limit, flags, NULL);
}

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int swiotlb_init_late(size_t size, gfp_t gfp_mask,
		int (*remap)(void *tlb, unsigned long nslabs))
{
	struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
	unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
	unsigned int nareas;
	unsigned char *vstart = NULL;
	unsigned int order, area_order;
	bool retried = false;
	int rc = 0;

	if (io_tlb_default_mem.nslabs)
		return 0;

	if (swiotlb_force_disable)
		return 0;

	io_tlb_default_mem.force_bounce = swiotlb_force_bounce;

#ifdef CONFIG_SWIOTLB_DYNAMIC
	if (!remap)
		io_tlb_default_mem.can_grow = true;
	if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp_mask & __GFP_DMA))
		io_tlb_default_mem.phys_limit = DMA_BIT_MASK(zone_dma_bits);
	else if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp_mask & __GFP_DMA32))
		io_tlb_default_mem.phys_limit = DMA_BIT_MASK(32);
	else
		io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1);
#endif

	if (!default_nareas)
		swiotlb_adjust_nareas(num_possible_cpus());

retry:
	order = get_order(nslabs << IO_TLB_SHIFT);
	nslabs = SLABS_PER_PAGE << order;

	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		vstart = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
						  order);
		if (vstart)
			break;
		order--;
		nslabs = SLABS_PER_PAGE << order;
		retried = true;
	}

	if (!vstart)
		return -ENOMEM;

	if (remap)
		rc = remap(vstart, nslabs);
	if (rc) {
		free_pages((unsigned long)vstart, order);

		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
		if (nslabs < IO_TLB_MIN_SLABS)
			return rc;
		retried = true;
		goto retry;
	}

	if (retried) {
		pr_warn("only able to allocate %ld MB\n",
			(PAGE_SIZE << order) >> 20);
	}

	nareas = limit_nareas(default_nareas, nslabs);
	area_order = get_order(array_size(sizeof(*mem->areas), nareas));
	mem->areas = (struct io_tlb_area *)
		__get_free_pages(GFP_KERNEL | __GFP_ZERO, area_order);
	if (!mem->areas)
		goto error_area;

	mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
		get_order(array_size(sizeof(*mem->slots), nslabs)));
	if (!mem->slots)
		goto error_slots;

	set_memory_decrypted((unsigned long)vstart,
			     (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
	swiotlb_init_io_tlb_pool(mem, virt_to_phys(vstart), nslabs, true,
				 nareas);
	add_mem_pool(&io_tlb_default_mem, mem);

	swiotlb_print_info();
	return 0;

error_slots:
	free_pages((unsigned long)mem->areas, area_order);
error_area:
	free_pages((unsigned long)vstart, order);
	return -ENOMEM;
}

void __init swiotlb_exit(void)
{
	struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
	unsigned long tbl_vaddr;
	size_t tbl_size, slots_size;
	unsigned int area_order;

	if (swiotlb_force_bounce)
		return;

	if (!mem->nslabs)
		return;

	pr_info("tearing down default memory pool\n");
	tbl_vaddr = (unsigned long)phys_to_virt(mem->start);
	tbl_size = PAGE_ALIGN(mem->end - mem->start);
	slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs));

	set_memory_encrypted(tbl_vaddr, tbl_size >> PAGE_SHIFT);
	if (mem->late_alloc) {
		area_order = get_order(array_size(sizeof(*mem->areas),
			mem->nareas));
		free_pages((unsigned long)mem->areas, area_order);
		free_pages(tbl_vaddr, get_order(tbl_size));
		free_pages((unsigned long)mem->slots, get_order(slots_size));
	} else {
		memblock_free_late(__pa(mem->areas),
			array_size(sizeof(*mem->areas), mem->nareas));
		memblock_free_late(mem->start, tbl_size);
		memblock_free_late(__pa(mem->slots), slots_size);
	}

	memset(mem, 0, sizeof(*mem));
}

#ifdef CONFIG_SWIOTLB_DYNAMIC

/**
 * alloc_dma_pages() - allocate pages to be used for DMA
 * @gfp:	GFP flags for the allocation.
 * @bytes:	Size of the buffer.
 * @phys_limit:	Maximum allowed physical address of the buffer.
 *
 * Allocate pages from the buddy allocator. If successful, make the allocated
 * pages decrypted so that they can be used for DMA.
 *
 * Return: Decrypted pages, %NULL on allocation failure, or ERR_PTR(-EAGAIN)
 * if the allocated physical address was above @phys_limit.
 */
static struct page *alloc_dma_pages(gfp_t gfp, size_t bytes, u64 phys_limit)
{
	unsigned int order = get_order(bytes);
	struct page *page;
	phys_addr_t paddr;
	void *vaddr;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	paddr = page_to_phys(page);
	if (paddr + bytes - 1 > phys_limit) {
		__free_pages(page, order);
		return ERR_PTR(-EAGAIN);
	}

	vaddr = phys_to_virt(paddr);
	if (set_memory_decrypted((unsigned long)vaddr, PFN_UP(bytes)))
		goto error;
	return page;

error:
	/* Intentional leak if pages cannot be encrypted again. */
	if (!set_memory_encrypted((unsigned long)vaddr, PFN_UP(bytes)))
		__free_pages(page, order);
	return NULL;
}

/**
 * swiotlb_alloc_tlb() - allocate a dynamic IO TLB buffer
 * @dev:	Device for which a memory pool is allocated.
 * @bytes:	Size of the buffer.
 * @phys_limit:	Maximum allowed physical address of the buffer.
 * @gfp:	GFP flags for the allocation.
 *
 * Return: Allocated pages, or %NULL on allocation failure.
 */
static struct page *swiotlb_alloc_tlb(struct device *dev, size_t bytes,
		u64 phys_limit, gfp_t gfp)
{
	struct page *page;

	/*
	 * Allocate from the atomic pools if memory is encrypted and
	 * the allocation is atomic, because decrypting may block.
	 */
	if (!gfpflags_allow_blocking(gfp) && dev && force_dma_unencrypted(dev)) {
		void *vaddr;

		if (!IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
			return NULL;

		return dma_alloc_from_pool(dev, bytes, &vaddr, gfp,
					   dma_coherent_ok);
	}

	gfp &= ~GFP_ZONEMASK;
	if (phys_limit <= DMA_BIT_MASK(zone_dma_bits))
		gfp |= __GFP_DMA;
	else if (phys_limit <= DMA_BIT_MASK(32))
		gfp |= __GFP_DMA32;

	while (IS_ERR(page = alloc_dma_pages(gfp, bytes, phys_limit))) {
		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    phys_limit < DMA_BIT_MASK(64) &&
		    !(gfp & (__GFP_DMA32 | __GFP_DMA)))
			gfp |= __GFP_DMA32;
		else if (IS_ENABLED(CONFIG_ZONE_DMA) &&
			 !(gfp & __GFP_DMA))
			gfp = (gfp & ~__GFP_DMA32) | __GFP_DMA;
		else
			return NULL;
	}

	return page;
}

/**
 * swiotlb_free_tlb() - free a dynamically allocated IO TLB buffer
 * @vaddr:	Virtual address of the buffer.
 * @bytes:	Size of the buffer.
 */
static void swiotlb_free_tlb(void *vaddr, size_t bytes)
{
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    dma_free_from_pool(NULL, vaddr, bytes))
		return;

	/* Intentional leak if pages cannot be encrypted again. */
	if (!set_memory_encrypted((unsigned long)vaddr, PFN_UP(bytes)))
		__free_pages(virt_to_page(vaddr), get_order(bytes));
}

/**
 * swiotlb_alloc_pool() - allocate a new IO TLB memory pool
 * @dev:	Device for which a memory pool is allocated.
 * @minslabs:	Minimum number of slabs.
 * @nslabs:	Desired (maximum) number of slabs.
 * @nareas:	Number of areas.
 * @phys_limit:	Maximum DMA buffer physical address.
 * @gfp:	GFP flags for the allocations.
 *
 * Allocate and initialize a new IO TLB memory pool. The actual number of
 * slabs may be reduced if allocation of @nslabs fails. If even
 * @minslabs cannot be allocated, this function fails.
 *
 * Return: New memory pool, or %NULL on allocation failure.
 */
static struct io_tlb_pool *swiotlb_alloc_pool(struct device *dev,
		unsigned long minslabs, unsigned long nslabs,
		unsigned int nareas, u64 phys_limit, gfp_t gfp)
{
	struct io_tlb_pool *pool;
	unsigned int slot_order;
	struct page *tlb;
	size_t pool_size;
	size_t tlb_size;

	if (nslabs > SLABS_PER_PAGE << MAX_PAGE_ORDER) {
		nslabs = SLABS_PER_PAGE << MAX_PAGE_ORDER;
		nareas = limit_nareas(nareas, nslabs);
	}

	pool_size = sizeof(*pool) + array_size(sizeof(*pool->areas), nareas);
	pool = kzalloc(pool_size, gfp);
	if (!pool)
		goto error;
	pool->areas = (void *)pool + sizeof(*pool);

	tlb_size = nslabs << IO_TLB_SHIFT;
	while (!(tlb = swiotlb_alloc_tlb(dev, tlb_size, phys_limit, gfp))) {
		if (nslabs <= minslabs)
			goto error_tlb;
		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
		nareas = limit_nareas(nareas, nslabs);
		tlb_size = nslabs << IO_TLB_SHIFT;
	}

	slot_order = get_order(array_size(sizeof(*pool->slots), nslabs));
	pool->slots = (struct io_tlb_slot *)
		__get_free_pages(gfp, slot_order);
	if (!pool->slots)
		goto error_slots;

	swiotlb_init_io_tlb_pool(pool, page_to_phys(tlb), nslabs, true, nareas);
	return pool;

error_slots:
	swiotlb_free_tlb(page_address(tlb), tlb_size);
error_tlb:
	kfree(pool);
error:
	return NULL;
}

/**
 * swiotlb_dyn_alloc() - dynamic memory pool allocation worker
 * @work:	Pointer to dyn_alloc in struct io_tlb_mem.
 */
static void swiotlb_dyn_alloc(struct work_struct *work)
{
	struct io_tlb_mem *mem =
		container_of(work, struct io_tlb_mem, dyn_alloc);
	struct io_tlb_pool *pool;

	pool = swiotlb_alloc_pool(NULL, IO_TLB_MIN_SLABS, default_nslabs,
				  default_nareas, mem->phys_limit, GFP_KERNEL);
	if (!pool) {
		pr_warn_ratelimited("Failed to allocate new pool");
		return;
	}

	add_mem_pool(mem, pool);
}

/**
 * swiotlb_dyn_free() - RCU callback to free a memory pool
 * @rcu:	RCU head in the corresponding struct io_tlb_pool.
 */
static void swiotlb_dyn_free(struct rcu_head *rcu)
{
	struct io_tlb_pool *pool = container_of(rcu, struct io_tlb_pool, rcu);
	size_t slots_size = array_size(sizeof(*pool->slots), pool->nslabs);
	size_t tlb_size = pool->end - pool->start;

	free_pages((unsigned long)pool->slots, get_order(slots_size));
	swiotlb_free_tlb(pool->vaddr, tlb_size);
	kfree(pool);
}

/**
 * swiotlb_find_pool() - find the IO TLB pool for a physical address
 * @dev:	Device which has mapped the DMA buffer.
 * @paddr:	Physical address within the DMA buffer.
 *
 * Find the IO TLB memory pool descriptor which contains the given physical
 * address, if any.
 *
 * Return: Memory pool which contains @paddr, or %NULL if none.
 */
struct io_tlb_pool *swiotlb_find_pool(struct device *dev, phys_addr_t paddr)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	struct io_tlb_pool *pool;

	rcu_read_lock();
	list_for_each_entry_rcu(pool, &mem->pools, node) {
		if (paddr >= pool->start && paddr < pool->end)
			goto out;
	}

	list_for_each_entry_rcu(pool, &dev->dma_io_tlb_pools, node) {
		if (paddr >= pool->start && paddr < pool->end)
			goto out;
	}
	pool = NULL;
out:
	rcu_read_unlock();
	return pool;
}

/**
 * swiotlb_del_pool() - remove an IO TLB pool from a device
 * @dev:	Owning device.
 * @pool:	Memory pool to be removed.
 */
static void swiotlb_del_pool(struct device *dev, struct io_tlb_pool *pool)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->dma_io_tlb_lock, flags);
	list_del_rcu(&pool->node);
	spin_unlock_irqrestore(&dev->dma_io_tlb_lock, flags);

	call_rcu(&pool->rcu, swiotlb_dyn_free);
}

#endif	/* CONFIG_SWIOTLB_DYNAMIC */

/**
 * swiotlb_dev_init() - initialize swiotlb fields in &struct device
 * @dev:	Device to be initialized.
 */
void swiotlb_dev_init(struct device *dev)
{
	dev->dma_io_tlb_mem = &io_tlb_default_mem;
#ifdef CONFIG_SWIOTLB_DYNAMIC
	INIT_LIST_HEAD(&dev->dma_io_tlb_pools);
	spin_lock_init(&dev->dma_io_tlb_lock);
	dev->dma_uses_io_tlb = false;
#endif
}

/*
 * Return the offset into an IO TLB slot required to keep the device happy.
 */
static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
{
	return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
}

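/*
 * For example, a device that sets a min_align_mask of 4 KiB - 1 (as NVMe
 * does) and maps a buffer at an address ending in 0x345 gets an offset of
 * 0x345, so the bounce buffer preserves the low address bits that the
 * hardware interprets.
 */
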
/*
 * Bounce: copy the swiotlb buffer from or back to the original dma location
 */
static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size,
			   enum dma_data_direction dir)
{
	struct io_tlb_pool *mem = swiotlb_find_pool(dev, tlb_addr);
	int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT;
	phys_addr_t orig_addr = mem->slots[index].orig_addr;
	size_t alloc_size = mem->slots[index].alloc_size;
	unsigned long pfn = PFN_DOWN(orig_addr);
	unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start;
	unsigned int tlb_offset, orig_addr_offset;

	if (orig_addr == INVALID_PHYS_ADDR)
		return;

	tlb_offset = tlb_addr & (IO_TLB_SIZE - 1);
	orig_addr_offset = swiotlb_align_offset(dev, orig_addr);
	if (tlb_offset < orig_addr_offset) {
		dev_WARN_ONCE(dev, 1,
			"Access before mapping start detected. orig offset %u, requested offset %u.\n",
			orig_addr_offset, tlb_offset);
		return;
	}

	tlb_offset -= orig_addr_offset;
	if (tlb_offset > alloc_size) {
		dev_WARN_ONCE(dev, 1,
			"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu+%u.\n",
			alloc_size, size, tlb_offset);
		return;
	}

	orig_addr += tlb_offset;
	alloc_size -= tlb_offset;

	if (size > alloc_size) {
		dev_WARN_ONCE(dev, 1,
			"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n",
			alloc_size, size);
		size = alloc_size;
	}

	if (PageHighMem(pfn_to_page(pfn))) {
		unsigned int offset = orig_addr & ~PAGE_MASK;
		struct page *page;
		unsigned int sz = 0;
		unsigned long flags;

		while (size) {
			sz = min_t(size_t, PAGE_SIZE - offset, size);

			local_irq_save(flags);
			page = pfn_to_page(pfn);
			if (dir == DMA_TO_DEVICE)
				memcpy_from_page(vaddr, page, offset, sz);
			else
				memcpy_to_page(page, offset, vaddr, sz);
			local_irq_restore(flags);

			size -= sz;
			pfn++;
			vaddr += sz;
			offset = 0;
		}
	} else if (dir == DMA_TO_DEVICE) {
		memcpy(vaddr, phys_to_virt(orig_addr), size);
	} else {
		memcpy(phys_to_virt(orig_addr), vaddr, size);
	}
}

static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx)
{
	return start + (idx << IO_TLB_SHIFT);
}

/*
 * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
 */
static inline unsigned long get_max_slots(unsigned long boundary_mask)
{
	return (boundary_mask >> IO_TLB_SHIFT) + 1;
}

static unsigned int wrap_area_index(struct io_tlb_pool *mem, unsigned int index)
{
	if (index >= mem->area_nslabs)
		return 0;
	return index;
}

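/*
 * The shift-before-increment in get_max_slots() is what makes the ~0UL case
 * safe: computing (boundary_mask + 1) >> IO_TLB_SHIFT would wrap to 0 for an
 * unlimited mask. For a typical 4 GiB segment boundary (boundary_mask ==
 * 0xffffffff) it yields (0xffffffff >> 11) + 1 == 0x200000 slots.
 */
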
/*
 * Track the total used slots with a global atomic value in order to have
 * correct information to determine the high water mark. The mem_used()
 * function gives imprecise results because there's no locking across
 * multiple areas.
 */
#ifdef CONFIG_DEBUG_FS
static void inc_used_and_hiwater(struct io_tlb_mem *mem, unsigned int nslots)
{
	unsigned long old_hiwater, new_used;

	new_used = atomic_long_add_return(nslots, &mem->total_used);
	old_hiwater = atomic_long_read(&mem->used_hiwater);
	do {
		if (new_used <= old_hiwater)
			break;
	} while (!atomic_long_try_cmpxchg(&mem->used_hiwater,
					  &old_hiwater, new_used));
}

static void dec_used(struct io_tlb_mem *mem, unsigned int nslots)
{
	atomic_long_sub(nslots, &mem->total_used);
}

#else /* !CONFIG_DEBUG_FS */
static void inc_used_and_hiwater(struct io_tlb_mem *mem, unsigned int nslots)
{
}
static void dec_used(struct io_tlb_mem *mem, unsigned int nslots)
{
}
#endif /* CONFIG_DEBUG_FS */

/**
 * swiotlb_search_pool_area() - search one memory area in one pool
 * @dev:	Device which maps the buffer.
 * @pool:	Memory pool to be searched.
 * @area_index:	Index of the IO TLB memory area to be searched.
 * @orig_addr:	Original (non-bounced) IO buffer address.
 * @alloc_size:	Total requested size of the bounce buffer,
 *		including initial alignment padding.
 * @alloc_align_mask:	Required alignment of the allocated buffer.
 *
 * Find a suitable sequence of IO TLB entries for the request and allocate
 * a buffer from the given IO TLB memory area.
 * This function takes care of locking.
 *
 * Return: Index of the first allocated slot, or -1 on error.
 */
static int swiotlb_search_pool_area(struct device *dev, struct io_tlb_pool *pool,
		int area_index, phys_addr_t orig_addr, size_t alloc_size,
		unsigned int alloc_align_mask)
{
	struct io_tlb_area *area = pool->areas + area_index;
	unsigned long boundary_mask = dma_get_seg_boundary(dev);
	dma_addr_t tbl_dma_addr =
		phys_to_dma_unencrypted(dev, pool->start) & boundary_mask;
	unsigned long max_slots = get_max_slots(boundary_mask);
	unsigned int iotlb_align_mask =
		dma_get_min_align_mask(dev) | alloc_align_mask;
	unsigned int nslots = nr_slots(alloc_size), stride;
	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
	unsigned int index, slots_checked, count = 0, i;
	unsigned long flags;
	unsigned int slot_base;
	unsigned int slot_index;

	BUG_ON(!nslots);
	BUG_ON(area_index >= pool->nareas);

	/*
	 * For allocations of PAGE_SIZE or larger only look for page aligned
	 * allocations.
	 */
	if (alloc_size >= PAGE_SIZE)
		iotlb_align_mask |= ~PAGE_MASK;
	iotlb_align_mask &= ~(IO_TLB_SIZE - 1);

	/*
	 * For mappings with an alignment requirement don't bother looping to
	 * unaligned slots once we find an aligned one.
	 */
	stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;

	spin_lock_irqsave(&area->lock, flags);
	if (unlikely(nslots > pool->area_nslabs - area->used))
		goto not_found;

	slot_base = area_index * pool->area_nslabs;
	index = area->index;

	for (slots_checked = 0; slots_checked < pool->area_nslabs; ) {
		slot_index = slot_base + index;

		if (orig_addr &&
		    (slot_addr(tbl_dma_addr, slot_index) &
		     iotlb_align_mask) != (orig_addr & iotlb_align_mask)) {
			index = wrap_area_index(pool, index + 1);
			slots_checked++;
			continue;
		}

		if (!iommu_is_span_boundary(slot_index, nslots,
					    nr_slots(tbl_dma_addr),
					    max_slots)) {
			if (pool->slots[slot_index].list >= nslots)
				goto found;
		}
		index = wrap_area_index(pool, index + stride);
		slots_checked += stride;
	}

not_found:
	spin_unlock_irqrestore(&area->lock, flags);
	return -1;

found:
	/*
	 * If we find a slot that indicates we have 'nslots' number of
	 * contiguous buffers, we allocate the buffers from that slot onwards
	 * and set the list of free entries to '0' indicating unavailable.
	 */
	for (i = slot_index; i < slot_index + nslots; i++) {
		pool->slots[i].list = 0;
		pool->slots[i].alloc_size = alloc_size - (offset +
				((i - slot_index) << IO_TLB_SHIFT));
	}
	for (i = slot_index - 1;
	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 &&
	     pool->slots[i].list; i--)
		pool->slots[i].list = ++count;

	/*
	 * Update the indices to avoid searching in the next round.
	 */
	area->index = wrap_area_index(pool, index + nslots);
	area->used += nslots;
	spin_unlock_irqrestore(&area->lock, flags);

	inc_used_and_hiwater(dev->dma_io_tlb_mem, nslots);
	return slot_index;
}

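/*
 * To illustrate the stride computed above, assuming 2 KiB slots: a device
 * with a min_align_mask of 4 KiB - 1 ends up with iotlb_align_mask == 0x800
 * after the low slot bits are cleared, giving a stride of 2, so the search
 * only probes every other (page-aligned) slot.
 */
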
#ifdef CONFIG_SWIOTLB_DYNAMIC

/**
 * swiotlb_search_area() - search one memory area in all pools
 * @dev:	Device which maps the buffer.
 * @start_cpu:	Start CPU number.
 * @cpu_offset:	Offset from @start_cpu.
 * @orig_addr:	Original (non-bounced) IO buffer address.
 * @alloc_size:	Total requested size of the bounce buffer,
 *		including initial alignment padding.
 * @alloc_align_mask:	Required alignment of the allocated buffer.
 * @retpool:	Used memory pool, updated on return.
 *
 * Search one memory area in all pools for a sequence of slots that match the
 * allocation constraints.
 *
 * Return: Index of the first allocated slot, or -1 on error.
 */
static int swiotlb_search_area(struct device *dev, int start_cpu,
		int cpu_offset, phys_addr_t orig_addr, size_t alloc_size,
		unsigned int alloc_align_mask, struct io_tlb_pool **retpool)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	struct io_tlb_pool *pool;
	int area_index;
	int index = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(pool, &mem->pools, node) {
		if (cpu_offset >= pool->nareas)
			continue;
		area_index = (start_cpu + cpu_offset) & (pool->nareas - 1);
		index = swiotlb_search_pool_area(dev, pool, area_index,
						 orig_addr, alloc_size,
						 alloc_align_mask);
		if (index >= 0) {
			*retpool = pool;
			break;
		}
	}
	rcu_read_unlock();
	return index;
}

/**
 * swiotlb_find_slots() - search for slots in the whole swiotlb
 * @dev:	Device which maps the buffer.
 * @orig_addr:	Original (non-bounced) IO buffer address.
 * @alloc_size:	Total requested size of the bounce buffer,
 *		including initial alignment padding.
 * @alloc_align_mask:	Required alignment of the allocated buffer.
 * @retpool:	Used memory pool, updated on return.
 *
 * Search through the whole software IO TLB to find a sequence of slots that
 * match the allocation constraints.
 *
 * Return: Index of the first allocated slot, or -1 on error.
 */
static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
		size_t alloc_size, unsigned int alloc_align_mask,
		struct io_tlb_pool **retpool)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	struct io_tlb_pool *pool;
	unsigned long nslabs;
	unsigned long flags;
	u64 phys_limit;
	int cpu, i;
	int index;

	if (alloc_size > IO_TLB_SEGSIZE * IO_TLB_SIZE)
		return -1;

	cpu = raw_smp_processor_id();
	for (i = 0; i < default_nareas; ++i) {
		index = swiotlb_search_area(dev, cpu, i, orig_addr, alloc_size,
					    alloc_align_mask, &pool);
		if (index >= 0)
			goto found;
	}

	if (!mem->can_grow)
		return -1;

	schedule_work(&mem->dyn_alloc);

	nslabs = nr_slots(alloc_size);
	phys_limit = min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
	pool = swiotlb_alloc_pool(dev, nslabs, nslabs, 1, phys_limit,
				  GFP_NOWAIT | __GFP_NOWARN);
	if (!pool)
		return -1;

	index = swiotlb_search_pool_area(dev, pool, 0, orig_addr,
					 alloc_size, alloc_align_mask);
	if (index < 0) {
		swiotlb_dyn_free(&pool->rcu);
		return -1;
	}

	pool->transient = true;
	spin_lock_irqsave(&dev->dma_io_tlb_lock, flags);
	list_add_rcu(&pool->node, &dev->dma_io_tlb_pools);
	spin_unlock_irqrestore(&dev->dma_io_tlb_lock, flags);

found:
	WRITE_ONCE(dev->dma_uses_io_tlb, true);

	/*
	 * The general barrier orders reads and writes against a presumed store
	 * of the SWIOTLB buffer address by a device driver (to a driver private
	 * data structure). It serves two purposes.
	 *
	 * First, the store to dev->dma_uses_io_tlb must be ordered before the
	 * presumed store. This guarantees that the returned buffer address
	 * cannot be passed to another CPU before updating dev->dma_uses_io_tlb.
	 *
	 * Second, the load from mem->pools must be ordered before the same
	 * presumed store. This guarantees that the returned buffer address
	 * cannot be observed by another CPU before an update of the RCU list
	 * that was made by swiotlb_dyn_alloc() on a third CPU (cf. multicopy
	 * atomicity).
	 *
	 * See also the comment in is_swiotlb_buffer().
	 */
	smp_mb();

	*retpool = pool;
	return index;
}

#else /* !CONFIG_SWIOTLB_DYNAMIC */

static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
		size_t alloc_size, unsigned int alloc_align_mask,
		struct io_tlb_pool **retpool)
{
	struct io_tlb_pool *pool;
	int start, i;
	int index;

	*retpool = pool = &dev->dma_io_tlb_mem->defpool;
	i = start = raw_smp_processor_id() & (pool->nareas - 1);
	do {
		index = swiotlb_search_pool_area(dev, pool, i, orig_addr,
						 alloc_size, alloc_align_mask);
		if (index >= 0)
			return index;
		if (++i >= pool->nareas)
			i = 0;
	} while (i != start);
	return -1;
}

#endif /* CONFIG_SWIOTLB_DYNAMIC */

#ifdef CONFIG_DEBUG_FS

/**
 * mem_used() - get number of used slots in an allocator
 * @mem:	Software IO TLB allocator.
 *
 * The result is accurate in this version of the function, because an atomic
 * counter is available if CONFIG_DEBUG_FS is set.
 *
 * Return: Number of used slots.
 */
static unsigned long mem_used(struct io_tlb_mem *mem)
{
	return atomic_long_read(&mem->total_used);
}

#else /* !CONFIG_DEBUG_FS */

/**
 * mem_pool_used() - get number of used slots in a memory pool
 * @pool:	Software IO TLB memory pool.
 *
 * The result is not accurate, see mem_used().
 *
 * Return: Approximate number of used slots.
 */
static unsigned long mem_pool_used(struct io_tlb_pool *pool)
{
	int i;
	unsigned long used = 0;

	for (i = 0; i < pool->nareas; i++)
		used += pool->areas[i].used;
	return used;
}

/**
 * mem_used() - get number of used slots in an allocator
 * @mem:	Software IO TLB allocator.
 *
 * The result is not accurate, because there is no locking of individual
 * areas.
 *
 * Return: Approximate number of used slots.
 */
static unsigned long mem_used(struct io_tlb_mem *mem)
{
#ifdef CONFIG_SWIOTLB_DYNAMIC
	struct io_tlb_pool *pool;
	unsigned long used = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(pool, &mem->pools, node)
		used += mem_pool_used(pool);
	rcu_read_unlock();

	return used;
#else
	return mem_pool_used(&mem->defpool);
#endif
}

#endif /* CONFIG_DEBUG_FS */

phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
		size_t mapping_size, size_t alloc_size,
		unsigned int alloc_align_mask, enum dma_data_direction dir,
		unsigned long attrs)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
	struct io_tlb_pool *pool;
	unsigned int i;
	int index;
	phys_addr_t tlb_addr;

	if (!mem || !mem->nslabs) {
		dev_warn_ratelimited(dev,
			"Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
		return (phys_addr_t)DMA_MAPPING_ERROR;
	}

	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");

	if (mapping_size > alloc_size) {
		dev_warn_once(dev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
			      mapping_size, alloc_size);
		return (phys_addr_t)DMA_MAPPING_ERROR;
	}

	index = swiotlb_find_slots(dev, orig_addr,
				   alloc_size + offset, alloc_align_mask, &pool);
	if (index == -1) {
		if (!(attrs & DMA_ATTR_NO_WARN))
			dev_warn_ratelimited(dev,
				"swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
				alloc_size, mem->nslabs, mem_used(mem));
		return (phys_addr_t)DMA_MAPPING_ERROR;
	}

	/*
	 * Save away the mapping from the original address to the DMA address.
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	for (i = 0; i < nr_slots(alloc_size + offset); i++)
		pool->slots[index + i].orig_addr = slot_addr(orig_addr, i);
	tlb_addr = slot_addr(pool->start, index) + offset;
	/*
	 * When the device is writing memory, i.e. dir == DMA_FROM_DEVICE, copy
	 * the original buffer to the TLB buffer before initiating DMA in order
	 * to preserve the original's data if the device does a partial write,
	 * i.e. if the device doesn't overwrite the entire buffer.  Preserving
	 * the original data, even if it's garbage, is necessary to match
	 * hardware behavior.  Use of swiotlb is supposed to be transparent,
	 * i.e. swiotlb must not corrupt memory by clobbering unwritten bytes.
	 */
	swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE);
	return tlb_addr;
}

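/*
 * As an illustration of the offset handling above, assuming 2 KiB slots:
 * mapping a buffer whose orig_addr ends in 0x345 for a device with a
 * min_align_mask of 4 KiB - 1 searches for alloc_size + 0x345 bytes, and the
 * returned tlb_addr ends in 0x345 too, so DMA engines that derive in-page
 * offsets from the address keep working.
 */
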
70347877 | 1347 | static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr) |
1da177e4 | 1348 | { |
79636caa | 1349 | struct io_tlb_pool *mem = swiotlb_find_pool(dev, tlb_addr); |
1da177e4 | 1350 | unsigned long flags; |
70347877 | 1351 | unsigned int offset = swiotlb_align_offset(dev, tlb_addr); |
73f62095 | 1352 | int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT; |
2d29960a | 1353 | int nslots = nr_slots(mem->slots[index].alloc_size + offset); |
20347fca TL |
1354 | int aindex = index / mem->area_nslabs; |
1355 | struct io_tlb_area *area = &mem->areas[aindex]; | |
2bdba622 | 1356 | int count, i; |
daf9514f | 1357 | |
1da177e4 LT |
1358 | /* |
1359 | * Return the buffer to the free list by setting the corresponding | |
af901ca1 | 1360 | * entries to indicate the number of contiguous entries available. |
1da177e4 LT |
1361 | * While returning the entries to the free list, we merge the entries |
1362 | * with slots below and above the pool being returned. | |
1363 | */ | |
20347fca TL |
1364 | BUG_ON(aindex >= mem->nareas); |
1365 | ||
1366 | spin_lock_irqsave(&area->lock, flags); | |
ca10d0f8 | 1367 | if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE)) |
2d29960a | 1368 | count = mem->slots[index + nslots].list; |
ca10d0f8 CH |
1369 | else |
1370 | count = 0; | |
71602fe6 | 1371 | |
ca10d0f8 CH |
1372 | /* |
1373 | * Step 1: return the slots to the free list, merging the slots with | |
1374 | * superceeding slots | |
1375 | */ | |
1376 | for (i = index + nslots - 1; i >= index; i--) { | |
2d29960a CH |
1377 | mem->slots[i].list = ++count; |
1378 | mem->slots[i].orig_addr = INVALID_PHYS_ADDR; | |
1379 | mem->slots[i].alloc_size = 0; | |
1da177e4 | 1380 | } |
ca10d0f8 CH |
1381 | |
1382 | /* | |
1383 | * Step 2: merge the returned slots with the preceding slots, if | |
1384 | * available (non-zero) | |
1385 | */ | |
1386 | for (i = index - 1; | |
2d29960a | 1387 | io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list; |
ca10d0f8 | 1388 | i--) |
2d29960a | 1389 | mem->slots[i].list = ++count; |
20347fca TL |
1390 | area->used -= nslots; |
1391 | spin_unlock_irqrestore(&area->lock, flags); | |
8b0977ec | 1392 | |
158dbe9c | 1393 | dec_used(dev->dma_io_tlb_mem, nslots); |
1da177e4 LT |
1394 | } |
1395 | ||
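/*
 * To make the two merge steps above concrete, a worked example
 * (illustrative only; real segments have IO_TLB_SEGSIZE == 128 slots,
 * a segment of 8 is shown for brevity). U = used, F = free, numbers
 * are slots[i].list, i.e. contiguous free slots from that index:
 *
 *   before freeing slots 3..4:  U U U U U F F F   list: 0 0 0 0 0 3 2 1
 *   count starts at slots[index + nslots].list == slots[5].list == 3
 *   step 1 (merge upward):      U U U F F F F F   list: 0 0 0 5 4 3 2 1
 *   step 2 (merge downward):    slot 2 is still used (list == 0), stop
 */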
79636caa PT |
1396 | #ifdef CONFIG_SWIOTLB_DYNAMIC |
1397 | ||
1398 | /** | |
1399 | * swiotlb_del_transient() - delete a transient memory pool | |
1400 | * @dev: Device which mapped the buffer. | |
1401 | * @tlb_addr: Physical address within a bounce buffer. | |
1402 | * | |
1403 | * Check whether the address belongs to a transient SWIOTLB memory pool. | |
1404 | * If yes, then delete the pool. | |
1405 | * | |
1406 | * Return: %true if @tlb_addr belonged to a transient pool that was released. | |
1407 | */ | |
1408 | static bool swiotlb_del_transient(struct device *dev, phys_addr_t tlb_addr) | |
1409 | { | |
1410 | struct io_tlb_pool *pool; | |
1411 | ||
1412 | pool = swiotlb_find_pool(dev, tlb_addr); | |
1413 | if (!pool->transient) | |
1414 | return false; | |
1415 | ||
1416 | dec_used(dev->dma_io_tlb_mem, pool->nslabs); | |
1417 | swiotlb_del_pool(dev, pool); | |
1418 | return true; | |
1419 | } | |
1420 | ||
1421 | #else /* !CONFIG_SWIOTLB_DYNAMIC */ | |
1422 | ||
1423 | static inline bool swiotlb_del_transient(struct device *dev, | |
1424 | phys_addr_t tlb_addr) | |
1425 | { | |
1426 | return false; | |
1427 | } | |
1428 | ||
1429 | #endif /* CONFIG_SWIOTLB_DYNAMIC */ | |
1430 | ||
70347877 CC |
1431 | /* |
1432 | * tlb_addr is the physical address of the bounce buffer to unmap. | |
1433 | */ | |
1434 | void swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t tlb_addr, | |
1435 | size_t mapping_size, enum dma_data_direction dir, | |
1436 | unsigned long attrs) | |
1437 | { | |
1438 | /* | |
1439 | * First, sync the memory before unmapping the entry | |
1440 | */ | |
1441 | if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && | |
1442 | (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)) | |
1443 | swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_FROM_DEVICE); | |
1444 | ||
79636caa PT |
1445 | if (swiotlb_del_transient(dev, tlb_addr)) |
1446 | return; | |
70347877 CC |
1447 | swiotlb_release_slots(dev, tlb_addr); |
1448 | } | |
1449 | ||
80808d27 CH |
1450 | void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr, |
1451 | size_t size, enum dma_data_direction dir) | |
1da177e4 | 1452 | { |
bddac7c1 LT |
1453 | if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) |
1454 | swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE); | |
1455 | else | |
1456 | BUG_ON(dir != DMA_FROM_DEVICE); | |
80808d27 CH |
1457 | } |
1458 | ||
1459 | void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr, | |
1460 | size_t size, enum dma_data_direction dir) | |
1461 | { | |
1462 | if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) | |
1463 | swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE); | |
1464 | else | |
1465 | BUG_ON(dir != DMA_TO_DEVICE); | |
1da177e4 LT |
1466 | } |
1467 | ||
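/*
 * A sketch of how a driver reaches the two sync helpers above through
 * the generic DMA API once a mapping is bounce-buffered (dev, dma and
 * len are placeholder names, not code from this file):
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	// ... the CPU may now read what the device wrote ...
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	// ... the device may write the buffer again ...
 */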
55897af6 | 1468 | /* |
4a47cbae | 1469 | * Create a swiotlb mapping for the buffer at @paddr, and in case of DMAing |
55897af6 CH |
1470 | * to the device, copy the data into it as well. | |
1471 | */ | |
4a47cbae CH |
1472 | dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size, |
1473 | enum dma_data_direction dir, unsigned long attrs) | |
c4dae366 | 1474 | { |
4a47cbae CH |
1475 | phys_addr_t swiotlb_addr; |
1476 | dma_addr_t dma_addr; | |
c4dae366 | 1477 | |
c6af2aa9 | 1478 | trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size); |
c4dae366 | 1479 | |
e81e99ba | 1480 | swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, 0, dir, |
fc0021aa | 1481 | attrs); |
4a47cbae CH |
1482 | if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR) |
1483 | return DMA_MAPPING_ERROR; | |
c4dae366 CH |
1484 | |
1485 | /* Ensure that the address returned is DMA'ble */ | |
5ceda740 | 1486 | dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr); |
4a47cbae | 1487 | if (unlikely(!dma_capable(dev, dma_addr, size, true))) { |
2973073a | 1488 | swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, dir, |
c4dae366 | 1489 | attrs | DMA_ATTR_SKIP_CPU_SYNC); |
4a47cbae CH |
1490 | dev_WARN_ONCE(dev, 1, |
1491 | "swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n", | |
1492 | &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit); | |
1493 | return DMA_MAPPING_ERROR; | |
a4a4330d CH |
1494 | } |
1495 | ||
4a47cbae CH |
1496 | if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) |
1497 | arch_sync_dma_for_device(swiotlb_addr, size, dir); | |
1498 | return dma_addr; | |
1da177e4 LT |
1499 | } |
1500 | ||
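/*
 * Usage sketch (hypothetical driver code, not part of this file):
 * drivers do not call swiotlb_map() directly, they reach it through
 * the dma-direct path, e.g.:
 *
 *	void *buf = kmalloc(SZ_4K, GFP_KERNEL);
 *	dma_addr_t dma = dma_map_single(dev, buf, SZ_4K, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	// ... the device reads from dma; any bouncing is transparent ...
 *	dma_unmap_single(dev, dma, SZ_4K, DMA_TO_DEVICE);
 */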
abe420bf JR |
1501 | size_t swiotlb_max_mapping_size(struct device *dev) |
1502 | { | |
82806744 TL |
1503 | int min_align_mask = dma_get_min_align_mask(dev); |
1504 | int min_align = 0; | |
1505 | ||
1506 | /* | |
1507 | * swiotlb_find_slots() skips slots according to the min align | |
1508 | * mask, which reduces the maximum mapping size. Take that into | |
1509 | * account here. | |
1510 | */ | |
1511 | if (min_align_mask) | |
1512 | min_align = roundup(min_align_mask, IO_TLB_SIZE); | |
1513 | ||
1514 | return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE - min_align; | |
abe420bf | 1515 | } |
492366f7 | 1516 | |
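/*
 * Worked example, assuming the in-tree constants IO_TLB_SIZE == 2048
 * and IO_TLB_SEGSIZE == 128:
 *
 *	min_align_mask == 0:	 2048 * 128 = 256 KiB
 *	min_align_mask == 0xfff: 256 KiB - roundup(0xfff, 2048)
 *				 = 256 KiB - 4 KiB = 252 KiB
 *
 * (a 4 KiB min align mask is what e.g. NVMe sets)
 */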
05ee7741 PT |
1517 | /** |
1518 | * is_swiotlb_allocated() - check if the default software IO TLB is initialized | |
1519 | */ | |
1520 | bool is_swiotlb_allocated(void) | |
1521 | { | |
1522 | return io_tlb_default_mem.nslabs; | |
1523 | } | |
1524 | ||
6f2beb26 | 1525 | bool is_swiotlb_active(struct device *dev) |
492366f7 | 1526 | { |
463e862a WD |
1527 | struct io_tlb_mem *mem = dev->dma_io_tlb_mem; |
1528 | ||
1529 | return mem && mem->nslabs; | |
492366f7 | 1530 | } |
45ba8d5d | 1531 | |
05ee7741 PT |
1532 | /** |
1533 | * default_swiotlb_base() - get the base address of the default SWIOTLB | |
1534 | * | |
1535 | * Get the lowest physical address used by the default software IO TLB pool. | |
1536 | */ | |
1537 | phys_addr_t default_swiotlb_base(void) | |
1538 | { | |
62708b2b PT |
1539 | #ifdef CONFIG_SWIOTLB_DYNAMIC |
1540 | io_tlb_default_mem.can_grow = false; | |
1541 | #endif | |
158dbe9c | 1542 | return io_tlb_default_mem.defpool.start; |
05ee7741 PT |
1543 | } |
1544 | ||
1545 | /** | |
1546 | * default_swiotlb_limit() - get the address limit of the default SWIOTLB | |
1547 | * | |
1548 | * Get the highest physical address used by the default software IO TLB pool. | |
1549 | */ | |
1550 | phys_addr_t default_swiotlb_limit(void) | |
1551 | { | |
ad96ce32 PT |
1552 | #ifdef CONFIG_SWIOTLB_DYNAMIC |
1553 | return io_tlb_default_mem.phys_limit; | |
1554 | #else | |
158dbe9c | 1555 | return io_tlb_default_mem.defpool.end - 1; |
ad96ce32 | 1556 | #endif |
05ee7741 PT |
1557 | } |
1558 | ||
ec274aff PT |
1559 | #ifdef CONFIG_DEBUG_FS |
1560 | ||
5c850d31 TL |
1561 | static int io_tlb_used_get(void *data, u64 *val) |
1562 | { | |
5499d01c MK |
1563 | struct io_tlb_mem *mem = data; |
1564 | ||
1565 | *val = mem_used(mem); | |
5c850d31 TL |
1566 | return 0; |
1567 | } | |
8b0977ec MK |
1568 | |
1569 | static int io_tlb_hiwater_get(void *data, u64 *val) | |
1570 | { | |
1571 | struct io_tlb_mem *mem = data; | |
1572 | ||
1573 | *val = atomic_long_read(&mem->used_hiwater); | |
1574 | return 0; | |
1575 | } | |
1576 | ||
1577 | static int io_tlb_hiwater_set(void *data, u64 val) | |
1578 | { | |
1579 | struct io_tlb_mem *mem = data; | |
1580 | ||
1581 | /* Only allow setting to zero */ | |
1582 | if (val != 0) | |
1583 | return -EINVAL; | |
1584 | ||
1585 | atomic_long_set(&mem->used_hiwater, val); | |
1586 | return 0; | |
1587 | } | |
1588 | ||
5c850d31 | 1589 | DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_used, io_tlb_used_get, NULL, "%llu\n"); |
8b0977ec MK |
1590 | DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_hiwater, io_tlb_hiwater_get, |
1591 | io_tlb_hiwater_set, "%llu\n"); | |
5c850d31 | 1592 | |
35265899 RM |
1593 | static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem, |
1594 | const char *dirname) | |
71602fe6 | 1595 | { |
8b0977ec MK |
1596 | atomic_long_set(&mem->total_used, 0); |
1597 | atomic_long_set(&mem->used_hiwater, 0); | |
1598 | ||
35265899 RM |
1599 | mem->debugfs = debugfs_create_dir(dirname, io_tlb_default_mem.debugfs); |
1600 | if (!mem->nslabs) | |
1601 | return; | |
1602 | ||
73f62095 | 1603 | debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs); |
5499d01c | 1604 | debugfs_create_file("io_tlb_used", 0400, mem->debugfs, mem, |
5c850d31 | 1605 | &fops_io_tlb_used); |
8b0977ec MK |
1606 | debugfs_create_file("io_tlb_used_hiwater", 0600, mem->debugfs, mem, |
1607 | &fops_io_tlb_hiwater); | |
6e675a1c CC |
1608 | } |
1609 | ||
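/*
 * With debugfs mounted at its conventional location, the files created
 * above for the default pool appear as (illustrative paths):
 *
 *	/sys/kernel/debug/swiotlb/io_tlb_nslabs        total slots
 *	/sys/kernel/debug/swiotlb/io_tlb_used          slots in use
 *	/sys/kernel/debug/swiotlb/io_tlb_used_hiwater  high-water mark;
 *						       only writing 0 is allowed
 */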
ec274aff | 1610 | static int __init swiotlb_create_default_debugfs(void) |
6e675a1c | 1611 | { |
35265899 | 1612 | swiotlb_create_debugfs_files(&io_tlb_default_mem, "swiotlb"); |
71602fe6 | 1613 | return 0; |
71602fe6 DZ |
1614 | } |
1615 | ||
6e675a1c | 1616 | late_initcall(swiotlb_create_default_debugfs); |
ec274aff PT |
1617 | |
1618 | #else /* !CONFIG_DEBUG_FS */ | |
1619 | ||
1620 | static inline void swiotlb_create_debugfs_files(struct io_tlb_mem *mem, | |
1621 | const char *dirname) | |
1622 | { | |
1623 | } | |
1624 | ||
1625 | #endif /* CONFIG_DEBUG_FS */ | |
f4111e39 CC |
1626 | |
1627 | #ifdef CONFIG_DMA_RESTRICTED_POOL | |
09a4a79d | 1628 | |
f4111e39 CC |
1629 | struct page *swiotlb_alloc(struct device *dev, size_t size) |
1630 | { | |
1631 | struct io_tlb_mem *mem = dev->dma_io_tlb_mem; | |
158dbe9c | 1632 | struct io_tlb_pool *pool; |
f4111e39 CC |
1633 | phys_addr_t tlb_addr; |
1634 | int index; | |
1635 | ||
1636 | if (!mem) | |
1637 | return NULL; | |
1638 | ||
79636caa | 1639 | index = swiotlb_find_slots(dev, 0, size, 0, &pool); |
f4111e39 CC |
1640 | if (index == -1) |
1641 | return NULL; | |
1642 | ||
158dbe9c | 1643 | tlb_addr = slot_addr(pool->start, index); |
f4111e39 CC |
1644 | |
1645 | return pfn_to_page(PFN_DOWN(tlb_addr)); | |
1646 | } | |
1647 | ||
1648 | bool swiotlb_free(struct device *dev, struct page *page, size_t size) | |
1649 | { | |
1650 | phys_addr_t tlb_addr = page_to_phys(page); | |
1651 | ||
1652 | if (!is_swiotlb_buffer(dev, tlb_addr)) | |
1653 | return false; | |
1654 | ||
1655 | swiotlb_release_slots(dev, tlb_addr); | |
1656 | ||
1657 | return true; | |
1658 | } | |
1659 | ||
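/*
 * Minimal usage sketch of the pair above, roughly as dma-direct uses
 * it for devices attached to a restricted pool (error handling and the
 * actual buffer use are hypothetical):
 *
 *	struct page *page = swiotlb_alloc(dev, SZ_8K);
 *
 *	if (page) {
 *		void *vaddr = page_address(page);
 *		// ... hand vaddr out as a coherent-style allocation ...
 *		swiotlb_free(dev, page, SZ_8K);
 *	}
 */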
0b84e4f8 CC |
1660 | static int rmem_swiotlb_device_init(struct reserved_mem *rmem, |
1661 | struct device *dev) | |
1662 | { | |
1663 | struct io_tlb_mem *mem = rmem->priv; | |
1664 | unsigned long nslabs = rmem->size >> IO_TLB_SHIFT; | |
1665 | ||
20347fca TL |
1666 | /* Use a single per-device IO TLB area for the restricted pool */ | |
1667 | unsigned int nareas = 1; | |
1668 | ||
a90922fa DB |
1669 | if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) { |
1670 | dev_err(dev, "Restricted DMA pool must be accessible within the linear mapping."); | |
1671 | return -EINVAL; | |
1672 | } | |
1673 | ||
0b84e4f8 CC |
1674 | /* |
1675 | * Since multiple devices can share the same pool, the private data | |
1676 | * (the io_tlb_mem struct) is initialized by the first device attached | |
1677 | * to it. | |
1678 | */ | |
1679 | if (!mem) { | |
158dbe9c PT |
1680 | struct io_tlb_pool *pool; |
1681 | ||
463e862a | 1682 | mem = kzalloc(sizeof(*mem), GFP_KERNEL); |
0b84e4f8 CC |
1683 | if (!mem) |
1684 | return -ENOMEM; | |
158dbe9c | 1685 | pool = &mem->defpool; |
0b84e4f8 | 1686 | |
158dbe9c PT |
1687 | pool->slots = kcalloc(nslabs, sizeof(*pool->slots), GFP_KERNEL); |
1688 | if (!pool->slots) { | |
463e862a WD |
1689 | kfree(mem); |
1690 | return -ENOMEM; | |
1691 | } | |
1692 | ||
158dbe9c | 1693 | pool->areas = kcalloc(nareas, sizeof(*pool->areas), |
20347fca | 1694 | GFP_KERNEL); |
158dbe9c PT |
1695 | if (!pool->areas) { |
1696 | kfree(pool->slots); | |
4a977394 | 1697 | kfree(mem); |
20347fca TL |
1698 | return -ENOMEM; |
1699 | } | |
1700 | ||
0b84e4f8 CC |
1701 | set_memory_decrypted((unsigned long)phys_to_virt(rmem->base), |
1702 | rmem->size >> PAGE_SHIFT); | |
158dbe9c PT |
1703 | swiotlb_init_io_tlb_pool(pool, rmem->base, nslabs, |
1704 | false, nareas); | |
1705 | mem->force_bounce = true; | |
0b84e4f8 | 1706 | mem->for_alloc = true; |
1aaa7368 PT |
1707 | #ifdef CONFIG_SWIOTLB_DYNAMIC |
1708 | spin_lock_init(&mem->lock); | |
1709 | #endif | |
1710 | add_mem_pool(mem, pool); | |
0b84e4f8 CC |
1711 | |
1712 | rmem->priv = mem; | |
1713 | ||
35265899 | 1714 | swiotlb_create_debugfs_files(mem, rmem->name); |
0b84e4f8 CC |
1715 | } |
1716 | ||
1717 | dev->dma_io_tlb_mem = mem; | |
1718 | ||
1719 | return 0; | |
1720 | } | |
1721 | ||
1722 | static void rmem_swiotlb_device_release(struct reserved_mem *rmem, | |
1723 | struct device *dev) | |
1724 | { | |
463e862a | 1725 | dev->dma_io_tlb_mem = &io_tlb_default_mem; |
0b84e4f8 CC |
1726 | } |
1727 | ||
1728 | static const struct reserved_mem_ops rmem_swiotlb_ops = { | |
1729 | .device_init = rmem_swiotlb_device_init, | |
1730 | .device_release = rmem_swiotlb_device_release, | |
1731 | }; | |
1732 | ||
1733 | static int __init rmem_swiotlb_setup(struct reserved_mem *rmem) | |
1734 | { | |
1735 | unsigned long node = rmem->fdt_node; | |
1736 | ||
1737 | if (of_get_flat_dt_prop(node, "reusable", NULL) || | |
1738 | of_get_flat_dt_prop(node, "linux,cma-default", NULL) || | |
1739 | of_get_flat_dt_prop(node, "linux,dma-default", NULL) || | |
1740 | of_get_flat_dt_prop(node, "no-map", NULL)) | |
1741 | return -EINVAL; | |
1742 | ||
0b84e4f8 CC |
1743 | rmem->ops = &rmem_swiotlb_ops; |
1744 | pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n", | |
1745 | &rmem->base, (unsigned long)rmem->size / SZ_1M); | |
1746 | return 0; | |
1747 | } | |
1748 | ||
1749 | RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup); | |
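/*
 * Illustrative device-tree fragment exercising this setup (addresses,
 * sizes and node names are placeholders):
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *
 *		pool: restricted-dma-pool@50000000 {
 *			compatible = "restricted-dma-pool";
 *			reg = <0x50000000 0x400000>;
 *		};
 *	};
 *
 *	some-device {
 *		memory-region = <&pool>;
 *	};
 *
 * Nodes with "reusable", "no-map" or the linux,*-default properties are
 * rejected by rmem_swiotlb_setup() above.
 */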
f4111e39 | 1750 | #endif /* CONFIG_DMA_RESTRICTED_POOL */ |