// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Takashi Iwai <tiwai@suse.de>
 *
 *  Generic memory allocators
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/dma-map-ops.h>
#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <sound/memalloc.h>

struct snd_malloc_ops {
	void *(*alloc)(struct snd_dma_buffer *dmab, size_t size);
	void (*free)(struct snd_dma_buffer *dmab);
	dma_addr_t (*get_addr)(struct snd_dma_buffer *dmab, size_t offset);
	struct page *(*get_page)(struct snd_dma_buffer *dmab, size_t offset);
	unsigned int (*get_chunk_size)(struct snd_dma_buffer *dmab,
				       unsigned int ofs, unsigned int size);
	int (*mmap)(struct snd_dma_buffer *dmab, struct vm_area_struct *area);
	void (*sync)(struct snd_dma_buffer *dmab, enum snd_dma_sync_mode mode);
};

#define DEFAULT_GFP \
	(GFP_KERNEL | \
	 __GFP_RETRY_MAYFAIL | /* don't trigger OOM-killer */ \
	 __GFP_NOWARN)	       /* no stack trace print - this call is non-critical */

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);

static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (WARN_ON_ONCE(!ops || !ops->alloc))
		return NULL;
	return ops->alloc(dmab, size);
}

/**
 * snd_dma_alloc_dir_pages - allocate the buffer area according to the given
 *	type and direction
 * @type: the DMA buffer type
 * @device: the device pointer
 * @dir: DMA direction
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_dir_pages(int type, struct device *device,
			    enum dma_data_direction dir, size_t size,
			    struct snd_dma_buffer *dmab)
{
	if (WARN_ON(!size))
		return -ENXIO;
	if (WARN_ON(!dmab))
		return -ENXIO;

	size = PAGE_ALIGN(size);
	dmab->dev.type = type;
	dmab->dev.dev = device;
	dmab->dev.dir = dir;
	dmab->bytes = 0;
	dmab->addr = 0;
	dmab->private_data = NULL;
	dmab->area = __snd_dma_alloc_pages(dmab, size);
	if (!dmab->area)
		return -ENOMEM;
	dmab->bytes = size;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_dir_pages);
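
/*
 * Example usage (an illustrative sketch, not part of the original code):
 * a driver wanting a 64 kB playback buffer could call the allocator as
 * below; "card->dev" stands for whatever struct device the driver owns.
 *
 *	struct snd_dma_buffer buf;
 *	int err;
 *
 *	err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV, card->dev,
 *				      DMA_TO_DEVICE, 64 * 1024, &buf);
 *	if (err < 0)
 *		return err;
 *	... use buf.area (CPU address) and buf.addr (DMA address) ...
 *	snd_dma_free_pages(&buf);
 */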

/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.  When no space is left, this function reduces the size and
 * tries to allocate again.  The size actually allocated is stored in
 * dmab->bytes.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
				 struct snd_dma_buffer *dmab)
{
	int err;

	while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
		if (err != -ENOMEM)
			return err;
		if (size <= PAGE_SIZE)
			return -ENOMEM;
		size >>= 1;
		size = PAGE_SIZE << get_order(size);
	}
	if (!dmab->area)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
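
/*
 * Example (illustrative, not from the original code): the fallback variant
 * suits cases where a smaller buffer is still usable; the caller reads
 * dmab->bytes afterwards for the size actually obtained.
 *
 *	err = snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, card->dev,
 *					   256 * 1024, &buf);
 *	if (!err && buf.bytes < 256 * 1024)
 *		dev_info(card->dev, "reduced buffer: %zu bytes\n", buf.bytes);
 */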

/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the buffer allocated via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->free)
		ops->free(dmab);
}
EXPORT_SYMBOL(snd_dma_free_pages);

/* called by devres */
static void __snd_release_pages(struct device *dev, void *res)
{
	snd_dma_free_pages(res);
}

/**
 * snd_devm_alloc_dir_pages - allocate the buffer and manage with devres
 * @dev: the device pointer
 * @type: the DMA buffer type
 * @dir: DMA direction
 * @size: the buffer size to allocate
 *
 * Allocate buffer pages depending on the given type and manage them with
 * devres.  The pages will be released automatically when the device is
 * removed.
 *
 * Unlike snd_dma_alloc_pages(), this function requires the real device
 * pointer, hence it can't work with SNDRV_DMA_TYPE_CONTINUOUS or
 * SNDRV_DMA_TYPE_VMALLOC type.
 *
 * Return: the snd_dma_buffer object on success, or NULL on failure
 */
struct snd_dma_buffer *
snd_devm_alloc_dir_pages(struct device *dev, int type,
			 enum dma_data_direction dir, size_t size)
{
	struct snd_dma_buffer *dmab;
	int err;

	if (WARN_ON(type == SNDRV_DMA_TYPE_CONTINUOUS ||
		    type == SNDRV_DMA_TYPE_VMALLOC))
		return NULL;

	dmab = devres_alloc(__snd_release_pages, sizeof(*dmab), GFP_KERNEL);
	if (!dmab)
		return NULL;

	err = snd_dma_alloc_dir_pages(type, dev, dir, size, dmab);
	if (err < 0) {
		devres_free(dmab);
		return NULL;
	}

	devres_add(dev, dmab);
	return dmab;
}
EXPORT_SYMBOL_GPL(snd_devm_alloc_dir_pages);
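
/*
 * Example (illustrative sketch): with the devres-managed variant no
 * explicit release is needed; the buffer is freed automatically when
 * the given device is unbound.
 *
 *	struct snd_dma_buffer *dmab;
 *
 *	dmab = snd_devm_alloc_dir_pages(dev, SNDRV_DMA_TYPE_DEV,
 *					DMA_BIDIRECTIONAL, 32 * 1024);
 *	if (!dmab)
 *		return -ENOMEM;
 *	... no snd_dma_free_pages() call required ...
 */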

/**
 * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
 * @dmab: buffer allocation information
 * @area: VM area information
 *
 * Return: zero if successful, or a negative error code
 */
int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
			struct vm_area_struct *area)
{
	const struct snd_malloc_ops *ops;

	if (!dmab)
		return -ENOENT;
	ops = snd_dma_get_ops(dmab);
	if (ops && ops->mmap)
		return ops->mmap(dmab, area);
	else
		return -ENOENT;
}
EXPORT_SYMBOL(snd_dma_buffer_mmap);
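
/*
 * Example (hypothetical PCM .mmap callback, not part of this file): a
 * driver can usually forward its mmap request to snd_dma_buffer_mmap()
 * with the buffer attached to the substream.
 *
 *	static int my_pcm_mmap(struct snd_pcm_substream *substream,
 *			       struct vm_area_struct *vma)
 *	{
 *		return snd_dma_buffer_mmap(snd_pcm_get_dma_buf(substream),
 *					   vma);
 *	}
 */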

#ifdef CONFIG_HAS_DMA
/**
 * snd_dma_buffer_sync - sync DMA buffer between CPU and device
 * @dmab: buffer allocation information
 * @mode: sync mode
 */
void snd_dma_buffer_sync(struct snd_dma_buffer *dmab,
			 enum snd_dma_sync_mode mode)
{
	const struct snd_malloc_ops *ops;

	if (!dmab || !dmab->dev.need_sync)
		return;
	ops = snd_dma_get_ops(dmab);
	if (ops && ops->sync)
		ops->sync(dmab, mode);
}
EXPORT_SYMBOL_GPL(snd_dma_buffer_sync);
#endif /* CONFIG_HAS_DMA */
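
/*
 * Example (illustrative): for buffer types that may be non-coherent
 * (e.g. SNDRV_DMA_TYPE_NONCONTIG), sync toward the CPU before reading
 * captured data and toward the device after writing playback data; the
 * call is a no-op when dmab->dev.need_sync is false.
 *
 *	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_CPU);
 *	... CPU reads or writes dmab->area ...
 *	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_DEVICE);
 */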

/**
 * snd_sgbuf_get_addr - return the physical address at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the physical address
 */
dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_addr)
		return ops->get_addr(dmab, offset);
	else
		return dmab->addr + offset;
}
EXPORT_SYMBOL(snd_sgbuf_get_addr);

/**
 * snd_sgbuf_get_page - return the physical page at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the page pointer
 */
struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_page)
		return ops->get_page(dmab, offset);
	else
		return virt_to_page(dmab->area + offset);
}
EXPORT_SYMBOL(snd_sgbuf_get_page);

/**
 * snd_sgbuf_get_chunk_size - compute the max chunk size with continuous pages
 *	on sg-buffer
 * @dmab: buffer allocation information
 * @ofs: offset in the ring buffer
 * @size: the requested size
 *
 * Return: the chunk size
 */
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
				      unsigned int ofs, unsigned int size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_chunk_size)
		return ops->get_chunk_size(dmab, ofs, size);
	else
		return size;
}
EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
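
/*
 * Example (illustrative): a driver programming a scatter-gather capable
 * controller can walk the buffer in physically contiguous chunks by
 * combining snd_sgbuf_get_addr() with snd_sgbuf_get_chunk_size();
 * program_sg_entry() below is a placeholder for the controller-specific
 * helper.
 *
 *	unsigned int ofs = 0, chunk;
 *
 *	while (ofs < dmab->bytes) {
 *		chunk = snd_sgbuf_get_chunk_size(dmab, ofs,
 *						 dmab->bytes - ofs);
 *		program_sg_entry(snd_sgbuf_get_addr(dmab, ofs), chunk);
 *		ofs += chunk;
 *	}
 */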

/*
 * Continuous pages allocator
 */
static void *do_alloc_pages(struct device *dev, size_t size, dma_addr_t *addr,
			    bool wc)
{
	void *p;
	gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;

 again:
	p = alloc_pages_exact(size, gfp);
	if (!p)
		return NULL;
	*addr = page_to_phys(virt_to_page(p));
	if (!dev)
		return p;
	if ((*addr + size - 1) & ~dev->coherent_dma_mask) {
		if (IS_ENABLED(CONFIG_ZONE_DMA32) && !(gfp & GFP_DMA32)) {
			gfp |= GFP_DMA32;
			goto again;
		}
		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}
#ifdef CONFIG_X86
	if (wc)
		set_memory_wc((unsigned long)(p), size >> PAGE_SHIFT);
#endif
	return p;
}

static void do_free_pages(void *p, size_t size, bool wc)
{
#ifdef CONFIG_X86
	if (wc)
		set_memory_wb((unsigned long)(p), size >> PAGE_SHIFT);
#endif
	free_pages_exact(p, size);
}


static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return do_alloc_pages(dmab->dev.dev, size, &dmab->addr, false);
}

static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
{
	do_free_pages(dmab->area, dmab->bytes, false);
}

static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
				   struct vm_area_struct *area)
{
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_continuous_ops = {
	.alloc = snd_dma_continuous_alloc,
	.free = snd_dma_continuous_free,
	.mmap = snd_dma_continuous_mmap,
};

/*
 * VMALLOC allocator
 */
static void *snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return vmalloc(size);
}

static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
{
	vfree(dmab->area);
}

static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab,
				struct vm_area_struct *area)
{
	return remap_vmalloc_range(area, dmab->area, 0);
}

#define get_vmalloc_page_addr(dmab, offset) \
	page_to_phys(vmalloc_to_page((dmab)->area + (offset)))

static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
					   size_t offset)
{
	return get_vmalloc_page_addr(dmab, offset) + offset % PAGE_SIZE;
}

static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	return vmalloc_to_page(dmab->area + offset);
}

static unsigned int
snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
			       unsigned int ofs, unsigned int size)
{
	unsigned int start, end;
	unsigned long addr;

	start = ALIGN_DOWN(ofs, PAGE_SIZE);
	end = ofs + size - 1; /* the last byte address */
	/* check page continuity */
	addr = get_vmalloc_page_addr(dmab, start);
	for (;;) {
		start += PAGE_SIZE;
		if (start > end)
			break;
		addr += PAGE_SIZE;
		if (get_vmalloc_page_addr(dmab, start) != addr)
			return start - ofs;
	}
	/* ok, all on continuous pages */
	return size;
}

static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
	.alloc = snd_dma_vmalloc_alloc,
	.free = snd_dma_vmalloc_free,
	.mmap = snd_dma_vmalloc_mmap,
	.get_addr = snd_dma_vmalloc_get_addr,
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};

#ifdef CONFIG_HAS_DMA
/*
 * IRAM allocator
 */
#ifdef CONFIG_GENERIC_ALLOCATOR
static void *snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct device *dev = dmab->dev.dev;
	struct gen_pool *pool;
	void *p;

	if (dev->of_node) {
		pool = of_gen_pool_get(dev->of_node, "iram", 0);
		/* Assign the pool into private_data field */
		dmab->private_data = pool;

		p = gen_pool_dma_alloc_align(pool, size, &dmab->addr, PAGE_SIZE);
		if (p)
			return p;
	}

	/* Internal memory might have limited size and not enough space,
	 * so if the allocation from it fails, fall back to the standard
	 * page allocation below.
	 */
	dmab->dev.type = SNDRV_DMA_TYPE_DEV;
	return __snd_dma_alloc_pages(dmab, size);
}

static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
{
	struct gen_pool *pool = dmab->private_data;

	if (pool && dmab->area)
		gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
}

static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab,
			     struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_iram_ops = {
	.alloc = snd_dma_iram_alloc,
	.free = snd_dma_iram_free,
	.mmap = snd_dma_iram_mmap,
};
#endif /* CONFIG_GENERIC_ALLOCATOR */

/*
 * Coherent device pages allocator
 */
static void *snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
{
	dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
			    struct vm_area_struct *area)
{
	return dma_mmap_coherent(dmab->dev.dev, area,
				 dmab->area, dmab->addr, dmab->bytes);
}

static const struct snd_malloc_ops snd_dma_dev_ops = {
	.alloc = snd_dma_dev_alloc,
	.free = snd_dma_dev_free,
	.mmap = snd_dma_dev_mmap,
};

/*
 * Write-combined pages
 */
#ifdef CONFIG_SND_DMA_SGBUF
/* x86-specific allocations */
static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	void *p = do_alloc_pages(dmab->dev.dev, size, &dmab->addr, true);

	if (!p)
		return NULL;
	dmab->addr = dma_map_single(dmab->dev.dev, p, size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dmab->dev.dev, dmab->addr)) {
		do_free_pages(p, size, true);
		return NULL;
	}
	return p;
}

static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
{
	dma_unmap_single(dmab->dev.dev, dmab->addr, dmab->bytes,
			 DMA_BIDIRECTIONAL);
	do_free_pages(dmab->area, dmab->bytes, true);
}

static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
			   struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return dma_mmap_coherent(dmab->dev.dev, area,
				 dmab->area, dmab->addr, dmab->bytes);
}
#else
static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
{
	dma_free_wc(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
			   struct vm_area_struct *area)
{
	return dma_mmap_wc(dmab->dev.dev, area,
			   dmab->area, dmab->addr, dmab->bytes);
}
#endif

static const struct snd_malloc_ops snd_dma_wc_ops = {
	.alloc = snd_dma_wc_alloc,
	.free = snd_dma_wc_free,
	.mmap = snd_dma_wc_mmap,
};

/*
 * Non-contiguous pages allocator
 */
static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct sg_table *sgt;
	void *p;

	sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
				      DEFAULT_GFP, 0);
	if (!sgt)
		return NULL;

	dmab->dev.need_sync = dma_need_sync(dmab->dev.dev,
					    sg_dma_address(sgt->sgl));
	p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
	if (p) {
		dmab->private_data = sgt;
		/* store the first page address for convenience */
		dmab->addr = snd_sgbuf_get_addr(dmab, 0);
	} else {
		dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir);
	}
	return p;
}

static void snd_dma_noncontig_free(struct snd_dma_buffer *dmab)
{
	dma_vunmap_noncontiguous(dmab->dev.dev, dmab->area);
	dma_free_noncontiguous(dmab->dev.dev, dmab->bytes, dmab->private_data,
			       dmab->dev.dir);
}

static int snd_dma_noncontig_mmap(struct snd_dma_buffer *dmab,
				  struct vm_area_struct *area)
{
	return dma_mmap_noncontiguous(dmab->dev.dev, area,
				      dmab->bytes, dmab->private_data);
}

static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab,
				   enum snd_dma_sync_mode mode)
{
	if (mode == SNDRV_DMA_SYNC_CPU) {
		if (dmab->dev.dir == DMA_TO_DEVICE)
			return;
		invalidate_kernel_vmap_range(dmab->area, dmab->bytes);
		dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data,
					 dmab->dev.dir);
	} else {
		if (dmab->dev.dir == DMA_FROM_DEVICE)
			return;
		flush_kernel_vmap_range(dmab->area, dmab->bytes);
		dma_sync_sgtable_for_device(dmab->dev.dev, dmab->private_data,
					    dmab->dev.dir);
	}
}

static inline void snd_dma_noncontig_iter_set(struct snd_dma_buffer *dmab,
					      struct sg_page_iter *piter,
					      size_t offset)
{
	struct sg_table *sgt = dmab->private_data;

	__sg_page_iter_start(piter, sgt->sgl, sgt->orig_nents,
			     offset >> PAGE_SHIFT);
}

static dma_addr_t snd_dma_noncontig_get_addr(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	struct sg_dma_page_iter iter;

	snd_dma_noncontig_iter_set(dmab, &iter.base, offset);
	__sg_page_iter_dma_next(&iter);
	return sg_page_iter_dma_address(&iter) + offset % PAGE_SIZE;
}

static struct page *snd_dma_noncontig_get_page(struct snd_dma_buffer *dmab,
					       size_t offset)
{
	struct sg_page_iter iter;

	snd_dma_noncontig_iter_set(dmab, &iter, offset);
	__sg_page_iter_next(&iter);
	return sg_page_iter_page(&iter);
}

static unsigned int
snd_dma_noncontig_get_chunk_size(struct snd_dma_buffer *dmab,
				 unsigned int ofs, unsigned int size)
{
	struct sg_dma_page_iter iter;
	unsigned int start, end;
	unsigned long addr;

	start = ALIGN_DOWN(ofs, PAGE_SIZE);
	end = ofs + size - 1; /* the last byte address */
	snd_dma_noncontig_iter_set(dmab, &iter.base, start);
	if (!__sg_page_iter_dma_next(&iter))
		return 0;
	/* check page continuity */
	addr = sg_page_iter_dma_address(&iter);
	for (;;) {
		start += PAGE_SIZE;
		if (start > end)
			break;
		addr += PAGE_SIZE;
		if (!__sg_page_iter_dma_next(&iter) ||
		    sg_page_iter_dma_address(&iter) != addr)
			return start - ofs;
	}
	/* ok, all on continuous pages */
	return size;
}

static const struct snd_malloc_ops snd_dma_noncontig_ops = {
	.alloc = snd_dma_noncontig_alloc,
	.free = snd_dma_noncontig_free,
	.mmap = snd_dma_noncontig_mmap,
	.sync = snd_dma_noncontig_sync,
	.get_addr = snd_dma_noncontig_get_addr,
	.get_page = snd_dma_noncontig_get_page,
	.get_chunk_size = snd_dma_noncontig_get_chunk_size,
};

#ifdef CONFIG_SND_DMA_SGBUF
/* Fallback SG-buffer allocations for x86 */
struct snd_dma_sg_fallback {
	struct sg_table sgt; /* used by get_addr - must be the first item */
	size_t count;
	struct page **pages;
	unsigned int *npages;
};

static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
				       struct snd_dma_sg_fallback *sgbuf)
{
	bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG;
	size_t i, size;

	if (sgbuf->pages && sgbuf->npages) {
		i = 0;
		while (i < sgbuf->count) {
			size = sgbuf->npages[i];
			if (!size)
				break;
			do_free_pages(page_address(sgbuf->pages[i]),
				      size << PAGE_SHIFT, wc);
			i += size;
		}
	}
	kvfree(sgbuf->pages);
	kvfree(sgbuf->npages);
	kfree(sgbuf);
}

/* fallback manual S/G buffer allocations */
static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG;
	struct snd_dma_sg_fallback *sgbuf;
	struct page **pagep, *curp;
	size_t chunk;
	dma_addr_t addr;
	unsigned int idx, npages;
	void *p;

	sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
	if (!sgbuf)
		return NULL;
	size = PAGE_ALIGN(size);
	sgbuf->count = size >> PAGE_SHIFT;
	sgbuf->pages = kvcalloc(sgbuf->count, sizeof(*sgbuf->pages), GFP_KERNEL);
	sgbuf->npages = kvcalloc(sgbuf->count, sizeof(*sgbuf->npages), GFP_KERNEL);
	if (!sgbuf->pages || !sgbuf->npages)
		goto error;

	pagep = sgbuf->pages;
	chunk = size;
	idx = 0;
	while (size > 0) {
		chunk = min(size, chunk);
		p = do_alloc_pages(dmab->dev.dev, chunk, &addr, wc);
		if (!p) {
			if (chunk <= PAGE_SIZE)
				goto error;
			chunk >>= 1;
			chunk = PAGE_SIZE << get_order(chunk);
			continue;
		}

		size -= chunk;
		/* fill pages */
		npages = chunk >> PAGE_SHIFT;
		sgbuf->npages[idx] = npages;
		idx += npages;
		curp = virt_to_page(p);
		while (npages--)
			*pagep++ = curp++;
	}

	if (sg_alloc_table_from_pages(&sgbuf->sgt, sgbuf->pages, sgbuf->count,
				      0, sgbuf->count << PAGE_SHIFT, GFP_KERNEL))
		goto error;

	if (dma_map_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0))
		goto error_dma_map;

	p = vmap(sgbuf->pages, sgbuf->count, VM_MAP, PAGE_KERNEL);
	if (!p)
		goto error_vmap;

	dmab->private_data = sgbuf;
	/* store the first page address for convenience */
	dmab->addr = snd_sgbuf_get_addr(dmab, 0);
	return p;

 error_vmap:
	dma_unmap_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0);
 error_dma_map:
	sg_free_table(&sgbuf->sgt);
 error:
	__snd_dma_sg_fallback_free(dmab, sgbuf);
	return NULL;
}

static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab)
{
	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;

	vunmap(dmab->area);
	dma_unmap_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(&sgbuf->sgt);
	__snd_dma_sg_fallback_free(dmab, dmab->private_data);
}

static int snd_dma_sg_fallback_mmap(struct snd_dma_buffer *dmab,
				    struct vm_area_struct *area)
{
	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;

	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return vm_map_pages(area, sgbuf->pages, sgbuf->count);
}

static void *snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	int type = dmab->dev.type;
	void *p;

	/* try the standard DMA API allocation at first */
	if (type == SNDRV_DMA_TYPE_DEV_WC_SG)
		dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC;
	else
		dmab->dev.type = SNDRV_DMA_TYPE_DEV;
	p = __snd_dma_alloc_pages(dmab, size);
	if (p)
		return p;

	dmab->dev.type = type; /* restore the type */
	return snd_dma_sg_fallback_alloc(dmab, size);
}

static const struct snd_malloc_ops snd_dma_sg_ops = {
	.alloc = snd_dma_sg_alloc,
	.free = snd_dma_sg_fallback_free,
	.mmap = snd_dma_sg_fallback_mmap,
	/* reuse noncontig helper */
	.get_addr = snd_dma_noncontig_get_addr,
	/* reuse vmalloc helpers */
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};
#endif /* CONFIG_SND_DMA_SGBUF */

/*
 * Non-coherent pages allocator
 */
static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	void *p;

	p = dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
				  dmab->dev.dir, DEFAULT_GFP);
	if (p)
		dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->addr);
	return p;
}

static void snd_dma_noncoherent_free(struct snd_dma_buffer *dmab)
{
	dma_free_noncoherent(dmab->dev.dev, dmab->bytes, dmab->area,
			     dmab->addr, dmab->dev.dir);
}

static int snd_dma_noncoherent_mmap(struct snd_dma_buffer *dmab,
				    struct vm_area_struct *area)
{
	area->vm_page_prot = vm_get_page_prot(area->vm_flags);
	return dma_mmap_pages(dmab->dev.dev, area,
			      area->vm_end - area->vm_start,
			      virt_to_page(dmab->area));
}

static void snd_dma_noncoherent_sync(struct snd_dma_buffer *dmab,
				     enum snd_dma_sync_mode mode)
{
	if (mode == SNDRV_DMA_SYNC_CPU) {
		if (dmab->dev.dir != DMA_TO_DEVICE)
			dma_sync_single_for_cpu(dmab->dev.dev, dmab->addr,
						dmab->bytes, dmab->dev.dir);
	} else {
		if (dmab->dev.dir != DMA_FROM_DEVICE)
			dma_sync_single_for_device(dmab->dev.dev, dmab->addr,
						   dmab->bytes, dmab->dev.dir);
	}
}

static const struct snd_malloc_ops snd_dma_noncoherent_ops = {
	.alloc = snd_dma_noncoherent_alloc,
	.free = snd_dma_noncoherent_free,
	.mmap = snd_dma_noncoherent_mmap,
	.sync = snd_dma_noncoherent_sync,
};

#endif /* CONFIG_HAS_DMA */

/*
 * Entry points
 */
static const struct snd_malloc_ops *snd_dma_ops[] = {
	[SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
	[SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
#ifdef CONFIG_HAS_DMA
	[SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
	[SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
	[SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops,
	[SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops,
#ifdef CONFIG_SND_DMA_SGBUF
	[SNDRV_DMA_TYPE_DEV_SG] = &snd_dma_sg_ops,
	[SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_ops,
#endif
#ifdef CONFIG_GENERIC_ALLOCATOR
	[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
#endif /* CONFIG_GENERIC_ALLOCATOR */
#endif /* CONFIG_HAS_DMA */
};

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
{
	if (WARN_ON_ONCE(!dmab))
		return NULL;
	if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
			 dmab->dev.type >= ARRAY_SIZE(snd_dma_ops)))
		return NULL;
	return snd_dma_ops[dmab->dev.type];
}