// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Takashi Iwai <tiwai@suse.de>
 *
 *  Generic memory allocators
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <sound/memalloc.h>
#include "memalloc_local.h"

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);

#ifdef CONFIG_SND_DMA_SGBUF
static void *do_alloc_fallback_pages(struct device *dev, size_t size,
                                     dma_addr_t *addr, bool wc);
static void do_free_fallback_pages(void *p, size_t size, bool wc);
static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size);
#endif

/* a cast to gfp flag from the dev pointer; for CONTINUOUS and VMALLOC types */
static inline gfp_t snd_mem_get_gfp_flags(const struct snd_dma_buffer *dmab,
                                          gfp_t default_gfp)
{
        if (!dmab->dev.dev)
                return default_gfp;
        else
                return (__force gfp_t)(unsigned long)dmab->dev.dev;
}
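
/*
 * Illustrative sketch (not part of the original file): for the CONTINUOUS and
 * VMALLOC buffer types the "device pointer" slot is reused to carry gfp flags,
 * which snd_mem_get_gfp_flags() above decodes again.  A hypothetical caller
 * that wants GFP_DMA32 pages could encode the flags with the mirror cast:
 *
 *    struct snd_dma_buffer buf;
 *
 *    if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_CONTINUOUS,
 *                            (struct device *)(__force unsigned long)GFP_DMA32,
 *                            8192, &buf) < 0)
 *            return -ENOMEM;
 */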

static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
{
        const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

        if (WARN_ON_ONCE(!ops || !ops->alloc))
                return NULL;
        return ops->alloc(dmab, size);
}

/**
 * snd_dma_alloc_dir_pages - allocate the buffer area according to the given
 *      type and direction
 * @type: the DMA buffer type
 * @device: the device pointer
 * @dir: DMA direction
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_dir_pages(int type, struct device *device,
                            enum dma_data_direction dir, size_t size,
                            struct snd_dma_buffer *dmab)
{
        if (WARN_ON(!size))
                return -ENXIO;
        if (WARN_ON(!dmab))
                return -ENXIO;

        size = PAGE_ALIGN(size);
        dmab->dev.type = type;
        dmab->dev.dev = device;
        dmab->dev.dir = dir;
        dmab->bytes = 0;
        dmab->addr = 0;
        dmab->private_data = NULL;
        dmab->area = __snd_dma_alloc_pages(dmab, size);
        if (!dmab->area)
                return -ENOMEM;
        dmab->bytes = size;
        return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_dir_pages);
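
/*
 * Illustrative usage sketch (not part of the original file): a driver-side
 * allocation and release sequence, assuming "card_dev" is a valid struct
 * device of the sound card:
 *
 *    struct snd_dma_buffer buf;
 *    int err;
 *
 *    err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV, card_dev,
 *                                  DMA_BIDIRECTIONAL, 4096, &buf);
 *    if (err < 0)
 *            return err;
 *    ... use buf.area (CPU view) and buf.addr (DMA address) ...
 *    snd_dma_free_pages(&buf);
 */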

/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type. When no space is left, this function reduces the size and
 * tries to allocate again. The size actually allocated is stored in
 * dmab->bytes.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
                                 struct snd_dma_buffer *dmab)
{
        int err;

        while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
                if (err != -ENOMEM)
                        return err;
                if (size <= PAGE_SIZE)
                        return -ENOMEM;
                size >>= 1;
                size = PAGE_SIZE << get_order(size);
        }
        if (!dmab->area)
                return -ENOMEM;
        return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
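
/*
 * Illustrative usage sketch (not part of the original file): request a large
 * buffer and accept whatever the fallback loop could actually get; the real
 * size must then be read back from dmab.bytes ("card_dev" is assumed to be a
 * valid struct device):
 *
 *    struct snd_dma_buffer buf;
 *
 *    if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, card_dev,
 *                                     512 * 1024, &buf) < 0)
 *            return -ENOMEM;
 *    pr_debug("got %zu bytes\n", buf.bytes);
 *    snd_dma_free_pages(&buf);
 */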

/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the buffer allocated via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
        const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

        if (ops && ops->free)
                ops->free(dmab);
}
EXPORT_SYMBOL(snd_dma_free_pages);

/* called by devres */
static void __snd_release_pages(struct device *dev, void *res)
{
        snd_dma_free_pages(res);
}

/**
 * snd_devm_alloc_dir_pages - allocate the buffer and manage with devres
 * @dev: the device pointer
 * @type: the DMA buffer type
 * @dir: DMA direction
 * @size: the buffer size to allocate
 *
 * Allocate buffer pages depending on the given type and manage using devres.
 * The pages will be released automatically at the device removal.
 *
 * Unlike snd_dma_alloc_pages(), this function requires the real device pointer,
 * hence it can't work with the SNDRV_DMA_TYPE_CONTINUOUS or
 * SNDRV_DMA_TYPE_VMALLOC types.
 *
 * Return: the snd_dma_buffer object on success, or NULL on failure
 */
struct snd_dma_buffer *
snd_devm_alloc_dir_pages(struct device *dev, int type,
                         enum dma_data_direction dir, size_t size)
{
        struct snd_dma_buffer *dmab;
        int err;

        if (WARN_ON(type == SNDRV_DMA_TYPE_CONTINUOUS ||
                    type == SNDRV_DMA_TYPE_VMALLOC))
                return NULL;

        dmab = devres_alloc(__snd_release_pages, sizeof(*dmab), GFP_KERNEL);
        if (!dmab)
                return NULL;

        err = snd_dma_alloc_dir_pages(type, dev, dir, size, dmab);
        if (err < 0) {
                devres_free(dmab);
                return NULL;
        }

        devres_add(dev, dmab);
        return dmab;
}
EXPORT_SYMBOL_GPL(snd_devm_alloc_dir_pages);
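
/*
 * Illustrative usage sketch (not part of the original file): a devres-managed
 * allocation needs no explicit free; the buffer is released together with the
 * device ("card_dev" is assumed to be a valid struct device):
 *
 *    struct snd_dma_buffer *dmab;
 *
 *    dmab = snd_devm_alloc_dir_pages(card_dev, SNDRV_DMA_TYPE_DEV,
 *                                    DMA_FROM_DEVICE, 64 * 1024);
 *    if (!dmab)
 *            return -ENOMEM;
 *    ... no snd_dma_free_pages() needed; devres releases it automatically ...
 */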

/**
 * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
 * @dmab: buffer allocation information
 * @area: VM area information
 *
 * Return: zero if successful, or a negative error code
 */
int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
                        struct vm_area_struct *area)
{
        const struct snd_malloc_ops *ops;

        if (!dmab)
                return -ENOENT;
        ops = snd_dma_get_ops(dmab);
        if (ops && ops->mmap)
                return ops->mmap(dmab, area);
        else
                return -ENOENT;
}
EXPORT_SYMBOL(snd_dma_buffer_mmap);
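
/*
 * Illustrative usage sketch (not part of the original file): a driver that
 * exposes its DMA buffer to user space can forward its mmap callback here,
 * e.g. from a hypothetical PCM mmap handler, assuming the substream's DMA
 * buffer was set up via the allocators above:
 *
 *    static int my_pcm_mmap(struct snd_pcm_substream *substream,
 *                           struct vm_area_struct *area)
 *    {
 *            return snd_dma_buffer_mmap(snd_pcm_get_dma_buf(substream), area);
 *    }
 */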

#ifdef CONFIG_HAS_DMA
/**
 * snd_dma_buffer_sync - sync DMA buffer between CPU and device
 * @dmab: buffer allocation information
 * @mode: sync mode
 */
void snd_dma_buffer_sync(struct snd_dma_buffer *dmab,
                         enum snd_dma_sync_mode mode)
{
        const struct snd_malloc_ops *ops;

        if (!dmab || !dmab->dev.need_sync)
                return;
        ops = snd_dma_get_ops(dmab);
        if (ops && ops->sync)
                ops->sync(dmab, mode);
}
EXPORT_SYMBOL_GPL(snd_dma_buffer_sync);
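
/*
 * Illustrative usage sketch (not part of the original file): for buffer types
 * that set dev.need_sync (e.g. the non-contiguous and non-coherent allocators
 * below), the owner syncs before the CPU reads captured data and again before
 * handing freshly written data back to the device:
 *
 *    snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_CPU);
 *    ... read or update dmab->area on the CPU side ...
 *    snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_DEVICE);
 */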
#endif /* CONFIG_HAS_DMA */

/**
 * snd_sgbuf_get_addr - return the physical address at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the physical address
 */
dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
{
        const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

        if (ops && ops->get_addr)
                return ops->get_addr(dmab, offset);
        else
                return dmab->addr + offset;
}
EXPORT_SYMBOL(snd_sgbuf_get_addr);

/**
 * snd_sgbuf_get_page - return the physical page at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the page pointer
 */
struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
{
        const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

        if (ops && ops->get_page)
                return ops->get_page(dmab, offset);
        else
                return virt_to_page(dmab->area + offset);
}
EXPORT_SYMBOL(snd_sgbuf_get_page);

/**
 * snd_sgbuf_get_chunk_size - compute the max chunk size with continuous pages
 *      on sg-buffer
 * @dmab: buffer allocation information
 * @ofs: offset in the ring buffer
 * @size: the requested size
 *
 * Return: the chunk size
 */
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
                                      unsigned int ofs, unsigned int size)
{
        const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

        if (ops && ops->get_chunk_size)
                return ops->get_chunk_size(dmab, ofs, size);
        else
                return size;
}
EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
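
/*
 * Illustrative usage sketch (not part of the original file): walk an SG-backed
 * buffer in physically contiguous chunks, e.g. to program a hardware
 * descriptor list; program_descriptor() is a hypothetical driver helper and
 * "dmab"/"bytes" are assumed to be set up by the caller:
 *
 *    unsigned int ofs = 0, chunk;
 *
 *    while (ofs < bytes) {
 *            chunk = snd_sgbuf_get_chunk_size(dmab, ofs, bytes - ofs);
 *            program_descriptor(snd_sgbuf_get_addr(dmab, ofs), chunk);
 *            ofs += chunk;
 *    }
 */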

/*
 * Continuous pages allocator
 */
static void *do_alloc_pages(size_t size, dma_addr_t *addr, gfp_t gfp)
{
        void *p = alloc_pages_exact(size, gfp);

        if (p)
                *addr = page_to_phys(virt_to_page(p));
        return p;
}

static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
{
        return do_alloc_pages(size, &dmab->addr,
                              snd_mem_get_gfp_flags(dmab, GFP_KERNEL));
}

static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
{
        free_pages_exact(dmab->area, dmab->bytes);
}

static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
                                   struct vm_area_struct *area)
{
        return remap_pfn_range(area, area->vm_start,
                               dmab->addr >> PAGE_SHIFT,
                               area->vm_end - area->vm_start,
                               area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_continuous_ops = {
        .alloc = snd_dma_continuous_alloc,
        .free = snd_dma_continuous_free,
        .mmap = snd_dma_continuous_mmap,
};

/*
 * VMALLOC allocator
 */
static void *snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
        gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL | __GFP_HIGHMEM);

        return __vmalloc(size, gfp);
}

static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
{
        vfree(dmab->area);
}

static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab,
                                struct vm_area_struct *area)
{
        return remap_vmalloc_range(area, dmab->area, 0);
}

#define get_vmalloc_page_addr(dmab, offset) \
        page_to_phys(vmalloc_to_page((dmab)->area + (offset)))

static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
                                           size_t offset)
{
        return get_vmalloc_page_addr(dmab, offset) + offset % PAGE_SIZE;
}

static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
                                             size_t offset)
{
        return vmalloc_to_page(dmab->area + offset);
}

static unsigned int
snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
                               unsigned int ofs, unsigned int size)
{
        unsigned int start, end;
        unsigned long addr;

        start = ALIGN_DOWN(ofs, PAGE_SIZE);
        end = ofs + size - 1; /* the last byte address */
        /* check page continuity */
        addr = get_vmalloc_page_addr(dmab, start);
        for (;;) {
                start += PAGE_SIZE;
                if (start > end)
                        break;
                addr += PAGE_SIZE;
                if (get_vmalloc_page_addr(dmab, start) != addr)
                        return start - ofs;
        }
        /* ok, all on continuous pages */
        return size;
}

static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
        .alloc = snd_dma_vmalloc_alloc,
        .free = snd_dma_vmalloc_free,
        .mmap = snd_dma_vmalloc_mmap,
        .get_addr = snd_dma_vmalloc_get_addr,
        .get_page = snd_dma_vmalloc_get_page,
        .get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};

#ifdef CONFIG_HAS_DMA
/*
 * IRAM allocator
 */
#ifdef CONFIG_GENERIC_ALLOCATOR
static void *snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
{
        struct device *dev = dmab->dev.dev;
        struct gen_pool *pool;
        void *p;

        if (dev->of_node) {
                pool = of_gen_pool_get(dev->of_node, "iram", 0);
                /* Assign the pool into private_data field */
                dmab->private_data = pool;

                p = gen_pool_dma_alloc_align(pool, size, &dmab->addr, PAGE_SIZE);
                if (p)
                        return p;
        }

        /* Internal memory might be limited in size and have not enough space,
         * so if the allocation fails there, fall back to the regular DEV
         * allocation.
         */
        dmab->dev.type = SNDRV_DMA_TYPE_DEV;
        return __snd_dma_alloc_pages(dmab, size);
}

static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
{
        struct gen_pool *pool = dmab->private_data;

        if (pool && dmab->area)
                gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
}

static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab,
                             struct vm_area_struct *area)
{
        area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
        return remap_pfn_range(area, area->vm_start,
                               dmab->addr >> PAGE_SHIFT,
                               area->vm_end - area->vm_start,
                               area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_iram_ops = {
        .alloc = snd_dma_iram_alloc,
        .free = snd_dma_iram_free,
        .mmap = snd_dma_iram_mmap,
};
#endif /* CONFIG_GENERIC_ALLOCATOR */

#define DEFAULT_GFP \
        (GFP_KERNEL | \
         __GFP_COMP |    /* compound page lets parts be mapped */ \
         __GFP_NORETRY | /* don't trigger OOM-killer */ \
         __GFP_NOWARN)   /* no stack trace print - this call is non-critical */

/*
 * Coherent device pages allocator
 */
static void *snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
{
        return dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
{
        dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
                            struct vm_area_struct *area)
{
        return dma_mmap_coherent(dmab->dev.dev, area,
                                 dmab->area, dmab->addr, dmab->bytes);
}

static const struct snd_malloc_ops snd_dma_dev_ops = {
        .alloc = snd_dma_dev_alloc,
        .free = snd_dma_dev_free,
        .mmap = snd_dma_dev_mmap,
};

/*
 * Write-combined pages
 */
/* x86-specific allocations */
#ifdef CONFIG_SND_DMA_SGBUF
static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
        return do_alloc_fallback_pages(dmab->dev.dev, size, &dmab->addr, true);
}

static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
{
        do_free_fallback_pages(dmab->area, dmab->bytes, true);
}

static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
                           struct vm_area_struct *area)
{
        area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
        return snd_dma_continuous_mmap(dmab, area);
}
#else
static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
        return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
{
        dma_free_wc(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
                           struct vm_area_struct *area)
{
        return dma_mmap_wc(dmab->dev.dev, area,
                           dmab->area, dmab->addr, dmab->bytes);
}
#endif /* CONFIG_SND_DMA_SGBUF */

static const struct snd_malloc_ops snd_dma_wc_ops = {
        .alloc = snd_dma_wc_alloc,
        .free = snd_dma_wc_free,
        .mmap = snd_dma_wc_mmap,
};

/*
 * Non-contiguous pages allocator
 */
static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
{
        struct sg_table *sgt;
        void *p;

        sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
                                      DEFAULT_GFP, 0);
        if (!sgt) {
#ifdef CONFIG_SND_DMA_SGBUF
                if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
                        dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
                else
                        dmab->dev.type = SNDRV_DMA_TYPE_DEV_SG_FALLBACK;
                return snd_dma_sg_fallback_alloc(dmab, size);
#else
                return NULL;
#endif
        }

        dmab->dev.need_sync = dma_need_sync(dmab->dev.dev,
                                            sg_dma_address(sgt->sgl));
        p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
        if (p) {
                dmab->private_data = sgt;
                /* store the first page address for convenience */
                dmab->addr = snd_sgbuf_get_addr(dmab, 0);
        } else {
                dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir);
        }
        return p;
}

static void snd_dma_noncontig_free(struct snd_dma_buffer *dmab)
{
        dma_vunmap_noncontiguous(dmab->dev.dev, dmab->area);
        dma_free_noncontiguous(dmab->dev.dev, dmab->bytes, dmab->private_data,
                               dmab->dev.dir);
}

static int snd_dma_noncontig_mmap(struct snd_dma_buffer *dmab,
                                  struct vm_area_struct *area)
{
        return dma_mmap_noncontiguous(dmab->dev.dev, area,
                                      dmab->bytes, dmab->private_data);
}

static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab,
                                   enum snd_dma_sync_mode mode)
{
        if (mode == SNDRV_DMA_SYNC_CPU) {
                if (dmab->dev.dir == DMA_TO_DEVICE)
                        return;
                invalidate_kernel_vmap_range(dmab->area, dmab->bytes);
                dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data,
                                         dmab->dev.dir);
        } else {
                if (dmab->dev.dir == DMA_FROM_DEVICE)
                        return;
                flush_kernel_vmap_range(dmab->area, dmab->bytes);
                dma_sync_sgtable_for_device(dmab->dev.dev, dmab->private_data,
                                            dmab->dev.dir);
        }
}

static inline void snd_dma_noncontig_iter_set(struct snd_dma_buffer *dmab,
                                              struct sg_page_iter *piter,
                                              size_t offset)
{
        struct sg_table *sgt = dmab->private_data;

        __sg_page_iter_start(piter, sgt->sgl, sgt->orig_nents,
                             offset >> PAGE_SHIFT);
}

static dma_addr_t snd_dma_noncontig_get_addr(struct snd_dma_buffer *dmab,
                                             size_t offset)
{
        struct sg_dma_page_iter iter;

        snd_dma_noncontig_iter_set(dmab, &iter.base, offset);
        __sg_page_iter_dma_next(&iter);
        return sg_page_iter_dma_address(&iter) + offset % PAGE_SIZE;
}

static struct page *snd_dma_noncontig_get_page(struct snd_dma_buffer *dmab,
                                               size_t offset)
{
        struct sg_page_iter iter;

        snd_dma_noncontig_iter_set(dmab, &iter, offset);
        __sg_page_iter_next(&iter);
        return sg_page_iter_page(&iter);
}

static unsigned int
snd_dma_noncontig_get_chunk_size(struct snd_dma_buffer *dmab,
                                 unsigned int ofs, unsigned int size)
{
        struct sg_dma_page_iter iter;
        unsigned int start, end;
        unsigned long addr;

        start = ALIGN_DOWN(ofs, PAGE_SIZE);
        end = ofs + size - 1; /* the last byte address */
        snd_dma_noncontig_iter_set(dmab, &iter.base, start);
        if (!__sg_page_iter_dma_next(&iter))
                return 0;
        /* check page continuity */
        addr = sg_page_iter_dma_address(&iter);
        for (;;) {
                start += PAGE_SIZE;
                if (start > end)
                        break;
                addr += PAGE_SIZE;
                if (!__sg_page_iter_dma_next(&iter) ||
                    sg_page_iter_dma_address(&iter) != addr)
                        return start - ofs;
        }
        /* ok, all on continuous pages */
        return size;
}

static const struct snd_malloc_ops snd_dma_noncontig_ops = {
        .alloc = snd_dma_noncontig_alloc,
        .free = snd_dma_noncontig_free,
        .mmap = snd_dma_noncontig_mmap,
        .sync = snd_dma_noncontig_sync,
        .get_addr = snd_dma_noncontig_get_addr,
        .get_page = snd_dma_noncontig_get_page,
        .get_chunk_size = snd_dma_noncontig_get_chunk_size,
};

/* x86-specific SG-buffer with WC pages */
#ifdef CONFIG_SND_DMA_SGBUF
#define sg_wc_address(it) ((unsigned long)page_address(sg_page_iter_page(it)))

static void *snd_dma_sg_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
        void *p = snd_dma_noncontig_alloc(dmab, size);
        struct sg_table *sgt = dmab->private_data;
        struct sg_page_iter iter;

        if (!p)
                return NULL;
        if (dmab->dev.type != SNDRV_DMA_TYPE_DEV_WC_SG)
                return p;
        for_each_sgtable_page(sgt, &iter, 0)
                set_memory_wc(sg_wc_address(&iter), 1);
        return p;
}

static void snd_dma_sg_wc_free(struct snd_dma_buffer *dmab)
{
        struct sg_table *sgt = dmab->private_data;
        struct sg_page_iter iter;

        for_each_sgtable_page(sgt, &iter, 0)
                set_memory_wb(sg_wc_address(&iter), 1);
        snd_dma_noncontig_free(dmab);
}

static int snd_dma_sg_wc_mmap(struct snd_dma_buffer *dmab,
                              struct vm_area_struct *area)
{
        area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
        return dma_mmap_noncontiguous(dmab->dev.dev, area,
                                      dmab->bytes, dmab->private_data);
}

static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
        .alloc = snd_dma_sg_wc_alloc,
        .free = snd_dma_sg_wc_free,
        .mmap = snd_dma_sg_wc_mmap,
        .sync = snd_dma_noncontig_sync,
        .get_addr = snd_dma_noncontig_get_addr,
        .get_page = snd_dma_noncontig_get_page,
        .get_chunk_size = snd_dma_noncontig_get_chunk_size,
};

/* manual page allocations with wc setup */
static void *do_alloc_fallback_pages(struct device *dev, size_t size,
                                     dma_addr_t *addr, bool wc)
{
        gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
        void *p;

 again:
        p = do_alloc_pages(size, addr, gfp);
        if (!p || (*addr + size - 1) & ~dev->coherent_dma_mask) {
                if (IS_ENABLED(CONFIG_ZONE_DMA32) && !(gfp & GFP_DMA32)) {
                        gfp |= GFP_DMA32;
                        goto again;
                }
                if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
                        gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
                        goto again;
                }
        }
        if (p && wc)
                set_memory_wc((unsigned long)(p), size >> PAGE_SHIFT);
        return p;
}

static void do_free_fallback_pages(void *p, size_t size, bool wc)
{
        if (wc)
                set_memory_wb((unsigned long)(p), size >> PAGE_SHIFT);
        free_pages_exact(p, size);
}

/* Fallback SG-buffer allocations for x86 */
struct snd_dma_sg_fallback {
        size_t count;
        struct page **pages;
        dma_addr_t *addrs;
};

static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
                                       struct snd_dma_sg_fallback *sgbuf)
{
        bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
        size_t i;

        for (i = 0; i < sgbuf->count && sgbuf->pages[i]; i++)
                do_free_fallback_pages(page_address(sgbuf->pages[i]), PAGE_SIZE, wc);
        kvfree(sgbuf->pages);
        kvfree(sgbuf->addrs);
        kfree(sgbuf);
}

static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
{
        struct snd_dma_sg_fallback *sgbuf;
        struct page **pages;
        size_t i, count;
        void *p;
        bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;

        sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
        if (!sgbuf)
                return NULL;
        count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                goto error;
        sgbuf->pages = pages;
        sgbuf->addrs = kvcalloc(count, sizeof(*sgbuf->addrs), GFP_KERNEL);
        if (!sgbuf->addrs)
                goto error;

        for (i = 0; i < count; sgbuf->count++, i++) {
                p = do_alloc_fallback_pages(dmab->dev.dev, PAGE_SIZE,
                                            &sgbuf->addrs[i], wc);
                if (!p)
                        goto error;
                sgbuf->pages[i] = virt_to_page(p);
        }

        p = vmap(pages, count, VM_MAP, PAGE_KERNEL);
        if (!p)
                goto error;
        dmab->private_data = sgbuf;
        /* store the first page address for convenience */
        dmab->addr = snd_sgbuf_get_addr(dmab, 0);
        return p;

 error:
        __snd_dma_sg_fallback_free(dmab, sgbuf);
        return NULL;
}

static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab)
{
        vunmap(dmab->area);
        __snd_dma_sg_fallback_free(dmab, dmab->private_data);
}

static int snd_dma_sg_fallback_mmap(struct snd_dma_buffer *dmab,
                                    struct vm_area_struct *area)
{
        struct snd_dma_sg_fallback *sgbuf = dmab->private_data;

        if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
                area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
        return vm_map_pages(area, sgbuf->pages, sgbuf->count);
}

static const struct snd_malloc_ops snd_dma_sg_fallback_ops = {
        .alloc = snd_dma_sg_fallback_alloc,
        .free = snd_dma_sg_fallback_free,
        .mmap = snd_dma_sg_fallback_mmap,
        /* reuse vmalloc helpers */
        .get_addr = snd_dma_vmalloc_get_addr,
        .get_page = snd_dma_vmalloc_get_page,
        .get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};
#endif /* CONFIG_SND_DMA_SGBUF */

/*
 * Non-coherent pages allocator
 */
static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size)
{
        void *p;

        p = dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
                                  dmab->dev.dir, DEFAULT_GFP);
        if (p)
                dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->addr);
        return p;
}

static void snd_dma_noncoherent_free(struct snd_dma_buffer *dmab)
{
        dma_free_noncoherent(dmab->dev.dev, dmab->bytes, dmab->area,
                             dmab->addr, dmab->dev.dir);
}

static int snd_dma_noncoherent_mmap(struct snd_dma_buffer *dmab,
                                    struct vm_area_struct *area)
{
        area->vm_page_prot = vm_get_page_prot(area->vm_flags);
        return dma_mmap_pages(dmab->dev.dev, area,
                              area->vm_end - area->vm_start,
                              virt_to_page(dmab->area));
}

static void snd_dma_noncoherent_sync(struct snd_dma_buffer *dmab,
                                     enum snd_dma_sync_mode mode)
{
        if (mode == SNDRV_DMA_SYNC_CPU) {
                if (dmab->dev.dir != DMA_TO_DEVICE)
                        dma_sync_single_for_cpu(dmab->dev.dev, dmab->addr,
                                                dmab->bytes, dmab->dev.dir);
        } else {
                if (dmab->dev.dir != DMA_FROM_DEVICE)
                        dma_sync_single_for_device(dmab->dev.dev, dmab->addr,
                                                   dmab->bytes, dmab->dev.dir);
        }
}

static const struct snd_malloc_ops snd_dma_noncoherent_ops = {
        .alloc = snd_dma_noncoherent_alloc,
        .free = snd_dma_noncoherent_free,
        .mmap = snd_dma_noncoherent_mmap,
        .sync = snd_dma_noncoherent_sync,
};

#endif /* CONFIG_HAS_DMA */

/*
 * Entry points
 */
static const struct snd_malloc_ops *dma_ops[] = {
        [SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
        [SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
#ifdef CONFIG_HAS_DMA
        [SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
        [SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
        [SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops,
        [SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops,
#ifdef CONFIG_SND_DMA_SGBUF
        [SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_wc_ops,
#endif
#ifdef CONFIG_GENERIC_ALLOCATOR
        [SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
#endif /* CONFIG_GENERIC_ALLOCATOR */
#ifdef CONFIG_SND_DMA_SGBUF
        [SNDRV_DMA_TYPE_DEV_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
        [SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
#endif
#endif /* CONFIG_HAS_DMA */
};

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
{
        if (WARN_ON_ONCE(!dmab))
                return NULL;
        if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
                         dmab->dev.type >= ARRAY_SIZE(dma_ops)))
                return NULL;
        return dma_ops[dmab->dev.type];
}