// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Takashi Iwai <tiwai@suse.de>
 *
 *  Generic memory allocators
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/dma-map-ops.h>
#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <sound/memalloc.h>
struct snd_malloc_ops {
	void *(*alloc)(struct snd_dma_buffer *dmab, size_t size);
	void (*free)(struct snd_dma_buffer *dmab);
	dma_addr_t (*get_addr)(struct snd_dma_buffer *dmab, size_t offset);
	struct page *(*get_page)(struct snd_dma_buffer *dmab, size_t offset);
	unsigned int (*get_chunk_size)(struct snd_dma_buffer *dmab,
				       unsigned int ofs, unsigned int size);
	int (*mmap)(struct snd_dma_buffer *dmab, struct vm_area_struct *area);
	void (*sync)(struct snd_dma_buffer *dmab, enum snd_dma_sync_mode mode);
};

#define DEFAULT_GFP \
	(GFP_KERNEL | \
	 __GFP_RETRY_MAYFAIL | /* don't trigger OOM-killer */ \
	 __GFP_NOWARN)   /* no stack trace print - this call is non-critical */
static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);

static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (WARN_ON_ONCE(!ops || !ops->alloc))
		return NULL;
	return ops->alloc(dmab, size);
}
/**
 * snd_dma_alloc_dir_pages - allocate the buffer area according to the given
 *	type and direction
 * @type: the DMA buffer type
 * @device: the device pointer
 * @dir: DMA direction
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 *	otherwise a negative value on error.
 */
int snd_dma_alloc_dir_pages(int type, struct device *device,
			    enum dma_data_direction dir, size_t size,
			    struct snd_dma_buffer *dmab)
{
	if (WARN_ON(!size))
		return -ENXIO;
	if (WARN_ON(!dmab))
		return -ENXIO;

	size = PAGE_ALIGN(size);
	dmab->dev.type = type;
	dmab->dev.dev = device;
	dmab->dev.dir = dir;
	dmab->bytes = 0;
	dmab->addr = 0;
	dmab->private_data = NULL;
	dmab->area = __snd_dma_alloc_pages(dmab, size);
	if (!dmab->area)
		return -ENOMEM;
	dmab->bytes = size;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_dir_pages);
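
/*
 * Example (illustrative caller sketch, not part of this file): a driver
 * typically allocates its DMA ring buffer like this; the "chip" pointer and
 * the 64 KiB size are hypothetical.
 *
 *	struct snd_dma_buffer buf;
 *	int err;
 *
 *	err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV, chip->dev,
 *				      DMA_BIDIRECTIONAL, 64 * 1024, &buf);
 *	if (err < 0)
 *		return err;
 *	(buf.area is the CPU address, buf.addr the DMA address)
 *	...
 *	snd_dma_free_pages(&buf);
 */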
/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.  When no space is left, this function reduces the size and
 * tries to allocate again.  The size actually allocated is stored in
 * dmab->bytes.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 *	otherwise a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
				 struct snd_dma_buffer *dmab)
{
	int err;

	while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
		if (err != -ENOMEM)
			return err;
		if (size <= PAGE_SIZE)
			return -ENOMEM;
		size >>= 1;
		size = PAGE_SIZE << get_order(size);
	}
	if (!dmab->area)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
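
/*
 * Example (illustrative sketch, not part of this file): when a large buffer
 * is preferred but not strictly required, a driver may request the maximum
 * and accept whatever could be allocated; "chip" is hypothetical.
 *
 *	struct snd_dma_buffer buf;
 *
 *	if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, chip->dev,
 *					 1024 * 1024, &buf) < 0)
 *		return -ENOMEM;
 *	(buf.bytes now holds the size actually obtained, possibly < 1 MiB)
 */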
/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the allocated buffer via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->free)
		ops->free(dmab);
}
EXPORT_SYMBOL(snd_dma_free_pages);
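
/*
 * Example (illustrative sketch, not part of this file): a release path
 * usually frees the buffer only when the allocation actually succeeded;
 * the "mychip" structure and its "buf" member are hypothetical.
 *
 *	static void mychip_release_buffer(struct mychip *chip)
 *	{
 *		if (chip->buf.area)
 *			snd_dma_free_pages(&chip->buf);
 *	}
 */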
/* called by devres */
static void __snd_release_pages(struct device *dev, void *res)
{
	snd_dma_free_pages(res);
}
/**
 * snd_devm_alloc_dir_pages - allocate the buffer and manage with devres
 * @dev: the device pointer
 * @type: the DMA buffer type
 * @dir: DMA direction
 * @size: the buffer size to allocate
 *
 * Allocate buffer pages depending on the given type and manage using devres.
 * The pages will be released automatically at the device removal.
 *
 * Unlike snd_dma_alloc_pages(), this function requires the real device pointer,
 * hence it can't work with SNDRV_DMA_TYPE_CONTINUOUS or
 * SNDRV_DMA_TYPE_VMALLOC type.
 *
 * Return: the snd_dma_buffer object at success, or NULL if failed
 */
struct snd_dma_buffer *
snd_devm_alloc_dir_pages(struct device *dev, int type,
			 enum dma_data_direction dir, size_t size)
{
	struct snd_dma_buffer *dmab;
	int err;

	if (WARN_ON(type == SNDRV_DMA_TYPE_CONTINUOUS ||
		    type == SNDRV_DMA_TYPE_VMALLOC))
		return NULL;

	dmab = devres_alloc(__snd_release_pages, sizeof(*dmab), GFP_KERNEL);
	if (!dmab)
		return NULL;

	err = snd_dma_alloc_dir_pages(type, dev, dir, size, dmab);
	if (err < 0) {
		devres_free(dmab);
		return NULL;
	}

	devres_add(dev, dmab);
	return dmab;
}
EXPORT_SYMBOL_GPL(snd_devm_alloc_dir_pages);
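
/*
 * Example (illustrative sketch, not part of this file): with the devres
 * variant the caller keeps only the returned pointer and never frees it
 * explicitly; "dev" comes from the caller's probe path.
 *
 *	struct snd_dma_buffer *buf;
 *
 *	buf = snd_devm_alloc_dir_pages(dev, SNDRV_DMA_TYPE_DEV,
 *				       DMA_TO_DEVICE, 256 * 1024);
 *	if (!buf)
 *		return -ENOMEM;
 *	(buf is released automatically when dev is unbound)
 */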
/**
 * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
 * @dmab: buffer allocation information
 * @area: VM area information
 *
 * Return: zero if successful, or a negative error code
 */
int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
			struct vm_area_struct *area)
{
	const struct snd_malloc_ops *ops;

	if (!dmab)
		return -ENOENT;
	ops = snd_dma_get_ops(dmab);
	if (ops && ops->mmap)
		return ops->mmap(dmab, area);
	else
		return -ENOENT;
}
EXPORT_SYMBOL(snd_dma_buffer_mmap);
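
/*
 * Example (illustrative sketch, not part of this file): a PCM driver using
 * these buffers can route its mmap callback straight through; the substream
 * wiring shown is only the generic ALSA pattern, with hypothetical naming.
 *
 *	static int mychip_pcm_mmap(struct snd_pcm_substream *substream,
 *				   struct vm_area_struct *area)
 *	{
 *		return snd_dma_buffer_mmap(snd_pcm_get_dma_buf(substream),
 *					   area);
 *	}
 */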
#ifdef CONFIG_HAS_DMA
/**
 * snd_dma_buffer_sync - sync DMA buffer between CPU and device
 * @dmab: buffer allocation information
 * @mode: sync mode
 */
void snd_dma_buffer_sync(struct snd_dma_buffer *dmab,
			 enum snd_dma_sync_mode mode)
{
	const struct snd_malloc_ops *ops;

	if (!dmab || !dmab->dev.need_sync)
		return;
	ops = snd_dma_get_ops(dmab);
	if (ops && ops->sync)
		ops->sync(dmab, mode);
}
EXPORT_SYMBOL_GPL(snd_dma_buffer_sync);
#endif /* CONFIG_HAS_DMA */
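
/*
 * Example (illustrative sketch, not part of this file): callers bracket CPU
 * accesses to a possibly non-coherent buffer with the two sync modes; the
 * memcpy() calls are only placeholders.  The call is a no-op when
 * dmab->dev.need_sync is false, so it is safe to call unconditionally.
 *
 *	(before the CPU reads data written by the device)
 *	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_CPU);
 *	memcpy(dst, dmab->area + offset, bytes);
 *
 *	(after the CPU writes data for the device to consume)
 *	memcpy(dmab->area + offset, src, bytes);
 *	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_DEVICE);
 */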
/**
 * snd_sgbuf_get_addr - return the physical address at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the physical address
 */
dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_addr)
		return ops->get_addr(dmab, offset);
	else
		return dmab->addr + offset;
}
EXPORT_SYMBOL(snd_sgbuf_get_addr);
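
/*
 * Example (illustrative sketch, not part of this file): a driver that
 * programs one hardware descriptor per period could fetch the DMA address
 * of each period start like this; period_bytes, chip->regs and the
 * REG_DESC_* registers are hypothetical.
 *
 *	dma_addr_t addr = snd_sgbuf_get_addr(dmab, period * period_bytes);
 *
 *	writel(lower_32_bits(addr), chip->regs + REG_DESC_LO);
 *	writel(upper_32_bits(addr), chip->regs + REG_DESC_HI);
 */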
/**
 * snd_sgbuf_get_page - return the physical page at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the page pointer
 */
struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_page)
		return ops->get_page(dmab, offset);
	else
		return virt_to_page(dmab->area + offset);
}
EXPORT_SYMBOL(snd_sgbuf_get_page);
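
/*
 * Example (illustrative sketch, not part of this file): a fault-based mmap
 * implementation could look up the page backing a given user offset; the
 * vm_fault handling shown is only the generic pattern, not code from this
 * file.
 *
 *	static vm_fault_t mychip_buf_fault(struct vm_fault *vmf)
 *	{
 *		struct snd_dma_buffer *dmab = vmf->vma->vm_private_data;
 *
 *		vmf->page = snd_sgbuf_get_page(dmab, vmf->pgoff << PAGE_SHIFT);
 *		get_page(vmf->page);
 *		return 0;
 *	}
 */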
/**
 * snd_sgbuf_get_chunk_size - compute the max chunk size with continuous pages
 *	on sg-buffer
 * @dmab: buffer allocation information
 * @ofs: offset in the ring buffer
 * @size: the requested size
 *
 * Return: the chunk size
 */
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
				      unsigned int ofs, unsigned int size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_chunk_size)
		return ops->get_chunk_size(dmab, ofs, size);
	else
		return size;
}
EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
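
/*
 * Example (illustrative sketch, not part of this file): hardware that needs
 * physically contiguous chunks can walk the buffer piecewise, asking for the
 * largest contiguous run at each offset; the per-chunk programming is left
 * as a placeholder.
 *
 *	unsigned int ofs = 0, chunk;
 *
 *	while (ofs < dmab->bytes) {
 *		chunk = snd_sgbuf_get_chunk_size(dmab, ofs, dmab->bytes - ofs);
 *		(program one descriptor: snd_sgbuf_get_addr(dmab, ofs), chunk)
 *		ofs += chunk;
 *	}
 */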
/*
 * Continuous pages allocator
 */
static void *do_alloc_pages(struct device *dev, size_t size, dma_addr_t *addr,
			    bool wc)
{
	void *p;
	gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;

 again:
	p = alloc_pages_exact(size, gfp);
	if (!p)
		return NULL;
	*addr = page_to_phys(virt_to_page(p));
	if (!dev)
		return p;
	if ((*addr + size - 1) & ~dev->coherent_dma_mask) {
		if (IS_ENABLED(CONFIG_ZONE_DMA32) && !(gfp & GFP_DMA32)) {
			gfp |= GFP_DMA32;
			goto again;
		}
		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}
#ifdef CONFIG_SND_DMA_SGBUF
	if (wc)
		set_memory_wc((unsigned long)(p), size >> PAGE_SHIFT);
#endif
	return p;
}

static void do_free_pages(void *p, size_t size, bool wc)
{
#ifdef CONFIG_SND_DMA_SGBUF
	if (wc)
		set_memory_wb((unsigned long)(p), size >> PAGE_SHIFT);
#endif
	free_pages_exact(p, size);
}
static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return do_alloc_pages(dmab->dev.dev, size, &dmab->addr, false);
}

static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
{
	do_free_pages(dmab->area, dmab->bytes, false);
}

static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
				   struct vm_area_struct *area)
{
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_continuous_ops = {
	.alloc = snd_dma_continuous_alloc,
	.free = snd_dma_continuous_free,
	.mmap = snd_dma_continuous_mmap,
};
/*
 * VMALLOC allocator
 */
static void *snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return vmalloc(size);
}

static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
{
	vfree(dmab->area);
}

static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab,
				struct vm_area_struct *area)
{
	return remap_vmalloc_range(area, dmab->area, 0);
}

#define get_vmalloc_page_addr(dmab, offset) \
	page_to_phys(vmalloc_to_page((dmab)->area + (offset)))

static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
					   size_t offset)
{
	return get_vmalloc_page_addr(dmab, offset) + offset % PAGE_SIZE;
}

static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	return vmalloc_to_page(dmab->area + offset);
}

static unsigned int
snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
			       unsigned int ofs, unsigned int size)
{
	unsigned int start, end;
	unsigned long addr;

	start = ALIGN_DOWN(ofs, PAGE_SIZE);
	end = ofs + size - 1; /* the last byte address */
	/* check page continuity */
	addr = get_vmalloc_page_addr(dmab, start);
	for (;;) {
		start += PAGE_SIZE;
		if (start > end)
			break;
		addr += PAGE_SIZE;
		if (get_vmalloc_page_addr(dmab, start) != addr)
			return start - ofs;
	}
	/* ok, all on continuous pages */
	return size;
}
static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
	.alloc = snd_dma_vmalloc_alloc,
	.free = snd_dma_vmalloc_free,
	.mmap = snd_dma_vmalloc_mmap,
	.get_addr = snd_dma_vmalloc_get_addr,
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};
#ifdef CONFIG_HAS_DMA
/*
 * IRAM allocator
 */
#ifdef CONFIG_GENERIC_ALLOCATOR
static void *snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct device *dev = dmab->dev.dev;
	struct gen_pool *pool;
	void *p;

	if (dev->of_node) {
		pool = of_gen_pool_get(dev->of_node, "iram", 0);
		/* Assign the pool into private_data field */
		dmab->private_data = pool;

		p = gen_pool_dma_alloc_align(pool, size, &dmab->addr, PAGE_SIZE);
		if (p)
			return p;
	}

	/* Internal memory might have limited size and no enough space,
	 * so if we fail to malloc, try to fetch memory traditionally.
	 */
	dmab->dev.type = SNDRV_DMA_TYPE_DEV;
	return __snd_dma_alloc_pages(dmab, size);
}

static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
{
	struct gen_pool *pool = dmab->private_data;

	if (pool && dmab->area)
		gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
}

static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab,
			     struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_iram_ops = {
	.alloc = snd_dma_iram_alloc,
	.free = snd_dma_iram_free,
	.mmap = snd_dma_iram_mmap,
};
#endif /* CONFIG_GENERIC_ALLOCATOR */
/*
 * Coherent device pages allocator
 */
static void *snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
{
	dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
			    struct vm_area_struct *area)
{
	return dma_mmap_coherent(dmab->dev.dev, area,
				 dmab->area, dmab->addr, dmab->bytes);
}

static const struct snd_malloc_ops snd_dma_dev_ops = {
	.alloc = snd_dma_dev_alloc,
	.free = snd_dma_dev_free,
	.mmap = snd_dma_dev_mmap,
};
/*
 * Write-combined pages
 */
#ifdef CONFIG_SND_DMA_SGBUF
/* x86-specific allocations */
static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	void *p = do_alloc_pages(dmab->dev.dev, size, &dmab->addr, true);

	if (!p)
		return NULL;
	dmab->addr = dma_map_single(dmab->dev.dev, p, size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dmab->dev.dev, dmab->addr)) {
		do_free_pages(dmab->area, size, true);
		return NULL;
	}
	return p;
}

static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
{
	dma_unmap_single(dmab->dev.dev, dmab->addr, dmab->bytes,
			 DMA_BIDIRECTIONAL);
	do_free_pages(dmab->area, dmab->bytes, true);
}

static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
			   struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return dma_mmap_coherent(dmab->dev.dev, area,
				 dmab->area, dmab->addr, dmab->bytes);
}
#else
static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
{
	dma_free_wc(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
			   struct vm_area_struct *area)
{
	return dma_mmap_wc(dmab->dev.dev, area,
			   dmab->area, dmab->addr, dmab->bytes);
}
#endif

static const struct snd_malloc_ops snd_dma_wc_ops = {
	.alloc = snd_dma_wc_alloc,
	.free = snd_dma_wc_free,
	.mmap = snd_dma_wc_mmap,
};
/*
 * Non-contiguous pages allocator
 */
static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct sg_table *sgt;
	void *p;

	sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
				      DEFAULT_GFP, 0);
	if (!sgt)
		return NULL;

	dmab->dev.need_sync = dma_need_sync(dmab->dev.dev,
					    sg_dma_address(sgt->sgl));
	p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
	if (p) {
		dmab->private_data = sgt;
		/* store the first page address for convenience */
		dmab->addr = snd_sgbuf_get_addr(dmab, 0);
	} else {
		dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir);
	}
	return p;
}

static void snd_dma_noncontig_free(struct snd_dma_buffer *dmab)
{
	dma_vunmap_noncontiguous(dmab->dev.dev, dmab->area);
	dma_free_noncontiguous(dmab->dev.dev, dmab->bytes, dmab->private_data,
			       dmab->dev.dir);
}

static int snd_dma_noncontig_mmap(struct snd_dma_buffer *dmab,
				  struct vm_area_struct *area)
{
	return dma_mmap_noncontiguous(dmab->dev.dev, area,
				      dmab->bytes, dmab->private_data);
}

static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab,
				   enum snd_dma_sync_mode mode)
{
	if (mode == SNDRV_DMA_SYNC_CPU) {
		if (dmab->dev.dir == DMA_TO_DEVICE)
			return;
		invalidate_kernel_vmap_range(dmab->area, dmab->bytes);
		dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data,
					 dmab->dev.dir);
	} else {
		if (dmab->dev.dir == DMA_FROM_DEVICE)
			return;
		flush_kernel_vmap_range(dmab->area, dmab->bytes);
		dma_sync_sgtable_for_device(dmab->dev.dev, dmab->private_data,
					    dmab->dev.dir);
	}
}

static inline void snd_dma_noncontig_iter_set(struct snd_dma_buffer *dmab,
					      struct sg_page_iter *piter,
					      size_t offset)
{
	struct sg_table *sgt = dmab->private_data;

	__sg_page_iter_start(piter, sgt->sgl, sgt->orig_nents,
			     offset >> PAGE_SHIFT);
}

static dma_addr_t snd_dma_noncontig_get_addr(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	struct sg_dma_page_iter iter;

	snd_dma_noncontig_iter_set(dmab, &iter.base, offset);
	__sg_page_iter_dma_next(&iter);
	return sg_page_iter_dma_address(&iter) + offset % PAGE_SIZE;
}

static struct page *snd_dma_noncontig_get_page(struct snd_dma_buffer *dmab,
					       size_t offset)
{
	struct sg_page_iter iter;

	snd_dma_noncontig_iter_set(dmab, &iter, offset);
	__sg_page_iter_next(&iter);
	return sg_page_iter_page(&iter);
}

static unsigned int
snd_dma_noncontig_get_chunk_size(struct snd_dma_buffer *dmab,
				 unsigned int ofs, unsigned int size)
{
	struct sg_dma_page_iter iter;
	unsigned int start, end;
	unsigned long addr;

	start = ALIGN_DOWN(ofs, PAGE_SIZE);
	end = ofs + size - 1; /* the last byte address */
	snd_dma_noncontig_iter_set(dmab, &iter.base, start);
	if (!__sg_page_iter_dma_next(&iter))
		return 0;
	/* check page continuity */
	addr = sg_page_iter_dma_address(&iter);
	for (;;) {
		start += PAGE_SIZE;
		if (start > end)
			break;
		addr += PAGE_SIZE;
		if (!__sg_page_iter_dma_next(&iter) ||
		    sg_page_iter_dma_address(&iter) != addr)
			return start - ofs;
	}
	/* ok, all on continuous pages */
	return size;
}

static const struct snd_malloc_ops snd_dma_noncontig_ops = {
	.alloc = snd_dma_noncontig_alloc,
	.free = snd_dma_noncontig_free,
	.mmap = snd_dma_noncontig_mmap,
	.sync = snd_dma_noncontig_sync,
	.get_addr = snd_dma_noncontig_get_addr,
	.get_page = snd_dma_noncontig_get_page,
	.get_chunk_size = snd_dma_noncontig_get_chunk_size,
};
#ifdef CONFIG_SND_DMA_SGBUF
/* Fallback SG-buffer allocations for x86 */
struct snd_dma_sg_fallback {
	struct sg_table sgt; /* used by get_addr - must be the first item */
	size_t count;
	struct page **pages;
	unsigned int *npages;
};

static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
				       struct snd_dma_sg_fallback *sgbuf)
{
	bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG;
	size_t i, size;

	if (sgbuf->pages && sgbuf->npages) {
		i = 0;
		while (i < sgbuf->count) {
			size = sgbuf->npages[i];
			if (!size)
				break;
			do_free_pages(page_address(sgbuf->pages[i]),
				      size << PAGE_SHIFT, wc);
			i += size;
		}
	}
	kvfree(sgbuf->pages);
	kvfree(sgbuf->npages);
	kfree(sgbuf);
}
/* fallback manual S/G buffer allocations */
static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG;
	struct snd_dma_sg_fallback *sgbuf;
	struct page **pagep, *curp;
	size_t chunk;
	dma_addr_t addr;
	unsigned int idx, npages;
	void *p;

	sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
	if (!sgbuf)
		return NULL;
	size = PAGE_ALIGN(size);
	sgbuf->count = size >> PAGE_SHIFT;
	sgbuf->pages = kvcalloc(sgbuf->count, sizeof(*sgbuf->pages), GFP_KERNEL);
	sgbuf->npages = kvcalloc(sgbuf->count, sizeof(*sgbuf->npages), GFP_KERNEL);
	if (!sgbuf->pages || !sgbuf->npages)
		goto error;

	pagep = sgbuf->pages;
	chunk = size;
	idx = 0;
	while (size > 0) {
		chunk = min(size, chunk);
		p = do_alloc_pages(dmab->dev.dev, chunk, &addr, wc);
		if (!p) {
			if (chunk <= PAGE_SIZE)
				goto error;
			chunk >>= 1;
			chunk = PAGE_SIZE << get_order(chunk);
			continue;
		}

		size -= chunk;
		/* fill pages */
		npages = chunk >> PAGE_SHIFT;
		sgbuf->npages[idx] = npages;
		idx += npages;
		curp = virt_to_page(p);
		while (npages--)
			*pagep++ = curp++;
	}

	if (sg_alloc_table_from_pages(&sgbuf->sgt, sgbuf->pages, sgbuf->count,
				      0, sgbuf->count << PAGE_SHIFT, GFP_KERNEL))
		goto error;

	if (dma_map_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0))
		goto error_dma_map;

	p = vmap(sgbuf->pages, sgbuf->count, VM_MAP, PAGE_KERNEL);
	if (!p)
		goto error_vmap;

	dmab->private_data = sgbuf;
	/* store the first page address for convenience */
	dmab->addr = snd_sgbuf_get_addr(dmab, 0);
	return p;

 error_vmap:
	dma_unmap_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0);
 error_dma_map:
	sg_free_table(&sgbuf->sgt);
 error:
	__snd_dma_sg_fallback_free(dmab, sgbuf);
	return NULL;
}
static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab)
{
	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;

	vunmap(dmab->area);
	dma_unmap_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(&sgbuf->sgt);
	__snd_dma_sg_fallback_free(dmab, dmab->private_data);
}

static int snd_dma_sg_fallback_mmap(struct snd_dma_buffer *dmab,
				    struct vm_area_struct *area)
{
	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;

	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return vm_map_pages(area, sgbuf->pages, sgbuf->count);
}

static void *snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	int type = dmab->dev.type;
	void *p;

	/* try the standard DMA API allocation at first */
	if (type == SNDRV_DMA_TYPE_DEV_WC_SG)
		dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC;
	else
		dmab->dev.type = SNDRV_DMA_TYPE_DEV;
	p = __snd_dma_alloc_pages(dmab, size);
	if (p)
		return p;

	dmab->dev.type = type; /* restore the type */
	return snd_dma_sg_fallback_alloc(dmab, size);
}

static const struct snd_malloc_ops snd_dma_sg_ops = {
	.alloc = snd_dma_sg_alloc,
	.free = snd_dma_sg_fallback_free,
	.mmap = snd_dma_sg_fallback_mmap,
	/* reuse noncontig helper */
	.get_addr = snd_dma_noncontig_get_addr,
	/* reuse vmalloc helpers */
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};
#endif /* CONFIG_SND_DMA_SGBUF */
/*
 * Non-coherent pages allocator
 */
static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	void *p;

	p = dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
				  dmab->dev.dir, DEFAULT_GFP);
	if (p)
		dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->addr);
	return p;
}

static void snd_dma_noncoherent_free(struct snd_dma_buffer *dmab)
{
	dma_free_noncoherent(dmab->dev.dev, dmab->bytes, dmab->area,
			     dmab->addr, dmab->dev.dir);
}

static int snd_dma_noncoherent_mmap(struct snd_dma_buffer *dmab,
				    struct vm_area_struct *area)
{
	area->vm_page_prot = vm_get_page_prot(area->vm_flags);
	return dma_mmap_pages(dmab->dev.dev, area,
			      area->vm_end - area->vm_start,
			      virt_to_page(dmab->area));
}

static void snd_dma_noncoherent_sync(struct snd_dma_buffer *dmab,
				     enum snd_dma_sync_mode mode)
{
	if (mode == SNDRV_DMA_SYNC_CPU) {
		if (dmab->dev.dir != DMA_TO_DEVICE)
			dma_sync_single_for_cpu(dmab->dev.dev, dmab->addr,
						dmab->bytes, dmab->dev.dir);
	} else {
		if (dmab->dev.dir != DMA_FROM_DEVICE)
			dma_sync_single_for_device(dmab->dev.dev, dmab->addr,
						   dmab->bytes, dmab->dev.dir);
	}
}

static const struct snd_malloc_ops snd_dma_noncoherent_ops = {
	.alloc = snd_dma_noncoherent_alloc,
	.free = snd_dma_noncoherent_free,
	.mmap = snd_dma_noncoherent_mmap,
	.sync = snd_dma_noncoherent_sync,
};

#endif /* CONFIG_HAS_DMA */
/*
 * Entry points
 */
static const struct snd_malloc_ops *snd_dma_ops[] = {
	[SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
	[SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
#ifdef CONFIG_HAS_DMA
	[SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
	[SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
	[SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops,
	[SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops,
#ifdef CONFIG_SND_DMA_SGBUF
	[SNDRV_DMA_TYPE_DEV_SG] = &snd_dma_sg_ops,
	[SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_ops,
#endif
#ifdef CONFIG_GENERIC_ALLOCATOR
	[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
#endif /* CONFIG_GENERIC_ALLOCATOR */
#endif /* CONFIG_HAS_DMA */
};

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
{
	if (WARN_ON_ONCE(!dmab))
		return NULL;
	if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
			 dmab->dev.type >= ARRAY_SIZE(snd_dma_ops)))
		return NULL;
	return snd_dma_ops[dmab->dev.type];
}