// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Takashi Iwai <tiwai@suse.de>
 *
 *  Generic memory allocators
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/dma-map-ops.h>
#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <sound/memalloc.h>

struct snd_malloc_ops {
	void *(*alloc)(struct snd_dma_buffer *dmab, size_t size);
	void (*free)(struct snd_dma_buffer *dmab);
	dma_addr_t (*get_addr)(struct snd_dma_buffer *dmab, size_t offset);
	struct page *(*get_page)(struct snd_dma_buffer *dmab, size_t offset);
	unsigned int (*get_chunk_size)(struct snd_dma_buffer *dmab,
				       unsigned int ofs, unsigned int size);
	int (*mmap)(struct snd_dma_buffer *dmab, struct vm_area_struct *area);
	void (*sync)(struct snd_dma_buffer *dmab, enum snd_dma_sync_mode mode);
};

#define DEFAULT_GFP \
	(GFP_KERNEL | \
	 __GFP_RETRY_MAYFAIL | /* don't trigger OOM-killer */ \
	 __GFP_NOWARN)	       /* no stack trace print - this call is non-critical */

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);

static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (WARN_ON_ONCE(!ops || !ops->alloc))
		return NULL;
	return ops->alloc(dmab, size);
}

/**
 * snd_dma_alloc_dir_pages - allocate the buffer area according to the given
 *	type and direction
 * @type: the DMA buffer type
 * @device: the device pointer
 * @dir: DMA direction
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_dir_pages(int type, struct device *device,
			    enum dma_data_direction dir, size_t size,
			    struct snd_dma_buffer *dmab)
{
	if (WARN_ON(!size))
		return -ENXIO;
	if (WARN_ON(!dmab))
		return -ENXIO;

	size = PAGE_ALIGN(size);
	dmab->dev.type = type;
	dmab->dev.dev = device;
	dmab->dev.dir = dir;
	dmab->bytes = 0;
	dmab->addr = 0;
	dmab->private_data = NULL;
	dmab->area = __snd_dma_alloc_pages(dmab, size);
	if (!dmab->area)
		return -ENOMEM;
	dmab->bytes = size;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_dir_pages);
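
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * driver call sequence; "chip" and "chip->dev" are hypothetical driver
 * objects standing in for the real device.
 *
 *	struct snd_dma_buffer dmab;
 *	int err;
 *
 *	err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV, chip->dev,
 *				      DMA_BIDIRECTIONAL, 64 * 1024, &dmab);
 *	if (err < 0)
 *		return err;
 *	...use dmab.area (CPU address) and dmab.addr (DMA address)...
 *	snd_dma_free_pages(&dmab);
 */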

/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.  When no space is left, this function reduces the size and
 * tries to allocate again.  The size actually allocated is stored in
 * the dmab->bytes field.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
				 struct snd_dma_buffer *dmab)
{
	int err;

	while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
		if (err != -ENOMEM)
			return err;
		if (size <= PAGE_SIZE)
			return -ENOMEM;
		size >>= 1;
		size = PAGE_SIZE << get_order(size);
	}
	if (!dmab->area)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
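
/*
 * Example (illustrative sketch): when a large buffer is desirable but not
 * mandatory, the fallback variant keeps shrinking the request until an
 * allocation succeeds; the size actually granted must be read back from
 * dmab.bytes. "chip" is again a hypothetical driver object.
 *
 *	struct snd_dma_buffer dmab;
 *
 *	if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, chip->dev,
 *					 256 * 1024, &dmab) < 0)
 *		return -ENOMEM;
 *	chip->buffer_bytes = dmab.bytes;   ...may be less than requested...
 */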

/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the buffer previously allocated via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->free)
		ops->free(dmab);
}
EXPORT_SYMBOL(snd_dma_free_pages);

/* called by devres */
static void __snd_release_pages(struct device *dev, void *res)
{
	snd_dma_free_pages(res);
}

/**
 * snd_devm_alloc_dir_pages - allocate the buffer and manage with devres
 * @dev: the device pointer
 * @type: the DMA buffer type
 * @dir: DMA direction
 * @size: the buffer size to allocate
 *
 * Allocate buffer pages depending on the given type and manage using devres.
 * The pages will be released automatically at the device removal.
 *
 * Unlike snd_dma_alloc_pages(), this function requires the real device pointer,
 * hence it can't work with SNDRV_DMA_TYPE_CONTINUOUS or
 * SNDRV_DMA_TYPE_VMALLOC type.
 *
 * Return: the snd_dma_buffer object on success, or NULL on failure
 */
struct snd_dma_buffer *
snd_devm_alloc_dir_pages(struct device *dev, int type,
			 enum dma_data_direction dir, size_t size)
{
	struct snd_dma_buffer *dmab;
	int err;

	if (WARN_ON(type == SNDRV_DMA_TYPE_CONTINUOUS ||
		    type == SNDRV_DMA_TYPE_VMALLOC))
		return NULL;

	dmab = devres_alloc(__snd_release_pages, sizeof(*dmab), GFP_KERNEL);
	if (!dmab)
		return NULL;

	err = snd_dma_alloc_dir_pages(type, dev, dir, size, dmab);
	if (err < 0) {
		devres_free(dmab);
		return NULL;
	}

	devres_add(dev, dmab);
	return dmab;
}
EXPORT_SYMBOL_GPL(snd_devm_alloc_dir_pages);
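
/*
 * Example (illustrative sketch): with the devres-managed variant no explicit
 * snd_dma_free_pages() call is needed; the buffer is released together with
 * the device. "card->dev" stands for a hypothetical real device pointer.
 *
 *	struct snd_dma_buffer *dmab;
 *
 *	dmab = snd_devm_alloc_dir_pages(card->dev, SNDRV_DMA_TYPE_DEV,
 *					DMA_TO_DEVICE, 32 * 1024);
 *	if (!dmab)
 *		return -ENOMEM;
 */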

/**
 * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
 * @dmab: buffer allocation information
 * @area: VM area information
 *
 * Return: zero if successful, or a negative error code
 */
int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
			struct vm_area_struct *area)
{
	const struct snd_malloc_ops *ops;

	if (!dmab)
		return -ENOENT;
	ops = snd_dma_get_ops(dmab);
	if (ops && ops->mmap)
		return ops->mmap(dmab, area);
	else
		return -ENOENT;
}
EXPORT_SYMBOL(snd_dma_buffer_mmap);
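
/*
 * Example (illustrative sketch): a PCM .mmap callback can forward the
 * request to the allocator backend. my_pcm_mmap() is a hypothetical
 * callback; snd_pcm_get_dma_buf() returns the snd_dma_buffer previously
 * assigned to the substream.
 *
 *	static int my_pcm_mmap(struct snd_pcm_substream *substream,
 *			       struct vm_area_struct *area)
 *	{
 *		return snd_dma_buffer_mmap(snd_pcm_get_dma_buf(substream),
 *					   area);
 *	}
 */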

#ifdef CONFIG_HAS_DMA
/**
 * snd_dma_buffer_sync - sync DMA buffer between CPU and device
 * @dmab: buffer allocation information
 * @mode: sync mode
 */
void snd_dma_buffer_sync(struct snd_dma_buffer *dmab,
			 enum snd_dma_sync_mode mode)
{
	const struct snd_malloc_ops *ops;

	if (!dmab || !dmab->dev.need_sync)
		return;
	ops = snd_dma_get_ops(dmab);
	if (ops && ops->sync)
		ops->sync(dmab, mode);
}
EXPORT_SYMBOL_GPL(snd_dma_buffer_sync);
#endif /* CONFIG_HAS_DMA */
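
/*
 * Example (illustrative sketch): buffer types that set dmab->dev.need_sync
 * (e.g. SNDRV_DMA_TYPE_NONCONTIG or SNDRV_DMA_TYPE_NONCOHERENT) need CPU
 * accesses bracketed like below; for coherent types the calls are no-ops.
 *
 *	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_CPU);
 *	...CPU reads or writes dmab->area...
 *	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_DEVICE);
 */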

/**
 * snd_sgbuf_get_addr - return the physical address at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the physical address
 */
dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_addr)
		return ops->get_addr(dmab, offset);
	else
		return dmab->addr + offset;
}
EXPORT_SYMBOL(snd_sgbuf_get_addr);

/**
 * snd_sgbuf_get_page - return the physical page at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the page pointer
 */
struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_page)
		return ops->get_page(dmab, offset);
	else
		return virt_to_page(dmab->area + offset);
}
EXPORT_SYMBOL(snd_sgbuf_get_page);

/**
 * snd_sgbuf_get_chunk_size - compute the max chunk size with continuous pages
 *	on sg-buffer
 * @dmab: buffer allocation information
 * @ofs: offset in the ring buffer
 * @size: the requested size
 *
 * Return: the chunk size
 */
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
				      unsigned int ofs, unsigned int size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_chunk_size)
		return ops->get_chunk_size(dmab, ofs, size);
	else
		return size;
}
EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
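
/*
 * Example (illustrative sketch): walking a (possibly scatter-gather) buffer
 * in DMA-contiguous chunks with the helpers above; "ofs", "bytes" and
 * my_chip_program_entry() are made-up names for illustration.
 *
 *	while (bytes > 0) {
 *		unsigned int chunk =
 *			snd_sgbuf_get_chunk_size(dmab, ofs, bytes);
 *
 *		my_chip_program_entry(chip, snd_sgbuf_get_addr(dmab, ofs),
 *				      chunk);
 *		ofs += chunk;
 *		bytes -= chunk;
 *	}
 */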

/*
 * Continuous pages allocator
 */
static void *do_alloc_pages(struct device *dev, size_t size, dma_addr_t *addr,
			    bool wc)
{
	void *p;
	gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;

 again:
	p = alloc_pages_exact(size, gfp);
	if (!p)
		return NULL;
	*addr = page_to_phys(virt_to_page(p));
	if (!dev)
		return p;
	if ((*addr + size - 1) & ~dev->coherent_dma_mask) {
		if (IS_ENABLED(CONFIG_ZONE_DMA32) && !(gfp & GFP_DMA32)) {
			gfp |= GFP_DMA32;
			goto again;
		}
		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}
#ifdef CONFIG_X86
	if (wc)
		set_memory_wc((unsigned long)(p), size >> PAGE_SHIFT);
#endif
	return p;
}

static void do_free_pages(void *p, size_t size, bool wc)
{
#ifdef CONFIG_X86
	if (wc)
		set_memory_wb((unsigned long)(p), size >> PAGE_SHIFT);
#endif
	free_pages_exact(p, size);
}

static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return do_alloc_pages(dmab->dev.dev, size, &dmab->addr, false);
}

static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
{
	do_free_pages(dmab->area, dmab->bytes, false);
}

static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
				   struct vm_area_struct *area)
{
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_continuous_ops = {
	.alloc = snd_dma_continuous_alloc,
	.free = snd_dma_continuous_free,
	.mmap = snd_dma_continuous_mmap,
};

/*
 * VMALLOC allocator
 */
static void *snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return vmalloc(size);
}

static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
{
	vfree(dmab->area);
}

static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab,
				struct vm_area_struct *area)
{
	return remap_vmalloc_range(area, dmab->area, 0);
}

#define get_vmalloc_page_addr(dmab, offset) \
	page_to_phys(vmalloc_to_page((dmab)->area + (offset)))

static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
					   size_t offset)
{
	return get_vmalloc_page_addr(dmab, offset) + offset % PAGE_SIZE;
}

static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	return vmalloc_to_page(dmab->area + offset);
}

static unsigned int
snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
			       unsigned int ofs, unsigned int size)
{
	unsigned int start, end;
	unsigned long addr;

	start = ALIGN_DOWN(ofs, PAGE_SIZE);
	end = ofs + size - 1; /* the last byte address */
	/* check page continuity */
	addr = get_vmalloc_page_addr(dmab, start);
	for (;;) {
		start += PAGE_SIZE;
		if (start > end)
			break;
		addr += PAGE_SIZE;
		if (get_vmalloc_page_addr(dmab, start) != addr)
			return start - ofs;
	}
	/* ok, all on continuous pages */
	return size;
}

static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
	.alloc = snd_dma_vmalloc_alloc,
	.free = snd_dma_vmalloc_free,
	.mmap = snd_dma_vmalloc_mmap,
	.get_addr = snd_dma_vmalloc_get_addr,
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};

#ifdef CONFIG_HAS_DMA
/*
 * IRAM allocator
 */
#ifdef CONFIG_GENERIC_ALLOCATOR
static void *snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct device *dev = dmab->dev.dev;
	struct gen_pool *pool;
	void *p;

	if (dev->of_node) {
		pool = of_gen_pool_get(dev->of_node, "iram", 0);
		/* Assign the pool into private_data field */
		dmab->private_data = pool;

		p = gen_pool_dma_alloc_align(pool, size, &dmab->addr, PAGE_SIZE);
		if (p)
			return p;
	}

	/* The internal memory pool may be too small or already exhausted,
	 * so if the allocation above fails, fall back to the normal page
	 * allocation.
	 */
	dmab->dev.type = SNDRV_DMA_TYPE_DEV;
	return __snd_dma_alloc_pages(dmab, size);
}

static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
{
	struct gen_pool *pool = dmab->private_data;

	if (pool && dmab->area)
		gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
}

static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab,
			     struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_iram_ops = {
	.alloc = snd_dma_iram_alloc,
	.free = snd_dma_iram_free,
	.mmap = snd_dma_iram_mmap,
};
#endif /* CONFIG_GENERIC_ALLOCATOR */

/*
 * Coherent device pages allocator
 */
static void *snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
{
	dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
			    struct vm_area_struct *area)
{
	return dma_mmap_coherent(dmab->dev.dev, area,
				 dmab->area, dmab->addr, dmab->bytes);
}

static const struct snd_malloc_ops snd_dma_dev_ops = {
	.alloc = snd_dma_dev_alloc,
	.free = snd_dma_dev_free,
	.mmap = snd_dma_dev_mmap,
};

/*
 * Write-combined pages
 */
#ifdef CONFIG_SND_DMA_SGBUF
/* x86-specific allocations */
static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	void *p = do_alloc_pages(dmab->dev.dev, size, &dmab->addr, true);

	if (!p)
		return NULL;
	dmab->addr = dma_map_single(dmab->dev.dev, p, size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dmab->dev.dev, dmab->addr)) {
		do_free_pages(p, size, true);
		return NULL;
	}
	return p;
}

static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
{
	dma_unmap_single(dmab->dev.dev, dmab->addr, dmab->bytes,
			 DMA_BIDIRECTIONAL);
	do_free_pages(dmab->area, dmab->bytes, true);
}

static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
			   struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return dma_mmap_coherent(dmab->dev.dev, area,
				 dmab->area, dmab->addr, dmab->bytes);
}
#else
static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
{
	dma_free_wc(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
			   struct vm_area_struct *area)
{
	return dma_mmap_wc(dmab->dev.dev, area,
			   dmab->area, dmab->addr, dmab->bytes);
}
#endif

static const struct snd_malloc_ops snd_dma_wc_ops = {
	.alloc = snd_dma_wc_alloc,
	.free = snd_dma_wc_free,
	.mmap = snd_dma_wc_mmap,
};

/*
 * Non-contiguous pages allocator
 */
static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct sg_table *sgt;
	void *p;

	sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
				      DEFAULT_GFP, 0);
	if (!sgt)
		return NULL;

	dmab->dev.need_sync = dma_need_sync(dmab->dev.dev,
					    sg_dma_address(sgt->sgl));
	p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
	if (p) {
		dmab->private_data = sgt;
		/* store the first page address for convenience */
		dmab->addr = snd_sgbuf_get_addr(dmab, 0);
	} else {
		dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir);
	}
	return p;
}

static void snd_dma_noncontig_free(struct snd_dma_buffer *dmab)
{
	dma_vunmap_noncontiguous(dmab->dev.dev, dmab->area);
	dma_free_noncontiguous(dmab->dev.dev, dmab->bytes, dmab->private_data,
			       dmab->dev.dir);
}

static int snd_dma_noncontig_mmap(struct snd_dma_buffer *dmab,
				  struct vm_area_struct *area)
{
	return dma_mmap_noncontiguous(dmab->dev.dev, area,
				      dmab->bytes, dmab->private_data);
}

static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab,
				   enum snd_dma_sync_mode mode)
{
	if (mode == SNDRV_DMA_SYNC_CPU) {
		if (dmab->dev.dir == DMA_TO_DEVICE)
			return;
		invalidate_kernel_vmap_range(dmab->area, dmab->bytes);
		dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data,
					 dmab->dev.dir);
	} else {
		if (dmab->dev.dir == DMA_FROM_DEVICE)
			return;
		flush_kernel_vmap_range(dmab->area, dmab->bytes);
		dma_sync_sgtable_for_device(dmab->dev.dev, dmab->private_data,
					    dmab->dev.dir);
	}
}

static inline void snd_dma_noncontig_iter_set(struct snd_dma_buffer *dmab,
					      struct sg_page_iter *piter,
					      size_t offset)
{
	struct sg_table *sgt = dmab->private_data;

	__sg_page_iter_start(piter, sgt->sgl, sgt->orig_nents,
			     offset >> PAGE_SHIFT);
}

static dma_addr_t snd_dma_noncontig_get_addr(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	struct sg_dma_page_iter iter;

	snd_dma_noncontig_iter_set(dmab, &iter.base, offset);
	__sg_page_iter_dma_next(&iter);
	return sg_page_iter_dma_address(&iter) + offset % PAGE_SIZE;
}

static struct page *snd_dma_noncontig_get_page(struct snd_dma_buffer *dmab,
					       size_t offset)
{
	struct sg_page_iter iter;

	snd_dma_noncontig_iter_set(dmab, &iter, offset);
	__sg_page_iter_next(&iter);
	return sg_page_iter_page(&iter);
}

static unsigned int
snd_dma_noncontig_get_chunk_size(struct snd_dma_buffer *dmab,
				 unsigned int ofs, unsigned int size)
{
	struct sg_dma_page_iter iter;
	unsigned int start, end;
	unsigned long addr;

	start = ALIGN_DOWN(ofs, PAGE_SIZE);
	end = ofs + size - 1; /* the last byte address */
	snd_dma_noncontig_iter_set(dmab, &iter.base, start);
	if (!__sg_page_iter_dma_next(&iter))
		return 0;
	/* check page continuity */
	addr = sg_page_iter_dma_address(&iter);
	for (;;) {
		start += PAGE_SIZE;
		if (start > end)
			break;
		addr += PAGE_SIZE;
		if (!__sg_page_iter_dma_next(&iter) ||
		    sg_page_iter_dma_address(&iter) != addr)
			return start - ofs;
	}
	/* ok, all on continuous pages */
	return size;
}

static const struct snd_malloc_ops snd_dma_noncontig_ops = {
	.alloc = snd_dma_noncontig_alloc,
	.free = snd_dma_noncontig_free,
	.mmap = snd_dma_noncontig_mmap,
	.sync = snd_dma_noncontig_sync,
	.get_addr = snd_dma_noncontig_get_addr,
	.get_page = snd_dma_noncontig_get_page,
	.get_chunk_size = snd_dma_noncontig_get_chunk_size,
};

#ifdef CONFIG_SND_DMA_SGBUF
/* Fallback SG-buffer allocations for x86 */
struct snd_dma_sg_fallback {
	struct sg_table sgt; /* used by get_addr - must be the first item */
	size_t count;
	struct page **pages;
	unsigned int *npages;
};

static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
				       struct snd_dma_sg_fallback *sgbuf)
{
	bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG;
	size_t i, size;

	if (sgbuf->pages && sgbuf->npages) {
		i = 0;
		while (i < sgbuf->count) {
			size = sgbuf->npages[i];
			if (!size)
				break;
			do_free_pages(page_address(sgbuf->pages[i]),
				      size << PAGE_SHIFT, wc);
			i += size;
		}
	}
	kvfree(sgbuf->pages);
	kvfree(sgbuf->npages);
	kfree(sgbuf);
}

/* fallback manual S/G buffer allocations */
static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG;
	struct snd_dma_sg_fallback *sgbuf;
	struct page **pagep, *curp;
	size_t chunk;
	dma_addr_t addr;
	unsigned int idx, npages;
	void *p;

	sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
	if (!sgbuf)
		return NULL;
	size = PAGE_ALIGN(size);
	sgbuf->count = size >> PAGE_SHIFT;
	sgbuf->pages = kvcalloc(sgbuf->count, sizeof(*sgbuf->pages), GFP_KERNEL);
	sgbuf->npages = kvcalloc(sgbuf->count, sizeof(*sgbuf->npages), GFP_KERNEL);
	if (!sgbuf->pages || !sgbuf->npages)
		goto error;

	pagep = sgbuf->pages;
	chunk = size;
	idx = 0;
	while (size > 0) {
		chunk = min(size, chunk);
		p = do_alloc_pages(dmab->dev.dev, chunk, &addr, wc);
		if (!p) {
			if (chunk <= PAGE_SIZE)
				goto error;
			chunk >>= 1;
			chunk = PAGE_SIZE << get_order(chunk);
			continue;
		}

		size -= chunk;
		/* fill pages */
		npages = chunk >> PAGE_SHIFT;
		sgbuf->npages[idx] = npages;
		idx += npages;
		curp = virt_to_page(p);
		while (npages--)
			*pagep++ = curp++;
	}

	if (sg_alloc_table_from_pages(&sgbuf->sgt, sgbuf->pages, sgbuf->count,
				      0, sgbuf->count << PAGE_SHIFT, GFP_KERNEL))
		goto error;

	if (dma_map_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0))
		goto error_dma_map;

	p = vmap(sgbuf->pages, sgbuf->count, VM_MAP, PAGE_KERNEL);
	if (!p)
		goto error_vmap;

	dmab->private_data = sgbuf;
	/* store the first page address for convenience */
	dmab->addr = snd_sgbuf_get_addr(dmab, 0);
	return p;

 error_vmap:
	dma_unmap_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0);
 error_dma_map:
	sg_free_table(&sgbuf->sgt);
 error:
	__snd_dma_sg_fallback_free(dmab, sgbuf);
	return NULL;
}

static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab)
{
	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;

	vunmap(dmab->area);
	dma_unmap_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(&sgbuf->sgt);
	__snd_dma_sg_fallback_free(dmab, dmab->private_data);
}

static int snd_dma_sg_fallback_mmap(struct snd_dma_buffer *dmab,
				    struct vm_area_struct *area)
{
	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;

	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return vm_map_pages(area, sgbuf->pages, sgbuf->count);
}

static void *snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	int type = dmab->dev.type;
	void *p;

	/* try the standard DMA API allocation at first */
	if (type == SNDRV_DMA_TYPE_DEV_WC_SG)
		dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC;
	else
		dmab->dev.type = SNDRV_DMA_TYPE_DEV;
	p = __snd_dma_alloc_pages(dmab, size);
	if (p)
		return p;

	dmab->dev.type = type; /* restore the type */
	return snd_dma_sg_fallback_alloc(dmab, size);
}

static const struct snd_malloc_ops snd_dma_sg_ops = {
	.alloc = snd_dma_sg_alloc,
	.free = snd_dma_sg_fallback_free,
	.mmap = snd_dma_sg_fallback_mmap,
	/* reuse noncontig helper */
	.get_addr = snd_dma_noncontig_get_addr,
	/* reuse vmalloc helpers */
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};
#endif /* CONFIG_SND_DMA_SGBUF */

/*
 * Non-coherent pages allocator
 */
static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	void *p;

	p = dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
				  dmab->dev.dir, DEFAULT_GFP);
	if (p)
		dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->addr);
	return p;
}

static void snd_dma_noncoherent_free(struct snd_dma_buffer *dmab)
{
	dma_free_noncoherent(dmab->dev.dev, dmab->bytes, dmab->area,
			     dmab->addr, dmab->dev.dir);
}

static int snd_dma_noncoherent_mmap(struct snd_dma_buffer *dmab,
				    struct vm_area_struct *area)
{
	area->vm_page_prot = vm_get_page_prot(area->vm_flags);
	return dma_mmap_pages(dmab->dev.dev, area,
			      area->vm_end - area->vm_start,
			      virt_to_page(dmab->area));
}

static void snd_dma_noncoherent_sync(struct snd_dma_buffer *dmab,
				     enum snd_dma_sync_mode mode)
{
	if (mode == SNDRV_DMA_SYNC_CPU) {
		if (dmab->dev.dir != DMA_TO_DEVICE)
			dma_sync_single_for_cpu(dmab->dev.dev, dmab->addr,
						dmab->bytes, dmab->dev.dir);
	} else {
		if (dmab->dev.dir != DMA_FROM_DEVICE)
			dma_sync_single_for_device(dmab->dev.dev, dmab->addr,
						   dmab->bytes, dmab->dev.dir);
	}
}

static const struct snd_malloc_ops snd_dma_noncoherent_ops = {
	.alloc = snd_dma_noncoherent_alloc,
	.free = snd_dma_noncoherent_free,
	.mmap = snd_dma_noncoherent_mmap,
	.sync = snd_dma_noncoherent_sync,
};

#endif /* CONFIG_HAS_DMA */

/*
 * Entry points
 */
static const struct snd_malloc_ops *snd_dma_ops[] = {
	[SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
	[SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
#ifdef CONFIG_HAS_DMA
	[SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
	[SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
	[SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops,
	[SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops,
#ifdef CONFIG_SND_DMA_SGBUF
	[SNDRV_DMA_TYPE_DEV_SG] = &snd_dma_sg_ops,
	[SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_ops,
#endif
#ifdef CONFIG_GENERIC_ALLOCATOR
	[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
#endif /* CONFIG_GENERIC_ALLOCATOR */
#endif /* CONFIG_HAS_DMA */
};

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
{
	if (WARN_ON_ONCE(!dmab))
		return NULL;
	if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
			 dmab->dev.type >= ARRAY_SIZE(snd_dma_ops)))
		return NULL;
	return snd_dma_ops[dmab->dev.type];
}