// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};

static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}
static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}
/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_coherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);
/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate non-coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
/*
 * Create scatter-list for the already allocated DMA buffer.
 */
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page;
	int ret;

	if (!dev_is_dma_coherent(dev)) {
		unsigned long pfn;

		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
			return -ENXIO;

		/* If the PFN is not valid, we do not have a struct page */
		pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
		if (!pfn_valid(pfn))
			return -ENXIO;
		page = pfn_to_page(pfn);
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!dma_is_direct(ops) && ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
			attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);
/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	unsigned long pfn;
	int ret = -ENXIO;

	vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= count || user_count > count - off)
		return -ENXIO;

	if (!dev_is_dma_coherent(dev)) {
		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
			return -ENXIO;

		/* If the PFN is not valid, we do not have a struct page */
		pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
		if (!pfn_valid(pfn))
			return -ENXIO;
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
#else
	return -ENXIO;
#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
}
/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space.  The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!dma_is_direct(ops) && ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);
static u64 dma_default_get_required_mask(struct device *dev)
{
	u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
	u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
	u64 mask;

	if (!high_totalram) {
		/* convert to mask just covering totalram */
		low_totalram = (1 << (fls(low_totalram) - 1));
		low_totalram += low_totalram - 1;
		mask = low_totalram;
	} else {
		high_totalram = (1 << (fls(high_totalram) - 1));
		high_totalram += high_totalram - 1;
		mask = (((u64)high_totalram) << 32) + 0xffffffff;
	}
	return mask;
}
u64 dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_get_required_mask(dev);
	if (ops->get_required_mask)
		return ops->get_required_mask(dev);
	return dma_default_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	WARN_ON_ONCE(!dev->coherent_dma_mask);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (dma_is_direct(ops))
		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
	else if (ops->alloc)
		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	else
		return NULL;

	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	if (!cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	if (dma_is_direct(ops))
		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
	else if (ops->free)
		ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);
static inline void dma_check_mask(struct device *dev, u64 mask)
{
	if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
		dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
}
int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_supported(dev, mask);
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
EXPORT_SYMBOL(dma_supported);
#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
#define arch_dma_set_mask(dev, mask)	do { } while (0)
#endif
int dma_set_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	arch_dma_set_mask(dev, mask);
	dma_check_mask(dev, mask);
	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
#ifndef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);
	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
#endif
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));

	if (dma_is_direct(ops))
		arch_dma_cache_sync(dev, vaddr, size, dir);
	else if (ops->cache_sync)
		ops->cache_sync(dev, vaddr, size, dir);
}
EXPORT_SYMBOL(dma_cache_sync);
size_t dma_max_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (dma_is_direct(ops))
		size = dma_direct_max_mapping_size(dev);
	else if (ops && ops->max_mapping_size)
		size = ops->max_mapping_size(dev);

	return size;
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);