/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/kmemcheck.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>
/*
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 */

/*
 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
 * forces all pending DMA writes to complete.
 */
#define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN		(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: Used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)
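
/*
 * Usage sketch: attributes are OR-ed into the attrs argument of the
 * *_attrs() helpers declared below.  A driver might, for example, request
 * a write-combined buffer and silence allocation-failure warnings
 * (dev, buf and handle are hypothetical placeholders):
 *
 *	void *buf;
 *	dma_addr_t handle;
 *
 *	buf = dma_alloc_attrs(dev, SZ_64K, &handle, GFP_KERNEL,
 *			      DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_WARN);
 *	if (!buf)
 *		return -ENOMEM;
 */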
/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target. A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
	void* (*alloc)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp,
			unsigned long attrs);
	void (*free)(struct device *dev, size_t size,
			void *vaddr, dma_addr_t dma_handle,
			unsigned long attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
			void *, dma_addr_t, size_t,
			unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			dma_addr_t, size_t, unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_sg)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir,
			unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
#endif
	int is_phys;
};
extern const struct dma_map_ops dma_noop_ops;
extern const struct dma_map_ops dma_virt_ops;
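
/*
 * Usage sketch: per the map_sg contract documented in struct dma_map_ops
 * above, dma_map_sg() returns 0 on failure and the number of mapped
 * entries (> 0) on success, never a negative value.  Note that
 * dma_unmap_sg() takes the original nents, not the returned count
 * (sgl and nents are hypothetical placeholders):
 *
 *	int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 *	if (mapped == 0)
 *		return -ENOMEM;
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */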
#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#define DMA_MASK_NONE	0x0ULL
static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}
static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These three functions are only for the DMA allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
				dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			       void *cpu_addr, size_t size, int *ret);

void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
				  size_t size, int *ret);
#else
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)

static inline void *dma_alloc_from_global_coherent(ssize_t size,
						   dma_addr_t *dma_handle)
{
	return NULL;
}

static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}

static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
						void *cpu_addr, size_t size,
						int *ret)
{
	return 0;
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev ? dev->bus : NULL);
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else
/*
 * Define the dma api to allow compilation but not linking of
 * dma dependent code.  Code that depends on the dma-mapping
 * API needs to set 'depends on HAS_DMA' in its Kconfig.
 */
extern const struct dma_map_ops bad_dma_ops;
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return &bad_dma_ops;
}
#endif
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     offset_in_page(ptr), size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   offset_in_page(ptr), size,
			   dir, addr, true);
	return addr;
}
static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}
/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}
static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					    struct page *page,
					    size_t offset, size_t size,
					    enum dma_data_direction dir,
					    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}
static inline void dma_unmap_page_attrs(struct device *dev,
					dma_addr_t addr, size_t size,
					enum dma_data_direction dir,
					unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}
static inline dma_addr_t dma_map_resource(struct device *dev,
					  phys_addr_t phys_addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	/* Don't allow RAM to be mapped */
	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));

	addr = phys_addr;
	if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);

	return addr;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
				      size_t size, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}
static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
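
/*
 * Usage sketch: a typical streaming mapping checks the returned handle
 * with dma_mapping_error() before use and unmaps with the same size and
 * direction (buf and len are hypothetical placeholders):
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... hand the transfer to the device ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */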
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
			unsigned long vm_flags,
			pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			unsigned long vm_flags, pgprot_t prot,
			const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @handle: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
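
/*
 * Usage sketch: a driver's file_operations ->mmap() handler can forward
 * a coherent allocation to user space with dma_mmap_coherent()
 * (struct foo_priv and its fields are hypothetical placeholders):
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_priv *priv = file->private_data;
 *
 *		return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
 *					 priv->handle, priv->size);
 *	}
 */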
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size,
		      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag)	(true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	if (!arch_dma_alloc_attrs(&dev, &flag))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	WARN_ON(irqs_disabled());

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;

	if (!ops->free || !cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
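
/*
 * Usage sketch: a long-lived descriptor ring is typically allocated once
 * at probe time and freed at remove time (ring and ring_dma are
 * hypothetical placeholders):
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, SZ_4K, &ring_dma, GFP_KERNEL);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, SZ_4K, ring, ring_dma);
 */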
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	debug_dma_mapping_error(dev, dma_addr);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);
	return 0;
}
static inline void dma_check_mask(struct device *dev, u64 mask)
{
	if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
		dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
}
static inline int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);

	*dev->dma_mask = mask;
	return 0;
}
#endif
static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}
#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);

	dev->coherent_dma_mask = mask;
	return 0;
}
#endif
/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
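
/*
 * Usage sketch: drivers usually try their widest supported mask first
 * and fall back to 32 bits:
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */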
/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately setup.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}
extern u64 dma_get_required_mask(struct device *dev);
#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, const struct iommu_ops *iommu,
				      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif
static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}
static inline unsigned int dma_set_max_seg_size(struct device *dev,
						unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}
static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}
static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}
#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return *dev->dma_mask >> PAGE_SHIFT;
}
#endif
static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = dma_alloc_coherent(dev, size, dma_handle,
				       flag | __GFP_ZERO);
	return ret;
}
#ifdef CONFIG_HAS_DMA
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif
/* flags for the coherent memory api */
#define DMA_MEMORY_EXCLUSIVE		0x01
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return -ENOSYS;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
#ifdef CONFIG_HAS_DMA
int dma_configure(struct device *dev);
void dma_deconfigure(struct device *dev);
#else
static inline int dma_configure(struct device *dev)
{
	return 0;
}

static inline void dma_deconfigure(struct device *dev) {}
#endif
/*
 * Managed DMA API
 */
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle);
extern void *dmam_alloc_attrs(struct device *dev, size_t size,
			      dma_addr_t *dma_handle, gfp_t gfp,
			      unsigned long attrs);
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
					phys_addr_t phys_addr,
					dma_addr_t device_addr, size_t size,
					int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
				phys_addr_t phys_addr, dma_addr_t device_addr,
				size_t size, gfp_t gfp)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
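
/*
 * Usage sketch: the managed (devres) variants above release the buffer
 * automatically when the device is unbound, so no matching free is
 * needed in the error or remove paths (pdev and handle are hypothetical
 * placeholders):
 *
 *	void *buf = dmam_alloc_coherent(&pdev->dev, SZ_4K, &handle,
 *					GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 */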
static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_addr, gfp,
			       DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif
static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif
static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif
#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)           ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)             ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)           (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)             (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    do { } while (0)
#endif
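
/*
 * Usage sketch: these macros let a driver embed unmap bookkeeping in its
 * own state at zero cost when CONFIG_NEED_DMA_MAP_STATE is not set
 * (struct foo_buf, fb, handle and size are hypothetical placeholders):
 *
 *	struct foo_buf {
 *		DEFINE_DMA_UNMAP_ADDR(addr);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(fb, addr, handle);
 *	dma_unmap_len_set(fb, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(fb, addr),
 *			 dma_unmap_len(fb, len), DMA_TO_DEVICE);
 */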