/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>
/*
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/core-api/dma-attributes.rst.
 */
/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is, reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN		(1UL << 8)
/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)
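
/*
 * Example: passing DMA attributes to an allocation. This is an
 * illustrative sketch, not part of this header; "dev", "size" and
 * "handle" are hypothetical driver variables.
 *
 *	void *cpu_addr;
 *
 *	cpu_addr = dma_alloc_attrs(dev, size, &handle, GFP_KERNEL,
 *				   DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_WARN);
 */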
/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform. It can
 * be given to a device to use as a DMA source or target. It is specific to a
 * given device and there may be a translation between the CPU physical address
 * space and the bus address space.
 *
 * DMA_MAPPING_ERROR is the magic error code if a mapping failed. It should not
 * be used directly in drivers, but checked for using dma_mapping_error()
 * instead.
 */
#define DMA_MAPPING_ERROR		(~(dma_addr_t)0)
#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
#ifdef CONFIG_DMA_API_DEBUG
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len);
#else
static inline void debug_dma_mapping_error(struct device *dev,
		dma_addr_t dma_addr)
{
}
static inline void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len)
{
}
#endif /* CONFIG_DMA_API_DEBUG */
#ifdef CONFIG_HAS_DMA
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (unlikely(dma_addr == DMA_MAPPING_ERROR))
		return -ENOMEM;
	return 0;
}
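
/*
 * Example: checking a streaming mapping for failure. An illustrative
 * sketch only; "dev", "buf" and "len" are hypothetical driver variables.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 */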
dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs);
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir);
void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir);
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_can_mmap(struct device *dev);
int dma_supported(struct device *dev, u64 mask);
bool dma_pci_p2pdma_supported(struct device *dev);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
size_t dma_opt_mapping_size(struct device *dev);
bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
unsigned long dma_get_merge_boundary(struct device *dev);
struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir);
void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt);
void dma_vunmap_noncontiguous(struct device *dev, void *vaddr);
int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct sg_table *sgt);
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline unsigned int dma_map_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	return -EOPNOTSUPP;
}
static inline dma_addr_t dma_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev,
		struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
		size_t size, unsigned long attrs)
{
	return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	return -ENXIO;
}
static inline bool dma_can_mmap(struct device *dev)
{
	return false;
}
static inline int dma_supported(struct device *dev, u64 mask)
{
	return 0;
}
static inline bool dma_pci_p2pdma_supported(struct device *dev)
{
	return false;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
	return 0;
}
static inline size_t dma_max_mapping_size(struct device *dev)
{
	return 0;
}
static inline size_t dma_opt_mapping_size(struct device *dev)
{
	return 0;
}
static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return false;
}
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
	return 0;
}
static inline struct sg_table *dma_alloc_noncontiguous(struct device *dev,
		size_t size, enum dma_data_direction dir, gfp_t gfp,
		unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
}
static inline void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt)
{
	return NULL;
}
static inline void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
}
static inline int dma_mmap_noncontiguous(struct device *dev,
		struct vm_area_struct *vma, size_t size, struct sg_table *sgt)
{
	return -EINVAL;
}
#endif /* CONFIG_HAS_DMA */
struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir);
int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct page *page);
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);
	return page ? page_address(page) : NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir)
{
	dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
}
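
/*
 * Example: a noncoherent allocation must be synchronized explicitly
 * around each DMA transfer. An illustrative sketch; "dev", "size" and
 * "handle" are hypothetical.
 *
 *	void *vaddr = dma_alloc_noncoherent(dev, size, &handle,
 *					    DMA_FROM_DEVICE, GFP_KERNEL);
 *
 *	(start device DMA into the buffer, wait for completion)
 *
 *	dma_sync_single_for_cpu(dev, handle, size, DMA_FROM_DEVICE);
 *	(the CPU may now read vaddr)
 *	dma_free_noncoherent(dev, size, vaddr, handle, DMA_FROM_DEVICE);
 */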
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	/* DMA must never operate on areas that might be remapped. */
	if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
			  "rejecting DMA map of vmalloc memory\n"))
		return DMA_MAPPING_ERROR;
	debug_dma_map_single(dev, ptr, size);
	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
			size, dir, attrs);
}
static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}
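
/*
 * Example: full streaming-mapping lifecycle with explicit attributes.
 * Illustrative only; "dev", "buf" and "len" are hypothetical, and the
 * attribute choice depends on the driver's needs.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE,
 *				      DMA_ATTR_NO_WARN);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	(program the device to read from "handle", wait for completion)
 *	dma_unmap_single_attrs(dev, handle, len, DMA_TO_DEVICE,
 *			       DMA_ATTR_NO_WARN);
 */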
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_device(dev, addr + offset, size, dir);
}
/**
 * dma_unmap_sgtable - Unmap the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the unmap operation
 *
 * Unmaps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After this function
 * the ownership of the buffer is transferred back to the CPU domain.
 */
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}
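
/*
 * Example: mapping and unmapping an sg_table. Illustrative only; "dev"
 * and "sgt" are hypothetical, and error handling is reduced to the
 * minimum.
 *
 *	int ret;
 *
 *	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 *	if (ret)
 *		return ret;
 *	(perform the DMA operation using sgt->sgl / sgt->nents)
 *	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 */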
/**
 * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the CPU domain, so it is safe to perform any access to it
 * by the CPU. Before doing any further DMA operations, one has to transfer
 * the ownership of the buffer back to the DMA domain by calling
 * dma_sync_sgtable_for_device().
 */
static inline void dma_sync_sgtable_for_cpu(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}
/**
 * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the DMA domain, so it is safe to perform the DMA operation.
 * Once finished, one has to call dma_sync_sgtable_for_cpu() or
 * dma_unmap_sgtable().
 */
static inline void dma_sync_sgtable_for_device(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}
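
/*
 * Example: CPU access in the middle of a DMA lifetime requires a sync
 * pair to move ownership back and forth. Illustrative only; "dev" and
 * "sgt" are hypothetical.
 *
 *	dma_sync_sgtable_for_cpu(dev, sgt, DMA_FROM_DEVICE);
 *	(the CPU may now inspect the received data)
 *	dma_sync_sgtable_for_device(dev, sgt, DMA_FROM_DEVICE);
 *	(the device may resume DMA into the buffer)
 */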
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
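
/*
 * Example: coherent allocation for a long-lived descriptor ring. An
 * illustrative sketch; "dev" and "ring_size" are hypothetical.
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(dev, ring_size, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	(both CPU and device may access the ring without explicit syncs)
 *	dma_free_coherent(dev, ring_size, ring, ring_dma);
 */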
static inline u64 dma_get_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}
/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);

	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}
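
/*
 * Example: typical probe-time DMA mask negotiation, falling back to a
 * narrower mask. Illustrative only; "dev" is hypothetical.
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */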
/**
 * dma_addressing_limited - return if the device is addressing limited
 * @dev:	device to check
 *
 * Return %true if the device's DMA mask is too small to address all memory in
 * the system, else %false. Lack of addressing bits is the prime reason for
 * bounce buffering, but might not be the only one.
 */
static inline bool dma_addressing_limited(struct device *dev)
{
	return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
			    dma_get_required_mask(dev);
}
static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}
static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return ULONG_MAX;
}
/**
 * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units
 * @dev:	device to query the boundary for
 * @page_shift:	ilog() of the IOMMU page size
 *
 * Return the segment boundary in IOMMU page units (which may be different from
 * the CPU page size) for the passed in device.
 *
 * If @dev is NULL a boundary of U32_MAX is assumed; this case is just for
 * non-DMA API callers.
 */
static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
		unsigned int page_shift)
{
	if (!dev)
		return (U32_MAX >> page_shift) + 1;
	return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}
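
/*
 * Example calculation: with a boundary mask of 0xffffffff (the value
 * assumed when @dev is NULL) and a 4 KiB IOMMU page size (page_shift ==
 * 12), the result is (0xffffffff >> 12) + 1 == 0x100000 pages, i.e.
 * segments may not cross a 4 GiB boundary.
 */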
static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}
static inline unsigned int dma_get_min_align_mask(struct device *dev)
{
	if (dev->dma_parms)
		return dev->dma_parms->min_align_mask;
	return 0;
}

static inline int dma_set_min_align_mask(struct device *dev,
		unsigned int min_align_mask)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return -EIO;
	dev->dma_parms->min_align_mask = min_align_mask;
	return 0;
}
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dmam_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}
static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (gfp & __GFP_NOWARN)
		attrs |= DMA_ATTR_NO_WARN;

	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}
static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}
static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
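
/*
 * Example: write-combined framebuffer-style allocation exported to
 * userspace via mmap. Illustrative only; "dev", "vma" and "size" are
 * hypothetical.
 *
 *	dma_addr_t fb_dma;
 *	void *fb;
 *
 *	fb = dma_alloc_wc(dev, size, &fb_dma, GFP_KERNEL);
 *	if (!fb)
 *		return -ENOMEM;
 *	(in the mmap file operation:)
 *	return dma_mmap_wc(dev, vma, fb, fb_dma, size);
 */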
#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		(0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
#endif
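
/*
 * Example: a driver embeds unmap state in its own structure so the
 * fields compile away when CONFIG_NEED_DMA_MAP_STATE is not set.
 * Illustrative only; "struct my_tx_desc" and "desc" are hypothetical.
 *
 *	struct my_tx_desc {
 *		struct sk_buff *skb;
 *		DEFINE_DMA_UNMAP_ADDR(addr);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(desc, addr, handle);
 *	dma_unmap_len_set(desc, len, len);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(desc, addr),
 *			 dma_unmap_len(desc, len), DMA_TO_DEVICE);
 */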
#endif /* _LINUX_DMA_MAPPING_H */