// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast
 * operation, the code goes to great lengths to avoid additional overhead,
 * such as locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; it is protected by
 * a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can
 * call dma_request_channel(). Once a channel is allocated a reference is
 * taken against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is just set up by the driver.
 *
 * See Documentation/driver-api/dmaengine for more details
 */
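
/*
 * Example (illustrative sketch, not part of this file): a client of the
 * public allocator pairs dmaengine_get()/dmaengine_put() around its use of
 * dma_find_channel(); the surrounding driver context, descriptor prep, and
 * error handling are assumed.
 *
 *	dmaengine_get();
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan) {
 *		... prep, submit, and issue descriptors on chan ...
 *	}
 *	dmaengine_put();
 *
 * An exclusive channel is obtained with dma_request_channel() or
 * dma_request_chan() and handed back with dma_release_channel().
 */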

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
#include <linux/mempool.h>
#include <linux/numa.h>

static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDA(dma_ida);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- debugfs implementation --- */
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *rootdir;

static void dmaengine_debug_register(struct dma_device *dma_dev)
{
	dma_dev->dbg_dev_root = debugfs_create_dir(dev_name(dma_dev->dev),
						   rootdir);
	if (IS_ERR(dma_dev->dbg_dev_root))
		dma_dev->dbg_dev_root = NULL;
}

static void dmaengine_debug_unregister(struct dma_device *dma_dev)
{
	debugfs_remove_recursive(dma_dev->dbg_dev_root);
	dma_dev->dbg_dev_root = NULL;
}

static void dmaengine_dbg_summary_show(struct seq_file *s,
				       struct dma_device *dma_dev)
{
	struct dma_chan *chan;

	list_for_each_entry(chan, &dma_dev->channels, device_node) {
		if (chan->client_count) {
			seq_printf(s, " %-13s| %s", dma_chan_name(chan),
				   chan->dbg_client_name ?: "in-use");

			if (chan->router)
				seq_printf(s, " (via router: %s)\n",
					   dev_name(chan->router->dev));
			else
				seq_puts(s, "\n");
		}
	}
}

static int dmaengine_summary_show(struct seq_file *s, void *data)
{
	struct dma_device *dma_dev = NULL;

	mutex_lock(&dma_list_mutex);
	list_for_each_entry(dma_dev, &dma_device_list, global_node) {
		seq_printf(s, "dma%d (%s): number of channels: %u\n",
			   dma_dev->dev_id, dev_name(dma_dev->dev),
			   dma_dev->chancnt);

		if (dma_dev->dbg_summary_show)
			dma_dev->dbg_summary_show(s, dma_dev);
		else
			dmaengine_dbg_summary_show(s, dma_dev);

		if (!list_is_last(&dma_dev->global_node, &dma_device_list))
			seq_puts(s, "\n");
	}
	mutex_unlock(&dma_list_mutex);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dmaengine_summary);

static void __init dmaengine_debugfs_init(void)
{
	rootdir = debugfs_create_dir("dmaengine", NULL);

	/* /sys/kernel/debug/dmaengine/summary */
	debugfs_create_file("summary", 0444, rootdir, NULL,
			    &dmaengine_summary_fops);
}
#else
static inline void dmaengine_debugfs_init(void) { }
static inline int dmaengine_debug_register(struct dma_device *dma_dev)
{
	return 0;
}

static inline void dmaengine_debug_unregister(struct dma_device *dma_dev) { }
#endif /* DEBUG_FS */

/* --- sysfs implementation --- */

#define DMA_SLAVE_NAME	"slave"

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev: device node
 *
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}

static ssize_t memcpy_count_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int err;
	int i;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(memcpy_count);

static ssize_t bytes_transferred_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int err;
	int i;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(bytes_transferred);

static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(in_use);

static struct attribute *dma_dev_attrs[] = {
	&dev_attr_memcpy_count.attr,
	&dev_attr_bytes_transferred.attr,
	&dev_attr_in_use.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dma_dev);

static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_groups	= dma_dev_groups,
	.dev_release	= chan_dev_release,
};

/* --- client and device registration --- */

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * struct dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan: associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("dmaengine dma_channel_table_init failure: %d\n", err);
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_chan_is_local - checks if the channel is in the same NUMA node as
 *	the cpu
 */
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
{
	int node = dev_to_node(chan->device->dev);
	return node == NUMA_NO_NODE ||
		cpumask_test_cpu(cpu, cpumask_of_node(node));
}

/**
 * min_chan - finds the channel with the minimum count in the same NUMA node
 *	as the cpu
 * @cap: capability to match
 * @cpu: cpu index which the channel should be close to
 *
 * If some channels are close to the given cpu, the one with the lowest
 * reference count is returned. Otherwise, cpu is ignored and only the
 * reference count is taken into account.
 *
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *min = NULL;
	struct dma_chan *localmin = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min || chan->table_count < min->table_count)
				min = chan;

			if (dma_chan_is_local(chan, cpu))
				if (!localmin ||
				    chan->table_count < localmin->table_count)
					localmin = chan;
		}
	}

	chan = localmin ? localmin : min;

	if (chan)
		chan->table_count++;

	return chan;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case.
 *
 * Must be called under dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			chan = min_chan(cap, cpu);
			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}

static int dma_device_satisfies_mask(struct dma_device *device,
				     const dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		   DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan: channel to balance ->client_count versus dmaengine_ref_count
 *
 * Must be called under dma_list_mutex.
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

static void dma_device_release(struct kref *ref)
{
	struct dma_device *device = container_of(ref, struct dma_device, ref);

	list_del_rcu(&device->global_node);
	dma_channel_rebalance();

	if (device->device_release)
		device->device_release(device);
}

static void dma_device_put(struct dma_device *device)
{
	lockdep_assert_held(&dma_list_mutex);
	kref_put(&device->ref, dma_device_release);
}

/**
 * dma_chan_get - try to grab a DMA channel's parent driver module
 * @chan: channel to grab
 *
 * Must be called under dma_list_mutex.
 */
static int dma_chan_get(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);
	int ret;

	/* The channel is already in use, update client count */
	if (chan->client_count) {
		__module_get(owner);
		goto out;
	}

	if (!try_module_get(owner))
		return -ENODEV;

	ret = kref_get_unless_zero(&chan->device->ref);
	if (!ret) {
		ret = -ENODEV;
		goto module_put_out;
	}

	/* allocate upon first client reference */
	if (chan->device->device_alloc_chan_resources) {
		ret = chan->device->device_alloc_chan_resources(chan);
		if (ret < 0)
			goto err_out;
	}

	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
		balance_ref_count(chan);

out:
	chan->client_count++;
	return 0;

err_out:
	dma_device_put(chan->device);
module_put_out:
	module_put(owner);
	return ret;
}

/**
 * dma_chan_put - drop a reference to a DMA channel's parent driver module
 * @chan: channel to release
 *
 * Must be called under dma_list_mutex.
 */
static void dma_chan_put(struct dma_chan *chan)
{
	/* This channel is not in use, bail out */
	if (!chan->client_count)
		return;

	chan->client_count--;

	/* This channel is not in use anymore, free it */
	if (!chan->client_count && chan->device->device_free_chan_resources) {
		/* Make sure all operations have completed */
		dmaengine_synchronize(chan);
		chan->device->device_free_chan_resources(chan);
	}

	/* If the channel is used via a DMA request router, free the mapping */
	if (chan->router && chan->router->route_free) {
		chan->router->route_free(chan->router->dev, chan->route_data);
		chan->router = NULL;
		chan->route_data = NULL;
	}

	dma_device_put(chan->device);
	module_put(dma_chan_to_owner(chan));
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			dev_err(chan->device->dev, "%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			break;
		cpu_relax();
	} while (1);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
	struct dma_device *device;

	if (!chan || !caps)
		return -EINVAL;

	device = chan->device;

	/* check if the channel supports slave transactions */
	if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
	      test_bit(DMA_CYCLIC, device->cap_mask.bits)))
		return -ENXIO;

	/*
	 * Check whether it reports it uses the generic slave
	 * capabilities, if not, that means it doesn't support any
	 * kind of slave capabilities reporting.
	 */
	if (!device->directions)
		return -ENXIO;

	caps->src_addr_widths = device->src_addr_widths;
	caps->dst_addr_widths = device->dst_addr_widths;
	caps->directions = device->directions;
	caps->max_burst = device->max_burst;
	caps->residue_granularity = device->residue_granularity;
	caps->descriptor_reuse = device->descriptor_reuse;
	caps->cmd_pause = !!device->device_pause;
	caps->cmd_resume = !!device->device_resume;
	caps->cmd_terminate = !!device->device_terminate_all;

	return 0;
}
EXPORT_SYMBOL_GPL(dma_get_slave_caps);

static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (mask && !dma_device_satisfies_mask(dev, mask)) {
		dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			dev_dbg(dev->dev, "%s: %s busy\n",
				__func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			dev_dbg(dev->dev, "%s: %s filter said false\n",
				__func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}

static struct dma_chan *find_candidate(struct dma_device *device,
				       const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
	int err;

	if (chan) {
		/* Found a suitable channel, try to grab, prep, and return it.
		 * We first set DMA_PRIVATE to disable balance_ref_count as
		 * this channel will not be published in the general-purpose
		 * allocator.
		 */
		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);

		if (err) {
			if (err == -ENODEV) {
				dev_dbg(device->dev, "%s: %s module removed\n",
					__func__, dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else
				dev_dbg(device->dev,
					"%s: failed to get %s: (%d)\n",
					__func__, dma_chan_name(chan), err);

			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);

			chan = ERR_PTR(err);
		}
	}

	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}

/**
 * dma_get_slave_channel - try to get specific channel exclusively
 * @chan: target channel
 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
	int err = -EBUSY;

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	if (chan->client_count == 0) {
		struct dma_device *device = chan->device;

		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);
		if (err) {
			dev_dbg(chan->device->dev,
				"%s: failed to get %s: (%d)\n",
				__func__, dma_chan_name(chan), err);
			chan = NULL;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
		}
	} else
		chan = NULL;

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);

struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	chan = find_candidate(device, &mask, NULL, NULL);

	mutex_unlock(&dma_list_mutex);

	return IS_ERR(chan) ? NULL : chan;
}
EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);

/**
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn()
 * @np: device node to look for DMA channels
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param,
				       struct device_node *np)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		/* Finds a DMA controller with matching device node */
		if (np && device->dev->of_node && np != device->dev->of_node)
			continue;

		chan = find_candidate(device, mask, fn, fn_param);
		if (!IS_ERR(chan))
			break;

		chan = NULL;
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);

static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
						    const char *name,
						    struct device *dev)
{
	int i;

	if (!device->filter.mapcnt)
		return NULL;

	for (i = 0; i < device->filter.mapcnt; i++) {
		const struct dma_slave_map *map = &device->filter.map[i];

		if (!strcmp(map->devname, dev_name(dev)) &&
		    !strcmp(map->slave, name))
			return map;
	}

	return NULL;
}

/**
 * dma_request_chan - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan(struct device *dev, const char *name)
{
	struct dma_device *d, *_d;
	struct dma_chan *chan = NULL;

	/* If device-tree is present get slave info from here */
	if (dev->of_node)
		chan = of_dma_request_slave_channel(dev->of_node, name);

	/* If device was enumerated by ACPI get slave info from here */
	if (has_acpi_companion(dev) && !chan)
		chan = acpi_dma_request_slave_chan_by_name(dev, name);

	if (PTR_ERR(chan) == -EPROBE_DEFER)
		return chan;

	if (!IS_ERR_OR_NULL(chan))
		goto found;

	/* Try to find the channel via the DMA filter map(s) */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
		dma_cap_mask_t mask;
		const struct dma_slave_map *map = dma_filter_match(d, name, dev);

		if (!map)
			continue;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = find_candidate(d, &mask, d->filter.fn, map->param);
		if (!IS_ERR(chan))
			break;
	}
	mutex_unlock(&dma_list_mutex);

	if (IS_ERR_OR_NULL(chan))
		return chan ? chan : ERR_PTR(-EPROBE_DEFER);

found:
#ifdef CONFIG_DEBUG_FS
	chan->dbg_client_name = kasprintf(GFP_KERNEL, "%s:%s", dev_name(dev),
					  name);
#endif

	chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
	if (!chan->name)
		return chan;
	chan->slave = dev;

	if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj,
			      DMA_SLAVE_NAME))
		dev_warn(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME);
	if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name))
		dev_warn(dev, "Cannot create DMA %s symlink\n", chan->name);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_request_chan);

/**
 * dma_request_slave_channel - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *dma_request_slave_channel(struct device *dev,
					   const char *name)
{
	struct dma_chan *ch = dma_request_chan(dev, name);
	if (IS_ERR(ch))
		return NULL;

	return ch;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);

/**
 * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
 * @mask: capabilities that the channel must satisfy
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
{
	struct dma_chan *chan;

	if (!mask)
		return ERR_PTR(-ENODEV);

	chan = __dma_request_channel(mask, NULL, NULL, NULL);
	if (!chan) {
		mutex_lock(&dma_list_mutex);
		if (list_empty(&dma_device_list))
			chan = ERR_PTR(-EPROBE_DEFER);
		else
			chan = ERR_PTR(-ENODEV);
		mutex_unlock(&dma_list_mutex);
	}

	return chan;
}
EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);

void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);

	if (chan->slave) {
		sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME);
		sysfs_remove_link(&chan->slave->kobj, chan->name);
		chan->slave = NULL;
	}

	kfree(chan->name);
	chan->name = NULL;

#ifdef CONFIG_DEBUG_FS
	kfree(chan->dbg_client_name);
	chan->dbg_client_name = NULL;
#endif
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				dev_dbg(chan->device->dev,
					"%s: failed to get %s: (%d)\n",
					__func__, dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let DMA drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
	#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
	#endif

	#if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
	#endif

	#if IS_ENABLED(CONFIG_ASYNC_XOR)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	#if IS_ENABLED(CONFIG_ASYNC_PQ)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	return true;
}

static int get_dma_id(struct dma_device *device)
{
	int rc = ida_alloc(&dma_ida, GFP_KERNEL);

	if (rc < 0)
		return rc;
	device->dev_id = rc;
	return 0;
}

static int __dma_async_device_channel_register(struct dma_device *device,
					       struct dma_chan *chan)
{
	int rc = 0;

	chan->local = alloc_percpu(typeof(*chan->local));
	if (!chan->local)
		goto err_out;
	chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
	if (!chan->dev) {
		free_percpu(chan->local);
		chan->local = NULL;
		goto err_out;
	}

	/*
	 * When the chan_id is a negative value, we are dynamically adding
	 * the channel. Otherwise we are static enumerating.
	 */
	mutex_lock(&device->chan_mutex);
	chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
	mutex_unlock(&device->chan_mutex);
	if (chan->chan_id < 0) {
		pr_err("%s: unable to alloc ida for chan: %d\n",
		       __func__, chan->chan_id);
		goto err_out;
	}

	chan->dev->device.class = &dma_devclass;
	chan->dev->device.parent = device->dev;
	chan->dev->chan = chan;
	chan->dev->dev_id = device->dev_id;
	dev_set_name(&chan->dev->device, "dma%dchan%d",
		     device->dev_id, chan->chan_id);
	rc = device_register(&chan->dev->device);
	if (rc)
		goto err_out_ida;
	chan->client_count = 0;
	device->chancnt++;

	return 0;

 err_out_ida:
	mutex_lock(&device->chan_mutex);
	ida_free(&device->chan_ida, chan->chan_id);
	mutex_unlock(&device->chan_mutex);
 err_out:
	free_percpu(chan->local);
	kfree(chan->dev);
	return rc;
}

int dma_async_device_channel_register(struct dma_device *device,
				      struct dma_chan *chan)
{
	int rc;

	rc = __dma_async_device_channel_register(device, chan);
	if (rc < 0)
		return rc;

	dma_channel_rebalance();
	return 0;
}
EXPORT_SYMBOL_GPL(dma_async_device_channel_register);

static void __dma_async_device_channel_unregister(struct dma_device *device,
						  struct dma_chan *chan)
{
	WARN_ONCE(!device->device_release && chan->client_count,
		  "%s called while %d clients hold a reference\n",
		  __func__, chan->client_count);
	mutex_lock(&dma_list_mutex);
	list_del(&chan->device_node);
	device->chancnt--;
	chan->dev->chan = NULL;
	mutex_unlock(&dma_list_mutex);
	mutex_lock(&device->chan_mutex);
	ida_free(&device->chan_ida, chan->chan_id);
	mutex_unlock(&device->chan_mutex);
	device_unregister(&chan->dev->device);
	free_percpu(chan->local);
}

void dma_async_device_channel_unregister(struct dma_device *device,
					 struct dma_chan *chan)
{
	__dma_async_device_channel_unregister(device, chan);
	dma_channel_rebalance();
}
EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 *
 * After calling this routine the structure should not be freed except in the
 * device_release() callback which will be called after
 * dma_async_device_unregister() is called and no further references are taken.
 */
int dma_async_device_register(struct dma_device *device)
{
	int rc;
	struct dma_chan *chan;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	if (!device->dev) {
		pr_err("DMA device must have dev\n");
		return -EIO;
	}

	device->owner = device->dev->driver->owner;

	if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_MEMCPY");
		return -EIO;
	}

	if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_XOR");
		return -EIO;
	}

	if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_XOR_VAL");
		return -EIO;
	}

	if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_PQ");
		return -EIO;
	}

	if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_PQ_VAL");
		return -EIO;
	}

	if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_MEMSET");
		return -EIO;
	}

	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_INTERRUPT");
		return -EIO;
	}

	if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_CYCLIC");
		return -EIO;
	}

	if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_INTERLEAVE");
		return -EIO;
	}

	if (!device->device_tx_status) {
		dev_err(device->dev, "Device tx_status is not defined\n");
		return -EIO;
	}

	if (!device->device_issue_pending) {
		dev_err(device->dev, "Device issue_pending is not defined\n");
		return -EIO;
	}

	if (!device->device_release)
		dev_dbg(device->dev,
			"WARN: Device release is not defined so it is not safe to unbind this driver while in use\n");

	kref_init(&device->ref);

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	rc = get_dma_id(device);
	if (rc != 0)
		return rc;

	mutex_init(&device->chan_mutex);
	ida_init(&device->chan_ida);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = __dma_async_device_channel_register(device, chan);
		if (rc < 0)
			goto err_out;
	}

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	dmaengine_debug_register(device);

	return 0;

err_out:
	/* if we never registered a channel just release the idr */
	if (!device->chancnt) {
		ida_free(&dma_ida, device->dev_id);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan, *n;

	dmaengine_debug_unregister(device);

	list_for_each_entry_safe(chan, n, &device->channels, device_node)
		__dma_async_device_channel_unregister(device, chan);

	mutex_lock(&dma_list_mutex);
	/*
	 * setting DMA_PRIVATE ensures the device being torn down will not
	 * be used in the channel_table
	 */
	dma_cap_set(DMA_PRIVATE, device->cap_mask);
	dma_channel_rebalance();
	ida_free(&dma_ida, device->dev_id);
	dma_device_put(device);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_device_unregister);

static void dmam_device_release(struct device *dev, void *res)
{
	struct dma_device *device;

	device = *(struct dma_device **)res;
	dma_async_device_unregister(device);
}

/**
 * dmaenginem_async_device_register - registers DMA devices found
 * @device: &dma_device
 *
 * The operation is managed and will be undone on driver detach.
 */
int dmaenginem_async_device_register(struct dma_device *device)
{
	void *p;
	int ret;

	p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	ret = dma_async_device_register(device);
	if (!ret) {
		*(struct dma_device **)p = device;
		devres_add(device->dev, p);
	} else {
		devres_free(p);
	}

	return ret;
}
EXPORT_SYMBOL(dmaenginem_async_device_register);

struct dmaengine_unmap_pool {
	struct kmem_cache *cache;
	const char *name;
	mempool_t *pool;
	size_t size;
};

#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
static struct dmaengine_unmap_pool unmap_pool[] = {
	__UNMAP_POOL(2),
	#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	__UNMAP_POOL(16),
	__UNMAP_POOL(128),
	__UNMAP_POOL(256),
	#endif
};

static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
{
	int order = get_count_order(nr);

	switch (order) {
	case 0 ... 1:
		return &unmap_pool[0];
	#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	case 2 ... 4:
		return &unmap_pool[1];
	case 5 ... 7:
		return &unmap_pool[2];
	case 8:
		return &unmap_pool[3];
	#endif
	default:
		BUG();
		return NULL;
	}
}
*kref
)
1409 struct dmaengine_unmap_data
*unmap
= container_of(kref
, typeof(*unmap
), kref
);
1410 struct device
*dev
= unmap
->dev
;
1413 cnt
= unmap
->to_cnt
;
1414 for (i
= 0; i
< cnt
; i
++)
1415 dma_unmap_page(dev
, unmap
->addr
[i
], unmap
->len
,
1417 cnt
+= unmap
->from_cnt
;
1418 for (; i
< cnt
; i
++)
1419 dma_unmap_page(dev
, unmap
->addr
[i
], unmap
->len
,
1421 cnt
+= unmap
->bidi_cnt
;
1422 for (; i
< cnt
; i
++) {
1423 if (unmap
->addr
[i
] == 0)
1425 dma_unmap_page(dev
, unmap
->addr
[i
], unmap
->len
,
1428 cnt
= unmap
->map_cnt
;
1429 mempool_free(unmap
, __get_unmap_pool(cnt
)->pool
);
1432 void dmaengine_unmap_put(struct dmaengine_unmap_data
*unmap
)
1435 kref_put(&unmap
->kref
, dmaengine_unmap
);
1437 EXPORT_SYMBOL_GPL(dmaengine_unmap_put
);

static void dmaengine_destroy_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];

		mempool_destroy(p->pool);
		p->pool = NULL;
		kmem_cache_destroy(p->cache);
		p->cache = NULL;
	}
}
dmaengine_init_unmap_pool(void)
1457 for (i
= 0; i
< ARRAY_SIZE(unmap_pool
); i
++) {
1458 struct dmaengine_unmap_pool
*p
= &unmap_pool
[i
];
1461 size
= sizeof(struct dmaengine_unmap_data
) +
1462 sizeof(dma_addr_t
) * p
->size
;
1464 p
->cache
= kmem_cache_create(p
->name
, size
, 0,
1465 SLAB_HWCACHE_ALIGN
, NULL
);
1468 p
->pool
= mempool_create_slab_pool(1, p
->cache
);
1473 if (i
== ARRAY_SIZE(unmap_pool
))
1476 dmaengine_destroy_unmap_pool();

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	struct dmaengine_unmap_data *unmap;

	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
	if (!unmap)
		return NULL;

	memset(unmap, 0, sizeof(*unmap));
	kref_init(&unmap->kref);
	unmap->dev = dev;
	unmap->map_cnt = nr;

	return unmap;
}
EXPORT_SYMBOL(dmaengine_get_unmap_data);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
				  struct dma_chan *chan)
{
	tx->chan = chan;
	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
	#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

static inline int desc_check_and_set_metadata_mode(
	struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode)
{
	/* Make sure that the metadata mode is not mixed */
	if (!desc->desc_metadata_mode) {
		if (dmaengine_is_metadata_mode_supported(desc->chan, mode))
			desc->desc_metadata_mode = mode;
		else
			return -ENOTSUPP;
	} else if (desc->desc_metadata_mode != mode) {
		return -EINVAL;
	}

	return 0;
}

int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
				   void *data, size_t len)
{
	int ret;

	if (!desc)
		return -EINVAL;

	ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_CLIENT);
	if (ret)
		return ret;

	if (!desc->metadata_ops || !desc->metadata_ops->attach)
		return -ENOTSUPP;

	return desc->metadata_ops->attach(desc, data, len);
}
EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata);

void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
				      size_t *payload_len, size_t *max_len)
{
	int ret;

	if (!desc)
		return ERR_PTR(-EINVAL);

	ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
	if (ret)
		return ERR_PTR(ret);

	if (!desc->metadata_ops || !desc->metadata_ops->get_ptr)
		return ERR_PTR(-ENOTSUPP);

	return desc->metadata_ops->get_ptr(desc, payload_len, max_len);
}
EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr);

int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
				    size_t payload_len)
{
	int ret;

	if (!desc)
		return -EINVAL;

	ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
	if (ret)
		return ret;

	if (!desc->metadata_ops || !desc->metadata_ops->set_len)
		return -ENOTSUPP;

	return desc->metadata_ops->set_len(desc, payload_len);
}
EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len);

/**
 * dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_COMPLETE;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			dev_err(tx->chan->device->dev,
				"%s timeout waiting for descriptor submission\n",
				__func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/**
 * dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);

static int __init dma_bus_init(void)
{
	int err = dmaengine_init_unmap_pool();

	if (err)
		return err;

	err = class_register(&dma_devclass);
	if (!err)
		dmaengine_debugfs_init();

	return err;
}
arch_initcall(dma_bus_init);