// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Due to the fact we are accelerating what is already a relatively fast
 * operation, the code goes to great lengths to avoid additional overhead,
 * such as locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs, protected by a
 * mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can
 * call dma_request_channel(). Once a channel is allocated a reference is
 * taken against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is just set up by the driver.
 *
 * See Documentation/driver-api/dmaengine for more details.
 */
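/*
 * Illustrative client sketch (the helper below is hypothetical, not part of
 * this file): the non-exclusive path described above amounts to bracketing
 * channel use with dmaengine_get()/dmaengine_put() and looking up a per-cpu
 * channel with dma_find_channel():
 *
 *	static void example_issue_memcpy_offload(void)
 *	{
 *		struct dma_chan *chan;
 *
 *		dmaengine_get();
 *		chan = dma_find_channel(DMA_MEMCPY);
 *		if (chan)
 *			dma_async_issue_pending(chan);
 *		dmaengine_put();
 *	}
 */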
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
#include <linux/mempool.h>
#include <linux/numa.h>
static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDA(dma_ida);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- debugfs implementation --- */
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *rootdir;
static void dmaengine_debug_register(struct dma_device *dma_dev)
{
        dma_dev->dbg_dev_root = debugfs_create_dir(dev_name(dma_dev->dev),
                                                   rootdir);
        if (IS_ERR(dma_dev->dbg_dev_root))
                dma_dev->dbg_dev_root = NULL;
}

static void dmaengine_debug_unregister(struct dma_device *dma_dev)
{
        debugfs_remove_recursive(dma_dev->dbg_dev_root);
        dma_dev->dbg_dev_root = NULL;
}
static void dmaengine_dbg_summary_show(struct seq_file *s,
                                       struct dma_device *dma_dev)
{
        struct dma_chan *chan;

        list_for_each_entry(chan, &dma_dev->channels, device_node) {
                if (chan->client_count) {
                        seq_printf(s, " %-13s| %s", dma_chan_name(chan),
                                   chan->dbg_client_name ?: "in-use");

                        if (chan->router)
                                seq_printf(s, " (via router: %s)\n",
                                           dev_name(chan->router->dev));
                        else
                                seq_puts(s, "\n");
                }
        }
}
static int dmaengine_summary_show(struct seq_file *s, void *data)
{
        struct dma_device *dma_dev = NULL;

        mutex_lock(&dma_list_mutex);
        list_for_each_entry(dma_dev, &dma_device_list, global_node) {
                seq_printf(s, "dma%d (%s): number of channels: %u\n",
                           dma_dev->dev_id, dev_name(dma_dev->dev),
                           dma_dev->chancnt);

                if (dma_dev->dbg_summary_show)
                        dma_dev->dbg_summary_show(s, dma_dev);
                else
                        dmaengine_dbg_summary_show(s, dma_dev);

                if (!list_is_last(&dma_dev->global_node, &dma_device_list))
                        seq_puts(s, "\n");
        }
        mutex_unlock(&dma_list_mutex);

        return 0;
}
DEFINE_SHOW_ATTRIBUTE(dmaengine_summary);
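/*
 * Rough shape of the resulting debugfs summary, as implied by the
 * seq_printf() formats above (device, channel and client names below are
 * purely illustrative):
 *
 *	dma0 (e0000000.dma-controller): number of channels: 2
 *	 dma0chan0    | in-use
 *	 dma0chan1    | e0001000.spi:tx (via router: e0002000.dmamux)
 */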
static void __init dmaengine_debugfs_init(void)
{
        rootdir = debugfs_create_dir("dmaengine", NULL);

        /* /sys/kernel/debug/dmaengine/summary */
        debugfs_create_file("summary", 0444, rootdir, NULL,
                            &dmaengine_summary_fops);
}
#else
static inline void dmaengine_debugfs_init(void) { }
static inline int dmaengine_debug_register(struct dma_device *dma_dev)
{
        return 0;
}

static inline void dmaengine_debug_unregister(struct dma_device *dma_dev) { }
#endif /* DEBUG_FS */
/* --- sysfs implementation --- */

#define DMA_SLAVE_NAME  "slave"

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev - device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
        struct dma_chan_dev *chan_dev;

        chan_dev = container_of(dev, typeof(*chan_dev), device);
        return chan_dev->chan;
}
static ssize_t memcpy_count_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct dma_chan *chan;
        unsigned long count = 0;
        int err;
        int i;

        mutex_lock(&dma_list_mutex);
        chan = dev_to_dma_chan(dev);
        if (chan) {
                for_each_possible_cpu(i)
                        count += per_cpu_ptr(chan->local, i)->memcpy_count;
                err = sprintf(buf, "%lu\n", count);
        } else
                err = -ENODEV;
        mutex_unlock(&dma_list_mutex);

        return err;
}
static DEVICE_ATTR_RO(memcpy_count);

static ssize_t bytes_transferred_show(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        struct dma_chan *chan;
        unsigned long count = 0;
        int err;
        int i;

        mutex_lock(&dma_list_mutex);
        chan = dev_to_dma_chan(dev);
        if (chan) {
                for_each_possible_cpu(i)
                        count += per_cpu_ptr(chan->local, i)->bytes_transferred;
                err = sprintf(buf, "%lu\n", count);
        } else
                err = -ENODEV;
        mutex_unlock(&dma_list_mutex);

        return err;
}
static DEVICE_ATTR_RO(bytes_transferred);
static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct dma_chan *chan;
        int err;

        mutex_lock(&dma_list_mutex);
        chan = dev_to_dma_chan(dev);
        if (chan)
                err = sprintf(buf, "%d\n", chan->client_count);
        else
                err = -ENODEV;
        mutex_unlock(&dma_list_mutex);

        return err;
}
static DEVICE_ATTR_RO(in_use);

static struct attribute *dma_dev_attrs[] = {
        &dev_attr_memcpy_count.attr,
        &dev_attr_bytes_transferred.attr,
        &dev_attr_in_use.attr,
        NULL,
};
ATTRIBUTE_GROUPS(dma_dev);
static void chan_dev_release(struct device *dev)
{
        struct dma_chan_dev *chan_dev;

        chan_dev = container_of(dev, typeof(*chan_dev), device);
        if (atomic_dec_and_test(chan_dev->idr_ref)) {
                ida_free(&dma_ida, chan_dev->dev_id);
                kfree(chan_dev->idr_ref);
        }
        kfree(chan_dev);
}

static struct class dma_devclass = {
        .name           = "dma",
        .dev_groups     = dma_dev_groups,
        .dev_release    = chan_dev_release,
};
/* --- client and device registration --- */

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
        struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
static int __init dma_channel_table_init(void)
{
        enum dma_transaction_type cap;
        int err = 0;

        bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

        /* 'interrupt', 'private', and 'slave' are channel capabilities,
         * but are not associated with an operation so they do not need
         * an entry in the channel_table
         */
        clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
        clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
        clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

        for_each_dma_cap_mask(cap, dma_cap_mask_all) {
                channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
                if (!channel_table[cap]) {
                        err = -ENOMEM;
                        break;
                }
        }

        if (err) {
                pr_err("dmaengine dma_channel_table_init failure: %d\n", err);
                for_each_dma_cap_mask(cap, dma_cap_mask_all)
                        free_percpu(channel_table[cap]);
        }

        return err;
}
arch_initcall(dma_channel_table_init);
/**
 * dma_chan_is_local - returns true if the channel is in the same numa-node as
 *      the cpu
 */
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
{
        int node = dev_to_node(chan->device->dev);
        return node == NUMA_NO_NODE ||
                cpumask_test_cpu(cpu, cpumask_of_node(node));
}
/**
 * min_chan - returns the channel with min count and in the same numa-node as
 *      the cpu
 * @cap: capability to match
 * @cpu: cpu index which the channel should be close to
 *
 * If some channels are close to the given cpu, the one with the lowest
 * reference count is returned. Otherwise, cpu is ignored and only the
 * reference count is taken into account.
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{
        struct dma_device *device;
        struct dma_chan *chan;
        struct dma_chan *min = NULL;
        struct dma_chan *localmin = NULL;

        list_for_each_entry(device, &dma_device_list, global_node) {
                if (!dma_has_cap(cap, device->cap_mask) ||
                    dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node) {
                        if (!chan->client_count)
                                continue;
                        if (!min || chan->table_count < min->table_count)
                                min = chan;

                        if (dma_chan_is_local(chan, cpu))
                                if (!localmin ||
                                    chan->table_count < localmin->table_count)
                                        localmin = chan;
                }
        }

        chan = localmin ? localmin : min;

        if (chan)
                chan->table_count++;

        return chan;
}
/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case. Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
        struct dma_chan *chan;
        struct dma_device *device;
        int cpu;
        int cap;

        /* undo the last distribution */
        for_each_dma_cap_mask(cap, dma_cap_mask_all)
                for_each_possible_cpu(cpu)
                        per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

        list_for_each_entry(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        chan->table_count = 0;
        }

        /* don't populate the channel_table if no clients are available */
        if (!dmaengine_ref_count)
                return;

        /* redistribute available channels */
        for_each_dma_cap_mask(cap, dma_cap_mask_all)
                for_each_online_cpu(cpu) {
                        chan = min_chan(cap, cpu);
                        per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
                }
}
static int dma_device_satisfies_mask(struct dma_device *device,
                                     const dma_cap_mask_t *want)
{
        dma_cap_mask_t has;

        bitmap_and(has.bits, want->bits, device->cap_mask.bits,
                   DMA_TX_TYPE_END);
        return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
        return chan->device->owner;
}
/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
        struct module *owner = dma_chan_to_owner(chan);

        while (chan->client_count < dmaengine_ref_count) {
                __module_get(owner);
                chan->client_count++;
        }
}
static void dma_device_release(struct kref *ref)
{
        struct dma_device *device = container_of(ref, struct dma_device, ref);

        list_del_rcu(&device->global_node);
        dma_channel_rebalance();

        if (device->device_release)
                device->device_release(device);
}

static void dma_device_put(struct dma_device *device)
{
        lockdep_assert_held(&dma_list_mutex);
        kref_put(&device->ref, dma_device_release);
}
/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
        struct module *owner = dma_chan_to_owner(chan);
        int ret;

        /* The channel is already in use, update client count */
        if (chan->client_count) {
                __module_get(owner);
                goto out;
        }

        if (!try_module_get(owner))
                return -ENODEV;

        ret = kref_get_unless_zero(&chan->device->ref);
        if (!ret) {
                ret = -ENODEV;
                goto module_put_out;
        }

        /* allocate upon first client reference */
        if (chan->device->device_alloc_chan_resources) {
                ret = chan->device->device_alloc_chan_resources(chan);
                if (ret < 0)
                        goto err_out;
        }

        if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
                balance_ref_count(chan);

out:
        chan->client_count++;
        return 0;

err_out:
        dma_device_put(chan->device);
module_put_out:
        module_put(owner);
        return ret;
}
/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
        /* This channel is not in use, bail out */
        if (!chan->client_count)
                return;

        chan->client_count--;

        /* This channel is not in use anymore, free it */
        if (!chan->client_count && chan->device->device_free_chan_resources) {
                /* Make sure all operations have completed */
                dmaengine_synchronize(chan);
                chan->device->device_free_chan_resources(chan);
        }

        /* If the channel is used via a DMA request router, free the mapping */
        if (chan->router && chan->router->route_free) {
                chan->router->route_free(chan->router->dev, chan->route_data);
                chan->router = NULL;
                chan->route_data = NULL;
        }

        dma_device_put(chan->device);
        module_put(dma_chan_to_owner(chan));
}
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
        enum dma_status status;
        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

        dma_async_issue_pending(chan);
        do {
                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
                        dev_err(chan->device->dev, "%s: timeout!\n", __func__);
                        return DMA_ERROR;
                }
                if (status != DMA_IN_PROGRESS)
                        break;
                cpu_relax();
        } while (1);

        return status;
}
EXPORT_SYMBOL(dma_sync_wait);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
        return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);
/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
        struct dma_device *device;
        struct dma_chan *chan;

        rcu_read_lock();
        list_for_each_entry_rcu(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        if (chan->client_count)
                                device->device_issue_pending(chan);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);
int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
        struct dma_device *device;

        if (!chan || !caps)
                return -EINVAL;

        device = chan->device;

        /* check if the channel supports slave transactions */
        if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
              test_bit(DMA_CYCLIC, device->cap_mask.bits)))
                return -ENXIO;

        /*
         * Check whether it reports it uses the generic slave
         * capabilities, if not, that means it doesn't support any
         * kind of slave capabilities reporting.
         */
        if (!device->directions)
                return -ENXIO;

        caps->src_addr_widths = device->src_addr_widths;
        caps->dst_addr_widths = device->dst_addr_widths;
        caps->directions = device->directions;
        caps->max_burst = device->max_burst;
        caps->residue_granularity = device->residue_granularity;
        caps->descriptor_reuse = device->descriptor_reuse;
        caps->cmd_pause = !!device->device_pause;
        caps->cmd_resume = !!device->device_resume;
        caps->cmd_terminate = !!device->device_terminate_all;

        return 0;
}
EXPORT_SYMBOL_GPL(dma_get_slave_caps);
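/*
 * Illustrative caller sketch (hypothetical helper, not part of this file):
 * a peripheral driver can use dma_get_slave_caps() to verify a direction and
 * register width before committing to a slave configuration.
 *
 *	static bool example_supports_mem_to_dev_4bytes(struct dma_chan *chan)
 *	{
 *		struct dma_slave_caps caps;
 *
 *		if (dma_get_slave_caps(chan, &caps))
 *			return false;
 *
 *		return (caps.directions & BIT(DMA_MEM_TO_DEV)) &&
 *		       (caps.dst_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES));
 *	}
 */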
static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
                                          struct dma_device *dev,
                                          dma_filter_fn fn, void *fn_param)
{
        struct dma_chan *chan;

        if (mask && !dma_device_satisfies_mask(dev, mask)) {
                dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
                return NULL;
        }
        /* devices with multiple channels need special handling as we need to
         * ensure that all channels are either private or public.
         */
        if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
                list_for_each_entry(chan, &dev->channels, device_node) {
                        /* some channels are already publicly allocated */
                        if (chan->client_count)
                                return NULL;
                }

        list_for_each_entry(chan, &dev->channels, device_node) {
                if (chan->client_count) {
                        dev_dbg(dev->dev, "%s: %s busy\n",
                                __func__, dma_chan_name(chan));
                        continue;
                }
                if (fn && !fn(chan, fn_param)) {
                        dev_dbg(dev->dev, "%s: %s filter said false\n",
                                __func__, dma_chan_name(chan));
                        continue;
                }
                return chan;
        }

        return NULL;
}
static struct dma_chan *find_candidate(struct dma_device *device,
                                       const dma_cap_mask_t *mask,
                                       dma_filter_fn fn, void *fn_param)
{
        struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
        int err;

        if (chan) {
                /* Found a suitable channel, try to grab, prep, and return it.
                 * We first set DMA_PRIVATE to disable balance_ref_count as this
                 * channel will not be published in the general-purpose
                 * allocator
                 */
                dma_cap_set(DMA_PRIVATE, device->cap_mask);
                device->privatecnt++;
                err = dma_chan_get(chan);

                if (err) {
                        if (err == -ENODEV) {
                                dev_dbg(device->dev, "%s: %s module removed\n",
                                        __func__, dma_chan_name(chan));
                                list_del_rcu(&device->global_node);
                        } else
                                dev_dbg(device->dev,
                                        "%s: failed to get %s: (%d)\n",
                                        __func__, dma_chan_name(chan), err);

                        if (--device->privatecnt == 0)
                                dma_cap_clear(DMA_PRIVATE, device->cap_mask);

                        chan = ERR_PTR(err);
                }
        }

        return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}
/**
 * dma_get_slave_channel - try to get specific channel exclusively
 * @chan: target channel
 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
        int err = -EBUSY;

        /* lock against __dma_request_channel */
        mutex_lock(&dma_list_mutex);

        if (chan->client_count == 0) {
                struct dma_device *device = chan->device;

                dma_cap_set(DMA_PRIVATE, device->cap_mask);
                device->privatecnt++;
                err = dma_chan_get(chan);
                if (err) {
                        dev_dbg(chan->device->dev,
                                "%s: failed to get %s: (%d)\n",
                                __func__, dma_chan_name(chan), err);
                        chan = NULL;
                        if (--device->privatecnt == 0)
                                dma_cap_clear(DMA_PRIVATE, device->cap_mask);
                }
        } else
                chan = NULL;

        mutex_unlock(&dma_list_mutex);

        return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);

struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
{
        dma_cap_mask_t mask;
        struct dma_chan *chan;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        /* lock against __dma_request_channel */
        mutex_lock(&dma_list_mutex);

        chan = find_candidate(device, &mask, NULL, NULL);

        mutex_unlock(&dma_list_mutex);

        return IS_ERR(chan) ? NULL : chan;
}
EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
/**
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 * @np: device node to look for DMA channels
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
                                       dma_filter_fn fn, void *fn_param,
                                       struct device_node *np)
{
        struct dma_device *device, *_d;
        struct dma_chan *chan = NULL;

        /* Find a channel */
        mutex_lock(&dma_list_mutex);
        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
                /* Finds a DMA controller with matching device node */
                if (np && device->dev->of_node && np != device->dev->of_node)
                        continue;

                chan = find_candidate(device, mask, fn, fn_param);
                if (!IS_ERR(chan))
                        break;

                chan = NULL;
        }
        mutex_unlock(&dma_list_mutex);

        pr_debug("%s: %s (%s)\n",
                 __func__,
                 chan ? "success" : "fail",
                 chan ? dma_chan_name(chan) : NULL);

        return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
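/*
 * Illustrative caller sketch (filter and helper names are hypothetical):
 * clients typically go through the dma_request_channel() wrapper, passing a
 * capability mask and an optional filter callback that gets offered each
 * candidate channel.
 *
 *	static bool example_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->private == param;
 *	}
 *
 *	static struct dma_chan *example_request(void *match)
 *	{
 *		dma_cap_mask_t mask;
 *
 *		dma_cap_zero(mask);
 *		dma_cap_set(DMA_SLAVE, mask);
 *
 *		return dma_request_channel(mask, example_filter, match);
 *	}
 */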
static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
                                                    const char *name,
                                                    struct device *dev)
{
        int i;

        if (!device->filter.mapcnt)
                return NULL;

        for (i = 0; i < device->filter.mapcnt; i++) {
                const struct dma_slave_map *map = &device->filter.map[i];

                if (!strcmp(map->devname, dev_name(dev)) &&
                    !strcmp(map->slave, name))
                        return map;
        }

        return NULL;
}
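/*
 * The filter map consulted above is normally supplied by the controller
 * driver before registration, roughly like this (device and channel names
 * are illustrative):
 *
 *	static const struct dma_slave_map example_slave_map[] = {
 *		{ "e0001000.spi", "tx", (void *)1 },
 *		{ "e0001000.spi", "rx", (void *)2 },
 *	};
 *
 * with dma_dev->filter.map, ->filter.mapcnt and ->filter.fn set accordingly
 * before dma_async_device_register().
 */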
/**
 * dma_request_chan - try to allocate an exclusive slave channel
 * @dev:        pointer to client device structure
 * @name:       slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan(struct device *dev, const char *name)
{
        struct dma_device *d, *_d;
        struct dma_chan *chan = NULL;

        /* If device-tree is present get slave info from here */
        if (dev->of_node)
                chan = of_dma_request_slave_channel(dev->of_node, name);

        /* If device was enumerated by ACPI get slave info from here */
        if (has_acpi_companion(dev) && !chan)
                chan = acpi_dma_request_slave_chan_by_name(dev, name);

        if (PTR_ERR(chan) == -EPROBE_DEFER)
                return chan;

        if (!IS_ERR_OR_NULL(chan))
                goto found;

        /* Try to find the channel via the DMA filter map(s) */
        mutex_lock(&dma_list_mutex);
        list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
                dma_cap_mask_t mask;
                const struct dma_slave_map *map = dma_filter_match(d, name, dev);

                if (!map)
                        continue;

                dma_cap_zero(mask);
                dma_cap_set(DMA_SLAVE, mask);

                chan = find_candidate(d, &mask, d->filter.fn, map->param);
                if (!IS_ERR(chan))
                        break;
        }
        mutex_unlock(&dma_list_mutex);

        if (IS_ERR_OR_NULL(chan))
                return chan ? chan : ERR_PTR(-EPROBE_DEFER);

found:
#ifdef CONFIG_DEBUG_FS
        chan->dbg_client_name = kasprintf(GFP_KERNEL, "%s:%s", dev_name(dev),
                                          name);
#endif

        chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
        if (!chan->name)
                return chan;
        chan->slave = dev;

        if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj,
                              DMA_SLAVE_NAME))
                dev_warn(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME);
        if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name))
                dev_warn(dev, "Cannot create DMA %s symlink\n", chan->name);

        return chan;
}
EXPORT_SYMBOL_GPL(dma_request_chan);
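/*
 * Illustrative consumer sketch (the channel name "tx" is hypothetical): a
 * slave driver usually requests its channel by name at probe time, treating
 * an error pointer (including -EPROBE_DEFER) as a probe failure, and hands
 * it back with dma_release_channel() when done.
 *
 *	static int example_probe_dma(struct device *dev)
 *	{
 *		struct dma_chan *tx = dma_request_chan(dev, "tx");
 *
 *		if (IS_ERR(tx))
 *			return PTR_ERR(tx);
 *
 *		dma_release_channel(tx);
 *		return 0;
 *	}
 */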
/**
 * dma_request_slave_channel - try to allocate an exclusive slave channel
 * @dev:        pointer to client device structure
 * @name:       slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *dma_request_slave_channel(struct device *dev,
                                           const char *name)
{
        struct dma_chan *ch = dma_request_chan(dev, name);
        if (IS_ERR(ch))
                return NULL;

        return ch;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);

/**
 * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
 * @mask: capabilities that the channel must satisfy
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
{
        struct dma_chan *chan;

        if (!mask)
                return ERR_PTR(-ENODEV);

        chan = __dma_request_channel(mask, NULL, NULL, NULL);
        if (IS_ERR(chan)) {
                mutex_lock(&dma_list_mutex);
                if (list_empty(&dma_device_list))
                        chan = ERR_PTR(-EPROBE_DEFER);
                else
                        chan = ERR_PTR(-ENODEV);
                mutex_unlock(&dma_list_mutex);
        }

        return chan;
}
EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
void dma_release_channel(struct dma_chan *chan)
{
        mutex_lock(&dma_list_mutex);
        WARN_ONCE(chan->client_count != 1,
                  "chan reference count %d != 1\n", chan->client_count);
        dma_chan_put(chan);
        /* drop PRIVATE cap enabled by __dma_request_channel() */
        if (--chan->device->privatecnt == 0)
                dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);

        if (chan->slave) {
                sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME);
                sysfs_remove_link(&chan->slave->kobj, chan->name);
                kfree(chan->name);
                chan->name = NULL;
                chan->slave = NULL;
        }

#ifdef CONFIG_DEBUG_FS
        kfree(chan->dbg_client_name);
        chan->dbg_client_name = NULL;
#endif
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);
/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
        struct dma_device *device, *_d;
        struct dma_chan *chan;
        int err;

        mutex_lock(&dma_list_mutex);
        dmaengine_ref_count++;

        /* try to grab channels */
        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node) {
                        err = dma_chan_get(chan);
                        if (err == -ENODEV) {
                                /* module removed before we could use it */
                                list_del_rcu(&device->global_node);
                                break;
                        } else if (err)
                                dev_dbg(chan->device->dev,
                                        "%s: failed to get %s: (%d)\n",
                                        __func__, dma_chan_name(chan), err);
                }
        }

        /* if this is the first reference and there were channels
         * waiting we need to rebalance to get those channels
         * incorporated into the channel table
         */
        if (dmaengine_ref_count == 1)
                dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);
/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
        struct dma_device *device, *_d;
        struct dma_chan *chan;

        mutex_lock(&dma_list_mutex);
        dmaengine_ref_count--;
        BUG_ON(dmaengine_ref_count < 0);
        /* drop channel references */
        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        dma_chan_put(chan);
        }
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);
static bool device_has_all_tx_types(struct dma_device *device)
{
        /* A device that satisfies this test has channels that will never cause
         * an async_tx channel switch event as all possible operation types can
         * be handled.
         */
        #ifdef CONFIG_ASYNC_TX_DMA
        if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
                return false;
        #endif

        #if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
        if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
                return false;
        #endif

        #if IS_ENABLED(CONFIG_ASYNC_XOR)
        if (!dma_has_cap(DMA_XOR, device->cap_mask))
                return false;

        #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
        if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
                return false;
        #endif
        #endif

        #if IS_ENABLED(CONFIG_ASYNC_PQ)
        if (!dma_has_cap(DMA_PQ, device->cap_mask))
                return false;

        #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
        if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
                return false;
        #endif
        #endif

        return true;
}
static int get_dma_id(struct dma_device *device)
{
        int rc = ida_alloc(&dma_ida, GFP_KERNEL);

        if (rc < 0)
                return rc;
        device->dev_id = rc;
        return 0;
}
static int __dma_async_device_channel_register(struct dma_device *device,
                                               struct dma_chan *chan,
                                               int chan_id)
{
        int rc = 0;
        int chancnt = device->chancnt;
        atomic_t *idr_ref;
        struct dma_chan *tchan;

        tchan = list_first_entry_or_null(&device->channels,
                                         struct dma_chan, device_node);
        if (!tchan)
                return -ENODEV;

        if (tchan->dev) {
                idr_ref = tchan->dev->idr_ref;
        } else {
                idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
                if (!idr_ref)
                        return -ENOMEM;
                atomic_set(idr_ref, 0);
        }

        chan->local = alloc_percpu(typeof(*chan->local));
        if (!chan->local)
                goto err_out;
        chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
        if (!chan->dev) {
                free_percpu(chan->local);
                chan->local = NULL;
                goto err_out;
        }

        /*
         * When the chan_id is a negative value, we are dynamically adding
         * the channel. Otherwise we are static enumerating.
         */
        chan->chan_id = chan_id < 0 ? chancnt : chan_id;
        chan->dev->device.class = &dma_devclass;
        chan->dev->device.parent = device->dev;
        chan->dev->chan = chan;
        chan->dev->idr_ref = idr_ref;
        chan->dev->dev_id = device->dev_id;
        atomic_inc(idr_ref);
        dev_set_name(&chan->dev->device, "dma%dchan%d",
                     device->dev_id, chan->chan_id);

        rc = device_register(&chan->dev->device);
        if (rc)
                goto err_out;
        chan->client_count = 0;
        device->chancnt = chan->chan_id + 1;

        return 0;

err_out:
        free_percpu(chan->local);
        kfree(chan->dev);
        if (atomic_dec_return(idr_ref) == 0)
                kfree(idr_ref);
        return rc;
}

int dma_async_device_channel_register(struct dma_device *device,
                                      struct dma_chan *chan)
{
        int rc;

        rc = __dma_async_device_channel_register(device, chan, -1);
        if (rc < 0)
                return rc;

        dma_channel_rebalance();
        return 0;
}
EXPORT_SYMBOL_GPL(dma_async_device_channel_register);
static void __dma_async_device_channel_unregister(struct dma_device *device,
                                                  struct dma_chan *chan)
{
        WARN_ONCE(!device->device_release && chan->client_count,
                  "%s called while %d clients hold a reference\n",
                  __func__, chan->client_count);
        mutex_lock(&dma_list_mutex);
        list_del(&chan->device_node);
        device->chancnt--;
        chan->dev->chan = NULL;
        mutex_unlock(&dma_list_mutex);
        device_unregister(&chan->dev->device);
        free_percpu(chan->local);
}

void dma_async_device_channel_unregister(struct dma_device *device,
                                         struct dma_chan *chan)
{
        __dma_async_device_channel_unregister(device, chan);
        dma_channel_rebalance();
}
EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 *
 * After calling this routine the structure should not be freed except in the
 * device_release() callback which will be called after
 * dma_async_device_unregister() is called and no further references are taken.
 */
int dma_async_device_register(struct dma_device *device)
{
        int rc, i = 0;
        struct dma_chan *chan;

        if (!device)
                return -ENODEV;

        /* validate device routines */
        if (!device->dev) {
                pr_err("DMA device must have dev\n");
                return -EIO;
        }

        device->owner = device->dev->driver->owner;

        if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
                dev_err(device->dev,
                        "Device claims capability %s, but op is not defined\n",
                        "DMA_MEMCPY");
                return -EIO;
        }

        if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
                dev_err(device->dev,
                        "Device claims capability %s, but op is not defined\n",
                        "DMA_XOR");
                return -EIO;
        }

        if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
                dev_err(device->dev,
                        "Device claims capability %s, but op is not defined\n",
                        "DMA_XOR_VAL");
                return -EIO;
        }

        if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
                dev_err(device->dev,
                        "Device claims capability %s, but op is not defined\n",
                        "DMA_PQ");
                return -EIO;
        }

        if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
                dev_err(device->dev,
                        "Device claims capability %s, but op is not defined\n",
                        "DMA_PQ_VAL");
                return -EIO;
        }

        if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
                dev_err(device->dev,
                        "Device claims capability %s, but op is not defined\n",
                        "DMA_MEMSET");
                return -EIO;
        }

        if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
                dev_err(device->dev,
                        "Device claims capability %s, but op is not defined\n",
                        "DMA_INTERRUPT");
                return -EIO;
        }

        if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
                dev_err(device->dev,
                        "Device claims capability %s, but op is not defined\n",
                        "DMA_CYCLIC");
                return -EIO;
        }

        if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
                dev_err(device->dev,
                        "Device claims capability %s, but op is not defined\n",
                        "DMA_INTERLEAVE");
                return -EIO;
        }

        if (!device->device_tx_status) {
                dev_err(device->dev, "Device tx_status is not defined\n");
                return -EIO;
        }

        if (!device->device_issue_pending) {
                dev_err(device->dev, "Device issue_pending is not defined\n");
                return -EIO;
        }

        if (!device->device_release)
                dev_dbg(device->dev,
                        "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n");

        kref_init(&device->ref);

        /* note: this only matters in the
         * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
         */
        if (device_has_all_tx_types(device))
                dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

        rc = get_dma_id(device);
        if (rc != 0)
                return rc;

        /* represent channels in sysfs. Probably want devs too */
        list_for_each_entry(chan, &device->channels, device_node) {
                rc = __dma_async_device_channel_register(device, chan, i++);
                if (rc < 0)
                        goto err_out;
        }

        mutex_lock(&dma_list_mutex);
        /* take references on public channels */
        if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
                list_for_each_entry(chan, &device->channels, device_node) {
                        /* if clients are already waiting for channels we need
                         * to take references on their behalf
                         */
                        if (dma_chan_get(chan) == -ENODEV) {
                                /* note we can only get here for the first
                                 * channel as the remaining channels are
                                 * guaranteed to get a reference
                                 */
                                rc = -ENODEV;
                                mutex_unlock(&dma_list_mutex);
                                goto err_out;
                        }
                }
        list_add_tail_rcu(&device->global_node, &dma_device_list);
        if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                device->privatecnt++;   /* Always private */
        dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);

        dmaengine_debug_register(device);

        return 0;

err_out:
        /* if we never registered a channel just release the idr */
        if (!device->chancnt) {
                ida_free(&dma_ida, device->dev_id);
                return rc;
        }

        list_for_each_entry(chan, &device->channels, device_node) {
                if (chan->local == NULL)
                        continue;
                mutex_lock(&dma_list_mutex);
                chan->dev->chan = NULL;
                mutex_unlock(&dma_list_mutex);
                device_unregister(&chan->dev->device);
                free_percpu(chan->local);
        }
        return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
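/*
 * Illustrative provider sketch (the example_* type and callbacks are
 * hypothetical): a controller driver fills in its dma_device, links its
 * channels into ->channels and only then registers the device.
 *
 *	static int example_controller_register(struct example_dma *ed)
 *	{
 *		struct dma_device *dd = &ed->ddev;
 *
 *		dd->dev = ed->dev;
 *		dma_cap_set(DMA_SLAVE, dd->cap_mask);
 *		dd->device_tx_status = example_tx_status;
 *		dd->device_issue_pending = example_issue_pending;
 *		dd->device_release = example_release;
 *
 *		INIT_LIST_HEAD(&dd->channels);
 *		list_add_tail(&ed->chan.device_node, &dd->channels);
 *
 *		return dma_async_device_register(dd);
 *	}
 */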
/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
        struct dma_chan *chan, *n;

        dmaengine_debug_unregister(device);

        list_for_each_entry_safe(chan, n, &device->channels, device_node)
                __dma_async_device_channel_unregister(device, chan);

        mutex_lock(&dma_list_mutex);
        /*
         * setting DMA_PRIVATE ensures the device being torn down will not
         * be used in the channel_table
         */
        dma_cap_set(DMA_PRIVATE, device->cap_mask);
        dma_channel_rebalance();
        dma_device_put(device);
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_device_unregister);

static void dmam_device_release(struct device *dev, void *res)
{
        struct dma_device *device;

        device = *(struct dma_device **)res;
        dma_async_device_unregister(device);
}
/**
 * dmaenginem_async_device_register - registers DMA devices found
 * @device: &dma_device
 *
 * The operation is managed and will be undone on driver detach.
 */
int dmaenginem_async_device_register(struct dma_device *device)
{
        void *p;
        int ret;

        p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL);
        if (!p)
                return -ENOMEM;

        ret = dma_async_device_register(device);
        if (!ret) {
                *(struct dma_device **)p = device;
                devres_add(device->dev, p);
        } else {
                devres_free(p);
        }

        return ret;
}
EXPORT_SYMBOL(dmaenginem_async_device_register);
struct dmaengine_unmap_pool {
        struct kmem_cache *cache;
        const char *name;
        mempool_t *pool;
        size_t size;
};

#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
static struct dmaengine_unmap_pool unmap_pool[] = {
        __UNMAP_POOL(2),
        #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
        __UNMAP_POOL(16),
        __UNMAP_POOL(128),
        __UNMAP_POOL(256),
        #endif
};

static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
{
        int order = get_count_order(nr);

        switch (order) {
        case 0 ... 1:
                return &unmap_pool[0];
#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
        case 2 ... 4:
                return &unmap_pool[1];
        case 5 ... 7:
                return &unmap_pool[2];
        case 8:
                return &unmap_pool[3];
#endif
        default:
                BUG();
                return NULL;
        }
}
static void dmaengine_unmap(struct kref *kref)
{
        struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
        struct device *dev = unmap->dev;
        int cnt, i;

        cnt = unmap->to_cnt;
        for (i = 0; i < cnt; i++)
                dma_unmap_page(dev, unmap->addr[i], unmap->len,
                               DMA_TO_DEVICE);
        cnt += unmap->from_cnt;
        for (; i < cnt; i++)
                dma_unmap_page(dev, unmap->addr[i], unmap->len,
                               DMA_FROM_DEVICE);
        cnt += unmap->bidi_cnt;
        for (; i < cnt; i++) {
                if (unmap->addr[i] == 0)
                        continue;
                dma_unmap_page(dev, unmap->addr[i], unmap->len,
                               DMA_BIDIRECTIONAL);
        }
        cnt = unmap->map_cnt;
        mempool_free(unmap, __get_unmap_pool(cnt)->pool);
}

void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
        if (unmap)
                kref_put(&unmap->kref, dmaengine_unmap);
}
EXPORT_SYMBOL_GPL(dmaengine_unmap_put);
static void dmaengine_destroy_unmap_pool(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
                struct dmaengine_unmap_pool *p = &unmap_pool[i];

                mempool_destroy(p->pool);
                p->pool = NULL;
                kmem_cache_destroy(p->cache);
                p->cache = NULL;
        }
}

static int __init dmaengine_init_unmap_pool(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
                struct dmaengine_unmap_pool *p = &unmap_pool[i];
                size_t size;

                size = sizeof(struct dmaengine_unmap_data) +
                       sizeof(dma_addr_t) * p->size;

                p->cache = kmem_cache_create(p->name, size, 0,
                                             SLAB_HWCACHE_ALIGN, NULL);
                if (!p->cache)
                        break;
                p->pool = mempool_create_slab_pool(1, p->cache);
                if (!p->pool)
                        break;
        }

        if (i == ARRAY_SIZE(unmap_pool))
                return 0;

        dmaengine_destroy_unmap_pool();
        return -ENOMEM;
}
struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
        struct dmaengine_unmap_data *unmap;

        unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
        if (!unmap)
                return NULL;

        memset(unmap, 0, sizeof(*unmap));
        kref_init(&unmap->kref);
        unmap->dev = dev;
        unmap->map_cnt = nr;

        return unmap;
}
EXPORT_SYMBOL(dmaengine_get_unmap_data);
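/*
 * Illustrative use of the unmap pool (mapping-error handling trimmed; the
 * helper and its page arguments are hypothetical): the caller maps source
 * and destination through the unmap data so that a single
 * dmaengine_unmap_put() undoes both mappings once the transfer is done.
 *
 *	static struct dmaengine_unmap_data *
 *	example_map_copy(struct dma_chan *chan, struct page *src,
 *			 struct page *dst, size_t len)
 *	{
 *		struct device *dev = chan->device->dev;
 *		struct dmaengine_unmap_data *unmap;
 *
 *		unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
 *		if (!unmap)
 *			return NULL;
 *
 *		unmap->len = len;
 *		unmap->addr[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE);
 *		unmap->to_cnt = 1;
 *		unmap->addr[1] = dma_map_page(dev, dst, 0, len, DMA_FROM_DEVICE);
 *		unmap->from_cnt = 1;
 *
 *		return unmap;
 *	}
 */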
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
                                  struct dma_chan *chan)
{
        tx->chan = chan;
        #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
        spin_lock_init(&tx->lock);
        #endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

static inline int desc_check_and_set_metadata_mode(
        struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode)
{
        /* Make sure that the metadata mode is not mixed */
        if (!desc->desc_metadata_mode) {
                if (dmaengine_is_metadata_mode_supported(desc->chan, mode))
                        desc->desc_metadata_mode = mode;
                else
                        return -ENOTSUPP;
        } else if (desc->desc_metadata_mode != mode) {
                return -EINVAL;
        }

        return 0;
}
int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
                                   void *data, size_t len)
{
        int ret;

        if (!desc)
                return -EINVAL;

        ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_CLIENT);
        if (ret)
                return ret;

        if (!desc->metadata_ops || !desc->metadata_ops->attach)
                return -ENOTSUPP;

        return desc->metadata_ops->attach(desc, data, len);
}
EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata);

void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
                                      size_t *payload_len, size_t *max_len)
{
        int ret;

        if (!desc)
                return ERR_PTR(-EINVAL);

        ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
        if (ret)
                return ERR_PTR(ret);

        if (!desc->metadata_ops || !desc->metadata_ops->get_ptr)
                return ERR_PTR(-ENOTSUPP);

        return desc->metadata_ops->get_ptr(desc, payload_len, max_len);
}
EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr);

int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
                                    size_t payload_len)
{
        int ret;

        if (!desc)
                return -EINVAL;

        ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
        if (ret)
                return ret;

        if (!desc->metadata_ops || !desc->metadata_ops->set_len)
                return -ENOTSUPP;

        return desc->metadata_ops->set_len(desc, payload_len);
}
EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len);
/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

        if (!tx)
                return DMA_COMPLETE;

        while (tx->cookie == -EBUSY) {
                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
                        dev_err(tx->chan->device->dev,
                                "%s timeout waiting for descriptor submission\n",
                                __func__);
                        return DMA_ERROR;
                }
                cpu_relax();
        }
        return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
/* dma_run_dependencies - helper routine for dma drivers to process
 *      (start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
        struct dma_async_tx_descriptor *dep = txd_next(tx);
        struct dma_async_tx_descriptor *dep_next;
        struct dma_chan *chan;

        if (!dep)
                return;

        /* we'll submit tx->next now, so clear the link */
        txd_clear_next(tx);
        chan = dep->chan;

        /* keep submitting up until a channel switch is detected
         * in that case we will be called again as a result of
         * processing the interrupt from async_tx_channel_switch
         */
        for (; dep; dep = dep_next) {
                txd_lock(dep);
                txd_clear_parent(dep);
                dep_next = txd_next(dep);
                if (dep_next && dep_next->chan == chan)
                        txd_clear_next(dep); /* ->next will be submitted */
                else
                        dep_next = NULL; /* submit current dep and terminate */
                txd_unlock(dep);

                dep->tx_submit(dep);
        }

        chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);
static int __init dma_bus_init(void)
{
        int err = dmaengine_init_unmap_pool();

        if (err)
                return err;

        err = class_register(&dma_devclass);
        if (!err)
                dmaengine_debugfs_init();

        return err;
}
arch_initcall(dma_bus_init);