/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Due to the fact we are accelerating what is already a relatively fast
 * operation, the code goes to great lengths to avoid additional overhead,
 * such as locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; it is protected by
 * a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can
 * call dma_request_channel(). Once a channel is allocated a reference is taken
 * against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it's just set up by the driver.
 *
 * See Documentation/dmaengine.txt for more details
 */

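/*
 * Illustrative sketch only (not compiled as part of this file): one way a
 * client could use the public-channel interface described above. The calls
 * are the dmaengine API exported below; the surrounding driver context is
 * assumed.
 *
 *	struct dma_chan *chan;
 *
 *	dmaengine_get();			// register interest in channels
 *	chan = dma_find_channel(DMA_MEMCPY);	// per-cpu channel, may be NULL
 *	if (chan) {
 *		// queue offloaded copies on chan, then kick the hardware
 *		dma_async_issue_pending(chan);
 *	}
 *	dmaengine_put();			// drop the client reference
 */
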
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>

static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDR(dma_idr);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev - device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}

static ssize_t show_memcpy_count(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int err;
	int i;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static ssize_t show_bytes_transferred(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int err;
	int i;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static ssize_t show_in_use(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static struct device_attribute dma_attrs[] = {
	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
	__ATTR_NULL
};

static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	if (atomic_dec_and_test(chan_dev->idr_ref)) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, chan_dev->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(chan_dev->idr_ref);
	}

	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_attrs	= dma_attrs,
	.dev_release	= chan_dev_release,
};

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static bool
__dma_device_satisfies_mask(struct dma_device *device,
			    const dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	int err = -ENODEV;
	struct module *owner = dma_chan_to_owner(chan);

	if (chan->client_count) {
		__module_get(owner);
		err = 0;
	} else if (try_module_get(owner))
		err = 0;

	if (err == 0)
		chan->client_count++;

	/* allocate upon first client reference */
	if (chan->client_count == 1 && err == 0) {
		int desc_cnt = chan->device->device_alloc_chan_resources(chan);

		if (desc_cnt < 0) {
			err = desc_cnt;
			chan->client_count = 0;
			module_put(owner);
		} else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
			balance_ref_count(chan);
	}

	return err;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	if (!chan->client_count)
		return; /* this channel failed alloc_chan_resources */
	chan->client_count--;
	module_put(dma_chan_to_owner(chan));
	if (chan->client_count == 0)
		chan->device->device_free_chan_resources(chan);
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			break;
		cpu_relax();
	} while (1);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("initialization failure\n");
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			if (channel_table[cap])
				free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);

/*
 * net_dma_find_channel - find a channel for net_dma
 * net_dma has alignment requirements
 */
struct dma_chan *net_dma_find_channel(void)
{
	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
	if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
		return NULL;

	return chan;
}
EXPORT_SYMBOL(net_dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

/**
 * nth_chan - returns the nth channel of the given capability
 * @cap: capability to match
 * @n: nth channel desired
 *
 * Defaults to returning the channel with the desired capability and the
 * lowest reference count when 'n' cannot be satisfied. Must be called
 * under dma_list_mutex.
 */
static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *ret = NULL;
	struct dma_chan *min = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min)
				min = chan;
			else if (chan->table_count < min->table_count)
				min = chan;

			if (n-- == 0) {
				ret = chan;
				break; /* done */
			}
		}
		if (ret)
			break; /* done */
	}

	if (!ret)
		ret = min;

	if (ret)
		ret->table_count++;

	return ret;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case. Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;
	int n;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	n = 0;
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			if (num_possible_cpus() > 1)
				chan = nth_chan(cap, n++);
			else
				chan = nth_chan(cap, -1);

			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}

static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (!__dma_device_satisfies_mask(dev, mask)) {
		pr_debug("%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			pr_debug("%s: %s busy\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			pr_debug("%s: %s filter said false\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}

/**
 * dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;
	int err;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		chan = private_candidate(mask, device, fn, fn_param);
		if (chan) {
			/* Found a suitable channel, try to grab, prep, and
			 * return it. We first set DMA_PRIVATE to disable
			 * balance_ref_count as this channel will not be
			 * published in the general-purpose allocator
			 */
			dma_cap_set(DMA_PRIVATE, device->cap_mask);
			device->privatecnt++;
			err = dma_chan_get(chan);

			if (err == -ENODEV) {
				pr_debug("%s: %s module removed\n",
					 __func__, dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else if (err)
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);
			else
				break;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
			chan = NULL;
		}
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);

/**
 * dma_request_slave_channel - try to allocate an exclusive slave channel
 * @dev:	pointer to client device structure
 * @name:	slave channel name
 */
struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name)
{
	/* If device-tree is present get slave info from here */
	if (dev->of_node)
		return of_dma_request_slave_channel(dev->of_node, name);

	/* If device was enumerated by ACPI get slave info from here */
	if (ACPI_HANDLE(dev))
		return acpi_dma_request_slave_chan_by_name(dev, name);

	return NULL;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);

void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
	#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_MEMSET) || defined(CONFIG_ASYNC_MEMSET_MODULE)
	if (!dma_has_cap(DMA_MEMSET, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	return true;
}

static int get_dma_id(struct dma_device *device)
{
	int rc;

	mutex_lock(&dma_list_mutex);

	rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
	if (rc >= 0)
		device->dev_id = rc;

	mutex_unlock(&dma_list_mutex);
	return rc < 0 ? rc : 0;
}

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	int chancnt = 0, rc;
	struct dma_chan *chan;
	atomic_t *idr_ref;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
		!device->device_prep_dma_xor_val);
	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
		!device->device_prep_dma_pq);
	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
		!device->device_prep_dma_pq_val);
	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
		!device->device_prep_dma_memset);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
		!device->device_prep_dma_sg);
	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
		!device->device_prep_dma_cyclic);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_control);
	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
		!device->device_prep_interleaved_dma);

	BUG_ON(!device->device_alloc_chan_resources);
	BUG_ON(!device->device_free_chan_resources);
	BUG_ON(!device->device_tx_status);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
	if (!idr_ref)
		return -ENOMEM;
	rc = get_dma_id(device);
	if (rc != 0) {
		kfree(idr_ref);
		return rc;
	}

	atomic_set(idr_ref, 0);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = -ENOMEM;
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			goto err_out;
		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
		if (chan->dev == NULL) {
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		chan->chan_id = chancnt++;
		chan->dev->device.class = &dma_devclass;
		chan->dev->device.parent = device->dev;
		chan->dev->chan = chan;
		chan->dev->idr_ref = idr_ref;
		chan->dev->dev_id = device->dev_id;
		atomic_inc(idr_ref);
		dev_set_name(&chan->dev->device, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev->device);
		if (rc) {
			free_percpu(chan->local);
			chan->local = NULL;
			kfree(chan->dev);
			atomic_dec(idr_ref);
			goto err_out;
		}
		chan->client_count = 0;
	}
	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;

err_out:
	/* if we never registered a channel just release the idr */
	if (atomic_read(idr_ref) == 0) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, device->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(idr_ref);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines, dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
}
EXPORT_SYMBOL(dma_async_device_unregister);

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
			    void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	unsigned long flags;

	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK |
		DMA_COMPL_SRC_UNMAP_SINGLE |
		DMA_COMPL_DEST_UNMAP_SINGLE;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);

/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages)
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
			   unsigned int offset, void *kdata, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	unsigned long flags;

	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
	size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	unsigned long flags;

	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
				DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
	#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_SUCCESS;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s timeout waiting for descriptor submission\n",
			       __func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);

static int __init dma_bus_init(void)
{
	return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);