/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Due to the fact we are accelerating what is already a relatively fast
 * operation, the code goes to great lengths to avoid additional overhead,
 * such as locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; it is protected by
 * the mutex dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can
 * call dma_request_channel(). Once a channel is allocated a reference is
 * taken against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it's just set up by the driver.
 *
 * See Documentation/dmaengine.txt for more details.
 */
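/*
 * Usage sketch (editorial illustration, not part of the upstream file): an
 * opportunistic mem-to-mem client following the access pattern described
 * above. "dst", "src" and "len" are hypothetical, already dma-mapped
 * parameters; error handling is elided.
 *
 *	struct dma_async_tx_descriptor *tx;
 *	struct dma_chan *chan;
 *	dma_cookie_t cookie;
 *
 *	dmaengine_get();			// take refs on all public channels
 *	chan = dma_find_channel(DMA_MEMCPY);	// per-cpu lookup, may be NULL
 *	if (chan) {
 *		tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *							  DMA_PREP_INTERRUPT);
 *		if (tx) {
 *			cookie = dmaengine_submit(tx);
 *			dma_async_issue_pending(chan);
 *		}
 *	}
 *	...
 *	dmaengine_put();			// drop refs, allow driver unload
 */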
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
#include <linux/mempool.h>
static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDR(dma_idr);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;
/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev - device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}
static ssize_t memcpy_count_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		int i;

		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(memcpy_count);
static ssize_t bytes_transferred_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		int i;

		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(bytes_transferred);
static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(in_use);
static struct attribute *dma_dev_attrs[] = {
	&dev_attr_memcpy_count.attr,
	&dev_attr_bytes_transferred.attr,
	&dev_attr_in_use.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dma_dev);
static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	if (atomic_dec_and_test(chan_dev->idr_ref)) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, chan_dev->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(chan_dev->idr_ref);
	}

	kfree(chan_dev);
}
static struct class dma_devclass = {
	.name		= "dma",
	.dev_groups	= dma_dev_groups,
	.dev_release	= chan_dev_release,
};
/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device,
			    const dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}
static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->dev->driver->owner;
}
/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}
/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);
	int ret;

	/* The channel is already in use, update client count */
	if (chan->client_count) {
		__module_get(owner);
		goto out;
	}

	if (!try_module_get(owner))
		return -ENODEV;

	/* allocate upon first client reference */
	if (chan->device->device_alloc_chan_resources) {
		ret = chan->device->device_alloc_chan_resources(chan);
		if (ret < 0)
			goto err_out;
	}

	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
		balance_ref_count(chan);

out:
	chan->client_count++;
	return 0;

err_out:
	module_put(owner);
	return ret;
}
/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	/* This channel is not in use, bail out */
	if (!chan->client_count)
		return;

	chan->client_count--;
	module_put(dma_chan_to_owner(chan));

	/* This channel is not in use anymore, free it */
	if (!chan->client_count && chan->device->device_free_chan_resources)
		chan->device->device_free_chan_resources(chan);
}
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			break;
		cpu_relax();
	} while (1);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);
/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("initialization failure\n");
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);
/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);
/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);
/**
 * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
 */
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
{
	int node = dev_to_node(chan->device->dev);
	return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
}
/**
 * min_chan - returns the channel with min count and in the same numa-node as the cpu
 * @cap: capability to match
 * @cpu: cpu index which the channel should be close to
 *
 * If some channels are close to the given cpu, the one with the lowest
 * reference count is returned. Otherwise, cpu is ignored and only the
 * reference count is taken into account.
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *min = NULL;
	struct dma_chan *localmin = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min || chan->table_count < min->table_count)
				min = chan;

			if (dma_chan_is_local(chan, cpu))
				if (!localmin ||
				    chan->table_count < localmin->table_count)
					localmin = chan;
		}
	}

	chan = localmin ? localmin : min;

	if (chan)
		chan->table_count++;

	return chan;
}
/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case. Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			chan = min_chan(cap, cpu);
			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}
int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
	struct dma_device *device;

	if (!chan || !caps)
		return -EINVAL;

	device = chan->device;

	/* check if the channel supports slave transactions */
	if (!test_bit(DMA_SLAVE, device->cap_mask.bits))
		return -ENXIO;

	/*
	 * Check whether it reports it uses the generic slave
	 * capabilities, if not, that means it doesn't support any
	 * kind of slave capabilities reporting.
	 */
	if (!device->directions)
		return -ENXIO;

	caps->src_addr_widths = device->src_addr_widths;
	caps->dst_addr_widths = device->dst_addr_widths;
	caps->directions = device->directions;
	caps->residue_granularity = device->residue_granularity;

	/*
	 * Some devices implement only pause (e.g. to get residuum) but no
	 * resume. However cmd_pause is advertised as pause AND resume.
	 */
	caps->cmd_pause = !!(device->device_pause && device->device_resume);
	caps->cmd_terminate = !!device->device_terminate_all;

	return 0;
}
EXPORT_SYMBOL_GPL(dma_get_slave_caps);
static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (!__dma_device_satisfies_mask(dev, mask)) {
		pr_debug("%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			pr_debug("%s: %s busy\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			pr_debug("%s: %s filter said false\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}
/**
 * dma_get_slave_channel - try to get specific channel exclusively
 * @chan: target channel
 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
	int err = -EBUSY;

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	if (chan->client_count == 0) {
		err = dma_chan_get(chan);
		if (err)
			pr_debug("%s: failed to get %s: (%d)\n",
				 __func__, dma_chan_name(chan), err);
	} else
		chan = NULL;

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);
struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	int err;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	chan = private_candidate(&mask, device, NULL, NULL);
	if (chan) {
		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);
		if (err) {
			pr_debug("%s: failed to get %s: (%d)\n",
				 __func__, dma_chan_name(chan), err);
			chan = NULL;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
		}
	}

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
/**
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;
	int err;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		chan = private_candidate(mask, device, fn, fn_param);
		if (chan) {
			/* Found a suitable channel, try to grab, prep, and
			 * return it. We first set DMA_PRIVATE to disable
			 * balance_ref_count as this channel will not be
			 * published in the general-purpose allocator
			 */
			dma_cap_set(DMA_PRIVATE, device->cap_mask);
			device->privatecnt++;
			err = dma_chan_get(chan);

			if (err == -ENODEV) {
				pr_debug("%s: %s module removed\n",
					 __func__, dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else if (err)
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);
			else
				break;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
			chan = NULL;
		}
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
/**
 * dma_request_slave_channel_reason - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
						  const char *name)
{
	/* If device-tree is present get slave info from here */
	if (dev->of_node)
		return of_dma_request_slave_channel(dev->of_node, name);

	/* If device was enumerated by ACPI get slave info from here */
	if (ACPI_HANDLE(dev))
		return acpi_dma_request_slave_chan_by_name(dev, name);

	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);
/**
 * dma_request_slave_channel - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *dma_request_slave_channel(struct device *dev,
					   const char *name)
{
	struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
	if (IS_ERR(ch))
		return NULL;
	return ch;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);
void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);
/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);
/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);
static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
	#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	return true;
}
static int get_dma_id(struct dma_device *device)
{
	int rc;

	mutex_lock(&dma_list_mutex);

	rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
	if (rc >= 0)
		device->dev_id = rc;

	mutex_unlock(&dma_list_mutex);
	return rc < 0 ? rc : 0;
}
/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	int chancnt = 0, rc;
	struct dma_chan *chan;
	atomic_t *idr_ref;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
		!device->device_prep_dma_xor_val);
	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
		!device->device_prep_dma_pq);
	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
		!device->device_prep_dma_pq_val);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
		!device->device_prep_dma_sg);
	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
		!device->device_prep_dma_cyclic);
	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
		!device->device_prep_interleaved_dma);

	BUG_ON(!device->device_tx_status);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
	if (!idr_ref)
		return -ENOMEM;
	rc = get_dma_id(device);
	if (rc != 0) {
		kfree(idr_ref);
		return rc;
	}

	atomic_set(idr_ref, 0);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = -ENOMEM;
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			goto err_out;
		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
		if (chan->dev == NULL) {
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		chan->chan_id = chancnt++;
		chan->dev->device.class = &dma_devclass;
		chan->dev->device.parent = device->dev;
		chan->dev->chan = chan;
		chan->dev->idr_ref = idr_ref;
		chan->dev->dev_id = device->dev_id;
		atomic_inc(idr_ref);
		dev_set_name(&chan->dev->device, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev->device);
		if (rc) {
			free_percpu(chan->local);
			chan->local = NULL;
			kfree(chan->dev);
			atomic_dec(idr_ref);
			goto err_out;
		}
		chan->client_count = 0;
	}
	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;

err_out:
	/* if we never registered a channel just release the idr */
	if (atomic_read(idr_ref) == 0) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, device->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(idr_ref);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines, dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
}
EXPORT_SYMBOL(dma_async_device_unregister);
struct dmaengine_unmap_pool {
	struct kmem_cache *cache;
	const char *name;
	mempool_t *pool;
	size_t size;
};

#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
static struct dmaengine_unmap_pool unmap_pool[] = {
	__UNMAP_POOL(2),
	#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	__UNMAP_POOL(16),
	__UNMAP_POOL(128),
	__UNMAP_POOL(256),
	#endif
};
static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
{
	int order = get_count_order(nr);

	switch (order) {
	case 0 ... 1:
		return &unmap_pool[0];
	case 2 ... 4:
		return &unmap_pool[1];
	case 5 ... 7:
		return &unmap_pool[2];
	case 8:
		return &unmap_pool[3];
	default:
		BUG();
		return NULL;
	}
}
static void dmaengine_unmap(struct kref *kref)
{
	struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
	struct device *dev = unmap->dev;
	int cnt, i;

	cnt = unmap->to_cnt;
	for (i = 0; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_TO_DEVICE);
	cnt += unmap->from_cnt;
	for (; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_FROM_DEVICE);
	cnt += unmap->bidi_cnt;
	for (; i < cnt; i++) {
		if (unmap->addr[i] == 0)
			continue;
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_BIDIRECTIONAL);
	}
	cnt = unmap->map_cnt;
	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
}
void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
	if (unmap)
		kref_put(&unmap->kref, dmaengine_unmap);
}
EXPORT_SYMBOL_GPL(dmaengine_unmap_put);
static void dmaengine_destroy_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];

		if (p->pool)
			mempool_destroy(p->pool);
		p->pool = NULL;
		if (p->cache)
			kmem_cache_destroy(p->cache);
		p->cache = NULL;
	}
}
static int __init dmaengine_init_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];
		size_t size;

		size = sizeof(struct dmaengine_unmap_data) +
		       sizeof(dma_addr_t) * p->size;

		p->cache = kmem_cache_create(p->name, size, 0,
					     SLAB_HWCACHE_ALIGN, NULL);
		if (!p->cache)
			break;
		p->pool = mempool_create_slab_pool(1, p->cache);
		if (!p->pool)
			break;
	}

	if (i == ARRAY_SIZE(unmap_pool))
		return 0;

	dmaengine_destroy_unmap_pool();
	return -ENOMEM;
}
struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	struct dmaengine_unmap_data *unmap;

	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
	if (!unmap)
		return NULL;

	memset(unmap, 0, sizeof(*unmap));
	kref_init(&unmap->kref);
	unmap->dev = dev;
	unmap->map_cnt = nr;

	return unmap;
}
EXPORT_SYMBOL(dmaengine_get_unmap_data);
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
				  struct dma_chan *chan)
{
	tx->chan = chan;
	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
	#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);
/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_COMPLETE;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s timeout waiting for descriptor submission\n",
			       __func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);
static int __init dma_bus_init(void)
{
	int err = dmaengine_init_unmap_pool();

	if (err)
		return err;
	return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);