drivers/dma/dmaengine.c
/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; it is protected
 * by a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can
 * call dma_request_channel(). Once a channel is allocated a reference is
 * taken against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is just set up by the driver.
 *
 * See Documentation/dmaengine.txt for more details.
 */
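
/*
 * A minimal client sketch of the exclusive-channel model described above
 * (not part of this file); "my_filter" and "my_param" are hypothetical:
 *
 *      dma_cap_mask_t mask;
 *      struct dma_chan *chan;
 *
 *      dma_cap_zero(mask);
 *      dma_cap_set(DMA_MEMCPY, mask);
 *      chan = dma_request_channel(mask, my_filter, my_param);
 *      if (chan) {
 *              ... use the channel exclusively ...
 *              dma_release_channel(chan);
 *      }
 */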
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>

static DEFINE_MUTEX(dma_list_mutex);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;
static struct idr dma_idr;

/* --- sysfs implementation --- */
/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev - device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
        struct dma_chan_dev *chan_dev;

        chan_dev = container_of(dev, typeof(*chan_dev), device);
        return chan_dev->chan;
}
static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct dma_chan *chan;
        unsigned long count = 0;
        int i;
        int err;

        mutex_lock(&dma_list_mutex);
        chan = dev_to_dma_chan(dev);
        if (chan) {
                for_each_possible_cpu(i)
                        count += per_cpu_ptr(chan->local, i)->memcpy_count;
                err = sprintf(buf, "%lu\n", count);
        } else
                err = -ENODEV;
        mutex_unlock(&dma_list_mutex);

        return err;
}
static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
                                      char *buf)
{
        struct dma_chan *chan;
        unsigned long count = 0;
        int i;
        int err;

        mutex_lock(&dma_list_mutex);
        chan = dev_to_dma_chan(dev);
        if (chan) {
                for_each_possible_cpu(i)
                        count += per_cpu_ptr(chan->local, i)->bytes_transferred;
                err = sprintf(buf, "%lu\n", count);
        } else
                err = -ENODEV;
        mutex_unlock(&dma_list_mutex);

        return err;
}
static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct dma_chan *chan;
        int err;

        mutex_lock(&dma_list_mutex);
        chan = dev_to_dma_chan(dev);
        if (chan)
                err = sprintf(buf, "%d\n", chan->client_count);
        else
                err = -ENODEV;
        mutex_unlock(&dma_list_mutex);

        return err;
}
static struct device_attribute dma_attrs[] = {
        __ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
        __ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
        __ATTR(in_use, S_IRUGO, show_in_use, NULL),
        __ATTR_NULL
};
static void chan_dev_release(struct device *dev)
{
        struct dma_chan_dev *chan_dev;

        chan_dev = container_of(dev, typeof(*chan_dev), device);
        if (atomic_dec_and_test(chan_dev->idr_ref)) {
                mutex_lock(&dma_list_mutex);
                idr_remove(&dma_idr, chan_dev->dev_id);
                mutex_unlock(&dma_list_mutex);
                kfree(chan_dev->idr_ref);
        }
        kfree(chan_dev);
}
static struct class dma_devclass = {
        .name           = "dma",
        .dev_attrs      = dma_attrs,
        .dev_release    = chan_dev_release,
};

/* --- client and device registration --- */
#define dma_device_satisfies_mask(device, mask) \
        __dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
{
        dma_cap_mask_t has;

        bitmap_and(has.bits, want->bits, device->cap_mask.bits,
                   DMA_TX_TYPE_END);
        return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}
static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
        return chan->device->dev->driver->owner;
}
/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
        struct module *owner = dma_chan_to_owner(chan);

        while (chan->client_count < dmaengine_ref_count) {
                __module_get(owner);
                chan->client_count++;
        }
}
/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
        int err = -ENODEV;
        struct module *owner = dma_chan_to_owner(chan);

        if (chan->client_count) {
                __module_get(owner);
                err = 0;
        } else if (try_module_get(owner))
                err = 0;

        if (err == 0)
                chan->client_count++;

        /* allocate upon first client reference */
        if (chan->client_count == 1 && err == 0) {
                int desc_cnt = chan->device->device_alloc_chan_resources(chan);

                if (desc_cnt < 0) {
                        err = desc_cnt;
                        chan->client_count = 0;
                        module_put(owner);
                } else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
                        balance_ref_count(chan);
        }

        return err;
}
/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
        if (!chan->client_count)
                return; /* this channel failed alloc_chan_resources */
        chan->client_count--;
        module_put(dma_chan_to_owner(chan));
        if (chan->client_count == 0)
                chan->device->device_free_chan_resources(chan);
}
/**
 * dma_sync_wait - spin wait for a transaction to complete
 * @chan: channel the transaction was submitted to
 * @cookie: transaction identifier to check status of
 */
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
        enum dma_status status;
        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

        dma_async_issue_pending(chan);
        do {
                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
                        printk(KERN_ERR "dma_sync_wait_timeout!\n");
                        return DMA_ERROR;
                }
        } while (status == DMA_IN_PROGRESS);

        return status;
}
EXPORT_SYMBOL(dma_sync_wait);
/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
        struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
static int __init dma_channel_table_init(void)
{
        enum dma_transaction_type cap;
        int err = 0;

        bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

        /* 'interrupt', 'private', and 'slave' are channel capabilities,
         * but are not associated with an operation so they do not need
         * an entry in the channel_table
         */
        clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
        clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
        clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

        for_each_dma_cap_mask(cap, dma_cap_mask_all) {
                channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
                if (!channel_table[cap]) {
                        err = -ENOMEM;
                        break;
                }
        }

        if (err) {
                pr_err("dmaengine: initialization failure\n");
                for_each_dma_cap_mask(cap, dma_cap_mask_all)
                        if (channel_table[cap])
                                free_percpu(channel_table[cap]);
        }

        return err;
}
arch_initcall(dma_channel_table_init);
/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
        return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);
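
/*
 * A minimal sketch of the opportunistic model described in the header
 * comment (not part of this file): register interest once, then look up a
 * per-cpu channel on the hot path; dma_find_channel() may return NULL when
 * no public provider is available:
 *
 *      dmaengine_get();
 *      ...
 *      chan = dma_find_channel(DMA_MEMCPY);
 *      if (chan)
 *              ... submit an operation on chan ...
 *      ...
 *      dmaengine_put();
 */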
/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
        struct dma_device *device;
        struct dma_chan *chan;

        rcu_read_lock();
        list_for_each_entry_rcu(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        if (chan->client_count)
                                device->device_issue_pending(chan);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);
/**
 * nth_chan - returns the nth channel of the given capability
 * @cap: capability to match
 * @n: nth channel desired
 *
 * Defaults to returning the channel with the desired capability and the
 * lowest reference count when 'n' cannot be satisfied. Must be called
 * under dma_list_mutex.
 */
static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
{
        struct dma_device *device;
        struct dma_chan *chan;
        struct dma_chan *ret = NULL;
        struct dma_chan *min = NULL;

        list_for_each_entry(device, &dma_device_list, global_node) {
                if (!dma_has_cap(cap, device->cap_mask) ||
                    dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node) {
                        if (!chan->client_count)
                                continue;
                        if (!min)
                                min = chan;
                        else if (chan->table_count < min->table_count)
                                min = chan;

                        if (n-- == 0) {
                                ret = chan;
                                break; /* done */
                        }
                }
                if (ret)
                        break; /* done */
        }

        if (!ret)
                ret = min;

        if (ret)
                ret->table_count++;

        return ret;
}
/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case. Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
        struct dma_chan *chan;
        struct dma_device *device;
        int cpu;
        int cap;
        int n;

        /* undo the last distribution */
        for_each_dma_cap_mask(cap, dma_cap_mask_all)
                for_each_possible_cpu(cpu)
                        per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

        list_for_each_entry(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        chan->table_count = 0;
        }

        /* don't populate the channel_table if no clients are available */
        if (!dmaengine_ref_count)
                return;

        /* redistribute available channels */
        n = 0;
        for_each_dma_cap_mask(cap, dma_cap_mask_all)
                for_each_online_cpu(cpu) {
                        if (num_possible_cpus() > 1)
                                chan = nth_chan(cap, n++);
                        else
                                chan = nth_chan(cap, -1);

                        per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
                }
}
static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev,
                                          dma_filter_fn fn, void *fn_param)
{
        struct dma_chan *chan;

        if (!__dma_device_satisfies_mask(dev, mask)) {
                pr_debug("%s: wrong capabilities\n", __func__);
                return NULL;
        }
        /* devices with multiple channels need special handling as we need to
         * ensure that all channels are either private or public.
         */
        if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
                list_for_each_entry(chan, &dev->channels, device_node) {
                        /* some channels are already publicly allocated */
                        if (chan->client_count)
                                return NULL;
                }

        list_for_each_entry(chan, &dev->channels, device_node) {
                if (chan->client_count) {
                        pr_debug("%s: %s busy\n",
                                 __func__, dma_chan_name(chan));
                        continue;
                }
                if (fn && !fn(chan, fn_param)) {
                        pr_debug("%s: %s filter said false\n",
                                 __func__, dma_chan_name(chan));
                        continue;
                }
                return chan;
        }

        return NULL;
}
/**
 * dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 */
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)
{
        struct dma_device *device, *_d;
        struct dma_chan *chan = NULL;
        int err;

        /* Find a channel */
        mutex_lock(&dma_list_mutex);
        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
                chan = private_candidate(mask, device, fn, fn_param);
                if (chan) {
                        /* Found a suitable channel, try to grab, prep, and
                         * return it. We first set DMA_PRIVATE to disable
                         * balance_ref_count as this channel will not be
                         * published in the general-purpose allocator
                         */
                        dma_cap_set(DMA_PRIVATE, device->cap_mask);
                        device->privatecnt++;
                        err = dma_chan_get(chan);

                        if (err == -ENODEV) {
                                pr_debug("%s: %s module removed\n", __func__,
                                         dma_chan_name(chan));
                                list_del_rcu(&device->global_node);
                        } else if (err)
                                pr_err("dmaengine: failed to get %s: (%d)\n",
                                       dma_chan_name(chan), err);
                        else
                                break;
                        if (--device->privatecnt == 0)
                                dma_cap_clear(DMA_PRIVATE, device->cap_mask);
                        chan->private = NULL;
                        chan = NULL;
                }
        }
        mutex_unlock(&dma_list_mutex);

        pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
                 chan ? dma_chan_name(chan) : NULL);

        return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
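
/*
 * A sketch of a dma_filter_fn as consumed by __dma_request_channel() (not
 * part of this file); matching on a specific provider device is one common
 * policy, with "my_dev" a hypothetical handle passed via @fn_param:
 *
 *      static bool my_filter(struct dma_chan *chan, void *fn_param)
 *      {
 *              struct device *my_dev = fn_param;
 *
 *              return chan->device->dev == my_dev;
 *      }
 */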
/**
 * dma_release_channel - release a channel obtained via dma_request_channel()
 * @chan: channel to free
 */
void dma_release_channel(struct dma_chan *chan)
{
        mutex_lock(&dma_list_mutex);
        WARN_ONCE(chan->client_count != 1,
                  "chan reference count %d != 1\n", chan->client_count);
        dma_chan_put(chan);
        /* drop PRIVATE cap enabled by __dma_request_channel() */
        if (--chan->device->privatecnt == 0)
                dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
        chan->private = NULL;
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);
/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
        struct dma_device *device, *_d;
        struct dma_chan *chan;
        int err;

        mutex_lock(&dma_list_mutex);
        dmaengine_ref_count++;

        /* try to grab channels */
        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node) {
                        err = dma_chan_get(chan);
                        if (err == -ENODEV) {
                                /* module removed before we could use it */
                                list_del_rcu(&device->global_node);
                                break;
                        } else if (err)
                                pr_err("dmaengine: failed to get %s: (%d)\n",
                                       dma_chan_name(chan), err);
                }
        }

        /* if this is the first reference and there were channels
         * waiting we need to rebalance to get those channels
         * incorporated into the channel table
         */
        if (dmaengine_ref_count == 1)
                dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);
/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
        struct dma_device *device;
        struct dma_chan *chan;

        mutex_lock(&dma_list_mutex);
        dmaengine_ref_count--;
        BUG_ON(dmaengine_ref_count < 0);
        /* drop channel references */
        list_for_each_entry(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        dma_chan_put(chan);
        }
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);
static bool device_has_all_tx_types(struct dma_device *device)
{
        /* A device that satisfies this test has channels that will never cause
         * an async_tx channel switch event as all possible operation types can
         * be handled.
         */
        #ifdef CONFIG_ASYNC_TX_DMA
        if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
                return false;
        #endif

        #if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
        if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
                return false;
        #endif

        #if defined(CONFIG_ASYNC_MEMSET) || defined(CONFIG_ASYNC_MEMSET_MODULE)
        if (!dma_has_cap(DMA_MEMSET, device->cap_mask))
                return false;
        #endif

        #if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
        if (!dma_has_cap(DMA_XOR, device->cap_mask))
                return false;

        #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
        if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
                return false;
        #endif
        #endif

        #if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
        if (!dma_has_cap(DMA_PQ, device->cap_mask))
                return false;

        #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
        if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
                return false;
        #endif
        #endif

        return true;
}
static int get_dma_id(struct dma_device *device)
{
        int rc;

 idr_retry:
        if (!idr_pre_get(&dma_idr, GFP_KERNEL))
                return -ENOMEM;
        mutex_lock(&dma_list_mutex);
        rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
        mutex_unlock(&dma_list_mutex);
        if (rc == -EAGAIN)
                goto idr_retry;
        else if (rc != 0)
                return rc;

        return 0;
}
/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
        int chancnt = 0, rc;
        struct dma_chan *chan;
        atomic_t *idr_ref;

        if (!device)
                return -ENODEV;

        /* validate device routines */
        BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
                !device->device_prep_dma_memcpy);
        BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
                !device->device_prep_dma_xor);
        BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
                !device->device_prep_dma_xor_val);
        BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
                !device->device_prep_dma_pq);
        BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
                !device->device_prep_dma_pq_val);
        BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
                !device->device_prep_dma_memset);
        BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
                !device->device_prep_dma_interrupt);
        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
                !device->device_prep_slave_sg);
        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
                !device->device_terminate_all);

        BUG_ON(!device->device_alloc_chan_resources);
        BUG_ON(!device->device_free_chan_resources);
        BUG_ON(!device->device_is_tx_complete);
        BUG_ON(!device->device_issue_pending);
        BUG_ON(!device->dev);

        /* note: this only matters in the
         * CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH=y case
         */
        if (device_has_all_tx_types(device))
                dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

        idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
        if (!idr_ref)
                return -ENOMEM;
        rc = get_dma_id(device);
        if (rc != 0) {
                kfree(idr_ref);
                return rc;
        }

        atomic_set(idr_ref, 0);

        /* represent channels in sysfs. Probably want devs too */
        list_for_each_entry(chan, &device->channels, device_node) {
                rc = -ENOMEM;
                chan->local = alloc_percpu(typeof(*chan->local));
                if (chan->local == NULL)
                        goto err_out;
                chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
                if (chan->dev == NULL) {
                        free_percpu(chan->local);
                        chan->local = NULL;
                        goto err_out;
                }

                chan->chan_id = chancnt++;
                chan->dev->device.class = &dma_devclass;
                chan->dev->device.parent = device->dev;
                chan->dev->chan = chan;
                chan->dev->idr_ref = idr_ref;
                chan->dev->dev_id = device->dev_id;
                atomic_inc(idr_ref);
                dev_set_name(&chan->dev->device, "dma%dchan%d",
                             device->dev_id, chan->chan_id);

                rc = device_register(&chan->dev->device);
                if (rc) {
                        free_percpu(chan->local);
                        chan->local = NULL;
                        kfree(chan->dev);
                        atomic_dec(idr_ref);
                        goto err_out;
                }
                chan->client_count = 0;
        }
        device->chancnt = chancnt;

        mutex_lock(&dma_list_mutex);
        /* take references on public channels */
        if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
                list_for_each_entry(chan, &device->channels, device_node) {
                        /* if clients are already waiting for channels we need
                         * to take references on their behalf
                         */
                        if (dma_chan_get(chan) == -ENODEV) {
                                /* note we can only get here for the first
                                 * channel as the remaining channels are
                                 * guaranteed to get a reference
                                 */
                                rc = -ENODEV;
                                mutex_unlock(&dma_list_mutex);
                                goto err_out;
                        }
                }
        list_add_tail_rcu(&device->global_node, &dma_device_list);
        if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                device->privatecnt++; /* Always private */
        dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);

        return 0;

err_out:
        /* if we never registered a channel just release the idr */
        if (atomic_read(idr_ref) == 0) {
                mutex_lock(&dma_list_mutex);
                idr_remove(&dma_idr, device->dev_id);
                mutex_unlock(&dma_list_mutex);
                kfree(idr_ref);
                return rc;
        }

        list_for_each_entry(chan, &device->channels, device_node) {
                if (chan->local == NULL)
                        continue;
                mutex_lock(&dma_list_mutex);
                chan->dev->chan = NULL;
                mutex_unlock(&dma_list_mutex);
                device_unregister(&chan->dev->device);
                free_percpu(chan->local);
        }
        return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
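
/*
 * A minimal provider-side sketch (not part of this file): a driver sets the
 * capabilities and routines validated above before registering. "dd" (the
 * driver's struct dma_device) and the "my_*" callbacks are hypothetical,
 * and setup of the channels list is elided:
 *
 *      dma_cap_set(DMA_MEMCPY, dd->cap_mask);
 *      dd->device_alloc_chan_resources = my_alloc_chan_resources;
 *      dd->device_free_chan_resources = my_free_chan_resources;
 *      dd->device_prep_dma_memcpy = my_prep_dma_memcpy;
 *      dd->device_is_tx_complete = my_is_tx_complete;
 *      dd->device_issue_pending = my_issue_pending;
 *      dd->dev = &my_platform_device->dev;
 *      ...
 *      rc = dma_async_device_register(dd);
 */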
/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines, dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
        struct dma_chan *chan;

        mutex_lock(&dma_list_mutex);
        list_del_rcu(&device->global_node);
        dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);

        list_for_each_entry(chan, &device->channels, device_node) {
                WARN_ONCE(chan->client_count,
                          "%s called while %d clients hold a reference\n",
                          __func__, chan->client_count);
                mutex_lock(&dma_list_mutex);
                chan->dev->chan = NULL;
                mutex_unlock(&dma_list_mutex);
                device_unregister(&chan->dev->device);
                free_percpu(chan->local);
        }
}
EXPORT_SYMBOL(dma_async_device_unregister);
/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
                            void *src, size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        unsigned long flags;

        dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
        dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
        flags = DMA_CTRL_ACK |
                DMA_COMPL_SRC_UNMAP_SINGLE |
                DMA_COMPL_DEST_UNMAP_SINGLE;
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

        if (!tx) {
                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        preempt_disable();
        __this_cpu_add(chan->local->bytes_transferred, len);
        __this_cpu_inc(chan->local->memcpy_count);
        preempt_enable();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
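
/*
 * A minimal caller sketch (not part of this file); "chan", "dst", "src" and
 * "len" are assumed to be set up by the caller, and dma_sync_wait() above
 * both issues pending work and spins on the cookie:
 *
 *      dma_cookie_t cookie;
 *
 *      cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *      if (cookie < 0)
 *              return -ENOMEM;
 *      if (dma_sync_wait(chan, cookie) != DMA_SUCCESS)
 *              ... the copy failed or timed out ...
 */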
/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages)
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
                           unsigned int offset, void *kdata, size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        unsigned long flags;

        dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
        dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
        flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

        if (!tx) {
                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        preempt_disable();
        __this_cpu_add(chan->local->bytes_transferred, len);
        __this_cpu_inc(chan->local->memcpy_count);
        preempt_enable();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
                          unsigned int dest_off, struct page *src_pg, unsigned int src_off,
                          size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        unsigned long flags;

        dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
        dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
                                DMA_FROM_DEVICE);
        flags = DMA_CTRL_ACK;
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

        if (!tx) {
                dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        preempt_disable();
        __this_cpu_add(chan->local->bytes_transferred, len);
        __this_cpu_inc(chan->local->memcpy_count);
        preempt_enable();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
                                  struct dma_chan *chan)
{
        tx->chan = chan;
        spin_lock_init(&tx->lock);
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);
/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

        if (!tx)
                return DMA_SUCCESS;

        while (tx->cookie == -EBUSY) {
                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
                        pr_err("%s timeout waiting for descriptor submission\n",
                               __func__);
                        return DMA_ERROR;
                }
                cpu_relax();
        }
        return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
/* dma_run_dependencies - helper routine for dma drivers to process
 *      (start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
        struct dma_async_tx_descriptor *dep = tx->next;
        struct dma_async_tx_descriptor *dep_next;
        struct dma_chan *chan;

        if (!dep)
                return;

        /* we'll submit tx->next now, so clear the link */
        tx->next = NULL;
        chan = dep->chan;

        /* keep submitting up until a channel switch is detected
         * in that case we will be called again as a result of
         * processing the interrupt from async_tx_channel_switch
         */
        for (; dep; dep = dep_next) {
                spin_lock_bh(&dep->lock);
                dep->parent = NULL;
                dep_next = dep->next;
                if (dep_next && dep_next->chan == chan)
                        dep->next = NULL; /* ->next will be submitted */
                else
                        dep_next = NULL; /* submit current dep and terminate */
                spin_unlock_bh(&dep->lock);

                dep->tx_submit(dep);
        }

        chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);
static int __init dma_bus_init(void)
{
        idr_init(&dma_idr);
        mutex_init(&dma_list_mutex);
        return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);