Linux 4.2.1
linux/fpc-iii.git: drivers/dma/dmaengine.c
/*
 * Copyright(c) 2004 - 2006 Intel Corporation.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; the list is
 * protected by a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can
 * call dma_request_channel(). Once a channel is allocated a reference is
 * taken against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is just set up by the driver.
 *
 * See Documentation/dmaengine.txt for more details.
 */

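/*
 * Illustrative sketch (not part of the original file): the public-channel
 * client pattern described above, roughly as a client module would use it.
 * The function name is hypothetical and descriptor preparation/submission
 * is elided.
 *
 *	#include <linux/dmaengine.h>
 *
 *	static void example_memcpy_offload(void)
 *	{
 *		struct dma_chan *chan;
 *
 *		dmaengine_get();
 *		chan = dma_find_channel(DMA_MEMCPY);
 *		if (chan) {
 *			// prep, submit and issue descriptors on chan here
 *		}
 *		dmaengine_put();
 *	}
 */
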
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
#include <linux/mempool.h>

static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDR(dma_idr);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev - device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}

static ssize_t memcpy_count_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(memcpy_count);

static ssize_t bytes_transferred_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(bytes_transferred);

static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(in_use);

static struct attribute *dma_dev_attrs[] = {
	&dev_attr_memcpy_count.attr,
	&dev_attr_bytes_transferred.attr,
	&dev_attr_in_use.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dma_dev);

static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	if (atomic_dec_and_test(chan_dev->idr_ref)) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, chan_dev->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(chan_dev->idr_ref);
	}
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_groups	= dma_dev_groups,
	.dev_release	= chan_dev_release,
};

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device,
			    const dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);
	int ret;

	/* The channel is already in use, update client count */
	if (chan->client_count) {
		__module_get(owner);
		goto out;
	}

	if (!try_module_get(owner))
		return -ENODEV;

	/* allocate upon first client reference */
	if (chan->device->device_alloc_chan_resources) {
		ret = chan->device->device_alloc_chan_resources(chan);
		if (ret < 0)
			goto err_out;
	}

	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
		balance_ref_count(chan);

out:
	chan->client_count++;
	return 0;

err_out:
	module_put(owner);
	return ret;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	/* This channel is not in use, bail out */
	if (!chan->client_count)
		return;

	chan->client_count--;
	module_put(dma_chan_to_owner(chan));

	/* This channel is not in use anymore, free it */
	if (!chan->client_count && chan->device->device_free_chan_resources)
		chan->device->device_free_chan_resources(chan);

	/* If the channel is used via a DMA request router, free the mapping */
	if (chan->router && chan->router->route_free) {
		chan->router->route_free(chan->router->dev, chan->route_data);
		chan->router = NULL;
		chan->route_data = NULL;
	}
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			break;
		cpu_relax();
	} while (1);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("initialization failure\n");
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

/**
 * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
 */
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
{
	int node = dev_to_node(chan->device->dev);
	return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
}

/**
 * min_chan - returns the channel with min count and in the same numa-node as the cpu
 * @cap: capability to match
 * @cpu: cpu index which the channel should be close to
 *
 * If some channels are close to the given cpu, the one with the lowest
 * reference count is returned. Otherwise, cpu is ignored and only the
 * reference count is taken into account.
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *min = NULL;
	struct dma_chan *localmin = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min || chan->table_count < min->table_count)
				min = chan;

			if (dma_chan_is_local(chan, cpu))
				if (!localmin ||
				    chan->table_count < localmin->table_count)
					localmin = chan;
		}
	}

	chan = localmin ? localmin : min;

	if (chan)
		chan->table_count++;

	return chan;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case. Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			chan = min_chan(cap, cpu);
			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}

int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
	struct dma_device *device;

	if (!chan || !caps)
		return -EINVAL;

	device = chan->device;

	/* check if the channel supports slave transactions */
	if (!test_bit(DMA_SLAVE, device->cap_mask.bits))
		return -ENXIO;

	/*
	 * Check whether the device reports the generic slave capabilities;
	 * if not, it does not support slave capability reporting at all.
	 */
	if (!device->directions)
		return -ENXIO;

	caps->src_addr_widths = device->src_addr_widths;
	caps->dst_addr_widths = device->dst_addr_widths;
	caps->directions = device->directions;
	caps->residue_granularity = device->residue_granularity;

	/*
	 * Some devices implement only pause (e.g. to get the residue) but no
	 * resume. However cmd_pause is advertised as pause AND resume.
	 */
	caps->cmd_pause = !!(device->device_pause && device->device_resume);
	caps->cmd_terminate = !!device->device_terminate_all;

	return 0;
}
EXPORT_SYMBOL_GPL(dma_get_slave_caps);

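/*
 * Illustrative sketch (assumption, not from this file): a client might query
 * slave capabilities before configuring a channel obtained from one of the
 * request helpers below.
 *
 *	struct dma_slave_caps caps;
 *
 *	if (!dma_get_slave_caps(chan, &caps) &&
 *	    (caps.directions & BIT(DMA_MEM_TO_DEV)) &&
 *	    (caps.dst_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))) {
 *		// channel can write to the device with 32-bit accesses
 *	}
 */
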
static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (!__dma_device_satisfies_mask(dev, mask)) {
		pr_debug("%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			pr_debug("%s: %s busy\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			pr_debug("%s: %s filter said false\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}

/**
 * dma_get_slave_channel - try to get specific channel exclusively
 * @chan: target channel
 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
	int err = -EBUSY;

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	if (chan->client_count == 0) {
		err = dma_chan_get(chan);
		if (err)
			pr_debug("%s: failed to get %s: (%d)\n",
				 __func__, dma_chan_name(chan), err);
	} else
		chan = NULL;

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);

struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	int err;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	chan = private_candidate(&mask, device, NULL, NULL);
	if (chan) {
		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);
		if (err) {
			pr_debug("%s: failed to get %s: (%d)\n",
				 __func__, dma_chan_name(chan), err);
			chan = NULL;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
		}
	}

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);

/**
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback used to filter available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;
	int err;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		chan = private_candidate(mask, device, fn, fn_param);
		if (chan) {
			/* Found a suitable channel, try to grab, prep, and
			 * return it. We first set DMA_PRIVATE to disable
			 * balance_ref_count as this channel will not be
			 * published in the general-purpose allocator
			 */
			dma_cap_set(DMA_PRIVATE, device->cap_mask);
			device->privatecnt++;
			err = dma_chan_get(chan);

			if (err == -ENODEV) {
				pr_debug("%s: %s module removed\n",
					 __func__, dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else if (err)
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);
			else
				break;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
			chan = NULL;
		}
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);

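/*
 * Illustrative sketch (assumption, not from this file): requesting an
 * exclusive channel through the dma_request_channel() wrapper with a filter
 * function. The filter and the my_dev parameter are hypothetical.
 *
 *	static bool my_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->device->dev == param;
 *	}
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, my_filter, my_dev);
 *	if (chan) {
 *		// use the channel exclusively, then:
 *		dma_release_channel(chan);
 *	}
 */
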
/**
 * dma_request_slave_channel_reason - try to allocate an exclusive slave channel
 * @dev:	pointer to client device structure
 * @name:	slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
						  const char *name)
{
	/* If device-tree is present get slave info from here */
	if (dev->of_node)
		return of_dma_request_slave_channel(dev->of_node, name);

	/* If device was enumerated by ACPI get slave info from here */
	if (ACPI_HANDLE(dev))
		return acpi_dma_request_slave_chan_by_name(dev, name);

	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);

/**
 * dma_request_slave_channel - try to allocate an exclusive slave channel
 * @dev:	pointer to client device structure
 * @name:	slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *dma_request_slave_channel(struct device *dev,
					   const char *name)
{
	struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
	if (IS_ERR(ch))
		return NULL;

	dma_cap_set(DMA_PRIVATE, ch->device->cap_mask);
	ch->device->privatecnt++;

	return ch;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);

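/*
 * Illustrative sketch (assumption, not from this file): typical slave usage
 * in a peripheral driver. The "rx" channel name and the dev pointer are
 * placeholders for whatever the platform binding defines.
 *
 *	struct dma_chan *chan;
 *
 *	chan = dma_request_slave_channel_reason(dev, "rx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);	// e.g. -EPROBE_DEFER or -ENODEV
 *
 *	// configure via dmaengine_slave_config(), prep/submit descriptors,
 *	// then release:
 *	dma_release_channel(chan);
 */
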
void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
	#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	return true;
}

static int get_dma_id(struct dma_device *device)
{
	int rc;

	mutex_lock(&dma_list_mutex);

	rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
	if (rc >= 0)
		device->dev_id = rc;

	mutex_unlock(&dma_list_mutex);
	return rc < 0 ? rc : 0;
}

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	int chancnt = 0, rc;
	struct dma_chan *chan;
	atomic_t *idr_ref;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
		!device->device_prep_dma_xor_val);
	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
		!device->device_prep_dma_pq);
	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
		!device->device_prep_dma_pq_val);
	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
		!device->device_prep_dma_memset);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
		!device->device_prep_dma_sg);
	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
		!device->device_prep_dma_cyclic);
	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
		!device->device_prep_interleaved_dma);

	BUG_ON(!device->device_tx_status);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
	if (!idr_ref)
		return -ENOMEM;
	rc = get_dma_id(device);
	if (rc != 0) {
		kfree(idr_ref);
		return rc;
	}

	atomic_set(idr_ref, 0);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = -ENOMEM;
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			goto err_out;
		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
		if (chan->dev == NULL) {
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		chan->chan_id = chancnt++;
		chan->dev->device.class = &dma_devclass;
		chan->dev->device.parent = device->dev;
		chan->dev->chan = chan;
		chan->dev->idr_ref = idr_ref;
		chan->dev->dev_id = device->dev_id;
		atomic_inc(idr_ref);
		dev_set_name(&chan->dev->device, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev->device);
		if (rc) {
			free_percpu(chan->local);
			chan->local = NULL;
			kfree(chan->dev);
			atomic_dec(idr_ref);
			goto err_out;
		}
		chan->client_count = 0;
	}
	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;

err_out:
	/* if we never registered a channel just release the idr */
	if (atomic_read(idr_ref) == 0) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, device->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(idr_ref);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
}
EXPORT_SYMBOL(dma_async_device_unregister);

struct dmaengine_unmap_pool {
	struct kmem_cache *cache;
	const char *name;
	mempool_t *pool;
	size_t size;
};

#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
static struct dmaengine_unmap_pool unmap_pool[] = {
	__UNMAP_POOL(2),
	#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	__UNMAP_POOL(16),
	__UNMAP_POOL(128),
	__UNMAP_POOL(256),
	#endif
};

static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
{
	int order = get_count_order(nr);

	switch (order) {
	case 0 ... 1:
		return &unmap_pool[0];
	case 2 ... 4:
		return &unmap_pool[1];
	case 5 ... 7:
		return &unmap_pool[2];
	case 8:
		return &unmap_pool[3];
	default:
		BUG();
		return NULL;
	}
}

static void dmaengine_unmap(struct kref *kref)
{
	struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
	struct device *dev = unmap->dev;
	int cnt, i;

	cnt = unmap->to_cnt;
	for (i = 0; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_TO_DEVICE);
	cnt += unmap->from_cnt;
	for (; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_FROM_DEVICE);
	cnt += unmap->bidi_cnt;
	for (; i < cnt; i++) {
		if (unmap->addr[i] == 0)
			continue;
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_BIDIRECTIONAL);
	}
	cnt = unmap->map_cnt;
	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
}

void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
	if (unmap)
		kref_put(&unmap->kref, dmaengine_unmap);
}
EXPORT_SYMBOL_GPL(dmaengine_unmap_put);

static void dmaengine_destroy_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];

		if (p->pool)
			mempool_destroy(p->pool);
		p->pool = NULL;
		if (p->cache)
			kmem_cache_destroy(p->cache);
		p->cache = NULL;
	}
}

static int __init dmaengine_init_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];
		size_t size;

		size = sizeof(struct dmaengine_unmap_data) +
		       sizeof(dma_addr_t) * p->size;

		p->cache = kmem_cache_create(p->name, size, 0,
					     SLAB_HWCACHE_ALIGN, NULL);
		if (!p->cache)
			break;
		p->pool = mempool_create_slab_pool(1, p->cache);
		if (!p->pool)
			break;
	}

	if (i == ARRAY_SIZE(unmap_pool))
		return 0;

	dmaengine_destroy_unmap_pool();
	return -ENOMEM;
}

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	struct dmaengine_unmap_data *unmap;

	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
	if (!unmap)
		return NULL;

	memset(unmap, 0, sizeof(*unmap));
	kref_init(&unmap->kref);
	unmap->dev = dev;
	unmap->map_cnt = nr;

	return unmap;
}
EXPORT_SYMBOL(dmaengine_get_unmap_data);

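/*
 * Illustrative sketch (assumption, not from this file): how an offload client
 * typically pairs dmaengine_get_unmap_data() with dmaengine_unmap_put() for a
 * single source/destination copy. The page, offset and len values are
 * placeholders.
 *
 *	struct dmaengine_unmap_data *unmap;
 *
 *	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
 *	if (!unmap)
 *		return -ENOMEM;
 *	unmap->len = len;
 *	unmap->addr[0] = dma_map_page(dev, src_page, src_off, len, DMA_TO_DEVICE);
 *	unmap->to_cnt = 1;
 *	unmap->addr[1] = dma_map_page(dev, dst_page, dst_off, len, DMA_FROM_DEVICE);
 *	unmap->from_cnt = 1;
 *	// attach to a descriptor with dma_set_unmap(tx, unmap), submit, then:
 *	dmaengine_unmap_put(unmap);
 */
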
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
	#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_COMPLETE;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s timeout waiting for descriptor submission\n",
			       __func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);

static int __init dma_bus_init(void)
{
	int err = dmaengine_init_unmap_pool();

	if (err)
		return err;
	return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);