at_hdmac: move to generic DMA binding
/*
 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
 * The only Atmel DMA Controller that is not covered by this driver is the one
 * found on AT91SAM9263.
 */
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include "at_hdmac_regs.h"
#include "dmaengine.h"
/*
 * Glossary
 * --------
 *
 * at_hdmac		: Name of the Atmel AHB DMA Controller
 * at_dma_ / atdma	: Atmel DMA controller entity related
 * atc_ / atchan	: Atmel DMA Channel entity related
 */
#define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
#define	ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
				|ATC_DIF(AT_DMA_MEM_IF))

/*
 * Initial number of descriptors to allocate for each channel. This could
 * be increased during dma usage.
 */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");

/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);

/*----------------------------------------------------------------------*/
static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->active_list,
				struct at_desc, desc_node);
}

static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->queue,
				struct at_desc, desc_node);
}
/**
 * atc_alloc_descriptor - allocate and return an initialized descriptor
 * @chan: the channel to allocate descriptors for
 * @gfp_flags: GFP allocation flags
 *
 * Note: The ack-bit is positioned in the descriptor flag at creation time
 * to make initial allocation more convenient. This bit will be cleared
 * and control will be given to client at usage time (during
 * preparation functions).
 */
static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
					    gfp_t gfp_flags)
{
	struct at_desc	*desc = NULL;
	struct at_dma	*atdma = to_at_dma(chan->device);
	dma_addr_t phys;

	desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
	if (desc) {
		memset(desc, 0, sizeof(struct at_desc));
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		/* txd.flags will be overwritten in prep functions */
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = atc_tx_submit;
		desc->txd.phys = phys;
	}

	return desc;
}
/**
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 */
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	struct at_desc *ret = NULL;
	unsigned long flags;
	unsigned int i = 0;
	LIST_HEAD(tmp_list);

	spin_lock_irqsave(&atchan->lock, flags);
	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&atchan->chan_common),
				"desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&atchan->lock, flags);
	dev_vdbg(chan2dev(&atchan->chan_common),
		"scanned %u descriptors on freelist\n", i);

	/* no more descriptor available in initial pool: create one more */
	if (!ret) {
		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
		if (ret) {
			spin_lock_irqsave(&atchan->lock, flags);
			atchan->descs_allocated++;
			spin_unlock_irqrestore(&atchan->lock, flags);
		} else {
			dev_err(chan2dev(&atchan->chan_common),
					"not enough descriptors available\n");
		}
	}

	return ret;
}
/**
 * atc_desc_put - move a descriptor, including any children, to the free list
 * @atchan: channel we work on
 * @desc: descriptor, at the head of a chain, to move to free list
 */
static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
	if (desc) {
		struct at_desc *child;
		unsigned long flags;

		spin_lock_irqsave(&atchan->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&atchan->chan_common),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &atchan->free_list);
		dev_vdbg(chan2dev(&atchan->chan_common),
			 "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &atchan->free_list);
		spin_unlock_irqrestore(&atchan->lock, flags);
	}
}
/**
 * atc_desc_chain - build chain adding a descriptor
 * @first: address of first descriptor of the chain
 * @prev: address of previous descriptor of the chain
 * @desc: descriptor to queue
 *
 * Called from prep_* functions
 */
static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
			   struct at_desc *desc)
{
	if (!(*first)) {
		*first = desc;
	} else {
		/* inform the HW lli about chaining */
		(*prev)->lli.dscr = desc->txd.phys;
		/* insert the link descriptor to the LD ring */
		list_add_tail(&desc->desc_node,
				&(*first)->tx_list);
	}
	*prev = desc;
}
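
/*
 * Editor's illustration (not compiled; descriptors A/B/C are hypothetical):
 * after chaining three descriptors with atc_desc_chain(), the controller
 * follows a singly linked list of hardware LLIs through lli.dscr, while
 * software bookkeeping hangs B and C off A's tx_list:
 *
 *	A.lli.dscr = B.txd.phys;	<- written by atc_desc_chain()
 *	B.lli.dscr = C.txd.phys;
 *	C.lli.dscr = 0;			<- terminated later by set_desc_eol()
 *
 * Handing A's physical address to the channel's DSCR register (see
 * atc_dostart() below) is then enough for the hardware to walk the chain.
 */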
/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* ASSERT: channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&atchan->chan_common),
			"  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
			channel_readl(atchan, SADDR),
			channel_readl(atchan, DADDR),
			channel_readl(atchan, CTRLA),
			channel_readl(atchan, CTRLB),
			channel_readl(atchan, DSCR));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	vdbg_dump_regs(atchan);

	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, first->txd.phys);
	dma_writel(atdma, CHER, atchan->mask);

	vdbg_dump_regs(atchan);
}
/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want to complete
 *
 * Called with atchan->lock held and bh disabled */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
	struct dma_async_tx_descriptor	*txd = &desc->txd;

	dev_vdbg(chan2dev(&atchan->chan_common),
		"descriptor %u complete\n", txd->cookie);

	/* mark the descriptor as complete for non cyclic cases only */
	if (!atc_chan_is_cyclic(atchan))
		dma_cookie_complete(txd);

	/* move children to free_list */
	list_splice_init(&desc->tx_list, &atchan->free_list);
	/* move myself to free_list */
	list_move(&desc->desc_node, &atchan->free_list);

	/* unmap dma addresses (not on slave channels) */
	if (!atchan->chan_common.private) {
		struct device *parent = chan2parent(&atchan->chan_common);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent,
						desc->lli.daddr,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent,
						desc->lli.daddr,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent,
						desc->lli.saddr,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent,
						desc->lli.saddr,
						desc->len, DMA_TO_DEVICE);
		}
	}

	/* for cyclic transfers,
	 * no need to replay callback function while stopping */
	if (!atc_chan_is_cyclic(atchan)) {
		dma_async_tx_callback	callback = txd->callback;
		void			*param = txd->callback_param;

		/*
		 * The API requires that no submissions are done from a
		 * callback, so we don't need to drop the lock here
		 */
		if (callback)
			callback(param);
	}

	dma_run_dependencies(txd);
}
/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Also submits queued descriptors, if any are pending.
 *
 * Assume channel is idle while calling this function
 * Called with atchan->lock held and bh disabled
 */
static void atc_complete_all(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

	BUG_ON(atc_chan_is_enabled(atchan));

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&atchan->queue))
		atc_dostart(atchan, atc_first_queued(atchan));
	/* empty active_list now that it has been completed */
	list_splice_init(&atchan->active_list, &list);
	/* empty queue list by moving descriptors (if any) to active_list */
	list_splice_init(&atchan->queue, &atchan->active_list);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);
}
/**
 * atc_cleanup_descriptors - clean up finished descriptors in active_list
 * @atchan: channel to be cleaned up
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
{
	struct at_desc	*desc, *_desc;
	struct at_desc	*child;

	dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n");

	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
		if (!(desc->lli.ctrla & ATC_DONE))
			/* This one is currently in progress */
			return;

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (!(child->lli.ctrla & ATC_DONE))
				/* Currently in progress */
				return;

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this chain must be done.
		 */
		atc_chain_complete(atchan, desc);
	}
}
/**
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_advance_work(struct at_dma_chan *atchan)
{
	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

	if (list_empty(&atchan->active_list) ||
	    list_is_singular(&atchan->active_list)) {
		atc_complete_all(atchan);
	} else {
		atc_chain_complete(atchan, atc_first_active(atchan));
		/* advance work */
		atc_dostart(atchan, atc_first_active(atchan));
	}
}
/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_error(struct at_dma_chan *atchan)
{
	struct at_desc *bad_desc;
	struct at_desc *child;

	/*
	 * The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = atc_first_active(atchan);
	list_del_init(&bad_desc->desc_node);

	/* As we are stopped, take advantage to push queued descriptors
	 * in active_list */
	list_splice_init(&atchan->queue, atchan->active_list.prev);

	/* Try to restart the controller */
	if (!list_empty(&atchan->active_list))
		atc_dostart(atchan, atc_first_active(atchan));

	/*
	 * KERN_CRITICAL may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_crit(chan2dev(&atchan->chan_common),
			"Bad descriptor submitted for DMA!\n");
	dev_crit(chan2dev(&atchan->chan_common),
			"  cookie: %d\n", bad_desc->txd.cookie);
	atc_dump_lli(atchan, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		atc_dump_lli(atchan, &child->lli);

	/* Pretend the descriptor completed successfully */
	atc_chain_complete(atchan, bad_desc);
}
/**
 * atc_handle_cyclic - at the end of a period, run callback function
 * @atchan: channel used for cyclic operations
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_cyclic(struct at_dma_chan *atchan)
{
	struct at_desc			*first = atc_first_active(atchan);
	struct dma_async_tx_descriptor	*txd = &first->txd;
	dma_async_tx_callback		callback = txd->callback;
	void				*param = txd->callback_param;

	dev_vdbg(chan2dev(&atchan->chan_common),
			"new cyclic period llp 0x%08x\n",
			channel_readl(atchan, DSCR));

	if (callback)
		callback(param);
}
/*--  IRQ & Tasklet  ---------------------------------------------------*/

static void atc_tasklet(unsigned long data)
{
	struct at_dma_chan *atchan = (struct at_dma_chan *)data;
	unsigned long flags;

	spin_lock_irqsave(&atchan->lock, flags);
	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
		atc_handle_error(atchan);
	else if (atc_chan_is_cyclic(atchan))
		atc_handle_cyclic(atchan);
	else
		atc_advance_work(atchan);

	spin_unlock_irqrestore(&atchan->lock, flags);
}
static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
	struct at_dma		*atdma = (struct at_dma *)dev_id;
	struct at_dma_chan	*atchan;
	int			i;
	u32			status, pending, imr;
	int			ret = IRQ_NONE;

	do {
		imr = dma_readl(atdma, EBCIMR);
		status = dma_readl(atdma, EBCISR);
		pending = status & imr;

		if (!pending)
			break;

		dev_vdbg(atdma->dma_common.dev,
			"interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
			 status, imr, pending);

		for (i = 0; i < atdma->dma_common.chancnt; i++) {
			atchan = &atdma->chan[i];
			if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
				if (pending & AT_DMA_ERR(i)) {
					/* Disable channel on AHB error */
					dma_writel(atdma, CHDR,
						AT_DMA_RES(i) | atchan->mask);
					/* Give information to tasklet */
					set_bit(ATC_IS_ERROR, &atchan->status);
				}
				tasklet_schedule(&atchan->tasklet);
				ret = IRQ_HANDLED;
			}
		}
	} while (pending);

	return ret;
}
/*--  DMA Engine API  --------------------------------------------------*/

/**
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @tx: descriptor at the head of the transaction chain
 *
 * Queue the chain if the DMA engine is already working
 *
 * Cookie increment and adding to active_list or queue must be atomic
 */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_desc		*desc = txd_to_at_desc(tx);
	struct at_dma_chan	*atchan = to_at_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&atchan->lock, flags);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&atchan->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		atc_dostart(atchan, desc);
		list_add_tail(&desc->desc_node, &atchan->active_list);
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &atchan->queue);
	}

	spin_unlock_irqrestore(&atchan->lock, flags);

	return cookie;
}
/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation virtual destination address
 * @src: operation virtual source address
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_desc		*desc = NULL;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	u32			ctrla;
	u32			ctrlb;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 3)) {
		ctrla = ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
		src_width = dst_width = 2;
	} else if (!((src | dest | len) & 1)) {
		ctrla = ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
		src_width = dst_width = 1;
	} else {
		ctrla = ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
		src_width = dst_width = 0;
	}

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				ATC_BTSIZE_MAX);

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src + offset;
		desc->lli.daddr = dest + offset;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;

		atc_desc_chain(&first, &prev, desc);
	}

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = len;

	/* set end-of-link to the last link descriptor of the list */
	set_desc_eol(desc);

	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}
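
/*
 * Editor's worked example (assuming ATC_BTSIZE_MAX is 0xffff, i.e. the
 * 16-bit buffer transfer size field of this controller family): a
 * word-aligned 1 MiB memcpy is 0x40000 32-bit transfers, so the loop
 * above emits DIV_ROUND_UP(0x40000, 0xffff) = 5 link-list items: four
 * carrying 0xffff word transfers each and a final one carrying the
 * remaining 4 words (0x40000 - 4 * 0xffff).
 */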
/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 * @context: transaction context (ignored)
 */
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	u32			ctrla;
	u32			ctrlb;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
			sg_len,
			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
			flags);

	if (unlikely(!atslave || !sg_len)) {
		dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
		return NULL;
	}

	ctrla =	  ATC_SCSIZE(sconfig->src_maxburst)
		| ATC_DCSIZE(sconfig->dst_maxburst);
	ctrlb = ATC_IEN;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = convert_buswidth(sconfig->dst_addr_width);
		ctrla |=  ATC_DST_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_FIXED
			| ATC_SRC_ADDR_MODE_INCR
			| ATC_FC_MEM2PER
			| ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if);
		reg = sconfig->dst_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (unlikely(!len)) {
				dev_dbg(chan2dev(chan),
					"prep_slave_sg: sg(%d) data length is zero\n", i);
				goto err;
			}
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = mem;
			desc->lli.daddr = reg;
			desc->lli.ctrla = ctrla
					| ATC_SRC_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = convert_buswidth(sconfig->src_addr_width);
		ctrla |=  ATC_SRC_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_INCR
			| ATC_SRC_ADDR_MODE_FIXED
			| ATC_FC_PER2MEM
			| ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if);

		reg = sconfig->src_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (unlikely(!len)) {
				dev_dbg(chan2dev(chan),
					"prep_slave_sg: sg(%d) data length is zero\n", i);
				goto err;
			}
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = reg;
			desc->lli.daddr = mem;
			desc->lli.ctrla = ctrla
					| ATC_DST_WIDTH(mem_width)
					| len >> reg_width;
			desc->lli.ctrlb = ctrlb;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	default:
		return NULL;
	}
	/* set end-of-link to the last link descriptor of the list */
	set_desc_eol(prev);

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = total_len;

	/* first link descriptor of the list is responsible for flags */
	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
err:
	atc_desc_put(atchan, first);
	return NULL;
}
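
/*
 * Editor's sketch of the client side (illustrative only, not part of this
 * driver; "rx_chan", "sgl", "sg_len", PERIPHERAL_RX_REG and
 * my_done_callback are hypothetical and assumed to be set up elsewhere).
 * A peripheral driver would typically configure the channel once, then
 * prepare and submit transactions:
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_addr = (dma_addr_t)PERIPHERAL_RX_REG,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 1,
 *	};
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dmaengine_slave_config(rx_chan, &cfg);
 *	tx = dmaengine_prep_slave_sg(rx_chan, sgl, sg_len,
 *				     DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		tx->callback = my_done_callback;
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(rx_chan);
 *	}
 *
 * dmaengine_slave_config() ends up in set_runtime_config() below via
 * atc_control(DMA_SLAVE_CONFIG), and dmaengine_prep_slave_sg() lands in
 * atc_prep_slave_sg() above.
 */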
/**
 * atc_dma_cyclic_check_values
 * Check for too big/unaligned periods and unaligned DMA buffer
 */
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
		size_t period_len)
{
	if (period_len > (ATC_BTSIZE_MAX << reg_width))
		goto err_out;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto err_out;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto err_out;

	return 0;

err_out:
	return -EINVAL;
}
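
/*
 * Editor's note: with a 32-bit register width (reg_width == 2) and the
 * assumed 16-bit ATC_BTSIZE_MAX of 0xffff, the largest legal period is
 * 0xffff << 2 = 0x3fffc bytes (just under 256 KiB), and both the period
 * length and the buffer address must be multiples of 4 bytes.
 */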
/**
 * atc_dma_cyclic_fill_desc - Fill one period descriptor
 */
static int
atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
		unsigned int period_index, dma_addr_t buf_addr,
		unsigned int reg_width, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	u32			ctrla;

	/* prepare common CTRLA value */
	ctrla =	  ATC_SCSIZE(sconfig->src_maxburst)
		| ATC_DCSIZE(sconfig->dst_maxburst)
		| ATC_DST_WIDTH(reg_width)
		| ATC_SRC_WIDTH(reg_width)
		| period_len >> reg_width;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		desc->lli.saddr = buf_addr + (period_len * period_index);
		desc->lli.daddr = sconfig->dst_addr;
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
				| ATC_SRC_ADDR_MODE_INCR
				| ATC_FC_MEM2PER
				| ATC_SIF(atchan->mem_if)
				| ATC_DIF(atchan->per_if);
		break;

	case DMA_DEV_TO_MEM:
		desc->lli.saddr = sconfig->src_addr;
		desc->lli.daddr = buf_addr + (period_len * period_index);
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
				| ATC_SRC_ADDR_MODE_FIXED
				| ATC_FC_PER2MEM
				| ATC_SIF(atchan->per_if)
				| ATC_DIF(atchan->mem_if);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
/**
 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 * @flags: tx descriptor status flags
 * @context: transfer context (ignored)
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	unsigned long		was_cyclic;
	unsigned int		reg_width;
	unsigned int		periods = buf_len / period_len;
	unsigned int		i;

	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
			buf_addr,
			periods, buf_len, period_len);

	if (unlikely(!atslave || !buf_len || !period_len)) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
		return NULL;
	}

	was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
	if (was_cyclic) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
		return NULL;
	}

	if (unlikely(!is_slave_direction(direction)))
		goto err_out;

	if (sconfig->direction == DMA_MEM_TO_DEV)
		reg_width = convert_buswidth(sconfig->dst_addr_width);
	else
		reg_width = convert_buswidth(sconfig->src_addr_width);

	/* Check for too big/unaligned periods and unaligned DMA buffer */
	if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len))
		goto err_out;

	/* build cyclic linked list */
	for (i = 0; i < periods; i++) {
		struct at_desc	*desc;

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
					     reg_width, period_len, direction))
			goto err_desc_get;

		atc_desc_chain(&first, &prev, desc);
	}

	/* let's make a cyclic list */
	prev->lli.dscr = first->txd.phys;

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = buf_len;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
err_out:
	clear_bit(ATC_IS_CYCLIC, &atchan->status);
	return NULL;
}
static int set_runtime_config(struct dma_chan *chan,
			      struct dma_slave_config *sconfig)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);

	/* Check if chan is configured for slave transfers */
	if (!chan->private)
		return -EINVAL;

	memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));

	convert_burst(&atchan->dma_sconfig.src_maxburst);
	convert_burst(&atchan->dma_sconfig.dst_maxburst);

	return 0;
}
static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	int			chan_id = atchan->chan_common.chan_id;
	unsigned long		flags;

	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);

	if (cmd == DMA_PAUSE) {
		spin_lock_irqsave(&atchan->lock, flags);

		dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
		set_bit(ATC_IS_PAUSED, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_RESUME) {
		if (!atc_chan_is_paused(atchan))
			return 0;

		spin_lock_irqsave(&atchan->lock, flags);

		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
		clear_bit(ATC_IS_PAUSED, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_TERMINATE_ALL) {
		struct at_desc	*desc, *_desc;
		/*
		 * This is only called when something went wrong elsewhere, so
		 * we don't really care about the data. Just disable the
		 * channel. We still have to poll the channel enable bit due
		 * to AHB/HSB limitations.
		 */
		spin_lock_irqsave(&atchan->lock, flags);

		/* disabling channel: must also remove suspend state */
		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);

		/* confirm that this channel is disabled */
		while (dma_readl(atdma, CHSR) & atchan->mask)
			cpu_relax();

		/* active_list entries will end up before queued entries */
		list_splice_init(&atchan->queue, &list);
		list_splice_init(&atchan->active_list, &list);

		/* Flush all pending and queued descriptors */
		list_for_each_entry_safe(desc, _desc, &list, desc_node)
			atc_chain_complete(atchan, desc);

		clear_bit(ATC_IS_PAUSED, &atchan->status);
		/* if channel dedicated to cyclic operations, free it */
		clear_bit(ATC_IS_CYCLIC, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_SLAVE_CONFIG) {
		return set_runtime_config(chan, (struct dma_slave_config *)arg);
	} else {
		return -ENXIO;
	}

	return 0;
}
/**
 * atc_tx_status - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @txstate: if not %NULL updated with transaction state
 *
 * If @txstate is passed in, upon return it reflects the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static enum dma_status
atc_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	dma_cookie_t		last_used;
	dma_cookie_t		last_complete;
	unsigned long		flags;
	enum dma_status		ret;

	spin_lock_irqsave(&atchan->lock, flags);

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_SUCCESS) {
		atc_cleanup_descriptors(atchan);

		ret = dma_cookie_status(chan, cookie, txstate);
	}

	last_complete = chan->completed_cookie;
	last_used = chan->cookie;

	spin_unlock_irqrestore(&atchan->lock, flags);

	if (ret != DMA_SUCCESS)
		dma_set_residue(txstate, atc_first_active(atchan)->len);

	if (atc_chan_is_paused(atchan))
		ret = DMA_PAUSED;

	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
		 ret, cookie, last_complete ? last_complete : 0,
		 last_used ? last_used : 0);

	return ret;
}
/**
 * atc_issue_pending - try to finish work
 * @chan: target DMA channel
 */
static void atc_issue_pending(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	unsigned long		flags;

	dev_vdbg(chan2dev(chan), "issue_pending\n");

	/* Not needed for cyclic transfers */
	if (atc_chan_is_cyclic(atchan))
		return;

	spin_lock_irqsave(&atchan->lock, flags);
	if (!atc_chan_is_enabled(atchan)) {
		atc_advance_work(atchan);
	}
	spin_unlock_irqrestore(&atchan->lock, flags);
}
/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * return - the number of allocated descriptors
 */
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc;
	struct at_dma_slave	*atslave;
	unsigned long		flags;
	int			i;
	u32			cfg;
	LIST_HEAD(tmp_list);

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT: channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	cfg = ATC_DEFAULT_CFG;

	atslave = chan->private;
	if (atslave) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);

		/* if cfg configuration specified take it instead of default */
		if (atslave->cfg)
			cfg = atslave->cfg;
	}

	/* have we already been set up?
	 * reconfigure channel but no need to reallocate descriptors */
	if (!list_empty(&atchan->free_list))
		return atchan->descs_allocated;

	/* Allocate initial pool of descriptors */
	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
		if (!desc) {
			dev_err(atdma->dma_common.dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irqsave(&atchan->lock, flags);
	atchan->descs_allocated = i;
	list_splice(&tmp_list, &atchan->free_list);
	dma_cookie_init(chan);
	spin_unlock_irqrestore(&atchan->lock, flags);

	/* channel parameters */
	channel_writel(atchan, CFG, cfg);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources: allocated %d descriptors\n",
		atchan->descs_allocated);

	return atchan->descs_allocated;
}
/**
 * atc_free_chan_resources - free all channel resources
 * @chan: DMA channel
 */
static void atc_free_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
		atchan->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&atchan->active_list));
	BUG_ON(!list_empty(&atchan->queue));
	BUG_ON(atc_chan_is_enabled(atchan));

	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		list_del(&desc->desc_node);
		/* free link descriptor */
		dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
	}
	list_splice_init(&atchan->free_list, &list);
	atchan->descs_allocated = 0;
	atchan->status = 0;

	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}
#ifdef CONFIG_OF
static bool at_dma_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave *atslave = slave;

	if (atslave->dma_dev == chan->device->dev) {
		chan->private = atslave;
		return true;
	} else {
		return false;
	}
}

static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *of_dma)
{
	struct dma_chan *chan;
	struct at_dma_chan *atchan;
	struct at_dma_slave *atslave;
	dma_cap_mask_t mask;
	unsigned int per_id;
	struct platform_device *dmac_pdev;

	if (dma_spec->args_count != 2)
		return NULL;

	dmac_pdev = of_find_device_by_node(dma_spec->np);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL);
	if (!atslave)
		return NULL;
	/*
	 * We can fill both SRC_PER and DST_PER, one of these fields will be
	 * ignored depending on DMA transfer direction.
	 */
	per_id = dma_spec->args[1];
	atslave->cfg = ATC_FIFOCFG_HALFFIFO | ATC_DST_H2SEL_HW
		      | ATC_SRC_H2SEL_HW | ATC_DST_PER(per_id)
		      | ATC_SRC_PER(per_id);
	atslave->dma_dev = &dmac_pdev->dev;

	chan = dma_request_channel(mask, at_dma_filter, atslave);
	if (!chan)
		return NULL;

	atchan = to_at_dma_chan(chan);
	atchan->per_if = dma_spec->args[0] & 0xff;
	atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff;

	return chan;
}
#else
static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *of_dma)
{
	return NULL;
}
#endif
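
/*
 * Editor's note on the two-cell binding decoded by at_dma_xlate() above
 * (node names and IDs below are hypothetical, derived only from the
 * decode in the code): the first cell packs the interface identifiers
 * (per_if in bits 0-7, mem_if in bits 16-23) and the second cell is the
 * peripheral handshake ID programmed into ATC_SRC_PER/ATC_DST_PER. A
 * client node might look like:
 *
 *	dma0: dma-controller@ffffec00 {
 *		compatible = "atmel,at91sam9g45-dma";
 *		#dma-cells = <2>;
 *	};
 *
 *	some-peripheral@fff98000 {
 *		dmas = <&dma0 0x10002 3>, <&dma0 0x10002 4>;
 *		dma-names = "tx", "rx";
 *	};
 *
 * 0x10002 here would select memory interface 1 and peripheral
 * interface 2.
 */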
/*--  Module Management  -----------------------------------------------*/

/* cap_mask is a multi-u32 bitfield, fill it with proper C code. */
static struct at_dma_platform_data at91sam9rl_config = {
	.nr_channels = 2,
};
static struct at_dma_platform_data at91sam9g45_config = {
	.nr_channels = 8,
};

#if defined(CONFIG_OF)
static const struct of_device_id atmel_dma_dt_ids[] = {
	{
		.compatible = "atmel,at91sam9rl-dma",
		.data = &at91sam9rl_config,
	}, {
		.compatible = "atmel,at91sam9g45-dma",
		.data = &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
#endif

static const struct platform_device_id atdma_devtypes[] = {
	{
		.name = "at91sam9rl_dma",
		.driver_data = (unsigned long) &at91sam9rl_config,
	}, {
		.name = "at91sam9g45_dma",
		.driver_data = (unsigned long) &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
						struct platform_device *pdev)
{
	if (pdev->dev.of_node) {
		const struct of_device_id *match;
		match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
		if (match == NULL)
			return NULL;
		return match->data;
	}
	return (struct at_dma_platform_data *)
			platform_get_device_id(pdev)->driver_data;
}
/**
 * at_dma_off - disable DMA controller
 * @atdma: the Atmel HDMAC device
 */
static void at_dma_off(struct at_dma *atdma)
{
	dma_writel(atdma, EN, 0);

	/* disable all interrupts */
	dma_writel(atdma, EBCIDR, -1L);

	/* confirm that all channels are disabled */
	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
		cpu_relax();
}
static int __init at_dma_probe(struct platform_device *pdev)
{
	struct resource		*io;
	struct at_dma		*atdma;
	size_t			size;
	int			irq;
	int			err;
	int			i;
	const struct at_dma_platform_data *plat_dat;

	/* setup platform data for each SoC */
	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);

	/* get DMA parameters from controller type */
	plat_dat = at_dma_get_driver_data(pdev);
	if (!plat_dat)
		return -ENODEV;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct at_dma);
	size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
	atdma = kzalloc(size, GFP_KERNEL);
	if (!atdma)
		return -ENOMEM;

	/* discover transaction capabilities */
	atdma->dma_common.cap_mask = plat_dat->cap_mask;
	atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;

	size = resource_size(io);
	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	atdma->regs = ioremap(io->start, size);
	if (!atdma->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	atdma->clk = clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atdma->clk)) {
		err = PTR_ERR(atdma->clk);
		goto err_clk;
	}
	clk_enable(atdma->clk);

	/* force dma off, just in case */
	at_dma_off(atdma);

	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, atdma);

	/* create a pool of consistent memory blocks for hardware descriptors */
	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
			&pdev->dev, sizeof(struct at_desc),
			4 /* word alignment */, 0);
	if (!atdma->dma_desc_pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_pool_create;
	}

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();
	/* initialize channels related values */
	INIT_LIST_HEAD(&atdma->dma_common.channels);
	for (i = 0; i < plat_dat->nr_channels; i++) {
		struct at_dma_chan	*atchan = &atdma->chan[i];

		atchan->mem_if = AT_DMA_MEM_IF;
		atchan->per_if = AT_DMA_PER_IF;
		atchan->chan_common.device = &atdma->dma_common;
		dma_cookie_init(&atchan->chan_common);
		list_add_tail(&atchan->chan_common.device_node,
				&atdma->dma_common.channels);

		atchan->ch_regs = atdma->regs + ch_regs(i);
		spin_lock_init(&atchan->lock);
		atchan->mask = 1 << i;

		INIT_LIST_HEAD(&atchan->active_list);
		INIT_LIST_HEAD(&atchan->queue);
		INIT_LIST_HEAD(&atchan->free_list);

		tasklet_init(&atchan->tasklet, atc_tasklet,
				(unsigned long)atchan);
		atc_enable_chan_irq(atdma, i);
	}

	/* set base routines */
	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
	atdma->dma_common.device_tx_status = atc_tx_status;
	atdma->dma_common.device_issue_pending = atc_issue_pending;
	atdma->dma_common.dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
		/* controller can do slave DMA: can trigger cyclic transfers */
		dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
		atdma->dma_common.device_control = atc_control;
	}

	dma_writel(atdma, EN, AT_DMA_ENABLE);

	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
	  plat_dat->nr_channels);

	dma_async_device_register(&atdma->dma_common);

	/*
	 * Do not return an error if the dmac node is not present in order to
	 * not break the existing way of requesting channel with
	 * dma_request_channel().
	 */
	if (pdev->dev.of_node) {
		err = of_dma_controller_register(pdev->dev.of_node,
						 at_dma_xlate, atdma);
		if (err) {
			dev_err(&pdev->dev, "could not register of_dma_controller\n");
			goto err_of_dma_controller_register;
		}
	}

	return 0;

err_of_dma_controller_register:
	dma_async_device_unregister(&atdma->dma_common);
	dma_pool_destroy(atdma->dma_desc_pool);
err_pool_create:
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
	clk_disable(atdma->clk);
	clk_put(atdma->clk);
err_clk:
	iounmap(atdma->regs);
	atdma->regs = NULL;
err_release_r:
	release_mem_region(io->start, size);
err_kfree:
	kfree(atdma);
	return err;
}
static int at_dma_remove(struct platform_device *pdev)
{
	struct at_dma		*atdma = platform_get_drvdata(pdev);
	struct dma_chan		*chan, *_chan;
	struct resource		*io;

	at_dma_off(atdma);
	dma_async_device_unregister(&atdma->dma_common);

	dma_pool_destroy(atdma->dma_desc_pool);
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan	*atchan = to_at_dma_chan(chan);

		/* Disable interrupts */
		atc_disable_chan_irq(atdma, chan->chan_id);
		tasklet_disable(&atchan->tasklet);

		tasklet_kill(&atchan->tasklet);
		list_del(&chan->device_node);
	}

	clk_disable(atdma->clk);
	clk_put(atdma->clk);

	iounmap(atdma->regs);
	atdma->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, resource_size(io));

	kfree(atdma);

	return 0;
}
static void at_dma_shutdown(struct platform_device *pdev)
{
	struct at_dma	*atdma = platform_get_drvdata(pdev);

	at_dma_off(platform_get_drvdata(pdev));
	clk_disable(atdma->clk);
}
static int at_dma_prepare(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);
		/* wait for transaction completion (except in cyclic case) */
		if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
			return -EAGAIN;
	}
	return 0;
}
static void atc_suspend_cyclic(struct at_dma_chan *atchan)
{
	struct dma_chan	*chan = &atchan->chan_common;

	/* Channel should already have been paused by its user;
	 * do it here anyway if that is not the case */
	if (!atc_chan_is_paused(atchan)) {
		dev_warn(chan2dev(chan),
		"cyclic channel not paused, should be done by channel user\n");
		atc_control(chan, DMA_PAUSE, 0);
	}

	/* now preserve additional data for cyclic operations */
	/* next descriptor address in the cyclic list */
	atchan->save_dscr = channel_readl(atchan, DSCR);

	vdbg_dump_regs(atchan);
}
static int at_dma_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* preserve data */
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		if (atc_chan_is_cyclic(atchan))
			atc_suspend_cyclic(atchan);
		atchan->save_cfg = channel_readl(atchan, CFG);
	}
	atdma->save_imr = dma_readl(atdma, EBCIMR);

	/* disable DMA controller */
	at_dma_off(atdma);
	clk_disable(atdma->clk);
	return 0;
}
static void atc_resume_cyclic(struct at_dma_chan *atchan)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* restore channel status for cyclic descriptors list:
	 * next descriptor in the cyclic list at the time of suspend */
	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, atchan->save_dscr);
	dma_writel(atdma, CHER, atchan->mask);

	/* channel pause status should be removed by channel user;
	 * we cannot take the initiative to do it here */

	vdbg_dump_regs(atchan);
}
static int at_dma_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* bring back DMA controller */
	clk_enable(atdma->clk);
	dma_writel(atdma, EN, AT_DMA_ENABLE);

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* restore saved data */
	dma_writel(atdma, EBCIER, atdma->save_imr);
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		channel_writel(atchan, CFG, atchan->save_cfg);
		if (atc_chan_is_cyclic(atchan))
			atc_resume_cyclic(atchan);
	}
	return 0;
}
static const struct dev_pm_ops at_dma_dev_pm_ops = {
	.prepare = at_dma_prepare,
	.suspend_noirq = at_dma_suspend_noirq,
	.resume_noirq = at_dma_resume_noirq,
};

static struct platform_driver at_dma_driver = {
	.remove		= at_dma_remove,
	.shutdown	= at_dma_shutdown,
	.id_table	= atdma_devtypes,
	.driver = {
		.name	= "at_hdmac",
		.pm	= &at_dma_dev_pm_ops,
		.of_match_table	= of_match_ptr(atmel_dma_dt_ids),
	},
};

static int __init at_dma_init(void)
{
	return platform_driver_probe(&at_dma_driver, at_dma_probe);
}
subsys_initcall(at_dma_init);

static void __exit at_dma_exit(void)
{
	platform_driver_unregister(&at_dma_driver);
}
module_exit(at_dma_exit);

MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at_hdmac");