/* drivers/dma/at_hdmac.c */
/*
 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This supports the Atmel AHB DMA Controller.
 *
 * The driver has currently been tested with the Atmel AT91SAM9RL
 * and AT91SAM9G45 series.
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "at_hdmac_regs.h"

/*
 * Glossary
 * --------
 *
 * at_hdmac		: Name of the Atmel AHB DMA Controller
 * at_dma_ / atdma	: Atmel DMA controller entity related
 * atc_ / atchan	: Atmel DMA Channel entity related
 */

#define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
#define	ATC_DEFAULT_CTRLA	(0)
#define	ATC_DEFAULT_CTRLB	(ATC_SIF(0)	\
				|ATC_DIF(1))

/*
 * Initial number of descriptors to allocate for each channel. This could
 * be increased during dma usage.
 */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");

/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);

/*----------------------------------------------------------------------*/

static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->active_list,
				struct at_desc, desc_node);
}

static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->queue,
				struct at_desc, desc_node);
}

/**
 * atc_alloc_descriptor - allocate and return an initialized descriptor
 * @chan: the channel to allocate descriptors for
 * @gfp_flags: GFP allocation flags
 *
 * Note: The ack-bit is positioned in the descriptor flag at creation time
 * to make initial allocation more convenient. This bit will be cleared
 * and control will be given to client at usage time (during
 * preparation functions).
 */
static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
					    gfp_t gfp_flags)
{
	struct at_desc	*desc = NULL;
	struct at_dma	*atdma = to_at_dma(chan->device);
	dma_addr_t phys;

	desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
	if (desc) {
		memset(desc, 0, sizeof(struct at_desc));
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		/* txd.flags will be overwritten in prep functions */
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = atc_tx_submit;
		desc->txd.phys = phys;
	}

	return desc;
}

/**
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 */
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	struct at_desc *ret = NULL;
	unsigned int i = 0;
	LIST_HEAD(tmp_list);

	spin_lock_bh(&atchan->lock);
	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&atchan->chan_common),
				"desc %p not ACKed\n", desc);
	}
	spin_unlock_bh(&atchan->lock);
	dev_vdbg(chan2dev(&atchan->chan_common),
		"scanned %u descriptors on freelist\n", i);

	/* no more descriptor available in initial pool: create one more */
	if (!ret) {
		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
		if (ret) {
			spin_lock_bh(&atchan->lock);
			atchan->descs_allocated++;
			spin_unlock_bh(&atchan->lock);
		} else {
			dev_err(chan2dev(&atchan->chan_common),
					"not enough descriptors available\n");
		}
	}

	return ret;
}

/**
 * atc_desc_put - move a descriptor, including any children, to the free list
 * @atchan: channel we work on
 * @desc: descriptor, at the head of a chain, to move to free list
 */
static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
	if (desc) {
		struct at_desc *child;

		spin_lock_bh(&atchan->lock);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&atchan->chan_common),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &atchan->free_list);
		dev_vdbg(chan2dev(&atchan->chan_common),
			 "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &atchan->free_list);
		spin_unlock_bh(&atchan->lock);
	}
}

/**
 * atc_assign_cookie - compute and assign new cookie
 * @atchan: channel we work on
 * @desc: descriptor to assign cookie for
 *
 * Called with atchan->lock held and bh disabled
 */
static dma_cookie_t
atc_assign_cookie(struct at_dma_chan *atchan, struct at_desc *desc)
{
	dma_cookie_t cookie = atchan->chan_common.cookie;

	if (++cookie < 0)
		cookie = 1;

	atchan->chan_common.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}
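
/*
 * Worked example of the wrap-around in atc_assign_cookie(): dma_cookie_t
 * is a signed 32-bit type, so once chan_common.cookie has reached INT_MAX
 * the pre-increment wraps negative and the cookie restarts at 1. Zero and
 * negative values stay reserved (e.g. -EBUSY marks a freshly prepared
 * chain before submission, see the prep functions below).
 */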

/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* ASSERT: channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&atchan->chan_common),
			"  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
			channel_readl(atchan, SADDR),
			channel_readl(atchan, DADDR),
			channel_readl(atchan, CTRLA),
			channel_readl(atchan, CTRLB),
			channel_readl(atchan, DSCR));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	vdbg_dump_regs(atchan);

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/*
	 * Zero the per-channel registers and point DSCR at the first
	 * hardware LLI: the controller reloads SADDR/DADDR/CTRLA/CTRLB
	 * from that descriptor once the channel is enabled.
	 */
	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, first->txd.phys);
	dma_writel(atdma, CHER, atchan->mask);

	vdbg_dump_regs(atchan);
}

/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want to complete
 *
 * Called with atchan->lock held and bh disabled
 */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
	dma_async_tx_callback	callback;
	void			*param;
	struct dma_async_tx_descriptor	*txd = &desc->txd;

	dev_vdbg(chan2dev(&atchan->chan_common),
		"descriptor %u complete\n", txd->cookie);

	atchan->completed_cookie = txd->cookie;
	callback = txd->callback;
	param = txd->callback_param;

	/* move children to free_list */
	list_splice_init(&desc->tx_list, &atchan->free_list);
	/* move myself to free_list */
	list_move(&desc->desc_node, &atchan->free_list);

	/* unmap dma addresses */
	if (!atchan->chan_common.private) {
		struct device *parent = chan2parent(&atchan->chan_common);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent,
						desc->lli.daddr,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent,
						desc->lli.daddr,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent,
						desc->lli.saddr,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent,
						desc->lli.saddr,
						desc->len, DMA_TO_DEVICE);
		}
	}

	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here
	 */
	if (callback)
		callback(param);

	dma_run_dependencies(txd);
}

/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Eventually submit queued descriptors if any
 *
 * Assume channel is idle while calling this function
 * Called with atchan->lock held and bh disabled
 */
static void atc_complete_all(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

	BUG_ON(atc_chan_is_enabled(atchan));

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&atchan->queue))
		atc_dostart(atchan, atc_first_queued(atchan));
	/* empty active_list now it is completed */
	list_splice_init(&atchan->active_list, &list);
	/* empty queue list by moving descriptors (if any) to active_list */
	list_splice_init(&atchan->queue, &atchan->active_list);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);
}

/**
 * atc_cleanup_descriptors - clean up finished descriptors in active_list
 * @atchan: channel to be cleaned up
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
{
	struct at_desc	*desc, *_desc;
	struct at_desc	*child;

	dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n");

	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
		if (!(desc->lli.ctrla & ATC_DONE))
			/* This one is currently in progress */
			return;

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (!(child->lli.ctrla & ATC_DONE))
				/* Currently in progress */
				return;

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this chain must be done.
		 */
		atc_chain_complete(atchan, desc);
	}
}

/**
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_advance_work(struct at_dma_chan *atchan)
{
	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

	if (list_empty(&atchan->active_list) ||
	    list_is_singular(&atchan->active_list)) {
		atc_complete_all(atchan);
	} else {
		atc_chain_complete(atchan, atc_first_active(atchan));
		/* advance work */
		atc_dostart(atchan, atc_first_active(atchan));
	}
}

/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_error(struct at_dma_chan *atchan)
{
	struct at_desc *bad_desc;
	struct at_desc *child;

	/*
	 * The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = atc_first_active(atchan);
	list_del_init(&bad_desc->desc_node);

	/* As we are stopped, take advantage to push queued descriptors
	 * in active_list */
	list_splice_init(&atchan->queue, atchan->active_list.prev);

	/* Try to restart the controller */
	if (!list_empty(&atchan->active_list))
		atc_dostart(atchan, atc_first_active(atchan));

	/*
	 * KERN_CRIT may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_crit(chan2dev(&atchan->chan_common),
			"Bad descriptor submitted for DMA!\n");
	dev_crit(chan2dev(&atchan->chan_common),
			"  cookie: %d\n", bad_desc->txd.cookie);
	atc_dump_lli(atchan, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		atc_dump_lli(atchan, &child->lli);

	/* Pretend the descriptor completed successfully */
	atc_chain_complete(atchan, bad_desc);
}

/*--  IRQ & Tasklet  ---------------------------------------------------*/

static void atc_tasklet(unsigned long data)
{
	struct at_dma_chan *atchan = (struct at_dma_chan *)data;

	/* Channel cannot be enabled here */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: channel enabled in tasklet\n");
		return;
	}

	spin_lock(&atchan->lock);
	if (test_and_clear_bit(0, &atchan->error_status))
		atc_handle_error(atchan);
	else
		atc_advance_work(atchan);

	spin_unlock(&atchan->lock);
}

static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
	struct at_dma		*atdma = (struct at_dma *)dev_id;
	struct at_dma_chan	*atchan;
	int			i;
	u32			status, pending, imr;
	int			ret = IRQ_NONE;

	do {
		imr = dma_readl(atdma, EBCIMR);
		status = dma_readl(atdma, EBCISR);
		pending = status & imr;

		if (!pending)
			break;

		dev_vdbg(atdma->dma_common.dev,
			"interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
			status, imr, pending);

		for (i = 0; i < atdma->dma_common.chancnt; i++) {
			atchan = &atdma->chan[i];
			if (pending & (AT_DMA_CBTC(i) | AT_DMA_ERR(i))) {
				if (pending & AT_DMA_ERR(i)) {
					/* Disable channel on AHB error */
					dma_writel(atdma, CHDR, atchan->mask);
					/* Give information to tasklet */
					set_bit(0, &atchan->error_status);
				}
				tasklet_schedule(&atchan->tasklet);
				ret = IRQ_HANDLED;
			}
		}

	} while (pending);

	return ret;
}

/*--  DMA Engine API  --------------------------------------------------*/

/**
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @tx: descriptor at the head of the transaction chain
 *
 * Queue chain if DMA engine is working already
 *
 * Cookie increment and adding to active_list or queue must be atomic
 */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_desc		*desc = txd_to_at_desc(tx);
	struct at_dma_chan	*atchan = to_at_dma_chan(tx->chan);
	dma_cookie_t		cookie;

	spin_lock_bh(&atchan->lock);
	cookie = atc_assign_cookie(atchan, desc);

	if (list_empty(&atchan->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		atc_dostart(atchan, desc);
		list_add_tail(&desc->desc_node, &atchan->active_list);
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &atchan->queue);
	}

	spin_unlock_bh(&atchan->lock);

	return cookie;
}

/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation physical destination address
 * @src: operation physical source address
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_desc		*desc = NULL;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	u32			ctrla;
	u32			ctrlb;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	ctrla =   ATC_DEFAULT_CTRLA;
	ctrlb =   ATC_DEFAULT_CTRLB
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 3)) {
		ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
		src_width = dst_width = 2;
	} else if (!((src | dest | len) & 1)) {
		ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
		src_width = dst_width = 1;
	} else {
		ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
		src_width = dst_width = 0;
	}

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		/* xfer_count is expressed in transfer-width units */
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				ATC_BTSIZE_MAX);

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src + offset;
		desc->lli.daddr = dest + offset;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;
		async_tx_ack(&desc->txd);

		if (!first) {
			first = desc;
		} else {
			/* inform the HW lli about chaining */
			prev->lli.dscr = desc->txd.phys;
			/* insert the link descriptor to the LD ring */
			list_add_tail(&desc->desc_node,
					&first->tx_list);
		}
		prev = desc;
	}

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = len;

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(desc);

	desc->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}
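
/*
 * For reference, a minimal sketch of how a client could drive the memcpy
 * path above through the generic dmaengine API of this kernel generation.
 * The function name, device pointer and buffer size are illustrative
 * assumptions, not part of this driver; error unwinding is simplified.
 */
#if 0	/* example client code, kept out of the build */
static int at_hdmac_memcpy_example(struct device *dev)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	void *src_buf, *dst_buf;
	dma_addr_t src, dst;
	dma_cookie_t cookie;
	int ret = 0;

	/* grab any channel advertising DMA_MEMCPY capability */
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	/* coherent buffers keep the example free of explicit sync calls */
	src_buf = dma_alloc_coherent(dev, PAGE_SIZE, &src, GFP_KERNEL);
	dst_buf = dma_alloc_coherent(dev, PAGE_SIZE, &dst, GFP_KERNEL);
	if (!src_buf || !dst_buf) {
		ret = -ENOMEM;
		goto out;
	}
	memset(src_buf, 0xa5, PAGE_SIZE);

	/* this ends up in atc_prep_dma_memcpy() above */
	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, PAGE_SIZE,
			DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!tx) {
		ret = -ENOMEM;
		goto out;
	}

	cookie = tx->tx_submit(tx);		/* atc_tx_submit() */
	dma_async_issue_pending(chan);		/* atc_issue_pending() */

	/* busy-wait helper; polls atc_is_tx_complete() underneath */
	if (dma_sync_wait(chan, cookie) != DMA_SUCCESS)
		ret = -EIO;
out:
	if (src_buf)
		dma_free_coherent(dev, PAGE_SIZE, src_buf, src);
	if (dst_buf)
		dma_free_coherent(dev, PAGE_SIZE, dst_buf, dst);
	dma_release_channel(chan);
	return ret;
}
#endif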

/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	u32			ctrla;
	u32			ctrlb;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_slave_sg: %s f0x%lx\n",
			direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
			flags);

	if (unlikely(!atslave || !sg_len)) {
		dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
		return NULL;
	}

	reg_width = atslave->reg_width;

	ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
	ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN;

	switch (direction) {
	case DMA_TO_DEVICE:
		ctrla |=  ATC_DST_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_FIXED
			| ATC_SRC_ADDR_MODE_INCR
			| ATC_FC_MEM2PER;
		reg = atslave->tx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_phys(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = mem;
			desc->lli.daddr = reg;
			desc->lli.ctrla = ctrla
					| ATC_SRC_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;

			if (!first) {
				first = desc;
			} else {
				/* inform the HW lli about chaining */
				prev->lli.dscr = desc->txd.phys;
				/* insert the link descriptor to the LD ring */
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += len;
		}
		break;
	case DMA_FROM_DEVICE:
		ctrla |=  ATC_SRC_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_INCR
			| ATC_SRC_ADDR_MODE_FIXED
			| ATC_FC_PER2MEM;

		reg = atslave->rx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_phys(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = reg;
			desc->lli.daddr = mem;
			desc->lli.ctrla = ctrla
					| ATC_DST_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;

			if (!first) {
				first = desc;
			} else {
				/* inform the HW lli about chaining */
				prev->lli.dscr = desc->txd.phys;
				/* insert the link descriptor to the LD ring */
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(prev);

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = total_len;

	/* last link descriptor of list is responsible for flags */
	prev->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
	return NULL;
}
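
/*
 * Likewise, a sketch of the slave side. A peripheral driver fills a
 * struct at_dma_slave (the controller-specific data checked in
 * atc_alloc_chan_resources() below), hangs it off chan->private via a
 * filter function, then builds requests with device_prep_slave_sg. The
 * function names and the RX direction are illustrative assumptions, and
 * error unwinding (releasing the channel) is trimmed for brevity.
 */
#if 0	/* example client code, kept out of the build */
static bool at_hdmac_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave *atslave = slave;

	/* only accept channels of the controller named in dma_dev */
	if (atslave->dma_dev == chan->device->dev) {
		chan->private = atslave;
		return true;
	}
	return false;
}

static struct dma_async_tx_descriptor *
at_hdmac_slave_rx_example(struct at_dma_slave *atslave,
		struct scatterlist *sgl, unsigned int sg_len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, at_hdmac_filter, atslave);
	if (!chan)
		return NULL;

	/* this ends up in atc_prep_slave_sg() above */
	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
			DMA_FROM_DEVICE, DMA_PREP_INTERRUPT);
}
#endif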

static void atc_terminate_all(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc, *_desc;
	LIST_HEAD(list);

	/*
	 * This is only called when something went wrong elsewhere, so
	 * we don't really care about the data. Just disable the
	 * channel. We still have to poll the channel enable bit due
	 * to AHB/HSB limitations.
	 */
	spin_lock_bh(&atchan->lock);

	dma_writel(atdma, CHDR, atchan->mask);

	/* confirm that this channel is disabled */
	while (dma_readl(atdma, CHSR) & atchan->mask)
		cpu_relax();

	/* active_list entries will end up before queued entries */
	list_splice_init(&atchan->queue, &list);
	list_splice_init(&atchan->active_list, &list);

	spin_unlock_bh(&atchan->lock);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);
}

/**
 * atc_is_tx_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @done: if not %NULL, updated with last completed transaction
 * @used: if not %NULL, updated with last used transaction
 *
 * If @done and @used are passed in, upon return they reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static enum dma_status
atc_is_tx_complete(struct dma_chan *chan,
		dma_cookie_t cookie,
		dma_cookie_t *done, dma_cookie_t *used)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	dma_cookie_t		last_used;
	dma_cookie_t		last_complete;
	enum dma_status		ret;

	dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n",
			cookie, done ? *done : 0, used ? *used : 0);

	spin_lock_bh(&atchan->lock);

	last_complete = atchan->completed_cookie;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret != DMA_SUCCESS) {
		atc_cleanup_descriptors(atchan);

		last_complete = atchan->completed_cookie;
		last_used = chan->cookie;

		ret = dma_async_is_complete(cookie, last_complete, last_used);
	}

	spin_unlock_bh(&atchan->lock);

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return ret;
}
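
/*
 * For reference, dma_async_is_complete() treats (last_complete, last_used]
 * as the in-flight window: with last_complete = 40 and last_used = 42,
 * cookie 41 reports DMA_IN_PROGRESS while cookie 40 reports DMA_SUCCESS.
 */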

/**
 * atc_issue_pending - try to finish work
 * @chan: target DMA channel
 */
static void atc_issue_pending(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);

	dev_vdbg(chan2dev(chan), "issue_pending\n");

	if (!atc_chan_is_enabled(atchan)) {
		spin_lock_bh(&atchan->lock);
		atc_advance_work(atchan);
		spin_unlock_bh(&atchan->lock);
	}
}

/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * return - the number of allocated descriptors
 */
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc;
	struct at_dma_slave	*atslave;
	int			i;
	u32			cfg;
	LIST_HEAD(tmp_list);

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT: channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	cfg = ATC_DEFAULT_CFG;

	atslave = chan->private;
	if (atslave) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);

		/* if cfg configuration specified take it instead of default */
		if (atslave->cfg)
			cfg = atslave->cfg;
	}

	/* have we already been set up?
	 * reconfigure channel but no need to reallocate descriptors */
	if (!list_empty(&atchan->free_list))
		return atchan->descs_allocated;

	/* Allocate initial pool of descriptors */
	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
		if (!desc) {
			dev_err(atdma->dma_common.dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_bh(&atchan->lock);
	atchan->descs_allocated = i;
	list_splice(&tmp_list, &atchan->free_list);
	atchan->completed_cookie = chan->cookie = 1;
	spin_unlock_bh(&atchan->lock);

	/* channel parameters */
	channel_writel(atchan, CFG, cfg);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources: allocated %d descriptors\n",
		atchan->descs_allocated);

	return atchan->descs_allocated;
}

/**
 * atc_free_chan_resources - free all channel resources
 * @chan: DMA channel
 */
static void atc_free_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
		atchan->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&atchan->active_list));
	BUG_ON(!list_empty(&atchan->queue));
	BUG_ON(atc_chan_is_enabled(atchan));

	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		list_del(&desc->desc_node);
		/* free link descriptor */
		dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
	}
	list_splice_init(&atchan->free_list, &list);
	atchan->descs_allocated = 0;

	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}

/*--  Module Management  -----------------------------------------------*/

/**
 * at_dma_off - disable DMA controller
 * @atdma: the Atmel HDMAC device
 */
static void at_dma_off(struct at_dma *atdma)
{
	dma_writel(atdma, EN, 0);

	/* disable all interrupts */
	dma_writel(atdma, EBCIDR, -1L);

	/* confirm that all channels are disabled */
	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
		cpu_relax();
}

static int __init at_dma_probe(struct platform_device *pdev)
{
	struct at_dma_platform_data *pdata;
	struct resource		*io;
	struct at_dma		*atdma;
	size_t			size;
	int			irq;
	int			err;
	int			i;

	/* get DMA Controller parameters from platform */
	pdata = pdev->dev.platform_data;
	if (!pdata || pdata->nr_channels > AT_DMA_MAX_NR_CHANNELS)
		return -EINVAL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct at_dma);
	size += pdata->nr_channels * sizeof(struct at_dma_chan);
	atdma = kzalloc(size, GFP_KERNEL);
	if (!atdma)
		return -ENOMEM;

	/* discover transaction capabilities from the platform data */
	atdma->dma_common.cap_mask = pdata->cap_mask;
	atdma->all_chan_mask = (1 << pdata->nr_channels) - 1;

	size = io->end - io->start + 1;
	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	atdma->regs = ioremap(io->start, size);
	if (!atdma->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	atdma->clk = clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atdma->clk)) {
		err = PTR_ERR(atdma->clk);
		goto err_clk;
	}
	clk_enable(atdma->clk);

	/* force dma off, just in case */
	at_dma_off(atdma);

	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, atdma);

	/* create a pool of consistent memory blocks for hardware descriptors */
	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
			&pdev->dev, sizeof(struct at_desc),
			4 /* word alignment */, 0);
	if (!atdma->dma_desc_pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_pool_create;
	}

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* initialize channels related values */
	INIT_LIST_HEAD(&atdma->dma_common.channels);
	for (i = 0; i < pdata->nr_channels; i++, atdma->dma_common.chancnt++) {
		struct at_dma_chan	*atchan = &atdma->chan[i];

		atchan->chan_common.device = &atdma->dma_common;
		atchan->chan_common.cookie = atchan->completed_cookie = 1;
		atchan->chan_common.chan_id = i;
		list_add_tail(&atchan->chan_common.device_node,
				&atdma->dma_common.channels);

		atchan->ch_regs = atdma->regs + ch_regs(i);
		spin_lock_init(&atchan->lock);
		atchan->mask = 1 << i;

		INIT_LIST_HEAD(&atchan->active_list);
		INIT_LIST_HEAD(&atchan->queue);
		INIT_LIST_HEAD(&atchan->free_list);

		tasklet_init(&atchan->tasklet, atc_tasklet,
				(unsigned long)atchan);
		atc_enable_irq(atchan);
	}

	/* set base routines */
	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
	atdma->dma_common.device_is_tx_complete = atc_is_tx_complete;
	atdma->dma_common.device_issue_pending = atc_issue_pending;
	atdma->dma_common.dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
		atdma->dma_common.device_terminate_all = atc_terminate_all;
	}

	dma_writel(atdma, EN, AT_DMA_ENABLE);

	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
	  atdma->dma_common.chancnt);

	dma_async_device_register(&atdma->dma_common);

	return 0;

err_pool_create:
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
	clk_disable(atdma->clk);
	clk_put(atdma->clk);
err_clk:
	iounmap(atdma->regs);
	atdma->regs = NULL;
err_release_r:
	release_mem_region(io->start, size);
err_kfree:
	kfree(atdma);
	return err;
}

static int __exit at_dma_remove(struct platform_device *pdev)
{
	struct at_dma		*atdma = platform_get_drvdata(pdev);
	struct dma_chan		*chan, *_chan;
	struct resource		*io;

	at_dma_off(atdma);
	dma_async_device_unregister(&atdma->dma_common);

	dma_pool_destroy(atdma->dma_desc_pool);
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan	*atchan = to_at_dma_chan(chan);

		/* Disable interrupts */
		atc_disable_irq(atchan);
		tasklet_disable(&atchan->tasklet);

		tasklet_kill(&atchan->tasklet);
		list_del(&chan->device_node);
	}

	clk_disable(atdma->clk);
	clk_put(atdma->clk);

	iounmap(atdma->regs);
	atdma->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, io->end - io->start + 1);

	kfree(atdma);

	return 0;
}

static void at_dma_shutdown(struct platform_device *pdev)
{
	struct at_dma	*atdma = platform_get_drvdata(pdev);

	at_dma_off(platform_get_drvdata(pdev));
	clk_disable(atdma->clk);
}

static int at_dma_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);

	at_dma_off(platform_get_drvdata(pdev));
	clk_disable(atdma->clk);
	return 0;
}

static int at_dma_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);

	clk_enable(atdma->clk);
	dma_writel(atdma, EN, AT_DMA_ENABLE);
	return 0;
}

static const struct dev_pm_ops at_dma_dev_pm_ops = {
	.suspend_noirq = at_dma_suspend_noirq,
	.resume_noirq = at_dma_resume_noirq,
};

static struct platform_driver at_dma_driver = {
	.remove		= __exit_p(at_dma_remove),
	.shutdown	= at_dma_shutdown,
	.driver = {
		.name	= "at_hdmac",
		.pm	= &at_dma_dev_pm_ops,
	},
};

static int __init at_dma_init(void)
{
	return platform_driver_probe(&at_dma_driver, at_dma_probe);
}
module_init(at_dma_init);

static void __exit at_dma_exit(void)
{
	platform_driver_unregister(&at_dma_driver);
}
module_exit(at_dma_exit);

MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at_hdmac");