// SPDX-License-Identifier: GPL-2.0
/*
 * Core driver for the Synopsys DesignWare DMA Controller
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 * Copyright (C) 2013 Intel Corporation
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include "../dmaengine.h"
#include "internal.h"

/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 */

/* The set of bus widths supported by the DMA controller */
#define DW_DMA_BUSWIDTHS			  \
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)	| \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		| \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		| \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)

/*----------------------------------------------------------------------*/
static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return to_dw_desc(dwc->active_list.next);
}
static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc *desc = txd_to_dw_desc(tx);
	struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	cookie = dma_cookie_assign(tx);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */

	list_add_tail(&desc->desc_node, &dwc->queue);
	spin_unlock_irqrestore(&dwc->lock, flags);
	dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n",
		 __func__, desc->txd.cookie);

	return cookie;
}
static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_desc *desc;
	dma_addr_t phys;

	desc = dma_pool_zalloc(dw->desc_pool, GFP_ATOMIC, &phys);
	if (!desc)
		return NULL;

	dwc->descs_allocated++;
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->txd, &dwc->chan);
	desc->txd.tx_submit = dwc_tx_submit;
	desc->txd.flags = DMA_CTRL_ACK;
	desc->txd.phys = phys;

	return desc;
}
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_desc *child, *_next;

	if (unlikely(!desc))
		return;

	list_for_each_entry_safe(child, _next, &desc->tx_list, desc_node) {
		list_del(&child->desc_node);
		dma_pool_free(dw->desc_pool, child, child->txd.phys);
		dwc->descs_allocated--;
	}

	dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
	dwc->descs_allocated--;
}
static void dwc_initialize(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);

	dw->initialize_chan(dwc);

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);
}
/*----------------------------------------------------------------------*/

static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
{
	dev_err(chan2dev(&dwc->chan),
		" SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
		channel_readl(dwc, SAR),
		channel_readl(dwc, DAR),
		channel_readl(dwc, LLP),
		channel_readl(dwc, CTL_HI),
		channel_readl(dwc, CTL_LO));
}

static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();
}
/*----------------------------------------------------------------------*/

/* Perform single block transfer */
static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
				       struct dw_desc *desc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	u32 ctllo;

	/*
	 * Software emulation of LLP mode relies on interrupts to continue
	 * multi block transfer.
	 */
	ctllo = lli_read(desc, ctllo) | DWC_CTLL_INT_EN;

	channel_writel(dwc, SAR, lli_read(desc, sar));
	channel_writel(dwc, DAR, lli_read(desc, dar));
	channel_writel(dwc, CTL_LO, ctllo);
	channel_writel(dwc, CTL_HI, lli_read(desc, ctlhi));
	channel_set_bit(dw, CH_EN, dwc->mask);

	/* Move pointer to next descriptor */
	dwc->tx_node_active = dwc->tx_node_active->next;
}
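
/*
 * Note on the soft LLP ("nollp") path above (a descriptive summary, not
 * taken from the databook): when the controller lacks hardware linked-list
 * support, dwc_dostart() programs only the first block and sets
 * DW_DMA_IS_SOFT_LLP; each block-completion interrupt then lands in
 * dwc_scan_descriptors(), which advances dwc->tx_node_active and calls
 * dwc_do_single_block() for the next node until the list is exhausted.
 */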
/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	u8 lms = DWC_LLP_LMS(dwc->dws.m_master);
	unsigned long was_soft_llp;

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"%s: BUG: Attempted to start non-idle channel\n",
			__func__);
		dwc_dump_chan_regs(dwc);

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	if (dwc->nollp) {
		was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
						&dwc->flags);
		if (was_soft_llp) {
			dev_err(chan2dev(&dwc->chan),
				"BUG: Attempted to start new LLP transfer inside ongoing one\n");
			return;
		}

		dwc_initialize(dwc);

		first->residue = first->total_len;
		dwc->tx_node_active = &first->tx_list;

		/* Submit first block */
		dwc_do_single_block(dwc, first);

		return;
	}

	dwc_initialize(dwc);

	channel_writel(dwc, LLP, first->txd.phys | lms);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}
static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc;

	if (list_empty(&dwc->queue))
		return;

	list_move(dwc->queue.next, &dwc->active_list);
	desc = dwc_first_active(dwc);
	dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie);
	dwc_dostart(dwc, desc);
}
/*----------------------------------------------------------------------*/

static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
		bool callback_required)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	struct dw_desc *child;
	unsigned long flags;
	struct dmaengine_desc_callback cb;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&dwc->lock, flags);
	dma_cookie_complete(txd);
	if (callback_required)
		dmaengine_desc_get_callback(txd, &cb);
	else
		memset(&cb, 0, sizeof(cb));

	/* async_tx_ack */
	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);
	dwc_desc_put(dwc, desc);
	spin_unlock_irqrestore(&dwc->lock, flags);

	dmaengine_desc_callback_invoke(&cb, NULL);
}
static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		dwc_chan_disable(dw, dwc);
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	dwc_dostart_first_queued(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, true);
}
/* Returns how many bytes were already received from source */
static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	u32 ctlhi = channel_readl(dwc, CTL_HI);
	u32 ctllo = channel_readl(dwc, CTL_LO);

	return dw->block2bytes(dwc, ctlhi, ctllo >> 4 & 7);
}
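
/*
 * Descriptive note on the expression above (an interpretation, not quoted
 * from the databook): CTL_HI carries the completed block transfer size in
 * source-width items, while bits 6:4 of CTL_LO hold the source transfer
 * width written by DWC_CTLL_SRC_WIDTH(), so "ctllo >> 4 & 7" extracts the
 * width that block2bytes() uses to convert the item count into bytes.
 */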
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
			struct list_head *head, *active = dwc->tx_node_active;

			/*
			 * We are inside first active descriptor.
			 * Otherwise something is really wrong.
			 */
			desc = dwc_first_active(dwc);

			head = &desc->tx_list;
			if (active != head) {
				/* Update residue to reflect last sent descriptor */
				if (active == head->next)
					desc->residue -= desc->len;
				else
					desc->residue -= to_dw_desc(active->prev)->len;

				child = to_dw_desc(active);

				/* Submit next block */
				dwc_do_single_block(dwc, child);

				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

			/* We are done here */
			clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
		}

		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
		dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* Initial residue value */
		desc->residue = desc->total_len;

		/* Check first descriptors addr */
		if (desc->txd.phys == DWC_LLP_LOC(llp)) {
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		/* Check first descriptors llp */
		if (lli_read(desc, llp) == llp) {
			/* This one is currently in progress */
			desc->residue -= dwc_get_sent(dwc);
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		desc->residue -= desc->len;
		list_for_each_entry(child, &desc->tx_list, desc_node) {
			if (lli_read(child, llp) == llp) {
				/* Currently in progress */
				desc->residue -= dwc_get_sent(dwc);
				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}
			desc->residue -= child->len;
		}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc_descriptor_complete(dwc, desc, true);
		spin_lock_irqsave(&dwc->lock, flags);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	dwc_chan_disable(dw, dwc);

	dwc_dostart_first_queued(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);
}
static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
		 lli_read(desc, sar),
		 lli_read(desc, dar),
		 lli_read(desc, llp),
		 lli_read(desc, ctlhi),
		 lli_read(desc, ctllo));
}
static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;
	unsigned long flags;

	dwc_scan_descriptors(dw, dwc);

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * WARN may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
				       " cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, bad_desc);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, child);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc, true);
}
static void dw_dma_tasklet(struct tasklet_struct *t)
{
	struct dw_dma *dw = from_tasklet(dw, t, tasklet);
	struct dw_dma_chan *dwc;
	u32 status_xfer;
	u32 status_err;
	unsigned int i;

	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dev_vdbg(dw->dma.dev, "Cyclic xfer is not implemented\n");
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if (status_xfer & (1 << i))
			dwc_scan_descriptors(dw, dwc);
	}

	/* Re-enable interrupts */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}
static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	/* Check if we have any interrupt from the DMAC which is not in use */
	if (!dw->in_use)
		return IRQ_NONE;

	status = dma_readl(dw, STATUS_INT);
	dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);

	/* Check if we have any interrupt from the DMAC */
	if (!status)
		return IRQ_NONE;

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}
/*----------------------------------------------------------------------*/

static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc;
	struct dw_desc *first;
	struct dw_desc *prev;
	size_t xfer_count;
	size_t offset;
	u8 m_master = dwc->dws.m_master;
	unsigned int src_width;
	unsigned int dst_width;
	unsigned int data_width = dw->pdata->data_width[m_master];
	u32 ctllo, ctlhi;
	u8 lms = DWC_LLP_LMS(m_master);

	dev_vdbg(chan2dev(chan),
			"%s: d%pad s%pad l0x%zx f0x%lx\n", __func__,
			&dest, &src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
		return NULL;
	}

	dwc->direction = DMA_MEM_TO_MEM;

	src_width = dst_width = __ffs(data_width | src | dest | len);

	ctllo = dw->prepare_ctllo(dwc)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		ctlhi = dw->bytes2block(dwc, len - offset, src_width, &xfer_count);

		lli_write(desc, sar, src + offset);
		lli_write(desc, dar, dest + offset);
		lli_write(desc, ctllo, ctllo);
		lli_write(desc, ctlhi, ctlhi);
		desc->len = xfer_count;

		if (!first) {
			first = desc;
		} else {
			lli_write(prev, llp, desc->txd.phys | lms);
			list_add_tail(&desc->desc_node, &first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		lli_set(prev, ctllo, DWC_CTLL_INT_EN);

	prev->lli.llp = 0;
	lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	first->txd.flags = flags;
	first->total_len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}
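
/*
 * Illustrative consumer-side sketch (not part of this driver): a client
 * normally reaches the prep routine above through the generic dmaengine
 * wrappers. "chan", "dst", "src" and "len" below are assumed to be set up
 * by the caller; error handling is omitted for brevity.
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	cookie = dmaengine_submit(tx);		// ends up in dwc_tx_submit()
 *	dma_async_issue_pending(chan);		// ends up in dwc_issue_pending()
 */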
static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dma_slave_config *sconfig = &dwc->dma_sconfig;
	struct dw_desc *prev;
	struct dw_desc *first;
	u32 ctllo, ctlhi;
	u8 lms = DWC_LLP_LMS(dwc->dws.m_master);
	dma_addr_t reg;
	unsigned int reg_width;
	unsigned int mem_width;
	unsigned int i;
	struct scatterlist *sg;
	size_t total_len = 0;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	if (unlikely(!is_slave_direction(direction) || !sg_len))
		return NULL;

	dwc->direction = direction;

	prev = first = NULL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __ffs(sconfig->dst_addr_width);
		reg = sconfig->dst_addr;
		ctllo = dw->prepare_ctllo(dwc)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC;

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
			DWC_CTLL_FC(DW_DMA_FC_D_M2P);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc *desc;
			u32 len, mem;
			size_t dlen;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = __ffs(sconfig->src_addr_width | mem | len);

slave_sg_todev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc)
				goto err_desc_get;

			ctlhi = dw->bytes2block(dwc, len, mem_width, &dlen);

			lli_write(desc, sar, mem);
			lli_write(desc, dar, reg);
			lli_write(desc, ctlhi, ctlhi);
			lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width));
			desc->len = dlen;

			if (!first) {
				first = desc;
			} else {
				lli_write(prev, llp, desc->txd.phys | lms);
				list_add_tail(&desc->desc_node, &first->tx_list);
			}
			prev = desc;

			mem += dlen;
			len -= dlen;
			total_len += dlen;

			if (len)
				goto slave_sg_todev_fill_desc;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __ffs(sconfig->src_addr_width);
		reg = sconfig->src_addr;
		ctllo = dw->prepare_ctllo(dwc)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX;

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
			DWC_CTLL_FC(DW_DMA_FC_D_P2M);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc *desc;
			u32 len, mem;
			size_t dlen;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

slave_sg_fromdev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc)
				goto err_desc_get;

			ctlhi = dw->bytes2block(dwc, len, reg_width, &dlen);

			lli_write(desc, sar, reg);
			lli_write(desc, dar, mem);
			lli_write(desc, ctlhi, ctlhi);
			mem_width = __ffs(sconfig->dst_addr_width | mem);
			lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
			desc->len = dlen;

			if (!first) {
				first = desc;
			} else {
				lli_write(prev, llp, desc->txd.phys | lms);
				list_add_tail(&desc->desc_node, &first->tx_list);
			}
			prev = desc;

			mem += dlen;
			len -= dlen;
			total_len += dlen;

			if (len)
				goto slave_sg_fromdev_fill_desc;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		lli_set(prev, ctllo, DWC_CTLL_INT_EN);

	prev->lli.llp = 0;
	lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	first->total_len = total_len;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan),
		"not enough descriptors available. Direction %d\n", direction);
	dwc_desc_put(dwc, first);
	return NULL;
}
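
/*
 * Illustrative consumer-side sketch (not part of this driver): a typical
 * MEM_TO_DEV client configures the peripheral side first and then hands a
 * mapped scatterlist to the prep routine above. "chan", "sgl", "nents" and
 * the FIFO address/width values are assumptions of the example.
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_dma_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dmaengine_slave_config(chan, &cfg);	// ends up in dwc_config()
 *	tx = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				     DMA_PREP_INTERRUPT);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */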
bool dw_dma_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave *dws = param;

	if (dws->dma_dev != chan->device->dev)
		return false;

	/* permit channels in accordance with the channels mask */
	if (dws->channels && !(dws->channels & dwc->mask))
		return false;

	/* We have to copy data since dws can be temporary storage */
	memcpy(&dwc->dws, dws, sizeof(struct dw_dma_slave));

	return true;
}
EXPORT_SYMBOL_GPL(dw_dma_filter);
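
/*
 * Illustrative consumer sketch (not part of this driver): dw_dma_filter()
 * is typically handed to dma_request_channel() together with a filled-in
 * struct dw_dma_slave. "dmac_dev" and the request line IDs below are
 * placeholders the caller would have to provide.
 *
 *	struct dw_dma_slave dws = {
 *		.dma_dev = dmac_dev,	// struct device of the DMA controller
 *		.src_id = 0,
 *		.dst_id = 1,
 *		.m_master = 0,
 *		.p_master = 1,
 *	};
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, dw_dma_filter, &dws);
 */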
static int dwc_verify_maxburst(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	dwc->dma_sconfig.src_maxburst =
		clamp(dwc->dma_sconfig.src_maxburst, 1U, dwc->max_burst);
	dwc->dma_sconfig.dst_maxburst =
		clamp(dwc->dma_sconfig.dst_maxburst, 1U, dwc->max_burst);

	dwc->dma_sconfig.src_maxburst =
		rounddown_pow_of_two(dwc->dma_sconfig.src_maxburst);
	dwc->dma_sconfig.dst_maxburst =
		rounddown_pow_of_two(dwc->dma_sconfig.dst_maxburst);

	return 0;
}
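
/*
 * Worked example for the helper above (hypothetical values): with
 * dwc->max_burst == 16, a requested src_maxburst of 24 is first clamped
 * to 16 and stays 16 after rounddown_pow_of_two(); a requested value of
 * 5 survives the clamp and is rounded down to 4.
 */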
static int dwc_verify_p_buswidth(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	u32 reg_width, max_width;

	if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV)
		reg_width = dwc->dma_sconfig.dst_addr_width;
	else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM)
		reg_width = dwc->dma_sconfig.src_addr_width;
	else /* DMA_MEM_TO_MEM */
		return 0;

	max_width = dw->pdata->data_width[dwc->dws.p_master];

	/* Fall-back to 1-byte transfer width if undefined */
	if (reg_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
		reg_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	else if (!is_power_of_2(reg_width) || reg_width > max_width)
		return -EINVAL;
	else /* bus width is valid */
		return 0;

	/* Update undefined addr width value */
	if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV)
		dwc->dma_sconfig.dst_addr_width = reg_width;
	else /* DMA_DEV_TO_MEM */
		dwc->dma_sconfig.src_addr_width = reg_width;

	return 0;
}
static int dwc_verify_m_buswidth(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	u32 reg_width, reg_burst, mem_width;

	mem_width = dw->pdata->data_width[dwc->dws.m_master];

	/*
	 * It's possible to have a data portion locked in the DMA FIFO in case
	 * of the channel suspension. Subsequent channel disabling will cause
	 * that data to be silently lost. To prevent that, maintain coherency
	 * between the src and dst transfer widths by means of the relation:
	 * (CTLx.SRC_TR_WIDTH * CTLx.SRC_MSIZE >= CTLx.DST_TR_WIDTH)
	 * Look for the details in the commit message that brings this change.
	 *
	 * Note the DMA configs utilized in the calculations below must have
	 * been verified to have correct values by this method call.
	 */
	if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV) {
		reg_width = dwc->dma_sconfig.dst_addr_width;
		if (mem_width < reg_width)
			return -EINVAL;

		dwc->dma_sconfig.src_addr_width = mem_width;
	} else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM) {
		reg_width = dwc->dma_sconfig.src_addr_width;
		reg_burst = dwc->dma_sconfig.src_maxburst;

		dwc->dma_sconfig.dst_addr_width = min(mem_width, reg_width * reg_burst);
	}

	return 0;
}
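
/*
 * Worked example for the DEV_TO_MEM branch above (hypothetical numbers):
 * with a memory-master data width of 8 bytes, src_addr_width of 2 bytes
 * and src_maxburst of 4, dst_addr_width becomes min(8, 2 * 4) = 8 bytes,
 * which keeps SRC_TR_WIDTH * SRC_MSIZE >= DST_TR_WIDTH satisfied.
 */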
static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	int ret;

	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));

	ret = dwc_verify_maxburst(chan);
	if (ret)
		return ret;

	ret = dwc_verify_p_buswidth(chan);
	if (ret)
		return ret;

	ret = dwc_verify_m_buswidth(chan);
	if (ret)
		return ret;

	return 0;
}
static void dwc_chan_pause(struct dw_dma_chan *dwc, bool drain)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	unsigned int count = 20;	/* timeout iterations */

	dw->suspend_chan(dwc, drain);

	while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
		udelay(2);

	set_bit(DW_DMA_IS_PAUSED, &dwc->flags);
}

static int dwc_pause(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc_chan_pause(dwc, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static inline void dwc_chan_resume(struct dw_dma_chan *dwc, bool drain)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);

	dw->resume_chan(dwc, drain);

	clear_bit(DW_DMA_IS_PAUSED, &dwc->flags);
}

static int dwc_resume(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);

	if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags))
		dwc_chan_resume(dwc, false);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
static int dwc_terminate_all(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&dwc->lock, flags);

	clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);

	dwc_chan_pause(dwc, true);

	dwc_chan_disable(dw, dwc);

	dwc_chan_resume(dwc, true);

	/* active_list entries will end up before queued entries */
	list_splice_init(&dwc->queue, &list);
	list_splice_init(&dwc->active_list, &list);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, false);

	return 0;
}
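
/*
 * Consumer-side note (illustrative, not part of this driver): clients
 * normally reach the callback above through the generic wrappers, e.g.
 *
 *	dmaengine_terminate_sync(chan);
 *
 * rather than invoking device_terminate_all directly.
 */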
static struct dw_desc *dwc_find_desc(struct dw_dma_chan *dwc, dma_cookie_t c)
{
	struct dw_desc *desc;

	list_for_each_entry(desc, &dwc->active_list, desc_node)
		if (desc->txd.cookie == c)
			return desc;

	return NULL;
}

static u32 dwc_get_residue_and_status(struct dw_dma_chan *dwc, dma_cookie_t cookie,
				      enum dma_status *status)
{
	struct dw_desc *desc;
	unsigned long flags;
	u32 residue;

	spin_lock_irqsave(&dwc->lock, flags);

	desc = dwc_find_desc(dwc, cookie);
	if (desc) {
		if (desc == dwc_first_active(dwc)) {
			residue = desc->residue;
			if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
				residue -= dwc_get_sent(dwc);
			if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags))
				*status = DMA_PAUSED;
		} else {
			residue = desc->total_len;
		}
	} else {
		residue = 0;
	}

	spin_unlock_irqrestore(&dwc->lock, flags);
	return residue;
}
static enum dma_status
dwc_tx_status(struct dma_chan *chan,
	      dma_cookie_t cookie,
	      struct dma_tx_state *txstate)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	dma_set_residue(txstate, dwc_get_residue_and_status(dwc, cookie, &ret));
	return ret;
}
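
/*
 * Consumer-side sketch (illustrative, not part of this driver): a client
 * polling for progress typically goes through the generic helper, which
 * lands in the callback above and reports the residue in bytes.
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status == DMA_IN_PROGRESS)
 *		pr_debug("remaining: %u bytes\n", state.residue);
 */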
static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (list_empty(&dwc->active_list))
		dwc_dostart_first_queued(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);
}
/*----------------------------------------------------------------------*/

void do_dw_dma_off(struct dw_dma *dw)
{
	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();
}

void do_dw_dma_on(struct dw_dma *dw)
{
	dma_writel(dw, CFG, DW_CFG_DMA_EN);
}
static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dma_cookie_init(chan);

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	/*
	 * We need controller-specific data to set up slave transfers.
	 */
	if (chan->private && !dw_dma_filter(chan, chan->private)) {
		dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
		return -EINVAL;
	}

	/* Enable controller here if needed */
	if (!dw->in_use)
		do_dw_dma_on(dw);
	dw->in_use |= dwc->mask;

	return 0;
}
static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	unsigned long flags;

	dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
			dwc->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_irqsave(&dwc->lock, flags);

	/* Clear custom channel configuration */
	memset(&dwc->dws, 0, sizeof(struct dw_dma_slave));

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Disable controller in case it was a last user */
	dw->in_use &= ~dwc->mask;
	if (!dw->in_use)
		do_dw_dma_off(dw);

	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}
static void dwc_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	caps->max_burst = dwc->max_burst;

	/*
	 * It might be crucial for some devices to have the hardware
	 * accelerated multi-block transfers supported, aka LLPs in DW DMAC
	 * notation. So if LLPs are supported then max_sg_burst is set to
	 * zero which means unlimited number of SG entries can be handled in a
	 * single DMA transaction, otherwise it's just one SG entry.
	 */
	if (dwc->nollp)
		caps->max_sg_burst = 1;
	else
		caps->max_sg_burst = 0;
}
int do_dma_probe(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;
	struct dw_dma_platform_data *pdata;
	bool autocfg = false;
	unsigned int dw_params;
	unsigned int i;
	int ret;

	dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL);
	if (!dw->pdata)
		return -ENOMEM;

	dw->regs = chip->regs;

	pm_runtime_get_sync(chip->dev);

	if (!chip->pdata) {
		dw_params = dma_readl(dw, DW_PARAMS);
		dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params);

		autocfg = dw_params >> DW_PARAMS_EN & 1;
		if (!autocfg) {
			ret = -EINVAL;
			goto err_pdata;
		}

		/* Reassign the platform data pointer */
		pdata = dw->pdata;

		/* Get hardware configuration parameters */
		pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1;
		pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
		for (i = 0; i < pdata->nr_masters; i++) {
			pdata->data_width[i] =
				4 << (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3);
		}
		pdata->block_size = dma_readl(dw, MAX_BLK_SIZE);

		/* Fill platform data with the default values */
		pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
		pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
	} else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
		ret = -EINVAL;
		goto err_pdata;
	} else {
		memcpy(dw->pdata, chip->pdata, sizeof(*dw->pdata));

		/* Reassign the platform data pointer */
		pdata = dw->pdata;
	}

	dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan),
				GFP_KERNEL);
	if (!dw->chan) {
		ret = -ENOMEM;
		goto err_pdata;
	}

	/* Calculate all channel mask before DMA setup */
	dw->all_chan_mask = (1 << pdata->nr_channels) - 1;

	/* Force dma off, just in case */
	dw->disable(dw);

	/* Device and instance ID for IRQ and DMA pool */
	dw->set_device_name(dw, chip->id);

	/* Create a pool of consistent memory blocks for hardware descriptors */
	dw->desc_pool = dmam_pool_create(dw->name, chip->dev,
					 sizeof(struct dw_desc), 4, 0);
	if (!dw->desc_pool) {
		dev_err(chip->dev, "No memory for descriptors dma pool\n");
		ret = -ENOMEM;
		goto err_pdata;
	}

	tasklet_setup(&dw->tasklet, dw_dma_tasklet);

	ret = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
			  dw->name, dw);
	if (ret)
		goto err_pdata;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < pdata->nr_channels; i++) {
		struct dw_dma_chan *dwc = &dw->chan[i];

		dwc->chan.device = &dw->dma;
		dma_cookie_init(&dwc->chan);
		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
			list_add_tail(&dwc->chan.device_node,
					&dw->dma.channels);
		else
			list_add(&dwc->chan.device_node, &dw->dma.channels);

		/* 7 is highest priority & 0 is lowest. */
		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
			dwc->priority = pdata->nr_channels - i - 1;
		else
			dwc->priority = i;

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);

		channel_clear_bit(dw, CH_EN, dwc->mask);

		dwc->direction = DMA_TRANS_NONE;

		/* Hardware configuration */
		if (autocfg) {
			unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
			void __iomem *addr = &__dw_regs(dw)->DWC_PARAMS[r];
			unsigned int dwc_params = readl(addr);

			dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
					   dwc_params);

			/*
			 * Decode maximum block size for given channel. The
			 * stored 4 bit value represents blocks from 0x00 for 3
			 * up to 0x0a for 4095.
			 */
			dwc->block_size =
				(4 << ((pdata->block_size >> 4 * i) & 0xf)) - 1;

			/*
			 * According to the DW DMA databook the true scatter-
			 * gather LLPs aren't available if either multi-block
			 * config is disabled (CHx_MULTI_BLK_EN == 0) or the
			 * LLP register is hard-coded to zeros
			 * (CHx_HC_LLP == 1).
			 */
			dwc->nollp =
				(dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0 ||
				(dwc_params >> DWC_PARAMS_HC_LLP & 0x1) == 1;
			dwc->max_burst =
				(0x4 << (dwc_params >> DWC_PARAMS_MSIZE & 0x7));
		} else {
			dwc->block_size = pdata->block_size;
			dwc->nollp = !pdata->multi_block[i];
			dwc->max_burst = pdata->max_burst[i] ?: DW_DMA_MAX_BURST;
		}
	}

	/* Clear all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	/* Set capabilities */
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);

	dw->dma.dev = chip->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;

	dw->dma.device_caps = dwc_caps;
	dw->dma.device_config = dwc_config;
	dw->dma.device_pause = dwc_pause;
	dw->dma.device_resume = dwc_resume;
	dw->dma.device_terminate_all = dwc_terminate_all;

	dw->dma.device_tx_status = dwc_tx_status;
	dw->dma.device_issue_pending = dwc_issue_pending;

	/* DMA capabilities */
	dw->dma.min_burst = DW_DMA_MIN_BURST;
	dw->dma.max_burst = DW_DMA_MAX_BURST;
	dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS;
	dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS;
	dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
			     BIT(DMA_MEM_TO_MEM);
	dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	/*
	 * For now there is no hardware with non-uniform maximum block size
	 * across all of the device channels, so we set the maximum segment
	 * size as the block size found for the very first channel.
	 */
	dma_set_max_seg_size(dw->dma.dev, dw->chan[0].block_size);

	ret = dma_async_device_register(&dw->dma);
	if (ret)
		goto err_dma_register;

	dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n",
		 pdata->nr_channels);

	pm_runtime_put_sync_suspend(chip->dev);

	return 0;

err_dma_register:
	free_irq(chip->irq, dw);
err_pdata:
	pm_runtime_put_sync_suspend(chip->dev);
	return ret;
}
int do_dma_remove(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;
	struct dw_dma_chan *dwc, *_dwc;

	pm_runtime_get_sync(chip->dev);

	do_dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	free_irq(chip->irq, dw);
	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	pm_runtime_put_sync_suspend(chip->dev);
	return 0;
}
int do_dw_dma_disable(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;

	dw->disable(dw);
	return 0;
}
EXPORT_SYMBOL_GPL(do_dw_dma_disable);

int do_dw_dma_enable(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;

	dw->enable(dw);
	return 0;
}
EXPORT_SYMBOL_GPL(do_dw_dma_enable);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");