/*
 * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
 * AVR32 systems.)
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dw_dmac_regs.h"
#include "dmaengine.h"
/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has currently been tested only with the Atmel AT32AP7000,
 * which does not support descriptor writeback.
 */
#define DWC_DEFAULT_CTLLO(_chan) ({				\
		struct dw_dma_slave *__slave = (_chan->private);	\
		struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
		struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
		int _dms = __slave ? __slave->dst_master : 0;	\
		int _sms = __slave ? __slave->src_master : 1;	\
		u8 _smsize = __slave ? _sconfig->src_maxburst :	\
			DW_DMA_MSIZE_16;			\
		u8 _dmsize = __slave ? _sconfig->dst_maxburst :	\
			DW_DMA_MSIZE_16;			\
								\
		(DWC_CTLL_DST_MSIZE(_dmsize)			\
		 | DWC_CTLL_SRC_MSIZE(_smsize)			\
		 | DWC_CTLL_LLP_D_EN				\
		 | DWC_CTLL_LLP_S_EN				\
		 | DWC_CTLL_DMS(_dms)				\
		 | DWC_CTLL_SMS(_sms));				\
	})
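
/*
 * Illustrative note, not taken from the databook: for a channel with no
 * slave data attached (e.g. plain memcpy), the macro above falls back to
 * dst_master 0, src_master 1 and DW_DMA_MSIZE_16 bursts on both sides, so
 * DWC_DEFAULT_CTLLO(chan) reduces to
 *
 *	DWC_CTLL_DST_MSIZE(DW_DMA_MSIZE_16)
 *	| DWC_CTLL_SRC_MSIZE(DW_DMA_MSIZE_16)
 *	| DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN
 *	| DWC_CTLL_DMS(0) | DWC_CTLL_SMS(1)
 */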
/*
 * This is configuration-dependent and usually a funny size like 4095.
 *
 * Note that this is a transfer count, i.e. if we transfer 32-bit
 * words, we can do 16380 bytes per descriptor.
 *
 * This parameter is also system-specific.
 */
#define DWC_MAX_COUNT	4095U
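
/*
 * Worked example (illustrative only, not used by the driver): with the
 * 4095 transfer-count limit above, a hypothetical helper that computes how
 * many hardware descriptors a buffer needs for a given width encoding
 * (0 = byte, 1 = halfword, 2 = word, 3 = doubleword) could look like this.
 */
#if 0	/* example only */
static inline unsigned int dwc_example_nr_descs(size_t len, unsigned int width)
{
	size_t max_bytes = (size_t)DWC_MAX_COUNT << width;

	/* e.g. 65536 bytes of 32-bit words: 16380 bytes/desc -> 5 descs */
	return DIV_ROUND_UP(len, max_bytes);
}
#endif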
/*
 * Number of descriptors to allocate for each channel. This should be
 * made configurable somehow; preferably, the clients (at least the
 * ones using slave transfers) should be able to give us a hint.
 */
#define NR_DESCS_PER_CHANNEL	64
/*----------------------------------------------------------------------*/

/*
 * Because we're not relying on writeback from the controller (it may not
 * even be configured into the core!) we don't need to use dma_pool. These
 * descriptors -- and associated data -- are cacheable. We do need to make
 * sure their dcache entries are written back before handing them off to
 * the controller, though.
 */
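
/*
 * Illustrative pattern (this is what the code below does each time it
 * touches an lli on the CPU side): flush the descriptor out of the dcache
 * before the controller may fetch it, e.g.
 *
 *	dma_sync_single_for_device(chan2parent(chan), desc->txd.phys,
 *				   sizeof(desc->lli), DMA_TO_DEVICE);
 */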
static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}
static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
}
static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	struct dw_desc *ret = NULL;
	unsigned int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);

	return ret;
}
static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_desc	*child;

	list_for_each_entry(child, &desc->tx_list, desc_node)
		dma_sync_single_for_cpu(chan2parent(&dwc->chan),
				child->txd.phys, sizeof(child->lli),
				DMA_TO_DEVICE);
	dma_sync_single_for_cpu(chan2parent(&dwc->chan),
			desc->txd.phys, sizeof(desc->lli),
			DMA_TO_DEVICE);
}
/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	unsigned long flags;

	if (desc) {
		struct dw_desc *child;

		dwc_sync_desc_for_cpu(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dwc->chan),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &dwc->free_list);
		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &dwc->free_list);
		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}
static void dwc_initialize(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_dma_slave *dws = dwc->chan.private;
	u32 cfghi = DWC_CFGH_FIFO_MODE;
	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);

	if (dwc->initialized == true)
		return;

	if (dws) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);

		cfghi = dws->cfg_hi;
		cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
	}

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	dwc->initialized = true;
}
/*----------------------------------------------------------------------*/

/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	dwc_initialize(dwc);

	channel_writel(dwc, LLP, first->txd.phys);
	channel_writel(dwc, CTL_LO,
			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}
/*----------------------------------------------------------------------*/

static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
		bool callback_required)
{
	dma_async_tx_callback		callback = NULL;
	void				*param = NULL;
	struct dma_async_tx_descriptor	*txd = &desc->txd;
	struct dw_desc			*child;
	unsigned long			flags;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&dwc->lock, flags);
	dma_cookie_complete(txd);
	if (callback_required) {
		callback = txd->callback;
		param = txd->callback_param;
	}

	dwc_sync_desc_for_cpu(dwc, desc);

	/* async_tx_ack */
	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);

	list_splice_init(&desc->tx_list, &dwc->free_list);
	list_move(&desc->desc_node, &dwc->free_list);

	if (!dwc->chan.private) {
		struct device *parent = chan2parent(&dwc->chan);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
		}
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	if (callback_required && callback)
		callback(param);
}
static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, true);
}
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);
		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* check first descriptors addr */
		if (desc->txd.phys == llp) {
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		/* check first descriptors llp */
		if (desc->lli.llp == llp) {
			/* This one is currently in progress */
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (child->lli.llp == llp) {
				/* Currently in progress */
				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc_descriptor_complete(dwc, desc, true);
		spin_lock_irqsave(&dwc->lock, flags);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}

	spin_unlock_irqrestore(&dwc->lock, flags);
}
static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
			lli->sar, lli->dar, lli->llp,
			lli->ctlhi, lli->ctllo);
}
static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;
	unsigned long flags;

	dwc_scan_descriptors(dw, dwc);

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * KERN_CRITICAL may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"Bad descriptor submitted for DMA!\n");
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, &child->lli);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc, true);
}
/* --------------------- Cyclic DMA API extensions -------------------- */

inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);

inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, DAR);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);
/* called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
		u32 status_err, u32 status_xfer)
{
	unsigned long flags;

	if (dwc->mask) {
		void (*callback)(void *param);
		void *callback_param;

		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
				channel_readl(dwc, LLP));

		callback = dwc->cdesc->period_callback;
		callback_param = dwc->cdesc->period_callback_param;

		if (callback)
			callback(callback_param);
	}

	/*
	 * Error and transfer complete are highly unlikely, and will most
	 * likely be due to a configuration error by the user.
	 */
	if (unlikely(status_err & dwc->mask) ||
			unlikely(status_xfer & dwc->mask)) {
		int i;

		dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
				"interrupt, stopping DMA transfer\n",
				status_xfer ? "xfer" : "error");

		spin_lock_irqsave(&dwc->lock, flags);

		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));

		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();

		/* make sure DMA does not restart by loading a new list */
		channel_writel(dwc, LLP, 0);
		channel_writel(dwc, CTL_LO, 0);
		channel_writel(dwc, CTL_HI, 0);

		dma_writel(dw, CLEAR.ERROR, dwc->mask);
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		for (i = 0; i < dwc->cdesc->periods; i++)
			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);

		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}
/* ------------------------------------------------------------------------- */

static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_xfer;
	u32 status_err;
	int i;

	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "tasklet: status_err=%x\n", status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if (status_xfer & (1 << i))
			dwc_scan_descriptors(dw, dwc);
	}

	/*
	 * Re-enable interrupts.
	 */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}
static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n",
			dma_readl(dw, STATUS_INT));

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}
/*----------------------------------------------------------------------*/

static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc		*desc = txd_to_dw_desc(tx);
	struct dw_dma_chan	*dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);
	cookie = dma_cookie_assign(tx);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */
	if (list_empty(&dwc->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);

		list_add_tail(&desc->desc_node, &dwc->queue);
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	return cookie;
}
static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_desc		*desc;
	struct dw_desc		*first;
	struct dw_desc		*prev;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	u32			ctllo;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 7))
		src_width = dst_width = 3;
	else if (!((src | dest | len) & 3))
		src_width = dst_width = 2;
	else if (!((src | dest | len) & 1))
		src_width = dst_width = 1;
	else
		src_width = dst_width = 0;

	ctllo = DWC_DEFAULT_CTLLO(chan)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				DWC_MAX_COUNT);

		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		desc->lli.sar = src + offset;
		desc->lli.dar = dest + offset;
		desc->lli.ctllo = ctllo;
		desc->lli.ctlhi = xfer_count;

		if (!first) {
			first = desc;
		} else {
			prev->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					prev->txd.phys, sizeof(prev->lli),
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node,
					&first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}
static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave	*dws = chan->private;
	struct dma_slave_config	*sconfig = &dwc->dma_sconfig;
	struct dw_desc		*prev;
	struct dw_desc		*first;
	u32			ctllo;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_dma_slave\n");

	if (unlikely(!dws || !sg_len))
		return NULL;

	prev = first = NULL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __fls(sconfig->dst_addr_width);
		reg = sconfig->dst_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
			DWC_CTLL_FC(DW_DMA_FC_D_M2P);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			if (!((mem | len) & 7))
				mem_width = 3;
			else if (!((mem | len) & 3))
				mem_width = 2;
			else if (!((mem | len) & 1))
				mem_width = 1;
			else
				mem_width = 0;

slave_sg_todev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = mem;
			desc->lli.dar = reg;
			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
			if ((len >> mem_width) > DWC_MAX_COUNT) {
				dlen = DWC_MAX_COUNT << mem_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}

			desc->lli.ctlhi = dlen >> mem_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_todev_fill_desc;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __fls(sconfig->src_addr_width);
		reg = sconfig->src_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
			DWC_CTLL_FC(DW_DMA_FC_D_P2M);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			if (!((mem | len) & 7))
				mem_width = 3;
			else if (!((mem | len) & 3))
				mem_width = 2;
			else if (!((mem | len) & 1))
				mem_width = 1;
			else
				mem_width = 0;

slave_sg_fromdev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
						"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = reg;
			desc->lli.dar = mem;
			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
			if ((len >> reg_width) > DWC_MAX_COUNT) {
				dlen = DWC_MAX_COUNT << reg_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}
			desc->lli.ctlhi = dlen >> reg_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_fromdev_fill_desc;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->len = total_len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}
/*
 * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
 *
 * NOTE: burst size 2 is not supported by controller.
 *
 * This can be done by finding the most significant bit set: fls(n) - 2,
 * with a burst size of 1 mapped to 0.
 */
static inline void convert_burst(u32 *maxburst)
{
	if (*maxburst > 1)
		*maxburst = fls(*maxburst) - 2;
	else
		*maxburst = 0;
}
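
/*
 * Worked example (illustrative): fls(4) = 3, fls(8) = 4, fls(16) = 5, so
 * convert_burst() maps the dmaengine burst lengths 4/8/16 to the hardware
 * encodings 1/2/3, while a burst length of 1 takes the else branch and
 * becomes encoding 0.
 */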
static int
set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	/* Check if chan is configured for slave transfers */
	if (!chan->private)
		return -EINVAL;

	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));

	convert_burst(&dwc->dma_sconfig.src_maxburst);
	convert_burst(&dwc->dma_sconfig.dst_maxburst);

	return 0;
}
static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc, *_desc;
	unsigned long		flags;
	u32			cfglo;
	LIST_HEAD(list);

	if (cmd == DMA_PAUSE) {
		spin_lock_irqsave(&dwc->lock, flags);

		cfglo = channel_readl(dwc, CFG_LO);
		channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
		while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
			cpu_relax();

		dwc->paused = true;
		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_RESUME) {
		if (!dwc->paused)
			return 0;

		spin_lock_irqsave(&dwc->lock, flags);

		cfglo = channel_readl(dwc, CFG_LO);
		channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
		dwc->paused = false;

		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_TERMINATE_ALL) {
		spin_lock_irqsave(&dwc->lock, flags);

		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();

		dwc->paused = false;

		/* active_list entries will end up before queued entries */
		list_splice_init(&dwc->queue, &list);
		list_splice_init(&dwc->active_list, &list);

		spin_unlock_irqrestore(&dwc->lock, flags);

		/* Flush all pending and queued descriptors */
		list_for_each_entry_safe(desc, _desc, &list, desc_node)
			dwc_descriptor_complete(dwc, desc, false);
	} else if (cmd == DMA_SLAVE_CONFIG) {
		return set_runtime_config(chan, (struct dma_slave_config *)arg);
	} else {
		return -ENXIO;
	}

	return 0;
}
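
/*
 * Illustrative client-side sketch (not part of this driver): a peripheral
 * driver would typically reach the DMA_SLAVE_CONFIG path above through the
 * generic dmaengine helper, roughly like below. The FIFO address, bus
 * width and burst length are made-up example values.
 */
#if 0	/* example only */
static int example_configure_chan(struct dma_chan *chan, dma_addr_t fifo)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 8,
	};

	/* reaches dwc_control(chan, DMA_SLAVE_CONFIG, (unsigned long)&cfg) */
	return dmaengine_slave_config(chan, &cfg);
}
#endif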
static enum dma_status
dwc_tx_status(struct dma_chan *chan,
	      dma_cookie_t cookie,
	      struct dma_tx_state *txstate)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	enum dma_status		ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_SUCCESS) {
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

		ret = dma_cookie_status(chan, cookie, txstate);
	}

	if (ret != DMA_SUCCESS)
		dma_set_residue(txstate, dwc_first_active(dwc)->len);

	if (dwc->paused)
		return DMA_PAUSED;

	return ret;
}
static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);

	if (!list_empty(&dwc->queue))
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
}
static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc;
	int			i;
	unsigned long		flags;

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dma_cookie_init(chan);

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	spin_lock_irqsave(&dwc->lock, flags);
	i = dwc->descs_allocated;
	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_irqrestore(&dwc->lock, flags);

		desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
		if (!desc) {
			dev_info(chan2dev(chan),
				"only allocated %d descriptors\n", i);
			spin_lock_irqsave(&dwc->lock, flags);
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = dwc_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
				sizeof(desc->lli), DMA_TO_DEVICE);
		dwc_desc_put(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		i = ++dwc->descs_allocated;
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources allocated %d descriptors\n", i);

	return i;
}
static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc, *_desc;
	unsigned long		flags;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
			dwc->descs_allocated);

	/* ASSERT:  channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_irqsave(&dwc->lock, flags);
	list_splice_init(&dwc->free_list, &list);
	dwc->descs_allocated = 0;
	dwc->initialized = false;

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		dma_unmap_single(chan2parent(chan), desc->txd.phys,
				sizeof(desc->lli), DMA_TO_DEVICE);
		kfree(desc);
	}

	dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
}
/* --------------------- Cyclic DMA API extensions -------------------- */

/**
 * dw_dma_cyclic_start - start the cyclic DMA transfer
 * @chan: the DMA channel to start
 *
 * Must be called with soft interrupts disabled. Returns zero on success or
 * -errno on failure.
 */
int dw_dma_cyclic_start(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	unsigned long		flags;

	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&dwc->lock, flags);

	/* assert channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));
		spin_unlock_irqrestore(&dwc->lock, flags);
		return -EBUSY;
	}

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	/* setup DMAC channel registers */
	channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);

	channel_set_bit(dw, CH_EN, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
EXPORT_SYMBOL(dw_dma_cyclic_start);
/**
 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
 * @chan: the DMA channel to stop
 *
 * Must be called with soft interrupts disabled.
 */
void dw_dma_cyclic_stop(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);

	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	spin_unlock_irqrestore(&dwc->lock, flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);
/**
 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 *
 * Must be called before trying to start the transfer. Returns a valid struct
 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
 */
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct dw_dma_chan		*dwc = to_dw_dma_chan(chan);
	struct dma_slave_config		*sconfig = &dwc->dma_sconfig;
	struct dw_cyclic_desc		*cdesc;
	struct dw_cyclic_desc		*retval = NULL;
	struct dw_desc			*desc;
	struct dw_desc			*last = NULL;
	unsigned long			was_cyclic;
	unsigned int			reg_width;
	unsigned int			periods;
	unsigned int			i;
	unsigned long			flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"queue and/or active list are not empty\n");
		return ERR_PTR(-EBUSY);
	}

	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	spin_unlock_irqrestore(&dwc->lock, flags);
	if (was_cyclic) {
		dev_dbg(chan2dev(&dwc->chan),
				"channel already prepared for cyclic DMA\n");
		return ERR_PTR(-EBUSY);
	}

	retval = ERR_PTR(-EINVAL);

	if (direction == DMA_MEM_TO_DEV)
		reg_width = __ffs(sconfig->dst_addr_width);
	else
		reg_width = __ffs(sconfig->src_addr_width);

	periods = buf_len / period_len;

	/* Check for too big/unaligned periods and unaligned DMA buffer. */
	if (period_len > (DWC_MAX_COUNT << reg_width))
		goto out_err;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM))))
		goto out_err;

	retval = ERR_PTR(-ENOMEM);

	if (periods > NR_DESCS_PER_CHANNEL)
		goto out_err;

	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
	if (!cdesc)
		goto out_err;

	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
	if (!cdesc->desc)
		goto out_err_alloc;

	for (i = 0; i < periods; i++) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto out_err_desc_get;

		switch (direction) {
		case DMA_MEM_TO_DEV:
			desc->lli.dar = sconfig->dst_addr;
			desc->lli.sar = buf_addr + (period_len * i);
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_FIX
					| DWC_CTLL_SRC_INC
					| DWC_CTLL_INT_EN);

			desc->lli.ctllo |= sconfig->device_fc ?
				DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
				DWC_CTLL_FC(DW_DMA_FC_D_M2P);

			break;
		case DMA_DEV_TO_MEM:
			desc->lli.dar = buf_addr + (period_len * i);
			desc->lli.sar = sconfig->src_addr;
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_DST_INC
					| DWC_CTLL_SRC_FIX
					| DWC_CTLL_INT_EN);

			desc->lli.ctllo |= sconfig->device_fc ?
				DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
				DWC_CTLL_FC(DW_DMA_FC_D_P2M);

			break;
		default:
			break;
		}

		desc->lli.ctlhi = (period_len >> reg_width);
		cdesc->desc[i] = desc;

		if (last) {
			last->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					last->txd.phys, sizeof(last->lli),
					DMA_TO_DEVICE);
		}

		last = desc;
	}

	/* lets make a cyclic list */
	last->lli.llp = cdesc->desc[0]->txd.phys;
	dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
			sizeof(last->lli), DMA_TO_DEVICE);

	dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu "
			"period %zu periods %d\n", buf_addr, buf_len,
			period_len, periods);

	cdesc->periods = periods;
	dwc->cdesc = cdesc;

	return cdesc;

out_err_desc_get:
	while (i--)
		dwc_desc_put(dwc, cdesc->desc[i]);
out_err_alloc:
	kfree(cdesc);
out_err:
	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	return (struct dw_cyclic_desc *)retval;
}
EXPORT_SYMBOL(dw_dma_cyclic_prep);
/**
 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
 * @chan: the DMA channel to free
 */
void dw_dma_cyclic_free(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	struct dw_cyclic_desc	*cdesc = dwc->cdesc;
	int			i;
	unsigned long		flags;

	dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");

	if (!cdesc)
		return;

	spin_lock_irqsave(&dwc->lock, flags);

	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	for (i = 0; i < cdesc->periods; i++)
		dwc_desc_put(dwc, cdesc->desc[i]);

	kfree(cdesc->desc);
	kfree(cdesc);

	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_free);
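
/*
 * Illustrative usage sketch (not part of this driver): a client that owns
 * a DMA-mapped ring buffer would drive the cyclic extension above roughly
 * as follows; buffer and period sizes are invented and error handling is
 * trimmed.
 */
#if 0	/* example only */
static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
		size_t buf_len, size_t period_len)
{
	struct dw_cyclic_desc *cdesc;

	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
			DMA_MEM_TO_DEV);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	/* optional per-period notification */
	cdesc->period_callback = NULL;
	cdesc->period_callback_param = NULL;

	return dw_dma_cyclic_start(chan);
}
#endif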
/*----------------------------------------------------------------------*/

static void dw_dma_off(struct dw_dma *dw)
{
	int i;

	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();

	for (i = 0; i < dw->dma.chancnt; i++)
		dw->chan[i].initialized = false;
}
static int __init dw_probe(struct platform_device *pdev)
{
	struct dw_dma_platform_data *pdata;
	struct resource		*io;
	struct dw_dma		*dw;
	size_t			size;
	int			irq;
	int			err;
	int			i;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
		return -EINVAL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct dw_dma);
	size += pdata->nr_channels * sizeof(struct dw_dma_chan);
	dw = kzalloc(size, GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	dw->regs = ioremap(io->start, DW_REGLEN);
	if (!dw->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	dw->clk = clk_get(&pdev->dev, "hclk");
	if (IS_ERR(dw->clk)) {
		err = PTR_ERR(dw->clk);
		goto err_clk;
	}
	clk_prepare_enable(dw->clk);

	/* force dma off, just in case */
	dw_dma_off(dw);

	err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, dw);

	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

	dw->all_chan_mask = (1 << pdata->nr_channels) - 1;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < pdata->nr_channels; i++) {
		struct dw_dma_chan	*dwc = &dw->chan[i];

		dwc->chan.device = &dw->dma;
		dma_cookie_init(&dwc->chan);
		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
			list_add_tail(&dwc->chan.device_node,
					&dw->dma.channels);
		else
			list_add(&dwc->chan.device_node, &dw->dma.channels);

		/* 7 is highest priority & 0 is lowest. */
		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
			dwc->priority = pdata->nr_channels - i - 1;
		else
			dwc->priority = i;

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);
		INIT_LIST_HEAD(&dwc->free_list);

		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	/* Clear/disable all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	if (pdata->is_private)
		dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
	dw->dma.dev = &pdev->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;

	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
	dw->dma.device_control = dwc_control;

	dw->dma.device_tx_status = dwc_tx_status;
	dw->dma.device_issue_pending = dwc_issue_pending;

	dma_writel(dw, CFG, DW_CFG_DMA_EN);

	printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
			dev_name(&pdev->dev), pdata->nr_channels);

	dma_async_device_register(&dw->dma);

	return 0;

err_irq:
	clk_disable_unprepare(dw->clk);
	clk_put(dw->clk);
err_clk:
	iounmap(dw->regs);
	dw->regs = NULL;
err_release_r:
	release_resource(io);
err_kfree:
	kfree(dw);

	return err;
}
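
/*
 * Illustrative board-file sketch (not part of this driver): platform code
 * would describe the controller with dw_dma_platform_data roughly like
 * this; the values are invented for the example.
 */
#if 0	/* example only */
static struct dw_dma_platform_data example_dw_pdata = {
	.nr_channels		= 8,
	.chan_allocation_order	= CHAN_ALLOCATION_ASCENDING,
	.chan_priority		= CHAN_PRIORITY_ASCENDING,
	.is_private		= true,
};
#endif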
static int __exit dw_remove(struct platform_device *pdev)
{
	struct dw_dma		*dw = platform_get_drvdata(pdev);
	struct dw_dma_chan	*dwc, *_dwc;
	struct resource		*io;

	dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	free_irq(platform_get_irq(pdev, 0), dw);
	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	clk_disable_unprepare(dw->clk);
	clk_put(dw->clk);

	iounmap(dw->regs);
	dw->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, DW_REGLEN);

	kfree(dw);

	return 0;
}
static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable_unprepare(dw->clk);
}
static int dw_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable_unprepare(dw->clk);

	return 0;
}
static int dw_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	clk_prepare_enable(dw->clk);
	dma_writel(dw, CFG, DW_CFG_DMA_EN);

	return 0;
}
static const struct dev_pm_ops dw_dev_pm_ops = {
	.suspend_noirq = dw_suspend_noirq,
	.resume_noirq = dw_resume_noirq,
	.freeze_noirq = dw_suspend_noirq,
	.thaw_noirq = dw_resume_noirq,
	.restore_noirq = dw_resume_noirq,
	.poweroff_noirq = dw_suspend_noirq,
};
static const struct of_device_id dw_dma_id_table[] = {
	{ .compatible = "snps,dma-spear1340" },
	{}
};
MODULE_DEVICE_TABLE(of, dw_dma_id_table);
static struct platform_driver dw_driver = {
	.remove		= __exit_p(dw_remove),
	.shutdown	= dw_shutdown,
	.driver = {
		.name	= "dw_dmac",
		.pm	= &dw_dev_pm_ops,
		.of_match_table = of_match_ptr(dw_dma_id_table),
	},
};
dw_init(void)
1616 return platform_driver_probe(&dw_driver
, dw_probe
);
1618 subsys_initcall(dw_init
);
static void __exit dw_exit(void)
{
	platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");