/*
 * SA11x0 DMAengine support
 *
 * Copyright (C) 2012 Russell King
 *   Derived in part from arch/arm/mach-sa1100/dma.c,
 *   Copyright (C) 2000, 2001 by Nicolas Pitre
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sa11x0-dma.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#define NR_PHY_CHAN	6

#define DMA_ALIGN	3
#define DMA_MAX_SIZE	0x1fff
#define DMA_CHUNK_SIZE	0x1000

#define DMA_DDAR	0x00
#define DMA_DCSR_S	0x04
#define DMA_DCSR_C	0x08
#define DMA_DCSR_R	0x0c

#define DMA_DBSA	0x10
#define DMA_DBTA	0x14
#define DMA_DBSB	0x18
#define DMA_DBTB	0x1c

#define DMA_SIZE	0x20
#define DCSR_RUN	(1 << 0)
#define DCSR_IE		(1 << 1)
#define DCSR_ERROR	(1 << 2)
#define DCSR_DONEA	(1 << 3)
#define DCSR_STRTA	(1 << 4)
#define DCSR_DONEB	(1 << 5)
#define DCSR_STRTB	(1 << 6)
#define DCSR_BIU	(1 << 7)
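
/*
 * Each physical channel is double-buffered: buffers A and B have their
 * own start (STRTA/STRTB) and done (DONEA/DONEB) status bits, and
 * DCSR_BIU indicates which buffer the hardware is currently servicing.
 * The driver keeps both buffers loaded so transfers chain without a gap.
 */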
#define DDAR_RW		(1 << 0)	/* 0 = W, 1 = R */
#define DDAR_E		(1 << 1)	/* 0 = LE, 1 = BE */
#define DDAR_BS		(1 << 2)	/* 0 = BS4, 1 = BS8 */
#define DDAR_DW		(1 << 3)	/* 0 = 8b, 1 = 16b */
#define DDAR_Ser0UDCTr	(0x0 << 4)
#define DDAR_Ser0UDCRc	(0x1 << 4)
#define DDAR_Ser1SDLCTr	(0x2 << 4)
#define DDAR_Ser1SDLCRc	(0x3 << 4)
#define DDAR_Ser1UARTTr	(0x4 << 4)
#define DDAR_Ser1UARTRc	(0x5 << 4)
#define DDAR_Ser2ICPTr	(0x6 << 4)
#define DDAR_Ser2ICPRc	(0x7 << 4)
#define DDAR_Ser3UARTTr	(0x8 << 4)
#define DDAR_Ser3UARTRc	(0x9 << 4)
#define DDAR_Ser4MCP0Tr	(0xa << 4)
#define DDAR_Ser4MCP0Rc	(0xb << 4)
#define DDAR_Ser4MCP1Tr	(0xc << 4)
#define DDAR_Ser4MCP1Rc	(0xd << 4)
#define DDAR_Ser4SSPTr	(0xe << 4)
#define DDAR_Ser4SSPRc	(0xf << 4)
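
/*
 * Bits 4-7 of DDAR select the on-chip serial device; bits 0-3 select
 * transfer direction, endianness, burst size and device width.  The
 * device's FIFO address is folded into the remaining upper bits by
 * sa11x0_dma_slave_config().
 */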
struct sa11x0_dma_sg {
	u32			addr;
	u32			len;
};

struct sa11x0_dma_desc {
	struct dma_async_tx_descriptor tx;
	u32			ddar;
	size_t			size;

	/* maybe protected by c->lock */
	struct list_head	node;

	unsigned		sglen;
	struct sa11x0_dma_sg	sg[0];
};
struct sa11x0_dma_phy;
struct sa11x0_dma_chan {
	struct dma_chan		chan;
	spinlock_t		lock;
	dma_cookie_t		lc;

	/* protected by c->lock */
	struct sa11x0_dma_phy	*phy;
	enum dma_status		status;
	struct list_head	desc_submitted;
	struct list_head	desc_issued;

	/* protected by d->lock */
	struct list_head	node;

	u32			ddar;
	const char		*name;
};
struct sa11x0_dma_phy {
	void __iomem		*base;
	struct sa11x0_dma_dev	*dev;
	unsigned		num;

	struct sa11x0_dma_chan	*vchan;

	/* Protected by c->lock */
	unsigned		sg_load;
	struct sa11x0_dma_desc	*txd_load;
	unsigned		sg_done;
	struct sa11x0_dma_desc	*txd_done;
#ifdef CONFIG_PM_SLEEP
	u32			dbs[2];
	u32			dbt[2];
	u32			dcsr;
#endif
};
struct sa11x0_dma_dev {
	struct dma_device	slave;
	void __iomem		*base;
	spinlock_t		lock;
	struct tasklet_struct	task;
	struct list_head	chan_pending;
	struct list_head	desc_complete;
	struct sa11x0_dma_phy	phy[NR_PHY_CHAN];
};
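
/*
 * Virtual DMA channels (one per DDAR target) are multiplexed onto the
 * six physical channels by the tasklet; a vchan owns a pchan only
 * while it has descriptors to process.
 */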
static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct sa11x0_dma_chan, chan);
}
static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
{
	return container_of(dmadev, struct sa11x0_dma_dev, slave);
}
static struct sa11x0_dma_desc *to_sa11x0_dma_tx(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct sa11x0_dma_desc, tx);
}
static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
{
	if (list_empty(&c->desc_issued))
		return NULL;

	return list_first_entry(&c->desc_issued, struct sa11x0_dma_desc, node);
}
static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd)
{
	list_del(&txd->node);
	p->txd_load = txd;
	p->sg_load = 0;

	dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
		p->num, txd, txd->tx.cookie, txd->ddar);
}
static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
	struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = p->txd_load;
	struct sa11x0_dma_sg *sg;
	void __iomem *base = p->base;
	unsigned dbsx, dbtx;
	u32 dcsr;

	if (!txd)
		return;

	dcsr = readl_relaxed(base + DMA_DCSR_R);

	/* Don't try to load the next transfer if both buffers are started */
	if ((dcsr & (DCSR_STRTA | DCSR_STRTB)) == (DCSR_STRTA | DCSR_STRTB))
		return;

	if (p->sg_load == txd->sglen) {
		struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);

		/*
		 * We have reached the end of the current descriptor.
		 * Peek at the next descriptor, and if compatible with
		 * the current, start processing it.
		 */
		if (txn && txn->ddar == txd->ddar) {
			txd = txn;
			sa11x0_dma_start_desc(p, txn);
		} else {
			p->txd_load = NULL;
			return;
		}
	}

	sg = &txd->sg[p->sg_load++];

	/* Select buffer to load according to channel status */
	if (((dcsr & (DCSR_BIU | DCSR_STRTB)) == (DCSR_BIU | DCSR_STRTB)) ||
	    ((dcsr & (DCSR_BIU | DCSR_STRTA)) == 0)) {
		dbsx = DMA_DBSA;
		dbtx = DMA_DBTA;
		dcsr = DCSR_STRTA | DCSR_IE | DCSR_RUN;
	} else {
		dbsx = DMA_DBSB;
		dbtx = DMA_DBTB;
		dcsr = DCSR_STRTB | DCSR_IE | DCSR_RUN;
	}

	writel_relaxed(sg->addr, base + dbsx);
	writel_relaxed(sg->len, base + dbtx);
	writel(dcsr, base + DMA_DCSR_S);

	dev_dbg(p->dev->slave.dev, "pchan %u: load: DCSR:%02x DBS%c:%08x DBT%c:%08x\n",
		p->num, dcsr,
		'A' + (dbsx == DMA_DBSB), sg->addr,
		'A' + (dbtx == DMA_DBTB), sg->len);
}
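
/*
 * Account one completed sg entry per DONEA/DONEB event; once the last
 * entry of a txd finishes, hand the txd to the tasklet for callback
 * and freeing, then try to reload the buffer that just drained.
 */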
static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
	struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = p->txd_done;

	if (++p->sg_done == txd->sglen) {
		struct sa11x0_dma_dev *d = p->dev;

		dev_vdbg(d->slave.dev, "pchan %u: txd %p[%x]: completed\n",
			p->num, p->txd_done, p->txd_done->tx.cookie);

		c->lc = txd->tx.cookie;

		spin_lock(&d->lock);
		list_add_tail(&txd->node, &d->desc_complete);
		spin_unlock(&d->lock);

		p->sg_done = 0;
		p->txd_done = p->txd_load;

		tasklet_schedule(&d->task);
	}

	sa11x0_dma_start_sg(p, c);
}
static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
{
	struct sa11x0_dma_phy *p = dev_id;
	struct sa11x0_dma_dev *d = p->dev;
	struct sa11x0_dma_chan *c;
	u32 dcsr;

	dcsr = readl_relaxed(p->base + DMA_DCSR_R);
	if (!(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB)))
		return IRQ_NONE;

	/* Clear reported status bits */
	writel_relaxed(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB),
		p->base + DMA_DCSR_C);

	dev_dbg(d->slave.dev, "pchan %u: irq: DCSR:%02x\n", p->num, dcsr);

	if (dcsr & DCSR_ERROR) {
		dev_err(d->slave.dev, "pchan %u: error. DCSR:%02x DDAR:%08x DBSA:%08x DBTA:%08x DBSB:%08x DBTB:%08x\n",
			p->num, dcsr,
			readl_relaxed(p->base + DMA_DDAR),
			readl_relaxed(p->base + DMA_DBSA),
			readl_relaxed(p->base + DMA_DBTA),
			readl_relaxed(p->base + DMA_DBSB),
			readl_relaxed(p->base + DMA_DBTB));
	}

	c = p->vchan;
	if (c) {
		unsigned long flags;

		spin_lock_irqsave(&c->lock, flags);
		/*
		 * Now that we're holding the lock, check that the vchan
		 * really is associated with this pchan before touching the
		 * hardware.  This should always succeed, because we won't
		 * change p->vchan or c->phy while the channel is actively
		 * transferring.
		 */
		if (c->phy == p) {
			if (dcsr & DCSR_DONEA)
				sa11x0_dma_complete(p, c);
			if (dcsr & DCSR_DONEB)
				sa11x0_dma_complete(p, c);
		}
		spin_unlock_irqrestore(&c->lock, flags);
	}

	return IRQ_HANDLED;
}
static void sa11x0_dma_start_txd(struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = sa11x0_dma_next_desc(c);

	/* If the issued list is empty, we have no further txds to process */
	if (txd) {
		struct sa11x0_dma_phy *p = c->phy;

		sa11x0_dma_start_desc(p, txd);
		p->txd_done = txd;
		p->sg_done = 0;

		/* The channel should not have any transfers started */
		WARN_ON(readl_relaxed(p->base + DMA_DCSR_R) &
			(DCSR_STRTA | DCSR_STRTB));

		/* Clear the run and start bits before changing DDAR */
		writel_relaxed(DCSR_RUN | DCSR_STRTA | DCSR_STRTB,
			p->base + DMA_DCSR_C);
		writel_relaxed(txd->ddar, p->base + DMA_DDAR);

		/* Try to start both buffers */
		sa11x0_dma_start_sg(p, c);
		sa11x0_dma_start_sg(p, c);
	}
}
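
/*
 * The tasklet runs with no locks held on entry: it reaps completed
 * txds, releases physical channels that have gone idle, hands free
 * pchans to pending vchans, and finally invokes completion callbacks.
 */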
static void sa11x0_dma_tasklet(unsigned long arg)
{
	struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg;
	struct sa11x0_dma_phy *p;
	struct sa11x0_dma_chan *c;
	struct sa11x0_dma_desc *txd, *txn;
	LIST_HEAD(head);
	unsigned pch, pch_alloc = 0;

	dev_dbg(d->slave.dev, "tasklet enter\n");

	/* Get the completed tx descriptors */
	spin_lock_irq(&d->lock);
	list_splice_init(&d->desc_complete, &head);
	spin_unlock_irq(&d->lock);

	list_for_each_entry(txd, &head, node) {
		c = to_sa11x0_dma_chan(txd->tx.chan);

		dev_dbg(d->slave.dev, "vchan %p: txd %p[%x] completed\n",
			c, txd, txd->tx.cookie);

		spin_lock_irq(&c->lock);
		p = c->phy;
		if (p) {
			if (!p->txd_done)
				sa11x0_dma_start_txd(c);
			if (!p->txd_done) {
				/* No current txd associated with this channel */
				dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);

				/* Mark this channel free */
				c->phy = NULL;
				p->vchan = NULL;
			}
		}
		spin_unlock_irq(&c->lock);
	}

	spin_lock_irq(&d->lock);
	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		p = &d->phy[pch];

		if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
			c = list_first_entry(&d->chan_pending,
				struct sa11x0_dma_chan, node);
			list_del_init(&c->node);

			pch_alloc |= 1 << pch;

			/* Mark this channel allocated */
			p->vchan = c;

			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, c);
		}
	}
	spin_unlock_irq(&d->lock);

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		if (pch_alloc & (1 << pch)) {
			p = &d->phy[pch];
			c = p->vchan;

			spin_lock_irq(&c->lock);
			c->phy = p;

			sa11x0_dma_start_txd(c);
			spin_unlock_irq(&c->lock);
		}
	}

	/* Now free the completed tx descriptors, and call their callbacks */
	list_for_each_entry_safe(txd, txn, &head, node) {
		dma_async_tx_callback callback = txd->tx.callback;
		void *callback_param = txd->tx.callback_param;

		dev_dbg(d->slave.dev, "txd %p[%x]: callback and free\n",
			txd, txd->tx.cookie);

		list_del(&txd->node);
		kfree(txd);

		if (callback)
			callback(callback_param);
	}

	dev_dbg(d->slave.dev, "tasklet exit\n");
}
static void sa11x0_dma_desc_free(struct sa11x0_dma_dev *d, struct list_head *head)
{
	struct sa11x0_dma_desc *txd, *txn;

	list_for_each_entry_safe(txd, txn, head, node) {
		dev_dbg(d->slave.dev, "txd %p: freeing\n", txd);
		list_del(&txd->node);
		kfree(txd);
	}
}
static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan)
{
	return 0;
}
static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->lock, flags);
	spin_lock(&d->lock);
	list_del_init(&c->node);
	spin_unlock(&d->lock);

	list_splice_tail_init(&c->desc_submitted, &head);
	list_splice_tail_init(&c->desc_issued, &head);
	spin_unlock_irqrestore(&c->lock, flags);

	sa11x0_dma_desc_free(d, &head);
}
static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p)
{
	unsigned reg;
	u32 dcsr;

	dcsr = readl_relaxed(p->base + DMA_DCSR_R);

	if ((dcsr & (DCSR_BIU | DCSR_STRTA)) == DCSR_STRTA ||
	    (dcsr & (DCSR_BIU | DCSR_STRTB)) == DCSR_BIU)
		reg = DMA_DBSA;
	else
		reg = DMA_DBSB;

	return readl_relaxed(p->base + reg);
}
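
/*
 * Residue is derived from the hardware position returned by
 * sa11x0_dma_pos(): the unfinished remainder of the current txd plus
 * the full size of everything still queued behind it.
 */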
static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	struct sa11x0_dma_desc *txd;
	dma_cookie_t last_used, last_complete;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	last_used = c->chan.cookie;
	last_complete = c->lc;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS) {
		dma_set_tx_state(state, last_complete, last_used, 0);
		return ret;
	}

	spin_lock_irqsave(&c->lock, flags);
	p = c->phy;
	ret = c->status;
	if (p) {
		dma_addr_t addr = sa11x0_dma_pos(p);

		dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr);

		txd = p->txd_done;
		if (txd) {
			unsigned i;

			for (i = 0; i < txd->sglen; i++) {
				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
					i, txd->sg[i].addr, txd->sg[i].len);
				if (addr >= txd->sg[i].addr &&
				    addr < txd->sg[i].addr + txd->sg[i].len) {
					unsigned len;

					len = txd->sg[i].len -
						(addr - txd->sg[i].addr);
					dev_vdbg(d->slave.dev, "tx_status: [%u] +%x\n",
						i, len);
					bytes += len;
					i++;
					break;
				}
			}
			for (; i < txd->sglen; i++) {
				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x ++\n",
					i, txd->sg[i].addr, txd->sg[i].len);
				bytes += txd->sg[i].len;
			}
		}
		if (txd != p->txd_load && p->txd_load)
			bytes += p->txd_load->size;
	}
	list_for_each_entry(txd, &c->desc_issued, node) {
		bytes += txd->size;
	}
	spin_unlock_irqrestore(&c->lock, flags);

	dma_set_tx_state(state, last_complete, last_used, bytes);

	dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", bytes);

	return ret;
}
/*
 * Move pending txds to the issued list, and re-init pending list.
 * If not already pending, add this channel to the list of pending
 * channels and trigger the tasklet to run.
 */
static void sa11x0_dma_issue_pending(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	list_splice_tail_init(&c->desc_submitted, &c->desc_issued);
	if (!list_empty(&c->desc_issued)) {
		spin_lock(&d->lock);
		if (!c->phy && list_empty(&c->node)) {
			list_add_tail(&c->node, &d->chan_pending);
			tasklet_schedule(&d->task);
			dev_dbg(d->slave.dev, "vchan %p: issued\n", c);
		}
		spin_unlock(&d->lock);
	} else
		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", c);
	spin_unlock_irqrestore(&c->lock, flags);
}
static dma_cookie_t sa11x0_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(tx->chan);
	struct sa11x0_dma_desc *txd = to_sa11x0_dma_tx(tx);
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	c->chan.cookie += 1;
	if (c->chan.cookie < 0)
		c->chan.cookie = 1;
	txd->tx.cookie = c->chan.cookie;

	list_add_tail(&txd->node, &c->desc_submitted);
	spin_unlock_irqrestore(&c->lock, flags);

	dev_dbg(tx->chan->device->dev, "vchan %p: txd %p[%x]: submitted\n",
		c, txd, txd->tx.cookie);

	return txd->tx.cookie;
}
static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen,
	enum dma_transfer_direction dir, unsigned long flags, void *context)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_desc *txd;
	struct scatterlist *sgent;
	unsigned i, j = sglen;
	size_t size = 0;

	/* SA11x0 channels can only operate in their native direction */
	if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
			c, c->ddar, dir);
		return NULL;
	}

	/* Do not allow zero-sized txds */
	if (sglen == 0)
		return NULL;

	for_each_sg(sg, sgent, sglen, i) {
		dma_addr_t addr = sg_dma_address(sgent);
		unsigned int len = sg_dma_len(sgent);

		if (len > DMA_MAX_SIZE)
			j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1;
		if (addr & DMA_ALIGN) {
			dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n",
				c, addr);
			return NULL;
		}
	}

	txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC);
	if (!txd) {
		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", c);
		return NULL;
	}

	j = 0;
	for_each_sg(sg, sgent, sglen, i) {
		dma_addr_t addr = sg_dma_address(sgent);
		unsigned len = sg_dma_len(sgent);

		size += len;

		do {
			unsigned tlen = len;

			/*
			 * Check whether the transfer will fit.  If not, try
			 * to split the transfer up such that we end up with
			 * equal chunks - but make sure that we preserve the
			 * alignment.  This avoids small segments.
			 */
			if (tlen > DMA_MAX_SIZE) {
				unsigned mult = DIV_ROUND_UP(tlen,
					DMA_MAX_SIZE & ~DMA_ALIGN);

				tlen = (tlen / mult) & ~DMA_ALIGN;
			}

			txd->sg[j].addr = addr;
			txd->sg[j].len = tlen;

			addr += tlen;
			len -= tlen;

			j++;
		} while (len);
	}

	dma_async_tx_descriptor_init(&txd->tx, &c->chan);
	txd->tx.flags = flags;
	txd->tx.tx_submit = sa11x0_dma_tx_submit;
	txd->ddar = c->ddar;
	txd->size = size;
	txd->sglen = j;

	dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n",
		c, txd, txd->size, txd->sglen);

	return &txd->tx;
}
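
/*
 * Worked example of the splitting above: a 20000-byte segment exceeds
 * DMA_MAX_SIZE (0x1fff = 8191), so mult = DIV_ROUND_UP(20000, 8188) = 3
 * and the first chunk becomes (20000 / 3) & ~3 = 6664 bytes; the loop
 * then repeats on the 13336-byte remainder.
 */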
static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg)
{
	u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
	dma_addr_t addr;
	enum dma_slave_buswidth width;
	u32 maxburst;

	if (ddar & DDAR_RW) {
		addr = cfg->src_addr;
		width = cfg->src_addr_width;
		maxburst = cfg->src_maxburst;
	} else {
		addr = cfg->dst_addr;
		width = cfg->dst_addr_width;
		maxburst = cfg->dst_maxburst;
	}

	if ((width != DMA_SLAVE_BUSWIDTH_1_BYTE &&
	     width != DMA_SLAVE_BUSWIDTH_2_BYTES) ||
	    (maxburst != 4 && maxburst != 8))
		return -EINVAL;

	if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		ddar |= DDAR_DW;
	if (maxburst == 8)
		ddar |= DDAR_BS;

	dev_dbg(c->chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n",
		c, addr, width, maxburst);

	c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;

	return 0;
}
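
/*
 * DDAR wants the device address split: bits 31-28 stay in place, while
 * the word-aligned low bits 21-2 are shifted up into bits 27-8, hence
 * the << 6 in sa11x0_dma_slave_config() above.
 */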
static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	LIST_HEAD(head);
	unsigned long flags;
	int ret;

	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg);

	case DMA_TERMINATE_ALL:
		dev_dbg(d->slave.dev, "vchan %p: terminate all\n", c);
		/* Clear the tx descriptor lists */
		spin_lock_irqsave(&c->lock, flags);
		list_splice_tail_init(&c->desc_submitted, &head);
		list_splice_tail_init(&c->desc_issued, &head);

		p = c->phy;
		if (p) {
			struct sa11x0_dma_desc *txd, *txn;

			dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
			/* vchan is assigned to a pchan - stop the channel */
			writel(DCSR_RUN | DCSR_IE |
			       DCSR_STRTA | DCSR_DONEA |
			       DCSR_STRTB | DCSR_DONEB,
			       p->base + DMA_DCSR_C);

			spin_lock(&d->lock);
			list_for_each_entry_safe(txd, txn, &d->desc_complete, node)
				if (txd->tx.chan == &c->chan)
					list_move(&txd->node, &head);
			spin_unlock(&d->lock);

			if (p->txd_load) {
				if (p->txd_load != p->txd_done)
					list_add_tail(&p->txd_load->node, &head);
				p->txd_load = NULL;
			}
			if (p->txd_done) {
				list_add_tail(&p->txd_done->node, &head);
				p->txd_done = NULL;
			}
			c->phy = NULL;
			spin_lock(&d->lock);
			p->vchan = NULL;
			spin_unlock(&d->lock);
			tasklet_schedule(&d->task);
		}
		spin_unlock_irqrestore(&c->lock, flags);
		sa11x0_dma_desc_free(d, &head);
		ret = 0;
		break;

	case DMA_PAUSE:
		dev_dbg(d->slave.dev, "vchan %p: pause\n", c);
		spin_lock_irqsave(&c->lock, flags);
		if (c->status == DMA_IN_PROGRESS) {
			c->status = DMA_PAUSED;

			p = c->phy;
			if (p) {
				writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
			} else {
				spin_lock(&d->lock);
				list_del_init(&c->node);
				spin_unlock(&d->lock);
			}
		}
		spin_unlock_irqrestore(&c->lock, flags);
		ret = 0;
		break;

	case DMA_RESUME:
		dev_dbg(d->slave.dev, "vchan %p: resume\n", c);
		spin_lock_irqsave(&c->lock, flags);
		if (c->status == DMA_PAUSED) {
			c->status = DMA_IN_PROGRESS;

			p = c->phy;
			if (p) {
				writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
			} else if (!list_empty(&c->desc_issued)) {
				spin_lock(&d->lock);
				list_add_tail(&c->node, &d->chan_pending);
				spin_unlock(&d->lock);
			}
		}
		spin_unlock_irqrestore(&c->lock, flags);
		ret = 0;
		break;

	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
struct sa11x0_dma_channel_desc {
	u32 ddar;
	const char *name;
};

#define CD(d1, d2) { .ddar = DDAR_##d1 | d2, .name = #d1 }
static const struct sa11x0_dma_channel_desc chan_desc[] = {
	CD(Ser0UDCTr, 0),
	CD(Ser0UDCRc, DDAR_RW),
	CD(Ser1SDLCTr, 0),
	CD(Ser1SDLCRc, DDAR_RW),
	CD(Ser1UARTTr, 0),
	CD(Ser1UARTRc, DDAR_RW),
	CD(Ser2ICPTr, 0),
	CD(Ser2ICPRc, DDAR_RW),
	CD(Ser3UARTTr, 0),
	CD(Ser3UARTRc, DDAR_RW),
	CD(Ser4MCP0Tr, 0),
	CD(Ser4MCP0Rc, DDAR_RW),
	CD(Ser4MCP1Tr, 0),
	CD(Ser4MCP1Rc, DDAR_RW),
	CD(Ser4SSPTr, 0),
	CD(Ser4SSPRc, DDAR_RW),
};
static int __devinit sa11x0_dma_init_dmadev(struct dma_device *dmadev,
	struct device *dev)
{
	unsigned i;

	dmadev->chancnt = ARRAY_SIZE(chan_desc);
	INIT_LIST_HEAD(&dmadev->channels);
	dmadev->dev = dev;
	dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources;
	dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources;
	dmadev->device_control = sa11x0_dma_control;
	dmadev->device_tx_status = sa11x0_dma_tx_status;
	dmadev->device_issue_pending = sa11x0_dma_issue_pending;

	for (i = 0; i < dmadev->chancnt; i++) {
		struct sa11x0_dma_chan *c;

		c = kzalloc(sizeof(*c), GFP_KERNEL);
		if (!c) {
			dev_err(dev, "no memory for channel %u\n", i);
			return -ENOMEM;
		}

		c->chan.device = dmadev;
		c->status = DMA_IN_PROGRESS;
		c->ddar = chan_desc[i].ddar;
		c->name = chan_desc[i].name;
		spin_lock_init(&c->lock);
		INIT_LIST_HEAD(&c->desc_submitted);
		INIT_LIST_HEAD(&c->desc_issued);
		INIT_LIST_HEAD(&c->node);
		list_add_tail(&c->chan.device_node, &dmadev->channels);
	}

	return dma_async_device_register(dmadev);
}
static int sa11x0_dma_request_irq(struct platform_device *pdev, int nr,
	void *data)
{
	int irq = platform_get_irq(pdev, nr);

	if (irq <= 0)
		return -ENXIO;

	return request_irq(irq, sa11x0_dma_irq, 0, dev_name(&pdev->dev), data);
}
static void sa11x0_dma_free_irq(struct platform_device *pdev, int nr,
	void *data)
{
	int irq = platform_get_irq(pdev, nr);

	if (irq > 0)
		free_irq(irq, data);
}
static void sa11x0_dma_free_channels(struct dma_device *dmadev)
{
	struct sa11x0_dma_chan *c, *cn;

	list_for_each_entry_safe(c, cn, &dmadev->channels, chan.device_node) {
		list_del(&c->chan.device_node);
		kfree(c);
	}
}
static int __devinit sa11x0_dma_probe(struct platform_device *pdev)
{
	struct sa11x0_dma_dev *d;
	struct resource *res;
	unsigned i;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	spin_lock_init(&d->lock);
	INIT_LIST_HEAD(&d->chan_pending);
	INIT_LIST_HEAD(&d->desc_complete);

	d->base = ioremap(res->start, resource_size(res));
	if (!d->base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	tasklet_init(&d->task, sa11x0_dma_tasklet, (unsigned long)d);

	for (i = 0; i < NR_PHY_CHAN; i++) {
		struct sa11x0_dma_phy *p = &d->phy[i];

		p->dev = d;
		p->num = i;
		p->base = d->base + i * DMA_SIZE;
		writel_relaxed(DCSR_RUN | DCSR_IE | DCSR_ERROR |
			DCSR_DONEA | DCSR_STRTA | DCSR_DONEB | DCSR_STRTB,
			p->base + DMA_DCSR_C);
		writel_relaxed(0, p->base + DMA_DDAR);

		ret = sa11x0_dma_request_irq(pdev, i, p);
		if (ret) {
			while (i) {
				i--;
				sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
			}
			goto err_irq;
		}
	}

	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
	d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
	ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
	if (ret) {
		dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
			ret);
		goto err_slave_reg;
	}

	platform_set_drvdata(pdev, d);
	return 0;

 err_slave_reg:
	sa11x0_dma_free_channels(&d->slave);
	for (i = 0; i < NR_PHY_CHAN; i++)
		sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
 err_irq:
	tasklet_kill(&d->task);
	iounmap(d->base);
 err_ioremap:
	kfree(d);
 err_alloc:
	return ret;
}
static int __devexit sa11x0_dma_remove(struct platform_device *pdev)
{
	struct sa11x0_dma_dev *d = platform_get_drvdata(pdev);
	unsigned pch;

	dma_async_device_unregister(&d->slave);

	sa11x0_dma_free_channels(&d->slave);
	for (pch = 0; pch < NR_PHY_CHAN; pch++)
		sa11x0_dma_free_irq(pdev, pch, &d->phy[pch]);
	tasklet_kill(&d->task);
	iounmap(d->base);
	kfree(d);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int sa11x0_dma_suspend(struct device *dev)
{
	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
	unsigned pch;

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		struct sa11x0_dma_phy *p = &d->phy[pch];
		u32 dcsr, saved_dcsr;

		dcsr = saved_dcsr = readl_relaxed(p->base + DMA_DCSR_R);
		if (dcsr & DCSR_RUN) {
			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
			dcsr = readl_relaxed(p->base + DMA_DCSR_R);
		}

		saved_dcsr &= DCSR_RUN | DCSR_IE;
		if (dcsr & DCSR_BIU) {
			p->dbs[0] = readl_relaxed(p->base + DMA_DBSB);
			p->dbt[0] = readl_relaxed(p->base + DMA_DBTB);
			p->dbs[1] = readl_relaxed(p->base + DMA_DBSA);
			p->dbt[1] = readl_relaxed(p->base + DMA_DBTA);
			saved_dcsr |= (dcsr & DCSR_STRTA ? DCSR_STRTB : 0) |
				      (dcsr & DCSR_STRTB ? DCSR_STRTA : 0);
		} else {
			p->dbs[0] = readl_relaxed(p->base + DMA_DBSA);
			p->dbt[0] = readl_relaxed(p->base + DMA_DBTA);
			p->dbs[1] = readl_relaxed(p->base + DMA_DBSB);
			p->dbt[1] = readl_relaxed(p->base + DMA_DBTB);
			saved_dcsr |= dcsr & (DCSR_STRTA | DCSR_STRTB);
		}
		p->dcsr = saved_dcsr;

		writel(DCSR_STRTA | DCSR_STRTB, p->base + DMA_DCSR_C);
	}

	return 0;
}
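
/*
 * Note the save order above: dbs/dbt[0] always receive the buffer the
 * hardware was servicing, with the STRTA/STRTB bits swapped to match,
 * so sa11x0_dma_resume() can restore buffer A as the active one no
 * matter where suspend caught the transfer.
 */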
static int sa11x0_dma_resume(struct device *dev)
{
	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
	unsigned pch;

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		struct sa11x0_dma_phy *p = &d->phy[pch];
		struct sa11x0_dma_desc *txd = NULL;
		u32 dcsr = readl_relaxed(p->base + DMA_DCSR_R);

		WARN_ON(dcsr & (DCSR_BIU | DCSR_STRTA | DCSR_STRTB | DCSR_RUN));

		if (p->txd_done)
			txd = p->txd_done;
		else if (p->txd_load)
			txd = p->txd_load;

		if (!txd)
			continue;

		writel_relaxed(txd->ddar, p->base + DMA_DDAR);

		writel_relaxed(p->dbs[0], p->base + DMA_DBSA);
		writel_relaxed(p->dbt[0], p->base + DMA_DBTA);
		writel_relaxed(p->dbs[1], p->base + DMA_DBSB);
		writel_relaxed(p->dbt[1], p->base + DMA_DBTB);
		writel_relaxed(p->dcsr, p->base + DMA_DCSR_S);
	}

	return 0;
}
#endif
static const struct dev_pm_ops sa11x0_dma_pm_ops = {
	.suspend_noirq = sa11x0_dma_suspend,
	.resume_noirq = sa11x0_dma_resume,
	.freeze_noirq = sa11x0_dma_suspend,
	.thaw_noirq = sa11x0_dma_resume,
	.poweroff_noirq = sa11x0_dma_suspend,
	.restore_noirq = sa11x0_dma_resume,
};
static struct platform_driver sa11x0_dma_driver = {
	.driver = {
		.name	= "sa11x0-dma",
		.owner	= THIS_MODULE,
		.pm	= &sa11x0_dma_pm_ops,
	},
	.probe		= sa11x0_dma_probe,
	.remove		= __devexit_p(sa11x0_dma_remove),
};
bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &sa11x0_dma_driver.driver) {
		struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
		const char *p = param;

		return !strcmp(c->name, p);
	}
	return false;
}
EXPORT_SYMBOL(sa11x0_dma_filter_fn);
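
/*
 * Illustrative client usage (hypothetical consumer code, not part of
 * this driver): request a channel by one of the CD() names above.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, sa11x0_dma_filter_fn, "Ser4SSPTr");
 */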
static int __init sa11x0_dma_init(void)
{
	return platform_driver_register(&sa11x0_dma_driver);
}
subsys_initcall(sa11x0_dma_init);
static void __exit sa11x0_dma_exit(void)
{
	platform_driver_unregister(&sa11x0_dma_driver);
}
module_exit(sa11x0_dma_exit);
MODULE_AUTHOR("Russell King");
MODULE_DESCRIPTION("SA-11x0 DMA driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:sa11x0-dma");