/*
 * SA11x0 DMAengine support
 *
 * Copyright (C) 2012 Russell King
 *   Derived in part from arch/arm/mach-sa1100/dma.c,
 *   Copyright (C) 2000, 2001 by Nicolas Pitre
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sa11x0-dma.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"
#define NR_PHY_CHAN	6
#define DMA_ALIGN	3
#define DMA_MAX_SIZE	0x1fff
#define DMA_CHUNK_SIZE	0x1000

#define DMA_DDAR	0x00
#define DMA_DCSR_S	0x04
#define DMA_DCSR_C	0x08
#define DMA_DCSR_R	0x0c
#define DMA_DBSA	0x10
#define DMA_DBTA	0x14
#define DMA_DBSB	0x18
#define DMA_DBTB	0x1c
#define DMA_SIZE	0x20
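/*
 * Illustrative layout note (not from the original source): each physical
 * channel occupies one DMA_SIZE (0x20 byte) window of register space, so
 * for example channel 2's DDAR would sit at
 * d->base + 2 * DMA_SIZE + DMA_DDAR = d->base + 0x40, which is exactly
 * how sa11x0_dma_probe() computes p->base below.
 */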
#define DCSR_RUN	(1 << 0)
#define DCSR_IE		(1 << 1)
#define DCSR_ERROR	(1 << 2)
#define DCSR_DONEA	(1 << 3)
#define DCSR_STRTA	(1 << 4)
#define DCSR_DONEB	(1 << 5)
#define DCSR_STRTB	(1 << 6)
#define DCSR_BIU	(1 << 7)
#define DDAR_RW		(1 << 0)	/* 0 = W, 1 = R */
#define DDAR_E		(1 << 1)	/* 0 = LE, 1 = BE */
#define DDAR_BS		(1 << 2)	/* 0 = BS4, 1 = BS8 */
#define DDAR_DW		(1 << 3)	/* 0 = 8b, 1 = 16b */
#define DDAR_Ser0UDCTr	(0x0 << 4)
#define DDAR_Ser0UDCRc	(0x1 << 4)
#define DDAR_Ser1SDLCTr	(0x2 << 4)
#define DDAR_Ser1SDLCRc	(0x3 << 4)
#define DDAR_Ser1UARTTr	(0x4 << 4)
#define DDAR_Ser1UARTRc	(0x5 << 4)
#define DDAR_Ser2ICPTr	(0x6 << 4)
#define DDAR_Ser2ICPRc	(0x7 << 4)
#define DDAR_Ser3UARTTr	(0x8 << 4)
#define DDAR_Ser3UARTRc	(0x9 << 4)
#define DDAR_Ser4MCP0Tr	(0xa << 4)
#define DDAR_Ser4MCP0Rc	(0xb << 4)
#define DDAR_Ser4MCP1Tr	(0xc << 4)
#define DDAR_Ser4MCP1Rc	(0xd << 4)
#define DDAR_Ser4SSPTr	(0xe << 4)
#define DDAR_Ser4SSPRc	(0xf << 4)
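/*
 * Illustrative example (not part of the original defines): a DDAR value
 * for 16-bit, 8-entry-burst transmits to the Ser4 SSP would combine
 *
 *	DDAR_Ser4SSPTr | DDAR_DW | DDAR_BS
 *
 * with DDAR_RW clear for the memory-to-device direction.  The width,
 * burst and device-address bits are filled in at runtime by
 * sa11x0_dma_slave_config() below.
 */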
struct sa11x0_dma_sg {
	u32			addr;
	u32			len;
};
struct sa11x0_dma_desc {
	struct virt_dma_desc	vd;

	u32			ddar;
	size_t			size;
	unsigned		period;
	bool			cyclic;

	unsigned		sglen;
	struct sa11x0_dma_sg	sg[0];
};
struct sa11x0_dma_phy;
struct sa11x0_dma_chan {
	struct virt_dma_chan	vc;

	/* protected by c->vc.lock */
	struct sa11x0_dma_phy	*phy;
	enum dma_status		status;

	/* protected by d->lock */
	struct list_head	node;

	u32			ddar;
	const char		*name;
};
struct sa11x0_dma_phy {
	void __iomem		*base;
	struct sa11x0_dma_dev	*dev;
	unsigned		num;

	struct sa11x0_dma_chan	*vchan;

	/* Protected by c->vc.lock */
	unsigned		sg_load;
	struct sa11x0_dma_desc	*txd_load;
	unsigned		sg_done;
	struct sa11x0_dma_desc	*txd_done;
#ifdef CONFIG_PM_SLEEP
	u32			dbs[2];
	u32			dbt[2];
	u32			dcsr;
#endif
};
struct sa11x0_dma_dev {
	struct dma_device	slave;
	void __iomem		*base;
	spinlock_t		lock;
	struct tasklet_struct	task;
	struct list_head	chan_pending;
	struct sa11x0_dma_phy	phy[NR_PHY_CHAN];
};
static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct sa11x0_dma_chan, vc.chan);
}
static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
{
	return container_of(dmadev, struct sa11x0_dma_dev, slave);
}
static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

	return vd ? container_of(vd, struct sa11x0_dma_desc, vd) : NULL;
}
static void sa11x0_dma_free_desc(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct sa11x0_dma_desc, vd));
}
static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd)
{
	list_del(&txd->vd.node);
	p->txd_load = txd;
	p->sg_load = 0;

	dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
		p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar);
}
static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
	struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = p->txd_load;
	struct sa11x0_dma_sg *sg;
	void __iomem *base = p->base;
	unsigned dbsx, dbtx;
	u32 dcsr;

	if (!txd)
		return;

	dcsr = readl_relaxed(base + DMA_DCSR_R);

	/* Don't try to load the next transfer if both buffers are started */
	if ((dcsr & (DCSR_STRTA | DCSR_STRTB)) == (DCSR_STRTA | DCSR_STRTB))
		return;

	if (p->sg_load == txd->sglen) {
		if (!txd->cyclic) {
			struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);

			/*
			 * We have reached the end of the current descriptor.
			 * Peek at the next descriptor, and if compatible with
			 * the current, start processing it.
			 */
			if (txn && txn->ddar == txd->ddar) {
				txd = txn;
				sa11x0_dma_start_desc(p, txn);
			} else {
				p->txd_load = NULL;
				return;
			}
		} else {
			/* Cyclic: reset back to beginning */
			p->sg_load = 0;
		}
	}

	sg = &txd->sg[p->sg_load++];

	/* Select buffer to load according to channel status */
	if (((dcsr & (DCSR_BIU | DCSR_STRTB)) == (DCSR_BIU | DCSR_STRTB)) ||
	    ((dcsr & (DCSR_BIU | DCSR_STRTA)) == 0)) {
		dbsx = DMA_DBSA;
		dbtx = DMA_DBTA;
		dcsr = DCSR_STRTA | DCSR_IE | DCSR_RUN;
	} else {
		dbsx = DMA_DBSB;
		dbtx = DMA_DBTB;
		dcsr = DCSR_STRTB | DCSR_IE | DCSR_RUN;
	}

	writel_relaxed(sg->addr, base + dbsx);
	writel_relaxed(sg->len, base + dbtx);
	writel(dcsr, base + DMA_DCSR_S);

	dev_dbg(p->dev->slave.dev, "pchan %u: load: DCSR:%02x DBS%c:%08x DBT%c:%08x\n",
		p->num, dcsr,
		'A' + (dbsx == DMA_DBSB), sg->addr,
		'A' + (dbtx == DMA_DBTB), sg->len);
}
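/*
 * Worked example of the buffer selection above (illustrative): DCSR_BIU
 * names the buffer the hardware is currently emptying.  If B is both in
 * use and started (BIU | STRTB set), or A is neither in use nor started,
 * then A is the idle buffer, so the next sg entry is loaded into
 * DBSA/DBTA and kicked off with DCSR_STRTA; otherwise it goes to the B
 * registers.  This ping-pong keeps one buffer transferring while the
 * other is being reloaded.
 */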
static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
	struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = p->txd_done;

	if (++p->sg_done == txd->sglen) {
		if (!txd->cyclic) {
			vchan_cookie_complete(&txd->vd);

			p->sg_done = 0;
			p->txd_done = p->txd_load;

			if (!p->txd_done)
				tasklet_schedule(&p->dev->task);
		} else {
			if ((p->sg_done % txd->period) == 0)
				vchan_cyclic_callback(&txd->vd);

			/* Cyclic: reset back to beginning */
			p->sg_done = 0;
		}
	}

	sa11x0_dma_start_sg(p, c);
}
static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
{
	struct sa11x0_dma_phy *p = dev_id;
	struct sa11x0_dma_dev *d = p->dev;
	struct sa11x0_dma_chan *c;
	u32 dcsr;

	dcsr = readl_relaxed(p->base + DMA_DCSR_R);
	if (!(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB)))
		return IRQ_NONE;

	/* Clear reported status bits */
	writel_relaxed(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB),
		p->base + DMA_DCSR_C);

	dev_dbg(d->slave.dev, "pchan %u: irq: DCSR:%02x\n", p->num, dcsr);

	if (dcsr & DCSR_ERROR) {
		dev_err(d->slave.dev, "pchan %u: error. DCSR:%02x DDAR:%08x DBSA:%08x DBTA:%08x DBSB:%08x DBTB:%08x\n",
			p->num, dcsr,
			readl_relaxed(p->base + DMA_DDAR),
			readl_relaxed(p->base + DMA_DBSA),
			readl_relaxed(p->base + DMA_DBTA),
			readl_relaxed(p->base + DMA_DBSB),
			readl_relaxed(p->base + DMA_DBTB));
	}

	c = p->vchan;
	if (c) {
		unsigned long flags;

		spin_lock_irqsave(&c->vc.lock, flags);
		/*
		 * Now that we're holding the lock, check that the vchan
		 * really is associated with this pchan before touching the
		 * hardware.  This should always succeed, because we won't
		 * change p->vchan or c->phy while the channel is actively
		 * transferring.
		 */
		if (c->phy == p) {
			if (dcsr & DCSR_DONEA)
				sa11x0_dma_complete(p, c);
			if (dcsr & DCSR_DONEB)
				sa11x0_dma_complete(p, c);
		}
		spin_unlock_irqrestore(&c->vc.lock, flags);
	}

	return IRQ_HANDLED;
}
static void sa11x0_dma_start_txd(struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = sa11x0_dma_next_desc(c);

	/* If the issued list is empty, we have no further txds to process */
	if (txd) {
		struct sa11x0_dma_phy *p = c->phy;

		sa11x0_dma_start_desc(p, txd);
		p->txd_done = txd;
		p->sg_done = 0;

		/* The channel should not have any transfers started */
		WARN_ON(readl_relaxed(p->base + DMA_DCSR_R) &
				      (DCSR_STRTA | DCSR_STRTB));

		/* Clear the run and start bits before changing DDAR */
		writel_relaxed(DCSR_RUN | DCSR_STRTA | DCSR_STRTB,
			       p->base + DMA_DCSR_C);
		writel_relaxed(txd->ddar, p->base + DMA_DDAR);

		/* Try to start both buffers */
		sa11x0_dma_start_sg(p, c);
		sa11x0_dma_start_sg(p, c);
	}
}
static void sa11x0_dma_tasklet(unsigned long arg)
{
	struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg;
	struct sa11x0_dma_phy *p;
	struct sa11x0_dma_chan *c;
	unsigned pch, pch_alloc = 0;

	dev_dbg(d->slave.dev, "tasklet enter\n");

	list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) {
		spin_lock_irq(&c->vc.lock);
		p = c->phy;
		if (p && !p->txd_done) {
			sa11x0_dma_start_txd(c);
			if (!p->txd_done) {
				/* No current txd associated with this channel */
				dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);

				/* Mark this channel free */
				c->phy = NULL;
				p->vchan = NULL;
			}
		}
		spin_unlock_irq(&c->vc.lock);
	}

	spin_lock_irq(&d->lock);
	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		p = &d->phy[pch];

		if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
			c = list_first_entry(&d->chan_pending,
				struct sa11x0_dma_chan, node);
			list_del_init(&c->node);

			pch_alloc |= 1 << pch;

			/* Mark this channel allocated */
			p->vchan = c;

			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
		}
	}
	spin_unlock_irq(&d->lock);

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		if (pch_alloc & (1 << pch)) {
			p = &d->phy[pch];
			c = p->vchan;

			spin_lock_irq(&c->vc.lock);
			c->phy = p;

			sa11x0_dma_start_txd(c);
			spin_unlock_irq(&c->vc.lock);
		}
	}

	dev_dbg(d->slave.dev, "tasklet exit\n");
}
static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan)
{
	return 0;
}
static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	list_del_init(&c->node);
	spin_unlock_irqrestore(&d->lock, flags);

	vchan_free_chan_resources(&c->vc);
}
static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p)
{
	unsigned reg;
	u32 dcsr;

	dcsr = readl_relaxed(p->base + DMA_DCSR_R);

	/*
	 * The current position is in whichever buffer the hardware is
	 * actively emptying: buffer A if it is started but not in use,
	 * or if B is marked in-use but not started.
	 */
	if ((dcsr & (DCSR_BIU | DCSR_STRTA)) == DCSR_STRTA ||
	    (dcsr & (DCSR_BIU | DCSR_STRTB)) == DCSR_BIU)
		reg = DMA_DBSA;
	else
		reg = DMA_DBSB;

	return readl_relaxed(p->base + reg);
}
static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;

	ret = dma_cookie_status(&c->vc.chan, cookie, state);
	if (ret == DMA_COMPLETE)
		return ret;

	if (!state)
		return c->status;

	spin_lock_irqsave(&c->vc.lock, flags);
	p = c->phy;
	ret = c->status;

	/*
	 * If the cookie is on our issue queue, then the residue is
	 * its total size.
	 */
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size;
	} else if (!p) {
		state->residue = 0;
	} else {
		struct sa11x0_dma_desc *txd;
		size_t bytes = 0;

		if (p->txd_done && p->txd_done->vd.tx.cookie == cookie)
			txd = p->txd_done;
		else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie)
			txd = p->txd_load;
		else
			txd = NULL;

		if (txd) {
			dma_addr_t addr = sa11x0_dma_pos(p);
			unsigned i;

			dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr);

			for (i = 0; i < txd->sglen; i++) {
				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
					i, txd->sg[i].addr, txd->sg[i].len);
				if (addr >= txd->sg[i].addr &&
				    addr < txd->sg[i].addr + txd->sg[i].len) {
					unsigned len;

					len = txd->sg[i].len -
						(addr - txd->sg[i].addr);
					dev_vdbg(d->slave.dev, "tx_status: [%u] +%x\n",
						i, len);
					bytes += len;
					i++;
					break;
				}
			}
			for (; i < txd->sglen; i++) {
				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x ++\n",
					i, txd->sg[i].addr, txd->sg[i].len);
				bytes += txd->sg[i].len;
			}
		}
		state->residue = bytes;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", state->residue);

	return ret;
}
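/*
 * Illustrative residue example (not from the original source): with three
 * sg entries of 0x1000 bytes each and the hardware position 0x400 bytes
 * into entry 1, the loops above count the 0xc00 bytes remaining in entry 1
 * plus the whole 0x1000 bytes of entry 2, reporting a residue of 0x1c00.
 */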
/*
 * Move pending txds to the issued list, and re-init pending list.
 * If not already pending, add this channel to the list of pending
 * channels and trigger the tasklet to run.
 */
static void sa11x0_dma_issue_pending(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc)) {
		if (!c->phy) {
			spin_lock(&d->lock);
			if (list_empty(&c->node)) {
				list_add_tail(&c->node, &d->chan_pending);
				tasklet_schedule(&d->task);
				dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
			}
			spin_unlock(&d->lock);
		}
	} else
		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
	spin_unlock_irqrestore(&c->vc.lock, flags);
}
static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen,
	enum dma_transfer_direction dir, unsigned long flags, void *context)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_desc *txd;
	struct scatterlist *sgent;
	unsigned i, j = sglen;
	size_t size = 0;

	/* SA11x0 channels can only operate in their native direction */
	if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
			&c->vc, c->ddar, dir);
		return NULL;
	}

	/* Do not allow zero-sized txds */
	if (sglen == 0)
		return NULL;

	for_each_sg(sg, sgent, sglen, i) {
		dma_addr_t addr = sg_dma_address(sgent);
		unsigned int len = sg_dma_len(sgent);

		if (len > DMA_MAX_SIZE)
			j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1;
		if (addr & DMA_ALIGN) {
			dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n",
				&c->vc, addr);
			return NULL;
		}
	}

	txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC);
	if (!txd) {
		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
		return NULL;
	}

	j = 0;
	for_each_sg(sg, sgent, sglen, i) {
		dma_addr_t addr = sg_dma_address(sgent);
		unsigned len = sg_dma_len(sgent);

		size += len;

		do {
			unsigned tlen = len;

			/*
			 * Check whether the transfer will fit.  If not, try
			 * to split the transfer up such that we end up with
			 * equal chunks - but make sure that we preserve the
			 * alignment.  This avoids small segments.
			 */
			if (tlen > DMA_MAX_SIZE) {
				unsigned mult = DIV_ROUND_UP(tlen,
					DMA_MAX_SIZE & ~DMA_ALIGN);

				tlen = (tlen / mult) & ~DMA_ALIGN;
			}

			txd->sg[j].addr = addr;
			txd->sg[j].len = tlen;

			addr += tlen;
			len -= tlen;

			j++;
		} while (len);
	}

	txd->ddar = c->ddar;
	txd->size = size;
	txd->sglen = j;

	dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n",
		&c->vc, &txd->vd, txd->size, txd->sglen);

	return vchan_tx_prep(&c->vc, &txd->vd, flags);
}
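/*
 * Worked example of the chunk splitting above (illustrative): with
 * DMA_MAX_SIZE & ~DMA_ALIGN = 0x1ffc, a 0x2100 byte segment gives
 * mult = DIV_ROUND_UP(0x2100, 0x1ffc) = 2, so tlen = (0x2100 / 2) & ~3
 * = 0x1080.  The segment is emitted as two equal 0x1080 byte sg entries
 * rather than a 0x1ffc chunk followed by a tiny 0x104 byte tail.
 */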
static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
	enum dma_transfer_direction dir, unsigned long flags, void *context)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_desc *txd;
	unsigned i, j, k, sglen, sgperiod;

	/* SA11x0 channels can only operate in their native direction */
	if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
			&c->vc, c->ddar, dir);
		return NULL;
	}

	sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN);
	sglen = size * sgperiod / period;

	/* Do not allow zero-sized txds */
	if (sglen == 0)
		return NULL;

	txd = kzalloc(sizeof(*txd) + sglen * sizeof(txd->sg[0]), GFP_ATOMIC);
	if (!txd) {
		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
		return NULL;
	}

	for (i = k = 0; i < size / period; i++) {
		size_t tlen, len = period;

		for (j = 0; j < sgperiod; j++, k++) {
			tlen = len;

			if (tlen > DMA_MAX_SIZE) {
				unsigned mult = DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN);
				tlen = (tlen / mult) & ~DMA_ALIGN;
			}

			txd->sg[k].addr = addr;
			txd->sg[k].len = tlen;
			addr += tlen;
			len -= tlen;
		}

		WARN_ON(len != 0);
	}

	txd->ddar = c->ddar;
	txd->size = size;
	txd->sglen = sglen;
	txd->cyclic = 1;
	txd->period = sgperiod;

	return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}
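/*
 * Illustrative cyclic sizing example (not from the original source): for
 * a 16 KiB buffer with 4 KiB periods, sgperiod = DIV_ROUND_UP(0x1000,
 * 0x1ffc) = 1 and sglen = 0x4000 * 1 / 0x1000 = 4, i.e. one sg entry per
 * period, with txd->period = 1 so vchan_cyclic_callback() fires after
 * every completed entry.
 */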
static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg)
{
	u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
	dma_addr_t addr;
	enum dma_slave_buswidth width;
	u32 maxburst;

	if (ddar & DDAR_RW) {
		addr = cfg->src_addr;
		width = cfg->src_addr_width;
		maxburst = cfg->src_maxburst;
	} else {
		addr = cfg->dst_addr;
		width = cfg->dst_addr_width;
		maxburst = cfg->dst_maxburst;
	}

	if ((width != DMA_SLAVE_BUSWIDTH_1_BYTE &&
	     width != DMA_SLAVE_BUSWIDTH_2_BYTES) ||
	    (maxburst != 4 && maxburst != 8))
		return -EINVAL;

	if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		ddar |= DDAR_DW;
	if (maxburst == 8)
		ddar |= DDAR_BS;

	dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n",
		&c->vc, addr, width, maxburst);

	c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;

	return 0;
}
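/*
 * Worked example of the DDAR address encoding above (illustrative): the
 * device address keeps its top nibble in place while bits 21..2 are
 * shifted up by 6, so addr = 0x80070000 yields
 * 0x80000000 | (0x00070000 << 6) = 0x81c00000 OR'd into ddar.
 */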
static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	LIST_HEAD(head);
	unsigned long flags;
	int ret;

	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg);

	case DMA_TERMINATE_ALL:
		dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
		/* Clear the tx descriptor lists */
		spin_lock_irqsave(&c->vc.lock, flags);
		vchan_get_all_descriptors(&c->vc, &head);

		p = c->phy;
		if (p) {
			dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
			/* vchan is assigned to a pchan - stop the channel */
			writel(DCSR_RUN | DCSR_IE |
			       DCSR_STRTA | DCSR_DONEA |
			       DCSR_STRTB | DCSR_DONEB,
			       p->base + DMA_DCSR_C);

			if (p->txd_load) {
				if (p->txd_load != p->txd_done)
					list_add_tail(&p->txd_load->vd.node, &head);
				p->txd_load = NULL;
			}
			if (p->txd_done) {
				list_add_tail(&p->txd_done->vd.node, &head);
				p->txd_done = NULL;
			}

			c->phy = NULL;
			spin_lock(&d->lock);
			p->vchan = NULL;
			spin_unlock(&d->lock);
			tasklet_schedule(&d->task);
		}
		spin_unlock_irqrestore(&c->vc.lock, flags);
		vchan_dma_desc_free_list(&c->vc, &head);
		ret = 0;
		break;

	case DMA_PAUSE:
		dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
		spin_lock_irqsave(&c->vc.lock, flags);
		if (c->status == DMA_IN_PROGRESS) {
			c->status = DMA_PAUSED;

			p = c->phy;
			if (p) {
				writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
			} else {
				spin_lock(&d->lock);
				list_del_init(&c->node);
				spin_unlock(&d->lock);
			}
		}
		spin_unlock_irqrestore(&c->vc.lock, flags);
		ret = 0;
		break;

	case DMA_RESUME:
		dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
		spin_lock_irqsave(&c->vc.lock, flags);
		if (c->status == DMA_PAUSED) {
			c->status = DMA_IN_PROGRESS;

			p = c->phy;
			if (p) {
				writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
			} else if (!list_empty(&c->vc.desc_issued)) {
				spin_lock(&d->lock);
				list_add_tail(&c->node, &d->chan_pending);
				spin_unlock(&d->lock);
			}
		}
		spin_unlock_irqrestore(&c->vc.lock, flags);
		ret = 0;
		break;

	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
struct sa11x0_dma_channel_desc {
	u32 ddar;
	const char *name;
};

#define CD(d1, d2) { .ddar = DDAR_##d1 | d2, .name = #d1 }
static const struct sa11x0_dma_channel_desc chan_desc[] = {
	CD(Ser0UDCTr, 0),
	CD(Ser0UDCRc, DDAR_RW),
	CD(Ser1SDLCTr, 0),
	CD(Ser1SDLCRc, DDAR_RW),
	CD(Ser1UARTTr, 0),
	CD(Ser1UARTRc, DDAR_RW),
	CD(Ser2ICPTr, 0),
	CD(Ser2ICPRc, DDAR_RW),
	CD(Ser3UARTTr, 0),
	CD(Ser3UARTRc, DDAR_RW),
	CD(Ser4MCP0Tr, 0),
	CD(Ser4MCP0Rc, DDAR_RW),
	CD(Ser4MCP1Tr, 0),
	CD(Ser4MCP1Rc, DDAR_RW),
	CD(Ser4SSPTr, 0),
	CD(Ser4SSPRc, DDAR_RW),
};
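/*
 * Illustrative expansion of the CD() macro: CD(Ser4SSPTr, 0) becomes
 * { .ddar = DDAR_Ser4SSPTr | 0, .name = "Ser4SSPTr" }, so each table
 * entry pairs a request name with its DDAR device/direction bits.
 */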
static int sa11x0_dma_init_dmadev(struct dma_device *dmadev,
	struct device *dev)
{
	unsigned i;

	dmadev->chancnt = ARRAY_SIZE(chan_desc);
	INIT_LIST_HEAD(&dmadev->channels);
	dmadev->dev = dev;
	dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources;
	dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources;
	dmadev->device_control = sa11x0_dma_control;
	dmadev->device_tx_status = sa11x0_dma_tx_status;
	dmadev->device_issue_pending = sa11x0_dma_issue_pending;

	for (i = 0; i < dmadev->chancnt; i++) {
		struct sa11x0_dma_chan *c;

		c = kzalloc(sizeof(*c), GFP_KERNEL);
		if (!c) {
			dev_err(dev, "no memory for channel %u\n", i);
			return -ENOMEM;
		}

		c->status = DMA_IN_PROGRESS;
		c->ddar = chan_desc[i].ddar;
		c->name = chan_desc[i].name;
		INIT_LIST_HEAD(&c->node);

		c->vc.desc_free = sa11x0_dma_free_desc;
		vchan_init(&c->vc, dmadev);
	}

	return dma_async_device_register(dmadev);
}
static int sa11x0_dma_request_irq(struct platform_device *pdev, int nr,
	void *data)
{
	int irq = platform_get_irq(pdev, nr);

	if (irq <= 0)
		return -ENXIO;

	return request_irq(irq, sa11x0_dma_irq, 0, dev_name(&pdev->dev), data);
}

static void sa11x0_dma_free_irq(struct platform_device *pdev, int nr,
	void *data)
{
	int irq = platform_get_irq(pdev, nr);

	if (irq > 0)
		free_irq(irq, data);
}
static void sa11x0_dma_free_channels(struct dma_device *dmadev)
{
	struct sa11x0_dma_chan *c, *cn;

	list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
}
static int sa11x0_dma_probe(struct platform_device *pdev)
{
	struct sa11x0_dma_dev *d;
	struct resource *res;
	unsigned i;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	spin_lock_init(&d->lock);
	INIT_LIST_HEAD(&d->chan_pending);

	d->base = ioremap(res->start, resource_size(res));
	if (!d->base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	tasklet_init(&d->task, sa11x0_dma_tasklet, (unsigned long)d);

	for (i = 0; i < NR_PHY_CHAN; i++) {
		struct sa11x0_dma_phy *p = &d->phy[i];

		p->dev = d;
		p->num = i;
		p->base = d->base + i * DMA_SIZE;
		writel_relaxed(DCSR_RUN | DCSR_IE | DCSR_ERROR |
			DCSR_DONEA | DCSR_STRTA | DCSR_DONEB | DCSR_STRTB,
			p->base + DMA_DCSR_C);
		writel_relaxed(0, p->base + DMA_DDAR);

		ret = sa11x0_dma_request_irq(pdev, i, p);
		if (ret) {
			while (i) {
				i--;
				sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
			}
			goto err_irq;
		}
	}

	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
	d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
	d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic;
	ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
	if (ret) {
		dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
			ret);
		goto err_slave_reg;
	}

	platform_set_drvdata(pdev, d);
	return 0;

 err_slave_reg:
	sa11x0_dma_free_channels(&d->slave);
	for (i = 0; i < NR_PHY_CHAN; i++)
		sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
 err_irq:
	tasklet_kill(&d->task);
	iounmap(d->base);
 err_ioremap:
	kfree(d);
 err_alloc:
	return ret;
}
static int sa11x0_dma_remove(struct platform_device *pdev)
{
	struct sa11x0_dma_dev *d = platform_get_drvdata(pdev);
	unsigned pch;

	dma_async_device_unregister(&d->slave);

	sa11x0_dma_free_channels(&d->slave);
	for (pch = 0; pch < NR_PHY_CHAN; pch++)
		sa11x0_dma_free_irq(pdev, pch, &d->phy[pch]);
	tasklet_kill(&d->task);
	iounmap(d->base);
	kfree(d);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int sa11x0_dma_suspend(struct device *dev)
{
	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
	unsigned pch;

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		struct sa11x0_dma_phy *p = &d->phy[pch];
		u32 dcsr, saved_dcsr;

		dcsr = saved_dcsr = readl_relaxed(p->base + DMA_DCSR_R);
		if (dcsr & DCSR_RUN) {
			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
			dcsr = readl_relaxed(p->base + DMA_DCSR_R);
		}

		saved_dcsr &= DCSR_RUN | DCSR_IE;
		if (dcsr & DCSR_BIU) {
			p->dbs[0] = readl_relaxed(p->base + DMA_DBSB);
			p->dbt[0] = readl_relaxed(p->base + DMA_DBTB);
			p->dbs[1] = readl_relaxed(p->base + DMA_DBSA);
			p->dbt[1] = readl_relaxed(p->base + DMA_DBTA);
			saved_dcsr |= (dcsr & DCSR_STRTA ? DCSR_STRTB : 0) |
				      (dcsr & DCSR_STRTB ? DCSR_STRTA : 0);
		} else {
			p->dbs[0] = readl_relaxed(p->base + DMA_DBSA);
			p->dbt[0] = readl_relaxed(p->base + DMA_DBTA);
			p->dbs[1] = readl_relaxed(p->base + DMA_DBSB);
			p->dbt[1] = readl_relaxed(p->base + DMA_DBTB);
			saved_dcsr |= dcsr & (DCSR_STRTA | DCSR_STRTB);
		}
		p->dcsr = saved_dcsr;

		writel(DCSR_STRTA | DCSR_STRTB, p->base + DMA_DCSR_C);
	}

	return 0;
}
static int sa11x0_dma_resume(struct device *dev)
{
	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
	unsigned pch;

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		struct sa11x0_dma_phy *p = &d->phy[pch];
		struct sa11x0_dma_desc *txd = NULL;
		u32 dcsr = readl_relaxed(p->base + DMA_DCSR_R);

		WARN_ON(dcsr & (DCSR_BIU | DCSR_STRTA | DCSR_STRTB | DCSR_RUN));

		if (p->txd_done)
			txd = p->txd_done;
		else if (p->txd_load)
			txd = p->txd_load;

		if (!txd)
			continue;

		writel_relaxed(txd->ddar, p->base + DMA_DDAR);

		writel_relaxed(p->dbs[0], p->base + DMA_DBSA);
		writel_relaxed(p->dbt[0], p->base + DMA_DBTA);
		writel_relaxed(p->dbs[1], p->base + DMA_DBSB);
		writel_relaxed(p->dbt[1], p->base + DMA_DBTB);
		writel_relaxed(p->dcsr, p->base + DMA_DCSR_S);
	}

	return 0;
}
#endif
static const struct dev_pm_ops sa11x0_dma_pm_ops = {
	.suspend_noirq = sa11x0_dma_suspend,
	.resume_noirq = sa11x0_dma_resume,
	.freeze_noirq = sa11x0_dma_suspend,
	.thaw_noirq = sa11x0_dma_resume,
	.poweroff_noirq = sa11x0_dma_suspend,
	.restore_noirq = sa11x0_dma_resume,
};
static struct platform_driver sa11x0_dma_driver = {
	.driver = {
		.name	= "sa11x0-dma",
		.owner	= THIS_MODULE,
		.pm	= &sa11x0_dma_pm_ops,
	},
	.probe		= sa11x0_dma_probe,
	.remove		= sa11x0_dma_remove,
};
bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &sa11x0_dma_driver.driver) {
		struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
		const char *p = param;

		return !strcmp(c->name, p);
	}
	return false;
}
EXPORT_SYMBOL(sa11x0_dma_filter_fn);
static int __init sa11x0_dma_init(void)
{
	return platform_driver_register(&sa11x0_dma_driver);
}
subsys_initcall(sa11x0_dma_init);

static void __exit sa11x0_dma_exit(void)
{
	platform_driver_unregister(&sa11x0_dma_driver);
}
module_exit(sa11x0_dma_exit);
MODULE_AUTHOR("Russell King");
MODULE_DESCRIPTION("SA-11x0 DMA driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:sa11x0-dma");