/*
 * Copyright (C) 2013-2014 Allwinner Tech Co., Ltd
 * Author: Sugar <shuge@allwinnertech.com>
 *
 * Copyright (C) 2014 Maxime Ripard
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "virt-dma.h"
/*
 * Common registers
 */
#define DMA_IRQ_EN(x)		((x) * 0x04)
#define DMA_IRQ_HALF			BIT(0)
#define DMA_IRQ_PKG			BIT(1)
#define DMA_IRQ_QUEUE			BIT(2)

#define DMA_IRQ_CHAN_NR			8
#define DMA_IRQ_CHAN_WIDTH		4

#define DMA_IRQ_STAT(x)		((x) * 0x04 + 0x10)

#define DMA_STAT		0x30
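/*
 * Layout note (derived from the defines above): each channel owns a
 * 4-bit slice (half/package/queue bits plus a reserved bit) of the
 * enable and status registers, eight channels per 32-bit register,
 * so channels 8..15 live in DMA_IRQ_EN(1)/DMA_IRQ_STAT(1).
 */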
/*
 * sun8i specific registers
 */
#define SUN8I_DMA_GATE		0x20
#define SUN8I_DMA_GATE_ENABLE	0x4
/*
 * Channel-specific registers
 */
#define DMA_CHAN_ENABLE		0x00
#define DMA_CHAN_ENABLE_START	BIT(0)
#define DMA_CHAN_ENABLE_STOP	0

#define DMA_CHAN_PAUSE		0x04
#define DMA_CHAN_PAUSE_PAUSE	BIT(1)
#define DMA_CHAN_PAUSE_RESUME	0

#define DMA_CHAN_LLI_ADDR	0x08

#define DMA_CHAN_CUR_CFG	0x0c
#define DMA_CHAN_CFG_SRC_DRQ(x)		((x) & 0x1f)
#define DMA_CHAN_CFG_SRC_IO_MODE	BIT(5)
#define DMA_CHAN_CFG_SRC_LINEAR_MODE	(0 << 5)
#define DMA_CHAN_CFG_SRC_BURST(x)	(((x) & 0x3) << 7)
#define DMA_CHAN_CFG_SRC_WIDTH(x)	(((x) & 0x3) << 9)

#define DMA_CHAN_CFG_DST_DRQ(x)		(DMA_CHAN_CFG_SRC_DRQ(x) << 16)
#define DMA_CHAN_CFG_DST_IO_MODE	(DMA_CHAN_CFG_SRC_IO_MODE << 16)
#define DMA_CHAN_CFG_DST_LINEAR_MODE	(DMA_CHAN_CFG_SRC_LINEAR_MODE << 16)
#define DMA_CHAN_CFG_DST_BURST(x)	(DMA_CHAN_CFG_SRC_BURST(x) << 16)
#define DMA_CHAN_CFG_DST_WIDTH(x)	(DMA_CHAN_CFG_SRC_WIDTH(x) << 16)

#define DMA_CHAN_CUR_SRC	0x10

#define DMA_CHAN_CUR_DST	0x14

#define DMA_CHAN_CUR_CNT	0x18

#define DMA_CHAN_CUR_PARA	0x1c
/*
 * Various hardware related defines
 */
#define LLI_LAST_ITEM	0xfffff800
#define NORMAL_WAIT	8
#define DRQ_SDRAM	1
/*
 * Hardware channels / ports representation
 *
 * The hardware is used in several SoCs, with differing numbers
 * of channels and endpoints. This structure ties those numbers
 * to a certain compatible string.
 */
struct sun6i_dma_config {
	u32 nr_max_channels;
	u32 nr_max_requests;
	u32 nr_max_vchans;

	/*
	 * In the datasheets/user manuals of newer Allwinner SoCs, a special
	 * bit (bit 2 at register 0x20) is present.
	 * It's named "DMA MCLK interface circuit auto gating bit" in the
	 * documents, and the footnote of this register says that this bit
	 * should be set up when initializing the DMA controller.
	 * Allwinner A23/A33 user manuals do not have this bit documented,
	 * however these SoCs really have and need this bit, as seen in the
	 * BSP kernel source code.
	 */
	bool gate_needed;
};
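/*
 * For instance, sun8i_a23_dma_cfg and sun8i_v3s_dma_cfg below set
 * .gate_needed, and probe then writes SUN8I_DMA_GATE_ENABLE into the
 * SUN8I_DMA_GATE register.
 */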
/*
 * Hardware representation of the LLI
 *
 * The hardware will be fed the physical address of this structure,
 * and read its content in order to start the transfer.
 */
struct sun6i_dma_lli {
	u32			cfg;
	u32			src;
	u32			dst;
	u32			len;
	u32			para;
	u32			p_lli_next;

	/*
	 * This field is not used by the DMA controller, but will be
	 * used by the CPU to go through the list (mostly for dumping
	 * or freeing it).
	 */
	struct sun6i_dma_lli	*v_lli_next;
};
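/*
 * Illustrative sketch (not from the original source): a two-item chain
 * as built by sun6i_dma_lli_add() below. The controller walks the
 * physical p_lli_next links until it reads LLI_LAST_ITEM, while the
 * CPU walks the parallel virtual v_lli_next links until NULL:
 *
 *	txd->p_lli --> [lli0].p_lli_next --> [lli1].p_lli_next = LLI_LAST_ITEM
 *	txd->v_lli --> [lli0].v_lli_next --> [lli1].v_lli_next = NULL
 */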
struct sun6i_desc {
	struct virt_dma_desc	vd;
	dma_addr_t		p_lli;
	struct sun6i_dma_lli	*v_lli;
};
struct sun6i_pchan {
	u32			idx;
	void __iomem		*base;
	struct sun6i_vchan	*vchan;
	struct sun6i_desc	*desc;
	struct sun6i_desc	*done;
};
struct sun6i_vchan {
	struct virt_dma_chan	vc;
	struct list_head	node;
	struct dma_slave_config	cfg;
	struct sun6i_pchan	*phy;
	u8			port;
	u8			irq_type;
	bool			cyclic;
};
struct sun6i_dma_dev {
	struct dma_device	slave;
	void __iomem		*base;
	struct clk		*clk;
	int			irq;
	spinlock_t		lock;
	struct reset_control	*rstc;
	struct tasklet_struct	task;
	atomic_t		tasklet_shutdown;
	struct list_head	pending;
	struct dma_pool		*pool;
	struct sun6i_pchan	*pchans;
	struct sun6i_vchan	*vchans;
	const struct sun6i_dma_config *cfg;
};
static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
static inline struct sun6i_dma_dev *to_sun6i_dma_dev(struct dma_device *d)
{
	return container_of(d, struct sun6i_dma_dev, slave);
}
static inline struct sun6i_vchan *to_sun6i_vchan(struct dma_chan *chan)
{
	return container_of(chan, struct sun6i_vchan, vc.chan);
}
static inline struct sun6i_desc *
to_sun6i_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct sun6i_desc, vd.tx);
}
static inline void sun6i_dma_dump_com_regs(struct sun6i_dma_dev *sdev)
{
	dev_dbg(sdev->slave.dev, "Common register:\n"
		"\tmask0(%04x): 0x%08x\n"
		"\tmask1(%04x): 0x%08x\n"
		"\tpend0(%04x): 0x%08x\n"
		"\tpend1(%04x): 0x%08x\n"
		"\tstats(%04x): 0x%08x\n",
		DMA_IRQ_EN(0), readl(sdev->base + DMA_IRQ_EN(0)),
		DMA_IRQ_EN(1), readl(sdev->base + DMA_IRQ_EN(1)),
		DMA_IRQ_STAT(0), readl(sdev->base + DMA_IRQ_STAT(0)),
		DMA_IRQ_STAT(1), readl(sdev->base + DMA_IRQ_STAT(1)),
		DMA_STAT, readl(sdev->base + DMA_STAT));
}
static inline void sun6i_dma_dump_chan_regs(struct sun6i_dma_dev *sdev,
					    struct sun6i_pchan *pchan)
{
	phys_addr_t reg = virt_to_phys(pchan->base);

	dev_dbg(sdev->slave.dev, "Chan %d reg: %pa\n"
		"\t___en(%04x): \t0x%08x\n"
		"\tpause(%04x): \t0x%08x\n"
		"\tstart(%04x): \t0x%08x\n"
		"\t__cfg(%04x): \t0x%08x\n"
		"\t__src(%04x): \t0x%08x\n"
		"\t__dst(%04x): \t0x%08x\n"
		"\tcount(%04x): \t0x%08x\n"
		"\t_para(%04x): \t0x%08x\n\n",
		pchan->idx, &reg,
		DMA_CHAN_ENABLE,
		readl(pchan->base + DMA_CHAN_ENABLE),
		DMA_CHAN_PAUSE,
		readl(pchan->base + DMA_CHAN_PAUSE),
		DMA_CHAN_LLI_ADDR,
		readl(pchan->base + DMA_CHAN_LLI_ADDR),
		DMA_CHAN_CUR_CFG,
		readl(pchan->base + DMA_CHAN_CUR_CFG),
		DMA_CHAN_CUR_SRC,
		readl(pchan->base + DMA_CHAN_CUR_SRC),
		DMA_CHAN_CUR_DST,
		readl(pchan->base + DMA_CHAN_CUR_DST),
		DMA_CHAN_CUR_CNT,
		readl(pchan->base + DMA_CHAN_CUR_CNT),
		DMA_CHAN_CUR_PARA,
		readl(pchan->base + DMA_CHAN_CUR_PARA));
}
static inline s8 convert_burst(u32 maxburst)
{
	switch (maxburst) {
	case 1:
		return 0;
	case 8:
		return 2;
	default:
		return -EINVAL;
	}
}
static inline s8 convert_buswidth(enum dma_slave_buswidth addr_width)
{
	if ((addr_width < DMA_SLAVE_BUSWIDTH_1_BYTE) ||
	    (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES))
		return -EINVAL;

	/* 1 -> 0, 2 -> 1, 4 -> 2 */
	return addr_width >> 1;
}
static size_t sun6i_get_chan_size(struct sun6i_pchan *pchan)
{
	struct sun6i_desc *txd = pchan->desc;
	struct sun6i_dma_lli *lli;
	size_t bytes;
	dma_addr_t pos;

	pos = readl(pchan->base + DMA_CHAN_LLI_ADDR);
	bytes = readl(pchan->base + DMA_CHAN_CUR_CNT);

	if (pos == LLI_LAST_ITEM)
		return bytes;

	for (lli = txd->v_lli; lli; lli = lli->v_lli_next) {
		if (lli->p_lli_next == pos) {
			for (lli = lli->v_lli_next; lli; lli = lli->v_lli_next)
				bytes += lli->len;
			break;
		}
	}

	return bytes;
}
static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev,
			       struct sun6i_dma_lli *next,
			       dma_addr_t next_phy,
			       struct sun6i_desc *txd)
{
	if ((!prev && !txd) || !next)
		return NULL;

	if (!prev) {
		txd->p_lli = next_phy;
		txd->v_lli = next;
	} else {
		prev->p_lli_next = next_phy;
		prev->v_lli_next = next;
	}

	next->p_lli_next = LLI_LAST_ITEM;
	next->v_lli_next = NULL;

	return next;
}
static inline void sun6i_dma_dump_lli(struct sun6i_vchan *vchan,
				      struct sun6i_dma_lli *lli)
{
	phys_addr_t p_lli = virt_to_phys(lli);

	dev_dbg(chan2dev(&vchan->vc.chan),
		"\n\tdesc: p - %pa v - 0x%p\n"
		"\t\tc - 0x%08x s - 0x%08x d - 0x%08x\n"
		"\t\tl - 0x%08x p - 0x%08x n - 0x%08x\n",
		&p_lli, lli,
		lli->cfg, lli->src, lli->dst,
		lli->len, lli->para, lli->p_lli_next);
}
static void sun6i_dma_free_desc(struct virt_dma_desc *vd)
{
	struct sun6i_desc *txd = to_sun6i_desc(&vd->tx);
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vd->tx.chan->device);
	struct sun6i_dma_lli *v_lli, *v_next;
	dma_addr_t p_lli, p_next;

	if (unlikely(!txd))
		return;

	p_lli = txd->p_lli;
	v_lli = txd->v_lli;

	while (v_lli) {
		v_next = v_lli->v_lli_next;
		p_next = v_lli->p_lli_next;

		dma_pool_free(sdev->pool, v_lli, p_lli);

		v_lli = v_next;
		p_lli = p_next;
	}

	kfree(txd);
}
static int sun6i_dma_start_desc(struct sun6i_vchan *vchan)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device);
	struct virt_dma_desc *desc = vchan_next_desc(&vchan->vc);
	struct sun6i_pchan *pchan = vchan->phy;
	u32 irq_val, irq_reg, irq_offset;

	if (!pchan)
		return -EAGAIN;

	if (!desc) {
		pchan->desc = NULL;
		pchan->done = NULL;
		return -EAGAIN;
	}

	list_del(&desc->node);

	pchan->desc = to_sun6i_desc(&desc->tx);
	pchan->done = NULL;

	sun6i_dma_dump_lli(vchan, pchan->desc->v_lli);

	irq_reg = pchan->idx / DMA_IRQ_CHAN_NR;
	irq_offset = pchan->idx % DMA_IRQ_CHAN_NR;

	vchan->irq_type = vchan->cyclic ? DMA_IRQ_PKG : DMA_IRQ_QUEUE;

	irq_val = readl(sdev->base + DMA_IRQ_EN(irq_reg));
	irq_val &= ~((DMA_IRQ_HALF | DMA_IRQ_PKG | DMA_IRQ_QUEUE) <<
			(irq_offset * DMA_IRQ_CHAN_WIDTH));
	irq_val |= vchan->irq_type << (irq_offset * DMA_IRQ_CHAN_WIDTH);
	writel(irq_val, sdev->base + DMA_IRQ_EN(irq_reg));

	writel(pchan->desc->p_lli, pchan->base + DMA_CHAN_LLI_ADDR);
	writel(DMA_CHAN_ENABLE_START, pchan->base + DMA_CHAN_ENABLE);

	sun6i_dma_dump_com_regs(sdev);
	sun6i_dma_dump_chan_regs(sdev, pchan);

	return 0;
}
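/*
 * Worked example (illustrative values): for pchan->idx == 10 we get
 * irq_reg == 1 and irq_offset == 2, so this channel's three interrupt
 * type bits sit at bits 8..10 of DMA_IRQ_EN(1); a queue-complete
 * request then sets DMA_IRQ_QUEUE << 8 == 0x400.
 */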
static void sun6i_dma_tasklet(unsigned long data)
{
	struct sun6i_dma_dev *sdev = (struct sun6i_dma_dev *)data;
	const struct sun6i_dma_config *cfg = sdev->cfg;
	struct sun6i_vchan *vchan;
	struct sun6i_pchan *pchan;
	unsigned int pchan_alloc = 0;
	unsigned int pchan_idx;

	list_for_each_entry(vchan, &sdev->slave.channels, vc.chan.device_node) {
		spin_lock_irq(&vchan->vc.lock);

		pchan = vchan->phy;

		if (pchan && pchan->done) {
			if (sun6i_dma_start_desc(vchan)) {
				/*
				 * No current txd associated with this channel
				 */
				dev_dbg(sdev->slave.dev, "pchan %u: free\n",
					pchan->idx);

				/* Mark this channel free */
				vchan->phy = NULL;
				pchan->vchan = NULL;
			}
		}
		spin_unlock_irq(&vchan->vc.lock);
	}

	spin_lock_irq(&sdev->lock);
	for (pchan_idx = 0; pchan_idx < cfg->nr_max_channels; pchan_idx++) {
		pchan = &sdev->pchans[pchan_idx];

		if (pchan->vchan || list_empty(&sdev->pending))
			continue;

		vchan = list_first_entry(&sdev->pending,
					 struct sun6i_vchan, node);

		/* Remove from pending channels */
		list_del_init(&vchan->node);
		pchan_alloc |= BIT(pchan_idx);

		/* Mark this channel allocated */
		pchan->vchan = vchan;
		vchan->phy = pchan;
		dev_dbg(sdev->slave.dev, "pchan %u: alloc vchan %p\n",
			pchan->idx, &vchan->vc);
	}
	spin_unlock_irq(&sdev->lock);

	for (pchan_idx = 0; pchan_idx < cfg->nr_max_channels; pchan_idx++) {
		if (!(pchan_alloc & BIT(pchan_idx)))
			continue;

		pchan = sdev->pchans + pchan_idx;
		vchan = pchan->vchan;
		if (vchan) {
			spin_lock_irq(&vchan->vc.lock);
			sun6i_dma_start_desc(vchan);
			spin_unlock_irq(&vchan->vc.lock);
		}
	}
}
static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id)
{
	struct sun6i_dma_dev *sdev = dev_id;
	struct sun6i_vchan *vchan;
	struct sun6i_pchan *pchan;
	int i, j, ret = IRQ_NONE;
	u32 status;

	for (i = 0; i < sdev->cfg->nr_max_channels / DMA_IRQ_CHAN_NR; i++) {
		status = readl(sdev->base + DMA_IRQ_STAT(i));
		if (!status)
			continue;

		dev_dbg(sdev->slave.dev, "DMA irq status %s: 0x%x\n",
			i ? "high" : "low", status);

		writel(status, sdev->base + DMA_IRQ_STAT(i));

		for (j = 0; (j < DMA_IRQ_CHAN_NR) && status; j++) {
			pchan = sdev->pchans + j;
			vchan = pchan->vchan;
			if (vchan && (status & vchan->irq_type)) {
				if (vchan->cyclic) {
					vchan_cyclic_callback(&pchan->desc->vd);
				} else {
					spin_lock(&vchan->vc.lock);
					vchan_cookie_complete(&pchan->desc->vd);
					pchan->done = pchan->desc;
					spin_unlock(&vchan->vc.lock);
				}
			}

			status = status >> DMA_IRQ_CHAN_WIDTH;
		}

		if (!atomic_read(&sdev->tasklet_shutdown))
			tasklet_schedule(&sdev->task);
		ret = IRQ_HANDLED;
	}

	return ret;
}
static int set_config(struct sun6i_dma_dev *sdev,
			struct dma_slave_config *sconfig,
			enum dma_transfer_direction direction,
			u32 *p_cfg)
{
	s8 src_width, dst_width, src_burst, dst_burst;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		src_burst = convert_burst(sconfig->src_maxburst ?
					sconfig->src_maxburst : 8);
		src_width = convert_buswidth(sconfig->src_addr_width !=
						DMA_SLAVE_BUSWIDTH_UNDEFINED ?
				sconfig->src_addr_width :
				DMA_SLAVE_BUSWIDTH_4_BYTES);
		dst_burst = convert_burst(sconfig->dst_maxburst);
		dst_width = convert_buswidth(sconfig->dst_addr_width);
		break;
	case DMA_DEV_TO_MEM:
		src_burst = convert_burst(sconfig->src_maxburst);
		src_width = convert_buswidth(sconfig->src_addr_width);
		dst_burst = convert_burst(sconfig->dst_maxburst ?
					sconfig->dst_maxburst : 8);
		dst_width = convert_buswidth(sconfig->dst_addr_width !=
						DMA_SLAVE_BUSWIDTH_UNDEFINED ?
				sconfig->dst_addr_width :
				DMA_SLAVE_BUSWIDTH_4_BYTES);
		break;
	default:
		return -EINVAL;
	}

	if (src_burst < 0)
		return src_burst;
	if (src_width < 0)
		return src_width;
	if (dst_burst < 0)
		return dst_burst;
	if (dst_width < 0)
		return dst_width;

	*p_cfg = DMA_CHAN_CFG_SRC_BURST(src_burst) |
		DMA_CHAN_CFG_SRC_WIDTH(src_width) |
		DMA_CHAN_CFG_DST_BURST(dst_burst) |
		DMA_CHAN_CFG_DST_WIDTH(dst_width);

	return 0;
}
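/*
 * Worked example (assumed values): a DMA_MEM_TO_DEV config with 4-byte
 * widths and a maxburst of 8 on both sides converts to burst code 2 and
 * width code 2, so the resulting word is
 * (2 << 7) | (2 << 9) | (2 << 23) | (2 << 25) == 0x05000500.
 */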
static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	struct sun6i_dma_lli *v_lli;
	struct sun6i_desc *txd;
	dma_addr_t p_lli;
	s8 burst, width;

	dev_dbg(chan2dev(chan),
		"%s; chan: %d, dest: %pad, src: %pad, len: %zu. flags: 0x%08lx\n",
		__func__, vchan->vc.chan.chan_id, &dest, &src, len, flags);

	if (!len)
		return NULL;

	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
	if (!txd)
		return NULL;

	v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);
	if (!v_lli) {
		dev_err(sdev->slave.dev, "Failed to alloc lli memory\n");
		goto err_txd_free;
	}

	v_lli->src = src;
	v_lli->dst = dest;
	v_lli->len = len;
	v_lli->para = NORMAL_WAIT;

	burst = convert_burst(8);
	width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES);
	v_lli->cfg = DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
		DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
		DMA_CHAN_CFG_DST_LINEAR_MODE |
		DMA_CHAN_CFG_SRC_LINEAR_MODE |
		DMA_CHAN_CFG_SRC_BURST(burst) |
		DMA_CHAN_CFG_SRC_WIDTH(width) |
		DMA_CHAN_CFG_DST_BURST(burst) |
		DMA_CHAN_CFG_DST_WIDTH(width);

	sun6i_dma_lli_add(NULL, v_lli, p_lli, txd);

	sun6i_dma_dump_lli(vchan, v_lli);

	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_txd_free:
	kfree(txd);
	return NULL;
}
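/*
 * Client-side sketch (illustrative, not part of this driver): a memcpy
 * user would grab any channel advertising the DMA_MEMCPY capability and
 * reach the callback above through the generic dmaengine API:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *				       DMA_PREP_INTERRUPT);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */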
static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction dir,
		unsigned long flags, void *context)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct sun6i_dma_lli *v_lli, *prev = NULL;
	struct sun6i_desc *txd;
	struct scatterlist *sg;
	dma_addr_t p_lli;
	u32 lli_cfg;
	unsigned int i;
	int ret;

	if (!sgl)
		return NULL;

	ret = set_config(sdev, sconfig, dir, &lli_cfg);
	if (ret) {
		dev_err(chan2dev(chan), "Invalid DMA configuration\n");
		return NULL;
	}

	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
	if (!txd)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);
		if (!v_lli)
			goto err_lli_free;

		v_lli->len = sg_dma_len(sg);
		v_lli->para = NORMAL_WAIT;

		if (dir == DMA_MEM_TO_DEV) {
			v_lli->src = sg_dma_address(sg);
			v_lli->dst = sconfig->dst_addr;
			v_lli->cfg = lli_cfg |
				DMA_CHAN_CFG_DST_IO_MODE |
				DMA_CHAN_CFG_SRC_LINEAR_MODE |
				DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
				DMA_CHAN_CFG_DST_DRQ(vchan->port);

			dev_dbg(chan2dev(chan),
				"%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n",
				__func__, vchan->vc.chan.chan_id,
				&sconfig->dst_addr, &sg_dma_address(sg),
				sg_dma_len(sg), flags);
		} else {
			v_lli->src = sconfig->src_addr;
			v_lli->dst = sg_dma_address(sg);
			v_lli->cfg = lli_cfg |
				DMA_CHAN_CFG_DST_LINEAR_MODE |
				DMA_CHAN_CFG_SRC_IO_MODE |
				DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
				DMA_CHAN_CFG_SRC_DRQ(vchan->port);

			dev_dbg(chan2dev(chan),
				"%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n",
				__func__, vchan->vc.chan.chan_id,
				&sg_dma_address(sg), &sconfig->src_addr,
				sg_dma_len(sg), flags);
		}

		prev = sun6i_dma_lli_add(prev, v_lli, p_lli, txd);
	}

	dev_dbg(chan2dev(chan), "First: %pad\n", &txd->p_lli);
	for (prev = txd->v_lli; prev; prev = prev->v_lli_next)
		sun6i_dma_dump_lli(vchan, prev);

	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_lli_free:
	for (prev = txd->v_lli; prev; prev = prev->v_lli_next)
		dma_pool_free(sdev->pool, prev, virt_to_phys(prev));
	kfree(txd);
	return NULL;
}
static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic(
					struct dma_chan *chan,
					dma_addr_t buf_addr,
					size_t buf_len,
					size_t period_len,
					enum dma_transfer_direction dir,
					unsigned long flags)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct sun6i_dma_lli *v_lli, *prev = NULL;
	struct sun6i_desc *txd;
	dma_addr_t p_lli;
	u32 lli_cfg;
	unsigned int i, periods = buf_len / period_len;
	int ret;

	ret = set_config(sdev, sconfig, dir, &lli_cfg);
	if (ret) {
		dev_err(chan2dev(chan), "Invalid DMA configuration\n");
		return NULL;
	}

	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
	if (!txd)
		return NULL;

	for (i = 0; i < periods; i++) {
		v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);
		if (!v_lli) {
			dev_err(sdev->slave.dev, "Failed to alloc lli memory\n");
			goto err_lli_free;
		}

		v_lli->len = period_len;
		v_lli->para = NORMAL_WAIT;

		if (dir == DMA_MEM_TO_DEV) {
			v_lli->src = buf_addr + period_len * i;
			v_lli->dst = sconfig->dst_addr;
			v_lli->cfg = lli_cfg |
				DMA_CHAN_CFG_DST_IO_MODE |
				DMA_CHAN_CFG_SRC_LINEAR_MODE |
				DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
				DMA_CHAN_CFG_DST_DRQ(vchan->port);
		} else {
			v_lli->src = sconfig->src_addr;
			v_lli->dst = buf_addr + period_len * i;
			v_lli->cfg = lli_cfg |
				DMA_CHAN_CFG_DST_LINEAR_MODE |
				DMA_CHAN_CFG_SRC_IO_MODE |
				DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
				DMA_CHAN_CFG_SRC_DRQ(vchan->port);
		}

		prev = sun6i_dma_lli_add(prev, v_lli, p_lli, txd);
	}

	prev->p_lli_next = txd->p_lli;		/* cyclic list */

	vchan->cyclic = true;

	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_lli_free:
	for (prev = txd->v_lli; prev; prev = prev->v_lli_next)
		dma_pool_free(sdev->pool, prev, virt_to_phys(prev));
	kfree(txd);
	return NULL;
}
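/*
 * Client-side sketch (illustrative; fifo_phys, buf_phys and the lengths
 * are assumptions): an audio-style user would configure the slave side
 * once and then submit one cyclic descriptor, which ends up in the
 * callback above:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *	};
 *	struct dma_async_tx_descriptor *desc;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len,
 *					 period_len, DMA_MEM_TO_DEV,
 *					 DMA_PREP_INTERRUPT);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */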
static int sun6i_dma_config(struct dma_chan *chan,
			    struct dma_slave_config *config)
{
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);

	memcpy(&vchan->cfg, config, sizeof(*config));

	return 0;
}
static int sun6i_dma_pause(struct dma_chan *chan)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	struct sun6i_pchan *pchan = vchan->phy;

	dev_dbg(chan2dev(chan), "vchan %p: pause\n", &vchan->vc);

	if (pchan) {
		writel(DMA_CHAN_PAUSE_PAUSE,
		       pchan->base + DMA_CHAN_PAUSE);
	} else {
		spin_lock(&sdev->lock);
		list_del_init(&vchan->node);
		spin_unlock(&sdev->lock);
	}

	return 0;
}
static int sun6i_dma_resume(struct dma_chan *chan)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	struct sun6i_pchan *pchan = vchan->phy;
	unsigned long flags;

	dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc);

	spin_lock_irqsave(&vchan->vc.lock, flags);

	if (pchan) {
		writel(DMA_CHAN_PAUSE_RESUME,
		       pchan->base + DMA_CHAN_PAUSE);
	} else if (!list_empty(&vchan->vc.desc_issued)) {
		spin_lock(&sdev->lock);
		list_add_tail(&vchan->node, &sdev->pending);
		spin_unlock(&sdev->lock);
	}

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	return 0;
}
static int sun6i_dma_terminate_all(struct dma_chan *chan)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	struct sun6i_pchan *pchan = vchan->phy;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock(&sdev->lock);
	list_del_init(&vchan->node);
	spin_unlock(&sdev->lock);

	spin_lock_irqsave(&vchan->vc.lock, flags);

	if (vchan->cyclic) {
		vchan->cyclic = false;
		if (pchan && pchan->desc) {
			struct virt_dma_desc *vd = &pchan->desc->vd;
			struct virt_dma_chan *vc = &vchan->vc;

			list_add_tail(&vd->node, &vc->desc_completed);
		}
	}

	vchan_get_all_descriptors(&vchan->vc, &head);

	if (pchan) {
		writel(DMA_CHAN_ENABLE_STOP, pchan->base + DMA_CHAN_ENABLE);
		writel(DMA_CHAN_PAUSE_RESUME, pchan->base + DMA_CHAN_PAUSE);

		vchan->phy = NULL;
		pchan->vchan = NULL;
		pchan->desc = NULL;
		pchan->done = NULL;
	}

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	vchan_dma_desc_free_list(&vchan->vc, &head);

	return 0;
}
static enum dma_status sun6i_dma_tx_status(struct dma_chan *chan,
					   dma_cookie_t cookie,
					   struct dma_tx_state *state)
{
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	struct sun6i_pchan *pchan = vchan->phy;
	struct sun6i_dma_lli *lli;
	struct virt_dma_desc *vd;
	struct sun6i_desc *txd;
	enum dma_status ret;
	unsigned long flags;
	size_t bytes = 0;

	ret = dma_cookie_status(chan, cookie, state);
	if (ret == DMA_COMPLETE || !state)
		return ret;

	spin_lock_irqsave(&vchan->vc.lock, flags);

	vd = vchan_find_desc(&vchan->vc, cookie);
	txd = to_sun6i_desc(&vd->tx);

	if (vd) {
		for (lli = txd->v_lli; lli != NULL; lli = lli->v_lli_next)
			bytes += lli->len;
	} else if (!pchan || !pchan->desc) {
		bytes = 0;
	} else {
		bytes = sun6i_get_chan_size(pchan);
	}

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	dma_set_residue(state, bytes);

	return ret;
}
static void sun6i_dma_issue_pending(struct dma_chan *chan)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&vchan->vc.lock, flags);

	if (vchan_issue_pending(&vchan->vc)) {
		spin_lock(&sdev->lock);

		if (!vchan->phy && list_empty(&vchan->node)) {
			list_add_tail(&vchan->node, &sdev->pending);
			tasklet_schedule(&sdev->task);
			dev_dbg(chan2dev(chan), "vchan %p: issued\n",
				&vchan->vc);
		}

		spin_unlock(&sdev->lock);
	} else {
		dev_dbg(chan2dev(chan), "vchan %p: nothing to issue\n",
			&vchan->vc);
	}

	spin_unlock_irqrestore(&vchan->vc.lock, flags);
}
static void sun6i_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&sdev->lock, flags);
	list_del_init(&vchan->node);
	spin_unlock_irqrestore(&sdev->lock, flags);

	vchan_free_chan_resources(&vchan->vc);
}
static struct dma_chan *sun6i_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct sun6i_dma_dev *sdev = ofdma->of_dma_data;
	struct sun6i_vchan *vchan;
	struct dma_chan *chan;
	u8 port = dma_spec->args[0];

	if (port > sdev->cfg->nr_max_requests)
		return NULL;

	chan = dma_get_any_slave_channel(&sdev->slave);
	if (!chan)
		return NULL;

	vchan = to_sun6i_vchan(chan);
	vchan->port = port;

	return chan;
}
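/*
 * Hypothetical device-tree usage (node names and the port number are
 * made up): with #dma-cells = <1>, a client picks its DRQ port in the
 * single cell, which sun6i_dma_of_xlate() stores in vchan->port:
 *
 *	spi1: spi@1c69000 {
 *		...
 *		dmas = <&dma 24>, <&dma 24>;
 *		dma-names = "rx", "tx";
 *	};
 */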
static inline void sun6i_kill_tasklet(struct sun6i_dma_dev *sdev)
{
	/* Disable all interrupts from DMA */
	writel(0, sdev->base + DMA_IRQ_EN(0));
	writel(0, sdev->base + DMA_IRQ_EN(1));

	/* Prevent spurious interrupts from scheduling the tasklet */
	atomic_inc(&sdev->tasklet_shutdown);

	/* Make sure we won't have any further interrupts */
	devm_free_irq(sdev->slave.dev, sdev->irq, sdev);

	/* Actually prevent the tasklet from being scheduled */
	tasklet_kill(&sdev->task);
}
static inline void sun6i_dma_free(struct sun6i_dma_dev *sdev)
{
	int i;

	for (i = 0; i < sdev->cfg->nr_max_vchans; i++) {
		struct sun6i_vchan *vchan = &sdev->vchans[i];

		list_del(&vchan->vc.chan.device_node);
		tasklet_kill(&vchan->vc.task);
	}
}
/*
 * For A31:
 *
 * There are 16 physical channels that can work in parallel.
 *
 * However we have 30 different endpoints for our requests.
 *
 * Since the channels are able to handle only a unidirectional
 * transfer, we need to allocate more virtual channels so that
 * everyone can grab one channel.
 *
 * Some devices can't work in both directions (mostly because it
 * wouldn't make sense), so we have a bit fewer virtual channels than
 * two channels per endpoint.
 */
static struct sun6i_dma_config sun6i_a31_dma_cfg = {
	.nr_max_channels = 16,
	.nr_max_requests = 30,
	.nr_max_vchans   = 53,
};
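/*
 * Illustrative arithmetic: two directions for each of the 30 request
 * endpoints would call for 60 virtual channels; dropping the endpoints
 * that only ever transfer one way gives the 53 used above.
 */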
/*
 * The A23 only has 8 physical channels, a maximum DRQ port id of 24,
 * and a total of 37 usable source and destination endpoints.
 */
static struct sun6i_dma_config sun8i_a23_dma_cfg = {
	.nr_max_channels = 8,
	.nr_max_requests = 24,
	.nr_max_vchans   = 37,
	.gate_needed	 = true,
};
static struct sun6i_dma_config sun8i_a83t_dma_cfg = {
	.nr_max_channels = 8,
	.nr_max_requests = 28,
	.nr_max_vchans	 = 39,
};
/*
 * The H3 has 12 physical channels, a maximum DRQ port id of 27,
 * and a total of 34 usable source and destination endpoints.
 */
static struct sun6i_dma_config sun8i_h3_dma_cfg = {
	.nr_max_channels = 12,
	.nr_max_requests = 27,
	.nr_max_vchans	 = 34,
};
/*
 * The V3s has only 8 physical channels, a maximum DRQ port id of 23,
 * and a total of 24 usable source and destination endpoints.
 */
static struct sun6i_dma_config sun8i_v3s_dma_cfg = {
	.nr_max_channels = 8,
	.nr_max_requests = 23,
	.nr_max_vchans	 = 24,
	.gate_needed	 = true,
};
static const struct of_device_id sun6i_dma_match[] = {
	{ .compatible = "allwinner,sun6i-a31-dma", .data = &sun6i_a31_dma_cfg },
	{ .compatible = "allwinner,sun8i-a23-dma", .data = &sun8i_a23_dma_cfg },
	{ .compatible = "allwinner,sun8i-a83t-dma", .data = &sun8i_a83t_dma_cfg },
	{ .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg },
	{ .compatible = "allwinner,sun8i-v3s-dma", .data = &sun8i_v3s_dma_cfg },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sun6i_dma_match);
static int sun6i_dma_probe(struct platform_device *pdev)
{
	const struct of_device_id *device;
	struct sun6i_dma_dev *sdc;
	struct resource *res;
	int ret, i;

	sdc = devm_kzalloc(&pdev->dev, sizeof(*sdc), GFP_KERNEL);
	if (!sdc)
		return -ENOMEM;

	device = of_match_device(sun6i_dma_match, &pdev->dev);
	if (!device)
		return -ENODEV;
	sdc->cfg = device->data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	sdc->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(sdc->base))
		return PTR_ERR(sdc->base);

	sdc->irq = platform_get_irq(pdev, 0);
	if (sdc->irq < 0) {
		dev_err(&pdev->dev, "Cannot claim IRQ\n");
		return sdc->irq;
	}

	sdc->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(sdc->clk)) {
		dev_err(&pdev->dev, "No clock specified\n");
		return PTR_ERR(sdc->clk);
	}

	sdc->rstc = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(sdc->rstc)) {
		dev_err(&pdev->dev, "No reset controller specified\n");
		return PTR_ERR(sdc->rstc);
	}

	sdc->pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
				     sizeof(struct sun6i_dma_lli), 4, 0);
	if (!sdc->pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, sdc);
	INIT_LIST_HEAD(&sdc->pending);
	spin_lock_init(&sdc->lock);

	dma_cap_set(DMA_PRIVATE, sdc->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, sdc->slave.cap_mask);
	dma_cap_set(DMA_SLAVE, sdc->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, sdc->slave.cap_mask);

	INIT_LIST_HEAD(&sdc->slave.channels);
	sdc->slave.device_free_chan_resources	= sun6i_dma_free_chan_resources;
	sdc->slave.device_tx_status		= sun6i_dma_tx_status;
	sdc->slave.device_issue_pending		= sun6i_dma_issue_pending;
	sdc->slave.device_prep_slave_sg		= sun6i_dma_prep_slave_sg;
	sdc->slave.device_prep_dma_memcpy	= sun6i_dma_prep_dma_memcpy;
	sdc->slave.device_prep_dma_cyclic	= sun6i_dma_prep_dma_cyclic;
	sdc->slave.copy_align			= DMAENGINE_ALIGN_4_BYTES;
	sdc->slave.device_config		= sun6i_dma_config;
	sdc->slave.device_pause			= sun6i_dma_pause;
	sdc->slave.device_resume		= sun6i_dma_resume;
	sdc->slave.device_terminate_all		= sun6i_dma_terminate_all;
	sdc->slave.src_addr_widths		= BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
						  BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
						  BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	sdc->slave.dst_addr_widths		= BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
						  BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
						  BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	sdc->slave.directions			= BIT(DMA_DEV_TO_MEM) |
						  BIT(DMA_MEM_TO_DEV);
	sdc->slave.residue_granularity		= DMA_RESIDUE_GRANULARITY_BURST;
	sdc->slave.dev = &pdev->dev;

	sdc->pchans = devm_kcalloc(&pdev->dev, sdc->cfg->nr_max_channels,
				   sizeof(struct sun6i_pchan), GFP_KERNEL);
	if (!sdc->pchans)
		return -ENOMEM;

	sdc->vchans = devm_kcalloc(&pdev->dev, sdc->cfg->nr_max_vchans,
				   sizeof(struct sun6i_vchan), GFP_KERNEL);
	if (!sdc->vchans)
		return -ENOMEM;

	tasklet_init(&sdc->task, sun6i_dma_tasklet, (unsigned long)sdc);

	for (i = 0; i < sdc->cfg->nr_max_channels; i++) {
		struct sun6i_pchan *pchan = &sdc->pchans[i];

		pchan->idx = i;
		pchan->base = sdc->base + 0x100 + i * 0x40;
	}

	for (i = 0; i < sdc->cfg->nr_max_vchans; i++) {
		struct sun6i_vchan *vchan = &sdc->vchans[i];

		INIT_LIST_HEAD(&vchan->node);
		vchan->vc.desc_free = sun6i_dma_free_desc;
		vchan_init(&vchan->vc, &sdc->slave);
	}

	ret = reset_control_deassert(sdc->rstc);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't deassert the device from reset\n");
		goto err_chan_free;
	}

	ret = clk_prepare_enable(sdc->clk);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't enable the clock\n");
		goto err_reset_assert;
	}

	ret = devm_request_irq(&pdev->dev, sdc->irq, sun6i_dma_interrupt, 0,
			       dev_name(&pdev->dev), sdc);
	if (ret) {
		dev_err(&pdev->dev, "Cannot request IRQ\n");
		goto err_clk_disable;
	}

	ret = dma_async_device_register(&sdc->slave);
	if (ret) {
		dev_warn(&pdev->dev, "Failed to register DMA engine device\n");
		goto err_irq_disable;
	}

	ret = of_dma_controller_register(pdev->dev.of_node, sun6i_dma_of_xlate,
					 sdc);
	if (ret) {
		dev_err(&pdev->dev, "of_dma_controller_register failed\n");
		goto err_dma_unregister;
	}

	if (sdc->cfg->gate_needed)
		writel(SUN8I_DMA_GATE_ENABLE, sdc->base + SUN8I_DMA_GATE);

	return 0;

err_dma_unregister:
	dma_async_device_unregister(&sdc->slave);
err_irq_disable:
	sun6i_kill_tasklet(sdc);
err_clk_disable:
	clk_disable_unprepare(sdc->clk);
err_reset_assert:
	reset_control_assert(sdc->rstc);
err_chan_free:
	sun6i_dma_free(sdc);
	return ret;
}
static int sun6i_dma_remove(struct platform_device *pdev)
{
	struct sun6i_dma_dev *sdc = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&sdc->slave);

	sun6i_kill_tasklet(sdc);

	clk_disable_unprepare(sdc->clk);
	reset_control_assert(sdc->rstc);

	sun6i_dma_free(sdc);

	return 0;
}
static struct platform_driver sun6i_dma_driver = {
	.probe		= sun6i_dma_probe,
	.remove		= sun6i_dma_remove,
	.driver = {
		.name		= "sun6i-dma",
		.of_match_table	= sun6i_dma_match,
	},
};
module_platform_driver(sun6i_dma_driver);
MODULE_DESCRIPTION("Allwinner A31 DMA Controller Driver");
MODULE_AUTHOR("Sugar <shuge@allwinnertech.com>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_LICENSE("GPL");