// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2013-2014 Allwinner Tech Co., Ltd
 * Author: Sugar <shuge@allwinnertech.com>
 *
 * Copyright (C) 2014 Maxime Ripard
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "virt-dma.h"
/*
 * Common registers
 */
#define DMA_IRQ_EN(x)		((x) * 0x04)
#define DMA_IRQ_HALF			BIT(0)
#define DMA_IRQ_PKG			BIT(1)
#define DMA_IRQ_QUEUE			BIT(2)

#define DMA_IRQ_CHAN_NR			8
#define DMA_IRQ_CHAN_WIDTH		4

#define DMA_IRQ_STAT(x)		((x) * 0x04 + 0x10)

#define DMA_STAT			0x30

/* Offset between DMA_IRQ_EN and DMA_IRQ_STAT limits number of channels */
#define DMA_MAX_CHANNELS	(DMA_IRQ_CHAN_NR * 0x10 / 4)
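/*
 * Worked example: each DMA_IRQ_EN register packs DMA_IRQ_CHAN_WIDTH = 4
 * interrupt bits per channel, so one 32-bit register covers
 * DMA_IRQ_CHAN_NR = 8 channels. The enable registers occupy the 0x10
 * bytes below DMA_IRQ_STAT(0), i.e. 0x10 / 4 = 4 registers, which is
 * where the 8 * 0x10 / 4 = 32 channel ceiling comes from.
 */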
/*
 * sun8i specific registers
 */
#define SUN8I_DMA_GATE		0x20
#define SUN8I_DMA_GATE_ENABLE	0x4

#define SUNXI_H3_SECURE_REG		0x20
#define SUNXI_H3_DMA_GATE		0x28
#define SUNXI_H3_DMA_GATE_ENABLE	0x4
/*
 * Channels specific registers
 */
#define DMA_CHAN_ENABLE		0x00
#define DMA_CHAN_ENABLE_START	BIT(0)
#define DMA_CHAN_ENABLE_STOP	0

#define DMA_CHAN_PAUSE		0x04
#define DMA_CHAN_PAUSE_PAUSE	BIT(1)
#define DMA_CHAN_PAUSE_RESUME	0

#define DMA_CHAN_LLI_ADDR	0x08

#define DMA_CHAN_CUR_CFG	0x0c
#define DMA_CHAN_MAX_DRQ_A31		0x1f
#define DMA_CHAN_MAX_DRQ_H6		0x3f
#define DMA_CHAN_CFG_SRC_DRQ_A31(x)	((x) & DMA_CHAN_MAX_DRQ_A31)
#define DMA_CHAN_CFG_SRC_DRQ_H6(x)	((x) & DMA_CHAN_MAX_DRQ_H6)
#define DMA_CHAN_CFG_SRC_MODE_A31(x)	(((x) & 0x1) << 5)
#define DMA_CHAN_CFG_SRC_MODE_H6(x)	(((x) & 0x1) << 8)
#define DMA_CHAN_CFG_SRC_BURST_A31(x)	(((x) & 0x3) << 7)
#define DMA_CHAN_CFG_SRC_BURST_H3(x)	(((x) & 0x3) << 6)
#define DMA_CHAN_CFG_SRC_WIDTH(x)	(((x) & 0x3) << 9)

#define DMA_CHAN_CFG_DST_DRQ_A31(x)	(DMA_CHAN_CFG_SRC_DRQ_A31(x) << 16)
#define DMA_CHAN_CFG_DST_DRQ_H6(x)	(DMA_CHAN_CFG_SRC_DRQ_H6(x) << 16)
#define DMA_CHAN_CFG_DST_MODE_A31(x)	(DMA_CHAN_CFG_SRC_MODE_A31(x) << 16)
#define DMA_CHAN_CFG_DST_MODE_H6(x)	(DMA_CHAN_CFG_SRC_MODE_H6(x) << 16)
#define DMA_CHAN_CFG_DST_BURST_A31(x)	(DMA_CHAN_CFG_SRC_BURST_A31(x) << 16)
#define DMA_CHAN_CFG_DST_BURST_H3(x)	(DMA_CHAN_CFG_SRC_BURST_H3(x) << 16)
#define DMA_CHAN_CFG_DST_WIDTH(x)	(DMA_CHAN_CFG_SRC_WIDTH(x) << 16)

#define DMA_CHAN_CUR_SRC	0x10

#define DMA_CHAN_CUR_DST	0x14

#define DMA_CHAN_CUR_CNT	0x18

#define DMA_CHAN_CUR_PARA	0x1c
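/*
 * The channel register offsets above are relative to each channel's own
 * register block; sun6i_dma_probe() maps channel i at base + 0x100 +
 * i * 0x40, so for example channel 2's DMA_CHAN_CUR_CFG register sits
 * at 0x100 + 2 * 0x40 + 0x0c = 0x18c from the controller base.
 */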
/*
 * LLI address mangling
 *
 * The LLI link physical address is also mangled, but we avoid dealing
 * with that by allocating LLIs from the DMA32 zone.
 */
#define SRC_HIGH_ADDR(x)		(((x) & 0x3U) << 16)
#define DST_HIGH_ADDR(x)		(((x) & 0x3U) << 18)
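/*
 * Worked example: for a 34-bit bus address, the low 32 bits land in the
 * LLI src/dst words, while bits [33:32] are folded into the para word at
 * bits [17:16] for the source and [19:18] for the destination; see
 * sun6i_dma_set_addr() below.
 */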
/*
 * Various hardware related defines
 */
#define LLI_LAST_ITEM	0xfffff800
#define NORMAL_WAIT	8
#define DRQ_SDRAM	1
#define LINEAR_MODE     0
#define IO_MODE         1

/* forward declaration */
struct sun6i_dma_dev;
/*
 * Hardware channels / ports representation
 *
 * The hardware is used in several SoCs, with differing numbers
 * of channels and endpoints. This structure ties those numbers
 * to a certain compatible string.
 */
struct sun6i_dma_config {
	u32 nr_max_channels;
	u32 nr_max_requests;
	u32 nr_max_vchans;
	/*
	 * In the datasheets/user manuals of newer Allwinner SoCs, a special
	 * bit (bit 2 at register 0x20) is present.
	 * It's named "DMA MCLK interface circuit auto gating bit" in the
	 * documents, and the footnote of this register says that this bit
	 * should be set up when initializing the DMA controller.
	 * Allwinner A23/A33 user manuals do not have this bit documented,
	 * however these SoCs really have and need this bit, as seen in the
	 * BSP kernel source code.
	 */
	void (*clock_autogate_enable)(struct sun6i_dma_dev *);
	void (*set_burst_length)(u32 *p_cfg, s8 src_burst, s8 dst_burst);
	void (*set_drq)(u32 *p_cfg, s8 src_drq, s8 dst_drq);
	void (*set_mode)(u32 *p_cfg, s8 src_mode, s8 dst_mode);
	u32 src_burst_lengths;
	u32 dst_burst_lengths;
	u32 src_addr_widths;
	u32 dst_addr_widths;
	bool has_high_addr;
	bool has_mbus_clk;
};
/*
 * Hardware representation of the LLI
 *
 * The hardware will be fed the physical address of this structure,
 * and read its content in order to start the transfer.
 */
struct sun6i_dma_lli {
	u32			cfg;
	u32			src;
	u32			dst;
	u32			len;
	u32			para;
	u32			p_lli_next;

	/*
	 * This field is not used by the DMA controller, but will be
	 * used by the CPU to go through the list (mostly for dumping
	 * or freeing it).
	 */
	struct sun6i_dma_lli	*v_lli_next;
};
struct sun6i_desc {
	struct virt_dma_desc	vd;
	dma_addr_t		p_lli;
	struct sun6i_dma_lli	*v_lli;
};

struct sun6i_pchan {
	u32			idx;
	void __iomem		*base;
	struct sun6i_vchan	*vchan;
	struct sun6i_desc	*desc;
	struct sun6i_desc	*done;
};

struct sun6i_vchan {
	struct virt_dma_chan	vc;
	struct list_head	node;
	struct dma_slave_config	cfg;
	struct sun6i_pchan	*phy;
	u8			port;
	u8			irq_type;
	bool			cyclic;
};
struct sun6i_dma_dev {
	struct dma_device	slave;
	void __iomem		*base;
	struct clk		*clk;
	struct clk		*clk_mbus;
	int			irq;
	spinlock_t		lock;
	struct reset_control	*rstc;
	struct tasklet_struct	task;
	atomic_t		tasklet_shutdown;
	struct list_head	pending;
	struct dma_pool		*pool;
	struct sun6i_pchan	*pchans;
	struct sun6i_vchan	*vchans;
	const struct sun6i_dma_config *cfg;
	u32			num_pchans;
	u32			num_vchans;
	u32			max_request;
};
static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct sun6i_dma_dev *to_sun6i_dma_dev(struct dma_device *d)
{
	return container_of(d, struct sun6i_dma_dev, slave);
}

static inline struct sun6i_vchan *to_sun6i_vchan(struct dma_chan *chan)
{
	return container_of(chan, struct sun6i_vchan, vc.chan);
}

static inline struct sun6i_desc *
to_sun6i_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct sun6i_desc, vd.tx);
}
static inline void sun6i_dma_dump_com_regs(struct sun6i_dma_dev *sdev)
{
	dev_dbg(sdev->slave.dev, "Common register:\n"
		"\tmask0(%04x): 0x%08x\n"
		"\tmask1(%04x): 0x%08x\n"
		"\tpend0(%04x): 0x%08x\n"
		"\tpend1(%04x): 0x%08x\n"
		"\tstats(%04x): 0x%08x\n",
		DMA_IRQ_EN(0), readl(sdev->base + DMA_IRQ_EN(0)),
		DMA_IRQ_EN(1), readl(sdev->base + DMA_IRQ_EN(1)),
		DMA_IRQ_STAT(0), readl(sdev->base + DMA_IRQ_STAT(0)),
		DMA_IRQ_STAT(1), readl(sdev->base + DMA_IRQ_STAT(1)),
		DMA_STAT, readl(sdev->base + DMA_STAT));
}
static inline void sun6i_dma_dump_chan_regs(struct sun6i_dma_dev *sdev,
					    struct sun6i_pchan *pchan)
{
	dev_dbg(sdev->slave.dev, "Chan %d reg:\n"
		"\t___en(%04x): \t0x%08x\n"
		"\tpause(%04x): \t0x%08x\n"
		"\tstart(%04x): \t0x%08x\n"
		"\t__cfg(%04x): \t0x%08x\n"
		"\t__src(%04x): \t0x%08x\n"
		"\t__dst(%04x): \t0x%08x\n"
		"\tcount(%04x): \t0x%08x\n"
		"\t_para(%04x): \t0x%08x\n\n",
		pchan->idx,
		DMA_CHAN_ENABLE,
		readl(pchan->base + DMA_CHAN_ENABLE),
		DMA_CHAN_PAUSE,
		readl(pchan->base + DMA_CHAN_PAUSE),
		DMA_CHAN_LLI_ADDR,
		readl(pchan->base + DMA_CHAN_LLI_ADDR),
		DMA_CHAN_CUR_CFG,
		readl(pchan->base + DMA_CHAN_CUR_CFG),
		DMA_CHAN_CUR_SRC,
		readl(pchan->base + DMA_CHAN_CUR_SRC),
		DMA_CHAN_CUR_DST,
		readl(pchan->base + DMA_CHAN_CUR_DST),
		DMA_CHAN_CUR_CNT,
		readl(pchan->base + DMA_CHAN_CUR_CNT),
		DMA_CHAN_CUR_PARA,
		readl(pchan->base + DMA_CHAN_CUR_PARA));
}
static inline s8 convert_burst(u32 maxburst)
{
	switch (maxburst) {
	case 1:
		return 0;
	case 4:
		return 1;
	case 8:
		return 2;
	case 16:
		return 3;
	default:
		return -EINVAL;
	}
}

static inline s8 convert_buswidth(enum dma_slave_buswidth addr_width)
{
	return ilog2(addr_width);
}
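/*
 * These two helpers translate dmaengine units into the 2-bit fields of
 * DMA_CHAN_CUR_CFG: bursts of 1/4/8/16 transfers encode as 0/1/2/3, and
 * a bus width of 1/2/4/8 bytes encodes as ilog2(width) = 0/1/2/3.
 */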
static void sun6i_enable_clock_autogate_a23(struct sun6i_dma_dev *sdev)
{
	writel(SUN8I_DMA_GATE_ENABLE, sdev->base + SUN8I_DMA_GATE);
}

static void sun6i_enable_clock_autogate_h3(struct sun6i_dma_dev *sdev)
{
	writel(SUNXI_H3_DMA_GATE_ENABLE, sdev->base + SUNXI_H3_DMA_GATE);
}
static void sun6i_set_burst_length_a31(u32 *p_cfg, s8 src_burst, s8 dst_burst)
{
	*p_cfg |= DMA_CHAN_CFG_SRC_BURST_A31(src_burst) |
		  DMA_CHAN_CFG_DST_BURST_A31(dst_burst);
}

static void sun6i_set_burst_length_h3(u32 *p_cfg, s8 src_burst, s8 dst_burst)
{
	*p_cfg |= DMA_CHAN_CFG_SRC_BURST_H3(src_burst) |
		  DMA_CHAN_CFG_DST_BURST_H3(dst_burst);
}

static void sun6i_set_drq_a31(u32 *p_cfg, s8 src_drq, s8 dst_drq)
{
	*p_cfg |= DMA_CHAN_CFG_SRC_DRQ_A31(src_drq) |
		  DMA_CHAN_CFG_DST_DRQ_A31(dst_drq);
}

static void sun6i_set_drq_h6(u32 *p_cfg, s8 src_drq, s8 dst_drq)
{
	*p_cfg |= DMA_CHAN_CFG_SRC_DRQ_H6(src_drq) |
		  DMA_CHAN_CFG_DST_DRQ_H6(dst_drq);
}

static void sun6i_set_mode_a31(u32 *p_cfg, s8 src_mode, s8 dst_mode)
{
	*p_cfg |= DMA_CHAN_CFG_SRC_MODE_A31(src_mode) |
		  DMA_CHAN_CFG_DST_MODE_A31(dst_mode);
}

static void sun6i_set_mode_h6(u32 *p_cfg, s8 src_mode, s8 dst_mode)
{
	*p_cfg |= DMA_CHAN_CFG_SRC_MODE_H6(src_mode) |
		  DMA_CHAN_CFG_DST_MODE_H6(dst_mode);
}
static size_t sun6i_get_chan_size(struct sun6i_pchan *pchan)
{
	struct sun6i_desc *txd = pchan->desc;
	struct sun6i_dma_lli *lli;
	size_t bytes;
	dma_addr_t pos;

	pos = readl(pchan->base + DMA_CHAN_LLI_ADDR);
	bytes = readl(pchan->base + DMA_CHAN_CUR_CNT);

	if (pos == LLI_LAST_ITEM)
		return bytes;

	for (lli = txd->v_lli; lli; lli = lli->v_lli_next) {
		if (lli->p_lli_next == pos) {
			for (lli = lli->v_lli_next; lli; lli = lli->v_lli_next)
				bytes += lli->len;
			break;
		}
	}

	return bytes;
}
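/*
 * Residue accounting sketch: DMA_CHAN_CUR_CNT holds the bytes left in
 * the LLI the hardware is currently executing, while DMA_CHAN_LLI_ADDR
 * points at the next LLI it will load. Adding the lengths of every LLI
 * from that point onwards gives the bytes still outstanding for the
 * whole descriptor.
 */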
static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev,
			       struct sun6i_dma_lli *next,
			       dma_addr_t next_phy,
			       struct sun6i_desc *txd)
{
	if ((!prev && !txd) || !next)
		return NULL;

	if (!prev) {
		txd->p_lli = next_phy;
		txd->v_lli = next;
	} else {
		prev->p_lli_next = next_phy;
		prev->v_lli_next = next;
	}

	next->p_lli_next = LLI_LAST_ITEM;
	next->v_lli_next = NULL;

	return next;
}
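/*
 * Usage sketch: the first call passes prev == NULL so the LLI is hooked
 * onto the descriptor head (txd->p_lli / txd->v_lli); each later call
 * chains onto the node returned by the previous one:
 *
 *	prev = sun6i_dma_lli_add(NULL, first, first_phy, txd);
 *	prev = sun6i_dma_lli_add(prev, second, second_phy, txd);
 *
 * Every new tail is terminated with LLI_LAST_ITEM until the next link
 * (or, for cyclic transfers, a link back to the head) overwrites it.
 */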
static inline void sun6i_dma_dump_lli(struct sun6i_vchan *vchan,
				      struct sun6i_dma_lli *v_lli,
				      dma_addr_t p_lli)
{
	dev_dbg(chan2dev(&vchan->vc.chan),
		"\n\tdesc:\tp - %pad v - 0x%p\n"
		"\t\tc - 0x%08x s - 0x%08x d - 0x%08x\n"
		"\t\tl - 0x%08x p - 0x%08x n - 0x%08x\n",
		&p_lli, v_lli,
		v_lli->cfg, v_lli->src, v_lli->dst,
		v_lli->len, v_lli->para, v_lli->p_lli_next);
}
static void sun6i_dma_free_desc(struct virt_dma_desc *vd)
{
	struct sun6i_desc *txd = to_sun6i_desc(&vd->tx);
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vd->tx.chan->device);
	struct sun6i_dma_lli *v_lli, *v_next;
	dma_addr_t p_lli, p_next;

	if (unlikely(!txd))
		return;

	p_lli = txd->p_lli;
	v_lli = txd->v_lli;

	while (v_lli) {
		v_next = v_lli->v_lli_next;
		p_next = v_lli->p_lli_next;

		dma_pool_free(sdev->pool, v_lli, p_lli);

		v_lli = v_next;
		p_lli = p_next;
	}

	kfree(txd);
}
static int sun6i_dma_start_desc(struct sun6i_vchan *vchan)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device);
	struct virt_dma_desc *desc = vchan_next_desc(&vchan->vc);
	struct sun6i_pchan *pchan = vchan->phy;
	u32 irq_val, irq_reg, irq_offset;

	if (!pchan)
		return -EAGAIN;

	if (!desc) {
		pchan->desc = NULL;
		pchan->done = NULL;
		return -EAGAIN;
	}

	list_del(&desc->node);

	pchan->desc = to_sun6i_desc(&desc->tx);
	pchan->done = NULL;

	sun6i_dma_dump_lli(vchan, pchan->desc->v_lli, pchan->desc->p_lli);

	irq_reg = pchan->idx / DMA_IRQ_CHAN_NR;
	irq_offset = pchan->idx % DMA_IRQ_CHAN_NR;

	vchan->irq_type = vchan->cyclic ? DMA_IRQ_PKG : DMA_IRQ_QUEUE;

	irq_val = readl(sdev->base + DMA_IRQ_EN(irq_reg));
	irq_val &= ~((DMA_IRQ_HALF | DMA_IRQ_PKG | DMA_IRQ_QUEUE) <<
			(irq_offset * DMA_IRQ_CHAN_WIDTH));
	irq_val |= vchan->irq_type << (irq_offset * DMA_IRQ_CHAN_WIDTH);
	writel(irq_val, sdev->base + DMA_IRQ_EN(irq_reg));

	writel(pchan->desc->p_lli, pchan->base + DMA_CHAN_LLI_ADDR);
	writel(DMA_CHAN_ENABLE_START, pchan->base + DMA_CHAN_ENABLE);

	sun6i_dma_dump_com_regs(sdev);
	sun6i_dma_dump_chan_regs(sdev, pchan);

	return 0;
}
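/*
 * IRQ routing example: each channel owns DMA_IRQ_CHAN_WIDTH = 4 bits in
 * one of the DMA_IRQ_EN registers. For physical channel 10 this gives
 * irq_reg = 10 / 8 = 1 and irq_offset = 10 % 8 = 2, i.e. bits [11:8] of
 * DMA_IRQ_EN(1) select between the half, package and queue interrupts.
 */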
static void sun6i_dma_tasklet(struct tasklet_struct *t)
{
	struct sun6i_dma_dev *sdev = from_tasklet(sdev, t, task);
	struct sun6i_vchan *vchan;
	struct sun6i_pchan *pchan;
	unsigned int pchan_alloc = 0;
	unsigned int pchan_idx;

	list_for_each_entry(vchan, &sdev->slave.channels, vc.chan.device_node) {
		spin_lock_irq(&vchan->vc.lock);

		pchan = vchan->phy;

		if (pchan && pchan->done) {
			if (sun6i_dma_start_desc(vchan)) {
				/*
				 * No current txd associated with this channel
				 */
				dev_dbg(sdev->slave.dev, "pchan %u: free\n",
					pchan->idx);

				/* Mark this channel free */
				vchan->phy = NULL;
				pchan->vchan = NULL;
			}
		}
		spin_unlock_irq(&vchan->vc.lock);
	}

	spin_lock_irq(&sdev->lock);
	for (pchan_idx = 0; pchan_idx < sdev->num_pchans; pchan_idx++) {
		pchan = &sdev->pchans[pchan_idx];

		if (pchan->vchan || list_empty(&sdev->pending))
			continue;

		vchan = list_first_entry(&sdev->pending,
					 struct sun6i_vchan, node);

		/* Remove from pending channels */
		list_del_init(&vchan->node);
		pchan_alloc |= BIT(pchan_idx);

		/* Mark this channel allocated */
		pchan->vchan = vchan;
		vchan->phy = pchan;
		dev_dbg(sdev->slave.dev, "pchan %u: alloc vchan %p\n",
			pchan->idx, &vchan->vc);
	}
	spin_unlock_irq(&sdev->lock);

	for (pchan_idx = 0; pchan_idx < sdev->num_pchans; pchan_idx++) {
		if (!(pchan_alloc & BIT(pchan_idx)))
			continue;

		pchan = sdev->pchans + pchan_idx;
		vchan = pchan->vchan;
		if (vchan) {
			spin_lock_irq(&vchan->vc.lock);
			sun6i_dma_start_desc(vchan);
			spin_unlock_irq(&vchan->vc.lock);
		}
	}
}
static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id)
{
	struct sun6i_dma_dev *sdev = dev_id;
	struct sun6i_vchan *vchan;
	struct sun6i_pchan *pchan;
	int i, j, ret = IRQ_NONE;
	u32 status;

	for (i = 0; i < sdev->num_pchans / DMA_IRQ_CHAN_NR; i++) {
		status = readl(sdev->base + DMA_IRQ_STAT(i));
		if (!status)
			continue;

		dev_dbg(sdev->slave.dev, "DMA irq status %s: 0x%x\n",
			i ? "high" : "low", status);

		writel(status, sdev->base + DMA_IRQ_STAT(i));

		for (j = 0; (j < DMA_IRQ_CHAN_NR) && status; j++) {
			pchan = sdev->pchans + j;
			vchan = pchan->vchan;
			if (vchan && (status & vchan->irq_type)) {
				if (vchan->cyclic) {
					vchan_cyclic_callback(&pchan->desc->vd);
				} else {
					spin_lock(&vchan->vc.lock);
					vchan_cookie_complete(&pchan->desc->vd);
					pchan->done = pchan->desc;
					spin_unlock(&vchan->vc.lock);
				}
			}

			status = status >> DMA_IRQ_CHAN_WIDTH;
		}

		if (!atomic_read(&sdev->tasklet_shutdown))
			tasklet_schedule(&sdev->task);
		ret = IRQ_HANDLED;
	}

	return ret;
}
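/*
 * Note on the status walk above: DMA_IRQ_STAT(i) mirrors the layout of
 * DMA_IRQ_EN(i), four bits per channel, so shifting the latched status
 * right by DMA_IRQ_CHAN_WIDTH after each iteration lines the next
 * channel's bits up against vchan->irq_type.
 */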
static int set_config(struct sun6i_dma_dev *sdev,
		      struct dma_slave_config *sconfig,
		      enum dma_transfer_direction direction,
		      u32 *p_cfg)
{
	enum dma_slave_buswidth src_addr_width, dst_addr_width;
	u32 src_maxburst, dst_maxburst;
	s8 src_width, dst_width, src_burst, dst_burst;

	src_addr_width = sconfig->src_addr_width;
	dst_addr_width = sconfig->dst_addr_width;
	src_maxburst = sconfig->src_maxburst;
	dst_maxburst = sconfig->dst_maxburst;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		src_maxburst = src_maxburst ? src_maxburst : 8;
		break;
	case DMA_DEV_TO_MEM:
		if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		dst_maxburst = dst_maxburst ? dst_maxburst : 8;
		break;
	default:
		return -EINVAL;
	}

	if (!(BIT(src_addr_width) & sdev->slave.src_addr_widths))
		return -EINVAL;
	if (!(BIT(dst_addr_width) & sdev->slave.dst_addr_widths))
		return -EINVAL;
	if (!(BIT(src_maxburst) & sdev->cfg->src_burst_lengths))
		return -EINVAL;
	if (!(BIT(dst_maxburst) & sdev->cfg->dst_burst_lengths))
		return -EINVAL;

	src_width = convert_buswidth(src_addr_width);
	dst_width = convert_buswidth(dst_addr_width);
	dst_burst = convert_burst(dst_maxburst);
	src_burst = convert_burst(src_maxburst);

	*p_cfg = DMA_CHAN_CFG_SRC_WIDTH(src_width) |
		DMA_CHAN_CFG_DST_WIDTH(dst_width);

	sdev->cfg->set_burst_length(p_cfg, src_burst, dst_burst);

	return 0;
}
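/*
 * Example: a device with 4-byte registers and the default maxburst of 8
 * yields src_width = dst_width = 2 and src_burst = dst_burst = 2, so
 * *p_cfg starts out as DMA_CHAN_CFG_SRC_WIDTH(2) | DMA_CHAN_CFG_DST_WIDTH(2)
 * = (2 << 9) | (2 << 25) before the SoC-specific burst bits are OR'ed in.
 */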
static inline void sun6i_dma_set_addr(struct sun6i_dma_dev *sdev,
				      struct sun6i_dma_lli *v_lli,
				      dma_addr_t src, dma_addr_t dst)
{
	v_lli->src = lower_32_bits(src);
	v_lli->dst = lower_32_bits(dst);

	if (sdev->cfg->has_high_addr)
		v_lli->para |= SRC_HIGH_ADDR(upper_32_bits(src)) |
			       DST_HIGH_ADDR(upper_32_bits(dst));
}
static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	struct sun6i_dma_lli *v_lli;
	struct sun6i_desc *txd;
	dma_addr_t p_lli;
	s8 burst, width;

	dev_dbg(chan2dev(chan),
		"%s; chan: %d, dest: %pad, src: %pad, len: %zu. flags: 0x%08lx\n",
		__func__, vchan->vc.chan.chan_id, &dest, &src, len, flags);

	if (!len)
		return NULL;

	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
	if (!txd)
		return NULL;

	v_lli = dma_pool_alloc(sdev->pool, GFP_DMA32 | GFP_NOWAIT, &p_lli);
	if (!v_lli) {
		dev_err(sdev->slave.dev, "Failed to alloc lli memory\n");
		goto err_txd_free;
	}

	v_lli->len = len;
	v_lli->para = NORMAL_WAIT;
	sun6i_dma_set_addr(sdev, v_lli, src, dest);

	burst = convert_burst(8);
	width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES);
	v_lli->cfg = DMA_CHAN_CFG_SRC_WIDTH(width) |
		DMA_CHAN_CFG_DST_WIDTH(width);

	sdev->cfg->set_burst_length(&v_lli->cfg, burst, burst);
	sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, DRQ_SDRAM);
	sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, LINEAR_MODE);

	sun6i_dma_lli_add(NULL, v_lli, p_lli, txd);

	sun6i_dma_dump_lli(vchan, v_lli, p_lli);

	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_txd_free:
	kfree(txd);
	return NULL;
}
static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction dir,
		unsigned long flags, void *context)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct sun6i_dma_lli *v_lli, *prev = NULL;
	struct sun6i_desc *txd;
	struct scatterlist *sg;
	dma_addr_t p_lli;
	u32 lli_cfg;
	unsigned int i;
	int ret;

	if (!sgl)
		return NULL;

	ret = set_config(sdev, sconfig, dir, &lli_cfg);
	if (ret) {
		dev_err(chan2dev(chan), "Invalid DMA configuration\n");
		return NULL;
	}

	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
	if (!txd)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		v_lli = dma_pool_alloc(sdev->pool, GFP_DMA32 | GFP_NOWAIT, &p_lli);
		if (!v_lli)
			goto err_lli_free;

		v_lli->len = sg_dma_len(sg);
		v_lli->para = NORMAL_WAIT;

		if (dir == DMA_MEM_TO_DEV) {
			sun6i_dma_set_addr(sdev, v_lli,
					   sg_dma_address(sg),
					   sconfig->dst_addr);
			v_lli->cfg = lli_cfg;
			sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, vchan->port);
			sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, IO_MODE);

			dev_dbg(chan2dev(chan),
				"%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n",
				__func__, vchan->vc.chan.chan_id,
				&sconfig->dst_addr, &sg_dma_address(sg),
				sg_dma_len(sg), flags);
		} else {
			sun6i_dma_set_addr(sdev, v_lli,
					   sconfig->src_addr,
					   sg_dma_address(sg));
			v_lli->cfg = lli_cfg;
			sdev->cfg->set_drq(&v_lli->cfg, vchan->port, DRQ_SDRAM);
			sdev->cfg->set_mode(&v_lli->cfg, IO_MODE, LINEAR_MODE);

			dev_dbg(chan2dev(chan),
				"%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n",
				__func__, vchan->vc.chan.chan_id,
				&sg_dma_address(sg), &sconfig->src_addr,
				sg_dma_len(sg), flags);
		}

		prev = sun6i_dma_lli_add(prev, v_lli, p_lli, txd);
	}

	dev_dbg(chan2dev(chan), "First: %pad\n", &txd->p_lli);
	for (p_lli = txd->p_lli, v_lli = txd->v_lli; v_lli;
	     p_lli = v_lli->p_lli_next, v_lli = v_lli->v_lli_next)
		sun6i_dma_dump_lli(vchan, v_lli, p_lli);

	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_lli_free:
	for (p_lli = txd->p_lli, v_lli = txd->v_lli; v_lli;
	     p_lli = v_lli->p_lli_next, v_lli = v_lli->v_lli_next)
		dma_pool_free(sdev->pool, v_lli, p_lli);
	kfree(txd);
	return NULL;
}
static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic(
		struct dma_chan *chan,
		dma_addr_t buf_addr,
		size_t buf_len,
		size_t period_len,
		enum dma_transfer_direction dir,
		unsigned long flags)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct sun6i_dma_lli *v_lli, *prev = NULL;
	struct sun6i_desc *txd;
	dma_addr_t p_lli;
	u32 lli_cfg;
	unsigned int i, periods = buf_len / period_len;
	int ret;

	ret = set_config(sdev, sconfig, dir, &lli_cfg);
	if (ret) {
		dev_err(chan2dev(chan), "Invalid DMA configuration\n");
		return NULL;
	}

	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
	if (!txd)
		return NULL;

	for (i = 0; i < periods; i++) {
		v_lli = dma_pool_alloc(sdev->pool, GFP_DMA32 | GFP_NOWAIT, &p_lli);
		if (!v_lli) {
			dev_err(sdev->slave.dev, "Failed to alloc lli memory\n");
			goto err_lli_free;
		}

		v_lli->len = period_len;
		v_lli->para = NORMAL_WAIT;

		if (dir == DMA_MEM_TO_DEV) {
			sun6i_dma_set_addr(sdev, v_lli,
					   buf_addr + period_len * i,
					   sconfig->dst_addr);
			v_lli->cfg = lli_cfg;
			sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, vchan->port);
			sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, IO_MODE);
		} else {
			sun6i_dma_set_addr(sdev, v_lli,
					   sconfig->src_addr,
					   buf_addr + period_len * i);
			v_lli->cfg = lli_cfg;
			sdev->cfg->set_drq(&v_lli->cfg, vchan->port, DRQ_SDRAM);
			sdev->cfg->set_mode(&v_lli->cfg, IO_MODE, LINEAR_MODE);
		}

		prev = sun6i_dma_lli_add(prev, v_lli, p_lli, txd);
	}

	prev->p_lli_next = txd->p_lli;		/* cyclic list */

	vchan->cyclic = true;

	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_lli_free:
	for (p_lli = txd->p_lli, v_lli = txd->v_lli; v_lli;
	     p_lli = v_lli->p_lli_next, v_lli = v_lli->v_lli_next)
		dma_pool_free(sdev->pool, v_lli, p_lli);
	kfree(txd);
	return NULL;
}
static int sun6i_dma_config(struct dma_chan *chan,
			    struct dma_slave_config *config)
{
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);

	memcpy(&vchan->cfg, config, sizeof(*config));

	return 0;
}
static int sun6i_dma_pause(struct dma_chan *chan)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	struct sun6i_pchan *pchan = vchan->phy;

	dev_dbg(chan2dev(chan), "vchan %p: pause\n", &vchan->vc);

	if (pchan) {
		writel(DMA_CHAN_PAUSE_PAUSE,
		       pchan->base + DMA_CHAN_PAUSE);
	} else {
		spin_lock(&sdev->lock);
		list_del_init(&vchan->node);
		spin_unlock(&sdev->lock);
	}

	return 0;
}
static int sun6i_dma_resume(struct dma_chan *chan)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	struct sun6i_pchan *pchan = vchan->phy;
	unsigned long flags;

	dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc);

	spin_lock_irqsave(&vchan->vc.lock, flags);

	if (pchan) {
		writel(DMA_CHAN_PAUSE_RESUME,
		       pchan->base + DMA_CHAN_PAUSE);
	} else if (!list_empty(&vchan->vc.desc_issued)) {
		spin_lock(&sdev->lock);
		list_add_tail(&vchan->node, &sdev->pending);
		spin_unlock(&sdev->lock);
	}

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	return 0;
}
static int sun6i_dma_terminate_all(struct dma_chan *chan)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	struct sun6i_pchan *pchan = vchan->phy;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock(&sdev->lock);
	list_del_init(&vchan->node);
	spin_unlock(&sdev->lock);

	spin_lock_irqsave(&vchan->vc.lock, flags);

	if (vchan->cyclic) {
		vchan->cyclic = false;
		if (pchan && pchan->desc) {
			struct virt_dma_desc *vd = &pchan->desc->vd;
			struct virt_dma_chan *vc = &vchan->vc;

			list_add_tail(&vd->node, &vc->desc_completed);
		}
	}

	vchan_get_all_descriptors(&vchan->vc, &head);

	if (pchan) {
		writel(DMA_CHAN_ENABLE_STOP, pchan->base + DMA_CHAN_ENABLE);
		writel(DMA_CHAN_PAUSE_RESUME, pchan->base + DMA_CHAN_PAUSE);

		vchan->phy = NULL;
		pchan->vchan = NULL;
		pchan->desc = NULL;
		pchan->done = NULL;
	}

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	vchan_dma_desc_free_list(&vchan->vc, &head);

	return 0;
}
static enum dma_status sun6i_dma_tx_status(struct dma_chan *chan,
					   dma_cookie_t cookie,
					   struct dma_tx_state *state)
{
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	struct sun6i_pchan *pchan = vchan->phy;
	struct sun6i_dma_lli *lli;
	struct virt_dma_desc *vd;
	struct sun6i_desc *txd;
	enum dma_status ret;
	unsigned long flags;
	size_t bytes = 0;

	ret = dma_cookie_status(chan, cookie, state);
	if (ret == DMA_COMPLETE || !state)
		return ret;

	spin_lock_irqsave(&vchan->vc.lock, flags);

	vd = vchan_find_desc(&vchan->vc, cookie);
	txd = to_sun6i_desc(&vd->tx);

	if (vd) {
		for (lli = txd->v_lli; lli != NULL; lli = lli->v_lli_next)
			bytes += lli->len;
	} else if (!pchan || !pchan->desc) {
		bytes = 0;
	} else {
		bytes = sun6i_get_chan_size(pchan);
	}

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	dma_set_residue(state, bytes);

	return ret;
}
static void sun6i_dma_issue_pending(struct dma_chan *chan)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&vchan->vc.lock, flags);

	if (vchan_issue_pending(&vchan->vc)) {
		spin_lock(&sdev->lock);

		if (!vchan->phy && list_empty(&vchan->node)) {
			list_add_tail(&vchan->node, &sdev->pending);
			tasklet_schedule(&sdev->task);
			dev_dbg(chan2dev(chan), "vchan %p: issued\n",
				&vchan->vc);
		}

		spin_unlock(&sdev->lock);
	} else {
		dev_dbg(chan2dev(chan), "vchan %p: nothing to issue\n",
			&vchan->vc);
	}

	spin_unlock_irqrestore(&vchan->vc.lock, flags);
}
static void sun6i_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&sdev->lock, flags);
	list_del_init(&vchan->node);
	spin_unlock_irqrestore(&sdev->lock, flags);

	vchan_free_chan_resources(&vchan->vc);
}
static struct dma_chan *sun6i_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct sun6i_dma_dev *sdev = ofdma->of_dma_data;
	struct sun6i_vchan *vchan;
	struct dma_chan *chan;
	u8 port = dma_spec->args[0];

	if (port > sdev->max_request)
		return NULL;

	chan = dma_get_any_slave_channel(&sdev->slave);
	if (!chan)
		return NULL;

	vchan = to_sun6i_vchan(chan);
	vchan->port = port;

	return chan;
}
*sdev
)
1053 /* Disable all interrupts from DMA */
1054 writel(0, sdev
->base
+ DMA_IRQ_EN(0));
1055 writel(0, sdev
->base
+ DMA_IRQ_EN(1));
1057 /* Prevent spurious interrupts from scheduling the tasklet */
1058 atomic_inc(&sdev
->tasklet_shutdown
);
1060 /* Make sure we won't have any further interrupts */
1061 devm_free_irq(sdev
->slave
.dev
, sdev
->irq
, sdev
);
1063 /* Actually prevent the tasklet from being scheduled */
1064 tasklet_kill(&sdev
->task
);
static inline void sun6i_dma_free(struct sun6i_dma_dev *sdev)
{
	int i;

	for (i = 0; i < sdev->num_vchans; i++) {
		struct sun6i_vchan *vchan = &sdev->vchans[i];

		list_del(&vchan->vc.chan.device_node);
		tasklet_kill(&vchan->vc.task);
	}
}
/*
 * For A31:
 *
 * There are 16 physical channels that can work in parallel.
 *
 * However we have 30 different endpoints for our requests.
 *
 * Since the channels are able to handle only a unidirectional
 * transfer, we need to allocate more virtual channels so that
 * everyone can grab one channel.
 *
 * Some devices can't work in both directions (mostly because it
 * wouldn't make sense), so we have a bit fewer virtual channels than
 * 2 channels per endpoint.
 */
static struct sun6i_dma_config sun6i_a31_dma_cfg = {
	.nr_max_channels = 16,
	.nr_max_requests = 30,
	.nr_max_vchans   = 53,
	.set_burst_length = sun6i_set_burst_length_a31,
	.set_drq          = sun6i_set_drq_a31,
	.set_mode         = sun6i_set_mode_a31,
	.src_burst_lengths = BIT(1) | BIT(8),
	.dst_burst_lengths = BIT(1) | BIT(8),
	.src_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
	.dst_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
};
/*
 * The A23 only has 8 physical channels, a maximum DRQ port id of 24,
 * and a total of 37 usable source and destination endpoints.
 */
static struct sun6i_dma_config sun8i_a23_dma_cfg = {
	.nr_max_channels = 8,
	.nr_max_requests = 24,
	.nr_max_vchans   = 37,
	.clock_autogate_enable = sun6i_enable_clock_autogate_a23,
	.set_burst_length = sun6i_set_burst_length_a31,
	.set_drq          = sun6i_set_drq_a31,
	.set_mode         = sun6i_set_mode_a31,
	.src_burst_lengths = BIT(1) | BIT(8),
	.dst_burst_lengths = BIT(1) | BIT(8),
	.src_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
	.dst_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
};
static struct sun6i_dma_config sun8i_a83t_dma_cfg = {
	.nr_max_channels = 8,
	.nr_max_requests = 28,
	.nr_max_vchans   = 39,
	.clock_autogate_enable = sun6i_enable_clock_autogate_a23,
	.set_burst_length = sun6i_set_burst_length_a31,
	.set_drq          = sun6i_set_drq_a31,
	.set_mode         = sun6i_set_mode_a31,
	.src_burst_lengths = BIT(1) | BIT(8),
	.dst_burst_lengths = BIT(1) | BIT(8),
	.src_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
	.dst_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
};
/*
 * The H3 has 12 physical channels, a maximum DRQ port id of 27,
 * and a total of 34 usable source and destination endpoints.
 * It also supports additional burst lengths and bus widths,
 * and the burst length fields have different offsets.
 */
static struct sun6i_dma_config sun8i_h3_dma_cfg = {
	.nr_max_channels = 12,
	.nr_max_requests = 27,
	.nr_max_vchans   = 34,
	.clock_autogate_enable = sun6i_enable_clock_autogate_h3,
	.set_burst_length = sun6i_set_burst_length_h3,
	.set_drq          = sun6i_set_drq_a31,
	.set_mode         = sun6i_set_mode_a31,
	.src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
	.dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
	.src_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
	.dst_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
};
/*
 * The A64 binding uses the number of dma channels from the
 * device tree node.
 */
static struct sun6i_dma_config sun50i_a64_dma_cfg = {
	.clock_autogate_enable = sun6i_enable_clock_autogate_h3,
	.set_burst_length = sun6i_set_burst_length_h3,
	.set_drq          = sun6i_set_drq_a31,
	.set_mode         = sun6i_set_mode_a31,
	.src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
	.dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
	.src_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
	.dst_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
};
/*
 * The A100 binding uses the number of dma channels from the
 * device tree node.
 */
static struct sun6i_dma_config sun50i_a100_dma_cfg = {
	.clock_autogate_enable = sun6i_enable_clock_autogate_h3,
	.set_burst_length = sun6i_set_burst_length_h3,
	.set_drq          = sun6i_set_drq_h6,
	.set_mode         = sun6i_set_mode_h6,
	.src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
	.dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
	.src_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
	.dst_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
	.has_high_addr = true,
	.has_mbus_clk = true,
};
/*
 * The H6 binding uses the number of dma channels from the
 * device tree node.
 */
static struct sun6i_dma_config sun50i_h6_dma_cfg = {
	.clock_autogate_enable = sun6i_enable_clock_autogate_h3,
	.set_burst_length = sun6i_set_burst_length_h3,
	.set_drq          = sun6i_set_drq_h6,
	.set_mode         = sun6i_set_mode_h6,
	.src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
	.dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
	.src_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
	.dst_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
	.has_mbus_clk = true,
};
/*
 * The V3s has only 8 physical channels, a maximum DRQ port id of 23,
 * and a total of 24 usable source and destination endpoints.
 */
static struct sun6i_dma_config sun8i_v3s_dma_cfg = {
	.nr_max_channels = 8,
	.nr_max_requests = 23,
	.nr_max_vchans   = 24,
	.clock_autogate_enable = sun6i_enable_clock_autogate_a23,
	.set_burst_length = sun6i_set_burst_length_a31,
	.set_drq          = sun6i_set_drq_a31,
	.set_mode         = sun6i_set_mode_a31,
	.src_burst_lengths = BIT(1) | BIT(8),
	.dst_burst_lengths = BIT(1) | BIT(8),
	.src_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
	.dst_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
};
static const struct of_device_id sun6i_dma_match[] = {
	{ .compatible = "allwinner,sun6i-a31-dma", .data = &sun6i_a31_dma_cfg },
	{ .compatible = "allwinner,sun8i-a23-dma", .data = &sun8i_a23_dma_cfg },
	{ .compatible = "allwinner,sun8i-a83t-dma", .data = &sun8i_a83t_dma_cfg },
	{ .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg },
	{ .compatible = "allwinner,sun8i-v3s-dma", .data = &sun8i_v3s_dma_cfg },
	{ .compatible = "allwinner,sun20i-d1-dma", .data = &sun50i_a100_dma_cfg },
	{ .compatible = "allwinner,sun50i-a64-dma", .data = &sun50i_a64_dma_cfg },
	{ .compatible = "allwinner,sun50i-a100-dma", .data = &sun50i_a100_dma_cfg },
	{ .compatible = "allwinner,sun50i-h6-dma", .data = &sun50i_h6_dma_cfg },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sun6i_dma_match);
static int sun6i_dma_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct sun6i_dma_dev *sdc;
	int ret, i;

	sdc = devm_kzalloc(&pdev->dev, sizeof(*sdc), GFP_KERNEL);
	if (!sdc)
		return -ENOMEM;

	sdc->cfg = of_device_get_match_data(&pdev->dev);
	if (!sdc->cfg)
		return -ENODEV;

	sdc->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(sdc->base))
		return PTR_ERR(sdc->base);

	sdc->irq = platform_get_irq(pdev, 0);
	if (sdc->irq < 0)
		return sdc->irq;

	sdc->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(sdc->clk)) {
		dev_err(&pdev->dev, "No clock specified\n");
		return PTR_ERR(sdc->clk);
	}

	if (sdc->cfg->has_mbus_clk) {
		sdc->clk_mbus = devm_clk_get(&pdev->dev, "mbus");
		if (IS_ERR(sdc->clk_mbus)) {
			dev_err(&pdev->dev, "No mbus clock specified\n");
			return PTR_ERR(sdc->clk_mbus);
		}
	}

	sdc->rstc = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(sdc->rstc)) {
		dev_err(&pdev->dev, "No reset controller specified\n");
		return PTR_ERR(sdc->rstc);
	}

	sdc->pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
				     sizeof(struct sun6i_dma_lli), 4, 0);
	if (!sdc->pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, sdc);
	INIT_LIST_HEAD(&sdc->pending);
	spin_lock_init(&sdc->lock);

	dma_set_max_seg_size(&pdev->dev, SZ_32M - 1);

	dma_cap_set(DMA_PRIVATE, sdc->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, sdc->slave.cap_mask);
	dma_cap_set(DMA_SLAVE, sdc->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, sdc->slave.cap_mask);

	INIT_LIST_HEAD(&sdc->slave.channels);
	sdc->slave.device_free_chan_resources	= sun6i_dma_free_chan_resources;
	sdc->slave.device_tx_status		= sun6i_dma_tx_status;
	sdc->slave.device_issue_pending		= sun6i_dma_issue_pending;
	sdc->slave.device_prep_slave_sg		= sun6i_dma_prep_slave_sg;
	sdc->slave.device_prep_dma_memcpy	= sun6i_dma_prep_dma_memcpy;
	sdc->slave.device_prep_dma_cyclic	= sun6i_dma_prep_dma_cyclic;
	sdc->slave.copy_align			= DMAENGINE_ALIGN_4_BYTES;
	sdc->slave.device_config		= sun6i_dma_config;
	sdc->slave.device_pause			= sun6i_dma_pause;
	sdc->slave.device_resume		= sun6i_dma_resume;
	sdc->slave.device_terminate_all		= sun6i_dma_terminate_all;
	sdc->slave.src_addr_widths		= sdc->cfg->src_addr_widths;
	sdc->slave.dst_addr_widths		= sdc->cfg->dst_addr_widths;
	sdc->slave.directions			= BIT(DMA_DEV_TO_MEM) |
						  BIT(DMA_MEM_TO_DEV);
	sdc->slave.residue_granularity		= DMA_RESIDUE_GRANULARITY_BURST;
	sdc->slave.dev = &pdev->dev;

	sdc->num_pchans = sdc->cfg->nr_max_channels;
	sdc->num_vchans = sdc->cfg->nr_max_vchans;
	sdc->max_request = sdc->cfg->nr_max_requests;

	ret = of_property_read_u32(np, "dma-channels", &sdc->num_pchans);
	if (ret && !sdc->num_pchans) {
		dev_err(&pdev->dev, "Can't get dma-channels.\n");
		return ret;
	}

	ret = of_property_read_u32(np, "dma-requests", &sdc->max_request);
	if (ret && !sdc->max_request) {
		dev_info(&pdev->dev, "Missing dma-requests, using %u.\n",
			 DMA_CHAN_MAX_DRQ_A31);
		sdc->max_request = DMA_CHAN_MAX_DRQ_A31;
	}

	/*
	 * If the number of vchans is not specified, derive it from the
	 * highest port number, at most one channel per port and direction.
	 */
	if (!sdc->num_vchans)
		sdc->num_vchans = 2 * (sdc->max_request + 1);
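	/*
	 * Worked example: with the DMA_CHAN_MAX_DRQ_A31 fallback above
	 * (0x1f = 31 requests), this derives 2 * (31 + 1) = 64 virtual
	 * channels: one per port and direction.
	 */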
	sdc->pchans = devm_kcalloc(&pdev->dev, sdc->num_pchans,
				   sizeof(struct sun6i_pchan), GFP_KERNEL);
	if (!sdc->pchans)
		return -ENOMEM;

	sdc->vchans = devm_kcalloc(&pdev->dev, sdc->num_vchans,
				   sizeof(struct sun6i_vchan), GFP_KERNEL);
	if (!sdc->vchans)
		return -ENOMEM;

	tasklet_setup(&sdc->task, sun6i_dma_tasklet);

	for (i = 0; i < sdc->num_pchans; i++) {
		struct sun6i_pchan *pchan = &sdc->pchans[i];

		pchan->idx = i;
		pchan->base = sdc->base + 0x100 + i * 0x40;
	}

	for (i = 0; i < sdc->num_vchans; i++) {
		struct sun6i_vchan *vchan = &sdc->vchans[i];

		INIT_LIST_HEAD(&vchan->node);
		vchan->vc.desc_free = sun6i_dma_free_desc;
		vchan_init(&vchan->vc, &sdc->slave);
	}

	ret = reset_control_deassert(sdc->rstc);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't deassert the device from reset\n");
		goto err_chan_free;
	}

	ret = clk_prepare_enable(sdc->clk);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't enable the clock\n");
		goto err_reset_assert;
	}

	if (sdc->cfg->has_mbus_clk) {
		ret = clk_prepare_enable(sdc->clk_mbus);
		if (ret) {
			dev_err(&pdev->dev, "Couldn't enable mbus clock\n");
			goto err_clk_disable;
		}
	}

	ret = devm_request_irq(&pdev->dev, sdc->irq, sun6i_dma_interrupt, 0,
			       dev_name(&pdev->dev), sdc);
	if (ret) {
		dev_err(&pdev->dev, "Cannot request IRQ\n");
		goto err_mbus_clk_disable;
	}

	ret = dma_async_device_register(&sdc->slave);
	if (ret) {
		dev_warn(&pdev->dev, "Failed to register DMA engine device\n");
		goto err_irq_disable;
	}

	ret = of_dma_controller_register(pdev->dev.of_node, sun6i_dma_of_xlate,
					 sdc);
	if (ret) {
		dev_err(&pdev->dev, "of_dma_controller_register failed\n");
		goto err_dma_unregister;
	}

	if (sdc->cfg->clock_autogate_enable)
		sdc->cfg->clock_autogate_enable(sdc);

	return 0;

err_dma_unregister:
	dma_async_device_unregister(&sdc->slave);
err_irq_disable:
	sun6i_kill_tasklet(sdc);
err_mbus_clk_disable:
	clk_disable_unprepare(sdc->clk_mbus);
err_clk_disable:
	clk_disable_unprepare(sdc->clk);
err_reset_assert:
	reset_control_assert(sdc->rstc);
err_chan_free:
	sun6i_dma_free(sdc);
	return ret;
}
static void sun6i_dma_remove(struct platform_device *pdev)
{
	struct sun6i_dma_dev *sdc = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&sdc->slave);

	sun6i_kill_tasklet(sdc);

	clk_disable_unprepare(sdc->clk_mbus);
	clk_disable_unprepare(sdc->clk);
	reset_control_assert(sdc->rstc);

	sun6i_dma_free(sdc);
}
static struct platform_driver sun6i_dma_driver = {
	.probe		= sun6i_dma_probe,
	.remove		= sun6i_dma_remove,
	.driver = {
		.name		= "sun6i-dma",
		.of_match_table	= sun6i_dma_match,
	},
};
module_platform_driver(sun6i_dma_driver);

MODULE_DESCRIPTION("Allwinner A31 DMA Controller Driver");
MODULE_AUTHOR("Sugar <shuge@allwinnertech.com>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_LICENSE("GPL");