// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015, Michael Lee <igvtee@gmail.com>
 *
 * MTK HSDMA support
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/of_dma.h>
#include <linux/reset.h>
#include <linux/of_device.h>

#include "virt-dma.h"
23 #define HSDMA_BASE_OFFSET 0x800
25 #define HSDMA_REG_TX_BASE 0x00
26 #define HSDMA_REG_TX_CNT 0x04
27 #define HSDMA_REG_TX_CTX 0x08
28 #define HSDMA_REG_TX_DTX 0x0c
29 #define HSDMA_REG_RX_BASE 0x100
30 #define HSDMA_REG_RX_CNT 0x104
31 #define HSDMA_REG_RX_CRX 0x108
32 #define HSDMA_REG_RX_DRX 0x10c
33 #define HSDMA_REG_INFO 0x200
34 #define HSDMA_REG_GLO_CFG 0x204
35 #define HSDMA_REG_RST_CFG 0x208
36 #define HSDMA_REG_DELAY_INT 0x20c
37 #define HSDMA_REG_FREEQ_THRES 0x210
38 #define HSDMA_REG_INT_STATUS 0x220
39 #define HSDMA_REG_INT_MASK 0x228
40 #define HSDMA_REG_SCH_Q01 0x280
41 #define HSDMA_REG_SCH_Q23 0x284
43 #define HSDMA_DESCS_MAX 0xfff
44 #define HSDMA_DESCS_NUM 8
45 #define HSDMA_DESCS_MASK (HSDMA_DESCS_NUM - 1)
46 #define HSDMA_NEXT_DESC(x) (((x) + 1) & HSDMA_DESCS_MASK)
49 #define HSDMA_INFO_INDEX_MASK 0xf
50 #define HSDMA_INFO_INDEX_SHIFT 24
51 #define HSDMA_INFO_BASE_MASK 0xff
52 #define HSDMA_INFO_BASE_SHIFT 16
53 #define HSDMA_INFO_RX_MASK 0xff
54 #define HSDMA_INFO_RX_SHIFT 8
55 #define HSDMA_INFO_TX_MASK 0xff
56 #define HSDMA_INFO_TX_SHIFT 0
58 /* HSDMA_REG_GLO_CFG */
59 #define HSDMA_GLO_TX_2B_OFFSET BIT(31)
60 #define HSDMA_GLO_CLK_GATE BIT(30)
61 #define HSDMA_GLO_BYTE_SWAP BIT(29)
62 #define HSDMA_GLO_MULTI_DMA BIT(10)
63 #define HSDMA_GLO_TWO_BUF BIT(9)
64 #define HSDMA_GLO_32B_DESC BIT(8)
65 #define HSDMA_GLO_BIG_ENDIAN BIT(7)
66 #define HSDMA_GLO_TX_DONE BIT(6)
67 #define HSDMA_GLO_BT_MASK 0x3
68 #define HSDMA_GLO_BT_SHIFT 4
69 #define HSDMA_GLO_RX_BUSY BIT(3)
70 #define HSDMA_GLO_RX_DMA BIT(2)
71 #define HSDMA_GLO_TX_BUSY BIT(1)
72 #define HSDMA_GLO_TX_DMA BIT(0)
74 #define HSDMA_BT_SIZE_16BYTES (0 << HSDMA_GLO_BT_SHIFT)
75 #define HSDMA_BT_SIZE_32BYTES (1 << HSDMA_GLO_BT_SHIFT)
76 #define HSDMA_BT_SIZE_64BYTES (2 << HSDMA_GLO_BT_SHIFT)
77 #define HSDMA_BT_SIZE_128BYTES (3 << HSDMA_GLO_BT_SHIFT)
79 #define HSDMA_GLO_DEFAULT (HSDMA_GLO_MULTI_DMA | \
80 HSDMA_GLO_RX_DMA | HSDMA_GLO_TX_DMA | HSDMA_BT_SIZE_32BYTES)
82 /* HSDMA_REG_RST_CFG */
83 #define HSDMA_RST_RX_SHIFT 16
84 #define HSDMA_RST_TX_SHIFT 0
86 /* HSDMA_REG_DELAY_INT */
87 #define HSDMA_DELAY_INT_EN BIT(15)
88 #define HSDMA_DELAY_PEND_OFFSET 8
89 #define HSDMA_DELAY_TIME_OFFSET 0
90 #define HSDMA_DELAY_TX_OFFSET 16
91 #define HSDMA_DELAY_RX_OFFSET 0
93 #define HSDMA_DELAY_INIT(x) (HSDMA_DELAY_INT_EN | \
94 ((x) << HSDMA_DELAY_PEND_OFFSET))
95 #define HSDMA_DELAY(x) ((HSDMA_DELAY_INIT(x) << \
96 HSDMA_DELAY_TX_OFFSET) | HSDMA_DELAY_INIT(x))
98 /* HSDMA_REG_INT_STATUS */
99 #define HSDMA_INT_DELAY_RX_COH BIT(31)
100 #define HSDMA_INT_DELAY_RX_INT BIT(30)
101 #define HSDMA_INT_DELAY_TX_COH BIT(29)
102 #define HSDMA_INT_DELAY_TX_INT BIT(28)
103 #define HSDMA_INT_RX_MASK 0x3
104 #define HSDMA_INT_RX_SHIFT 16
105 #define HSDMA_INT_RX_Q0 BIT(16)
106 #define HSDMA_INT_TX_MASK 0xf
107 #define HSDMA_INT_TX_SHIFT 0
108 #define HSDMA_INT_TX_Q0 BIT(0)
110 /* tx/rx dma desc flags */
111 #define HSDMA_PLEN_MASK 0x3fff
112 #define HSDMA_DESC_DONE BIT(31)
113 #define HSDMA_DESC_LS0 BIT(30)
114 #define HSDMA_DESC_PLEN0(_x) (((_x) & HSDMA_PLEN_MASK) << 16)
115 #define HSDMA_DESC_TAG BIT(15)
116 #define HSDMA_DESC_LS1 BIT(14)
117 #define HSDMA_DESC_PLEN1(_x) ((_x) & HSDMA_PLEN_MASK)
120 #define HSDMA_ALIGN_SIZE 3
121 /* align size 128bytes */
122 #define HSDMA_MAX_PLEN 0x3f80
131 struct mtk_hsdma_sg
{
137 struct mtk_hsdma_desc
{
138 struct virt_dma_desc vdesc
;
139 unsigned int num_sgs
;
140 struct mtk_hsdma_sg sg
[1];
143 struct mtk_hsdma_chan
{
144 struct virt_dma_chan vchan
;
146 dma_addr_t desc_addr
;
149 struct hsdma_desc
*tx_ring
;
150 struct hsdma_desc
*rx_ring
;
151 struct mtk_hsdma_desc
*desc
;
152 unsigned int next_sg
;
155 struct mtk_hsdam_engine
{
156 struct dma_device ddev
;
157 struct device_dma_parameters dma_parms
;
159 struct tasklet_struct task
;
160 volatile unsigned long chan_issued
;
162 struct mtk_hsdma_chan chan
[1];
165 static inline struct mtk_hsdam_engine
*mtk_hsdma_chan_get_dev(
166 struct mtk_hsdma_chan
*chan
)
168 return container_of(chan
->vchan
.chan
.device
, struct mtk_hsdam_engine
,
172 static inline struct mtk_hsdma_chan
*to_mtk_hsdma_chan(struct dma_chan
*c
)
174 return container_of(c
, struct mtk_hsdma_chan
, vchan
.chan
);
177 static inline struct mtk_hsdma_desc
*to_mtk_hsdma_desc(
178 struct virt_dma_desc
*vdesc
)
180 return container_of(vdesc
, struct mtk_hsdma_desc
, vdesc
);
183 static inline u32
mtk_hsdma_read(struct mtk_hsdam_engine
*hsdma
, u32 reg
)
185 return readl(hsdma
->base
+ reg
);
188 static inline void mtk_hsdma_write(struct mtk_hsdam_engine
*hsdma
,
189 unsigned int reg
, u32 val
)
191 writel(val
, hsdma
->base
+ reg
);
194 static void mtk_hsdma_reset_chan(struct mtk_hsdam_engine
*hsdma
,
195 struct mtk_hsdma_chan
*chan
)
198 chan
->rx_idx
= HSDMA_DESCS_NUM
- 1;
200 mtk_hsdma_write(hsdma
, HSDMA_REG_TX_CTX
, chan
->tx_idx
);
201 mtk_hsdma_write(hsdma
, HSDMA_REG_RX_CRX
, chan
->rx_idx
);
203 mtk_hsdma_write(hsdma
, HSDMA_REG_RST_CFG
,
204 0x1 << (chan
->id
+ HSDMA_RST_TX_SHIFT
));
205 mtk_hsdma_write(hsdma
, HSDMA_REG_RST_CFG
,
206 0x1 << (chan
->id
+ HSDMA_RST_RX_SHIFT
));
209 static void hsdma_dump_reg(struct mtk_hsdam_engine
*hsdma
)
211 dev_dbg(hsdma
->ddev
.dev
, "tbase %08x, tcnt %08x, "
212 "tctx %08x, tdtx: %08x, rbase %08x, "
213 "rcnt %08x, rctx %08x, rdtx %08x\n",
214 mtk_hsdma_read(hsdma
, HSDMA_REG_TX_BASE
),
215 mtk_hsdma_read(hsdma
, HSDMA_REG_TX_CNT
),
216 mtk_hsdma_read(hsdma
, HSDMA_REG_TX_CTX
),
217 mtk_hsdma_read(hsdma
, HSDMA_REG_TX_DTX
),
218 mtk_hsdma_read(hsdma
, HSDMA_REG_RX_BASE
),
219 mtk_hsdma_read(hsdma
, HSDMA_REG_RX_CNT
),
220 mtk_hsdma_read(hsdma
, HSDMA_REG_RX_CRX
),
221 mtk_hsdma_read(hsdma
, HSDMA_REG_RX_DRX
));
223 dev_dbg(hsdma
->ddev
.dev
, "info %08x, glo %08x, delay %08x, "
224 "intr_stat %08x, intr_mask %08x\n",
225 mtk_hsdma_read(hsdma
, HSDMA_REG_INFO
),
226 mtk_hsdma_read(hsdma
, HSDMA_REG_GLO_CFG
),
227 mtk_hsdma_read(hsdma
, HSDMA_REG_DELAY_INT
),
228 mtk_hsdma_read(hsdma
, HSDMA_REG_INT_STATUS
),
229 mtk_hsdma_read(hsdma
, HSDMA_REG_INT_MASK
));
232 static void hsdma_dump_desc(struct mtk_hsdam_engine
*hsdma
,
233 struct mtk_hsdma_chan
*chan
)
235 struct hsdma_desc
*tx_desc
;
236 struct hsdma_desc
*rx_desc
;
239 dev_dbg(hsdma
->ddev
.dev
, "tx idx: %d, rx idx: %d\n",
240 chan
->tx_idx
, chan
->rx_idx
);
242 for (i
= 0; i
< HSDMA_DESCS_NUM
; i
++) {
243 tx_desc
= &chan
->tx_ring
[i
];
244 rx_desc
= &chan
->rx_ring
[i
];
246 dev_dbg(hsdma
->ddev
.dev
, "%d tx addr0: %08x, flags %08x, "
247 "tx addr1: %08x, rx addr0 %08x, flags %08x\n",
248 i
, tx_desc
->addr0
, tx_desc
->flags
,
249 tx_desc
->addr1
, rx_desc
->addr0
, rx_desc
->flags
);
253 static void mtk_hsdma_reset(struct mtk_hsdam_engine
*hsdma
,
254 struct mtk_hsdma_chan
*chan
)
259 mtk_hsdma_write(hsdma
, HSDMA_REG_GLO_CFG
, 0);
262 mtk_hsdma_write(hsdma
, HSDMA_REG_INT_MASK
, 0);
264 /* init desc value */
265 for (i
= 0; i
< HSDMA_DESCS_NUM
; i
++) {
266 chan
->tx_ring
[i
].addr0
= 0;
267 chan
->tx_ring
[i
].flags
= HSDMA_DESC_LS0
| HSDMA_DESC_DONE
;
269 for (i
= 0; i
< HSDMA_DESCS_NUM
; i
++) {
270 chan
->rx_ring
[i
].addr0
= 0;
271 chan
->rx_ring
[i
].flags
= 0;
275 mtk_hsdma_reset_chan(hsdma
, chan
);
278 mtk_hsdma_write(hsdma
, HSDMA_REG_INT_MASK
, HSDMA_INT_RX_Q0
);
281 mtk_hsdma_write(hsdma
, HSDMA_REG_GLO_CFG
, HSDMA_GLO_DEFAULT
);
284 static int mtk_hsdma_terminate_all(struct dma_chan
*c
)
286 struct mtk_hsdma_chan
*chan
= to_mtk_hsdma_chan(c
);
287 struct mtk_hsdam_engine
*hsdma
= mtk_hsdma_chan_get_dev(chan
);
288 unsigned long timeout
;
291 spin_lock_bh(&chan
->vchan
.lock
);
293 clear_bit(chan
->id
, &hsdma
->chan_issued
);
294 vchan_get_all_descriptors(&chan
->vchan
, &head
);
295 spin_unlock_bh(&chan
->vchan
.lock
);
297 vchan_dma_desc_free_list(&chan
->vchan
, &head
);
299 /* wait dma transfer complete */
300 timeout
= jiffies
+ msecs_to_jiffies(2000);
301 while (mtk_hsdma_read(hsdma
, HSDMA_REG_GLO_CFG
) &
302 (HSDMA_GLO_RX_BUSY
| HSDMA_GLO_TX_BUSY
)) {
303 if (time_after_eq(jiffies
, timeout
)) {
304 hsdma_dump_desc(hsdma
, chan
);
305 mtk_hsdma_reset(hsdma
, chan
);
306 dev_err(hsdma
->ddev
.dev
, "timeout, reset it\n");
315 static int mtk_hsdma_start_transfer(struct mtk_hsdam_engine
*hsdma
,
316 struct mtk_hsdma_chan
*chan
)
320 struct hsdma_desc
*tx_desc
, *rx_desc
;
321 struct mtk_hsdma_sg
*sg
;
325 sg
= &chan
->desc
->sg
[0];
327 chan
->desc
->num_sgs
= DIV_ROUND_UP(len
, HSDMA_MAX_PLEN
);
331 for (i
= 0; i
< chan
->desc
->num_sgs
; i
++) {
332 tx_desc
= &chan
->tx_ring
[chan
->tx_idx
];
334 if (len
> HSDMA_MAX_PLEN
)
335 tlen
= HSDMA_MAX_PLEN
;
340 tx_desc
->addr1
= src
;
341 tx_desc
->flags
|= HSDMA_DESC_PLEN1(tlen
);
343 tx_desc
->addr0
= src
;
344 tx_desc
->flags
= HSDMA_DESC_PLEN0(tlen
);
347 chan
->tx_idx
= HSDMA_NEXT_DESC(chan
->tx_idx
);
354 tx_desc
->flags
|= HSDMA_DESC_LS0
;
356 tx_desc
->flags
|= HSDMA_DESC_LS1
;
359 rx_idx
= HSDMA_NEXT_DESC(chan
->rx_idx
);
362 for (i
= 0; i
< chan
->desc
->num_sgs
; i
++) {
363 rx_desc
= &chan
->rx_ring
[rx_idx
];
364 if (len
> HSDMA_MAX_PLEN
)
365 tlen
= HSDMA_MAX_PLEN
;
369 rx_desc
->addr0
= dst
;
370 rx_desc
->flags
= HSDMA_DESC_PLEN0(tlen
);
376 rx_idx
= HSDMA_NEXT_DESC(rx_idx
);
379 /* make sure desc and index all up to date */
381 mtk_hsdma_write(hsdma
, HSDMA_REG_TX_CTX
, chan
->tx_idx
);
386 static int gdma_next_desc(struct mtk_hsdma_chan
*chan
)
388 struct virt_dma_desc
*vdesc
;
390 vdesc
= vchan_next_desc(&chan
->vchan
);
395 chan
->desc
= to_mtk_hsdma_desc(vdesc
);
401 static void mtk_hsdma_chan_done(struct mtk_hsdam_engine
*hsdma
,
402 struct mtk_hsdma_chan
*chan
)
404 struct mtk_hsdma_desc
*desc
;
408 spin_lock_bh(&chan
->vchan
.lock
);
411 if (chan
->next_sg
== desc
->num_sgs
) {
412 list_del(&desc
->vdesc
.node
);
413 vchan_cookie_complete(&desc
->vdesc
);
414 chan_issued
= gdma_next_desc(chan
);
417 dev_dbg(hsdma
->ddev
.dev
, "no desc to complete\n");
421 set_bit(chan
->id
, &hsdma
->chan_issued
);
422 spin_unlock_bh(&chan
->vchan
.lock
);
425 static irqreturn_t
mtk_hsdma_irq(int irq
, void *devid
)
427 struct mtk_hsdam_engine
*hsdma
= devid
;
430 status
= mtk_hsdma_read(hsdma
, HSDMA_REG_INT_STATUS
);
431 if (unlikely(!status
))
434 if (likely(status
& HSDMA_INT_RX_Q0
))
435 tasklet_schedule(&hsdma
->task
);
437 dev_dbg(hsdma
->ddev
.dev
, "unhandle irq status %08x\n", status
);
438 /* clean intr bits */
439 mtk_hsdma_write(hsdma
, HSDMA_REG_INT_STATUS
, status
);
444 static void mtk_hsdma_issue_pending(struct dma_chan
*c
)
446 struct mtk_hsdma_chan
*chan
= to_mtk_hsdma_chan(c
);
447 struct mtk_hsdam_engine
*hsdma
= mtk_hsdma_chan_get_dev(chan
);
449 spin_lock_bh(&chan
->vchan
.lock
);
450 if (vchan_issue_pending(&chan
->vchan
) && !chan
->desc
) {
451 if (gdma_next_desc(chan
)) {
452 set_bit(chan
->id
, &hsdma
->chan_issued
);
453 tasklet_schedule(&hsdma
->task
);
455 dev_dbg(hsdma
->ddev
.dev
, "no desc to issue\n");
458 spin_unlock_bh(&chan
->vchan
.lock
);
461 static struct dma_async_tx_descriptor
*mtk_hsdma_prep_dma_memcpy(
462 struct dma_chan
*c
, dma_addr_t dest
, dma_addr_t src
,
463 size_t len
, unsigned long flags
)
465 struct mtk_hsdma_chan
*chan
= to_mtk_hsdma_chan(c
);
466 struct mtk_hsdma_desc
*desc
;
471 desc
= kzalloc(sizeof(*desc
), GFP_ATOMIC
);
473 dev_err(c
->device
->dev
, "alloc memcpy decs error\n");
477 desc
->sg
[0].src_addr
= src
;
478 desc
->sg
[0].dst_addr
= dest
;
479 desc
->sg
[0].len
= len
;
481 return vchan_tx_prep(&chan
->vchan
, &desc
->vdesc
, flags
);
484 static enum dma_status
mtk_hsdma_tx_status(struct dma_chan
*c
,
486 struct dma_tx_state
*state
)
488 return dma_cookie_status(c
, cookie
, state
);
491 static void mtk_hsdma_free_chan_resources(struct dma_chan
*c
)
493 vchan_free_chan_resources(to_virt_chan(c
));
496 static void mtk_hsdma_desc_free(struct virt_dma_desc
*vdesc
)
498 kfree(container_of(vdesc
, struct mtk_hsdma_desc
, vdesc
));
501 static void mtk_hsdma_tx(struct mtk_hsdam_engine
*hsdma
)
503 struct mtk_hsdma_chan
*chan
;
505 if (test_and_clear_bit(0, &hsdma
->chan_issued
)) {
506 chan
= &hsdma
->chan
[0];
508 mtk_hsdma_start_transfer(hsdma
, chan
);
510 dev_dbg(hsdma
->ddev
.dev
, "chan 0 no desc to issue\n");
514 static void mtk_hsdma_rx(struct mtk_hsdam_engine
*hsdma
)
516 struct mtk_hsdma_chan
*chan
;
517 int next_idx
, drx_idx
, cnt
;
519 chan
= &hsdma
->chan
[0];
520 next_idx
= HSDMA_NEXT_DESC(chan
->rx_idx
);
521 drx_idx
= mtk_hsdma_read(hsdma
, HSDMA_REG_RX_DRX
);
523 cnt
= (drx_idx
- next_idx
) & HSDMA_DESCS_MASK
;
527 chan
->next_sg
+= cnt
;
528 chan
->rx_idx
= (chan
->rx_idx
+ cnt
) & HSDMA_DESCS_MASK
;
532 mtk_hsdma_write(hsdma
, HSDMA_REG_RX_CRX
, chan
->rx_idx
);
534 mtk_hsdma_chan_done(hsdma
, chan
);
537 static void mtk_hsdma_tasklet(unsigned long arg
)
539 struct mtk_hsdam_engine
*hsdma
= (struct mtk_hsdam_engine
*)arg
;
545 static int mtk_hsdam_alloc_desc(struct mtk_hsdam_engine
*hsdma
,
546 struct mtk_hsdma_chan
*chan
)
550 chan
->tx_ring
= dma_alloc_coherent(hsdma
->ddev
.dev
,
551 2 * HSDMA_DESCS_NUM
*
552 sizeof(*chan
->tx_ring
),
553 &chan
->desc_addr
, GFP_ATOMIC
| __GFP_ZERO
);
557 chan
->rx_ring
= &chan
->tx_ring
[HSDMA_DESCS_NUM
];
559 /* init tx ring value */
560 for (i
= 0; i
< HSDMA_DESCS_NUM
; i
++)
561 chan
->tx_ring
[i
].flags
= HSDMA_DESC_LS0
| HSDMA_DESC_DONE
;
568 static void mtk_hsdam_free_desc(struct mtk_hsdam_engine
*hsdma
,
569 struct mtk_hsdma_chan
*chan
)
572 dma_free_coherent(hsdma
->ddev
.dev
,
573 2 * HSDMA_DESCS_NUM
* sizeof(*chan
->tx_ring
),
574 chan
->tx_ring
, chan
->desc_addr
);
575 chan
->tx_ring
= NULL
;
576 chan
->rx_ring
= NULL
;
580 static int mtk_hsdma_init(struct mtk_hsdam_engine
*hsdma
)
582 struct mtk_hsdma_chan
*chan
;
587 chan
= &hsdma
->chan
[0];
588 ret
= mtk_hsdam_alloc_desc(hsdma
, chan
);
593 mtk_hsdma_write(hsdma
, HSDMA_REG_TX_BASE
, chan
->desc_addr
);
594 mtk_hsdma_write(hsdma
, HSDMA_REG_TX_CNT
, HSDMA_DESCS_NUM
);
596 mtk_hsdma_write(hsdma
, HSDMA_REG_RX_BASE
, chan
->desc_addr
+
597 (sizeof(struct hsdma_desc
) * HSDMA_DESCS_NUM
));
598 mtk_hsdma_write(hsdma
, HSDMA_REG_RX_CNT
, HSDMA_DESCS_NUM
);
600 mtk_hsdma_reset_chan(hsdma
, chan
);
603 mtk_hsdma_write(hsdma
, HSDMA_REG_INT_MASK
, HSDMA_INT_RX_Q0
);
606 mtk_hsdma_write(hsdma
, HSDMA_REG_GLO_CFG
, HSDMA_GLO_DEFAULT
);
609 reg
= mtk_hsdma_read(hsdma
, HSDMA_REG_INFO
);
610 dev_info(hsdma
->ddev
.dev
, "rx: %d, tx: %d\n",
611 (reg
>> HSDMA_INFO_RX_SHIFT
) & HSDMA_INFO_RX_MASK
,
612 (reg
>> HSDMA_INFO_TX_SHIFT
) & HSDMA_INFO_TX_MASK
);
614 hsdma_dump_reg(hsdma
);
619 static void mtk_hsdma_uninit(struct mtk_hsdam_engine
*hsdma
)
621 struct mtk_hsdma_chan
*chan
;
624 mtk_hsdma_write(hsdma
, HSDMA_REG_GLO_CFG
, 0);
627 mtk_hsdma_write(hsdma
, HSDMA_REG_INT_MASK
, 0);
630 chan
= &hsdma
->chan
[0];
631 mtk_hsdam_free_desc(hsdma
, chan
);
634 mtk_hsdma_write(hsdma
, HSDMA_REG_TX_BASE
, 0);
635 mtk_hsdma_write(hsdma
, HSDMA_REG_TX_CNT
, 0);
637 mtk_hsdma_write(hsdma
, HSDMA_REG_RX_BASE
, 0);
638 mtk_hsdma_write(hsdma
, HSDMA_REG_RX_CNT
, 0);
640 mtk_hsdma_reset_chan(hsdma
, chan
);
643 static const struct of_device_id mtk_hsdma_of_match
[] = {
644 { .compatible
= "mediatek,mt7621-hsdma" },
648 static int mtk_hsdma_probe(struct platform_device
*pdev
)
650 const struct of_device_id
*match
;
651 struct mtk_hsdma_chan
*chan
;
652 struct mtk_hsdam_engine
*hsdma
;
653 struct dma_device
*dd
;
658 ret
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(32));
662 match
= of_match_device(mtk_hsdma_of_match
, &pdev
->dev
);
666 hsdma
= devm_kzalloc(&pdev
->dev
, sizeof(*hsdma
), GFP_KERNEL
);
670 base
= devm_platform_ioremap_resource(pdev
, 0);
672 return PTR_ERR(base
);
673 hsdma
->base
= base
+ HSDMA_BASE_OFFSET
;
674 tasklet_init(&hsdma
->task
, mtk_hsdma_tasklet
, (unsigned long)hsdma
);
676 irq
= platform_get_irq(pdev
, 0);
679 ret
= devm_request_irq(&pdev
->dev
, irq
, mtk_hsdma_irq
,
680 0, dev_name(&pdev
->dev
), hsdma
);
682 dev_err(&pdev
->dev
, "failed to request irq\n");
686 device_reset(&pdev
->dev
);
689 dma_cap_set(DMA_MEMCPY
, dd
->cap_mask
);
690 dd
->copy_align
= HSDMA_ALIGN_SIZE
;
691 dd
->device_free_chan_resources
= mtk_hsdma_free_chan_resources
;
692 dd
->device_prep_dma_memcpy
= mtk_hsdma_prep_dma_memcpy
;
693 dd
->device_terminate_all
= mtk_hsdma_terminate_all
;
694 dd
->device_tx_status
= mtk_hsdma_tx_status
;
695 dd
->device_issue_pending
= mtk_hsdma_issue_pending
;
696 dd
->dev
= &pdev
->dev
;
697 dd
->dev
->dma_parms
= &hsdma
->dma_parms
;
698 dma_set_max_seg_size(dd
->dev
, HSDMA_MAX_PLEN
);
699 INIT_LIST_HEAD(&dd
->channels
);
701 chan
= &hsdma
->chan
[0];
703 chan
->vchan
.desc_free
= mtk_hsdma_desc_free
;
704 vchan_init(&chan
->vchan
, dd
);
707 ret
= mtk_hsdma_init(hsdma
);
709 dev_err(&pdev
->dev
, "failed to alloc ring descs\n");
713 ret
= dma_async_device_register(dd
);
715 dev_err(&pdev
->dev
, "failed to register dma device\n");
719 ret
= of_dma_controller_register(pdev
->dev
.of_node
,
720 of_dma_xlate_by_chan_id
, hsdma
);
722 dev_err(&pdev
->dev
, "failed to register of dma controller\n");
726 platform_set_drvdata(pdev
, hsdma
);
731 dma_async_device_unregister(dd
);
735 static int mtk_hsdma_remove(struct platform_device
*pdev
)
737 struct mtk_hsdam_engine
*hsdma
= platform_get_drvdata(pdev
);
739 mtk_hsdma_uninit(hsdma
);
741 of_dma_controller_free(pdev
->dev
.of_node
);
742 dma_async_device_unregister(&hsdma
->ddev
);
747 static struct platform_driver mtk_hsdma_driver
= {
748 .probe
= mtk_hsdma_probe
,
749 .remove
= mtk_hsdma_remove
,
751 .name
= "hsdma-mt7621",
752 .of_match_table
= mtk_hsdma_of_match
,
755 module_platform_driver(mtk_hsdma_driver
);
757 MODULE_AUTHOR("Michael Lee <igvtee@gmail.com>");
758 MODULE_DESCRIPTION("MTK HSDMA driver");
759 MODULE_LICENSE("GPL v2");