// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015, Michael Lee <igvtee@gmail.com>
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/of_dma.h>
#include <linux/reset.h>
#include <linux/of_device.h>

#include "virt-dma.h"

#define HSDMA_BASE_OFFSET	0x800

#define HSDMA_REG_TX_BASE	0x00
#define HSDMA_REG_TX_CNT	0x04
#define HSDMA_REG_TX_CTX	0x08
#define HSDMA_REG_TX_DTX	0x0c
#define HSDMA_REG_RX_BASE	0x100
#define HSDMA_REG_RX_CNT	0x104
#define HSDMA_REG_RX_CRX	0x108
#define HSDMA_REG_RX_DRX	0x10c
#define HSDMA_REG_INFO		0x200
#define HSDMA_REG_GLO_CFG	0x204
#define HSDMA_REG_RST_CFG	0x208
#define HSDMA_REG_DELAY_INT	0x20c
#define HSDMA_REG_FREEQ_THRES	0x210
#define HSDMA_REG_INT_STATUS	0x220
#define HSDMA_REG_INT_MASK	0x228
#define HSDMA_REG_SCH_Q01	0x280
#define HSDMA_REG_SCH_Q23	0x284

#define HSDMA_DESCS_MAX		0xfff
#define HSDMA_DESCS_NUM		8
#define HSDMA_DESCS_MASK	(HSDMA_DESCS_NUM - 1)
#define HSDMA_NEXT_DESC(x)	(((x) + 1) & HSDMA_DESCS_MASK)
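
/*
 * HSDMA_DESCS_NUM is a power of two, so HSDMA_NEXT_DESC() can wrap a
 * ring index with a plain AND of the mask: with 8 descriptors,
 * HSDMA_NEXT_DESC(7) == 0. The same mask is reused in mtk_hsdma_rx()
 * below to count completed descriptors modulo the ring size.
 */
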
/* HSDMA_REG_INFO */
#define HSDMA_INFO_INDEX_MASK	0xf
#define HSDMA_INFO_INDEX_SHIFT	24
#define HSDMA_INFO_BASE_MASK	0xff
#define HSDMA_INFO_BASE_SHIFT	16
#define HSDMA_INFO_RX_MASK	0xff
#define HSDMA_INFO_RX_SHIFT	8
#define HSDMA_INFO_TX_MASK	0xff
#define HSDMA_INFO_TX_SHIFT	0

/* HSDMA_REG_GLO_CFG */
#define HSDMA_GLO_TX_2B_OFFSET	BIT(31)
#define HSDMA_GLO_CLK_GATE	BIT(30)
#define HSDMA_GLO_BYTE_SWAP	BIT(29)
#define HSDMA_GLO_MULTI_DMA	BIT(10)
#define HSDMA_GLO_TWO_BUF	BIT(9)
#define HSDMA_GLO_32B_DESC	BIT(8)
#define HSDMA_GLO_BIG_ENDIAN	BIT(7)
#define HSDMA_GLO_TX_DONE	BIT(6)
#define HSDMA_GLO_BT_MASK	0x3
#define HSDMA_GLO_BT_SHIFT	4
#define HSDMA_GLO_RX_BUSY	BIT(3)
#define HSDMA_GLO_RX_DMA	BIT(2)
#define HSDMA_GLO_TX_BUSY	BIT(1)
#define HSDMA_GLO_TX_DMA	BIT(0)

#define HSDMA_BT_SIZE_16BYTES	(0 << HSDMA_GLO_BT_SHIFT)
#define HSDMA_BT_SIZE_32BYTES	(1 << HSDMA_GLO_BT_SHIFT)
#define HSDMA_BT_SIZE_64BYTES	(2 << HSDMA_GLO_BT_SHIFT)
#define HSDMA_BT_SIZE_128BYTES	(3 << HSDMA_GLO_BT_SHIFT)

#define HSDMA_GLO_DEFAULT	(HSDMA_GLO_MULTI_DMA | \
		HSDMA_GLO_RX_DMA | HSDMA_GLO_TX_DMA | HSDMA_BT_SIZE_32BYTES)

/* HSDMA_REG_RST_CFG */
#define HSDMA_RST_RX_SHIFT	16
#define HSDMA_RST_TX_SHIFT	0

/* HSDMA_REG_DELAY_INT */
#define HSDMA_DELAY_INT_EN	BIT(15)
#define HSDMA_DELAY_PEND_OFFSET	8
#define HSDMA_DELAY_TIME_OFFSET	0
#define HSDMA_DELAY_TX_OFFSET	16
#define HSDMA_DELAY_RX_OFFSET	0

#define HSDMA_DELAY_INIT(x)	(HSDMA_DELAY_INT_EN | \
		((x) << HSDMA_DELAY_PEND_OFFSET))
#define HSDMA_DELAY(x)		((HSDMA_DELAY_INIT(x) << \
		HSDMA_DELAY_TX_OFFSET) | HSDMA_DELAY_INIT(x))
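
/*
 * HSDMA_DELAY(x) builds one DELAY_INT value with identical
 * interrupt-coalescing settings for both directions: the low half
 * covers RX (offset 0), the high half TX (offset 16), each carrying the
 * enable bit and a pending-descriptor threshold. The delay-time field
 * is left at zero here; its exact units are hardware-defined and not
 * documented in this driver.
 */
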
/* HSDMA_REG_INT_STATUS */
#define HSDMA_INT_DELAY_RX_COH	BIT(31)
#define HSDMA_INT_DELAY_RX_INT	BIT(30)
#define HSDMA_INT_DELAY_TX_COH	BIT(29)
#define HSDMA_INT_DELAY_TX_INT	BIT(28)
#define HSDMA_INT_RX_MASK	0x3
#define HSDMA_INT_RX_SHIFT	16
#define HSDMA_INT_RX_Q0		BIT(16)
#define HSDMA_INT_TX_MASK	0xf
#define HSDMA_INT_TX_SHIFT	0
#define HSDMA_INT_TX_Q0		BIT(0)

/* tx/rx dma desc flags */
#define HSDMA_PLEN_MASK		0x3fff
#define HSDMA_DESC_DONE		BIT(31)
#define HSDMA_DESC_LS0		BIT(30)
#define HSDMA_DESC_PLEN0(_x)	(((_x) & HSDMA_PLEN_MASK) << 16)
#define HSDMA_DESC_TAG		BIT(15)
#define HSDMA_DESC_LS1		BIT(14)
#define HSDMA_DESC_PLEN1(_x)	((_x) & HSDMA_PLEN_MASK)

#define HSDMA_ALIGN_SIZE	3
/* max transfer length per descriptor buffer, kept 128-byte aligned */
#define HSDMA_MAX_PLEN		0x3f80
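
/*
 * The PLEN fields are 14 bits wide (HSDMA_PLEN_MASK is 0x3fff), so a
 * single buffer can carry at most 0x3fff bytes; 0x3f80 is the largest
 * 128-byte-aligned length that still fits, and it is also the
 * max_seg_size advertised to the dmaengine core in the probe path.
 */
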
/* hardware descriptor; the trailing word is assumed padding to a 16-byte stride */
struct hsdma_desc {
	u32 addr0;
	u32 flags;
	u32 addr1;
	u32 unused;		/* assumed pad to 16 bytes */
};

struct mtk_hsdma_sg {
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	u32 len;
};

struct mtk_hsdma_desc {
	struct virt_dma_desc vdesc;
	unsigned int num_sgs;
	struct mtk_hsdma_sg sg[1];
};

struct mtk_hsdma_chan {
	struct virt_dma_chan vchan;
	unsigned int id;
	dma_addr_t desc_addr;
	int tx_idx;
	int rx_idx;
	struct hsdma_desc *tx_ring;
	struct hsdma_desc *rx_ring;
	struct mtk_hsdma_desc *desc;
	unsigned int next_sg;
};

struct mtk_hsdam_engine {
	struct dma_device ddev;
	struct device_dma_parameters dma_parms;
	void __iomem *base;
	struct tasklet_struct task;
	volatile unsigned long chan_issued;

	struct mtk_hsdma_chan chan[1];
};
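
/*
 * Only a single DMA channel (chan[0]) is instantiated; chan_issued is a
 * bitmask of channels with work pending, consumed by the tasklet.
 */
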
static inline struct mtk_hsdam_engine *mtk_hsdma_chan_get_dev(
		struct mtk_hsdma_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct mtk_hsdam_engine,
			    ddev);
}

static inline struct mtk_hsdma_chan *to_mtk_hsdma_chan(struct dma_chan *c)
{
	return container_of(c, struct mtk_hsdma_chan, vchan.chan);
}

static inline struct mtk_hsdma_desc *to_mtk_hsdma_desc(
		struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct mtk_hsdma_desc, vdesc);
}

static inline u32 mtk_hsdma_read(struct mtk_hsdam_engine *hsdma, u32 reg)
{
	return readl(hsdma->base + reg);
}

static inline void mtk_hsdma_write(struct mtk_hsdam_engine *hsdma,
				   unsigned int reg, u32 val)
{
	writel(val, hsdma->base + reg);
}

static void mtk_hsdma_reset_chan(struct mtk_hsdam_engine *hsdma,
				 struct mtk_hsdma_chan *chan)
{
	chan->tx_idx = 0;
	chan->rx_idx = HSDMA_DESCS_NUM - 1;

	mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx);
	mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx);

	mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG,
			0x1 << (chan->id + HSDMA_RST_TX_SHIFT));
	mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG,
			0x1 << (chan->id + HSDMA_RST_RX_SHIFT));
}
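
/*
 * Index conventions, as used by mtk_hsdma_rx() below: CTX is the TX
 * ring slot the CPU will fill next, while CRX appears to be the last RX
 * slot the CPU has released to the hardware, hence the reset value of
 * HSDMA_DESCS_NUM - 1 (one behind slot 0, modulo the ring size).
 */
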
static void hsdma_dump_reg(struct mtk_hsdam_engine *hsdma)
{
	dev_dbg(hsdma->ddev.dev,
		"tbase %08x, tcnt %08x, tctx %08x, tdtx %08x, rbase %08x, rcnt %08x, rctx %08x, rdtx %08x\n",
		mtk_hsdma_read(hsdma, HSDMA_REG_TX_BASE),
		mtk_hsdma_read(hsdma, HSDMA_REG_TX_CNT),
		mtk_hsdma_read(hsdma, HSDMA_REG_TX_CTX),
		mtk_hsdma_read(hsdma, HSDMA_REG_TX_DTX),
		mtk_hsdma_read(hsdma, HSDMA_REG_RX_BASE),
		mtk_hsdma_read(hsdma, HSDMA_REG_RX_CNT),
		mtk_hsdma_read(hsdma, HSDMA_REG_RX_CRX),
		mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX));

	dev_dbg(hsdma->ddev.dev,
		"info %08x, glo %08x, delay %08x, intr_stat %08x, intr_mask %08x\n",
		mtk_hsdma_read(hsdma, HSDMA_REG_INFO),
		mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG),
		mtk_hsdma_read(hsdma, HSDMA_REG_DELAY_INT),
		mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS),
		mtk_hsdma_read(hsdma, HSDMA_REG_INT_MASK));
}

static void hsdma_dump_desc(struct mtk_hsdam_engine *hsdma,
			    struct mtk_hsdma_chan *chan)
{
	struct hsdma_desc *tx_desc;
	struct hsdma_desc *rx_desc;
	int i;

	dev_dbg(hsdma->ddev.dev, "tx idx: %d, rx idx: %d\n",
		chan->tx_idx, chan->rx_idx);

	for (i = 0; i < HSDMA_DESCS_NUM; i++) {
		tx_desc = &chan->tx_ring[i];
		rx_desc = &chan->rx_ring[i];

		dev_dbg(hsdma->ddev.dev,
			"%d tx addr0: %08x, flags %08x, tx addr1: %08x, rx addr0 %08x, flags %08x\n",
			i, tx_desc->addr0, tx_desc->flags,
			tx_desc->addr1, rx_desc->addr0, rx_desc->flags);
	}
}

static void mtk_hsdma_reset(struct mtk_hsdam_engine *hsdma,
			    struct mtk_hsdma_chan *chan)
{
	int i;

	/* disable dma */
	mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0);

	/* disable intr */
	mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0);

	/* init desc value */
	for (i = 0; i < HSDMA_DESCS_NUM; i++) {
		chan->tx_ring[i].addr0 = 0;
		chan->tx_ring[i].flags = HSDMA_DESC_LS0 | HSDMA_DESC_DONE;
	}
	for (i = 0; i < HSDMA_DESCS_NUM; i++) {
		chan->rx_ring[i].addr0 = 0;
		chan->rx_ring[i].flags = 0;
	}

	/* reset */
	mtk_hsdma_reset_chan(hsdma, chan);

	/* enable intr */
	mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0);

	/* enable dma */
	mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT);
}

static int mtk_hsdma_terminate_all(struct dma_chan *c)
{
	struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
	struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan);
	unsigned long timeout;
	LIST_HEAD(head);

	spin_lock_bh(&chan->vchan.lock);
	chan->desc = NULL;
	clear_bit(chan->id, &hsdma->chan_issued);
	vchan_get_all_descriptors(&chan->vchan, &head);
	spin_unlock_bh(&chan->vchan.lock);

	vchan_dma_desc_free_list(&chan->vchan, &head);

	/* wait dma transfer complete */
	timeout = jiffies + msecs_to_jiffies(2000);
	while (mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG) &
	       (HSDMA_GLO_RX_BUSY | HSDMA_GLO_TX_BUSY)) {
		if (time_after_eq(jiffies, timeout)) {
			hsdma_dump_desc(hsdma, chan);
			mtk_hsdma_reset(hsdma, chan);
			dev_err(hsdma->ddev.dev, "timeout, reset it\n");
			break;
		}
		cpu_relax();
	}

	return 0;
}

static int mtk_hsdma_start_transfer(struct mtk_hsdam_engine *hsdma,
				    struct mtk_hsdma_chan *chan)
{
	dma_addr_t src, dst;
	size_t len, tlen;
	struct hsdma_desc *tx_desc, *rx_desc;
	struct mtk_hsdma_sg *sg;
	unsigned int i;
	int rx_idx;

	sg = &chan->desc->sg[0];
	len = sg->len;
	chan->desc->num_sgs = DIV_ROUND_UP(len, HSDMA_MAX_PLEN);

	/* tx desc */
	src = sg->src_addr;
	for (i = 0; i < chan->desc->num_sgs; i++) {
		tx_desc = &chan->tx_ring[chan->tx_idx];

		if (len > HSDMA_MAX_PLEN)
			tlen = HSDMA_MAX_PLEN;
		else
			tlen = len;

		if (i & 0x1) {
			tx_desc->addr1 = src;
			tx_desc->flags |= HSDMA_DESC_PLEN1(tlen);
		} else {
			tx_desc->addr0 = src;
			tx_desc->flags = HSDMA_DESC_PLEN0(tlen);

			/* update index */
			chan->tx_idx = HSDMA_NEXT_DESC(chan->tx_idx);
		}

		src += tlen;
		len -= tlen;
	}
	if (i & 0x1)
		tx_desc->flags |= HSDMA_DESC_LS0;
	else
		tx_desc->flags |= HSDMA_DESC_LS1;

	/* rx desc */
	rx_idx = HSDMA_NEXT_DESC(chan->rx_idx);
	len = sg->len;
	dst = sg->dst_addr;
	for (i = 0; i < chan->desc->num_sgs; i++) {
		rx_desc = &chan->rx_ring[rx_idx];
		if (len > HSDMA_MAX_PLEN)
			tlen = HSDMA_MAX_PLEN;
		else
			tlen = len;

		rx_desc->addr0 = dst;
		rx_desc->flags = HSDMA_DESC_PLEN0(tlen);

		dst += tlen;
		len -= tlen;

		/* update index */
		rx_idx = HSDMA_NEXT_DESC(rx_idx);
	}

	/* make sure desc and index all up to date */
	wmb();
	mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx);

	return 0;
}
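
/*
 * Each hardware descriptor carries up to two buffers (addr0/addr1); the
 * transfer is split into HSDMA_MAX_PLEN chunks that alternate between
 * the two slots, and LS0/LS1 marks which slot holds the final chunk.
 * The wmb() orders the descriptor writes against the CTX doorbell write
 * that starts the engine.
 */
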
static int gdma_next_desc(struct mtk_hsdma_chan *chan)
{
	struct virt_dma_desc *vdesc;

	vdesc = vchan_next_desc(&chan->vchan);
	if (!vdesc) {
		chan->desc = NULL;
		return 0;
	}
	chan->desc = to_mtk_hsdma_desc(vdesc);
	chan->next_sg = 0;

	return 1;
}

static void mtk_hsdma_chan_done(struct mtk_hsdam_engine *hsdma,
				struct mtk_hsdma_chan *chan)
{
	struct mtk_hsdma_desc *desc;
	int chan_issued;

	chan_issued = 0;
	spin_lock_bh(&chan->vchan.lock);
	desc = chan->desc;
	if (desc) {
		if (chan->next_sg == desc->num_sgs) {
			list_del(&desc->vdesc.node);
			vchan_cookie_complete(&desc->vdesc);
			chan_issued = gdma_next_desc(chan);
		}
	} else {
		dev_dbg(hsdma->ddev.dev, "no desc to complete\n");
	}

	if (chan_issued)
		set_bit(chan->id, &hsdma->chan_issued);
	spin_unlock_bh(&chan->vchan.lock);
}

static irqreturn_t mtk_hsdma_irq(int irq, void *devid)
{
	struct mtk_hsdam_engine *hsdma = devid;
	u32 status;

	status = mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS);
	if (unlikely(!status))
		return IRQ_NONE;

	if (likely(status & HSDMA_INT_RX_Q0))
		tasklet_schedule(&hsdma->task);
	else
		dev_dbg(hsdma->ddev.dev, "unhandled irq status %08x\n",
			status);

	/* clean intr bits */
	mtk_hsdma_write(hsdma, HSDMA_REG_INT_STATUS, status);

	return IRQ_HANDLED;
}
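
/*
 * Writing the raw status value back to HSDMA_REG_INT_STATUS acks the
 * interrupt; the register is presumably write-one-to-clear, which is
 * the usual convention for this kind of status register.
 */
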
static void mtk_hsdma_issue_pending(struct dma_chan *c)
{
	struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
	struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan);

	spin_lock_bh(&chan->vchan.lock);
	if (vchan_issue_pending(&chan->vchan) && !chan->desc) {
		if (gdma_next_desc(chan)) {
			set_bit(chan->id, &hsdma->chan_issued);
			tasklet_schedule(&hsdma->task);
		} else {
			dev_dbg(hsdma->ddev.dev, "no desc to issue\n");
		}
	}
	spin_unlock_bh(&chan->vchan.lock);
}

static struct dma_async_tx_descriptor *mtk_hsdma_prep_dma_memcpy(
		struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
	struct mtk_hsdma_desc *desc;

	if (len <= 0)
		return NULL;

	desc = kzalloc(sizeof(*desc), GFP_ATOMIC);
	if (!desc) {
		dev_err(c->device->dev, "alloc memcpy desc error\n");
		return NULL;
	}

	desc->sg[0].src_addr = src;
	desc->sg[0].dst_addr = dest;
	desc->sg[0].len = len;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
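
/*
 * A minimal sketch of how a dmaengine client would drive this memcpy
 * channel (standard dmaengine API, not part of this driver); error
 * handling is omitted and the dma_addr_t buffers are assumed to be
 * already mapped:
 *
 *	struct dma_chan *ch = dma_request_chan(dev, "memcpy");
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_dma_memcpy(ch, dst, src, len,
 *				       DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(ch);
 *	dma_sync_wait(ch, cookie);
 *	dma_release_channel(ch);
 */
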
static enum dma_status mtk_hsdma_tx_status(struct dma_chan *c,
					   dma_cookie_t cookie,
					   struct dma_tx_state *state)
{
	return dma_cookie_status(c, cookie, state);
}

static void mtk_hsdma_free_chan_resources(struct dma_chan *c)
{
	vchan_free_chan_resources(to_virt_chan(c));
}

static void mtk_hsdma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct mtk_hsdma_desc, vdesc));
}

static void mtk_hsdma_tx(struct mtk_hsdam_engine *hsdma)
{
	struct mtk_hsdma_chan *chan;

	if (test_and_clear_bit(0, &hsdma->chan_issued)) {
		chan = &hsdma->chan[0];
		if (chan->desc)
			mtk_hsdma_start_transfer(hsdma, chan);
		else
			dev_dbg(hsdma->ddev.dev, "chan 0 no desc to issue\n");
	}
}

static void mtk_hsdma_rx(struct mtk_hsdam_engine *hsdma)
{
	struct mtk_hsdma_chan *chan;
	int next_idx, drx_idx, cnt;

	chan = &hsdma->chan[0];
	next_idx = HSDMA_NEXT_DESC(chan->rx_idx);
	drx_idx = mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX);

	cnt = (drx_idx - next_idx) & HSDMA_DESCS_MASK;
	if (!cnt)
		return;

	chan->next_sg += cnt;
	chan->rx_idx = (chan->rx_idx + cnt) & HSDMA_DESCS_MASK;

	/* update rx crx */
	wmb();
	mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx);

	mtk_hsdma_chan_done(hsdma, chan);
}
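
/*
 * DRX is the hardware's RX write-back index; its distance from the
 * CPU's next expected slot, taken modulo the ring size via
 * HSDMA_DESCS_MASK, gives the number of newly completed descriptors.
 * rx_idx then advances by that count and is published back through CRX.
 */
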
static void mtk_hsdma_tasklet(unsigned long arg)
{
	struct mtk_hsdam_engine *hsdma = (struct mtk_hsdam_engine *)arg;

	mtk_hsdma_rx(hsdma);
	mtk_hsdma_tx(hsdma);
}

static int mtk_hsdam_alloc_desc(struct mtk_hsdam_engine *hsdma,
				struct mtk_hsdma_chan *chan)
{
	int i;

	chan->tx_ring = dma_alloc_coherent(hsdma->ddev.dev,
					   2 * HSDMA_DESCS_NUM *
					   sizeof(*chan->tx_ring),
					   &chan->desc_addr,
					   GFP_ATOMIC | __GFP_ZERO);
	if (!chan->tx_ring)
		return -ENOMEM;

	chan->rx_ring = &chan->tx_ring[HSDMA_DESCS_NUM];

	/* init tx ring value */
	for (i = 0; i < HSDMA_DESCS_NUM; i++)
		chan->tx_ring[i].flags = HSDMA_DESC_LS0 | HSDMA_DESC_DONE;

	return 0;
}
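
/*
 * The rx ring is the second half of the same coherent allocation, which
 * is why mtk_hsdma_init() programs HSDMA_REG_RX_BASE as desc_addr plus
 * HSDMA_DESCS_NUM descriptors' worth of offset.
 */
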
static void mtk_hsdam_free_desc(struct mtk_hsdam_engine *hsdma,
				struct mtk_hsdma_chan *chan)
{
	if (chan->tx_ring) {
		dma_free_coherent(hsdma->ddev.dev,
				  2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
				  chan->tx_ring, chan->desc_addr);
		chan->tx_ring = NULL;
		chan->rx_ring = NULL;
	}
}

static int mtk_hsdma_init(struct mtk_hsdam_engine *hsdma)
{
	struct mtk_hsdma_chan *chan;
	int ret;
	u32 reg;

	/* init desc */
	chan = &hsdma->chan[0];
	ret = mtk_hsdam_alloc_desc(hsdma, chan);
	if (ret)
		return ret;

	/* tx */
	mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, chan->desc_addr);
	mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, HSDMA_DESCS_NUM);
	/* rx */
	mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, chan->desc_addr +
			(sizeof(struct hsdma_desc) * HSDMA_DESCS_NUM));
	mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, HSDMA_DESCS_NUM);
	/* reset */
	mtk_hsdma_reset_chan(hsdma, chan);

	/* enable rx intr */
	mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0);

	/* enable dma */
	mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT);

	/* hardware info */
	reg = mtk_hsdma_read(hsdma, HSDMA_REG_INFO);
	dev_info(hsdma->ddev.dev, "rx: %d, tx: %d\n",
		 (reg >> HSDMA_INFO_RX_SHIFT) & HSDMA_INFO_RX_MASK,
		 (reg >> HSDMA_INFO_TX_SHIFT) & HSDMA_INFO_TX_MASK);

	hsdma_dump_reg(hsdma);

	return 0;
}

static void mtk_hsdma_uninit(struct mtk_hsdam_engine *hsdma)
{
	struct mtk_hsdma_chan *chan;

	/* disable dma */
	mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0);

	/* disable intr */
	mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0);

	/* free desc */
	chan = &hsdma->chan[0];
	mtk_hsdam_free_desc(hsdma, chan);

	/* tx */
	mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, 0);
	mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, 0);
	/* rx */
	mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, 0);
	mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, 0);
	/* reset */
	mtk_hsdma_reset_chan(hsdma, chan);
}

static const struct of_device_id mtk_hsdma_of_match[] = {
	{ .compatible = "mediatek,mt7621-hsdma" },
	{ },
};

static int mtk_hsdma_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct mtk_hsdma_chan *chan;
	struct mtk_hsdam_engine *hsdma;
	struct dma_device *dd;
	void __iomem *base;
	int ret;
	int irq;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	match = of_match_device(mtk_hsdma_of_match, &pdev->dev);
	if (!match)
		return -EINVAL;

	hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL);
	if (!hsdma)
		return -ENOMEM;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);
	hsdma->base = base + HSDMA_BASE_OFFSET;
	tasklet_init(&hsdma->task, mtk_hsdma_tasklet, (unsigned long)hsdma);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
	ret = devm_request_irq(&pdev->dev, irq, mtk_hsdma_irq,
			       0, dev_name(&pdev->dev), hsdma);
	if (ret) {
		dev_err(&pdev->dev, "failed to request irq\n");
		return ret;
	}

	device_reset(&pdev->dev);

	dd = &hsdma->ddev;
	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
	dd->copy_align = HSDMA_ALIGN_SIZE;
	dd->device_free_chan_resources = mtk_hsdma_free_chan_resources;
	dd->device_prep_dma_memcpy = mtk_hsdma_prep_dma_memcpy;
	dd->device_terminate_all = mtk_hsdma_terminate_all;
	dd->device_tx_status = mtk_hsdma_tx_status;
	dd->device_issue_pending = mtk_hsdma_issue_pending;
	dd->dev = &pdev->dev;
	dd->dev->dma_parms = &hsdma->dma_parms;
	dma_set_max_seg_size(dd->dev, HSDMA_MAX_PLEN);
	INIT_LIST_HEAD(&dd->channels);

	chan = &hsdma->chan[0];
	chan->id = 0;
	chan->vchan.desc_free = mtk_hsdma_desc_free;
	vchan_init(&chan->vchan, dd);

	/* init hardware */
	ret = mtk_hsdma_init(hsdma);
	if (ret) {
		dev_err(&pdev->dev, "failed to alloc ring descs\n");
		return ret;
	}

	ret = dma_async_device_register(dd);
	if (ret) {
		dev_err(&pdev->dev, "failed to register dma device\n");
		return ret;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 of_dma_xlate_by_chan_id, hsdma);
	if (ret) {
		dev_err(&pdev->dev, "failed to register of dma controller\n");
		goto err_unregister;
	}

	platform_set_drvdata(pdev, hsdma);

	return 0;

err_unregister:
	dma_async_device_unregister(dd);
	return ret;
}

static int mtk_hsdma_remove(struct platform_device *pdev)
{
	struct mtk_hsdam_engine *hsdma = platform_get_drvdata(pdev);

	mtk_hsdma_uninit(hsdma);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&hsdma->ddev);

	return 0;
}

static struct platform_driver mtk_hsdma_driver = {
	.probe = mtk_hsdma_probe,
	.remove = mtk_hsdma_remove,
	.driver = {
		.name = "hsdma-mt7621",
		.of_match_table = mtk_hsdma_of_match,
	},
};
module_platform_driver(mtk_hsdma_driver);

MODULE_AUTHOR("Michael Lee <igvtee@gmail.com>");
MODULE_DESCRIPTION("MTK HSDMA driver");
MODULE_LICENSE("GPL v2");