// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018-2019 MediaTek Inc.

/*
 * Driver for MediaTek Command-Queue DMA Controller
 *
 * Author: Shun-Chih Yu <shun-chih.yu@mediatek.com>
 */
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/refcount.h>
#include <linux/slab.h>

#include "../virt-dma.h"
#define MTK_CQDMA_USEC_POLL		10
#define MTK_CQDMA_TIMEOUT_POLL		1000
#define MTK_CQDMA_DMA_BUSWIDTHS		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
#define MTK_CQDMA_ALIGN_SIZE		1
/* The default number of virtual channels */
#define MTK_CQDMA_NR_VCHANS		32

/* The default number of physical channels */
#define MTK_CQDMA_NR_PCHANS		3
/* Registers for underlying dma manipulation */
#define MTK_CQDMA_INT_FLAG		0x0
#define MTK_CQDMA_INT_EN		0x4
#define MTK_CQDMA_EN			0x8
#define MTK_CQDMA_RESET			0xc
#define MTK_CQDMA_FLUSH			0x14
#define MTK_CQDMA_SRC			0x1c
#define MTK_CQDMA_DST			0x20
#define MTK_CQDMA_LEN1			0x24
#define MTK_CQDMA_LEN2			0x28
#define MTK_CQDMA_SRC2			0x60
#define MTK_CQDMA_DST2			0x64
/* Register settings */
#define MTK_CQDMA_EN_BIT		BIT(0)
#define MTK_CQDMA_INT_FLAG_BIT		BIT(0)
#define MTK_CQDMA_INT_EN_BIT		BIT(0)
#define MTK_CQDMA_FLUSH_BIT		BIT(0)

#define MTK_CQDMA_WARM_RST_BIT		BIT(0)
#define MTK_CQDMA_HARD_RST_BIT		BIT(1)

#define MTK_CQDMA_MAX_LEN		GENMASK(27, 0)
#define MTK_CQDMA_ADDR_LIMIT		GENMASK(31, 0)
#define MTK_CQDMA_ADDR2_SHFIT		(32)
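/*
 * Illustration (added comment, not from the original source): with
 * CONFIG_ARCH_DMA_ADDR_T_64BIT, a 64-bit dma_addr_t is split across the
 * SRC/SRC2 (or DST/DST2) register pair. The low 32 bits are masked with
 * MTK_CQDMA_ADDR_LIMIT and the remainder shifted down by
 * MTK_CQDMA_ADDR2_SHFIT. For example, with addr = 0x1_2345_6789:
 *
 *	lo = addr & MTK_CQDMA_ADDR_LIMIT;	// 0x23456789 -> MTK_CQDMA_SRC
 *	hi = addr >> MTK_CQDMA_ADDR2_SHFIT;	// 0x1        -> MTK_CQDMA_SRC2
 */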
/**
 * struct mtk_cqdma_vdesc - The struct holding info describing virtual
 *                          descriptor (CVD)
 * @vd:                     An instance for struct virt_dma_desc
 * @len:                    The total data size device wants to move
 * @residue:                The remaining data size device will move
 * @dest:                   The destination address device wants to move to
 * @src:                    The source address device wants to move from
 * @ch:                     The pointer to the corresponding dma channel
 * @node:                   The list_head struct to build link-list for VDs
 * @parent:                 The pointer to the parent CVD
 */
struct mtk_cqdma_vdesc {
	struct virt_dma_desc vd;
	size_t len;
	size_t residue;
	dma_addr_t dest;
	dma_addr_t src;
	struct dma_chan *ch;

	struct list_head node;
	struct mtk_cqdma_vdesc *parent;
};
/**
 * struct mtk_cqdma_pchan - The struct holding info describing physical
 *                          channel (PC)
 * @queue:                  Queue for the PDs issued to this PC
 * @base:                   The mapped register I/O base of this PC
 * @irq:                    The IRQ that this PC is using
 * @refcnt:                 Track how many VCs are using this PC
 * @tasklet:                Tasklet for this PC
 * @lock:                   Lock protecting against concurrent access to
 *                          the PC by multiple VCs
 */
struct mtk_cqdma_pchan {
	struct list_head queue;
	void __iomem *base;
	u32 irq;

	refcount_t refcnt;

	struct tasklet_struct tasklet;

	/* lock to protect PC */
	spinlock_t lock;
};
/**
 * struct mtk_cqdma_vchan - The struct holding info describing virtual
 *                          channel (VC)
 * @vc:                     An instance for struct virt_dma_chan
 * @pc:                     The pointer to the underlying PC
 * @issue_completion:       Completion used to wait until all issued
 *                          descriptors have completed
 * @issue_synchronize:      Bool indicating channel synchronization starts
 */
struct mtk_cqdma_vchan {
	struct virt_dma_chan vc;
	struct mtk_cqdma_pchan *pc;
	struct completion issue_completion;
	bool issue_synchronize;
};
/**
 * struct mtk_cqdma_device - The struct holding info describing CQDMA
 *                           device
 * @ddev:                    An instance for struct dma_device
 * @clk:                     The clock used by the device
 * @dma_requests:            The number of VCs the device supports
 * @dma_channels:            The number of PCs the device supports
 * @vc:                      The pointer to all available VCs
 * @pc:                      The pointer to all the underlying PCs
 */
struct mtk_cqdma_device {
	struct dma_device ddev;
	struct clk *clk;

	u32 dma_requests;
	u32 dma_channels;
	struct mtk_cqdma_vchan *vc;
	struct mtk_cqdma_pchan **pc;
};
static struct mtk_cqdma_device *to_cqdma_dev(struct dma_chan *chan)
{
	return container_of(chan->device, struct mtk_cqdma_device, ddev);
}

static struct mtk_cqdma_vchan *to_cqdma_vchan(struct dma_chan *chan)
{
	return container_of(chan, struct mtk_cqdma_vchan, vc.chan);
}

static struct mtk_cqdma_vdesc *to_cqdma_vdesc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct mtk_cqdma_vdesc, vd);
}

static struct device *cqdma2dev(struct mtk_cqdma_device *cqdma)
{
	return cqdma->ddev.dev;
}
static u32 mtk_dma_read(struct mtk_cqdma_pchan *pc, u32 reg)
{
	return readl(pc->base + reg);
}

static void mtk_dma_write(struct mtk_cqdma_pchan *pc, u32 reg, u32 val)
{
	writel_relaxed(val, pc->base + reg);
}

static void mtk_dma_rmw(struct mtk_cqdma_pchan *pc, u32 reg,
			u32 mask, u32 set)
{
	u32 val;

	val = mtk_dma_read(pc, reg);
	val &= ~mask;
	val |= set;
	mtk_dma_write(pc, reg, val);
}

static void mtk_dma_set(struct mtk_cqdma_pchan *pc, u32 reg, u32 val)
{
	mtk_dma_rmw(pc, reg, 0, val);
}

static void mtk_dma_clr(struct mtk_cqdma_pchan *pc, u32 reg, u32 val)
{
	mtk_dma_rmw(pc, reg, val, 0);
}
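/*
 * Illustration (added comment, not from the original source): mtk_dma_set()
 * and mtk_dma_clr() are thin wrappers around the read-modify-write helper
 * above, e.g.:
 *
 *	mtk_dma_set(pc, MTK_CQDMA_EN, MTK_CQDMA_EN_BIT);
 *		// read EN, clear nothing (mask = 0), OR in EN_BIT, write back
 *	mtk_dma_clr(pc, MTK_CQDMA_INT_EN, MTK_CQDMA_INT_EN_BIT);
 *		// read INT_EN, clear INT_EN_BIT, set nothing, write back
 */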
static void mtk_cqdma_vdesc_free(struct virt_dma_desc *vd)
{
	kfree(to_cqdma_vdesc(vd));
}
static int mtk_cqdma_poll_engine_done(struct mtk_cqdma_pchan *pc, bool atomic)
{
	u32 status = 0;

	if (!atomic)
		return readl_poll_timeout(pc->base + MTK_CQDMA_EN,
					  status,
					  !(status & MTK_CQDMA_EN_BIT),
					  MTK_CQDMA_USEC_POLL,
					  MTK_CQDMA_TIMEOUT_POLL);

	return readl_poll_timeout_atomic(pc->base + MTK_CQDMA_EN,
					 status,
					 !(status & MTK_CQDMA_EN_BIT),
					 MTK_CQDMA_USEC_POLL,
					 MTK_CQDMA_TIMEOUT_POLL);
}
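/*
 * Usage note (added comment): both poll variants spin on MTK_CQDMA_EN until
 * the enable bit clears, re-checking roughly every MTK_CQDMA_USEC_POLL
 * microseconds for at most MTK_CQDMA_TIMEOUT_POLL microseconds, and return
 * -ETIMEDOUT on expiry. Callers in this driver hold spinlocks and therefore
 * pass atomic=true, e.g.:
 *
 *	if (mtk_cqdma_poll_engine_done(pc, true) < 0)
 *		dev_err(dev, "cqdma engine busy-wait timed out\n");
 */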
static int mtk_cqdma_hard_reset(struct mtk_cqdma_pchan *pc)
{
	mtk_dma_set(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT);
	mtk_dma_clr(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT);

	return mtk_cqdma_poll_engine_done(pc, true);
}
static void mtk_cqdma_start(struct mtk_cqdma_pchan *pc,
			    struct mtk_cqdma_vdesc *cvd)
{
	/* wait for the previous transaction done */
	if (mtk_cqdma_poll_engine_done(pc, true) < 0)
		dev_err(cqdma2dev(to_cqdma_dev(cvd->ch)), "cqdma wait transaction timeout\n");

	/* warm reset the dma engine for the new transaction */
	mtk_dma_set(pc, MTK_CQDMA_RESET, MTK_CQDMA_WARM_RST_BIT);
	if (mtk_cqdma_poll_engine_done(pc, true) < 0)
		dev_err(cqdma2dev(to_cqdma_dev(cvd->ch)), "cqdma warm reset timeout\n");

	/* setup the source */
	mtk_dma_set(pc, MTK_CQDMA_SRC, cvd->src & MTK_CQDMA_ADDR_LIMIT);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	mtk_dma_set(pc, MTK_CQDMA_SRC2, cvd->src >> MTK_CQDMA_ADDR2_SHFIT);
#else
	mtk_dma_set(pc, MTK_CQDMA_SRC2, 0);
#endif

	/* setup the destination */
	mtk_dma_set(pc, MTK_CQDMA_DST, cvd->dest & MTK_CQDMA_ADDR_LIMIT);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	mtk_dma_set(pc, MTK_CQDMA_DST2, cvd->dest >> MTK_CQDMA_ADDR2_SHFIT);
#else
	mtk_dma_set(pc, MTK_CQDMA_DST2, 0);
#endif

	/* setup the length */
	mtk_dma_set(pc, MTK_CQDMA_LEN1, cvd->len);

	/* start dma engine */
	mtk_dma_set(pc, MTK_CQDMA_EN, MTK_CQDMA_EN_BIT);
}
static void mtk_cqdma_issue_vchan_pending(struct mtk_cqdma_vchan *cvc)
{
	struct virt_dma_desc *vd, *vd2;
	struct mtk_cqdma_pchan *pc = cvc->pc;
	struct mtk_cqdma_vdesc *cvd;
	bool trigger_engine = false;

	lockdep_assert_held(&cvc->vc.lock);
	lockdep_assert_held(&pc->lock);

	list_for_each_entry_safe(vd, vd2, &cvc->vc.desc_issued, node) {
		/* need to trigger dma engine if PC's queue is empty */
		if (list_empty(&pc->queue))
			trigger_engine = true;

		cvd = to_cqdma_vdesc(vd);

		/* add VD into PC's queue */
		list_add_tail(&cvd->node, &pc->queue);

		/* start the dma engine */
		if (trigger_engine)
			mtk_cqdma_start(pc, cvd);

		/* remove VD from list desc_issued */
		list_del(&vd->node);
	}
}
/*
 * return true if this VC is active,
 * meaning that there are VDs under processing by the PC
 */
static bool mtk_cqdma_is_vchan_active(struct mtk_cqdma_vchan *cvc)
{
	struct mtk_cqdma_vdesc *cvd;

	list_for_each_entry(cvd, &cvc->pc->queue, node)
		if (cvc == to_cqdma_vchan(cvd->ch))
			return true;

	return false;
}
/*
 * return the pointer of the CVD that is just consumed by the PC
 */
static struct mtk_cqdma_vdesc
*mtk_cqdma_consume_work_queue(struct mtk_cqdma_pchan *pc)
{
	struct mtk_cqdma_vchan *cvc;
	struct mtk_cqdma_vdesc *cvd, *ret = NULL;

	/* consume a CVD from PC's queue */
	cvd = list_first_entry_or_null(&pc->queue,
				       struct mtk_cqdma_vdesc, node);
	if (unlikely(!cvd || !cvd->parent))
		return NULL;

	cvc = to_cqdma_vchan(cvd->ch);
	ret = cvd;

	/* update residue of the parent CVD */
	cvd->parent->residue -= cvd->len;

	/* delete CVD from PC's queue */
	list_del(&cvd->node);

	spin_lock(&cvc->vc.lock);

	/* check whether all the child CVDs completed */
	if (!cvd->parent->residue) {
		/* add the parent VD into list desc_completed */
		vchan_cookie_complete(&cvd->parent->vd);

		/* setup completion if this VC is under synchronization */
		if (cvc->issue_synchronize && !mtk_cqdma_is_vchan_active(cvc)) {
			complete(&cvc->issue_completion);
			cvc->issue_synchronize = false;
		}
	}

	spin_unlock(&cvc->vc.lock);

	/* start transaction for next CVD in the queue */
	cvd = list_first_entry_or_null(&pc->queue,
				       struct mtk_cqdma_vdesc, node);
	if (cvd)
		mtk_cqdma_start(pc, cvd);

	return ret;
}
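/*
 * Worked example (added comment, numbers illustrative only): a 600 MiB
 * memcpy is split at prepare time by mtk_cqdma_prep_dma_memcpy() into
 * DIV_ROUND_UP(629145600, MTK_CQDMA_MAX_LEN) = 3 CVDs. The parent CVD starts
 * with residue = 629145600; each time the consumer above retires a child,
 * that child's len is subtracted, and only when residue reaches zero is the
 * parent's cookie completed via vchan_cookie_complete().
 */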
static void mtk_cqdma_tasklet_cb(struct tasklet_struct *t)
{
	struct mtk_cqdma_pchan *pc = from_tasklet(pc, t, tasklet);
	struct mtk_cqdma_vdesc *cvd = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pc->lock, flags);
	/* consume the queue */
	cvd = mtk_cqdma_consume_work_queue(pc);
	spin_unlock_irqrestore(&pc->lock, flags);

	/* submit the next CVD */
	if (cvd) {
		dma_run_dependencies(&cvd->vd.tx);

		/*
		 * free child CVD after completion.
		 * the parent CVD would be freed with desc_free by user.
		 */
		if (cvd->parent != cvd)
			kfree(cvd);
	}

	/* re-enable interrupt before leaving tasklet */
	enable_irq(pc->irq);
}
static irqreturn_t mtk_cqdma_irq(int irq, void *devid)
{
	struct mtk_cqdma_device *cqdma = devid;
	irqreturn_t ret = IRQ_NONE;
	bool schedule_tasklet = false;
	u32 i;

	/* clear interrupt flags for each PC */
	for (i = 0; i < cqdma->dma_channels; ++i, schedule_tasklet = false) {
		spin_lock(&cqdma->pc[i]->lock);
		if (mtk_dma_read(cqdma->pc[i],
				 MTK_CQDMA_INT_FLAG) & MTK_CQDMA_INT_FLAG_BIT) {
			/* clear interrupt */
			mtk_dma_clr(cqdma->pc[i], MTK_CQDMA_INT_FLAG,
				    MTK_CQDMA_INT_FLAG_BIT);

			schedule_tasklet = true;
			ret = IRQ_HANDLED;
		}
		spin_unlock(&cqdma->pc[i]->lock);

		if (schedule_tasklet) {
			/* disable interrupt */
			disable_irq_nosync(cqdma->pc[i]->irq);

			/* schedule the tasklet to handle the transactions */
			tasklet_schedule(&cqdma->pc[i]->tasklet);
		}
	}

	return ret;
}
static struct virt_dma_desc *mtk_cqdma_find_active_desc(struct dma_chan *c,
							dma_cookie_t cookie)
{
	struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c);
	struct virt_dma_desc *vd;
	unsigned long flags;

	spin_lock_irqsave(&cvc->pc->lock, flags);
	list_for_each_entry(vd, &cvc->pc->queue, node)
		if (vd->tx.cookie == cookie) {
			spin_unlock_irqrestore(&cvc->pc->lock, flags);
			return vd;
		}
	spin_unlock_irqrestore(&cvc->pc->lock, flags);

	list_for_each_entry(vd, &cvc->vc.desc_issued, node)
		if (vd->tx.cookie == cookie)
			return vd;

	return NULL;
}
static enum dma_status mtk_cqdma_tx_status(struct dma_chan *c,
					   dma_cookie_t cookie,
					   struct dma_tx_state *txstate)
{
	struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c);
	struct mtk_cqdma_vdesc *cvd;
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;
	size_t bytes = 0;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&cvc->vc.lock, flags);
	vd = mtk_cqdma_find_active_desc(c, cookie);
	spin_unlock_irqrestore(&cvc->vc.lock, flags);

	if (vd) {
		cvd = to_cqdma_vdesc(vd);
		bytes = cvd->residue;
	}

	dma_set_residue(txstate, bytes);

	return ret;
}
static void mtk_cqdma_issue_pending(struct dma_chan *c)
{
	struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c);
	unsigned long pc_flags;
	unsigned long vc_flags;

	/* acquire PC's lock before VC's lock for lock dependency in tasklet */
	spin_lock_irqsave(&cvc->pc->lock, pc_flags);
	spin_lock_irqsave(&cvc->vc.lock, vc_flags);

	if (vchan_issue_pending(&cvc->vc))
		mtk_cqdma_issue_vchan_pending(cvc);

	spin_unlock_irqrestore(&cvc->vc.lock, vc_flags);
	spin_unlock_irqrestore(&cvc->pc->lock, pc_flags);
}
static struct dma_async_tx_descriptor *
mtk_cqdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest,
			  dma_addr_t src, size_t len, unsigned long flags)
{
	struct mtk_cqdma_vdesc **cvd;
	struct dma_async_tx_descriptor *tx = NULL, *prev_tx = NULL;
	size_t i, tlen, nr_vd;

	/*
	 * In the case that the transaction length is larger than the
	 * DMA engine supports, a single memcpy transaction needs
	 * to be separated into several DMA transactions.
	 * Each DMA transaction would be described by a CVD,
	 * and the first one is referred to as the parent CVD,
	 * while the others are child CVDs.
	 * The parent CVD's tx descriptor is the only tx descriptor
	 * returned to the DMA user, and it should not be completed
	 * until all the child CVDs completed.
	 */
	nr_vd = DIV_ROUND_UP(len, MTK_CQDMA_MAX_LEN);
	cvd = kcalloc(nr_vd, sizeof(*cvd), GFP_NOWAIT);
	if (!cvd)
		return NULL;

	for (i = 0; i < nr_vd; ++i) {
		cvd[i] = kzalloc(sizeof(*cvd[i]), GFP_NOWAIT);
		if (!cvd[i]) {
			for (; i > 0; --i)
				kfree(cvd[i - 1]);
			return NULL;
		}

		/* setup dma channel */
		cvd[i]->ch = c;

		/* setup source, destination, and length */
		tlen = (len > MTK_CQDMA_MAX_LEN) ? MTK_CQDMA_MAX_LEN : len;
		cvd[i]->len = tlen;
		cvd[i]->src = src;
		cvd[i]->dest = dest;

		/* setup tx descriptor */
		tx = vchan_tx_prep(to_virt_chan(c), &cvd[i]->vd, flags);
		tx->next = NULL;

		if (!i) {
			cvd[0]->residue = len;
		} else {
			prev_tx->next = tx;
			cvd[i]->residue = tlen;
		}

		cvd[i]->parent = cvd[0];

		/* update the src, dest, len, prev_tx for the next CVD */
		src += tlen;
		dest += tlen;
		len -= tlen;
		prev_tx = tx;
	}

	return &cvd[0]->vd.tx;
}
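/*
 * Hedged usage sketch (added comment, not part of the driver): a typical
 * dmaengine memcpy client of this controller could look roughly like the
 * following, assuming dev, src_phys, dst_phys and len are set up by the
 * caller and error handling is omitted:
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "cqdma");
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_phys, src_phys, len, 0);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *	dma_sync_wait(chan, cookie);	// or poll dmaengine_tx_status()
 *	dma_release_channel(chan);
 */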
static void mtk_cqdma_free_inactive_desc(struct dma_chan *c)
{
	struct virt_dma_chan *vc = to_virt_chan(c);
	unsigned long flags;
	LIST_HEAD(head);

	/*
	 * set desc_allocated, desc_submitted,
	 * and desc_issued as the candidates to be freed
	 */
	spin_lock_irqsave(&vc->lock, flags);
	list_splice_tail_init(&vc->desc_allocated, &head);
	list_splice_tail_init(&vc->desc_submitted, &head);
	list_splice_tail_init(&vc->desc_issued, &head);
	spin_unlock_irqrestore(&vc->lock, flags);

	/* free descriptor lists */
	vchan_dma_desc_free_list(vc, &head);
}
static void mtk_cqdma_free_active_desc(struct dma_chan *c)
{
	struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c);
	bool sync_needed = false;
	unsigned long pc_flags;
	unsigned long vc_flags;

	/* acquire PC's lock first due to lock dependency in dma ISR */
	spin_lock_irqsave(&cvc->pc->lock, pc_flags);
	spin_lock_irqsave(&cvc->vc.lock, vc_flags);

	/* synchronization is required if this VC is active */
	if (mtk_cqdma_is_vchan_active(cvc)) {
		cvc->issue_synchronize = true;
		sync_needed = true;
	}

	spin_unlock_irqrestore(&cvc->vc.lock, vc_flags);
	spin_unlock_irqrestore(&cvc->pc->lock, pc_flags);

	/* waiting for the completion of this VC */
	if (sync_needed)
		wait_for_completion(&cvc->issue_completion);

	/* free all descriptors in list desc_completed */
	vchan_synchronize(&cvc->vc);

	WARN_ONCE(!list_empty(&cvc->vc.desc_completed),
		  "Desc pending still in list desc_completed\n");
}
static int mtk_cqdma_terminate_all(struct dma_chan *c)
{
	/* free descriptors not processed yet by hardware */
	mtk_cqdma_free_inactive_desc(c);

	/* free descriptors being processed by hardware */
	mtk_cqdma_free_active_desc(c);

	return 0;
}
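/*
 * Note (added comment): terminate_all does both phases itself: descriptors
 * that never reached a PC queue (allocated/submitted/issued) are reclaimed
 * immediately, while descriptors already queued on the PC are waited for via
 * issue_completion and then released through vchan_synchronize() above.
 */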
static int mtk_cqdma_alloc_chan_resources(struct dma_chan *c)
{
	struct mtk_cqdma_device *cqdma = to_cqdma_dev(c);
	struct mtk_cqdma_vchan *vc = to_cqdma_vchan(c);
	struct mtk_cqdma_pchan *pc = NULL;
	u32 i, min_refcnt = U32_MAX, refcnt;
	unsigned long flags;

	/* allocate PC with the minimum refcount */
	for (i = 0; i < cqdma->dma_channels; ++i) {
		refcnt = refcount_read(&cqdma->pc[i]->refcnt);
		if (refcnt < min_refcnt) {
			pc = cqdma->pc[i];
			min_refcnt = refcnt;
		}
	}

	if (!pc)
		return -ENOSPC;

	spin_lock_irqsave(&pc->lock, flags);

	if (!refcount_read(&pc->refcnt)) {
		/* allocate PC when the refcount is zero */
		mtk_cqdma_hard_reset(pc);

		/* enable interrupt for this PC */
		mtk_dma_set(pc, MTK_CQDMA_INT_EN, MTK_CQDMA_INT_EN_BIT);

		/*
		 * refcount_inc would complain increment on 0; use-after-free.
		 * Thus, we need to explicitly set it as 1 initially.
		 */
		refcount_set(&pc->refcnt, 1);
	} else {
		refcount_inc(&pc->refcnt);
	}

	spin_unlock_irqrestore(&pc->lock, flags);

	vc->pc = pc;

	return 0;
}
static void mtk_cqdma_free_chan_resources(struct dma_chan *c)
{
	struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c);
	unsigned long flags;

	/* free all descriptors in all lists on the VC */
	mtk_cqdma_terminate_all(c);

	spin_lock_irqsave(&cvc->pc->lock, flags);

	/* PC is not freed until there is no VC mapped to it */
	if (refcount_dec_and_test(&cvc->pc->refcnt)) {
		/* start the flush operation and stop the engine */
		mtk_dma_set(cvc->pc, MTK_CQDMA_FLUSH, MTK_CQDMA_FLUSH_BIT);

		/* wait for the completion of flush operation */
		if (mtk_cqdma_poll_engine_done(cvc->pc, true) < 0)
			dev_err(cqdma2dev(to_cqdma_dev(c)), "cqdma flush timeout\n");

		/* clear the flush bit and interrupt flag */
		mtk_dma_clr(cvc->pc, MTK_CQDMA_FLUSH, MTK_CQDMA_FLUSH_BIT);
		mtk_dma_clr(cvc->pc, MTK_CQDMA_INT_FLAG,
			    MTK_CQDMA_INT_FLAG_BIT);

		/* disable interrupt for this PC */
		mtk_dma_clr(cvc->pc, MTK_CQDMA_INT_EN, MTK_CQDMA_INT_EN_BIT);
	}

	spin_unlock_irqrestore(&cvc->pc->lock, flags);
}
static int mtk_cqdma_hw_init(struct mtk_cqdma_device *cqdma)
{
	unsigned long flags;
	int err;
	u32 i;

	pm_runtime_enable(cqdma2dev(cqdma));
	pm_runtime_get_sync(cqdma2dev(cqdma));

	err = clk_prepare_enable(cqdma->clk);

	if (err) {
		pm_runtime_put_sync(cqdma2dev(cqdma));
		pm_runtime_disable(cqdma2dev(cqdma));
		return err;
	}

	/* reset all PCs */
	for (i = 0; i < cqdma->dma_channels; ++i) {
		spin_lock_irqsave(&cqdma->pc[i]->lock, flags);
		if (mtk_cqdma_hard_reset(cqdma->pc[i]) < 0) {
			dev_err(cqdma2dev(cqdma), "cqdma hard reset timeout\n");
			spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags);

			clk_disable_unprepare(cqdma->clk);
			pm_runtime_put_sync(cqdma2dev(cqdma));
			pm_runtime_disable(cqdma2dev(cqdma));
			return -EINVAL;
		}
		spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags);
	}

	return 0;
}
static void mtk_cqdma_hw_deinit(struct mtk_cqdma_device *cqdma)
{
	unsigned long flags;
	u32 i;

	/* reset all PCs */
	for (i = 0; i < cqdma->dma_channels; ++i) {
		spin_lock_irqsave(&cqdma->pc[i]->lock, flags);
		if (mtk_cqdma_hard_reset(cqdma->pc[i]) < 0)
			dev_err(cqdma2dev(cqdma), "cqdma hard reset timeout\n");
		spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags);
	}

	clk_disable_unprepare(cqdma->clk);

	pm_runtime_put_sync(cqdma2dev(cqdma));
	pm_runtime_disable(cqdma2dev(cqdma));
}
static const struct of_device_id mtk_cqdma_match[] = {
	{ .compatible = "mediatek,mt6765-cqdma" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mtk_cqdma_match);
static int mtk_cqdma_probe(struct platform_device *pdev)
{
	struct mtk_cqdma_device *cqdma;
	struct mtk_cqdma_vchan *vc;
	struct dma_device *dd;
	int err;
	u32 i;

	cqdma = devm_kzalloc(&pdev->dev, sizeof(*cqdma), GFP_KERNEL);
	if (!cqdma)
		return -ENOMEM;

	dd = &cqdma->ddev;

	cqdma->clk = devm_clk_get(&pdev->dev, "cqdma");
	if (IS_ERR(cqdma->clk)) {
		dev_err(&pdev->dev, "No clock for %s\n",
			dev_name(&pdev->dev));
		return PTR_ERR(cqdma->clk);
	}

	dma_cap_set(DMA_MEMCPY, dd->cap_mask);

	dd->copy_align = MTK_CQDMA_ALIGN_SIZE;
	dd->device_alloc_chan_resources = mtk_cqdma_alloc_chan_resources;
	dd->device_free_chan_resources = mtk_cqdma_free_chan_resources;
	dd->device_tx_status = mtk_cqdma_tx_status;
	dd->device_issue_pending = mtk_cqdma_issue_pending;
	dd->device_prep_dma_memcpy = mtk_cqdma_prep_dma_memcpy;
	dd->device_terminate_all = mtk_cqdma_terminate_all;
	dd->src_addr_widths = MTK_CQDMA_DMA_BUSWIDTHS;
	dd->dst_addr_widths = MTK_CQDMA_DMA_BUSWIDTHS;
	dd->directions = BIT(DMA_MEM_TO_MEM);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	dd->dev = &pdev->dev;
	INIT_LIST_HEAD(&dd->channels);

	if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
						      "dma-requests",
						      &cqdma->dma_requests)) {
		dev_info(&pdev->dev,
			 "Using %u as missing dma-requests property\n",
			 MTK_CQDMA_NR_VCHANS);

		cqdma->dma_requests = MTK_CQDMA_NR_VCHANS;
	}

	if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
						      "dma-channels",
						      &cqdma->dma_channels)) {
		dev_info(&pdev->dev,
			 "Using %u as missing dma-channels property\n",
			 MTK_CQDMA_NR_PCHANS);

		cqdma->dma_channels = MTK_CQDMA_NR_PCHANS;
	}

	cqdma->pc = devm_kcalloc(&pdev->dev, cqdma->dma_channels,
				 sizeof(*cqdma->pc), GFP_KERNEL);
	if (!cqdma->pc)
		return -ENOMEM;

	/* initialization for PCs */
	for (i = 0; i < cqdma->dma_channels; ++i) {
		cqdma->pc[i] = devm_kcalloc(&pdev->dev, 1,
					    sizeof(**cqdma->pc), GFP_KERNEL);
		if (!cqdma->pc[i])
			return -ENOMEM;

		INIT_LIST_HEAD(&cqdma->pc[i]->queue);
		spin_lock_init(&cqdma->pc[i]->lock);
		refcount_set(&cqdma->pc[i]->refcnt, 0);
		cqdma->pc[i]->base = devm_platform_ioremap_resource(pdev, i);
		if (IS_ERR(cqdma->pc[i]->base))
			return PTR_ERR(cqdma->pc[i]->base);

		/* allocate IRQ resource */
		err = platform_get_irq(pdev, i);
		if (err < 0)
			return err;
		cqdma->pc[i]->irq = err;

		err = devm_request_irq(&pdev->dev, cqdma->pc[i]->irq,
				       mtk_cqdma_irq, 0, dev_name(&pdev->dev),
				       cqdma);
		if (err) {
			dev_err(&pdev->dev,
				"request_irq failed with err %d\n", err);
			return -EINVAL;
		}
	}

	/* allocate resource for VCs */
	cqdma->vc = devm_kcalloc(&pdev->dev, cqdma->dma_requests,
				 sizeof(*cqdma->vc), GFP_KERNEL);
	if (!cqdma->vc)
		return -ENOMEM;

	for (i = 0; i < cqdma->dma_requests; i++) {
		vc = &cqdma->vc[i];
		vc->vc.desc_free = mtk_cqdma_vdesc_free;
		vchan_init(&vc->vc, dd);
		init_completion(&vc->issue_completion);
	}

	err = dma_async_device_register(dd);
	if (err)
		return err;

	err = of_dma_controller_register(pdev->dev.of_node,
					 of_dma_xlate_by_chan_id, cqdma);
	if (err) {
		dev_err(&pdev->dev,
			"MediaTek CQDMA OF registration failed %d\n", err);
		goto err_unregister;
	}

	err = mtk_cqdma_hw_init(cqdma);
	if (err) {
		dev_err(&pdev->dev,
			"MediaTek CQDMA HW initialization failed %d\n", err);
		goto err_unregister;
	}

	platform_set_drvdata(pdev, cqdma);

	/* initialize tasklet for each PC */
	for (i = 0; i < cqdma->dma_channels; ++i)
		tasklet_setup(&cqdma->pc[i]->tasklet, mtk_cqdma_tasklet_cb);

	dev_info(&pdev->dev, "MediaTek CQDMA driver registered\n");

	return 0;

err_unregister:
	dma_async_device_unregister(dd);

	return err;
}
static void mtk_cqdma_remove(struct platform_device *pdev)
{
	struct mtk_cqdma_device *cqdma = platform_get_drvdata(pdev);
	struct mtk_cqdma_vchan *vc;
	unsigned long flags;
	int i;

	/* kill VC task */
	for (i = 0; i < cqdma->dma_requests; i++) {
		vc = &cqdma->vc[i];

		list_del(&vc->vc.chan.device_node);
		tasklet_kill(&vc->vc.task);
	}

	/* disable interrupt */
	for (i = 0; i < cqdma->dma_channels; i++) {
		spin_lock_irqsave(&cqdma->pc[i]->lock, flags);
		mtk_dma_clr(cqdma->pc[i], MTK_CQDMA_INT_EN,
			    MTK_CQDMA_INT_EN_BIT);
		spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags);

		/* Waits for any pending IRQ handlers to complete */
		synchronize_irq(cqdma->pc[i]->irq);

		tasklet_kill(&cqdma->pc[i]->tasklet);
	}

	/* disable hardware */
	mtk_cqdma_hw_deinit(cqdma);

	dma_async_device_unregister(&cqdma->ddev);
	of_dma_controller_free(pdev->dev.of_node);
}
static struct platform_driver mtk_cqdma_driver = {
	.probe = mtk_cqdma_probe,
	.remove = mtk_cqdma_remove,
	.driver = {
		.name           = KBUILD_MODNAME,
		.of_match_table = mtk_cqdma_match,
	},
};
module_platform_driver(mtk_cqdma_driver);
MODULE_DESCRIPTION("MediaTek CQDMA Controller Driver");
MODULE_AUTHOR("Shun-Chih Yu <shun-chih.yu@mediatek.com>");
MODULE_LICENSE("GPL v2");