// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018-2019 MediaTek Inc.

/*
 * Driver for MediaTek Command-Queue DMA Controller
 *
 * Author: Shun-Chih Yu <shun-chih.yu@mediatek.com>
 */
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/refcount.h>
#include <linux/slab.h>

#include "../virt-dma.h"
#define MTK_CQDMA_USEC_POLL		10
#define MTK_CQDMA_TIMEOUT_POLL		1000
#define MTK_CQDMA_DMA_BUSWIDTHS		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
#define MTK_CQDMA_ALIGN_SIZE		1

/* The default number of virtual channels */
#define MTK_CQDMA_NR_VCHANS		32

/* The default number of physical channels */
#define MTK_CQDMA_NR_PCHANS		3
/* Registers for underlying dma manipulation */
#define MTK_CQDMA_INT_FLAG		0x0
#define MTK_CQDMA_INT_EN		0x4
#define MTK_CQDMA_EN			0x8
#define MTK_CQDMA_RESET			0xc
#define MTK_CQDMA_FLUSH			0x14
#define MTK_CQDMA_SRC			0x1c
#define MTK_CQDMA_DST			0x20
#define MTK_CQDMA_LEN1			0x24
#define MTK_CQDMA_LEN2			0x28
#define MTK_CQDMA_SRC2			0x60
#define MTK_CQDMA_DST2			0x64

/* Register settings */
#define MTK_CQDMA_EN_BIT		BIT(0)
#define MTK_CQDMA_INT_FLAG_BIT		BIT(0)
#define MTK_CQDMA_INT_EN_BIT		BIT(0)
#define MTK_CQDMA_FLUSH_BIT		BIT(0)

#define MTK_CQDMA_WARM_RST_BIT		BIT(0)
#define MTK_CQDMA_HARD_RST_BIT		BIT(1)

#define MTK_CQDMA_MAX_LEN		GENMASK(27, 0)
#define MTK_CQDMA_ADDR_LIMIT		GENMASK(31, 0)
#define MTK_CQDMA_ADDR2_SHFIT		(32)
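
/*
 * Terminology used throughout this driver (naming follows the structure
 * definitions below):
 *   VC  - virtual channel, one per dmaengine client (struct mtk_cqdma_vchan)
 *   PC  - physical channel backing a hardware queue (struct mtk_cqdma_pchan)
 *   VD  - virtual descriptor (struct virt_dma_desc)
 *   CVD - CQDMA virtual descriptor (struct mtk_cqdma_vdesc), describing one
 *         hardware transfer of at most MTK_CQDMA_MAX_LEN bytes
 */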
/**
 * struct mtk_cqdma_vdesc - The struct holding info describing virtual
 *                          descriptor (CVD)
 * @vd:                     An instance for struct virt_dma_desc
 * @len:                    The total data size device wants to move
 * @residue:                The remaining data size device will move
 * @dest:                   The destination address device wants to move to
 * @src:                    The source address device wants to move from
 * @ch:                     The pointer to the corresponding dma channel
 * @node:                   The list_head struct to build the linked list of VDs
 * @parent:                 The pointer to the parent CVD
 */
struct mtk_cqdma_vdesc {
	struct virt_dma_desc vd;
	size_t len;
	size_t residue;
	dma_addr_t dest;
	dma_addr_t src;
	struct dma_chan *ch;

	struct list_head node;
	struct mtk_cqdma_vdesc *parent;
};
/**
 * struct mtk_cqdma_pchan - The struct holding info describing physical
 *                          channel (PC)
 * @queue:                  Queue for the VDs issued to this PC
 * @base:                   The mapped register I/O base of this PC
 * @irq:                    The IRQ that this PC is using
 * @refcnt:                 Track how many VCs are using this PC
 * @tasklet:                Tasklet for this PC
 * @lock:                   Lock to protect against multiple VCs accessing
 *                          the PC
 */
struct mtk_cqdma_pchan {
	struct list_head queue;
	void __iomem *base;
	u32 irq;

	refcount_t refcnt;

	struct tasklet_struct tasklet;

	/* lock to protect PC */
	spinlock_t lock;
};
/**
 * struct mtk_cqdma_vchan - The struct holding info describing virtual
 *                          channel (VC)
 * @vc:                     An instance for struct virt_dma_chan
 * @pc:                     The pointer to the underlying PC
 * @issue_completion:       Completion used to wait until all issued
 *                          descriptors have completed
 * @issue_synchronize:      Bool indicating that channel synchronization
 *                          has started
 */
struct mtk_cqdma_vchan {
	struct virt_dma_chan vc;
	struct mtk_cqdma_pchan *pc;
	struct completion issue_completion;
	bool issue_synchronize;
};
/**
 * struct mtk_cqdma_device - The struct holding info describing CQDMA
 *                           device
 * @ddev:                    An instance for struct dma_device
 * @clk:                     The clock the device is using
 * @dma_requests:            The number of VCs the device supports
 * @dma_channels:            The number of PCs the device supports
 * @vc:                      The pointer to all available VCs
 * @pc:                      The pointer to all the underlying PCs
 */
struct mtk_cqdma_device {
	struct dma_device ddev;
	struct clk *clk;

	u32 dma_requests;
	u32 dma_channels;
	struct mtk_cqdma_vchan *vc;
	struct mtk_cqdma_pchan **pc;
};
static struct mtk_cqdma_device *to_cqdma_dev(struct dma_chan *chan)
{
	return container_of(chan->device, struct mtk_cqdma_device, ddev);
}

static struct mtk_cqdma_vchan *to_cqdma_vchan(struct dma_chan *chan)
{
	return container_of(chan, struct mtk_cqdma_vchan, vc.chan);
}

static struct mtk_cqdma_vdesc *to_cqdma_vdesc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct mtk_cqdma_vdesc, vd);
}

static struct device *cqdma2dev(struct mtk_cqdma_device *cqdma)
{
	return cqdma->ddev.dev;
}
static u32 mtk_dma_read(struct mtk_cqdma_pchan *pc, u32 reg)
{
	return readl(pc->base + reg);
}

static void mtk_dma_write(struct mtk_cqdma_pchan *pc, u32 reg, u32 val)
{
	writel_relaxed(val, pc->base + reg);
}
static void mtk_dma_rmw(struct mtk_cqdma_pchan *pc, u32 reg,
			u32 mask, u32 set)
{
	u32 val;

	val = mtk_dma_read(pc, reg);
	val &= ~mask;
	val |= set;
	mtk_dma_write(pc, reg, val);
}

static void mtk_dma_set(struct mtk_cqdma_pchan *pc, u32 reg, u32 val)
{
	mtk_dma_rmw(pc, reg, 0, val);
}

static void mtk_dma_clr(struct mtk_cqdma_pchan *pc, u32 reg, u32 val)
{
	mtk_dma_rmw(pc, reg, val, 0);
}
static void mtk_cqdma_vdesc_free(struct virt_dma_desc *vd)
{
	kfree(to_cqdma_vdesc(vd));
}
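
/*
 * Busy-poll MTK_CQDMA_EN until the enable bit clears, i.e. until the engine
 * has finished the transaction it is working on.  The @atomic flag selects
 * the non-sleeping poll variant for callers running in atomic context.
 */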
static int mtk_cqdma_poll_engine_done(struct mtk_cqdma_pchan *pc, bool atomic)
{
	u32 status = 0;

	if (!atomic)
		return readl_poll_timeout(pc->base + MTK_CQDMA_EN,
					  status,
					  !(status & MTK_CQDMA_EN_BIT),
					  MTK_CQDMA_USEC_POLL,
					  MTK_CQDMA_TIMEOUT_POLL);

	return readl_poll_timeout_atomic(pc->base + MTK_CQDMA_EN,
					 status,
					 !(status & MTK_CQDMA_EN_BIT),
					 MTK_CQDMA_USEC_POLL,
					 MTK_CQDMA_TIMEOUT_POLL);
}
static int mtk_cqdma_hard_reset(struct mtk_cqdma_pchan *pc)
{
	mtk_dma_set(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT);
	mtk_dma_clr(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT);

	return mtk_cqdma_poll_engine_done(pc, true);
}
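
/*
 * Program one CVD into the engine: wait for the previous transaction to
 * drain, warm-reset the engine, write the source/destination addresses
 * (the upper 32 bits only when CONFIG_ARCH_DMA_ADDR_T_64BIT is set) and
 * the length, then set the enable bit to kick off the transfer.
 */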
static void mtk_cqdma_start(struct mtk_cqdma_pchan *pc,
			    struct mtk_cqdma_vdesc *cvd)
{
	/* wait for the previous transaction done */
	if (mtk_cqdma_poll_engine_done(pc, true) < 0)
		dev_err(cqdma2dev(to_cqdma_dev(cvd->ch)), "cqdma wait transaction timeout\n");

	/* warm reset the dma engine for the new transaction */
	mtk_dma_set(pc, MTK_CQDMA_RESET, MTK_CQDMA_WARM_RST_BIT);
	if (mtk_cqdma_poll_engine_done(pc, true) < 0)
		dev_err(cqdma2dev(to_cqdma_dev(cvd->ch)), "cqdma warm reset timeout\n");

	/* setup the source */
	mtk_dma_set(pc, MTK_CQDMA_SRC, cvd->src & MTK_CQDMA_ADDR_LIMIT);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	mtk_dma_set(pc, MTK_CQDMA_SRC2, cvd->src >> MTK_CQDMA_ADDR2_SHFIT);
#else
	mtk_dma_set(pc, MTK_CQDMA_SRC2, 0);
#endif

	/* setup the destination */
	mtk_dma_set(pc, MTK_CQDMA_DST, cvd->dest & MTK_CQDMA_ADDR_LIMIT);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	mtk_dma_set(pc, MTK_CQDMA_DST2, cvd->dest >> MTK_CQDMA_ADDR2_SHFIT);
#else
	mtk_dma_set(pc, MTK_CQDMA_DST2, 0);
#endif

	/* setup the length */
	mtk_dma_set(pc, MTK_CQDMA_LEN1, cvd->len);

	/* start dma engine */
	mtk_dma_set(pc, MTK_CQDMA_EN, MTK_CQDMA_EN_BIT);
}
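
/*
 * Move every VD on the VC's desc_issued list onto the PC's queue.  The
 * engine is only triggered here when the PC queue was empty beforehand;
 * otherwise the next transfer is chained by the completion path that
 * consumes the queue.
 */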
static void mtk_cqdma_issue_vchan_pending(struct mtk_cqdma_vchan *cvc)
{
	struct virt_dma_desc *vd, *vd2;
	struct mtk_cqdma_pchan *pc = cvc->pc;
	struct mtk_cqdma_vdesc *cvd;
	bool trigger_engine = false;

	lockdep_assert_held(&cvc->vc.lock);
	lockdep_assert_held(&pc->lock);

	list_for_each_entry_safe(vd, vd2, &cvc->vc.desc_issued, node) {
		/* need to trigger dma engine if PC's queue is empty */
		if (list_empty(&pc->queue))
			trigger_engine = true;

		cvd = to_cqdma_vdesc(vd);

		/* add VD into PC's queue */
		list_add_tail(&cvd->node, &pc->queue);

		/* start the dma engine */
		if (trigger_engine)
			mtk_cqdma_start(pc, cvd);

		/* remove VD from list desc_issued */
		list_del(&vd->node);
	}
}
/*
 * return true if this VC is active,
 * meaning that there are VDs under processing by the PC
 */
static bool mtk_cqdma_is_vchan_active(struct mtk_cqdma_vchan *cvc)
{
	struct mtk_cqdma_vdesc *cvd;

	list_for_each_entry(cvd, &cvc->pc->queue, node)
		if (cvc == to_cqdma_vchan(cvd->ch))
			return true;

	return false;
}
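
/*
 * Note on bookkeeping: every consumed child CVD subtracts its length from
 * the parent's residue; only when the parent's residue reaches zero is the
 * parent's cookie completed and reported to the client.
 */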
/*
 * return the pointer of the CVD that is just consumed by the PC
 */
static struct mtk_cqdma_vdesc
*mtk_cqdma_consume_work_queue(struct mtk_cqdma_pchan *pc)
{
	struct mtk_cqdma_vchan *cvc;
	struct mtk_cqdma_vdesc *cvd, *ret = NULL;

	/* consume a CVD from PC's queue */
	cvd = list_first_entry_or_null(&pc->queue,
				       struct mtk_cqdma_vdesc, node);
	if (unlikely(!cvd || !cvd->parent))
		return NULL;

	cvc = to_cqdma_vchan(cvd->ch);
	ret = cvd;

	/* update residue of the parent CVD */
	cvd->parent->residue -= cvd->len;

	/* delete CVD from PC's queue */
	list_del(&cvd->node);

	spin_lock(&cvc->vc.lock);

	/* check whether all the child CVDs completed */
	if (!cvd->parent->residue) {
		/* add the parent VD into list desc_completed */
		vchan_cookie_complete(&cvd->parent->vd);

		/* setup completion if this VC is under synchronization */
		if (cvc->issue_synchronize && !mtk_cqdma_is_vchan_active(cvc)) {
			complete(&cvc->issue_completion);
			cvc->issue_synchronize = false;
		}
	}

	spin_unlock(&cvc->vc.lock);

	/* start transaction for next CVD in the queue */
	cvd = list_first_entry_or_null(&pc->queue,
				       struct mtk_cqdma_vdesc, node);
	if (cvd)
		mtk_cqdma_start(pc, cvd);

	return ret;
}
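
/*
 * Tasklet bottom half: it runs with the PC's interrupt line disabled (the
 * hard IRQ handler disables it before scheduling us) and re-enables the
 * line once the completed CVD has been consumed.
 */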
static void mtk_cqdma_tasklet_cb(struct tasklet_struct *t)
{
	struct mtk_cqdma_pchan *pc = from_tasklet(pc, t, tasklet);
	struct mtk_cqdma_vdesc *cvd = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pc->lock, flags);
	/* consume the queue */
	cvd = mtk_cqdma_consume_work_queue(pc);
	spin_unlock_irqrestore(&pc->lock, flags);

	/* submit the next CVD */
	if (cvd) {
		dma_run_dependencies(&cvd->vd.tx);

		/*
		 * free child CVD after completion.
		 * the parent CVD would be freed with desc_free by user.
		 */
		if (cvd->parent != cvd)
			kfree(cvd);
	}

	/* re-enable interrupt before leaving tasklet */
	enable_irq(pc->irq);
}
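
/*
 * Hard IRQ handler registered for each PC's interrupt line: check every
 * PC's interrupt flag, acknowledge it, then mask the line and defer the
 * real work to the per-PC tasklet.
 */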
static irqreturn_t mtk_cqdma_irq(int irq, void *devid)
{
	struct mtk_cqdma_device *cqdma = devid;
	irqreturn_t ret = IRQ_NONE;
	bool schedule_tasklet = false;
	u32 i;

	/* clear interrupt flags for each PC */
	for (i = 0; i < cqdma->dma_channels; ++i, schedule_tasklet = false) {
		spin_lock(&cqdma->pc[i]->lock);
		if (mtk_dma_read(cqdma->pc[i],
				 MTK_CQDMA_INT_FLAG) & MTK_CQDMA_INT_FLAG_BIT) {
			/* clear interrupt */
			mtk_dma_clr(cqdma->pc[i], MTK_CQDMA_INT_FLAG,
				    MTK_CQDMA_INT_FLAG_BIT);

			schedule_tasklet = true;
			ret = IRQ_HANDLED;
		}
		spin_unlock(&cqdma->pc[i]->lock);

		if (schedule_tasklet) {
			/* disable interrupt */
			disable_irq_nosync(cqdma->pc[i]->irq);

			/* schedule the tasklet to handle the transactions */
			tasklet_schedule(&cqdma->pc[i]->tasklet);
		}
	}

	return ret;
}
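
/*
 * Look up the descriptor matching the cookie, first on the PC queue
 * (descriptors already handed to hardware) and then on the VC's
 * desc_issued list.  The caller holds the VC lock; the PC lock is taken
 * here for the queue walk.
 */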
static struct virt_dma_desc *mtk_cqdma_find_active_desc(struct dma_chan *c,
							dma_cookie_t cookie)
{
	struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c);
	struct virt_dma_desc *vd;
	unsigned long flags;

	spin_lock_irqsave(&cvc->pc->lock, flags);
	list_for_each_entry(vd, &cvc->pc->queue, node)
		if (vd->tx.cookie == cookie) {
			spin_unlock_irqrestore(&cvc->pc->lock, flags);
			return vd;
		}
	spin_unlock_irqrestore(&cvc->pc->lock, flags);

	list_for_each_entry(vd, &cvc->vc.desc_issued, node)
		if (vd->tx.cookie == cookie)
			return vd;

	return NULL;
}
static enum dma_status mtk_cqdma_tx_status(struct dma_chan *c,
					   dma_cookie_t cookie,
					   struct dma_tx_state *txstate)
{
	struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c);
	struct mtk_cqdma_vdesc *cvd;
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;
	size_t bytes = 0;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&cvc->vc.lock, flags);
	vd = mtk_cqdma_find_active_desc(c, cookie);
	spin_unlock_irqrestore(&cvc->vc.lock, flags);

	if (vd) {
		cvd = to_cqdma_vdesc(vd);
		bytes = cvd->residue;
	}

	dma_set_residue(txstate, bytes);

	return ret;
}
static void mtk_cqdma_issue_pending(struct dma_chan *c)
{
	struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c);
	unsigned long pc_flags;
	unsigned long vc_flags;

	/* acquire PC's lock before VC's lock for lock dependency in tasklet */
	spin_lock_irqsave(&cvc->pc->lock, pc_flags);
	spin_lock_irqsave(&cvc->vc.lock, vc_flags);

	if (vchan_issue_pending(&cvc->vc))
		mtk_cqdma_issue_vchan_pending(cvc);

	spin_unlock_irqrestore(&cvc->vc.lock, vc_flags);
	spin_unlock_irqrestore(&cvc->pc->lock, pc_flags);
}
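
/*
 * Worked example of the splitting done below: with MTK_CQDMA_MAX_LEN being
 * GENMASK(27, 0) = 268435455 bytes, a 600 MiB (629145600-byte) memcpy gives
 * nr_vd = DIV_ROUND_UP(629145600, 268435455) = 3, i.e. one parent CVD plus
 * two children covering 268435455 + 268435455 + 92274690 bytes.
 */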
static struct dma_async_tx_descriptor *
mtk_cqdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest,
			  dma_addr_t src, size_t len, unsigned long flags)
{
	struct mtk_cqdma_vdesc **cvd;
	struct dma_async_tx_descriptor *tx = NULL, *prev_tx = NULL;
	size_t i, tlen, nr_vd;

	/*
	 * In the case that transaction length is larger than the
	 * DMA engine supports, a single memcpy transaction needs
	 * to be separated into several DMA transactions.
	 * Each DMA transaction would be described by a CVD,
	 * and the first one is referred to as the parent CVD,
	 * while the others are child CVDs.
	 * The parent CVD's tx descriptor is the only tx descriptor
	 * returned to the DMA user, and it should not be completed
	 * until all the child CVDs completed.
	 */
	nr_vd = DIV_ROUND_UP(len, MTK_CQDMA_MAX_LEN);
	cvd = kcalloc(nr_vd, sizeof(*cvd), GFP_NOWAIT);
	if (!cvd)
		return NULL;

	for (i = 0; i < nr_vd; ++i) {
		cvd[i] = kzalloc(sizeof(*cvd[i]), GFP_NOWAIT);
		if (!cvd[i]) {
			for (; i > 0; --i)
				kfree(cvd[i - 1]);
			return NULL;
		}

		/* setup dma channel */
		cvd[i]->ch = c;

		/* setup source, destination, and length */
		tlen = (len > MTK_CQDMA_MAX_LEN) ? MTK_CQDMA_MAX_LEN : len;
		cvd[i]->len = tlen;
		cvd[i]->src = src;
		cvd[i]->dest = dest;

		/* setup tx descriptor */
		tx = vchan_tx_prep(to_virt_chan(c), &cvd[i]->vd, flags);
		tx->next = NULL;

		if (!i) {
			cvd[0]->residue = len;
		} else {
			prev_tx->next = tx;
			cvd[i]->residue = tlen;
		}

		cvd[i]->parent = cvd[0];

		/* update the src, dest, len, prev_tx for the next CVD */
		src += tlen;
		dest += tlen;
		len -= tlen;
		prev_tx = tx;
	}

	return &cvd[0]->vd.tx;
}
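
/*
 * Channel teardown is split in two below: descriptors that are still only
 * on the VC lists (allocated/submitted/issued) can be freed directly, while
 * descriptors already queued on the PC must be waited for via
 * issue_completion before the VC is torn down.
 */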
static void mtk_cqdma_free_inactive_desc(struct dma_chan *c)
{
	struct virt_dma_chan *vc = to_virt_chan(c);
	unsigned long flags;
	LIST_HEAD(head);

	/*
	 * set desc_allocated, desc_submitted,
	 * and desc_issued as the candidates to be freed
	 */
	spin_lock_irqsave(&vc->lock, flags);
	list_splice_tail_init(&vc->desc_allocated, &head);
	list_splice_tail_init(&vc->desc_submitted, &head);
	list_splice_tail_init(&vc->desc_issued, &head);
	spin_unlock_irqrestore(&vc->lock, flags);

	/* free descriptor lists */
	vchan_dma_desc_free_list(vc, &head);
}
static void mtk_cqdma_free_active_desc(struct dma_chan *c)
{
	struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c);
	bool sync_needed = false;
	unsigned long pc_flags;
	unsigned long vc_flags;

	/* acquire PC's lock first due to lock dependency in dma ISR */
	spin_lock_irqsave(&cvc->pc->lock, pc_flags);
	spin_lock_irqsave(&cvc->vc.lock, vc_flags);

	/* synchronization is required if this VC is active */
	if (mtk_cqdma_is_vchan_active(cvc)) {
		cvc->issue_synchronize = true;
		sync_needed = true;
	}

	spin_unlock_irqrestore(&cvc->vc.lock, vc_flags);
	spin_unlock_irqrestore(&cvc->pc->lock, pc_flags);

	/* waiting for the completion of this VC */
	if (sync_needed)
		wait_for_completion(&cvc->issue_completion);

	/* free all descriptors in list desc_completed */
	vchan_synchronize(&cvc->vc);

	WARN_ONCE(!list_empty(&cvc->vc.desc_completed),
		  "Desc pending still in list desc_completed\n");
}
static int mtk_cqdma_terminate_all(struct dma_chan *c)
{
	/* free descriptors not processed yet by hardware */
	mtk_cqdma_free_inactive_desc(c);

	/* free descriptors being processed by hardware */
	mtk_cqdma_free_active_desc(c);

	return 0;
}
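
/*
 * VC-to-PC mapping policy: a channel request binds the VC to the PC with
 * the smallest refcount, so VCs are spread across the physical channels.
 * The first user of a PC hard-resets it and enables its interrupt; the
 * last user (see mtk_cqdma_free_chan_resources()) flushes and masks it.
 */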
static int mtk_cqdma_alloc_chan_resources(struct dma_chan *c)
{
	struct mtk_cqdma_device *cqdma = to_cqdma_dev(c);
	struct mtk_cqdma_vchan *vc = to_cqdma_vchan(c);
	struct mtk_cqdma_pchan *pc = NULL;
	u32 i, min_refcnt = U32_MAX, refcnt;
	unsigned long flags;

	/* allocate PC with the minimum refcount */
	for (i = 0; i < cqdma->dma_channels; ++i) {
		refcnt = refcount_read(&cqdma->pc[i]->refcnt);
		if (refcnt < min_refcnt) {
			pc = cqdma->pc[i];
			min_refcnt = refcnt;
		}
	}

	if (!pc)
		return -ENOSPC;

	spin_lock_irqsave(&pc->lock, flags);

	if (!refcount_read(&pc->refcnt)) {
		/* allocate PC when the refcount is zero */
		mtk_cqdma_hard_reset(pc);

		/* enable interrupt for this PC */
		mtk_dma_set(pc, MTK_CQDMA_INT_EN, MTK_CQDMA_INT_EN_BIT);

		/*
		 * refcount_inc would complain about incrementing from 0
		 * (a potential use-after-free), so explicitly set the
		 * refcount to 1 for the first user instead.
		 */
		refcount_set(&pc->refcnt, 1);
	} else {
		refcount_inc(&pc->refcnt);
	}

	spin_unlock_irqrestore(&pc->lock, flags);

	vc->pc = pc;

	return 0;
}
static void mtk_cqdma_free_chan_resources(struct dma_chan *c)
{
	struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c);
	unsigned long flags;

	/* free all descriptors in all lists on the VC */
	mtk_cqdma_terminate_all(c);

	spin_lock_irqsave(&cvc->pc->lock, flags);

	/* PC is not freed until there is no VC mapped to it */
	if (refcount_dec_and_test(&cvc->pc->refcnt)) {
		/* start the flush operation and stop the engine */
		mtk_dma_set(cvc->pc, MTK_CQDMA_FLUSH, MTK_CQDMA_FLUSH_BIT);

		/* wait for the completion of flush operation */
		if (mtk_cqdma_poll_engine_done(cvc->pc, true) < 0)
			dev_err(cqdma2dev(to_cqdma_dev(c)), "cqdma flush timeout\n");

		/* clear the flush bit and interrupt flag */
		mtk_dma_clr(cvc->pc, MTK_CQDMA_FLUSH, MTK_CQDMA_FLUSH_BIT);
		mtk_dma_clr(cvc->pc, MTK_CQDMA_INT_FLAG,
			    MTK_CQDMA_INT_FLAG_BIT);

		/* disable interrupt for this PC */
		mtk_dma_clr(cvc->pc, MTK_CQDMA_INT_EN, MTK_CQDMA_INT_EN_BIT);
	}

	spin_unlock_irqrestore(&cvc->pc->lock, flags);
}
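
/*
 * Power/clock bring-up order used below: enable runtime PM and take a PM
 * reference, enable the "cqdma" clock, then hard-reset every PC.  The
 * deinit path (mtk_cqdma_hw_deinit()) undoes the same steps in reverse.
 */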
static int mtk_cqdma_hw_init(struct mtk_cqdma_device *cqdma)
{
	unsigned long flags;
	int err;
	u32 i;

	pm_runtime_enable(cqdma2dev(cqdma));
	pm_runtime_get_sync(cqdma2dev(cqdma));

	err = clk_prepare_enable(cqdma->clk);

	if (err) {
		pm_runtime_put_sync(cqdma2dev(cqdma));
		pm_runtime_disable(cqdma2dev(cqdma));
		return err;
	}

	/* reset all PCs */
	for (i = 0; i < cqdma->dma_channels; ++i) {
		spin_lock_irqsave(&cqdma->pc[i]->lock, flags);
		if (mtk_cqdma_hard_reset(cqdma->pc[i]) < 0) {
			dev_err(cqdma2dev(cqdma), "cqdma hard reset timeout\n");
			spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags);

			clk_disable_unprepare(cqdma->clk);
			pm_runtime_put_sync(cqdma2dev(cqdma));
			pm_runtime_disable(cqdma2dev(cqdma));
			return -EINVAL;
		}
		spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags);
	}

	return 0;
}
static void mtk_cqdma_hw_deinit(struct mtk_cqdma_device *cqdma)
{
	unsigned long flags;
	u32 i;

	/* reset all PCs */
	for (i = 0; i < cqdma->dma_channels; ++i) {
		spin_lock_irqsave(&cqdma->pc[i]->lock, flags);
		if (mtk_cqdma_hard_reset(cqdma->pc[i]) < 0)
			dev_err(cqdma2dev(cqdma), "cqdma hard reset timeout\n");
		spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags);
	}

	clk_disable_unprepare(cqdma->clk);

	pm_runtime_put_sync(cqdma2dev(cqdma));
	pm_runtime_disable(cqdma2dev(cqdma));
}
static const struct of_device_id mtk_cqdma_match[] = {
	{ .compatible = "mediatek,mt6765-cqdma" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mtk_cqdma_match);
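
/*
 * Probe order below: get the "cqdma" clock, describe the dmaengine
 * capabilities (DMA_MEMCPY only), read the optional dma-requests /
 * dma-channels properties, map one register region and one IRQ per PC,
 * register the dma_device and the OF translator, then bring up the
 * hardware and the per-PC tasklets.
 */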
static int mtk_cqdma_probe(struct platform_device *pdev)
{
	struct mtk_cqdma_device *cqdma;
	struct mtk_cqdma_vchan *vc;
	struct dma_device *dd;
	struct resource *res;
	int err;
	u32 i;

	cqdma = devm_kzalloc(&pdev->dev, sizeof(*cqdma), GFP_KERNEL);
	if (!cqdma)
		return -ENOMEM;

	dd = &cqdma->ddev;

	cqdma->clk = devm_clk_get(&pdev->dev, "cqdma");
	if (IS_ERR(cqdma->clk)) {
		dev_err(&pdev->dev, "No clock for %s\n",
			dev_name(&pdev->dev));
		return PTR_ERR(cqdma->clk);
	}

	dma_cap_set(DMA_MEMCPY, dd->cap_mask);

	dd->copy_align = MTK_CQDMA_ALIGN_SIZE;
	dd->device_alloc_chan_resources = mtk_cqdma_alloc_chan_resources;
	dd->device_free_chan_resources = mtk_cqdma_free_chan_resources;
	dd->device_tx_status = mtk_cqdma_tx_status;
	dd->device_issue_pending = mtk_cqdma_issue_pending;
	dd->device_prep_dma_memcpy = mtk_cqdma_prep_dma_memcpy;
	dd->device_terminate_all = mtk_cqdma_terminate_all;
	dd->src_addr_widths = MTK_CQDMA_DMA_BUSWIDTHS;
	dd->dst_addr_widths = MTK_CQDMA_DMA_BUSWIDTHS;
	dd->directions = BIT(DMA_MEM_TO_MEM);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	dd->dev = &pdev->dev;
	INIT_LIST_HEAD(&dd->channels);

	if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
						      "dma-requests",
						      &cqdma->dma_requests)) {
		dev_info(&pdev->dev,
			 "Using %u as missing dma-requests property\n",
			 MTK_CQDMA_NR_VCHANS);

		cqdma->dma_requests = MTK_CQDMA_NR_VCHANS;
	}

	if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
						      "dma-channels",
						      &cqdma->dma_channels)) {
		dev_info(&pdev->dev,
			 "Using %u as missing dma-channels property\n",
			 MTK_CQDMA_NR_PCHANS);

		cqdma->dma_channels = MTK_CQDMA_NR_PCHANS;
	}

	cqdma->pc = devm_kcalloc(&pdev->dev, cqdma->dma_channels,
				 sizeof(*cqdma->pc), GFP_KERNEL);
	if (!cqdma->pc)
		return -ENOMEM;

	/* initialization for PCs */
	for (i = 0; i < cqdma->dma_channels; ++i) {
		cqdma->pc[i] = devm_kcalloc(&pdev->dev, 1,
					    sizeof(**cqdma->pc), GFP_KERNEL);
		if (!cqdma->pc[i])
			return -ENOMEM;

		INIT_LIST_HEAD(&cqdma->pc[i]->queue);
		spin_lock_init(&cqdma->pc[i]->lock);
		refcount_set(&cqdma->pc[i]->refcnt, 0);
		cqdma->pc[i]->base = devm_platform_ioremap_resource(pdev, i);
		if (IS_ERR(cqdma->pc[i]->base))
			return PTR_ERR(cqdma->pc[i]->base);

		/* allocate IRQ resource */
		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
		if (!res) {
			dev_err(&pdev->dev, "No irq resource for %s\n",
				dev_name(&pdev->dev));
			return -EINVAL;
		}
		cqdma->pc[i]->irq = res->start;

		err = devm_request_irq(&pdev->dev, cqdma->pc[i]->irq,
				       mtk_cqdma_irq, 0, dev_name(&pdev->dev),
				       cqdma);
		if (err) {
			dev_err(&pdev->dev,
				"request_irq failed with err %d\n", err);
			return -EINVAL;
		}
	}

	/* allocate resource for VCs */
	cqdma->vc = devm_kcalloc(&pdev->dev, cqdma->dma_requests,
				 sizeof(*cqdma->vc), GFP_KERNEL);
	if (!cqdma->vc)
		return -ENOMEM;

	for (i = 0; i < cqdma->dma_requests; i++) {
		vc = &cqdma->vc[i];
		vc->vc.desc_free = mtk_cqdma_vdesc_free;
		vchan_init(&vc->vc, dd);
		init_completion(&vc->issue_completion);
	}

	err = dma_async_device_register(dd);
	if (err)
		return err;

	err = of_dma_controller_register(pdev->dev.of_node,
					 of_dma_xlate_by_chan_id, cqdma);
	if (err) {
		dev_err(&pdev->dev,
			"MediaTek CQDMA OF registration failed %d\n", err);
		goto err_unregister;
	}

	err = mtk_cqdma_hw_init(cqdma);
	if (err) {
		dev_err(&pdev->dev,
			"MediaTek CQDMA HW initialization failed %d\n", err);
		goto err_unregister;
	}

	platform_set_drvdata(pdev, cqdma);

	/* initialize tasklet for each PC */
	for (i = 0; i < cqdma->dma_channels; ++i)
		tasklet_setup(&cqdma->pc[i]->tasklet, mtk_cqdma_tasklet_cb);

	dev_info(&pdev->dev, "MediaTek CQDMA driver registered\n");

	return 0;

err_unregister:
	dma_async_device_unregister(dd);

	return err;
}
static int mtk_cqdma_remove(struct platform_device *pdev)
{
	struct mtk_cqdma_device *cqdma = platform_get_drvdata(pdev);
	struct mtk_cqdma_vchan *vc;
	unsigned long flags;
	int i;

	/* kill VC task */
	for (i = 0; i < cqdma->dma_requests; i++) {
		vc = &cqdma->vc[i];

		list_del(&vc->vc.chan.device_node);
		tasklet_kill(&vc->vc.task);
	}

	/* disable interrupt */
	for (i = 0; i < cqdma->dma_channels; i++) {
		spin_lock_irqsave(&cqdma->pc[i]->lock, flags);
		mtk_dma_clr(cqdma->pc[i], MTK_CQDMA_INT_EN,
			    MTK_CQDMA_INT_EN_BIT);
		spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags);

		/* Waits for any pending IRQ handlers to complete */
		synchronize_irq(cqdma->pc[i]->irq);

		tasklet_kill(&cqdma->pc[i]->tasklet);
	}

	/* disable hardware */
	mtk_cqdma_hw_deinit(cqdma);

	dma_async_device_unregister(&cqdma->ddev);
	of_dma_controller_free(pdev->dev.of_node);

	return 0;
}
static struct platform_driver mtk_cqdma_driver = {
	.probe = mtk_cqdma_probe,
	.remove = mtk_cqdma_remove,
	.driver = {
		.name           = KBUILD_MODNAME,
		.of_match_table = mtk_cqdma_match,
	},
};
module_platform_driver(mtk_cqdma_driver);

MODULE_DESCRIPTION("MediaTek CQDMA Controller Driver");
MODULE_AUTHOR("Shun-Chih Yu <shun-chih.yu@mediatek.com>");
MODULE_LICENSE("GPL v2");