// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas RZ/G2L DMA Controller Driver
 *
 * Copyright (C) 2021 Renesas Electronics Corp.
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 */

#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"
#include "../virt-dma.h"

enum rz_dmac_prep_type {
	RZ_DMAC_DESC_MEMCPY,
	RZ_DMAC_DESC_SLAVE_SG,
};

/* Hardware link-mode descriptor layout */
struct rz_lmdesc {
	u32 header;
	u32 sa;
	u32 da;
	u32 tb;
	u32 chcfg;
	u32 chitvl;
	u32 chext;
	u32 nxla;
};

struct rz_dmac_desc {
	struct virt_dma_desc vd;
	dma_addr_t src;
	dma_addr_t dest;
	size_t len;
	struct list_head node;
	enum dma_transfer_direction direction;
	enum rz_dmac_prep_type type;
	/* For slave sg */
	struct scatterlist *sg;
	unsigned int sgcount;
};

#define to_rz_dmac_desc(d)	container_of(d, struct rz_dmac_desc, vd)

struct rz_dmac_chan {
	struct virt_dma_chan vc;
	void __iomem *ch_base;
	void __iomem *ch_cmn_base;
	unsigned int index;
	int irq;
	struct rz_dmac_desc *desc;
	int descs_allocated;

	dma_addr_t src_per_address;
	dma_addr_t dst_per_address;

	u32 chcfg;
	u32 chctrl;
	int mid_rid;

	struct list_head ld_free;
	struct list_head ld_queue;
	struct list_head ld_active;

	struct {
		struct rz_lmdesc *base;
		struct rz_lmdesc *head;
		struct rz_lmdesc *tail;
		dma_addr_t base_dma;
	} lmdesc;
};

#define to_rz_dmac_chan(c)	container_of(c, struct rz_dmac_chan, vc.chan)

struct rz_dmac {
	struct dma_device engine;
	struct device *dev;
	struct reset_control *rstc;
	void __iomem *base;
	void __iomem *ext_base;

	unsigned int n_channels;
	struct rz_dmac_chan *channels;

	DECLARE_BITMAP(modules, 1024);
};

#define to_rz_dmac(d)	container_of(d, struct rz_dmac, engine)

/*
 * -----------------------------------------------------------------------------
 * Registers
 */

#define CHSTAT				0x0024
#define CHCTRL				0x0028
#define CHCFG				0x002c
#define NXLA				0x0038

#define DCTRL				0x0000

#define EACH_CHANNEL_OFFSET		0x0040
#define CHANNEL_0_7_OFFSET		0x0000
#define CHANNEL_0_7_COMMON_BASE		0x0300
#define CHANNEL_8_15_OFFSET		0x0400
#define CHANNEL_8_15_COMMON_BASE	0x0700

#define CHSTAT_ER			BIT(4)
#define CHSTAT_EN			BIT(0)

#define CHCTRL_CLRINTMSK		BIT(17)
#define CHCTRL_CLRSUS			BIT(9)
#define CHCTRL_CLRTC			BIT(6)
#define CHCTRL_CLREND			BIT(5)
#define CHCTRL_CLRRQ			BIT(4)
#define CHCTRL_SWRST			BIT(3)
#define CHCTRL_STG			BIT(2)
#define CHCTRL_CLREN			BIT(1)
#define CHCTRL_SETEN			BIT(0)
#define CHCTRL_DEFAULT			(CHCTRL_CLRINTMSK | CHCTRL_CLRSUS | \
					 CHCTRL_CLRTC | CHCTRL_CLREND | \
					 CHCTRL_CLRRQ | CHCTRL_SWRST | \
					 CHCTRL_CLREN)

#define CHCFG_DMS			BIT(31)
#define CHCFG_DEM			BIT(24)
#define CHCFG_DAD			BIT(21)
#define CHCFG_SAD			BIT(20)
#define CHCFG_REQD			BIT(3)
#define CHCFG_SEL(bits)			((bits) & 0x07)
#define CHCFG_MEM_COPY			(0x80400008)
#define CHCFG_FILL_DDS_MASK		GENMASK(19, 16)
#define CHCFG_FILL_SDS_MASK		GENMASK(15, 12)
#define CHCFG_FILL_TM(a)		(((a) & BIT(5)) << 22)
#define CHCFG_FILL_AM(a)		(((a) & GENMASK(4, 2)) << 6)
#define CHCFG_FILL_LVL(a)		(((a) & BIT(1)) << 5)
#define CHCFG_FILL_HIEN(a)		(((a) & BIT(0)) << 5)

#define MID_RID_MASK			GENMASK(9, 0)
#define CHCFG_MASK			GENMASK(15, 10)
#define CHCFG_DS_INVALID		0xFF
#define DCTRL_LVINT			BIT(1)
#define DCTRL_PR			BIT(0)
#define DCTRL_DEFAULT			(DCTRL_LVINT | DCTRL_PR)

/* LINK MODE DESCRIPTOR */
#define HEADER_LV			BIT(0)

#define RZ_DMAC_MAX_CHAN_DESCRIPTORS	16
#define RZ_DMAC_MAX_CHANNELS		16
#define DMAC_NR_LMDESC			64

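/*
 * Layout of the single DT specifier cell, as consumed by
 * rz_dmac_chan_filter() below: bits 9:0 carry the MID/RID (MID_RID_MASK)
 * and bits 15:10 carry the CHCFG request settings (CHCFG_MASK), which the
 * CHCFG_FILL_* helpers relocate into their positions in the CHCFG register.
 */
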
/*
 * -----------------------------------------------------------------------------
 * Device access
 */

static void rz_dmac_writel(struct rz_dmac *dmac, unsigned int val,
			   unsigned int offset)
{
	writel(val, dmac->base + offset);
}

static void rz_dmac_ext_writel(struct rz_dmac *dmac, unsigned int val,
			       unsigned int offset)
{
	writel(val, dmac->ext_base + offset);
}

static u32 rz_dmac_ext_readl(struct rz_dmac *dmac, unsigned int offset)
{
	return readl(dmac->ext_base + offset);
}

static void rz_dmac_ch_writel(struct rz_dmac_chan *channel, unsigned int val,
			      unsigned int offset, int which)
{
	if (which)
		writel(val, channel->ch_base + offset);
	else
		writel(val, channel->ch_cmn_base + offset);
}

static u32 rz_dmac_ch_readl(struct rz_dmac_chan *channel,
			    unsigned int offset, int which)
{
	if (which)
		return readl(channel->ch_base + offset);

	return readl(channel->ch_cmn_base + offset);
}

/*
 * -----------------------------------------------------------------------------
 * Initialization
 */

static void rz_lmdesc_setup(struct rz_dmac_chan *channel,
			    struct rz_lmdesc *lmdesc)
{
	dma_addr_t nxla;

	channel->lmdesc.base = lmdesc;
	channel->lmdesc.head = lmdesc;
	channel->lmdesc.tail = lmdesc;
	nxla = channel->lmdesc.base_dma;
	while (lmdesc < (channel->lmdesc.base + (DMAC_NR_LMDESC - 1))) {
		lmdesc->header = 0;
		nxla += sizeof(*lmdesc);
		lmdesc->nxla = nxla;
		lmdesc++;
	}

	/* Last entry: link back to the first descriptor, closing the ring. */
	lmdesc->header = 0;
	lmdesc->nxla = channel->lmdesc.base_dma;
}

/*
 * -----------------------------------------------------------------------------
 * Descriptors preparation
 */

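/*
 * Link-mode descriptors live in a per-channel ring of DMAC_NR_LMDESC
 * entries. An entry is marked valid with HEADER_LV when it is queued;
 * entries whose LV bit is no longer set have been consumed, and
 * rz_dmac_lmdesc_recycle() advances the head pointer past them so they can
 * be reused.
 */
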
static void rz_dmac_lmdesc_recycle(struct rz_dmac_chan *channel)
{
	struct rz_lmdesc *lmdesc = channel->lmdesc.head;

	while (!(lmdesc->header & HEADER_LV)) {
		lmdesc->header = 0;
		lmdesc++;
		if (lmdesc >= (channel->lmdesc.base + DMAC_NR_LMDESC))
			lmdesc = channel->lmdesc.base;
	}

	channel->lmdesc.head = lmdesc;
}

static void rz_dmac_enable_hw(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	unsigned long flags;
	u32 nxla;
	u32 chctrl;
	u32 chstat;

	dev_dbg(dmac->dev, "%s channel %d\n", __func__, channel->index);

	local_irq_save(flags);

	rz_dmac_lmdesc_recycle(channel);

	nxla = channel->lmdesc.base_dma +
		(sizeof(struct rz_lmdesc) * (channel->lmdesc.head -
					     channel->lmdesc.base));

	chstat = rz_dmac_ch_readl(channel, CHSTAT, 1);
	if (!(chstat & CHSTAT_EN)) {
		chctrl = (channel->chctrl | CHCTRL_SETEN);
		rz_dmac_ch_writel(channel, nxla, NXLA, 1);
		rz_dmac_ch_writel(channel, channel->chcfg, CHCFG, 1);
		rz_dmac_ch_writel(channel, CHCTRL_SWRST, CHCTRL, 1);
		rz_dmac_ch_writel(channel, chctrl, CHCTRL, 1);
	}

	local_irq_restore(flags);
}

static void rz_dmac_disable_hw(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	unsigned long flags;

	dev_dbg(dmac->dev, "%s channel %d\n", __func__, channel->index);

	local_irq_save(flags);
	rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
	local_irq_restore(flags);
}

static void rz_dmac_set_dmars_register(struct rz_dmac *dmac, int nr, u32 dmars)
{
	u32 dmars_offset = (nr / 2) * 4;
	u32 shift = (nr % 2) * 16;
	u32 dmars32;

	dmars32 = rz_dmac_ext_readl(dmac, dmars_offset);
	dmars32 &= ~(0xffff << shift);
	dmars32 |= dmars << shift;

	rz_dmac_ext_writel(dmac, dmars32, dmars_offset);
}

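/*
 * A memcpy transfer uses a single link-mode descriptor with the fixed
 * CHCFG_MEM_COPY configuration and no DMARS request source; the channel is
 * kicked by software trigger (CHCTRL_STG) instead of a peripheral request.
 */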
static void rz_dmac_prepare_desc_for_memcpy(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_lmdesc *lmdesc = channel->lmdesc.tail;
	struct rz_dmac_desc *d = channel->desc;
	u32 chcfg = CHCFG_MEM_COPY;

	/* prepare descriptor */
	lmdesc->sa = d->src;
	lmdesc->da = d->dest;
	lmdesc->tb = d->len;
	lmdesc->chcfg = chcfg;
	lmdesc->chitvl = 0;
	lmdesc->chext = 0;
	lmdesc->header = HEADER_LV;

	rz_dmac_set_dmars_register(dmac, channel->index, 0);

	channel->chcfg = chcfg;
	channel->chctrl = CHCTRL_STG | CHCTRL_SETEN;
}

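/*
 * Build one link-mode descriptor per scatterlist entry. CHCFG_DEM masks
 * the end-of-DMA interrupt and is cleared only on the last descriptor, so
 * a single completion interrupt fires for the whole scatter/gather list.
 */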
static void rz_dmac_prepare_descs_for_slave_sg(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_dmac_desc *d = channel->desc;
	struct scatterlist *sg, *sgl = d->sg;
	struct rz_lmdesc *lmdesc;
	unsigned int i, sg_len = d->sgcount;

	channel->chcfg |= CHCFG_SEL(channel->index) | CHCFG_DEM | CHCFG_DMS;

	if (d->direction == DMA_DEV_TO_MEM) {
		channel->chcfg |= CHCFG_SAD;
		channel->chcfg &= ~CHCFG_REQD;
	} else {
		channel->chcfg |= CHCFG_DAD | CHCFG_REQD;
	}

	lmdesc = channel->lmdesc.tail;

	for (i = 0, sg = sgl; i < sg_len; i++, sg = sg_next(sg)) {
		if (d->direction == DMA_DEV_TO_MEM) {
			lmdesc->sa = channel->src_per_address;
			lmdesc->da = sg_dma_address(sg);
		} else {
			lmdesc->sa = sg_dma_address(sg);
			lmdesc->da = channel->dst_per_address;
		}

		lmdesc->tb = sg_dma_len(sg);
		lmdesc->chitvl = 0;
		lmdesc->chext = 0;
		if (i == (sg_len - 1)) {
			lmdesc->chcfg = (channel->chcfg & ~CHCFG_DEM);
			lmdesc->header = HEADER_LV;
		} else {
			lmdesc->chcfg = channel->chcfg;
			lmdesc->header = HEADER_LV;
		}

		if (++lmdesc >= (channel->lmdesc.base + DMAC_NR_LMDESC))
			lmdesc = channel->lmdesc.base;
	}

	channel->lmdesc.tail = lmdesc;

	rz_dmac_set_dmars_register(dmac, channel->index, channel->mid_rid);
	channel->chctrl = CHCTRL_SETEN;
}

static int rz_dmac_xfer_desc(struct rz_dmac_chan *chan)
{
	struct rz_dmac_desc *d = chan->desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd)
		return 0;

	list_del(&vd->node);

	switch (d->type) {
	case RZ_DMAC_DESC_MEMCPY:
		rz_dmac_prepare_desc_for_memcpy(chan);
		break;

	case RZ_DMAC_DESC_SLAVE_SG:
		rz_dmac_prepare_descs_for_slave_sg(chan);
		break;

	default:
		return -EINVAL;
	}

	rz_dmac_enable_hw(chan);

	return 0;
}

/*
 * -----------------------------------------------------------------------------
 * DMA engine operations
 */

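/*
 * Note: descriptors are preallocated here and only moved between the
 * ld_free, ld_queue and ld_active lists afterwards, so the transfer hot
 * path does no memory allocation (see rz_dmac_virt_desc_free()).
 */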
static int rz_dmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);

	while (channel->descs_allocated < RZ_DMAC_MAX_CHAN_DESCRIPTORS) {
		struct rz_dmac_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;

		list_add_tail(&desc->node, &channel->ld_free);
		channel->descs_allocated++;
	}

	if (!channel->descs_allocated)
		return -ENOMEM;

	return channel->descs_allocated;
}

static void rz_dmac_free_chan_resources(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_lmdesc *lmdesc = channel->lmdesc.base;
	struct rz_dmac_desc *desc, *_desc;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&channel->vc.lock, flags);

	for (i = 0; i < DMAC_NR_LMDESC; i++)
		lmdesc[i].header = 0;

	rz_dmac_disable_hw(channel);
	list_splice_tail_init(&channel->ld_active, &channel->ld_free);
	list_splice_tail_init(&channel->ld_queue, &channel->ld_free);

	if (channel->mid_rid >= 0) {
		clear_bit(channel->mid_rid, dmac->modules);
		channel->mid_rid = -EINVAL;
	}

	spin_unlock_irqrestore(&channel->vc.lock, flags);

	list_for_each_entry_safe(desc, _desc, &channel->ld_free, node) {
		kfree(desc);
		channel->descs_allocated--;
	}

	INIT_LIST_HEAD(&channel->ld_free);
	vchan_free_chan_resources(&channel->vc);
}

static struct dma_async_tx_descriptor *
rz_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
			size_t len, unsigned long flags)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_dmac_desc *desc;

	dev_dbg(dmac->dev, "%s channel: %d src=0x%pad dst=0x%pad len=%zu\n",
		__func__, channel->index, &src, &dest, len);

	if (list_empty(&channel->ld_free))
		return NULL;

	desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);

	desc->type = RZ_DMAC_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->direction = DMA_MEM_TO_MEM;

	list_move_tail(channel->ld_free.next, &channel->ld_queue);
	return vchan_tx_prep(&channel->vc, &desc->vd, flags);
}

static struct dma_async_tx_descriptor *
rz_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		      unsigned int sg_len,
		      enum dma_transfer_direction direction,
		      unsigned long flags, void *context)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac_desc *desc;
	struct scatterlist *sg;
	int dma_length = 0;
	int i = 0;

	if (list_empty(&channel->ld_free))
		return NULL;

	desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg_dma_len(sg);
	}

	desc->type = RZ_DMAC_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;

	if (direction == DMA_DEV_TO_MEM)
		desc->src = channel->src_per_address;
	else
		desc->dest = channel->dst_per_address;

	list_move_tail(channel->ld_free.next, &channel->ld_queue);
	return vchan_tx_prep(&channel->vc, &desc->vd, flags);
}

static int rz_dmac_terminate_all(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	rz_dmac_disable_hw(channel);
	spin_lock_irqsave(&channel->vc.lock, flags);
	list_splice_tail_init(&channel->ld_active, &channel->ld_free);
	list_splice_tail_init(&channel->ld_queue, &channel->ld_free);
	vchan_get_all_descriptors(&channel->vc, &head);
	spin_unlock_irqrestore(&channel->vc.lock, flags);
	vchan_dma_desc_free_list(&channel->vc, &head);

	return 0;
}

static void rz_dmac_issue_pending(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_dmac_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&channel->vc.lock, flags);

	if (!list_empty(&channel->ld_queue)) {
		desc = list_first_entry(&channel->ld_queue,
					struct rz_dmac_desc, node);
		channel->desc = desc;
		if (vchan_issue_pending(&channel->vc)) {
			if (rz_dmac_xfer_desc(channel) < 0)
				dev_warn(dmac->dev, "ch: %d couldn't issue DMA xfer\n",
					 channel->index);
			else
				list_move_tail(channel->ld_queue.next,
					       &channel->ld_active);
		}
	}

	spin_unlock_irqrestore(&channel->vc.lock, flags);
}

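/*
 * The DDS/SDS fields of CHCFG encode a bus width as the index of the
 * matching ds_lut[] entry, i.e. log2 of the width in bytes (1 byte -> 0,
 * 2 bytes -> 1, ..., 128 bytes -> 7).
 */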
static u8 rz_dmac_ds_to_val_mapping(enum dma_slave_buswidth ds)
{
	static const enum dma_slave_buswidth ds_lut[] = {
		DMA_SLAVE_BUSWIDTH_1_BYTE,
		DMA_SLAVE_BUSWIDTH_2_BYTES,
		DMA_SLAVE_BUSWIDTH_4_BYTES,
		DMA_SLAVE_BUSWIDTH_8_BYTES,
		DMA_SLAVE_BUSWIDTH_16_BYTES,
		DMA_SLAVE_BUSWIDTH_32_BYTES,
		DMA_SLAVE_BUSWIDTH_64_BYTES,
		DMA_SLAVE_BUSWIDTH_128_BYTES,
	};
	u8 i;

	for (i = 0; i < ARRAY_SIZE(ds_lut); i++) {
		if (ds_lut[i] == ds)
			return i;
	}

	return CHCFG_DS_INVALID;
}

static int rz_dmac_config(struct dma_chan *chan,
			  struct dma_slave_config *config)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	u32 val;

	channel->dst_per_address = config->dst_addr;
	channel->chcfg &= ~CHCFG_FILL_DDS_MASK;
	if (channel->dst_per_address) {
		val = rz_dmac_ds_to_val_mapping(config->dst_addr_width);
		if (val == CHCFG_DS_INVALID)
			return -EINVAL;

		channel->chcfg |= FIELD_PREP(CHCFG_FILL_DDS_MASK, val);
	}

	channel->src_per_address = config->src_addr;
	channel->chcfg &= ~CHCFG_FILL_SDS_MASK;
	if (channel->src_per_address) {
		val = rz_dmac_ds_to_val_mapping(config->src_addr_width);
		if (val == CHCFG_DS_INVALID)
			return -EINVAL;

		channel->chcfg |= FIELD_PREP(CHCFG_FILL_SDS_MASK, val);
	}

	return 0;
}

static void rz_dmac_virt_desc_free(struct virt_dma_desc *vd)
{
	/*
	 * Place holder.
	 * Descriptor allocation is done during alloc_chan_resources and
	 * freed during free_chan_resources.
	 * The lists are used to manage the descriptors and avoid any memory
	 * allocation/free during DMA read/write.
	 */
}

static void rz_dmac_device_synchronize(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	u32 chstat;
	int ret;

	ret = read_poll_timeout(rz_dmac_ch_readl, chstat, !(chstat & CHSTAT_EN),
				100, 100000, false, channel, CHSTAT, 1);
	if (ret < 0)
		dev_warn(dmac->dev, "DMA Timeout");

	rz_dmac_set_dmars_register(dmac, channel->index, 0);
}

/*
 * -----------------------------------------------------------------------------
 * IRQ handling
 */

static void rz_dmac_irq_handle_channel(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	u32 chstat, chctrl;

	chstat = rz_dmac_ch_readl(channel, CHSTAT, 1);
	if (chstat & CHSTAT_ER) {
		dev_err(dmac->dev, "DMAC err CHSTAT_%d = %08X\n",
			channel->index, chstat);
		rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
		return;
	}

	chctrl = rz_dmac_ch_readl(channel, CHCTRL, 1);
	rz_dmac_ch_writel(channel, chctrl | CHCTRL_CLREND, CHCTRL, 1);
}

static irqreturn_t rz_dmac_irq_handler(int irq, void *dev_id)
{
	struct rz_dmac_chan *channel = dev_id;

	if (channel) {
		rz_dmac_irq_handle_channel(channel);
		return IRQ_WAKE_THREAD;
	}
	/* handle DMAERR irq */
	return IRQ_HANDLED;
}

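/*
 * Completion handling is deferred to the threaded handler below, which may
 * take the vchan lock, complete the finished descriptor and hand the next
 * queued one to the hardware.
 */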
static irqreturn_t rz_dmac_irq_handler_thread(int irq, void *dev_id)
{
	struct rz_dmac_chan *channel = dev_id;
	struct rz_dmac_desc *desc = NULL;
	unsigned long flags;

	spin_lock_irqsave(&channel->vc.lock, flags);

	if (list_empty(&channel->ld_active)) {
		/* Someone might have called terminate all */
		goto out;
	}

	desc = list_first_entry(&channel->ld_active, struct rz_dmac_desc, node);
	vchan_cookie_complete(&desc->vd);
	list_move_tail(channel->ld_active.next, &channel->ld_free);
	if (!list_empty(&channel->ld_queue)) {
		desc = list_first_entry(&channel->ld_queue, struct rz_dmac_desc,
					node);
		channel->desc = desc;
		if (rz_dmac_xfer_desc(channel) == 0)
			list_move_tail(channel->ld_queue.next, &channel->ld_active);
	}
out:
	spin_unlock_irqrestore(&channel->vc.lock, flags);

	return IRQ_HANDLED;
}

/*
 * -----------------------------------------------------------------------------
 * OF xlate and channel filter
 */

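/*
 * A client node selects a channel with a single specifier cell packing the
 * MID/RID and CHCFG request bits, roughly as in the sketch below (the
 * specifier values are hypothetical; see the DT bindings for real ones):
 *
 *	dmas = <&dmac 0x2a9>, <&dmac 0x2aa>;
 *	dma-names = "tx", "rx";
 */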
static bool rz_dmac_chan_filter(struct dma_chan *chan, void *arg)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct of_phandle_args *dma_spec = arg;
	u32 ch_cfg;

	channel->mid_rid = dma_spec->args[0] & MID_RID_MASK;
	ch_cfg = (dma_spec->args[0] & CHCFG_MASK) >> 10;
	channel->chcfg = CHCFG_FILL_TM(ch_cfg) | CHCFG_FILL_AM(ch_cfg) |
			 CHCFG_FILL_LVL(ch_cfg) | CHCFG_FILL_HIEN(ch_cfg);

	/* Each MID/RID pair can be claimed by only one channel at a time. */
	return !test_and_set_bit(channel->mid_rid, dmac->modules);
}

static struct dma_chan *rz_dmac_of_xlate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	dma_cap_mask_t mask;

	if (dma_spec->args_count != 1)
		return NULL;

	/* Only slave DMA channels can be allocated via DT */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, rz_dmac_chan_filter, dma_spec);
}

/*
 * -----------------------------------------------------------------------------
 * Probe and remove
 */

static int rz_dmac_chan_probe(struct rz_dmac *dmac,
			      struct rz_dmac_chan *channel,
			      u8 index)
{
	struct platform_device *pdev = to_platform_device(dmac->dev);
	struct rz_lmdesc *lmdesc;
	char pdev_irqname[6];
	char *irqname;
	int ret;

	channel->index = index;
	channel->mid_rid = -EINVAL;

	/* Request the channel interrupt. */
	scnprintf(pdev_irqname, sizeof(pdev_irqname), "ch%u", index);
	channel->irq = platform_get_irq_byname(pdev, pdev_irqname);
	if (channel->irq < 0)
		return channel->irq;

	irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
				 dev_name(dmac->dev), index);
	if (!irqname)
		return -ENOMEM;

	ret = devm_request_threaded_irq(dmac->dev, channel->irq,
					rz_dmac_irq_handler,
					rz_dmac_irq_handler_thread, 0,
					irqname, channel);
	if (ret) {
		dev_err(dmac->dev, "failed to request IRQ %u (%d)\n",
			channel->irq, ret);
		return ret;
	}

	/* Set io base address for each channel */
	if (index < 8) {
		channel->ch_base = dmac->base + CHANNEL_0_7_OFFSET +
			EACH_CHANNEL_OFFSET * index;
		channel->ch_cmn_base = dmac->base + CHANNEL_0_7_COMMON_BASE;
	} else {
		channel->ch_base = dmac->base + CHANNEL_8_15_OFFSET +
			EACH_CHANNEL_OFFSET * (index - 8);
		channel->ch_cmn_base = dmac->base + CHANNEL_8_15_COMMON_BASE;
	}

	/* Allocate descriptors */
	lmdesc = dma_alloc_coherent(&pdev->dev,
				    sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
				    &channel->lmdesc.base_dma, GFP_KERNEL);
	if (!lmdesc) {
		dev_err(&pdev->dev, "Can't allocate memory (lmdesc)\n");
		return -ENOMEM;
	}
	rz_lmdesc_setup(channel, lmdesc);

	/* Initialize register for each channel */
	rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);

	channel->vc.desc_free = rz_dmac_virt_desc_free;
	vchan_init(&channel->vc, &dmac->engine);
	INIT_LIST_HEAD(&channel->ld_queue);
	INIT_LIST_HEAD(&channel->ld_free);
	INIT_LIST_HEAD(&channel->ld_active);

	return 0;
}

static int rz_dmac_parse_of(struct device *dev, struct rz_dmac *dmac)
{
	struct device_node *np = dev->of_node;
	int ret;

	ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
	if (ret < 0) {
		dev_err(dev, "unable to read dma-channels property\n");
		return ret;
	}

	if (!dmac->n_channels || dmac->n_channels > RZ_DMAC_MAX_CHANNELS) {
		dev_err(dev, "invalid number of channels %u\n", dmac->n_channels);
		return -EINVAL;
	}

	return 0;
}

static int rz_dmac_probe(struct platform_device *pdev)
{
	const char *irqname = "error";
	struct dma_device *engine;
	struct rz_dmac *dmac;
	int channel_num;
	unsigned int i;
	int ret;
	int irq;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->dev = &pdev->dev;
	platform_set_drvdata(pdev, dmac);

	ret = rz_dmac_parse_of(&pdev->dev, dmac);
	if (ret < 0)
		return ret;

	dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
				      sizeof(*dmac->channels), GFP_KERNEL);
	if (!dmac->channels)
		return -ENOMEM;

	/* Request resources */
	dmac->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	dmac->ext_base = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(dmac->ext_base))
		return PTR_ERR(dmac->ext_base);

	/* Register interrupt handler for error */
	irq = platform_get_irq_byname(pdev, irqname);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, rz_dmac_irq_handler, 0,
			       irqname, NULL);
	if (ret) {
		dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
			irq, ret);
		return ret;
	}

	/* Initialize the channels. */
	INIT_LIST_HEAD(&dmac->engine.channels);

	dmac->rstc = devm_reset_control_array_get_optional_exclusive(&pdev->dev);
	if (IS_ERR(dmac->rstc))
		return dev_err_probe(&pdev->dev, PTR_ERR(dmac->rstc),
				     "failed to get resets\n");

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "pm_runtime_resume_and_get failed\n");
		goto err_pm_disable;
	}

	ret = reset_control_deassert(dmac->rstc);
	if (ret)
		goto err_pm_runtime_put;

	for (i = 0; i < dmac->n_channels; i++) {
		ret = rz_dmac_chan_probe(dmac, &dmac->channels[i], i);
		if (ret < 0)
			goto err;
	}

	/* Register the DMAC as a DMA provider for DT. */
	ret = of_dma_controller_register(pdev->dev.of_node, rz_dmac_of_xlate,
					 NULL);
	if (ret < 0)
		goto err;

	/* Register the DMA engine device. */
	engine = &dmac->engine;
	dma_cap_set(DMA_SLAVE, engine->cap_mask);
	dma_cap_set(DMA_MEMCPY, engine->cap_mask);
	rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_0_7_COMMON_BASE + DCTRL);
	rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_8_15_COMMON_BASE + DCTRL);

	engine->dev = &pdev->dev;

	engine->device_alloc_chan_resources = rz_dmac_alloc_chan_resources;
	engine->device_free_chan_resources = rz_dmac_free_chan_resources;
	engine->device_tx_status = dma_cookie_status;
	engine->device_prep_slave_sg = rz_dmac_prep_slave_sg;
	engine->device_prep_dma_memcpy = rz_dmac_prep_dma_memcpy;
	engine->device_config = rz_dmac_config;
	engine->device_terminate_all = rz_dmac_terminate_all;
	engine->device_issue_pending = rz_dmac_issue_pending;
	engine->device_synchronize = rz_dmac_device_synchronize;

	engine->copy_align = DMAENGINE_ALIGN_1_BYTE;
	dma_set_max_seg_size(engine->dev, U32_MAX);

	ret = dma_async_device_register(engine);
	if (ret < 0) {
		dev_err(&pdev->dev, "unable to register\n");
		goto dma_register_err;
	}
	return 0;

dma_register_err:
	of_dma_controller_free(pdev->dev.of_node);
err:
	channel_num = i ? i - 1 : 0;
	for (i = 0; i < channel_num; i++) {
		struct rz_dmac_chan *channel = &dmac->channels[i];

		dma_free_coherent(&pdev->dev,
				  sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
				  channel->lmdesc.base,
				  channel->lmdesc.base_dma);
	}

	reset_control_assert(dmac->rstc);
err_pm_runtime_put:
	pm_runtime_put(&pdev->dev);
err_pm_disable:
	pm_runtime_disable(&pdev->dev);

	return ret;
}

static void rz_dmac_remove(struct platform_device *pdev)
{
	struct rz_dmac *dmac = platform_get_drvdata(pdev);
	unsigned int i;

	dma_async_device_unregister(&dmac->engine);
	of_dma_controller_free(pdev->dev.of_node);
	for (i = 0; i < dmac->n_channels; i++) {
		struct rz_dmac_chan *channel = &dmac->channels[i];

		dma_free_coherent(&pdev->dev,
				  sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
				  channel->lmdesc.base,
				  channel->lmdesc.base_dma);
	}
	reset_control_assert(dmac->rstc);
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}

static const struct of_device_id of_rz_dmac_match[] = {
	{ .compatible = "renesas,rz-dmac", },
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_rz_dmac_match);

static struct platform_driver rz_dmac_driver = {
	.driver		= {
		.name	= "rz-dmac",
		.of_match_table = of_rz_dmac_match,
	},
	.probe		= rz_dmac_probe,
	.remove		= rz_dmac_remove,
};

module_platform_driver(rz_dmac_driver);

MODULE_DESCRIPTION("Renesas RZ/G2L DMA Controller Driver");
MODULE_AUTHOR("Biju Das <biju.das.jz@bp.renesas.com>");
MODULE_LICENSE("GPL v2");