// SPDX-License-Identifier: GPL-2.0
//
// Copyright (C) 2018 Socionext Inc.
//   Author: Masahiro Yamada <yamada.masahiro@socionext.com>

#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "virt-dma.h"

/* registers common for all channels */
#define UNIPHIER_MDMAC_CMD		0x000	/* issue DMA start/abort */
#define   UNIPHIER_MDMAC_CMD_ABORT	BIT(31)	/* 1: abort, 0: start */

/* per-channel registers */
#define UNIPHIER_MDMAC_CH_OFFSET	0x100
#define UNIPHIER_MDMAC_CH_STRIDE	0x040

#define UNIPHIER_MDMAC_CH_IRQ_STAT	0x010	/* current hw status (RO) */
#define UNIPHIER_MDMAC_CH_IRQ_REQ	0x014	/* latched STAT (WOC) */
#define UNIPHIER_MDMAC_CH_IRQ_EN	0x018	/* IRQ enable mask */
#define UNIPHIER_MDMAC_CH_IRQ_DET	0x01c	/* REQ & EN (RO) */
#define   UNIPHIER_MDMAC_CH_IRQ__ABORT		BIT(13)
#define   UNIPHIER_MDMAC_CH_IRQ__DONE		BIT(1)
#define UNIPHIER_MDMAC_CH_SRC_MODE	0x020	/* mode of source */
#define UNIPHIER_MDMAC_CH_DEST_MODE	0x024	/* mode of destination */
#define   UNIPHIER_MDMAC_CH_MODE__ADDR_INC	(0 << 4)
#define   UNIPHIER_MDMAC_CH_MODE__ADDR_DEC	(1 << 4)
#define   UNIPHIER_MDMAC_CH_MODE__ADDR_FIXED	(2 << 4)
#define UNIPHIER_MDMAC_CH_SRC_ADDR	0x028	/* source address */
#define UNIPHIER_MDMAC_CH_DEST_ADDR	0x02c	/* destination address */
#define UNIPHIER_MDMAC_CH_SIZE		0x030	/* transfer bytes */
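
/*
 * Worked example of the layout above (derived from the offsets, not from
 * the original source): channel n's register bank starts at
 * reg_base + UNIPHIER_MDMAC_CH_OFFSET + UNIPHIER_MDMAC_CH_STRIDE * n.
 * For channel 2 that is reg_base + 0x100 + 0x40 * 2 = reg_base + 0x180,
 * so its IRQ_STAT register sits at reg_base + 0x190.
 */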

#define UNIPHIER_MDMAC_SLAVE_BUSWIDTHS \
	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

struct uniphier_mdmac_desc {
	struct virt_dma_desc vd;
	struct scatterlist *sgl;
	unsigned int sg_len;
	unsigned int sg_cur;
	enum dma_transfer_direction dir;
};

struct uniphier_mdmac_chan {
	struct virt_dma_chan vc;
	struct uniphier_mdmac_device *mdev;
	struct uniphier_mdmac_desc *md;
	void __iomem *reg_ch_base;
	unsigned int chan_id;
};

struct uniphier_mdmac_device {
	struct dma_device ddev;
	struct clk *clk;
	void __iomem *reg_base;
	struct uniphier_mdmac_chan channels[];
};

static struct uniphier_mdmac_chan *
to_uniphier_mdmac_chan(struct virt_dma_chan *vc)
{
	return container_of(vc, struct uniphier_mdmac_chan, vc);
}

static struct uniphier_mdmac_desc *
to_uniphier_mdmac_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct uniphier_mdmac_desc, vd);
}

/* mc->vc.lock must be held by caller */
static struct uniphier_mdmac_desc *
uniphier_mdmac_next_desc(struct uniphier_mdmac_chan *mc)
{
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&mc->vc);
	if (!vd) {
		mc->md = NULL;
		return NULL;
	}

	list_del(&vd->node);

	mc->md = to_uniphier_mdmac_desc(vd);

	return mc->md;
}
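
/*
 * Illustrative note (not in the original driver): callers of
 * uniphier_mdmac_next_desc() and the other helpers marked "mc->vc.lock
 * must be held by caller" could document that contract to lockdep with
 *
 *	lockdep_assert_held(&mc->vc.lock);
 *
 * at the top of each helper; it compiles away on non-lockdep kernels.
 */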

/* mc->vc.lock must be held by caller */
static void uniphier_mdmac_handle(struct uniphier_mdmac_chan *mc,
				  struct uniphier_mdmac_desc *md)
{
	struct uniphier_mdmac_device *mdev = mc->mdev;
	struct scatterlist *sg;
	u32 irq_flag = UNIPHIER_MDMAC_CH_IRQ__DONE;
	u32 src_mode, src_addr, dest_mode, dest_addr, chunk_size;

	sg = &md->sgl[md->sg_cur];

	if (md->dir == DMA_MEM_TO_DEV) {
		src_mode = UNIPHIER_MDMAC_CH_MODE__ADDR_INC;
		src_addr = sg_dma_address(sg);
		dest_mode = UNIPHIER_MDMAC_CH_MODE__ADDR_FIXED;
		dest_addr = 0;
	} else {
		src_mode = UNIPHIER_MDMAC_CH_MODE__ADDR_FIXED;
		src_addr = 0;
		dest_mode = UNIPHIER_MDMAC_CH_MODE__ADDR_INC;
		dest_addr = sg_dma_address(sg);
	}

	chunk_size = sg_dma_len(sg);

	writel(src_mode, mc->reg_ch_base + UNIPHIER_MDMAC_CH_SRC_MODE);
	writel(dest_mode, mc->reg_ch_base + UNIPHIER_MDMAC_CH_DEST_MODE);
	writel(src_addr, mc->reg_ch_base + UNIPHIER_MDMAC_CH_SRC_ADDR);
	writel(dest_addr, mc->reg_ch_base + UNIPHIER_MDMAC_CH_DEST_ADDR);
	writel(chunk_size, mc->reg_ch_base + UNIPHIER_MDMAC_CH_SIZE);

	/* write 1 to clear */
	writel(irq_flag, mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_REQ);

	writel(irq_flag, mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_EN);

	writel(BIT(mc->chan_id), mdev->reg_base + UNIPHIER_MDMAC_CMD);
}
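
/*
 * Worked example of the final CMD write above (derived from the register
 * definitions, not from the original source): starting channel 0 writes
 * BIT(0) = 0x00000001 to UNIPHIER_MDMAC_CMD, while aborting it writes
 * UNIPHIER_MDMAC_CMD_ABORT | BIT(0) = 0x80000001; bit 31 selects abort
 * and the low bits select the channel.
 */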

/* mc->vc.lock must be held by caller */
static void uniphier_mdmac_start(struct uniphier_mdmac_chan *mc)
{
	struct uniphier_mdmac_desc *md;

	md = uniphier_mdmac_next_desc(mc);
	if (md)
		uniphier_mdmac_handle(mc, md);
}

/* mc->vc.lock must be held by caller */
static int uniphier_mdmac_abort(struct uniphier_mdmac_chan *mc)
{
	struct uniphier_mdmac_device *mdev = mc->mdev;
	u32 irq_flag = UNIPHIER_MDMAC_CH_IRQ__ABORT;
	u32 val;

	/* write 1 to clear */
	writel(irq_flag, mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_REQ);

	writel(UNIPHIER_MDMAC_CMD_ABORT | BIT(mc->chan_id),
	       mdev->reg_base + UNIPHIER_MDMAC_CMD);

	/*
	 * Abort should be accepted soon. We poll the bit here instead of
	 * waiting for the interrupt.
	 */
	return readl_poll_timeout(mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_REQ,
				  val, val & irq_flag, 0, 20);
}
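
/*
 * For reference (standard <linux/iopoll.h> semantics, not driver-specific):
 * readl_poll_timeout() above re-reads IRQ_REQ with no delay between reads
 * (delay_us = 0) for at most 20 us (timeout_us = 20). It returns 0 once
 * "val & irq_flag" becomes true and -ETIMEDOUT otherwise, so a stuck abort
 * surfaces as an error code from uniphier_mdmac_abort().
 */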

static irqreturn_t uniphier_mdmac_interrupt(int irq, void *dev_id)
{
	struct uniphier_mdmac_chan *mc = dev_id;
	struct uniphier_mdmac_desc *md;
	irqreturn_t ret = IRQ_HANDLED;
	u32 irq_stat;

	spin_lock(&mc->vc.lock);

	irq_stat = readl(mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_DET);

	/*
	 * Some channels share a single interrupt line. If the IRQ status is 0,
	 * this is probably triggered by a different channel.
	 */
	if (!irq_stat) {
		ret = IRQ_NONE;
		goto out;
	}

	/* write 1 to clear */
	writel(irq_stat, mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_REQ);

	/*
	 * UNIPHIER_MDMAC_CH_IRQ__DONE interrupt is asserted even when the DMA
	 * is aborted. To distinguish the normal completion and the abort,
	 * check mc->md. If it is NULL, we are aborting.
	 */
	md = mc->md;
	if (!md)
		goto out;

	md->sg_cur++;

	if (md->sg_cur >= md->sg_len) {
		vchan_cookie_complete(&md->vd);
		md = uniphier_mdmac_next_desc(mc);
		if (!md)
			goto out;
	}

	uniphier_mdmac_handle(mc, md);

out:
	spin_unlock(&mc->vc.lock);

	return ret;
}

static void uniphier_mdmac_free_chan_resources(struct dma_chan *chan)
{
	vchan_free_chan_resources(to_virt_chan(chan));
}

static struct dma_async_tx_descriptor *
uniphier_mdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			     unsigned int sg_len,
			     enum dma_transfer_direction direction,
			     unsigned long flags, void *context)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_mdmac_desc *md;

	if (!is_slave_direction(direction))
		return NULL;

	md = kzalloc(sizeof(*md), GFP_NOWAIT);
	if (!md)
		return NULL;

	md->sgl = sgl;
	md->sg_len = sg_len;
	md->dir = direction;

	return vchan_tx_prep(vc, &md->vd, flags);
}
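
/*
 * A minimal consumer-side sketch (illustrative only, not part of this
 * driver): a peripheral driver reaches the prep hook above through the
 * generic dmaengine API. The channel name "rx" and my_done_callback are
 * placeholders; error handling is omitted.
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "rx");
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
 *				     DMA_PREP_INTERRUPT);
 *	tx->callback = my_done_callback;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */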

static int uniphier_mdmac_terminate_all(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_mdmac_chan *mc = to_uniphier_mdmac_chan(vc);
	unsigned long flags;
	int ret = 0;
	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);

	if (mc->md) {
		vchan_terminate_vdesc(&mc->md->vd);
		mc->md = NULL;
		ret = uniphier_mdmac_abort(mc);
	}
	vchan_get_all_descriptors(vc, &head);

	spin_unlock_irqrestore(&vc->lock, flags);

	vchan_dma_desc_free_list(vc, &head);

	return ret;
}
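
/*
 * Design note (an observation about the code above, not original
 * commentary): the terminated descriptors are collected under vc->lock but
 * handed to vchan_dma_desc_free_list() only after the lock is dropped, so
 * the ->desc_free callbacks run without the channel spinlock held. This is
 * the usual virt-dma pattern.
 */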

static void uniphier_mdmac_synchronize(struct dma_chan *chan)
{
	vchan_synchronize(to_virt_chan(chan));
}

static enum dma_status uniphier_mdmac_tx_status(struct dma_chan *chan,
						dma_cookie_t cookie,
						struct dma_tx_state *txstate)
{
	struct virt_dma_chan *vc;
	struct virt_dma_desc *vd;
	struct uniphier_mdmac_chan *mc;
	struct uniphier_mdmac_desc *md = NULL;
	enum dma_status stat;
	unsigned long flags;
	int i;

	stat = dma_cookie_status(chan, cookie, txstate);
	/* Return immediately if we do not need to compute the residue. */
	if (stat == DMA_COMPLETE || !txstate)
		return stat;

	vc = to_virt_chan(chan);

	spin_lock_irqsave(&vc->lock, flags);

	mc = to_uniphier_mdmac_chan(vc);

	if (mc->md && mc->md->vd.tx.cookie == cookie) {
		/* residue from the on-flight chunk */
		txstate->residue = readl(mc->reg_ch_base +
					 UNIPHIER_MDMAC_CH_SIZE);
		md = mc->md;
	}

	if (!md) {
		vd = vchan_find_desc(vc, cookie);
		if (vd)
			md = to_uniphier_mdmac_desc(vd);
	}

	if (md) {
		/* residue from the queued chunks */
		for (i = md->sg_cur; i < md->sg_len; i++)
			txstate->residue += sg_dma_len(&md->sgl[i]);
	}

	spin_unlock_irqrestore(&vc->lock, flags);

	return stat;
}
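
/*
 * Worked example of the residue computation above (illustrative numbers,
 * not from the original source): for a descriptor that is still queued
 * (found via vchan_find_desc(), sg_cur == 0) with sg_len == 3 and
 * sg_dma_len() == 0x1000 for every entry, the loop reports a residue of
 * 3 * 0x1000 = 0x3000 bytes, i.e. nothing has been transferred yet.
 */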

static void uniphier_mdmac_issue_pending(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_mdmac_chan *mc = to_uniphier_mdmac_chan(vc);
	unsigned long flags;

	spin_lock_irqsave(&vc->lock, flags);

	if (vchan_issue_pending(vc) && !mc->md)
		uniphier_mdmac_start(mc);

	spin_unlock_irqrestore(&vc->lock, flags);
}

static void uniphier_mdmac_desc_free(struct virt_dma_desc *vd)
{
	kfree(to_uniphier_mdmac_desc(vd));
}

static int uniphier_mdmac_chan_init(struct platform_device *pdev,
				    struct uniphier_mdmac_device *mdev,
				    int chan_id)
{
	struct device *dev = &pdev->dev;
	struct uniphier_mdmac_chan *mc = &mdev->channels[chan_id];
	char *irq_name;
	int irq, ret;

	irq = platform_get_irq(pdev, chan_id);
	if (irq < 0)
		return irq;

	irq_name = devm_kasprintf(dev, GFP_KERNEL, "uniphier-mio-dmac-ch%d",
				  chan_id);
	if (!irq_name)
		return -ENOMEM;

	ret = devm_request_irq(dev, irq, uniphier_mdmac_interrupt,
			       IRQF_SHARED, irq_name, mc);
	if (ret)
		return ret;

	mc->mdev = mdev;
	mc->reg_ch_base = mdev->reg_base + UNIPHIER_MDMAC_CH_OFFSET +
					UNIPHIER_MDMAC_CH_STRIDE * chan_id;
	mc->chan_id = chan_id;
	mc->vc.desc_free = uniphier_mdmac_desc_free;
	vchan_init(&mc->vc, &mdev->ddev);

	return 0;
}

static int uniphier_mdmac_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct uniphier_mdmac_device *mdev;
	struct dma_device *ddev;
	int nr_chans, ret, i;

	nr_chans = platform_irq_count(pdev);
	if (nr_chans < 0)
		return nr_chans;

	ret = dma_set_mask(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	mdev = devm_kzalloc(dev, struct_size(mdev, channels, nr_chans),
			    GFP_KERNEL);
	if (!mdev)
		return -ENOMEM;

	mdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mdev->reg_base))
		return PTR_ERR(mdev->reg_base);

	mdev->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(mdev->clk)) {
		dev_err(dev, "failed to get clock\n");
		return PTR_ERR(mdev->clk);
	}

	ret = clk_prepare_enable(mdev->clk);
	if (ret)
		return ret;

	ddev = &mdev->ddev;
	ddev->dev = dev;
	dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
	ddev->src_addr_widths = UNIPHIER_MDMAC_SLAVE_BUSWIDTHS;
	ddev->dst_addr_widths = UNIPHIER_MDMAC_SLAVE_BUSWIDTHS;
	ddev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	ddev->device_free_chan_resources = uniphier_mdmac_free_chan_resources;
	ddev->device_prep_slave_sg = uniphier_mdmac_prep_slave_sg;
	ddev->device_terminate_all = uniphier_mdmac_terminate_all;
	ddev->device_synchronize = uniphier_mdmac_synchronize;
	ddev->device_tx_status = uniphier_mdmac_tx_status;
	ddev->device_issue_pending = uniphier_mdmac_issue_pending;
	INIT_LIST_HEAD(&ddev->channels);

	for (i = 0; i < nr_chans; i++) {
		ret = uniphier_mdmac_chan_init(pdev, mdev, i);
		if (ret)
			goto disable_clk;
	}

	ret = dma_async_device_register(ddev);
	if (ret)
		goto disable_clk;

	ret = of_dma_controller_register(dev->of_node, of_dma_xlate_by_chan_id,
					 ddev);
	if (ret)
		goto unregister_dmac;

	platform_set_drvdata(pdev, mdev);

	return 0;

unregister_dmac:
	dma_async_device_unregister(ddev);
disable_clk:
	clk_disable_unprepare(mdev->clk);

	return ret;
}

static int uniphier_mdmac_remove(struct platform_device *pdev)
{
	struct uniphier_mdmac_device *mdev = platform_get_drvdata(pdev);
	struct dma_chan *chan;
	int ret;

	/*
	 * Before reaching here, almost all descriptors have been freed by the
	 * ->device_free_chan_resources() hook. However, each channel might
	 * be still holding one descriptor that was on-flight at that moment.
	 * Terminate it to make sure this hardware is no longer running. Then,
	 * free the channel resources once again to avoid memory leak.
	 */
	list_for_each_entry(chan, &mdev->ddev.channels, device_node) {
		ret = dmaengine_terminate_sync(chan);
		if (ret)
			return ret;
		uniphier_mdmac_free_chan_resources(chan);
	}

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&mdev->ddev);
	clk_disable_unprepare(mdev->clk);

	return 0;
}

static const struct of_device_id uniphier_mdmac_match[] = {
	{ .compatible = "socionext,uniphier-mio-dmac" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, uniphier_mdmac_match);
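
/*
 * Usage note (inferred from the code, not original commentary): because
 * probe() registers of_dma_xlate_by_chan_id, a devicetree client selects a
 * channel by bare index (#dma-cells = <1>, e.g. dmas = <&dmac 4>), and the
 * number of channels is taken from the number of interrupts listed in the
 * controller node (platform_irq_count() in probe()).
 */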

static struct platform_driver uniphier_mdmac_driver = {
	.probe = uniphier_mdmac_probe,
	.remove = uniphier_mdmac_remove,
	.driver = {
		.name = "uniphier-mio-dmac",
		.of_match_table = uniphier_mdmac_match,
	},
};
module_platform_driver(uniphier_mdmac_driver);

MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
MODULE_DESCRIPTION("UniPhier MIO DMAC driver");
MODULE_LICENSE("GPL v2");