// SPDX-License-Identifier: GPL-2.0
/*
 * External DMA controller driver for UniPhier SoCs
 * Copyright 2019 Socionext Inc.
 * Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
 */

#include <linux/bitops.h>
#include <linux/bitfield.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define XDMAC_CH_WIDTH		0x100

#define XDMAC_TFA		0x08
#define XDMAC_TFA_MCNT_MASK	GENMASK(23, 16)
#define XDMAC_TFA_MASK		GENMASK(5, 0)
#define XDMAC_SADM		0x10
#define XDMAC_SADM_STW_MASK	GENMASK(25, 24)
#define XDMAC_SADM_SAM		BIT(4)
#define XDMAC_SADM_SAM_FIXED	XDMAC_SADM_SAM
#define XDMAC_SADM_SAM_INC	0
#define XDMAC_DADM		0x14
#define XDMAC_DADM_DTW_MASK	XDMAC_SADM_STW_MASK
#define XDMAC_DADM_DAM		XDMAC_SADM_SAM
#define XDMAC_DADM_DAM_FIXED	XDMAC_SADM_SAM_FIXED
#define XDMAC_DADM_DAM_INC	XDMAC_SADM_SAM_INC
#define XDMAC_EXSAD		0x18
#define XDMAC_EXDAD		0x1c
#define XDMAC_SAD		0x20
#define XDMAC_DAD		0x24
#define XDMAC_ITS		0x28
#define XDMAC_ITS_MASK		GENMASK(25, 0)
#define XDMAC_TNUM		0x2c
#define XDMAC_TNUM_MASK		GENMASK(15, 0)
#define XDMAC_TSS		0x30
#define XDMAC_TSS_REQ		BIT(0)
#define XDMAC_IEN		0x34
#define XDMAC_IEN_ERRIEN	BIT(1)
#define XDMAC_IEN_ENDIEN	BIT(0)
#define XDMAC_STAT		0x40
#define XDMAC_STAT_TENF		BIT(0)
#define XDMAC_IR		0x44
#define XDMAC_IR_ERRF		BIT(1)
#define XDMAC_IR_ENDF		BIT(0)
#define XDMAC_ID		0x48
#define XDMAC_ID_ERRIDF		BIT(1)
#define XDMAC_ID_ENDIDF		BIT(0)

#define XDMAC_MAX_CHANS		16
#define XDMAC_INTERVAL_CLKS	20
#define XDMAC_MAX_WORDS		XDMAC_TNUM_MASK

/* cut the lower bits to maintain alignment of the maximum transfer size */
#define XDMAC_MAX_WORD_SIZE	(XDMAC_ITS_MASK & ~GENMASK(3, 0))
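
/*
 * Each hardware transfer is programmed with a transfer size (ITS) and a
 * burst count (TNUM), so the largest request accepted by the memcpy path
 * is XDMAC_MAX_WORD_SIZE * XDMAC_MAX_WORDS bytes.
 */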

#define UNIPHIER_XDMAC_BUSWIDTHS \
	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
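
/*
 * A prepared descriptor is made up of one or more nodes; each node describes
 * a single hardware transfer of 'burst_size' bytes per burst (programmed
 * into XDMAC_ITS) repeated 'nr_burst' times (programmed into XDMAC_TNUM).
 */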
struct uniphier_xdmac_desc_node {
	dma_addr_t src;
	dma_addr_t dst;
	u32 burst_size;
	u32 nr_burst;
};

struct uniphier_xdmac_desc {
	struct virt_dma_desc vd;

	unsigned int nr_node;
	unsigned int cur_node;
	enum dma_transfer_direction dir;
	struct uniphier_xdmac_desc_node nodes[] __counted_by(nr_node);
};

struct uniphier_xdmac_chan {
	struct virt_dma_chan vc;
	struct uniphier_xdmac_device *xdev;
	struct uniphier_xdmac_desc *xd;
	void __iomem *reg_ch_base;
	struct dma_slave_config sconfig;
	int id;
	unsigned int req_factor;
};

struct uniphier_xdmac_device {
	struct dma_device ddev;
	void __iomem *reg_base;
	int nr_chans;
	struct uniphier_xdmac_chan channels[] __counted_by(nr_chans);
};

static struct uniphier_xdmac_chan *
to_uniphier_xdmac_chan(struct virt_dma_chan *vc)
{
	return container_of(vc, struct uniphier_xdmac_chan, vc);
}

static struct uniphier_xdmac_desc *
to_uniphier_xdmac_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct uniphier_xdmac_desc, vd);
}

/* xc->vc.lock must be held by caller */
static struct uniphier_xdmac_desc *
uniphier_xdmac_next_desc(struct uniphier_xdmac_chan *xc)
{
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&xc->vc);
	if (!vd)
		return NULL;

	list_del(&vd->node);

	return to_uniphier_xdmac_desc(vd);
}

/* xc->vc.lock must be held by caller */
static void uniphier_xdmac_chan_start(struct uniphier_xdmac_chan *xc,
				      struct uniphier_xdmac_desc *xd)
{
	u32 src_mode, src_width;
	u32 dst_mode, dst_width;
	dma_addr_t src_addr, dst_addr;
	u32 val, its, tnum;
	enum dma_slave_buswidth buswidth;

	src_addr = xd->nodes[xd->cur_node].src;
	dst_addr = xd->nodes[xd->cur_node].dst;
	its	 = xd->nodes[xd->cur_node].burst_size;
	tnum	 = xd->nodes[xd->cur_node].nr_burst;

	/*
	 * The width of the MEM side must be 4 or 8 bytes; it affects neither
	 * the width of the DEV side nor the transfer size.
	 */
	if (xd->dir == DMA_DEV_TO_MEM) {
		src_mode = XDMAC_SADM_SAM_FIXED;
		buswidth = xc->sconfig.src_addr_width;
	} else {
		src_mode = XDMAC_SADM_SAM_INC;
		buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES;
	}
	src_width = FIELD_PREP(XDMAC_SADM_STW_MASK, __ffs(buswidth));

	if (xd->dir == DMA_MEM_TO_DEV) {
		dst_mode = XDMAC_DADM_DAM_FIXED;
		buswidth = xc->sconfig.dst_addr_width;
	} else {
		dst_mode = XDMAC_DADM_DAM_INC;
		buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES;
	}
	dst_width = FIELD_PREP(XDMAC_DADM_DTW_MASK, __ffs(buswidth));

	/* setup transfer factor */
	val = FIELD_PREP(XDMAC_TFA_MCNT_MASK, XDMAC_INTERVAL_CLKS);
	val |= FIELD_PREP(XDMAC_TFA_MASK, xc->req_factor);
	writel(val, xc->reg_ch_base + XDMAC_TFA);

	/* setup the channel */
	writel(lower_32_bits(src_addr), xc->reg_ch_base + XDMAC_SAD);
	writel(upper_32_bits(src_addr), xc->reg_ch_base + XDMAC_EXSAD);

	writel(lower_32_bits(dst_addr), xc->reg_ch_base + XDMAC_DAD);
	writel(upper_32_bits(dst_addr), xc->reg_ch_base + XDMAC_EXDAD);

	src_mode |= src_width;
	dst_mode |= dst_width;
	writel(src_mode, xc->reg_ch_base + XDMAC_SADM);
	writel(dst_mode, xc->reg_ch_base + XDMAC_DADM);

	writel(its, xc->reg_ch_base + XDMAC_ITS);
	writel(tnum, xc->reg_ch_base + XDMAC_TNUM);

	/* enable interrupt */
	writel(XDMAC_IEN_ENDIEN | XDMAC_IEN_ERRIEN,
	       xc->reg_ch_base + XDMAC_IEN);

	/* start XDMAC */
	val = readl(xc->reg_ch_base + XDMAC_TSS);
	val |= XDMAC_TSS_REQ;
	writel(val, xc->reg_ch_base + XDMAC_TSS);
}

/* xc->vc.lock must be held by caller */
static int uniphier_xdmac_chan_stop(struct uniphier_xdmac_chan *xc)
{
	u32 val;

	/* disable interrupt */
	val = readl(xc->reg_ch_base + XDMAC_IEN);
	val &= ~(XDMAC_IEN_ENDIEN | XDMAC_IEN_ERRIEN);
	writel(val, xc->reg_ch_base + XDMAC_IEN);

	/* stop XDMAC */
	val = readl(xc->reg_ch_base + XDMAC_TSS);
	val &= ~XDMAC_TSS_REQ;
	writel(0, xc->reg_ch_base + XDMAC_TSS);

	/* wait until transfer is stopped */
	return readl_poll_timeout_atomic(xc->reg_ch_base + XDMAC_STAT, val,
					 !(val & XDMAC_STAT_TENF), 100, 1000);
}

/* xc->vc.lock must be held by caller */
static void uniphier_xdmac_start(struct uniphier_xdmac_chan *xc)
{
	struct uniphier_xdmac_desc *xd;

	xd = uniphier_xdmac_next_desc(xc);
	if (xd)
		uniphier_xdmac_chan_start(xc, xd);

	/* set desc to chan regardless of whether xd is NULL */
	xc->xd = xd;
}
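
/*
 * Per-channel interrupt handling: an error interrupt stops the channel;
 * an end-of-transfer interrupt either starts the next node of the current
 * descriptor or completes it and starts the next pending descriptor.
 */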
static void uniphier_xdmac_chan_irq(struct uniphier_xdmac_chan *xc)
{
	u32 stat;
	int ret;

	spin_lock(&xc->vc.lock);

	stat = readl(xc->reg_ch_base + XDMAC_ID);

	if (stat & XDMAC_ID_ERRIDF) {
		ret = uniphier_xdmac_chan_stop(xc);
		if (ret)
			dev_err(xc->xdev->ddev.dev,
				"DMA transfer error with aborting issue\n");
		else
			dev_err(xc->xdev->ddev.dev,
				"DMA transfer error\n");

	} else if ((stat & XDMAC_ID_ENDIDF) && xc->xd) {
		xc->xd->cur_node++;
		if (xc->xd->cur_node >= xc->xd->nr_node) {
			vchan_cookie_complete(&xc->xd->vd);
			uniphier_xdmac_start(xc);
		} else {
			uniphier_xdmac_chan_start(xc, xc->xd);
		}
	}

	/* write bits to clear */
	writel(stat, xc->reg_ch_base + XDMAC_IR);

	spin_unlock(&xc->vc.lock);
}
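
/* A single (shared) IRQ line serves the whole controller; check every channel. */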
static irqreturn_t uniphier_xdmac_irq_handler(int irq, void *dev_id)
{
	struct uniphier_xdmac_device *xdev = dev_id;
	int i;

	for (i = 0; i < xdev->nr_chans; i++)
		uniphier_xdmac_chan_irq(&xdev->channels[i]);

	return IRQ_HANDLED;
}

static void uniphier_xdmac_free_chan_resources(struct dma_chan *chan)
{
	vchan_free_chan_resources(to_virt_chan(chan));
}
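
/*
 * Illustrative note: a memcpy smaller than XDMAC_MAX_WORD_SIZE fits in a
 * single node (burst_size = len, nr_burst = 1); only larger copies are
 * split into roughly XDMAC_MAX_WORD_SIZE-sized nodes below.
 */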
static struct dma_async_tx_descriptor *
uniphier_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
			       dma_addr_t src, size_t len, unsigned long flags)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_xdmac_desc *xd;
	unsigned int nr;
	size_t burst_size, tlen;
	int i;

	if (len > XDMAC_MAX_WORD_SIZE * XDMAC_MAX_WORDS)
		return NULL;

	nr = 1 + len / XDMAC_MAX_WORD_SIZE;

	xd = kzalloc(struct_size(xd, nodes, nr), GFP_NOWAIT);
	if (!xd)
		return NULL;

	xd->nr_node = nr;
	for (i = 0; i < nr; i++) {
		burst_size = min_t(size_t, len, XDMAC_MAX_WORD_SIZE);
		xd->nodes[i].src = src;
		xd->nodes[i].dst = dst;
		xd->nodes[i].burst_size = burst_size;
		xd->nodes[i].nr_burst = len / burst_size;
		tlen = rounddown(len, burst_size);
		src += tlen;
		dst += tlen;
		len -= tlen;
	}

	xd->dir = DMA_MEM_TO_MEM;

	return vchan_tx_prep(vc, &xd->vd, flags);
}
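
/*
 * Slave transfers require the peripheral address, bus width and maximum
 * burst to have been set through dmaengine_slave_config() first; each
 * segment length must be a multiple of maxburst * buswidth.
 */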
static struct dma_async_tx_descriptor *
uniphier_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			     unsigned int sg_len,
			     enum dma_transfer_direction direction,
			     unsigned long flags, void *context)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
	struct uniphier_xdmac_desc *xd;
	struct scatterlist *sg;
	enum dma_slave_buswidth buswidth;
	u32 maxburst;
	int i;

	if (!is_slave_direction(direction))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		buswidth = xc->sconfig.src_addr_width;
		maxburst = xc->sconfig.src_maxburst;
	} else {
		buswidth = xc->sconfig.dst_addr_width;
		maxburst = xc->sconfig.dst_maxburst;
	}

	if (!maxburst)
		maxburst = 1;
	if (maxburst > xc->xdev->ddev.max_burst) {
		dev_err(xc->xdev->ddev.dev,
			"Exceed maximum number of burst words\n");
		return NULL;
	}

	xd = kzalloc(struct_size(xd, nodes, sg_len), GFP_NOWAIT);
	if (!xd)
		return NULL;

	xd->nr_node = sg_len;
	for_each_sg(sgl, sg, sg_len, i) {
		xd->nodes[i].src = (direction == DMA_DEV_TO_MEM)
			? xc->sconfig.src_addr : sg_dma_address(sg);
		xd->nodes[i].dst = (direction == DMA_MEM_TO_DEV)
			? xc->sconfig.dst_addr : sg_dma_address(sg);
		xd->nodes[i].burst_size = maxburst * buswidth;
		xd->nodes[i].nr_burst =
			sg_dma_len(sg) / xd->nodes[i].burst_size;

		/*
		 * A transfer whose size is not a multiple of the unit size
		 * (the number of burst words * bus width) is currently not
		 * allowed, because the driver has no way to transfer the
		 * residue. To transfer an arbitrary size, 'src_maxburst' or
		 * 'dst_maxburst' of dma_slave_config must be set to 1.
		 */
		if (sg_dma_len(sg) % xd->nodes[i].burst_size) {
			dev_err(xc->xdev->ddev.dev,
				"Unaligned transfer size: %d", sg_dma_len(sg));
			kfree(xd);
			return NULL;
		}

		if (xd->nodes[i].nr_burst > XDMAC_MAX_WORDS) {
			dev_err(xc->xdev->ddev.dev,
				"Exceed maximum transfer size");
			kfree(xd);
			return NULL;
		}
	}

	xd->dir = direction;

	return vchan_tx_prep(vc, &xd->vd, flags);
}

static int uniphier_xdmac_slave_config(struct dma_chan *chan,
				       struct dma_slave_config *config)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);

	memcpy(&xc->sconfig, config, sizeof(*config));

	return 0;
}
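
/*
 * Stop the channel, drop the in-flight descriptor and free everything that
 * is still queued; the freeing happens outside the lock via the vchan
 * helpers.
 */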
static int uniphier_xdmac_terminate_all(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
	unsigned long flags;
	int ret = 0;
	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);

	if (xc->xd) {
		vchan_terminate_vdesc(&xc->xd->vd);
		xc->xd = NULL;
		ret = uniphier_xdmac_chan_stop(xc);
	}

	vchan_get_all_descriptors(vc, &head);

	spin_unlock_irqrestore(&vc->lock, flags);

	vchan_dma_desc_free_list(vc, &head);

	return ret;
}

static void uniphier_xdmac_synchronize(struct dma_chan *chan)
{
	vchan_synchronize(to_virt_chan(chan));
}
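
/*
 * Only kick the hardware if the channel is idle (no descriptor in flight);
 * otherwise the completion interrupt chains the next descriptor.
 */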
static void uniphier_xdmac_issue_pending(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
	unsigned long flags;

	spin_lock_irqsave(&vc->lock, flags);

	if (vchan_issue_pending(vc) && !xc->xd)
		uniphier_xdmac_start(xc);

	spin_unlock_irqrestore(&vc->lock, flags);
}

static void uniphier_xdmac_desc_free(struct virt_dma_desc *vd)
{
	kfree(to_uniphier_xdmac_desc(vd));
}

static void uniphier_xdmac_chan_init(struct uniphier_xdmac_device *xdev,
				     int ch)
{
	struct uniphier_xdmac_chan *xc = &xdev->channels[ch];

	xc->xdev = xdev;
	xc->reg_ch_base = xdev->reg_base + XDMAC_CH_WIDTH * ch;
	xc->vc.desc_free = uniphier_xdmac_desc_free;

	vchan_init(&xc->vc, &xdev->ddev);
}
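
/*
 * The consumer's DMA specifier carries two cells: args[0] selects the
 * channel and args[1] the hardware request factor programmed into XDMAC_TFA.
 */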
static struct dma_chan *of_dma_uniphier_xlate(struct of_phandle_args *dma_spec,
					      struct of_dma *ofdma)
{
	struct uniphier_xdmac_device *xdev = ofdma->of_dma_data;
	int chan_id = dma_spec->args[0];

	if (chan_id >= xdev->nr_chans)
		return NULL;

	xdev->channels[chan_id].id = chan_id;
	xdev->channels[chan_id].req_factor = dma_spec->args[1];

	return dma_get_slave_channel(&xdev->channels[chan_id].vc.chan);
}

static int uniphier_xdmac_probe(struct platform_device *pdev)
{
	struct uniphier_xdmac_device *xdev;
	struct device *dev = &pdev->dev;
	struct dma_device *ddev;
	int irq;
	int nr_chans;
	int i, ret;

	if (of_property_read_u32(dev->of_node, "dma-channels", &nr_chans))
		return -EINVAL;
	if (nr_chans > XDMAC_MAX_CHANS)
		nr_chans = XDMAC_MAX_CHANS;

	xdev = devm_kzalloc(dev, struct_size(xdev, channels, nr_chans),
			    GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	xdev->nr_chans = nr_chans;
	xdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(xdev->reg_base))
		return PTR_ERR(xdev->reg_base);

	ddev = &xdev->ddev;
	ddev->dev = dev;
	dma_cap_zero(ddev->cap_mask);
	dma_cap_set(DMA_MEMCPY, ddev->cap_mask);
	dma_cap_set(DMA_SLAVE, ddev->cap_mask);
	ddev->src_addr_widths = UNIPHIER_XDMAC_BUSWIDTHS;
	ddev->dst_addr_widths = UNIPHIER_XDMAC_BUSWIDTHS;
	ddev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
			   BIT(DMA_MEM_TO_MEM);
	ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	ddev->max_burst = XDMAC_MAX_WORDS;
	ddev->device_free_chan_resources = uniphier_xdmac_free_chan_resources;
	ddev->device_prep_dma_memcpy = uniphier_xdmac_prep_dma_memcpy;
	ddev->device_prep_slave_sg = uniphier_xdmac_prep_slave_sg;
	ddev->device_config = uniphier_xdmac_slave_config;
	ddev->device_terminate_all = uniphier_xdmac_terminate_all;
	ddev->device_synchronize = uniphier_xdmac_synchronize;
	ddev->device_tx_status = dma_cookie_status;
	ddev->device_issue_pending = uniphier_xdmac_issue_pending;
	INIT_LIST_HEAD(&ddev->channels);

	for (i = 0; i < nr_chans; i++)
		uniphier_xdmac_chan_init(xdev, i);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(dev, irq, uniphier_xdmac_irq_handler,
			       IRQF_SHARED, "xdmac", xdev);
	if (ret) {
		dev_err(dev, "Failed to request IRQ\n");
		return ret;
	}

	ret = dma_async_device_register(ddev);
	if (ret) {
		dev_err(dev, "Failed to register XDMA device\n");
		return ret;
	}

	ret = of_dma_controller_register(dev->of_node,
					 of_dma_uniphier_xlate, xdev);
	if (ret) {
		dev_err(dev, "Failed to register XDMA controller\n");
		goto out_unregister_dmac;
	}

	platform_set_drvdata(pdev, xdev);

	dev_info(&pdev->dev, "UniPhier XDMAC driver (%d channels)\n",
		 nr_chans);

	return 0;

out_unregister_dmac:
	dma_async_device_unregister(ddev);

	return ret;
}

static void uniphier_xdmac_remove(struct platform_device *pdev)
{
	struct uniphier_xdmac_device *xdev = platform_get_drvdata(pdev);
	struct dma_device *ddev = &xdev->ddev;
	struct dma_chan *chan;
	int ret;

	/*
	 * Before reaching here, almost all descriptors have been freed by the
	 * ->device_free_chan_resources() hook. However, each channel might
	 * still be holding one descriptor that was in flight at that moment.
	 * Terminate it to make sure this hardware is no longer running. Then,
	 * free the channel resources once again to avoid a memory leak.
	 */
	list_for_each_entry(chan, &ddev->channels, device_node) {
		ret = dmaengine_terminate_sync(chan);
		if (ret) {
			/*
			 * This results in resource leakage and maybe also
			 * use-after-free errors as e.g. *xdev is kfreed.
			 */
			dev_alert(&pdev->dev, "Failed to terminate channel %d (%pe)\n",
				  chan->chan_id, ERR_PTR(ret));
			return;
		}
		uniphier_xdmac_free_chan_resources(chan);
	}

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(ddev);
}

static const struct of_device_id uniphier_xdmac_match[] = {
	{ .compatible = "socionext,uniphier-xdmac" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, uniphier_xdmac_match);

static struct platform_driver uniphier_xdmac_driver = {
	.probe = uniphier_xdmac_probe,
	.remove = uniphier_xdmac_remove,
	.driver = {
		.name = "uniphier-xdmac",
		.of_match_table = uniphier_xdmac_match,
	},
};
module_platform_driver(uniphier_xdmac_driver);

MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
MODULE_DESCRIPTION("UniPhier external DMA controller driver");
MODULE_LICENSE("GPL v2");