// SPDX-License-Identifier: GPL-2.0
/*
 * External DMA controller driver for UniPhier SoCs
 * Copyright 2019 Socionext Inc.
 * Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
 */
#include <linux/bitops.h>
#include <linux/bitfield.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dmaengine.h"
#include "virt-dma.h"
#define XDMAC_CH_WIDTH		0x100

#define XDMAC_TFA		0x08
#define XDMAC_TFA_MCNT_MASK	GENMASK(23, 16)
#define XDMAC_TFA_MASK		GENMASK(5, 0)
#define XDMAC_SADM		0x10
#define XDMAC_SADM_STW_MASK	GENMASK(25, 24)
#define XDMAC_SADM_SAM		BIT(4)
#define XDMAC_SADM_SAM_FIXED	XDMAC_SADM_SAM
#define XDMAC_SADM_SAM_INC	0
#define XDMAC_DADM		0x14
#define XDMAC_DADM_DTW_MASK	XDMAC_SADM_STW_MASK
#define XDMAC_DADM_DAM		XDMAC_SADM_SAM
#define XDMAC_DADM_DAM_FIXED	XDMAC_SADM_SAM_FIXED
#define XDMAC_DADM_DAM_INC	XDMAC_SADM_SAM_INC
#define XDMAC_EXSAD		0x18
#define XDMAC_EXDAD		0x1c
#define XDMAC_SAD		0x20
#define XDMAC_DAD		0x24
#define XDMAC_ITS		0x28
#define XDMAC_ITS_MASK		GENMASK(25, 0)
#define XDMAC_TNUM		0x2c
#define XDMAC_TNUM_MASK		GENMASK(15, 0)
#define XDMAC_TSS		0x30
#define XDMAC_TSS_REQ		BIT(0)
#define XDMAC_IEN		0x34
#define XDMAC_IEN_ERRIEN	BIT(1)
#define XDMAC_IEN_ENDIEN	BIT(0)
#define XDMAC_STAT		0x40
#define XDMAC_STAT_TENF		BIT(0)
#define XDMAC_IR		0x44
#define XDMAC_IR_ERRF		BIT(1)
#define XDMAC_IR_ENDF		BIT(0)
#define XDMAC_ID		0x48
#define XDMAC_ID_ERRIDF		BIT(1)
#define XDMAC_ID_ENDIDF		BIT(0)
#define XDMAC_MAX_CHANS		16
#define XDMAC_INTERVAL_CLKS	20
#define XDMAC_MAX_WORDS		XDMAC_TNUM_MASK

/* mask off the low bits to keep the maximum transfer size aligned */
#define XDMAC_MAX_WORD_SIZE	(XDMAC_ITS_MASK & ~GENMASK(3, 0))
#define UNIPHIER_XDMAC_BUSWIDTHS \
	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
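
/*
 * Each DMA channel owns a contiguous register window: channel N lives at
 * reg_base + XDMAC_CH_WIDTH * N (see uniphier_xdmac_chan_init()).
 */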
struct uniphier_xdmac_desc_node {
	dma_addr_t src;
	dma_addr_t dst;
	u32 burst_size;
	u32 nr_burst;
};

struct uniphier_xdmac_desc {
	struct virt_dma_desc vd;

	unsigned int nr_node;
	unsigned int cur_node;
	enum dma_transfer_direction dir;
	struct uniphier_xdmac_desc_node nodes[];
};

struct uniphier_xdmac_chan {
	struct virt_dma_chan vc;
	struct uniphier_xdmac_device *xdev;
	struct uniphier_xdmac_desc *xd;
	void __iomem *reg_ch_base;
	struct dma_slave_config sconfig;
	int id;
	unsigned int req_factor;
};

struct uniphier_xdmac_device {
	struct dma_device ddev;
	void __iomem *reg_base;
	int nr_chans;
	struct uniphier_xdmac_chan channels[];
};
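
/*
 * A descriptor (uniphier_xdmac_desc) is a flat array of nodes. The channel
 * executes one node at a time, advancing cur_node from the completion
 * interrupt, and fetches the next queued descriptor after the last node.
 */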
static struct uniphier_xdmac_chan *
to_uniphier_xdmac_chan(struct virt_dma_chan *vc)
{
	return container_of(vc, struct uniphier_xdmac_chan, vc);
}
static struct uniphier_xdmac_desc *
to_uniphier_xdmac_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct uniphier_xdmac_desc, vd);
}
/* xc->vc.lock must be held by caller */
static struct uniphier_xdmac_desc *
uniphier_xdmac_next_desc(struct uniphier_xdmac_chan *xc)
{
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&xc->vc);
	if (!vd)
		return NULL;

	list_del(&vd->node);

	return to_uniphier_xdmac_desc(vd);
}
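
/*
 * Program the current node of a descriptor into the channel registers and
 * trigger the transfer by setting the software request bit.
 */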
/* xc->vc.lock must be held by caller */
static void uniphier_xdmac_chan_start(struct uniphier_xdmac_chan *xc,
				      struct uniphier_xdmac_desc *xd)
{
	u32 src_mode, src_width;
	u32 dst_mode, dst_width;
	dma_addr_t src_addr, dst_addr;
	u32 val, its, tnum;
	enum dma_slave_buswidth buswidth;

	src_addr = xd->nodes[xd->cur_node].src;
	dst_addr = xd->nodes[xd->cur_node].dst;
	its = xd->nodes[xd->cur_node].burst_size;
	tnum = xd->nodes[xd->cur_node].nr_burst;

	/*
	 * The width of the MEM side must be 4 or 8 bytes; this does not
	 * affect the width of the DEV side or the transfer size.
	 */
	if (xd->dir == DMA_DEV_TO_MEM) {
		src_mode = XDMAC_SADM_SAM_FIXED;
		buswidth = xc->sconfig.src_addr_width;
	} else {
		src_mode = XDMAC_SADM_SAM_INC;
		buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES;
	}
	src_width = FIELD_PREP(XDMAC_SADM_STW_MASK, __ffs(buswidth));

	if (xd->dir == DMA_MEM_TO_DEV) {
		dst_mode = XDMAC_DADM_DAM_FIXED;
		buswidth = xc->sconfig.dst_addr_width;
	} else {
		dst_mode = XDMAC_DADM_DAM_INC;
		buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES;
	}
	dst_width = FIELD_PREP(XDMAC_DADM_DTW_MASK, __ffs(buswidth));

	/* setup transfer factor */
	val = FIELD_PREP(XDMAC_TFA_MCNT_MASK, XDMAC_INTERVAL_CLKS);
	val |= FIELD_PREP(XDMAC_TFA_MASK, xc->req_factor);
	writel(val, xc->reg_ch_base + XDMAC_TFA);

	/* setup the channel */
	writel(lower_32_bits(src_addr), xc->reg_ch_base + XDMAC_SAD);
	writel(upper_32_bits(src_addr), xc->reg_ch_base + XDMAC_EXSAD);

	writel(lower_32_bits(dst_addr), xc->reg_ch_base + XDMAC_DAD);
	writel(upper_32_bits(dst_addr), xc->reg_ch_base + XDMAC_EXDAD);

	src_mode |= src_width;
	dst_mode |= dst_width;
	writel(src_mode, xc->reg_ch_base + XDMAC_SADM);
	writel(dst_mode, xc->reg_ch_base + XDMAC_DADM);

	writel(its, xc->reg_ch_base + XDMAC_ITS);
	writel(tnum, xc->reg_ch_base + XDMAC_TNUM);

	/* enable interrupt */
	writel(XDMAC_IEN_ENDIEN | XDMAC_IEN_ERRIEN,
	       xc->reg_ch_base + XDMAC_IEN);

	/* start XDMAC */
	val = readl(xc->reg_ch_base + XDMAC_TSS);
	val |= XDMAC_TSS_REQ;
	writel(val, xc->reg_ch_base + XDMAC_TSS);
}
/* xc->vc.lock must be held by caller */
static int uniphier_xdmac_chan_stop(struct uniphier_xdmac_chan *xc)
{
	u32 val;

	/* disable interrupt */
	val = readl(xc->reg_ch_base + XDMAC_IEN);
	val &= ~(XDMAC_IEN_ENDIEN | XDMAC_IEN_ERRIEN);
	writel(val, xc->reg_ch_base + XDMAC_IEN);

	/* stop XDMAC */
	val = readl(xc->reg_ch_base + XDMAC_TSS);
	val &= ~XDMAC_TSS_REQ;
	writel(0, xc->reg_ch_base + XDMAC_TSS);

	/*
	 * Wait until the transfer is stopped. The caller holds vc.lock, so
	 * poll atomically instead of sleeping.
	 */
	return readl_poll_timeout_atomic(xc->reg_ch_base + XDMAC_STAT, val,
					 !(val & XDMAC_STAT_TENF), 100, 1000);
}
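
/*
 * Fetch the next issued descriptor, if any, and start it on the hardware.
 * A NULL descriptor leaves the channel marked idle.
 */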
/* xc->vc.lock must be held by caller */
static void uniphier_xdmac_start(struct uniphier_xdmac_chan *xc)
{
	struct uniphier_xdmac_desc *xd;

	xd = uniphier_xdmac_next_desc(xc);
	if (xd)
		uniphier_xdmac_chan_start(xc, xd);

	/* set the descriptor to the channel regardless of whether xd is NULL */
	xc->xd = xd;
}
static void uniphier_xdmac_chan_irq(struct uniphier_xdmac_chan *xc)
{
	u32 stat;
	int ret;

	spin_lock(&xc->vc.lock);

	stat = readl(xc->reg_ch_base + XDMAC_ID);

	if (stat & XDMAC_ID_ERRIDF) {
		ret = uniphier_xdmac_chan_stop(xc);
		if (ret)
			dev_err(xc->xdev->ddev.dev,
				"DMA transfer error (failed to abort)\n");
		else
			dev_err(xc->xdev->ddev.dev,
				"DMA transfer error\n");

	} else if ((stat & XDMAC_ID_ENDIDF) && xc->xd) {
		xc->xd->cur_node++;
		if (xc->xd->cur_node >= xc->xd->nr_node) {
			vchan_cookie_complete(&xc->xd->vd);
			uniphier_xdmac_start(xc);
		} else {
			uniphier_xdmac_chan_start(xc, xc->xd);
		}
	}

	/* write bits to clear */
	writel(stat, xc->reg_ch_base + XDMAC_IR);

	spin_unlock(&xc->vc.lock);
}
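
/*
 * The controller raises a single (shared) interrupt line; walk every channel
 * and let each one handle and clear its own status bits.
 */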
static irqreturn_t uniphier_xdmac_irq_handler(int irq, void *dev_id)
{
	struct uniphier_xdmac_device *xdev = dev_id;
	int i;

	for (i = 0; i < xdev->nr_chans; i++)
		uniphier_xdmac_chan_irq(&xdev->channels[i]);

	return IRQ_HANDLED;
}
static void uniphier_xdmac_free_chan_resources(struct dma_chan *chan)
{
	vchan_free_chan_resources(to_virt_chan(chan));
}
static struct dma_async_tx_descriptor *
uniphier_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
			       dma_addr_t src, size_t len, unsigned long flags)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_xdmac_desc *xd;
	unsigned int nr;
	size_t burst_size, tlen;
	int i;

	if (len > XDMAC_MAX_WORD_SIZE * XDMAC_MAX_WORDS)
		return NULL;

	nr = 1 + len / XDMAC_MAX_WORD_SIZE;

	xd = kzalloc(struct_size(xd, nodes, nr), GFP_NOWAIT);
	if (!xd)
		return NULL;

	for (i = 0; i < nr; i++) {
		burst_size = min_t(size_t, len, XDMAC_MAX_WORD_SIZE);
		xd->nodes[i].src = src;
		xd->nodes[i].dst = dst;
		xd->nodes[i].burst_size = burst_size;
		xd->nodes[i].nr_burst = len / burst_size;
		tlen = rounddown(len, burst_size);
		src += tlen;
		dst += tlen;
		len -= tlen;
	}

	xd->dir = DMA_MEM_TO_MEM;
	xd->nr_node = nr;
	xd->cur_node = 0;

	return vchan_tx_prep(vc, &xd->vd, flags);
}
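
/*
 * Build a slave descriptor with one node per scatterlist entry. Each node
 * moves sg_dma_len(sg) bytes as nr_burst bursts of (maxburst * buswidth)
 * bytes between memory and the fixed device address from dma_slave_config.
 */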
static struct dma_async_tx_descriptor *
uniphier_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			     unsigned int sg_len,
			     enum dma_transfer_direction direction,
			     unsigned long flags, void *context)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
	struct uniphier_xdmac_desc *xd;
	struct scatterlist *sg;
	enum dma_slave_buswidth buswidth;
	u32 maxburst;
	int i;

	if (!is_slave_direction(direction))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		buswidth = xc->sconfig.src_addr_width;
		maxburst = xc->sconfig.src_maxburst;
	} else {
		buswidth = xc->sconfig.dst_addr_width;
		maxburst = xc->sconfig.dst_maxburst;
	}

	if (!maxburst)
		maxburst = 1;
	if (maxburst > xc->xdev->ddev.max_burst) {
		dev_err(xc->xdev->ddev.dev,
			"Exceed maximum number of burst words\n");
		return NULL;
	}

	xd = kzalloc(struct_size(xd, nodes, sg_len), GFP_NOWAIT);
	if (!xd)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		xd->nodes[i].src = (direction == DMA_DEV_TO_MEM)
			? xc->sconfig.src_addr : sg_dma_address(sg);
		xd->nodes[i].dst = (direction == DMA_MEM_TO_DEV)
			? xc->sconfig.dst_addr : sg_dma_address(sg);
		xd->nodes[i].burst_size = maxburst * buswidth;
		xd->nodes[i].nr_burst =
			sg_dma_len(sg) / xd->nodes[i].burst_size;

		/*
		 * A transfer whose size is not a multiple of the unit size
		 * (the number of burst words * bus-width) is currently not
		 * allowed, because the driver has no way to transfer the
		 * residue. To transfer an arbitrary size, 'src_maxburst' or
		 * 'dst_maxburst' of dma_slave_config must be set to 1 (see
		 * the illustrative sketch after this function).
		 */
		if (sg_dma_len(sg) % xd->nodes[i].burst_size) {
			dev_err(xc->xdev->ddev.dev,
				"Unaligned transfer size: %d", sg_dma_len(sg));
			kfree(xd);
			return NULL;
		}

		if (xd->nodes[i].nr_burst > XDMAC_MAX_WORDS) {
			dev_err(xc->xdev->ddev.dev,
				"Exceed maximum transfer size");
			kfree(xd);
			return NULL;
		}
	}

	xd->dir = direction;
	xd->nr_node = sg_len;
	xd->cur_node = 0;

	return vchan_tx_prep(vc, &xd->vd, flags);
}
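
/*
 * Illustrative sketch, not part of this driver: because each slave transfer
 * must be a multiple of maxburst * buswidth, a hypothetical client that
 * needs arbitrary transfer lengths would set maxburst to 1 in its
 * dma_slave_config before preparing the transfer ("fifo_phys_addr" below is
 * a made-up device FIFO address):
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_DEV_TO_MEM,
 *		.src_addr	= fifo_phys_addr,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 1,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */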
static int uniphier_xdmac_slave_config(struct dma_chan *chan,
				       struct dma_slave_config *config)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);

	memcpy(&xc->sconfig, config, sizeof(*config));

	return 0;
}
static int uniphier_xdmac_terminate_all(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
	unsigned long flags;
	int ret = 0;
	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);

	if (xc->xd) {
		vchan_terminate_vdesc(&xc->xd->vd);
		xc->xd = NULL;
		ret = uniphier_xdmac_chan_stop(xc);
	}

	vchan_get_all_descriptors(vc, &head);

	spin_unlock_irqrestore(&vc->lock, flags);

	vchan_dma_desc_free_list(vc, &head);

	return ret;
}
static void uniphier_xdmac_synchronize(struct dma_chan *chan)
{
	vchan_synchronize(to_virt_chan(chan));
}
static void uniphier_xdmac_issue_pending(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
	unsigned long flags;

	spin_lock_irqsave(&vc->lock, flags);

	if (vchan_issue_pending(vc) && !xc->xd)
		uniphier_xdmac_start(xc);

	spin_unlock_irqrestore(&vc->lock, flags);
}
static void uniphier_xdmac_desc_free(struct virt_dma_desc *vd)
{
	kfree(to_uniphier_xdmac_desc(vd));
}
static void uniphier_xdmac_chan_init(struct uniphier_xdmac_device *xdev,
				     int ch)
{
	struct uniphier_xdmac_chan *xc = &xdev->channels[ch];

	xc->xdev = xdev;
	xc->reg_ch_base = xdev->reg_base + XDMAC_CH_WIDTH * ch;
	xc->vc.desc_free = uniphier_xdmac_desc_free;

	vchan_init(&xc->vc, &xdev->ddev);
}
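
/*
 * Translate a two-cell DT DMA specifier: cell 0 selects the channel index
 * and cell 1 gives the transfer request factor programmed into XDMAC_TFA.
 */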
static struct dma_chan *of_dma_uniphier_xlate(struct of_phandle_args *dma_spec,
					      struct of_dma *ofdma)
{
	struct uniphier_xdmac_device *xdev = ofdma->of_dma_data;
	int chan_id = dma_spec->args[0];

	if (chan_id >= xdev->nr_chans)
		return NULL;

	xdev->channels[chan_id].id = chan_id;
	xdev->channels[chan_id].req_factor = dma_spec->args[1];

	return dma_get_slave_channel(&xdev->channels[chan_id].vc.chan);
}
static int uniphier_xdmac_probe(struct platform_device *pdev)
{
	struct uniphier_xdmac_device *xdev;
	struct device *dev = &pdev->dev;
	struct dma_device *ddev;
	int irq;
	u32 nr_chans;
	int i, ret;

	if (of_property_read_u32(dev->of_node, "dma-channels", &nr_chans))
		return -EINVAL;
	if (nr_chans > XDMAC_MAX_CHANS)
		nr_chans = XDMAC_MAX_CHANS;

	xdev = devm_kzalloc(dev, struct_size(xdev, channels, nr_chans),
			    GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	xdev->nr_chans = nr_chans;
	xdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(xdev->reg_base))
		return PTR_ERR(xdev->reg_base);

	ddev = &xdev->ddev;
	ddev->dev = dev;
	dma_cap_zero(ddev->cap_mask);
	dma_cap_set(DMA_MEMCPY, ddev->cap_mask);
	dma_cap_set(DMA_SLAVE, ddev->cap_mask);
	ddev->src_addr_widths = UNIPHIER_XDMAC_BUSWIDTHS;
	ddev->dst_addr_widths = UNIPHIER_XDMAC_BUSWIDTHS;
	ddev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
			   BIT(DMA_MEM_TO_MEM);
	ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	ddev->max_burst = XDMAC_MAX_WORDS;
	ddev->device_free_chan_resources = uniphier_xdmac_free_chan_resources;
	ddev->device_prep_dma_memcpy = uniphier_xdmac_prep_dma_memcpy;
	ddev->device_prep_slave_sg = uniphier_xdmac_prep_slave_sg;
	ddev->device_config = uniphier_xdmac_slave_config;
	ddev->device_terminate_all = uniphier_xdmac_terminate_all;
	ddev->device_synchronize = uniphier_xdmac_synchronize;
	ddev->device_tx_status = dma_cookie_status;
	ddev->device_issue_pending = uniphier_xdmac_issue_pending;
	INIT_LIST_HEAD(&ddev->channels);

	for (i = 0; i < nr_chans; i++)
		uniphier_xdmac_chan_init(xdev, i);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(dev, irq, uniphier_xdmac_irq_handler,
			       IRQF_SHARED, "xdmac", xdev);
	if (ret) {
		dev_err(dev, "Failed to request IRQ\n");
		return ret;
	}

	ret = dma_async_device_register(ddev);
	if (ret) {
		dev_err(dev, "Failed to register XDMA device\n");
		return ret;
	}

	ret = of_dma_controller_register(dev->of_node,
					 of_dma_uniphier_xlate, xdev);
	if (ret) {
		dev_err(dev, "Failed to register XDMA controller\n");
		goto out_unregister_dmac;
	}

	platform_set_drvdata(pdev, xdev);

	dev_info(&pdev->dev, "UniPhier XDMAC driver (%d channels)\n",
		 nr_chans);

	return 0;

out_unregister_dmac:
	dma_async_device_unregister(ddev);

	return ret;
}
static int uniphier_xdmac_remove(struct platform_device *pdev)
{
	struct uniphier_xdmac_device *xdev = platform_get_drvdata(pdev);
	struct dma_device *ddev = &xdev->ddev;
	struct dma_chan *chan;
	int ret;

	/*
	 * Before reaching here, almost all descriptors have been freed by the
	 * ->device_free_chan_resources() hook. However, each channel might
	 * still be holding one descriptor that was in flight at that moment.
	 * Terminate it to make sure this hardware is no longer running. Then,
	 * free the channel resources once again to avoid a memory leak.
	 */
	list_for_each_entry(chan, &ddev->channels, device_node) {
		ret = dmaengine_terminate_sync(chan);
		if (ret)
			return ret;
		uniphier_xdmac_free_chan_resources(chan);
	}

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(ddev);

	return 0;
}
static const struct of_device_id uniphier_xdmac_match[] = {
	{ .compatible = "socionext,uniphier-xdmac" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, uniphier_xdmac_match);
static struct platform_driver uniphier_xdmac_driver = {
	.probe = uniphier_xdmac_probe,
	.remove = uniphier_xdmac_remove,
	.driver = {
		.name = "uniphier-xdmac",
		.of_match_table = uniphier_xdmac_match,
	},
};
module_platform_driver(uniphier_xdmac_driver);
MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
MODULE_DESCRIPTION("UniPhier external DMA controller driver");
MODULE_LICENSE("GPL v2");