// SPDX-License-Identifier: GPL-2.0
/*
 * External DMA controller driver for UniPhier SoCs
 * Copyright 2019 Socionext Inc.
 * Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
 */

#include <linux/bitops.h>
#include <linux/bitfield.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dmaengine.h"
#include "virt-dma.h"
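
/*
 * Register layout, as used below: each DMA channel occupies its own
 * 0x100-byte window at reg_base + XDMAC_CH_WIDTH * ch, so every XDMAC_*
 * offset in this file is relative to a channel window rather than to the
 * controller base. This is inferred from uniphier_xdmac_chan_init() below,
 * not from a datasheet.
 */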

#define XDMAC_CH_WIDTH		0x100

#define XDMAC_TFA		0x08
#define XDMAC_TFA_MCNT_MASK	GENMASK(23, 16)
#define XDMAC_TFA_MASK		GENMASK(5, 0)
#define XDMAC_SADM		0x10
#define XDMAC_SADM_STW_MASK	GENMASK(25, 24)
#define XDMAC_SADM_SAM		BIT(4)
#define XDMAC_SADM_SAM_FIXED	XDMAC_SADM_SAM
#define XDMAC_SADM_SAM_INC	0
#define XDMAC_DADM		0x14
#define XDMAC_DADM_DTW_MASK	XDMAC_SADM_STW_MASK
#define XDMAC_DADM_DAM		XDMAC_SADM_SAM
#define XDMAC_DADM_DAM_FIXED	XDMAC_SADM_SAM_FIXED
#define XDMAC_DADM_DAM_INC	XDMAC_SADM_SAM_INC
#define XDMAC_EXSAD		0x18
#define XDMAC_EXDAD		0x1c
#define XDMAC_SAD		0x20
#define XDMAC_DAD		0x24
#define XDMAC_ITS		0x28
#define XDMAC_ITS_MASK		GENMASK(25, 0)
#define XDMAC_TNUM		0x2c
#define XDMAC_TNUM_MASK		GENMASK(15, 0)
#define XDMAC_TSS		0x30
#define XDMAC_TSS_REQ		BIT(0)
#define XDMAC_IEN		0x34
#define XDMAC_IEN_ERRIEN	BIT(1)
#define XDMAC_IEN_ENDIEN	BIT(0)
#define XDMAC_STAT		0x40
#define XDMAC_STAT_TENF		BIT(0)
#define XDMAC_IR		0x44
#define XDMAC_IR_ERRF		BIT(1)
#define XDMAC_IR_ENDF		BIT(0)
#define XDMAC_ID		0x48
#define XDMAC_ID_ERRIDF		BIT(1)
#define XDMAC_ID_ENDIDF		BIT(0)

#define XDMAC_MAX_CHANS		16
#define XDMAC_INTERVAL_CLKS	20
#define XDMAC_MAX_WORDS		XDMAC_TNUM_MASK

/* mask off the low bits to keep the maximum transfer size aligned */
#define XDMAC_MAX_WORD_SIZE	(XDMAC_ITS_MASK & ~GENMASK(3, 0))
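
/*
 * A single hardware run moves ITS bytes per burst, TNUM bursts in total.
 * Worked example, derived purely from the masks above and assuming ITS is
 * indeed a byte count (an assumption, not documented here):
 * XDMAC_MAX_WORD_SIZE = 0x3fffff0 (~64 MiB) and XDMAC_MAX_WORDS = 0xffff,
 * so one descriptor node can describe up to roughly 4 TiB, and
 * uniphier_xdmac_prep_dma_memcpy() below splits longer requests into
 * multiple nodes of at most XDMAC_MAX_WORD_SIZE bytes per burst.
 */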

#define UNIPHIER_XDMAC_BUSWIDTHS \
	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

struct uniphier_xdmac_desc_node {
	dma_addr_t src;
	dma_addr_t dst;
	u32 burst_size;
	u32 nr_burst;
};

struct uniphier_xdmac_desc {
	struct virt_dma_desc vd;

	unsigned int nr_node;
	unsigned int cur_node;
	enum dma_transfer_direction dir;
	struct uniphier_xdmac_desc_node nodes[];
};

struct uniphier_xdmac_chan {
	struct virt_dma_chan vc;
	struct uniphier_xdmac_device *xdev;
	struct uniphier_xdmac_desc *xd;
	void __iomem *reg_ch_base;
	struct dma_slave_config sconfig;
	int id;
	unsigned int req_factor;
};

struct uniphier_xdmac_device {
	struct dma_device ddev;
	void __iomem *reg_base;
	int nr_chans;
	struct uniphier_xdmac_chan channels[];
};
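
/*
 * Descriptor model: one uniphier_xdmac_desc per prepared transaction, made
 * up of one or more desc_nodes; each node is one hardware run of
 * burst_size bytes * nr_burst bursts. For example (numbers follow the
 * arithmetic in uniphier_xdmac_prep_slave_sg() below, for a hypothetical
 * client configuration): a 10240-byte scatterlist entry with
 * dst_maxburst = 8 and a 4-byte bus width becomes one node with
 * burst_size = 32 and nr_burst = 320.
 */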

static struct uniphier_xdmac_chan *
to_uniphier_xdmac_chan(struct virt_dma_chan *vc)
{
	return container_of(vc, struct uniphier_xdmac_chan, vc);
}

static struct uniphier_xdmac_desc *
to_uniphier_xdmac_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct uniphier_xdmac_desc, vd);
}

/* xc->vc.lock must be held by caller */
static struct uniphier_xdmac_desc *
uniphier_xdmac_next_desc(struct uniphier_xdmac_chan *xc)
{
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&xc->vc);
	if (!vd)
		return NULL;

	list_del(&vd->node);

	return to_uniphier_xdmac_desc(vd);
}
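
/*
 * Programming sequence, restating the register writes below: request factor
 * and interval into XDMAC_TFA, 64-bit source/destination addresses split
 * across XDMAC_SAD/XDMAC_EXSAD and XDMAC_DAD/XDMAC_EXDAD, address mode and
 * bus width into XDMAC_SADM/XDMAC_DADM, burst size and count into
 * XDMAC_ITS/XDMAC_TNUM, then interrupts are enabled and the transfer is
 * kicked by setting XDMAC_TSS_REQ.
 */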

/* xc->vc.lock must be held by caller */
static void uniphier_xdmac_chan_start(struct uniphier_xdmac_chan *xc,
				      struct uniphier_xdmac_desc *xd)
{
	u32 src_mode, src_width;
	u32 dst_mode, dst_width;
	dma_addr_t src_addr, dst_addr;
	u32 val, its, tnum;
	enum dma_slave_buswidth buswidth;

	src_addr = xd->nodes[xd->cur_node].src;
	dst_addr = xd->nodes[xd->cur_node].dst;
	its = xd->nodes[xd->cur_node].burst_size;
	tnum = xd->nodes[xd->cur_node].nr_burst;

	/*
	 * The bus width on the MEM side must be 4 or 8 bytes; this does not
	 * affect the width of the DEV side or the transfer size.
	 */
	if (xd->dir == DMA_DEV_TO_MEM) {
		src_mode = XDMAC_SADM_SAM_FIXED;
		buswidth = xc->sconfig.src_addr_width;
	} else {
		src_mode = XDMAC_SADM_SAM_INC;
		buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES;
	}
	src_width = FIELD_PREP(XDMAC_SADM_STW_MASK, __ffs(buswidth));

	if (xd->dir == DMA_MEM_TO_DEV) {
		dst_mode = XDMAC_DADM_DAM_FIXED;
		buswidth = xc->sconfig.dst_addr_width;
	} else {
		dst_mode = XDMAC_DADM_DAM_INC;
		buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES;
	}
	dst_width = FIELD_PREP(XDMAC_DADM_DTW_MASK, __ffs(buswidth));

	/* setup transfer factor */
	val = FIELD_PREP(XDMAC_TFA_MCNT_MASK, XDMAC_INTERVAL_CLKS);
	val |= FIELD_PREP(XDMAC_TFA_MASK, xc->req_factor);
	writel(val, xc->reg_ch_base + XDMAC_TFA);

	/* setup the channel */
	writel(lower_32_bits(src_addr), xc->reg_ch_base + XDMAC_SAD);
	writel(upper_32_bits(src_addr), xc->reg_ch_base + XDMAC_EXSAD);

	writel(lower_32_bits(dst_addr), xc->reg_ch_base + XDMAC_DAD);
	writel(upper_32_bits(dst_addr), xc->reg_ch_base + XDMAC_EXDAD);

	src_mode |= src_width;
	dst_mode |= dst_width;
	writel(src_mode, xc->reg_ch_base + XDMAC_SADM);
	writel(dst_mode, xc->reg_ch_base + XDMAC_DADM);

	writel(its, xc->reg_ch_base + XDMAC_ITS);
	writel(tnum, xc->reg_ch_base + XDMAC_TNUM);

	/* enable interrupt */
	writel(XDMAC_IEN_ENDIEN | XDMAC_IEN_ERRIEN,
	       xc->reg_ch_base + XDMAC_IEN);

	/* start XDMAC */
	val = readl(xc->reg_ch_base + XDMAC_TSS);
	val |= XDMAC_TSS_REQ;
	writel(val, xc->reg_ch_base + XDMAC_TSS);
}

/* xc->vc.lock must be held by caller */
static int uniphier_xdmac_chan_stop(struct uniphier_xdmac_chan *xc)
{
	u32 val;

	/* disable interrupt */
	val = readl(xc->reg_ch_base + XDMAC_IEN);
	val &= ~(XDMAC_IEN_ENDIEN | XDMAC_IEN_ERRIEN);
	writel(val, xc->reg_ch_base + XDMAC_IEN);

	/* stop XDMAC */
	val = readl(xc->reg_ch_base + XDMAC_TSS);
	val &= ~XDMAC_TSS_REQ;
	writel(0, xc->reg_ch_base + XDMAC_TSS);

	/*
	 * Wait until the transfer is stopped. Use the atomic poll variant
	 * because this is called with a spinlock held and from hardirq
	 * context, where sleeping is not allowed.
	 */
	return readl_poll_timeout_atomic(xc->reg_ch_base + XDMAC_STAT, val,
					 !(val & XDMAC_STAT_TENF), 100, 1000);
}

/* xc->vc.lock must be held by caller */
static void uniphier_xdmac_start(struct uniphier_xdmac_chan *xc)
{
	struct uniphier_xdmac_desc *xd;

	xd = uniphier_xdmac_next_desc(xc);
	if (xd)
		uniphier_xdmac_chan_start(xc, xd);

	/* set the descriptor on the channel even if xd is NULL */
	xc->xd = xd;
}
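
/*
 * Per-channel interrupt path: read the masked status from XDMAC_ID, stop
 * the channel on an error flag, otherwise advance to the next desc_node
 * (or complete the cookie and start the next queued descriptor), then
 * acknowledge by writing the handled bits back to XDMAC_IR, which is
 * presumably write-1-to-clear given the "write bits to clear" note below.
 */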
static void uniphier_xdmac_chan_irq(struct uniphier_xdmac_chan *xc)
{
	u32 stat;
	int ret;

	spin_lock(&xc->vc.lock);

	stat = readl(xc->reg_ch_base + XDMAC_ID);

	if (stat & XDMAC_ID_ERRIDF) {
		ret = uniphier_xdmac_chan_stop(xc);
		if (ret)
			dev_err(xc->xdev->ddev.dev,
				"DMA transfer error with aborting issue\n");
		else
			dev_err(xc->xdev->ddev.dev,
				"DMA transfer error\n");
	} else if ((stat & XDMAC_ID_ENDIDF) && xc->xd) {
		xc->xd->cur_node++;
		if (xc->xd->cur_node >= xc->xd->nr_node) {
			vchan_cookie_complete(&xc->xd->vd);
			uniphier_xdmac_start(xc);
		} else {
			uniphier_xdmac_chan_start(xc, xc->xd);
		}
	}

	/* write bits to clear */
	writel(stat, xc->reg_ch_base + XDMAC_IR);

	spin_unlock(&xc->vc.lock);
}

static irqreturn_t uniphier_xdmac_irq_handler(int irq, void *dev_id)
{
	struct uniphier_xdmac_device *xdev = dev_id;
	int i;

	for (i = 0; i < xdev->nr_chans; i++)
		uniphier_xdmac_chan_irq(&xdev->channels[i]);

	return IRQ_HANDLED;
}

static void uniphier_xdmac_free_chan_resources(struct dma_chan *chan)
{
	vchan_free_chan_resources(to_virt_chan(chan));
}
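
/*
 * Worked example for the node splitting below (pure arithmetic on the
 * constants above): a memcpy of len = XDMAC_MAX_WORD_SIZE + 16 bytes
 * allocates nr = 2 nodes; node 0 covers XDMAC_MAX_WORD_SIZE bytes in a
 * single burst and node 1 covers the remaining 16 bytes, so each node
 * stays within the XDMAC_ITS/XDMAC_TNUM limits.
 */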
static struct dma_async_tx_descriptor *
uniphier_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
			       dma_addr_t src, size_t len, unsigned long flags)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_xdmac_desc *xd;
	unsigned int nr;
	size_t burst_size, tlen;
	int i;

	if (len > XDMAC_MAX_WORD_SIZE * XDMAC_MAX_WORDS)
		return NULL;

	nr = 1 + len / XDMAC_MAX_WORD_SIZE;

	xd = kzalloc(struct_size(xd, nodes, nr), GFP_NOWAIT);
	if (!xd)
		return NULL;

	/* stop once len is exhausted; later nodes would be zero-sized */
	for (i = 0; i < nr && len; i++) {
		burst_size = min_t(size_t, len, XDMAC_MAX_WORD_SIZE);
		xd->nodes[i].src = src;
		xd->nodes[i].dst = dst;
		xd->nodes[i].burst_size = burst_size;
		xd->nodes[i].nr_burst = len / burst_size;
		tlen = rounddown(len, burst_size);
		src += tlen;
		dst += tlen;
		len -= tlen;
	}

	xd->dir = DMA_MEM_TO_MEM;
	xd->nr_node = i;
	xd->cur_node = 0;

	return vchan_tx_prep(vc, &xd->vd, flags);
}

static struct dma_async_tx_descriptor *
uniphier_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			     unsigned int sg_len,
			     enum dma_transfer_direction direction,
			     unsigned long flags, void *context)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
	struct uniphier_xdmac_desc *xd;
	struct scatterlist *sg;
	enum dma_slave_buswidth buswidth;
	u32 maxburst;
	int i;

	if (!is_slave_direction(direction))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		buswidth = xc->sconfig.src_addr_width;
		maxburst = xc->sconfig.src_maxburst;
	} else {
		buswidth = xc->sconfig.dst_addr_width;
		maxburst = xc->sconfig.dst_maxburst;
	}

	if (!maxburst)
		maxburst = 1;
	if (maxburst > xc->xdev->ddev.max_burst) {
		dev_err(xc->xdev->ddev.dev,
			"Exceed maximum number of burst words\n");
		return NULL;
	}

	xd = kzalloc(struct_size(xd, nodes, sg_len), GFP_NOWAIT);
	if (!xd)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		xd->nodes[i].src = (direction == DMA_DEV_TO_MEM)
			? xc->sconfig.src_addr : sg_dma_address(sg);
		xd->nodes[i].dst = (direction == DMA_MEM_TO_DEV)
			? xc->sconfig.dst_addr : sg_dma_address(sg);
		xd->nodes[i].burst_size = maxburst * buswidth;
		xd->nodes[i].nr_burst =
			sg_dma_len(sg) / xd->nodes[i].burst_size;

		/*
		 * A transfer whose size is not a multiple of the unit size
		 * (the number of burst words * bus width) is currently not
		 * allowed, because the driver has no way to transfer the
		 * residue. In practice, to transfer an arbitrary size,
		 * 'src_maxburst' or 'dst_maxburst' in dma_slave_config must
		 * be set to 1.
		 */
		if (sg_dma_len(sg) % xd->nodes[i].burst_size) {
			dev_err(xc->xdev->ddev.dev,
				"Unaligned transfer size: %d", sg_dma_len(sg));
			kfree(xd);
			return NULL;
		}

		if (xd->nodes[i].nr_burst > XDMAC_MAX_WORDS) {
			dev_err(xc->xdev->ddev.dev,
				"Exceed maximum transfer size");
			kfree(xd);
			return NULL;
		}
	}

	xd->dir = direction;
	xd->nr_node = sg_len;
	xd->cur_node = 0;

	return vchan_tx_prep(vc, &xd->vd, flags);
}
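
/*
 * Client-side usage sketch (not part of this driver; the channel name and
 * FIFO address below are hypothetical). It shows the dmaengine calls a
 * peripheral driver would make to use this controller, with maxburst = 1
 * so arbitrary lengths pass the alignment check above:
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "tx");
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = 0x5a000000,			// hypothetical FIFO
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 1,
 *	};
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				     DMA_PREP_INTERRUPT);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */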

static int uniphier_xdmac_slave_config(struct dma_chan *chan,
				       struct dma_slave_config *config)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);

	memcpy(&xc->sconfig, config, sizeof(*config));

	return 0;
}

static int uniphier_xdmac_terminate_all(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
	unsigned long flags;
	int ret = 0;
	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);

	if (xc->xd) {
		vchan_terminate_vdesc(&xc->xd->vd);
		xc->xd = NULL;
		ret = uniphier_xdmac_chan_stop(xc);
	}

	vchan_get_all_descriptors(vc, &head);

	spin_unlock_irqrestore(&vc->lock, flags);

	vchan_dma_desc_free_list(vc, &head);

	return ret;
}

static void uniphier_xdmac_synchronize(struct dma_chan *chan)
{
	vchan_synchronize(to_virt_chan(chan));
}

static void uniphier_xdmac_issue_pending(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
	unsigned long flags;

	spin_lock_irqsave(&vc->lock, flags);

	if (vchan_issue_pending(vc) && !xc->xd)
		uniphier_xdmac_start(xc);

	spin_unlock_irqrestore(&vc->lock, flags);
}

static void uniphier_xdmac_desc_free(struct virt_dma_desc *vd)
{
	kfree(to_uniphier_xdmac_desc(vd));
}

static void uniphier_xdmac_chan_init(struct uniphier_xdmac_device *xdev,
				     int ch)
{
	struct uniphier_xdmac_chan *xc = &xdev->channels[ch];

	xc->xdev = xdev;
	xc->reg_ch_base = xdev->reg_base + XDMAC_CH_WIDTH * ch;
	xc->vc.desc_free = uniphier_xdmac_desc_free;

	vchan_init(&xc->vc, &xdev->ddev);
}

static struct dma_chan *of_dma_uniphier_xlate(struct of_phandle_args *dma_spec,
					      struct of_dma *ofdma)
{
	struct uniphier_xdmac_device *xdev = ofdma->of_dma_data;
	int chan_id = dma_spec->args[0];

	if (chan_id >= xdev->nr_chans)
		return NULL;

	xdev->channels[chan_id].id = chan_id;
	xdev->channels[chan_id].req_factor = dma_spec->args[1];

	return dma_get_slave_channel(&xdev->channels[chan_id].vc.chan);
}
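
/*
 * DT consumer sketch, following the two-cell xlate above (cell 0 = channel
 * index, cell 1 = request factor). The node name and the factor value are
 * illustrative only; the authoritative format is the
 * socionext,uniphier-xdmac binding document:
 *
 *	uart: serial@... {
 *		dmas = <&xdmac 2 4>;
 *		dma-names = "tx";
 *	};
 */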

static int uniphier_xdmac_probe(struct platform_device *pdev)
{
	struct uniphier_xdmac_device *xdev;
	struct device *dev = &pdev->dev;
	struct dma_device *ddev;
	int irq;
	u32 nr_chans;
	int i, ret;

	if (of_property_read_u32(dev->of_node, "dma-channels", &nr_chans))
		return -EINVAL;
	if (nr_chans > XDMAC_MAX_CHANS)
		nr_chans = XDMAC_MAX_CHANS;

	xdev = devm_kzalloc(dev, struct_size(xdev, channels, nr_chans),
			    GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	xdev->nr_chans = nr_chans;
	xdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(xdev->reg_base))
		return PTR_ERR(xdev->reg_base);

	ddev = &xdev->ddev;
	ddev->dev = dev;
	dma_cap_zero(ddev->cap_mask);
	dma_cap_set(DMA_MEMCPY, ddev->cap_mask);
	dma_cap_set(DMA_SLAVE, ddev->cap_mask);
	ddev->src_addr_widths = UNIPHIER_XDMAC_BUSWIDTHS;
	ddev->dst_addr_widths = UNIPHIER_XDMAC_BUSWIDTHS;
	ddev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
			   BIT(DMA_MEM_TO_MEM);
	ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	ddev->max_burst = XDMAC_MAX_WORDS;
	ddev->device_free_chan_resources = uniphier_xdmac_free_chan_resources;
	ddev->device_prep_dma_memcpy = uniphier_xdmac_prep_dma_memcpy;
	ddev->device_prep_slave_sg = uniphier_xdmac_prep_slave_sg;
	ddev->device_config = uniphier_xdmac_slave_config;
	ddev->device_terminate_all = uniphier_xdmac_terminate_all;
	ddev->device_synchronize = uniphier_xdmac_synchronize;
	ddev->device_tx_status = dma_cookie_status;
	ddev->device_issue_pending = uniphier_xdmac_issue_pending;
	INIT_LIST_HEAD(&ddev->channels);

	for (i = 0; i < nr_chans; i++)
		uniphier_xdmac_chan_init(xdev, i);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(dev, irq, uniphier_xdmac_irq_handler,
			       IRQF_SHARED, "xdmac", xdev);
	if (ret) {
		dev_err(dev, "Failed to request IRQ\n");
		return ret;
	}

	ret = dma_async_device_register(ddev);
	if (ret) {
		dev_err(dev, "Failed to register XDMA device\n");
		return ret;
	}

	ret = of_dma_controller_register(dev->of_node,
					 of_dma_uniphier_xlate, xdev);
	if (ret) {
		dev_err(dev, "Failed to register XDMA controller\n");
		goto out_unregister_dmac;
	}

	platform_set_drvdata(pdev, xdev);

	dev_info(&pdev->dev, "UniPhier XDMAC driver (%d channels)\n",
		 nr_chans);

	return 0;

out_unregister_dmac:
	dma_async_device_unregister(ddev);

	return ret;
}

static int uniphier_xdmac_remove(struct platform_device *pdev)
{
	struct uniphier_xdmac_device *xdev = platform_get_drvdata(pdev);
	struct dma_device *ddev = &xdev->ddev;
	struct dma_chan *chan;
	int ret;

	/*
	 * Before reaching here, almost all descriptors have been freed by the
	 * ->device_free_chan_resources() hook. However, each channel might
	 * still be holding one descriptor that was in flight at that moment.
	 * Terminate it to make sure this hardware is no longer running. Then,
	 * free the channel resources once again to avoid a memory leak.
	 */
	list_for_each_entry(chan, &ddev->channels, device_node) {
		ret = dmaengine_terminate_sync(chan);
		if (ret)
			return ret;
		uniphier_xdmac_free_chan_resources(chan);
	}

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(ddev);

	return 0;
}

static const struct of_device_id uniphier_xdmac_match[] = {
	{ .compatible = "socionext,uniphier-xdmac" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, uniphier_xdmac_match);

static struct platform_driver uniphier_xdmac_driver = {
	.probe = uniphier_xdmac_probe,
	.remove = uniphier_xdmac_remove,
	.driver = {
		.name = "uniphier-xdmac",
		.of_match_table = uniphier_xdmac_match,
	},
};
module_platform_driver(uniphier_xdmac_driver);

MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
MODULE_DESCRIPTION("UniPhier external DMA controller driver");
MODULE_LICENSE("GPL v2");