// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SiFive FU540 Platform DMA driver
 * Copyright (C) 2019 SiFive
 *
 * Based partially on:
 * - drivers/dma/fsl-edma.c
 * - drivers/dma/dw-edma/
 * - drivers/dma/pxa-dma.c
 *
 * See the following sources for further documentation:
 * - Chapter 12 "Platform DMA Engine (PDMA)" of
 *   SiFive FU540-C000 v1.0
 *   https://static.dev.sifive.com/FU540-C000-v1.0.pdf
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/slab.h>

#include "sf-pdma.h"
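
/*
 * The PDMA transfer-size, address and residue registers are 64 bits wide,
 * but this driver only assumes 32-bit MMIO: the helpers below (guarded so
 * they do not clash with architectures that already provide readq()/writeq())
 * split each 64-bit access into two 32-bit accesses, low word first.
 */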
#ifndef readq
static inline unsigned long long readq(void __iomem *addr)
{
	return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL);
}
#endif

#ifndef writeq
static inline void writeq(unsigned long long v, void __iomem *addr)
{
	writel(lower_32_bits(v), addr);
	writel(upper_32_bits(v), addr + 4);
}
#endif
static inline struct sf_pdma_chan *to_sf_pdma_chan(struct dma_chan *dchan)
{
	return container_of(dchan, struct sf_pdma_chan, vchan.chan);
}

static inline struct sf_pdma_desc *to_sf_pdma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct sf_pdma_desc, vdesc);
}
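
/*
 * Each channel caches its most recent descriptor in chan->desc. If that
 * descriptor is no longer in use, it is handed back to the caller instead
 * of allocating a new one, which keeps the common single-outstanding-transfer
 * case cheap.
 */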
static struct sf_pdma_desc *sf_pdma_alloc_desc(struct sf_pdma_chan *chan)
{
	struct sf_pdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	if (chan->desc && !chan->desc->in_use) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return chan->desc;
	}

	spin_unlock_irqrestore(&chan->lock, flags);

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->chan = chan;

	return desc;
}
static void sf_pdma_fill_desc(struct sf_pdma_desc *desc,
			      u64 dst, u64 src, u64 size)
{
	desc->xfer_type = PDMA_FULL_SPEED;
	desc->xfer_size = size;
	desc->dst_addr = dst;
	desc->src_addr = src;
}
static void sf_pdma_disclaim_chan(struct sf_pdma_chan *chan)
{
	struct pdma_regs *regs = &chan->regs;

	writel(PDMA_CLEAR_CTRL, regs->ctrl);
}
static struct dma_async_tx_descriptor *
sf_pdma_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dest, dma_addr_t src,
			size_t len, unsigned long flags)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	struct sf_pdma_desc *desc;

	if (chan && (!len || !dest || !src)) {
		dev_err(chan->pdma->dma_dev.dev,
			"Invalid memcpy request: check len, dest and src\n");
		return NULL;
	}

	desc = sf_pdma_alloc_desc(chan);
	if (!desc)
		return NULL;

	desc->in_use = true;
	desc->dirn = DMA_MEM_TO_MEM;
	desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	chan->desc = desc;
	sf_pdma_fill_desc(desc, dest, src, len);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	return desc->async_tx;
}
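
/*
 * Illustrative consumer usage (not part of this driver): assuming a client
 * obtained "chan" through the generic dmaengine API, a memcpy would be
 * driven roughly as follows:
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *	...
 *	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 */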
static int sf_pdma_slave_config(struct dma_chan *dchan,
				struct dma_slave_config *cfg)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);

	memcpy(&chan->cfg, cfg, sizeof(*cfg));

	return 0;
}
static int sf_pdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	struct pdma_regs *regs = &chan->regs;

	dma_cookie_init(dchan);
	writel(PDMA_CLAIM_MASK, regs->ctrl);

	return 0;
}
static void sf_pdma_disable_request(struct sf_pdma_chan *chan)
{
	struct pdma_regs *regs = &chan->regs;

	writel(readl(regs->ctrl) & ~PDMA_RUN_MASK, regs->ctrl);
}
static void sf_pdma_free_chan_resources(struct dma_chan *dchan)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	sf_pdma_disable_request(chan);
	kfree(chan->desc);
	chan->desc = NULL;
	vchan_get_all_descriptors(&chan->vchan, &head);
	sf_pdma_disclaim_chan(chan);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&chan->vchan, &head);
}
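
/*
 * Residue is reported at descriptor granularity: zero once the cookie has
 * completed, the live hardware residue register for the descriptor currently
 * on the wire, and the full transfer size for descriptors still waiting in
 * the queue.
 */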
static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan,
				   dma_cookie_t cookie)
{
	struct virt_dma_desc *vd = NULL;
	struct pdma_regs *regs = &chan->regs;
	unsigned long flags;
	u64 residue = 0;
	struct sf_pdma_desc *desc;
	struct dma_async_tx_descriptor *tx;

	spin_lock_irqsave(&chan->vchan.lock, flags);

	tx = &chan->desc->vdesc.tx;
	if (cookie == tx->chan->completed_cookie)
		goto out;

	if (cookie == tx->cookie) {
		residue = readq(regs->residue);
	} else {
		vd = vchan_find_desc(&chan->vchan, cookie);
		if (!vd)
			goto out;

		desc = to_sf_pdma_desc(vd);
		residue = desc->xfer_size;
	}

out:
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
	return residue;
}
static enum dma_status
sf_pdma_tx_status(struct dma_chan *dchan,
		  dma_cookie_t cookie,
		  struct dma_tx_state *txstate)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	enum dma_status status;

	status = dma_cookie_status(dchan, cookie, txstate);

	if (txstate && status != DMA_ERROR)
		dma_set_residue(txstate, sf_pdma_desc_residue(chan, cookie));

	return status;
}
static int sf_pdma_terminate_all(struct dma_chan *dchan)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	sf_pdma_disable_request(chan);
	kfree(chan->desc);
	chan->desc = NULL;
	chan->xfer_err = false;
	vchan_get_all_descriptors(&chan->vchan, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}
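
/*
 * Kicking a transfer is a single control-register write that claims the
 * channel, unmasks both the done and error interrupts, and sets the run bit.
 */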
static void sf_pdma_enable_request(struct sf_pdma_chan *chan)
{
	struct pdma_regs *regs = &chan->regs;
	u32 v;

	v = PDMA_CLAIM_MASK |
		PDMA_ENABLE_DONE_INT_MASK |
		PDMA_ENABLE_ERR_INT_MASK |
		PDMA_RUN_MASK;

	writel(v, regs->ctrl);
}
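
/*
 * Program the current descriptor into the channel's transfer registers and
 * start it. Called with the vchan lock held.
 */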
static void sf_pdma_xfer_desc(struct sf_pdma_chan *chan)
{
	struct sf_pdma_desc *desc = chan->desc;
	struct pdma_regs *regs = &chan->regs;

	if (!desc) {
		dev_err(chan->pdma->dma_dev.dev, "NULL desc.\n");
		return;
	}

	writel(desc->xfer_type, regs->xfer_type);
	writeq(desc->xfer_size, regs->xfer_size);
	writeq(desc->dst_addr, regs->dst_addr);
	writeq(desc->src_addr, regs->src_addr);

	chan->desc = desc;
	chan->status = DMA_IN_PROGRESS;
	sf_pdma_enable_request(chan);
}
static void sf_pdma_issue_pending(struct dma_chan *dchan)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vchan.lock, flags);

	if (vchan_issue_pending(&chan->vchan) && chan->desc)
		sf_pdma_xfer_desc(chan);

	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
static void sf_pdma_free_desc(struct virt_dma_desc *vdesc)
{
	struct sf_pdma_desc *desc;

	desc = to_sf_pdma_desc(vdesc);
	desc->in_use = false;
}
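
/*
 * Completion and error handling are split in two: the hard IRQ handlers
 * acknowledge the hardware status (and, for the done path, complete or
 * resubmit the descriptor), while the tasklets below invoke the client
 * callbacks and handle the retry bookkeeping.
 */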
static void sf_pdma_donebh_tasklet(unsigned long arg)
{
	struct sf_pdma_chan *chan = (struct sf_pdma_chan *)arg;
	struct sf_pdma_desc *desc = chan->desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->xfer_err) {
		chan->retries = MAX_RETRY;
		chan->status = DMA_COMPLETE;
		chan->xfer_err = false;
	}
	spin_unlock_irqrestore(&chan->lock, flags);

	dmaengine_desc_get_callback_invoke(desc->async_tx, NULL);
}
static void sf_pdma_errbh_tasklet(unsigned long arg)
{
	struct sf_pdma_chan *chan = (struct sf_pdma_chan *)arg;
	struct sf_pdma_desc *desc = chan->desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->retries <= 0) {
		/* failed to recover */
		spin_unlock_irqrestore(&chan->lock, flags);
		dmaengine_desc_get_callback_invoke(desc->async_tx, NULL);
	} else {
		/* retry */
		chan->retries--;
		chan->xfer_err = true;
		chan->status = DMA_ERROR;

		sf_pdma_enable_request(chan);
		spin_unlock_irqrestore(&chan->lock, flags);
	}
}
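
/*
 * The residue register tells the done ISR whether the transfer actually
 * finished: a zero residue completes the virtual descriptor, while a
 * non-zero residue means the engine stopped early, so the descriptor is
 * advanced past the bytes already copied and resubmitted.
 */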
static irqreturn_t sf_pdma_done_isr(int irq, void *dev_id)
{
	struct sf_pdma_chan *chan = dev_id;
	struct pdma_regs *regs = &chan->regs;
	unsigned long flags;
	u64 residue;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	writel((readl(regs->ctrl)) & ~PDMA_DONE_STATUS_MASK, regs->ctrl);
	residue = readq(regs->residue);

	if (!residue) {
		list_del(&chan->desc->vdesc.node);
		vchan_cookie_complete(&chan->desc->vdesc);
	} else {
		/* submit the next transaction if possible */
		struct sf_pdma_desc *desc = chan->desc;

		desc->src_addr += desc->xfer_size - residue;
		desc->dst_addr += desc->xfer_size - residue;
		desc->xfer_size = residue;

		sf_pdma_xfer_desc(chan);
	}

	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	tasklet_hi_schedule(&chan->done_tasklet);

	return IRQ_HANDLED;
}
static irqreturn_t sf_pdma_err_isr(int irq, void *dev_id)
{
	struct sf_pdma_chan *chan = dev_id;
	struct pdma_regs *regs = &chan->regs;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	writel((readl(regs->ctrl)) & ~PDMA_ERR_STATUS_MASK, regs->ctrl);
	spin_unlock_irqrestore(&chan->lock, flags);

	tasklet_schedule(&chan->err_tasklet);

	return IRQ_HANDLED;
}
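
/*
 * The platform exposes two interrupts per channel, laid out as consecutive
 * pairs: index 2 * i is channel i's done interrupt and 2 * i + 1 its error
 * interrupt, which is the ordering the request loop below relies on.
 */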
/**
 * sf_pdma_irq_init() - Init PDMA IRQ Handlers
 * @pdev: pointer of platform_device
 * @pdma: pointer of PDMA engine. Caller should check NULL
 *
 * Initialize DONE and ERROR interrupt handlers for all channels. The caller
 * must make sure the pointers passed in are non-NULL. This function should
 * be called only once during device probe.
 *
 * Context: Any context.
 *
 * Return:
 * * 0		- OK to init all IRQ handlers
 * * -EINVAL	- Fail to request IRQ
 */
static int sf_pdma_irq_init(struct platform_device *pdev, struct sf_pdma *pdma)
{
	int irq, r, i;
	struct sf_pdma_chan *chan;

	for (i = 0; i < pdma->n_chans; i++) {
		chan = &pdma->chans[i];

		irq = platform_get_irq(pdev, i * 2);
		if (irq < 0) {
			dev_err(&pdev->dev, "ch(%d) Can't get done irq.\n", i);
			return -EINVAL;
		}

		r = devm_request_irq(&pdev->dev, irq, sf_pdma_done_isr, 0,
				     dev_name(&pdev->dev), (void *)chan);
		if (r) {
			dev_err(&pdev->dev, "Failed to attach done ISR: %d\n", r);
			return -EINVAL;
		}

		chan->txirq = irq;

		irq = platform_get_irq(pdev, (i * 2) + 1);
		if (irq < 0) {
			dev_err(&pdev->dev, "ch(%d) Can't get err irq.\n", i);
			return -EINVAL;
		}

		r = devm_request_irq(&pdev->dev, irq, sf_pdma_err_isr, 0,
				     dev_name(&pdev->dev), (void *)chan);
		if (r) {
			dev_err(&pdev->dev, "Failed to attach err ISR: %d\n", r);
			return -EINVAL;
		}

		chan->errirq = irq;
	}

	return 0;
}
/**
 * sf_pdma_setup_chans() - Init settings of each channel
 * @pdma: pointer of PDMA engine. Caller should check NULL
 *
 * Initialize all data structures and register bases. The caller must make
 * sure the pointer passed in is non-NULL. This function should be called
 * only once during device probe.
 *
 * Context: Any context.
 *
 * Return: none
 */
static void sf_pdma_setup_chans(struct sf_pdma *pdma)
{
	int i;
	struct sf_pdma_chan *chan;

	INIT_LIST_HEAD(&pdma->dma_dev.channels);

	for (i = 0; i < pdma->n_chans; i++) {
		chan = &pdma->chans[i];

		chan->regs.ctrl = SF_PDMA_REG_BASE(i) + PDMA_CTRL;
		chan->regs.xfer_type = SF_PDMA_REG_BASE(i) + PDMA_XFER_TYPE;
		chan->regs.xfer_size = SF_PDMA_REG_BASE(i) + PDMA_XFER_SIZE;
		chan->regs.dst_addr = SF_PDMA_REG_BASE(i) + PDMA_DST_ADDR;
		chan->regs.src_addr = SF_PDMA_REG_BASE(i) + PDMA_SRC_ADDR;
		chan->regs.act_type = SF_PDMA_REG_BASE(i) + PDMA_ACT_TYPE;
		chan->regs.residue = SF_PDMA_REG_BASE(i) + PDMA_REMAINING_BYTE;
		chan->regs.cur_dst_addr = SF_PDMA_REG_BASE(i) + PDMA_CUR_DST_ADDR;
		chan->regs.cur_src_addr = SF_PDMA_REG_BASE(i) + PDMA_CUR_SRC_ADDR;

		chan->pdma = pdma;
		chan->pm_state = RUNNING;
		chan->xfer_err = false;
		spin_lock_init(&chan->lock);

		chan->vchan.desc_free = sf_pdma_free_desc;
		vchan_init(&chan->vchan, &pdma->dma_dev);

		writel(PDMA_CLEAR_CTRL, chan->regs.ctrl);

		tasklet_init(&chan->done_tasklet,
			     sf_pdma_donebh_tasklet, (unsigned long)chan);
		tasklet_init(&chan->err_tasklet,
			     sf_pdma_errbh_tasklet, (unsigned long)chan);
	}
}
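
/*
 * struct sf_pdma is allocated with its per-channel array appended to it, so
 * a single devm allocation in probe covers the engine and all its channels.
 */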
static int sf_pdma_probe(struct platform_device *pdev)
{
	struct sf_pdma *pdma;
	struct sf_pdma_chan *chan;
	struct resource *res;
	int len, chans;
	int ret;
	const enum dma_slave_buswidth widths =
		DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
		DMA_SLAVE_BUSWIDTH_4_BYTES | DMA_SLAVE_BUSWIDTH_8_BYTES |
		DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES |
		DMA_SLAVE_BUSWIDTH_64_BYTES;

	chans = PDMA_NR_CH;
	len = sizeof(*pdma) + sizeof(*chan) * chans;
	pdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!pdma)
		return -ENOMEM;

	pdma->n_chans = chans;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pdma->membase = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pdma->membase))
		goto ERR_MEMBASE;

	ret = sf_pdma_irq_init(pdev, pdma);
	if (ret)
		goto ERR_INITIRQ;

	sf_pdma_setup_chans(pdma);

	pdma->dma_dev.dev = &pdev->dev;

	/* Setup capability */
	dma_cap_set(DMA_MEMCPY, pdma->dma_dev.cap_mask);
	pdma->dma_dev.copy_align = 2;
	pdma->dma_dev.src_addr_widths = widths;
	pdma->dma_dev.dst_addr_widths = widths;
	pdma->dma_dev.directions = BIT(DMA_MEM_TO_MEM);
	pdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	pdma->dma_dev.descriptor_reuse = true;

	/* Setup DMA APIs */
	pdma->dma_dev.device_alloc_chan_resources =
		sf_pdma_alloc_chan_resources;
	pdma->dma_dev.device_free_chan_resources =
		sf_pdma_free_chan_resources;
	pdma->dma_dev.device_tx_status = sf_pdma_tx_status;
	pdma->dma_dev.device_prep_dma_memcpy = sf_pdma_prep_dma_memcpy;
	pdma->dma_dev.device_config = sf_pdma_slave_config;
	pdma->dma_dev.device_terminate_all = sf_pdma_terminate_all;
	pdma->dma_dev.device_issue_pending = sf_pdma_issue_pending;

	platform_set_drvdata(pdev, pdma);

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		dev_warn(&pdev->dev,
			 "Failed to set DMA mask. Fall back to default.\n");

	ret = dma_async_device_register(&pdma->dma_dev);
	if (ret)
		goto ERR_REG_DMADEVICE;

	return 0;

ERR_MEMBASE:
	ret = PTR_ERR(pdma->membase);
	devm_kfree(&pdev->dev, pdma);
	return ret;

ERR_INITIRQ:
	devm_kfree(&pdev->dev, pdma);
	return ret;

ERR_REG_DMADEVICE:
	devm_kfree(&pdev->dev, pdma);
	dev_err(&pdev->dev,
		"Can't register SiFive Platform DMA. (%d)\n", ret);
	return ret;
}
static int sf_pdma_remove(struct platform_device *pdev)
{
	struct sf_pdma *pdma = platform_get_drvdata(pdev);
	struct sf_pdma_chan *ch;
	int i;

	for (i = 0; i < PDMA_NR_CH; i++) {
		ch = &pdma->chans[i];

		devm_free_irq(&pdev->dev, ch->txirq, ch);
		devm_free_irq(&pdev->dev, ch->errirq, ch);
		list_del(&ch->vchan.chan.device_node);
		tasklet_kill(&ch->vchan.task);
		tasklet_kill(&ch->done_tasklet);
		tasklet_kill(&ch->err_tasklet);
	}

	dma_async_device_unregister(&pdma->dma_dev);

	return 0;
}
static const struct of_device_id sf_pdma_dt_ids[] = {
	{ .compatible = "sifive,fu540-c000-pdma" },
	{},
};
MODULE_DEVICE_TABLE(of, sf_pdma_dt_ids);
static struct platform_driver sf_pdma_driver = {
	.probe		= sf_pdma_probe,
	.remove		= sf_pdma_remove,
	.driver		= {
		.name	= "sf-pdma",
		.of_match_table = of_match_ptr(sf_pdma_dt_ids),
	},
};
static int __init sf_pdma_init(void)
{
	return platform_driver_register(&sf_pdma_driver);
}

static void __exit sf_pdma_exit(void)
{
	platform_driver_unregister(&sf_pdma_driver);
}

/* do early init */
subsys_initcall(sf_pdma_init);
module_exit(sf_pdma_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SiFive Platform DMA driver");
MODULE_AUTHOR("Green Wan <green.wan@sifive.com>");