// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SiFive FU540 Platform DMA driver
 * Copyright (C) 2019 SiFive
 *
 * Based partially on:
 * - drivers/dma/fsl-edma.c
 * - drivers/dma/dw-edma/
 * - drivers/dma/pxa-dma.c
 *
 * See the following sources for further documentation:
 * - Chapter 12 "Platform DMA Engine (PDMA)" of
 *   SiFive FU540-C000 v1.0
 *   https://static.dev.sifive.com/FU540-C000-v1.0.pdf
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/slab.h>

#include "sf-pdma.h"
#define PDMA_QUIRK_NO_STRICT_ORDERING	BIT(0)
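/*
 * The PDMA transfer-size, address, and residue registers are 64 bits wide,
 * but not every configuration provides native 64-bit MMIO accessors.  The
 * local helpers below (compiled only when the architecture does not already
 * supply readq()/writeq()) split each 64-bit access into two 32-bit
 * accesses, low word first.
 */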
#ifndef readq
static inline unsigned long long readq(void __iomem *addr)
{
	return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL);
}
#endif
#ifndef writeq
static inline void writeq(unsigned long long v, void __iomem *addr)
{
	writel(lower_32_bits(v), addr);
	writel(upper_32_bits(v), addr + 4);
}
#endif
static inline struct sf_pdma_chan *to_sf_pdma_chan(struct dma_chan *dchan)
{
	return container_of(dchan, struct sf_pdma_chan, vchan.chan);
}
static inline struct sf_pdma_desc *to_sf_pdma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct sf_pdma_desc, vdesc);
}
static struct sf_pdma_desc *sf_pdma_alloc_desc(struct sf_pdma_chan *chan)
{
	struct sf_pdma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->chan = chan;

	return desc;
}
static void sf_pdma_fill_desc(struct sf_pdma_desc *desc,
			      u64 dst, u64 src, u64 size)
{
	desc->xfer_type = desc->chan->pdma->transfer_type;
	desc->xfer_size = size;
	desc->dst_addr = dst;
	desc->src_addr = src;
}
static void sf_pdma_disclaim_chan(struct sf_pdma_chan *chan)
{
	struct pdma_regs *regs = &chan->regs;

	writel(PDMA_CLEAR_CTRL, regs->ctrl);
}
static struct dma_async_tx_descriptor *
sf_pdma_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dest, dma_addr_t src,
			size_t len, unsigned long flags)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	struct sf_pdma_desc *desc;
	unsigned long iflags;

	if (chan && (!len || !dest || !src)) {
		dev_err(chan->pdma->dma_dev.dev,
			"Please check dma len, dest, src!\n");
		return NULL;
	}

	desc = sf_pdma_alloc_desc(chan);
	if (!desc)
		return NULL;

	desc->dirn = DMA_MEM_TO_MEM;
	desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

	spin_lock_irqsave(&chan->vchan.lock, iflags);
	sf_pdma_fill_desc(desc, dest, src, len);
	spin_unlock_irqrestore(&chan->vchan.lock, iflags);

	return desc->async_tx;
}
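/*
 * A minimal sketch of how a dmaengine client would drive this memcpy channel
 * (illustrative only, not part of this driver; error handling omitted and the
 * channel name and address variables are assumptions):
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "pdma-ch0");
 *	struct dma_async_tx_descriptor *tx =
 *		dmaengine_prep_dma_memcpy(chan, dst_phys, src_phys, len, 0);
 *	dma_cookie_t cookie = dmaengine_submit(tx);
 *
 *	dma_async_issue_pending(chan);
 *	// ...wait, then check completion with:
 *	// dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 *	dma_release_channel(chan);
 */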
static int sf_pdma_slave_config(struct dma_chan *dchan,
				struct dma_slave_config *cfg)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);

	memcpy(&chan->cfg, cfg, sizeof(*cfg));

	return 0;
}
static int sf_pdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	struct pdma_regs *regs = &chan->regs;

	dma_cookie_init(dchan);
	writel(PDMA_CLAIM_MASK, regs->ctrl);

	return 0;
}
static void sf_pdma_disable_request(struct sf_pdma_chan *chan)
{
	struct pdma_regs *regs = &chan->regs;

	writel(readl(regs->ctrl) & ~PDMA_RUN_MASK, regs->ctrl);
}
static void sf_pdma_free_chan_resources(struct dma_chan *dchan)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	sf_pdma_disable_request(chan);
	kfree(chan->desc);
	chan->desc = NULL;
	vchan_get_all_descriptors(&chan->vchan, &head);
	sf_pdma_disclaim_chan(chan);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&chan->vchan, &head);
}
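/*
 * Residue reporting: if the cookie matches the descriptor currently on the
 * hardware, read the remaining byte count from the channel's residue
 * register; if it refers to a descriptor that is still queued, the whole
 * xfer_size is outstanding; otherwise nothing is left.
 */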
static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan,
				   dma_cookie_t cookie)
{
	struct virt_dma_desc *vd = NULL;
	struct pdma_regs *regs = &chan->regs;
	unsigned long flags;
	u64 residue = 0;
	struct sf_pdma_desc *desc;
	struct dma_async_tx_descriptor *tx = NULL;

	spin_lock_irqsave(&chan->vchan.lock, flags);

	list_for_each_entry(vd, &chan->vchan.desc_submitted, node)
		if (vd->tx.cookie == cookie)
			tx = &vd->tx;

	if (!tx)
		goto out;

	if (cookie == tx->chan->completed_cookie)
		goto out;

	if (cookie == tx->cookie) {
		residue = readq(regs->residue);
	} else {
		vd = vchan_find_desc(&chan->vchan, cookie);
		if (!vd)
			goto out;

		desc = to_sf_pdma_desc(vd);
		residue = desc->xfer_size;
	}

out:
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
	return residue;
}
static enum dma_status
sf_pdma_tx_status(struct dma_chan *dchan,
		  dma_cookie_t cookie,
		  struct dma_tx_state *txstate)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	enum dma_status status;

	status = dma_cookie_status(dchan, cookie, txstate);

	if (txstate && status != DMA_ERROR)
		dma_set_residue(txstate, sf_pdma_desc_residue(chan, cookie));

	return status;
}
static int sf_pdma_terminate_all(struct dma_chan *dchan)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	sf_pdma_disable_request(chan);
	kfree(chan->desc);
	chan->desc = NULL;
	chan->xfer_err = false;
	vchan_get_all_descriptors(&chan->vchan, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}
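/*
 * The per-channel control register packs the claim bit, the run bit and the
 * done/error interrupt enables, so kicking off a transfer claims the channel,
 * unmasks both interrupts and sets run in a single register write.
 */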
static void sf_pdma_enable_request(struct sf_pdma_chan *chan)
{
	struct pdma_regs *regs = &chan->regs;
	u32 v;

	v = PDMA_CLAIM_MASK |
		PDMA_ENABLE_DONE_INT_MASK |
		PDMA_ENABLE_ERR_INT_MASK |
		PDMA_RUN_MASK;

	writel(v, regs->ctrl);
}
static struct sf_pdma_desc *sf_pdma_get_first_pending_desc(struct sf_pdma_chan *chan)
{
	struct virt_dma_chan *vchan = &chan->vchan;
	struct virt_dma_desc *vdesc;

	if (list_empty(&vchan->desc_issued))
		return NULL;

	vdesc = list_first_entry(&vchan->desc_issued, struct virt_dma_desc, node);

	return container_of(vdesc, struct sf_pdma_desc, vdesc);
}
static void sf_pdma_xfer_desc(struct sf_pdma_chan *chan)
{
	struct sf_pdma_desc *desc = chan->desc;
	struct pdma_regs *regs = &chan->regs;

	if (!desc) {
		dev_err(chan->pdma->dma_dev.dev, "NULL desc.\n");
		return;
	}

	writel(desc->xfer_type, regs->xfer_type);
	writeq(desc->xfer_size, regs->xfer_size);
	writeq(desc->dst_addr, regs->dst_addr);
	writeq(desc->src_addr, regs->src_addr);

	chan->status = DMA_IN_PROGRESS;
	sf_pdma_enable_request(chan);
}
static void sf_pdma_issue_pending(struct dma_chan *dchan)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vchan.lock, flags);

	if (!chan->desc && vchan_issue_pending(&chan->vchan)) {
		/* vchan_issue_pending has already checked that desc is not NULL */
		chan->desc = sf_pdma_get_first_pending_desc(chan);
		sf_pdma_xfer_desc(chan);
	}

	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
static void sf_pdma_free_desc(struct virt_dma_desc *vdesc)
{
	struct sf_pdma_desc *desc;

	desc = to_sf_pdma_desc(vdesc);
	kfree(desc);
}
static void sf_pdma_donebh_tasklet(struct tasklet_struct *t)
{
	struct sf_pdma_chan *chan = from_tasklet(chan, t, done_tasklet);
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->xfer_err) {
		chan->retries = MAX_RETRY;
		chan->status = DMA_COMPLETE;
		chan->xfer_err = false;
	}
	spin_unlock_irqrestore(&chan->lock, flags);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	list_del(&chan->desc->vdesc.node);
	vchan_cookie_complete(&chan->desc->vdesc);

	chan->desc = sf_pdma_get_first_pending_desc(chan);
	if (chan->desc)
		sf_pdma_xfer_desc(chan);

	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
static void sf_pdma_errbh_tasklet(struct tasklet_struct *t)
{
	struct sf_pdma_chan *chan = from_tasklet(chan, t, err_tasklet);
	struct sf_pdma_desc *desc = chan->desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->retries <= 0) {
		/* fail to recover */
		spin_unlock_irqrestore(&chan->lock, flags);
		dmaengine_desc_get_callback_invoke(desc->async_tx, NULL);
	} else {
		/* retry */
		chan->retries--;
		chan->xfer_err = true;
		chan->status = DMA_ERROR;

		sf_pdma_enable_request(chan);
		spin_unlock_irqrestore(&chan->lock, flags);
	}
}
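/*
 * DONE interrupt handling: clear the done status bit, then read the residue
 * register.  A zero residue means the descriptor fully completed and the
 * heavier completion work is deferred to the done tasklet; a non-zero residue
 * means the hardware stopped early, so the handler advances the source and
 * destination addresses past the bytes already copied and restarts the
 * remainder of the transfer directly from the ISR.
 */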
static irqreturn_t sf_pdma_done_isr(int irq, void *dev_id)
{
	struct sf_pdma_chan *chan = dev_id;
	struct pdma_regs *regs = &chan->regs;
	u64 residue;

	spin_lock(&chan->vchan.lock);
	writel((readl(regs->ctrl)) & ~PDMA_DONE_STATUS_MASK, regs->ctrl);
	residue = readq(regs->residue);

	if (!residue) {
		tasklet_hi_schedule(&chan->done_tasklet);
	} else {
		/* submit next transaction if possible */
		struct sf_pdma_desc *desc = chan->desc;

		desc->src_addr += desc->xfer_size - residue;
		desc->dst_addr += desc->xfer_size - residue;
		desc->xfer_size = residue;

		sf_pdma_xfer_desc(chan);
	}

	spin_unlock(&chan->vchan.lock);

	return IRQ_HANDLED;
}
static irqreturn_t sf_pdma_err_isr(int irq, void *dev_id)
{
	struct sf_pdma_chan *chan = dev_id;
	struct pdma_regs *regs = &chan->regs;

	spin_lock(&chan->lock);
	writel((readl(regs->ctrl)) & ~PDMA_ERR_STATUS_MASK, regs->ctrl);
	spin_unlock(&chan->lock);

	tasklet_schedule(&chan->err_tasklet);

	return IRQ_HANDLED;
}
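/*
 * Each channel uses a pair of platform interrupts: index (2 * i) is the DONE
 * interrupt and index (2 * i + 1) is the ERROR interrupt for channel i, which
 * is why the requests below step through the IRQ resources two at a time.
 */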
/**
 * sf_pdma_irq_init() - Init PDMA IRQ Handlers
 * @pdev: pointer of platform_device
 * @pdma: pointer of PDMA engine. Caller should check NULL
 *
 * Initialize DONE and ERROR interrupt handlers for each channel. Callers
 * should make sure the pointers passed in are non-NULL. This function should
 * be called only once during the device probe.
 *
 * Context: Any context.
 *
 * Return:
 * * 0		- OK to init all IRQ handlers
 * * -EINVAL	- Fail to request IRQ
 */
static int sf_pdma_irq_init(struct platform_device *pdev, struct sf_pdma *pdma)
{
	int irq, r, i;
	struct sf_pdma_chan *chan;

	for (i = 0; i < pdma->n_chans; i++) {
		chan = &pdma->chans[i];

		irq = platform_get_irq(pdev, i * 2);
		if (irq < 0)
			return -EINVAL;

		r = devm_request_irq(&pdev->dev, irq, sf_pdma_done_isr, 0,
				     dev_name(&pdev->dev), (void *)chan);
		if (r) {
			dev_err(&pdev->dev, "Fail to attach done ISR: %d\n", r);
			return -EINVAL;
		}

		chan->txirq = irq;

		irq = platform_get_irq(pdev, (i * 2) + 1);
		if (irq < 0)
			return -EINVAL;

		r = devm_request_irq(&pdev->dev, irq, sf_pdma_err_isr, 0,
				     dev_name(&pdev->dev), (void *)chan);
		if (r) {
			dev_err(&pdev->dev, "Fail to attach err ISR: %d\n", r);
			return -EINVAL;
		}

		chan->errirq = irq;
	}

	return 0;
}
/**
 * sf_pdma_setup_chans() - Init settings of each channel
 * @pdma: pointer of PDMA engine. Caller should check NULL
 *
 * Initialize all data structures and register bases. Callers should make sure
 * the pointer passed in is non-NULL. This function should be called only once
 * during the device probe.
 *
 * Context: Any context.
 *
 * Return: none
 */
static void sf_pdma_setup_chans(struct sf_pdma *pdma)
{
	int i;
	struct sf_pdma_chan *chan;

	INIT_LIST_HEAD(&pdma->dma_dev.channels);

	for (i = 0; i < pdma->n_chans; i++) {
		chan = &pdma->chans[i];
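
		/*
		 * Each channel owns its own block of PDMA registers;
		 * SF_PDMA_REG_BASE(i) (from sf-pdma.h) gives channel i's
		 * offset from the controller base, and the per-register
		 * offsets below are added on top of it.
		 */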
		chan->regs.ctrl =
			SF_PDMA_REG_BASE(i) + PDMA_CTRL;
		chan->regs.xfer_type =
			SF_PDMA_REG_BASE(i) + PDMA_XFER_TYPE;
		chan->regs.xfer_size =
			SF_PDMA_REG_BASE(i) + PDMA_XFER_SIZE;
		chan->regs.dst_addr =
			SF_PDMA_REG_BASE(i) + PDMA_DST_ADDR;
		chan->regs.src_addr =
			SF_PDMA_REG_BASE(i) + PDMA_SRC_ADDR;
		chan->regs.act_type =
			SF_PDMA_REG_BASE(i) + PDMA_ACT_TYPE;
		chan->regs.residue =
			SF_PDMA_REG_BASE(i) + PDMA_REMAINING_BYTE;
		chan->regs.cur_dst_addr =
			SF_PDMA_REG_BASE(i) + PDMA_CUR_DST_ADDR;
		chan->regs.cur_src_addr =
			SF_PDMA_REG_BASE(i) + PDMA_CUR_SRC_ADDR;

		chan->pdma = pdma;
		chan->pm_state = RUNNING;
		chan->xfer_err = false;
		spin_lock_init(&chan->lock);

		chan->vchan.desc_free = sf_pdma_free_desc;
		vchan_init(&chan->vchan, &pdma->dma_dev);

		writel(PDMA_CLEAR_CTRL, chan->regs.ctrl);

		tasklet_setup(&chan->done_tasklet, sf_pdma_donebh_tasklet);
		tasklet_setup(&chan->err_tasklet, sf_pdma_errbh_tasklet);
	}
}
static int sf_pdma_probe(struct platform_device *pdev)
{
	const struct sf_pdma_driver_platdata *ddata;
	struct sf_pdma *pdma;
	int ret, n_chans;
	const enum dma_slave_buswidth widths =
		DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
		DMA_SLAVE_BUSWIDTH_4_BYTES | DMA_SLAVE_BUSWIDTH_8_BYTES |
		DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES |
		DMA_SLAVE_BUSWIDTH_64_BYTES;

	ret = of_property_read_u32(pdev->dev.of_node, "dma-channels", &n_chans);
	if (ret) {
		/* backwards-compatibility for no dma-channels property */
		dev_dbg(&pdev->dev, "set number of channels to default value: 4\n");
		n_chans = PDMA_MAX_NR_CH;
	} else if (n_chans > PDMA_MAX_NR_CH) {
		dev_err(&pdev->dev, "the number of channels exceeds the maximum\n");
		return -EINVAL;
	}

	pdma = devm_kzalloc(&pdev->dev, struct_size(pdma, chans, n_chans),
			    GFP_KERNEL);
	if (!pdma)
		return -ENOMEM;

	pdma->n_chans = n_chans;
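
	/*
	 * Default to full-speed, strictly ordered transfers.  Match data may
	 * carry the PDMA_QUIRK_NO_STRICT_ORDERING quirk (set for the
	 * Microchip PolarFire SoC PDMA below), in which case the
	 * strict-ordering bit is cleared from the transfer type.
	 */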
	pdma->transfer_type = PDMA_FULL_SPEED | PDMA_STRICT_ORDERING;

	ddata = device_get_match_data(&pdev->dev);
	if (ddata) {
		if (ddata->quirks & PDMA_QUIRK_NO_STRICT_ORDERING)
			pdma->transfer_type &= ~PDMA_STRICT_ORDERING;
	}

	pdma->membase = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pdma->membase))
		return PTR_ERR(pdma->membase);
	ret = sf_pdma_irq_init(pdev, pdma);
	if (ret)
		return ret;

	sf_pdma_setup_chans(pdma);

	pdma->dma_dev.dev = &pdev->dev;
	/* Setup capability */
	dma_cap_set(DMA_MEMCPY, pdma->dma_dev.cap_mask);
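	/*
	 * copy_align is expressed as log2 of the required alignment, so the
	 * value 2 below advertises 4-byte aligned memcpy transfers.
	 */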
	pdma->dma_dev.copy_align = 2;
	pdma->dma_dev.src_addr_widths = widths;
	pdma->dma_dev.dst_addr_widths = widths;
	pdma->dma_dev.directions = BIT(DMA_MEM_TO_MEM);
	pdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	pdma->dma_dev.descriptor_reuse = true;
	pdma->dma_dev.device_alloc_chan_resources =
		sf_pdma_alloc_chan_resources;
	pdma->dma_dev.device_free_chan_resources =
		sf_pdma_free_chan_resources;
	pdma->dma_dev.device_tx_status = sf_pdma_tx_status;
	pdma->dma_dev.device_prep_dma_memcpy = sf_pdma_prep_dma_memcpy;
	pdma->dma_dev.device_config = sf_pdma_slave_config;
	pdma->dma_dev.device_terminate_all = sf_pdma_terminate_all;
	pdma->dma_dev.device_issue_pending = sf_pdma_issue_pending;
	platform_set_drvdata(pdev, pdma);

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		dev_warn(&pdev->dev,
			 "Failed to set DMA mask. Fall back to default.\n");
	ret = dma_async_device_register(&pdma->dma_dev);
	if (ret) {
		dev_err(&pdev->dev,
			"Can't register SiFive Platform DMA. (%d)\n", ret);
		return ret;
	}
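
	/*
	 * Channels are exposed to device tree consumers by index:
	 * of_dma_xlate_by_chan_id() maps the single cell in a consumer's
	 * "dmas" phandle argument directly to a channel number.
	 */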
	ret = of_dma_controller_register(pdev->dev.of_node,
					 of_dma_xlate_by_chan_id, pdma);
	if (ret) {
		dev_err(&pdev->dev,
			"Can't register SiFive Platform OF_DMA. (%d)\n", ret);
		goto err_unregister;
	}

	return 0;

err_unregister:
	dma_async_device_unregister(&pdma->dma_dev);

	return ret;
}
static void sf_pdma_remove(struct platform_device *pdev)
{
	struct sf_pdma *pdma = platform_get_drvdata(pdev);
	struct sf_pdma_chan *ch;
	int i;

	for (i = 0; i < pdma->n_chans; i++) {
		ch = &pdma->chans[i];

		devm_free_irq(&pdev->dev, ch->txirq, ch);
		devm_free_irq(&pdev->dev, ch->errirq, ch);
		list_del(&ch->vchan.chan.device_node);
		tasklet_kill(&ch->vchan.task);
		tasklet_kill(&ch->done_tasklet);
		tasklet_kill(&ch->err_tasklet);
	}

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&pdma->dma_dev);
}
static const struct sf_pdma_driver_platdata mpfs_pdma = {
	.quirks = PDMA_QUIRK_NO_STRICT_ORDERING,
};
static const struct of_device_id sf_pdma_dt_ids[] = {
	{
		.compatible = "sifive,fu540-c000-pdma",
	}, {
		.compatible = "sifive,pdma0",
	}, {
		.compatible = "microchip,mpfs-pdma",
		.data	    = &mpfs_pdma,
	},
	{},
};
MODULE_DEVICE_TABLE(of, sf_pdma_dt_ids);
static struct platform_driver sf_pdma_driver = {
	.probe		= sf_pdma_probe,
	.remove		= sf_pdma_remove,
	.driver		= {
		.name		= "sf-pdma",
		.of_match_table	= sf_pdma_dt_ids,
	},
};
static int __init sf_pdma_init(void)
{
	return platform_driver_register(&sf_pdma_driver);
}
static void __exit sf_pdma_exit(void)
{
	platform_driver_unregister(&sf_pdma_driver);
}
subsys_initcall(sf_pdma_init);
module_exit(sf_pdma_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SiFive Platform DMA driver");
MODULE_AUTHOR("Green Wan <green.wan@sifive.com>");