// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SiFive FU540 Platform DMA driver
 * Copyright (C) 2019 SiFive
 *
 * Based partially on:
 * - drivers/dma/fsl-edma.c
 * - drivers/dma/dw-edma/
 * - drivers/dma/pxa-dma.c
 *
 * See the following sources for further documentation:
 * - Chapter 12 "Platform DMA Engine (PDMA)" of
 *   SiFive FU540-C000 v1.0
 *   https://static.dev.sifive.com/FU540-C000-v1.0.pdf
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/slab.h>

#include "sf-pdma.h"
#ifndef readq
static inline unsigned long long readq(void __iomem *addr)
{
	return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL);
}
#endif

#ifndef writeq
static inline void writeq(unsigned long long v, void __iomem *addr)
{
	writel(lower_32_bits(v), addr);
	writel(upper_32_bits(v), addr + 4);
}
#endif
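/*
 * Note (added for clarity, not from the vendor documentation): when the
 * architecture provides no native 64-bit MMIO accessors, the fallbacks above
 * split each 64-bit register access into two 32-bit accesses, low word first.
 * Such an access is therefore not atomic with respect to hardware updates of
 * the same register.
 */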
static inline struct sf_pdma_chan *to_sf_pdma_chan(struct dma_chan *dchan)
{
	return container_of(dchan, struct sf_pdma_chan, vchan.chan);
}
static inline struct sf_pdma_desc *to_sf_pdma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct sf_pdma_desc, vdesc);
}
static struct sf_pdma_desc *sf_pdma_alloc_desc(struct sf_pdma_chan *chan)
{
	struct sf_pdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	/* Reuse the channel's descriptor if it is no longer in flight */
	if (chan->desc && !chan->desc->in_use) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return chan->desc;
	}

	spin_unlock_irqrestore(&chan->lock, flags);

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->chan = chan;

	return desc;
}
static void sf_pdma_fill_desc(struct sf_pdma_desc *desc,
			      u64 dst, u64 src, u64 size)
{
	desc->xfer_type = PDMA_FULL_SPEED;
	desc->xfer_size = size;
	desc->dst_addr = dst;
	desc->src_addr = src;
}
static void sf_pdma_disclaim_chan(struct sf_pdma_chan *chan)
{
	struct pdma_regs *regs = &chan->regs;

	writel(PDMA_CLEAR_CTRL, regs->ctrl);
}
static struct dma_async_tx_descriptor *
sf_pdma_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dest, dma_addr_t src,
			size_t len, unsigned long flags)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	struct sf_pdma_desc *desc;
	unsigned long iflags;

	if (chan && (!len || !dest || !src)) {
		dev_err(chan->pdma->dma_dev.dev,
			"Please check dma len, dest, src!\n");
		return NULL;
	}

	desc = sf_pdma_alloc_desc(chan);
	if (!desc)
		return NULL;

	desc->in_use = true;
	desc->dirn = DMA_MEM_TO_MEM;
	desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

	/* Use a separate variable so the prep 'flags' are not clobbered */
	spin_lock_irqsave(&chan->vchan.lock, iflags);
	chan->desc = desc;
	sf_pdma_fill_desc(desc, dest, src, len);
	spin_unlock_irqrestore(&chan->vchan.lock, iflags);

	return desc->async_tx;
}
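/*
 * Illustrative sketch, not part of the original driver: how a consumer could
 * push a single memcpy through this engine via the generic dmaengine API.
 * The function name and the synchronous wait are assumptions made for the
 * example only; real clients normally use a completion callback instead of
 * polling.
 */
static int __maybe_unused sf_pdma_example_memcpy(dma_addr_t dest, dma_addr_t src,
						 size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;
	enum dma_status status;

	/* Ask the dmaengine core for any channel that can do MEMCPY. */
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* Prepare the transfer; this ends up in sf_pdma_prep_dma_memcpy(). */
	tx = dmaengine_prep_dma_memcpy(chan, dest, src, len, DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	/* Queue it and kick the engine (sf_pdma_issue_pending()). */
	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie)) {
		dma_release_channel(chan);
		return -EIO;
	}
	dma_async_issue_pending(chan);

	/* Busy-wait for completion; acceptable for an example, not real code. */
	status = dma_sync_wait(chan, cookie);
	dma_release_channel(chan);

	return status == DMA_COMPLETE ? 0 : -EIO;
}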
static int sf_pdma_slave_config(struct dma_chan *dchan,
				struct dma_slave_config *cfg)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);

	memcpy(&chan->cfg, cfg, sizeof(*cfg));

	return 0;
}
static int sf_pdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	struct pdma_regs *regs = &chan->regs;

	dma_cookie_init(dchan);
	writel(PDMA_CLAIM_MASK, regs->ctrl);

	return 0;
}
static void sf_pdma_disable_request(struct sf_pdma_chan *chan)
{
	struct pdma_regs *regs = &chan->regs;

	writel(readl(regs->ctrl) & ~PDMA_RUN_MASK, regs->ctrl);
}
static void sf_pdma_free_chan_resources(struct dma_chan *dchan)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	sf_pdma_disable_request(chan);
	kfree(chan->desc);
	chan->desc = NULL;
	vchan_get_all_descriptors(&chan->vchan, &head);
	sf_pdma_disclaim_chan(chan);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&chan->vchan, &head);
}
static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan,
				   dma_cookie_t cookie)
{
	struct virt_dma_desc *vd = NULL;
	struct pdma_regs *regs = &chan->regs;
	unsigned long flags;
	u64 residue = 0;
	struct sf_pdma_desc *desc;
	struct dma_async_tx_descriptor *tx;

	spin_lock_irqsave(&chan->vchan.lock, flags);

	tx = &chan->desc->vdesc.tx;
	if (cookie == tx->chan->completed_cookie)
		goto out;

	if (cookie == tx->cookie) {
		/* Active transfer: read the remaining byte count from hardware */
		residue = readq(regs->residue);
	} else {
		vd = vchan_find_desc(&chan->vchan, cookie);
		if (!vd)
			goto out;

		desc = to_sf_pdma_desc(vd);
		residue = desc->xfer_size;
	}

out:
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
	return residue;
}
static enum dma_status
sf_pdma_tx_status(struct dma_chan *dchan,
		  dma_cookie_t cookie,
		  struct dma_tx_state *txstate)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	enum dma_status status;

	status = dma_cookie_status(dchan, cookie, txstate);

	if (txstate && status != DMA_ERROR)
		dma_set_residue(txstate, sf_pdma_desc_residue(chan, cookie));

	return status;
}
static int sf_pdma_terminate_all(struct dma_chan *dchan)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	sf_pdma_disable_request(chan);
	kfree(chan->desc);
	chan->desc = NULL;
	chan->xfer_err = false;
	vchan_get_all_descriptors(&chan->vchan, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}
static void sf_pdma_enable_request(struct sf_pdma_chan *chan)
{
	struct pdma_regs *regs = &chan->regs;
	u32 v;

	/* Claim the channel, unmask both interrupts, and start the transfer */
	v = PDMA_CLAIM_MASK |
		PDMA_ENABLE_DONE_INT_MASK |
		PDMA_ENABLE_ERR_INT_MASK |
		PDMA_RUN_MASK;

	writel(v, regs->ctrl);
}
static void sf_pdma_xfer_desc(struct sf_pdma_chan *chan)
{
	struct sf_pdma_desc *desc = chan->desc;
	struct pdma_regs *regs = &chan->regs;

	if (!desc) {
		dev_err(chan->pdma->dma_dev.dev, "NULL desc.\n");
		return;
	}

	writel(desc->xfer_type, regs->xfer_type);
	writeq(desc->xfer_size, regs->xfer_size);
	writeq(desc->dst_addr, regs->dst_addr);
	writeq(desc->src_addr, regs->src_addr);

	chan->desc = desc;
	chan->status = DMA_IN_PROGRESS;
	sf_pdma_enable_request(chan);
}
static void sf_pdma_issue_pending(struct dma_chan *dchan)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vchan.lock, flags);

	if (vchan_issue_pending(&chan->vchan) && chan->desc)
		sf_pdma_xfer_desc(chan);

	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
static void sf_pdma_free_desc(struct virt_dma_desc *vdesc)
{
	struct sf_pdma_desc *desc;

	desc = to_sf_pdma_desc(vdesc);
	desc->in_use = false;
}
static void sf_pdma_donebh_tasklet(struct tasklet_struct *t)
{
	struct sf_pdma_chan *chan = from_tasklet(chan, t, done_tasklet);
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->xfer_err) {
		chan->retries = MAX_RETRY;
		chan->status = DMA_COMPLETE;
		chan->xfer_err = false;
	}
	spin_unlock_irqrestore(&chan->lock, flags);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	list_del(&chan->desc->vdesc.node);
	vchan_cookie_complete(&chan->desc->vdesc);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
static void sf_pdma_errbh_tasklet(struct tasklet_struct *t)
{
	struct sf_pdma_chan *chan = from_tasklet(chan, t, err_tasklet);
	struct sf_pdma_desc *desc = chan->desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->retries <= 0) {
		/* fail to recover */
		spin_unlock_irqrestore(&chan->lock, flags);
		dmaengine_desc_get_callback_invoke(desc->async_tx, NULL);
	} else {
		/* retry */
		chan->retries--;
		chan->xfer_err = true;
		chan->status = DMA_ERROR;

		sf_pdma_enable_request(chan);
		spin_unlock_irqrestore(&chan->lock, flags);
	}
}
static irqreturn_t sf_pdma_done_isr(int irq, void *dev_id)
{
	struct sf_pdma_chan *chan = dev_id;
	struct pdma_regs *regs = &chan->regs;
	u64 residue;

	spin_lock(&chan->vchan.lock);
	writel((readl(regs->ctrl)) & ~PDMA_DONE_STATUS_MASK, regs->ctrl);
	residue = readq(regs->residue);

	if (!residue) {
		tasklet_hi_schedule(&chan->done_tasklet);
	} else {
		/* submit next transaction if possible */
		struct sf_pdma_desc *desc = chan->desc;

		desc->src_addr += desc->xfer_size - residue;
		desc->dst_addr += desc->xfer_size - residue;
		desc->xfer_size = residue;

		sf_pdma_xfer_desc(chan);
	}

	spin_unlock(&chan->vchan.lock);

	return IRQ_HANDLED;
}
static irqreturn_t sf_pdma_err_isr(int irq, void *dev_id)
{
	struct sf_pdma_chan *chan = dev_id;
	struct pdma_regs *regs = &chan->regs;

	spin_lock(&chan->lock);
	writel((readl(regs->ctrl)) & ~PDMA_ERR_STATUS_MASK, regs->ctrl);
	spin_unlock(&chan->lock);

	tasklet_schedule(&chan->err_tasklet);

	return IRQ_HANDLED;
}
/**
 * sf_pdma_irq_init() - Init PDMA IRQ Handlers
 * @pdev: pointer to the platform_device
 * @pdma: pointer to the PDMA engine; the caller should check for NULL
 *
 * Initialize the DONE and ERROR interrupt handlers for each channel. The
 * caller must make sure the pointers passed in are non-NULL. This function
 * should be called only once during device probe.
 *
 * Context: Any context.
 *
 * Return:
 * * 0       - all IRQ handlers initialized
 * * -EINVAL - failed to get or request an IRQ
 */
static int sf_pdma_irq_init(struct platform_device *pdev, struct sf_pdma *pdma)
{
	int irq, r, i;
	struct sf_pdma_chan *chan;

	for (i = 0; i < pdma->n_chans; i++) {
		chan = &pdma->chans[i];

		irq = platform_get_irq(pdev, i * 2);
		if (irq < 0) {
			dev_err(&pdev->dev, "ch(%d) Can't get done irq.\n", i);
			return -EINVAL;
		}

		r = devm_request_irq(&pdev->dev, irq, sf_pdma_done_isr, 0,
				     dev_name(&pdev->dev), (void *)chan);
		if (r) {
			dev_err(&pdev->dev, "Fail to attach done ISR: %d\n", r);
			return -EINVAL;
		}

		chan->txirq = irq;

		irq = platform_get_irq(pdev, (i * 2) + 1);
		if (irq < 0) {
			dev_err(&pdev->dev, "ch(%d) Can't get err irq.\n", i);
			return -EINVAL;
		}

		r = devm_request_irq(&pdev->dev, irq, sf_pdma_err_isr, 0,
				     dev_name(&pdev->dev), (void *)chan);
		if (r) {
			dev_err(&pdev->dev, "Fail to attach err ISR: %d\n", r);
			return -EINVAL;
		}

		chan->errirq = irq;
	}

	return 0;
}
/**
 * sf_pdma_setup_chans() - Init settings of each channel
 * @pdma: pointer to the PDMA engine; the caller should check for NULL
 *
 * Initialize all channel data structures and register addresses. The caller
 * must make sure the pointer passed in is non-NULL. This function should be
 * called only once during device probe.
 *
 * Context: Any context.
 *
 * Return: none
 */
static void sf_pdma_setup_chans(struct sf_pdma *pdma)
{
	int i;
	struct sf_pdma_chan *chan;

	INIT_LIST_HEAD(&pdma->dma_dev.channels);

	for (i = 0; i < pdma->n_chans; i++) {
		chan = &pdma->chans[i];

		/* Register addresses are per-channel windows off pdma->membase */
		chan->regs.ctrl =
			SF_PDMA_REG_BASE(i) + PDMA_CTRL;
		chan->regs.xfer_type =
			SF_PDMA_REG_BASE(i) + PDMA_XFER_TYPE;
		chan->regs.xfer_size =
			SF_PDMA_REG_BASE(i) + PDMA_XFER_SIZE;
		chan->regs.dst_addr =
			SF_PDMA_REG_BASE(i) + PDMA_DST_ADDR;
		chan->regs.src_addr =
			SF_PDMA_REG_BASE(i) + PDMA_SRC_ADDR;
		chan->regs.act_type =
			SF_PDMA_REG_BASE(i) + PDMA_ACT_TYPE;
		chan->regs.residue =
			SF_PDMA_REG_BASE(i) + PDMA_REMAINING_BYTE;
		chan->regs.cur_dst_addr =
			SF_PDMA_REG_BASE(i) + PDMA_CUR_DST_ADDR;
		chan->regs.cur_src_addr =
			SF_PDMA_REG_BASE(i) + PDMA_CUR_SRC_ADDR;

		chan->pdma = pdma;
		chan->pm_state = RUNNING;
		chan->xfer_err = false;
		spin_lock_init(&chan->lock);

		chan->vchan.desc_free = sf_pdma_free_desc;
		vchan_init(&chan->vchan, &pdma->dma_dev);

		writel(PDMA_CLEAR_CTRL, chan->regs.ctrl);

		tasklet_setup(&chan->done_tasklet, sf_pdma_donebh_tasklet);
		tasklet_setup(&chan->err_tasklet, sf_pdma_errbh_tasklet);
	}
}
static int sf_pdma_probe(struct platform_device *pdev)
{
	struct sf_pdma *pdma;
	struct sf_pdma_chan *chan;
	struct resource *res;
	int len, chans;
	int ret;
	const enum dma_slave_buswidth widths =
		DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
		DMA_SLAVE_BUSWIDTH_4_BYTES | DMA_SLAVE_BUSWIDTH_8_BYTES |
		DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES |
		DMA_SLAVE_BUSWIDTH_64_BYTES;
	chans = PDMA_NR_CH;
	len = sizeof(*pdma) + sizeof(*chan) * chans;
	pdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!pdma)
		return -ENOMEM;

	pdma->n_chans = chans;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pdma->membase = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pdma->membase))
		return PTR_ERR(pdma->membase);

	ret = sf_pdma_irq_init(pdev, pdma);
	if (ret)
		return ret;

	sf_pdma_setup_chans(pdma);
	pdma->dma_dev.dev = &pdev->dev;

	/* Setup capability */
	dma_cap_set(DMA_MEMCPY, pdma->dma_dev.cap_mask);
	pdma->dma_dev.copy_align = 2;
	pdma->dma_dev.src_addr_widths = widths;
	pdma->dma_dev.dst_addr_widths = widths;
	pdma->dma_dev.directions = BIT(DMA_MEM_TO_MEM);
	pdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	pdma->dma_dev.descriptor_reuse = true;

	pdma->dma_dev.device_alloc_chan_resources =
		sf_pdma_alloc_chan_resources;
	pdma->dma_dev.device_free_chan_resources =
		sf_pdma_free_chan_resources;
	pdma->dma_dev.device_tx_status = sf_pdma_tx_status;
	pdma->dma_dev.device_prep_dma_memcpy = sf_pdma_prep_dma_memcpy;
	pdma->dma_dev.device_config = sf_pdma_slave_config;
	pdma->dma_dev.device_terminate_all = sf_pdma_terminate_all;
	pdma->dma_dev.device_issue_pending = sf_pdma_issue_pending;
	platform_set_drvdata(pdev, pdma);

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		dev_warn(&pdev->dev,
			 "Failed to set DMA mask. Fall back to default.\n");

	ret = dma_async_device_register(&pdma->dma_dev);
	if (ret) {
		dev_err(&pdev->dev,
			"Can't register SiFive Platform DMA. (%d)\n", ret);
		return ret;
	}

	return 0;
}
static int sf_pdma_remove(struct platform_device *pdev)
{
	struct sf_pdma *pdma = platform_get_drvdata(pdev);
	struct sf_pdma_chan *ch;
	int i;

	for (i = 0; i < PDMA_NR_CH; i++) {
		ch = &pdma->chans[i];

		devm_free_irq(&pdev->dev, ch->txirq, ch);
		devm_free_irq(&pdev->dev, ch->errirq, ch);
		list_del(&ch->vchan.chan.device_node);
		tasklet_kill(&ch->vchan.task);
		tasklet_kill(&ch->done_tasklet);
		tasklet_kill(&ch->err_tasklet);
	}

	dma_async_device_unregister(&pdma->dma_dev);

	return 0;
}
static const struct of_device_id sf_pdma_dt_ids[] = {
	{ .compatible = "sifive,fu540-c000-pdma" },
	{},
};
MODULE_DEVICE_TABLE(of, sf_pdma_dt_ids);
static struct platform_driver sf_pdma_driver = {
	.probe		= sf_pdma_probe,
	.remove		= sf_pdma_remove,
	.driver		= {
		.name		= "sf-pdma",
		.of_match_table	= sf_pdma_dt_ids,
	},
};
static int __init sf_pdma_init(void)
{
	return platform_driver_register(&sf_pdma_driver);
}

static void __exit sf_pdma_exit(void)
{
	platform_driver_unregister(&sf_pdma_driver);
}

/* do early init */
subsys_initcall(sf_pdma_init);
module_exit(sf_pdma_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SiFive Platform DMA driver");
MODULE_AUTHOR("Green Wan <green.wan@sifive.com>");