/*
 * Core driver for the Intel integrated DMA 64-bit
 *
 * Copyright (C) 2015 Intel Corporation
 * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "idma64.h"
/* Platform driver name */
#define DRV_NAME		"idma64"

/* For now we support only two channels */
#define IDMA64_NR_CHAN		2
/* ---------------------------------------------------------------------- */

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
/* ---------------------------------------------------------------------- */

static void idma64_off(struct idma64 *idma64)
{
	unsigned short count = 100;

	dma_writel(idma64, CFG, 0);

	channel_clear_bit(idma64, MASK(XFER), idma64->all_chan_mask);
	channel_clear_bit(idma64, MASK(BLOCK), idma64->all_chan_mask);
	channel_clear_bit(idma64, MASK(SRC_TRAN), idma64->all_chan_mask);
	channel_clear_bit(idma64, MASK(DST_TRAN), idma64->all_chan_mask);
	channel_clear_bit(idma64, MASK(ERROR), idma64->all_chan_mask);

	/* Busy-wait (bounded by count) until the controller reports itself disabled */
	do {
		cpu_relax();
	} while (dma_readl(idma64, CFG) & IDMA64_CFG_DMA_EN && --count);
}
static void idma64_on(struct idma64 *idma64)
{
	dma_writel(idma64, CFG, IDMA64_CFG_DMA_EN);
}

/* ---------------------------------------------------------------------- */
static void idma64_chan_init(struct idma64 *idma64, struct idma64_chan *idma64c)
{
	u32 cfghi = IDMA64C_CFGH_SRC_PER(1) | IDMA64C_CFGH_DST_PER(0);
	u32 cfglo = 0;

	/* Set default burst alignment */
	cfglo |= IDMA64C_CFGL_DST_BURST_ALIGN | IDMA64C_CFGL_SRC_BURST_ALIGN;

	channel_writel(idma64c, CFG_LO, cfglo);
	channel_writel(idma64c, CFG_HI, cfghi);

	/* Enable interrupts */
	channel_set_bit(idma64, MASK(XFER), idma64c->mask);
	channel_set_bit(idma64, MASK(ERROR), idma64c->mask);

	/*
	 * Enforce the controller to be turned on.
	 *
	 * The iDMA is turned off in ->probe() and loses context during a
	 * system suspend / resume cycle. That's why we have to enable it
	 * each time we use it.
	 */
	idma64_on(idma64);
}
static void idma64_chan_stop(struct idma64 *idma64, struct idma64_chan *idma64c)
{
	channel_clear_bit(idma64, CH_EN, idma64c->mask);
}
static void idma64_chan_start(struct idma64 *idma64, struct idma64_chan *idma64c)
{
	struct idma64_desc *desc = idma64c->desc;
	struct idma64_hw_desc *hw = &desc->hw[0];

	channel_writeq(idma64c, SAR, 0);
	channel_writeq(idma64c, DAR, 0);

	channel_writel(idma64c, CTL_HI, IDMA64C_CTLH_BLOCK_TS(~0UL));
	channel_writel(idma64c, CTL_LO, IDMA64C_CTLL_LLP_S_EN | IDMA64C_CTLL_LLP_D_EN);

	channel_writeq(idma64c, LLP, hw->llp);

	channel_set_bit(idma64, CH_EN, idma64c->mask);
}
static void idma64_stop_transfer(struct idma64_chan *idma64c)
{
	struct idma64 *idma64 = to_idma64(idma64c->vchan.chan.device);

	idma64_chan_stop(idma64, idma64c);
}
static void idma64_start_transfer(struct idma64_chan *idma64c)
{
	struct idma64 *idma64 = to_idma64(idma64c->vchan.chan.device);
	struct virt_dma_desc *vdesc;

	/* Get the next descriptor */
	vdesc = vchan_next_desc(&idma64c->vchan);
	if (!vdesc) {
		idma64c->desc = NULL;
		return;
	}

	list_del(&vdesc->node);
	idma64c->desc = to_idma64_desc(vdesc);

	/* Configure the channel */
	idma64_chan_init(idma64, idma64c);

	/* Start the channel with a new descriptor */
	idma64_chan_start(idma64, idma64c);
}
/* ---------------------------------------------------------------------- */

static void idma64_chan_irq(struct idma64 *idma64, unsigned short c,
		u32 status_err, u32 status_xfer)
{
	struct idma64_chan *idma64c = &idma64->chan[c];
	struct idma64_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&idma64c->vchan.lock, flags);
	desc = idma64c->desc;
	if (desc) {
		if (status_err & (1 << c)) {
			dma_writel(idma64, CLEAR(ERROR), idma64c->mask);
			desc->status = DMA_ERROR;
		} else if (status_xfer & (1 << c)) {
			dma_writel(idma64, CLEAR(XFER), idma64c->mask);
			desc->status = DMA_COMPLETE;
			vchan_cookie_complete(&desc->vdesc);
			idma64_start_transfer(idma64c);
		}

		/* idma64_start_transfer() updates idma64c->desc */
		if (idma64c->desc == NULL || desc->status == DMA_ERROR)
			idma64_stop_transfer(idma64c);
	}
	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
}
static irqreturn_t idma64_irq(int irq, void *dev)
{
	struct idma64 *idma64 = dev;
	u32 status = dma_readl(idma64, STATUS_INT);
	u32 status_xfer;
	u32 status_err;
	unsigned short i;

	dev_vdbg(idma64->dma.dev, "%s: status=%#x\n", __func__, status);

	/* Check if we have any interrupt from the DMA controller */
	if (!status)
		return IRQ_NONE;

	status_xfer = dma_readl(idma64, RAW(XFER));
	status_err = dma_readl(idma64, RAW(ERROR));

	for (i = 0; i < idma64->dma.chancnt; i++)
		idma64_chan_irq(idma64, i, status_err, status_xfer);

	return IRQ_HANDLED;
}
/* ---------------------------------------------------------------------- */

static struct idma64_desc *idma64_alloc_desc(unsigned int ndesc)
{
	struct idma64_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->hw = kcalloc(ndesc, sizeof(*desc->hw), GFP_NOWAIT);
	if (!desc->hw) {
		kfree(desc);
		return NULL;
	}

	return desc;
}
static void idma64_desc_free(struct idma64_chan *idma64c,
		struct idma64_desc *desc)
{
	struct idma64_hw_desc *hw;

	if (desc->ndesc) {
		unsigned int i = desc->ndesc;

		/* Return each hardware LLI to the channel's DMA pool */
		do {
			hw = &desc->hw[--i];
			dma_pool_free(idma64c->pool, hw->lli, hw->llp);
		} while (i);
	}

	kfree(desc->hw);
	kfree(desc);
}
static void idma64_vdesc_free(struct virt_dma_desc *vdesc)
{
	struct idma64_chan *idma64c = to_idma64_chan(vdesc->tx.chan);

	idma64_desc_free(idma64c, to_idma64_desc(vdesc));
}
static void idma64_hw_desc_fill(struct idma64_hw_desc *hw,
		struct dma_slave_config *config,
		enum dma_transfer_direction direction, u64 llp)
{
	struct idma64_lli *lli = hw->lli;
	u64 sar, dar;
	u32 ctlhi = IDMA64C_CTLH_BLOCK_TS(hw->len);
	u32 ctllo = IDMA64C_CTLL_LLP_S_EN | IDMA64C_CTLL_LLP_D_EN;
	u32 src_width, dst_width;

	if (direction == DMA_MEM_TO_DEV) {
		sar = hw->phys;
		dar = config->dst_addr;
		ctllo |= IDMA64C_CTLL_DST_FIX | IDMA64C_CTLL_SRC_INC |
			 IDMA64C_CTLL_FC_M2P;
		src_width = __ffs(sar | hw->len | 4);
		dst_width = __ffs(config->dst_addr_width);
	} else {	/* DMA_DEV_TO_MEM */
		sar = config->src_addr;
		dar = hw->phys;
		ctllo |= IDMA64C_CTLL_DST_INC | IDMA64C_CTLL_SRC_FIX |
			 IDMA64C_CTLL_FC_P2M;
		src_width = __ffs(config->src_addr_width);
		dst_width = __ffs(dar | hw->len | 4);
	}

	lli->sar = sar;
	lli->dar = dar;

	lli->ctlhi = ctlhi;
	lli->ctllo = ctllo |
		     IDMA64C_CTLL_SRC_MSIZE(config->src_maxburst) |
		     IDMA64C_CTLL_DST_MSIZE(config->dst_maxburst) |
		     IDMA64C_CTLL_DST_WIDTH(dst_width) |
		     IDMA64C_CTLL_SRC_WIDTH(src_width);

	lli->llp = llp;
}
static void idma64_desc_fill(struct idma64_chan *idma64c,
		struct idma64_desc *desc)
{
	struct dma_slave_config *config = &idma64c->config;
	unsigned int i = desc->ndesc;
	struct idma64_hw_desc *hw = &desc->hw[i - 1];
	struct idma64_lli *lli = hw->lli;
	u64 llp = 0;

	/* Fill the hardware descriptors and link them to a list */
	do {
		hw = &desc->hw[--i];
		idma64_hw_desc_fill(hw, config, desc->direction, llp);
		llp = hw->llp;
		desc->length += hw->len;
	} while (i);

	/* Trigger an interrupt after the last block is transferred */
	lli->ctllo |= IDMA64C_CTLL_INT_EN;

	/* Disable LLP transfer in the last block */
	lli->ctllo &= ~(IDMA64C_CTLL_LLP_S_EN | IDMA64C_CTLL_LLP_D_EN);
}
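/*
 * The loop above walks the hardware descriptors backwards so that each LLI's
 * llp field can point at the descriptor that follows it: the last block is
 * filled first with llp = 0, then every earlier block is linked to the one
 * filled on the previous iteration. "lli" still refers to the final block,
 * which is why the interrupt-enable and LLP-disable bits are applied to it
 * after the loop.
 */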
static struct dma_async_tx_descriptor *idma64_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	struct idma64_desc *desc;
	struct scatterlist *sg;
	unsigned int i;

	desc = idma64_alloc_desc(sg_len);
	if (!desc)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		struct idma64_hw_desc *hw = &desc->hw[i];

		/* Allocate DMA capable memory for hardware descriptor */
		hw->lli = dma_pool_alloc(idma64c->pool, GFP_NOWAIT, &hw->llp);
		if (!hw->lli) {
			desc->ndesc = i;
			idma64_desc_free(idma64c, desc);
			return NULL;
		}

		hw->phys = sg_dma_address(sg);
		hw->len = sg_dma_len(sg);
	}

	desc->ndesc = sg_len;
	desc->direction = direction;
	desc->status = DMA_IN_PROGRESS;

	idma64_desc_fill(idma64c, desc);
	return vchan_tx_prep(&idma64c->vchan, &desc->vdesc, flags);
}
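/*
 * For reference, a minimal sketch of how a client driver would drive this
 * channel through the generic dmaengine slave API (names such as "my_dev",
 * "fifo_phys_addr" and the completion callback are illustrative only, not
 * part of this driver):
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				     DMA_PREP_INTERRUPT);
 *	tx->callback = my_dev_dma_complete;
 *	tx->callback_param = my_dev;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */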
static void idma64_issue_pending(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&idma64c->vchan.lock, flags);
	if (vchan_issue_pending(&idma64c->vchan) && !idma64c->desc)
		idma64_start_transfer(idma64c);
	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
}
static size_t idma64_active_desc_size(struct idma64_chan *idma64c)
{
	struct idma64_desc *desc = idma64c->desc;
	struct idma64_hw_desc *hw;
	size_t bytes = desc->length;
	u64 llp = channel_readq(idma64c, LLP);
	u32 ctlhi = channel_readl(idma64c, CTL_HI);
	unsigned int i = 0;

	do {
		hw = &desc->hw[i];
		if (hw->llp == llp)
			break;
		bytes -= hw->len;
	} while (++i < desc->ndesc);

	if (!i)
		return bytes;

	/* The current chunk is not fully transferred yet */
	bytes += desc->hw[--i].len;

	return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi);
}
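/*
 * Residue accounting: the channel's LLP register points at the next LLI to
 * be fetched, so the loop above subtracts every block that has already been
 * consumed. The block currently in flight is then added back in full and
 * reduced by the BLOCK_TS value the controller reports in CTL_HI for that
 * block, giving the amount of data still to be transferred.
 */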
static enum dma_status idma64_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	size_t bytes;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, state);
	if (status == DMA_COMPLETE)
		return status;

	spin_lock_irqsave(&idma64c->vchan.lock, flags);
	vdesc = vchan_find_desc(&idma64c->vchan, cookie);
	if (idma64c->desc && cookie == idma64c->desc->vdesc.tx.cookie) {
		bytes = idma64_active_desc_size(idma64c);
		dma_set_residue(state, bytes);
		status = idma64c->desc->status;
	} else if (vdesc) {
		bytes = to_idma64_desc(vdesc)->length;
		dma_set_residue(state, bytes);
	}
	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);

	return status;
}
static void convert_burst(u32 *maxburst)
{
	if (*maxburst)
		*maxburst = __fls(*maxburst);
	else
		*maxburst = 0;
}
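/*
 * The dmaengine API expresses maxburst as a number of items, while the
 * controller's CTL_LO MSIZE fields take a log2-style encoding. __fls()
 * converts one to the other: for example a requested burst of 8 items
 * becomes __fls(8) = 3, and an unset (zero) burst is left as 0, the
 * smallest encoding.
 */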
static int idma64_slave_config(struct dma_chan *chan,
		struct dma_slave_config *config)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);

	/* Check if chan will be configured for slave transfers */
	if (!is_slave_direction(config->direction))
		return -EINVAL;

	memcpy(&idma64c->config, config, sizeof(idma64c->config));

	convert_burst(&idma64c->config.src_maxburst);
	convert_burst(&idma64c->config.dst_maxburst);

	return 0;
}
static void idma64_chan_deactivate(struct idma64_chan *idma64c, bool drain)
{
	unsigned short count = 100;
	u32 cfglo;

	cfglo = channel_readl(idma64c, CFG_LO);
	if (drain)
		cfglo |= IDMA64C_CFGL_CH_DRAIN;
	else
		cfglo &= ~IDMA64C_CFGL_CH_DRAIN;

	channel_writel(idma64c, CFG_LO, cfglo | IDMA64C_CFGL_CH_SUSP);
	do {
		udelay(1);
		cfglo = channel_readl(idma64c, CFG_LO);
	} while (!(cfglo & IDMA64C_CFGL_FIFO_EMPTY) && --count);
}
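/*
 * Deactivation suspends the channel and then polls (bounded by count) until
 * the FIFO reports empty. With drain = true (used by terminate_all) the
 * remaining FIFO contents are flushed; with drain = false (used by pause)
 * they are preserved so the transfer can later be resumed by clearing
 * CH_SUSP again in idma64_chan_activate().
 */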
static void idma64_chan_activate(struct idma64_chan *idma64c)
{
	u32 cfglo;

	cfglo = channel_readl(idma64c, CFG_LO);
	channel_writel(idma64c, CFG_LO, cfglo & ~IDMA64C_CFGL_CH_SUSP);
}
static int idma64_pause(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&idma64c->vchan.lock, flags);
	if (idma64c->desc && idma64c->desc->status == DMA_IN_PROGRESS) {
		idma64_chan_deactivate(idma64c, false);
		idma64c->desc->status = DMA_PAUSED;
	}
	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);

	return 0;
}
static int idma64_resume(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&idma64c->vchan.lock, flags);
	if (idma64c->desc && idma64c->desc->status == DMA_PAUSED) {
		idma64c->desc->status = DMA_IN_PROGRESS;
		idma64_chan_activate(idma64c);
	}
	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);

	return 0;
}
static int idma64_terminate_all(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&idma64c->vchan.lock, flags);
	idma64_chan_deactivate(idma64c, true);
	idma64_stop_transfer(idma64c);
	if (idma64c->desc) {
		idma64_vdesc_free(&idma64c->desc->vdesc);
		idma64c->desc = NULL;
	}
	vchan_get_all_descriptors(&idma64c->vchan, &head);
	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);

	vchan_dma_desc_free_list(&idma64c->vchan, &head);
	return 0;
}
static void idma64_synchronize(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);

	vchan_synchronize(&idma64c->vchan);
}
static int idma64_alloc_chan_resources(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);

	/* Create a pool of consistent memory blocks for hardware descriptors */
	idma64c->pool = dma_pool_create(dev_name(chan2dev(chan)),
					chan->device->dev,
					sizeof(struct idma64_lli), 8, 0);
	if (!idma64c->pool) {
		dev_err(chan2dev(chan), "No memory for descriptors\n");
		return -ENOMEM;
	}

	return 0;
}
static void idma64_free_chan_resources(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);

	vchan_free_chan_resources(to_virt_chan(chan));
	dma_pool_destroy(idma64c->pool);
	idma64c->pool = NULL;
}
/* ---------------------------------------------------------------------- */

#define IDMA64_BUSWIDTHS				\
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
static int idma64_probe(struct idma64_chip *chip)
{
	struct idma64 *idma64;
	unsigned short nr_chan = IDMA64_NR_CHAN;
	unsigned short i;
	int ret;

	idma64 = devm_kzalloc(chip->dev, sizeof(*idma64), GFP_KERNEL);
	if (!idma64)
		return -ENOMEM;

	idma64->regs = chip->regs;
	chip->idma64 = idma64;

	idma64->chan = devm_kcalloc(chip->dev, nr_chan, sizeof(*idma64->chan),
				    GFP_KERNEL);
	if (!idma64->chan)
		return -ENOMEM;

	idma64->all_chan_mask = (1 << nr_chan) - 1;

	/* Turn off iDMA controller */
	idma64_off(idma64);

	ret = devm_request_irq(chip->dev, chip->irq, idma64_irq, IRQF_SHARED,
			       dev_name(chip->dev), idma64);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&idma64->dma.channels);
	for (i = 0; i < nr_chan; i++) {
		struct idma64_chan *idma64c = &idma64->chan[i];

		idma64c->vchan.desc_free = idma64_vdesc_free;
		vchan_init(&idma64c->vchan, &idma64->dma);

		idma64c->regs = idma64->regs + i * IDMA64_CH_LENGTH;
		idma64c->mask = BIT(i);
	}

	dma_cap_set(DMA_SLAVE, idma64->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, idma64->dma.cap_mask);

	idma64->dma.device_alloc_chan_resources = idma64_alloc_chan_resources;
	idma64->dma.device_free_chan_resources = idma64_free_chan_resources;

	idma64->dma.device_prep_slave_sg = idma64_prep_slave_sg;

	idma64->dma.device_issue_pending = idma64_issue_pending;
	idma64->dma.device_tx_status = idma64_tx_status;

	idma64->dma.device_config = idma64_slave_config;
	idma64->dma.device_pause = idma64_pause;
	idma64->dma.device_resume = idma64_resume;
	idma64->dma.device_terminate_all = idma64_terminate_all;
	idma64->dma.device_synchronize = idma64_synchronize;

	idma64->dma.src_addr_widths = IDMA64_BUSWIDTHS;
	idma64->dma.dst_addr_widths = IDMA64_BUSWIDTHS;
	idma64->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	idma64->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	idma64->dma.dev = chip->sysdev;

	dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);

	ret = dma_async_device_register(&idma64->dma);
	if (ret)
		return ret;

	dev_info(chip->dev, "Found Intel integrated DMA 64-bit\n");
	return 0;
}
static int idma64_remove(struct idma64_chip *chip)
{
	struct idma64 *idma64 = chip->idma64;
	unsigned short i;

	dma_async_device_unregister(&idma64->dma);

	/*
	 * Explicitly free the IRQ here (instead of relying on devres) to
	 * avoid side effects with the scheduled tasklets.
	 */
	devm_free_irq(chip->dev, chip->irq, idma64);

	for (i = 0; i < idma64->dma.chancnt; i++) {
		struct idma64_chan *idma64c = &idma64->chan[i];

		tasklet_kill(&idma64c->vchan.task);
	}

	return 0;
}
/* ---------------------------------------------------------------------- */

static int idma64_platform_probe(struct platform_device *pdev)
{
	struct idma64_chip *chip;
	struct device *dev = &pdev->dev;
	struct device *sysdev = dev->parent;
	struct resource *mem;
	int ret;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	chip->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	ret = dma_coerce_mask_and_coherent(sysdev, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	chip->dev = dev;
	chip->sysdev = sysdev;

	ret = idma64_probe(chip);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, chip);
	return 0;
}
static int idma64_platform_remove(struct platform_device *pdev)
{
	struct idma64_chip *chip = platform_get_drvdata(pdev);

	return idma64_remove(chip);
}
#ifdef CONFIG_PM_SLEEP

static int idma64_pm_suspend(struct device *dev)
{
	struct idma64_chip *chip = dev_get_drvdata(dev);

	idma64_off(chip->idma64);
	return 0;
}

static int idma64_pm_resume(struct device *dev)
{
	struct idma64_chip *chip = dev_get_drvdata(dev);

	idma64_on(chip->idma64);
	return 0;
}

#endif /* CONFIG_PM_SLEEP */
static const struct dev_pm_ops idma64_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(idma64_pm_suspend, idma64_pm_resume)
};
static struct platform_driver idma64_platform_driver = {
	.probe		= idma64_platform_probe,
	.remove		= idma64_platform_remove,
	.driver = {
		.name	= DRV_NAME,
		.pm	= &idma64_dev_pm_ops,
	},
};
module_platform_driver(idma64_platform_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("iDMA64 core driver");
MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
MODULE_ALIAS("platform:" DRV_NAME);
);