// SPDX-License-Identifier: GPL-2.0+
//
// drivers/dma/imx-dma.c
//
// This file contains a driver for the Freescale i.MX DMA engine
// found on i.MX1/21/27
//
// Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
// Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>

#include <asm/irq.h>
#include <linux/dma/imx-dma.h>

#include "dmaengine.h"
#define IMXDMA_MAX_CHAN_DESCRIPTORS	16
#define IMX_DMA_CHANNELS		16

#define IMX_DMA_2D_SLOTS	2
#define IMX_DMA_2D_SLOT_A	0
#define IMX_DMA_2D_SLOT_B	1

#define IMX_DMA_LENGTH_LOOP	((unsigned int)-1)
#define IMX_DMA_MEMSIZE_32	(0 << 4)
#define IMX_DMA_MEMSIZE_8	(1 << 4)
#define IMX_DMA_MEMSIZE_16	(2 << 4)
#define IMX_DMA_TYPE_LINEAR	(0 << 10)
#define IMX_DMA_TYPE_2D		(1 << 10)
#define IMX_DMA_TYPE_FIFO	(2 << 10)

#define IMX_DMA_ERR_BURST	(1 << 0)
#define IMX_DMA_ERR_REQUEST	(1 << 1)
#define IMX_DMA_ERR_TRANSFER	(1 << 2)
#define IMX_DMA_ERR_BUFFER	(1 << 3)
#define IMX_DMA_ERR_TIMEOUT	(1 << 4)
#define DMA_DCR		0x00		/* Control Register */
#define DMA_DISR	0x04		/* Interrupt status Register */
#define DMA_DIMR	0x08		/* Interrupt mask Register */
#define DMA_DBTOSR	0x0c		/* Burst timeout status Register */
#define DMA_DRTOSR	0x10		/* Request timeout Register */
#define DMA_DSESR	0x14		/* Transfer Error Status Register */
#define DMA_DBOSR	0x18		/* Buffer overflow status Register */
#define DMA_DBTOCR	0x1c		/* Burst timeout control Register */
#define DMA_WSRA	0x40		/* W-Size Register A */
#define DMA_XSRA	0x44		/* X-Size Register A */
#define DMA_YSRA	0x48		/* Y-Size Register A */
#define DMA_WSRB	0x4c		/* W-Size Register B */
#define DMA_XSRB	0x50		/* X-Size Register B */
#define DMA_YSRB	0x54		/* Y-Size Register B */
#define DMA_SAR(x)	(0x80 + ((x) << 6))	/* Source Address Registers */
#define DMA_DAR(x)	(0x84 + ((x) << 6))	/* Destination Address Registers */
#define DMA_CNTR(x)	(0x88 + ((x) << 6))	/* Count Registers */
#define DMA_CCR(x)	(0x8c + ((x) << 6))	/* Control Registers */
#define DMA_RSSR(x)	(0x90 + ((x) << 6))	/* Request source select Registers */
#define DMA_BLR(x)	(0x94 + ((x) << 6))	/* Burst length Registers */
#define DMA_RTOR(x)	(0x98 + ((x) << 6))	/* Request timeout Registers */
#define DMA_BUCR(x)	(0x98 + ((x) << 6))	/* Bus Utilization Registers */
#define DMA_CCNR(x)	(0x9C + ((x) << 6))	/* Channel counter Registers */
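/*
 * Worked example (added for illustration, not part of the original
 * register map): the per-channel registers are laid out with a
 * 0x40-byte stride, so for channel 1 the macros above expand to
 *	DMA_SAR(1) = 0x80 + (1 << 6) = 0xc0
 *	DMA_CCR(1) = 0x8c + (1 << 6) = 0xcc
 * DMA_RTOR(x) and DMA_BUCR(x) appear to share offset 0x98, which is
 * presumably a dual-purpose register depending on the channel mode.
 */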
#define DCR_DRST	(1<<1)
#define DCR_DEN		(1<<0)
#define DBTOCR_EN	(1<<15)
#define DBTOCR_CNT(x)	((x) & 0x7fff)
#define CNTR_CNT(x)	((x) & 0xffffff)
#define CCR_ACRPT	(1<<14)
#define CCR_DMOD_LINEAR	(0x0 << 12)
#define CCR_DMOD_2D	(0x1 << 12)
#define CCR_DMOD_FIFO	(0x2 << 12)
#define CCR_DMOD_EOBFIFO (0x3 << 12)
#define CCR_SMOD_LINEAR	(0x0 << 10)
#define CCR_SMOD_2D	(0x1 << 10)
#define CCR_SMOD_FIFO	(0x2 << 10)
#define CCR_SMOD_EOBFIFO (0x3 << 10)
#define CCR_MDIR_DEC	(1<<9)
#define CCR_MSEL_B	(1<<8)
#define CCR_DSIZ_32	(0x0 << 6)
#define CCR_DSIZ_8	(0x1 << 6)
#define CCR_DSIZ_16	(0x2 << 6)
#define CCR_SSIZ_32	(0x0 << 4)
#define CCR_SSIZ_8	(0x1 << 4)
#define CCR_SSIZ_16	(0x2 << 4)
#define CCR_REN		(1<<3)
#define CCR_RPT		(1<<2)
#define CCR_FRC		(1<<1)
#define CCR_CEN		(1<<0)

#define RTOR_EN		(1<<15)
#define RTOR_CLK	(1<<14)
#define RTOR_PSC	(1<<13)
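/*
 * Note (added for clarity, not in the original source): in CCR the
 * destination fields sit exactly two bits above their source
 * counterparts (DSIZ at bits 7:6 vs SSIZ at 5:4, DMOD at 13:12 vs
 * SMOD at 11:10).  The driver exploits this by building both halves
 * from the source-position macros IMX_DMA_MEMSIZE_* (<< 4) and
 * IMX_DMA_TYPE_* (<< 10) and shifting the destination half left by 2,
 * e.g. in imxdma_xfer_desc():
 *
 *	d->config_mem | (d->config_port << 2)
 */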
enum  imxdma_prep_type {
	IMXDMA_DESC_MEMCPY,
	IMXDMA_DESC_INTERLEAVED,
	IMXDMA_DESC_SLAVE_SG,
	IMXDMA_DESC_CYCLIC,
};
struct imx_dma_2d_config {
	u16		xsr;
	u16		ysr;
	u16		wsr;
	int		count;
};

struct imxdma_desc {
	struct list_head		node;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	dma_addr_t			src;
	dma_addr_t			dest;
	size_t				len;
	enum dma_transfer_direction	direction;
	enum imxdma_prep_type		type;
	/* For memcpy and interleaved */
	unsigned int			config_port;
	unsigned int			config_mem;
	/* For interleaved transfers */
	unsigned int			x;
	unsigned int			y;
	unsigned int			w;
	/* For slave sg and cyclic */
	struct scatterlist		*sg;
	unsigned int			sgcount;
};
struct imxdma_channel {
	int				hw_chaining;
	struct timer_list		watchdog;
	struct imxdma_engine		*imxdma;
	unsigned int			channel;

	struct tasklet_struct		dma_tasklet;
	struct list_head		ld_free;
	struct list_head		ld_queue;
	struct list_head		ld_active;
	int				descs_allocated;
	enum dma_slave_buswidth		word_size;
	dma_addr_t			per_address;
	u32				watermark_level;
	struct dma_chan			chan;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	int				dma_request;
	struct scatterlist		*sg_list;
	u32				ccr_from_device;
	u32				ccr_to_device;
	bool				enabled_2d;
	int				slot_2d;
	unsigned int			irq;
	struct dma_slave_config		config;
};
struct imxdma_engine {
	struct device			*dev;
	struct dma_device		dma_device;
	void __iomem			*base;
	struct clk			*dma_ahb;
	struct clk			*dma_ipg;
	spinlock_t			lock;
	struct imx_dma_2d_config	slots_2d[IMX_DMA_2D_SLOTS];
	struct imxdma_channel		channel[IMX_DMA_CHANNELS];
	enum imx_dma_type		devtype;
	unsigned int			irq;
	unsigned int			irq_err;
};
struct imxdma_filter_data {
	struct imxdma_engine	*imxdma;
	int			 request;
};
static const struct of_device_id imx_dma_of_dev_id[] = {
	{
		.compatible = "fsl,imx1-dma", .data = (const void *)IMX1_DMA,
	}, {
		.compatible = "fsl,imx27-dma", .data = (const void *)IMX27_DMA,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, imx_dma_of_dev_id);
static inline int is_imx1_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX1_DMA;
}
static inline int is_imx27_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX27_DMA;
}
static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}
static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
	struct imxdma_desc *desc;

	if (!list_empty(&imxdmac->ld_active)) {
		desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
					node);
		if (desc->type == IMXDMA_DESC_CYCLIC)
			return true;
	}
	return false;
}
static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val,
			     unsigned offset)
{
	__raw_writel(val, imxdma->base + offset);
}
static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset)
{
	return __raw_readl(imxdma->base + offset);
}
static int imxdma_hw_chain(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;

	if (is_imx27_dma(imxdma))
		return imxdmac->hw_chaining;
	else
		return 0;
}
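/*
 * Clarifying note (not in the original source): hardware descriptor
 * chaining is only usable on i.MX27; on i.MX1 this helper always
 * returns 0.  imxdma_config_write() below currently forces
 * imxdmac->hw_chaining to 0 as well, so the CCR_RPT/CCR_ACRPT based
 * auto-chaining paths in imxdma_enable_hw() and
 * dma_irq_handle_channel() are effectively dormant and scatter-gather
 * is emulated in software via imxdma_sg_next().
 */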
/*
 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
 */
static inline void imxdma_sg_next(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct scatterlist *sg = d->sg;
	size_t now;

	now = min_t(size_t, d->len, sg_dma_len(sg));
	if (d->len != IMX_DMA_LENGTH_LOOP)
		d->len -= now;

	if (d->direction == DMA_DEV_TO_MEM)
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_DAR(imxdmac->channel));
	else
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_SAR(imxdmac->channel));

	imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel));

	dev_dbg(imxdma->dev, " %s channel: %d dst 0x%08x, src 0x%08x, "
		"size 0x%08x\n", __func__, imxdmac->channel,
		imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel)));
}
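/*
 * Clarifying note (not in the original source): each call programs
 * exactly one scatterlist entry into SAR/DAR/CNTR, i.e. one hardware
 * transfer of up to sg_dma_len(sg) bytes.  d->len normally counts
 * down to the end of the descriptor; the IMX_DMA_LENGTH_LOOP sentinel
 * ((unsigned int)-1) set by imxdma_prep_dma_cyclic() disables that
 * countdown so a cyclic transfer never terminates on its own.
 */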
static void imxdma_enable_hw(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	local_irq_save(flags);

	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) &
			 ~(1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) |
			 CCR_CEN | CCR_ACRPT, DMA_CCR(channel));

	if (!is_imx1_dma(imxdma) &&
			d->sg && imxdma_hw_chain(imxdmac)) {
		d->sg = sg_next(d->sg);
		if (d->sg) {
			u32 tmp;

			imxdma_sg_next(d);
			tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel));
			imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT,
					 DMA_CCR(channel));
		}
	}

	local_irq_restore(flags);
}
static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	if (imxdma_hw_chain(imxdmac))
		del_timer(&imxdmac->watchdog);

	local_irq_save(flags);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) |
			 (1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) &
			 ~CCR_CEN, DMA_CCR(channel));
	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	local_irq_restore(flags);
}
static void imxdma_watchdog(struct timer_list *t)
{
	struct imxdma_channel *imxdmac = from_timer(imxdmac, t, watchdog);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;

	imx_dmav1_writel(imxdma, 0, DMA_CCR(channel));

	/* Tasklet watchdog error handler */
	tasklet_schedule(&imxdmac->dma_tasklet);
	dev_dbg(imxdma->dev, "channel %d: watchdog timeout!\n",
		imxdmac->channel);
}
static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	unsigned int err_mask;
	int i, disr;
	int errcode;

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DRTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DSESR)  |
		   imx_dmav1_readl(imxdma, DMA_DBOSR);

	if (!err_mask)
		return IRQ_HANDLED;

	imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		errcode = 0;

		if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}
		/* Tasklet error handler */
		tasklet_schedule(&imxdma->channel[i].dma_tasklet);

		dev_warn(imxdma->dev,
			 "DMA timeout on channel %d -%s%s%s%s\n", i,
			 errcode & IMX_DMA_ERR_BURST ?    " burst"    : "",
			 errcode & IMX_DMA_ERR_REQUEST ?  " request"  : "",
			 errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
			 errcode & IMX_DMA_ERR_BUFFER ?   " buffer"   : "");
	}
	return IRQ_HANDLED;
}
static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int chno = imxdmac->channel;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active)) {
		spin_unlock_irqrestore(&imxdma->lock, flags);
		goto out;
	}

	desc = list_first_entry(&imxdmac->ld_active,
				struct imxdma_desc,
				node);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	if (desc->sg) {
		u32 tmp;

		desc->sg = sg_next(desc->sg);

		if (desc->sg) {
			imxdma_sg_next(desc);

			tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno));

			if (imxdma_hw_chain(imxdmac)) {
				/* FIXME: The timeout should probably be
				 * configurable
				 */
				mod_timer(&imxdmac->watchdog,
					jiffies + msecs_to_jiffies(500));

				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
				imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
			} else {
				imx_dmav1_writel(imxdma, tmp & ~CCR_CEN,
						 DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));

			if (imxdma_chan_is_doing_cyclic(imxdmac))
				/* Tasklet progression */
				tasklet_schedule(&imxdmac->dma_tasklet);

			return;
		}

		if (imxdma_hw_chain(imxdmac)) {
			del_timer(&imxdmac->watchdog);
			return;
		}
	}

out:
	imx_dmav1_writel(imxdma, 0, DMA_CCR(chno));
	/* Tasklet irq */
	tasklet_schedule(&imxdmac->dma_tasklet);
}
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	int i, disr;

	if (!is_imx1_dma(imxdma))
		imxdma_err_handler(irq, dev_id);

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	dev_dbg(imxdma->dev, "%s called, disr=0x%08x\n", __func__, disr);

	imx_dmav1_writel(imxdma, disr, DMA_DISR);
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i))
			dma_irq_handle_channel(&imxdma->channel[i]);
	}

	return IRQ_HANDLED;
}
static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int slot = -1;
	int i;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_INTERLEAVED:
		/* Try to get a free 2D slot */
		for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
			if ((imxdma->slots_2d[i].count > 0) &&
			((imxdma->slots_2d[i].xsr != d->x) ||
			(imxdma->slots_2d[i].ysr != d->y) ||
			(imxdma->slots_2d[i].wsr != d->w)))
				continue;
			slot = i;
			break;
		}
		if (slot < 0)
			return -EBUSY;

		imxdma->slots_2d[slot].xsr = d->x;
		imxdma->slots_2d[slot].ysr = d->y;
		imxdma->slots_2d[slot].wsr = d->w;
		imxdma->slots_2d[slot].count++;

		imxdmac->slot_2d = slot;
		imxdmac->enabled_2d = true;

		if (slot == IMX_DMA_2D_SLOT_A) {
			d->config_mem &= ~CCR_MSEL_B;
			d->config_port &= ~CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRA);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRA);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRA);
		} else {
			d->config_mem |= CCR_MSEL_B;
			d->config_port |= CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRB);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRB);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRB);
		}
		/*
		 * We fall-through here intentionally, since a 2D transfer is
		 * similar to MEMCPY just adding the 2D slot configuration.
		 */
		fallthrough;
	case IMXDMA_DESC_MEMCPY:
		imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2),
			 DMA_CCR(imxdmac->channel));

		imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));

		dev_dbg(imxdma->dev,
			"%s channel: %d dest=0x%08llx src=0x%08llx dma_length=%zu\n",
			__func__, imxdmac->channel,
			(unsigned long long)d->dest,
			(unsigned long long)d->src, d->len);

		break;
	/* Cyclic transfer is the same as slave_sg with special sg configuration. */
	case IMXDMA_DESC_CYCLIC:
	case IMXDMA_DESC_SLAVE_SG:
		if (d->direction == DMA_DEV_TO_MEM) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_SAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev,
				"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (dev2mem)\n",
				__func__, imxdmac->channel,
				d->sg, d->sgcount, d->len,
				(unsigned long long)imxdmac->per_address);
		} else if (d->direction == DMA_MEM_TO_DEV) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_DAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev,
				"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (mem2dev)\n",
				__func__, imxdmac->channel,
				d->sg, d->sgcount, d->len,
				(unsigned long long)imxdmac->per_address);
		} else {
			dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
				__func__, imxdmac->channel);
			return -EINVAL;
		}

		imxdma_sg_next(d);

		break;
	default:
		return -EINVAL;
	}
	imxdma_enable_hw(d);
	return 0;
}
static void imxdma_tasklet(struct tasklet_struct *t)
{
	struct imxdma_channel *imxdmac = from_tasklet(imxdmac, t, dma_tasklet);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc, *next_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		spin_unlock_irqrestore(&imxdma->lock, flags);
		return;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	/* If we are dealing with a cyclic descriptor, keep it on ld_active
	 * and dont mark the descriptor as complete.
	 * Only in non-cyclic cases it would be marked as complete
	 */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;
	else
		dma_cookie_complete(&desc->desc);

	/* Free 2D slot if it was an interleaved transfer */
	if (imxdmac->enabled_2d) {
		imxdma->slots_2d[imxdmac->slot_2d].count--;
		imxdmac->enabled_2d = false;
	}

	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		next_desc = list_first_entry(&imxdmac->ld_queue,
					     struct imxdma_desc, node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(next_desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock_irqrestore(&imxdma->lock, flags);

	dmaengine_desc_get_callback_invoke(&desc->desc, NULL);
}
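/*
 * Clarifying note (not in the original source): descriptors migrate
 * between three per-channel lists.  imxdma_alloc_chan_resources()
 * populates ld_free; the prep callbacks peek at the first free
 * descriptor; imxdma_tx_submit() moves it to ld_queue;
 * imxdma_issue_pending() moves it to ld_active and starts the
 * hardware; the tasklet above returns it to ld_free on completion
 * and immediately kicks off the next queued descriptor.
 */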
static int imxdma_terminate_all(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned long flags;

	imxdma_disable_hw(imxdmac);

	spin_lock_irqsave(&imxdma->lock, flags);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
	spin_unlock_irqrestore(&imxdma->lock, flags);
	return 0;
}
static int imxdma_config_write(struct dma_chan *chan,
			       struct dma_slave_config *dmaengine_cfg,
			       enum dma_transfer_direction direction)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned int mode = 0;

	if (direction == DMA_DEV_TO_MEM) {
		imxdmac->per_address = dmaengine_cfg->src_addr;
		imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
		imxdmac->word_size = dmaengine_cfg->src_addr_width;
	} else {
		imxdmac->per_address = dmaengine_cfg->dst_addr;
		imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
		imxdmac->word_size = dmaengine_cfg->dst_addr_width;
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		mode = IMX_DMA_MEMSIZE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		mode = IMX_DMA_MEMSIZE_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	default:
		mode = IMX_DMA_MEMSIZE_32;
		break;
	}

	imxdmac->hw_chaining = 0;

	imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
		((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
		CCR_REN;
	imxdmac->ccr_to_device =
		(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
		((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
	imx_dmav1_writel(imxdma, imxdmac->dma_request,
			 DMA_RSSR(imxdmac->channel));

	/* Set burst length */
	imx_dmav1_writel(imxdma, imxdmac->watermark_level *
			 imxdmac->word_size, DMA_BLR(imxdmac->channel));

	return 0;
}
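/*
 * Worked example (added for illustration): enum dma_slave_buswidth
 * values equal the width in bytes, so a slave config with
 * dst_maxburst = 16 and dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES
 * programs 16 * 2 = 32 into DMA_BLR(), i.e. a 32-byte burst per
 * request.
 */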
static int imxdma_config(struct dma_chan *chan,
			 struct dma_slave_config *dmaengine_cfg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);

	memcpy(&imxdmac->config, dmaengine_cfg, sizeof(*dmaengine_cfg));

	return 0;
}
static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}
static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
	cookie = dma_cookie_assign(tx);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	return cookie;
}
static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_COMPLETE;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}
static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	imxdma_disable_hw(imxdmac);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdma->lock, flags);

	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	kfree(imxdmac->sg_list);
	imxdmac->sg_list = NULL;
}
static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg_dma_len(sg);
	}

	imxdma_config_write(chan, &imxdmac->config, direction);

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
			__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_ATOMIC);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		sg_assign_page(&imxdmac->sg_list[i], NULL);
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		sg_dma_len(&imxdmac->sg_list[i]) = period_len;
		dma_addr += period_len;
	}

	/* close the loop */
	sg_chain(imxdmac->sg_list, periods + 1, imxdmac->sg_list);

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	imxdma_config_write(chan, &imxdmac->config, direction);

	return &desc->desc;
}
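/*
 * Clarifying note (not in the original source): the cyclic transfer is
 * emulated with a circular scatterlist.  periods + 1 entries are
 * allocated but the table is initialised for just 'periods' entries;
 * sg_chain() then turns the extra slot into a chain link pointing back
 * at the first entry.  dma_irq_handle_channel()'s sg_next() walk
 * therefore never sees a NULL entry, and combined with
 * desc->len = IMX_DMA_LENGTH_LOOP the channel re-arms one period per
 * interrupt indefinitely, until the client terminates the channel.
 */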
static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src=0x%llx dst=0x%llx len=%zu\n",
		__func__, imxdmac->channel, (unsigned long long)src,
		(unsigned long long)dest, len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
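/*
 * Usage sketch (added for illustration, not part of the driver): a
 * typical dmaengine client drives the memcpy path above roughly like
 * this, using only generic dmaengine calls.  Error handling is elided
 * and the variable/callback names are made up:
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "tx");
 *	struct dma_async_tx_descriptor *txd;
 *	dma_cookie_t cookie;
 *
 *	txd = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *					DMA_PREP_INTERRUPT);
 *	txd->callback = my_done_callback;	// hypothetical callback
 *	cookie = dmaengine_submit(txd);		// -> imxdma_tx_submit()
 *	dma_async_issue_pending(chan);		// -> imxdma_issue_pending()
 */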
static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%llx dst_start=0x%llx\n"
		"   src_sgl=%s dst_sgl=%s numf=%zu frame_size=%zu\n", __func__,
		imxdmac->channel, (unsigned long long)xt->src_start,
		(unsigned long long) xt->dst_start,
		xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false",
		xt->numf, xt->frame_size);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	if (xt->frame_size != 1 || xt->numf <= 0 || xt->dir != DMA_MEM_TO_MEM)
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_INTERLEAVED;
	desc->src = xt->src_start;
	desc->dest = xt->dst_start;
	desc->x = xt->sgl[0].size;
	desc->y = xt->numf;
	desc->w = xt->sgl[0].icg + desc->x;
	desc->len = desc->x * desc->y;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32;
	desc->config_mem = IMX_DMA_MEMSIZE_32;
	if (xt->src_sgl)
		desc->config_mem |= IMX_DMA_TYPE_2D;
	if (xt->dst_sgl)
		desc->config_port |= IMX_DMA_TYPE_2D;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
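/*
 * Clarifying note (not in the original source): for the 2D engine the
 * interleaved template maps onto the X/Y/W registers as
 *	x = sgl[0].size        bytes per line,
 *	y = numf               number of lines,
 *	w = sgl[0].icg + x     line-to-line pitch,
 * so desc->len = x * y is the payload actually copied, while the
 * inter-chunk gap (icg) is skipped by the hardware.
 */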
static void imxdma_issue_pending(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active) &&
	    !list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue,
					struct imxdma_desc, node);

		if (imxdma_xfer_desc(desc) < 0) {
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	}
	spin_unlock_irqrestore(&imxdma->lock, flags);
}
static bool imxdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct imxdma_filter_data *fdata = param;
	struct imxdma_channel *imxdma_chan = to_imxdma_chan(chan);

	if (chan->device->dev != fdata->imxdma->dev)
		return false;

	imxdma_chan->dma_request = fdata->request;
	chan->private = NULL;

	return true;
}
static struct dma_chan *imxdma_xlate(struct of_phandle_args *dma_spec,
						struct of_dma *ofdma)
{
	int count = dma_spec->args_count;
	struct imxdma_engine *imxdma = ofdma->of_dma_data;
	struct imxdma_filter_data fdata = {
		.imxdma = imxdma,
	};

	if (count != 1)
		return NULL;

	fdata.request = dma_spec->args[0];

	return dma_request_channel(imxdma->dma_device.cap_mask,
					imxdma_filter_fn, &fdata);
}
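/*
 * Devicetree sketch (added for illustration; the node names and the
 * request-line number are made up and SoC-specific).  The xlate above
 * expects exactly one cell, carrying the DMA request number that
 * imxdma_filter_fn() stores in the channel:
 *
 *	dma: dma-controller@10001000 {
 *		compatible = "fsl,imx27-dma";
 *		#dma-cells = <1>;
 *	};
 *
 *	client {
 *		dmas = <&dma 26>;
 *		dma-names = "rx";
 *	};
 */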
static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	int ret, i;
	int irq, irq_err;

	imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	imxdma->dev = &pdev->dev;
	imxdma->devtype = (uintptr_t)of_device_get_match_data(&pdev->dev);

	imxdma->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(imxdma->base))
		return PTR_ERR(imxdma->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(imxdma->dma_ipg))
		return PTR_ERR(imxdma->dma_ipg);

	imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(imxdma->dma_ahb))
		return PTR_ERR(imxdma->dma_ahb);

	ret = clk_prepare_enable(imxdma->dma_ipg);
	if (ret)
		return ret;
	ret = clk_prepare_enable(imxdma->dma_ahb);
	if (ret)
		goto disable_dma_ipg_clk;

	/* reset DMA module */
	imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);

	if (is_imx1_dma(imxdma)) {
		ret = devm_request_irq(&pdev->dev, irq,
				       dma_irq_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
			goto disable_dma_ahb_clk;
		}
		imxdma->irq = irq;

		irq_err = platform_get_irq(pdev, 1);
		if (irq_err < 0) {
			ret = irq_err;
			goto disable_dma_ahb_clk;
		}

		ret = devm_request_irq(&pdev->dev, irq_err,
				       imxdma_err_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
			goto disable_dma_ahb_clk;
		}
		imxdma->irq_err = irq_err;
	}

	/* enable DMA module */
	imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR);

	/* clear all interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

	/* disable interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, imxdma->dma_device.cap_mask);

	/* Initialize 2D global parameters */
	for (i = 0; i < IMX_DMA_2D_SLOTS; i++)
		imxdma->slots_2d[i].count = 0;

	spin_lock_init(&imxdma->lock);

	/* Initialize channel parameters */
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		if (!is_imx1_dma(imxdma)) {
			ret = devm_request_irq(&pdev->dev, irq + i,
					dma_irq_handler, 0, "DMA", imxdma);
			if (ret) {
				dev_warn(imxdma->dev, "Can't register IRQ %d "
					 "for DMA channel %d\n",
					 irq + i, i);
				goto disable_dma_ahb_clk;
			}

			imxdmac->irq = irq + i;
			timer_setup(&imxdmac->watchdog, imxdma_watchdog, 0);
		}

		imxdmac->imxdma = imxdma;

		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		INIT_LIST_HEAD(&imxdmac->ld_active);

		tasklet_setup(&imxdmac->dma_tasklet, imxdma_tasklet);
		imxdmac->chan.device = &imxdma->dma_device;
		dma_cookie_init(&imxdmac->chan);
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
			      &imxdma->dma_device.channels);
	}

	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
	imxdma->dma_device.device_config = imxdma_config;
	imxdma->dma_device.device_terminate_all = imxdma_terminate_all;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.copy_align = DMAENGINE_ALIGN_4_BYTES;
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto disable_dma_ahb_clk;
	}

	if (pdev->dev.of_node) {
		ret = of_dma_controller_register(pdev->dev.of_node,
				imxdma_xlate, imxdma);
		if (ret) {
			dev_err(&pdev->dev, "unable to register of_dma_controller\n");
			goto err_of_dma_controller;
		}
	}

	return 0;

err_of_dma_controller:
	dma_async_device_unregister(&imxdma->dma_device);
disable_dma_ahb_clk:
	clk_disable_unprepare(imxdma->dma_ahb);
disable_dma_ipg_clk:
	clk_disable_unprepare(imxdma->dma_ipg);
	return ret;
}
static void imxdma_free_irq(struct platform_device *pdev, struct imxdma_engine *imxdma)
{
	int i;

	if (is_imx1_dma(imxdma)) {
		disable_irq(imxdma->irq);
		disable_irq(imxdma->irq_err);
	}

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		if (!is_imx1_dma(imxdma))
			disable_irq(imxdmac->irq);

		tasklet_kill(&imxdmac->dma_tasklet);
	}
}
static void imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);

	imxdma_free_irq(pdev, imxdma);

	dma_async_device_unregister(&imxdma->dma_device);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	clk_disable_unprepare(imxdma->dma_ipg);
	clk_disable_unprepare(imxdma->dma_ahb);
}
static struct platform_driver imxdma_driver = {
	.driver		= {
		.name	= "imx-dma",
		.of_match_table = imx_dma_of_dev_id,
	},
	.remove		= imxdma_remove,
};
static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);
MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");