/*
 * drivers/dma/imx-dma.c
 *
 * This file contains a driver for the Freescale i.MX DMA engine
 * found on i.MX1/21/27
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */

#include <linux/err.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include <asm/irq.h>
#include <linux/platform_data/dma-imx.h>

#include "dmaengine.h"

#define IMXDMA_MAX_CHAN_DESCRIPTORS	16
#define IMX_DMA_CHANNELS		16

#define IMX_DMA_2D_SLOTS	2
#define IMX_DMA_2D_SLOT_A	0
#define IMX_DMA_2D_SLOT_B	1

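/*
 * Marker length for cyclic transfers: imxdma_sg_next() never decrements
 * d->len when it equals IMX_DMA_LENGTH_LOOP, so the scatter-gather
 * emulation cycles over the period list indefinitely.
 */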
#define IMX_DMA_LENGTH_LOOP	((unsigned int)-1)
#define IMX_DMA_MEMSIZE_32	(0 << 4)
#define IMX_DMA_MEMSIZE_8	(1 << 4)
#define IMX_DMA_MEMSIZE_16	(2 << 4)
#define IMX_DMA_TYPE_LINEAR	(0 << 10)
#define IMX_DMA_TYPE_2D		(1 << 10)
#define IMX_DMA_TYPE_FIFO	(2 << 10)

#define IMX_DMA_ERR_BURST	(1 << 0)
#define IMX_DMA_ERR_REQUEST	(1 << 1)
#define IMX_DMA_ERR_TRANSFER	(1 << 2)
#define IMX_DMA_ERR_BUFFER	(1 << 3)
#define IMX_DMA_ERR_TIMEOUT	(1 << 4)

#define DMA_DCR     0x00		/* Control Register */
#define DMA_DISR    0x04		/* Interrupt status Register */
#define DMA_DIMR    0x08		/* Interrupt mask Register */
#define DMA_DBTOSR  0x0c		/* Burst timeout status Register */
#define DMA_DRTOSR  0x10		/* Request timeout Register */
#define DMA_DSESR   0x14		/* Transfer Error Status Register */
#define DMA_DBOSR   0x18		/* Buffer overflow status Register */
#define DMA_DBTOCR  0x1c		/* Burst timeout control Register */
#define DMA_WSRA    0x40		/* W-Size Register A */
#define DMA_XSRA    0x44		/* X-Size Register A */
#define DMA_YSRA    0x48		/* Y-Size Register A */
#define DMA_WSRB    0x4c		/* W-Size Register B */
#define DMA_XSRB    0x50		/* X-Size Register B */
#define DMA_YSRB    0x54		/* Y-Size Register B */
#define DMA_SAR(x)  (0x80 + ((x) << 6))	/* Source Address Registers */
#define DMA_DAR(x)  (0x84 + ((x) << 6))	/* Destination Address Registers */
#define DMA_CNTR(x) (0x88 + ((x) << 6))	/* Count Registers */
#define DMA_CCR(x)  (0x8c + ((x) << 6))	/* Control Registers */
#define DMA_RSSR(x) (0x90 + ((x) << 6))	/* Request source select Registers */
#define DMA_BLR(x)  (0x94 + ((x) << 6))	/* Burst length Registers */
#define DMA_RTOR(x) (0x98 + ((x) << 6))	/* Request timeout Registers */
#define DMA_BUCR(x) (0x98 + ((x) << 6))	/* Bus Utilization Registers */
#define DMA_CCNR(x) (0x9C + ((x) << 6))	/* Channel counter Registers */

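/*
 * Note: DMA_RTOR and DMA_BUCR share offset 0x98. The duplicate offset
 * appears intentional: per the i.MX reference manuals the same
 * per-channel register acts as the request timeout register when request
 * mode is enabled (CCR_REN set) and as the bus utilization register
 * otherwise.
 */
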
#define DCR_DRST           (1<<1)
#define DCR_DEN            (1<<0)
#define DBTOCR_EN          (1<<15)
#define DBTOCR_CNT(x)      ((x) & 0x7fff)
#define CNTR_CNT(x)        ((x) & 0xffffff)
#define CCR_ACRPT          (1<<14)
#define CCR_DMOD_LINEAR    (0x0 << 12)
#define CCR_DMOD_2D        (0x1 << 12)
#define CCR_DMOD_FIFO      (0x2 << 12)
#define CCR_DMOD_EOBFIFO   (0x3 << 12)
#define CCR_SMOD_LINEAR    (0x0 << 10)
#define CCR_SMOD_2D        (0x1 << 10)
#define CCR_SMOD_FIFO      (0x2 << 10)
#define CCR_SMOD_EOBFIFO   (0x3 << 10)
#define CCR_MDIR_DEC       (1<<9)
#define CCR_MSEL_B         (1<<8)
#define CCR_DSIZ_32        (0x0 << 6)
#define CCR_DSIZ_8         (0x1 << 6)
#define CCR_DSIZ_16        (0x2 << 6)
#define CCR_SSIZ_32        (0x0 << 4)
#define CCR_SSIZ_8         (0x1 << 4)
#define CCR_SSIZ_16        (0x2 << 4)
#define CCR_REN            (1<<3)
#define CCR_RPT            (1<<2)
#define CCR_FRC            (1<<1)
#define CCR_CEN            (1<<0)
#define RTOR_EN            (1<<15)
#define RTOR_CLK           (1<<14)
#define RTOR_PSC           (1<<13)

enum imxdma_prep_type {
	IMXDMA_DESC_MEMCPY,
	IMXDMA_DESC_INTERLEAVED,
	IMXDMA_DESC_SLAVE_SG,
	IMXDMA_DESC_CYCLIC,
};

struct imx_dma_2d_config {
	u16		xsr;
	u16		ysr;
	u16		wsr;
	int		count;
};

struct imxdma_desc {
	struct list_head		node;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	dma_addr_t			src;
	dma_addr_t			dest;
	size_t				len;
	enum dma_transfer_direction	direction;
	enum imxdma_prep_type		type;
	/* For memcpy and interleaved */
	unsigned int			config_port;
	unsigned int			config_mem;
	/* For interleaved transfers */
	unsigned int			x;
	unsigned int			y;
	unsigned int			w;
	/* For slave sg and cyclic */
	struct scatterlist		*sg;
	unsigned int			sgcount;
};

struct imxdma_channel {
	int				hw_chaining;
	struct timer_list		watchdog;
	struct imxdma_engine		*imxdma;
	unsigned int			channel;

	struct tasklet_struct		dma_tasklet;
	struct list_head		ld_free;
	struct list_head		ld_queue;
	struct list_head		ld_active;
	int				descs_allocated;
	enum dma_slave_buswidth		word_size;
	dma_addr_t			per_address;
	u32				watermark_level;
	struct dma_chan			chan;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	int				dma_request;
	struct scatterlist		*sg_list;
	u32				ccr_from_device;
	u32				ccr_to_device;
	bool				enabled_2d;
	int				slot_2d;
};

enum imx_dma_type {
	IMX1_DMA,
	IMX21_DMA,
	IMX27_DMA,
};

struct imxdma_engine {
	struct device			*dev;
	struct device_dma_parameters	dma_parms;
	struct dma_device		dma_device;
	void __iomem			*base;
	struct clk			*dma_ahb;
	struct clk			*dma_ipg;
	spinlock_t			lock;
	struct imx_dma_2d_config	slots_2d[IMX_DMA_2D_SLOTS];
	struct imxdma_channel		channel[IMX_DMA_CHANNELS];
	enum imx_dma_type		devtype;
};

struct imxdma_filter_data {
	struct imxdma_engine	*imxdma;
	int			 request;
};

static struct platform_device_id imx_dma_devtype[] = {
	{
		.name = "imx1-dma",
		.driver_data = IMX1_DMA,
	}, {
		.name = "imx21-dma",
		.driver_data = IMX21_DMA,
	}, {
		.name = "imx27-dma",
		.driver_data = IMX27_DMA,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, imx_dma_devtype);

static const struct of_device_id imx_dma_of_dev_id[] = {
	{
		.compatible = "fsl,imx1-dma",
		.data = &imx_dma_devtype[IMX1_DMA],
	}, {
		.compatible = "fsl,imx21-dma",
		.data = &imx_dma_devtype[IMX21_DMA],
	}, {
		.compatible = "fsl,imx27-dma",
		.data = &imx_dma_devtype[IMX27_DMA],
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, imx_dma_of_dev_id);

static inline int is_imx1_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX1_DMA;
}

static inline int is_imx21_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX21_DMA;
}

static inline int is_imx27_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX27_DMA;
}

static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}

static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
	struct imxdma_desc *desc;

	if (!list_empty(&imxdmac->ld_active)) {
		desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
					node);
		if (desc->type == IMXDMA_DESC_CYCLIC)
			return true;
	}
	return false;
}

static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val,
			     unsigned offset)
{
	__raw_writel(val, imxdma->base + offset);
}

static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset)
{
	return __raw_readl(imxdma->base + offset);
}

static int imxdma_hw_chain(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;

	if (is_imx27_dma(imxdma))
		return imxdmac->hw_chaining;
	else
		return 0;
}

/*
 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
 */
static inline int imxdma_sg_next(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct scatterlist *sg = d->sg;
	unsigned long now;

	now = min(d->len, sg_dma_len(sg));
	if (d->len != IMX_DMA_LENGTH_LOOP)
		d->len -= now;

	if (d->direction == DMA_DEV_TO_MEM)
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_DAR(imxdmac->channel));
	else
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_SAR(imxdmac->channel));

	imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel));

	dev_dbg(imxdma->dev, " %s channel: %d dst 0x%08x, src 0x%08x, "
		"size 0x%08x\n", __func__, imxdmac->channel,
		imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel)));

	return now;
}

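/*
 * Enable a channel: ack any pending interrupt in DISR, unmask the channel
 * in DIMR and set CCR_CEN (plus CCR_ACRPT so a repeat programmed while the
 * channel is running takes effect). With hardware chaining (i.MX27 only)
 * the next scatter-gather chunk is pre-programmed here via CCR_RPT/CCR_ACRPT.
 */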
static void imxdma_enable_hw(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	local_irq_save(flags);

	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) &
			 ~(1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) |
			 CCR_CEN | CCR_ACRPT, DMA_CCR(channel));

	if (!is_imx1_dma(imxdma) &&
			d->sg && imxdma_hw_chain(imxdmac)) {
		d->sg = sg_next(d->sg);
		if (d->sg) {
			u32 tmp;
			imxdma_sg_next(d);
			tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel));
			imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT,
					 DMA_CCR(channel));
		}
	}

	local_irq_restore(flags);
}

static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	if (imxdma_hw_chain(imxdmac))
		del_timer(&imxdmac->watchdog);

	local_irq_save(flags);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) |
			 (1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) &
			 ~CCR_CEN, DMA_CCR(channel));
	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	local_irq_restore(flags);
}

static void imxdma_watchdog(unsigned long data)
{
	struct imxdma_channel *imxdmac = (struct imxdma_channel *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;

	imx_dmav1_writel(imxdma, 0, DMA_CCR(channel));

	/* Tasklet watchdog error handler */
	tasklet_schedule(&imxdmac->dma_tasklet);
	dev_dbg(imxdma->dev, "channel %d: watchdog timeout!\n",
		imxdmac->channel);
}

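/*
 * Error interrupt handler: the four error status registers (burst timeout,
 * request timeout, transfer error, buffer overflow) each carry one bit per
 * channel. Bits are acknowledged by writing them back, and the per-channel
 * tasklet is scheduled to run the error path.
 */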
static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	unsigned int err_mask;
	int i, disr;
	int errcode;

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DRTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DSESR)  |
		   imx_dmav1_readl(imxdma, DMA_DBOSR);

	if (!err_mask)
		return IRQ_HANDLED;

	imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		errcode = 0;

		if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}
		/* Tasklet error handler */
		tasklet_schedule(&imxdma->channel[i].dma_tasklet);

		printk(KERN_WARNING
		       "DMA timeout on channel %d -%s%s%s%s\n", i,
		       errcode & IMX_DMA_ERR_BURST ?    " burst" : "",
		       errcode & IMX_DMA_ERR_REQUEST ?  " request" : "",
		       errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
		       errcode & IMX_DMA_ERR_BUFFER ?   " buffer" : "");
	}
	return IRQ_HANDLED;
}

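/*
 * Per-channel completion interrupt: if the active descriptor still has
 * scatter-gather entries left, program the next chunk (re-arming the
 * watchdog when hardware chaining is in use); otherwise stop the channel
 * and let the tasklet complete the descriptor.
 */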
static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int chno = imxdmac->channel;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active)) {
		spin_unlock_irqrestore(&imxdma->lock, flags);
		goto out;
	}

	desc = list_first_entry(&imxdmac->ld_active,
				struct imxdma_desc,
				node);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	if (desc->sg) {
		u32 tmp;
		desc->sg = sg_next(desc->sg);

		if (desc->sg) {
			imxdma_sg_next(desc);

			tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno));

			if (imxdma_hw_chain(imxdmac)) {
				/* FIXME: The timeout should probably be
				 * configurable
				 */
				mod_timer(&imxdmac->watchdog,
					jiffies + msecs_to_jiffies(500));

				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
				imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
			} else {
				imx_dmav1_writel(imxdma, tmp & ~CCR_CEN,
						 DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));

			if (imxdma_chan_is_doing_cyclic(imxdmac))
				/* Tasklet progression */
				tasklet_schedule(&imxdmac->dma_tasklet);

			return;
		}

		if (imxdma_hw_chain(imxdmac)) {
			del_timer(&imxdmac->watchdog);
			return;
		}
	}

out:
	imx_dmav1_writel(imxdma, 0, DMA_CCR(chno));
	/* Tasklet irq */
	tasklet_schedule(&imxdmac->dma_tasklet);
}

static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	int i, disr;

	if (!is_imx1_dma(imxdma))
		imxdma_err_handler(irq, dev_id);

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	dev_dbg(imxdma->dev, "%s called, disr=0x%08x\n", __func__, disr);

	imx_dmav1_writel(imxdma, disr, DMA_DISR);
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i))
			dma_irq_handle_channel(&imxdma->channel[i]);
	}

	return IRQ_HANDLED;
}

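/*
 * Program a descriptor into the hardware. Interleaved (2D) transfers first
 * need one of the two global 2D slots (A or B): a slot already in use can
 * only be shared by transfers with identical X/Y/W geometry, otherwise
 * -EBUSY is returned and the descriptor stays queued.
 */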
static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int slot = -1;
	int i;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_INTERLEAVED:
		/* Try to get a free 2D slot */
		for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
			if ((imxdma->slots_2d[i].count > 0) &&
			((imxdma->slots_2d[i].xsr != d->x) ||
			(imxdma->slots_2d[i].ysr != d->y) ||
			(imxdma->slots_2d[i].wsr != d->w)))
				continue;
			slot = i;
			break;
		}
		if (slot < 0)
			return -EBUSY;

		imxdma->slots_2d[slot].xsr = d->x;
		imxdma->slots_2d[slot].ysr = d->y;
		imxdma->slots_2d[slot].wsr = d->w;
		imxdma->slots_2d[slot].count++;

		imxdmac->slot_2d = slot;
		imxdmac->enabled_2d = true;

		if (slot == IMX_DMA_2D_SLOT_A) {
			d->config_mem &= ~CCR_MSEL_B;
			d->config_port &= ~CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRA);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRA);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRA);
		} else {
			d->config_mem |= CCR_MSEL_B;
			d->config_port |= CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRB);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRB);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRB);
		}
		/*
		 * We fall-through here intentionally, since a 2D transfer is
		 * similar to MEMCPY just adding the 2D slot configuration.
		 */
	case IMXDMA_DESC_MEMCPY:
		imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2),
				 DMA_CCR(imxdmac->channel));

		imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));

		dev_dbg(imxdma->dev,
			"%s channel: %d dest=0x%08llx src=0x%08llx dma_length=%zu\n",
			__func__, imxdmac->channel,
			(unsigned long long)d->dest,
			(unsigned long long)d->src, d->len);

		break;
	/* Cyclic transfer is the same as slave_sg with special sg configuration. */
	case IMXDMA_DESC_CYCLIC:
	case IMXDMA_DESC_SLAVE_SG:
		if (d->direction == DMA_DEV_TO_MEM) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_SAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev,
				"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (dev2mem)\n",
				__func__, imxdmac->channel,
				d->sg, d->sgcount, d->len,
				(unsigned long long)imxdmac->per_address);
		} else if (d->direction == DMA_MEM_TO_DEV) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_DAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev,
				"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (mem2dev)\n",
				__func__, imxdmac->channel,
				d->sg, d->sgcount, d->len,
				(unsigned long long)imxdmac->per_address);
		} else {
			dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
				__func__, imxdmac->channel);
			return -EINVAL;
		}

		imxdma_sg_next(d);

		break;
	default:
		return -EINVAL;
	}
	imxdma_enable_hw(d);
	return 0;
}

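/*
 * Completion tasklet: cyclic descriptors stay on ld_active and only get
 * their callback invoked; non-cyclic descriptors are completed, moved back
 * to ld_free, and the next queued descriptor, if any, is started. The
 * callback of the just-completed descriptor runs after the lock is dropped.
 */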
static void imxdma_tasklet(unsigned long data)
{
	struct imxdma_channel *imxdmac = (void *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc, *next_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		spin_unlock_irqrestore(&imxdma->lock, flags);
		return;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	/* If we are dealing with a cyclic descriptor, keep it on ld_active
	 * and don't mark the descriptor as complete.
	 * Only in non-cyclic cases is it marked as complete.
	 */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;
	else
		dma_cookie_complete(&desc->desc);

	/* Free 2D slot if it was an interleaved transfer */
	if (imxdmac->enabled_2d) {
		imxdma->slots_2d[imxdmac->slot_2d].count--;
		imxdmac->enabled_2d = false;
	}

	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		next_desc = list_first_entry(&imxdmac->ld_queue,
					     struct imxdma_desc, node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(next_desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock_irqrestore(&imxdma->lock, flags);

	if (desc->desc.callback)
		desc->desc.callback(desc->desc.callback_param);
}

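/*
 * device_control entry point. DMA_TERMINATE_ALL stops the channel and moves
 * every active/queued descriptor back to the free list. DMA_SLAVE_CONFIG
 * builds the CCR templates for both directions: in the CCR layout the
 * destination fields (DSIZ/DMOD) are the source fields (SSIZ/SMOD) shifted
 * left by two bits, which is why the memory side is "<< 2" in one template
 * and the FIFO side is "<< 2" in the other.
 */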
static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			  unsigned long arg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct dma_slave_config *dmaengine_cfg = (void *)arg;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned long flags;
	unsigned int mode = 0;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		imxdma_disable_hw(imxdmac);

		spin_lock_irqsave(&imxdma->lock, flags);
		list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
		list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
		spin_unlock_irqrestore(&imxdma->lock, flags);
		return 0;
	case DMA_SLAVE_CONFIG:
		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
			imxdmac->per_address = dmaengine_cfg->src_addr;
			imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
			imxdmac->word_size = dmaengine_cfg->src_addr_width;
		} else {
			imxdmac->per_address = dmaengine_cfg->dst_addr;
			imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
			imxdmac->word_size = dmaengine_cfg->dst_addr_width;
		}

		switch (imxdmac->word_size) {
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			mode = IMX_DMA_MEMSIZE_8;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			mode = IMX_DMA_MEMSIZE_16;
			break;
		default:
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			mode = IMX_DMA_MEMSIZE_32;
			break;
		}

		imxdmac->hw_chaining = 0;

		imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
			((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
			CCR_REN;
		imxdmac->ccr_to_device =
			(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
			((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
		imx_dmav1_writel(imxdma, imxdmac->dma_request,
				 DMA_RSSR(imxdmac->channel));

		/* Set burst length */
		imx_dmav1_writel(imxdma, imxdmac->watermark_level *
				 imxdmac->word_size, DMA_BLR(imxdmac->channel));

		return 0;
	default:
		return -ENOSYS;
	}

	return -EINVAL;
}

static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

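/*
 * tx_submit only assigns a cookie and moves the descriptor from ld_free to
 * ld_queue under the engine lock; nothing touches the hardware until
 * imxdma_issue_pending() is called.
 */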
static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
	cookie = dma_cookie_assign(tx);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	return cookie;
}

static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		memset(&desc->desc, 0, sizeof(struct dma_async_tx_descriptor));
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_COMPLETE;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}

static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	imxdma_disable_hw(imxdmac);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdma->lock, flags);

	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	kfree(imxdmac->sg_list);
	imxdmac->sg_list = NULL;
}

static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg_dma_len(sg);
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

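/*
 * Cyclic transfers are emulated with a scatterlist of buf_len/period_len
 * equal-sized entries whose extra terminator entry chains back to the first
 * one (page_link with bit 0 set), so imxdma_sg_next() cycles through the
 * periods forever.
 */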
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
			__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_ATOMIC);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		imxdmac->sg_list[i].page_link = 0;
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		sg_dma_len(&imxdmac->sg_list[i]) = period_len;
		dma_addr += period_len;
	}

	/* close the loop */
	imxdmac->sg_list[periods].offset = 0;
	sg_dma_len(&imxdmac->sg_list[periods]) = 0;
	imxdmac->sg_list[periods].page_link =
		((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src=0x%llx dst=0x%llx len=%zu\n",
			__func__, imxdmac->channel, (unsigned long long)src,
			(unsigned long long)dest, len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

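/*
 * Interleaved transfers map directly onto the controller's 2D mode: x is
 * the line size, y the number of lines, and w (line size plus inter-chunk
 * gap) the stride programmed into the W-size register. Only single-frame
 * memory-to-memory templates are accepted.
 */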
static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%llx dst_start=0x%llx\n"
		" src_sgl=%s dst_sgl=%s numf=%zu frame_size=%zu\n", __func__,
		imxdmac->channel, (unsigned long long)xt->src_start,
		(unsigned long long) xt->dst_start,
		xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false",
		xt->numf, xt->frame_size);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	if (xt->frame_size != 1 || xt->numf <= 0 || xt->dir != DMA_MEM_TO_MEM)
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_INTERLEAVED;
	desc->src = xt->src_start;
	desc->dest = xt->dst_start;
	desc->x = xt->sgl[0].size;
	desc->y = xt->numf;
	desc->w = xt->sgl[0].icg + desc->x;
	desc->len = desc->x * desc->y;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32;
	desc->config_mem = IMX_DMA_MEMSIZE_32;
	if (xt->src_sgl)
		desc->config_mem |= IMX_DMA_TYPE_2D;
	if (xt->dst_sgl)
		desc->config_port |= IMX_DMA_TYPE_2D;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

static void imxdma_issue_pending(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active) &&
	    !list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue,
					struct imxdma_desc, node);

		if (imxdma_xfer_desc(desc) < 0) {
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	}
	spin_unlock_irqrestore(&imxdma->lock, flags);
}

static bool imxdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct imxdma_filter_data *fdata = param;
	struct imxdma_channel *imxdma_chan = to_imxdma_chan(chan);

	if (chan->device->dev != fdata->imxdma->dev)
		return false;

	imxdma_chan->dma_request = fdata->request;
	chan->private = NULL;

	return true;
}

static struct dma_chan *imxdma_xlate(struct of_phandle_args *dma_spec,
						struct of_dma *ofdma)
{
	int count = dma_spec->args_count;
	struct imxdma_engine *imxdma = ofdma->of_dma_data;
	struct imxdma_filter_data fdata = {
		.imxdma = imxdma,
	};

	if (count != 1)
		return NULL;

	fdata.request = dma_spec->args[0];

	return dma_request_channel(imxdma->dma_device.cap_mask,
				   imxdma_filter_fn, &fdata);
}

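/*
 * Probe: enable the ipg/ahb clocks, reset and enable the DMA block, then
 * request interrupts. i.MX1 exposes one shared DMA interrupt plus a
 * separate error interrupt; i.MX21/27 use one interrupt per channel and
 * dma_irq_handler() polls the error status itself.
 */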
static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	struct resource *res;
	const struct of_device_id *of_id;
	int ret, i;
	int irq, irq_err;

	of_id = of_match_device(imx_dma_of_dev_id, &pdev->dev);
	if (of_id)
		pdev->id_entry = of_id->data;

	imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	imxdma->dev = &pdev->dev;
	imxdma->devtype = pdev->id_entry->driver_data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	imxdma->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(imxdma->base))
		return PTR_ERR(imxdma->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(imxdma->dma_ipg))
		return PTR_ERR(imxdma->dma_ipg);

	imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(imxdma->dma_ahb))
		return PTR_ERR(imxdma->dma_ahb);

	clk_prepare_enable(imxdma->dma_ipg);
	clk_prepare_enable(imxdma->dma_ahb);

	/* reset DMA module */
	imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);

	if (is_imx1_dma(imxdma)) {
		ret = devm_request_irq(&pdev->dev, irq,
				       dma_irq_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
			goto err;
		}

		irq_err = platform_get_irq(pdev, 1);
		if (irq_err < 0) {
			ret = irq_err;
			goto err;
		}

		ret = devm_request_irq(&pdev->dev, irq_err,
				       imxdma_err_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
			goto err;
		}
	}

	/* enable DMA module */
	imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR);

	/* clear all interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

	/* disable interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, imxdma->dma_device.cap_mask);

	/* Initialize 2D global parameters */
	for (i = 0; i < IMX_DMA_2D_SLOTS; i++)
		imxdma->slots_2d[i].count = 0;

	spin_lock_init(&imxdma->lock);

	/* Initialize channel parameters */
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		if (!is_imx1_dma(imxdma)) {
			ret = devm_request_irq(&pdev->dev, irq + i,
					dma_irq_handler, 0, "DMA", imxdma);
			if (ret) {
				dev_warn(imxdma->dev, "Can't register IRQ %d "
					 "for DMA channel %d\n",
					 irq + i, i);
				goto err;
			}
			init_timer(&imxdmac->watchdog);
			imxdmac->watchdog.function = &imxdma_watchdog;
			imxdmac->watchdog.data = (unsigned long)imxdmac;
		}

		imxdmac->imxdma = imxdma;

		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		INIT_LIST_HEAD(&imxdmac->ld_active);

		tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
			     (unsigned long)imxdmac);
		imxdmac->chan.device = &imxdma->dma_device;
		dma_cookie_init(&imxdmac->chan);
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
			      &imxdma->dma_device.channels);
	}

	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
	imxdma->dma_device.device_control = imxdma_control;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */
	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto err;
	}

	if (pdev->dev.of_node) {
		ret = of_dma_controller_register(pdev->dev.of_node,
				imxdma_xlate, imxdma);
		if (ret) {
			dev_err(&pdev->dev, "unable to register of_dma_controller\n");
			goto err_of_dma_controller;
		}
	}

	return 0;

err_of_dma_controller:
	dma_async_device_unregister(&imxdma->dma_device);
err:
	clk_disable_unprepare(imxdma->dma_ipg);
	clk_disable_unprepare(imxdma->dma_ahb);
	return ret;
}

static int imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);

	dma_async_device_unregister(&imxdma->dma_device);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	clk_disable_unprepare(imxdma->dma_ipg);
	clk_disable_unprepare(imxdma->dma_ahb);

	return 0;
}

static struct platform_driver imxdma_driver = {
	.driver		= {
		.name	= "imx-dma",
		.of_match_table = imx_dma_of_dev_id,
	},
	.id_table	= imx_dma_devtype,
	.remove		= imxdma_remove,
};

static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");