Adding support for MOXA ART SoC. Testing port of linux-2.6.32.60-moxart.
[linux-3.6.7-moxart.git] / drivers / dma / imx-dma.c
/*
 * drivers/dma/imx-dma.c
 *
 * This file contains a driver for the Freescale i.MX DMA engine
 * found on i.MX1/21/27
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/module.h>

#include <asm/irq.h>
#include <mach/dma.h>
#include <mach/hardware.h>

#include "dmaengine.h"
#define IMXDMA_MAX_CHAN_DESCRIPTORS	16
#define IMX_DMA_CHANNELS		16

#define IMX_DMA_2D_SLOTS	2
#define IMX_DMA_2D_SLOT_A	0
#define IMX_DMA_2D_SLOT_B	1

#define IMX_DMA_LENGTH_LOOP	((unsigned int)-1)
#define IMX_DMA_MEMSIZE_32	(0 << 4)
#define IMX_DMA_MEMSIZE_8	(1 << 4)
#define IMX_DMA_MEMSIZE_16	(2 << 4)
#define IMX_DMA_TYPE_LINEAR	(0 << 10)
#define IMX_DMA_TYPE_2D		(1 << 10)
#define IMX_DMA_TYPE_FIFO	(2 << 10)

#define IMX_DMA_ERR_BURST	(1 << 0)
#define IMX_DMA_ERR_REQUEST	(1 << 1)
#define IMX_DMA_ERR_TRANSFER	(1 << 2)
#define IMX_DMA_ERR_BUFFER	(1 << 3)
#define IMX_DMA_ERR_TIMEOUT	(1 << 4)
#define DMA_DCR		0x00		/* Control Register */
#define DMA_DISR	0x04		/* Interrupt status Register */
#define DMA_DIMR	0x08		/* Interrupt mask Register */
#define DMA_DBTOSR	0x0c		/* Burst timeout status Register */
#define DMA_DRTOSR	0x10		/* Request timeout Register */
#define DMA_DSESR	0x14		/* Transfer Error Status Register */
#define DMA_DBOSR	0x18		/* Buffer overflow status Register */
#define DMA_DBTOCR	0x1c		/* Burst timeout control Register */
#define DMA_WSRA	0x40		/* W-Size Register A */
#define DMA_XSRA	0x44		/* X-Size Register A */
#define DMA_YSRA	0x48		/* Y-Size Register A */
#define DMA_WSRB	0x4c		/* W-Size Register B */
#define DMA_XSRB	0x50		/* X-Size Register B */
#define DMA_YSRB	0x54		/* Y-Size Register B */
#define DMA_SAR(x)	(0x80 + ((x) << 6))	/* Source Address Registers */
#define DMA_DAR(x)	(0x84 + ((x) << 6))	/* Destination Address Registers */
#define DMA_CNTR(x)	(0x88 + ((x) << 6))	/* Count Registers */
#define DMA_CCR(x)	(0x8c + ((x) << 6))	/* Control Registers */
#define DMA_RSSR(x)	(0x90 + ((x) << 6))	/* Request source select Registers */
#define DMA_BLR(x)	(0x94 + ((x) << 6))	/* Burst length Registers */
#define DMA_RTOR(x)	(0x98 + ((x) << 6))	/* Request timeout Registers */
#define DMA_BUCR(x)	(0x98 + ((x) << 6))	/* Bus Utilization Registers */
#define DMA_CCNR(x)	(0x9C + ((x) << 6))	/* Channel counter Registers */
#define DCR_DRST	(1<<1)
#define DCR_DEN		(1<<0)
#define DBTOCR_EN	(1<<15)
#define DBTOCR_CNT(x)	((x) & 0x7fff)
#define CNTR_CNT(x)	((x) & 0xffffff)
#define CCR_ACRPT	(1<<14)
#define CCR_DMOD_LINEAR	(0x0 << 12)
#define CCR_DMOD_2D	(0x1 << 12)
#define CCR_DMOD_FIFO	(0x2 << 12)
#define CCR_DMOD_EOBFIFO (0x3 << 12)
#define CCR_SMOD_LINEAR	(0x0 << 10)
#define CCR_SMOD_2D	(0x1 << 10)
#define CCR_SMOD_FIFO	(0x2 << 10)
#define CCR_SMOD_EOBFIFO (0x3 << 10)
#define CCR_MDIR_DEC	(1<<9)
#define CCR_MSEL_B	(1<<8)
#define CCR_DSIZ_32	(0x0 << 6)
#define CCR_DSIZ_8	(0x1 << 6)
#define CCR_DSIZ_16	(0x2 << 6)
#define CCR_SSIZ_32	(0x0 << 4)
#define CCR_SSIZ_8	(0x1 << 4)
#define CCR_SSIZ_16	(0x2 << 4)
#define CCR_REN		(1<<3)
#define CCR_RPT		(1<<2)
#define CCR_FRC		(1<<1)
#define CCR_CEN		(1<<0)
#define RTOR_EN		(1<<15)
#define RTOR_CLK	(1<<14)
#define RTOR_PSC	(1<<13)
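
/*
 * Editor's sketch (not part of the original file): the per-channel CCR
 * encodes the source transfer in bits 4-5 (SSIZ) and 10-11 (SMOD), and
 * the destination transfer in bits 6-7 (DSIZ) and 12-13 (DMOD). The
 * IMX_DMA_MEMSIZE_x and IMX_DMA_TYPE_x helpers above sit in the source
 * fields, so shifting such a value left by two moves it into the
 * destination fields. For a 16-bit peripheral FIFO fed from 32-bit
 * linear memory, a CCR would be composed roughly like this:
 */
#if 0
	u32 ccr = (IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |		/* source */
		  ((IMX_DMA_MEMSIZE_16 | IMX_DMA_TYPE_FIFO) << 2) |	/* destination */
		  CCR_REN;						/* hw request enable */
	/* equivalent to CCR_SSIZ_32 | CCR_SMOD_LINEAR | CCR_DSIZ_16 |
	 * CCR_DMOD_FIFO | CCR_REN; this mirrors how ccr_to_device is
	 * assembled in imxdma_control() below. */
#endif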
enum  imxdma_prep_type {
	IMXDMA_DESC_MEMCPY,
	IMXDMA_DESC_INTERLEAVED,
	IMXDMA_DESC_SLAVE_SG,
	IMXDMA_DESC_CYCLIC,
};
struct imx_dma_2d_config {
	u16		xsr;
	u16		ysr;
	u16		wsr;
	int		count;
};
struct imxdma_desc {
	struct list_head		node;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	dma_addr_t			src;
	dma_addr_t			dest;
	size_t				len;
	enum dma_transfer_direction	direction;
	enum imxdma_prep_type		type;
	/* For memcpy and interleaved */
	unsigned int			config_port;
	unsigned int			config_mem;
	/* For interleaved transfers */
	unsigned int			x;
	unsigned int			y;
	unsigned int			w;
	/* For slave sg and cyclic */
	struct scatterlist		*sg;
	unsigned int			sgcount;
};
struct imxdma_channel {
	int				hw_chaining;
	struct timer_list		watchdog;
	struct imxdma_engine		*imxdma;
	unsigned int			channel;

	struct tasklet_struct		dma_tasklet;
	struct list_head		ld_free;
	struct list_head		ld_queue;
	struct list_head		ld_active;
	int				descs_allocated;
	enum dma_slave_buswidth		word_size;
	dma_addr_t			per_address;
	u32				watermark_level;
	struct dma_chan			chan;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	int				dma_request;
	struct scatterlist		*sg_list;
	u32				ccr_from_device;
	u32				ccr_to_device;
	bool				enabled_2d;
	int				slot_2d;
};
struct imxdma_engine {
	struct device			*dev;
	struct device_dma_parameters	dma_parms;
	struct dma_device		dma_device;
	void __iomem			*base;
	struct clk			*dma_ahb;
	struct clk			*dma_ipg;
	spinlock_t			lock;
	struct imx_dma_2d_config	slots_2d[IMX_DMA_2D_SLOTS];
	struct imxdma_channel		channel[IMX_DMA_CHANNELS];
};
static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}
static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
	struct imxdma_desc *desc;

	if (!list_empty(&imxdmac->ld_active)) {
		desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
					node);
		if (desc->type == IMXDMA_DESC_CYCLIC)
			return true;
	}
	return false;
}
static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val,
			     unsigned offset)
{
	__raw_writel(val, imxdma->base + offset);
}

static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset)
{
	return __raw_readl(imxdma->base + offset);
}
static int imxdma_hw_chain(struct imxdma_channel *imxdmac)
{
	if (cpu_is_mx27())
		return imxdmac->hw_chaining;
	else
		return 0;
}
/*
 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
 */
static inline int imxdma_sg_next(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct scatterlist *sg = d->sg;
	unsigned long now;

	now = min(d->len, sg_dma_len(sg));
	if (d->len != IMX_DMA_LENGTH_LOOP)
		d->len -= now;

	if (d->direction == DMA_DEV_TO_MEM)
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_DAR(imxdmac->channel));
	else
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_SAR(imxdmac->channel));

	imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel));

	dev_dbg(imxdma->dev, " %s channel: %d dst 0x%08x, src 0x%08x, "
		"size 0x%08x\n", __func__, imxdmac->channel,
		imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel)));

	return now;
}
static void imxdma_enable_hw(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	local_irq_save(flags);

	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) &
			 ~(1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) |
			 CCR_CEN | CCR_ACRPT, DMA_CCR(channel));

	if ((cpu_is_mx21() || cpu_is_mx27()) &&
	    d->sg && imxdma_hw_chain(imxdmac)) {
		d->sg = sg_next(d->sg);
		if (d->sg) {
			u32 tmp;
			imxdma_sg_next(d);
			tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel));
			imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT,
					 DMA_CCR(channel));
		}
	}

	local_irq_restore(flags);
}
static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	if (imxdma_hw_chain(imxdmac))
		del_timer(&imxdmac->watchdog);

	local_irq_save(flags);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) |
			 (1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) &
			 ~CCR_CEN, DMA_CCR(channel));
	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	local_irq_restore(flags);
}
static void imxdma_watchdog(unsigned long data)
{
	struct imxdma_channel *imxdmac = (struct imxdma_channel *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;

	imx_dmav1_writel(imxdma, 0, DMA_CCR(channel));

	/* Tasklet watchdog error handler */
	tasklet_schedule(&imxdmac->dma_tasklet);
	dev_dbg(imxdma->dev, "channel %d: watchdog timeout!\n",
		imxdmac->channel);
}
static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	unsigned int err_mask;
	int i, disr;
	int errcode;

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DRTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DSESR)  |
		   imx_dmav1_readl(imxdma, DMA_DBOSR);

	if (!err_mask)
		return IRQ_HANDLED;

	imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		errcode = 0;

		if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}
		/* Tasklet error handler */
		tasklet_schedule(&imxdma->channel[i].dma_tasklet);

		printk(KERN_WARNING
		       "DMA timeout on channel %d -%s%s%s%s\n", i,
		       errcode & IMX_DMA_ERR_BURST ?    " burst" : "",
		       errcode & IMX_DMA_ERR_REQUEST ?  " request" : "",
		       errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
		       errcode & IMX_DMA_ERR_BUFFER ?   " buffer" : "");
	}
	return IRQ_HANDLED;
}
static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int chno = imxdmac->channel;
	struct imxdma_desc *desc;

	spin_lock(&imxdma->lock);
	if (list_empty(&imxdmac->ld_active)) {
		spin_unlock(&imxdma->lock);
		goto out;
	}

	desc = list_first_entry(&imxdmac->ld_active,
				struct imxdma_desc,
				node);
	spin_unlock(&imxdma->lock);

	if (desc->sg) {
		u32 tmp;
		desc->sg = sg_next(desc->sg);

		if (desc->sg) {
			imxdma_sg_next(desc);

			tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno));

			if (imxdma_hw_chain(imxdmac)) {
				/* FIXME: The timeout should probably be
				 * configurable
				 */
				mod_timer(&imxdmac->watchdog,
					  jiffies + msecs_to_jiffies(500));

				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
				imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
			} else {
				imx_dmav1_writel(imxdma, tmp & ~CCR_CEN,
						 DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
		}

		if (imxdma_chan_is_doing_cyclic(imxdmac))
			/* Tasklet progression */
			tasklet_schedule(&imxdmac->dma_tasklet);

		return;
	}

	if (imxdma_hw_chain(imxdmac)) {
		del_timer(&imxdmac->watchdog);
		return;
	}

out:
	imx_dmav1_writel(imxdma, 0, DMA_CCR(chno));
	/* Tasklet irq */
	tasklet_schedule(&imxdmac->dma_tasklet);
}
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	int i, disr;

	if (cpu_is_mx21() || cpu_is_mx27())
		imxdma_err_handler(irq, dev_id);

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	dev_dbg(imxdma->dev, "%s called, disr=0x%08x\n", __func__, disr);

	imx_dmav1_writel(imxdma, disr, DMA_DISR);
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i))
			dma_irq_handle_channel(&imxdma->channel[i]);
	}

	return IRQ_HANDLED;
}
static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned long flags;
	int slot = -1;
	int i;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_INTERLEAVED:
		/* Try to get a free 2D slot */
		spin_lock_irqsave(&imxdma->lock, flags);
		for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
			if ((imxdma->slots_2d[i].count > 0) &&
			    ((imxdma->slots_2d[i].xsr != d->x) ||
			     (imxdma->slots_2d[i].ysr != d->y) ||
			     (imxdma->slots_2d[i].wsr != d->w)))
				continue;
			slot = i;
			break;
		}
		if (slot < 0) {
			spin_unlock_irqrestore(&imxdma->lock, flags);
			return -EBUSY;
		}

		imxdma->slots_2d[slot].xsr = d->x;
		imxdma->slots_2d[slot].ysr = d->y;
		imxdma->slots_2d[slot].wsr = d->w;
		imxdma->slots_2d[slot].count++;

		imxdmac->slot_2d = slot;
		imxdmac->enabled_2d = true;
		spin_unlock_irqrestore(&imxdma->lock, flags);

		if (slot == IMX_DMA_2D_SLOT_A) {
			d->config_mem &= ~CCR_MSEL_B;
			d->config_port &= ~CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRA);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRA);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRA);
		} else {
			d->config_mem |= CCR_MSEL_B;
			d->config_port |= CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRB);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRB);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRB);
		}
		/*
		 * We fall-through here intentionally, since a 2D transfer is
		 * similar to MEMCPY just adding the 2D slot configuration.
		 */
	case IMXDMA_DESC_MEMCPY:
		imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2),
				 DMA_CCR(imxdmac->channel));

		imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));

		dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x "
			"dma_length=%d\n", __func__, imxdmac->channel,
			d->dest, d->src, d->len);

		break;
	/* Cyclic transfer is the same as slave_sg with special sg configuration. */
	case IMXDMA_DESC_CYCLIC:
	case IMXDMA_DESC_SLAVE_SG:
		if (d->direction == DMA_DEV_TO_MEM) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_SAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
				"total length=%d dev_addr=0x%08x (dev2mem)\n",
				__func__, imxdmac->channel, d->sg, d->sgcount,
				d->len, imxdmac->per_address);
		} else if (d->direction == DMA_MEM_TO_DEV) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_DAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
				"total length=%d dev_addr=0x%08x (mem2dev)\n",
				__func__, imxdmac->channel, d->sg, d->sgcount,
				d->len, imxdmac->per_address);
		} else {
			dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
				__func__, imxdmac->channel);
			return -EINVAL;
		}

		imxdma_sg_next(d);

		break;
	default:
		return -EINVAL;
	}
	imxdma_enable_hw(d);
	return 0;
}
static void imxdma_tasklet(unsigned long data)
{
	struct imxdma_channel *imxdmac = (void *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	spin_lock(&imxdma->lock);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		goto out;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	if (desc->desc.callback)
		desc->desc.callback(desc->desc.callback_param);

	/* If we are dealing with a cyclic descriptor, keep it on ld_active
	 * and don't mark the descriptor as complete.
	 * Only in non-cyclic cases is it marked as complete.
	 */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;
	else
		dma_cookie_complete(&desc->desc);

	/* Free 2D slot if it was an interleaved transfer */
	if (imxdmac->enabled_2d) {
		imxdma->slots_2d[imxdmac->slot_2d].count--;
		imxdmac->enabled_2d = false;
	}

	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
					node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock(&imxdma->lock);
}
static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			  unsigned long arg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct dma_slave_config *dmaengine_cfg = (void *)arg;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned long flags;
	unsigned int mode = 0;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		imxdma_disable_hw(imxdmac);

		spin_lock_irqsave(&imxdma->lock, flags);
		list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
		list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
		spin_unlock_irqrestore(&imxdma->lock, flags);
		return 0;
	case DMA_SLAVE_CONFIG:
		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
			imxdmac->per_address = dmaengine_cfg->src_addr;
			imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
			imxdmac->word_size = dmaengine_cfg->src_addr_width;
		} else {
			imxdmac->per_address = dmaengine_cfg->dst_addr;
			imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
			imxdmac->word_size = dmaengine_cfg->dst_addr_width;
		}

		switch (imxdmac->word_size) {
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			mode = IMX_DMA_MEMSIZE_8;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			mode = IMX_DMA_MEMSIZE_16;
			break;
		default:
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			mode = IMX_DMA_MEMSIZE_32;
			break;
		}

		imxdmac->hw_chaining = 1;
		if (!imxdma_hw_chain(imxdmac))
			return -EINVAL;
		imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
			((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
			CCR_REN;
		imxdmac->ccr_to_device =
			(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
			((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
		imx_dmav1_writel(imxdma, imxdmac->dma_request,
				 DMA_RSSR(imxdmac->channel));

		/* Set burst length */
		imx_dmav1_writel(imxdma, imxdmac->watermark_level *
				 imxdmac->word_size, DMA_BLR(imxdmac->channel));

		return 0;
	default:
		return -ENOSYS;
	}

	return -EINVAL;
}
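
/*
 * Editor's sketch (not in the original file): how a 3.6-era peripheral
 * driver would reach the DMA_SLAVE_CONFIG case above through the
 * generic dmaengine API. "chan" is a channel obtained from
 * dma_request_channel(); my_fifo_phys and the burst size are
 * placeholders.
 */
#if 0
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= my_fifo_phys,	/* placeholder FIFO bus address */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
		.src_maxburst	= 4,	/* becomes watermark_level above */
	};

	ret = dmaengine_slave_config(chan, &cfg);
#endif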
static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}
static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
	cookie = dma_cookie_assign(tx);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	return cookie;
}
static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		__memzero(&desc->desc, sizeof(struct dma_async_tx_descriptor));
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_SUCCESS;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}
static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	imxdma_disable_hw(imxdmac);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdma->lock, flags);

	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	if (imxdmac->sg_list) {
		kfree(imxdmac->sg_list);
		imxdmac->sg_list = NULL;
	}
}
static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg_dma_len(sg);
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
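
/*
 * Editor's sketch (not in the original file): typical client-side use
 * of the prep hook above via the generic dmaengine wrappers of this
 * kernel version. my_sgl, my_sg_len and my_done_cb are placeholders;
 * the buffer must be mapped before preparation.
 */
#if 0
	nents = dma_map_sg(chan->device->dev, my_sgl, my_sg_len, DMA_TO_DEVICE);
	txd = dmaengine_prep_slave_sg(chan, my_sgl, nents,
				      DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (txd) {
		txd->callback = my_done_cb;	/* runs from imxdma_tasklet() */
		cookie = dmaengine_submit(txd);	/* calls imxdma_tx_submit() */
		dma_async_issue_pending(chan);	/* calls imxdma_issue_pending() */
	}
#endif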
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n",
		__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	if (imxdmac->sg_list)
		kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
				   sizeof(struct scatterlist), GFP_KERNEL);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		imxdmac->sg_list[i].page_link = 0;
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		sg_dma_len(&imxdmac->sg_list[i]) = period_len;
		dma_addr += period_len;
	}

	/* close the loop */
	imxdmac->sg_list[periods].offset = 0;
	sg_dma_len(&imxdmac->sg_list[periods]) = 0;
	imxdmac->sg_list[periods].page_link =
		((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
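
/*
 * Editor's sketch (not in the original file): an audio-style client
 * hands the whole ring buffer to the hook above; one descriptor then
 * loops forever and the callback fires once per period from
 * imxdma_tasklet(). ring_dma and period_bytes are placeholders, and
 * the op is called through the device struct since the
 * dmaengine_prep_dma_cyclic() wrapper may not exist in this tree.
 */
#if 0
	txd = chan->device->device_prep_dma_cyclic(chan, ring_dma,
			4 * period_bytes, period_bytes, DMA_DEV_TO_MEM, NULL);
#endif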
static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest,
		dma_addr_t src, size_t len, unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%d\n",
		__func__, imxdmac->channel, src, dest, len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
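
/*
 * Editor's sketch (not in the original file): memcpy offload through
 * the op above. dst_dma and src_dma are placeholder bus addresses;
 * per the copy_align set in imxdma_probe() they should be 4-byte
 * aligned.
 */
#if 0
	txd = chan->device->device_prep_dma_memcpy(chan, dst_dma, src_dma,
						   len, DMA_CTRL_ACK);
	if (txd) {
		cookie = dmaengine_submit(txd);
		dma_async_issue_pending(chan);
	}
#endif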
static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved(
		struct dma_chan *chan, struct dma_interleaved_template *xt,
		unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%x dst_start=0x%x\n"
		"   src_sgl=%s dst_sgl=%s numf=%d frame_size=%d\n", __func__,
		imxdmac->channel, xt->src_start, xt->dst_start,
		xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false",
		xt->numf, xt->frame_size);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	if (xt->frame_size != 1 || xt->numf <= 0 || xt->dir != DMA_MEM_TO_MEM)
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_INTERLEAVED;
	desc->src = xt->src_start;
	desc->dest = xt->dst_start;
	desc->x = xt->sgl[0].size;
	desc->y = xt->numf;
	desc->w = xt->sgl[0].icg + desc->x;
	desc->len = desc->x * desc->y;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32;
	desc->config_mem = IMX_DMA_MEMSIZE_32;
	if (xt->src_sgl)
		desc->config_mem |= IMX_DMA_TYPE_2D;
	if (xt->dst_sgl)
		desc->config_port |= IMX_DMA_TYPE_2D;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
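
/*
 * Editor's sketch (not in the original file): a 2D template the hook
 * above accepts, reading 64 rows of 512 bytes out of a source whose
 * rows are 544 bytes apart (w = size + icg) into a linear buffer. All
 * numbers and the src_dma/dst_dma addresses are placeholders.
 */
#if 0
	struct dma_interleaved_template *xt;

	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
	xt->src_start = src_dma;
	xt->dst_start = dst_dma;
	xt->dir = DMA_MEM_TO_MEM;	/* only mode accepted above */
	xt->numf = 64;			/* frames (rows)   -> desc->y */
	xt->frame_size = 1;		/* one chunk per frame, as required */
	xt->sgl[0].size = 512;		/* bytes per row   -> desc->x */
	xt->sgl[0].icg = 32;		/* row gap; w = icg + size */
	xt->src_sgl = true;		/* source is the 2D side */
	xt->dst_sgl = false;		/* destination is linear */
	txd = chan->device->device_prep_interleaved_dma(chan, xt, 0);
#endif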
static void imxdma_issue_pending(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active) &&
	    !list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue,
					struct imxdma_desc, node);

		if (imxdma_xfer_desc(desc) < 0) {
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	}
	spin_unlock_irqrestore(&imxdma->lock, flags);
}
static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	int ret, i;

	imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	if (cpu_is_mx1()) {
		imxdma->base = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR);
	} else if (cpu_is_mx21()) {
		imxdma->base = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR);
	} else if (cpu_is_mx27()) {
		imxdma->base = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR);
	} else {
		kfree(imxdma);
		return 0;
	}

	imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(imxdma->dma_ipg)) {
		ret = PTR_ERR(imxdma->dma_ipg);
		goto err_clk;
	}

	imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(imxdma->dma_ahb)) {
		ret = PTR_ERR(imxdma->dma_ahb);
		goto err_clk;
	}

	clk_prepare_enable(imxdma->dma_ipg);
	clk_prepare_enable(imxdma->dma_ahb);

	/* reset DMA module */
	imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);

	if (cpu_is_mx1()) {
		ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
			goto err_enable;
		}

		ret = request_irq(MX1_DMA_ERR, imxdma_err_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
			free_irq(MX1_DMA_INT, NULL);
			goto err_enable;
		}
	}

	/* enable DMA module */
	imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR);

	/* clear all interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

	/* disable interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, imxdma->dma_device.cap_mask);

	/* Initialize 2D global parameters */
	for (i = 0; i < IMX_DMA_2D_SLOTS; i++)
		imxdma->slots_2d[i].count = 0;

	spin_lock_init(&imxdma->lock);

	/* Initialize channel parameters */
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		if (cpu_is_mx21() || cpu_is_mx27()) {
			ret = request_irq(MX2x_INT_DMACH0 + i,
					  dma_irq_handler, 0, "DMA", imxdma);
			if (ret) {
				dev_warn(imxdma->dev, "Can't register IRQ %d "
					 "for DMA channel %d\n",
					 MX2x_INT_DMACH0 + i, i);
				goto err_init;
			}
			init_timer(&imxdmac->watchdog);
			imxdmac->watchdog.function = &imxdma_watchdog;
			imxdmac->watchdog.data = (unsigned long)imxdmac;
		}

		imxdmac->imxdma = imxdma;

		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		INIT_LIST_HEAD(&imxdmac->ld_active);

		tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
			     (unsigned long)imxdmac);
		imxdmac->chan.device = &imxdma->dma_device;
		dma_cookie_init(&imxdmac->chan);
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
			      &imxdma->dma_device.channels);
	}

	imxdma->dev = &pdev->dev;
	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
	imxdma->dma_device.device_control = imxdma_control;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */
	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto err_init;
	}

	return 0;
err_init:

	if (cpu_is_mx21() || cpu_is_mx27()) {
		while (--i >= 0)
			free_irq(MX2x_INT_DMACH0 + i, NULL);
	} else if (cpu_is_mx1()) {
		free_irq(MX1_DMA_INT, NULL);
		free_irq(MX1_DMA_ERR, NULL);
	}
err_enable:
	clk_disable_unprepare(imxdma->dma_ipg);
	clk_disable_unprepare(imxdma->dma_ahb);
err_clk:
	kfree(imxdma);
	return ret;
}
static int __exit imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
	int i;

	dma_async_device_unregister(&imxdma->dma_device);

	if (cpu_is_mx21() || cpu_is_mx27()) {
		for (i = 0; i < IMX_DMA_CHANNELS; i++)
			free_irq(MX2x_INT_DMACH0 + i, NULL);
	} else if (cpu_is_mx1()) {
		free_irq(MX1_DMA_INT, NULL);
		free_irq(MX1_DMA_ERR, NULL);
	}

	clk_disable_unprepare(imxdma->dma_ipg);
	clk_disable_unprepare(imxdma->dma_ahb);
	kfree(imxdma);

	return 0;
}
static struct platform_driver imxdma_driver = {
	.driver		= {
		.name	= "imx-dma",
	},
	.remove		= __exit_p(imxdma_remove),
};
static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);
MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");
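
/*
 * Editor's sketch (not in the original file): how a board-level client
 * of this kernel generation would grab one of these channels.
 * imxdma_alloc_chan_resources() reads the request line from
 * chan->private, so the filter attaches a struct imx_dma_data (from
 * <mach/dma.h>); the function names and the request line value are
 * placeholders.
 */
#if 0
static bool my_imxdma_filter(struct dma_chan *chan, void *param)
{
	chan->private = param;		/* struct imx_dma_data * */
	return true;
}

static struct dma_chan *my_request_imxdma_chan(void)
{
	static struct imx_dma_data data = {
		.dma_request = 0,	/* placeholder hw request line */
	};
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, my_imxdma_filter, &data);
}
#endif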