/*
 * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
 *
 * Refer to drivers/dma/imx-sdma.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/stmp_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/list.h>

#include <asm/irq.h>

#include "dmaengine.h"

/*
 * NOTE: The term "PIO" throughout the mxs-dma implementation means
 * PIO mode of mxs apbh-dma and apbx-dma.  With this working mode,
 * dma can program the controller registers of peripheral devices.
 */
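
/*
 * Illustrative sketch (not part of the original file): a client driver
 * hands PIO words to this controller by passing an array of register
 * values as the sgl pointer of dmaengine_prep_slave_sg() with direction
 * DMA_TRANS_NONE, which the DMA_TRANS_NONE branch of
 * mxs_dma_prep_slave_sg() below unpacks. The value and count here are
 * hypothetical placeholders:
 *
 *	u32 pio[1] = { some_ctrl0_value };
 *	struct dma_async_tx_descriptor *desc;
 *
 *	desc = dmaengine_prep_slave_sg(chan, (struct scatterlist *)pio,
 *				       1, DMA_TRANS_NONE, DMA_CTRL_ACK);
 */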

#define dma_is_apbh(mxs_dma)	((mxs_dma)->type == MXS_DMA_APBH)
#define apbh_is_old(mxs_dma)	((mxs_dma)->dev_id == IMX23_DMA)

#define HW_APBHX_CTRL0				0x000
#define BM_APBH_CTRL0_APB_BURST8_EN		(1 << 29)
#define BM_APBH_CTRL0_APB_BURST_EN		(1 << 28)
#define BP_APBH_CTRL0_RESET_CHANNEL		16
#define HW_APBHX_CTRL1				0x010
#define HW_APBHX_CTRL2				0x020
#define HW_APBHX_CHANNEL_CTRL			0x030
#define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL	16

/*
 * The offset of the NXTCMDAR register differs with both the DMA type and
 * version, while the stride between channels is always 0x70.
 */
#define HW_APBHX_CHn_NXTCMDAR(d, n) \
	(((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70)
#define HW_APBHX_CHn_SEMA(d, n) \
	(((dma_is_apbh(d) && apbh_is_old(d)) ? 0x080 : 0x140) + (n) * 0x70)
#define HW_APBHX_CHn_BAR(d, n) \
	(((dma_is_apbh(d) && apbh_is_old(d)) ? 0x070 : 0x130) + (n) * 0x70)
#define HW_APBX_CHn_DEBUG1(d, n)	(0x150 + (n) * 0x70)
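
/*
 * Worked example of the macros above: for channel 4, the old i.MX23
 * APBH layout puts NXTCMDAR at 0x050 + 4 * 0x70 = 0x210, while the
 * newer layout puts it at 0x110 + 4 * 0x70 = 0x2d0.
 */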

/*
 * ccw bits definitions
 *
 * COMMAND:		0..1	(2)
 * CHAIN:		2	(1)
 * IRQ:			3	(1)
 * NAND_LOCK:		4	(1) - not implemented
 * NAND_WAIT4READY:	5	(1) - not implemented
 * DEC_SEM:		6	(1)
 * WAIT4END:		7	(1)
 * HALT_ON_TERMINATE:	8	(1)
 * TERMINATE_FLUSH:	9	(1)
 * RESERVED:		10..11	(2)
 * PIO_NUM:		12..15	(4)
 */
#define BP_CCW_COMMAND		0
#define BM_CCW_COMMAND		(3 << 0)
#define CCW_CHAIN		(1 << 2)
#define CCW_IRQ			(1 << 3)
#define CCW_DEC_SEM		(1 << 6)
#define CCW_WAIT4END		(1 << 7)
#define CCW_HALT_ON_TERM	(1 << 8)
#define CCW_TERM_FLUSH		(1 << 9)
#define BP_CCW_PIO_NUM		12
#define BM_CCW_PIO_NUM		(0xf << 12)

#define BF_CCW(value, field)	(((value) << BP_CCW_##field) & BM_CCW_##field)
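
/*
 * Example expansion: BF_CCW(3, PIO_NUM) is ((3 << BP_CCW_PIO_NUM) &
 * BM_CCW_PIO_NUM) = (3 << 12) & (0xf << 12) = 0x3000, i.e. "3 PIO
 * words" encoded into bits 12..15 of the ccw.
 */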

#define MXS_DMA_CMD_NO_XFER	0
#define MXS_DMA_CMD_WRITE	1
#define MXS_DMA_CMD_READ	2
#define MXS_DMA_CMD_DMA_SENSE	3	/* not implemented */

struct mxs_dma_ccw {
	u32 next;
	u16 bits;
	u16 xfer_bytes;
#define MAX_XFER_BYTES	0xff00
	u32 bufaddr;
#define MXS_PIO_WORDS	16
	u32 pio_words[MXS_PIO_WORDS];
};

#define CCW_BLOCK_SIZE	(4 * PAGE_SIZE)
#define NUM_CCW	(int)(CCW_BLOCK_SIZE / sizeof(struct mxs_dma_ccw))
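
/*
 * Rough numbers, assuming the common 4 KiB PAGE_SIZE on these SoCs:
 * CCW_BLOCK_SIZE is then 16 KiB and sizeof(struct mxs_dma_ccw) is
 * 76 bytes (4 + 2 + 2 + 4 + 16 * 4), so NUM_CCW works out to 215
 * command words per channel.
 */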

struct mxs_dma_chan {
	struct mxs_dma_engine *mxs_dma;
	struct dma_chan chan;
	struct dma_async_tx_descriptor desc;
	struct tasklet_struct tasklet;
	unsigned int chan_irq;
	struct mxs_dma_ccw *ccw;
	dma_addr_t ccw_phys;
	int desc_count;
	enum dma_status status;
	unsigned int flags;
	bool reset;
#define MXS_DMA_SG_LOOP		(1 << 0)
#define MXS_DMA_USE_SEMAPHORE	(1 << 1)
};

#define MXS_DMA_CHANNELS	16
#define MXS_DMA_CHANNELS_MASK	0xffff

enum mxs_dma_devtype {
	MXS_DMA_APBH,
	MXS_DMA_APBX,
};

enum mxs_dma_id {
	IMX23_DMA,
	IMX28_DMA,
};

struct mxs_dma_engine {
	enum mxs_dma_id dev_id;
	enum mxs_dma_devtype type;
	void __iomem *base;
	struct clk *clk;
	struct dma_device dma_device;
	struct device_dma_parameters dma_parms;
	struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS];
	struct platform_device *pdev;
	unsigned int nr_channels;
};

struct mxs_dma_type {
	enum mxs_dma_id id;
	enum mxs_dma_devtype type;
};

static struct mxs_dma_type mxs_dma_types[] = {
	{
		.id = IMX23_DMA,
		.type = MXS_DMA_APBH,
	}, {
		.id = IMX23_DMA,
		.type = MXS_DMA_APBX,
	}, {
		.id = IMX28_DMA,
		.type = MXS_DMA_APBH,
	}, {
		.id = IMX28_DMA,
		.type = MXS_DMA_APBX,
	}
};

static struct platform_device_id mxs_dma_ids[] = {
	{
		.name = "imx23-dma-apbh",
		.driver_data = (kernel_ulong_t) &mxs_dma_types[0],
	}, {
		.name = "imx23-dma-apbx",
		.driver_data = (kernel_ulong_t) &mxs_dma_types[1],
	}, {
		.name = "imx28-dma-apbh",
		.driver_data = (kernel_ulong_t) &mxs_dma_types[2],
	}, {
		.name = "imx28-dma-apbx",
		.driver_data = (kernel_ulong_t) &mxs_dma_types[3],
	}, {
		/* end of list */
	}
};

static const struct of_device_id mxs_dma_dt_ids[] = {
	{ .compatible = "fsl,imx23-dma-apbh", .data = &mxs_dma_ids[0], },
	{ .compatible = "fsl,imx23-dma-apbx", .data = &mxs_dma_ids[1], },
	{ .compatible = "fsl,imx28-dma-apbh", .data = &mxs_dma_ids[2], },
	{ .compatible = "fsl,imx28-dma-apbx", .data = &mxs_dma_ids[3], },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_dma_dt_ids);

static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct mxs_dma_chan, chan);
}

static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
{
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/*
	 * mxs dma channel resets can cause a channel stall. To recover from a
	 * channel stall, we have to reset the whole DMA engine. To avoid this,
	 * we use cyclic DMA with semaphores, which are incremented in
	 * mxs_dma_int_handler. To reset the channel, we can simply stop
	 * writing into the semaphore counter.
	 */
	if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE &&
	    mxs_chan->flags & MXS_DMA_SG_LOOP) {
		mxs_chan->reset = true;
	} else if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) {
		writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL),
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
	} else {
		unsigned long elapsed = 0;
		const unsigned long max_wait = 50000; /* 50ms */
		void __iomem *reg_dbg1 = mxs_dma->base +
				HW_APBX_CHn_DEBUG1(mxs_dma, chan_id);

		/*
		 * On i.MX28 APBX, the DMA channel can stop working if we reset
		 * the channel while it is in READ_FLUSH (0x08) state.
		 * We wait here until the channel leaves that state, then
		 * trigger the reset. The busy wait is bounded at 50 ms, so it
		 * cannot stall the kernel for long.
		 */
		while ((readl(reg_dbg1) & 0xf) == 0x8 && elapsed < max_wait) {
			udelay(100);
			elapsed += 100;
		}

		if (elapsed >= max_wait)
			dev_err(&mxs_chan->mxs_dma->pdev->dev,
				"Failed waiting for the DMA channel %d to leave state READ_FLUSH, trying to reset channel in READ_FLUSH state now\n",
				chan_id);

		writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL),
			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
	}

	mxs_chan->status = DMA_COMPLETE;
}

static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
{
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/* set cmd_addr up */
	writel(mxs_chan->ccw_phys,
		mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id));

	/* write 1 to SEMA to kick off the channel */
	if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE &&
	    mxs_chan->flags & MXS_DMA_SG_LOOP) {
		/*
		 * A cyclic DMA consists of at least 2 segments, so initialize
		 * the semaphore with 2 so we have enough time to add 1 to the
		 * semaphore if we need to.
		 */
		writel(2, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
	} else {
		writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
	}
	mxs_chan->reset = false;
}

static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
{
	mxs_chan->status = DMA_COMPLETE;
}

static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan)
{
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/* freeze the channel */
	if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
	else
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);

	mxs_chan->status = DMA_PAUSED;
}

static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan)
{
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/* unfreeze the channel */
	if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_CLR);
	else
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_CLR);

	mxs_chan->status = DMA_IN_PROGRESS;
}

static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	return dma_cookie_assign(tx);
}

static void mxs_dma_tasklet(unsigned long data)
{
	struct mxs_dma_chan *mxs_chan = (struct mxs_dma_chan *) data;

	if (mxs_chan->desc.callback)
		mxs_chan->desc.callback(mxs_chan->desc.callback_param);
}

static int mxs_dma_irq_to_chan(struct mxs_dma_engine *mxs_dma, int irq)
{
	int i;

	for (i = 0; i != mxs_dma->nr_channels; ++i)
		if (mxs_dma->mxs_chans[i].chan_irq == irq)
			return i;

	return -EINVAL;
}

static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
{
	struct mxs_dma_engine *mxs_dma = dev_id;
	struct mxs_dma_chan *mxs_chan;
	u32 completed;
	u32 err;
	int chan = mxs_dma_irq_to_chan(mxs_dma, irq);

	if (chan < 0)
		return IRQ_NONE;

	/* completion status */
	completed = readl(mxs_dma->base + HW_APBHX_CTRL1);
	completed = (completed >> chan) & 0x1;

	/* Clear interrupt */
	writel((1 << chan),
		mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR);

	/* error status */
	err = readl(mxs_dma->base + HW_APBHX_CTRL2);
	err &= (1 << (MXS_DMA_CHANNELS + chan)) | (1 << chan);

	/*
	 * error status bit is in the upper 16 bits, error irq bit in the lower
	 * 16 bits. We transform it into a simpler error code:
	 * err: 0x00 = no error, 0x01 = TERMINATION, 0x02 = BUS_ERROR
	 */
	err = (err >> (MXS_DMA_CHANNELS + chan)) + (err >> chan);

	/* Clear error irq */
	writel((1 << chan),
		mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR);

	/*
	 * When the completion bit and the termination-error bit are set at
	 * the same time, we do not treat it as an error. In other words, it
	 * only becomes an error we need to handle here if it is a bus error,
	 * or a termination error without a completion. Since 0x01 is the
	 * termination error code, subtracting (err & completed) leaves
	 * exactly the real error cases.
	 */
	err -= err & completed;
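
	/*
	 * Example: a termination that coincides with a completion gives
	 * err = 0x01 and completed = 0x01, so err - (err & completed) = 0
	 * and the interrupt is handled as a normal completion.
	 */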

	mxs_chan = &mxs_dma->mxs_chans[chan];

	if (err) {
		dev_dbg(mxs_dma->dma_device.dev,
			"%s: error in channel %d\n", __func__,
			chan);
		mxs_chan->status = DMA_ERROR;
		mxs_dma_reset_chan(mxs_chan);
	} else if (mxs_chan->status != DMA_COMPLETE) {
		if (mxs_chan->flags & MXS_DMA_SG_LOOP) {
			mxs_chan->status = DMA_IN_PROGRESS;
			if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE)
				writel(1, mxs_dma->base +
					HW_APBHX_CHn_SEMA(mxs_dma, chan));
		} else {
			mxs_chan->status = DMA_COMPLETE;
		}
	}

	if (mxs_chan->status == DMA_COMPLETE) {
		if (mxs_chan->reset)
			return IRQ_HANDLED;
		dma_cookie_complete(&mxs_chan->desc);
	}

	/* schedule tasklet on this channel */
	tasklet_schedule(&mxs_chan->tasklet);

	return IRQ_HANDLED;
}

static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int ret;

	mxs_chan->ccw = dma_zalloc_coherent(mxs_dma->dma_device.dev,
					    CCW_BLOCK_SIZE,
					    &mxs_chan->ccw_phys, GFP_KERNEL);
	if (!mxs_chan->ccw) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	if (mxs_chan->chan_irq != NO_IRQ) {
		ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
					0, "mxs-dma", mxs_dma);
		if (ret)
			goto err_irq;
	}

	ret = clk_prepare_enable(mxs_dma->clk);
	if (ret)
		goto err_clk;

	mxs_dma_reset_chan(mxs_chan);

	dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
	mxs_chan->desc.tx_submit = mxs_dma_tx_submit;

	/* the descriptor is ready */
	async_tx_ack(&mxs_chan->desc);

	return 0;

err_clk:
	free_irq(mxs_chan->chan_irq, mxs_dma);
err_irq:
	dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE,
			mxs_chan->ccw, mxs_chan->ccw_phys);
err_alloc:
	return ret;
}

static void mxs_dma_free_chan_resources(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;

	mxs_dma_disable_chan(mxs_chan);

	free_irq(mxs_chan->chan_irq, mxs_dma);

	dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE,
			mxs_chan->ccw, mxs_chan->ccw_phys);

	clk_disable_unprepare(mxs_dma->clk);
}

/*
 * How to use the flags for ->device_prep_slave_sg() :
 *    [1] If there is only one DMA command in the DMA chain, the code should be:
 *            ......
 *            ->device_prep_slave_sg(DMA_CTRL_ACK);
 *            ......
 *    [2] If there are two DMA commands in the DMA chain, the code should be
 *            ......
 *            ->device_prep_slave_sg(0);
 *            ......
 *            ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *            ......
 *    [3] If there are more than two DMA commands in the DMA chain, the code
 *        should be:
 *            ......
 *            ->device_prep_slave_sg(0);                                // First
 *            ......
 *            ->device_prep_slave_sg(DMA_PREP_INTERRUPT [| DMA_CTRL_ACK]);
 *            ......
 *            ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK); // Last
 *            ......
 */
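
/*
 * A minimal sketch of case [2] above, as a hypothetical client might
 * write it (the channel and scatterlists are placeholders, not defined
 * in this file):
 *
 *	desc = dmaengine_prep_slave_sg(chan, sgl_cmd, 1,
 *				       DMA_MEM_TO_DEV, 0);
 *	desc = dmaengine_prep_slave_sg(chan, sgl_data, 1, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */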
static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	struct mxs_dma_ccw *ccw;
	struct scatterlist *sg;
	u32 i, j;
	u32 *pio;
	bool append = flags & DMA_PREP_INTERRUPT;
	int idx = append ? mxs_chan->desc_count : 0;

	if (mxs_chan->status == DMA_IN_PROGRESS && !append)
		return NULL;

	if (sg_len + (append ? idx : 0) > NUM_CCW) {
		dev_err(mxs_dma->dma_device.dev,
				"maximum number of sg exceeded: %d > %d\n",
				sg_len, NUM_CCW);
		goto err_out;
	}

	mxs_chan->status = DMA_IN_PROGRESS;
	mxs_chan->flags = 0;

	/*
	 * If the sg is prepared with append flag set, the sg
	 * will be appended to the last prepared sg.
	 */
	if (append) {
		BUG_ON(idx < 1);
		ccw = &mxs_chan->ccw[idx - 1];
		ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
		ccw->bits |= CCW_CHAIN;
		ccw->bits &= ~CCW_IRQ;
		ccw->bits &= ~CCW_DEC_SEM;
	} else {
		idx = 0;
	}

	if (direction == DMA_TRANS_NONE) {
		ccw = &mxs_chan->ccw[idx++];
		pio = (u32 *) sgl;

		for (j = 0; j < sg_len;)
			ccw->pio_words[j++] = *pio++;

		ccw->bits = 0;
		ccw->bits |= CCW_IRQ;
		ccw->bits |= CCW_DEC_SEM;
		if (flags & DMA_CTRL_ACK)
			ccw->bits |= CCW_WAIT4END;
		ccw->bits |= CCW_HALT_ON_TERM;
		ccw->bits |= CCW_TERM_FLUSH;
		ccw->bits |= BF_CCW(sg_len, PIO_NUM);
		ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
	} else {
		for_each_sg(sgl, sg, sg_len, i) {
			if (sg_dma_len(sg) > MAX_XFER_BYTES) {
				dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n",
						sg_dma_len(sg), MAX_XFER_BYTES);
				goto err_out;
			}

			ccw = &mxs_chan->ccw[idx++];

			ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
			ccw->bufaddr = sg->dma_address;
			ccw->xfer_bytes = sg_dma_len(sg);

			ccw->bits = 0;
			ccw->bits |= CCW_CHAIN;
			ccw->bits |= CCW_HALT_ON_TERM;
			ccw->bits |= CCW_TERM_FLUSH;
			ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
					MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ,
					COMMAND);

			if (i + 1 == sg_len) {
				ccw->bits &= ~CCW_CHAIN;
				ccw->bits |= CCW_IRQ;
				ccw->bits |= CCW_DEC_SEM;
				if (flags & DMA_CTRL_ACK)
					ccw->bits |= CCW_WAIT4END;
			}
		}
	}
	mxs_chan->desc_count = idx;

	return &mxs_chan->desc;

err_out:
	mxs_chan->status = DMA_ERROR;
	return NULL;
}

static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	u32 num_periods = buf_len / period_len;
	u32 i = 0, buf = 0;

	if (mxs_chan->status == DMA_IN_PROGRESS)
		return NULL;

	mxs_chan->status = DMA_IN_PROGRESS;
	mxs_chan->flags |= MXS_DMA_SG_LOOP;
	mxs_chan->flags |= MXS_DMA_USE_SEMAPHORE;

	if (num_periods > NUM_CCW) {
		dev_err(mxs_dma->dma_device.dev,
				"maximum number of sg exceeded: %d > %d\n",
				num_periods, NUM_CCW);
		goto err_out;
	}

	if (period_len > MAX_XFER_BYTES) {
		dev_err(mxs_dma->dma_device.dev,
				"maximum period size exceeded: %zu > %d\n",
				period_len, MAX_XFER_BYTES);
		goto err_out;
	}

	while (buf < buf_len) {
		struct mxs_dma_ccw *ccw = &mxs_chan->ccw[i];

		if (i + 1 == num_periods)
			ccw->next = mxs_chan->ccw_phys;
		else
			ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * (i + 1);

		ccw->bufaddr = dma_addr;
		ccw->xfer_bytes = period_len;

		ccw->bits = 0;
		ccw->bits |= CCW_CHAIN;
		ccw->bits |= CCW_IRQ;
		ccw->bits |= CCW_HALT_ON_TERM;
		ccw->bits |= CCW_TERM_FLUSH;
		ccw->bits |= CCW_DEC_SEM;
		ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
				MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND);

		dma_addr += period_len;
		buf += period_len;

		i++;
	}
	mxs_chan->desc_count = i;

	return &mxs_chan->desc;

err_out:
	mxs_chan->status = DMA_ERROR;
	return NULL;
}

static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	int ret = 0;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		mxs_dma_reset_chan(mxs_chan);
		mxs_dma_disable_chan(mxs_chan);
		break;
	case DMA_PAUSE:
		mxs_dma_pause_chan(mxs_chan);
		break;
	case DMA_RESUME:
		mxs_dma_resume_chan(mxs_chan);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
			dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	u32 residue = 0;

	if (mxs_chan->status == DMA_IN_PROGRESS &&
			mxs_chan->flags & MXS_DMA_SG_LOOP) {
		struct mxs_dma_ccw *last_ccw;
		u32 bar;

		last_ccw = &mxs_chan->ccw[mxs_chan->desc_count - 1];
		residue = last_ccw->xfer_bytes + last_ccw->bufaddr;

		bar = readl(mxs_dma->base +
				HW_APBHX_CHn_BAR(mxs_dma, chan->chan_id));
		residue -= bar;
	}
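
	/*
	 * Worked example for the cyclic case: with four 0x1000-byte periods
	 * starting at 0x40000000, the last ccw ends at 0x40004000; if the
	 * channel's BAR currently reads 0x40001200, the reported residue is
	 * 0x40004000 - 0x40001200 = 0x2e00 bytes.
	 */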

	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
			residue);

	return mxs_chan->status;
}

static void mxs_dma_issue_pending(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);

	mxs_dma_enable_chan(mxs_chan);
}

static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
{
	int ret;

	ret = clk_prepare_enable(mxs_dma->clk);
	if (ret)
		return ret;

	ret = stmp_reset_block(mxs_dma->base);
	if (ret)
		goto err_out;

	/* enable apbh burst */
	if (dma_is_apbh(mxs_dma)) {
		writel(BM_APBH_CTRL0_APB_BURST_EN,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
		writel(BM_APBH_CTRL0_APB_BURST8_EN,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
	}

	/* enable irq for all the channels */
	writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
		mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_SET);

err_out:
	clk_disable_unprepare(mxs_dma->clk);
	return ret;
}

struct mxs_dma_filter_param {
	struct device_node *of_node;
	unsigned int chan_id;
};

static bool mxs_dma_filter_fn(struct dma_chan *chan, void *fn_param)
{
	struct mxs_dma_filter_param *param = fn_param;
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_irq;

	if (mxs_dma->dma_device.dev->of_node != param->of_node)
		return false;

	if (chan->chan_id != param->chan_id)
		return false;

	chan_irq = platform_get_irq(mxs_dma->pdev, param->chan_id);
	if (chan_irq < 0)
		return false;

	mxs_chan->chan_irq = chan_irq;

	return true;
}

static struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec,
			       struct of_dma *ofdma)
{
	struct mxs_dma_engine *mxs_dma = ofdma->of_dma_data;
	dma_cap_mask_t mask = mxs_dma->dma_device.cap_mask;
	struct mxs_dma_filter_param param;

	if (dma_spec->args_count != 1)
		return NULL;

	param.of_node = ofdma->of_node;
	param.chan_id = dma_spec->args[0];

	if (param.chan_id >= mxs_dma->nr_channels)
		return NULL;

	return dma_request_channel(mask, mxs_dma_filter_fn, &param);
}
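
/*
 * The single cell checked above is the channel id, so a client node
 * would reference a channel roughly like this (illustrative device
 * tree snippet; the phandle name is made up):
 *
 *	dmas = <&dma_apbx 7>;
 *	dma-names = "rx-tx";
 */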

static int __init mxs_dma_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct platform_device_id *id_entry;
	const struct of_device_id *of_id;
	const struct mxs_dma_type *dma_type;
	struct mxs_dma_engine *mxs_dma;
	struct resource *iores;
	int ret, i;

	mxs_dma = devm_kzalloc(&pdev->dev, sizeof(*mxs_dma), GFP_KERNEL);
	if (!mxs_dma)
		return -ENOMEM;

	ret = of_property_read_u32(np, "dma-channels", &mxs_dma->nr_channels);
	if (ret) {
		dev_err(&pdev->dev, "failed to read dma-channels\n");
		return ret;
	}

	of_id = of_match_device(mxs_dma_dt_ids, &pdev->dev);
	if (of_id)
		id_entry = of_id->data;
	else
		id_entry = platform_get_device_id(pdev);

	dma_type = (struct mxs_dma_type *)id_entry->driver_data;
	mxs_dma->type = dma_type->type;
	mxs_dma->dev_id = dma_type->id;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mxs_dma->base = devm_ioremap_resource(&pdev->dev, iores);
	if (IS_ERR(mxs_dma->base))
		return PTR_ERR(mxs_dma->base);

	mxs_dma->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(mxs_dma->clk))
		return PTR_ERR(mxs_dma->clk);

	dma_cap_set(DMA_SLAVE, mxs_dma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, mxs_dma->dma_device.cap_mask);

	INIT_LIST_HEAD(&mxs_dma->dma_device.channels);

	/* Initialize channel parameters */
	for (i = 0; i < MXS_DMA_CHANNELS; i++) {
		struct mxs_dma_chan *mxs_chan = &mxs_dma->mxs_chans[i];

		mxs_chan->mxs_dma = mxs_dma;
		mxs_chan->chan.device = &mxs_dma->dma_device;
		dma_cookie_init(&mxs_chan->chan);

		tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet,
			     (unsigned long) mxs_chan);

		/* Add the channel to mxs_chan list */
		list_add_tail(&mxs_chan->chan.device_node,
			&mxs_dma->dma_device.channels);
	}

	ret = mxs_dma_init(mxs_dma);
	if (ret)
		return ret;

	mxs_dma->pdev = pdev;
	mxs_dma->dma_device.dev = &pdev->dev;

	/*
	 * mxs_dma supports at most MAX_XFER_BYTES per sg segment,
	 * i.e. 0xff00 = 65280 bytes.
	 */
	mxs_dma->dma_device.dev->dma_parms = &mxs_dma->dma_parms;
	dma_set_max_seg_size(mxs_dma->dma_device.dev, MAX_XFER_BYTES);

	mxs_dma->dma_device.device_alloc_chan_resources = mxs_dma_alloc_chan_resources;
	mxs_dma->dma_device.device_free_chan_resources = mxs_dma_free_chan_resources;
	mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status;
	mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg;
	mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic;
	mxs_dma->dma_device.device_control = mxs_dma_control;
	mxs_dma->dma_device.device_issue_pending = mxs_dma_issue_pending;

	ret = dma_async_device_register(&mxs_dma->dma_device);
	if (ret) {
		dev_err(mxs_dma->dma_device.dev, "unable to register\n");
		return ret;
	}

	ret = of_dma_controller_register(np, mxs_dma_xlate, mxs_dma);
	if (ret) {
		dev_err(mxs_dma->dma_device.dev,
			"failed to register controller\n");
		dma_async_device_unregister(&mxs_dma->dma_device);
	}

	dev_info(mxs_dma->dma_device.dev, "initialized\n");

	return 0;
}

static struct platform_driver mxs_dma_driver = {
	.driver		= {
		.name	= "mxs-dma",
		.of_match_table = mxs_dma_dt_ids,
	},
	.id_table	= mxs_dma_ids,
};

static int __init mxs_dma_module_init(void)
{
	return platform_driver_probe(&mxs_dma_driver, mxs_dma_probe);
}
subsys_initcall(mxs_dma_module_init);