/*
 * drivers/dma/imx-sdma.c
 *
 * This file contains a driver for the Freescale Smart DMA engine
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 *
 * Based on code from Freescale:
 *
 * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include <asm/irq.h>
#include <linux/platform_data/dma-imx-sdma.h>
#include <linux/platform_data/dma-imx.h>

#include "dmaengine.h"

/* SDMA registers */
#define SDMA_H_C0PTR		0x000
#define SDMA_H_INTR		0x004
#define SDMA_H_STATSTOP		0x008
#define SDMA_H_START		0x00c
#define SDMA_H_EVTOVR		0x010
#define SDMA_H_DSPOVR		0x014
#define SDMA_H_HOSTOVR		0x018
#define SDMA_H_EVTPEND		0x01c
#define SDMA_H_DSPENBL		0x020
#define SDMA_H_RESET		0x024
#define SDMA_H_EVTERR		0x028
#define SDMA_H_INTRMSK		0x02c
#define SDMA_H_PSW		0x030
#define SDMA_H_EVTERRDBG	0x034
#define SDMA_H_CONFIG		0x038
#define SDMA_ONCE_ENB		0x040
#define SDMA_ONCE_DATA		0x044
#define SDMA_ONCE_INSTR		0x048
#define SDMA_ONCE_STAT		0x04c
#define SDMA_ONCE_CMD		0x050
#define SDMA_EVT_MIRROR		0x054
#define SDMA_ILLINSTADDR	0x058
#define SDMA_CHN0ADDR		0x05c
#define SDMA_ONCE_RTB		0x060
#define SDMA_XTRIG_CONF1	0x070
#define SDMA_XTRIG_CONF2	0x074
#define SDMA_CHNENBL0_IMX35	0x200
#define SDMA_CHNENBL0_IMX31	0x080
#define SDMA_CHNPRI_0		0x100

/*
 * Buffer descriptor status values.
 */
#define BD_DONE  0x01
#define BD_WRAP  0x02
#define BD_CONT  0x04
#define BD_INTR  0x08
#define BD_RROR  0x10
#define BD_LAST  0x20
#define BD_EXTD  0x80

/*
 * Data Node descriptor status values.
 */
#define DND_END_OF_FRAME  0x80
#define DND_END_OF_XFER   0x40
#define DND_DONE          0x20
#define DND_UNUSED        0x01

/*
 * IPCV2 descriptor status values.
 */
#define BD_IPCV2_END_OF_FRAME  0x40

#define IPCV2_MAX_NODES        50

/*
 * Error bit set in the CCB status field by the SDMA,
 * in setbd routine, in case of a transfer error
 */
#define DATA_ERROR  0x10000000

/*
 * Buffer descriptor commands.
 */
#define C0_ADDR             0x01
#define C0_LOAD             0x02
#define C0_DUMP             0x03
#define C0_SETCTX           0x07
#define C0_GETCTX           0x03
#define C0_SETDM            0x01
#define C0_SETPM            0x04
#define C0_GETDM            0x02
#define C0_GETPM            0x08

/*
 * Change endianness indicator in the BD command field
 */
#define CHANGE_ENDIANNESS   0x80

127 * Mode/Count of data node descriptors - IPCv2
129 struct sdma_mode_count {
130 u32 count : 16; /* size of the buffer pointed by this BD */
131 u32 status : 8; /* E,R,I,C,W,D status bits stored here */
132 u32 command : 8; /* command mostlky used for channel 0 */
136 * Buffer descriptor
138 struct sdma_buffer_descriptor {
139 struct sdma_mode_count mode;
140 u32 buffer_addr; /* address of the buffer described */
141 u32 ext_buffer_addr; /* extended buffer address */
142 } __attribute__ ((packed));
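/*
 * Per the code below, a channel's descriptors form a simple ring: the
 * driver hands a BD to the engine by setting BD_DONE, the engine clears
 * it again once the transfer completes, and sdma_update_channel_loop()
 * walks the ring from buf_tail re-arming finished descriptors. BD_WRAP
 * on the last descriptor sends the engine back to the first one.
 */
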
/**
 * struct sdma_channel_control - Channel control Block
 *
 * @current_bd_ptr	current buffer descriptor processed
 * @base_bd_ptr		first element of buffer descriptor array
 * @unused		padding. The SDMA engine expects an array of 128 byte
 *			control blocks
 */
struct sdma_channel_control {
	u32 current_bd_ptr;
	u32 base_bd_ptr;
	u32 unused[2];
} __attribute__ ((packed));

/**
 * struct sdma_state_registers - SDMA context for a channel
 *
 * @pc:		program counter
 * @t:		test bit: status of arithmetic & test instruction
 * @rpc:	return program counter
 * @sf:		source fault while loading data
 * @spc:	loop start program counter
 * @df:		destination fault while storing data
 * @epc:	loop end program counter
 * @lm:		loop mode
 */
struct sdma_state_registers {
	u32 pc     :14;
	u32 unused1: 1;
	u32 t      : 1;
	u32 rpc    :14;
	u32 unused0: 1;
	u32 sf     : 1;
	u32 spc    :14;
	u32 unused2: 1;
	u32 df     : 1;
	u32 epc    :14;
	u32 lm     : 2;
} __attribute__ ((packed));

/**
 * struct sdma_context_data - sdma context specific to a channel
 *
 * @channel_state:	channel state bits
 * @gReg:		general registers
 * @mda:		burst dma destination address register
 * @msa:		burst dma source address register
 * @ms:			burst dma status register
 * @md:			burst dma data register
 * @pda:		peripheral dma destination address register
 * @psa:		peripheral dma source address register
 * @ps:			peripheral dma status register
 * @pd:			peripheral dma data register
 * @ca:			CRC polynomial register
 * @cs:			CRC accumulator register
 * @dda:		dedicated core destination address register
 * @dsa:		dedicated core source address register
 * @ds:			dedicated core status register
 * @dd:			dedicated core data register
 */
struct sdma_context_data {
	struct sdma_state_registers  channel_state;
	u32  gReg[8];
	u32  mda;
	u32  msa;
	u32  ms;
	u32  md;
	u32  pda;
	u32  psa;
	u32  ps;
	u32  pd;
	u32  ca;
	u32  cs;
	u32  dda;
	u32  dsa;
	u32  ds;
	u32  dd;
	u32  scratch0;
	u32  scratch1;
	u32  scratch2;
	u32  scratch3;
	u32  scratch4;
	u32  scratch5;
	u32  scratch6;
	u32  scratch7;
} __attribute__ ((packed));

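/*
 * The whole context is 32 words (128 bytes): two words of packed state
 * registers, eight general registers, and the functional-unit and
 * scratch registers above. sdma_load_context() copies it into SDMA data
 * memory at word offset 2048 + 32 * channel via a C0_SETDM command on
 * channel 0.
 */
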
#define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor))

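/*
 * With 4 KiB pages and the 12-byte packed descriptor above, this comes
 * to 341 BDs per channel; each ring lives in one DMA-coherent page (see
 * sdma_request_channel()).
 */
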
struct sdma_engine;

/**
 * struct sdma_channel - housekeeping for an SDMA channel
 *
 * @sdma		pointer to the SDMA engine for this channel
 * @channel		the channel number, matches dmaengine chan_id + 1
 * @direction		transfer type. Needed for setting SDMA script
 * @peripheral_type	Peripheral type. Needed for setting SDMA script
 * @event_id0		aka dma request line
 * @event_id1		for channels that use 2 events
 * @word_size		peripheral access size
 * @buf_tail		ID of the buffer that was processed
 * @num_bd		number of descriptors currently in use, at most NUM_BD
 */
struct sdma_channel {
	struct sdma_engine		*sdma;
	unsigned int			channel;
	enum dma_transfer_direction	direction;
	enum sdma_peripheral_type	peripheral_type;
	unsigned int			event_id0;
	unsigned int			event_id1;
	enum dma_slave_buswidth		word_size;
	unsigned int			buf_tail;
	unsigned int			num_bd;
	unsigned int			period_len;
	struct sdma_buffer_descriptor	*bd;
	dma_addr_t			bd_phys;
	unsigned int			pc_from_device, pc_to_device;
	unsigned long			flags;
	dma_addr_t			per_address;
	unsigned long			event_mask[2];
	unsigned long			watermark_level;
	u32				shp_addr, per_addr;
	struct dma_chan			chan;
	spinlock_t			lock;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	unsigned int			chn_count;
	unsigned int			chn_real_count;
	struct tasklet_struct		tasklet;
	struct imx_dma_data		data;
};

#define IMX_DMA_SG_LOOP		BIT(0)

#define MAX_DMA_CHANNELS 32
#define MXC_SDMA_DEFAULT_PRIORITY 1
#define MXC_SDMA_MIN_PRIORITY 1
#define MXC_SDMA_MAX_PRIORITY 7

#define SDMA_FIRMWARE_MAGIC 0x414d4453

/**
 * struct sdma_firmware_header - Layout of the firmware image
 *
 * @magic		"SDMA"
 * @version_major	increased whenever layout of struct sdma_script_start_addrs
 *			changes.
 * @version_minor	firmware minor version (for binary compatible changes)
 * @script_addrs_start	offset of struct sdma_script_start_addrs in this image
 * @num_script_addrs	Number of script addresses in this image
 * @ram_code_start	offset of SDMA ram image in this firmware image
 * @ram_code_size	size of SDMA ram image
 * @script_addrs	Stores the start address of the SDMA scripts
 *			(in SDMA memory space)
 */
struct sdma_firmware_header {
	u32	magic;
	u32	version_major;
	u32	version_minor;
	u32	script_addrs_start;
	u32	num_script_addrs;
	u32	ram_code_start;
	u32	ram_code_size;
};

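/*
 * A firmware image is therefore: this header, a struct
 * sdma_script_start_addrs table at @script_addrs_start, and the RAM code
 * blob at @ram_code_start; sdma_load_firmware() below locates both
 * purely by these offsets from the start of the image.
 */
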
struct sdma_driver_data {
	int chnenbl0;
	int num_events;
	struct sdma_script_start_addrs	*script_addrs;
};

struct sdma_engine {
	struct device			*dev;
	struct device_dma_parameters	dma_parms;
	struct sdma_channel		channel[MAX_DMA_CHANNELS];
	struct sdma_channel_control	*channel_control;
	void __iomem			*regs;
	struct sdma_context_data	*context;
	dma_addr_t			context_phys;
	struct dma_device		dma_device;
	struct clk			*clk_ipg;
	struct clk			*clk_ahb;
	spinlock_t			channel_0_lock;
	u32				script_number;
	struct sdma_script_start_addrs	*script_addrs;
	const struct sdma_driver_data	*drvdata;
};

static struct sdma_driver_data sdma_imx31 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX31,
	.num_events = 32,
};

static struct sdma_script_start_addrs sdma_script_imx25 = {
	.ap_2_ap_addr = 729,
	.uart_2_mcu_addr = 904,
	.per_2_app_addr = 1255,
	.mcu_2_app_addr = 834,
	.uartsh_2_mcu_addr = 1120,
	.per_2_shp_addr = 1329,
	.mcu_2_shp_addr = 1048,
	.ata_2_mcu_addr = 1560,
	.mcu_2_ata_addr = 1479,
	.app_2_per_addr = 1189,
	.app_2_mcu_addr = 770,
	.shp_2_per_addr = 1407,
	.shp_2_mcu_addr = 979,
};

static struct sdma_driver_data sdma_imx25 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx25,
};

static struct sdma_driver_data sdma_imx35 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
};

static struct sdma_script_start_addrs sdma_script_imx51 = {
	.ap_2_ap_addr = 642,
	.uart_2_mcu_addr = 817,
	.mcu_2_app_addr = 747,
	.mcu_2_shp_addr = 961,
	.ata_2_mcu_addr = 1473,
	.mcu_2_ata_addr = 1392,
	.app_2_per_addr = 1033,
	.app_2_mcu_addr = 683,
	.shp_2_per_addr = 1251,
	.shp_2_mcu_addr = 892,
};

static struct sdma_driver_data sdma_imx51 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx51,
};

static struct sdma_script_start_addrs sdma_script_imx53 = {
	.ap_2_ap_addr = 642,
	.app_2_mcu_addr = 683,
	.mcu_2_app_addr = 747,
	.uart_2_mcu_addr = 817,
	.shp_2_mcu_addr = 891,
	.mcu_2_shp_addr = 960,
	.uartsh_2_mcu_addr = 1032,
	.spdif_2_mcu_addr = 1100,
	.mcu_2_spdif_addr = 1134,
	.firi_2_mcu_addr = 1193,
	.mcu_2_firi_addr = 1290,
};

static struct sdma_driver_data sdma_imx53 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx53,
};

static struct sdma_script_start_addrs sdma_script_imx6q = {
	.ap_2_ap_addr = 642,
	.uart_2_mcu_addr = 817,
	.mcu_2_app_addr = 747,
	.per_2_per_addr = 6331,
	.uartsh_2_mcu_addr = 1032,
	.mcu_2_shp_addr = 960,
	.app_2_mcu_addr = 683,
	.shp_2_mcu_addr = 891,
	.spdif_2_mcu_addr = 1100,
	.mcu_2_spdif_addr = 1134,
};

static struct sdma_driver_data sdma_imx6q = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx6q,
};

static struct platform_device_id sdma_devtypes[] = {
	{
		.name = "imx25-sdma",
		.driver_data = (unsigned long)&sdma_imx25,
	}, {
		.name = "imx31-sdma",
		.driver_data = (unsigned long)&sdma_imx31,
	}, {
		.name = "imx35-sdma",
		.driver_data = (unsigned long)&sdma_imx35,
	}, {
		.name = "imx51-sdma",
		.driver_data = (unsigned long)&sdma_imx51,
	}, {
		.name = "imx53-sdma",
		.driver_data = (unsigned long)&sdma_imx53,
	}, {
		.name = "imx6q-sdma",
		.driver_data = (unsigned long)&sdma_imx6q,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, sdma_devtypes);

static const struct of_device_id sdma_dt_ids[] = {
	{ .compatible = "fsl,imx6q-sdma", .data = &sdma_imx6q, },
	{ .compatible = "fsl,imx53-sdma", .data = &sdma_imx53, },
	{ .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, },
	{ .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
	{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
	{ .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdma_dt_ids);

#define SDMA_H_CONFIG_DSPDMA	BIT(12) /* indicates if the DSPDMA is used */
#define SDMA_H_CONFIG_RTD_PINS	BIT(11) /* indicates if Real-Time Debug pins are enabled */
#define SDMA_H_CONFIG_ACR	BIT(4)  /* indicates if AHB freq /core freq = 2 or 1 */
#define SDMA_H_CONFIG_CSM	(3)     /* indicates which context switch mode is selected */

static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
{
	u32 chnenbl0 = sdma->drvdata->chnenbl0;

	return chnenbl0 + event * 4;
}

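/*
 * Every DMA request (event) has its own channel-enable register at
 * chnenbl0 + 4 * event; bit n of that register routes the event to
 * channel n. sdma_event_enable()/sdma_event_disable() below toggle
 * exactly one such bit.
 */
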
static int sdma_config_ownership(struct sdma_channel *sdmac,
		bool event_override, bool mcu_override, bool dsp_override)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	unsigned long evt, mcu, dsp;

	if (event_override && mcu_override && dsp_override)
		return -EINVAL;

	evt = readl_relaxed(sdma->regs + SDMA_H_EVTOVR);
	mcu = readl_relaxed(sdma->regs + SDMA_H_HOSTOVR);
	dsp = readl_relaxed(sdma->regs + SDMA_H_DSPOVR);

	if (dsp_override)
		__clear_bit(channel, &dsp);
	else
		__set_bit(channel, &dsp);

	if (event_override)
		__clear_bit(channel, &evt);
	else
		__set_bit(channel, &evt);

	if (mcu_override)
		__clear_bit(channel, &mcu);
	else
		__set_bit(channel, &mcu);

	writel_relaxed(evt, sdma->regs + SDMA_H_EVTOVR);
	writel_relaxed(mcu, sdma->regs + SDMA_H_HOSTOVR);
	writel_relaxed(dsp, sdma->regs + SDMA_H_DSPOVR);

	return 0;
}

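/*
 * Note the inverted encoding above: a cleared bit in EVTOVR/HOSTOVR/
 * DSPOVR activates the respective override for that channel, so
 * *_override == true clears the channel's bit. Requesting all three
 * overrides at once is rejected as invalid.
 */
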
static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
{
	writel(BIT(channel), sdma->regs + SDMA_H_START);
}

/*
 * sdma_run_channel0 - run a channel and wait till it's done
 */
static int sdma_run_channel0(struct sdma_engine *sdma)
{
	int ret;
	unsigned long timeout = 500;

	sdma_enable_channel(sdma, 0);

	while (!(ret = readl_relaxed(sdma->regs + SDMA_H_INTR) & 1)) {
		if (timeout-- <= 0)
			break;
		udelay(1);
	}

	if (ret) {
		/* Clear the interrupt status */
		writel_relaxed(ret, sdma->regs + SDMA_H_INTR);
	} else {
		dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
	}

	return ret ? 0 : -ETIMEDOUT;
}

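/*
 * Channel 0 is the engine's command channel: C0_SETPM/C0_SETDM transfers
 * issued through its single buffer descriptor load scripts and contexts
 * into SDMA memory. Completion is polled here (bit 0 of SDMA_H_INTR, for
 * up to ~500us) rather than interrupt driven, which is why the interrupt
 * handler below deliberately masks out channel 0.
 */
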
static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
		u32 address)
{
	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
	void *buf_virt;
	dma_addr_t buf_phys;
	int ret;
	unsigned long flags;

	buf_virt = dma_alloc_coherent(NULL,
			size,
			&buf_phys, GFP_KERNEL);
	if (!buf_virt) {
		return -ENOMEM;
	}

	spin_lock_irqsave(&sdma->channel_0_lock, flags);

	bd0->mode.command = C0_SETPM;
	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
	bd0->mode.count = size / 2;
	bd0->buffer_addr = buf_phys;
	bd0->ext_buffer_addr = address;

	memcpy(buf_virt, buf, size);

	ret = sdma_run_channel0(sdma);

	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);

	dma_free_coherent(NULL, size, buf_virt, buf_phys);

	return ret;
}

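/*
 * The count for a C0_SETPM transfer is given in 16-bit SDMA instructions
 * rather than bytes, hence the size / 2 above; @address is likewise a
 * halfword address in SDMA program memory.
 */
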
static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	unsigned long val;
	u32 chnenbl = chnenbl_ofs(sdma, event);

	val = readl_relaxed(sdma->regs + chnenbl);
	__set_bit(channel, &val);
	writel_relaxed(val, sdma->regs + chnenbl);
}

static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	u32 chnenbl = chnenbl_ofs(sdma, event);
	unsigned long val;

	val = readl_relaxed(sdma->regs + chnenbl);
	__clear_bit(channel, &val);
	writel_relaxed(val, sdma->regs + chnenbl);
}

static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
{
	if (sdmac->desc.callback)
		sdmac->desc.callback(sdmac->desc.callback_param);
}

static void sdma_update_channel_loop(struct sdma_channel *sdmac)
{
	struct sdma_buffer_descriptor *bd;

	/*
	 * loop mode. Iterate over descriptors, re-setup them and
	 * call callback function.
	 */
	while (1) {
		bd = &sdmac->bd[sdmac->buf_tail];

		if (bd->mode.status & BD_DONE)
			break;

		if (bd->mode.status & BD_RROR)
			sdmac->status = DMA_ERROR;

		bd->mode.status |= BD_DONE;
		sdmac->buf_tail++;
		sdmac->buf_tail %= sdmac->num_bd;
	}
}

static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
{
	struct sdma_buffer_descriptor *bd;
	int i, error = 0;

	sdmac->chn_real_count = 0;
	/*
	 * non-loop mode. Iterate over all descriptors, collect
	 * errors and call callback function
	 */
	for (i = 0; i < sdmac->num_bd; i++) {
		bd = &sdmac->bd[i];

		if (bd->mode.status & (BD_DONE | BD_RROR))
			error = -EIO;
		sdmac->chn_real_count += bd->mode.count;
	}

	if (error)
		sdmac->status = DMA_ERROR;
	else
		sdmac->status = DMA_COMPLETE;

	dma_cookie_complete(&sdmac->desc);
	if (sdmac->desc.callback)
		sdmac->desc.callback(sdmac->desc.callback_param);
}

static void sdma_tasklet(unsigned long data)
{
	struct sdma_channel *sdmac = (struct sdma_channel *) data;

	if (sdmac->flags & IMX_DMA_SG_LOOP)
		sdma_handle_channel_loop(sdmac);
	else
		mxc_sdma_handle_channel_normal(sdmac);
}

static irqreturn_t sdma_int_handler(int irq, void *dev_id)
{
	struct sdma_engine *sdma = dev_id;
	unsigned long stat;

	stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
	/* not interested in channel 0 interrupts */
	stat &= ~1;
	writel_relaxed(stat, sdma->regs + SDMA_H_INTR);

	while (stat) {
		int channel = fls(stat) - 1;
		struct sdma_channel *sdmac = &sdma->channel[channel];

		if (sdmac->flags & IMX_DMA_SG_LOOP)
			sdma_update_channel_loop(sdmac);

		tasklet_schedule(&sdmac->tasklet);

		__clear_bit(channel, &stat);
	}

	return IRQ_HANDLED;
}

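/*
 * SDMA_H_INTR is write-one-to-clear, so writing the (channel-0-masked)
 * status straight back acknowledges every pending channel before the
 * handler walks them highest-bit-first with fls().
 */
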
/*
 * sets the pc of SDMA script according to the peripheral type
 */
static void sdma_get_pc(struct sdma_channel *sdmac,
		enum sdma_peripheral_type peripheral_type)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int per_2_emi = 0, emi_2_per = 0;
	/*
	 * These are needed once we start to support transfers between
	 * two peripherals or memory-to-memory transfers
	 */
	int per_2_per = 0, emi_2_emi = 0;

	sdmac->pc_from_device = 0;
	sdmac->pc_to_device = 0;

	switch (peripheral_type) {
	case IMX_DMATYPE_MEMORY:
		emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
		break;
	case IMX_DMATYPE_DSP:
		emi_2_per = sdma->script_addrs->bp_2_ap_addr;
		per_2_emi = sdma->script_addrs->ap_2_bp_addr;
		break;
	case IMX_DMATYPE_FIRI:
		per_2_emi = sdma->script_addrs->firi_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_firi_addr;
		break;
	case IMX_DMATYPE_UART:
		per_2_emi = sdma->script_addrs->uart_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
		break;
	case IMX_DMATYPE_UART_SP:
		per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
		break;
	case IMX_DMATYPE_ATA:
		per_2_emi = sdma->script_addrs->ata_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
		break;
	case IMX_DMATYPE_CSPI:
	case IMX_DMATYPE_EXT:
	case IMX_DMATYPE_SSI:
		per_2_emi = sdma->script_addrs->app_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
		break;
	case IMX_DMATYPE_SSI_DUAL:
		per_2_emi = sdma->script_addrs->ssish_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_ssish_addr;
		break;
	case IMX_DMATYPE_SSI_SP:
	case IMX_DMATYPE_MMC:
	case IMX_DMATYPE_SDHC:
	case IMX_DMATYPE_CSPI_SP:
	case IMX_DMATYPE_ESAI:
	case IMX_DMATYPE_MSHC_SP:
		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
		break;
	case IMX_DMATYPE_ASRC:
		per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
		emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
		per_2_per = sdma->script_addrs->per_2_per_addr;
		break;
	case IMX_DMATYPE_ASRC_SP:
		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
		per_2_per = sdma->script_addrs->per_2_per_addr;
		break;
	case IMX_DMATYPE_MSHC:
		per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_mshc_addr;
		break;
	case IMX_DMATYPE_CCM:
		per_2_emi = sdma->script_addrs->dptc_dvfs_addr;
		break;
	case IMX_DMATYPE_SPDIF:
		per_2_emi = sdma->script_addrs->spdif_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_spdif_addr;
		break;
	case IMX_DMATYPE_IPU_MEMORY:
		emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
		break;
	default:
		break;
	}

	sdmac->pc_from_device = per_2_emi;
	sdmac->pc_to_device = emi_2_per;
}

static int sdma_load_context(struct sdma_channel *sdmac)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	int load_address;
	struct sdma_context_data *context = sdma->context;
	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
	int ret;
	unsigned long flags;

	if (sdmac->direction == DMA_DEV_TO_MEM) {
		load_address = sdmac->pc_from_device;
	} else {
		load_address = sdmac->pc_to_device;
	}

	if (load_address < 0)
		return load_address;

	dev_dbg(sdma->dev, "load_address = %d\n", load_address);
	dev_dbg(sdma->dev, "wml = 0x%08x\n", (u32)sdmac->watermark_level);
	dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
	dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
	dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
	dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);

	spin_lock_irqsave(&sdma->channel_0_lock, flags);

	memset(context, 0, sizeof(*context));
	context->channel_state.pc = load_address;

	/*
	 * Pass the event masks, the peripheral base address and the
	 * watermark level to the script via the context.
	 */
	context->gReg[0] = sdmac->event_mask[1];
	context->gReg[1] = sdmac->event_mask[0];
	context->gReg[2] = sdmac->per_addr;
	context->gReg[6] = sdmac->shp_addr;
	context->gReg[7] = sdmac->watermark_level;

	bd0->mode.command = C0_SETDM;
	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
	bd0->mode.count = sizeof(*context) / 4;
	bd0->buffer_addr = sdma->context_phys;
	bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
	ret = sdma_run_channel0(sdma);

	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);

	return ret;
}

static void sdma_disable_channel(struct sdma_channel *sdmac)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;

	writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
	sdmac->status = DMA_ERROR;
}

static int sdma_config_channel(struct sdma_channel *sdmac)
{
	int ret;

	sdma_disable_channel(sdmac);

	sdmac->event_mask[0] = 0;
	sdmac->event_mask[1] = 0;
	sdmac->shp_addr = 0;
	sdmac->per_addr = 0;

	if (sdmac->event_id0) {
		if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
			return -EINVAL;
		sdma_event_enable(sdmac, sdmac->event_id0);
	}

	switch (sdmac->peripheral_type) {
	case IMX_DMATYPE_DSP:
		sdma_config_ownership(sdmac, false, true, true);
		break;
	case IMX_DMATYPE_MEMORY:
		sdma_config_ownership(sdmac, false, true, false);
		break;
	default:
		sdma_config_ownership(sdmac, true, true, false);
		break;
	}

	sdma_get_pc(sdmac, sdmac->peripheral_type);

	if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) &&
			(sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
		/* Handle multiple event channels differently */
		if (sdmac->event_id1) {
			sdmac->event_mask[1] = BIT(sdmac->event_id1 % 32);
			if (sdmac->event_id1 > 31)
				__set_bit(31, &sdmac->watermark_level);
			sdmac->event_mask[0] = BIT(sdmac->event_id0 % 32);
			if (sdmac->event_id0 > 31)
				__set_bit(30, &sdmac->watermark_level);
		} else {
			__set_bit(sdmac->event_id0, sdmac->event_mask);
		}
		/* Watermark Level */
		sdmac->watermark_level |= sdmac->watermark_level;
		/* Address */
		sdmac->shp_addr = sdmac->per_address;
	} else {
		sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
	}

	ret = sdma_load_context(sdmac);

	return ret;
}

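/*
 * For dual-event channels both request lines are folded into one mask
 * per 32-event bank (passed to the script as gReg[0]/gReg[1] by
 * sdma_load_context()), while bits 31 and 30 of the watermark level
 * record that event_id1/event_id0 sit in the upper bank.
 */
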
static int sdma_set_channel_priority(struct sdma_channel *sdmac,
		unsigned int priority)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;

	if (priority < MXC_SDMA_MIN_PRIORITY
	    || priority > MXC_SDMA_MAX_PRIORITY) {
		return -EINVAL;
	}

	writel_relaxed(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);

	return 0;
}

static int sdma_request_channel(struct sdma_channel *sdmac)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	int ret = -EBUSY;

	sdmac->bd = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys,
					GFP_KERNEL);
	if (!sdmac->bd) {
		ret = -ENOMEM;
		goto out;
	}

	sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys;
	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;

	sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
	return 0;
out:

	return ret;
}

static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct sdma_channel, chan);
}

static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	unsigned long flags;
	struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
	dma_cookie_t cookie;

	spin_lock_irqsave(&sdmac->lock, flags);

	cookie = dma_cookie_assign(tx);

	spin_unlock_irqrestore(&sdmac->lock, flags);

	return cookie;
}

static int sdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct imx_dma_data *data = chan->private;
	int prio, ret;

	if (!data)
		return -EINVAL;

	switch (data->priority) {
	case DMA_PRIO_HIGH:
		prio = 3;
		break;
	case DMA_PRIO_MEDIUM:
		prio = 2;
		break;
	case DMA_PRIO_LOW:
	default:
		prio = 1;
		break;
	}

	sdmac->peripheral_type = data->peripheral_type;
	sdmac->event_id0 = data->dma_request;

	clk_enable(sdmac->sdma->clk_ipg);
	clk_enable(sdmac->sdma->clk_ahb);

	ret = sdma_request_channel(sdmac);
	if (ret)
		return ret;

	ret = sdma_set_channel_priority(sdmac, prio);
	if (ret)
		return ret;

	dma_async_tx_descriptor_init(&sdmac->desc, chan);
	sdmac->desc.tx_submit = sdma_tx_submit;
	/* txd.flags will be overwritten in prep funcs */
	sdmac->desc.flags = DMA_CTRL_ACK;

	return 0;
}

static void sdma_free_chan_resources(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;

	sdma_disable_channel(sdmac);

	if (sdmac->event_id0)
		sdma_event_disable(sdmac, sdmac->event_id0);
	if (sdmac->event_id1)
		sdma_event_disable(sdmac, sdmac->event_id1);

	sdmac->event_id0 = 0;
	sdmac->event_id1 = 0;

	sdma_set_channel_priority(sdmac, 0);

	dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);

	clk_disable(sdma->clk_ipg);
	clk_disable(sdma->clk_ahb);
}

static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;
	int ret, i, count;
	int channel = sdmac->channel;
	struct scatterlist *sg;

	if (sdmac->status == DMA_IN_PROGRESS)
		return NULL;
	sdmac->status = DMA_IN_PROGRESS;

	sdmac->flags = 0;

	sdmac->buf_tail = 0;

	dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
			sg_len, channel);

	sdmac->direction = direction;
	ret = sdma_load_context(sdmac);
	if (ret)
		goto err_out;

	if (sg_len > NUM_BD) {
		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
				channel, sg_len, NUM_BD);
		ret = -EINVAL;
		goto err_out;
	}

	sdmac->chn_count = 0;
	for_each_sg(sgl, sg, sg_len, i) {
		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
		int param;

		bd->buffer_addr = sg->dma_address;

		count = sg_dma_len(sg);

		if (count > 0xffff) {
			dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
					channel, count, 0xffff);
			ret = -EINVAL;
			goto err_out;
		}

		bd->mode.count = count;
		sdmac->chn_count += count;

		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
			ret = -EINVAL;
			goto err_out;
		}

		switch (sdmac->word_size) {
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			bd->mode.command = 0;
			if (count & 3 || sg->dma_address & 3)
				goto err_out;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			bd->mode.command = 2;
			if (count & 1 || sg->dma_address & 1)
				goto err_out;
			break;
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			bd->mode.command = 1;
			break;
		default:
			goto err_out;
		}

		param = BD_DONE | BD_EXTD | BD_CONT;

		if (i + 1 == sg_len) {
			param |= BD_INTR;
			param |= BD_LAST;
			param &= ~BD_CONT;
		}

		dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
				i, count, (u64)sg->dma_address,
				param & BD_WRAP ? "wrap" : "",
				param & BD_INTR ? " intr" : "");

		bd->mode.status = param;
	}

	sdmac->num_bd = sg_len;
	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;

	return &sdmac->desc;
err_out:
	sdmac->status = DMA_ERROR;
	return NULL;
}

static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;
	int num_periods = buf_len / period_len;
	int channel = sdmac->channel;
	int ret, i = 0, buf = 0;

	dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);

	if (sdmac->status == DMA_IN_PROGRESS)
		return NULL;

	sdmac->status = DMA_IN_PROGRESS;

	sdmac->buf_tail = 0;
	sdmac->period_len = period_len;

	sdmac->flags |= IMX_DMA_SG_LOOP;
	sdmac->direction = direction;
	ret = sdma_load_context(sdmac);
	if (ret)
		goto err_out;

	if (num_periods > NUM_BD) {
		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
				channel, num_periods, NUM_BD);
		goto err_out;
	}

	if (period_len > 0xffff) {
		dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
				channel, period_len, 0xffff);
		goto err_out;
	}

	while (buf < buf_len) {
		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
		int param;

		bd->buffer_addr = dma_addr;

		bd->mode.count = period_len;

		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
			goto err_out;
		if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
			bd->mode.command = 0;
		else
			bd->mode.command = sdmac->word_size;

		param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR;
		if (i + 1 == num_periods)
			param |= BD_WRAP;

		dev_dbg(sdma->dev, "entry %d: count: %zu dma: %#llx %s%s\n",
				i, period_len, (u64)dma_addr,
				param & BD_WRAP ? "wrap" : "",
				param & BD_INTR ? " intr" : "");

		bd->mode.status = param;

		dma_addr += period_len;
		buf += period_len;

		i++;
	}

	sdmac->num_bd = num_periods;
	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;

	return &sdmac->desc;
err_out:
	sdmac->status = DMA_ERROR;
	return NULL;
}

static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct dma_slave_config *dmaengine_cfg = (void *)arg;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		sdma_disable_channel(sdmac);
		return 0;
	case DMA_SLAVE_CONFIG:
		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
			sdmac->per_address = dmaengine_cfg->src_addr;
			sdmac->watermark_level = dmaengine_cfg->src_maxburst *
						dmaengine_cfg->src_addr_width;
			sdmac->word_size = dmaengine_cfg->src_addr_width;
		} else {
			sdmac->per_address = dmaengine_cfg->dst_addr;
			sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
						dmaengine_cfg->dst_addr_width;
			sdmac->word_size = dmaengine_cfg->dst_addr_width;
		}
		sdmac->direction = dmaengine_cfg->direction;
		return sdma_config_channel(sdmac);
	default:
		return -ENOSYS;
	}

	return -EINVAL;
}

static enum dma_status sdma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	u32 residue;

	if (sdmac->flags & IMX_DMA_SG_LOOP)
		residue = (sdmac->num_bd - sdmac->buf_tail) * sdmac->period_len;
	else
		residue = sdmac->chn_count - sdmac->chn_real_count;

	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
			 residue);

	return sdmac->status;
}

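/*
 * Residue is approximated per descriptor: for cyclic transfers it is the
 * number of periods not yet consumed times the period length, for
 * scatter-gather it is the total bytes requested (chn_count) minus the
 * bytes the completion handler counted back (chn_real_count).
 */
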
static void sdma_issue_pending(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;

	if (sdmac->status == DMA_IN_PROGRESS)
		sdma_enable_channel(sdma, sdmac->channel);
}

#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1	34
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2	38

static void sdma_add_scripts(struct sdma_engine *sdma,
		const struct sdma_script_start_addrs *addr)
{
	s32 *addr_arr = (s32 *)addr;
	s32 *saddr_arr = (s32 *)sdma->script_addrs;
	int i;

	/* use the default firmware in ROM if missing external firmware */
	if (!sdma->script_number)
		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;

	for (i = 0; i < sdma->script_number; i++)
		if (addr_arr[i] > 0)
			saddr_arr[i] = addr_arr[i];
}

static void sdma_load_firmware(const struct firmware *fw, void *context)
{
	struct sdma_engine *sdma = context;
	const struct sdma_firmware_header *header;
	const struct sdma_script_start_addrs *addr;
	unsigned short *ram_code;

	if (!fw) {
		dev_err(sdma->dev, "firmware not found\n");
		return;
	}

	if (fw->size < sizeof(*header))
		goto err_firmware;

	header = (struct sdma_firmware_header *)fw->data;

	if (header->magic != SDMA_FIRMWARE_MAGIC)
		goto err_firmware;
	if (header->ram_code_start + header->ram_code_size > fw->size)
		goto err_firmware;
	switch (header->version_major) {
	case 1:
		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
		break;
	case 2:
		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
		break;
	default:
		dev_err(sdma->dev, "unknown firmware version\n");
		goto err_firmware;
	}

	addr = (void *)header + header->script_addrs_start;
	ram_code = (void *)header + header->ram_code_start;

	clk_enable(sdma->clk_ipg);
	clk_enable(sdma->clk_ahb);
	/* download the RAM image for SDMA */
	sdma_load_script(sdma, ram_code,
			 header->ram_code_size,
			 addr->ram_code_start_addr);
	clk_disable(sdma->clk_ipg);
	clk_disable(sdma->clk_ahb);

	sdma_add_scripts(sdma, addr);

	dev_info(sdma->dev, "loaded firmware %d.%d\n",
		 header->version_major,
		 header->version_minor);

err_firmware:
	release_firmware(fw);
}

static int sdma_get_firmware(struct sdma_engine *sdma,
		const char *fw_name)
{
	int ret;

	ret = request_firmware_nowait(THIS_MODULE,
			FW_ACTION_HOTPLUG, fw_name, sdma->dev,
			GFP_KERNEL, sdma, sdma_load_firmware);

	return ret;
}

static int __init sdma_init(struct sdma_engine *sdma)
{
	int i, ret;
	dma_addr_t ccb_phys;

	clk_enable(sdma->clk_ipg);
	clk_enable(sdma->clk_ahb);

	/* Be sure SDMA has not started yet */
	writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);

	sdma->channel_control = dma_alloc_coherent(NULL,
			MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control) +
			sizeof(struct sdma_context_data),
			&ccb_phys, GFP_KERNEL);

	if (!sdma->channel_control) {
		ret = -ENOMEM;
		goto err_dma_alloc;
	}

	sdma->context = (void *)sdma->channel_control +
		MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control);
	sdma->context_phys = ccb_phys +
		MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control);

	/* Zero-out the CCB structures array just allocated */
	memset(sdma->channel_control, 0,
			MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control));

	/* disable all channels */
	for (i = 0; i < sdma->drvdata->num_events; i++)
		writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i));

	/* All channels have priority 0 */
	for (i = 0; i < MAX_DMA_CHANNELS; i++)
		writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);

	ret = sdma_request_channel(&sdma->channel[0]);
	if (ret)
		goto err_dma_alloc;

	sdma_config_ownership(&sdma->channel[0], false, true, false);

	/* Set Command Channel (Channel Zero) */
	writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR);

	/* Set bits of CONFIG register but with static context switching */
	/* FIXME: Check whether to set ACR bit depending on clock ratios */
	writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);

	writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);

	/* Set bits of CONFIG register with given context switching mode */
	writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);

	/* Initializes channel's priorities */
	sdma_set_channel_priority(&sdma->channel[0], 7);

	clk_disable(sdma->clk_ipg);
	clk_disable(sdma->clk_ahb);

	return 0;

err_dma_alloc:
	clk_disable(sdma->clk_ipg);
	clk_disable(sdma->clk_ahb);
	dev_err(sdma->dev, "initialisation failed with %d\n", ret);
	return ret;
}

static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct imx_dma_data *data = fn_param;

	if (!imx_dma_is_general_purpose(chan))
		return false;

	sdmac->data = *data;
	chan->private = &sdmac->data;

	return true;
}

static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
				   struct of_dma *ofdma)
{
	struct sdma_engine *sdma = ofdma->of_dma_data;
	dma_cap_mask_t mask = sdma->dma_device.cap_mask;
	struct imx_dma_data data;

	if (dma_spec->args_count != 3)
		return NULL;

	data.dma_request = dma_spec->args[0];
	data.peripheral_type = dma_spec->args[1];
	data.priority = dma_spec->args[2];

	return dma_request_channel(mask, sdma_filter_fn, &data);
}

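/*
 * This lets a client bind a channel straight from its "dmas" property;
 * the three cells map onto struct imx_dma_data as above. With
 * illustrative values only:
 *
 *	dmas = <&sdma 25 4 0>;
 *
 * would request DMA request line 25, peripheral type 4, priority 0.
 */
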
static int sdma_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
			of_match_device(sdma_dt_ids, &pdev->dev);
	struct device_node *np = pdev->dev.of_node;
	const char *fw_name;
	int ret;
	int irq;
	struct resource *iores;
	struct sdma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int i;
	struct sdma_engine *sdma;
	s32 *saddr_arr;
	const struct sdma_driver_data *drvdata = NULL;

	if (of_id)
		drvdata = of_id->data;
	else if (pdev->id_entry)
		drvdata = (void *)pdev->id_entry->driver_data;

	if (!drvdata) {
		dev_err(&pdev->dev, "unable to find driver data\n");
		return -EINVAL;
	}

	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
	if (!sdma)
		return -ENOMEM;

	spin_lock_init(&sdma->channel_0_lock);

	sdma->dev = &pdev->dev;
	sdma->drvdata = drvdata;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!iores || irq < 0) {
		ret = -EINVAL;
		goto err_irq;
	}

	if (!request_mem_region(iores->start, resource_size(iores), pdev->name)) {
		ret = -EBUSY;
		goto err_request_region;
	}

	sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(sdma->clk_ipg)) {
		ret = PTR_ERR(sdma->clk_ipg);
		goto err_clk;
	}

	sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(sdma->clk_ahb)) {
		ret = PTR_ERR(sdma->clk_ahb);
		goto err_clk;
	}

	clk_prepare(sdma->clk_ipg);
	clk_prepare(sdma->clk_ahb);

	sdma->regs = ioremap(iores->start, resource_size(iores));
	if (!sdma->regs) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	ret = request_irq(irq, sdma_int_handler, 0, "sdma", sdma);
	if (ret)
		goto err_request_irq;

	sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
	if (!sdma->script_addrs) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	/* initially no scripts available */
	saddr_arr = (s32 *)sdma->script_addrs;
	for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
		saddr_arr[i] = -EINVAL;

	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);

	INIT_LIST_HEAD(&sdma->dma_device.channels);
	/* Initialize channel parameters */
	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		struct sdma_channel *sdmac = &sdma->channel[i];

		sdmac->sdma = sdma;
		spin_lock_init(&sdmac->lock);

		sdmac->chan.device = &sdma->dma_device;
		dma_cookie_init(&sdmac->chan);
		sdmac->channel = i;

		tasklet_init(&sdmac->tasklet, sdma_tasklet,
			     (unsigned long) sdmac);
		/*
		 * Add the channel to the DMAC list. Do not add channel 0 though
		 * because we need it internally in the SDMA driver. This also means
		 * that channel 0 in dmaengine counting matches sdma channel 1.
		 */
		if (i)
			list_add_tail(&sdmac->chan.device_node,
					&sdma->dma_device.channels);
	}

	ret = sdma_init(sdma);
	if (ret)
		goto err_init;

	if (sdma->drvdata->script_addrs)
		sdma_add_scripts(sdma, sdma->drvdata->script_addrs);
	if (pdata && pdata->script_addrs)
		sdma_add_scripts(sdma, pdata->script_addrs);

	if (pdata) {
		ret = sdma_get_firmware(sdma, pdata->fw_name);
		if (ret)
			dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
	} else {
		/*
		 * Because the device tree does not encode ROM script
		 * addresses, the RAM script in firmware is mandatory for
		 * device tree probe, otherwise it fails.
		 */
		ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
					      &fw_name);
		if (ret)
			dev_warn(&pdev->dev, "failed to get firmware name\n");
		else {
			ret = sdma_get_firmware(sdma, fw_name);
			if (ret)
				dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
		}
	}

	sdma->dma_device.dev = &pdev->dev;

	sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
	sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
	sdma->dma_device.device_tx_status = sdma_tx_status;
	sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
	sdma->dma_device.device_control = sdma_control;
	sdma->dma_device.device_issue_pending = sdma_issue_pending;
	sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
	dma_set_max_seg_size(sdma->dma_device.dev, 65535);

	platform_set_drvdata(pdev, sdma);

	ret = dma_async_device_register(&sdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto err_init;
	}

	if (np) {
		ret = of_dma_controller_register(np, sdma_xlate, sdma);
		if (ret) {
			dev_err(&pdev->dev, "failed to register controller\n");
			goto err_register;
		}
	}

	dev_info(sdma->dev, "initialized\n");

	return 0;

err_register:
	dma_async_device_unregister(&sdma->dma_device);
err_init:
	kfree(sdma->script_addrs);
err_alloc:
	free_irq(irq, sdma);
err_request_irq:
	iounmap(sdma->regs);
err_ioremap:
err_clk:
	release_mem_region(iores->start, resource_size(iores));
err_request_region:
err_irq:
	kfree(sdma);
	return ret;
}

static int sdma_remove(struct platform_device *pdev)
{
	struct sdma_engine *sdma = platform_get_drvdata(pdev);
	struct resource *iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int irq = platform_get_irq(pdev, 0);
	int i;

	dma_async_device_unregister(&sdma->dma_device);
	kfree(sdma->script_addrs);
	free_irq(irq, sdma);
	iounmap(sdma->regs);
	release_mem_region(iores->start, resource_size(iores));
	/* Kill the tasklets */
	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		struct sdma_channel *sdmac = &sdma->channel[i];

		tasklet_kill(&sdmac->tasklet);
	}

	kfree(sdma);

	platform_set_drvdata(pdev, NULL);
	dev_info(&pdev->dev, "Removed...\n");
	return 0;
}

static struct platform_driver sdma_driver = {
	.driver		= {
		.name	= "imx-sdma",
		.of_match_table = sdma_dt_ids,
	},
	.id_table	= sdma_devtypes,
	.remove		= sdma_remove,
	.probe		= sdma_probe,
};

module_platform_driver(sdma_driver);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX SDMA driver");
MODULE_LICENSE("GPL");