/*
 * EDMA3 support for DaVinci
 *
 * Copyright (C) 2006-2009 Texas Instruments.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/compiler.h>
#include <linux/io.h>

#include <mach/cputype.h>
#include <mach/memory.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
#include <mach/edma.h>
#include <mach/mux.h>
/* Offsets matching "struct edmacc_param" */
#define PARM_OPT		0x00
#define PARM_SRC		0x04
#define PARM_A_B_CNT		0x08
#define PARM_DST		0x0c
#define PARM_SRC_DST_BIDX	0x10
#define PARM_LINK_BCNTRLD	0x14
#define PARM_SRC_DST_CIDX	0x18
#define PARM_CCNT		0x1c

#define PARM_SIZE		0x20
/* Offsets for EDMA CC global channel registers and their shadows */
#define SH_ER		0x00	/* 64 bits */
#define SH_ECR		0x08	/* 64 bits */
#define SH_ESR		0x10	/* 64 bits */
#define SH_CER		0x18	/* 64 bits */
#define SH_EER		0x20	/* 64 bits */
#define SH_EECR		0x28	/* 64 bits */
#define SH_EESR		0x30	/* 64 bits */
#define SH_SER		0x38	/* 64 bits */
#define SH_SECR		0x40	/* 64 bits */
#define SH_IER		0x50	/* 64 bits */
#define SH_IECR		0x58	/* 64 bits */
#define SH_IESR		0x60	/* 64 bits */
#define SH_IPR		0x68	/* 64 bits */
#define SH_ICR		0x70	/* 64 bits */
#define SH_IEVAL	0x78
#define SH_QER		0x80
#define SH_QEER		0x84
#define SH_QEECR	0x88
#define SH_QEESR	0x8c
#define SH_QSER		0x90
#define SH_QSECR	0x94
#define SH_SIZE		0x200
/* Offsets for EDMA CC global registers */
#define EDMA_REV	0x0000
#define EDMA_CCCFG	0x0004
#define EDMA_QCHMAP	0x0200	/* 8 registers */
#define EDMA_DMAQNUM	0x0240	/* 8 registers (4 on OMAP-L1xx) */
#define EDMA_QDMAQNUM	0x0260
#define EDMA_QUETCMAP	0x0280
#define EDMA_QUEPRI	0x0284
#define EDMA_EMR	0x0300	/* 64 bits */
#define EDMA_EMCR	0x0308	/* 64 bits */
#define EDMA_QEMR	0x0310
#define EDMA_QEMCR	0x0314
#define EDMA_CCERR	0x0318
#define EDMA_CCERRCLR	0x031c
#define EDMA_EEVAL	0x0320
#define EDMA_DRAE	0x0340	/* 4 x 64 bits */
#define EDMA_QRAE	0x0380	/* 4 registers */
#define EDMA_QUEEVTENTRY	0x0400	/* 2 x 16 registers */
#define EDMA_QSTAT	0x0600	/* 2 registers */
#define EDMA_QWMTHRA	0x0620
#define EDMA_QWMTHRB	0x0624
#define EDMA_CCSTAT	0x0640

#define EDMA_M		0x1000	/* global channel registers */
#define EDMA_ECR	0x1008
#define EDMA_ECRH	0x100C
#define EDMA_SHADOW0	0x2000	/* 4 regions shadowing global channels */
#define EDMA_PARM	0x4000	/* 128 param entries */
#define DAVINCI_DMA_3PCC_BASE	0x01C00000

#define PARM_OFFSET(param_no)	(EDMA_PARM + ((param_no) << 5))

#define EDMA_MAX_DMACH		64
#define EDMA_MAX_PARAMENTRY	512
#define EDMA_MAX_EVQUE		2	/* FIXME too small */
/*****************************************************************************/

static void __iomem *edmacc_regs_base;
static inline unsigned int edma_read(int offset)
{
	return (unsigned int)__raw_readl(edmacc_regs_base + offset);
}

static inline void edma_write(int offset, int val)
{
	__raw_writel(val, edmacc_regs_base + offset);
}

static inline void edma_modify(int offset, unsigned and, unsigned or)
{
	unsigned val = edma_read(offset);
	val &= and;
	val |= or;
	edma_write(offset, val);
}

static inline void edma_and(int offset, unsigned and)
{
	unsigned val = edma_read(offset);
	val &= and;
	edma_write(offset, val);
}

static inline void edma_or(int offset, unsigned or)
{
	unsigned val = edma_read(offset);
	val |= or;
	edma_write(offset, val);
}

static inline unsigned int edma_read_array(int offset, int i)
{
	return edma_read(offset + (i << 2));
}

static inline void edma_write_array(int offset, int i, unsigned val)
{
	edma_write(offset + (i << 2), val);
}

static inline void edma_modify_array(int offset, int i,
		unsigned and, unsigned or)
{
	edma_modify(offset + (i << 2), and, or);
}

static inline void edma_or_array(int offset, int i, unsigned or)
{
	edma_or(offset + (i << 2), or);
}

static inline void edma_or_array2(int offset, int i, int j, unsigned or)
{
	edma_or(offset + ((i*2 + j) << 2), or);
}

static inline void edma_write_array2(int offset, int i, int j, unsigned val)
{
	edma_write(offset + ((i*2 + j) << 2), val);
}

static inline unsigned int edma_shadow0_read(int offset)
{
	return edma_read(EDMA_SHADOW0 + offset);
}

static inline unsigned int edma_shadow0_read_array(int offset, int i)
{
	return edma_read(EDMA_SHADOW0 + offset + (i << 2));
}

static inline void edma_shadow0_write(int offset, unsigned val)
{
	edma_write(EDMA_SHADOW0 + offset, val);
}

static inline void edma_shadow0_write_array(int offset, int i, unsigned val)
{
	edma_write(EDMA_SHADOW0 + offset + (i << 2), val);
}

static inline unsigned int edma_parm_read(int offset, int param_no)
{
	return edma_read(EDMA_PARM + offset + (param_no << 5));
}

static inline void edma_parm_write(int offset, int param_no, unsigned val)
{
	edma_write(EDMA_PARM + offset + (param_no << 5), val);
}

static inline void edma_parm_modify(int offset, int param_no,
		unsigned and, unsigned or)
{
	edma_modify(EDMA_PARM + offset + (param_no << 5), and, or);
}

static inline void edma_parm_and(int offset, int param_no, unsigned and)
{
	edma_and(EDMA_PARM + offset + (param_no << 5), and);
}

static inline void edma_parm_or(int offset, int param_no, unsigned or)
{
	edma_or(EDMA_PARM + offset + (param_no << 5), or);
}
/*****************************************************************************/

/* actual number of DMA channels and slots on this silicon */
static unsigned num_channels;
static unsigned num_slots;

static struct dma_interrupt_data {
	void (*callback)(unsigned channel, unsigned short ch_status,
			 void *data);
	void *data;
} intr_data[EDMA_MAX_DMACH];
/* The edma_inuse bit for each PaRAM slot is clear unless the
 * channel is in use ... by ARM or DSP, for QDMA, or whatever.
 */
static DECLARE_BITMAP(edma_inuse, EDMA_MAX_PARAMENTRY);

/* The edma_noevent bit for each channel is clear unless
 * it doesn't trigger DMA events on this platform.  It uses a
 * bit of SOC-specific initialization code.
 */
static DECLARE_BITMAP(edma_noevent, EDMA_MAX_DMACH);
/* dummy param set used to (re)initialize parameter RAM slots */
static const struct edmacc_param dummy_paramset = {
	.link_bcntrld = 0xffff,
	.ccnt = 1,
};
static const int __initconst
queue_tc_mapping[EDMA_MAX_EVQUE + 1][2] = {
	/* {event queue no, TC no} */
	{0, 0},
	{1, 1},
	{-1, -1}
};

static const int __initconst
queue_priority_mapping[EDMA_MAX_EVQUE + 1][2] = {
	/* {event queue no, Priority} */
	{0, 3},
	{1, 7},
	{-1, -1}
};
/*****************************************************************************/

static void map_dmach_queue(unsigned ch_no, enum dma_event_q queue_no)
{
	int bit = (ch_no & 0x7) * 4;

	/* default to low priority queue */
	if (queue_no == EVENTQ_DEFAULT)
		queue_no = EVENTQ_1;

	queue_no &= 7;
	edma_modify_array(EDMA_DMAQNUM, (ch_no >> 3),
			~(0x7 << bit), queue_no << bit);
}
static void __init map_queue_tc(int queue_no, int tc_no)
{
	int bit = queue_no * 4;

	edma_modify(EDMA_QUETCMAP, ~(0x7 << bit), ((tc_no & 0x7) << bit));
}

static void __init assign_priority_to_queue(int queue_no, int priority)
{
	int bit = queue_no * 4;

	edma_modify(EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit));
}
static inline void
setup_dma_interrupt(unsigned lch,
	void (*callback)(unsigned channel, u16 ch_status, void *data),
	void *data)
{
	if (!callback) {
		edma_shadow0_write_array(SH_IECR, lch >> 5,
				(1 << (lch & 0x1f)));
	}

	intr_data[lch].callback = callback;
	intr_data[lch].data = data;

	if (callback) {
		edma_shadow0_write_array(SH_ICR, lch >> 5,
				(1 << (lch & 0x1f)));
		edma_shadow0_write_array(SH_IESR, lch >> 5,
				(1 << (lch & 0x1f)));
	}
}
/******************************************************************************
 *
 * DMA interrupt handler
 *
 *****************************************************************************/
static irqreturn_t dma_irq_handler(int irq, void *data)
{
	int i;
	unsigned int cnt = 0;

	dev_dbg(data, "dma_irq_handler\n");

	if ((edma_shadow0_read_array(SH_IPR, 0) == 0)
	    && (edma_shadow0_read_array(SH_IPR, 1) == 0))
		return IRQ_NONE;

	while (1) {
		int j;

		if (edma_shadow0_read_array(SH_IPR, 0))
			j = 0;
		else if (edma_shadow0_read_array(SH_IPR, 1))
			j = 1;
		else
			break;
		dev_dbg(data, "IPR%d %08x\n", j,
				edma_shadow0_read_array(SH_IPR, j));
		for (i = 0; i < 32; i++) {
			int k = (j << 5) + i;

			if (edma_shadow0_read_array(SH_IPR, j) & (1 << i)) {
				/* Clear the corresponding IPR bits */
				edma_shadow0_write_array(SH_ICR, j, (1 << i));
				if (intr_data[k].callback) {
					intr_data[k].callback(k, DMA_COMPLETE,
						intr_data[k].data);
				}
			}
		}
		cnt++;
		if (cnt > 10)
			break;
	}
	edma_shadow0_write(SH_IEVAL, 1);
	return IRQ_HANDLED;
}
/******************************************************************************
 *
 * DMA error interrupt handler
 *
 *****************************************************************************/
static irqreturn_t dma_ccerr_handler(int irq, void *data)
{
	int i;
	unsigned int cnt = 0;

	dev_dbg(data, "dma_ccerr_handler\n");

	if ((edma_read_array(EDMA_EMR, 0) == 0) &&
	    (edma_read_array(EDMA_EMR, 1) == 0) &&
	    (edma_read(EDMA_QEMR) == 0) && (edma_read(EDMA_CCERR) == 0))
		return IRQ_NONE;

	while (1) {
		int j = -1;

		if (edma_read_array(EDMA_EMR, 0))
			j = 0;
		else if (edma_read_array(EDMA_EMR, 1))
			j = 1;
		if (j >= 0) {
			dev_dbg(data, "EMR%d %08x\n", j,
					edma_read_array(EDMA_EMR, j));
			for (i = 0; i < 32; i++) {
				int k = (j << 5) + i;

				if (edma_read_array(EDMA_EMR, j) & (1 << i)) {
					/* Clear the corresponding EMR bits */
					edma_write_array(EDMA_EMCR, j, 1 << i);
					/* Clear any SER */
					edma_shadow0_write_array(SH_SECR, j,
							(1 << i));
					if (intr_data[k].callback) {
						intr_data[k].callback(k,
							DMA_CC_ERROR,
							intr_data[k].data);
					}
				}
			}
		} else if (edma_read(EDMA_QEMR)) {
			dev_dbg(data, "QEMR %02x\n",
				edma_read(EDMA_QEMR));
			for (i = 0; i < 8; i++) {
				if (edma_read(EDMA_QEMR) & (1 << i)) {
					/* Clear the corresponding IPR bits */
					edma_write(EDMA_QEMCR, 1 << i);
					edma_shadow0_write(SH_QSECR, (1 << i));

					/* NOTE:  not reported!! */
				}
			}
		} else if (edma_read(EDMA_CCERR)) {
			dev_dbg(data, "CCERR %08x\n",
				edma_read(EDMA_CCERR));
			/* FIXME:  CCERR.BIT(16) ignored!  much better
			 * to just write CCERRCLR with CCERR value...
			 */
			for (i = 0; i < 8; i++) {
				if (edma_read(EDMA_CCERR) & (1 << i)) {
					/* Clear the corresponding IPR bits */
					edma_write(EDMA_CCERRCLR, 1 << i);

					/* NOTE:  not reported!! */
				}
			}
		}
		if ((edma_read_array(EDMA_EMR, 0) == 0)
		    && (edma_read_array(EDMA_EMR, 1) == 0)
		    && (edma_read(EDMA_QEMR) == 0)
		    && (edma_read(EDMA_CCERR) == 0)) {
			break;
		}
		cnt++;
		if (cnt > 10)
			break;
	}
	edma_write(EDMA_EEVAL, 1);
	return IRQ_HANDLED;
}
/******************************************************************************
 *
 * Transfer controller error interrupt handlers
 *
 *****************************************************************************/

#define tc_errs_handled	false	/* disabled as long as they're NOPs */

static irqreturn_t dma_tc0err_handler(int irq, void *data)
{
	dev_dbg(data, "dma_tc0err_handler\n");
	return IRQ_HANDLED;
}

static irqreturn_t dma_tc1err_handler(int irq, void *data)
{
	dev_dbg(data, "dma_tc1err_handler\n");
	return IRQ_HANDLED;
}
/*-----------------------------------------------------------------------*/

/* Resource alloc/free:  dma channels, parameter RAM slots */
/**
 * edma_alloc_channel - allocate DMA channel and paired parameter RAM
 * @channel: specific channel to allocate; negative for "any unmapped channel"
 * @callback: optional; to be issued on DMA completion or errors
 * @data: passed to callback
 * @eventq_no: an EVENTQ_* constant, used to choose which Transfer
 *	Controller (TC) executes requests using this channel.  Use
 *	EVENTQ_DEFAULT unless you really need a high priority queue.
 *
 * This allocates a DMA channel and its associated parameter RAM slot.
 * The parameter RAM is initialized to hold a dummy transfer.
 *
 * Normal use is to pass a specific channel number as @channel, to make
 * use of hardware events mapped to that channel.  When the channel will
 * be used only for software triggering or event chaining, channels not
 * mapped to hardware events (or mapped to unused events) are preferable.
 *
 * DMA transfers start from a channel using edma_start(), or by
 * chaining.  When the transfer described in that channel's parameter RAM
 * slot completes, that slot's data may be reloaded through a link.
 *
 * DMA errors are only reported to the @callback associated with the
 * channel driving that transfer, but transfer completion callbacks can
 * be sent to another channel under control of the TCC field in
 * the option word of the transfer's parameter RAM set.  Drivers must not
 * use DMA transfer completion callbacks for channels they did not allocate.
 * (The same applies to TCC codes used in transfer chaining.)
 *
 * Returns the number of the channel, else negative errno.
 */
int edma_alloc_channel(int channel,
		void (*callback)(unsigned channel, u16 ch_status, void *data),
		void *data,
		enum dma_event_q eventq_no)
{
	if (channel < 0) {
		channel = 0;
		for (;;) {
			channel = find_next_bit(edma_noevent,
					num_channels, channel);
			if (channel == num_channels)
				return -ENOMEM;
			if (!test_and_set_bit(channel, edma_inuse))
				break;
			channel++;
		}
	} else if (channel >= num_channels) {
		return -EINVAL;
	} else if (test_and_set_bit(channel, edma_inuse)) {
		return -EBUSY;
	}

	/* ensure access through shadow region 0 */
	edma_or_array2(EDMA_DRAE, 0, channel >> 5, 1 << (channel & 0x1f));

	/* ensure no events are pending */
	edma_stop(channel);
	memcpy_toio(edmacc_regs_base + PARM_OFFSET(channel),
			&dummy_paramset, PARM_SIZE);

	if (callback)
		setup_dma_interrupt(channel, callback, data);

	map_dmach_queue(channel, eventq_no);

	return channel;
}
EXPORT_SYMBOL(edma_alloc_channel);
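
/*
 * Illustrative sketch (not part of the driver): a client might allocate
 * and watch a channel as below.  MY_DEV_DMA_EVT, my_dev and my_callback
 * are hypothetical; on DaVinci the channel number normally comes from
 * the hardware event constant for the peripheral, in <mach/edma.h>.
 *
 *	static void my_callback(unsigned ch, u16 status, void *data)
 *	{
 *		if (status != DMA_COMPLETE)
 *			pr_err("edma error on channel %u\n", ch);
 *	}
 *
 *	int ch = edma_alloc_channel(MY_DEV_DMA_EVT, my_callback,
 *				my_dev, EVENTQ_DEFAULT);
 *	if (ch < 0)
 *		return ch;
 */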
/**
 * edma_free_channel - deallocate DMA channel
 * @channel: dma channel returned from edma_alloc_channel()
 *
 * This deallocates the DMA channel and associated parameter RAM slot
 * allocated by edma_alloc_channel().
 *
 * Callers are responsible for ensuring the channel is inactive, and
 * will not be reactivated by linking, chaining, or software calls to
 * edma_start().
 */
void edma_free_channel(unsigned channel)
{
	if (channel >= num_channels)
		return;

	setup_dma_interrupt(channel, NULL, NULL);
	/* REVISIT should probably take out of shadow region 0 */

	memcpy_toio(edmacc_regs_base + PARM_OFFSET(channel),
			&dummy_paramset, PARM_SIZE);
	clear_bit(channel, edma_inuse);
}
EXPORT_SYMBOL(edma_free_channel);
/**
 * edma_alloc_slot - allocate DMA parameter RAM
 * @slot: specific slot to allocate; negative for "any unused slot"
 *
 * This allocates a parameter RAM slot, initializing it to hold a
 * dummy transfer.  Slots allocated using this routine have not been
 * mapped to a hardware DMA channel, and will normally be used by
 * linking to them from a slot associated with a DMA channel.
 *
 * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
 * slots may be allocated on behalf of DSP firmware.
 *
 * Returns the number of the slot, else negative errno.
 */
int edma_alloc_slot(int slot)
{
	if (slot < 0) {
		slot = num_channels;
		for (;;) {
			slot = find_next_zero_bit(edma_inuse,
					num_slots, slot);
			if (slot == num_slots)
				return -ENOMEM;
			if (!test_and_set_bit(slot, edma_inuse))
				break;
		}
	} else if (slot < num_channels || slot >= num_slots) {
		return -EINVAL;
	} else if (test_and_set_bit(slot, edma_inuse)) {
		return -EBUSY;
	}

	memcpy_toio(edmacc_regs_base + PARM_OFFSET(slot),
			&dummy_paramset, PARM_SIZE);

	return slot;
}
EXPORT_SYMBOL(edma_alloc_slot);
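
/*
 * Illustrative sketch: allocating an extra slot to serve as a reload
 * target for a channel (see edma_link() below).  EDMA_SLOT_ANY comes
 * from <mach/edma.h>; "ch" would be a previously allocated channel.
 *
 *	int slot = edma_alloc_slot(EDMA_SLOT_ANY);
 *	if (slot < 0)
 *		return slot;
 */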
/**
 * edma_free_slot - deallocate DMA parameter RAM
 * @slot: parameter RAM slot returned from edma_alloc_slot()
 *
 * This deallocates the parameter RAM slot allocated by edma_alloc_slot().
 * Callers are responsible for ensuring the slot is inactive, and will
 * not be activated.
 */
void edma_free_slot(unsigned slot)
{
	if (slot < num_channels || slot >= num_slots)
		return;

	memcpy_toio(edmacc_regs_base + PARM_OFFSET(slot),
			&dummy_paramset, PARM_SIZE);
	clear_bit(slot, edma_inuse);
}
EXPORT_SYMBOL(edma_free_slot);
/*-----------------------------------------------------------------------*/

/* Parameter RAM operations (i) -- read/write partial slots */
/**
 * edma_set_src - set initial DMA source address in parameter RAM slot
 * @slot: parameter RAM slot being configured
 * @src_port: physical address of source (memory, controller FIFO, etc)
 * @mode: INCR, except in very rare cases
 * @width: ignored unless @mode is FIFO, else specifies the
 *	width to use when addressing the fifo (e.g. W8BIT, W32BIT)
 *
 * Note that the source address is modified during the DMA transfer
 * according to edma_set_src_index().
 */
void edma_set_src(unsigned slot, dma_addr_t src_port,
		enum address_mode mode, enum fifo_width width)
{
	if (slot < num_slots) {
		unsigned int i = edma_parm_read(PARM_OPT, slot);

		if (mode) {
			/* set SAM and program FWID */
			i = (i & ~(EDMA_FWID)) | (SAM | ((width & 0x7) << 8));
		} else {
			/* clear SAM */
			i &= ~SAM;
		}
		edma_parm_write(PARM_OPT, slot, i);

		/* set the source port address
		   in source register of param structure */
		edma_parm_write(PARM_SRC, slot, src_port);
	}
}
EXPORT_SYMBOL(edma_set_src);
/**
 * edma_set_dest - set initial DMA destination address in parameter RAM slot
 * @slot: parameter RAM slot being configured
 * @dest_port: physical address of destination (memory, controller FIFO, etc)
 * @mode: INCR, except in very rare cases
 * @width: ignored unless @mode is FIFO, else specifies the
 *	width to use when addressing the fifo (e.g. W8BIT, W32BIT)
 *
 * Note that the destination address is modified during the DMA transfer
 * according to edma_set_dest_index().
 */
void edma_set_dest(unsigned slot, dma_addr_t dest_port,
		enum address_mode mode, enum fifo_width width)
{
	if (slot < num_slots) {
		unsigned int i = edma_parm_read(PARM_OPT, slot);

		if (mode) {
			/* set DAM and program FWID */
			i = (i & ~(EDMA_FWID)) | (DAM | ((width & 0x7) << 8));
		} else {
			/* clear DAM */
			i &= ~DAM;
		}
		edma_parm_write(PARM_OPT, slot, i);
		/* set the destination port address
		   in dest register of param structure */
		edma_parm_write(PARM_DST, slot, dest_port);
	}
}
EXPORT_SYMBOL(edma_set_dest);
/**
 * edma_get_position - returns the current transfer points
 * @slot: parameter RAM slot being examined
 * @src: pointer to source port position
 * @dst: pointer to destination port position
 *
 * Returns current source and destination addresses for a particular
 * parameter RAM slot.  Its channel should not be active when this is called.
 */
void edma_get_position(unsigned slot, dma_addr_t *src, dma_addr_t *dst)
{
	struct edmacc_param temp;

	edma_read_slot(slot, &temp);
	if (src != NULL)
		*src = temp.src;
	if (dst != NULL)
		*dst = temp.dst;
}
EXPORT_SYMBOL(edma_get_position);
/**
 * edma_set_src_index - configure DMA source address indexing
 * @slot: parameter RAM slot being configured
 * @src_bidx: byte offset between source arrays in a frame
 * @src_cidx: byte offset between source frames in a block
 *
 * Offsets are specified to support either contiguous or discontiguous
 * memory transfers, or repeated access to a hardware register, as needed.
 * When accessing hardware registers, both offsets are normally zero.
 */
void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx)
{
	if (slot < num_slots) {
		edma_parm_modify(PARM_SRC_DST_BIDX, slot,
				0xffff0000, src_bidx);
		edma_parm_modify(PARM_SRC_DST_CIDX, slot,
				0xffff0000, src_cidx);
	}
}
EXPORT_SYMBOL(edma_set_src_index);
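
/*
 * Illustrative sketch: for an AB-synchronized transfer walking a
 * contiguous memory buffer, consecutive arrays are ACNT bytes apart
 * and consecutive frames start ACNT * BCNT bytes apart, so one would
 * typically program
 *
 *	edma_set_src_index(slot, acnt, acnt * bcnt);
 *
 * while a fixed peripheral FIFO on the other side of the transfer
 * would use (0, 0), per the note above.
 */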
/**
 * edma_set_dest_index - configure DMA destination address indexing
 * @slot: parameter RAM slot being configured
 * @dest_bidx: byte offset between destination arrays in a frame
 * @dest_cidx: byte offset between destination frames in a block
 *
 * Offsets are specified to support either contiguous or discontiguous
 * memory transfers, or repeated access to a hardware register, as needed.
 * When accessing hardware registers, both offsets are normally zero.
 */
void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx)
{
	if (slot < num_slots) {
		edma_parm_modify(PARM_SRC_DST_BIDX, slot,
				0x0000ffff, dest_bidx << 16);
		edma_parm_modify(PARM_SRC_DST_CIDX, slot,
				0x0000ffff, dest_cidx << 16);
	}
}
EXPORT_SYMBOL(edma_set_dest_index);
/**
 * edma_set_transfer_params - configure DMA transfer parameters
 * @slot: parameter RAM slot being configured
 * @acnt: how many bytes per array (at least one)
 * @bcnt: how many arrays per frame (at least one)
 * @ccnt: how many frames per block (at least one)
 * @bcnt_rld: used only for A-Synchronized transfers; this specifies
 *	the value to reload into bcnt when it decrements to zero
 * @sync_mode: ASYNC or ABSYNC
 *
 * See the EDMA3 documentation to understand how to configure and link
 * transfers using the fields in PaRAM slots.  If you are not doing it
 * all at once with edma_write_slot(), you will use this routine
 * plus two calls each for source and destination, setting the initial
 * address and saying how to index that address.
 *
 * An example of an A-Synchronized transfer is a serial link using a
 * single word shift register.  In that case, @acnt would be equal to
 * that word size; the serial controller issues a DMA synchronization
 * event to transfer each word, and memory access by the DMA transfer
 * controller will be word-at-a-time.
 *
 * An example of an AB-Synchronized transfer is a device using a FIFO.
 * In that case, @acnt equals the FIFO width and @bcnt equals its depth.
 * The controller with the FIFO issues DMA synchronization events when
 * the FIFO threshold is reached, and the DMA transfer controller will
 * transfer one frame to (or from) the FIFO.  It will probably use
 * efficient burst modes to access memory.
 */
void edma_set_transfer_params(unsigned slot,
		u16 acnt, u16 bcnt, u16 ccnt,
		u16 bcnt_rld, enum sync_dimension sync_mode)
{
	if (slot < num_slots) {
		edma_parm_modify(PARM_LINK_BCNTRLD, slot,
				0x0000ffff, bcnt_rld << 16);
		if (sync_mode == ASYNC)
			edma_parm_and(PARM_OPT, slot, ~SYNCDIM);
		else
			edma_parm_or(PARM_OPT, slot, SYNCDIM);
		/* Set the acount, bcount, ccount registers */
		edma_parm_write(PARM_A_B_CNT, slot, (bcnt << 16) | acnt);
		edma_parm_write(PARM_CCNT, slot, ccnt);
	}
}
EXPORT_SYMBOL(edma_set_transfer_params);
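
/*
 * Illustrative sketch: programming a channel's slot for an
 * AB-synchronized write of a memory buffer to a 32-bit peripheral
 * FIFO, 16 words per event.  "ch", "buf_dma", "fifo_dma" and
 * "nframes" are hypothetical values owned by the client driver.
 *
 *	edma_set_src(ch, buf_dma, INCR, W32BIT);
 *	edma_set_src_index(ch, 4, 4 * 16);
 *	edma_set_dest(ch, fifo_dma, INCR, W32BIT);
 *	edma_set_dest_index(ch, 0, 0);
 *	edma_set_transfer_params(ch, 4, 16, nframes, 0, ABSYNC);
 */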
/**
 * edma_link - link one parameter RAM slot to another
 * @from: parameter RAM slot originating the link
 * @to: parameter RAM slot which is the link target
 *
 * The originating slot should not be part of any active DMA transfer.
 */
void edma_link(unsigned from, unsigned to)
{
	if (from >= num_slots)
		return;
	if (to >= num_slots)
		return;
	edma_parm_modify(PARM_LINK_BCNTRLD, from, 0xffff0000, PARM_OFFSET(to));
}
EXPORT_SYMBOL(edma_link);
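
/*
 * Illustrative sketch: a common pattern links extra slots in a ring so
 * the channel's slot reloads automatically, e.g. for audio ping-pong
 * buffering.  "ch" is an allocated channel; "ping" and "pong" are
 * hypothetical slots from edma_alloc_slot(), each fully configured
 * before linking:
 *
 *	edma_link(ch, ping);
 *	edma_link(ping, pong);
 *	edma_link(pong, ping);
 */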
/**
 * edma_unlink - cut link from one parameter RAM slot
 * @from: parameter RAM slot originating the link
 *
 * The originating slot should not be part of any active DMA transfer.
 * Its link is set to 0xffff.
 */
void edma_unlink(unsigned from)
{
	if (from >= num_slots)
		return;
	edma_parm_or(PARM_LINK_BCNTRLD, from, 0xffff);
}
EXPORT_SYMBOL(edma_unlink);
/*-----------------------------------------------------------------------*/

/* Parameter RAM operations (ii) -- read/write whole parameter sets */
/**
 * edma_write_slot - write parameter RAM data for slot
 * @slot: number of parameter RAM slot being modified
 * @param: data to be written into parameter RAM slot
 *
 * Use this to assign all parameters of a transfer at once.  This
 * allows more efficient setup of transfers than issuing multiple
 * calls to set up those parameters in small pieces, and provides
 * complete control over all transfer options.
 */
void edma_write_slot(unsigned slot, const struct edmacc_param *param)
{
	if (slot >= num_slots)
		return;
	memcpy_toio(edmacc_regs_base + PARM_OFFSET(slot), param, PARM_SIZE);
}
EXPORT_SYMBOL(edma_write_slot);
/**
 * edma_read_slot - read parameter RAM data from slot
 * @slot: number of parameter RAM slot being copied
 * @param: where to store copy of parameter RAM data
 *
 * Use this to read data from a parameter RAM slot, perhaps to
 * save it as a template for later reuse.
 */
void edma_read_slot(unsigned slot, struct edmacc_param *param)
{
	if (slot >= num_slots)
		return;
	memcpy_fromio(param, edmacc_regs_base + PARM_OFFSET(slot), PARM_SIZE);
}
EXPORT_SYMBOL(edma_read_slot);
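
/*
 * Illustrative sketch: saving a configured slot as a template and
 * stamping it into another slot, patching only the source address.
 * "ch", "ping" and "next_buf_dma" are hypothetical; src and dst are
 * fields of struct edmacc_param (see edma_get_position() above).
 *
 *	struct edmacc_param tmpl;
 *
 *	edma_read_slot(ch, &tmpl);
 *	tmpl.src = next_buf_dma;
 *	edma_write_slot(ping, &tmpl);
 */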
/*-----------------------------------------------------------------------*/

/* Various EDMA channel control operations */
/**
 * edma_pause - pause dma on a channel
 * @channel: on which edma_start() has been called
 *
 * This temporarily disables EDMA hardware events on the specified channel,
 * preventing them from triggering new transfers on its behalf.
 */
void edma_pause(unsigned channel)
{
	if (channel < num_channels) {
		unsigned int mask = (1 << (channel & 0x1f));

		edma_shadow0_write_array(SH_EECR, channel >> 5, mask);
	}
}
EXPORT_SYMBOL(edma_pause);

/**
 * edma_resume - resumes dma on a paused channel
 * @channel: on which edma_pause() has been called
 *
 * This re-enables EDMA hardware events on the specified channel.
 */
void edma_resume(unsigned channel)
{
	if (channel < num_channels) {
		unsigned int mask = (1 << (channel & 0x1f));

		edma_shadow0_write_array(SH_EESR, channel >> 5, mask);
	}
}
EXPORT_SYMBOL(edma_resume);
/**
 * edma_start - start dma on a channel
 * @channel: channel being activated
 *
 * Channels with event associations will be triggered by their hardware
 * events, and channels without such associations will be triggered by
 * software.  (At this writing there is no interface for using software
 * triggers except with channels that don't support hardware triggers.)
 *
 * Returns zero on success, else negative errno.
 */
int edma_start(unsigned channel)
{
	if (channel < num_channels) {
		int j = channel >> 5;
		unsigned int mask = (1 << (channel & 0x1f));

		/* EDMA channels without event association */
		if (test_bit(channel, edma_noevent)) {
			pr_debug("EDMA: ESR%d %08x\n", j,
				edma_shadow0_read_array(SH_ESR, j));
			edma_shadow0_write_array(SH_ESR, j, mask);
			return 0;
		}

		/* EDMA channel with event association */
		pr_debug("EDMA: ER%d %08x\n", j,
			edma_shadow0_read_array(SH_ER, j));
		/* Clear any pending error */
		edma_write_array(EDMA_EMCR, j, mask);
		/* Clear any SER */
		edma_shadow0_write_array(SH_SECR, j, mask);
		edma_shadow0_write_array(SH_EESR, j, mask);
		pr_debug("EDMA: EER%d %08x\n", j,
			edma_shadow0_read_array(SH_EER, j));
		return 0;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(edma_start);
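
/*
 * Illustrative sketch of a channel's lifecycle on the client side,
 * using only calls defined in this file; "ch" comes from
 * edma_alloc_channel().  After edma_stop(), the channel's PaRAM slot
 * must be reprogrammed before the channel is started again.
 *
 *	if (edma_start(ch) < 0)
 *		goto err;
 *	...
 *	edma_pause(ch);
 *	edma_resume(ch);
 *	...
 *	edma_stop(ch);
 *	edma_free_channel(ch);
 */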
/**
 * edma_stop - stops dma on the channel passed
 * @channel: channel being deactivated
 *
 * Any active transfer on @channel is paused and all pending hardware
 * events are cleared.  The current transfer may not be resumed, and
 * the channel's Parameter RAM should be reinitialized before being
 * reused.
 */
void edma_stop(unsigned channel)
{
	if (channel < num_channels) {
		int j = channel >> 5;
		unsigned int mask = (1 << (channel & 0x1f));

		edma_shadow0_write_array(SH_EECR, j, mask);
		edma_shadow0_write_array(SH_ECR, j, mask);
		edma_shadow0_write_array(SH_SECR, j, mask);
		edma_write_array(EDMA_EMCR, j, mask);

		pr_debug("EDMA: EER%d %08x\n", j,
				edma_shadow0_read_array(SH_EER, j));

		/* REVISIT:  consider guarding against inappropriate event
		 * chaining by overwriting with dummy_paramset.
		 */
	}
}
EXPORT_SYMBOL(edma_stop);
/******************************************************************************
 *
 * It cleans the ParamEntry and brings the EDMA back to its initial state
 * if media has been removed before the EDMA has finished.  It is useful
 * for removable media.
 * Arguments:
 *	channel - channel number
 *
 * Return: zero on success, or corresponding error no on failure
 *
 * FIXME this should not be needed ... edma_stop() should suffice.
 *
 *****************************************************************************/

void edma_clean_channel(unsigned channel)
{
	if (channel < num_channels) {
		int j = (channel >> 5);
		unsigned int mask = 1 << (channel & 0x1f);

		pr_debug("EDMA: EMR%d %08x\n", j,
				edma_read_array(EDMA_EMR, j));
		edma_shadow0_write_array(SH_ECR, j, mask);
		/* Clear the corresponding EMR bits */
		edma_write_array(EDMA_EMCR, j, mask);
		/* Clear any SER */
		edma_shadow0_write_array(SH_SECR, j, mask);
		edma_write(EDMA_CCERRCLR, (1 << 16) | 0x3);
	}
}
EXPORT_SYMBOL(edma_clean_channel);
/**
 * edma_clear_event - clear an outstanding event on the DMA channel
 * @channel: channel number
 */
void edma_clear_event(unsigned channel)
{
	if (channel >= num_channels)
		return;
	if (channel < 32)
		edma_write(EDMA_ECR, 1 << channel);
	else
		edma_write(EDMA_ECRH, 1 << (channel - 32));
}
EXPORT_SYMBOL(edma_clear_event);
/*-----------------------------------------------------------------------*/

static int __init edma_probe(struct platform_device *pdev)
{
	struct edma_soc_info *info = pdev->dev.platform_data;
	int i;
	int status;
	const s8 *noevent;
	int irq = 0, err_irq = 0;
	struct resource *r;
	resource_size_t len;

	if (!info)
		return -ENODEV;

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma_cc");
	if (!r)
		return -ENODEV;

	len = r->end - r->start + 1;

	r = request_mem_region(r->start, len, r->name);
	if (!r)
		return -EBUSY;

	edmacc_regs_base = ioremap(r->start, len);
	if (!edmacc_regs_base) {
		status = -EBUSY;
		goto fail1;
	}

	num_channels = min_t(unsigned, info->n_channel, EDMA_MAX_DMACH);
	num_slots = min_t(unsigned, info->n_slot, EDMA_MAX_PARAMENTRY);

	dev_dbg(&pdev->dev, "DMA REG BASE ADDR=%p\n", edmacc_regs_base);

	for (i = 0; i < num_slots; i++)
		memcpy_toio(edmacc_regs_base + PARM_OFFSET(i),
				&dummy_paramset, PARM_SIZE);

	noevent = info->noevent;
	if (noevent) {
		while (*noevent != -1)
			set_bit(*noevent++, edma_noevent);
	}

	irq = platform_get_irq(pdev, 0);
	status = request_irq(irq, dma_irq_handler, 0, "edma", &pdev->dev);
	if (status < 0) {
		dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
			irq, status);
		goto fail;
	}

	err_irq = platform_get_irq(pdev, 1);
	status = request_irq(err_irq, dma_ccerr_handler, 0,
				"edma_error", &pdev->dev);
	if (status < 0) {
		dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
			err_irq, status);
		goto fail;
	}

	if (tc_errs_handled) {
		status = request_irq(IRQ_TCERRINT0, dma_tc0err_handler, 0,
					"edma_tc0", &pdev->dev);
		if (status < 0) {
			dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
				IRQ_TCERRINT0, status);
			return status;
		}
		status = request_irq(IRQ_TCERRINT, dma_tc1err_handler, 0,
					"edma_tc1", &pdev->dev);
		if (status < 0) {
			dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
				IRQ_TCERRINT, status);
			return status;
		}
	}

	/* Everything lives on transfer controller 1 until otherwise
	 * specified.  This way, long transfers on the low priority queue
	 * started by the codec engine will not cause audio defects.
	 */
	for (i = 0; i < num_channels; i++)
		map_dmach_queue(i, EVENTQ_1);

	/* Event queue to TC mapping */
	for (i = 0; queue_tc_mapping[i][0] != -1; i++)
		map_queue_tc(queue_tc_mapping[i][0], queue_tc_mapping[i][1]);

	/* Event queue priority mapping */
	for (i = 0; queue_priority_mapping[i][0] != -1; i++)
		assign_priority_to_queue(queue_priority_mapping[i][0],
				queue_priority_mapping[i][1]);

	for (i = 0; i < info->n_region; i++) {
		edma_write_array2(EDMA_DRAE, i, 0, 0x0);
		edma_write_array2(EDMA_DRAE, i, 1, 0x0);
		edma_write_array(EDMA_QRAE, i, 0x0);
	}

	return 0;

fail:
	if (err_irq)
		free_irq(err_irq, &pdev->dev);
	if (irq)
		free_irq(irq, &pdev->dev);
	iounmap(edmacc_regs_base);
fail1:
	release_mem_region(r->start, len);
	return status;
}
static struct platform_driver edma_driver = {
	.driver.name	= "edma",
};

static int __init edma_init(void)
{
	return platform_driver_probe(&edma_driver, edma_probe);
}
arch_initcall(edma_init);
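
/*
 * Illustrative sketch (SoC setup code, not part of this file): the
 * driver binds to a platform device named "edma" whose platform_data
 * is a struct edma_soc_info.  The field values below are hypothetical,
 * and dm644x_edma_resources would carry the "edma_cc" memory region
 * plus the two interrupts this probe requests.
 *
 *	static const s8 dma_chan_no_event[] = { 56, 57, -1 };
 *
 *	static struct edma_soc_info edma_info = {
 *		.n_channel	= 64,
 *		.n_slot		= 128,
 *		.n_region	= 4,
 *		.noevent	= dma_chan_no_event,
 *	};
 *
 *	static struct platform_device edma_device = {
 *		.name		= "edma",
 *		.id		= -1,
 *		.dev.platform_data = &edma_info,
 *		.resource	= dm644x_edma_resources,
 *		.num_resources	= ARRAY_SIZE(dm644x_edma_resources),
 *	};
 */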