/* arch/mips/alchemy/common/dma.c */
/*
 * BRIEF MODULE DESCRIPTION
 *	A DMA channel allocator for Au1x00. API is modeled loosely off of
 *	linux/kernel/dma.c.
 *
 * Copyright 2000, 2008 MontaVista Software Inc.
 * Author: MontaVista Software, Inc. <source@mvista.com>
 * Copyright (C) 2005 Ralf Baechle (ralf@linux-mips.org)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1000_dma.h>
43 #if defined(CONFIG_SOC_AU1000) || defined(CONFIG_SOC_AU1500) || \
44 defined(CONFIG_SOC_AU1100)
46 * A note on resource allocation:
48 * All drivers needing DMA channels, should allocate and release them
49 * through the public routines `request_dma()' and `free_dma()'.
51 * In order to avoid problems, all processes should allocate resources in
52 * the same sequence and release them in the reverse order.
54 * So, when allocating DMAs and IRQs, first allocate the DMA, then the IRQ.
55 * When releasing them, first release the IRQ, then release the DMA. The
56 * main reason for this order is that, if you are requesting the DMA buffer
57 * done interrupt, you won't know the irq number until the DMA channel is
58 * returned from request_dma.
61 DEFINE_SPINLOCK(au1000_dma_spin_lock);
63 struct dma_chan au1000_dma_table[NUM_AU1000_DMA_CHANNELS] = {
64 {.dev_id = -1,},
65 {.dev_id = -1,},
66 {.dev_id = -1,},
67 {.dev_id = -1,},
68 {.dev_id = -1,},
69 {.dev_id = -1,},
70 {.dev_id = -1,},
71 {.dev_id = -1,}
73 EXPORT_SYMBOL(au1000_dma_table);
75 /* Device FIFO addresses and default DMA modes */
76 static const struct dma_dev {
77 unsigned int fifo_addr;
78 unsigned int dma_mode;
79 } dma_dev_table[DMA_NUM_DEV] = {
80 {UART0_ADDR + UART_TX, 0},
81 {UART0_ADDR + UART_RX, 0},
82 {0, 0},
83 {0, 0},
84 {AC97C_DATA, DMA_DW16 }, /* coherent */
85 {AC97C_DATA, DMA_DR | DMA_DW16 }, /* coherent */
86 {UART3_ADDR + UART_TX, DMA_DW8 | DMA_NC},
87 {UART3_ADDR + UART_RX, DMA_DR | DMA_DW8 | DMA_NC},
88 {USBD_EP0RD, DMA_DR | DMA_DW8 | DMA_NC},
89 {USBD_EP0WR, DMA_DW8 | DMA_NC},
90 {USBD_EP2WR, DMA_DW8 | DMA_NC},
91 {USBD_EP3WR, DMA_DW8 | DMA_NC},
92 {USBD_EP4RD, DMA_DR | DMA_DW8 | DMA_NC},
93 {USBD_EP5RD, DMA_DR | DMA_DW8 | DMA_NC},
94 {I2S_DATA, DMA_DW32 | DMA_NC},
95 {I2S_DATA, DMA_DR | DMA_DW32 | DMA_NC}
98 int au1000_dma_read_proc(char *buf, char **start, off_t fpos,
99 int length, int *eof, void *data)
101 int i, len = 0;
102 struct dma_chan *chan;
104 for (i = 0; i < NUM_AU1000_DMA_CHANNELS; i++) {
105 chan = get_dma_chan(i);
106 if (chan != NULL)
107 len += sprintf(buf + len, "%2d: %s\n",
108 i, chan->dev_str);
111 if (fpos >= len) {
112 *start = buf;
113 *eof = 1;
114 return 0;
116 *start = buf + fpos;
117 len -= fpos;
118 if (len > length)
119 return length;
120 *eof = 1;
121 return len;
124 /* Device FIFO addresses and default DMA modes - 2nd bank */
125 static const struct dma_dev dma_dev_table_bank2[DMA_NUM_DEV_BANK2] = {
126 { SD0_XMIT_FIFO, DMA_DS | DMA_DW8 }, /* coherent */
127 { SD0_RECV_FIFO, DMA_DS | DMA_DR | DMA_DW8 }, /* coherent */
128 { SD1_XMIT_FIFO, DMA_DS | DMA_DW8 }, /* coherent */
129 { SD1_RECV_FIFO, DMA_DS | DMA_DR | DMA_DW8 } /* coherent */
132 void dump_au1000_dma_channel(unsigned int dmanr)
134 struct dma_chan *chan;
136 if (dmanr >= NUM_AU1000_DMA_CHANNELS)
137 return;
138 chan = &au1000_dma_table[dmanr];
140 printk(KERN_INFO "Au1000 DMA%d Register Dump:\n", dmanr);
141 printk(KERN_INFO " mode = 0x%08x\n",
142 au_readl(chan->io + DMA_MODE_SET));
143 printk(KERN_INFO " addr = 0x%08x\n",
144 au_readl(chan->io + DMA_PERIPHERAL_ADDR));
145 printk(KERN_INFO " start0 = 0x%08x\n",
146 au_readl(chan->io + DMA_BUFFER0_START));
147 printk(KERN_INFO " start1 = 0x%08x\n",
148 au_readl(chan->io + DMA_BUFFER1_START));
149 printk(KERN_INFO " count0 = 0x%08x\n",
150 au_readl(chan->io + DMA_BUFFER0_COUNT));
151 printk(KERN_INFO " count1 = 0x%08x\n",
152 au_readl(chan->io + DMA_BUFFER1_COUNT));
156 * Finds a free channel, and binds the requested device to it.
157 * Returns the allocated channel number, or negative on error.
158 * Requests the DMA done IRQ if irqhandler != NULL.
160 int request_au1000_dma(int dev_id, const char *dev_str,
161 irq_handler_t irqhandler,
162 unsigned long irqflags,
163 void *irq_dev_id)
165 struct dma_chan *chan;
166 const struct dma_dev *dev;
167 int i, ret;
169 #if defined(CONFIG_SOC_AU1100)
170 if (dev_id < 0 || dev_id >= (DMA_NUM_DEV + DMA_NUM_DEV_BANK2))
171 return -EINVAL;
172 #else
173 if (dev_id < 0 || dev_id >= DMA_NUM_DEV)
174 return -EINVAL;
175 #endif
177 for (i = 0; i < NUM_AU1000_DMA_CHANNELS; i++)
178 if (au1000_dma_table[i].dev_id < 0)
179 break;
181 if (i == NUM_AU1000_DMA_CHANNELS)
182 return -ENODEV;
184 chan = &au1000_dma_table[i];
186 if (dev_id >= DMA_NUM_DEV) {
187 dev_id -= DMA_NUM_DEV;
188 dev = &dma_dev_table_bank2[dev_id];
189 } else
190 dev = &dma_dev_table[dev_id];
192 if (irqhandler) {
193 chan->irq_dev = irq_dev_id;
194 ret = request_irq(chan->irq, irqhandler, irqflags, dev_str,
195 chan->irq_dev);
196 if (ret) {
197 chan->irq_dev = NULL;
198 return ret;
200 } else {
201 chan->irq_dev = NULL;
204 /* fill it in */
205 chan->io = DMA_CHANNEL_BASE + i * DMA_CHANNEL_LEN;
206 chan->dev_id = dev_id;
207 chan->dev_str = dev_str;
208 chan->fifo_addr = dev->fifo_addr;
209 chan->mode = dev->dma_mode;
211 /* initialize the channel before returning */
212 init_dma(i);
214 return i;
216 EXPORT_SYMBOL(request_au1000_dma);
218 void free_au1000_dma(unsigned int dmanr)
220 struct dma_chan *chan = get_dma_chan(dmanr);
222 if (!chan) {
223 printk(KERN_ERR "Error trying to free DMA%d\n", dmanr);
224 return;
227 disable_dma(dmanr);
228 if (chan->irq_dev)
229 free_irq(chan->irq, chan->irq_dev);
231 chan->irq_dev = NULL;
232 chan->dev_id = -1;
234 EXPORT_SYMBOL(free_au1000_dma);
236 static int __init au1000_dma_init(void)
238 int base, i;
240 switch (alchemy_get_cputype()) {
241 case ALCHEMY_CPU_AU1000:
242 base = AU1000_DMA_INT_BASE;
243 break;
244 case ALCHEMY_CPU_AU1500:
245 base = AU1500_DMA_INT_BASE;
246 break;
247 case ALCHEMY_CPU_AU1100:
248 base = AU1100_DMA_INT_BASE;
249 break;
250 default:
251 goto out;
254 for (i = 0; i < NUM_AU1000_DMA_CHANNELS; i++)
255 au1000_dma_table[i].irq = base + i;
257 printk(KERN_INFO "Alchemy DMA initialized\n");
259 out:
260 return 0;
262 arch_initcall(au1000_dma_init);
264 #endif /* AU1000 AU1500 AU1100 */