/*
 * BRIEF MODULE DESCRIPTION
 *	A DMA channel allocator for Au1x00. API is modeled loosely off of
 *	linux/kernel/dma.c.
 *
 * Copyright 2000, 2008 MontaVista Software Inc.
 * Author: MontaVista Software, Inc. <source@mvista.com>
 * Copyright (C) 2005 Ralf Baechle (ralf@linux-mips.org)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1000_dma.h>
/*
 * A note on resource allocation:
 *
 * All drivers needing DMA channels should allocate and release them
 * through the public routines `request_dma()' and `free_dma()'.
 *
 * In order to avoid problems, all processes should allocate resources in
 * the same sequence and release them in the reverse order.
 *
 * So, when allocating DMAs and IRQs, first allocate the DMA, then the IRQ.
 * When releasing them, first release the IRQ, then release the DMA. The
 * main reason for this order is that, if you are requesting the DMA buffer
 * done interrupt, you won't know the IRQ number until the DMA channel is
 * returned from request_dma. An illustrative sketch follows below.
 */
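/*
 * Illustrative sketch only (not part of the original driver): a driver
 * that wants the "buffer done" interrupt would typically do something
 * like the following. "my_dma_done_isr" and "my_dev" are hypothetical
 * names; device ID 0 is UART0_TX per dma_dev_table below.
 *
 *	int ch = request_au1000_dma(0, "my_uart0_tx",
 *				    my_dma_done_isr, 0, my_dev);
 *	if (ch < 0)
 *		return ch;		// no free channel or bad dev_id
 *	...
 *	free_au1000_dma(ch);		// releases the done IRQ, then the channel
 *
 * Note that request_au1000_dma() itself requests the per-channel IRQ when
 * an irqhandler is passed, and free_au1000_dma() releases that IRQ before
 * marking the channel free, which matches the ordering described above.
 */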
/* DMA Channel register block spacing */
#define DMA_CHANNEL_LEN		0x00000100
DEFINE_SPINLOCK(au1000_dma_spin_lock);
struct dma_chan au1000_dma_table[NUM_AU1000_DMA_CHANNELS] = {
	/* mark every channel free; request_au1000_dma() looks for dev_id < 0 */
	[0 ... NUM_AU1000_DMA_CHANNELS - 1] = { .dev_id = -1, },
};
EXPORT_SYMBOL(au1000_dma_table);
/* Device FIFO addresses and default DMA modes */
static const struct dma_dev {
	unsigned int fifo_addr;
	unsigned int dma_mode;
} dma_dev_table[DMA_NUM_DEV] = {
	{ AU1000_UART0_PHYS_ADDR + 0x04, DMA_DW8 },			/* UART0_TX */
	{ AU1000_UART0_PHYS_ADDR + 0x00, DMA_DW8 | DMA_DR },		/* UART0_RX */
	{ 0, 0 },							/* DMA_REQ0 */
	{ 0, 0 },							/* DMA_REQ1 */
	{ AU1000_AC97_PHYS_ADDR + 0x08, DMA_DW16 },			/* AC97 TX c */
	{ AU1000_AC97_PHYS_ADDR + 0x08, DMA_DW16 | DMA_DR },		/* AC97 RX c */
	{ AU1000_UART3_PHYS_ADDR + 0x04, DMA_DW8 | DMA_NC },		/* UART3_TX */
	{ AU1000_UART3_PHYS_ADDR + 0x00, DMA_DW8 | DMA_NC | DMA_DR },	/* UART3_RX */
	{ AU1000_USB_UDC_PHYS_ADDR + 0x00, DMA_DW8 | DMA_NC | DMA_DR }, /* EP0RD */
	{ AU1000_USB_UDC_PHYS_ADDR + 0x04, DMA_DW8 | DMA_NC },		/* EP0WR */
	{ AU1000_USB_UDC_PHYS_ADDR + 0x08, DMA_DW8 | DMA_NC },		/* EP2WR */
	{ AU1000_USB_UDC_PHYS_ADDR + 0x0c, DMA_DW8 | DMA_NC },		/* EP3WR */
	{ AU1000_USB_UDC_PHYS_ADDR + 0x10, DMA_DW8 | DMA_NC | DMA_DR }, /* EP4RD */
	{ AU1000_USB_UDC_PHYS_ADDR + 0x14, DMA_DW8 | DMA_NC | DMA_DR }, /* EP5RD */
	/* on Au1500, these 2 are DMA_REQ2/3 (GPIO208/209) instead! */
	{ AU1000_I2S_PHYS_ADDR + 0x00, DMA_DW32 | DMA_NC },		/* I2S TX */
	{ AU1000_I2S_PHYS_ADDR + 0x00, DMA_DW32 | DMA_NC | DMA_DR },	/* I2S RX */
};
int au1000_dma_read_proc(char *buf, char **start, off_t fpos,
			 int length, int *eof, void *data)
{
	int i, len = 0;
	struct dma_chan *chan;

	/* list each allocated channel and the device string bound to it */
	for (i = 0; i < NUM_AU1000_DMA_CHANNELS; i++) {
		chan = get_dma_chan(i);
		if (chan != NULL)
			len += sprintf(buf + len, "%2d: %s\n",
				       i, chan->dev_str);
	}

	*eof = 1;
	return len;
}
/* Device FIFO addresses and default DMA modes - 2nd bank */
static const struct dma_dev dma_dev_table_bank2[DMA_NUM_DEV_BANK2] = {
	{ AU1100_SD0_PHYS_ADDR + 0x00, DMA_DS | DMA_DW8 },		/* coherent */
	{ AU1100_SD0_PHYS_ADDR + 0x04, DMA_DS | DMA_DW8 | DMA_DR },	/* coherent */
	{ AU1100_SD1_PHYS_ADDR + 0x00, DMA_DS | DMA_DW8 },		/* coherent */
	{ AU1100_SD1_PHYS_ADDR + 0x04, DMA_DS | DMA_DW8 | DMA_DR }	/* coherent */
};
void dump_au1000_dma_channel(unsigned int dmanr)
{
	struct dma_chan *chan;

	if (dmanr >= NUM_AU1000_DMA_CHANNELS)
		return;
	chan = &au1000_dma_table[dmanr];

	printk(KERN_INFO "Au1000 DMA%d Register Dump:\n", dmanr);
	printk(KERN_INFO "  mode = 0x%08x\n",
	       au_readl(chan->io + DMA_MODE_SET));
	printk(KERN_INFO "  addr = 0x%08x\n",
	       au_readl(chan->io + DMA_PERIPHERAL_ADDR));
	printk(KERN_INFO "  start0 = 0x%08x\n",
	       au_readl(chan->io + DMA_BUFFER0_START));
	printk(KERN_INFO "  start1 = 0x%08x\n",
	       au_readl(chan->io + DMA_BUFFER1_START));
	printk(KERN_INFO "  count0 = 0x%08x\n",
	       au_readl(chan->io + DMA_BUFFER0_COUNT));
	printk(KERN_INFO "  count1 = 0x%08x\n",
	       au_readl(chan->io + DMA_BUFFER1_COUNT));
}
/*
 * Finds a free channel and binds the requested device to it.
 * Returns the allocated channel number, or negative on error.
 * Requests the DMA done IRQ if irqhandler != NULL.
 * (See the usage sketch following this function.)
 */
int request_au1000_dma(int dev_id, const char *dev_str,
		       irq_handler_t irqhandler,
		       unsigned long irqflags,
		       void *irq_dev_id)
{
	struct dma_chan *chan;
	const struct dma_dev *dev;
	int i, ret;
	if (alchemy_get_cputype() == ALCHEMY_CPU_AU1100) {
		if (dev_id < 0 || dev_id >= (DMA_NUM_DEV + DMA_NUM_DEV_BANK2))
			return -EINVAL;
	} else {
		if (dev_id < 0 || dev_id >= DMA_NUM_DEV)
			return -EINVAL;
	}

	/* find a free channel */
	for (i = 0; i < NUM_AU1000_DMA_CHANNELS; i++)
		if (au1000_dma_table[i].dev_id < 0)
			break;

	if (i == NUM_AU1000_DMA_CHANNELS)
		return -ENODEV;
	chan = &au1000_dma_table[i];

	if (dev_id >= DMA_NUM_DEV) {
		dev_id -= DMA_NUM_DEV;
		dev = &dma_dev_table_bank2[dev_id];
	} else
		dev = &dma_dev_table[dev_id];

	if (irqhandler) {
		chan->irq_dev = irq_dev_id;
		ret = request_irq(chan->irq, irqhandler, irqflags, dev_str,
				  chan->irq_dev);
		if (ret) {
			chan->irq_dev = NULL;
			return ret;
		}
	} else
		chan->irq_dev = NULL;
	chan->io = KSEG1ADDR(AU1000_DMA_PHYS_ADDR) + i * DMA_CHANNEL_LEN;
	chan->dev_id = dev_id;
	chan->dev_str = dev_str;
	chan->fifo_addr = dev->fifo_addr;
	chan->mode = dev->dma_mode;
	/* initialize the channel before returning */
	init_dma(i);

	return i;
}
EXPORT_SYMBOL(request_au1000_dma);
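/*
 * Hypothetical usage sketch (not part of the original file): claim a
 * channel for a bank-2 device on Au1100. Device IDs at or above
 * DMA_NUM_DEV select dma_dev_table_bank2[]; everything below selects
 * dma_dev_table[]. "sd0_isr", "example_claim_sd0" and the cookie are
 * made-up names for illustration only.
 */
#if 0
static int example_claim_sd0(void *cookie, irq_handler_t sd0_isr)
{
	int ch;

	/* DMA_NUM_DEV + 0 is the first bank-2 entry (SD0, coherent) */
	ch = request_au1000_dma(DMA_NUM_DEV + 0, "sd0",
				sd0_isr, 0, cookie);
	if (ch < 0)
		return ch;		/* -EINVAL or -ENODEV */

	dump_au1000_dma_channel(ch);	/* optional: log the register state */

	free_au1000_dma(ch);		/* IRQ released before the channel */
	return 0;
}
#endif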
void free_au1000_dma(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan) {
		printk(KERN_ERR "Error trying to free DMA%d\n", dmanr);
		return;
	}

	disable_dma(dmanr);
	if (chan->irq_dev)
		free_irq(chan->irq, chan->irq_dev);

	chan->irq_dev = NULL;
	chan->dev_id = -1;
}
EXPORT_SYMBOL(free_au1000_dma);
static int __init au1000_dma_init(void)
{
	int base, i;

	switch (alchemy_get_cputype()) {
	case ALCHEMY_CPU_AU1000:
		base = AU1000_DMA_INT_BASE;
		break;
	case ALCHEMY_CPU_AU1500:
		base = AU1500_DMA_INT_BASE;
		break;
	case ALCHEMY_CPU_AU1100:
		base = AU1100_DMA_INT_BASE;
		break;
	default:
		return 0;
	}
	for (i = 0; i < NUM_AU1000_DMA_CHANNELS; i++)
		au1000_dma_table[i].irq = base + i;
	printk(KERN_INFO "Alchemy DMA initialized\n");

	return 0;
}
arch_initcall(au1000_dma_init);