2 * arch/arm/mach-ep93xx/dma-m2p.c
3 * M2P DMA handling for Cirrus EP93xx chips.
5 * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
6 * Copyright (C) 2006 Applied Data Systems
8 * Copyright (C) 2009 Ryan Mallon <ryan@bluewatersys.com>
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or (at
13 * your option) any later version.
 * On the EP93xx chip the following peripherals may be allocated to the 10
18 * Memory to Internal Peripheral (M2P) channels (5 transmit + 5 receive).
20 * I2S contains 3 Tx and 3 Rx DMA Channels
21 * AAC contains 3 Tx and 3 Rx DMA Channels
22 * UART1 contains 1 Tx and 1 Rx DMA Channels
23 * UART2 contains 1 Tx and 1 Rx DMA Channels
24 * UART3 contains 1 Tx and 1 Rx DMA Channels
25 * IrDA contains 1 Tx and 1 Rx DMA Channels
27 * SSP and IDE use the Memory to Memory (M2M) channels and are not covered
28 * with this implementation.
31 #include <linux/kernel.h>
32 #include <linux/clk.h>
33 #include <linux/err.h>
34 #include <linux/interrupt.h>
35 #include <linux/module.h>
38 #include <mach/hardware.h>
40 #define M2P_CONTROL 0x00
41 #define M2P_CONTROL_STALL_IRQ_EN (1 << 0)
42 #define M2P_CONTROL_NFB_IRQ_EN (1 << 1)
43 #define M2P_CONTROL_ERROR_IRQ_EN (1 << 3)
44 #define M2P_CONTROL_ENABLE (1 << 4)
45 #define M2P_INTERRUPT 0x04
46 #define M2P_INTERRUPT_STALL (1 << 0)
47 #define M2P_INTERRUPT_NFB (1 << 1)
48 #define M2P_INTERRUPT_ERROR (1 << 3)
49 #define M2P_PPALLOC 0x08
50 #define M2P_STATUS 0x0c
51 #define M2P_REMAIN 0x14
52 #define M2P_MAXCNT0 0x20
53 #define M2P_BASE0 0x24
54 #define M2P_MAXCNT1 0x30
55 #define M2P_BASE1 0x34
57 #define STATE_IDLE 0 /* Channel is inactive. */
58 #define STATE_STALL 1 /* Channel is active, no buffers pending. */
59 #define STATE_ON 2 /* Channel is active, one buffer pending. */
60 #define STATE_NEXT 3 /* Channel is active, two buffers pending. */
72 struct ep93xx_dma_buffer
*buffer_xfer
;
73 struct ep93xx_dma_buffer
*buffer_next
;
74 struct list_head buffers_pending
;
77 static struct m2p_channel m2p_rx
[] = {
78 {"m2p1", EP93XX_DMA_BASE
+ 0x0040, IRQ_EP93XX_DMAM2P1
},
79 {"m2p3", EP93XX_DMA_BASE
+ 0x00c0, IRQ_EP93XX_DMAM2P3
},
80 {"m2p5", EP93XX_DMA_BASE
+ 0x0200, IRQ_EP93XX_DMAM2P5
},
81 {"m2p7", EP93XX_DMA_BASE
+ 0x0280, IRQ_EP93XX_DMAM2P7
},
82 {"m2p9", EP93XX_DMA_BASE
+ 0x0300, IRQ_EP93XX_DMAM2P9
},
86 static struct m2p_channel m2p_tx
[] = {
87 {"m2p0", EP93XX_DMA_BASE
+ 0x0000, IRQ_EP93XX_DMAM2P0
},
88 {"m2p2", EP93XX_DMA_BASE
+ 0x0080, IRQ_EP93XX_DMAM2P2
},
89 {"m2p4", EP93XX_DMA_BASE
+ 0x0240, IRQ_EP93XX_DMAM2P4
},
90 {"m2p6", EP93XX_DMA_BASE
+ 0x02c0, IRQ_EP93XX_DMAM2P6
},
91 {"m2p8", EP93XX_DMA_BASE
+ 0x0340, IRQ_EP93XX_DMAM2P8
},
95 static void feed_buf(struct m2p_channel
*ch
, struct ep93xx_dma_buffer
*buf
)
97 if (ch
->next_slot
== 0) {
98 writel(buf
->size
, ch
->base
+ M2P_MAXCNT0
);
99 writel(buf
->bus_addr
, ch
->base
+ M2P_BASE0
);
101 writel(buf
->size
, ch
->base
+ M2P_MAXCNT1
);
102 writel(buf
->bus_addr
, ch
->base
+ M2P_BASE1
);
107 static void choose_buffer_xfer(struct m2p_channel
*ch
)
109 struct ep93xx_dma_buffer
*buf
;
111 ch
->buffer_xfer
= NULL
;
112 if (!list_empty(&ch
->buffers_pending
)) {
113 buf
= list_entry(ch
->buffers_pending
.next
,
114 struct ep93xx_dma_buffer
, list
);
115 list_del(&buf
->list
);
117 ch
->buffer_xfer
= buf
;
121 static void choose_buffer_next(struct m2p_channel
*ch
)
123 struct ep93xx_dma_buffer
*buf
;
125 ch
->buffer_next
= NULL
;
126 if (!list_empty(&ch
->buffers_pending
)) {
127 buf
= list_entry(ch
->buffers_pending
.next
,
128 struct ep93xx_dma_buffer
, list
);
129 list_del(&buf
->list
);
131 ch
->buffer_next
= buf
;
135 static inline void m2p_set_control(struct m2p_channel
*ch
, u32 v
)
138 * The control register must be read immediately after being written so
139 * that the internal state machine is correctly updated. See the ep93xx
140 * users' guide for details.
142 writel(v
, ch
->base
+ M2P_CONTROL
);
143 readl(ch
->base
+ M2P_CONTROL
);
146 static inline int m2p_channel_state(struct m2p_channel
*ch
)
148 return (readl(ch
->base
+ M2P_STATUS
) >> 4) & 0x3;
151 static irqreturn_t
m2p_irq(int irq
, void *dev_id
)
153 struct m2p_channel
*ch
= dev_id
;
154 struct ep93xx_dma_m2p_client
*cl
;
160 spin_lock(&ch
->lock
);
161 irq_status
= readl(ch
->base
+ M2P_INTERRUPT
);
163 if (irq_status
& M2P_INTERRUPT_ERROR
) {
164 writel(M2P_INTERRUPT_ERROR
, ch
->base
+ M2P_INTERRUPT
);
168 if ((irq_status
& (M2P_INTERRUPT_STALL
| M2P_INTERRUPT_NFB
)) == 0) {
169 spin_unlock(&ch
->lock
);
173 switch (m2p_channel_state(ch
)) {
175 pr_crit("m2p_irq: dma interrupt without a dma buffer\n");
180 cl
->buffer_finished(cl
->cookie
, ch
->buffer_xfer
, 0, error
);
181 if (ch
->buffer_next
!= NULL
) {
182 cl
->buffer_finished(cl
->cookie
, ch
->buffer_next
,
185 choose_buffer_xfer(ch
);
186 choose_buffer_next(ch
);
187 if (ch
->buffer_xfer
!= NULL
)
188 cl
->buffer_started(cl
->cookie
, ch
->buffer_xfer
);
192 cl
->buffer_finished(cl
->cookie
, ch
->buffer_xfer
, 0, error
);
193 ch
->buffer_xfer
= ch
->buffer_next
;
194 choose_buffer_next(ch
);
195 cl
->buffer_started(cl
->cookie
, ch
->buffer_xfer
);
199 pr_crit("m2p_irq: dma interrupt while next\n");
204 v
= readl(ch
->base
+ M2P_CONTROL
) & ~(M2P_CONTROL_STALL_IRQ_EN
|
205 M2P_CONTROL_NFB_IRQ_EN
);
206 if (ch
->buffer_xfer
!= NULL
)
207 v
|= M2P_CONTROL_STALL_IRQ_EN
;
208 if (ch
->buffer_next
!= NULL
)
209 v
|= M2P_CONTROL_NFB_IRQ_EN
;
210 m2p_set_control(ch
, v
);
212 spin_unlock(&ch
->lock
);
216 static struct m2p_channel
*find_free_channel(struct ep93xx_dma_m2p_client
*cl
)
218 struct m2p_channel
*ch
;
221 if (cl
->flags
& EP93XX_DMA_M2P_RX
)
226 for (i
= 0; ch
[i
].base
; i
++) {
227 struct ep93xx_dma_m2p_client
*client
;
229 client
= ch
[i
].client
;
230 if (client
!= NULL
) {
233 port
= cl
->flags
& EP93XX_DMA_M2P_PORT_MASK
;
234 if (port
== (client
->flags
&
235 EP93XX_DMA_M2P_PORT_MASK
)) {
236 pr_warning("DMA channel already used by %s\n",
237 cl
->name
? : "unknown client");
238 return ERR_PTR(-EBUSY
);
243 for (i
= 0; ch
[i
].base
; i
++) {
244 if (ch
[i
].client
== NULL
)
248 pr_warning("No free DMA channel for %s\n",
249 cl
->name
? : "unknown client");
250 return ERR_PTR(-ENODEV
);
253 static void channel_enable(struct m2p_channel
*ch
)
255 struct ep93xx_dma_m2p_client
*cl
= ch
->client
;
260 v
= cl
->flags
& EP93XX_DMA_M2P_PORT_MASK
;
261 writel(v
, ch
->base
+ M2P_PPALLOC
);
263 v
= cl
->flags
& EP93XX_DMA_M2P_ERROR_MASK
;
264 v
|= M2P_CONTROL_ENABLE
| M2P_CONTROL_ERROR_IRQ_EN
;
265 m2p_set_control(ch
, v
);
268 static void channel_disable(struct m2p_channel
*ch
)
272 v
= readl(ch
->base
+ M2P_CONTROL
);
273 v
&= ~(M2P_CONTROL_STALL_IRQ_EN
| M2P_CONTROL_NFB_IRQ_EN
);
274 m2p_set_control(ch
, v
);
276 while (m2p_channel_state(ch
) == STATE_ON
)
279 m2p_set_control(ch
, 0x0);
281 while (m2p_channel_state(ch
) == STATE_STALL
)
284 clk_disable(ch
->clk
);
287 int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client
*cl
)
289 struct m2p_channel
*ch
;
292 ch
= find_free_channel(cl
);
296 err
= request_irq(ch
->irq
, m2p_irq
, 0, cl
->name
? : "dma-m2p", ch
);
302 ch
->buffer_xfer
= NULL
;
303 ch
->buffer_next
= NULL
;
304 INIT_LIST_HEAD(&ch
->buffers_pending
);
312 EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_client_register
);
314 void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client
*cl
)
316 struct m2p_channel
*ch
= cl
->channel
;
319 free_irq(ch
->irq
, ch
);
322 EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_client_unregister
);
324 void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client
*cl
,
325 struct ep93xx_dma_buffer
*buf
)
327 struct m2p_channel
*ch
= cl
->channel
;
331 spin_lock_irqsave(&ch
->lock
, flags
);
332 v
= readl(ch
->base
+ M2P_CONTROL
);
333 if (ch
->buffer_xfer
== NULL
) {
334 ch
->buffer_xfer
= buf
;
336 cl
->buffer_started(cl
->cookie
, buf
);
338 v
|= M2P_CONTROL_STALL_IRQ_EN
;
339 m2p_set_control(ch
, v
);
341 } else if (ch
->buffer_next
== NULL
) {
342 ch
->buffer_next
= buf
;
345 v
|= M2P_CONTROL_NFB_IRQ_EN
;
346 m2p_set_control(ch
, v
);
348 list_add_tail(&buf
->list
, &ch
->buffers_pending
);
350 spin_unlock_irqrestore(&ch
->lock
, flags
);
352 EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_submit
);
354 void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client
*cl
,
355 struct ep93xx_dma_buffer
*buf
)
357 struct m2p_channel
*ch
= cl
->channel
;
359 list_add_tail(&buf
->list
, &ch
->buffers_pending
);
361 EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_submit_recursive
);
363 void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client
*cl
)
365 struct m2p_channel
*ch
= cl
->channel
;
369 ch
->buffer_xfer
= NULL
;
370 ch
->buffer_next
= NULL
;
371 INIT_LIST_HEAD(&ch
->buffers_pending
);
374 EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_flush
);
376 static int init_channel(struct m2p_channel
*ch
)
378 ch
->clk
= clk_get(NULL
, ch
->name
);
380 return PTR_ERR(ch
->clk
);
382 spin_lock_init(&ch
->lock
);
388 static int __init
ep93xx_dma_m2p_init(void)
393 for (i
= 0; m2p_rx
[i
].base
; i
++) {
394 ret
= init_channel(m2p_rx
+ i
);
399 for (i
= 0; m2p_tx
[i
].base
; i
++) {
400 ret
= init_channel(m2p_tx
+ i
);
405 pr_info("M2P DMA subsystem initialized\n");
408 arch_initcall(ep93xx_dma_m2p_init
);