2 * arch/arm/mach-ep93xx/dma-m2p.c
3 * M2P DMA handling for Cirrus EP93xx chips.
5 * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
6 * Copyright (C) 2006 Applied Data Systems
8 * Copyright (C) 2009 Ryan Mallon <ryan@bluewatersys.com>
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or (at
13 * your option) any later version.
 * On the EP93xx chip the following peripherals may be allocated to the 10
18 * Memory to Internal Peripheral (M2P) channels (5 transmit + 5 receive).
20 * I2S contains 3 Tx and 3 Rx DMA Channels
21 * AAC contains 3 Tx and 3 Rx DMA Channels
22 * UART1 contains 1 Tx and 1 Rx DMA Channels
23 * UART2 contains 1 Tx and 1 Rx DMA Channels
24 * UART3 contains 1 Tx and 1 Rx DMA Channels
25 * IrDA contains 1 Tx and 1 Rx DMA Channels
27 * SSP and IDE use the Memory to Memory (M2M) channels and are not covered
28 * with this implementation.
31 #define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt
33 #include <linux/kernel.h>
34 #include <linux/clk.h>
35 #include <linux/err.h>
36 #include <linux/interrupt.h>
37 #include <linux/module.h>
41 #include <mach/hardware.h>
/*
 * Per-channel M2P register offsets (relative to the channel's register
 * base) and bit definitions.  The channel double-buffers transfers via
 * two MAXCNT/BASE register pairs (slot 0 and slot 1).
 */
#define M2P_CONTROL			0x00
#define M2P_CONTROL_STALL_IRQ_EN	(1 << 0)	/* interrupt when channel stalls */
#define M2P_CONTROL_NFB_IRQ_EN		(1 << 1)	/* interrupt when next frame buffer is needed */
#define M2P_CONTROL_ERROR_IRQ_EN	(1 << 3)	/* interrupt on peripheral error */
#define M2P_CONTROL_ENABLE		(1 << 4)	/* enable the channel */
#define M2P_INTERRUPT			0x04
#define M2P_INTERRUPT_STALL		(1 << 0)
#define M2P_INTERRUPT_NFB		(1 << 1)
#define M2P_INTERRUPT_ERROR		(1 << 3)
#define M2P_PPALLOC			0x08
#define M2P_STATUS			0x0c
#define M2P_REMAIN			0x14
#define M2P_MAXCNT0			0x20	/* byte count, buffer slot 0 */
#define M2P_BASE0			0x24	/* bus address, buffer slot 0 */
#define M2P_MAXCNT1			0x30	/* byte count, buffer slot 1 */
#define M2P_BASE1			0x34	/* bus address, buffer slot 1 */

/* Channel state, as read from bits [5:4] of M2P_STATUS (see m2p_channel_state()). */
#define STATE_IDLE	0	/* Channel is inactive. */
#define STATE_STALL	1	/* Channel is active, no buffers pending. */
#define STATE_ON	2	/* Channel is active, one buffer pending. */
#define STATE_NEXT	3	/* Channel is active, two buffers pending. */
75 struct ep93xx_dma_buffer
*buffer_xfer
;
76 struct ep93xx_dma_buffer
*buffer_next
;
77 struct list_head buffers_pending
;
80 static struct m2p_channel m2p_rx
[] = {
81 {"m2p1", EP93XX_DMA_BASE
+ 0x0040, IRQ_EP93XX_DMAM2P1
},
82 {"m2p3", EP93XX_DMA_BASE
+ 0x00c0, IRQ_EP93XX_DMAM2P3
},
83 {"m2p5", EP93XX_DMA_BASE
+ 0x0200, IRQ_EP93XX_DMAM2P5
},
84 {"m2p7", EP93XX_DMA_BASE
+ 0x0280, IRQ_EP93XX_DMAM2P7
},
85 {"m2p9", EP93XX_DMA_BASE
+ 0x0300, IRQ_EP93XX_DMAM2P9
},
89 static struct m2p_channel m2p_tx
[] = {
90 {"m2p0", EP93XX_DMA_BASE
+ 0x0000, IRQ_EP93XX_DMAM2P0
},
91 {"m2p2", EP93XX_DMA_BASE
+ 0x0080, IRQ_EP93XX_DMAM2P2
},
92 {"m2p4", EP93XX_DMA_BASE
+ 0x0240, IRQ_EP93XX_DMAM2P4
},
93 {"m2p6", EP93XX_DMA_BASE
+ 0x02c0, IRQ_EP93XX_DMAM2P6
},
94 {"m2p8", EP93XX_DMA_BASE
+ 0x0340, IRQ_EP93XX_DMAM2P8
},
98 static void feed_buf(struct m2p_channel
*ch
, struct ep93xx_dma_buffer
*buf
)
100 if (ch
->next_slot
== 0) {
101 writel(buf
->size
, ch
->base
+ M2P_MAXCNT0
);
102 writel(buf
->bus_addr
, ch
->base
+ M2P_BASE0
);
104 writel(buf
->size
, ch
->base
+ M2P_MAXCNT1
);
105 writel(buf
->bus_addr
, ch
->base
+ M2P_BASE1
);
110 static void choose_buffer_xfer(struct m2p_channel
*ch
)
112 struct ep93xx_dma_buffer
*buf
;
114 ch
->buffer_xfer
= NULL
;
115 if (!list_empty(&ch
->buffers_pending
)) {
116 buf
= list_entry(ch
->buffers_pending
.next
,
117 struct ep93xx_dma_buffer
, list
);
118 list_del(&buf
->list
);
120 ch
->buffer_xfer
= buf
;
124 static void choose_buffer_next(struct m2p_channel
*ch
)
126 struct ep93xx_dma_buffer
*buf
;
128 ch
->buffer_next
= NULL
;
129 if (!list_empty(&ch
->buffers_pending
)) {
130 buf
= list_entry(ch
->buffers_pending
.next
,
131 struct ep93xx_dma_buffer
, list
);
132 list_del(&buf
->list
);
134 ch
->buffer_next
= buf
;
138 static inline void m2p_set_control(struct m2p_channel
*ch
, u32 v
)
141 * The control register must be read immediately after being written so
142 * that the internal state machine is correctly updated. See the ep93xx
143 * users' guide for details.
145 writel(v
, ch
->base
+ M2P_CONTROL
);
146 readl(ch
->base
+ M2P_CONTROL
);
149 static inline int m2p_channel_state(struct m2p_channel
*ch
)
151 return (readl(ch
->base
+ M2P_STATUS
) >> 4) & 0x3;
154 static irqreturn_t
m2p_irq(int irq
, void *dev_id
)
156 struct m2p_channel
*ch
= dev_id
;
157 struct ep93xx_dma_m2p_client
*cl
;
163 spin_lock(&ch
->lock
);
164 irq_status
= readl(ch
->base
+ M2P_INTERRUPT
);
166 if (irq_status
& M2P_INTERRUPT_ERROR
) {
167 writel(M2P_INTERRUPT_ERROR
, ch
->base
+ M2P_INTERRUPT
);
171 if ((irq_status
& (M2P_INTERRUPT_STALL
| M2P_INTERRUPT_NFB
)) == 0) {
172 spin_unlock(&ch
->lock
);
176 switch (m2p_channel_state(ch
)) {
178 pr_crit("dma interrupt without a dma buffer\n");
183 cl
->buffer_finished(cl
->cookie
, ch
->buffer_xfer
, 0, error
);
184 if (ch
->buffer_next
!= NULL
) {
185 cl
->buffer_finished(cl
->cookie
, ch
->buffer_next
,
188 choose_buffer_xfer(ch
);
189 choose_buffer_next(ch
);
190 if (ch
->buffer_xfer
!= NULL
)
191 cl
->buffer_started(cl
->cookie
, ch
->buffer_xfer
);
195 cl
->buffer_finished(cl
->cookie
, ch
->buffer_xfer
, 0, error
);
196 ch
->buffer_xfer
= ch
->buffer_next
;
197 choose_buffer_next(ch
);
198 cl
->buffer_started(cl
->cookie
, ch
->buffer_xfer
);
202 pr_crit("dma interrupt while next\n");
207 v
= readl(ch
->base
+ M2P_CONTROL
) & ~(M2P_CONTROL_STALL_IRQ_EN
|
208 M2P_CONTROL_NFB_IRQ_EN
);
209 if (ch
->buffer_xfer
!= NULL
)
210 v
|= M2P_CONTROL_STALL_IRQ_EN
;
211 if (ch
->buffer_next
!= NULL
)
212 v
|= M2P_CONTROL_NFB_IRQ_EN
;
213 m2p_set_control(ch
, v
);
215 spin_unlock(&ch
->lock
);
219 static struct m2p_channel
*find_free_channel(struct ep93xx_dma_m2p_client
*cl
)
221 struct m2p_channel
*ch
;
224 if (cl
->flags
& EP93XX_DMA_M2P_RX
)
229 for (i
= 0; ch
[i
].base
; i
++) {
230 struct ep93xx_dma_m2p_client
*client
;
232 client
= ch
[i
].client
;
233 if (client
!= NULL
) {
236 port
= cl
->flags
& EP93XX_DMA_M2P_PORT_MASK
;
237 if (port
== (client
->flags
&
238 EP93XX_DMA_M2P_PORT_MASK
)) {
239 pr_warning("DMA channel already used by %s\n",
240 cl
->name
? : "unknown client");
241 return ERR_PTR(-EBUSY
);
246 for (i
= 0; ch
[i
].base
; i
++) {
247 if (ch
[i
].client
== NULL
)
251 pr_warning("No free DMA channel for %s\n",
252 cl
->name
? : "unknown client");
253 return ERR_PTR(-ENODEV
);
256 static void channel_enable(struct m2p_channel
*ch
)
258 struct ep93xx_dma_m2p_client
*cl
= ch
->client
;
263 v
= cl
->flags
& EP93XX_DMA_M2P_PORT_MASK
;
264 writel(v
, ch
->base
+ M2P_PPALLOC
);
266 v
= cl
->flags
& EP93XX_DMA_M2P_ERROR_MASK
;
267 v
|= M2P_CONTROL_ENABLE
| M2P_CONTROL_ERROR_IRQ_EN
;
268 m2p_set_control(ch
, v
);
271 static void channel_disable(struct m2p_channel
*ch
)
275 v
= readl(ch
->base
+ M2P_CONTROL
);
276 v
&= ~(M2P_CONTROL_STALL_IRQ_EN
| M2P_CONTROL_NFB_IRQ_EN
);
277 m2p_set_control(ch
, v
);
279 while (m2p_channel_state(ch
) >= STATE_ON
)
282 m2p_set_control(ch
, 0x0);
284 while (m2p_channel_state(ch
) == STATE_STALL
)
287 clk_disable(ch
->clk
);
290 int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client
*cl
)
292 struct m2p_channel
*ch
;
295 ch
= find_free_channel(cl
);
299 err
= request_irq(ch
->irq
, m2p_irq
, 0, cl
->name
? : "dma-m2p", ch
);
305 ch
->buffer_xfer
= NULL
;
306 ch
->buffer_next
= NULL
;
307 INIT_LIST_HEAD(&ch
->buffers_pending
);
315 EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_client_register
);
317 void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client
*cl
)
319 struct m2p_channel
*ch
= cl
->channel
;
322 free_irq(ch
->irq
, ch
);
325 EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_client_unregister
);
327 void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client
*cl
,
328 struct ep93xx_dma_buffer
*buf
)
330 struct m2p_channel
*ch
= cl
->channel
;
334 spin_lock_irqsave(&ch
->lock
, flags
);
335 v
= readl(ch
->base
+ M2P_CONTROL
);
336 if (ch
->buffer_xfer
== NULL
) {
337 ch
->buffer_xfer
= buf
;
339 cl
->buffer_started(cl
->cookie
, buf
);
341 v
|= M2P_CONTROL_STALL_IRQ_EN
;
342 m2p_set_control(ch
, v
);
344 } else if (ch
->buffer_next
== NULL
) {
345 ch
->buffer_next
= buf
;
348 v
|= M2P_CONTROL_NFB_IRQ_EN
;
349 m2p_set_control(ch
, v
);
351 list_add_tail(&buf
->list
, &ch
->buffers_pending
);
353 spin_unlock_irqrestore(&ch
->lock
, flags
);
355 EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_submit
);
357 void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client
*cl
,
358 struct ep93xx_dma_buffer
*buf
)
360 struct m2p_channel
*ch
= cl
->channel
;
362 list_add_tail(&buf
->list
, &ch
->buffers_pending
);
364 EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_submit_recursive
);
366 void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client
*cl
)
368 struct m2p_channel
*ch
= cl
->channel
;
372 ch
->buffer_xfer
= NULL
;
373 ch
->buffer_next
= NULL
;
374 INIT_LIST_HEAD(&ch
->buffers_pending
);
377 EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_flush
);
379 static int init_channel(struct m2p_channel
*ch
)
381 ch
->clk
= clk_get(NULL
, ch
->name
);
383 return PTR_ERR(ch
->clk
);
385 spin_lock_init(&ch
->lock
);
391 static int __init
ep93xx_dma_m2p_init(void)
396 for (i
= 0; m2p_rx
[i
].base
; i
++) {
397 ret
= init_channel(m2p_rx
+ i
);
402 for (i
= 0; m2p_tx
[i
].base
; i
++) {
403 ret
= init_channel(m2p_tx
+ i
);
408 pr_info("M2P DMA subsystem initialized\n");
411 arch_initcall(ep93xx_dma_m2p_init
);