/* dma.c: DMA controller management on FR401 and the like
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/init.h>

#include <asm/dma.h>
#include <asm/gpio-regs.h>
#include <asm/irc-regs.h>
#include <asm/cpu-irqs.h>

struct frv_dma_channel {
	uint8_t			flags;		/* channel state flags */
#define FRV_DMA_FLAGS_RESERVED	0x01		/* channel not present on this CPU */
#define FRV_DMA_FLAGS_INUSE	0x02		/* channel claimed by a driver */
#define FRV_DMA_FLAGS_PAUSED	0x04		/* channel paused by frv_dma_pause_all() */
	uint8_t			cap;		/* capabilities available */
	int			irq;		/* completion IRQ */
	uint32_t		dreqbit;	/* SIR bit selecting DREQ input */
	uint32_t		dackbit;	/* SOR bit selecting DACK output */
	uint32_t		donebit;	/* SOR bit selecting DONE output */
	const unsigned long	ioaddr;		/* DMA controller regs addr */
	const char		*devname;	/* name of claiming device */
	dma_irq_handler_t	handler;	/* completion handler */
	void			*data;		/* handler argument */
};

#define __get_DMAC(IO,X)	({ *(volatile unsigned long *)((IO) + DMAC_##X##x); })

/* write a DMA controller register and follow it with a memory barrier */
#define __set_DMAC(IO,X,V)					\
do {								\
	*(volatile unsigned long *)((IO) + DMAC_##X##x) = (V);	\
	mb();							\
} while(0)

/* write a DMA controller register without a barrier; the caller issues mb() */
#define ___set_DMAC(IO,X,V)					\
do {								\
	*(volatile unsigned long *)((IO) + DMAC_##X##x) = (V);	\
} while(0)

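/*
 * Illustrative note (not from the original source): the X argument above is
 * token-pasted into a register-offset constant of the form DMAC_<X>x, so a
 * call such as
 *
 *	__set_DMAC(ioaddr, CSTR, 0);
 *
 * expands to roughly
 *
 *	do {
 *		*(volatile unsigned long *)(ioaddr + DMAC_CSTRx) = 0;
 *		mb();
 *	} while(0);
 *
 * __set_DMAC() places a memory barrier after each register write, whereas
 * ___set_DMAC() omits it so that a caller making several writes in a row can
 * issue a single mb() at the end.
 */
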
static struct frv_dma_channel frv_dma_channels[FRV_DMA_NCHANS] = {
	[0] = {
		.cap		= FRV_DMA_CAP_DREQ | FRV_DMA_CAP_DACK | FRV_DMA_CAP_DONE,
		.irq		= IRQ_CPU_DMA0,
		.dreqbit	= SIR_DREQ0_INPUT,
		.dackbit	= SOR_DACK0_OUTPUT,
		.donebit	= SOR_DONE0_OUTPUT,
		.ioaddr		= 0xfe000900,
	},
	[1] = {
		.cap		= FRV_DMA_CAP_DREQ | FRV_DMA_CAP_DACK | FRV_DMA_CAP_DONE,
		.irq		= IRQ_CPU_DMA1,
		.dreqbit	= SIR_DREQ1_INPUT,
		.dackbit	= SOR_DACK1_OUTPUT,
		.donebit	= SOR_DONE1_OUTPUT,
		.ioaddr		= 0xfe000980,
	},
	[2] = {
		.cap		= FRV_DMA_CAP_DREQ | FRV_DMA_CAP_DACK,
		.irq		= IRQ_CPU_DMA2,
		.dreqbit	= SIR_DREQ2_INPUT,
		.dackbit	= SOR_DACK2_OUTPUT,
		.ioaddr		= 0xfe000a00,
	},
	[3] = {
		.cap		= FRV_DMA_CAP_DREQ | FRV_DMA_CAP_DACK,
		.irq		= IRQ_CPU_DMA3,
		.dreqbit	= SIR_DREQ3_INPUT,
		.dackbit	= SOR_DACK3_OUTPUT,
		.ioaddr		= 0xfe000a80,
	},
	[4] = {
		.cap		= FRV_DMA_CAP_DREQ,
		.irq		= IRQ_CPU_DMA4,
		.dreqbit	= SIR_DREQ4_INPUT,
		.ioaddr		= 0xfe001000,
	},
	[5] = {
		.cap		= FRV_DMA_CAP_DREQ,
		.irq		= IRQ_CPU_DMA5,
		.dreqbit	= SIR_DREQ5_INPUT,
		.ioaddr		= 0xfe001080,
	},
	[6] = {
		.cap		= FRV_DMA_CAP_DREQ,
		.irq		= IRQ_CPU_DMA6,
		.dreqbit	= SIR_DREQ6_INPUT,
		.ioaddr		= 0xfe001100,
	},
	[7] = {
		.cap		= FRV_DMA_CAP_DREQ,
		.irq		= IRQ_CPU_DMA7,
		.dreqbit	= SIR_DREQ7_INPUT,
		.ioaddr		= 0xfe001180,
	},
};

static DEFINE_RWLOCK(frv_dma_channels_lock);

unsigned int frv_dma_inprogress;

#define frv_clear_dma_inprogress(channel) \
	(void)__atomic32_fetch_and(~(1 << (channel)), &frv_dma_inprogress);

#define frv_set_dma_inprogress(channel) \
	(void)__atomic32_fetch_or(1 << (channel), &frv_dma_inprogress);

/*****************************************************************************/
/*
 * DMA irq handler - determine channel involved, grab status and call real handler
 */
static irqreturn_t dma_irq_handler(int irq, void *_channel)
{
	struct frv_dma_channel *channel = _channel;

	frv_clear_dma_inprogress(channel - frv_dma_channels);
	return channel->handler(channel - frv_dma_channels,
				__get_DMAC(channel->ioaddr, CSTR),
				channel->data);

} /* end dma_irq_handler() */

/*****************************************************************************/
/*
 * Determine which DMA controllers are present on this CPU
 */
void __init frv_dma_init(void)
{
	unsigned long psr = __get_PSR();
	int num_dma, i;

	/* First, determine how many DMA channels are available */
	switch (PSR_IMPLE(psr)) {
	case PSR_IMPLE_FR405:
	case PSR_IMPLE_FR451:
	case PSR_IMPLE_FR501:
	case PSR_IMPLE_FR551:
		num_dma = FRV_DMA_8CHANS;
		break;

	case PSR_IMPLE_FR401:
	default:
		num_dma = FRV_DMA_4CHANS;
		break;
	}

	/* Now mark all of the non-existent channels as reserved */
	for(i = num_dma; i < FRV_DMA_NCHANS; i++)
		frv_dma_channels[i].flags = FRV_DMA_FLAGS_RESERVED;

} /* end frv_dma_init() */

/*****************************************************************************/
/*
 * allocate a DMA controller channel and the IRQ associated with it
 */
int frv_dma_open(const char *devname,
		 unsigned long dmamask,
		 int dmacap,
		 dma_irq_handler_t handler,
		 unsigned long irq_flags,
		 void *data)
{
	struct frv_dma_channel *channel;
	int dma, ret;
	uint32_t val;

	write_lock(&frv_dma_channels_lock);

	ret = -ENOSPC;

	for (dma = FRV_DMA_NCHANS - 1; dma >= 0; dma--) {
		channel = &frv_dma_channels[dma];

		if (!test_bit(dma, &dmamask))
			continue;

		if ((channel->cap & dmacap) != dmacap)
			continue;

		if (!frv_dma_channels[dma].flags)
			goto found;
	}

	goto out;

 found:
	ret = request_irq(channel->irq, dma_irq_handler, irq_flags, devname, channel);
	if (ret < 0)
		goto out;

	/* okay, we've allocated all the resources */
	channel = &frv_dma_channels[dma];

	channel->flags		|= FRV_DMA_FLAGS_INUSE;
	channel->devname	= devname;
	channel->handler	= handler;
	channel->data		= data;

	/* Now make sure we are set up for DMA and not GPIO */
	/* SIR bit must be set for DMA to work */
	__set_SIR(channel->dreqbit | __get_SIR());

	/* SOR bits depend on what the caller requests */
	val = __get_SOR();
	if(dmacap & FRV_DMA_CAP_DACK)
		val |= channel->dackbit;
	else
		val &= ~channel->dackbit;
	if(dmacap & FRV_DMA_CAP_DONE)
		val |= channel->donebit;
	else
		val &= ~channel->donebit;
	__set_SOR(val);

	ret = dma;
 out:
	write_unlock(&frv_dma_channels_lock);
	return ret;
} /* end frv_dma_open() */

EXPORT_SYMBOL(frv_dma_open);

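/*
 * Illustrative usage sketch (not part of the original source; the device name,
 * data pointer, handler and channel mask below are hypothetical): a driver
 * claims one of channels 0-3 with DREQ and DACK capability, and releases it
 * again with frv_dma_close() when it is finished:
 *
 *	static irqreturn_t mydev_dma_handler(int dma, unsigned long cstr, void *data)
 *	{
 *		return IRQ_HANDLED;
 *	}
 *
 *	int dma = frv_dma_open("mydev", 0x0f,
 *			       FRV_DMA_CAP_DREQ | FRV_DMA_CAP_DACK,
 *			       mydev_dma_handler, 0, mydev);
 *	if (dma < 0)
 *		return dma;
 *	...
 *	frv_dma_close(dma);
 */
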
/*****************************************************************************/
/*
 * close a DMA channel and its associated interrupt
 */
void frv_dma_close(int dma)
{
	struct frv_dma_channel *channel = &frv_dma_channels[dma];
	unsigned long flags;

	write_lock_irqsave(&frv_dma_channels_lock, flags);

	free_irq(channel->irq, channel);
	frv_dma_stop(dma);

	channel->flags &= ~FRV_DMA_FLAGS_INUSE;

	write_unlock_irqrestore(&frv_dma_channels_lock, flags);
} /* end frv_dma_close() */

EXPORT_SYMBOL(frv_dma_close);

/*****************************************************************************/
/*
 * set static configuration on a DMA channel
 */
void frv_dma_config(int dma, unsigned long ccfr, unsigned long cctr, unsigned long apr)
{
	unsigned long ioaddr = frv_dma_channels[dma].ioaddr;

	___set_DMAC(ioaddr, CCFR, ccfr);
	___set_DMAC(ioaddr, CCTR, cctr);
	___set_DMAC(ioaddr, APR,  apr);
	mb();

} /* end frv_dma_config() */

EXPORT_SYMBOL(frv_dma_config);

/*****************************************************************************/
/*
 * start a DMA channel
 */
void frv_dma_start(int dma,
		   unsigned long sba, unsigned long dba,
		   unsigned long pix, unsigned long six, unsigned long bcl)
{
	unsigned long ioaddr = frv_dma_channels[dma].ioaddr;

	___set_DMAC(ioaddr, SBA,  sba);
	___set_DMAC(ioaddr, DBA,  dba);
	___set_DMAC(ioaddr, PIX,  pix);
	___set_DMAC(ioaddr, SIX,  six);
	___set_DMAC(ioaddr, BCL,  bcl);
	___set_DMAC(ioaddr, CSTR, 0);
	mb();

	__set_DMAC(ioaddr, CCTR, __get_DMAC(ioaddr, CCTR) | DMAC_CCTRx_ACT);
	frv_set_dma_inprogress(dma);

} /* end frv_dma_start() */

EXPORT_SYMBOL(frv_dma_start);

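/*
 * Illustrative sequence (not part of the original source; the register values
 * shown are hypothetical placeholders): after a channel has been opened, a
 * transfer is typically programmed by loading the static configuration once
 * with frv_dma_config() and then kicking off each transfer with the base
 * address, index and byte/burst control values:
 *
 *	frv_dma_config(dma, ccfr, cctr, apr);
 *	frv_dma_start(dma, sba, dba, pix, six, bcl);
 *	...
 *	frv_dma_stop(dma);
 */
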
/*****************************************************************************/
/*
 * restart a DMA channel that's been stopped in circular addressing mode by comparison-end
 */
void frv_dma_restart_circular(int dma, unsigned long six)
{
	unsigned long ioaddr = frv_dma_channels[dma].ioaddr;

	___set_DMAC(ioaddr, SIX,  six);
	___set_DMAC(ioaddr, CSTR, __get_DMAC(ioaddr, CSTR) & ~DMAC_CSTRx_CE);
	mb();

	__set_DMAC(ioaddr, CCTR, __get_DMAC(ioaddr, CCTR) | DMAC_CCTRx_ACT);
	frv_set_dma_inprogress(dma);

} /* end frv_dma_restart_circular() */

EXPORT_SYMBOL(frv_dma_restart_circular);

/*****************************************************************************/
/*
 * stop a DMA channel
 */
void frv_dma_stop(int dma)
{
	unsigned long ioaddr = frv_dma_channels[dma].ioaddr;
	uint32_t cctr;

	___set_DMAC(ioaddr, CSTR, 0);
	cctr = __get_DMAC(ioaddr, CCTR);
	cctr &= ~(DMAC_CCTRx_IE | DMAC_CCTRx_ACT);
	cctr |= DMAC_CCTRx_FC;			/* fifo clear */
	__set_DMAC(ioaddr, CCTR, cctr);
	__set_DMAC(ioaddr, BCL, 0);
	frv_clear_dma_inprogress(dma);
} /* end frv_dma_stop() */

EXPORT_SYMBOL(frv_dma_stop);

/*****************************************************************************/
/*
 * test interrupt status of DMA channel
 */
int is_frv_dma_interrupting(int dma)
{
	unsigned long ioaddr = frv_dma_channels[dma].ioaddr;

	return __get_DMAC(ioaddr, CSTR) & (1 << 23);

} /* end is_frv_dma_interrupting() */

EXPORT_SYMBOL(is_frv_dma_interrupting);

/*****************************************************************************/
/*
 * dump data about a DMA channel
 */
void frv_dma_dump(int dma)
{
	unsigned long ioaddr = frv_dma_channels[dma].ioaddr;
	unsigned long cstr, pix, six, bcl;

	cstr = __get_DMAC(ioaddr, CSTR);
	pix  = __get_DMAC(ioaddr, PIX);
	six  = __get_DMAC(ioaddr, SIX);
	bcl  = __get_DMAC(ioaddr, BCL);

	printk("DMA[%d] cstr=%lx pix=%lx six=%lx bcl=%lx\n", dma, cstr, pix, six, bcl);

} /* end frv_dma_dump() */

EXPORT_SYMBOL(frv_dma_dump);

/*****************************************************************************/
/*
 * pause all DMA controllers
 * - called by clock mangling routines
 * - caller must be holding interrupts disabled
 */
void frv_dma_pause_all(void)
{
	struct frv_dma_channel *channel;
	unsigned long ioaddr;
	unsigned long cstr, cctr;
	int dma;

	write_lock(&frv_dma_channels_lock);

	for (dma = FRV_DMA_NCHANS - 1; dma >= 0; dma--) {
		channel = &frv_dma_channels[dma];

		if (!(channel->flags & FRV_DMA_FLAGS_INUSE))
			continue;

		ioaddr = channel->ioaddr;
		cctr = __get_DMAC(ioaddr, CCTR);
		if (cctr & DMAC_CCTRx_ACT) {
			/* deactivate the channel and wait for it to stop */
			cctr &= ~DMAC_CCTRx_ACT;
			__set_DMAC(ioaddr, CCTR, cctr);

			do {
				cstr = __get_DMAC(ioaddr, CSTR);
			} while (cstr & DMAC_CSTRx_BUSY);

			if (cstr & DMAC_CSTRx_FED)
				channel->flags |= FRV_DMA_FLAGS_PAUSED;
			frv_clear_dma_inprogress(dma);
		}
	}

} /* end frv_dma_pause_all() */

EXPORT_SYMBOL(frv_dma_pause_all);

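/*
 * Note on locking: frv_dma_pause_all() above returns with
 * frv_dma_channels_lock still write-locked; the matching write_unlock() is
 * performed by frv_dma_resume_all() below. The two calls must therefore be
 * paired, with interrupts kept disabled for the whole pause/resume window.
 */
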
/*****************************************************************************/
/*
 * resume paused DMA controllers
 * - called by clock mangling routines
 * - caller must be holding interrupts disabled
 */
void frv_dma_resume_all(void)
{
	struct frv_dma_channel *channel;
	unsigned long ioaddr;
	unsigned long cstr, cctr;
	int dma;

	for (dma = FRV_DMA_NCHANS - 1; dma >= 0; dma--) {
		channel = &frv_dma_channels[dma];

		if (!(channel->flags & FRV_DMA_FLAGS_PAUSED))
			continue;

		ioaddr = channel->ioaddr;
		cstr = __get_DMAC(ioaddr, CSTR);
		cstr &= ~(DMAC_CSTRx_FED | DMAC_CSTRx_INT);
		__set_DMAC(ioaddr, CSTR, cstr);

		cctr = __get_DMAC(ioaddr, CCTR);
		cctr |= DMAC_CCTRx_ACT;
		__set_DMAC(ioaddr, CCTR, cctr);

		channel->flags &= ~FRV_DMA_FLAGS_PAUSED;
		frv_set_dma_inprogress(dma);
	}

	write_unlock(&frv_dma_channels_lock);

} /* end frv_dma_resume_all() */

EXPORT_SYMBOL(frv_dma_resume_all);

/*****************************************************************************/
/*
 * clear the status of a DMA channel
 */
void frv_dma_status_clear(int dma)
{
	unsigned long ioaddr = frv_dma_channels[dma].ioaddr;
	uint32_t cctr;

	___set_DMAC(ioaddr, CSTR, 0);

	/* dummy read of CCTR; the value is not used */
	cctr = __get_DMAC(ioaddr, CCTR);
} /* end frv_dma_status_clear() */

EXPORT_SYMBOL(frv_dma_status_clear);