/* dma.c: DMA controller management on FR401 and the like
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/init.h>

#include <asm/dma.h>
#include <asm/gpio-regs.h>
#include <asm/irc-regs.h>
#include <asm/cpu-irqs.h>

struct frv_dma_channel {
	uint8_t			flags;		/* channel state flags */
#define FRV_DMA_FLAGS_RESERVED	0x01
#define FRV_DMA_FLAGS_INUSE	0x02
#define FRV_DMA_FLAGS_PAUSED	0x04
	uint8_t			cap;		/* capabilities available */
	int			irq;		/* completion IRQ */
	uint32_t		dreqbit;	/* SIR bit selecting this channel's DREQ input */
	uint32_t		dackbit;	/* SOR bit selecting this channel's DACK output */
	uint32_t		donebit;	/* SOR bit selecting this channel's DONE output */
	const unsigned long	ioaddr;		/* DMA controller regs addr */
	const char		*devname;	/* name of the device holding the channel */
	dma_irq_handler_t	handler;	/* completion handler supplied by the device */
	void			*data;		/* cookie passed back to the handler */
};

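
/* Accessors for a channel's memory-mapped DMAC register block.  The X
 * argument is a register name (CSTR, CCTR, SBA, ...) that gets token-pasted
 * into the corresponding DMAC_<reg>x offset constant.  __set_DMAC() follows
 * the write with a memory barrier; ___set_DMAC() is the barrier-less variant
 * used when several registers are programmed back to back and the caller
 * issues a single mb() afterwards.
 */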
#define __get_DMAC(IO,X)	({ *(volatile unsigned long *)((IO) + DMAC_##X##x); })

#define __set_DMAC(IO,X,V)						\
do {									\
	*(volatile unsigned long *)((IO) + DMAC_##X##x) = (V);		\
	mb();								\
} while(0)

#define ___set_DMAC(IO,X,V)						\
do {									\
	*(volatile unsigned long *)((IO) + DMAC_##X##x) = (V);		\
} while(0)

static struct frv_dma_channel frv_dma_channels[FRV_DMA_NCHANS] = {
	[0] = {
		.cap		= FRV_DMA_CAP_DREQ | FRV_DMA_CAP_DACK | FRV_DMA_CAP_DONE,
		.irq		= IRQ_CPU_DMA0,
		.dreqbit	= SIR_DREQ0_INPUT,
		.dackbit	= SOR_DACK0_OUTPUT,
		.donebit	= SOR_DONE0_OUTPUT,
	},
	[1] = {
		.cap		= FRV_DMA_CAP_DREQ | FRV_DMA_CAP_DACK | FRV_DMA_CAP_DONE,
		.irq		= IRQ_CPU_DMA1,
		.dreqbit	= SIR_DREQ1_INPUT,
		.dackbit	= SOR_DACK1_OUTPUT,
		.donebit	= SOR_DONE1_OUTPUT,
	},
	[2] = {
		.cap		= FRV_DMA_CAP_DREQ | FRV_DMA_CAP_DACK,
		.irq		= IRQ_CPU_DMA2,
		.dreqbit	= SIR_DREQ2_INPUT,
		.dackbit	= SOR_DACK2_OUTPUT,
	},
	[3] = {
		.cap		= FRV_DMA_CAP_DREQ | FRV_DMA_CAP_DACK,
		.irq		= IRQ_CPU_DMA3,
		.dreqbit	= SIR_DREQ3_INPUT,
		.dackbit	= SOR_DACK3_OUTPUT,
	},
	[4] = {
		.cap		= FRV_DMA_CAP_DREQ,
		.irq		= IRQ_CPU_DMA4,
		.dreqbit	= SIR_DREQ4_INPUT,
	},
	[5] = {
		.cap		= FRV_DMA_CAP_DREQ,
		.irq		= IRQ_CPU_DMA5,
		.dreqbit	= SIR_DREQ5_INPUT,
	},
	[6] = {
		.cap		= FRV_DMA_CAP_DREQ,
		.irq		= IRQ_CPU_DMA6,
		.dreqbit	= SIR_DREQ6_INPUT,
		.ioaddr		= 0xfe001100,
	},
	[7] = {
		.cap		= FRV_DMA_CAP_DREQ,
		.irq		= IRQ_CPU_DMA7,
		.dreqbit	= SIR_DREQ7_INPUT,
		.ioaddr		= 0xfe001180,
	},
};

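
/* Allocation state in the table above (flags, devname, handler, data) is
 * protected by this lock; within this file it is only ever taken for
 * writing. */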
static DEFINE_RWLOCK(frv_dma_channels_lock);

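
/* Bitmask with one bit per channel, set while a transfer is in flight on that
 * channel; it is a global rather than static, presumably so the clock/power
 * management code elsewhere in the arch can consult it. */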
unsigned long frv_dma_inprogress;

#define frv_clear_dma_inprogress(channel) \
	atomic_clear_mask(1 << (channel), &frv_dma_inprogress);

#define frv_set_dma_inprogress(channel) \
	atomic_set_mask(1 << (channel), &frv_dma_inprogress);

/*****************************************************************************/
/*
 * DMA irq handler - determine channel involved, grab status and call real handler
 */
static irqreturn_t dma_irq_handler(int irq, void *_channel, struct pt_regs *regs)
{
	struct frv_dma_channel *channel = _channel;

	frv_clear_dma_inprogress(channel - frv_dma_channels);
	return channel->handler(channel - frv_dma_channels,
				__get_DMAC(channel->ioaddr, CSTR),
				channel->data);

} /* end dma_irq_handler() */

/*****************************************************************************/
/*
 * Determine which DMA controllers are present on this CPU
 */
void __init frv_dma_init(void)
{
	unsigned long psr = __get_PSR();
	int num_dma, i;

	/* First, determine how many DMA channels are available */
	switch (PSR_IMPLE(psr)) {
	case PSR_IMPLE_FR405:
	case PSR_IMPLE_FR451:
	case PSR_IMPLE_FR501:
	case PSR_IMPLE_FR551:
		num_dma = FRV_DMA_8CHANS;
		break;

	case PSR_IMPLE_FR401:
	default:
		num_dma = FRV_DMA_4CHANS;
		break;
	}

	/* Now mark all of the non-existent channels as reserved */
	for (i = num_dma; i < FRV_DMA_NCHANS; i++)
		frv_dma_channels[i].flags = FRV_DMA_FLAGS_RESERVED;

} /* end frv_dma_init() */

/*****************************************************************************/
/*
 * allocate a DMA controller channel and the IRQ associated with it
 */
int frv_dma_open(const char *devname,
		 unsigned long dmamask,
		 unsigned long dmacap,
		 dma_irq_handler_t handler,
		 unsigned long irq_flags,
		 void *data)
{
	struct frv_dma_channel *channel;
	int dma, ret;
	uint32_t val;

	write_lock(&frv_dma_channels_lock);

	/* search, highest channel first, for one that is in the caller's mask,
	 * has all the requested capabilities and is not yet claimed */
	ret = -ENOSPC;

	for (dma = FRV_DMA_NCHANS - 1; dma >= 0; dma--) {
		channel = &frv_dma_channels[dma];

		if (!test_bit(dma, &dmamask))
			continue;

		if ((channel->cap & dmacap) != dmacap)
			continue;

		if (!frv_dma_channels[dma].flags)
			goto found;
	}

	goto out;

found:
	ret = request_irq(channel->irq, dma_irq_handler, irq_flags, devname,
			  channel);
	if (ret < 0)
		goto out;

	/* okay, we've allocated all the resources */
	channel = &frv_dma_channels[dma];

	channel->flags		|= FRV_DMA_FLAGS_INUSE;
	channel->devname	= devname;
	channel->handler	= handler;
	channel->data		= data;

	/* Now make sure we are set up for DMA and not GPIO */
	/* SIR bit must be set for DMA to work */
	__set_SIR(channel->dreqbit | __get_SIR());

	/* SOR bits depend on what the caller requests */
	val = __get_SOR();
	if (dmacap & FRV_DMA_CAP_DACK)
		val |= channel->dackbit;
	else
		val &= ~channel->dackbit;
	if (dmacap & FRV_DMA_CAP_DONE)
		val |= channel->donebit;
	else
		val &= ~channel->donebit;
	__set_SOR(val);

	ret = dma;
out:
	write_unlock(&frv_dma_channels_lock);
	return ret;
} /* end frv_dma_open() */

EXPORT_SYMBOL(frv_dma_open);

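
/*
 * Illustrative sketch of how a driver might use this interface; the device
 * name, handler and register values below are hypothetical and only meant to
 * show the call sequence (open -> config -> start -> ... -> close).  The
 * handler signature follows the way dma_irq_handler() above invokes it.
 *
 *	static irqreturn_t mydev_dma_done(int dma, unsigned long cstr, void *data)
 *	{
 *		// transfer finished; cstr is the channel status at interrupt time
 *		return IRQ_HANDLED;
 *	}
 *
 *	dma = frv_dma_open("mydev", 1 << 2, FRV_DMA_CAP_DREQ | FRV_DMA_CAP_DACK,
 *			   mydev_dma_done, 0, mydev);
 *	if (dma < 0)
 *		return dma;
 *	frv_dma_config(dma, ccfr, cctr, apr);
 *	frv_dma_start(dma, src, dst, pix, six, bcl);
 *	...
 *	frv_dma_close(dma);
 */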
/*****************************************************************************/
/*
 * close a DMA channel and its associated interrupt
 */
void frv_dma_close(int dma)
{
	struct frv_dma_channel *channel = &frv_dma_channels[dma];
	unsigned long flags;

	write_lock_irqsave(&frv_dma_channels_lock, flags);

	free_irq(channel->irq, channel);
	frv_dma_stop(dma);

	channel->flags &= ~FRV_DMA_FLAGS_INUSE;

	write_unlock_irqrestore(&frv_dma_channels_lock, flags);
} /* end frv_dma_close() */

EXPORT_SYMBOL(frv_dma_close);

/*****************************************************************************/
/*
 * set static configuration on a DMA channel
 */
void frv_dma_config(int dma, unsigned long ccfr, unsigned long cctr, unsigned long apr)
{
	unsigned long ioaddr = frv_dma_channels[dma].ioaddr;

	___set_DMAC(ioaddr, CCFR, ccfr);
	___set_DMAC(ioaddr, CCTR, cctr);
	___set_DMAC(ioaddr, APR,  apr);
	mb();

} /* end frv_dma_config() */

EXPORT_SYMBOL(frv_dma_config);

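
/* The three values above go straight into the channel's CCFR, CCTR and APR
 * registers; this is expected to be done while the channel is inactive,
 * before frv_dma_start() sets the ACT bit. */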
/*****************************************************************************/
/*
 * start a DMA channel
 */
void frv_dma_start(int dma,
		   unsigned long sba, unsigned long dba,
		   unsigned long pix, unsigned long six, unsigned long bcl)
{
	unsigned long ioaddr = frv_dma_channels[dma].ioaddr;

	___set_DMAC(ioaddr, SBA,  sba);
	___set_DMAC(ioaddr, DBA,  dba);
	___set_DMAC(ioaddr, PIX,  pix);
	___set_DMAC(ioaddr, SIX,  six);
	___set_DMAC(ioaddr, BCL,  bcl);
	___set_DMAC(ioaddr, CSTR, 0);
	mb();

	__set_DMAC(ioaddr, CCTR, __get_DMAC(ioaddr, CCTR) | DMAC_CCTRx_ACT);
	frv_set_dma_inprogress(dma);

} /* end frv_dma_start() */

EXPORT_SYMBOL(frv_dma_start);

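
/* Note that the status register (CSTR) is cleared before the ACT bit is set
 * in CCTR, so a transfer never starts with stale status left over from the
 * previous one. */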
/*****************************************************************************/
/*
 * restart a DMA channel that's been stopped in circular addressing mode by comparison-end
 */
void frv_dma_restart_circular(int dma, unsigned long six)
{
	unsigned long ioaddr = frv_dma_channels[dma].ioaddr;

	___set_DMAC(ioaddr, SIX,  six);
	___set_DMAC(ioaddr, CSTR, __get_DMAC(ioaddr, CSTR) & ~DMAC_CSTRx_CE);
	mb();

	__set_DMAC(ioaddr, CCTR, __get_DMAC(ioaddr, CCTR) | DMAC_CCTRx_ACT);
	frv_set_dma_inprogress(dma);

} /* end frv_dma_restart_circular() */

EXPORT_SYMBOL(frv_dma_restart_circular);

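
/* Only the SIX register is rewritten here; the other transfer parameters are
 * assumed to still hold the values left in place when the comparison-end
 * condition (DMAC_CSTRx_CE) stopped the channel, so clearing CE and setting
 * ACT again is enough to resume. */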
/*****************************************************************************/
/*
 * stop a DMA channel
 */
void frv_dma_stop(int dma)
{
	unsigned long ioaddr = frv_dma_channels[dma].ioaddr;
	uint32_t cctr;

	___set_DMAC(ioaddr, CSTR, 0);
	cctr = __get_DMAC(ioaddr, CCTR);
	cctr &= ~(DMAC_CCTRx_IE | DMAC_CCTRx_ACT);
	cctr |= DMAC_CCTRx_FC;			/* fifo clear */
	__set_DMAC(ioaddr, CCTR, cctr);
	__set_DMAC(ioaddr, BCL, 0);
	frv_clear_dma_inprogress(dma);
} /* end frv_dma_stop() */

EXPORT_SYMBOL(frv_dma_stop);

/*****************************************************************************/
/*
 * test interrupt status of DMA channel
 */
int is_frv_dma_interrupting(int dma)
{
	unsigned long ioaddr = frv_dma_channels[dma].ioaddr;

	return __get_DMAC(ioaddr, CSTR) & (1 << 23);

} /* end is_frv_dma_interrupting() */

EXPORT_SYMBOL(is_frv_dma_interrupting);

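
/* Bit 23 of CSTR is the channel's interrupt-pending flag (presumably the same
 * bit that the DMAC_CSTRx_INT constant used in frv_dma_resume_all() names). */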
/*****************************************************************************/
/*
 * dump data about a DMA channel
 */
void frv_dma_dump(int dma)
{
	unsigned long ioaddr = frv_dma_channels[dma].ioaddr;
	unsigned long cstr, pix, six, bcl;

	cstr = __get_DMAC(ioaddr, CSTR);
	pix  = __get_DMAC(ioaddr, PIX);
	six  = __get_DMAC(ioaddr, SIX);
	bcl  = __get_DMAC(ioaddr, BCL);

	printk("DMA[%d] cstr=%lx pix=%lx six=%lx bcl=%lx\n", dma, cstr, pix, six, bcl);

} /* end frv_dma_dump() */

EXPORT_SYMBOL(frv_dma_dump);

/*****************************************************************************/
/*
 * pause all DMA controllers
 * - called by clock mangling routines
 * - caller must be holding interrupts disabled
 */
void frv_dma_pause_all(void)
{
	struct frv_dma_channel *channel;
	unsigned long ioaddr;
	unsigned long cstr, cctr;
	int dma;

	write_lock(&frv_dma_channels_lock);

	for (dma = FRV_DMA_NCHANS - 1; dma >= 0; dma--) {
		channel = &frv_dma_channels[dma];

		if (!(channel->flags & FRV_DMA_FLAGS_INUSE))
			continue;

		ioaddr = channel->ioaddr;
		cctr = __get_DMAC(ioaddr, CCTR);
		if (cctr & DMAC_CCTRx_ACT) {
			cctr &= ~DMAC_CCTRx_ACT;
			__set_DMAC(ioaddr, CCTR, cctr);

			do {
				cstr = __get_DMAC(ioaddr, CSTR);
			} while (cstr & DMAC_CSTRx_BUSY);

			if (cstr & DMAC_CSTRx_FED)
				channel->flags |= FRV_DMA_FLAGS_PAUSED;
			frv_clear_dma_inprogress(dma);
		}
	}

} /* end frv_dma_pause_all() */

EXPORT_SYMBOL(frv_dma_pause_all);

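
/* Note the locking protocol between the routines above and below:
 * frv_dma_pause_all() takes frv_dma_channels_lock and returns with it still
 * held, and frv_dma_resume_all() releases it, so no channel can be opened or
 * closed while the clocks are being changed in between. */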
/*****************************************************************************/
/*
 * resume paused DMA controllers
 * - called by clock mangling routines
 * - caller must be holding interrupts disabled
 */
void frv_dma_resume_all(void)
{
	struct frv_dma_channel *channel;
	unsigned long ioaddr;
	unsigned long cstr, cctr;
	int dma;

	for (dma = FRV_DMA_NCHANS - 1; dma >= 0; dma--) {
		channel = &frv_dma_channels[dma];

		if (!(channel->flags & FRV_DMA_FLAGS_PAUSED))
			continue;

		ioaddr = channel->ioaddr;
		cstr = __get_DMAC(ioaddr, CSTR);
		cstr &= ~(DMAC_CSTRx_FED | DMAC_CSTRx_INT);
		__set_DMAC(ioaddr, CSTR, cstr);

		cctr = __get_DMAC(ioaddr, CCTR);
		cctr |= DMAC_CCTRx_ACT;
		__set_DMAC(ioaddr, CCTR, cctr);

		channel->flags &= ~FRV_DMA_FLAGS_PAUSED;
		frv_set_dma_inprogress(dma);
	}

	write_unlock(&frv_dma_channels_lock);

} /* end frv_dma_resume_all() */

EXPORT_SYMBOL(frv_dma_resume_all);

/*****************************************************************************/
/*
 * clear DMA channel status
 */
void frv_dma_status_clear(int dma)
{
	unsigned long ioaddr = frv_dma_channels[dma].ioaddr;
	uint32_t cctr;

	___set_DMAC(ioaddr, CSTR, 0);

	cctr = __get_DMAC(ioaddr, CCTR);
} /* end frv_dma_status_clear() */

EXPORT_SYMBOL(frv_dma_status_clear);

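
/* The trailing CCTR read in frv_dma_status_clear() has no effect at the C
 * level; presumably it acts as a posting read that pushes the CSTR write out
 * to the controller before the caller proceeds. */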