[netbsd-mini2440.git] / sys / arch / arm / at91 / at91aic.c
/*	$Id: at91aic.c,v 1.3 2009/10/23 06:53:12 snj Exp $	*/
/*	$NetBSD: at91aic.c,v 1.2 2008/07/03 01:15:38 matt Exp $	*/

/*
 * Copyright (c) 2007 Embedtronics Oy.
 * All rights reserved.
 *
 * Based on ep93xx_intr.c
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jesse Off
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ichiro FUKUHARA and Naoto Shimazaki.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Interrupt support for Atmel's AT91xx9xxx family of controllers.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/termios.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/at91/at91reg.h>
#include <arm/at91/at91var.h>
#include <arm/at91/at91aicreg.h>
#include <arm/at91/at91aicvar.h>
#define NIRQ	32

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
static u_int32_t aic_imask[NIPL];

/* Software copy of the IRQs we have enabled. */
volatile u_int32_t aic_intr_enabled;

#define AICREG(reg)	*((volatile u_int32_t*) (AT91AIC_BASE + (reg)))
static int	at91aic_match(device_t, cfdata_t, void *);
static void	at91aic_attach(device_t, device_t, void *);

CFATTACH_DECL(at91aic, sizeof(struct device),
    at91aic_match, at91aic_attach, NULL, NULL);
static int
at91aic_match(device_t parent, cfdata_t match, void *aux)
{
	if (strcmp(match->cf_name, "at91aic") == 0)
		return 2;
	return 0;
}
static void
at91aic_attach(device_t parent, device_t self, void *aux)
{
	(void)parent; (void)self; (void)aux;
	printf("\n");
}
static inline void
at91_set_intrmask(u_int32_t aic_irqs)
{
	AICREG(AIC_IDCR) = aic_irqs;
	AICREG(AIC_IECR) = aic_intr_enabled & ~aic_irqs;
}

static inline void
at91_enable_irq(int irq)
{
	aic_intr_enabled |= (1U << irq);
	AICREG(AIC_IECR)  = (1U << irq);
}

static inline void
at91_disable_irq(int irq)
{
	aic_intr_enabled &= ~(1U << irq);
	AICREG(AIC_IDCR)  =  (1U << irq);
}
/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
at91aic_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		at91_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int aic_irqs = 0;
		for (irq = 0; irq < AIC_NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				aic_irqs |= (1U << irq);
		}
		aic_imask[ipl] = aic_irqs;
	}

	aic_imask[IPL_NONE] = 0;

	/*
	 * splvm() blocks all interrupts that use the kernel memory
	 * allocation facilities.
	 */
	aic_imask[IPL_VM] |= aic_imask[IPL_NONE];

	/*
	 * splclock() must block anything that uses the scheduler.
	 */
	aic_imask[IPL_CLOCK] |= aic_imask[IPL_VM];

	/*
	 * splhigh() must block "everything".
	 */
	aic_imask[IPL_HIGH] |= aic_imask[IPL_CLOCK];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < MIN(NIRQ, AIC_NIRQ); irq++) {
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			at91_enable_irq(irq);
	}

	/*
	 * Update the current mask.
	 */
	at91_set_intrmask(aic_imask[curcpl()]);
}
inline void
splx(int new)
{
	int	old;
	u_int	oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	old = curcpl();
	if (old != new) {
		set_curcpl(new);
		at91_set_intrmask(aic_imask[new]);
	}
	restore_interrupts(oldirqstate);
#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}
int
_splraise(int ipl)
{
	int	old;
	u_int	oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	old = curcpl();
	if (old != ipl) {
		set_curcpl(ipl);
		at91_set_intrmask(aic_imask[ipl]);
	}
	restore_interrupts(oldirqstate);

	return (old);
}
int
_spllower(int ipl)
{
	int	old = curcpl();

	if (old <= ipl)
		return (old);
	splx(ipl);
#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
	return (old);
}
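
/*
 * Illustrative note (not part of the original source): the spl*
 * primitives above are how the rest of the kernel brackets critical
 * sections against this controller's interrupts.  A minimal sketch of
 * the usual calling pattern follows; the IPL level and the work done
 * inside the block are only examples, and callers typically reach
 * _splraise() through the splfoo() macros from <machine/intr.h>.
 *
 *	int s;
 *
 *	s = _splraise(IPL_VM);	// mask IRQs registered at or below IPL_VM
 *	// ... touch data shared with interrupt handlers ...
 *	splx(s);		// restore the previous IPL
 */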
/*
 * at91aic_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
at91aic_init(void)
{
	struct intrq *iq;
	int i;

	aic_intr_enabled = 0;

	// disable interrupts:
	AICREG(AIC_IDCR) = -1;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		sprintf(iq->iq_name, "irq %d", i);
		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
				     NULL, "aic", iq->iq_name);
	}

	/* All interrupts should use IRQ not FIQ */

	AICREG(AIC_IDCR)  = -1;	/* disable interrupts			*/
	AICREG(AIC_ICCR)  = -1;	/* clear all interrupts			*/
	AICREG(AIC_DCR)   = 0;	/* not in debug mode, just to make sure	*/
	for (i = 0; i < NIRQ; i++) {
		AICREG(AIC_SMR(i)) = 0;	/* disable interrupt */
		AICREG(AIC_SVR(i)) = (u_int32_t)&intrq[i];	// address of interrupt queue
	}
	AICREG(AIC_FVR) = 0;	// fast interrupt...
	AICREG(AIC_SPU) = 0;	// spurious interrupt vector

	AICREG(AIC_EOICR) = 0;	/* clear logic... */
	AICREG(AIC_EOICR) = 0;	/* clear logic... */

	at91aic_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}
void *
at91aic_intr_establish(int irq, int ipl, int type, int (*ih_func)(void *), void *arg)
{
	struct intrq	*iq;
	struct intrhand	*ih;
	u_int		oldirqstate;
	unsigned	ok;
	uint32_t	smr;

	if (irq < 0 || irq >= NIRQ)
		panic("intr_establish: IRQ %d out of range", irq);
	if (ipl < 0 || ipl >= NIPL)
		panic("intr_establish: IPL %d out of range", ipl);

	smr = 1;	// all interrupts have priority one.. ok?
	switch (type) {
	case _INTR_LOW_LEVEL:
		smr |= AIC_SMR_SRCTYPE_LVL_LO;
		break;
	case INTR_HIGH_LEVEL:
		smr |= AIC_SMR_SRCTYPE_LVL_HI;
		break;
	case INTR_FALLING_EDGE:
		smr |= AIC_SMR_SRCTYPE_FALLING;
		break;
	case INTR_RISING_EDGE:
		smr |= AIC_SMR_SRCTYPE_RISING;
		break;
	default:
		panic("intr_establish: interrupt type %d is invalid", type);
	}

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = ih_func;
	ih->ih_arg = arg;
	ih->ih_irq = irq;
	ih->ih_ipl = ipl;

	iq = &intrq[irq];

	oldirqstate = disable_interrupts(I32_bit);
	if (TAILQ_FIRST(&iq->iq_list) == NULL || (iq->iq_type & ~type) == 0) {
		AICREG(AIC_SMR(irq)) = smr;
		iq->iq_type = type;
		TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
		at91aic_calculate_masks();
		ok = 1;
	} else
		ok = 0;
	restore_interrupts(oldirqstate);

	if (ok) {
#ifdef AT91AIC_DEBUG
		int i;
		printf("\n");
		for (i = 0; i < NIPL; i++) {
			printf("IPL%d: aic_imask=0x%08X\n", i, aic_imask[i]);
		}
#endif
	} else {
		free(ih, M_DEVBUF);
		ih = NULL;
	}

	return (ih);
}
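
/*
 * Illustrative note (not part of the original source): a sketch of how
 * an attached device driver would typically hook its interrupt through
 * the routine above.  The IRQ number, IPL, trigger type, handler and
 * softc fields are hypothetical and depend on the device.
 *
 *	sc->sc_ih = at91aic_intr_establish(sa->sa_irq, IPL_SERIAL,
 *	    INTR_HIGH_LEVEL, foo_intr, sc);
 *	if (sc->sc_ih == NULL)
 *		aprint_error_dev(self, "couldn't establish interrupt\n");
 *
 * The returned cookie is later passed to at91aic_intr_disestablish()
 * below to unhook the handler again.
 */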
void
at91aic_intr_disestablish(void *cookie)
{
	struct intrhand	*ih = cookie;
	struct intrq	*iq = &intrq[ih->ih_irq];
	u_int		oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
	at91aic_calculate_masks();
	restore_interrupts(oldirqstate);
}
#include <arm/at91/at91reg.h>
#include <arm/at91/at91dbgureg.h>
#include <arm/at91/at91pdcreg.h>
static inline void intr_process(struct intrq *iq, int pcpl, struct irqframe *frame);

static inline void
intr_process(struct intrq *iq, int pcpl, struct irqframe *frame)
{
	struct intrhand	*ih;
	u_int		oldirqstate, intr;

	intr = iq - intrq;

	iq->iq_ev.ev_count++;
	uvmexp.intrs++;

	if ((1U << intr) & aic_imask[pcpl]) {
		panic("interrupt %d should be masked! (aic_imask=0x%X)", intr, aic_imask[pcpl]);
	}

	if (iq->iq_busy) {
		panic("interrupt %d busy!", intr);
	}

	iq->iq_busy = 1;

	for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
	     ih = TAILQ_NEXT(ih, ih_list)) {
		set_curcpl(ih->ih_ipl);
		at91_set_intrmask(aic_imask[ih->ih_ipl]);
		oldirqstate = enable_interrupts(I32_bit);
		(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
		restore_interrupts(oldirqstate);
	}

	if (!iq->iq_busy) {
		panic("interrupt %d not busy!", intr);
	}
	iq->iq_busy = 0;

	set_curcpl(pcpl);
	at91_set_intrmask(aic_imask[pcpl]);
}
void
at91aic_intr_dispatch(struct irqframe *frame)
{
	struct intrq	*iq;
	int		pcpl = curcpl();

	iq = (struct intrq *)AICREG(AIC_IVR);	// get current queue

	// OK, service interrupt
	if (iq)
		intr_process(iq, pcpl, frame);

	AICREG(AIC_EOICR) = 0;			// end of interrupt
}
#if 0
void
at91aic_intr_poll(int irq)
{
	u_int		oldirqstate;
	uint32_t	ipr;
	int		pcpl = curcpl();

	oldirqstate = disable_interrupts(I32_bit);
	ipr = AICREG(AIC_IPR);
	if ((ipr & (1U << irq) & ~aic_imask[pcpl]))
		intr_process(&intrq[irq], pcpl, NULL);
	restore_interrupts(oldirqstate);
#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}
#endif
void
at91aic_intr_poll(void *ihp, int flags)
{
	struct intrhand	*ih = ihp;
	u_int		oldirqstate, irq = ih->ih_irq;
	uint32_t	ipr;
	int		pcpl = curcpl();

	oldirqstate = disable_interrupts(I32_bit);
	ipr = AICREG(AIC_IPR);
	if ((ipr & (1U << irq))
	    && (flags || !(aic_imask[pcpl] & (1U << irq)))) {
		set_curcpl(ih->ih_ipl);
		at91_set_intrmask(aic_imask[ih->ih_ipl]);
		(void)enable_interrupts(I32_bit);
		(void)(*ih->ih_func)(ih->ih_arg ? ih->ih_arg : NULL);
		(void)disable_interrupts(I32_bit);
		set_curcpl(pcpl);
		at91_set_intrmask(aic_imask[pcpl]);
	}
	restore_interrupts(oldirqstate);

#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}