/*	$NetBSD: footbridge_irqhandler.c,v 1.21 2008/04/27 18:58:44 matt Exp $	*/

/*
 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC.
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef ARM_SPL_NOINLINE
#define ARM_SPL_NOINLINE
#endif

#include <sys/cdefs.h>
__KERNEL_RCSID(0,"$NetBSD: footbridge_irqhandler.c,v 1.21 2008/04/27 18:58:44 matt Exp $");

#include "opt_irqstats.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <uvm/uvm_extern.h>

#include <machine/intr.h>
#include <machine/cpu.h>
#include <arm/footbridge/dc21285mem.h>
#include <arm/footbridge/dc21285reg.h>

#include <dev/pci/pcivar.h>

#include <dev/isa/isavar.h>
/* Interrupt handler queues. */
static struct intrq footbridge_intrq[NIRQ];

/* Interrupts to mask at each level. */
int footbridge_imask[NIPL];

/* Software copy of the IRQs we have enabled. */
volatile uint32_t intr_enabled;

/* Interrupts pending. */
volatile int footbridge_ipending;

void footbridge_intr_dispatch(struct clockframe *frame);

const struct evcnt *footbridge_pci_intr_evcnt(void *, pci_intr_handle_t);
const struct evcnt *
footbridge_pci_intr_evcnt(void *pcv, pci_intr_handle_t ih)
{

        /* XXX check range is valid */
        if (ih >= 0x80 && ih <= 0x8f) {
                return isa_intr_evcnt(NULL, (ih & 0x0f));
        }

        return &footbridge_intrq[ih].iq_ev;
}
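
/*
 * Usage sketch (illustrative only, not part of the original source): MI
 * PCI code normally reaches the routine above through pci_intr_evcnt(),
 * so a driver that wants to report its counter might do something like
 * the following; the "sc" softc members are hypothetical.
 *
 *	const struct evcnt *ev;
 *
 *	ev = pci_intr_evcnt(sc->sc_pc, sc->sc_ihandle);
 *	if (ev != NULL)
 *		aprint_normal_dev(sc->sc_dev, "interrupts counted as %s %s\n",
 *		    ev->ev_group, ev->ev_name);
 */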
static inline void
footbridge_enable_irq(int irq)
{
        intr_enabled |= (1U << irq);
        footbridge_set_intrmask();
}

static inline void
footbridge_disable_irq(int irq)
{
        intr_enabled &= ~(1U << irq);
        footbridge_set_intrmask();
}
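
/*
 * A minimal sketch of what the footbridge_set_intrmask() calls above are
 * expected to do with the software copy, assuming the DC21285 IRQ enable
 * set/clear registers from dc21285reg.h; the real helper lives in the
 * footbridge interrupt header and may differ in detail:
 *
 *	volatile uint32_t *csr = (volatile uint32_t *)DC21285_ARMCSR_VBASE;
 *	uint32_t enabled = intr_enabled & ICU_INT_HWMASK;
 *
 *	csr[IRQ_ENABLE_SET >> 2] = enabled;
 *	csr[IRQ_ENABLE_CLEAR >> 2] = ~enabled;
 */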
/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
footbridge_intr_calculate_masks(void)
{
        struct intrq *iq;
        struct intrhand *ih;
        int irq, ipl;

        /* First, figure out which IPLs each IRQ has. */
        for (irq = 0; irq < NIRQ; irq++) {
                int levels = 0;

                iq = &footbridge_intrq[irq];
                footbridge_disable_irq(irq);
                TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
                        levels |= (1U << ih->ih_ipl);
                }
                iq->iq_levels = levels;
        }
        /* Next, figure out which IRQs are used by each IPL. */
        for (ipl = 0; ipl < NIPL; ipl++) {
                int irqs = 0;

                for (irq = 0; irq < NIRQ; irq++) {
                        if (footbridge_intrq[irq].iq_levels & (1U << ipl))
                                irqs |= (1U << irq);
                }
                footbridge_imask[ipl] = irqs;
        }
        /* IPL_NONE must open up all interrupts. */
        KASSERT(footbridge_imask[IPL_NONE] == 0);
        KASSERT(footbridge_imask[IPL_SOFTCLOCK] == 0);
        KASSERT(footbridge_imask[IPL_SOFTBIO] == 0);
        KASSERT(footbridge_imask[IPL_SOFTNET] == 0);
        KASSERT(footbridge_imask[IPL_SOFTSERIAL] == 0);
        /*
         * Enforce a hierarchy that gives "slow" devices (or devices with
         * limited input buffer space/"real-time" requirements) a better
         * chance at not dropping data.
         */
        footbridge_imask[IPL_SCHED] |= footbridge_imask[IPL_VM];
        footbridge_imask[IPL_HIGH] |= footbridge_imask[IPL_SCHED];
        /*
         * Calculate the ipl to go to when handling this interrupt.
         */
        for (irq = 0, iq = footbridge_intrq; irq < NIRQ; irq++, iq++) {
                int irqs = (1U << irq);

                if (!TAILQ_EMPTY(&iq->iq_list)) {
                        footbridge_enable_irq(irq);
                        TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
                                irqs |= footbridge_imask[ih->ih_ipl];
                        }
                }
                iq->iq_mask = irqs;
        }
}
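
/*
 * Worked example (illustrative, not from the original source): suppose
 * IRQ 18 has one handler at IPL_VM and IRQ 4 has one at IPL_SCHED.  The
 * first pass records iq_levels of (1U << IPL_VM) and (1U << IPL_SCHED)
 * respectively, the second pass yields footbridge_imask[IPL_VM] =
 * (1U << 18) and footbridge_imask[IPL_SCHED] = (1U << 4), and the
 * hierarchy step folds the IPL_VM bits into IPL_SCHED and IPL_HIGH, so
 * raising to IPL_SCHED blocks both IRQs while IPL_VM blocks only IRQ 18.
 */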
int
_splraise(int ipl)
{
        return (footbridge_splraise(ipl));
}

/* This will always take us to the ipl passed in. */
void
splx(int new)
{
        footbridge_splx(new);
}

int
_spllower(int ipl)
{
        return (footbridge_spllower(ipl));
}
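
/*
 * Illustrative usage (hypothetical driver code): the MI spl(9) macros
 * bottom out in the wrappers above, so a typical critical section such as
 *
 *	int s;
 *
 *	s = splvm();
 *	... touch state shared with interrupt handlers ...
 *	splx(s);
 *
 * raises the current IPL on the way in and restores it on the way out
 * through the footbridge spl routines.
 */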
void
footbridge_intr_init(void)
{
        struct intrq *iq;
        int i;

        intr_enabled = 0;
        set_curcpl(0xffffffff);
        footbridge_ipending = 0;
        footbridge_set_intrmask();

        for (i = 0, iq = footbridge_intrq; i < NIRQ; i++, iq++) {
                TAILQ_INIT(&iq->iq_list);

                sprintf(iq->iq_name, "irq %d", i);
                evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
                    NULL, "footbridge", iq->iq_name);
        }

        footbridge_intr_calculate_masks();

        /* Enable IRQs; we don't have any FIQs. */
        enable_interrupts(I32_bit);
}
void *
footbridge_intr_claim(int irq, int ipl, const char *name,
    int (*func)(void *), void *arg)
{
        struct intrq *iq;
        struct intrhand *ih;
        u_int oldirqstate;

        if (irq < 0 || irq >= NIRQ)
                panic("footbridge_intr_claim: IRQ %d out of range", irq);

        ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
        if (ih == NULL)
                return (NULL);

        ih->ih_func = func;
        ih->ih_arg = arg;
        ih->ih_ipl = ipl;
        ih->ih_irq = irq;

        iq = &footbridge_intrq[irq];

        iq->iq_ist = IST_LEVEL;

        oldirqstate = disable_interrupts(I32_bit);

        TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);

        footbridge_intr_calculate_masks();

        /* Detach the existing event counter and add the new name. */
        evcnt_detach(&iq->iq_ev);
        evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
            NULL, "footbridge", name);

        restore_interrupts(oldirqstate);

        return (ih);
}
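
/*
 * Attachment sketch (hypothetical softc and handler names, not part of
 * the original source): a footbridge device driver would typically hook
 * its interrupt along these lines, checking for failure because the
 * allocation above uses M_NOWAIT:
 *
 *	sc->sc_ih = footbridge_intr_claim(sc->sc_irq, IPL_BIO,
 *	    device_xname(sc->sc_dev), mydev_intr, sc);
 *	if (sc->sc_ih == NULL)
 *		aprint_error_dev(sc->sc_dev,
 *		    "unable to establish interrupt\n");
 */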
void
footbridge_intr_disestablish(void *cookie)
{
        struct intrhand *ih = cookie;
        struct intrq *iq = &footbridge_intrq[ih->ih_irq];
        u_int oldirqstate;

        /* XXX need to free ih ? */
        oldirqstate = disable_interrupts(I32_bit);

        TAILQ_REMOVE(&iq->iq_list, ih, ih_list);

        footbridge_intr_calculate_masks();

        restore_interrupts(oldirqstate);
}
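
/*
 * Matching teardown sketch (same hypothetical names as above): a detach
 * routine would undo the attachment with
 *
 *	if (sc->sc_ih != NULL) {
 *		footbridge_intr_disestablish(sc->sc_ih);
 *		sc->sc_ih = NULL;
 *	}
 *
 * Note that the handler structure itself is not freed here; see the XXX
 * comment above.
 */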
static inline uint32_t
footbridge_intstatus(void)
{
        return ((volatile uint32_t *)(DC21285_ARMCSR_VBASE))[IRQ_STATUS >> 2];
}
/* Called with external interrupts disabled. */
void
footbridge_intr_dispatch(struct clockframe *frame)
{
        struct intrq *iq;
        struct intrhand *ih;
        int oldirqstate, irq, ibit, hwpend;
        struct cpu_info * const ci = curcpu();
        const int ppl = ci->ci_cpl;
        const int imask = footbridge_imask[ppl];

        hwpend = footbridge_intstatus();

        /*
         * Disable all the interrupts that are pending.  We will
         * re-enable them once they are processed and not masked.
         */
        intr_enabled &= ~hwpend;
        footbridge_set_intrmask();

        while (hwpend != 0) {
                int intr_rc = 0;

                irq = ffs(hwpend) - 1;
                ibit = (1U << irq);

                hwpend &= ~ibit;

                if (imask & ibit) {
                        /*
                         * IRQ is masked; mark it as pending and check
                         * the next one.  Note: the IRQ is already disabled.
                         */
                        footbridge_ipending |= ibit;
                        continue;
                }

                footbridge_ipending &= ~ibit;

                iq = &footbridge_intrq[irq];
                iq->iq_ev.ev_count++;
                TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
                        ci->ci_cpl = ih->ih_ipl;
                        oldirqstate = enable_interrupts(I32_bit);
                        intr_rc = (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
                        restore_interrupts(oldirqstate);
                        if (intr_rc != 1)
                                break;
                }

                ci->ci_cpl = ppl;

                /* Re-enable this interrupt now that it's been cleared. */
                intr_enabled |= ibit;
                footbridge_set_intrmask();

                /*
                 * Also check for any new interrupts that may have occurred
                 * that we can handle at this spl level.
                 */
                hwpend |= (footbridge_ipending & ICU_INT_HWMASK) & ~imask;
        }

#ifdef __HAVE_FAST_SOFTINTS
        cpu_dosoftints();
#endif /* __HAVE_FAST_SOFTINTS */
}
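
/*
 * Dispatch walk-through (illustrative): if a status bit is set while the
 * entry IPL's mask also covers that bit, the loop above leaves the IRQ
 * disabled, records it in footbridge_ipending and moves on to the next
 * pending bit.  Unmasked IRQs have their handlers run at the handler's
 * IPL with CPU interrupts re-enabled, and are then unmasked again; at the
 * bottom of the loop any pending bits not covered by the entry mask are
 * folded back into hwpend so they are serviced in the same pass.
 */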