/*	$NetBSD: ep93xx_intr.c,v 1.12 2008/04/27 18:58:44 matt Exp $	*/

/*
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jesse Off
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ichiro FUKUHARA and Naoto Shimazaki.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ep93xx_intr.c,v 1.12 2008/04/27 18:58:44 matt Exp $");

/*
 * Interrupt support for the Cirrus Logic EP93XX
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/termios.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/ep93xx/ep93xxreg.h>
#include <arm/ep93xx/ep93xxvar.h>
/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
static u_int32_t vic1_imask[NIPL];
static u_int32_t vic2_imask[NIPL];

/* Current interrupt priority level. */
volatile int hardware_spl_level;

/* Software copy of the IRQs we have enabled. */
volatile u_int32_t vic1_intr_enabled;
volatile u_int32_t vic2_intr_enabled;

/* Interrupts pending. */
static volatile int ipending;

void	ep93xx_intr_dispatch(struct irqframe *frame);

#define	VIC1REG(reg)	*((volatile u_int32_t*) (EP93XX_AHB_VBASE + \
				EP93XX_AHB_VIC1 + (reg)))
#define	VIC2REG(reg)	*((volatile u_int32_t*) (EP93XX_AHB_VBASE + \
				EP93XX_AHB_VIC2 + (reg)))
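/*
 * Mask helper: first clear the IRQs named by the arguments, then
 * (re)enable whatever remains of the software-enabled set.  The VIC
 * IntEnable/IntEnClear registers are bit-set/bit-clear style, so
 * writing zero bits leaves other interrupt sources untouched.
 */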
static void
ep93xx_set_intrmask(u_int32_t vic1_irqs, u_int32_t vic2_irqs)
{
	VIC1REG(EP93XX_VIC_IntEnClear) = vic1_irqs;
	VIC1REG(EP93XX_VIC_IntEnable) = vic1_intr_enabled & ~vic1_irqs;
	VIC2REG(EP93XX_VIC_IntEnClear) = vic2_irqs;
	VIC2REG(EP93XX_VIC_IntEnable) = vic2_intr_enabled & ~vic2_irqs;
}
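/*
 * IRQs 0..VIC_NIRQ-1 are routed through VIC1 and IRQs VIC_NIRQ..NIRQ-1
 * through VIC2; the helpers below keep the software enable copies
 * (vic1_intr_enabled/vic2_intr_enabled) in sync with the hardware
 * enable registers.
 */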
static void
ep93xx_enable_irq(int irq)
{
	if (irq < VIC_NIRQ) {
		vic1_intr_enabled |= (1U << irq);
		VIC1REG(EP93XX_VIC_IntEnable) = (1U << irq);
	} else {
		vic2_intr_enabled |= (1U << (irq - VIC_NIRQ));
		VIC2REG(EP93XX_VIC_IntEnable) = (1U << (irq - VIC_NIRQ));
	}
}

static inline void
ep93xx_disable_irq(int irq)
{
	if (irq < VIC_NIRQ) {
		vic1_intr_enabled &= ~(1U << irq);
		VIC1REG(EP93XX_VIC_IntEnClear) = (1U << irq);
	} else {
		vic2_intr_enabled &= ~(1U << (irq - VIC_NIRQ));
		VIC2REG(EP93XX_VIC_IntEnClear) = (1U << (irq - VIC_NIRQ));
	}
}
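/*
 * ep93xx_intr_calculate_masks:
 *
 *	Rebuild vic1_imask[]/vic2_imask[] and the per-IRQ blocking masks
 *	from the currently established handlers: pass 1 records the IPLs
 *	used by each IRQ, pass 2 inverts that into a per-IPL IRQ mask,
 *	and pass 3 computes the set of IRQs to block while servicing
 *	each IRQ.
 */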
/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
ep93xx_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		ep93xx_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int vic1_irqs = 0;
		int vic2_irqs = 0;
		for (irq = 0; irq < VIC_NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				vic1_irqs |= (1U << irq);
		}
		vic1_imask[ipl] = vic1_irqs;
		for (irq = 0; irq < VIC_NIRQ; irq++) {
			if (intrq[irq + VIC_NIRQ].iq_levels & (1U << ipl))
				vic2_irqs |= (1U << irq);
		}
		vic2_imask[ipl] = vic2_irqs;
	}

	KASSERT(vic1_imask[IPL_NONE] == 0);
	KASSERT(vic2_imask[IPL_NONE] == 0);

	/*
	 * splclock() must block anything that uses the scheduler.
	 */
	vic1_imask[IPL_SCHED] |= vic1_imask[IPL_VM];
	vic2_imask[IPL_SCHED] |= vic2_imask[IPL_VM];

	/*
	 * splhigh() must block "everything".
	 */
	vic1_imask[IPL_HIGH] |= vic1_imask[IPL_SCHED];
	vic2_imask[IPL_HIGH] |= vic2_imask[IPL_SCHED];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int vic1_irqs;
		int vic2_irqs;

		if (irq < VIC_NIRQ) {
			vic1_irqs = (1U << irq);
			vic2_irqs = 0;
		} else {
			vic1_irqs = 0;
			vic2_irqs = (1U << (irq - VIC_NIRQ));
		}
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			ep93xx_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			vic1_irqs |= vic1_imask[ih->ih_ipl];
			vic2_irqs |= vic2_imask[ih->ih_ipl];
		}
		iq->iq_vic1_mask = vic1_irqs;
		iq->iq_vic2_mask = vic2_irqs;
	}
}
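/*
 * spl support: curcpl() holds the software IPL, while hardware_spl_level
 * records the level the VIC masks were last programmed for, so splx()
 * can skip the register writes when nothing needs to change.
 */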
inline void
splx(int new)
{
	int old;
	u_int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	old = curcpl();
	set_curcpl(new);
	if (new != hardware_spl_level) {
		hardware_spl_level = new;
		ep93xx_set_intrmask(vic1_imask[new], vic2_imask[new]);
	}
	restore_interrupts(oldirqstate);

#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}
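/*
 * _splraise() only records the new software level; the VIC masks are
 * updated lazily, by splx() or by ep93xx_intr_dispatch() when an
 * interrupt actually arrives.
 */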
int
_splraise(int ipl)
{
	int old;
	u_int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	old = curcpl();
	set_curcpl(ipl);
	restore_interrupts(oldirqstate);
	return (old);
}

int
_spllower(int ipl)
{
	int old = curcpl();

	if (old <= ipl)
		return (old);
	splx(ipl);
	return (old);
}
/*
 * ep93xx_intr_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
ep93xx_intr_init(void)
{
	struct intrq *iq;
	int i;

	vic1_intr_enabled = 0;
	vic2_intr_enabled = 0;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		sprintf(iq->iq_name, "irq %d", i);
		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
		    NULL, (i < VIC_NIRQ ? "vic1" : "vic2"),
		    iq->iq_name);
	}
	curcpu()->ci_intr_depth = 0;
	set_curcpl(0);
	hardware_spl_level = 0;

	/* All interrupts should use IRQ not FIQ */
	VIC1REG(EP93XX_VIC_IntSelect) = 0;
	VIC2REG(EP93XX_VIC_IntSelect) = 0;

	ep93xx_intr_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}
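/*
 * ep93xx_intr_establish:
 *
 *	Register ih_func/arg on the given IRQ line at the given IPL and
 *	recompute the VIC masks.  Returns an opaque cookie for
 *	ep93xx_intr_disestablish(), or NULL if allocation fails.
 */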
void *
ep93xx_intr_establish(int irq, int ipl, int (*ih_func)(void *), void *arg)
{
	struct intrq* iq;
	struct intrhand* ih;
	u_int oldirqstate;

	/* Valid IRQs are 0..NIRQ-1 and valid IPLs are 0..NIPL-1. */
	if (irq < 0 || irq >= NIRQ)
		panic("ep93xx_intr_establish: IRQ %d out of range", irq);
	if (ipl < 0 || ipl >= NIPL)
		panic("ep93xx_intr_establish: IPL %d out of range", ipl);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = ih_func;
	ih->ih_arg = arg;
	ih->ih_irq = irq;
	ih->ih_ipl = ipl;

	iq = &intrq[irq];

	oldirqstate = disable_interrupts(I32_bit);
	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
	ep93xx_intr_calculate_masks();
	restore_interrupts(oldirqstate);

	return (ih);
}
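/*
 * Typical caller (hypothetical "foo" driver; the softc, IRQ source,
 * and handler names are illustrative only, not from this tree):
 *
 *	sc->sc_ih = ep93xx_intr_establish(sc->sc_irq, IPL_NET,
 *	    foo_intr, sc);
 *	if (sc->sc_ih == NULL)
 *		aprint_error_dev(sc->sc_dev, "can't establish interrupt\n");
 */

/*
 * ep93xx_intr_disestablish:
 *
 *	Remove a handler registered with ep93xx_intr_establish() and
 *	recompute the VIC masks.
 */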
void
ep93xx_intr_disestablish(void *cookie)
{
	struct intrhand* ih = cookie;
	struct intrq* iq = &intrq[ih->ih_irq];
	u_int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
	ep93xx_intr_calculate_masks();
	restore_interrupts(oldirqstate);

	/* Release the handler record allocated in ep93xx_intr_establish(). */
	free(ih, M_DEVBUF);
}
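/*
 * ep93xx_intr_dispatch:
 *
 *	Hardware IRQ dispatcher.  Reads the pending-IRQ status from both
 *	VICs, masks the current level plus everything found pending, then
 *	runs the handler chain for the lowest-numbered unmasked IRQ with
 *	CPU interrupts re-enabled, each handler at its own IPL.
 */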
void
ep93xx_intr_dispatch(struct irqframe *frame)
{
	struct intrq* iq;
	struct intrhand* ih;
	u_int oldirqstate;
	int pcpl;
	u_int32_t vic1_hwpend;
	u_int32_t vic2_hwpend;
	int irq;

	pcpl = curcpl();

	vic1_hwpend = VIC1REG(EP93XX_VIC_IRQStatus);
	vic2_hwpend = VIC2REG(EP93XX_VIC_IRQStatus);

	hardware_spl_level = pcpl;
	ep93xx_set_intrmask(vic1_imask[pcpl] | vic1_hwpend,
	    vic2_imask[pcpl] | vic2_hwpend);

	vic1_hwpend &= ~vic1_imask[pcpl];
	vic2_hwpend &= ~vic2_imask[pcpl];

	if (vic1_hwpend) {
		irq = ffs(vic1_hwpend) - 1;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
			set_curcpl(ih->ih_ipl);
			oldirqstate = enable_interrupts(I32_bit);
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
	} else if (vic2_hwpend) {
		irq = ffs(vic2_hwpend) - 1;

		iq = &intrq[irq + VIC_NIRQ];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
			set_curcpl(ih->ih_ipl);
			oldirqstate = enable_interrupts(I32_bit);
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
	}

	set_curcpl(pcpl);
	hardware_spl_level = pcpl;
	ep93xx_set_intrmask(vic1_imask[pcpl], vic2_imask[pcpl]);

#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}