1 /* $NetBSD: i80321_icu.c,v 1.18 2008/04/27 18:58:45 matt Exp $ */
4 * Copyright (c) 2001, 2002, 2006 Wasabi Systems, Inc.
7 * Written by Jason R. Thorpe and Steve C. Woodford for Wasabi Systems, Inc.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
38 #include <sys/cdefs.h>
39 __KERNEL_RCSID(0, "$NetBSD: i80321_icu.c,v 1.18 2008/04/27 18:58:45 matt Exp $");
41 #ifndef EVBARM_SPL_NOINLINE
42 #define EVBARM_SPL_NOINLINE
46 * Interrupt support for the Intel i80321 I/O Processor.
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/malloc.h>
53 #include <uvm/uvm_extern.h>
55 #include <machine/bus.h>
56 #include <machine/intr.h>
58 #include <arm/cpufunc.h>
60 #include <arm/xscale/i80321reg.h>
61 #include <arm/xscale/i80321var.h>
63 /* Interrupt handler queues. */
64 struct intrq intrq
[NIRQ
];
66 /* Interrupts to mask at each level. */
67 int i80321_imask
[NIPL
];
69 /* Interrupts pending. */
70 volatile int i80321_ipending
;
72 /* Software copy of the IRQs we have enabled. */
73 volatile uint32_t intr_enabled
;
75 /* Mask if interrupts steered to FIQs. */
79 * Interrupt bit names.
81 const char * const i80321_irqnames
[] = {
116 void i80321_intr_dispatch(struct clockframe
*frame
);
118 static inline uint32_t
119 i80321_iintsrc_read(void)
123 __asm
volatile("mrc p6, 0, %0, c8, c0, 0"
127 * The IINTSRC register shows bits that are active even
128 * if they are masked in INTCTL, so we have to mask them
129 * off with the interrupts we consider enabled.
131 return (iintsrc
& intr_enabled
);
135 i80321_set_intrsteer(void)
138 __asm
volatile("mcr p6, 0, %0, c4, c0, 0"
140 : "r" (intr_steer
& ICU_INT_HWMASK
));
144 i80321_enable_irq(int irq
)
147 intr_enabled
|= (1U << irq
);
148 i80321_set_intrmask();
152 i80321_disable_irq(int irq
)
155 intr_enabled
&= ~(1U << irq
);
156 i80321_set_intrmask();
160 * NOTE: This routine must be called with interrupts disabled in the CPSR.
163 i80321_intr_calculate_masks(void)
169 /* First, figure out which IPLs each IRQ has. */
170 for (irq
= 0; irq
< NIRQ
; irq
++) {
173 i80321_disable_irq(irq
);
174 for (ih
= TAILQ_FIRST(&iq
->iq_list
); ih
!= NULL
;
175 ih
= TAILQ_NEXT(ih
, ih_list
))
176 levels
|= (1U << ih
->ih_ipl
);
177 iq
->iq_levels
= levels
;
180 /* Next, figure out which IRQs are used by each IPL. */
181 for (ipl
= 0; ipl
< NIPL
; ipl
++) {
183 for (irq
= 0; irq
< NIRQ
; irq
++) {
184 if (intrq
[irq
].iq_levels
& (1U << ipl
))
187 i80321_imask
[ipl
] = irqs
;
190 i80321_imask
[IPL_NONE
] = 0;
193 * Enforce a hierarchy that gives "slow" device (or devices with
194 * limited input buffer space/"real-time" requirements) a better
195 * chance at not dropping data.
200 * This assert might be useful, but only after some interrupts
201 * are configured. As it stands now, it will always fire early
202 * in the initialization phase. If it's useful enough to re-
203 * enable, it should be conditionalized on something else like
204 * having at least something in the levels/irqs above.
206 KASSERT(i80321_imask
[IPL_VM
] != 0);
208 i80321_imask
[IPL_SCHED
] |= i80321_imask
[IPL_VM
];
209 i80321_imask
[IPL_HIGH
] |= i80321_imask
[IPL_SCHED
];
212 * Now compute which IRQs must be blocked when servicing any
215 for (irq
= 0; irq
< NIRQ
; irq
++) {
216 int irqs
= (1U << irq
);
218 if (TAILQ_FIRST(&iq
->iq_list
) != NULL
)
219 i80321_enable_irq(irq
);
220 for (ih
= TAILQ_FIRST(&iq
->iq_list
); ih
!= NULL
;
221 ih
= TAILQ_NEXT(ih
, ih_list
))
222 irqs
|= i80321_imask
[ih
->ih_ipl
];
/* Lower the current IPL; thin wrapper around the machine-dependent helper. */
int
_spllower(int ipl)
{

	return (i80321_spllower(ipl));
}
/* Raise the current IPL; thin wrapper around the machine-dependent helper. */
int
_splraise(int ipl)
{

	return (i80321_splraise(ipl));
}
248 * Initialize the i80321 ICU. Called early in bootstrap
249 * to make sure the ICU is in a pristine state.
252 i80321_icu_init(void)
255 intr_enabled
= 0; /* All interrupts disabled */
256 i80321_set_intrmask();
258 intr_steer
= 0; /* All interrupts steered to IRQ */
259 i80321_set_intrsteer();
265 * Initialize the rest of the interrupt subsystem, making it
266 * ready to handle interrupts from devices.
269 i80321_intr_init(void)
276 for (i
= 0; i
< NIRQ
; i
++) {
278 TAILQ_INIT(&iq
->iq_list
);
280 evcnt_attach_dynamic(&iq
->iq_ev
, EVCNT_TYPE_INTR
,
281 NULL
, "iop321", i80321_irqnames
[i
]);
284 i80321_intr_calculate_masks();
286 /* Enable IRQs (don't yet use FIQs). */
287 enable_interrupts(I32_bit
);
291 i80321_intr_establish(int irq
, int ipl
, int (*func
)(void *), void *arg
)
297 if (irq
< 0 || irq
> NIRQ
)
298 panic("i80321_intr_establish: IRQ %d out of range", irq
);
300 ih
= malloc(sizeof(*ih
), M_DEVBUF
, M_NOWAIT
);
311 /* All IOP321 interrupts are level-triggered. */
312 iq
->iq_ist
= IST_LEVEL
;
314 oldirqstate
= disable_interrupts(I32_bit
);
316 TAILQ_INSERT_TAIL(&iq
->iq_list
, ih
, ih_list
);
318 i80321_intr_calculate_masks();
320 restore_interrupts(oldirqstate
);
326 i80321_intr_disestablish(void *cookie
)
328 struct intrhand
*ih
= cookie
;
329 struct intrq
*iq
= &intrq
[ih
->ih_irq
];
332 oldirqstate
= disable_interrupts(I32_bit
);
334 TAILQ_REMOVE(&iq
->iq_list
, ih
, ih_list
);
336 i80321_intr_calculate_masks();
338 restore_interrupts(oldirqstate
);
342 * Hardware interrupt handler.
344 * If I80321_HPI_ENABLED is defined, this code attempts to deal with
345 * HPI interrupts as best it can.
347 * The problem is that HPIs cannot be masked at the interrupt controller;
348 * they can only be masked by disabling IRQs in the XScale core.
350 * So, if an HPI comes in and we determine that it should be masked at
351 * the current IPL then we mark it pending in the usual way and set
352 * I32_bit in the interrupt frame. This ensures that when we return from
353 * i80321_intr_dispatch(), IRQs will be disabled in the XScale core. (To
354 * ensure IRQs are enabled later, i80321_splx() has been modified to do
355 * just that when a pending HPI interrupt is unmasked.) Additionally,
356 * because HPIs are level-triggered, the registered handler for the HPI
357 * interrupt will also be invoked with IRQs disabled. If a masked HPI
358 * occurs at the same time as another unmasked higher priority interrupt,
359 * the higher priority handler will also be invoked with IRQs disabled.
360 * As a result, the system could end up executing a lot of code with IRQs
361 * completely disabled if the HPI's IPL is relatively low.
363 * At the present time, the only known use of HPI is for the console UART
364 * on a couple of boards. This is probably the least intrusive use of HPI
365 * as IPL_SERIAL is the highest priority IPL in the system anyway. The
366 * code has not been tested with HPI hooked up to a class of device which
367 * interrupts below IPL_SERIAL. Indeed, such a configuration is likely to
368 * perform very poorly if at all, even though the following code has been
369 * designed (hopefully) to cope with it.
373 i80321_intr_dispatch(struct clockframe
*frame
)
377 int oldirqstate
, irq
, ibit
, hwpend
;
378 #ifdef I80321_HPI_ENABLED
381 struct cpu_info
* const ci
= curcpu();
382 const int ppl
= ci
->ci_cpl
;
383 const uint32_t imask
= i80321_imask
[ppl
];
385 hwpend
= i80321_iintsrc_read();
388 * Disable all the interrupts that are pending. We will
389 * reenable them once they are processed and not masked.
391 intr_enabled
&= ~hwpend
;
392 i80321_set_intrmask();
394 #ifdef I80321_HPI_ENABLED
395 oldirqstate
= 0; /* XXX: quell gcc warning */
398 while (hwpend
!= 0) {
399 #ifdef I80321_HPI_ENABLED
400 /* Deal with HPI interrupt first */
401 if (__predict_false(hwpend
& INT_HPIMASK
))
405 irq
= ffs(hwpend
) - 1;
412 * IRQ is masked; mark it as pending and check
413 * the next one. Note: the IRQ is already disabled.
415 #ifdef I80321_HPI_ENABLED
416 if (__predict_false(irq
== ICU_INT_HPI
)) {
418 * This is an HPI. We *must* disable
419 * IRQs in the interrupt frame until
420 * INT_HPIMASK is cleared by a later
421 * call to splx(). Otherwise the level-
422 * triggered interrupt will just keep
425 frame
->cf_if
.if_spsr
|= I32_bit
;
428 i80321_ipending
|= ibit
;
432 #ifdef I80321_HPI_ENABLED
433 oldpending
= i80321_ipending
| ibit
;
435 i80321_ipending
&= ~ibit
;
438 iq
->iq_ev
.ev_count
++;
440 #ifdef I80321_HPI_ENABLED
442 * Re-enable interrupts iff an HPI is not pending
444 if (__predict_true((oldpending
& INT_HPIMASK
) == 0)) {
446 TAILQ_FOREACH (ih
, &iq
->iq_list
, ih_list
) {
447 ci
->ci_cpl
= ih
->ih_ipl
;
448 oldirqstate
= enable_interrupts(I32_bit
);
449 (void) (*ih
->ih_func
)(ih
->ih_arg
? ih
->ih_arg
: frame
);
450 restore_interrupts(oldirqstate
);
452 #ifdef I80321_HPI_ENABLED
453 } else if (irq
== ICU_INT_HPI
) {
455 * We've just handled the HPI. Make sure IRQs
456 * are enabled in the interrupt frame.
457 * Here's hoping the handler really did clear
460 frame
->cf_if
.if_spsr
&= ~I32_bit
;
465 /* Re-enable this interrupt now that's it's cleared. */
466 intr_enabled
|= ibit
;
467 i80321_set_intrmask();
470 * Don't forget to include interrupts which may have
471 * arrived in the meantime.
473 hwpend
|= ((i80321_ipending
& ICU_INT_HWMASK
) & ~imask
);
476 #ifdef __HAVE_FAST_SOFTINTS