/*	$NetBSD: i80321_icu.c,v 1.18 2008/04/27 18:58:45 matt Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2006 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe and Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i80321_icu.c,v 1.18 2008/04/27 18:58:45 matt Exp $");

#ifndef EVBARM_SPL_NOINLINE
#define	EVBARM_SPL_NOINLINE
#endif

/*
 * Interrupt support for the Intel i80321 I/O Processor.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/xscale/i80321reg.h>
#include <arm/xscale/i80321var.h>

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
int i80321_imask[NIPL];

/* Interrupts pending. */
volatile int i80321_ipending;

/* Software copy of the IRQs we have enabled. */
volatile uint32_t intr_enabled;

/* Mask of interrupts steered to FIQ rather than IRQ. */
uint32_t intr_steer;

/*
 * Interrupt bit names.
 */
const char * const i80321_irqnames[] = {
	"DMA0 EOT",
	"DMA0 EOC",
	"DMA1 EOT",
	"DMA1 EOC",
	"irq 4",
	"irq 5",
	"AAU EOT",
	"AAU EOC",
	"core PMU",
	"TMR0 (hardclock)",
	"TMR1",
	"I2C0",
	"I2C1",
	"MU",
	"BIST",
	"periph PMU",
	"XScale PMU",
	"BIU error",
	"ATU error",
	"MCU error",
	"DMA0 error",
	"DMA1 error",
	"irq 22",
	"AAU error",
	"MU error",
	"SSP",
	"irq 26",
	"irq 27",
	"irq 28",
	"irq 29",
	"irq 30",
	"irq 31",
};

void	i80321_intr_dispatch(struct clockframe *frame);

static inline uint32_t
i80321_iintsrc_read(void)
{
	uint32_t iintsrc;
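
	/*
	 * The i80321 ICU registers are accessed through coprocessor 6;
	 * the mrc below reads the IRQ interrupt source register
	 * (IINTSRC, cp6 register c8).
	 */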
	__asm volatile("mrc p6, 0, %0, c8, c0, 0"
		: "=r" (iintsrc));

	/*
	 * The IINTSRC register shows bits that are active even
	 * if they are masked in INTCTL, so we have to mask them
	 * off with the interrupts we consider enabled.
	 */
	return (iintsrc & intr_enabled);
}
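
/*
 * Push the software steering mask out to the hardware.  The mcr below
 * writes the ICU's steering register (cp6 register c4), which selects,
 * per source, whether an interrupt is delivered as IRQ or FIQ.
 */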
static inline void
i80321_set_intrsteer(void)
{

	__asm volatile("mcr p6, 0, %0, c4, c0, 0"
		:
		: "r" (intr_steer & ICU_INT_HWMASK));
}

static inline void
i80321_enable_irq(int irq)
{

	intr_enabled |= (1U << irq);
	i80321_set_intrmask();
}

static inline void
i80321_disable_irq(int irq)
{

	intr_enabled &= ~(1U << irq);
	i80321_set_intrmask();
}
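
/*
 * Note: i80321_set_intrmask() is provided by the spl machinery
 * elsewhere; it copies intr_enabled out to the ICU's interrupt
 * control register, which is what actually masks the sources.
 */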

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
i80321_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		i80321_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		for (irq = 0; irq < NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		i80321_imask[ipl] = irqs;
	}

	i80321_imask[IPL_NONE] = 0;

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */

#if 0
	/*
	 * This assert might be useful, but only after some interrupts
	 * are configured.  As it stands now, it will always fire early
	 * in the initialization phase.  If it's useful enough to re-
	 * enable, it should be conditionalized on something else like
	 * having at least something in the levels/irqs above.
	 */
	KASSERT(i80321_imask[IPL_VM] != 0);
#endif
	i80321_imask[IPL_SCHED] |= i80321_imask[IPL_VM];
	i80321_imask[IPL_HIGH] |= i80321_imask[IPL_SCHED];
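
	/*
	 * Illustration: if some IRQ has a handler at IPL_VM, the ORs
	 * above fold its bit into i80321_imask[IPL_SCHED] and, in turn,
	 * i80321_imask[IPL_HIGH], so raising the IPL can only ever
	 * block more sources, never fewer.
	 */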

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int irqs = (1U << irq);
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			i80321_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			irqs |= i80321_imask[ih->ih_ipl];
		iq->iq_mask = irqs;
	}
}

void
splx(int new)
{
	i80321_splx(new);
}

int
_spllower(int ipl)
{
	return (i80321_spllower(ipl));
}

int
_splraise(int ipl)
{
	return (i80321_splraise(ipl));
}
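
/*
 * The three functions above are the machine-dependent back-ends of the
 * standard spl(9) interface.  A typical use from MI code (sketch only):
 *
 *	int s;
 *
 *	s = splvm();		// block interrupts at or below IPL_VM
 *	// ... touch state shared with an IPL_VM interrupt handler ...
 *	splx(s);		// restore the previous level
 */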

/*
 * i80321_icu_init:
 *
 *	Initialize the i80321 ICU.  Called early in bootstrap
 *	to make sure the ICU is in a pristine state.
 */
void
i80321_icu_init(void)
{

	intr_enabled = 0;	/* All interrupts disabled */
	i80321_set_intrmask();

	intr_steer = 0;		/* All interrupts steered to IRQ */
	i80321_set_intrsteer();
}

/*
 * i80321_intr_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
i80321_intr_init(void)
{
	struct intrq *iq;
	int i;

	intr_enabled = 0;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
		    NULL, "iop321", i80321_irqnames[i]);
	}

	i80321_intr_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}

void *
i80321_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
{
	struct intrq *iq;
	struct intrhand *ih;
	u_int oldirqstate;

	if (irq < 0 || irq >= NIRQ)
		panic("i80321_intr_establish: IRQ %d out of range", irq);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = ipl;
	ih->ih_irq = irq;

	iq = &intrq[irq];

	/* All IOP321 interrupts are level-triggered. */
	iq->iq_ist = IST_LEVEL;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);

	i80321_intr_calculate_masks();

	restore_interrupts(oldirqstate);

	return (ih);
}
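
/*
 * Example use (sketch only; "irq", "mydev_intr" and "sc" are
 * hypothetical driver-supplied names):
 *
 *	sc->sc_ih = i80321_intr_establish(irq, IPL_NET, mydev_intr, sc);
 *	if (sc->sc_ih == NULL)
 *		aprint_error_dev(self, "unable to establish interrupt\n");
 */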

void
i80321_intr_disestablish(void *cookie)
{
	struct intrhand *ih = cookie;
	struct intrq *iq = &intrq[ih->ih_irq];
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);

	i80321_intr_calculate_masks();

	restore_interrupts(oldirqstate);
}

/*
 * Hardware interrupt handler.
 *
 * If I80321_HPI_ENABLED is defined, this code attempts to deal with
 * HPI interrupts as best it can.
 *
 * The problem is that HPIs cannot be masked at the interrupt controller;
 * they can only be masked by disabling IRQs in the XScale core.
 *
 * So, if an HPI comes in and we determine that it should be masked at
 * the current IPL then we mark it pending in the usual way and set
 * I32_bit in the interrupt frame.  This ensures that when we return from
 * i80321_intr_dispatch(), IRQs will be disabled in the XScale core.  (To
 * ensure IRQs are enabled later, i80321_splx() has been modified to do
 * just that when a pending HPI interrupt is unmasked.)  Additionally,
 * because HPIs are level-triggered, the registered handler for the HPI
 * interrupt will also be invoked with IRQs disabled.  If a masked HPI
 * occurs at the same time as another unmasked higher priority interrupt,
 * the higher priority handler will also be invoked with IRQs disabled.
 * As a result, the system could end up executing a lot of code with IRQs
 * completely disabled if the HPI's IPL is relatively low.
 *
 * At the present time, the only known use of HPI is for the console UART
 * on a couple of boards.  This is probably the least intrusive use of HPI
 * as IPL_SERIAL is the highest priority IPL in the system anyway.  The
 * code has not been tested with HPI hooked up to a class of device which
 * interrupts below IPL_SERIAL.  Indeed, such a configuration is likely to
 * perform very poorly if at all, even though the following code has been
 * designed (hopefully) to cope with it.
 */

void
i80321_intr_dispatch(struct clockframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	int oldirqstate, irq, ibit, hwpend;
#ifdef I80321_HPI_ENABLED
	int oldpending;
#endif
	struct cpu_info * const ci = curcpu();
	const int ppl = ci->ci_cpl;
	const uint32_t imask = i80321_imask[ppl];

	hwpend = i80321_iintsrc_read();

	/*
	 * Disable all the interrupts that are pending.  We will
	 * reenable them once they are processed and not masked.
	 */
	intr_enabled &= ~hwpend;
	i80321_set_intrmask();

#ifdef I80321_HPI_ENABLED
	oldirqstate = 0;	/* XXX: quell gcc warning */
#endif

	while (hwpend != 0) {
#ifdef I80321_HPI_ENABLED
		/* Deal with HPI interrupt first */
		if (__predict_false(hwpend & INT_HPIMASK))
			irq = ICU_INT_HPI;
		else
#endif
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);

		hwpend &= ~ibit;

		if (imask & ibit) {
			/*
			 * IRQ is masked; mark it as pending and check
			 * the next one.  Note: the IRQ is already disabled.
			 */
#ifdef I80321_HPI_ENABLED
			if (__predict_false(irq == ICU_INT_HPI)) {
				/*
				 * This is an HPI.  We *must* disable
				 * IRQs in the interrupt frame until
				 * INT_HPIMASK is cleared by a later
				 * call to splx().  Otherwise the level-
				 * triggered interrupt will just keep
				 * coming back.
				 */
				frame->cf_if.if_spsr |= I32_bit;
			}
#endif
			i80321_ipending |= ibit;
			continue;
		}

#ifdef I80321_HPI_ENABLED
		oldpending = i80321_ipending | ibit;
#endif
		i80321_ipending &= ~ibit;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
#ifdef I80321_HPI_ENABLED
		/*
		 * Re-enable interrupts iff an HPI is not pending
		 */
		if (__predict_true((oldpending & INT_HPIMASK) == 0)) {
#endif
		TAILQ_FOREACH (ih, &iq->iq_list, ih_list) {
			ci->ci_cpl = ih->ih_ipl;
			oldirqstate = enable_interrupts(I32_bit);
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
#ifdef I80321_HPI_ENABLED
		} else if (irq == ICU_INT_HPI) {
			/*
			 * We've just handled the HPI.  Make sure IRQs
			 * are enabled in the interrupt frame.
			 * Here's hoping the handler really did clear
			 * down the source...
			 */
			frame->cf_if.if_spsr &= ~I32_bit;
		}
#endif
		ci->ci_cpl = ppl;

		/* Re-enable this interrupt now that it's cleared. */
		intr_enabled |= ibit;
		i80321_set_intrmask();

		/*
		 * Don't forget to include interrupts which may have
		 * arrived in the meantime.
		 */
		hwpend |= ((i80321_ipending & ICU_INT_HWMASK) & ~imask);
	}

#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}