/*
 * Copyright (c) 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Interrupt support for the ADI Engineering Big Endian Companion Chip.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD$");

#ifndef EVBARM_SPL_NOINLINE
#define EVBARM_SPL_NOINLINE
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/xscale/beccreg.h>
#include <arm/xscale/beccvar.h>

#include <arm/xscale/i80200reg.h>
#include <arm/xscale/i80200var.h>

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
uint32_t becc_imask[NIPL];

/* Interrupts pending. */
volatile uint32_t becc_ipending;
volatile uint32_t becc_sipending;

/* Software copy of the IRQs we have enabled. */
volatile uint32_t intr_enabled;
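
/*
 * Note: becc_imask[], becc_ipending, and intr_enabled are all bitmasks
 * with one bit per BECC interrupt source, i.e. bit (1U << irq) for IRQ
 * number "irq".
 */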

/* Mask of interrupts steered to FIQs. */
uint32_t intr_steer;

/*
 * Interrupt bit names.
 * XXX Some of these are BRH-centric.
 */
const char * const becc_irqnames[] = {
	/* ... */
	"irq 26",	/* PCI INTA */
	"irq 27",	/* PCI INTB */
	"irq 28",	/* PCI INTC */
	"irq 29",	/* PCI INTD */
	/* ... */
};

void	becc_intr_dispatch(struct irqframe *frame);

static inline uint32_t
becc_icsr_read(void)
{
	uint32_t icsr;

	icsr = BECC_CSR_READ(BECC_ICSR);

	/*
	 * The ICSR register shows bits that are active even if they are
	 * masked in ICMR, so we have to mask them off with the interrupts
	 * we consider enabled.
	 */
	return (icsr & intr_enabled);
}
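
/*
 * Example (illustrative values only): if ICSR reads 0x0000000c but only
 * bit 3 is set in intr_enabled (0x00000008), becc_icsr_read() returns
 * 0x00000008; the masked-but-active bit 2 is ignored.
 */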

static inline void
becc_set_intrsteer(void)
{

	BECC_CSR_WRITE(BECC_ICSTR, intr_steer & ICU_VALID_MASK);
	(void) BECC_CSR_READ(BECC_ICSTR);
}

static inline void
becc_enable_irq(int irq)
{

	intr_enabled |= (1U << irq);
	becc_set_intrmask();
}

static inline void
becc_disable_irq(int irq)
{

	intr_enabled &= ~(1U << irq);
	becc_set_intrmask();
}
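
/*
 * Note that becc_enable_irq() and becc_disable_irq() update the cached
 * intr_enabled word and then push the new value out to the hardware
 * interrupt mask (ICMR), keeping the software copy and the ICU in sync.
 */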

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
becc_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		becc_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		for (irq = 0; irq < NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		becc_imask[ipl] = irqs;
	}

	becc_imask[IPL_NONE] = 0;

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */
	becc_imask[IPL_VM] |= becc_imask[IPL_SOFTSERIAL];
	becc_imask[IPL_SCHED] |= becc_imask[IPL_VM];
	becc_imask[IPL_HIGH] |= becc_imask[IPL_SCHED];
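
	/*
	 * For example, an IRQ registered at IPL_SOFTSERIAL is also blocked
	 * while running at IPL_VM, IPL_SCHED, or IPL_HIGH, since each mask
	 * above is a superset of the one below it.
	 */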

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int irqs = (1U << irq);
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			becc_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			irqs |= becc_imask[ih->ih_ipl];
		iq->iq_mask = irqs;
	}
}
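
/*
 * After the passes above, every IRQ that has at least one handler attached
 * is enabled, and each queue's mask records the set of IRQs that must stay
 * blocked while that IRQ is being serviced.
 */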

int
_spllower(int ipl)
{

	return (becc_spllower(ipl));
}

int
_splraise(int ipl)
{

	return (becc_splraise(ipl));
}
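
/*
 * Illustrative (hypothetical) driver-side usage of the spl interface
 * built on these primitives:
 *
 *	int s = splvm();
 *	... touch state shared with an interrupt handler ...
 *	splx(s);
 */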

/*
 * Initialize the BECC ICU.  Called early in bootstrap
 * to make sure the ICU is in a pristine state.
 */
void
becc_icu_init(void)
{

	intr_enabled = 0;	/* All interrupts disabled */
	becc_set_intrmask();

	intr_steer = 0;		/* All interrupts steered to IRQ */
	becc_set_intrsteer();

	i80200_extirq_dispatch = becc_intr_dispatch;

	i80200_intr_enable(INTCTL_IM);
}
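
/*
 * BECC interrupts reach the CPU through the i80200's external IRQ input;
 * hooking i80200_extirq_dispatch above routes those external interrupts to
 * becc_intr_dispatch(), and i80200_intr_enable(INTCTL_IM) unmasks the
 * external IRQ source in the core.
 */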

/*
 * Initialize the rest of the interrupt subsystem, making it
 * ready to handle interrupts from devices.
 */
void
becc_intr_init(void)
{
	struct intrq *iq;
	int i;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
		    NULL, "becc", becc_irqnames[i]);
	}

	becc_intr_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}
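
/*
 * The per-IRQ event counters attached above appear under the "becc" group
 * in "vmstat -i" output, one entry per named IRQ.
 */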

void *
becc_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
{
	struct intrq *iq;
	struct intrhand *ih;
	uint32_t oldirqstate;

	if (irq < 0 || irq >= NIRQ)
		panic("becc_intr_establish: IRQ %d out of range", irq);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = ipl;
	ih->ih_irq = irq;

	iq = &intrq[irq];

	/* All BECC interrupts are level-triggered. */
	iq->iq_ist = IST_LEVEL;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);

	becc_intr_calculate_masks();

	restore_interrupts(oldirqstate);

	return (ih);
}
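
/*
 * Typical (hypothetical) driver usage, e.g. from an attach routine:
 *
 *	sc->sc_ih = becc_intr_establish(sc->sc_irq, IPL_NET,
 *	    mydrv_intr, sc);
 *	if (sc->sc_ih == NULL)
 *		panic("unable to establish interrupt");
 *
 * "mydrv_intr", "sc", and "sc_irq" are illustrative names only; the cookie
 * returned here is what becc_intr_disestablish() expects.
 */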

void
becc_intr_disestablish(void *cookie)
{
	struct intrhand *ih = cookie;
	struct intrq *iq = &intrq[ih->ih_irq];
	uint32_t oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);

	becc_intr_calculate_masks();

	restore_interrupts(oldirqstate);
}
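
/*
 * Note that recomputing the masks after removal also disables the IRQ
 * again if this was its last remaining handler.
 */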

void
becc_intr_dispatch(struct irqframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	uint32_t oldirqstate, irq, ibit, hwpend;
	struct cpu_info * const ci = curcpu();
	const int ppl = ci->ci_cpl;
	const uint32_t imask = becc_imask[ppl];

	hwpend = becc_icsr_read();

	/*
	 * Disable all the interrupts that are pending.  We will
	 * reenable them once they are processed and not masked.
	 */
	intr_enabled &= ~hwpend;
	becc_set_intrmask();
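
	/*
	 * Each pending, unmasked IRQ is dispatched below with ci_cpl raised
	 * to the handler's IPL and with CPU interrupts re-enabled, so that
	 * higher-priority interrupts can still preempt the handler.
	 */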
	while (hwpend != 0) {
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);

		hwpend &= ~ibit;

		if (imask & ibit) {
			/*
			 * IRQ is masked; mark it as pending and check
			 * the next one.  Note: the IRQ is already disabled.
			 */
			becc_ipending |= ibit;
			continue;
		}
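
		/*
		 * The IRQ is deliverable at the current IPL: clear any
		 * stale pending bit and run its handlers.
		 */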
		becc_ipending &= ~ibit;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
			ci->ci_cpl = ih->ih_ipl;
			oldirqstate = enable_interrupts(I32_bit);
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
		ci->ci_cpl = ppl;

		/* Re-enable this interrupt now that it's cleared. */
		intr_enabled |= ibit;
		becc_set_intrmask();
	}

	if (becc_ipending & ~imask) {
		intr_enabled |= (becc_ipending & ~imask);
		becc_set_intrmask();
	}

#ifdef __HAVE_FAST_SOFTINTS