No empty .Rs/.Re
[netbsd-mini2440.git] / sys / arch / arm / xscale / becc_icu.c
blob0457d25f5c326f173ff3ec77ef3d21dc7abc90ed
1 /* $NetBSD$ */
3 /*
4 * Copyright (c) 2002 Wasabi Systems, Inc.
5 * All rights reserved.
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
39 * Interrupt support for the ADI Engineering Big Endian Companion Chip.
42 #include <sys/cdefs.h>
43 __KERNEL_RCSID(0, "$NetBSD$");
45 #ifndef EVBARM_SPL_NOINLINE
46 #define EVBARM_SPL_NOINLINE
47 #endif
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/malloc.h>
52 #include <sys/bus.h>
53 #include <sys/intr.h>
55 #include <uvm/uvm_extern.h>
57 #include <arm/cpufunc.h>
59 #include <arm/xscale/beccreg.h>
60 #include <arm/xscale/beccvar.h>
62 #include <arm/xscale/i80200reg.h>
63 #include <arm/xscale/i80200var.h>
65 /* Interrupt handler queues. */
66 struct intrq intrq[NIRQ];
68 /* Interrupts to mask at each level. */
69 uint32_t becc_imask[NIPL];
71 /* Interrupts pending. */
72 volatile uint32_t becc_ipending;
73 volatile uint32_t becc_sipending;
75 /* Software copy of the IRQs we have enabled. */
76 volatile uint32_t intr_enabled;
78 /* Mask if interrupts steered to FIQs. */
79 uint32_t intr_steer;
/*
 * Interrupt bit names.
 * XXX Some of these are BRH-centric.
 */
const char * const becc_irqnames[] = {
	"soft",
	"timer A",
	"timer B",
	"irq 3",
	"irq 4",
	"irq 5",
	"irq 6",
	"diagerr",
	"DMA EOT",
	"DMA PERR",
	"DMA TABT",
	"DMA MABT",
	"irq 12",
	"irq 13",
	"irq 14",
	"irq 15",
	"PCI PERR",
	"irq 17",
	"irq 18",
	"PCI SERR",
	"PCI OAPE",
	"PCI OATA",
	"PCI OAMA",
	"irq 23",
	"irq 24",
	"irq 25",
	"irq 26",		/* PCI INTA */
	"irq 27",		/* PCI INTB */
	"irq 28",		/* PCI INTC */
	"irq 29",		/* PCI INTD */
	"pushbutton",
	"irq 31",
};

void	becc_intr_dispatch(struct irqframe *frame);
122 static inline uint32_t
123 becc_icsr_read(void)
125 uint32_t icsr;
127 icsr = BECC_CSR_READ(BECC_ICSR);
130 * The ICSR register shows bits that are active even if they are
131 * masked in ICMR, so we have to mask them off with the interrupts
132 * we consider enabled.
134 return (icsr & intr_enabled);
137 static inline void
138 becc_set_intrsteer(void)
141 BECC_CSR_WRITE(BECC_ICSTR, intr_steer & ICU_VALID_MASK);
142 (void) BECC_CSR_READ(BECC_ICSTR);
145 static inline void
146 becc_enable_irq(int irq)
149 intr_enabled |= (1U << irq);
150 becc_set_intrmask();
153 static inline void
154 becc_disable_irq(int irq)
157 intr_enabled &= ~(1U << irq);
158 becc_set_intrmask();
162 * NOTE: This routine must be called with interrupts disabled in the CPSR.
164 static void
165 becc_intr_calculate_masks(void)
167 struct intrq *iq;
168 struct intrhand *ih;
169 int irq, ipl;
171 /* First, figure out which IPLs each IRQ has. */
172 for (irq = 0; irq < NIRQ; irq++) {
173 int levels = 0;
174 iq = &intrq[irq];
175 becc_disable_irq(irq);
176 for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
177 ih = TAILQ_NEXT(ih, ih_list))
178 levels |= (1U << ih->ih_ipl);
179 iq->iq_levels = levels;
182 /* Next, figure out which IRQs are used by each IPL. */
183 for (ipl = 0; ipl < NIPL; ipl++) {
184 int irqs = 0;
185 for (irq = 0; irq < NIRQ; irq++) {
186 if (intrq[irq].iq_levels & (1U << ipl))
187 irqs |= (1U << irq);
189 becc_imask[ipl] = irqs;
192 becc_imask[IPL_NONE] = 0;
195 * Enforce a hierarchy that gives "slow" device (or devices with
196 * limited input buffer space/"real-time" requirements) a better
197 * chance at not dropping data.
199 becc_imask[IPL_VM] |= becc_imask[IPL_SOFTSERIAL];
200 becc_imask[IPL_SCHED] |= becc_imask[IPL_VM];
201 becc_imask[IPL_HIGH] |= becc_imask[IPL_SCHED];
204 * Now compute which IRQs must be blocked when servicing any
205 * given IRQ.
207 for (irq = 0; irq < NIRQ; irq++) {
208 int irqs = (1U << irq);
209 iq = &intrq[irq];
210 if (TAILQ_FIRST(&iq->iq_list) != NULL)
211 becc_enable_irq(irq);
212 for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
213 ih = TAILQ_NEXT(ih, ih_list))
214 irqs |= becc_imask[ih->ih_ipl];
215 iq->iq_mask = irqs;
/*
 * splx:
 *
 *	Restore the system priority level; deferred work for the
 *	newly-unblocked levels is handled by becc_splx().
 */
void
splx(int new)
{

	becc_splx(new);
}
/*
 * _spllower:
 *
 *	Lower the system priority level to ipl; returns the previous level.
 */
int
_spllower(int ipl)
{

	return (becc_spllower(ipl));
}
/*
 * _splraise:
 *
 *	Raise the system priority level to ipl; returns the previous level.
 */
int
_splraise(int ipl)
{

	return (becc_splraise(ipl));
}
238 * becc_icu_init:
240 * Initialize the BECC ICU. Called early in bootstrap
241 * to make sure the ICU is in a pristine state.
243 void
244 becc_icu_init(void)
247 intr_enabled = 0; /* All interrupts disabled */
248 becc_set_intrmask();
250 intr_steer = 0; /* All interrupts steered to IRQ */
251 becc_set_intrsteer();
253 i80200_extirq_dispatch = becc_intr_dispatch;
255 i80200_intr_enable(INTCTL_IM);
259 * becc_intr_init:
261 * Initialize the rest of the interrupt subsystem, making it
262 * ready to handle interrupts from devices.
264 void
265 becc_intr_init(void)
267 struct intrq *iq;
268 int i;
270 intr_enabled = 0;
272 for (i = 0; i < NIRQ; i++) {
273 iq = &intrq[i];
274 TAILQ_INIT(&iq->iq_list);
276 evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
277 NULL, "becc", becc_irqnames[i]);
280 becc_intr_calculate_masks();
282 /* Enable IRQs (don't yet use FIQs). */
283 enable_interrupts(I32_bit);
286 void *
287 becc_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
289 struct intrq *iq;
290 struct intrhand *ih;
291 uint32_t oldirqstate;
293 if (irq < 0 || irq > NIRQ)
294 panic("becc_intr_establish: IRQ %d out of range", irq);
296 ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
297 if (ih == NULL)
298 return (NULL);
300 ih->ih_func = func;
301 ih->ih_arg = arg;
302 ih->ih_ipl = ipl;
303 ih->ih_irq = irq;
305 iq = &intrq[irq];
307 /* All BECC interrupts are level-triggered. */
308 iq->iq_ist = IST_LEVEL;
310 oldirqstate = disable_interrupts(I32_bit);
312 TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
314 becc_intr_calculate_masks();
316 restore_interrupts(oldirqstate);
318 return (ih);
321 void
322 becc_intr_disestablish(void *cookie)
324 struct intrhand *ih = cookie;
325 struct intrq *iq = &intrq[ih->ih_irq];
326 uint32_t oldirqstate;
328 oldirqstate = disable_interrupts(I32_bit);
330 TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
332 becc_intr_calculate_masks();
334 restore_interrupts(oldirqstate);
337 void
338 becc_intr_dispatch(struct irqframe *frame)
340 struct intrq *iq;
341 struct intrhand *ih;
342 uint32_t oldirqstate, irq, ibit, hwpend;
343 struct cpu_info * const ci = curcpu();
344 const int ppl = ci->ci_cpl;
345 const uint32_t imask = becc_imask[ppl];
347 hwpend = becc_icsr_read();
350 * Disable all the interrupts that are pending. We will
351 * reenable them once they are processed and not masked.
353 intr_enabled &= ~hwpend;
354 becc_set_intrmask();
356 while (hwpend != 0) {
357 irq = ffs(hwpend) - 1;
358 ibit = (1U << irq);
360 hwpend &= ~ibit;
362 if (imask & ibit) {
364 * IRQ is masked; mark it as pending and check
365 * the next one. Note: the IRQ is already disabled.
367 becc_ipending |= ibit;
368 continue;
371 becc_ipending &= ~ibit;
373 iq = &intrq[irq];
374 iq->iq_ev.ev_count++;
375 uvmexp.intrs++;
376 TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
377 ci->ci_cpl = ih->ih_ipl;
378 oldirqstate = enable_interrupts(I32_bit);
379 (void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
380 restore_interrupts(oldirqstate);
383 ci->ci_cpl = ppl;
385 /* Re-enable this interrupt now that's it's cleared. */
386 intr_enabled |= ibit;
387 becc_set_intrmask();
390 if (becc_ipending & ~imask) {
391 intr_enabled |= (becc_ipending & ~imask);
392 becc_set_intrmask();
395 #ifdef __HAVE_FAST_SOFTINTS
396 cpu_dosoftints();
397 #endif