/*	$NetBSD: omap2430_intr.c,v 1.3 2008/08/27 11:03:10 matt Exp $	*/
/*
 * Define the SDP2430 specific information and then include the generic OMAP
 * interrupt header.
 */

/*
 * Copyright (c) 2007 Microsoft
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Microsoft
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "opt_omap.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: omap2430_intr.c,v 1.3 2008/08/27 11:03:10 matt Exp $");

#include <sys/param.h>
#include <sys/evcnt.h>
#include <sys/atomic.h>

#include <uvm/uvm_extern.h>

#include <machine/intr.h>

#include <arm/cpu.h>
#include <arm/armreg.h>
#include <arm/cpufunc.h>
#include <arm/omap/omap2_reg.h>

#include <machine/bus.h>
#ifdef OMAP_2430
#define NIGROUPS	8

#define GPIO1_BASE	GPIO1_BASE_2430
#define GPIO2_BASE	GPIO2_BASE_2430
#define GPIO3_BASE	GPIO3_BASE_2430
#define GPIO4_BASE	GPIO4_BASE_2430
#define GPIO5_BASE	GPIO5_BASE_2430
#elif defined(OMAP_2420)
#define NIGROUPS	7

#define GPIO1_BASE	GPIO1_BASE_2420
#define GPIO2_BASE	GPIO2_BASE_2420
#define GPIO3_BASE	GPIO3_BASE_2420
#define GPIO4_BASE	GPIO4_BASE_2420
#endif
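
/*
 * Per-IRQ state: the handler and argument registered by
 * omap_intr_establish(), the IPL it runs at, the intrgroup it belongs
 * to, an event counter, and a count of how often it has been marked
 * pending.
 */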
struct intrsource {
	struct evcnt is_ev;
	uint8_t is_ipl;
	uint8_t is_group;
	int (*is_func)(void *);
	void *is_arg;
	uint64_t is_marked;
};
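
/*
 * One intrgroup per bank of 32 interrupt sources: groups 0-2 are the
 * INTC banks (IRQs 0-95), groups 3 and up are the cascaded GPIO
 * modules.  Each group caches its enable/pending state and the
 * per-IPL IRQ masks computed by calculate_irq_masks(), plus the bus
 * space handle used to reach its registers.
 */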
static struct intrgroup {
	uint32_t ig_irqsbyipl[NIPL];
	uint32_t ig_irqs;
	volatile uint32_t ig_pending_irqs;
	uint32_t ig_enabled_irqs;
	uint32_t ig_edge_rising;
	uint32_t ig_edge_falling;
	uint32_t ig_level_low;
	uint32_t ig_level_high;
	struct intrsource ig_sources[32];
	bus_space_tag_t ig_memt;
	bus_space_handle_t ig_memh;
} intrgroups[NIGROUPS] = {
	[0].ig_sources[ 0 ... 31 ].is_group = 0,
	[1].ig_sources[ 0 ... 31 ].is_group = 1,
	[2].ig_sources[ 0 ... 31 ].is_group = 2,
	[3].ig_sources[ 0 ... 31 ].is_group = 3,
	[4].ig_sources[ 0 ... 31 ].is_group = 4,
	[5].ig_sources[ 0 ... 31 ].is_group = 5,
	[6].ig_sources[ 0 ... 31 ].is_group = 6,
#ifdef OMAP_2430
	[7].ig_sources[ 0 ... 31 ].is_group = 7,
#endif
};
volatile uint32_t pending_ipls;
volatile uint32_t pending_igroupsbyipl[NIPL];
void omap2430_intr_init(bus_space_tag_t);
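
/*
 * Register accessors: a group's ig_memh points at either an INTC bank
 * (groups 0-2) or a GPIO module (groups 3 and up), so the INTC_* and
 * GPIO_* macros differ only in name.
 */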
#define	INTC_READ(ig, o)	\
	bus_space_read_4((ig)->ig_memt, (ig)->ig_memh, o)
#define	INTC_WRITE(ig, o, v)	\
	bus_space_write_4((ig)->ig_memt, (ig)->ig_memh, o, v)
#define	GPIO_READ(ig, o)	\
	bus_space_read_4((ig)->ig_memt, (ig)->ig_memh, o)
#define	GPIO_WRITE(ig, o, v)	\
	bus_space_write_4((ig)->ig_memt, (ig)->ig_memh, o, v)
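
/*
 * Re-enable a set of IRQs in one group: via INTC_MIR_CLEAR for the
 * INTC banks (groups 0-2), via GPIO_SETIRQENABLE1 for the GPIO groups.
 */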
static void
unblock_irq(unsigned int group, int irq_mask)
{
	struct intrgroup * const ig = &intrgroups[group];
	KASSERT((irq_mask & ig->ig_enabled_irqs) == 0);
	ig->ig_enabled_irqs |= irq_mask;
	if (group < 3) {
		INTC_WRITE(ig, INTC_MIR_CLEAR, irq_mask);
	} else {
		GPIO_WRITE(ig, GPIO_SETIRQENABLE1, irq_mask);
		/*
		 * Clear IRQSTATUS of level interrupts; if they are still
		 * asserted, IRQSTATUS will become set again and they will
		 * refire.  This avoids one spurious interrupt for every
		 * real interrupt.
		 */
		if (irq_mask & (ig->ig_level_low|ig->ig_level_high))
			GPIO_WRITE(ig, GPIO_IRQSTATUS1,
			    irq_mask & (ig->ig_level_low|ig->ig_level_high));
	}

	/* Force INTC to recompute IRQ availability */
	INTC_WRITE(&intrgroups[0], INTC_CONTROL, INTC_CONTROL_NEWIRQAGR);
}
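
/*
 * Mask a set of IRQs in one group so they cannot refire until
 * unblock_irq() is called.
 */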
static void
block_irq(unsigned int group, int irq_mask)
{
	struct intrgroup * const ig = &intrgroups[group];
	ig->ig_enabled_irqs &= ~irq_mask;
	if (group < 3) {
		INTC_WRITE(ig, INTC_MIR_SET, irq_mask);
		return;
	}
	GPIO_WRITE(ig, GPIO_CLEARIRQENABLE1, irq_mask);
	/*
	 * Only clear (reenable) edge interrupts.
	 */
	if (irq_mask & (ig->ig_edge_falling|ig->ig_edge_rising))
		GPIO_WRITE(ig, GPIO_IRQSTATUS1,	/* reset int bits */
		    irq_mask & (ig->ig_edge_falling|ig->ig_edge_rising));
}
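
/*
 * Record the IPL for an IRQ and, for GPIO interrupts (irq >= 96),
 * program the pin as an input and set up the requested edge/level
 * detection.  INTC interrupts are level-only, so nothing more is
 * needed for them.
 */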
static void
init_irq(int irq, int spl, int type)
{
	struct intrgroup * const ig = &intrgroups[irq / 32];
	uint32_t irq_mask = __BIT(irq & 31);
	uint32_t v;

	KASSERT(irq >= 0 && irq < 256);
	ig->ig_sources[irq & 31].is_ipl = spl;
	if (irq < 96) {
		KASSERT(type == IST_LEVEL);
		return;
	}

	ig->ig_enabled_irqs &= ~irq_mask;
	GPIO_WRITE(ig, GPIO_CLEARIRQENABLE1, irq_mask);

	v = GPIO_READ(ig, GPIO_OE);
	GPIO_WRITE(ig, GPIO_OE, v | irq_mask);	/* set as input */

	ig->ig_edge_rising &= ~irq_mask;
	ig->ig_edge_falling &= ~irq_mask;
	ig->ig_level_low &= ~irq_mask;
	ig->ig_level_high &= ~irq_mask;

	switch (type) {
	case IST_EDGE_BOTH:
		ig->ig_edge_rising |= irq_mask;
		ig->ig_edge_falling |= irq_mask;
		break;
	case IST_EDGE_RISING:
		ig->ig_edge_rising |= irq_mask;
		break;
	case IST_EDGE_FALLING:
		ig->ig_edge_falling |= irq_mask;
		break;
	case IST_LEVEL_LOW:
		ig->ig_level_low |= irq_mask;
		break;
	case IST_LEVEL_HIGH:
		ig->ig_level_high |= irq_mask;
		break;
	}

	GPIO_WRITE(ig, GPIO_LEVELDETECT0, ig->ig_level_low);
	GPIO_WRITE(ig, GPIO_LEVELDETECT1, ig->ig_level_high);
	GPIO_WRITE(ig, GPIO_RISINGDETECT, ig->ig_edge_rising);
	GPIO_WRITE(ig, GPIO_FALLINGDETECT, ig->ig_edge_falling);
}
/*
 * Recompute ig_irqs and the per-IPL masks from the registered sources.
 * Called with interrupts disabled.
 */
static void
calculate_irq_masks(struct intrgroup *ig)
{
	u_int irq;
	int ipl;
	uint32_t irq_mask;

	memset(ig->ig_irqsbyipl, 0, sizeof(ig->ig_irqsbyipl));
	ig->ig_irqs = 0;

	for (irq_mask = 1, irq = 0; irq < 32; irq_mask <<= 1, irq++) {
		if ((ipl = ig->ig_sources[irq].is_ipl) == IPL_NONE)
			continue;

		ig->ig_irqsbyipl[ipl] |= irq_mask;
		ig->ig_irqs |= irq_mask;
	}
}
/*
 * Note the given IRQs of one group as pending, block them, and return
 * the mask of IPLs that now have work.  Called with interrupts
 * disabled.
 */
static uint32_t
mark_pending_irqs(int group, uint32_t pending)
{
	struct intrgroup * const ig = &intrgroups[group];
	struct intrsource *is;
	int n;
	int ipl_mask = 0;

	if (pending == 0)
		return ipl_mask;

	KASSERT((ig->ig_enabled_irqs & pending) == pending);
	KASSERT((ig->ig_pending_irqs & pending) == 0);

	ig->ig_pending_irqs |= pending;
	block_irq(group, pending);
	for (;;) {
		n = ffs(pending);
		if (n-- == 0)
			break;
		is = &ig->ig_sources[n];
		KASSERT(ig->ig_irqsbyipl[is->is_ipl] & pending);
		pending &= ~ig->ig_irqsbyipl[is->is_ipl];
		ipl_mask |= __BIT(is->is_ipl);
		KASSERT(ipl_mask < __BIT(NIPL));
		pending_igroupsbyipl[is->is_ipl] |= __BIT(group);
		is->is_marked++;
	}

	KASSERT(ipl_mask < __BIT(NIPL));
	return ipl_mask;
}
/*
 * Read the pending IRQs from the INTC banks, expand the GPIO summary
 * bits into their per-pin IRQs, and mark everything pending.  Returns
 * the mask of IPLs with pending work.  Called with interrupts
 * disabled.
 */
static uint32_t
get_pending_irqs(void)
{
	uint32_t pending[3];
	uint32_t ipl_mask = 0;
	uint32_t xpending;

	pending[0] = INTC_READ(&intrgroups[0], INTC_PENDING_IRQ);
	pending[1] = INTC_READ(&intrgroups[1], INTC_PENDING_IRQ);
	pending[2] = INTC_READ(&intrgroups[2], INTC_PENDING_IRQ);

	/* Get interrupt status of GPIO1 */
	if (pending[GPIO1_MPU_IRQ / 32] & __BIT(GPIO1_MPU_IRQ & 31)) {
		KASSERT(intrgroups[3].ig_enabled_irqs);
		xpending = GPIO_READ(&intrgroups[3], GPIO_IRQSTATUS1);
		xpending &= intrgroups[3].ig_enabled_irqs;
		ipl_mask |= mark_pending_irqs(3, xpending);
	}

	/* Get interrupt status of GPIO2 */
	if (pending[GPIO2_MPU_IRQ / 32] & __BIT(GPIO2_MPU_IRQ & 31)) {
		KASSERT(intrgroups[4].ig_enabled_irqs);
		xpending = GPIO_READ(&intrgroups[4], GPIO_IRQSTATUS1);
		xpending &= intrgroups[4].ig_enabled_irqs;
		ipl_mask |= mark_pending_irqs(4, xpending);
	}

	/* Get interrupt status of GPIO3 */
	if (pending[GPIO3_MPU_IRQ / 32] & __BIT(GPIO3_MPU_IRQ & 31)) {
		KASSERT(intrgroups[5].ig_enabled_irqs);
		xpending = GPIO_READ(&intrgroups[5], GPIO_IRQSTATUS1);
		xpending &= intrgroups[5].ig_enabled_irqs;
		ipl_mask |= mark_pending_irqs(5, xpending);
	}

	/* Get interrupt status of GPIO4 */
	if (pending[GPIO4_MPU_IRQ / 32] & __BIT(GPIO4_MPU_IRQ & 31)) {
		KASSERT(intrgroups[6].ig_enabled_irqs);
		xpending = GPIO_READ(&intrgroups[6], GPIO_IRQSTATUS1);
		xpending &= intrgroups[6].ig_enabled_irqs;
		ipl_mask |= mark_pending_irqs(6, xpending);
	}

#ifdef OMAP_2430
	/* Get interrupt status of GPIO5 */
	if (pending[GPIO5_MPU_IRQ / 32] & __BIT(GPIO5_MPU_IRQ & 31)) {
		KASSERT(intrgroups[7].ig_enabled_irqs);
		xpending = GPIO_READ(&intrgroups[7], GPIO_IRQSTATUS1);
		xpending &= intrgroups[7].ig_enabled_irqs;
		ipl_mask |= mark_pending_irqs(7, xpending);
	}
#endif

	/* Clear GPIO indication from summaries */
	pending[GPIO1_MPU_IRQ / 32] &= ~__BIT(GPIO1_MPU_IRQ & 31);
	pending[GPIO2_MPU_IRQ / 32] &= ~__BIT(GPIO2_MPU_IRQ & 31);
	pending[GPIO3_MPU_IRQ / 32] &= ~__BIT(GPIO3_MPU_IRQ & 31);
	pending[GPIO4_MPU_IRQ / 32] &= ~__BIT(GPIO4_MPU_IRQ & 31);
#ifdef OMAP_2430
	pending[GPIO5_MPU_IRQ / 32] &= ~__BIT(GPIO5_MPU_IRQ & 31);
#endif

	/* Now handle the primary interrupt summaries */
	ipl_mask |= mark_pending_irqs(0, pending[0]);
	ipl_mask |= mark_pending_irqs(1, pending[1]);
	ipl_mask |= mark_pending_irqs(2, pending[2]);

	/* Force INTC to recompute IRQ availability */
	INTC_WRITE(&intrgroups[0], INTC_CONTROL, INTC_CONTROL_NEWIRQAGR);

	return ipl_mask;
}
static int last_delivered_ipl;
static u_long no_pending_irqs[NIPL][NIGROUPS];
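
/*
 * Deliver the pending interrupts for a single IPL: walk the intrgroups
 * with pending IRQs at this level, run each handler (re-enabling
 * interrupts around the call unless we were given a trap frame), and
 * unblock the handled IRQs in bulk afterwards.
 */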
static void
deliver_irqs(register_t psw, int ipl, void *frame)
{
	struct intrgroup *ig;
	struct intrsource *is;
	uint32_t pending_irqs;
	uint32_t irq_mask;
	uint32_t blocked_irqs;
	volatile uint32_t * const pending_igroups = &pending_igroupsbyipl[ipl];
	const uint32_t ipl_mask = __BIT(ipl);
	int n;
	int saved_ipl = IPL_NONE;	/* XXX stupid GCC */
	unsigned int group;
	int rv;

	if (frame == NULL) {
		saved_ipl = last_delivered_ipl;
		KASSERT(saved_ipl < ipl);
		last_delivered_ipl = ipl;
	}

	/*
	 * We must only be called if this IPL has pending interrupts,
	 * and therefore there must be at least one intrgroup with a
	 * pending interrupt.
	 */
	KASSERT(pending_ipls & ipl_mask);
	KASSERT(*pending_igroups);

	/*
	 * We loop until there are no more intrgroups with pending interrupts.
	 */
	do {
		group = 31 - __builtin_clz(*pending_igroups);
		KASSERT(group < NIGROUPS);

		ig = &intrgroups[group];
		irq_mask = ig->ig_irqsbyipl[ipl];
		pending_irqs = ig->ig_pending_irqs & irq_mask;
		blocked_irqs = pending_irqs;
		if ((*pending_igroups &= ~__BIT(group)) == 0)
			pending_ipls &= ~ipl_mask;
#if 0
		KASSERT(group < 3 || (GPIO_READ(ig, GPIO_IRQSTATUS1) & blocked_irqs) == 0);
#endif
		/*
		 * We couldn't have gotten here unless there was at least
		 * one pending interrupt in this intrgroup.
		 */
		if (pending_irqs == 0) {
			no_pending_irqs[ipl][group]++;
			continue;
		}
#if 0
		KASSERT(pending_irqs != 0);
#endif
		do {
			n = 31 - __builtin_clz(pending_irqs);
			KASSERT(ig->ig_irqs & __BIT(n));
			KASSERT(irq_mask & __BIT(n));

			/*
			 * If this was the last bit cleared for this IRQ,
			 * we need to clear this group's bit in
			 * pending_igroupsbyipl[ipl].  If that's now 0,
			 * we need to clear pending_ipls for this IPL.
			 */
			ig->ig_pending_irqs &= ~__BIT(n);
			if (irq_mask == __BIT(n))
				KASSERT((ig->ig_pending_irqs & irq_mask) == 0);
			is = &ig->ig_sources[n];
			if (__predict_false(frame != NULL)) {
				(*is->is_func)(frame);
			} else {
				restore_interrupts(psw);
				rv = (*is->is_func)(is->is_arg);
				disable_interrupts(I32_bit);
			}
#if 0
			if (rv && group >= 3)	/* XXX */
				GPIO_WRITE(ig, GPIO_IRQSTATUS1, __BIT(n));
#endif
#if 0
			if (ig->ig_irqsbyipl[ipl] == __BIT(n))
				KASSERT((ig->ig_pending_irqs & irq_mask) == 0);
#endif
			is->is_ev.ev_count++;
			pending_irqs = ig->ig_pending_irqs & irq_mask;
		} while (pending_irqs);

		/*
		 * We don't unblock the interrupts individually because even
		 * if one was unblocked it couldn't be delivered since our
		 * current IPL would prevent it.  So we wait until we can do
		 * them all at once.
		 */
#if 0
		KASSERT(group < 3 || (GPIO_READ(ig, GPIO_IRQSTATUS1) & blocked_irqs) == 0);
#endif
		unblock_irq(group, blocked_irqs);
	} while (*pending_igroups);

	/*
	 * Since there are no more pending interrupts for this IPL,
	 * this IPL must not be present in the pending IPLs.
	 */
	KASSERT((pending_ipls & ipl_mask) == 0);
	KASSERT((intrgroups[0].ig_pending_irqs & intrgroups[0].ig_irqsbyipl[ipl]) == 0);
	KASSERT((intrgroups[1].ig_pending_irqs & intrgroups[1].ig_irqsbyipl[ipl]) == 0);
	KASSERT((intrgroups[2].ig_pending_irqs & intrgroups[2].ig_irqsbyipl[ipl]) == 0);
	KASSERT((intrgroups[3].ig_pending_irqs & intrgroups[3].ig_irqsbyipl[ipl]) == 0);
	KASSERT((intrgroups[4].ig_pending_irqs & intrgroups[4].ig_irqsbyipl[ipl]) == 0);
	KASSERT((intrgroups[5].ig_pending_irqs & intrgroups[5].ig_irqsbyipl[ipl]) == 0);
	KASSERT((intrgroups[6].ig_pending_irqs & intrgroups[6].ig_irqsbyipl[ipl]) == 0);
#ifdef OMAP_2430
	KASSERT((intrgroups[7].ig_pending_irqs & intrgroups[7].ig_irqsbyipl[ipl]) == 0);
#endif
	if (frame == NULL)
		last_delivered_ipl = saved_ipl;
}
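
/*
 * Deliver any pending interrupts whose IPL is above newipl, highest
 * IPL first, then drop the current IPL to newipl.  Called with
 * interrupts disabled.
 */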
static inline void
do_pending_ints(register_t psw, int newipl)
{
	while ((pending_ipls & ~__BIT(newipl)) > __BIT(newipl)) {
		KASSERT(pending_ipls < __BIT(NIPL));
		for (;;) {
			int ipl = 31 - __builtin_clz(pending_ipls);
			KASSERT(ipl < NIPL);
			if (ipl <= newipl)
				break;

			curcpu()->ci_cpl = ipl;
			deliver_irqs(psw, ipl, NULL);
		}
	}
	curcpu()->ci_cpl = newipl;
}
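
/*
 * Raise the current IPL.  Nothing needs to be delivered, so this is
 * just a store.
 */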
int
_splraise(int newipl)
{
	const int oldipl = curcpu()->ci_cpl;
	KASSERT(newipl < NIPL);
	if (newipl > curcpu()->ci_cpl)
		curcpu()->ci_cpl = newipl;
	return oldipl;
}
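
/*
 * Lower the current IPL, delivering any interrupts that were pending
 * at the intervening levels.
 */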
int
_spllower(int newipl)
{
	const int oldipl = curcpu()->ci_cpl;
	KASSERT(panicstr || newipl <= curcpu()->ci_cpl);
	if (newipl < curcpu()->ci_cpl) {
		register_t psw = disable_interrupts(I32_bit);
		do_pending_ints(psw, newipl);
		restore_interrupts(psw);
	}
	return oldipl;
}
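
/*
 * Restore a saved IPL, delivering any interrupts that became pending
 * while it was raised.
 */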
void
splx(int savedipl)
{
	KASSERT(savedipl < NIPL);
	if (savedipl < curcpu()->ci_cpl) {
		register_t psw = disable_interrupts(I32_bit);
		do_pending_ints(psw, savedipl);
		restore_interrupts(psw);
	}
	curcpu()->ci_cpl = savedipl;
}
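
/*
 * Hardware IRQ entry point: collect the pending IRQs into
 * pending_ipls, deliver clock interrupts immediately with the trap
 * frame, and let do_pending_ints() deliver the rest.
 */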
void
omap_irq_handler(void *frame)
{
	const int oldipl = curcpu()->ci_cpl;
	const uint32_t oldipl_mask = __BIT(oldipl);

	/*
	 * When we enter there must be no pending IRQs for IPLs greater
	 * than the current IPL.  There might be pending IRQs for the
	 * current IPL if we are servicing interrupts.
	 */
	KASSERT((pending_ipls & ~oldipl_mask) < oldipl_mask);
	pending_ipls |= get_pending_irqs();

	uvmexp.intrs++;

	/*
	 * We assume this isn't a clock intr.  But if it is, deliver it
	 * unconditionally so it will always have the interrupted frame.
	 * The clock intr will handle being called at IPLs != IPL_CLOCK.
	 */
	if (__predict_false(pending_ipls & __BIT(IPL_STATCLOCK))) {
		deliver_irqs(0, IPL_STATCLOCK, frame);
		pending_ipls &= ~__BIT(IPL_STATCLOCK);
	}
	if (__predict_false(pending_ipls & __BIT(IPL_CLOCK))) {
		deliver_irqs(0, IPL_CLOCK, frame);
		pending_ipls &= ~__BIT(IPL_CLOCK);
	}

	/*
	 * Record the pending_ipls and deliver them if we can.
	 */
	if ((pending_ipls & ~oldipl_mask) > oldipl_mask)
		do_pending_ints(I32_bit, oldipl);
}
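
/*
 * Register func/arg as the handler for irq at the given ipl and
 * unblock the IRQ.  The GPIO summary IRQs are reserved for the
 * cascade and cannot be established directly.
 */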
void *
omap_intr_establish(int irq, int ipl, const char *name,
	int (*func)(void *), void *arg)
{
	struct intrgroup *ig = &intrgroups[irq / 32];
	struct intrsource *is;
	register_t psw;

	KASSERT(irq >= 0 && irq < 256);
	is = &ig->ig_sources[irq & 0x1f];
	KASSERT(irq != GPIO1_MPU_IRQ);
	KASSERT(irq != GPIO2_MPU_IRQ);
	KASSERT(irq != GPIO3_MPU_IRQ);
	KASSERT(irq != GPIO4_MPU_IRQ);
	KASSERT(irq != GPIO5_MPU_IRQ);
	KASSERT(is->is_ipl == IPL_NONE);

	is->is_func = func;
	is->is_arg = arg;
	psw = disable_interrupts(I32_bit);
	evcnt_attach_dynamic(&is->is_ev, EVCNT_TYPE_INTR, NULL, name, "intr");
	init_irq(irq, ipl, IST_LEVEL);

	calculate_irq_masks(ig);
	unblock_irq(is->is_group, __BIT(irq & 31));
	restore_interrupts(psw);
	return is;
}
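
/*
 * Detach a handler established above: block the IRQ, discard any
 * pending state, and recompute the group's per-IPL masks.
 */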
void
omap_intr_disestablish(void *ih)
{
	struct intrsource * const is = ih;
	struct intrgroup *ig;
	register_t psw;
	uint32_t mask;

	KASSERT(ih != NULL);

	ig = &intrgroups[is->is_group];
	psw = disable_interrupts(I32_bit);
	mask = __BIT(is - ig->ig_sources);
	block_irq(is->is_group, mask);
	ig->ig_pending_irqs &= ~mask;
	calculate_irq_masks(ig);
	evcnt_detach(&is->is_ev);
	restore_interrupts(psw);
}
#ifdef GPIO5_BASE
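/*
 * Enable the GPIO5 module's functional and interface clocks (via
 * CM_FCLKEN2_CORE/CM_ICLKEN2_CORE) before its registers are mapped
 * and used.
 */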
static void
gpio5_clkinit(bus_space_tag_t memt)
{
	bus_space_handle_t memh;
	uint32_t r;
	int error;

	error = bus_space_map(memt, OMAP2430_CM_BASE,
	    OMAP2430_CM_SIZE, 0, &memh);
	if (error != 0)
		panic("%s: cannot map OMAP2430_CM_BASE at %#x: %d\n",
		    __func__, OMAP2430_CM_BASE, error);

	r = bus_space_read_4(memt, memh, OMAP2430_CM_FCLKEN2_CORE);
	r |= OMAP2430_CM_FCLKEN2_CORE_EN_GPIO5;
	bus_space_write_4(memt, memh, OMAP2430_CM_FCLKEN2_CORE, r);

	r = bus_space_read_4(memt, memh, OMAP2430_CM_ICLKEN2_CORE);
	r |= OMAP2430_CM_ICLKEN2_CORE_EN_GPIO5;
	bus_space_write_4(memt, memh, OMAP2430_CM_ICLKEN2_CORE, r);

	bus_space_unmap(memt, memh, OMAP2430_CM_SIZE);
}
#endif
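
/*
 * Map the INTC banks and GPIO modules, mask everything, unmask the
 * GPIO summary IRQs so the cascade works, and compute the initial
 * masks for the primary intrgroups.
 */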
void
omap2430_intr_init(bus_space_tag_t memt)
{
	int error;
	int group;

	for (group = 0; group < NIGROUPS; group++)
		intrgroups[group].ig_memt = memt;
	error = bus_space_map(memt, INTC_BASE, 0x1000, 0,
	    &intrgroups[0].ig_memh);
	if (error)
		panic("failed to map interrupt registers: %d", error);
	error = bus_space_subregion(memt, intrgroups[0].ig_memh, 0x20, 0x20,
	    &intrgroups[1].ig_memh);
	if (error)
		panic("failed to subregion interrupt registers: %d", error);
	error = bus_space_subregion(memt, intrgroups[0].ig_memh, 0x40, 0x20,
	    &intrgroups[2].ig_memh);
	if (error)
		panic("failed to subregion interrupt registers: %d", error);
	error = bus_space_map(memt, GPIO1_BASE, 0x400, 0,
	    &intrgroups[3].ig_memh);
	if (error)
		panic("failed to map gpio #1 registers: %d", error);
	error = bus_space_map(memt, GPIO2_BASE, 0x400, 0,
	    &intrgroups[4].ig_memh);
	if (error)
		panic("failed to map gpio #2 registers: %d", error);
	error = bus_space_map(memt, GPIO3_BASE, 0x400, 0,
	    &intrgroups[5].ig_memh);
	if (error)
		panic("failed to map gpio #3 registers: %d", error);
	error = bus_space_map(memt, GPIO4_BASE, 0x400, 0,
	    &intrgroups[6].ig_memh);
	if (error)
		panic("failed to map gpio #4 registers: %d", error);

#ifdef GPIO5_BASE
	gpio5_clkinit(memt);
	error = bus_space_map(memt, GPIO5_BASE, 0x400, 0,
	    &intrgroups[7].ig_memh);
	if (error)
		panic("failed to map gpio #5 registers: %d", error);
#endif

	INTC_WRITE(&intrgroups[0], INTC_MIR_SET, 0xffffffff);
	INTC_WRITE(&intrgroups[1], INTC_MIR_SET, 0xffffffff);
	INTC_WRITE(&intrgroups[2], INTC_MIR_SET, 0xffffffff);
	INTC_WRITE(&intrgroups[GPIO1_MPU_IRQ / 32], INTC_MIR_CLEAR,
	    __BIT(GPIO1_MPU_IRQ & 31));
	INTC_WRITE(&intrgroups[GPIO2_MPU_IRQ / 32], INTC_MIR_CLEAR,
	    __BIT(GPIO2_MPU_IRQ & 31));
	INTC_WRITE(&intrgroups[GPIO3_MPU_IRQ / 32], INTC_MIR_CLEAR,
	    __BIT(GPIO3_MPU_IRQ & 31));
	INTC_WRITE(&intrgroups[GPIO4_MPU_IRQ / 32], INTC_MIR_CLEAR,
	    __BIT(GPIO4_MPU_IRQ & 31));
#ifdef GPIO5_BASE
	INTC_WRITE(&intrgroups[GPIO5_MPU_IRQ / 32], INTC_MIR_CLEAR,
	    __BIT(GPIO5_MPU_IRQ & 31));
#endif

	/*
	 * Set up the primary intrgroups.
	 */
	calculate_irq_masks(&intrgroups[0]);
	calculate_irq_masks(&intrgroups[1]);
	calculate_irq_masks(&intrgroups[2]);
}