/*	$NetBSD: omap2430_intr.c,v 1.3 2008/08/27 11:03:10 matt Exp $	*/
/*
 * Define the SDP2430 specific information and then include the generic OMAP
 * interrupt code.
 */
/*
 * Copyright (c) 2007 Microsoft
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Microsoft
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: omap2430_intr.c,v 1.3 2008/08/27 11:03:10 matt Exp $");

#include <sys/param.h>
#include <sys/evcnt.h>
#include <sys/atomic.h>

#include <uvm/uvm_extern.h>

#include <machine/intr.h>

#include <arm/armreg.h>
#include <arm/cpufunc.h>
#include <arm/omap/omap2_reg.h>

#include <machine/bus.h>

#if defined(OMAP_2430)
#define GPIO1_BASE GPIO1_BASE_2430
#define GPIO2_BASE GPIO2_BASE_2430
#define GPIO3_BASE GPIO3_BASE_2430
#define GPIO4_BASE GPIO4_BASE_2430
#define GPIO5_BASE GPIO5_BASE_2430
#elif defined(OMAP_2420)
#define GPIO1_BASE GPIO1_BASE_2420
#define GPIO2_BASE GPIO2_BASE_2420
#define GPIO3_BASE GPIO3_BASE_2420
#define GPIO4_BASE GPIO4_BASE_2420
#endif

#define NIGROUPS	8

struct intrsource {
	int (*is_func)(void *);
	void *is_arg;
	struct evcnt is_ev;
	uint8_t is_ipl;
	uint8_t is_group;
};

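/*
 * The eight interrupt groups: groups 0-2 are the three 32-bit banks of
 * the INTC interrupt controller (IRQs 0-95); groups 3-7 are the five
 * GPIO modules, whose per-pin interrupts arrive through the
 * GPIOn_MPU_IRQ summary lines.
 */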
static struct intrgroup {
	uint32_t ig_irqsbyipl[NIPL];
	uint32_t ig_irqs;
	volatile uint32_t ig_pending_irqs;
	uint32_t ig_enabled_irqs;
	uint32_t ig_edge_rising;
	uint32_t ig_edge_falling;
	uint32_t ig_level_low;
	uint32_t ig_level_high;
	struct intrsource ig_sources[32];
	bus_space_tag_t ig_memt;
	bus_space_handle_t ig_memh;
} intrgroups[NIGROUPS] = {
	[0].ig_sources[0 ... 31].is_group = 0,
	[1].ig_sources[0 ... 31].is_group = 1,
	[2].ig_sources[0 ... 31].is_group = 2,
	[3].ig_sources[0 ... 31].is_group = 3,
	[4].ig_sources[0 ... 31].is_group = 4,
	[5].ig_sources[0 ... 31].is_group = 5,
	[6].ig_sources[0 ... 31].is_group = 6,
	[7].ig_sources[0 ... 31].is_group = 7,
};

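/*
 * Deferred-delivery bookkeeping: pending_ipls has one bit set for each
 * IPL with undelivered interrupts, and pending_igroupsbyipl[ipl] has
 * one bit set for each intrgroup with a pending IRQ at that IPL.
 */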
volatile uint32_t pending_ipls;
volatile uint32_t pending_igroupsbyipl[NIPL];
void omap2430_intr_init(bus_space_tag_t);

#define INTC_READ(ig, o)	\
	bus_space_read_4((ig)->ig_memt, (ig)->ig_memh, o)
#define INTC_WRITE(ig, o, v)	\
	bus_space_write_4((ig)->ig_memt, (ig)->ig_memh, o, v)
#define GPIO_READ(ig, o)	\
	bus_space_read_4((ig)->ig_memt, (ig)->ig_memh, o)
#define GPIO_WRITE(ig, o, v)	\
	bus_space_write_4((ig)->ig_memt, (ig)->ig_memh, o, v)

static void
unblock_irq(unsigned int group, int irq_mask)
{
	struct intrgroup * const ig = &intrgroups[group];
	KASSERT((irq_mask & ig->ig_enabled_irqs) == 0);
	ig->ig_enabled_irqs |= irq_mask;
	if (group < 3) {
		INTC_WRITE(ig, INTC_MIR_CLEAR, irq_mask);
	} else {
		GPIO_WRITE(ig, GPIO_SETIRQENABLE1, irq_mask);
		/*
		 * Clear IRQSTATUS of level interrupts; if they are still
		 * asserted, IRQSTATUS will become set again and they will
		 * refire.  This avoids one spurious interrupt for every
		 * real interrupt.
		 */
		if (irq_mask & (ig->ig_level_low|ig->ig_level_high))
			GPIO_WRITE(ig, GPIO_IRQSTATUS1,
			    irq_mask & (ig->ig_level_low|ig->ig_level_high));
	}

	/* Force INTC to recompute IRQ availability */
	INTC_WRITE(&intrgroups[0], INTC_CONTROL, INTC_CONTROL_NEWIRQAGR);
}

static void
block_irq(unsigned int group, int irq_mask)
{
	struct intrgroup * const ig = &intrgroups[group];
	ig->ig_enabled_irqs &= ~irq_mask;
	if (group < 3) {
		INTC_WRITE(ig, INTC_MIR_SET, irq_mask);
		return;
	}
	GPIO_WRITE(ig, GPIO_CLEARIRQENABLE1, irq_mask);
	/*
	 * Only clear (re-enable) edge interrupts.
	 */
	if (irq_mask & (ig->ig_edge_falling|ig->ig_edge_rising))
		GPIO_WRITE(ig, GPIO_IRQSTATUS1,	/* reset int bits */
		    irq_mask & (ig->ig_edge_falling|ig->ig_edge_rising));
}

static void
init_irq(int irq, int spl, int type)
{
	struct intrgroup * const ig = &intrgroups[irq / 32];
	uint32_t irq_mask = __BIT(irq & 31);
	uint32_t v;

	KASSERT(irq >= 0 && irq < 256);
	ig->ig_sources[irq & 31].is_ipl = spl;
	if (irq < 96) {
		/* INTC sources are always level triggered. */
		KASSERT(type == IST_LEVEL);
		return;
	}

	ig->ig_enabled_irqs &= ~irq_mask;
	GPIO_WRITE(ig, GPIO_CLEARIRQENABLE1, irq_mask);

	v = GPIO_READ(ig, GPIO_OE);
	GPIO_WRITE(ig, GPIO_OE, v | irq_mask);	/* set as input */

	ig->ig_edge_rising &= ~irq_mask;
	ig->ig_edge_falling &= ~irq_mask;
	ig->ig_level_low &= ~irq_mask;
	ig->ig_level_high &= ~irq_mask;

	switch (type) {
	case IST_EDGE_BOTH:
		ig->ig_edge_rising |= irq_mask;
		ig->ig_edge_falling |= irq_mask;
		break;
	case IST_EDGE_RISING:
		ig->ig_edge_rising |= irq_mask;
		break;
	case IST_EDGE_FALLING:
		ig->ig_edge_falling |= irq_mask;
		break;
	case IST_LEVEL_LOW:
		ig->ig_level_low |= irq_mask;
		break;
	case IST_LEVEL_HIGH:
		ig->ig_level_high |= irq_mask;
		break;
	}

	GPIO_WRITE(ig, GPIO_LEVELDETECT0, ig->ig_level_low);
	GPIO_WRITE(ig, GPIO_LEVELDETECT1, ig->ig_level_high);
	GPIO_WRITE(ig, GPIO_RISINGDETECT, ig->ig_edge_rising);
	GPIO_WRITE(ig, GPIO_FALLINGDETECT, ig->ig_edge_falling);
}

/*
 * Called with interrupts disabled.
 */
static void
calculate_irq_masks(struct intrgroup *ig)
{
	int ipl;
	int irq;
	uint32_t irq_mask;

	memset(ig->ig_irqsbyipl, 0, sizeof(ig->ig_irqsbyipl));
	ig->ig_irqs = 0;
	for (irq_mask = 1, irq = 0; irq < 32; irq_mask <<= 1, irq++) {
		if ((ipl = ig->ig_sources[irq].is_ipl) == IPL_NONE)
			continue;
		ig->ig_irqsbyipl[ipl] |= irq_mask;
		ig->ig_irqs |= irq_mask;
	}
}

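/*
 * For example, if sources 4 and 9 of a group are established at
 * IPL_BIO, the loop above leaves ig_irqsbyipl[IPL_BIO] == 0x00000210
 * and sets those same two bits in ig_irqs.
 */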
/*
 * Called with interrupts disabled.
 */
static uint32_t
mark_pending_irqs(int group, uint32_t pending)
{
	struct intrgroup * const ig = &intrgroups[group];
	struct intrsource *is;
	int n;
	uint32_t ipl_mask = 0;

	if (pending == 0)
		return ipl_mask;

	KASSERT((ig->ig_enabled_irqs & pending) == pending);
	KASSERT((ig->ig_pending_irqs & pending) == 0);

	ig->ig_pending_irqs |= pending;
	block_irq(group, pending);
	while (pending != 0) {
		n = 31 - __builtin_clz(pending);
		is = &ig->ig_sources[n];
		KASSERT(ig->ig_irqsbyipl[is->is_ipl] & pending);
		pending &= ~ig->ig_irqsbyipl[is->is_ipl];
		ipl_mask |= __BIT(is->is_ipl);
		KASSERT(ipl_mask < __BIT(NIPL));
		pending_igroupsbyipl[is->is_ipl] |= __BIT(group);
	}
	KASSERT(ipl_mask < __BIT(NIPL));

	return ipl_mask;
}

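/*
 * Together with deliver_irqs() below this implements deferred
 * delivery: IRQs found pending are blocked and recorded above, then
 * run (and unblocked) only once do_pending_ints() lowers the current
 * IPL far enough.
 */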
/*
 * Called with interrupts disabled.
 */
static uint32_t
get_pending_irqs(void)
{
	uint32_t pending[3];
	uint32_t xpending;
	uint32_t ipl_mask = 0;

	pending[0] = INTC_READ(&intrgroups[0], INTC_PENDING_IRQ);
	pending[1] = INTC_READ(&intrgroups[1], INTC_PENDING_IRQ);
	pending[2] = INTC_READ(&intrgroups[2], INTC_PENDING_IRQ);

	/* Get interrupt status of GPIO1 */
	if (pending[GPIO1_MPU_IRQ / 32] & __BIT(GPIO1_MPU_IRQ & 31)) {
		KASSERT(intrgroups[3].ig_enabled_irqs);
		xpending = GPIO_READ(&intrgroups[3], GPIO_IRQSTATUS1);
		xpending &= intrgroups[3].ig_enabled_irqs;
		ipl_mask |= mark_pending_irqs(3, xpending);
	}

	/* Get interrupt status of GPIO2 */
	if (pending[GPIO2_MPU_IRQ / 32] & __BIT(GPIO2_MPU_IRQ & 31)) {
		KASSERT(intrgroups[4].ig_enabled_irqs);
		xpending = GPIO_READ(&intrgroups[4], GPIO_IRQSTATUS1);
		xpending &= intrgroups[4].ig_enabled_irqs;
		ipl_mask |= mark_pending_irqs(4, xpending);
	}

	/* Get interrupt status of GPIO3 */
	if (pending[GPIO3_MPU_IRQ / 32] & __BIT(GPIO3_MPU_IRQ & 31)) {
		KASSERT(intrgroups[5].ig_enabled_irqs);
		xpending = GPIO_READ(&intrgroups[5], GPIO_IRQSTATUS1);
		xpending &= intrgroups[5].ig_enabled_irqs;
		ipl_mask |= mark_pending_irqs(5, xpending);
	}

	/* Get interrupt status of GPIO4 */
	if (pending[GPIO4_MPU_IRQ / 32] & __BIT(GPIO4_MPU_IRQ & 31)) {
		KASSERT(intrgroups[6].ig_enabled_irqs);
		xpending = GPIO_READ(&intrgroups[6], GPIO_IRQSTATUS1);
		xpending &= intrgroups[6].ig_enabled_irqs;
		ipl_mask |= mark_pending_irqs(6, xpending);
	}

#if defined(OMAP_2430)
	/* Get interrupt status of GPIO5 */
	if (pending[GPIO5_MPU_IRQ / 32] & __BIT(GPIO5_MPU_IRQ & 31)) {
		KASSERT(intrgroups[7].ig_enabled_irqs);
		xpending = GPIO_READ(&intrgroups[7], GPIO_IRQSTATUS1);
		xpending &= intrgroups[7].ig_enabled_irqs;
		ipl_mask |= mark_pending_irqs(7, xpending);
	}
#endif

	/* Clear the GPIO indications from the summaries */
	pending[GPIO1_MPU_IRQ / 32] &= ~__BIT(GPIO1_MPU_IRQ & 31);
	pending[GPIO2_MPU_IRQ / 32] &= ~__BIT(GPIO2_MPU_IRQ & 31);
	pending[GPIO3_MPU_IRQ / 32] &= ~__BIT(GPIO3_MPU_IRQ & 31);
	pending[GPIO4_MPU_IRQ / 32] &= ~__BIT(GPIO4_MPU_IRQ & 31);
#if defined(OMAP_2430)
	pending[GPIO5_MPU_IRQ / 32] &= ~__BIT(GPIO5_MPU_IRQ & 31);
#endif

	/* Now handle the primary interrupt summaries */
	ipl_mask |= mark_pending_irqs(0, pending[0]);
	ipl_mask |= mark_pending_irqs(1, pending[1]);
	ipl_mask |= mark_pending_irqs(2, pending[2]);

	/* Force INTC to recompute IRQ availability */
	INTC_WRITE(&intrgroups[0], INTC_CONTROL, INTC_CONTROL_NEWIRQAGR);

	return ipl_mask;
}

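/*
 * last_delivered_ipl tracks the IPL currently being delivered so
 * nested deliveries can be sanity-checked; no_pending_irqs[ipl][group]
 * is a diagnostic counter of groups that were flagged at an IPL but
 * had nothing left to deliver.
 */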
static int last_delivered_ipl;
static u_long no_pending_irqs[NIPL][NIGROUPS];

static void
deliver_irqs(register_t psw, int ipl, void *frame)
{
	struct intrgroup *ig;
	struct intrsource *is;
	uint32_t pending_irqs;
	uint32_t irq_mask;
	uint32_t blocked_irqs;
	volatile uint32_t * const pending_igroups = &pending_igroupsbyipl[ipl];
	const uint32_t ipl_mask = __BIT(ipl);
	int group, n, rv;
	int saved_ipl = IPL_NONE;	/* XXX stupid GCC */

	if (frame == NULL) {
		saved_ipl = last_delivered_ipl;
		KASSERT(saved_ipl < ipl);
		last_delivered_ipl = ipl;
	}

	/*
	 * We must only be called if this IPL has pending interrupts,
	 * and therefore there must be at least one intrgroup with a pending
	 * interrupt.
	 */
	KASSERT(pending_ipls & ipl_mask);
	KASSERT(*pending_igroups);

	/*
	 * We loop until there are no more intrgroups with pending interrupts.
	 */
	do {
		group = 31 - __builtin_clz(*pending_igroups);
		KASSERT(group < NIGROUPS);

		ig = &intrgroups[group];
		irq_mask = ig->ig_irqsbyipl[ipl];
		pending_irqs = ig->ig_pending_irqs & irq_mask;
		blocked_irqs = pending_irqs;
		if ((*pending_igroups &= ~__BIT(group)) == 0)
			pending_ipls &= ~ipl_mask;

		KASSERT(group < 3 ||
		    (GPIO_READ(ig, GPIO_IRQSTATUS1) & blocked_irqs) == 0);

		/*
		 * We couldn't have gotten here unless there was at least one
		 * pending interrupt in this intrgroup.
		 */
		if (pending_irqs == 0) {
			no_pending_irqs[ipl][group]++;
			continue;
		}
		KASSERT(pending_irqs != 0);
		do {
			n = 31 - __builtin_clz(pending_irqs);
			KASSERT(ig->ig_irqs & __BIT(n));
			KASSERT(irq_mask & __BIT(n));

			/*
			 * If this was the last bit cleared for this IRQ,
			 * we need to clear this group's bit in
			 * pending_igroupsbyipl[ipl].  And if that is now 0,
			 * we need to clear pending_ipls for this IPL.
			 */
			ig->ig_pending_irqs &= ~__BIT(n);
			if (irq_mask == __BIT(n))
				KASSERT((ig->ig_pending_irqs & irq_mask) == 0);
			is = &ig->ig_sources[n];
			if (__predict_false(frame != NULL)) {
				(*is->is_func)(frame);
			} else {
				restore_interrupts(psw);
				rv = (*is->is_func)(is->is_arg);
				disable_interrupts(I32_bit);
				if (rv && group >= 3)	/* XXX */
					GPIO_WRITE(ig, GPIO_IRQSTATUS1,
					    __BIT(n));
			}
			if (ig->ig_irqsbyipl[ipl] == __BIT(n))
				KASSERT((ig->ig_pending_irqs & irq_mask) == 0);
			is->is_ev.ev_count++;
			pending_irqs = ig->ig_pending_irqs & irq_mask;
		} while (pending_irqs);

		/*
		 * We don't unblock the interrupts individually because even
		 * if one was unblocked it couldn't be delivered since our
		 * current IPL would prevent it.  So we wait until we can do
		 * them all at once.
		 */
		KASSERT(group < 3 ||
		    (GPIO_READ(ig, GPIO_IRQSTATUS1) & blocked_irqs) == 0);
		unblock_irq(group, blocked_irqs);
	} while (*pending_igroups);

	/*
	 * Since there are no more pending interrupts for this IPL,
	 * this IPL must not be present in the pending IPLs.
	 */
	KASSERT((pending_ipls & ipl_mask) == 0);
	KASSERT((intrgroups[0].ig_pending_irqs & intrgroups[0].ig_irqsbyipl[ipl]) == 0);
	KASSERT((intrgroups[1].ig_pending_irqs & intrgroups[1].ig_irqsbyipl[ipl]) == 0);
	KASSERT((intrgroups[2].ig_pending_irqs & intrgroups[2].ig_irqsbyipl[ipl]) == 0);
	KASSERT((intrgroups[3].ig_pending_irqs & intrgroups[3].ig_irqsbyipl[ipl]) == 0);
	KASSERT((intrgroups[4].ig_pending_irqs & intrgroups[4].ig_irqsbyipl[ipl]) == 0);
	KASSERT((intrgroups[5].ig_pending_irqs & intrgroups[5].ig_irqsbyipl[ipl]) == 0);
	KASSERT((intrgroups[6].ig_pending_irqs & intrgroups[6].ig_irqsbyipl[ipl]) == 0);
	KASSERT((intrgroups[7].ig_pending_irqs & intrgroups[7].ig_irqsbyipl[ipl]) == 0);

	if (frame == NULL)
		last_delivered_ipl = saved_ipl;
}

static void
do_pending_ints(register_t psw, int newipl)
{
	while ((pending_ipls & ~__BIT(newipl)) > __BIT(newipl)) {
		KASSERT(pending_ipls < __BIT(NIPL));
		int ipl = 31 - __builtin_clz(pending_ipls);
		curcpu()->ci_cpl = ipl;
		deliver_irqs(psw, ipl, NULL);
	}
	curcpu()->ci_cpl = newipl;
}

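/*
 * The loop condition in do_pending_ints() is a branch-free test for
 * "is any IPL strictly above newipl pending?": with newipl's own bit
 * masked off, the remaining value exceeds __BIT(newipl) exactly when
 * some higher bit is set.  For example, with newipl == 4,
 * pending_ipls == 0x30 iterates (IPL 5 is pending) while 0x10 does
 * not.
 */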
int
_splraise(int newipl)
{
	const int oldipl = curcpu()->ci_cpl;
	KASSERT(newipl < NIPL);
	if (newipl > curcpu()->ci_cpl)
		curcpu()->ci_cpl = newipl;
	return oldipl;
}

int
_spllower(int newipl)
{
	const int oldipl = curcpu()->ci_cpl;
	KASSERT(panicstr || newipl <= curcpu()->ci_cpl);
	if (newipl < curcpu()->ci_cpl) {
		register_t psw = disable_interrupts(I32_bit);
		do_pending_ints(psw, newipl);
		restore_interrupts(psw);
	}
	return oldipl;
}

void
splx(int savedipl)
{
	KASSERT(savedipl < NIPL);
	if (savedipl < curcpu()->ci_cpl) {
		register_t psw = disable_interrupts(I32_bit);
		do_pending_ints(psw, savedipl);
		restore_interrupts(psw);
	}
	curcpu()->ci_cpl = savedipl;
}

void
omap_irq_handler(void *frame)
{
	const int oldipl = curcpu()->ci_cpl;
	const uint32_t oldipl_mask = __BIT(oldipl);

	/*
	 * When we enter there must be no pending IRQs for IPLs greater than
	 * the current IPL.  There might be pending IRQs for the current IPL
	 * if we are servicing interrupts.
	 */
	KASSERT((pending_ipls & ~oldipl_mask) < oldipl_mask);
	pending_ipls |= get_pending_irqs();

	/*
	 * We assume this isn't a clock intr.  But if it is, deliver it
	 * unconditionally so it will always have the interrupted frame.
	 * The clock intr will handle being called at IPLs != IPL_CLOCK.
	 */
	if (__predict_false(pending_ipls & __BIT(IPL_STATCLOCK))) {
		deliver_irqs(0, IPL_STATCLOCK, frame);
		pending_ipls &= ~__BIT(IPL_STATCLOCK);
	}
	if (__predict_false(pending_ipls & __BIT(IPL_CLOCK))) {
		deliver_irqs(0, IPL_CLOCK, frame);
		pending_ipls &= ~__BIT(IPL_CLOCK);
	}

	/*
	 * Record the pending_ipls and deliver them if we can.
	 */
	if ((pending_ipls & ~oldipl_mask) > oldipl_mask)
		do_pending_ints(I32_bit, oldipl);
}

void *
omap_intr_establish(int irq, int ipl, const char *name,
	int (*func)(void *), void *arg)
{
	struct intrgroup *ig = &intrgroups[irq / 32];
	struct intrsource *is;
	register_t psw;

	KASSERT(irq >= 0 && irq < 256);
	is = &ig->ig_sources[irq & 0x1f];
	KASSERT(irq != GPIO1_MPU_IRQ);
	KASSERT(irq != GPIO2_MPU_IRQ);
	KASSERT(irq != GPIO3_MPU_IRQ);
	KASSERT(irq != GPIO4_MPU_IRQ);
	KASSERT(irq != GPIO5_MPU_IRQ);
	KASSERT(is->is_ipl == IPL_NONE);

	is->is_func = func;
	is->is_arg = arg;

	psw = disable_interrupts(I32_bit);
	evcnt_attach_dynamic(&is->is_ev, EVCNT_TYPE_INTR, NULL, name, "intr");
	init_irq(irq, ipl, IST_LEVEL);

	calculate_irq_masks(ig);
	unblock_irq(is->is_group, __BIT(irq & 31));
	restore_interrupts(psw);

	return is;
}

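/*
 * A minimal usage sketch (hypothetical driver attach: MYDEV_IRQ,
 * mydev_intr and sc are illustrative, not part of this file):
 *
 *	void *ih = omap_intr_establish(MYDEV_IRQ, IPL_BIO, "mydev",
 *	    mydev_intr, sc);
 *	...
 *	omap_intr_disestablish(ih);
 */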
void
omap_intr_disestablish(void *ih)
{
	struct intrsource * const is = ih;
	struct intrgroup *ig;
	register_t psw;
	uint32_t mask;

	ig = &intrgroups[is->is_group];
	psw = disable_interrupts(I32_bit);
	mask = __BIT(is - ig->ig_sources);
	block_irq(is->is_group, mask);
	ig->ig_pending_irqs &= ~mask;
	is->is_ipl = IPL_NONE;
	calculate_irq_masks(ig);
	evcnt_detach(&is->is_ev);
	restore_interrupts(psw);
}

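/*
 * Enable the functional and interface clocks of the GPIO5 module;
 * until both are enabled its registers cannot be accessed.
 */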
#if defined(OMAP_2430)
static void
gpio5_clkinit(bus_space_tag_t memt)
{
	bus_space_handle_t memh;
	uint32_t r;
	int error;

	error = bus_space_map(memt, OMAP2430_CM_BASE,
	    OMAP2430_CM_SIZE, 0, &memh);
	if (error != 0)
		panic("%s: cannot map OMAP2430_CM_BASE at %#x: %d\n",
		    __func__, OMAP2430_CM_BASE, error);

	r = bus_space_read_4(memt, memh, OMAP2430_CM_FCLKEN2_CORE);
	r |= OMAP2430_CM_FCLKEN2_CORE_EN_GPIO5;
	bus_space_write_4(memt, memh, OMAP2430_CM_FCLKEN2_CORE, r);

	r = bus_space_read_4(memt, memh, OMAP2430_CM_ICLKEN2_CORE);
	r |= OMAP2430_CM_ICLKEN2_CORE_EN_GPIO5;
	bus_space_write_4(memt, memh, OMAP2430_CM_ICLKEN2_CORE, r);

	bus_space_unmap(memt, memh, OMAP2430_CM_SIZE);
}
#endif

void
omap2430_intr_init(bus_space_tag_t memt)
{
	int group;
	int error;

	for (group = 0; group < NIGROUPS; group++)
		intrgroups[group].ig_memt = memt;
	error = bus_space_map(memt, INTC_BASE, 0x1000, 0,
	    &intrgroups[0].ig_memh);
	if (error)
		panic("failed to map interrupt registers: %d", error);
	error = bus_space_subregion(memt, intrgroups[0].ig_memh, 0x20, 0x20,
	    &intrgroups[1].ig_memh);
	if (error)
		panic("failed to subregion interrupt registers: %d", error);
	error = bus_space_subregion(memt, intrgroups[0].ig_memh, 0x40, 0x20,
	    &intrgroups[2].ig_memh);
	if (error)
		panic("failed to subregion interrupt registers: %d", error);
	error = bus_space_map(memt, GPIO1_BASE, 0x400, 0,
	    &intrgroups[3].ig_memh);
	if (error)
		panic("failed to map gpio #1 registers: %d", error);
	error = bus_space_map(memt, GPIO2_BASE, 0x400, 0,
	    &intrgroups[4].ig_memh);
	if (error)
		panic("failed to map gpio #2 registers: %d", error);
	error = bus_space_map(memt, GPIO3_BASE, 0x400, 0,
	    &intrgroups[5].ig_memh);
	if (error)
		panic("failed to map gpio #3 registers: %d", error);
	error = bus_space_map(memt, GPIO4_BASE, 0x400, 0,
	    &intrgroups[6].ig_memh);
	if (error)
		panic("failed to map gpio #4 registers: %d", error);
#if defined(OMAP_2430)
	gpio5_clkinit(memt);
	error = bus_space_map(memt, GPIO5_BASE, 0x400, 0,
	    &intrgroups[7].ig_memh);
	if (error)
		panic("failed to map gpio #5 registers: %d", error);
#endif

	/* Mask everything, then unmask only the GPIO summary interrupts. */
	INTC_WRITE(&intrgroups[0], INTC_MIR_SET, 0xffffffff);
	INTC_WRITE(&intrgroups[1], INTC_MIR_SET, 0xffffffff);
	INTC_WRITE(&intrgroups[2], INTC_MIR_SET, 0xffffffff);
	INTC_WRITE(&intrgroups[GPIO1_MPU_IRQ / 32], INTC_MIR_CLEAR,
	    __BIT(GPIO1_MPU_IRQ & 31));
	INTC_WRITE(&intrgroups[GPIO2_MPU_IRQ / 32], INTC_MIR_CLEAR,
	    __BIT(GPIO2_MPU_IRQ & 31));
	INTC_WRITE(&intrgroups[GPIO3_MPU_IRQ / 32], INTC_MIR_CLEAR,
	    __BIT(GPIO3_MPU_IRQ & 31));
	INTC_WRITE(&intrgroups[GPIO4_MPU_IRQ / 32], INTC_MIR_CLEAR,
	    __BIT(GPIO4_MPU_IRQ & 31));
#if defined(OMAP_2430)
	INTC_WRITE(&intrgroups[GPIO5_MPU_IRQ / 32], INTC_MIR_CLEAR,
	    __BIT(GPIO5_MPU_IRQ & 31));
#endif

	/*
	 * Set up the primary intrgroups.
	 */
	calculate_irq_masks(&intrgroups[0]);
	calculate_irq_masks(&intrgroups[1]);
	calculate_irq_masks(&intrgroups[2]);
}