1 /* $NetBSD: intr.c,v 1.5 2008/04/08 02:33:03 garbled Exp $ */
4 * Copyright (c) 2007 Michael Lorenz
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
17 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.5 2008/04/08 02:33:03 garbled Exp $");
32 #include "opt_multiprocessor.h"
34 #include <sys/param.h>
35 #include <sys/malloc.h>
36 #include <sys/kernel.h>
39 #include <uvm/uvm_extern.h>
41 #include <arch/powerpc/pic/picvar.h>
43 #include "opt_interrupt.h"
44 #if defined(PIC_I8259) || defined (PIC_PREPIVR)
45 #include <machine/isa_machdep.h>
49 #include <arch/powerpc/pic/ipivar.h>
/*
 * NOTE(review): this whole chunk is a line-mangled extraction of NetBSD
 * sys/arch/powerpc/pic/intr.c rev 1.5 — statement tokens are split across
 * physical lines and interior source lines are missing.  Code is kept
 * byte-identical; recover the pristine file from NetBSD CVS before editing.
 */
/* Tunables: table sizes for the PIC framework. */
52 #define MAX_PICS 8 /* 8 PICs ought to be enough for everyone */
54 #define NVIRQ 32 /* 32 virtual IRQs */
55 #define NIRQ 128 /* up to 128 HW IRQs */
/* Top virtual IRQ usable for hardware; low bits are reserved (soft IRQs). */
57 #define HWIRQ_MAX (NVIRQ - 4 - 1)
58 #define HWIRQ_MASK 0x0fffffff
59 #define LEGAL_VIRQ(x) ((x) >= 0 && (x) < NVIRQ)
/* Registered PIC instances, indexed by registration order. */
61 struct pic_ops
*pics
[MAX_PICS
];
/* Forward declarations for file-local helpers. */
69 static int fakeintr(void *);
70 static int mapirq(uint32_t);
71 static void intr_calculatemasks(void);
72 static struct pic_ops
*find_pic_by_irq(int);
/* One interrupt-source record per virtual IRQ. */
74 static struct intr_source intrsources
[NVIRQ
];
/*
 * Fragment of an initialization routine (function header missing from this
 * extraction; presumably intr_init).  Loops over hardware IRQ slots and
 * zeroes the intrsources[] table.  NOTE(review): corrupted extraction —
 * code kept byte-identical; confirm against the original file.
 */
81 for (i
= 0; i
< NIRQ
; i
++)
83 memset(intrsources
, 0, sizeof(intrsources
));
/*
 * pic_add(pic): register a PIC with the framework.  Refuses when MAX_PICS
 * is reached, assigns the PIC its base in the global hardware-IRQ number
 * space (max_base), advances max_base by the PIC's interrupt count, and
 * returns the assigned base.  NOTE(review): interior lines (return-on-full,
 * pics[] insertion) are missing from this extraction; bytes kept as-is.
 */
87 pic_add(struct pic_ops
*pic
)
90 if (num_pics
>= MAX_PICS
)
/* Claim the next range of hardware IRQ numbers for this PIC. */
94 pic
->pic_intrbase
= max_base
;
95 max_base
+= pic
->pic_numintrs
;
98 return pic
->pic_intrbase
;
/*
 * pic_finish_setup(): give every registered PIC a chance to run its
 * optional pic_finish_setup callback after all PICs have been added.
 * NOTE(review): the per-iteration fetch of `pic` from pics[] is missing
 * from this extraction; code kept byte-identical.
 */
102 pic_finish_setup(void)
107 for (i
= 0; i
< num_pics
; i
++) {
109 if (pic
->pic_finish_setup
!= NULL
)
110 pic
->pic_finish_setup(pic
);
/*
 * find_pic_by_irq(irq): linear search of pics[] for the PIC whose
 * [pic_intrbase, pic_intrbase + pic_numintrs) range contains the given
 * global hardware IRQ number.  NOTE(review): the match/return and
 * not-found paths are missing from this extraction; presumably returns
 * the matching PIC or NULL — verify against the original.
 */
114 static struct pic_ops
*
115 find_pic_by_irq(int irq
)
117 struct pic_ops
*current
;
120 while (base
< num_pics
) {
122 current
= pics
[base
];
123 if ((irq
>= current
->pic_intrbase
) &&
124 (irq
< (current
->pic_intrbase
+ current
->pic_numintrs
))) {
/*
 * intr_establish(hwirq, type, level, ih_fun, ...): register an interrupt
 * handler on a hardware IRQ.  Validates the IRQ against max_base, finds
 * the owning PIC, maps the hardware IRQ to a virtual IRQ, enforces
 * share-type compatibility, attaches an event counter on first use,
 * inserts the handler in priority order (behind a temporary fake handler
 * so the real one is not called before masks are set up), notifies the
 * PIC, and recomputes the masks.  NOTE(review): this is a corrupted
 * extraction with many interior lines missing (locals, virq mapping,
 * list insertion, return); bytes kept identical.
 */
141 * Register an interrupt handler.
144 intr_establish(int hwirq
, int type
, int level
, int (*ih_fun
)(void *),
147 struct intrhand
**p
, *q
, *ih
;
148 struct intr_source
*is
;
150 static struct intrhand fakehand
;
151 int irq
, maxlevel
= level
;
153 if (maxlevel
== IPL_NONE
)
/* Reject IRQ numbers beyond the range claimed by registered PICs. */
156 if (hwirq
>= max_base
) {
158 panic("%s: bogus IRQ %d, max is %d", __func__
, hwirq
,
162 pic
= find_pic_by_irq(hwirq
);
165 panic("%s: cannot find a pic for IRQ %d", __func__
, hwirq
);
170 /* no point in sleeping unless someone can free memory. */
171 ih
= malloc(sizeof *ih
, M_DEVBUF
, cold
? M_NOWAIT
: M_WAITOK
);
173 panic("intr_establish: can't malloc handler info");
175 if (!LEGAL_VIRQ(irq
) || type
== IST_NONE
)
176 panic("intr_establish: bogus irq (%d) or type (%d)", irq
, type
);
178 is
= &intrsources
[irq
];
/* Sharing rules: the new type must be compatible with the existing one. */
180 switch (is
->is_type
) {
186 if (type
== is
->is_type
)
189 if (type
!= IST_NONE
)
190 panic("intr_establish: can't share %s with %s",
191 intr_typename(is
->is_type
),
192 intr_typename(type
));
/* First handler on this source: attach an evcnt named after the PIC. */
195 if (is
->is_hand
== NULL
) {
196 snprintf(is
->is_source
, sizeof(is
->is_source
), "irq %d",
198 evcnt_attach_dynamic(&is
->is_ev
, EVCNT_TYPE_INTR
, NULL
,
199 pic
->pic_name
, is
->is_source
);
203 * Figure out where to put the handler.
204 * This is O(N^2), but we want to preserve the order, and N is
207 for (p
= &is
->is_hand
; (q
= *p
) != NULL
; p
= &q
->ih_next
) {
209 maxlevel
= max(maxlevel
, q
->ih_level
);
213 * Actually install a fake handler momentarily, since we might be doing
214 * this with interrupts enabled and don't want the real routine called
215 * until masking is set up.
217 fakehand
.ih_level
= level
;
218 fakehand
.ih_fun
= fakeintr
;
222 * Poke the real handler in now.
227 ih
->ih_level
= level
;
/* Let the PIC program trigger type / priority for its local IRQ number. */
231 if (pic
->pic_establish_irq
!= NULL
)
232 pic
->pic_establish_irq(pic
, hwirq
- pic
->pic_intrbase
,
233 is
->is_type
, maxlevel
);
236 * now that the handler is established we're actually ready to
237 * calculate the masks
239 intr_calculatemasks();
/*
 * dummy_pic_establish_intr: no-op pic_establish_irq callback for PICs
 * that need no per-IRQ programming.  NOTE(review): body (presumably
 * empty) missing from this extraction; bytes kept identical.
 */
246 dummy_pic_establish_intr(struct pic_ops
*pic
, int irq
, int type
, int pri
)
/*
 * intr_disestablish(arg): remove a previously established handler.
 * Unlinks the intrhand from its source's chain, frees it, recomputes the
 * masks, and — when the source's chain becomes empty — resets its type to
 * IST_NONE and detaches the event counter.  NOTE(review): corrupted
 * extraction; the unlink assignment and some braces are missing.
 */
251 * Deregister an interrupt handler.
254 intr_disestablish(void *arg
)
256 struct intrhand
*ih
= arg
;
257 int irq
= ih
->ih_irq
;
258 struct intr_source
*is
= &intrsources
[irq
];
259 struct intrhand
**p
, *q
;
261 if (!LEGAL_VIRQ(irq
))
262 panic("intr_disestablish: bogus irq %d", irq
);
265 * Remove the handler from the chain.
266 * This is O(n^2), too.
268 for (p
= &is
->is_hand
; (q
= *p
) != NULL
&& q
!= ih
; p
= &q
->ih_next
)
273 panic("intr_disestablish: handler not registered");
274 free((void *)ih
, M_DEVBUF
);
276 intr_calculatemasks();
/* Last handler gone: mark the source free and drop its evcnt. */
278 if (is
->is_hand
== NULL
) {
279 is
->is_type
= IST_NONE
;
280 evcnt_detach(&is
->is_ev
);
/*
 * mapirq(irq): map a global hardware IRQ number onto one of the NVIRQ
 * virtual IRQ slots (the 32 bits used in ipending/imask words).  Panics
 * on an invalid IRQ, an unclaimed IRQ, or virtual-IRQ exhaustion, records
 * the hwirq/pic in the slot, and logs the mapping.  NOTE(review):
 * corrupted extraction — the slot allocation and return are missing.
 */
285 * Map max_base irqs into 32 (bits).
294 panic("invalid irq %d", irq
);
296 if ((pic
= find_pic_by_irq(irq
)) == NULL
)
297 panic("%s: cannot find PIC for IRQ %d", __func__
, irq
);
305 panic("virq overflow");
/* Record which hardware IRQ and PIC this virtual slot represents. */
307 intrsources
[v
].is_hwirq
= irq
;
308 intrsources
[v
].is_pic
= pic
;
311 printf("mapping irq %d to virq %d\n", irq
, v
);
/*
 * intr_typenames[] / intr_typename(type): human-readable names for the
 * IST_* trigger types, with KASSERTs that the index is in range and the
 * slot is populated.  NOTE(review): corrupted extraction — the IST_NONE
 * entry and the function header's return type line are missing.
 */
316 static const char * const intr_typenames
[] = {
318 [IST_PULSE
] = "pulsed",
319 [IST_EDGE
] = "edge-triggered",
320 [IST_LEVEL
] = "level-triggered",
324 intr_typename(int type
)
326 KASSERT((unsigned int) type
< __arraycount(intr_typenames
));
327 KASSERT(intr_typenames
[type
] != NULL
);
328 return intr_typenames
[type
];
/*
 * intr_calculatemasks(): rebuild the imask[] table and per-source masks
 * from scratch.  Pass 1: collect the IPLs used by each virtual IRQ.
 * Pass 2: build per-level IRQ masks, fold in soft-interrupt bits, and
 * enforce the spl(9) hierarchy (sloppy or strict depending on
 * SLOPPY_IPLS).  Pass 3: compute each source's complete mask, then
 * disable all PIC inputs and re-enable only those with handlers.
 * NOTE(review): corrupted extraction — locals (q, imask updates),
 * several loop bodies and closing braces are missing; bytes kept as-is.
 */
332 * Recalculate the interrupt masks from scratch.
333 * We could code special registry and deregistry versions of this function that
334 * would be faster, but the code would be nastier, and we don't expect this to
335 * happen very much anyway.
338 intr_calculatemasks(void)
340 struct intr_source
*is
;
342 struct pic_ops
*current
;
343 int irq
, level
, i
, base
;
345 /* First, figure out which levels each IRQ uses. */
346 for (irq
= 0, is
= intrsources
; irq
< NVIRQ
; irq
++, is
++) {
347 register int levels
= 0;
348 for (q
= is
->is_hand
; q
; q
= q
->ih_next
)
349 levels
|= 1 << q
->ih_level
;
350 is
->is_level
= levels
;
353 /* Then figure out which IRQs use each level. */
354 for (level
= 0; level
< NIPL
; level
++) {
355 register int irqs
= 0;
356 for (irq
= 0, is
= intrsources
; irq
< NVIRQ
; irq
++, is
++)
357 if (is
->is_level
& (1 << level
))
363 * IPL_CLOCK should mask clock interrupt even if interrupt handler
366 imask
[IPL_CLOCK
] |= 1 << SPL_CLOCK
;
369 * Initialize soft interrupt masks to block themselves.
371 imask
[IPL_SOFTCLOCK
] = 1 << SIR_CLOCK
;
372 imask
[IPL_SOFTNET
] = 1 << SIR_NET
;
373 imask
[IPL_SOFTSERIAL
] = 1 << SIR_SERIAL
;
376 * IPL_NONE is used for hardware interrupts that are never blocked,
377 * and do not block anything else.
383 * Enforce a sloppy hierarchy as in spl(9)
385 /* everything above softclock must block softclock */
386 for (i
= IPL_SOFTCLOCK
; i
< NIPL
; i
++)
387 imask
[i
] |= imask
[IPL_SOFTCLOCK
];
389 /* everything above softnet must block softnet */
390 for (i
= IPL_SOFTNET
; i
< NIPL
; i
++)
391 imask
[i
] |= imask
[IPL_SOFTNET
];
393 /* IPL_TTY must block softserial */
394 imask
[IPL_TTY
] |= imask
[IPL_SOFTSERIAL
];
396 /* IPL_VM must block net, block IO and tty */
397 imask
[IPL_VM
] |= (imask
[IPL_NET
] | imask
[IPL_BIO
] | imask
[IPL_TTY
]);
399 /* IPL_SERIAL must block IPL_TTY */
400 imask
[IPL_SERIAL
] |= imask
[IPL_TTY
];
402 /* IPL_HIGH must block all other priority levels */
403 for (i
= IPL_NONE
; i
< IPL_HIGH
; i
++)
404 imask
[IPL_HIGH
] |= imask
[i
];
405 #else /* !SLOPPY_IPLS */
407 * strict hierarchy - all IPLs block everything blocked by any lower
410 for (i
= 1; i
< NIPL
; i
++)
411 imask
[i
] |= imask
[i
- 1];
412 #endif /* !SLOPPY_IPLS */
/* Debug dump of the computed per-level masks. */
415 for (i
= 0; i
< NIPL
; i
++) {
416 printf("%2d: %08x\n", i
, imask
[i
]);
420 /* And eventually calculate the complete masks. */
421 for (irq
= 0, is
= intrsources
; irq
< NVIRQ
; irq
++, is
++) {
422 register int irqs
= 1 << irq
;
423 for (q
= is
->is_hand
; q
; q
= q
->ih_next
)
424 irqs
|= imask
[q
->ih_level
];
428 /* Lastly, enable IRQs actually in use. */
429 for (base
= 0; base
< num_pics
; base
++) {
430 current
= pics
[base
];
431 for (i
= 0; i
< current
->pic_numintrs
; i
++)
432 current
->pic_disable_irq(current
, i
);
435 for (irq
= 0, is
= intrsources
; irq
< NVIRQ
; irq
++, is
++) {
437 pic_enable_irq(is
->is_hwirq
);
/*
 * pic_enable_irq(num): enable a global hardware IRQ — locate the owning
 * PIC, look up the trigger type recorded for its virtual IRQ, and invoke
 * the PIC's enable callback with the PIC-local IRQ number.
 * NOTE(review): corrupted extraction — the NULL-check branch around the
 * panic is missing; bytes kept identical.
 */
442 pic_enable_irq(int num
)
444 struct pic_ops
*current
;
447 current
= find_pic_by_irq(num
);
449 panic("%s: bogus IRQ %d", __func__
, num
);
450 type
= intrsources
[virq
[num
]].is_type
;
451 current
->pic_enable_irq(current
, num
- current
->pic_intrbase
, type
);
/*
 * pic_mark_pending(irq): set the virtual-IRQ bit in the current CPU's
 * ci_ipending with external interrupts disabled (MSR[EE] cleared around
 * the update).  NOTE(review): corrupted extraction — the virq lookup,
 * msr save and restore are missing; bytes kept identical.
 */
455 pic_mark_pending(int irq
)
457 struct cpu_info
* const ci
= curcpu();
462 printf("IRQ %d maps to 0\n", irq
);
/* Block external interrupts while touching ci_ipending. */
465 mtmsr(msr
& ~PSL_EE
);
466 ci
->ci_ipending
|= 1 << v
;
/*
 * pic_do_pending_int(): run all pending interrupts that the current cpl
 * no longer masks.  Hardware pendings are drained highest-bit-first
 * (cntlzw), each at its source's mask with KERNEL_LOCK taken around
 * IPL_VM handlers, then the PIC input is re-enabled; soft interrupts
 * (serial, net, clock) follow under __HAVE_FAST_SOFTINTS.  Finishes by
 * restoring cpl directly.  NOTE(review): corrupted extraction — locals
 * (emsr/dmsr/pcpl/hwpend), recursion guard, handler-loop scaffolding and
 * MSR toggles are missing; bytes kept identical.
 */
471 pic_do_pending_int(void)
473 struct cpu_info
* const ci
= curcpu();
474 struct intr_source
*is
;
/* Must be entered with interrupts enabled; dmsr = EE-disabled copy. */
487 KASSERT(emsr
& PSL_EE
);
488 dmsr
= emsr
& ~PSL_EE
;
492 #ifdef __HAVE_FAST_SOFTINTS
496 /* Do now unmasked pendings */
498 while ((hwpend
= (ci
->ci_ipending
& ~pcpl
& HWIRQ_MASK
)) != 0) {
/* Service the highest pending virtual IRQ first. */
499 irq
= 31 - cntlzw(hwpend
);
500 KASSERT(irq
<= virq_max
);
501 ci
->ci_ipending
&= ~(1 << irq
);
506 is
= &intrsources
[irq
];
509 splraise(is
->is_mask
);
515 printf("NULL interrupt handler!\n");
516 panic("irq %02d, hwirq %02d, is %p\n",
517 irq
, is
->is_hwirq
, is
);
/* IPL_VM handlers run under the big kernel lock. */
520 if (ih
->ih_level
== IPL_VM
) {
521 KERNEL_LOCK(1, NULL
);
523 (*ih
->ih_fun
)(ih
->ih_arg
);
524 if (ih
->ih_level
== IPL_VM
) {
525 KERNEL_UNLOCK_ONE(NULL
);
532 is
->is_ev
.ev_count
++;
533 pic
->pic_reenable_irq(pic
, is
->is_hwirq
- pic
->pic_intrbase
,
538 #ifdef __HAVE_FAST_SOFTINTS
/* Soft interrupts, highest priority (serial) first. */
539 if ((ci
->ci_ipending
& ~pcpl
) & (1 << SIR_SERIAL
)) {
540 ci
->ci_ipending
&= ~(1 << SIR_SERIAL
);
543 softintr__run(IPL_SOFTSERIAL
);
546 ci
->ci_ev_softserial
.ev_count
++;
549 if ((ci
->ci_ipending
& ~pcpl
) & (1 << SIR_NET
)) {
550 ci
->ci_ipending
&= ~(1 << SIR_NET
);
553 softintr__run(IPL_SOFTNET
);
556 ci
->ci_ev_softnet
.ev_count
++;
559 if ((ci
->ci_ipending
& ~pcpl
) & (1 << SIR_CLOCK
)) {
560 ci
->ci_ipending
&= ~(1 << SIR_CLOCK
);
563 softintr__run(IPL_SOFTCLOCK
);
566 ci
->ci_ev_softclock
.ev_count
++;
571 ci
->ci_cpl
= pcpl
; /* Don't use splx... we are here already! */
/*
 * pic_handle_intr(cookie): main dispatch loop for one PIC.  Reads IRQs
 * from the PIC until none remain (255 sentinel); on MULTIPROCESSOR, IPI
 * vectors are acked and handled inline (marked XXX wrong upstream).  A
 * masked IRQ is recorded in ci_ipending and disabled at the PIC;
 * otherwise the source's handler chain runs at is_mask (KERNEL_LOCK
 * around IPL_VM handlers, `bail` bounds the chain walk at 10), the event
 * counter is bumped, the IRQ is acked and the PIC re-polled.  Ends with
 * splx(pcpl) to run anything that became pending.  NOTE(review):
 * corrupted extraction — locals (irq/realirq/ih), MSR handling, r_imen
 * computation and several braces are missing; bytes kept identical.
 */
577 pic_handle_intr(void *cookie
)
579 struct pic_ops
*pic
= cookie
;
580 struct cpu_info
*ci
= curcpu();
581 struct intr_source
*is
;
584 int pcpl
, msr
, r_imen
, bail
;
586 realirq
= pic
->pic_get_irq(pic
, PIC_GET_IRQ
);
595 #ifdef MULTIPROCESSOR
596 /* THIS IS WRONG XXX */
597 while (realirq
== ipiops
.ppc_ipi_vector
) {
599 pic
->pic_ack_irq(pic
, realirq
);
600 realirq
= pic
->pic_get_irq(pic
, PIC_GET_RECHECK
);
/* 255 is the "no interrupt pending" sentinel from the PIC. */
602 if (realirq
== 255) {
607 irq
= virq
[realirq
+ pic
->pic_intrbase
];
610 printf("%s: %d virq 0\n", pic
->pic_name
, realirq
);
613 #endif /* PIC_DEBUG */
614 KASSERT(realirq
< pic
->pic_numintrs
);
616 is
= &intrsources
[irq
];
/* IRQ masked at the current IPL: defer it and quiet the PIC input. */
618 if ((pcpl
& r_imen
) != 0) {
620 ci
->ci_ipending
|= r_imen
; /* Masked! Mark this as pending */
621 pic
->pic_disable_irq(pic
, realirq
);
624 /* this interrupt is no longer pending */
625 ci
->ci_ipending
&= ~r_imen
;
628 splraise(is
->is_mask
);
/* Walk the handler chain; `bail` caps runaway/corrupt chains. */
632 while ((ih
!= NULL
) && (bail
< 10)) {
633 if (ih
->ih_fun
== NULL
)
634 panic("bogus handler for IRQ %s %d",
635 pic
->pic_name
, realirq
);
636 if (ih
->ih_level
== IPL_VM
) {
637 KERNEL_LOCK(1, NULL
);
639 (*ih
->ih_fun
)(ih
->ih_arg
);
640 if (ih
->ih_level
== IPL_VM
) {
641 KERNEL_UNLOCK_ONE(NULL
);
650 is
->is_ev
.ev_count
++;
655 #endif /* PIC_DEBUG */
656 pic
->pic_ack_irq(pic
, realirq
);
657 realirq
= pic
->pic_get_irq(pic
, PIC_GET_RECHECK
);
662 splx(pcpl
); /* Process pendings. */
/*
 * Fragment of the external-interrupt entry point (header missing from
 * this extraction): asserts the primary PIC exists and dispatches to
 * pic_handle_intr() for it.  NOTE(review): verify against the original.
 */
672 KASSERT(pics
[primary_pic
] != NULL
);
673 pic_handle_intr(pics
[primary_pic
]);
/*
 * Fragments of the spl primitives (function headers missing from this
 * extraction; from the embedded numbering these are presumably splraise,
 * splx, spllower and a setsoft-style routine).  Each brackets its cpl
 * update with "sync; eieio" barriers; the lowering paths run
 * pic_do_pending_int() when newly unmasked bits are pending; the last
 * fragment sets a soft-IRQ bit with MSR[EE] disabled.  NOTE(review):
 * corrupted extraction — bytes kept identical; do not edit without the
 * pristine file.
 */
682 struct cpu_info
*ci
= curcpu();
685 __asm
volatile("sync; eieio"); /* don't reorder.... */
/* splraise: cpl can only grow (OR in the new level's mask). */
688 ci
->ci_cpl
= ocpl
| ncpl
;
689 __asm
volatile("sync; eieio"); /* reorder protect */
696 struct cpu_info
*ci
= curcpu();
698 __asm
volatile("sync; eieio"); /* reorder protect */
/* Lowering cpl: run anything that just became unmasked. */
700 if (ci
->ci_ipending
& ~ncpl
)
701 pic_do_pending_int();
702 __asm
volatile("sync; eieio"); /* reorder protect */
708 struct cpu_info
*ci
= curcpu();
711 __asm
volatile("sync; eieio"); /* reorder protect */
714 if (ci
->ci_ipending
& ~ncpl
)
715 pic_do_pending_int();
716 __asm
volatile("sync; eieio"); /* reorder protect */
720 /* Following code should be implemented with lwarx/stwcx to avoid
721 * the disable/enable. i need to read the manual once more.... */
728 mtmsr(msrsave
& ~PSL_EE
);
729 curcpu()->ci_ipending
|= 1 << ipl
;
/*
 * genppc_cpu_configure(): late-boot reporting — prints the computed
 * bio/net/tty interrupt masks.  NOTE(review): corrupted extraction —
 * the function's other statements (if any) are missing; bytes kept
 * identical.
 */
734 genppc_cpu_configure(void)
736 aprint_normal("biomask %x netmask %x ttymask %x\n",
737 imask
[IPL_BIO
] & 0x1fffffff,
738 imask
[IPL_NET
] & 0x1fffffff,
739 imask
[IPL_TTY
] & 0x1fffffff);
/*
 * genppc_isa_intr_alloc(ic, pic, mask, type, irq_p): ISA helper (built
 * only with PIC_PREPIVR or PIC_I8259) that picks the best IRQ from a
 * candidate mask.  A completely unused source (IST_NONE) wins outright;
 * otherwise, among IST_LEVEL sources when level sharing is requested,
 * it remembers the candidate with the shortest handler chain
 * (maybe_irq / shared_depth) and returns it if found.  NOTE(review):
 * corrupted extraction — the mask-iteration step, depth counting and
 * result assignment are missing; bytes kept identical.
 */
744 #if defined(PIC_PREPIVR) || defined(PIC_I8259)
746 * isa_intr_alloc needs to be done here, because it needs direct access to
747 * the various interrupt handler structures.
751 genppc_isa_intr_alloc(isa_chipset_tag_t ic
, struct pic_ops
*pic
,
752 int mask
, int type
, int *irq_p
)
756 int shared_depth
= 0;
757 struct intr_source
*is
;
762 for (irq
= 0; (mask
!= 0 && irq
< pic
->pic_numintrs
);
766 vi
= virq
[irq
+ pic
->pic_intrbase
];
771 is
= &intrsources
[vi
];
/* A completely free source is the best possible choice. */
772 if (is
->is_type
== IST_NONE
) {
776 /* Level interrupts can be shared */
777 if (type
== IST_LEVEL
&& is
->is_type
== IST_LEVEL
) {
778 struct intrhand
*ih
= is
->is_hand
;
781 if (maybe_irq
== -1) {
/* Prefer the level IRQ with the fewest existing handlers. */
785 for (depth
= 0; ih
!= NULL
; ih
= ih
->ih_next
)
787 if (depth
< shared_depth
) {
789 shared_depth
= depth
;
793 if (maybe_irq
!= -1) {