/*	$NetBSD: intr.c,v 1.5 2008/04/08 02:33:03 garbled Exp $	*/
/*-
 * Copyright (c) 2007 Michael Lorenz
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.5 2008/04/08 02:33:03 garbled Exp $");

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>

#include <arch/powerpc/pic/picvar.h>
#include "opt_pic.h"
#include "opt_interrupt.h"
#if defined(PIC_I8259) || defined(PIC_PREPIVR)
#include <machine/isa_machdep.h>
#endif

#ifdef MULTIPROCESSOR
#include <arch/powerpc/pic/ipivar.h>
#endif

#define MAX_PICS	8	/* 8 PICs ought to be enough for everyone */

#define NVIRQ		32	/* 32 virtual IRQs */
#define NIRQ		128	/* up to 128 HW IRQs */

#define HWIRQ_MAX	(NVIRQ - 4 - 1)
#define HWIRQ_MASK	0x0fffffff
#define LEGAL_VIRQ(x)	((x) >= 0 && (x) < NVIRQ)

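/*
 * Virtual IRQs share one 32-bit pending/mask word with the soft
 * interrupt bits (SIR_*) and the clock bit (SPL_CLOCK), which occupy
 * the top four bits: HWIRQ_MASK covers only the low 28 bits, and
 * HWIRQ_MAX is NVIRQ - 4 - 1 accordingly.
 */
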
struct pic_ops *pics[MAX_PICS];	/* registered PICs, in attach order */
int num_pics = 0;
int max_base = 0;		/* first unassigned global hwirq number */
uint8_t virq[NIRQ];		/* hwirq -> virq map; 0 means unmapped */
int virq_max = 0;		/* highest virq handed out so far */
int imask[NIPL];		/* per-IPL mask of virq (and SIR_*) bits */
int primary_pic = 0;		/* index of the PIC served by pic_ext_intr() */

static int fakeintr(void *);
static int mapirq(uint32_t);
static void intr_calculatemasks(void);
static struct pic_ops *find_pic_by_irq(int);

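/* Slot 0 is never handed out: virq 0 in the virq[] map means "unmapped". */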
static struct intr_source intrsources[NVIRQ];

void
pic_init(void)
{
	int i;

	for (i = 0; i < NIRQ; i++)
		virq[i] = 0;
	memset(intrsources, 0, sizeof(intrsources));
}

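/*
 * Register a PIC: hand it the next contiguous range of global hwirq
 * numbers, [pic_intrbase, pic_intrbase + pic_numintrs), and return
 * pic_intrbase, or -1 if the PIC table is already full.
 */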
int
pic_add(struct pic_ops *pic)
{

	if (num_pics >= MAX_PICS)
		return -1;

	pics[num_pics] = pic;
	pic->pic_intrbase = max_base;
	max_base += pic->pic_numintrs;
	num_pics++;

	return pic->pic_intrbase;
}

void
pic_finish_setup(void)
{
	struct pic_ops *pic;
	int i;

	for (i = 0; i < num_pics; i++) {
		pic = pics[i];
		if (pic->pic_finish_setup != NULL)
			pic->pic_finish_setup(pic);
	}
}

static struct pic_ops *
find_pic_by_irq(int irq)
{
	struct pic_ops *current;
	int base = 0;

	while (base < num_pics) {

		current = pics[base];
		if ((irq >= current->pic_intrbase) &&
		    (irq < (current->pic_intrbase + current->pic_numintrs))) {

			return current;
		}
		base++;
	}
	return NULL;
}

static int
fakeintr(void *arg)
{

	return 0;
}

/*
 * Register an interrupt handler.
 */
void *
intr_establish(int hwirq, int type, int level, int (*ih_fun)(void *),
	void *ih_arg)
{
	struct intrhand **p, *q, *ih;
	struct intr_source *is;
	struct pic_ops *pic;
	static struct intrhand fakehand;
	int irq, maxlevel = level;

	if (maxlevel == IPL_NONE)
		maxlevel = IPL_HIGH;

	if (hwirq >= max_base) {

		panic("%s: bogus IRQ %d, max is %d", __func__, hwirq,
		    max_base - 1);
	}

	pic = find_pic_by_irq(hwirq);
	if (pic == NULL) {

		panic("%s: cannot find a pic for IRQ %d", __func__, hwirq);
	}

	irq = mapirq(hwirq);

	/* no point in sleeping unless someone can free memory. */
	ih = malloc(sizeof *ih, M_DEVBUF, cold ? M_NOWAIT : M_WAITOK);
	if (ih == NULL)
		panic("intr_establish: can't malloc handler info");

	if (!LEGAL_VIRQ(irq) || type == IST_NONE)
		panic("intr_establish: bogus irq (%d) or type (%d)", irq, type);

	is = &intrsources[irq];

	switch (is->is_type) {
	case IST_NONE:
		is->is_type = type;
		break;
	case IST_EDGE:
	case IST_LEVEL:
		if (type == is->is_type)
			break;
		/* FALLTHROUGH: a mismatched trigger type cannot be shared */
	case IST_PULSE:
		if (type != IST_NONE)
			panic("intr_establish: can't share %s with %s",
			    intr_typename(is->is_type),
			    intr_typename(type));
		break;
	}
	if (is->is_hand == NULL) {
		snprintf(is->is_source, sizeof(is->is_source), "irq %d",
		    is->is_hwirq);
		evcnt_attach_dynamic(&is->is_ev, EVCNT_TYPE_INTR, NULL,
		    pic->pic_name, is->is_source);
	}

	/*
	 * Figure out where to put the handler.
	 * This is O(N^2), but we want to preserve the order, and N is
	 * generally small.
	 */
	for (p = &is->is_hand; (q = *p) != NULL; p = &q->ih_next) {

		maxlevel = max(maxlevel, q->ih_level);
	}

	/*
	 * Actually install a fake handler momentarily, since we might be doing
	 * this with interrupts enabled and don't want the real routine called
	 * until masking is set up.
	 */
	fakehand.ih_level = level;
	fakehand.ih_fun = fakeintr;
	*p = &fakehand;

	/*
	 * Poke the real handler in now.
	 */
	ih->ih_fun = ih_fun;
	ih->ih_arg = ih_arg;
	ih->ih_next = NULL;
	ih->ih_level = level;
	ih->ih_irq = irq;
	*p = ih;

	if (pic->pic_establish_irq != NULL)
		pic->pic_establish_irq(pic, hwirq - pic->pic_intrbase,
		    is->is_type, maxlevel);

	/*
	 * Now that the handler is established we're actually ready to
	 * calculate the masks.
	 */
	intr_calculatemasks();

	return ih;
}

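/* A no-op pic_establish_irq hook for PICs that need no per-IRQ setup. */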
void
dummy_pic_establish_intr(struct pic_ops *pic, int irq, int type, int pri)
{
}

/*
 * Deregister an interrupt handler.
 */
void
intr_disestablish(void *arg)
{
	struct intrhand *ih = arg;
	int irq = ih->ih_irq;
	struct intr_source *is = &intrsources[irq];
	struct intrhand **p, *q;

	if (!LEGAL_VIRQ(irq))
		panic("intr_disestablish: bogus irq %d", irq);

	/*
	 * Remove the handler from the chain.
	 * This is O(n^2), too.
	 */
	for (p = &is->is_hand; (q = *p) != NULL && q != ih; p = &q->ih_next)
		;
	if (q)
		*p = q->ih_next;
	else
		panic("intr_disestablish: handler not registered");
	free((void *)ih, M_DEVBUF);

	intr_calculatemasks();

	if (is->is_hand == NULL) {
		is->is_type = IST_NONE;
		evcnt_detach(&is->is_ev);
	}
}

/*
 * Map the max_base hardware IRQs onto the 32 virtual IRQ (mask bit)
 * slots, allocating a new virq on first use.
 */
static int
mapirq(uint32_t irq)
{
	struct pic_ops *pic;
	int v;

	if (irq >= max_base)
		panic("invalid irq %d", irq);

	if ((pic = find_pic_by_irq(irq)) == NULL)
		panic("%s: cannot find PIC for IRQ %d", __func__, irq);

	if (virq[irq])
		return virq[irq];

	virq_max++;
	v = virq_max;
	if (v > HWIRQ_MAX)
		panic("virq overflow");

	intrsources[v].is_hwirq = irq;
	intrsources[v].is_pic = pic;
	virq[irq] = v;
#ifdef PIC_DEBUG
	printf("mapping irq %d to virq %d\n", irq, v);
#endif
	return v;
}

static const char * const intr_typenames[] = {
	[IST_NONE] = "none",
	[IST_PULSE] = "pulsed",
	[IST_EDGE] = "edge-triggered",
	[IST_LEVEL] = "level-triggered",
};

const char *
intr_typename(int type)
{
	KASSERT((unsigned int) type < __arraycount(intr_typenames));
	KASSERT(intr_typenames[type] != NULL);
	return intr_typenames[type];
}

/*
 * Recalculate the interrupt masks from scratch.
 * We could code special registry and deregistry versions of this function that
 * would be faster, but the code would be nastier, and we don't expect this to
 * happen very much anyway.
 */
static void
intr_calculatemasks(void)
{
	struct intr_source *is;
	struct intrhand *q;
	struct pic_ops *current;
	int irq, level, i, base;

	/* First, figure out which levels each IRQ uses. */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		register int levels = 0;
		for (q = is->is_hand; q; q = q->ih_next)
			levels |= 1 << q->ih_level;
		is->is_level = levels;
	}

	/* Then figure out which IRQs use each level. */
	for (level = 0; level < NIPL; level++) {
		register int irqs = 0;
		for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++)
			if (is->is_level & (1 << level))
				irqs |= 1 << irq;
		imask[level] = irqs;
	}

	/*
	 * IPL_CLOCK should mask the clock interrupt even if no interrupt
	 * handler is registered.
	 */
	imask[IPL_CLOCK] |= 1 << SPL_CLOCK;

	/*
	 * Initialize soft interrupt masks to block themselves.
	 */
	imask[IPL_SOFTCLOCK] = 1 << SIR_CLOCK;
	imask[IPL_SOFTNET] = 1 << SIR_NET;
	imask[IPL_SOFTSERIAL] = 1 << SIR_SERIAL;

	/*
	 * IPL_NONE is used for hardware interrupts that are never blocked,
	 * and do not block anything else.
	 */
	imask[IPL_NONE] = 0;

#ifdef SLOPPY_IPLS
	/*
	 * Enforce a sloppy hierarchy as in spl(9).
	 */
	/* everything above softclock must block softclock */
	for (i = IPL_SOFTCLOCK; i < NIPL; i++)
		imask[i] |= imask[IPL_SOFTCLOCK];

	/* everything above softnet must block softnet */
	for (i = IPL_SOFTNET; i < NIPL; i++)
		imask[i] |= imask[IPL_SOFTNET];

	/* IPL_TTY must block softserial */
	imask[IPL_TTY] |= imask[IPL_SOFTSERIAL];

	/* IPL_VM must block net, block I/O and tty */
	imask[IPL_VM] |= (imask[IPL_NET] | imask[IPL_BIO] | imask[IPL_TTY]);

	/* IPL_SERIAL must block IPL_TTY */
	imask[IPL_SERIAL] |= imask[IPL_TTY];

	/* IPL_HIGH must block all other priority levels */
	for (i = IPL_NONE; i < IPL_HIGH; i++)
		imask[IPL_HIGH] |= imask[i];
#else /* !SLOPPY_IPLS */
	/*
	 * Strict hierarchy - each IPL blocks everything blocked by any
	 * lower IPL.
	 */
	for (i = 1; i < NIPL; i++)
		imask[i] |= imask[i - 1];
#endif /* !SLOPPY_IPLS */

#ifdef DEBUG_IPL
	for (i = 0; i < NIPL; i++) {
		printf("%2d: %08x\n", i, imask[i]);
	}
#endif

	/* And eventually calculate the complete masks. */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		register int irqs = 1 << irq;
		for (q = is->is_hand; q; q = q->ih_next)
			irqs |= imask[q->ih_level];
		is->is_mask = irqs;
	}

	/* Lastly, disable all IRQs, then re-enable those actually in use. */
	for (base = 0; base < num_pics; base++) {
		current = pics[base];
		for (i = 0; i < current->pic_numintrs; i++)
			current->pic_disable_irq(current, i);
	}

	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		if (is->is_hand)
			pic_enable_irq(is->is_hwirq);
	}
}

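/*
 * Enable a hardware IRQ on whichever PIC owns it, preserving the
 * trigger type recorded when its handler was established.
 */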
void
pic_enable_irq(int num)
{
	struct pic_ops *current;
	int type;

	current = find_pic_by_irq(num);
	if (current == NULL)
		panic("%s: bogus IRQ %d", __func__, num);
	type = intrsources[virq[num]].is_type;
	current->pic_enable_irq(current, num - current->pic_intrbase, type);
}

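/*
 * Mark a hardware IRQ's virq bit pending in ci_ipending, briefly
 * disabling PSL_EE so the read-modify-write is atomic.
 */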
void
pic_mark_pending(int irq)
{
	struct cpu_info * const ci = curcpu();
	int v, msr;

	v = virq[irq];
	if (v == 0)
		printf("IRQ %d maps to 0\n", irq);

	msr = mfmsr();
	mtmsr(msr & ~PSL_EE);
	ci->ci_ipending |= 1 << v;
	mtmsr(msr);
}

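/*
 * Replay interrupts that were marked pending while masked: run every
 * hardware handler whose virq bit is no longer blocked by the saved
 * cpl, then (if configured) the soft interrupt queues.
 */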
void
pic_do_pending_int(void)
{
	struct cpu_info * const ci = curcpu();
	struct intr_source *is;
	struct intrhand *ih;
	struct pic_ops *pic;
	int irq;
	int pcpl;
	int hwpend;
	int emsr, dmsr;

	if (ci->ci_iactive)
		return;

	ci->ci_iactive = 1;
	emsr = mfmsr();
	KASSERT(emsr & PSL_EE);
	dmsr = emsr & ~PSL_EE;
	mtmsr(dmsr);

	pcpl = ci->ci_cpl;
#ifdef __HAVE_FAST_SOFTINTS
again:
#endif

	/* Process the pendings that are now unmasked */
	ci->ci_idepth++;
	while ((hwpend = (ci->ci_ipending & ~pcpl & HWIRQ_MASK)) != 0) {
		irq = 31 - cntlzw(hwpend);
		KASSERT(irq <= virq_max);
		ci->ci_ipending &= ~(1 << irq);
		if (irq == 0) {
			printf("VIRQ0");
			continue;
		}
		is = &intrsources[irq];
		pic = is->is_pic;

		splraise(is->is_mask);
		mtmsr(emsr);
		ih = is->is_hand;
		while (ih) {
#ifdef DIAGNOSTIC
			if (!ih->ih_fun) {
				printf("NULL interrupt handler!\n");
				panic("irq %02d, hwirq %02d, is %p\n",
				    irq, is->is_hwirq, is);
			}
#endif
			if (ih->ih_level == IPL_VM) {
				KERNEL_LOCK(1, NULL);
			}
			(*ih->ih_fun)(ih->ih_arg);
			if (ih->ih_level == IPL_VM) {
				KERNEL_UNLOCK_ONE(NULL);
			}
			ih = ih->ih_next;
		}
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;

		is->is_ev.ev_count++;
		pic->pic_reenable_irq(pic, is->is_hwirq - pic->pic_intrbase,
		    is->is_type);
	}
	ci->ci_idepth--;

#ifdef __HAVE_FAST_SOFTINTS
	if ((ci->ci_ipending & ~pcpl) & (1 << SIR_SERIAL)) {
		ci->ci_ipending &= ~(1 << SIR_SERIAL);
		splsoftserial();
		mtmsr(emsr);
		softintr__run(IPL_SOFTSERIAL);
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;
		ci->ci_ev_softserial.ev_count++;
		goto again;
	}
	if ((ci->ci_ipending & ~pcpl) & (1 << SIR_NET)) {
		ci->ci_ipending &= ~(1 << SIR_NET);
		splsoftnet();
		mtmsr(emsr);
		softintr__run(IPL_SOFTNET);
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;
		ci->ci_ev_softnet.ev_count++;
		goto again;
	}
	if ((ci->ci_ipending & ~pcpl) & (1 << SIR_CLOCK)) {
		ci->ci_ipending &= ~(1 << SIR_CLOCK);
		splsoftclock();
		mtmsr(emsr);
		softintr__run(IPL_SOFTCLOCK);
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;
		ci->ci_ev_softclock.ev_count++;
		goto again;
	}
#endif

	ci->ci_cpl = pcpl;	/* Don't use splx... we are here already! */
	ci->ci_iactive = 0;
	mtmsr(emsr);
}

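/*
 * Demultiplex one PIC: ask it which IRQ fired, then either run the
 * handler chain (raising the IPL to is_mask first) or, if the IRQ is
 * currently blocked by cpl, record it in ci_ipending for later replay.
 */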
int
pic_handle_intr(void *cookie)
{
	struct pic_ops *pic = cookie;
	struct cpu_info *ci = curcpu();
	struct intr_source *is;
	struct intrhand *ih;
	int irq, realirq;
	int pcpl, msr, r_imen, bail;

	realirq = pic->pic_get_irq(pic, PIC_GET_IRQ);
	if (realirq == 255)
		return 0;

	msr = mfmsr();
	pcpl = ci->ci_cpl;

start:

#ifdef MULTIPROCESSOR
	/* THIS IS WRONG XXX */
	while (realirq == ipiops.ppc_ipi_vector) {
		ppcipi_intr(NULL);
		pic->pic_ack_irq(pic, realirq);
		realirq = pic->pic_get_irq(pic, PIC_GET_RECHECK);
	}
	if (realirq == 255) {
		return 0;
	}
#endif

	irq = virq[realirq + pic->pic_intrbase];
#ifdef PIC_DEBUG
	if (irq == 0) {
		printf("%s: %d virq 0\n", pic->pic_name, realirq);
		goto boo;
	}
#endif /* PIC_DEBUG */
	KASSERT(realirq < pic->pic_numintrs);
	r_imen = 1 << irq;
	is = &intrsources[irq];

	if ((pcpl & r_imen) != 0) {

		ci->ci_ipending |= r_imen;	/* Masked! Mark this as pending */
		pic->pic_disable_irq(pic, realirq);
	} else {

		/* this interrupt is no longer pending */
		ci->ci_ipending &= ~r_imen;
		ci->ci_idepth++;

		splraise(is->is_mask);
		mtmsr(msr | PSL_EE);
		ih = is->is_hand;
		bail = 0;
		while ((ih != NULL) && (bail < 10)) {
			if (ih->ih_fun == NULL)
				panic("bogus handler for IRQ %s %d",
				    pic->pic_name, realirq);
			if (ih->ih_level == IPL_VM) {
				KERNEL_LOCK(1, NULL);
			}
			(*ih->ih_fun)(ih->ih_arg);
			if (ih->ih_level == IPL_VM) {
				KERNEL_UNLOCK_ONE(NULL);
			}
			ih = ih->ih_next;
			bail++;
		}
		mtmsr(msr);
		ci->ci_cpl = pcpl;

		uvmexp.intrs++;
		is->is_ev.ev_count++;
		ci->ci_idepth--;
	}

#ifdef PIC_DEBUG
boo:
#endif /* PIC_DEBUG */
	pic->pic_ack_irq(pic, realirq);
	realirq = pic->pic_get_irq(pic, PIC_GET_RECHECK);
	if (realirq != 255)
		goto start;

	mtmsr(msr | PSL_EE);
	splx(pcpl);	/* Process pendings. */
	mtmsr(msr);

	return 0;
}

void
pic_ext_intr(void)
{

	KASSERT(pics[primary_pic] != NULL);
	pic_handle_intr(pics[primary_pic]);

	return;
}

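/*
 * spl(9) primitives: cpl is a bitmask of blocked virq (and SIR_*)
 * bits, so raising the level just ORs in the bits for ncpl.
 */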
int
splraise(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	__asm volatile("sync; eieio");	/* don't reorder.... */
	ocpl = ci->ci_cpl;
	ci->ci_cpl = ocpl | ncpl;
	__asm volatile("sync; eieio");	/* reorder protect */
	return ocpl;
}

void
splx(int ncpl)
{
	struct cpu_info *ci = curcpu();

	__asm volatile("sync; eieio");	/* reorder protect */
	ci->ci_cpl = ncpl;
	if (ci->ci_ipending & ~ncpl)
		pic_do_pending_int();
	__asm volatile("sync; eieio");	/* reorder protect */
}

int
spllower(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	__asm volatile("sync; eieio");	/* reorder protect */
	ocpl = ci->ci_cpl;
	ci->ci_cpl = ncpl;
	if (ci->ci_ipending & ~ncpl)
		pic_do_pending_int();
	__asm volatile("sync; eieio");	/* reorder protect */
	return ocpl;
}

/*
 * The following code should be implemented with lwarx/stwcx. to avoid
 * the disable/enable; I need to read the manual once more....
 */
void
softintr(int ipl)
{
	int msrsave;

	msrsave = mfmsr();
	mtmsr(msrsave & ~PSL_EE);
	curcpu()->ci_ipending |= 1 << ipl;
	mtmsr(msrsave);
}

void
genppc_cpu_configure(void)
{
	aprint_normal("biomask %x netmask %x ttymask %x\n",
	    imask[IPL_BIO] & 0x1fffffff,
	    imask[IPL_NET] & 0x1fffffff,
	    imask[IPL_TTY] & 0x1fffffff);

	spl0();
}

#if defined(PIC_PREPIVR) || defined(PIC_I8259)
/*
 * isa_intr_alloc needs to be done here, because it needs direct access
 * to the various interrupt handler structures.  Returns 0 and sets
 * *irq_p on success, 1 on failure.
 */
int
genppc_isa_intr_alloc(isa_chipset_tag_t ic, struct pic_ops *pic,
    int mask, int type, int *irq_p)
{
	int irq, vi;
	int maybe_irq = -1;
	int shared_depth = 0;
	struct intr_source *is;

	if (pic == NULL)
		return 1;

	for (irq = 0; (mask != 0 && irq < pic->pic_numintrs);
	     mask >>= 1, irq++) {
		if ((mask & 1) == 0)
			continue;
		vi = virq[irq + pic->pic_intrbase];
		if (!vi) {
			*irq_p = irq;
			return 0;
		}
		is = &intrsources[vi];
		if (is->is_type == IST_NONE) {
			*irq_p = irq;
			return 0;
		}
		/* Level interrupts can be shared */
		if (type == IST_LEVEL && is->is_type == IST_LEVEL) {
			struct intrhand *ih = is->is_hand;
			int depth;

			if (maybe_irq == -1) {
				maybe_irq = irq;
				continue;
			}
			for (depth = 0; ih != NULL; ih = ih->ih_next)
				depth++;
			if (depth < shared_depth) {
				maybe_irq = irq;
				shared_depth = depth;
			}
		}
	}
	if (maybe_irq != -1) {
		*irq_p = maybe_irq;
		return 0;
	}
	return 1;
}
#endif