/*
 *	linux/arch/alpha/kernel/irq.c
 *
 *	Copyright (C) 1995 Linus Torvalds
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQ's should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/malloc.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/bitops.h>
#include <asm/machvec.h>

#include "proto.h"
#include "irq_impl.h"
#define vulp	volatile unsigned long *
#define vuip	volatile unsigned int *

/* Only uniprocessor needs this IRQ/BH locking depth, on SMP it lives
   in the per-cpu structure for cache reasons. */
#ifndef __SMP__
int __local_irq_count;
int __local_bh_count;
#endif

#if NR_IRQS > 64
# error Unable to handle more than 64 irq levels.
#endif

#ifdef CONFIG_ALPHA_GENERIC
#define ACTUAL_NR_IRQS	alpha_mv.nr_irqs
#else
#define ACTUAL_NR_IRQS	NR_IRQS
#endif

/* Reserved interrupts.  These must NEVER be requested by any driver!
   IRQ 2 used by hw cascade */
#define	IS_RESERVED_IRQ(irq)	((irq)==2)
/*
 * Shadow-copy of masked interrupts.
 */
unsigned long alpha_irq_mask = ~0UL;
/*
 * The ack_irq routine used by 80% of the systems.
 */

void
common_ack_irq(unsigned long irq)
{
	if (irq < 16) {
		/* Ack the interrupt making it the lowest priority */
		/* First the slave .. */
		if (irq > 7) {
			outb(0xE0 | (irq - 8), 0xa0);
			irq = 2;
		}
		/* .. then the master */
		outb(0xE0 | irq, 0x20);
	}
}
static void dummy_perf(unsigned long vector, struct pt_regs *regs)
{
	printk(KERN_CRIT "Performance counter interrupt!\n");
}

void (*perf_irq)(unsigned long, struct pt_regs *) = dummy_perf;
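/*
 * Note: perf_irq is a hook.  Anything that wants the performance
 * counter interrupts (type 4 in do_entInt below) can repoint it at a
 * real handler; until then dummy_perf above just logs the event.
 */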
/*
 * Dispatch device interrupts.
 */

/* Handle ISA interrupt via the PICs. */

#if defined(CONFIG_ALPHA_GENERIC)
# define IACK_SC	alpha_mv.iack_sc
#elif defined(CONFIG_ALPHA_APECS)
# define IACK_SC	APECS_IACK_SC
#elif defined(CONFIG_ALPHA_LCA)
# define IACK_SC	LCA_IACK_SC
#elif defined(CONFIG_ALPHA_CIA)
# define IACK_SC	CIA_IACK_SC
#elif defined(CONFIG_ALPHA_PYXIS)
# define IACK_SC	PYXIS_IACK_SC
#elif defined(CONFIG_ALPHA_TSUNAMI)
# define IACK_SC	TSUNAMI_IACK_SC
#elif defined(CONFIG_ALPHA_POLARIS)
# define IACK_SC	POLARIS_IACK_SC
#else
/* This is bogus but necessary to get it to compile on all platforms. */
# define IACK_SC	1L
#endif
void
isa_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
#if 1
	/*
	 * Generate a PCI interrupt acknowledge cycle.  The PIC will
	 * respond with the interrupt vector of the highest priority
	 * interrupt that is pending.  The PALcode sets up the
	 * interrupt vectors such that irq level L generates vector L.
	 */
	int j = *(vuip) IACK_SC;
	j &= 0xff;
	if (j == 7) {
		if (!(inb(0x20) & 0x80)) {
			/* It's only a passive release... */
			return;
		}
	}
	handle_irq(j, j, regs);
#else
	unsigned long pic;

	/*
	 * It seems to me that the probability of two or more *device*
	 * interrupts occurring at almost exactly the same time is
	 * pretty low.  So why pay the price of checking for
	 * additional interrupts here if the common case can be
	 * handled so much easier?
	 */
	/*
	 * The first read gives you *all* interrupting lines.
	 * Therefore, read the mask register and AND out those lines
	 * not enabled.  Note that some documentation has 21 and a1
	 * write only.  This is not true.
	 */
	pic = inb(0x20) | (inb(0xA0) << 8);	/* read isr */
	pic &= ~alpha_irq_mask;			/* apply mask */
	pic &= 0xFFFB;				/* mask out cascade & hibits */

	while (pic) {
		int j = ffz(~pic);
		pic &= pic - 1;
		handle_irq(j, j, regs);
	}
#endif
}
/* Handle interrupts from the SRM, assuming no additional weirdness.  */

void
srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
	int irq, ack;

	ack = irq = (vector - 0x800) >> 4;
	handle_irq(irq, ack, regs);
}
/*
 * Initial irq handlers.
 */

static struct irqaction timer_irq = { NULL, 0, 0, NULL, NULL, NULL};
static struct irqaction *irq_action[NR_IRQS];
static inline void
mask_irq(unsigned long irq)
{
	alpha_mv.update_irq_hw(irq, alpha_irq_mask |= 1UL << irq, 0);
}

static inline void
unmask_irq(unsigned long irq)
{
	alpha_mv.update_irq_hw(irq, alpha_irq_mask &= ~(1UL << irq), 1);
}
void
disable_irq_nosync(unsigned int irq_nr)
{
	unsigned long flags;

	save_and_cli(flags);
	mask_irq(irq_nr);
	restore_flags(flags);
}

void
disable_irq(unsigned int irq_nr)
{
	/* This works non-SMP, and SMP until we write code to distribute
	   interrupts to more than cpu 0.  */
	disable_irq_nosync(irq_nr);
}

void
enable_irq(unsigned int irq_nr)
{
	unsigned long flags;

	save_and_cli(flags);
	unmask_irq(irq_nr);
	restore_flags(flags);
}
int
check_irq(unsigned int irq)
{
	struct irqaction **p;

	p = irq_action + irq;
	if (*p == NULL)
		return 0;
	return -EBUSY;
}
int
request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
	    unsigned long irqflags, const char * devname, void *dev_id)
{
	int shared = 0;
	struct irqaction * action, **p;
	unsigned long flags;

	if (irq >= ACTUAL_NR_IRQS)
		return -EINVAL;
	if (IS_RESERVED_IRQ(irq))
		return -EINVAL;
	if (!handler)
		return -EINVAL;

	p = irq_action + irq;
	action = *p;
	if (action) {
		/* Can't share interrupts unless both agree to */
		if (!(action->flags & irqflags & SA_SHIRQ))
			return -EBUSY;

		/* Can't share interrupts unless both are same type */
		if ((action->flags ^ irqflags) & SA_INTERRUPT)
			return -EBUSY;

		/* Add new interrupt at end of irq queue */
		do {
			p = &action->next;
			action = *p;
		} while (action);
		shared = 1;
	}

	action = &timer_irq;
	if (irq != TIMER_IRQ) {
		action = (struct irqaction *)
			kmalloc(sizeof(struct irqaction), GFP_KERNEL);
	}
	if (!action)
		return -ENOMEM;

	if (irqflags & SA_SAMPLE_RANDOM)
		rand_initialize_irq(irq);

	action->handler = handler;
	action->flags = irqflags;
	action->mask = 0;
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	save_and_cli(flags);
	*p = action;

	if (!shared)
		unmask_irq(irq);

	restore_flags(flags);
	return 0;
}
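/*
 * Illustrative only: a minimal sketch of how a driver might claim a
 * shared line through request_irq() above and release it again.  The
 * irq number, handler and device name are hypothetical, not part of
 * this file.
 */
#if 0
static void example_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	/* acknowledge the (hypothetical) device here */
}

static int example_attach(void)
{
	int err = request_irq(9, example_intr, SA_SHIRQ | SA_SAMPLE_RANDOM,
			      "example", (void *) &example_attach);
	if (err)
		return err;	/* -EINVAL, -EBUSY or -ENOMEM from above */
	/* ... and on detach ... */
	free_irq(9, (void *) &example_attach);
	return 0;
}
#endif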
void
free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction * action, **p;
	unsigned long flags;

	if (irq >= ACTUAL_NR_IRQS) {
		printk("Trying to free IRQ%d\n",irq);
		return;
	}
	if (IS_RESERVED_IRQ(irq)) {
		printk("Trying to free reserved IRQ %d\n", irq);
		return;
	}
	for (p = irq + irq_action; (action = *p) != NULL; p = &action->next) {
		if (action->dev_id != dev_id)
			continue;

		/* Found it - now free it */
		save_and_cli(flags);
		*p = action->next;
		if (!irq[irq_action])
			mask_irq(irq);
		restore_flags(flags);
		kfree(action);
		return;
	}
	printk("Trying to free free IRQ%d\n",irq);
}
int get_irq_list(char *buf)
{
	int i;
	struct irqaction * action;
	char *p = buf;

#ifdef __SMP__
	p += sprintf(p, "           ");
	for (i = 0; i < smp_num_cpus; i++)
		p += sprintf(p, "CPU%d       ", i);
	*p++ = '\n';
#endif

	for (i = 0; i < NR_IRQS; i++) {
		action = irq_action[i];
		if (!action)
			continue;
		p += sprintf(p, "%3d: ",i);
#ifndef __SMP__
		p += sprintf(p, "%10u ", kstat_irqs(i));
#else
		{
			int j;
			for (j = 0; j < smp_num_cpus; j++)
				p += sprintf(p, "%10u ",
					     kstat.irqs[cpu_logical_map(j)][i]);
		}
#endif
		p += sprintf(p, " %c%s",
			     (action->flags & SA_INTERRUPT)?'+':' ',
			     action->name);

		for (action=action->next; action; action = action->next) {
			p += sprintf(p, ", %c%s",
				     (action->flags & SA_INTERRUPT)?'+':' ',
				     action->name);
		}
		*p++ = '\n';
	}
	return p - buf;
}
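/*
 * For reference, the text built above is what shows up in
 * /proc/interrupts: one line per claimed irq, a count per cpu, and a
 * '+' in front of SA_INTERRUPT ("fast") handlers.  The sample below
 * is made up, for illustration only:
 *
 *	  5:      61243  +serial
 *	 14:       9082   ide0
 */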
#ifdef __SMP__

/* Who has global_irq_lock. */
int global_irq_holder = NO_PROC_ID;

/* This protects IRQ's. */
spinlock_t global_irq_lock = SPIN_LOCK_UNLOCKED;

/* Global IRQ locking depth. */
atomic_t global_irq_count = ATOMIC_INIT(0);

/* This protects BH software state (masks, things like that). */
atomic_t global_bh_lock = ATOMIC_INIT(0);
atomic_t global_bh_count = ATOMIC_INIT(0);

static void *previous_irqholder = NULL;

#define MAXCOUNT 100000000

static void show(char * str, void *where);

#define SYNC_OTHER_CPUS(x)	udelay((x)+1);
static inline void
wait_on_irq(int cpu, void *where)
{
	int count = MAXCOUNT;

	for (;;) {

		/*
		 * Wait until all interrupts are gone. Wait
		 * for bottom half handlers unless we're
		 * already executing in one..
		 */
		if (!atomic_read(&global_irq_count)) {
			if (local_bh_count(cpu)
			    || !atomic_read(&global_bh_count))
				break;
		}

		/* Duh, we have to loop. Release the lock to avoid deadlocks */
		spin_unlock(&global_irq_lock);
		mb();

		for (;;) {
			if (!--count) {
				show("wait_on_irq", where);
				count = MAXCOUNT;
			}
			__sti();
			SYNC_OTHER_CPUS(cpu);
			__cli();
			if (atomic_read(&global_irq_count))
				continue;
			if (spin_is_locked(&global_irq_lock))
				continue;
			if (!local_bh_count(cpu)
			    && atomic_read(&global_bh_count))
				continue;
			if (spin_trylock(&global_irq_lock))
				break;
		}
	}
}
static inline void
get_irqlock(int cpu, void* where)
{
	if (!spin_trylock(&global_irq_lock)) {
		/* do we already hold the lock? */
		if (cpu == global_irq_holder) {
#if 0
			printk("get_irqlock: already held at %08lx\n",
			       previous_irqholder);
#endif
			return;
		}
		/* Uhhuh.. Somebody else got it. Wait.. */
		spin_lock(&global_irq_lock);
	}

	/*
	 * Ok, we got the lock bit.
	 * But that's actually just the easy part.. Now
	 * we need to make sure that nobody else is running
	 * in an interrupt context.
	 */
	wait_on_irq(cpu, where);

	/*
	 * Finally.
	 */
#if DEBUG_SPINLOCK
	global_irq_lock.task = current;
	global_irq_lock.previous = where;
#endif
	global_irq_holder = cpu;
	previous_irqholder = where;
}
void
__global_cli(void)
{
	int cpu = smp_processor_id();
	void *where = __builtin_return_address(0);

	/*
	 * Maximize ipl.  If ipl was previously 0 and if this thread
	 * is not in an irq, then take global_irq_lock.
	 */
	if (swpipl(7) == 0 && !local_irq_count(cpu))
		get_irqlock(cpu, where);
}

void
__global_sti(void)
{
	int cpu = smp_processor_id();

	if (!local_irq_count(cpu))
		release_irqlock(cpu);
	__sti();
}
/*
 * SMP flags value to restore to:
 * 0 - global cli
 * 1 - global sti
 * 2 - local cli
 * 3 - local sti
 */
unsigned long
__global_save_flags(void)
{
	int retval;
	int local_enabled;
	unsigned long flags;
	int cpu = smp_processor_id();

	__save_flags(flags);
	local_enabled = (!(flags & 7));
	/* default to local */
	retval = 2 + local_enabled;

	/* Check for global flags if we're not in an interrupt. */
	if (!local_irq_count(cpu)) {
		if (local_enabled)
			retval = 1;
		if (global_irq_holder == cpu)
			retval = 0;
	}
	return retval;
}
void
__global_restore_flags(unsigned long flags)
{
	switch (flags) {
	case 0:
		__global_cli();
		break;
	case 1:
		__global_sti();
		break;
	case 2:
		__cli();
		break;
	case 3:
		__sti();
		break;
	default:
		printk("global_restore_flags: %08lx (%p)\n",
		       flags, __builtin_return_address(0));
	}
}
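/*
 * A minimal sketch (not from this file) of the calling pattern these
 * routines implement.  On an __SMP__ build the generic cli()/sti()
 * and save_flags()/restore_flags() macros are expected to resolve to
 * the __global_* routines above, so the encoded 0..3 value simply
 * round-trips through driver code:
 */
#if 0
	unsigned long flags;

	save_flags(flags);	/* -> __global_save_flags(): 0..3 */
	cli();			/* -> __global_cli(): ipl 7 + irq lock */
	/* ... touch state shared with interrupt handlers ... */
	restore_flags(flags);	/* -> __global_restore_flags(flags) */
#endif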
#undef INIT_STUCK
#define INIT_STUCK (1<<26)

#undef STUCK
#define STUCK \
if (!--stuck) { \
	printk("irq_enter stuck (irq=%d, cpu=%d, global=%d)\n", \
	       irq, cpu, global_irq_holder); \
	stuck = INIT_STUCK; \
}

#undef VERBOSE_IRQLOCK_DEBUGGING
void
irq_enter(int cpu, int irq)
{
#ifdef VERBOSE_IRQLOCK_DEBUGGING
	extern void smp_show_backtrace_all_cpus(void);
#endif
	int stuck = INIT_STUCK;

	hardirq_enter(cpu, irq);
	barrier();
	while (spin_is_locked(&global_irq_lock)) {
		if (cpu == global_irq_holder) {
			int globl_locked = spin_is_locked(&global_irq_lock);
			int globl_icount = atomic_read(&global_irq_count);
			int local_count = local_irq_count(cpu);

			/* It is very important that we load the state
			   variables before we do the first call to
			   printk() as printk() could end up changing
			   them...  */

			printk("CPU[%d]: where [%p] glocked[%d] gicnt[%d]"
			       " licnt[%d]\n",
			       cpu, previous_irqholder, globl_locked,
			       globl_icount, local_count);
#ifdef VERBOSE_IRQLOCK_DEBUGGING
			printk("Performing backtrace on all CPUs,"
			       " write this down!\n");
			smp_show_backtrace_all_cpus();
#endif
			break;
		}
		STUCK;
		barrier();
	}
}
void
irq_exit(int cpu, int irq)
{
	hardirq_exit(cpu, irq);
	release_irqlock(cpu);
}
static void
show(char * str, void *where)
{
#if 0
	int i;
	unsigned long *stack;
#endif
	int cpu = smp_processor_id();

	printk("\n%s, CPU %d: %p\n", str, cpu, where);
	printk("irq:  %d [%d %d]\n",
	       atomic_read(&global_irq_count),
	       cpu_data[0].irq_count,
	       cpu_data[1].irq_count);

	printk("bh:   %d [%d %d]\n",
	       atomic_read(&global_bh_count),
	       cpu_data[0].bh_count,
	       cpu_data[1].bh_count);
#if 0
	stack = (unsigned long *) &str;
	for (i = 40; i ; i--) {
		unsigned long x = *++stack;
		if (x > (unsigned long) &init_task_union &&
		    x < (unsigned long) &vsprintf) {
			printk("<[%08lx]> ", x);
		}
	}
#endif
}
static inline void
wait_on_bh(void)
{
	int count = MAXCOUNT;
	do {
		if (!--count) {
			show("wait_on_bh", 0);
			count = ~0;
		}
		/* nothing .. wait for the other bh's to go away */
		barrier();
	} while (atomic_read(&global_bh_count) != 0);
}
/*
 * This is called when we want to synchronize with
 * bottom half handlers. We need to wait until
 * no other CPU is executing any bottom half handler.
 *
 * Don't wait if we're already running in an interrupt
 * context or are inside a bh handler.
 */
void
synchronize_bh(void)
{
	if (atomic_read(&global_bh_count) && !in_interrupt())
		wait_on_bh();
}
/*
 * From its use, I infer that synchronize_irq() stalls a thread until
 * the effects of a command to an external device are known to have
 * taken hold.  Typically, the command is to stop sending interrupts.
 * The strategy here is wait until there is at most one processor
 * (this one) in an irq.  The memory barrier serializes the write to
 * the device and the subsequent accesses of global_irq_count.
 * --jmartin
 */
#define DEBUG_SYNCHRONIZE_IRQ 0

void
synchronize_irq(void)
{
#if 0
	/* Joe's version.  */
	int cpu = smp_processor_id();
	int local_count;
	int global_count;
	int countdown = 1<<24;
	void *where = __builtin_return_address(0);

	mb();
	do {
		local_count = local_irq_count(cpu);
		global_count = atomic_read(&global_irq_count);
		if (DEBUG_SYNCHRONIZE_IRQ && (--countdown == 0)) {
			printk("%d:%d/%d\n", cpu, local_count, global_count);
			show("synchronize_irq", where);
			break;
		}
	} while (global_count != local_count);
#else
	/* Jay's version.  */
	if (atomic_read(&global_irq_count)) {
		cli();
		sti();
	}
#endif
}
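/*
 * A minimal sketch of the usage pattern the comment above describes
 * (dev_irq and dev_id are hypothetical): quiesce the interrupt
 * source, then synchronize before tearing down state a handler might
 * still be touching on another cpu.
 */
#if 0
	/* tell the (hypothetical) device to stop generating interrupts */
	disable_irq(dev_irq);
	mb();
	synchronize_irq();	/* no handler still running anywhere */
	free_irq(dev_irq, dev_id);
#endif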
#else /* !__SMP__ */

#define irq_enter(cpu, irq)	(++local_irq_count(cpu))
#define irq_exit(cpu, irq)	(--local_irq_count(cpu))

#endif /* __SMP__ */
static void
unexpected_irq(int irq, struct pt_regs * regs)
{
#if 0
#if 1
	printk("device_interrupt: unexpected interrupt %d\n", irq);
#else
	struct irqaction *action;
	int i;

	printk("IO device interrupt, irq = %d\n", irq);
	printk("PC = %016lx PS=%04lx\n", regs->pc, regs->ps);
	printk("Expecting: ");
	for (i = 0; i < ACTUAL_NR_IRQS; i++)
		if ((action = irq_action[i]))
			while (action->handler) {
				printk("[%s:%d] ", action->name, i);
				action = action->next;
			}
	printk("\n");
#endif
#endif

#if defined(CONFIG_ALPHA_JENSEN)
	/* ??? Is all this just debugging, or are the inb's and outb's
	   necessary to make things work?  */
	printk("64=%02x, 60=%02x, 3fa=%02x 2fa=%02x\n",
	       inb(0x64), inb(0x60), inb(0x3fa), inb(0x2fa));
	outb(0x0c, 0x3fc);
	outb(0x0c, 0x2fc);
	outb(0,0x61);
	outb(0,0x461);
#endif
}
void
handle_irq(int irq, int ack, struct pt_regs * regs)
{
	struct irqaction * action;
	int cpu = smp_processor_id();

	if ((unsigned) irq > ACTUAL_NR_IRQS) {
		printk("device_interrupt: illegal interrupt %d\n", irq);
		return;
	}

#if 0
	/* A useful bit of code to find out if an interrupt is going wild.  */
	{
		static unsigned int last_msg, last_cc;
		static int last_irq, count;
		unsigned int cc;

		__asm __volatile("rpcc %0" : "=r"(cc));
		++count;
		if (cc - last_msg > 150000000 || irq != last_irq) {
			printk("handle_irq: irq %d count %d cc %u @ %p\n",
			       irq, count, cc-last_cc, regs->pc);
			count = 0;
			last_msg = cc;
			last_irq = irq;
		}
		last_cc = cc;
	}
#endif

	irq_enter(cpu, irq);
	kstat.irqs[cpu][irq] += 1;
	action = irq_action[irq];

	/*
	 * For normal interrupts, we mask it out, and then ACK it.
	 * This way another (more timing-critical) interrupt can
	 * come through while we're doing this one.
	 *
	 * Note! An irq without a handler gets masked and acked, but
	 * never unmasked. The autoirq stuff depends on this (it looks
	 * at the masks before and after doing the probing).
	 */
	if (ack >= 0) {
		mask_irq(ack);
		alpha_mv.ack_irq(ack);
	}
	if (action) {
		if (action->flags & SA_SAMPLE_RANDOM)
			add_interrupt_randomness(irq);
		do {
			action->handler(irq, action->dev_id, regs);
			action = action->next;
		} while (action);
		if (ack >= 0)
			unmask_irq(ack);
	} else {
		unexpected_irq(irq, regs);
	}
	irq_exit(cpu, irq);
}
/*
 * Start listening for interrupts..
 */

unsigned long
probe_irq_on(void)
{
	struct irqaction * action;
	unsigned long irqs = 0;
	unsigned long delay;
	unsigned int i;

	for (i = ACTUAL_NR_IRQS - 1; i > 0; i--) {
		if (!(PROBE_MASK & (1UL << i))) {
			continue;
		}
		action = irq_action[i];
		if (!action) {
			enable_irq(i);
			irqs |= (1UL << i);
		}
	}

	/*
	 * Wait about 100ms for spurious interrupts to mask themselves
	 * out again...
	 */
	for (delay = jiffies + HZ/10; time_before(jiffies, delay); )
		barrier();

	/* Now filter out any obviously spurious interrupts. */
	return irqs & ~alpha_irq_mask;
}
/*
 * Get the result of the IRQ probe.. A negative result means that
 * we have several candidates (but we return the lowest-numbered
 * one).
 */

int
probe_irq_off(unsigned long irqs)
{
	int i;

	irqs &= alpha_irq_mask;
	if (!irqs)
		return 0;
	i = ffz(~irqs);
	if (irqs != (1UL << i))
		i = -i;
	return i;
}
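/*
 * A sketch of the classic autoprobe sequence built on the two
 * routines above; the device poke is hypothetical and stands in for
 * whatever makes the hardware raise its interrupt.
 */
#if 0
	unsigned long mask;
	int irq;

	mask = probe_irq_on();
	/* ... tell the (hypothetical) device to generate an interrupt ... */
	udelay(100);
	irq = probe_irq_off(mask);	/* >0: found, 0: none, <0: ambiguous */
#endif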
/*
 * The main interrupt entry point.
 */

asmlinkage void
do_entInt(unsigned long type, unsigned long vector, unsigned long la_ptr,
	  unsigned long a3, unsigned long a4, unsigned long a5,
	  struct pt_regs regs)
{
	switch (type) {
	case 0:
#ifdef __SMP__
		handle_ipi(&regs);
		return;
#else
		printk("Interprocessor interrupt? You must be kidding\n");
#endif
		break;
	case 1:
		handle_irq(RTC_IRQ, -1, &regs);
		return;
	case 2:
		alpha_mv.machine_check(vector, la_ptr, &regs);
		return;
	case 3:
		alpha_mv.device_interrupt(vector, &regs);
		return;
	case 4:
		perf_irq(vector, &regs);
		return;
	default:
		printk("Hardware intr %ld %lx? Huh?\n", type, vector);
	}
	printk("PC = %016lx PS=%04lx\n", regs.pc, regs.ps);
}
void __init
init_IRQ(void)
{
	wrent(entInt, 0);
	alpha_mv.init_irq();
}
#define MCHK_K_TPERR		0x0080
#define MCHK_K_TCPERR		0x0082
#define MCHK_K_HERR		0x0084
#define MCHK_K_ECC_C		0x0086
#define MCHK_K_ECC_NC		0x0088
#define MCHK_K_OS_BUGCHECK	0x008A
#define MCHK_K_PAL_BUGCHECK	0x0090

#ifndef __SMP__
struct mcheck_info __mcheck_info;
#endif
void
process_mcheck_info(unsigned long vector, unsigned long la_ptr,
		    struct pt_regs *regs, const char *machine,
		    int expected)
{
	struct el_common *mchk_header;
	const char *reason;

	/*
	 * See if the machine check is due to a badaddr() and if so,
	 * ignore it.
	 */

#if DEBUG_MCHECK > 0
	printk(KERN_CRIT "%s machine check %s\n", machine,
	       expected ? "expected." : "NOT expected!!!");
#endif

	if (expected) {
		int cpu = smp_processor_id();
		mcheck_expected(cpu) = 0;
		mcheck_taken(cpu) = 1;
		return;
	}

	mchk_header = (struct el_common *)la_ptr;

	printk(KERN_CRIT "%s machine check: vector=0x%lx pc=0x%lx code=0x%lx\n",
	       machine, vector, regs->pc, mchk_header->code);

	switch ((unsigned int) mchk_header->code) {
	/* Machine check reasons.  Defined according to PALcode sources. */
	case 0x80: reason = "tag parity error"; break;
	case 0x82: reason = "tag control parity error"; break;
	case 0x84: reason = "generic hard error"; break;
	case 0x86: reason = "correctable ECC error"; break;
	case 0x88: reason = "uncorrectable ECC error"; break;
	case 0x8A: reason = "OS-specific PAL bugcheck"; break;
	case 0x90: reason = "callsys in kernel mode"; break;
	case 0x96: reason = "i-cache read retryable error"; break;
	case 0x98: reason = "processor detected hard error"; break;

	/* System specific (these are for Alcor, at least): */
	case 0x203: reason = "system detected uncorrectable ECC error"; break;
	case 0x204: reason = "SIO SERR occurred on PCI bus"; break;
	case 0x205: reason = "parity error detected by CIA"; break;
	case 0x206: reason = "SIO IOCHK occurred on ISA bus"; break;
	case 0x207: reason = "non-existent memory error"; break;
	case 0x208: reason = "MCHK_K_DCSR"; break;
	case 0x209: reason = "PCI SERR detected"; break;
	case 0x20b: reason = "PCI data parity error detected"; break;
	case 0x20d: reason = "PCI address parity error detected"; break;
	case 0x20f: reason = "PCI master abort error"; break;
	case 0x211: reason = "PCI target abort error"; break;
	case 0x213: reason = "scatter/gather PTE invalid error"; break;
	case 0x215: reason = "flash ROM write error"; break;
	case 0x217: reason = "IOA timeout detected"; break;
	case 0x219: reason = "IOCHK#, EISA add-in board parity or other catastrophic error"; break;
	case 0x21b: reason = "EISA fail-safe timer timeout"; break;
	case 0x21d: reason = "EISA bus time-out"; break;
	case 0x21f: reason = "EISA software generated NMI"; break;
	case 0x221: reason = "unexpected ev5 IRQ[3] interrupt"; break;
	default: reason = "unknown"; break;
	}

	printk(KERN_CRIT "machine check type: %s%s\n",
	       reason, mchk_header->retry ? " (retryable)" : "");

	dik_show_regs(regs, NULL);

#if DEBUG_MCHECK > 1
	{
		/* Dump the logout area to give all info. */
		unsigned long *ptr = (unsigned long *)la_ptr;
		long i;
		for (i = 0; i < mchk_header->size / sizeof(long); i += 2) {
			printk(KERN_CRIT " +%8lx %016lx %016lx\n",
			       i*sizeof(long), ptr[i], ptr[i+1]);
		}
	}
#endif
}