PRCM: 34XX: Fix wrong shift value used in dpll4_m4x2_ck enable bit
[linux-ginger.git] / arch / sparc64 / kernel / traps.c
blob36974926265367824d5f7061badfef0d92d909b1
1 /* arch/sparc64/kernel/traps.c
3 * Copyright (C) 1995,1997 David S. Miller (davem@davemloft.net)
4 * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
5 */
7 /*
8 * I like traps on v9, :))))
9 */
11 #include <linux/module.h>
12 #include <linux/sched.h>
13 #include <linux/kernel.h>
14 #include <linux/kallsyms.h>
15 #include <linux/signal.h>
16 #include <linux/smp.h>
17 #include <linux/mm.h>
18 #include <linux/init.h>
19 #include <linux/kdebug.h>
21 #include <asm/smp.h>
22 #include <asm/delay.h>
23 #include <asm/system.h>
24 #include <asm/ptrace.h>
25 #include <asm/oplib.h>
26 #include <asm/page.h>
27 #include <asm/pgtable.h>
28 #include <asm/unistd.h>
29 #include <asm/uaccess.h>
30 #include <asm/fpumacro.h>
31 #include <asm/lsu.h>
32 #include <asm/dcu.h>
33 #include <asm/estate.h>
34 #include <asm/chafsr.h>
35 #include <asm/sfafsr.h>
36 #include <asm/psrcompat.h>
37 #include <asm/processor.h>
38 #include <asm/timer.h>
39 #include <asm/head.h>
40 #ifdef CONFIG_KMOD
41 #include <linux/kmod.h>
42 #endif
43 #include <asm/prom.h>
45 #include "entry.h"
/* When an irrecoverable trap occurs at tl > 0, the trap entry
 * code logs the trap state registers at every level in the trap
 * stack.  It is found at (pt_regs + sizeof(pt_regs)) and the layout
 * is as follows:
 */
/* Snapshot of the per-level trap state saved by the tl > 0 trap
 * entry code, located immediately after the pt_regs on the stack.
 */
struct tl1_traplog {
	struct {
		unsigned long tstate;	/* %tstate at this trap level */
		unsigned long tpc;	/* trap program counter */
		unsigned long tnpc;	/* trap next program counter */
		unsigned long tt;	/* trap type */
	} trapstack[4];			/* one entry per trap level */
	unsigned long tl;		/* trap level at time of error */
};
62 static void dump_tl1_traplog(struct tl1_traplog *p)
64 int i, limit;
66 printk(KERN_EMERG "TRAPLOG: Error at trap level 0x%lx, "
67 "dumping track stack.\n", p->tl);
69 limit = (tlb_type == hypervisor) ? 2 : 4;
70 for (i = 0; i < limit; i++) {
71 printk(KERN_EMERG
72 "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
73 "TNPC[%016lx] TT[%lx]\n",
74 i + 1,
75 p->trapstack[i].tstate, p->trapstack[i].tpc,
76 p->trapstack[i].tnpc, p->trapstack[i].tt);
77 print_symbol("TRAPLOG: TPC<%s>\n", p->trapstack[i].tpc);
81 void bad_trap(struct pt_regs *regs, long lvl)
83 char buffer[32];
84 siginfo_t info;
86 if (notify_die(DIE_TRAP, "bad trap", regs,
87 0, lvl, SIGTRAP) == NOTIFY_STOP)
88 return;
90 if (lvl < 0x100) {
91 sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
92 die_if_kernel(buffer, regs);
95 lvl -= 0x100;
96 if (regs->tstate & TSTATE_PRIV) {
97 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
98 die_if_kernel(buffer, regs);
100 if (test_thread_flag(TIF_32BIT)) {
101 regs->tpc &= 0xffffffff;
102 regs->tnpc &= 0xffffffff;
104 info.si_signo = SIGILL;
105 info.si_errno = 0;
106 info.si_code = ILL_ILLTRP;
107 info.si_addr = (void __user *)regs->tpc;
108 info.si_trapno = lvl;
109 force_sig_info(SIGILL, &info, current);
112 void bad_trap_tl1(struct pt_regs *regs, long lvl)
114 char buffer[32];
116 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
117 0, lvl, SIGTRAP) == NOTIFY_STOP)
118 return;
120 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
122 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
123 die_if_kernel (buffer, regs);
#ifdef CONFIG_DEBUG_BUGVERBOSE
/* Report the source location of a BUG() before the trap kills us. */
void do_BUG(const char *file, int line)
{
	/* Break console locks so the message actually gets out. */
	bust_spinlocks(1);
	printk("kernel BUG at %s:%d!\n", file, line);
}
#endif
134 void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
136 siginfo_t info;
138 if (notify_die(DIE_TRAP, "instruction access exception", regs,
139 0, 0x8, SIGTRAP) == NOTIFY_STOP)
140 return;
142 if (regs->tstate & TSTATE_PRIV) {
143 printk("spitfire_insn_access_exception: SFSR[%016lx] "
144 "SFAR[%016lx], going.\n", sfsr, sfar);
145 die_if_kernel("Iax", regs);
147 if (test_thread_flag(TIF_32BIT)) {
148 regs->tpc &= 0xffffffff;
149 regs->tnpc &= 0xffffffff;
151 info.si_signo = SIGSEGV;
152 info.si_errno = 0;
153 info.si_code = SEGV_MAPERR;
154 info.si_addr = (void __user *)regs->tpc;
155 info.si_trapno = 0;
156 force_sig_info(SIGSEGV, &info, current);
159 void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
161 if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
162 0, 0x8, SIGTRAP) == NOTIFY_STOP)
163 return;
165 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
166 spitfire_insn_access_exception(regs, sfsr, sfar);
169 void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
171 unsigned short type = (type_ctx >> 16);
172 unsigned short ctx = (type_ctx & 0xffff);
173 siginfo_t info;
175 if (notify_die(DIE_TRAP, "instruction access exception", regs,
176 0, 0x8, SIGTRAP) == NOTIFY_STOP)
177 return;
179 if (regs->tstate & TSTATE_PRIV) {
180 printk("sun4v_insn_access_exception: ADDR[%016lx] "
181 "CTX[%04x] TYPE[%04x], going.\n",
182 addr, ctx, type);
183 die_if_kernel("Iax", regs);
186 if (test_thread_flag(TIF_32BIT)) {
187 regs->tpc &= 0xffffffff;
188 regs->tnpc &= 0xffffffff;
190 info.si_signo = SIGSEGV;
191 info.si_errno = 0;
192 info.si_code = SEGV_MAPERR;
193 info.si_addr = (void __user *) addr;
194 info.si_trapno = 0;
195 force_sig_info(SIGSEGV, &info, current);
198 void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
200 if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
201 0, 0x8, SIGTRAP) == NOTIFY_STOP)
202 return;
204 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
205 sun4v_insn_access_exception(regs, addr, type_ctx);
208 void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
210 siginfo_t info;
212 if (notify_die(DIE_TRAP, "data access exception", regs,
213 0, 0x30, SIGTRAP) == NOTIFY_STOP)
214 return;
216 if (regs->tstate & TSTATE_PRIV) {
217 /* Test if this comes from uaccess places. */
218 const struct exception_table_entry *entry;
220 entry = search_exception_tables(regs->tpc);
221 if (entry) {
222 /* Ouch, somebody is trying VM hole tricks on us... */
223 #ifdef DEBUG_EXCEPTIONS
224 printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
225 printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
226 regs->tpc, entry->fixup);
227 #endif
228 regs->tpc = entry->fixup;
229 regs->tnpc = regs->tpc + 4;
230 return;
232 /* Shit... */
233 printk("spitfire_data_access_exception: SFSR[%016lx] "
234 "SFAR[%016lx], going.\n", sfsr, sfar);
235 die_if_kernel("Dax", regs);
238 info.si_signo = SIGSEGV;
239 info.si_errno = 0;
240 info.si_code = SEGV_MAPERR;
241 info.si_addr = (void __user *)sfar;
242 info.si_trapno = 0;
243 force_sig_info(SIGSEGV, &info, current);
246 void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
248 if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
249 0, 0x30, SIGTRAP) == NOTIFY_STOP)
250 return;
252 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
253 spitfire_data_access_exception(regs, sfsr, sfar);
256 void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
258 unsigned short type = (type_ctx >> 16);
259 unsigned short ctx = (type_ctx & 0xffff);
260 siginfo_t info;
262 if (notify_die(DIE_TRAP, "data access exception", regs,
263 0, 0x8, SIGTRAP) == NOTIFY_STOP)
264 return;
266 if (regs->tstate & TSTATE_PRIV) {
267 printk("sun4v_data_access_exception: ADDR[%016lx] "
268 "CTX[%04x] TYPE[%04x], going.\n",
269 addr, ctx, type);
270 die_if_kernel("Dax", regs);
273 if (test_thread_flag(TIF_32BIT)) {
274 regs->tpc &= 0xffffffff;
275 regs->tnpc &= 0xffffffff;
277 info.si_signo = SIGSEGV;
278 info.si_errno = 0;
279 info.si_code = SEGV_MAPERR;
280 info.si_addr = (void __user *) addr;
281 info.si_trapno = 0;
282 force_sig_info(SIGSEGV, &info, current);
285 void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
287 if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
288 0, 0x8, SIGTRAP) == NOTIFY_STOP)
289 return;
291 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
292 sun4v_data_access_exception(regs, addr, type_ctx);
295 #ifdef CONFIG_PCI
296 /* This is really pathetic... */
297 extern volatile int pci_poke_in_progress;
298 extern volatile int pci_poke_cpu;
299 extern volatile int pci_poke_faulted;
300 #endif
302 /* When access exceptions happen, we must do this. */
303 static void spitfire_clean_and_reenable_l1_caches(void)
305 unsigned long va;
307 if (tlb_type != spitfire)
308 BUG();
310 /* Clean 'em. */
311 for (va = 0; va < (PAGE_SIZE << 1); va += 32) {
312 spitfire_put_icache_tag(va, 0x0);
313 spitfire_put_dcache_tag(va, 0x0);
316 /* Re-enable in LSU. */
317 __asm__ __volatile__("flush %%g6\n\t"
318 "membar #Sync\n\t"
319 "stxa %0, [%%g0] %1\n\t"
320 "membar #Sync"
321 : /* no outputs */
322 : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
323 LSU_CONTROL_IM | LSU_CONTROL_DM),
324 "i" (ASI_LSU_CONTROL)
325 : "memory");
328 static void spitfire_enable_estate_errors(void)
330 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
331 "membar #Sync"
332 : /* no outputs */
333 : "r" (ESTATE_ERR_ALL),
334 "i" (ASI_ESTATE_ERROR_EN));
/* Map an 8-bit UDB ECC syndrome to the failing bit/class code used
 * by spitfire_log_udb_syndrome().  256 entries, indexed by syndrome.
 * (Reconstructed: the scrape dropped the terminating "};".)
 */
static char ecc_syndrome_table[] = {
	0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
	0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
	0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
	0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
	0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
	0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
	0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
	0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
	0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
	0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
	0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
	0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
	0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
	0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
	0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
	0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
	0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
	0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
	0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
	0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
	0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
	0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
	0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
	0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
};
372 static char *syndrome_unknown = "<Unknown>";
374 static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
376 unsigned short scode;
377 char memmod_str[64], *p;
379 if (udbl & bit) {
380 scode = ecc_syndrome_table[udbl & 0xff];
381 if (prom_getunumber(scode, afar,
382 memmod_str, sizeof(memmod_str)) == -1)
383 p = syndrome_unknown;
384 else
385 p = memmod_str;
386 printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
387 "Memory Module \"%s\"\n",
388 smp_processor_id(), scode, p);
391 if (udbh & bit) {
392 scode = ecc_syndrome_table[udbh & 0xff];
393 if (prom_getunumber(scode, afar,
394 memmod_str, sizeof(memmod_str)) == -1)
395 p = syndrome_unknown;
396 else
397 p = memmod_str;
398 printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
399 "Memory Module \"%s\"\n",
400 smp_processor_id(), scode, p);
405 static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
408 printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
409 "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
410 smp_processor_id(), afsr, afar, udbl, udbh, tl1);
412 spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);
414 /* We always log it, even if someone is listening for this
415 * trap.
417 notify_die(DIE_TRAP, "Correctable ECC Error", regs,
418 0, TRAP_TYPE_CEE, SIGTRAP);
420 /* The Correctable ECC Error trap does not disable I/D caches. So
421 * we only have to restore the ESTATE Error Enable register.
423 spitfire_enable_estate_errors();
426 static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
428 siginfo_t info;
430 printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
431 "AFAR[%lx] UDBL[%lx] UDBH[%ld] TT[%lx] TL>1[%d]\n",
432 smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);
434 /* XXX add more human friendly logging of the error status
435 * XXX as is implemented for cheetah
438 spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);
440 /* We always log it, even if someone is listening for this
441 * trap.
443 notify_die(DIE_TRAP, "Uncorrectable Error", regs,
444 0, tt, SIGTRAP);
446 if (regs->tstate & TSTATE_PRIV) {
447 if (tl1)
448 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
449 die_if_kernel("UE", regs);
452 /* XXX need more intelligent processing here, such as is implemented
453 * XXX for cheetah errors, in fact if the E-cache still holds the
454 * XXX line with bad parity this will loop
457 spitfire_clean_and_reenable_l1_caches();
458 spitfire_enable_estate_errors();
460 if (test_thread_flag(TIF_32BIT)) {
461 regs->tpc &= 0xffffffff;
462 regs->tnpc &= 0xffffffff;
464 info.si_signo = SIGBUS;
465 info.si_errno = 0;
466 info.si_code = BUS_OBJERR;
467 info.si_addr = (void *)0;
468 info.si_trapno = 0;
469 force_sig_info(SIGBUS, &info, current);
472 void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
474 unsigned long afsr, tt, udbh, udbl;
475 int tl1;
477 afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
478 tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
479 tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
480 udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
481 udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;
483 #ifdef CONFIG_PCI
484 if (tt == TRAP_TYPE_DAE &&
485 pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
486 spitfire_clean_and_reenable_l1_caches();
487 spitfire_enable_estate_errors();
489 pci_poke_faulted = 1;
490 regs->tnpc = regs->tpc + 4;
491 return;
493 #endif
495 if (afsr & SFAFSR_UE)
496 spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);
498 if (tt == TRAP_TYPE_CEE) {
499 /* Handle the case where we took a CEE trap, but ACK'd
500 * only the UE state in the UDB error registers.
502 if (afsr & SFAFSR_UE) {
503 if (udbh & UDBE_CE) {
504 __asm__ __volatile__(
505 "stxa %0, [%1] %2\n\t"
506 "membar #Sync"
507 : /* no outputs */
508 : "r" (udbh & UDBE_CE),
509 "r" (0x0), "i" (ASI_UDB_ERROR_W));
511 if (udbl & UDBE_CE) {
512 __asm__ __volatile__(
513 "stxa %0, [%1] %2\n\t"
514 "membar #Sync"
515 : /* no outputs */
516 : "r" (udbl & UDBE_CE),
517 "r" (0x18), "i" (ASI_UDB_ERROR_W));
521 spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
525 int cheetah_pcache_forced_on;
527 void cheetah_enable_pcache(void)
529 unsigned long dcr;
531 printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
532 smp_processor_id());
534 __asm__ __volatile__("ldxa [%%g0] %1, %0"
535 : "=r" (dcr)
536 : "i" (ASI_DCU_CONTROL_REG));
537 dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
538 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
539 "membar #Sync"
540 : /* no outputs */
541 : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
544 /* Cheetah error trap handling. */
545 static unsigned long ecache_flush_physbase;
546 static unsigned long ecache_flush_linesize;
547 static unsigned long ecache_flush_size;
/* This table is ordered in priority of errors and matches the
 * AFAR overwrite policy as well.
 */
struct afsr_error_table {
	unsigned long mask;	/* AFSR bit(s) identifying this error */
	const char *name;	/* human readable description */
};
558 static const char CHAFSR_PERR_msg[] =
559 "System interface protocol error";
560 static const char CHAFSR_IERR_msg[] =
561 "Internal processor error";
562 static const char CHAFSR_ISAP_msg[] =
563 "System request parity error on incoming addresss";
564 static const char CHAFSR_UCU_msg[] =
565 "Uncorrectable E-cache ECC error for ifetch/data";
566 static const char CHAFSR_UCC_msg[] =
567 "SW Correctable E-cache ECC error for ifetch/data";
568 static const char CHAFSR_UE_msg[] =
569 "Uncorrectable system bus data ECC error for read";
570 static const char CHAFSR_EDU_msg[] =
571 "Uncorrectable E-cache ECC error for stmerge/blkld";
572 static const char CHAFSR_EMU_msg[] =
573 "Uncorrectable system bus MTAG error";
574 static const char CHAFSR_WDU_msg[] =
575 "Uncorrectable E-cache ECC error for writeback";
576 static const char CHAFSR_CPU_msg[] =
577 "Uncorrectable ECC error for copyout";
578 static const char CHAFSR_CE_msg[] =
579 "HW corrected system bus data ECC error for read";
580 static const char CHAFSR_EDC_msg[] =
581 "HW corrected E-cache ECC error for stmerge/blkld";
582 static const char CHAFSR_EMC_msg[] =
583 "HW corrected system bus MTAG ECC error";
584 static const char CHAFSR_WDC_msg[] =
585 "HW corrected E-cache ECC error for writeback";
586 static const char CHAFSR_CPC_msg[] =
587 "HW corrected ECC error for copyout";
588 static const char CHAFSR_TO_msg[] =
589 "Unmapped error from system bus";
590 static const char CHAFSR_BERR_msg[] =
591 "Bus error response from system bus";
592 static const char CHAFSR_IVC_msg[] =
593 "HW corrected system bus data ECC error for ivec read";
594 static const char CHAFSR_IVU_msg[] =
595 "Uncorrectable system bus data ECC error for ivec read";
596 static struct afsr_error_table __cheetah_error_table[] = {
597 { CHAFSR_PERR, CHAFSR_PERR_msg },
598 { CHAFSR_IERR, CHAFSR_IERR_msg },
599 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
600 { CHAFSR_UCU, CHAFSR_UCU_msg },
601 { CHAFSR_UCC, CHAFSR_UCC_msg },
602 { CHAFSR_UE, CHAFSR_UE_msg },
603 { CHAFSR_EDU, CHAFSR_EDU_msg },
604 { CHAFSR_EMU, CHAFSR_EMU_msg },
605 { CHAFSR_WDU, CHAFSR_WDU_msg },
606 { CHAFSR_CPU, CHAFSR_CPU_msg },
607 { CHAFSR_CE, CHAFSR_CE_msg },
608 { CHAFSR_EDC, CHAFSR_EDC_msg },
609 { CHAFSR_EMC, CHAFSR_EMC_msg },
610 { CHAFSR_WDC, CHAFSR_WDC_msg },
611 { CHAFSR_CPC, CHAFSR_CPC_msg },
612 { CHAFSR_TO, CHAFSR_TO_msg },
613 { CHAFSR_BERR, CHAFSR_BERR_msg },
614 /* These two do not update the AFAR. */
615 { CHAFSR_IVC, CHAFSR_IVC_msg },
616 { CHAFSR_IVU, CHAFSR_IVU_msg },
617 { 0, NULL },
619 static const char CHPAFSR_DTO_msg[] =
620 "System bus unmapped error for prefetch/storequeue-read";
621 static const char CHPAFSR_DBERR_msg[] =
622 "System bus error for prefetch/storequeue-read";
623 static const char CHPAFSR_THCE_msg[] =
624 "Hardware corrected E-cache Tag ECC error";
625 static const char CHPAFSR_TSCE_msg[] =
626 "SW handled correctable E-cache Tag ECC error";
627 static const char CHPAFSR_TUE_msg[] =
628 "Uncorrectable E-cache Tag ECC error";
629 static const char CHPAFSR_DUE_msg[] =
630 "System bus uncorrectable data ECC error due to prefetch/store-fill";
631 static struct afsr_error_table __cheetah_plus_error_table[] = {
632 { CHAFSR_PERR, CHAFSR_PERR_msg },
633 { CHAFSR_IERR, CHAFSR_IERR_msg },
634 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
635 { CHAFSR_UCU, CHAFSR_UCU_msg },
636 { CHAFSR_UCC, CHAFSR_UCC_msg },
637 { CHAFSR_UE, CHAFSR_UE_msg },
638 { CHAFSR_EDU, CHAFSR_EDU_msg },
639 { CHAFSR_EMU, CHAFSR_EMU_msg },
640 { CHAFSR_WDU, CHAFSR_WDU_msg },
641 { CHAFSR_CPU, CHAFSR_CPU_msg },
642 { CHAFSR_CE, CHAFSR_CE_msg },
643 { CHAFSR_EDC, CHAFSR_EDC_msg },
644 { CHAFSR_EMC, CHAFSR_EMC_msg },
645 { CHAFSR_WDC, CHAFSR_WDC_msg },
646 { CHAFSR_CPC, CHAFSR_CPC_msg },
647 { CHAFSR_TO, CHAFSR_TO_msg },
648 { CHAFSR_BERR, CHAFSR_BERR_msg },
649 { CHPAFSR_DTO, CHPAFSR_DTO_msg },
650 { CHPAFSR_DBERR, CHPAFSR_DBERR_msg },
651 { CHPAFSR_THCE, CHPAFSR_THCE_msg },
652 { CHPAFSR_TSCE, CHPAFSR_TSCE_msg },
653 { CHPAFSR_TUE, CHPAFSR_TUE_msg },
654 { CHPAFSR_DUE, CHPAFSR_DUE_msg },
655 /* These two do not update the AFAR. */
656 { CHAFSR_IVC, CHAFSR_IVC_msg },
657 { CHAFSR_IVU, CHAFSR_IVU_msg },
658 { 0, NULL },
660 static const char JPAFSR_JETO_msg[] =
661 "System interface protocol error, hw timeout caused";
662 static const char JPAFSR_SCE_msg[] =
663 "Parity error on system snoop results";
664 static const char JPAFSR_JEIC_msg[] =
665 "System interface protocol error, illegal command detected";
666 static const char JPAFSR_JEIT_msg[] =
667 "System interface protocol error, illegal ADTYPE detected";
668 static const char JPAFSR_OM_msg[] =
669 "Out of range memory error has occurred";
670 static const char JPAFSR_ETP_msg[] =
671 "Parity error on L2 cache tag SRAM";
672 static const char JPAFSR_UMS_msg[] =
673 "Error due to unsupported store";
674 static const char JPAFSR_RUE_msg[] =
675 "Uncorrectable ECC error from remote cache/memory";
676 static const char JPAFSR_RCE_msg[] =
677 "Correctable ECC error from remote cache/memory";
678 static const char JPAFSR_BP_msg[] =
679 "JBUS parity error on returned read data";
680 static const char JPAFSR_WBP_msg[] =
681 "JBUS parity error on data for writeback or block store";
682 static const char JPAFSR_FRC_msg[] =
683 "Foreign read to DRAM incurring correctable ECC error";
684 static const char JPAFSR_FRU_msg[] =
685 "Foreign read to DRAM incurring uncorrectable ECC error";
686 static struct afsr_error_table __jalapeno_error_table[] = {
687 { JPAFSR_JETO, JPAFSR_JETO_msg },
688 { JPAFSR_SCE, JPAFSR_SCE_msg },
689 { JPAFSR_JEIC, JPAFSR_JEIC_msg },
690 { JPAFSR_JEIT, JPAFSR_JEIT_msg },
691 { CHAFSR_PERR, CHAFSR_PERR_msg },
692 { CHAFSR_IERR, CHAFSR_IERR_msg },
693 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
694 { CHAFSR_UCU, CHAFSR_UCU_msg },
695 { CHAFSR_UCC, CHAFSR_UCC_msg },
696 { CHAFSR_UE, CHAFSR_UE_msg },
697 { CHAFSR_EDU, CHAFSR_EDU_msg },
698 { JPAFSR_OM, JPAFSR_OM_msg },
699 { CHAFSR_WDU, CHAFSR_WDU_msg },
700 { CHAFSR_CPU, CHAFSR_CPU_msg },
701 { CHAFSR_CE, CHAFSR_CE_msg },
702 { CHAFSR_EDC, CHAFSR_EDC_msg },
703 { JPAFSR_ETP, JPAFSR_ETP_msg },
704 { CHAFSR_WDC, CHAFSR_WDC_msg },
705 { CHAFSR_CPC, CHAFSR_CPC_msg },
706 { CHAFSR_TO, CHAFSR_TO_msg },
707 { CHAFSR_BERR, CHAFSR_BERR_msg },
708 { JPAFSR_UMS, JPAFSR_UMS_msg },
709 { JPAFSR_RUE, JPAFSR_RUE_msg },
710 { JPAFSR_RCE, JPAFSR_RCE_msg },
711 { JPAFSR_BP, JPAFSR_BP_msg },
712 { JPAFSR_WBP, JPAFSR_WBP_msg },
713 { JPAFSR_FRC, JPAFSR_FRC_msg },
714 { JPAFSR_FRU, JPAFSR_FRU_msg },
715 /* These two do not update the AFAR. */
716 { CHAFSR_IVU, CHAFSR_IVU_msg },
717 { 0, NULL },
719 static struct afsr_error_table *cheetah_error_table;
720 static unsigned long cheetah_afsr_errors;
722 struct cheetah_err_info *cheetah_error_log;
724 static inline struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
726 struct cheetah_err_info *p;
727 int cpu = smp_processor_id();
729 if (!cheetah_error_log)
730 return NULL;
732 p = cheetah_error_log + (cpu * 2);
733 if ((afsr & CHAFSR_TL1) != 0UL)
734 p++;
736 return p;
739 extern unsigned int tl0_icpe[], tl1_icpe[];
740 extern unsigned int tl0_dcpe[], tl1_dcpe[];
741 extern unsigned int tl0_fecc[], tl1_fecc[];
742 extern unsigned int tl0_cee[], tl1_cee[];
743 extern unsigned int tl0_iae[], tl1_iae[];
744 extern unsigned int tl0_dae[], tl1_dae[];
745 extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
746 extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
747 extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
748 extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
749 extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];
751 void __init cheetah_ecache_flush_init(void)
753 unsigned long largest_size, smallest_linesize, order, ver;
754 int i, sz;
756 /* Scan all cpu device tree nodes, note two values:
757 * 1) largest E-cache size
758 * 2) smallest E-cache line size
760 largest_size = 0UL;
761 smallest_linesize = ~0UL;
763 for (i = 0; i < NR_CPUS; i++) {
764 unsigned long val;
766 val = cpu_data(i).ecache_size;
767 if (!val)
768 continue;
770 if (val > largest_size)
771 largest_size = val;
773 val = cpu_data(i).ecache_line_size;
774 if (val < smallest_linesize)
775 smallest_linesize = val;
779 if (largest_size == 0UL || smallest_linesize == ~0UL) {
780 prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
781 "parameters.\n");
782 prom_halt();
785 ecache_flush_size = (2 * largest_size);
786 ecache_flush_linesize = smallest_linesize;
788 ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);
790 if (ecache_flush_physbase == ~0UL) {
791 prom_printf("cheetah_ecache_flush_init: Cannot find %d byte "
792 "contiguous physical memory.\n",
793 ecache_flush_size);
794 prom_halt();
797 /* Now allocate error trap reporting scoreboard. */
798 sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
799 for (order = 0; order < MAX_ORDER; order++) {
800 if ((PAGE_SIZE << order) >= sz)
801 break;
803 cheetah_error_log = (struct cheetah_err_info *)
804 __get_free_pages(GFP_KERNEL, order);
805 if (!cheetah_error_log) {
806 prom_printf("cheetah_ecache_flush_init: Failed to allocate "
807 "error logging scoreboard (%d bytes).\n", sz);
808 prom_halt();
810 memset(cheetah_error_log, 0, PAGE_SIZE << order);
812 /* Mark all AFSRs as invalid so that the trap handler will
813 * log new new information there.
815 for (i = 0; i < 2 * NR_CPUS; i++)
816 cheetah_error_log[i].afsr = CHAFSR_INVALID;
818 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
819 if ((ver >> 32) == __JALAPENO_ID ||
820 (ver >> 32) == __SERRANO_ID) {
821 cheetah_error_table = &__jalapeno_error_table[0];
822 cheetah_afsr_errors = JPAFSR_ERRORS;
823 } else if ((ver >> 32) == 0x003e0015) {
824 cheetah_error_table = &__cheetah_plus_error_table[0];
825 cheetah_afsr_errors = CHPAFSR_ERRORS;
826 } else {
827 cheetah_error_table = &__cheetah_error_table[0];
828 cheetah_afsr_errors = CHAFSR_ERRORS;
831 /* Now patch trap tables. */
832 memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
833 memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
834 memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
835 memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
836 memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
837 memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
838 memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
839 memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
840 if (tlb_type == cheetah_plus) {
841 memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
842 memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
843 memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
844 memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
846 flushi(PAGE_OFFSET);
849 static void cheetah_flush_ecache(void)
851 unsigned long flush_base = ecache_flush_physbase;
852 unsigned long flush_linesize = ecache_flush_linesize;
853 unsigned long flush_size = ecache_flush_size;
855 __asm__ __volatile__("1: subcc %0, %4, %0\n\t"
856 " bne,pt %%xcc, 1b\n\t"
857 " ldxa [%2 + %0] %3, %%g0\n\t"
858 : "=&r" (flush_size)
859 : "0" (flush_size), "r" (flush_base),
860 "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
863 static void cheetah_flush_ecache_line(unsigned long physaddr)
865 unsigned long alias;
867 physaddr &= ~(8UL - 1UL);
868 physaddr = (ecache_flush_physbase +
869 (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
870 alias = physaddr + (ecache_flush_size >> 1UL);
871 __asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
872 "ldxa [%1] %2, %%g0\n\t"
873 "membar #Sync"
874 : /* no outputs */
875 : "r" (physaddr), "r" (alias),
876 "i" (ASI_PHYS_USE_EC));
879 /* Unfortunately, the diagnostic access to the I-cache tags we need to
880 * use to clear the thing interferes with I-cache coherency transactions.
882 * So we must only flush the I-cache when it is disabled.
884 static void __cheetah_flush_icache(void)
886 unsigned int icache_size, icache_line_size;
887 unsigned long addr;
889 icache_size = local_cpu_data().icache_size;
890 icache_line_size = local_cpu_data().icache_line_size;
892 /* Clear the valid bits in all the tags. */
893 for (addr = 0; addr < icache_size; addr += icache_line_size) {
894 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
895 "membar #Sync"
896 : /* no outputs */
897 : "r" (addr | (2 << 3)),
898 "i" (ASI_IC_TAG));
902 static void cheetah_flush_icache(void)
904 unsigned long dcu_save;
906 /* Save current DCU, disable I-cache. */
907 __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
908 "or %0, %2, %%g1\n\t"
909 "stxa %%g1, [%%g0] %1\n\t"
910 "membar #Sync"
911 : "=r" (dcu_save)
912 : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
913 : "g1");
915 __cheetah_flush_icache();
917 /* Restore DCU register */
918 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
919 "membar #Sync"
920 : /* no outputs */
921 : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
924 static void cheetah_flush_dcache(void)
926 unsigned int dcache_size, dcache_line_size;
927 unsigned long addr;
929 dcache_size = local_cpu_data().dcache_size;
930 dcache_line_size = local_cpu_data().dcache_line_size;
932 for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
933 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
934 "membar #Sync"
935 : /* no outputs */
936 : "r" (addr), "i" (ASI_DCACHE_TAG));
940 /* In order to make the even parity correct we must do two things.
941 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
942 * Next, we clear out all 32-bytes of data for that line. Data of
943 * all-zero + tag parity value of zero == correct parity.
945 static void cheetah_plus_zap_dcache_parity(void)
947 unsigned int dcache_size, dcache_line_size;
948 unsigned long addr;
950 dcache_size = local_cpu_data().dcache_size;
951 dcache_line_size = local_cpu_data().dcache_line_size;
953 for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
954 unsigned long tag = (addr >> 14);
955 unsigned long line;
957 __asm__ __volatile__("membar #Sync\n\t"
958 "stxa %0, [%1] %2\n\t"
959 "membar #Sync"
960 : /* no outputs */
961 : "r" (tag), "r" (addr),
962 "i" (ASI_DCACHE_UTAG));
963 for (line = addr; line < addr + dcache_line_size; line += 8)
964 __asm__ __volatile__("membar #Sync\n\t"
965 "stxa %%g0, [%0] %1\n\t"
966 "membar #Sync"
967 : /* no outputs */
968 : "r" (line),
969 "i" (ASI_DCACHE_DATA));
973 /* Conversion tables used to frob Cheetah AFSR syndrome values into
974 * something palatable to the memory controller driver get_unumber
975 * routine.
977 #define MT0 137
978 #define MT1 138
979 #define MT2 139
980 #define NONE 254
981 #define MTC0 140
982 #define MTC1 141
983 #define MTC2 142
984 #define MTC3 143
985 #define C0 128
986 #define C1 129
987 #define C2 130
988 #define C3 131
989 #define C4 132
990 #define C5 133
991 #define C6 134
992 #define C7 135
993 #define C8 136
994 #define M2 144
995 #define M3 145
996 #define M4 146
997 #define M 147
/* E-cache ECC syndrome decode table, indexed by the 9-bit E_SYND
 * value from the AFSR.  Entries < 128 give the flipped data bit
 * number; C0-C8, M/M2/M3/M4 and NONE are the special codes above.
 */
static unsigned char cheetah_ecc_syntab[] = {
/*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
/*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
/*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
/*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
/*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
/*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
/*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
/*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
/*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
/*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
/*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
/*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
/*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
/*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
/*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
/*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
/*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
/*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
/*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
/*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
/*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
/*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
/*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
/*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
/*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
/*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
/*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
/*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
/*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
/*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
/*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
/*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
};
/* Mtag syndrome decode table, indexed by the 4-bit M_SYND value
 * from the AFSR.
 */
static unsigned char cheetah_mtag_syntab[] = {
	NONE, MTC0,
	MTC1, NONE,
	MTC2, NONE,
	NONE, MT0,
	MTC3, NONE,
	NONE, MT1,
	NONE, MT2,
	NONE, NONE
};
1043 /* Return the highest priority error conditon mentioned. */
1044 static inline unsigned long cheetah_get_hipri(unsigned long afsr)
1046 unsigned long tmp = 0;
1047 int i;
1049 for (i = 0; cheetah_error_table[i].mask; i++) {
1050 if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
1051 return tmp;
1053 return tmp;
1056 static const char *cheetah_get_string(unsigned long bit)
1058 int i;
1060 for (i = 0; cheetah_error_table[i].mask; i++) {
1061 if ((bit & cheetah_error_table[i].mask) != 0UL)
1062 return cheetah_error_table[i].name;
1064 return "???";
1067 extern int chmc_getunumber(int, unsigned long, char *, int);
/* Pretty-print everything captured for a Cheetah error trap: the
 * AFSR/AFAR pair, decoded syndromes, the D/I/E-cache snapshots in
 * *info, and any lower-priority error bits also set in AFSR.
 * Severity of the log lines follows RECOVERABLE.
 */
static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
			       unsigned long afsr, unsigned long afar, int recoverable)
{
	unsigned long hipri;
	char unum[256];

	printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       afsr, afar,
	       (afsr & CHAFSR_TL1) ? 1 : 0);
	printk("%s" "ERROR(%d): TPC[%lx] TNPC[%lx] O7[%lx] TSTATE[%lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
	printk("%s" "ERROR(%d): ",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
	print_symbol("TPC<%s>\n", regs->tpc);
	printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
	       (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
	       (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
	       (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
	hipri = cheetah_get_hipri(afsr);
	printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       hipri, cheetah_get_string(hipri));

	/* Try to get unumber if relevant. */
#define ESYND_ERRORS	(CHAFSR_IVC | CHAFSR_IVU | \
			 CHAFSR_CPC | CHAFSR_CPU | \
			 CHAFSR_UE | CHAFSR_CE | \
			 CHAFSR_EDC | CHAFSR_EDU | \
			 CHAFSR_UCC | CHAFSR_UCU | \
			 CHAFSR_WDU | CHAFSR_WDC)
#define MSYND_ERRORS	(CHAFSR_EMC | CHAFSR_EMU)
	if (afsr & ESYND_ERRORS) {
		int syndrome;
		int ret;

		/* E-cache data syndrome: decode through the ECC table
		 * and ask the memory controller driver for a DIMM name.
		 */
		syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
		syndrome = cheetah_ecc_syntab[syndrome];
		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	} else if (afsr & MSYND_ERRORS) {
		int syndrome;
		int ret;

		/* Mtag syndrome instead. */
		syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
		syndrome = cheetah_mtag_syntab[syndrome];
		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	}

	/* Now dump the cache snapshots. */
	printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->dcache_index,
	       info->dcache_tag,
	       info->dcache_utag,
	       info->dcache_stag);
	printk("%s" "ERROR(%d): D-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->dcache_data[0],
	       info->dcache_data[1],
	       info->dcache_data[2],
	       info->dcache_data[3]);
	printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx] "
	       "u[%016lx] l[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->icache_index,
	       info->icache_tag,
	       info->icache_utag,
	       info->icache_stag,
	       info->icache_upper,
	       info->icache_lower);
	printk("%s" "ERROR(%d): I-cache INSN0[%016lx] INSN1[%016lx] INSN2[%016lx] INSN3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[0],
	       info->icache_data[1],
	       info->icache_data[2],
	       info->icache_data[3]);
	printk("%s" "ERROR(%d): I-cache INSN4[%016lx] INSN5[%016lx] INSN6[%016lx] INSN7[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[4],
	       info->icache_data[5],
	       info->icache_data[6],
	       info->icache_data[7]);
	printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->ecache_index, info->ecache_tag);
	printk("%s" "ERROR(%d): E-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->ecache_data[0],
	       info->ecache_data[1],
	       info->ecache_data[2],
	       info->ecache_data[3]);

	/* Report every remaining error bit beyond the highest-priority
	 * one, highest priority first.
	 */
	afsr = (afsr & ~hipri) & cheetah_afsr_errors;
	while (afsr != 0UL) {
		unsigned long bit = cheetah_get_hipri(afsr);

		printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
		       (recoverable ? KERN_WARNING : KERN_CRIT),
		       bit, cheetah_get_string(bit));

		afsr &= ~bit;
	}

	if (!recoverable)
		printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
}
/* Re-read the AFSR and test for newly latched errors.  Returns
 * non-zero (and records AFSR/AFAR into *logp when non-NULL) if any
 * error bit is set.  The observed AFSR value is written back to
 * clear the latched bits (write-one-to-clear on Cheetah).
 */
static int cheetah_recheck_errors(struct cheetah_err_info *logp)
{
	unsigned long afsr, afar;
	int ret = 0;

	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     : "=r" (afsr)
			     : "i" (ASI_AFSR));
	if ((afsr & cheetah_afsr_errors) != 0) {
		if (logp != NULL) {
			__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
					     : "=r" (afar)
					     : "i" (ASI_AFAR));
			logp->afsr = afsr;
			logp->afar = afar;
		}
		ret = 1;
	}
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync\n\t"
			     : : "r" (afsr), "i" (ASI_AFSR));

	return ret;
}
/* Fast-ECC error trap handler.  Flushes the caches, re-enables them
 * and error reporting, logs the error, and panics unless the
 * condition is recoverable.
 */
void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable;

	/* Flush E-cache */
	cheetah_flush_ecache();

	p = cheetah_get_error_log(afsr);
	if (!p) {
		/* No log slot: too early in boot to do anything sane. */
		prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	cheetah_flush_icache();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Fast-ECC error trap.\n");

	/* Flush E-cache to kick the error trap handlers out. */
	cheetah_flush_ecache();
}
/* Try to fix a correctable error by pushing the line out from
 * the E-cache.  Recheck error reporting registers to see if the
 * problem is intermittent.
 *
 * Returns 0 if no error re-appeared (intermittent problem), 1 if it
 * re-appeared once but cleared after a retry, and 2 if it persisted.
 */
static int cheetah_fix_ce(unsigned long physaddr)
{
	unsigned long orig_estate;
	unsigned long alias1, alias2;
	int ret;

	/* Make sure correctable error traps are disabled. */
	__asm__ __volatile__("ldxa [%%g0] %2, %0\n\t"
			     "andn %0, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %2\n\t"
			     "membar #Sync"
			     : "=&r" (orig_estate)
			     : "i" (ESTATE_ERROR_CEEN),
			       "i" (ASI_ESTATE_ERROR_EN)
			     : "g1");

	/* We calculate alias addresses that will force the
	 * cache line in question out of the E-cache.  Then
	 * we bring it back in with an atomic instruction so
	 * that we get it in some modified/exclusive state,
	 * then we displace it again to try and get proper ECC
	 * pushed back into the system.
	 */
	physaddr &= ~(8UL - 1UL);	/* 8-byte align the target. */
	alias1 = (ecache_flush_physbase +
		  (physaddr & ((ecache_flush_size >> 1) - 1)));
	alias2 = alias1 + (ecache_flush_size >> 1);
	__asm__ __volatile__("ldxa [%0] %3, %%g0\n\t"
			     "ldxa [%1] %3, %%g0\n\t"
			     "casxa [%2] %3, %%g0, %%g0\n\t"
			     "membar #StoreLoad | #StoreStore\n\t"
			     "ldxa [%0] %3, %%g0\n\t"
			     "ldxa [%1] %3, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (alias1), "r" (alias2),
			       "r" (physaddr), "i" (ASI_PHYS_USE_EC));

	/* Did that trigger another error? */
	if (cheetah_recheck_errors(NULL)) {
		/* Try one more time. */
		__asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
				     "membar #Sync"
				     : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
		if (cheetah_recheck_errors(NULL))
			ret = 2;
		else
			ret = 1;
	} else {
		/* No new error, intermittent problem. */
		ret = 0;
	}

	/* Restore error enables. */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));

	return ret;
}
1363 /* Return non-zero if PADDR is a valid physical memory address. */
1364 static int cheetah_check_main_memory(unsigned long paddr)
1366 unsigned long vaddr = PAGE_OFFSET + paddr;
1368 if (vaddr > (unsigned long) high_memory)
1369 return 0;
1371 return kern_addr_valid(vaddr);
/* Correctable-ECC error trap handler.  Attempts to scrub the bad
 * line when it is in main memory, flushes/re-enables the I-cache,
 * re-enables CE reporting, and logs the error.
 */
void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

	p = cheetah_get_error_log(afsr);
	if (!p) {
		/* No log slot: too early in boot to do anything sane. */
		prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
		/* XXX Might want to log the results of this operation
		 * XXX somewhere... -DaveM
		 */
		cheetah_fix_ce(afar);
	}

	{
		/* Scope for deciding how much of the E-cache to flush,
		 * based on which error bit fired.
		 */
		int flush_all, flush_line;

		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_CPC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
				flush_line = 1;
			else
				flush_all = 1;
		}

		/* Trap handler only disabled I-cache, flush it. */
		cheetah_flush_icache();

		/* Re-enable I-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_IC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR */
	(void) cheetah_recheck_errors(&local_snapshot);

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Correctable-ECC error trap.\n");
}
/* Deferred asynchronous error trap handler.  Handles the special PCI
 * poke case, flushes/re-enables caches and error reporting, logs the
 * error, and -- when the fault hit user memory -- pins the bad page
 * and resumes at the exception-table fixup instead of panicking.
 */
void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

#ifdef CONFIG_PCI
	/* Check for the special PCI poke sequence. */
	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I-cache/D-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_DC | DCU_IC)
				     : "g1");

		/* Re-enable error reporting */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_ESTATE_ERROR_EN),
				       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
				     : "g1");

		/* Clear the latched error state. */
		(void) cheetah_recheck_errors(NULL);

		/* Flag the poke as faulted and skip the poking insn. */
		pci_poke_faulted = 1;
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	p = cheetah_get_error_log(afsr);
	if (!p) {
		/* No log slot: too early in boot to do anything sane. */
		prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	{
		/* Scope for deciding how much of the E-cache to flush,
		 * based on which error bit fired.
		 */
		int flush_all, flush_line;

		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDU) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_BERR) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
				flush_line = 1;
			else
				flush_all = 1;
		}

		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I/D caches */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_IC | DCU_DC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	/* "Recoverable" here means we try to yank the page from ever
	 * being newly used again.  This depends upon a few things:
	 * 1) Must be main memory, and AFAR must be valid.
	 * 2) If we trapped from user, OK.
	 * 3) Else, if we trapped from kernel we must find exception
	 *    table entry (ie. we have to have been accessing user
	 *    space).
	 *
	 * If AFAR is not in main memory, or we trapped from kernel
	 * and cannot find an exception table entry, it is unacceptable
	 * to try and continue.
	 */
	if (recoverable && is_memory) {
		if ((regs->tstate & TSTATE_PRIV) == 0UL) {
			/* OK, usermode access. */
			recoverable = 1;
		} else {
			const struct exception_table_entry *entry;

			entry = search_exception_tables(regs->tpc);
			if (entry) {
				/* OK, kernel access to userspace. */
				recoverable = 1;

			} else {
				/* BAD, privileged state is corrupted. */
				recoverable = 0;
			}

			if (recoverable) {
				/* Leak a reference on the bad page so it is
				 * never handed out again.
				 */
				if (pfn_valid(afar >> PAGE_SHIFT))
					get_page(pfn_to_page(afar >> PAGE_SHIFT));
				else
					recoverable = 0;

				/* Only perform fixup if we still have a
				 * recoverable condition.
				 */
				if (recoverable) {
					regs->tpc = entry->fixup;
					regs->tnpc = regs->tpc + 4;
				}
			}
		}
	} else {
		recoverable = 0;
	}

	if (!recoverable)
		panic("Irrecoverable deferred error trap.\n");
}
/* Handle a D/I cache parity error trap.  TYPE is encoded as:
 *
 *  Bit0: 0=dcache,1=icache
 *  Bit1: 0=recoverable,1=unrecoverable
 *
 * The hardware has disabled both the I-cache and D-cache in
 * the %dcr register.
 */
void cheetah_plus_parity_error(int type, struct pt_regs *regs)
{
	/* Discard the corrupt cache contents before re-enabling. */
	if (type & 0x1)
		__cheetah_flush_icache();
	else
		cheetah_plus_zap_dcache_parity();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	if (type & 0x2) {
		/* Unrecoverable: log and die. */
		printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
		       smp_processor_id(),
		       (type & 0x1) ? 'I' : 'D',
		       regs->tpc);
		print_symbol(KERN_EMERG "TPC<%s>\n", regs->tpc);
		panic("Irrecoverable Cheetah+ parity error.");
	}

	printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
	       smp_processor_id(),
	       (type & 0x1) ? 'I' : 'D',
	       regs->tpc);
	print_symbol(KERN_WARNING "TPC<%s>\n", regs->tpc);
}
/* Layout of one sun4v error report queue entry, as read from the
 * per-cpu resumable/non-resumable error queues.
 * NOTE(review): field layout must match the hypervisor ABI --
 * confirm against the sun4v error queue specification.
 */
struct sun4v_error_entry {
	u64	err_handle;
	u64	err_stick;

	u32	err_type;
#define SUN4V_ERR_TYPE_UNDEFINED	0
#define SUN4V_ERR_TYPE_UNCORRECTED_RES	1
#define SUN4V_ERR_TYPE_PRECISE_NONRES	2
#define SUN4V_ERR_TYPE_DEFERRED_NONRES	3
#define SUN4V_ERR_TYPE_WARNING_RES	4

	u32	err_attrs;
#define SUN4V_ERR_ATTRS_PROCESSOR	0x00000001
#define SUN4V_ERR_ATTRS_MEMORY		0x00000002
#define SUN4V_ERR_ATTRS_PIO		0x00000004
#define SUN4V_ERR_ATTRS_INT_REGISTERS	0x00000008
#define SUN4V_ERR_ATTRS_FPU_REGISTERS	0x00000010
#define SUN4V_ERR_ATTRS_USER_MODE	0x01000000
#define SUN4V_ERR_ATTRS_PRIV_MODE	0x02000000
#define SUN4V_ERR_ATTRS_RES_QUEUE_FULL	0x80000000

	u64	err_raddr;
	u32	err_size;
	u16	err_cpu;
	u16	err_pad;
};

/* Queue-overflow event counters, bumped by the overflow trap handlers
 * and reported lazily by sun4v_log_error().
 */
static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
1733 static const char *sun4v_err_type_to_str(u32 type)
1735 switch (type) {
1736 case SUN4V_ERR_TYPE_UNDEFINED:
1737 return "undefined";
1738 case SUN4V_ERR_TYPE_UNCORRECTED_RES:
1739 return "uncorrected resumable";
1740 case SUN4V_ERR_TYPE_PRECISE_NONRES:
1741 return "precise nonresumable";
1742 case SUN4V_ERR_TYPE_DEFERRED_NONRES:
1743 return "deferred nonresumable";
1744 case SUN4V_ERR_TYPE_WARNING_RES:
1745 return "warning resumable";
1746 default:
1747 return "unknown";
/* Pretty-print one sun4v error queue entry reported on CPU, prefixed
 * with PFX, then dump registers and report any overflow count that
 * has accumulated in *OCNT.
 */
static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
{
	int cnt;

	printk("%s: Reporting on cpu %d\n", pfx, cpu);
	printk("%s: err_handle[%lx] err_stick[%lx] err_type[%08x:%s]\n",
	       pfx,
	       ent->err_handle, ent->err_stick,
	       ent->err_type,
	       sun4v_err_type_to_str(ent->err_type));
	printk("%s: err_attrs[%08x:%s %s %s %s %s %s %s %s]\n",
	       pfx,
	       ent->err_attrs,
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PROCESSOR) ?
		"processor" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_MEMORY) ?
		"memory" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PIO) ?
		"pio" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_INT_REGISTERS) ?
		"integer-regs" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_FPU_REGISTERS) ?
		"fpu-regs" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_USER_MODE) ?
		"user" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PRIV_MODE) ?
		"privileged" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL) ?
		"queue-full" : ""));
	printk("%s: err_raddr[%016lx] err_size[%u] err_cpu[%u]\n",
	       pfx,
	       ent->err_raddr, ent->err_size, ent->err_cpu);

	__show_regs(regs);

	/* Report-and-reset the counter bumped by the overflow traps. */
	if ((cnt = atomic_read(ocnt)) != 0) {
		atomic_set(ocnt, 0);
		wmb();
		printk("%s: Queue overflowed %d times.\n",
		       pfx, cnt);
	}
}
1794 /* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
1795 * Log the event and clear the first word of the entry.
1797 void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
1799 struct sun4v_error_entry *ent, local_copy;
1800 struct trap_per_cpu *tb;
1801 unsigned long paddr;
1802 int cpu;
1804 cpu = get_cpu();
1806 tb = &trap_block[cpu];
1807 paddr = tb->resum_kernel_buf_pa + offset;
1808 ent = __va(paddr);
1810 memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
1812 /* We have a local copy now, so release the entry. */
1813 ent->err_handle = 0;
1814 wmb();
1816 put_cpu();
1818 if (ent->err_type == SUN4V_ERR_TYPE_WARNING_RES) {
1819 /* If err_type is 0x4, it's a powerdown request. Do
1820 * not do the usual resumable error log because that
1821 * makes it look like some abnormal error.
1823 printk(KERN_INFO "Power down request...\n");
1824 kill_cad_pid(SIGINT, 1);
1825 return;
1828 sun4v_log_error(regs, &local_copy, cpu,
1829 KERN_ERR "RESUMABLE ERROR",
1830 &sun4v_resum_oflow_cnt);
/* If we try to printk() we'll probably make matters worse, by trying
 * to retake locks this cpu already holds or causing more errors.  So
 * just bump a counter, and we'll report these counter bumps above.
 */
void sun4v_resum_overflow(struct pt_regs *regs)
{
	atomic_inc(&sun4v_resum_oflow_cnt);
}
/* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
 * Log the event, clear the first word of the entry, and die.
 */
void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
{
	struct sun4v_error_entry *ent, local_copy;
	struct trap_per_cpu *tb;
	unsigned long paddr;
	int cpu;

	cpu = get_cpu();

	tb = &trap_block[cpu];
	paddr = tb->nonresum_kernel_buf_pa + offset;
	ent = __va(paddr);

	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));

	/* We have a local copy now, so release the entry. */
	ent->err_handle = 0;
	wmb();

	put_cpu();

#ifdef CONFIG_PCI
	/* Check for the special PCI poke sequence. */
	if (pci_poke_in_progress && pci_poke_cpu == cpu) {
		/* Flag the poke as faulted and skip the poking insn
		 * instead of dying.
		 */
		pci_poke_faulted = 1;
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	sun4v_log_error(regs, &local_copy, cpu,
			KERN_EMERG "NON-RESUMABLE ERROR",
			&sun4v_nonresum_oflow_cnt);

	panic("Non-resumable error.");
}
/* If we try to printk() we'll probably make matters worse, by trying
 * to retake locks this cpu already holds or causing more errors.  So
 * just bump a counter, and we'll report these counter bumps above.
 */
void sun4v_nonresum_overflow(struct pt_regs *regs)
{
	/* XXX Actually even this can make not that much sense.  Perhaps
	 * XXX we should just pull the plug and panic directly from here?
	 */
	atomic_inc(&sun4v_nonresum_oflow_cnt);
}
/* ITLB error state; only read here -- presumably filled in by the
 * trap entry assembly before calling sun4v_itlb_error_report().
 * TODO(review): confirm against the trap table code.
 */
unsigned long sun4v_err_itlb_vaddr;
unsigned long sun4v_err_itlb_ctx;
unsigned long sun4v_err_itlb_pte;
unsigned long sun4v_err_itlb_error;

/* Fatal: report a sun4v ITLB error and halt into the PROM. */
void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
{
	if (tl > 1)
		dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

	printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
	       regs->tpc, tl);
	print_symbol(KERN_EMERG "SUN4V-ITLB: TPC<%s>\n", regs->tpc);
	printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
	print_symbol(KERN_EMERG "SUN4V-ITLB: O7<%s>\n", regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
	       "pte[%lx] error[%lx]\n",
	       sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx,
	       sun4v_err_itlb_pte, sun4v_err_itlb_error);

	prom_halt();
}
/* DTLB error state; only read here -- presumably filled in by the
 * trap entry assembly before calling sun4v_dtlb_error_report().
 * TODO(review): confirm against the trap table code.
 */
unsigned long sun4v_err_dtlb_vaddr;
unsigned long sun4v_err_dtlb_ctx;
unsigned long sun4v_err_dtlb_pte;
unsigned long sun4v_err_dtlb_error;

/* Fatal: report a sun4v DTLB error and halt into the PROM. */
void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
{
	if (tl > 1)
		dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

	printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
	       regs->tpc, tl);
	print_symbol(KERN_EMERG "SUN4V-DTLB: TPC<%s>\n", regs->tpc);
	printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
	print_symbol(KERN_EMERG "SUN4V-DTLB: O7<%s>\n", regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
	       "pte[%lx] error[%lx]\n",
	       sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx,
	       sun4v_err_dtlb_pte, sun4v_err_dtlb_error);

	prom_halt();
}
/* Report a failed sun4v TLB-related hypervisor call. */
void hypervisor_tlbop_error(unsigned long err, unsigned long op)
{
	printk(KERN_CRIT "SUN4V: TLB hv call error %lu for op %lu\n",
	       err, op);
}
/* As hypervisor_tlbop_error(), but for the cross-call variant. */
void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op)
{
	printk(KERN_CRIT "SUN4V: XCALL TLB hv call error %lu for op %lu\n",
	       err, op);
}
1953 void do_fpe_common(struct pt_regs *regs)
1955 if (regs->tstate & TSTATE_PRIV) {
1956 regs->tpc = regs->tnpc;
1957 regs->tnpc += 4;
1958 } else {
1959 unsigned long fsr = current_thread_info()->xfsr[0];
1960 siginfo_t info;
1962 if (test_thread_flag(TIF_32BIT)) {
1963 regs->tpc &= 0xffffffff;
1964 regs->tnpc &= 0xffffffff;
1966 info.si_signo = SIGFPE;
1967 info.si_errno = 0;
1968 info.si_addr = (void __user *)regs->tpc;
1969 info.si_trapno = 0;
1970 info.si_code = __SI_FAULT;
1971 if ((fsr & 0x1c000) == (1 << 14)) {
1972 if (fsr & 0x10)
1973 info.si_code = FPE_FLTINV;
1974 else if (fsr & 0x08)
1975 info.si_code = FPE_FLTOVF;
1976 else if (fsr & 0x04)
1977 info.si_code = FPE_FLTUND;
1978 else if (fsr & 0x02)
1979 info.si_code = FPE_FLTDIV;
1980 else if (fsr & 0x01)
1981 info.si_code = FPE_FLTRES;
1983 force_sig_info(SIGFPE, &info, current);
/* fp-exception-ieee-754 trap (tt 0x24). */
void do_fpieee(struct pt_regs *regs)
{
	if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
		       0, 0x24, SIGFPE) == NOTIFY_STOP)
		return;

	do_fpe_common(regs);
}
1996 extern int do_mathemu(struct pt_regs *, struct fpustate *);
/* fp-exception-other trap (tt 0x25).  For unfinished/unimplemented
 * FPops, try software emulation before falling back to the common
 * SIGFPE path.
 */
void do_fpother(struct pt_regs *regs)
{
	struct fpustate *f = FPUSTATE;
	int ret = 0;

	if (notify_die(DIE_TRAP, "fpu exception other", regs,
		       0, 0x25, SIGFPE) == NOTIFY_STOP)
		return;

	/* %fsr.ftt (bits 16:14) selects the trap cause. */
	switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
	case (2 << 14): /* unfinished_FPop */
	case (3 << 14): /* unimplemented_FPop */
		ret = do_mathemu(regs, f);
		break;
	}
	/* Non-zero means the emulator fully handled the instruction. */
	if (ret)
		return;
	do_fpe_common(regs);
}
2018 void do_tof(struct pt_regs *regs)
2020 siginfo_t info;
2022 if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
2023 0, 0x26, SIGEMT) == NOTIFY_STOP)
2024 return;
2026 if (regs->tstate & TSTATE_PRIV)
2027 die_if_kernel("Penguin overflow trap from kernel mode", regs);
2028 if (test_thread_flag(TIF_32BIT)) {
2029 regs->tpc &= 0xffffffff;
2030 regs->tnpc &= 0xffffffff;
2032 info.si_signo = SIGEMT;
2033 info.si_errno = 0;
2034 info.si_code = EMT_TAGOVF;
2035 info.si_addr = (void __user *)regs->tpc;
2036 info.si_trapno = 0;
2037 force_sig_info(SIGEMT, &info, current);
2040 void do_div0(struct pt_regs *regs)
2042 siginfo_t info;
2044 if (notify_die(DIE_TRAP, "integer division by zero", regs,
2045 0, 0x28, SIGFPE) == NOTIFY_STOP)
2046 return;
2048 if (regs->tstate & TSTATE_PRIV)
2049 die_if_kernel("TL0: Kernel divide by zero.", regs);
2050 if (test_thread_flag(TIF_32BIT)) {
2051 regs->tpc &= 0xffffffff;
2052 regs->tnpc &= 0xffffffff;
2054 info.si_signo = SIGFPE;
2055 info.si_errno = 0;
2056 info.si_code = FPE_INTDIV;
2057 info.si_addr = (void __user *)regs->tpc;
2058 info.si_trapno = 0;
2059 force_sig_info(SIGFPE, &info, current);
/* Dump the instruction words around a kernel fault PC: three words
 * before through five after, with the faulting word (i == 0) shown
 * in <...> brackets.
 */
static void instruction_dump(unsigned int *pc)
{
	int i;

	/* Instructions are 4-byte aligned; bail on a bogus PC. */
	if ((((unsigned long) pc) & 3))
		return;

	printk("Instruction DUMP:");
	for (i = -3; i < 6; i++)
		printk("%c%08x%c",i?' ':'<',pc[i],i?' ':'>');
	printk("\n");
}
2075 static void user_instruction_dump(unsigned int __user *pc)
2077 int i;
2078 unsigned int buf[9];
2080 if ((((unsigned long) pc) & 3))
2081 return;
2083 if (copy_from_user(buf, pc - 3, sizeof(buf)))
2084 return;
2086 printk("Instruction DUMP:");
2087 for (i = 0; i < 9; i++)
2088 printk("%c%08x%c",i==3?' ':'<',buf[i],i==3?' ':'>');
2089 printk("\n");
2092 void show_stack(struct task_struct *tsk, unsigned long *_ksp)
2094 unsigned long fp, thread_base, ksp;
2095 struct thread_info *tp;
2096 int count = 0;
2098 ksp = (unsigned long) _ksp;
2099 if (!tsk)
2100 tsk = current;
2101 tp = task_thread_info(tsk);
2102 if (ksp == 0UL) {
2103 if (tsk == current)
2104 asm("mov %%fp, %0" : "=r" (ksp));
2105 else
2106 ksp = tp->ksp;
2108 if (tp == current_thread_info())
2109 flushw_all();
2111 fp = ksp + STACK_BIAS;
2112 thread_base = (unsigned long) tp;
2114 printk("Call Trace:");
2115 #ifdef CONFIG_KALLSYMS
2116 printk("\n");
2117 #endif
2118 do {
2119 struct sparc_stackf *sf;
2120 struct pt_regs *regs;
2121 unsigned long pc;
2123 /* Bogus frame pointer? */
2124 if (fp < (thread_base + sizeof(struct thread_info)) ||
2125 fp >= (thread_base + THREAD_SIZE))
2126 break;
2127 sf = (struct sparc_stackf *) fp;
2128 regs = (struct pt_regs *) (sf + 1);
2130 if ((regs->magic & ~0x1ff) == PT_REGS_MAGIC) {
2131 if (!(regs->tstate & TSTATE_PRIV))
2132 break;
2133 pc = regs->tpc;
2134 fp = regs->u_regs[UREG_I6] + STACK_BIAS;
2135 } else {
2136 pc = sf->callers_pc;
2137 fp = (unsigned long)sf->fp + STACK_BIAS;
2140 printk(" [%016lx] ", pc);
2141 print_symbol("%s\n", pc);
2142 } while (++count < 16);
2143 #ifndef CONFIG_KALLSYMS
2144 printk("\n");
2145 #endif
2148 void dump_stack(void)
2150 show_stack(current, NULL);
2153 EXPORT_SYMBOL(dump_stack);
2155 static inline int is_kernel_stack(struct task_struct *task,
2156 struct reg_window *rw)
2158 unsigned long rw_addr = (unsigned long) rw;
2159 unsigned long thread_base, thread_end;
2161 if (rw_addr < PAGE_OFFSET) {
2162 if (task != &init_task)
2163 return 0;
2166 thread_base = (unsigned long) task_stack_page(task);
2167 thread_end = thread_base + sizeof(union thread_union);
2168 if (rw_addr >= thread_base &&
2169 rw_addr < thread_end &&
2170 !(rw_addr & 0x7UL))
2171 return 1;
2173 return 0;
2176 static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
2178 unsigned long fp = rw->ins[6];
2180 if (!fp)
2181 return NULL;
2183 return (struct reg_window *) (fp + STACK_BIAS);
2186 void die_if_kernel(char *str, struct pt_regs *regs)
2188 static int die_counter;
2189 extern void smp_report_regs(void);
2190 int count = 0;
2192 /* Amuse the user. */
2193 printk(
2194 " \\|/ ____ \\|/\n"
2195 " \"@'/ .. \\`@\"\n"
2196 " /_| \\__/ |_\\\n"
2197 " \\__U_/\n");
2199 printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter);
2200 notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
2201 __asm__ __volatile__("flushw");
2202 __show_regs(regs);
2203 add_taint(TAINT_DIE);
2204 if (regs->tstate & TSTATE_PRIV) {
2205 struct reg_window *rw = (struct reg_window *)
2206 (regs->u_regs[UREG_FP] + STACK_BIAS);
2208 /* Stop the back trace when we hit userland or we
2209 * find some badly aligned kernel stack.
2211 while (rw &&
2212 count++ < 30&&
2213 is_kernel_stack(current, rw)) {
2214 printk("Caller[%016lx]", rw->ins[7]);
2215 print_symbol(": %s", rw->ins[7]);
2216 printk("\n");
2218 rw = kernel_stack_up(rw);
2220 instruction_dump ((unsigned int *) regs->tpc);
2221 } else {
2222 if (test_thread_flag(TIF_32BIT)) {
2223 regs->tpc &= 0xffffffff;
2224 regs->tnpc &= 0xffffffff;
2226 user_instruction_dump ((unsigned int __user *) regs->tpc);
2228 #if 0
2229 #ifdef CONFIG_SMP
2230 smp_report_regs();
2231 #endif
2232 #endif
2233 if (regs->tstate & TSTATE_PRIV)
2234 do_exit(SIGKILL);
2235 do_exit(SIGSEGV);
2238 #define VIS_OPCODE_MASK ((0x3 << 30) | (0x3f << 19))
2239 #define VIS_OPCODE_VAL ((0x2 << 30) | (0x36 << 19))
2241 extern int handle_popc(u32 insn, struct pt_regs *regs);
2242 extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);
2243 extern int vis_emul(struct pt_regs *, unsigned int);
2245 void do_illegal_instruction(struct pt_regs *regs)
2247 unsigned long pc = regs->tpc;
2248 unsigned long tstate = regs->tstate;
2249 u32 insn;
2250 siginfo_t info;
2252 if (notify_die(DIE_TRAP, "illegal instruction", regs,
2253 0, 0x10, SIGILL) == NOTIFY_STOP)
2254 return;
2256 if (tstate & TSTATE_PRIV)
2257 die_if_kernel("Kernel illegal instruction", regs);
2258 if (test_thread_flag(TIF_32BIT))
2259 pc = (u32)pc;
2260 if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
2261 if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
2262 if (handle_popc(insn, regs))
2263 return;
2264 } else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
2265 if (handle_ldf_stq(insn, regs))
2266 return;
2267 } else if (tlb_type == hypervisor) {
2268 if ((insn & VIS_OPCODE_MASK) == VIS_OPCODE_VAL) {
2269 if (!vis_emul(regs, insn))
2270 return;
2271 } else {
2272 struct fpustate *f = FPUSTATE;
2274 /* XXX maybe verify XFSR bits like
2275 * XXX do_fpother() does?
2277 if (do_mathemu(regs, f))
2278 return;
2282 info.si_signo = SIGILL;
2283 info.si_errno = 0;
2284 info.si_code = ILL_ILLOPC;
2285 info.si_addr = (void __user *)pc;
2286 info.si_trapno = 0;
2287 force_sig_info(SIGILL, &info, current);
2290 extern void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn);
2292 void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
2294 siginfo_t info;
2296 if (notify_die(DIE_TRAP, "memory address unaligned", regs,
2297 0, 0x34, SIGSEGV) == NOTIFY_STOP)
2298 return;
2300 if (regs->tstate & TSTATE_PRIV) {
2301 kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
2302 return;
2304 info.si_signo = SIGBUS;
2305 info.si_errno = 0;
2306 info.si_code = BUS_ADRALN;
2307 info.si_addr = (void __user *)sfar;
2308 info.si_trapno = 0;
2309 force_sig_info(SIGBUS, &info, current);
2312 void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
2314 siginfo_t info;
2316 if (notify_die(DIE_TRAP, "memory address unaligned", regs,
2317 0, 0x34, SIGSEGV) == NOTIFY_STOP)
2318 return;
2320 if (regs->tstate & TSTATE_PRIV) {
2321 kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
2322 return;
2324 info.si_signo = SIGBUS;
2325 info.si_errno = 0;
2326 info.si_code = BUS_ADRALN;
2327 info.si_addr = (void __user *) addr;
2328 info.si_trapno = 0;
2329 force_sig_info(SIGBUS, &info, current);
2332 void do_privop(struct pt_regs *regs)
2334 siginfo_t info;
2336 if (notify_die(DIE_TRAP, "privileged operation", regs,
2337 0, 0x11, SIGILL) == NOTIFY_STOP)
2338 return;
2340 if (test_thread_flag(TIF_32BIT)) {
2341 regs->tpc &= 0xffffffff;
2342 regs->tnpc &= 0xffffffff;
2344 info.si_signo = SIGILL;
2345 info.si_errno = 0;
2346 info.si_code = ILL_PRVOPC;
2347 info.si_addr = (void __user *)regs->tpc;
2348 info.si_trapno = 0;
2349 force_sig_info(SIGILL, &info, current);
/* Privileged action trap: treated identically to a privileged op. */
void do_privact(struct pt_regs *regs)
{
	do_privop(regs);
}
/* Trap level 1 stuff or other traps we should never see... */
void do_cee(struct pt_regs *regs)
{
	die_if_kernel("TL0: Cache Error Exception", regs);
}
2363 void do_cee_tl1(struct pt_regs *regs)
2365 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2366 die_if_kernel("TL1: Cache Error Exception", regs);
2369 void do_dae_tl1(struct pt_regs *regs)
2371 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2372 die_if_kernel("TL1: Data Access Exception", regs);
2375 void do_iae_tl1(struct pt_regs *regs)
2377 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2378 die_if_kernel("TL1: Instruction Access Exception", regs);
2381 void do_div0_tl1(struct pt_regs *regs)
2383 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2384 die_if_kernel("TL1: DIV0 Exception", regs);
2387 void do_fpdis_tl1(struct pt_regs *regs)
2389 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2390 die_if_kernel("TL1: FPU Disabled", regs);
2393 void do_fpieee_tl1(struct pt_regs *regs)
2395 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2396 die_if_kernel("TL1: FPU IEEE Exception", regs);
2399 void do_fpother_tl1(struct pt_regs *regs)
2401 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2402 die_if_kernel("TL1: FPU Other Exception", regs);
2405 void do_ill_tl1(struct pt_regs *regs)
2407 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2408 die_if_kernel("TL1: Illegal Instruction Exception", regs);
2411 void do_irq_tl1(struct pt_regs *regs)
2413 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2414 die_if_kernel("TL1: IRQ Exception", regs);
2417 void do_lddfmna_tl1(struct pt_regs *regs)
2419 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2420 die_if_kernel("TL1: LDDF Exception", regs);
2423 void do_stdfmna_tl1(struct pt_regs *regs)
2425 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2426 die_if_kernel("TL1: STDF Exception", regs);
/* Physical watchpoint hit at TL0: always fatal. */
void do_paw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Phys Watchpoint Exception", regs);
}
2434 void do_paw_tl1(struct pt_regs *regs)
2436 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2437 die_if_kernel("TL1: Phys Watchpoint Exception", regs);
/* Virtual watchpoint hit at TL0: always fatal. */
void do_vaw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Virt Watchpoint Exception", regs);
}
2445 void do_vaw_tl1(struct pt_regs *regs)
2447 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2448 die_if_kernel("TL1: Virt Watchpoint Exception", regs);
2451 void do_tof_tl1(struct pt_regs *regs)
2453 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2454 die_if_kernel("TL1: Tag Overflow Exception", regs);
2457 void do_getpsr(struct pt_regs *regs)
2459 regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
2460 regs->tpc = regs->tnpc;
2461 regs->tnpc += 4;
2462 if (test_thread_flag(TIF_32BIT)) {
2463 regs->tpc &= 0xffffffff;
2464 regs->tnpc &= 0xffffffff;
2468 struct trap_per_cpu trap_block[NR_CPUS];
2470 /* This can get invoked before sched_init() so play it super safe
2471 * and use hard_smp_processor_id().
2473 void init_cur_cpu_trap(struct thread_info *t)
2475 int cpu = hard_smp_processor_id();
2476 struct trap_per_cpu *p = &trap_block[cpu];
2478 p->thread = t;
2479 p->pgd_paddr = 0;
2482 extern void thread_info_offsets_are_bolixed_dave(void);
2483 extern void trap_per_cpu_offsets_are_bolixed_dave(void);
2484 extern void tsb_config_offsets_are_bolixed_dave(void);
2486 /* Only invoked on boot processor. */
2487 void __init trap_init(void)
2489 /* Compile time sanity check. */
2490 if (TI_TASK != offsetof(struct thread_info, task) ||
2491 TI_FLAGS != offsetof(struct thread_info, flags) ||
2492 TI_CPU != offsetof(struct thread_info, cpu) ||
2493 TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
2494 TI_KSP != offsetof(struct thread_info, ksp) ||
2495 TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) ||
2496 TI_KREGS != offsetof(struct thread_info, kregs) ||
2497 TI_UTRAPS != offsetof(struct thread_info, utraps) ||
2498 TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) ||
2499 TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
2500 TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
2501 TI_GSR != offsetof(struct thread_info, gsr) ||
2502 TI_XFSR != offsetof(struct thread_info, xfsr) ||
2503 TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) ||
2504 TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) ||
2505 TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
2506 TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
2507 TI_PCR != offsetof(struct thread_info, pcr_reg) ||
2508 TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
2509 TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
2510 TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
2511 TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) ||
2512 TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) ||
2513 TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) ||
2514 TI_FPREGS != offsetof(struct thread_info, fpregs) ||
2515 (TI_FPREGS & (64 - 1)))
2516 thread_info_offsets_are_bolixed_dave();
2518 if (TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, thread) ||
2519 (TRAP_PER_CPU_PGD_PADDR !=
2520 offsetof(struct trap_per_cpu, pgd_paddr)) ||
2521 (TRAP_PER_CPU_CPU_MONDO_PA !=
2522 offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
2523 (TRAP_PER_CPU_DEV_MONDO_PA !=
2524 offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
2525 (TRAP_PER_CPU_RESUM_MONDO_PA !=
2526 offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
2527 (TRAP_PER_CPU_RESUM_KBUF_PA !=
2528 offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
2529 (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
2530 offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
2531 (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
2532 offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
2533 (TRAP_PER_CPU_FAULT_INFO !=
2534 offsetof(struct trap_per_cpu, fault_info)) ||
2535 (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
2536 offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
2537 (TRAP_PER_CPU_CPU_LIST_PA !=
2538 offsetof(struct trap_per_cpu, cpu_list_pa)) ||
2539 (TRAP_PER_CPU_TSB_HUGE !=
2540 offsetof(struct trap_per_cpu, tsb_huge)) ||
2541 (TRAP_PER_CPU_TSB_HUGE_TEMP !=
2542 offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
2543 (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
2544 offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
2545 (TRAP_PER_CPU_CPU_MONDO_QMASK !=
2546 offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
2547 (TRAP_PER_CPU_DEV_MONDO_QMASK !=
2548 offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
2549 (TRAP_PER_CPU_RESUM_QMASK !=
2550 offsetof(struct trap_per_cpu, resum_qmask)) ||
2551 (TRAP_PER_CPU_NONRESUM_QMASK !=
2552 offsetof(struct trap_per_cpu, nonresum_qmask)))
2553 trap_per_cpu_offsets_are_bolixed_dave();
2555 if ((TSB_CONFIG_TSB !=
2556 offsetof(struct tsb_config, tsb)) ||
2557 (TSB_CONFIG_RSS_LIMIT !=
2558 offsetof(struct tsb_config, tsb_rss_limit)) ||
2559 (TSB_CONFIG_NENTRIES !=
2560 offsetof(struct tsb_config, tsb_nentries)) ||
2561 (TSB_CONFIG_REG_VAL !=
2562 offsetof(struct tsb_config, tsb_reg_val)) ||
2563 (TSB_CONFIG_MAP_VADDR !=
2564 offsetof(struct tsb_config, tsb_map_vaddr)) ||
2565 (TSB_CONFIG_MAP_PTE !=
2566 offsetof(struct tsb_config, tsb_map_pte)))
2567 tsb_config_offsets_are_bolixed_dave();
2569 /* Attach to the address space of init_task. On SMP we
2570 * do this in smp.c:smp_callin for other cpus.
2572 atomic_inc(&init_mm.mm_count);
2573 current->active_mm = &init_mm;