/*
 *  Copyright (C) 1995-1996  Gary Thomas (gdt@linuxppc.org)
 *  Copyright 2007-2010 Freescale Semiconductor, Inc.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 *  Modified by Cort Dougan (cort@cs.nmt.edu)
 *  and Paul Mackerras (paulus@samba.org)
 */

/*
 * This file handles the architecture-dependent parts of hardware exceptions
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/debugfs.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>

#include <asm/emulated_ops.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
#include <asm/reg.h>
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/processor.h>
#include <asm/tm.h>
#endif
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/rio.h>
#include <asm/fadump.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#include <sysdev/fsl_pci.h>
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_break_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif

/* Transactional Memory trap debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while (0)
#endif
/*
 * Trap & Exception support
 */

#ifdef CONFIG_PMAC_BACKLIGHT
static void pmac_backlight_unblank(void)
{
	mutex_lock(&pmac_backlight_mutex);
	if (pmac_backlight) {
		struct backlight_properties *props;

		props = &pmac_backlight->props;
		props->brightness = props->max_brightness;
		props->power = FB_BLANK_UNBLANK;
		backlight_update_status(pmac_backlight);
	}
	mutex_unlock(&pmac_backlight_mutex);
}
#else
static inline void pmac_backlight_unblank(void) { }
#endif
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
static int die_counter;

static unsigned __kprobes long oops_begin(struct pt_regs *regs)
{
	int cpu;
	unsigned long flags;

	if (debugger(regs))
		return 1;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	if (machine_is(powermac))
		pmac_backlight_unblank();
	return flags;
}
static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
			       int signr)
{
	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	oops_exit();
	printk("\n");
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		arch_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);

	crash_fadump(regs, "die oops");

	/*
	 * A system reset (0x100) is a request to dump, so we always send
	 * it through the crashdump code.
	 */
	if (kexec_should_crash(current) || (TRAP(regs) == 0x100)) {
		crash_kexec(regs);

		/*
		 * We aren't the primary crash CPU. We need to send it
		 * to a holding pattern to avoid it ending up in the panic
		 * code.
		 */
		crash_kexec_secondary(regs);
	}

	if (!signr)
		return;

	/*
	 * While our oops output is serialised by a spinlock, output
	 * from panic() called below can race and corrupt it. If we
	 * know we are going to panic, delay for 1 second so we have a
	 * chance to get clean backtraces from all CPUs that are oopsing.
	 */
	if (in_interrupt() || panic_on_oops || !current->pid ||
	    is_global_init(current)) {
		mdelay(MSEC_PER_SEC);
	}

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	do_exit(signr);
}
static int __kprobes __die(const char *str, struct pt_regs *regs, long err)
{
	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP NR_CPUS=%d ", NR_CPUS);
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC ");
#endif
#ifdef CONFIG_NUMA
	printk("NUMA ");
#endif
	printk("%s\n", ppc_md.name ? ppc_md.name : "");

	if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
		return 1;

	print_modules();
	show_regs(regs);

	return 0;
}

void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags = oops_begin(regs);

	if (__die(str, regs, err))
		err = 0;
	oops_end(flags, regs, err);
}
void user_single_step_siginfo(struct task_struct *tsk,
				struct pt_regs *regs, siginfo_t *info)
{
	memset(info, 0, sizeof(*info));
	info->si_signo = SIGTRAP;
	info->si_code = TRAP_TRACE;
	info->si_addr = (void __user *)regs->nip;
}

void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
	siginfo_t info;
	const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
			"at %08lx nip %08lx lr %08lx code %x\n";
	const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
			"at %016lx nip %016lx lr %016lx code %x\n";

	if (!user_mode(regs)) {
		die("Exception in kernel mode", regs, signr);
		return;
	}

	if (show_unhandled_signals && unhandled_signal(current, signr)) {
		printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
				   current->comm, current->pid, signr,
				   addr, regs->nip, regs->link, code);
	}

	if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
		local_irq_enable();

	current->thread.trap_nr = code;
	memset(&info, 0, sizeof(info));
	info.si_signo = signr;
	info.si_code = code;
	info.si_addr = (void __user *) addr;
	force_sig_info(signr, &info, current);
}
#ifdef CONFIG_PPC64
void system_reset_exception(struct pt_regs *regs)
{
	/* See if any machine dependent calls */
	if (ppc_md.system_reset_exception) {
		if (ppc_md.system_reset_exception(regs))
			return;
	}

	die("System Reset", regs, SIGABRT);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable System Reset");

	/* What should we do here? We could issue a shutdown or hard reset. */
}
#endif
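
/*
 * A note on MSR_RI, which the handler above and the machine check path
 * below both test: the bit is clear from the moment an exception writes
 * SRR0/SRR1 until the handler has saved them away.  If a System Reset or
 * Machine Check lands inside that window the interrupted context is
 * already gone, so the only safe response is to panic rather than try to
 * return (see also unrecoverable_exception() near the end of this file).
 */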
/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 * Note that the 601 only takes a machine check on TEA
 * (transfer error ack) signal assertion, and does not
 * set any of the top 16 bits of SRR1.
 *  -- paulus.
 */
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	unsigned long msr = regs->msr;
	const struct exception_table_entry *entry;
	unsigned int *nip = (unsigned int *)regs->nip;

	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
	    && (entry = search_exception_tables(regs->nip)) != NULL) {
		/*
		 * Check that it's a sync instruction, or somewhere
		 * in the twi; isync; nop sequence that inb/inw/inl uses.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 * For the debug message, we look at the preceding
		 * load or store.
		 */
		if (*nip == 0x60000000)		/* nop */
			nip -= 2;
		else if (*nip == 0x4c00012c)	/* isync */
			--nip;
		if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
			/* sync or twi */
			unsigned int rb;

			--nip;
			rb = (*nip >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %p\n",
			       (*nip & 0x100)? "OUT to": "IN from",
			       regs->gpr[rb] - _IO_BASE, nip);
			regs->msr |= MSR_RI;
			regs->nip = entry->fixup;
			return 1;
		}
	}
#endif /* CONFIG_PPC32 */
	return 0;
}
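
/*
 * For reference, the PPC32 port-I/O accessors end in roughly the
 * following sequence (a sketch of what io.h emits, not a verbatim copy),
 * which is what the pattern match above walks backwards through:
 *
 *	lbz	rD,0(rN)	# the access itself (inb shown here)
 *	twi	0,rD,0		# never traps, but creates a data dependency
 *	isync			# ...so the load completes, and any machine
 *				# check is taken, before execution continues
 *
 * Because the whole sequence is covered by an exception table entry, the
 * machine check can be recovered by branching to the fixup address
 * instead of dying.
 */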
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/* On 4xx, the reason for the machine check or program exception
   is in the ESR. */
#define get_reason(regs)	((regs)->dsisr)
#ifndef CONFIG_FSL_BOOKE
#define get_mc_reason(regs)	((regs)->dsisr)
#else
#define get_mc_reason(regs)	(mfspr(SPRN_MCSR))
#endif
#define REASON_FP		ESR_FP
#define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED	ESR_PPR
#define REASON_TRAP		ESR_PTR

/* single-step stuff */
#define single_stepping(regs)	(current->thread.dbcr0 & DBCR0_IC)
#define clear_single_step(regs)	(current->thread.dbcr0 &= ~DBCR0_IC)

#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs)	((regs)->msr)
#define get_mc_reason(regs)	((regs)->msr)
#define REASON_TM		0x200000
#define REASON_FP		0x100000
#define REASON_ILLEGAL		0x80000
#define REASON_PRIVILEGED	0x40000
#define REASON_TRAP		0x20000

#define single_stepping(regs)	((regs)->msr & MSR_SE)
#define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
#endif
#if defined(CONFIG_4xx)
int machine_check_4xx(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	if (reason & ESR_IMCP) {
		printk("Instruction");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	} else
		printk("Data");
	printk(" machine check in kernel mode.\n");

	return 0;
}

int machine_check_440A(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	if (reason & ESR_IMCP) {
		printk("Instruction Synchronous Machine Check exception\n");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	} else {
		u32 mcsr = mfspr(SPRN_MCSR);
		if (mcsr & MCSR_IB)
			printk("Instruction Read PLB Error\n");
		if (mcsr & MCSR_DRB)
			printk("Data Read PLB Error\n");
		if (mcsr & MCSR_DWB)
			printk("Data Write PLB Error\n");
		if (mcsr & MCSR_TLBP)
			printk("TLB Parity Error\n");
		if (mcsr & MCSR_ICP) {
			flush_instruction_cache();
			printk("I-Cache Parity Error\n");
		}
		if (mcsr & MCSR_DCSP)
			printk("D-Cache Search Parity Error\n");
		if (mcsr & MCSR_DCFP)
			printk("D-Cache Flush Parity Error\n");
		if (mcsr & MCSR_IMPE)
			printk("Machine Check exception is imprecise\n");

		/* Clear MCSR */
		mtspr(SPRN_MCSR, mcsr);
	}
	return 0;
}
int machine_check_47x(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);
	u32 mcsr;

	printk(KERN_ERR "Machine check in kernel mode.\n");
	if (reason & ESR_IMCP) {
		printk(KERN_ERR
		       "Instruction Synchronous Machine Check exception\n");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
		return 0;
	}
	mcsr = mfspr(SPRN_MCSR);
	if (mcsr & MCSR_IB)
		printk(KERN_ERR "Instruction Read PLB Error\n");
	if (mcsr & MCSR_DRB)
		printk(KERN_ERR "Data Read PLB Error\n");
	if (mcsr & MCSR_DWB)
		printk(KERN_ERR "Data Write PLB Error\n");
	if (mcsr & MCSR_TLBP)
		printk(KERN_ERR "TLB Parity Error\n");
	if (mcsr & MCSR_ICP) {
		flush_instruction_cache();
		printk(KERN_ERR "I-Cache Parity Error\n");
	}
	if (mcsr & MCSR_DCSP)
		printk(KERN_ERR "D-Cache Search Parity Error\n");
	if (mcsr & PPC47x_MCSR_GPR)
		printk(KERN_ERR "GPR Parity Error\n");
	if (mcsr & PPC47x_MCSR_FPR)
		printk(KERN_ERR "FPR Parity Error\n");
	if (mcsr & PPC47x_MCSR_IPR)
		printk(KERN_ERR "Machine Check exception is imprecise\n");

	/* Clear MCSR */
	mtspr(SPRN_MCSR, mcsr);

	return 0;
}
#elif defined(CONFIG_E500)
int machine_check_e500mc(struct pt_regs *regs)
{
	unsigned long mcsr = mfspr(SPRN_MCSR);
	unsigned long reason = mcsr;
	int recoverable = 1;

	if (reason & MCSR_LD) {
		recoverable = fsl_rio_mcheck_exception(regs);
		if (recoverable == 1)
			goto silent_out;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");

	if (reason & MCSR_ICPERR) {
		printk("Instruction Cache Parity Error\n");

		/*
		 * This is recoverable by invalidating the i-cache.
		 */
		mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
		while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
			;

		/*
		 * This will generally be accompanied by an instruction
		 * fetch error report -- only treat MCSR_IF as fatal
		 * if it wasn't due to an L1 parity error.
		 */
		reason &= ~MCSR_IF;
	}

	if (reason & MCSR_DCPERR_MC) {
		printk("Data Cache Parity Error\n");

		/*
		 * In write shadow mode we auto-recover from the error, but it
		 * may still get logged and cause a machine check.  We should
		 * only treat the non-write shadow case as non-recoverable.
		 */
		if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
			recoverable = 0;
	}

	if (reason & MCSR_L2MMU_MHIT) {
		printk("Hit on multiple TLB entries\n");
		recoverable = 0;
	}

	if (reason & MCSR_NMI)
		printk("Non-maskable interrupt\n");

	if (reason & MCSR_IF) {
		printk("Instruction Fetch Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LD) {
		printk("Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_ST) {
		printk("Store Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LDG) {
		printk("Guarded Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_TLBSYNC)
		printk("Simultaneous tlbsync operations\n");

	if (reason & MCSR_BSL2_ERR) {
		printk("Level 2 Cache Error\n");
		recoverable = 0;
	}

	if (reason & MCSR_MAV) {
		u64 addr;

		addr = mfspr(SPRN_MCAR);
		addr |= (u64)mfspr(SPRN_MCARU) << 32;

		printk("Machine Check %s Address: %#llx\n",
		       reason & MCSR_MEA ? "Effective" : "Physical", addr);
	}

silent_out:
	mtspr(SPRN_MCSR, mcsr);
	return mfspr(SPRN_MCSR) == 0 && recoverable;
}
int machine_check_e500(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	if (reason & MCSR_BUS_RBERR) {
		if (fsl_rio_mcheck_exception(regs))
			return 1;
		if (fsl_pci_mcheck_exception(regs))
			return 1;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_ICPERR)
		printk("Instruction Cache Parity Error\n");
	if (reason & MCSR_DCP_PERR)
		printk("Data Cache Push Parity Error\n");
	if (reason & MCSR_DCPERR)
		printk("Data Cache Parity Error\n");
	if (reason & MCSR_BUS_IAERR)
		printk("Bus - Instruction Address Error\n");
	if (reason & MCSR_BUS_RAERR)
		printk("Bus - Read Address Error\n");
	if (reason & MCSR_BUS_WAERR)
		printk("Bus - Write Address Error\n");
	if (reason & MCSR_BUS_IBERR)
		printk("Bus - Instruction Data Error\n");
	if (reason & MCSR_BUS_RBERR)
		printk("Bus - Read Data Bus Error\n");
	if (reason & MCSR_BUS_WBERR)
		printk("Bus - Write Data Bus Error\n");
	if (reason & MCSR_BUS_IPERR)
		printk("Bus - Instruction Parity Error\n");
	if (reason & MCSR_BUS_RPERR)
		printk("Bus - Read Parity Error\n");

	return 0;
}
int machine_check_generic(struct pt_regs *regs)
{
	return 0;
}
#elif defined(CONFIG_E200)
int machine_check_e200(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_CP_PERR)
		printk("Cache Push Parity Error\n");
	if (reason & MCSR_CPERR)
		printk("Cache Parity Error\n");
	if (reason & MCSR_EXCP_ERR)
		printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
	if (reason & MCSR_BUS_IRERR)
		printk("Bus - Read Bus Error on instruction fetch\n");
	if (reason & MCSR_BUS_DRERR)
		printk("Bus - Read Bus Error on data load\n");
	if (reason & MCSR_BUS_WRERR)
		printk("Bus - Write Bus Error on buffered store or cache line push\n");

	return 0;
}
#else
int machine_check_generic(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from SRR1=%lx): ", reason);
	switch (reason & 0x601F0000) {
	case 0x80000:
		printk("Machine check signal\n");
		break;
	case 0:		/* for 601 */
	case 0x40000:
	case 0x140000:	/* 7450 MSS error and TEA */
		printk("Transfer error ack signal\n");
		break;
	case 0x20000:
		printk("Data parity error signal\n");
		break;
	case 0x10000:
		printk("Address parity error signal\n");
		break;
	case 0x20000000:
		printk("L1 Data Cache error\n");
		break;
	case 0x40000000:
		printk("L1 Instruction Cache error\n");
		break;
	case 0x00100000:
		printk("L2 data cache parity error\n");
		break;
	default:
		printk("Unknown values in msr\n");
	}
	return 0;
}
#endif /* everything else */
void machine_check_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	int recover = 0;

	__get_cpu_var(irq_stat).mce_exceptions++;

	/* See if any machine dependent calls. In theory, we would want
	 * to call the CPU first, and call the ppc_md. one if the CPU
	 * one returns a positive number. However there is existing code
	 * that assumes the board gets a first chance, so let's keep it
	 * that way for now and fix things later. --BenH.
	 */
	if (ppc_md.machine_check_exception)
		recover = ppc_md.machine_check_exception(regs);
	else if (cur_cpu_spec->machine_check)
		recover = cur_cpu_spec->machine_check(regs);

	if (recover > 0)
		goto bail;

#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
	/* the qspan pci read routines can cause machine checks -- Cort
	 *
	 * yuck !!! that totally needs to go away ! There are better ways
	 * to deal with that than having a wart in the mcheck handler.
	 * -- BenH
	 */
	bad_page_fault(regs, regs->dar, SIGBUS);
	goto bail;
#endif

	if (debugger_fault_handler(regs))
		goto bail;

	if (check_io_access(regs))
		goto bail;

	die("Machine check", regs, SIGBUS);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable Machine check");

bail:
	exception_exit(prev_state);
}
void SMIException(struct pt_regs *regs)
{
	die("System Management Interrupt", regs, SIGABRT);
}

void unknown_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
	       regs->nip, regs->msr, regs->trap);

	_exception(SIGTRAP, regs, 0, 0);

	exception_exit(prev_state);
}

void instruction_breakpoint_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_iabr_match(regs))
		goto bail;
	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);

bail:
	exception_exit(prev_state);
}

void RunModeException(struct pt_regs *regs)
{
	_exception(SIGTRAP, regs, 0, 0);
}

void __kprobes single_step_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	clear_single_step(regs);

	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_sstep(regs))
		goto bail;

	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);

bail:
	exception_exit(prev_state);
}
/*
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception.  This was pointed out
 * by Kumar Gala.  -- paulus
 */
static void emulate_single_step(struct pt_regs *regs)
{
	if (single_stepping(regs))
		single_step_exception(regs);
}
static inline int __parse_fpscr(unsigned long fpscr)
{
	int ret = 0;

	/* Invalid operation */
	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
		ret = FPE_FLTINV;

	/* Overflow */
	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
		ret = FPE_FLTOVF;

	/* Underflow */
	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
		ret = FPE_FLTUND;

	/* Divide by zero */
	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
		ret = FPE_FLTDIV;

	/* Inexact result */
	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
		ret = FPE_FLTRES;

	return ret;
}
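
/*
 * Note that both the enable bit and the corresponding sticky status bit
 * must be set for a code to be reported: a divide by zero, for example,
 * only yields FPE_FLTDIV if the task enabled FPSCR_ZE; otherwise we fall
 * through and report 0 (no specific cause).
 */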
static void parse_fpe(struct pt_regs *regs)
{
	int code = 0;

	flush_fp_to_thread(current);

	code = __parse_fpscr(current->thread.fpscr.val);

	_exception(SIGFPE, regs, code, regs->nip);
}
/*
 * Illegal instruction emulation support.  Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault.  Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits.  In this case, matching lots of
 * bits is faster and easier.
 *
 */
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 NB_RB = (instword >> 11) & 0x1f;
	u32 num_bytes;
	unsigned long EA;
	int pos = 0;

	/* Early out if we are an invalid form of lswx */
	if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
		if ((rT == rA) || (rT == NB_RB))
			return -EINVAL;

	EA = (rA == 0) ? 0 : regs->gpr[rA];

	switch (instword & PPC_INST_STRING_MASK) {
	case PPC_INST_LSWX:
	case PPC_INST_STSWX:
		EA += NB_RB;
		num_bytes = regs->xer & 0x7f;
		break;
	case PPC_INST_LSWI:
	case PPC_INST_STSWI:
		num_bytes = (NB_RB == 0) ? 32 : NB_RB;
		break;
	default:
		return -EINVAL;
	}

	while (num_bytes != 0) {
		u8 val;
		u32 shift = 8 * (3 - (pos & 0x3));

		/* if process is 32-bit, clear upper 32 bits of EA */
		if ((regs->msr & MSR_64BIT) == 0)
			EA &= 0xFFFFFFFF;

		switch ((instword & PPC_INST_STRING_MASK)) {
		case PPC_INST_LSWX:
		case PPC_INST_LSWI:
			if (get_user(val, (u8 __user *)EA))
				return -EFAULT;
			/* first time updating this reg,
			 * zero it out */
			if (pos == 0)
				regs->gpr[rT] = 0;
			regs->gpr[rT] |= val << shift;
			break;
		case PPC_INST_STSWI:
		case PPC_INST_STSWX:
			val = regs->gpr[rT] >> shift;
			if (put_user(val, (u8 __user *)EA))
				return -EFAULT;
			break;
		}
		/* move EA to next address */
		EA += 1;
		num_bytes--;

		/* manage our position within the register */
		if (++pos == 4) {
			pos = 0;
			if (++rT == 32)
				rT = 0;
		}
	}

	return 0;
}
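
/*
 * Worked example of the loop above: "lswi r5,r4,7" transfers 7 bytes
 * starting at the address in r4; bytes land in r5 then r6, four per
 * register, most-significant byte first, with the unfilled low bytes of
 * the last register zeroed and rT wrapping from r31 back to r0.  NB=0
 * means 32 bytes, and lswx takes its count from the XER instead.
 */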
static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
{
	u32 ra, rs;
	unsigned long tmp;

	ra = (instword >> 16) & 0x1f;
	rs = (instword >> 21) & 0x1f;

	tmp = regs->gpr[rs];
	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
	regs->gpr[ra] = tmp;

	return 0;
}
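
/*
 * The three mask-and-add steps above are the classic SWAR reduction:
 * after the first, every 2-bit field holds the number of set bits in
 * that pair; after the second, every nibble holds its own count; the
 * final add-and-mask leaves each byte holding its own population count.
 * That per-byte result is exactly what popcntb specifies -- the counts
 * are deliberately not summed across bytes.
 */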
static int emulate_isel(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 rB = (instword >> 11) & 0x1f;
	u8 BC = (instword >> 6) & 0x1f;
	u8 bit;
	unsigned long tmp;

	tmp = (rA == 0) ? 0 : regs->gpr[rA];
	bit = (regs->ccr >> (31 - BC)) & 0x1;

	regs->gpr[rT] = bit ? tmp : regs->gpr[rB];

	return 0;
}
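
/*
 * isel rT,rA,rB,BC selects rA when CR bit BC (numbered from the most
 * significant bit, hence the "31 - BC" shift above) is set, and rB
 * otherwise, with the usual convention that rA=0 means the literal
 * value zero rather than the contents of r0.
 */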
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline bool tm_abort_check(struct pt_regs *regs, int cause)
{
	/* If we're emulating a load/store in an active transaction, we cannot
	 * emulate it as the kernel operates in transaction suspended context.
	 * We need to abort the transaction.  This creates a persistent TM
	 * abort so tell the user what caused it with a new code.
	 */
	if (MSR_TM_TRANSACTIONAL(regs->msr)) {
		tm_enable();
		tm_abort(cause);
		return true;
	}
	return false;
}
#else
static inline bool tm_abort_check(struct pt_regs *regs, int reason)
{
	return false;
}
#endif
static int emulate_instruction(struct pt_regs *regs)
{
	u32 instword;
	u32 rd;

	if (!user_mode(regs))
		return -EINVAL;
	CHECK_FULL_REGS(regs);

	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EFAULT;

	/* Emulate the mfspr rD, PVR. */
	if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
		PPC_WARN_EMULATED(mfpvr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op.  */
	if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
		PPC_WARN_EMULATED(dcba, regs);
		return 0;
	}

	/* Emulate the mcrxr insn.  */
	if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
		int shift = (instword >> 21) & 0x1c;
		unsigned long msk = 0xf0000000UL >> shift;

		PPC_WARN_EMULATED(mcrxr, regs);
		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}

	/* Emulate load/store string insn. */
	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
		if (tm_abort_check(regs,
				   TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
			return -EINVAL;
		PPC_WARN_EMULATED(string, regs);
		return emulate_string_inst(regs, instword);
	}

	/* Emulate the popcntb (Population Count Bytes) instruction. */
	if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
		PPC_WARN_EMULATED(popcntb, regs);
		return emulate_popcntb_inst(regs, instword);
	}

	/* Emulate isel (Integer Select) instruction */
	if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
		PPC_WARN_EMULATED(isel, regs);
		return emulate_isel(regs, instword);
	}

#ifdef CONFIG_PPC64
	/* Emulate the mfspr rD, DSCR. */
	if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
		PPC_INST_MFSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
		PPC_INST_MFSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mfdscr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_DSCR);
		return 0;
	}
	/* Emulate the mtspr DSCR, rD. */
	if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
		PPC_INST_MTSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
		PPC_INST_MTSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mtdscr, regs);
		rd = (instword >> 21) & 0x1f;
		current->thread.dscr = regs->gpr[rd];
		current->thread.dscr_inherit = 1;
		mtspr(SPRN_DSCR, current->thread.dscr);
		return 0;
	}
#endif

	return -EINVAL;
}
int is_valid_bugaddr(unsigned long addr)
{
	return is_kernel_addr(addr);
}

#ifdef CONFIG_MATH_EMULATION
static int emulate_math(struct pt_regs *regs)
{
	int ret;
	extern int do_mathemu(struct pt_regs *regs);

	ret = do_mathemu(regs);
	if (ret >= 0)
		PPC_WARN_EMULATED(math, regs);

	switch (ret) {
	case 0:
		emulate_single_step(regs);
		return 0;
	case 1: {
		int code = 0;
		code = __parse_fpscr(current->thread.fpscr.val);
		_exception(SIGFPE, regs, code, regs->nip);
		return 0;
	}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return 0;
	}

	return -1;
}
#else
static inline int emulate_math(struct pt_regs *regs) { return -1; }
#endif
void __kprobes program_check_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	unsigned int reason = get_reason(regs);

	/* We can now get here via a FP Unavailable exception if the core
	 * has no FPU, in that case the reason flags will be 0 */

	if (reason & REASON_FP) {
		/* IEEE FP exception */
		parse_fpe(regs);
		goto bail;
	}
	if (reason & REASON_TRAP) {
		/* Debugger is first in line to stop recursive faults in
		 * rcu_lock, notify_die, or atomic_notifier_call_chain */
		if (debugger_bpt(regs))
			goto bail;

		/* trap exception */
		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
				== NOTIFY_STOP)
			goto bail;

		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
		    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			goto bail;
		}
		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
		goto bail;
	}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (reason & REASON_TM) {
		/* This is a TM "Bad Thing Exception" program check.
		 * This occurs when:
		 * -  An rfid/hrfid/mtmsrd attempts to cause an illegal
		 *    transition in TM states.
		 * -  A trechkpt is attempted when transactional.
		 * -  A treclaim is attempted when non transactional.
		 * -  A tend is illegally attempted.
		 * -  writing a TM SPR when transactional.
		 */
		if (!user_mode(regs) &&
		    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			goto bail;
		}
		/* If usermode caused this, it's done something illegal and
		 * gets a SIGILL slap on the wrist.  We call it an illegal
		 * operand to distinguish from the instruction just being bad
		 * (e.g. executing a 'tend' on a CPU without TM!); it's an
		 * illegal /placement/ of a valid instruction.
		 */
		if (user_mode(regs)) {
			_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
			goto bail;
		} else {
			printk(KERN_EMERG "Unexpected TM Bad Thing exception "
			       "at %lx (msr 0x%x)\n", regs->nip, reason);
			die("Unrecoverable exception", regs, SIGABRT);
		}
	}
#endif

	/*
	 * If we took the program check in the kernel skip down to sending a
	 * SIGILL. The subsequent cases all relate to emulating instructions
	 * which we should only do for userspace. We also do not want to
	 * enable interrupts for kernel faults because that might lead to
	 * further faults, and lose the context of the original exception.
	 */
	if (!user_mode(regs))
		goto sigill;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
	 * but there seems to be a hardware bug on the 405GP (RevD)
	 * that means ESR is sometimes set incorrectly - either to
	 * ESR_DST (!?) or 0.  In the process of chasing this with the
	 * hardware people - not sure if it can happen on any illegal
	 * instruction or only on FP instructions, whether there is a
	 * pattern to occurrences etc. -dgibson 31/Mar/2003
	 */
	if (!emulate_math(regs))
		goto bail;

	/* Try to emulate it if we should. */
	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
		switch (emulate_instruction(regs)) {
		case 0:
			regs->nip += 4;
			emulate_single_step(regs);
			goto bail;
		case -EFAULT:
			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
			goto bail;
		}
	}

sigill:
	if (reason & REASON_PRIVILEGED)
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	else
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);

bail:
	exception_exit(prev_state);
}
/*
 * This occurs when running in hypervisor mode on POWER6 or later
 * and an illegal instruction is encountered.
 */
void __kprobes emulation_assist_interrupt(struct pt_regs *regs)
{
	regs->msr |= REASON_ILLEGAL;
	program_check_exception(regs);
}

void alignment_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	int sig, code, fixed = 0;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
		goto bail;

	/* we don't implement logging of alignment exceptions */
	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		fixed = fix_alignment(regs);

	if (fixed == 1) {
		regs->nip += 4;	/* skip over emulated instruction */
		emulate_single_step(regs);
		goto bail;
	}

	/* Operand address was bad */
	if (fixed == -EFAULT) {
		sig = SIGSEGV;
		code = SEGV_ACCERR;
	} else {
		sig = SIGBUS;
		code = BUS_ADRALN;
	}
	if (user_mode(regs))
		_exception(sig, regs, code, regs->dar);
	else
		bad_page_fault(regs, regs->dar, sig);

bail:
	exception_exit(prev_state);
}
void StackOverflow(struct pt_regs *regs)
{
	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
	       current, regs->gpr[1]);
	debugger(regs);
	show_regs(regs);
	panic("kernel stack overflow");
}

void nonrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
	       regs->nip, regs->msr);
	debugger(regs);
	die("nonrecoverable exception", regs, SIGKILL);
}

void trace_syscall(struct pt_regs *regs)
{
	printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld    %s\n",
	       current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0],
	       regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
}

void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);

	exception_exit(prev_state);
}
void altivec_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (user_mode(regs)) {
		/* A user program has executed an altivec instruction,
		   but this kernel doesn't support altivec. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		goto bail;
	}

	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);

bail:
	exception_exit(prev_state);
}

void vsx_unavailable_exception(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		/* A user program has executed a vsx instruction,
		   but this kernel doesn't support vsx. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}
#ifdef CONFIG_PPC64
void facility_unavailable_exception(struct pt_regs *regs)
{
	static char *facility_strings[] = {
		[FSCR_FP_LG] = "FPU",
		[FSCR_VECVSX_LG] = "VMX/VSX",
		[FSCR_DSCR_LG] = "DSCR",
		[FSCR_PM_LG] = "PMU SPRs",
		[FSCR_BHRB_LG] = "BHRB",
		[FSCR_TM_LG] = "TM",
		[FSCR_EBB_LG] = "EBB",
		[FSCR_TAR_LG] = "TAR",
	};
	char *facility = "unknown";
	u64 value;
	u8 status;
	bool hv;

	hv = (regs->trap == 0xf80);
	if (hv)
		value = mfspr(SPRN_HFSCR);
	else
		value = mfspr(SPRN_FSCR);

	status = value >> 56;
	if (status == FSCR_DSCR_LG) {
		/* User is accessing the DSCR.  Set the inherit bit and allow
		 * the user to set it directly in future by setting via the
		 * FSCR DSCR bit.  We always leave HFSCR DSCR set.
		 */
		current->thread.dscr_inherit = 1;
		mtspr(SPRN_FSCR, value | FSCR_DSCR);
		return;
	}

	if ((status < ARRAY_SIZE(facility_strings)) &&
	    facility_strings[status])
		facility = facility_strings[status];

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	pr_err("%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n",
	       hv ? "Hypervisor " : "", facility, regs->nip, regs->msr);

	if (user_mode(regs)) {
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	die("Unexpected facility unavailable exception", regs, SIGABRT);
}
#endif
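
/*
 * The ">> 56" above extracts the interrupt-cause byte of the (H)FSCR,
 * which holds the bit number of the offending facility; that is why the
 * same FSCR_*_LG constants serve both as enable-bit shifts and as the
 * indices into facility_strings[].
 */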
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM

extern void do_load_up_fpu(struct pt_regs *regs);

void fp_unavailable_tm(struct pt_regs *regs)
{
	/* Note:  This does not handle any kind of FP laziness. */

	TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
		 regs->nip, regs->msr);
	tm_enable();

	/* We can only have got here if the task started using FP after
	 * beginning the transaction.  So, the transactional regs are just a
	 * copy of the checkpointed ones.  But, we still need to recheckpoint
	 * as we're enabling FP for the process; it will return, abort the
	 * transaction, and probably retry but now with FP enabled.  So the
	 * checkpointed FP registers need to be loaded.
	 */
	tm_reclaim(&current->thread, current->thread.regs->msr,
		   TM_CAUSE_FAC_UNAV);
	/* Reclaim didn't save out any FPRs to transact_fprs. */

	/* Enable FP for the task: */
	regs->msr |= (MSR_FP | current->thread.fpexc_mode);

	/* This loads and recheckpoints the FP registers from
	 * thread.fpr[].  They will remain in registers after the
	 * checkpoint so we don't need to reload them after.
	 */
	tm_recheckpoint(&current->thread, regs->msr);
}
#ifdef CONFIG_ALTIVEC
extern void do_load_up_altivec(struct pt_regs *regs);

void altivec_unavailable_tm(struct pt_regs *regs)
{
	/* See the comments in fp_unavailable_tm().  This function operates
	 * the same way.
	 */

	TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx,"
		 "MSR=%lx\n",
		 regs->nip, regs->msr);
	tm_enable();
	tm_reclaim(&current->thread, current->thread.regs->msr,
		   TM_CAUSE_FAC_UNAV);
	regs->msr |= MSR_VEC;
	tm_recheckpoint(&current->thread, regs->msr);
	current->thread.used_vr = 1;
}
#endif
#ifdef CONFIG_VSX
void vsx_unavailable_tm(struct pt_regs *regs)
{
	/* See the comments in fp_unavailable_tm().  This works similarly,
	 * though we're loading both FP and VEC registers in here.
	 *
	 * If FP isn't in use, load FP regs.  If VEC isn't in use, load VEC
	 * regs.  Either way, set MSR_VSX.
	 */

	TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx,"
		 "MSR=%lx\n",
		 regs->nip, regs->msr);
	tm_enable();
	/* This reclaims FP and/or VR regs if they're already enabled */
	tm_reclaim(&current->thread, current->thread.regs->msr,
		   TM_CAUSE_FAC_UNAV);

	regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode |
		MSR_VSX;
	/* This loads & recheckpoints FP and VRs. */
	tm_recheckpoint(&current->thread, regs->msr);
	current->thread.used_vsr = 1;
}
#endif
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
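
/*
 * All three *_unavailable_tm() handlers above follow the recipe spelled
 * out in fp_unavailable_tm(): tm_reclaim() pulls the checkpointed state
 * out of the transaction, the relevant MSR facility bits are enabled,
 * and tm_recheckpoint() reloads the checkpointed registers so that the
 * transaction can abort and be retried with the facility available.
 */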
void performance_monitor_exception(struct pt_regs *regs)
{
	__get_cpu_var(irq_stat).pmu_irqs++;

	perf_irq(regs);
}

#ifdef CONFIG_8xx
void SoftwareEmulation(struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);

	if (!user_mode(regs)) {
		debugger(regs);
		die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
	}

	if (!emulate_math(regs))
		return;

	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
#endif /* CONFIG_8xx */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
{
	int changed = 0;
	/*
	 * Determine the cause of the debug event, clear the
	 * event flags and send a trap to the handler. Torez
	 */
	if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
		dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		current->thread.dbcr2 &= ~DBCR2_DAC12MODE;
#endif
		do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT,
			     5);
		changed |= 0x01;
	} else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
		dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
		do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT,
			     6);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC1) {
		current->thread.dbcr0 &= ~DBCR0_IAC1;
		dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
		do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT,
			     1);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC2) {
		current->thread.dbcr0 &= ~DBCR0_IAC2;
		do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT,
			     2);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC3) {
		current->thread.dbcr0 &= ~DBCR0_IAC3;
		dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
		do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT,
			     3);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC4) {
		current->thread.dbcr0 &= ~DBCR0_IAC4;
		do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT,
			     4);
		changed |= 0x01;
	}
	/*
	 * At the point this routine was called, the MSR(DE) was turned off.
	 * Check all other debug flags and see if that bit needs to be turned
	 * back on or not.
	 */
	if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0, current->thread.dbcr1))
		regs->msr |= MSR_DE;
	else
		/* Make sure the IDM flag is off */
		current->thread.dbcr0 &= ~DBCR0_IDM;

	if (changed & 0x01)
		mtspr(SPRN_DBCR0, current->thread.dbcr0);
}
void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
{
	current->thread.dbsr = debug_status;

	/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
	 * on server, it stops on the target of the branch. In order to simulate
	 * the server behaviour, we thus restart right away with a single step
	 * instead of stopping here when hitting a BT
	 */
	if (debug_status & DBSR_BT) {
		regs->msr &= ~MSR_DE;

		/* Disable BT */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
		/* Clear the BT event */
		mtspr(SPRN_DBSR, DBSR_BT);

		/* Do the single step trick only when coming from userspace */
		if (user_mode(regs)) {
			current->thread.dbcr0 &= ~DBCR0_BT;
			current->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
			regs->msr |= MSR_DE;
			return;
		}

		if (notify_die(DIE_SSTEP, "block_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}
		if (debugger_sstep(regs))
			return;
	} else if (debug_status & DBSR_IC) { 	/* Instruction complete */
		regs->msr &= ~MSR_DE;

		/* Disable instruction completion */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
		/* Clear the instruction completion event */
		mtspr(SPRN_DBSR, DBSR_IC);

		if (notify_die(DIE_SSTEP, "single_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}

		if (debugger_sstep(regs))
			return;

		if (user_mode(regs)) {
			current->thread.dbcr0 &= ~DBCR0_IC;
			if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0,
					       current->thread.dbcr1))
				regs->msr |= MSR_DE;
			else
				/* Make sure the IDM bit is off */
				current->thread.dbcr0 &= ~DBCR0_IDM;
		}

		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
	} else
		handle_debug(regs, debug_status);
}
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
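
/*
 * Note the mtspr(SPRN_DBSR, ...) writes above: the DBSR is
 * write-one-to-clear, so writing back just DBSR_BT or DBSR_IC
 * acknowledges only the event being handled and leaves any other
 * pending debug events latched for a later exception.
 */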
#if !defined(CONFIG_TAU_INT)
void TAUException(struct pt_regs *regs)
{
	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
	       regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* CONFIG_TAU_INT */
#ifdef CONFIG_ALTIVEC
void altivec_assist_exception(struct pt_regs *regs)
{
	int err;

	if (!user_mode(regs)) {
		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
	}

	flush_altivec_to_thread(current);

	PPC_WARN_EMULATED(altivec, regs);
	err = emulate_altivec(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else {
		/* didn't recognize the instruction */
		/* XXX quick hack for now: set the non-Java bit in the VSCR */
		printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
				   "in %s at %lx\n", current->comm, regs->nip);
		current->thread.vscr.u[3] |= 0x10000;
	}
}
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
void vsx_assist_exception(struct pt_regs *regs)
{
	if (!user_mode(regs)) {
		printk(KERN_EMERG "VSX assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VSX assist exception", regs, SIGILL);
	}

	flush_vsx_to_thread(current);
	printk(KERN_INFO "VSX assist not supported at %lx\n", regs->nip);
	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
#endif /* CONFIG_VSX */
#ifdef CONFIG_FSL_BOOKE
void CacheLockingException(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	/* We treat cache locking instructions from the user
	 * as priv ops, in the future we could try to do
	 * something smarter
	 */
	if (error_code & (ESR_DLK|ESR_ILK))
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	return;
}
#endif /* CONFIG_FSL_BOOKE */
#ifdef CONFIG_SPE
void SPEFloatingPointException(struct pt_regs *regs)
{
	extern int do_spe_mathemu(struct pt_regs *regs);
	unsigned long spefscr;
	int fpexc_mode;
	int code = 0;
	int err;

	flush_spe_to_thread(current);

	spefscr = current->thread.spefscr;
	fpexc_mode = current->thread.fpexc_mode;

	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF))
		code = FPE_FLTOVF;
	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND))
		code = FPE_FLTUND;
	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
		code = FPE_FLTDIV;
	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV))
		code = FPE_FLTINV;
	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
		code = FPE_FLTRES;

	err = do_spe_mathemu(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, code, regs->nip);
	}

	return;
}
void SPEFloatingPointRoundException(struct pt_regs *regs)
{
	extern int speround_handler(struct pt_regs *regs);
	int err;

	preempt_disable();
	if (regs->msr & MSR_SPE)
		giveup_spe(current);
	preempt_enable();

	regs->nip -= 4;
	err = speround_handler(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, 0, regs->nip);
		return;
	}
}
#endif
/*
 * We enter here if we get an unrecoverable exception, that is, one
 * that happened at a point where the RI (recoverable interrupt) bit
 * in the MSR is 0.  This indicates that SRR0/1 are live, and that
 * we therefore lost state by taking this exception.
 */
void unrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
	       regs->trap, regs->nip);
	die("Unrecoverable exception", regs, SIGABRT);
}
#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
/*
 * Default handler for a Watchdog exception,
 * spins until a reboot occurs
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
	/* Generic WatchdogHandler, implement your own */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
	return;
}

void WatchdogException(struct pt_regs *regs)
{
	printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
	WatchdogHandler(regs);
}
#endif
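
/*
 * The default handler above only masks further watchdog interrupts by
 * clearing TCR[WIE]; a platform that wants something smarter (petting
 * the watchdog, forcing a clean reboot) is expected to override the
 * weak WatchdogHandler symbol.
 */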
/*
 * We enter here if we discover during exception entry that we are
 * running in supervisor mode with a userspace value in the stack pointer.
 */
void kernel_bad_stack(struct pt_regs *regs)
{
	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
	       regs->gpr[1], regs->nip);
	die("Bad kernel stack pointer", regs, SIGABRT);
}

void __init trap_init(void)
{
}
#ifdef CONFIG_PPC_EMULATED_STATS

#define WARN_EMULATED_SETUP(type)	.type = { .name = #type }

struct ppc_emulated ppc_emulated = {
#ifdef CONFIG_ALTIVEC
	WARN_EMULATED_SETUP(altivec),
#endif
	WARN_EMULATED_SETUP(dcba),
	WARN_EMULATED_SETUP(dcbz),
	WARN_EMULATED_SETUP(fp_pair),
	WARN_EMULATED_SETUP(isel),
	WARN_EMULATED_SETUP(mcrxr),
	WARN_EMULATED_SETUP(mfpvr),
	WARN_EMULATED_SETUP(multiple),
	WARN_EMULATED_SETUP(popcntb),
	WARN_EMULATED_SETUP(spe),
	WARN_EMULATED_SETUP(string),
	WARN_EMULATED_SETUP(unaligned),
#ifdef CONFIG_MATH_EMULATION
	WARN_EMULATED_SETUP(math),
#endif
#ifdef CONFIG_VSX
	WARN_EMULATED_SETUP(vsx),
#endif
#ifdef CONFIG_PPC64
	WARN_EMULATED_SETUP(mfdscr),
	WARN_EMULATED_SETUP(mtdscr),
#endif
};

u32 ppc_warn_emulated;

void ppc_warn_emulated_print(const char *type)
{
	pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
			    type);
}

static int __init ppc_warn_emulated_init(void)
{
	struct dentry *dir, *d;
	unsigned int i;
	struct ppc_emulated_entry *entries = (void *)&ppc_emulated;

	if (!powerpc_debugfs_root)
		return -ENODEV;

	dir = debugfs_create_dir("emulated_instructions",
				 powerpc_debugfs_root);
	if (!dir)
		return -ENOMEM;

	d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir,
			       &ppc_warn_emulated);
	if (!d)
		goto fail;

	for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
		d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir,
				       (u32 *)&entries[i].val.counter);
		if (!d)
			goto fail;
	}

	return 0;

fail:
	debugfs_remove_recursive(dir);
	return -ENOMEM;
}

device_initcall(ppc_warn_emulated_init);
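
/*
 * With the usual debugfs mount point this exposes one u32 counter per
 * emulated instruction type, plus the "do_warn" switch, under
 * /sys/kernel/debug/powerpc/emulated_instructions/.
 */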
#endif /* CONFIG_PPC_EMULATED_STATS */