/*
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright 2007-2010 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Modified by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras (paulus@samba.org)
 *
 * This file handles the architecture-dependent parts of hardware exceptions
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/pkeys.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/extable.h>
#include <linux/module.h>	/* print_modules */
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <linux/smp.h>
#include <linux/console.h>
#include <linux/kmsg_dump.h>

#include <asm/emulated_ops.h>
#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/debugfs.h>
#include <asm/machdep.h>
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#include <asm/firmware.h>
#include <asm/processor.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/fadump.h>
#include <asm/switch_to.h>
#include <asm/debug.h>
#include <asm/asm-prototypes.h>
#include <sysdev/fsl_pci.h>
#include <asm/kprobes.h>
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_break_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif
/* Transactional Memory trap debug */
#if 0
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while(0)
#endif
/*
 * Trap & Exception support
 */
#ifdef CONFIG_PMAC_BACKLIGHT
static void pmac_backlight_unblank(void)
{
	mutex_lock(&pmac_backlight_mutex);
	if (pmac_backlight) {
		struct backlight_properties *props;

		props = &pmac_backlight->props;
		props->brightness = props->max_brightness;
		props->power = FB_BLANK_UNBLANK;
		backlight_update_status(pmac_backlight);
	}
	mutex_unlock(&pmac_backlight_mutex);
}
#else
static inline void pmac_backlight_unblank(void) { }
#endif
/*
 * If oops/die is expected to crash the machine, return true here.
 *
 * This should not be expected to be 100% accurate, there may be
 * notifiers registered or other unexpected conditions that may bring
 * down the kernel. Or if the current process in the kernel is holding
 * locks or has other critical state, the kernel may become effectively
 * unusable anyway.
 */
bool die_will_crash(void)
{
	if (should_fadump_crash())
		return true;
	if (kexec_should_crash(current))
		return true;
	if (in_interrupt() || panic_on_oops || !current->pid ||
	    is_global_init(current))
		return true;

	return false;
}

static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
static int die_counter;
extern void panic_flush_kmsg_start(void)
{
	/*
	 * These are mostly taken from kernel/panic.c, but try to do
	 * relatively minimal work. Don't use delay functions (TB may
	 * be broken), don't crash dump (need to set a firmware log),
	 * don't run notifiers. We do want to get some information to
	 * Linux console.
	 */
	console_verbose();
	bust_spinlocks(1);
}

extern void panic_flush_kmsg_end(void)
{
	printk_safe_flush_on_panic();
	kmsg_dump(KMSG_DUMP_PANIC);
	bust_spinlocks(0);
	console_flush_on_panic();
}
static unsigned long oops_begin(struct pt_regs *regs)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	if (machine_is(powermac))
		pmac_backlight_unblank();
	return flags;
}
NOKPROBE_SYMBOL(oops_begin);
static void oops_end(unsigned long flags, struct pt_regs *regs,
		     int signr)
{
	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	oops_exit();
	printk("\n");
	if (!die_nest_count) {
		/* Nest count reaches zero, release the lock. */
		die_owner = -1;
		arch_spin_unlock(&die_lock);
	}
	raw_local_irq_restore(flags);

	crash_fadump(regs, "die oops");

	if (kexec_should_crash(current))
		crash_kexec(regs);

	if (!signr)
		return;

	/*
	 * While our oops output is serialised by a spinlock, output
	 * from panic() called below can race and corrupt it. If we
	 * know we are going to panic, delay for 1 second so we have a
	 * chance to get clean backtraces from all CPUs that are oopsing.
	 */
	if (in_interrupt() || panic_on_oops || !current->pid ||
	    is_global_init(current)) {
		mdelay(MSEC_PER_SEC);
	}

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	do_exit(signr);
}
NOKPROBE_SYMBOL(oops_end);
static int __die(const char *str, struct pt_regs *regs, long err)
{
	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);

	if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
		pr_cont("LE ");
	else
		pr_cont("BE ");

	if (IS_ENABLED(CONFIG_PREEMPT))
		pr_cont("PREEMPT ");

	if (IS_ENABLED(CONFIG_SMP))
		pr_cont("SMP NR_CPUS=%d ", NR_CPUS);

	if (debug_pagealloc_enabled())
		pr_cont("DEBUG_PAGEALLOC ");

	if (IS_ENABLED(CONFIG_NUMA))
		pr_cont("NUMA ");

	pr_cont("%s\n", ppc_md.name ? ppc_md.name : "");

	if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
		return 1;

	print_modules();
	show_regs(regs);

	return 0;
}
NOKPROBE_SYMBOL(__die);
void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags;

	if (debugger(regs))
		return;

	flags = oops_begin(regs);
	if (__die(str, regs, err))
		err = 0;
	oops_end(flags, regs, err);
}
NOKPROBE_SYMBOL(die);
void user_single_step_siginfo(struct task_struct *tsk,
				struct pt_regs *regs, siginfo_t *info)
{
	memset(info, 0, sizeof(*info));
	info->si_signo = SIGTRAP;
	info->si_code = TRAP_TRACE;
	info->si_addr = (void __user *)regs->nip;
}
void _exception_pkey(int signr, struct pt_regs *regs, int code,
		     unsigned long addr, int key)
{
	siginfo_t info;
	const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
		"at %08lx nip %08lx lr %08lx code %x\n";
	const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
		"at %016lx nip %016lx lr %016lx code %x\n";

	if (!user_mode(regs)) {
		die("Exception in kernel mode", regs, signr);
		return;
	}

	if (show_unhandled_signals && unhandled_signal(current, signr)) {
		printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
				   current->comm, current->pid, signr,
				   addr, regs->nip, regs->link, code);
	}

	if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
		local_irq_enable();

	current->thread.trap_nr = code;

	/*
	 * Save all the pkey registers AMR/IAMR/UAMOR. Eg: Core dumps need
	 * to capture the content, if the task gets killed.
	 */
	thread_pkey_regs_save(&current->thread);

	memset(&info, 0, sizeof(info));
	info.si_signo = signr;
	info.si_code = code;
	info.si_addr = (void __user *) addr;
	info.si_pkey = key;

	force_sig_info(signr, &info, current);
}
void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
	_exception_pkey(signr, regs, code, addr, 0);
}
void system_reset_exception(struct pt_regs *regs)
{
	/*
	 * Avoid crashes in case of nested NMI exceptions. Recoverability
	 * is determined by RI and in_nmi
	 */
	bool nested = in_nmi();
	if (!nested)
		nmi_enter();

	__this_cpu_inc(irq_stat.sreset_irqs);

	/* See if any machine dependent calls */
	if (ppc_md.system_reset_exception) {
		if (ppc_md.system_reset_exception(regs))
			goto out;
	}

	if (debugger(regs))
		goto out;

	/*
	 * A system reset is a request to dump, so we always send
	 * it through the crashdump code (if fadump or kdump are
	 * registered).
	 */
	crash_fadump(regs, "System Reset");

	crash_kexec(regs);

	/*
	 * We aren't the primary crash CPU. We need to send it
	 * to a holding pattern to avoid it ending up in the panic
	 * code.
	 */
	crash_kexec_secondary(regs);

	/*
	 * No debugger or crash dump registered, print logs then
	 * panic.
	 */
	die("System Reset", regs, SIGABRT);

	mdelay(2*MSEC_PER_SEC); /* Wait a little while for others to print */
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	nmi_panic(regs, "System Reset");

out:
#ifdef CONFIG_PPC_BOOK3S_64
	BUG_ON(get_paca()->in_nmi == 0);
	if (get_paca()->in_nmi > 1)
		nmi_panic(regs, "Unrecoverable nested System Reset");
#endif
	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		nmi_panic(regs, "Unrecoverable System Reset");

	if (!nested)
		nmi_exit();

	/* What should we do here? We could issue a shutdown or hard reset. */
}
/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 * Note that the 601 only takes a machine check on TEA
 * (transfer error ack) signal assertion, and does not
 * set any of the top 16 bits of SRR1.
 *  -- paulus.
 */
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	unsigned long msr = regs->msr;
	const struct exception_table_entry *entry;
	unsigned int *nip = (unsigned int *)regs->nip;

	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
	    && (entry = search_exception_tables(regs->nip)) != NULL) {
		/*
		 * Check that it's a sync instruction, or somewhere
		 * in the twi; isync; nop sequence that inb/inw/inl uses.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 * For the debug message, we look at the preceding
		 * load or store.
		 */
		if (*nip == PPC_INST_NOP)
			nip -= 2;
		else if (*nip == PPC_INST_ISYNC)
			--nip;
		if (*nip == PPC_INST_SYNC || (*nip >> 26) == OP_TRAP) {
			unsigned int rb;

			--nip;
			rb = (*nip >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %p\n",
			       (*nip & 0x100)? "OUT to": "IN from",
			       regs->gpr[rb] - _IO_BASE, nip);
			regs->msr |= MSR_RI;
			regs->nip = extable_fixup(entry);
			return 1;
		}
	}
#endif /* CONFIG_PPC32 */
	return 0;
}
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/* On 4xx, the reason for the machine check or program exception
   is in the ESR. */
#define get_reason(regs)	((regs)->dsisr)
#define REASON_FP		ESR_FP
#define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED	ESR_PPR
#define REASON_TRAP		ESR_PTR

/* single-step stuff */
#define single_stepping(regs)	(current->thread.debug.dbcr0 & DBCR0_IC)
#define clear_single_step(regs)	(current->thread.debug.dbcr0 &= ~DBCR0_IC)

#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs)	((regs)->msr)
#define REASON_TM		SRR1_PROGTM
#define REASON_FP		SRR1_PROGFPE
#define REASON_ILLEGAL		SRR1_PROGILL
#define REASON_PRIVILEGED	SRR1_PROGPRIV
#define REASON_TRAP		SRR1_PROGTRAP

#define single_stepping(regs)	((regs)->msr & MSR_SE)
#define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
#endif
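/*
 * Either way, the handlers below can test the cause generically; for
 * example, program_check_exception() checks (reason & REASON_ILLEGAL)
 * without caring whether the bit came from SRR1 or from the ESR.
 */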
#if defined(CONFIG_E500)
int machine_check_e500mc(struct pt_regs *regs)
{
	unsigned long mcsr = mfspr(SPRN_MCSR);
	unsigned long pvr = mfspr(SPRN_PVR);
	unsigned long reason = mcsr;
	int recoverable = 1;

	if (reason & MCSR_LD) {
		recoverable = fsl_rio_mcheck_exception(regs);
		if (recoverable == 1)
			goto silent_out;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");

	if (reason & MCSR_ICPERR) {
		printk("Instruction Cache Parity Error\n");

		/*
		 * This is recoverable by invalidating the i-cache.
		 */
		mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
		while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
			;

		/*
		 * This will generally be accompanied by an instruction
		 * fetch error report -- only treat MCSR_IF as fatal
		 * if it wasn't due to an L1 parity error.
		 */
		reason &= ~MCSR_IF;
	}

	if (reason & MCSR_DCPERR_MC) {
		printk("Data Cache Parity Error\n");

		/*
		 * In write shadow mode we auto-recover from the error, but it
		 * may still get logged and cause a machine check. We should
		 * only treat the non-write shadow case as non-recoverable.
		 */
		/* On e6500 core, L1 DCWS (Data cache write shadow mode) bit
		 * is not implemented but L1 data cache always runs in write
		 * shadow mode. Hence on data cache parity errors HW will
		 * automatically invalidate the L1 Data Cache.
		 */
		if (PVR_VER(pvr) != PVR_VER_E6500) {
			if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
				recoverable = 0;
		}
	}

	if (reason & MCSR_L2MMU_MHIT) {
		printk("Hit on multiple TLB entries\n");
		recoverable = 0;
	}

	if (reason & MCSR_NMI)
		printk("Non-maskable interrupt\n");

	if (reason & MCSR_IF) {
		printk("Instruction Fetch Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LD) {
		printk("Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_ST) {
		printk("Store Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LDG) {
		printk("Guarded Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_TLBSYNC)
		printk("Simultaneous tlbsync operations\n");

	if (reason & MCSR_BSL2_ERR) {
		printk("Level 2 Cache Error\n");
		recoverable = 0;
	}

	if (reason & MCSR_MAV) {
		u64 addr;

		addr = mfspr(SPRN_MCAR);
		addr |= (u64)mfspr(SPRN_MCARU) << 32;

		printk("Machine Check %s Address: %#llx\n",
		       reason & MCSR_MEA ? "Effective" : "Physical", addr);
	}

silent_out:
	mtspr(SPRN_MCSR, mcsr);
	return mfspr(SPRN_MCSR) == 0 && recoverable;
}
int machine_check_e500(struct pt_regs *regs)
{
	unsigned long reason = mfspr(SPRN_MCSR);

	if (reason & MCSR_BUS_RBERR) {
		if (fsl_rio_mcheck_exception(regs))
			return 1;
		if (fsl_pci_mcheck_exception(regs))
			return 1;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_ICPERR)
		printk("Instruction Cache Parity Error\n");
	if (reason & MCSR_DCP_PERR)
		printk("Data Cache Push Parity Error\n");
	if (reason & MCSR_DCPERR)
		printk("Data Cache Parity Error\n");
	if (reason & MCSR_BUS_IAERR)
		printk("Bus - Instruction Address Error\n");
	if (reason & MCSR_BUS_RAERR)
		printk("Bus - Read Address Error\n");
	if (reason & MCSR_BUS_WAERR)
		printk("Bus - Write Address Error\n");
	if (reason & MCSR_BUS_IBERR)
		printk("Bus - Instruction Data Error\n");
	if (reason & MCSR_BUS_RBERR)
		printk("Bus - Read Data Bus Error\n");
	if (reason & MCSR_BUS_WBERR)
		printk("Bus - Write Data Bus Error\n");
	if (reason & MCSR_BUS_IPERR)
		printk("Bus - Instruction Parity Error\n");
	if (reason & MCSR_BUS_RPERR)
		printk("Bus - Read Parity Error\n");

	return 0;
}

int machine_check_generic(struct pt_regs *regs)
{
	return 0;
}
#elif defined(CONFIG_E200)
int machine_check_e200(struct pt_regs *regs)
{
	unsigned long reason = mfspr(SPRN_MCSR);

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_CP_PERR)
		printk("Cache Push Parity Error\n");
	if (reason & MCSR_CPERR)
		printk("Cache Parity Error\n");
	if (reason & MCSR_EXCP_ERR)
		printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
	if (reason & MCSR_BUS_IRERR)
		printk("Bus - Read Bus Error on instruction fetch\n");
	if (reason & MCSR_BUS_DRERR)
		printk("Bus - Read Bus Error on data load\n");
	if (reason & MCSR_BUS_WRERR)
		printk("Bus - Write Bus Error on buffered store or cache line push\n");

	return 0;
}
#elif defined(CONFIG_PPC32)
int machine_check_generic(struct pt_regs *regs)
{
	unsigned long reason = regs->msr;

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from SRR1=%lx): ", reason);
	switch (reason & 0x601F0000) {
	case 0x80000:
		printk("Machine check signal\n");
		break;
	case 0:		/* for 601 */
	case 0x40000:
	case 0x140000:	/* 7450 MSS error and TEA */
		printk("Transfer error ack signal\n");
		break;
	case 0x20000:
		printk("Data parity error signal\n");
		break;
	case 0x10000:
		printk("Address parity error signal\n");
		break;
	case 0x20000000:
		printk("L1 Data Cache error\n");
		break;
	case 0x40000000:
		printk("L1 Instruction Cache error\n");
		break;
	case 0x00100000:
		printk("L2 data cache parity error\n");
		break;
	default:
		printk("Unknown values in msr\n");
	}
	return 0;
}
#endif /* everything else */
void machine_check_exception(struct pt_regs *regs)
{
	int recover = 0;
	bool nested = in_nmi();
	if (!nested)
		nmi_enter();

	/* 64s accounts the mce in machine_check_early when in HVMODE */
	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !cpu_has_feature(CPU_FTR_HVMODE))
		__this_cpu_inc(irq_stat.mce_exceptions);

	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

	/* See if any machine dependent calls. In theory, we would want
	 * to call the CPU first, and call the ppc_md. one if the CPU
	 * one returns a positive number. However there is existing code
	 * that assumes the board gets a first chance, so let's keep it
	 * that way for now and fix things later. --BenH.
	 */
	if (ppc_md.machine_check_exception)
		recover = ppc_md.machine_check_exception(regs);
	else if (cur_cpu_spec->machine_check)
		recover = cur_cpu_spec->machine_check(regs);

	if (recover > 0)
		goto bail;

	if (debugger_fault_handler(regs))
		goto bail;

	if (check_io_access(regs))
		goto bail;

	die("Machine check", regs, SIGBUS);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		nmi_panic(regs, "Unrecoverable Machine check");

bail:
	if (!nested)
		nmi_exit();
}
void SMIException(struct pt_regs *regs)
{
	die("System Management Interrupt", regs, SIGABRT);
}
#ifdef CONFIG_VSX
static void p9_hmi_special_emu(struct pt_regs *regs)
{
	unsigned int ra, rb, t, i, sel, instr, rc;
	const void __user *addr;
	u8 vbuf[16], *vdst;
	unsigned long ea, msr, msr_mask;
	bool swap;

	if (__get_user_inatomic(instr, (unsigned int __user *)regs->nip))
		return;

	/*
	 * lxvb16x	opcode: 0x7c0006d8
	 * lxvd2x	opcode: 0x7c000698
	 * lxvh8x	opcode: 0x7c000658
	 * lxvw4x	opcode: 0x7c000618
	 */
	if ((instr & 0xfc00073e) != 0x7c000618) {
		pr_devel("HMI vec emu: not vector CI %i:%s[%d] nip=%016lx"
			 " instr=%08x\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr);
		return;
	}

	/* Grab vector registers into the task struct */
	msr = regs->msr; /* Grab msr before we flush the bits */
	flush_vsx_to_thread(current);
	enable_kernel_altivec();

	/*
	 * Is userspace running with a different endian (this is rare but
	 * not impossible)
	 */
	swap = (msr & MSR_LE) != (MSR_KERNEL & MSR_LE);

	/* Decode the instruction */
	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	t = (instr >> 21) & 0x1f;
	if (instr & 1)
		vdst = (u8 *)&current->thread.vr_state.vr[t];
	else
		vdst = (u8 *)&current->thread.fp_state.fpr[t][0];

	/* Grab the vector address */
	ea = regs->gpr[rb] + (ra ? regs->gpr[ra] : 0);
	if (is_32bit_task())
		ea &= 0xfffffffful;
	addr = (__force const void __user *)ea;

	/* Check it */
	if (!access_ok(VERIFY_READ, addr, 16)) {
		pr_devel("HMI vec emu: bad access %i:%s[%d] nip=%016lx"
			 " instr=%08x addr=%016lx\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr, (unsigned long)addr);
		return;
	}

	/* Read the vector */
	rc = 0;
	if ((unsigned long)addr & 0xfUL)
		/* unaligned case */
		rc = __copy_from_user_inatomic(vbuf, addr, 16);
	else
		__get_user_atomic_128_aligned(vbuf, addr, rc);
	if (rc) {
		pr_devel("HMI vec emu: page fault %i:%s[%d] nip=%016lx"
			 " instr=%08x addr=%016lx\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr, (unsigned long)addr);
		return;
	}

	pr_devel("HMI vec emu: emulated vector CI %i:%s[%d] nip=%016lx"
		 " instr=%08x addr=%016lx\n",
		 smp_processor_id(), current->comm, current->pid, regs->nip,
		 instr, (unsigned long) addr);

	/* Grab instruction "selector" */
	sel = (instr >> 6) & 3;

	/*
	 * Check to make sure the facility is actually enabled. This
	 * could happen if we get a false positive hit.
	 *
	 * lxvd2x/lxvw4x always check MSR VSX sel = 0,2
	 * lxvh8x/lxvb16x check MSR VSX or VEC depending on VSR used sel = 1,3
	 */
	msr_mask = MSR_VSX;
	if ((sel & 1) && (instr & 1)) /* lxvh8x & lxvb16x + VSR >= 32 */
		msr_mask = MSR_VEC;
	if (!(msr & msr_mask)) {
		pr_devel("HMI vec emu: MSR fac clear %i:%s[%d] nip=%016lx"
			 " instr=%08x msr:%016lx\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr, msr);
		return;
	}

	/* Do logging here before we modify sel based on endian */
	switch (sel) {
	case 0:	/* lxvw4x */
		PPC_WARN_EMULATED(lxvw4x, regs);
		break;
	case 1: /* lxvh8x */
		PPC_WARN_EMULATED(lxvh8x, regs);
		break;
	case 2: /* lxvd2x */
		PPC_WARN_EMULATED(lxvd2x, regs);
		break;
	case 3: /* lxvb16x */
		PPC_WARN_EMULATED(lxvb16x, regs);
		break;
	}

#ifdef __LITTLE_ENDIAN__
	/*
	 * An LE kernel stores the vector in the task struct as an LE
	 * byte array (effectively swapping both the components and
	 * the content of the components). Those instructions expect
	 * the components to remain in ascending address order, so we
	 * swap them back.
	 *
	 * If we are running a BE user space, the expectation is that
	 * of a simple memcpy, so forcing the emulation to look like
	 * a lxvb16x should do the trick.
	 */
	if (swap)
		sel = 3;

	switch (sel) {
	case 0:	/* lxvw4x */
		for (i = 0; i < 4; i++)
			((u32 *)vdst)[i] = ((u32 *)vbuf)[3-i];
		break;
	case 1: /* lxvh8x */
		for (i = 0; i < 8; i++)
			((u16 *)vdst)[i] = ((u16 *)vbuf)[7-i];
		break;
	case 2: /* lxvd2x */
		for (i = 0; i < 2; i++)
			((u64 *)vdst)[i] = ((u64 *)vbuf)[1-i];
		break;
	case 3: /* lxvb16x */
		for (i = 0; i < 16; i++)
			vdst[i] = vbuf[15-i];
		break;
	}
#else /* __LITTLE_ENDIAN__ */
	/* On a big endian kernel, a BE userspace only needs a memcpy */
	if (!swap)
		sel = 3;

	/* Otherwise, we need to swap the content of the components */
	switch (sel) {
	case 0:	/* lxvw4x */
		for (i = 0; i < 4; i++)
			((u32 *)vdst)[i] = cpu_to_le32(((u32 *)vbuf)[i]);
		break;
	case 1: /* lxvh8x */
		for (i = 0; i < 8; i++)
			((u16 *)vdst)[i] = cpu_to_le16(((u16 *)vbuf)[i]);
		break;
	case 2: /* lxvd2x */
		for (i = 0; i < 2; i++)
			((u64 *)vdst)[i] = cpu_to_le64(((u64 *)vbuf)[i]);
		break;
	case 3: /* lxvb16x */
		memcpy(vdst, vbuf, 16);
		break;
	}
#endif /* !__LITTLE_ENDIAN__ */

	/* Go to next instruction */
	regs->nip += 4;
}
#endif /* CONFIG_VSX */
void handle_hmi_exception(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	old_regs = set_irq_regs(regs);
	irq_enter();

#ifdef CONFIG_VSX
	/* Real mode flagged P9 special emu is needed */
	if (local_paca->hmi_p9_special_emu) {
		local_paca->hmi_p9_special_emu = 0;

		/*
		 * We don't want to take page faults while doing the
		 * emulation, we just replay the instruction if necessary.
		 */
		pagefault_disable();
		p9_hmi_special_emu(regs);
		pagefault_enable();
	}
#endif /* CONFIG_VSX */

	if (ppc_md.handle_hmi_exception)
		ppc_md.handle_hmi_exception(regs);

	irq_exit();
	set_irq_regs(old_regs);
}
void unknown_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
	       regs->nip, regs->msr, regs->trap);

	_exception(SIGTRAP, regs, TRAP_FIXME, 0);

	exception_exit(prev_state);
}
void instruction_breakpoint_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_iabr_match(regs))
		goto bail;
	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);

bail:
	exception_exit(prev_state);
}
void RunModeException(struct pt_regs *regs)
{
	_exception(SIGTRAP, regs, TRAP_FIXME, 0);
}
void single_step_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	clear_single_step(regs);

	if (kprobe_post_handler(regs))
		return;

	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_sstep(regs))
		goto bail;

	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);

bail:
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(single_step_exception);
/*
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception.  This was pointed out
 * by Kumar Gala.  -- paulus
 */
static void emulate_single_step(struct pt_regs *regs)
{
	if (single_stepping(regs))
		single_step_exception(regs);
}
static inline int __parse_fpscr(unsigned long fpscr)
{
	int ret = FPE_FIXME;

	/* Invalid operation */
	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
		ret = FPE_FLTINV;

	/* Overflow */
	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
		ret = FPE_FLTOVF;

	/* Underflow */
	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
		ret = FPE_FLTUND;

	/* Divide by zero */
	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
		ret = FPE_FLTDIV;

	/* Inexact result */
	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
		ret = FPE_FLTRES;

	return ret;
}
static void parse_fpe(struct pt_regs *regs)
{
	int code = 0;

	flush_fp_to_thread(current);

	code = __parse_fpscr(current->thread.fp_state.fpscr);

	_exception(SIGFPE, regs, code, regs->nip);
}
/*
 * Illegal instruction emulation support.  Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault.  Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits.  In this case, matching lots of
 * bits is faster and easier.
 */
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 NB_RB = (instword >> 11) & 0x1f;
	u32 num_bytes;
	unsigned long EA;
	int pos = 0;

	/* Early out if we are an invalid form of lswx */
	if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
		if ((rT == rA) || (rT == NB_RB))
			return -EINVAL;

	EA = (rA == 0) ? 0 : regs->gpr[rA];

	switch (instword & PPC_INST_STRING_MASK) {
		case PPC_INST_LSWX:
		case PPC_INST_STSWX:
			EA += regs->gpr[NB_RB];
			num_bytes = regs->xer & 0x7f;
			break;
		case PPC_INST_LSWI:
		case PPC_INST_STSWI:
			num_bytes = (NB_RB == 0) ? 32 : NB_RB;
			break;
		default:
			return -EINVAL;
	}

	while (num_bytes != 0)
	{
		u8 val;
		u32 shift = 8 * (3 - (pos & 0x3));

		/* if process is 32-bit, clear upper 32 bits of EA */
		if ((regs->msr & MSR_64BIT) == 0)
			EA &= 0xFFFFFFFF;

		switch ((instword & PPC_INST_STRING_MASK)) {
			case PPC_INST_LSWX:
			case PPC_INST_LSWI:
				if (get_user(val, (u8 __user *)EA))
					return -EFAULT;
				/* first time updating this reg,
				 * zero it out */
				if (pos == 0)
					regs->gpr[rT] = 0;
				regs->gpr[rT] |= val << shift;
				break;
			case PPC_INST_STSWI:
			case PPC_INST_STSWX:
				val = regs->gpr[rT] >> shift;
				if (put_user(val, (u8 __user *)EA))
					return -EFAULT;
				break;
		}
		/* move EA to next address */
		EA += 1;
		num_bytes--;

		/* manage our position within the register */
		if (++pos == 4) {
			pos = 0;
			if (++rT == 32)
				rT = 0;
		}
	}

	return 0;
}
static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
{
	u32 ra, rs;
	unsigned long tmp;

	ra = (instword >> 16) & 0x1f;
	rs = (instword >> 21) & 0x1f;

	tmp = regs->gpr[rs];
	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
	regs->gpr[ra] = tmp;

	return 0;
}
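/*
 * Worked example of the SWAR steps above for one byte, 0xb4 (10110100,
 * four bits set): 0xb4 -> 0x64 (per 2-bit pair counts), then 0x31 (per
 * nibble counts), then 0x04 after the final add and mask. popcntb does
 * this for every byte of the source register in parallel.
 */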
static int emulate_isel(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 rB = (instword >> 11) & 0x1f;
	u8 BC = (instword >> 6) & 0x1f;
	u8 bit;
	unsigned long tmp;

	tmp = (rA == 0) ? 0 : regs->gpr[rA];
	bit = (regs->ccr >> (31 - BC)) & 0x1;

	regs->gpr[rT] = bit ? tmp : regs->gpr[rB];

	return 0;
}
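/*
 * isel example: "isel r3,r4,r5,0" tests CR bit 0 (CR0[LT]). If the bit is
 * set, r3 gets r4 (or 0 had rA been r0); otherwise r3 gets r5, which is
 * exactly what the emulation above computes.
 */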
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline bool tm_abort_check(struct pt_regs *regs, int cause)
{
	/* If we're emulating a load/store in an active transaction, we cannot
	 * emulate it as the kernel operates in transaction suspended context.
	 * We need to abort the transaction.  This creates a persistent TM
	 * abort so tell the user what caused it with a new code.
	 */
	if (MSR_TM_TRANSACTIONAL(regs->msr)) {
		tm_enable();
		tm_abort(cause);
		return true;
	}
	return false;
}
#else
static inline bool tm_abort_check(struct pt_regs *regs, int reason)
{
	return false;
}
#endif
static int emulate_instruction(struct pt_regs *regs)
{
	u32 instword;
	u32 rd;

	if (!user_mode(regs))
		return -EINVAL;
	CHECK_FULL_REGS(regs);

	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EFAULT;

	/* Emulate the mfspr rD, PVR. */
	if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
		PPC_WARN_EMULATED(mfpvr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op.  */
	if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
		PPC_WARN_EMULATED(dcba, regs);
		return 0;
	}

	/* Emulate the mcrxr insn.  */
	if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
		int shift = (instword >> 21) & 0x1c;
		unsigned long msk = 0xf0000000UL >> shift;

		PPC_WARN_EMULATED(mcrxr, regs);
		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}

	/* Emulate load/store string insn. */
	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
		if (tm_abort_check(regs,
				   TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
			return -EINVAL;
		PPC_WARN_EMULATED(string, regs);
		return emulate_string_inst(regs, instword);
	}

	/* Emulate the popcntb (Population Count Bytes) instruction. */
	if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
		PPC_WARN_EMULATED(popcntb, regs);
		return emulate_popcntb_inst(regs, instword);
	}

	/* Emulate isel (Integer Select) instruction */
	if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
		PPC_WARN_EMULATED(isel, regs);
		return emulate_isel(regs, instword);
	}

	/* Emulate sync instruction variants */
	if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) {
		PPC_WARN_EMULATED(sync, regs);
		asm volatile("sync");
		return 0;
	}

#ifdef CONFIG_PPC64
	/* Emulate the mfspr rD, DSCR. */
	if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
		PPC_INST_MFSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
		PPC_INST_MFSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mfdscr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_DSCR);
		return 0;
	}
	/* Emulate the mtspr DSCR, rD. */
	if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
		PPC_INST_MTSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
		PPC_INST_MTSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mtdscr, regs);
		rd = (instword >> 21) & 0x1f;
		current->thread.dscr = regs->gpr[rd];
		current->thread.dscr_inherit = 1;
		mtspr(SPRN_DSCR, current->thread.dscr);
		return 0;
	}
#endif

	return -EINVAL;
}
int is_valid_bugaddr(unsigned long addr)
{
	return is_kernel_addr(addr);
}
#ifdef CONFIG_MATH_EMULATION
static int emulate_math(struct pt_regs *regs)
{
	int ret;
	extern int do_mathemu(struct pt_regs *regs);

	ret = do_mathemu(regs);
	if (ret >= 0)
		PPC_WARN_EMULATED(math, regs);

	switch (ret) {
	case 0:
		emulate_single_step(regs);
		return 0;
	case 1: {
		int code = 0;
		code = __parse_fpscr(current->thread.fp_state.fpscr);
		_exception(SIGFPE, regs, code, regs->nip);
		return 0;
	}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return 0;
	}

	return -1;
}
#else
static inline int emulate_math(struct pt_regs *regs) { return -1; }
#endif
void program_check_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	unsigned int reason = get_reason(regs);

	/* We can now get here via a FP Unavailable exception if the core
	 * has no FPU, in that case the reason flags will be 0 */

	if (reason & REASON_FP) {
		/* IEEE FP exception */
		parse_fpe(regs);
		goto bail;
	}
	if (reason & REASON_TRAP) {
		unsigned long bugaddr;
		/* Debugger is first in line to stop recursive faults in
		 * rcu_lock, notify_die, or atomic_notifier_call_chain */
		if (debugger_bpt(regs))
			goto bail;

		if (kprobe_handler(regs))
			goto bail;

		/* trap exception */
		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
				== NOTIFY_STOP)
			goto bail;

		bugaddr = regs->nip;
		/*
		 * Fixup bugaddr for BUG_ON() in real mode
		 */
		if (!is_kernel_addr(bugaddr) && !(regs->msr & MSR_IR))
			bugaddr += PAGE_OFFSET;

		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
		    report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			goto bail;
		}
		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
		goto bail;
	}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (reason & REASON_TM) {
		/* This is a TM "Bad Thing Exception" program check.
		 * This occurs when:
		 * -  An rfid/hrfid/mtmsrd attempts to cause an illegal
		 *    transition in TM states.
		 * -  A trechkpt is attempted when transactional.
		 * -  A treclaim is attempted when non transactional.
		 * -  A tend is illegally attempted.
		 * -  writing a TM SPR when transactional.
		 *
		 * If usermode caused this, it's done something illegal and
		 * gets a SIGILL slap on the wrist.  We call it an illegal
		 * operand to distinguish from the instruction just being bad
		 * (e.g. executing a 'tend' on a CPU without TM!); it's an
		 * illegal /placement/ of a valid instruction.
		 */
		if (user_mode(regs)) {
			_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
			goto bail;
		} else {
			printk(KERN_EMERG "Unexpected TM Bad Thing exception "
			       "at %lx (msr 0x%x)\n", regs->nip, reason);
			die("Unrecoverable exception", regs, SIGABRT);
		}
	}
#endif

	/*
	 * If we took the program check in the kernel skip down to sending a
	 * SIGILL. The subsequent cases all relate to emulating instructions
	 * which we should only do for userspace. We also do not want to enable
	 * interrupts for kernel faults because that might lead to further
	 * faults, and lose the context of the original exception.
	 */
	if (!user_mode(regs))
		goto sigill;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
	 * but there seems to be a hardware bug on the 405GP (RevD)
	 * that means ESR is sometimes set incorrectly - either to
	 * ESR_DST (!?) or 0.  In the process of chasing this with the
	 * hardware people - not sure if it can happen on any illegal
	 * instruction or only on FP instructions, whether there is a
	 * pattern to occurrences etc. -dgibson 31/Mar/2003
	 */
	if (!emulate_math(regs))
		goto bail;

	/* Try to emulate it if we should. */
	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
		switch (emulate_instruction(regs)) {
		case 0:
			regs->nip += 4;
			emulate_single_step(regs);
			goto bail;
		case -EFAULT:
			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
			goto bail;
		}
	}

sigill:
	if (reason & REASON_PRIVILEGED)
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	else
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);

bail:
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(program_check_exception);
/*
 * This occurs when running in hypervisor mode on POWER6 or later
 * and an illegal instruction is encountered.
 */
void emulation_assist_interrupt(struct pt_regs *regs)
{
	regs->msr |= REASON_ILLEGAL;
	program_check_exception(regs);
}
NOKPROBE_SYMBOL(emulation_assist_interrupt);
void alignment_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	int sig, code, fixed = 0;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
		goto bail;

	/* we don't implement logging of alignment exceptions */
	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		fixed = fix_alignment(regs);

	if (fixed == 1) {
		regs->nip += 4;	/* skip over emulated instruction */
		emulate_single_step(regs);
		goto bail;
	}

	/* Operand address was bad */
	if (fixed == -EFAULT) {
		sig = SIGSEGV;
		code = SEGV_ACCERR;
	} else {
		sig = SIGBUS;
		code = BUS_ADRALN;
	}
	if (user_mode(regs))
		_exception(sig, regs, code, regs->dar);
	else
		bad_page_fault(regs, regs->dar, sig);

bail:
	exception_exit(prev_state);
}
void slb_miss_bad_addr(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (user_mode(regs))
		_exception(SIGSEGV, regs, SEGV_BNDERR, regs->dar);
	else
		bad_page_fault(regs, regs->dar, SIGSEGV);

	exception_exit(prev_state);
}
void StackOverflow(struct pt_regs *regs)
{
	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
	       current, regs->gpr[1]);
	debugger(regs);
	show_regs(regs);
	panic("kernel stack overflow");
}
void nonrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
	       regs->nip, regs->msr);
	die("nonrecoverable exception", regs, SIGKILL);
}
void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);

	exception_exit(prev_state);
}
void altivec_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (user_mode(regs)) {
		/* A user program has executed an altivec instruction,
		   but this kernel doesn't support altivec. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		goto bail;
	}

	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);

bail:
	exception_exit(prev_state);
}
void vsx_unavailable_exception(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		/* A user program has executed a VSX instruction,
		   but this kernel doesn't support VSX. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}
#ifdef CONFIG_PPC64
static void tm_unavailable(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (user_mode(regs)) {
		current->thread.load_tm++;
		regs->msr |= MSR_TM;
		tm_enable();
		tm_restore_sprs(&current->thread);
		return;
	}
#endif
	pr_emerg("Unrecoverable TM Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable TM Unavailable Exception", regs, SIGABRT);
}
void facility_unavailable_exception(struct pt_regs *regs)
{
	static char *facility_strings[] = {
		[FSCR_FP_LG] = "FPU",
		[FSCR_VECVSX_LG] = "VMX/VSX",
		[FSCR_DSCR_LG] = "DSCR",
		[FSCR_PM_LG] = "PMU SPRs",
		[FSCR_BHRB_LG] = "BHRB",
		[FSCR_TM_LG] = "TM",
		[FSCR_EBB_LG] = "EBB",
		[FSCR_TAR_LG] = "TAR",
		[FSCR_MSGP_LG] = "MSGP",
		[FSCR_SCV_LG] = "SCV",
	};
	char *facility = "unknown";
	u64 value;
	u32 instword, rd;
	u8 status;
	bool hv;

	hv = (TRAP(regs) == 0xf80);
	if (hv)
		value = mfspr(SPRN_HFSCR);
	else
		value = mfspr(SPRN_FSCR);

	status = value >> 56;
	if (status == FSCR_DSCR_LG) {
		/*
		 * User is accessing the DSCR register using the problem
		 * state only SPR number (0x03) either through a mfspr or
		 * a mtspr instruction. If it is a write attempt through
		 * a mtspr, then we set the inherit bit. This also allows
		 * the user to write or read the register directly in the
		 * future by setting via the FSCR DSCR bit. But in case it
		 * is a read DSCR attempt through a mfspr instruction, we
		 * just emulate the instruction instead. This code path will
		 * always emulate all the mfspr instructions till the user
		 * has attempted at least one mtspr instruction. This way it
		 * preserves the same behaviour when the user is accessing
		 * the DSCR through privilege level only SPR number (0x11)
		 * which is emulated through illegal instruction exception.
		 * We always leave HFSCR DSCR set.
		 */
		if (get_user(instword, (u32 __user *)(regs->nip))) {
			pr_err("Failed to fetch the user instruction\n");
			return;
		}

		/* Write into DSCR (mtspr 0x03, RS) */
		if ((instword & PPC_INST_MTSPR_DSCR_USER_MASK)
				== PPC_INST_MTSPR_DSCR_USER) {
			rd = (instword >> 21) & 0x1f;
			current->thread.dscr = regs->gpr[rd];
			current->thread.dscr_inherit = 1;
			current->thread.fscr |= FSCR_DSCR;
			mtspr(SPRN_FSCR, current->thread.fscr);
		}

		/* Read from DSCR (mfspr RT, 0x03) */
		if ((instword & PPC_INST_MFSPR_DSCR_USER_MASK)
				== PPC_INST_MFSPR_DSCR_USER) {
			if (emulate_instruction(regs)) {
				pr_err("DSCR based mfspr emulation failed\n");
				return;
			}
		}

		regs->nip += 4;
		emulate_single_step(regs);
		return;
	}

	if (status == FSCR_TM_LG) {
		/*
		 * If we're here then the hardware is TM aware because it
		 * generated an exception with FSCR_TM set.
		 *
		 * If cpu_has_feature(CPU_FTR_TM) is false, then either firmware
		 * told us not to do TM, or the kernel is not built with TM
		 * support.
		 *
		 * If both of those things are true, then userspace can spam the
		 * console by triggering the printk() below just by continually
		 * doing tbegin (or any TM instruction). So in that case just
		 * send the process a SIGILL immediately.
		 */
		if (!cpu_has_feature(CPU_FTR_TM))
			goto out;

		tm_unavailable(regs);
		return;
	}

	if ((hv || status >= 2) &&
	    (status < ARRAY_SIZE(facility_strings)) &&
	    facility_strings[status])
		facility = facility_strings[status];

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n",
		hv ? "Hypervisor " : "", facility, status, regs->nip, regs->msr);

out:
	if (user_mode(regs)) {
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	die("Unexpected facility unavailable exception", regs, SIGABRT);
}
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM

void fp_unavailable_tm(struct pt_regs *regs)
{
	/* Note:  This does not handle any kind of FP laziness. */

	TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
		 regs->nip, regs->msr);

	/* We can only have got here if the task started using FP after
	 * beginning the transaction.  So, the transactional regs are just a
	 * copy of the checkpointed ones.  But, we still need to recheckpoint
	 * as we're enabling FP for the process; it will return, abort the
	 * transaction, and probably retry but now with FP enabled.  So the
	 * checkpointed FP registers need to be loaded.
	 */
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
	/* Reclaim didn't save out any FPRs to transact_fprs. */

	/* Enable FP for the task: */
	current->thread.load_fp = 1;

	/* This loads and recheckpoints the FP registers from
	 * thread.fpr[].  They will remain in registers after the
	 * checkpoint so we don't need to reload them after.
	 * If VMX is in use, the VRs now hold checkpointed values,
	 * so we don't want to load the VRs from the thread_struct.
	 */
	tm_recheckpoint(&current->thread);
}

void altivec_unavailable_tm(struct pt_regs *regs)
{
	/* See the comments in fp_unavailable_tm().  This function operates
	 * the same way.
	 */

	TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx,"
		 " MSR=%lx\n",
		 regs->nip, regs->msr);
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
	current->thread.load_vec = 1;
	tm_recheckpoint(&current->thread);
	current->thread.used_vr = 1;
}

void vsx_unavailable_tm(struct pt_regs *regs)
{
	/* See the comments in fp_unavailable_tm().  This works similarly,
	 * though we're loading both FP and VEC registers in here.
	 *
	 * If FP isn't in use, load FP regs.  If VEC isn't in use, load VEC
	 * regs.  Either way, set MSR_VSX.
	 */

	TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx,"
		 " MSR=%lx\n",
		 regs->nip, regs->msr);

	current->thread.used_vsr = 1;

	/* This reclaims FP and/or VR regs if they're already enabled */
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);

	current->thread.load_vec = 1;
	current->thread.load_fp = 1;

	tm_recheckpoint(&current->thread);
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
void performance_monitor_exception(struct pt_regs *regs)
{
	__this_cpu_inc(irq_stat.pmu_irqs);

	perf_irq(regs);
}
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
{
	int changed = 0;
	/*
	 * Determine the cause of the debug event, clear the
	 * event flags and send a trap to the handler. Torez
	 */
	if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
		dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
#endif
		do_send_trap(regs, mfspr(SPRN_DAC1), debug_status,
			     5);
		changed |= 0x01;
	} else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
		dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
		do_send_trap(regs, mfspr(SPRN_DAC2), debug_status,
			     6);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC1) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
		dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
		do_send_trap(regs, mfspr(SPRN_IAC1), debug_status,
			     1);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC2) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
		do_send_trap(regs, mfspr(SPRN_IAC2), debug_status,
			     2);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC3) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
		dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
		do_send_trap(regs, mfspr(SPRN_IAC3), debug_status,
			     3);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC4) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
		do_send_trap(regs, mfspr(SPRN_IAC4), debug_status,
			     4);
		changed |= 0x01;
	}
	/*
	 * At the point this routine was called, the MSR(DE) was turned off.
	 * Check all other debug flags and see if that bit needs to be turned
	 * back on or not.
	 */
	if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
			       current->thread.debug.dbcr1))
		regs->msr |= MSR_DE;
	else
		/* Make sure the IDM flag is off */
		current->thread.debug.dbcr0 &= ~DBCR0_IDM;

	if (changed & 0x01)
		mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
}
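/*
 * Example flow through handle_debug(): a DAC1 read watchpoint fires and
 * sets DBSR[DAC1R]; the handler disarms that event in dbcr_dac(current),
 * notifies the ptracer via do_send_trap(), and re-enables MSR[DE] only if
 * DBCR_ACTIVE_EVENTS() reports other debug events still armed.
 */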
void DebugException(struct pt_regs *regs, unsigned long debug_status)
{
	current->thread.debug.dbsr = debug_status;

	/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
	 * on server, it stops on the target of the branch. In order to simulate
	 * the server behaviour, we thus restart right away with a single step
	 * instead of stopping here when hitting a BT
	 */
	if (debug_status & DBSR_BT) {
		regs->msr &= ~MSR_DE;

		/* Disable the branch-taken event */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
		/* Clear the BT event */
		mtspr(SPRN_DBSR, DBSR_BT);

		/* Do the single step trick only when coming from userspace */
		if (user_mode(regs)) {
			current->thread.debug.dbcr0 &= ~DBCR0_BT;
			current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
			regs->msr |= MSR_DE;
			return;
		}

		if (kprobe_post_handler(regs))
			return;

		if (notify_die(DIE_SSTEP, "block_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}
		if (debugger_sstep(regs))
			return;
	} else if (debug_status & DBSR_IC) {	/* Instruction complete */
		regs->msr &= ~MSR_DE;

		/* Disable instruction completion */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
		/* Clear the instruction completion event */
		mtspr(SPRN_DBSR, DBSR_IC);

		if (kprobe_post_handler(regs))
			return;

		if (notify_die(DIE_SSTEP, "single_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}

		if (debugger_sstep(regs))
			return;

		if (user_mode(regs)) {
			current->thread.debug.dbcr0 &= ~DBCR0_IC;
			if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
					       current->thread.debug.dbcr1))
				regs->msr |= MSR_DE;
			else
				/* Make sure the IDM bit is off */
				current->thread.debug.dbcr0 &= ~DBCR0_IDM;
		}

		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
	} else
		handle_debug(regs, debug_status);
}
NOKPROBE_SYMBOL(DebugException);
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
#if !defined(CONFIG_TAU_INT)
void TAUException(struct pt_regs *regs)
{
	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
	       regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* CONFIG_TAU_INT */
#ifdef CONFIG_ALTIVEC
void altivec_assist_exception(struct pt_regs *regs)
{
	int err;

	if (!user_mode(regs)) {
		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
	}

	flush_altivec_to_thread(current);

	PPC_WARN_EMULATED(altivec, regs);
	err = emulate_altivec(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else {
		/* didn't recognize the instruction */
		/* XXX quick hack for now: set the non-Java bit in the VSCR */
		printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
				   "in %s at %lx\n", current->comm, regs->nip);
		current->thread.vr_state.vscr.u[3] |= 0x10000;
	}
}
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_FSL_BOOKE
void CacheLockingException(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	/* We treat cache locking instructions from the user
	 * as priv ops, in the future we could try to do
	 * something smarter
	 */
	if (error_code & (ESR_DLK|ESR_ILK))
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	return;
}
#endif /* CONFIG_FSL_BOOKE */
#ifdef CONFIG_SPE
void SPEFloatingPointException(struct pt_regs *regs)
{
	extern int do_spe_mathemu(struct pt_regs *regs);
	unsigned long spefscr;
	int fpexc_mode;
	int code = FPE_FIXME;
	int err;

	flush_spe_to_thread(current);

	spefscr = current->thread.spefscr;
	fpexc_mode = current->thread.fpexc_mode;

	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
		code = FPE_FLTOVF;
	}
	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
		code = FPE_FLTUND;
	}
	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
		code = FPE_FLTDIV;
	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
		code = FPE_FLTINV;
	}
	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
		code = FPE_FLTRES;

	err = do_spe_mathemu(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, code, regs->nip);
	}

	return;
}
void SPEFloatingPointRoundException(struct pt_regs *regs)
{
	extern int speround_handler(struct pt_regs *regs);
	int err;

	preempt_disable();
	if (regs->msr & MSR_SPE)
		giveup_spe(current);
	preempt_enable();

	regs->nip -= 4;
	err = speround_handler(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, FPE_FIXME, regs->nip);
	}

	return;
}
#endif /* CONFIG_SPE */
/*
 * We enter here if we get an unrecoverable exception, that is, one
 * that happened at a point where the RI (recoverable interrupt) bit
 * in the MSR is 0.  This indicates that SRR0/1 are live, and that
 * we therefore lost state by taking this exception.
 */
void unrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
	       regs->trap, regs->nip);
	die("Unrecoverable exception", regs, SIGABRT);
}
NOKPROBE_SYMBOL(unrecoverable_exception);
#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
/*
 * Default handler for a Watchdog exception,
 * spins until a reboot occurs
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
	/* Generic WatchdogHandler, implement your own */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
	return;
}

void WatchdogException(struct pt_regs *regs)
{
	printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
	WatchdogHandler(regs);
}
#endif
/*
 * We enter here if we discover during exception entry that we are
 * running in supervisor mode with a userspace value in the stack pointer.
 */
void kernel_bad_stack(struct pt_regs *regs)
{
	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
	       regs->gpr[1], regs->nip);
	die("Bad kernel stack pointer", regs, SIGABRT);
}
NOKPROBE_SYMBOL(kernel_bad_stack);
void __init trap_init(void)
{
}
#ifdef CONFIG_PPC_EMULATED_STATS

#define WARN_EMULATED_SETUP(type)	.type = { .name = #type }

struct ppc_emulated ppc_emulated = {
#ifdef CONFIG_ALTIVEC
	WARN_EMULATED_SETUP(altivec),
#endif
	WARN_EMULATED_SETUP(dcba),
	WARN_EMULATED_SETUP(dcbz),
	WARN_EMULATED_SETUP(fp_pair),
	WARN_EMULATED_SETUP(isel),
	WARN_EMULATED_SETUP(mcrxr),
	WARN_EMULATED_SETUP(mfpvr),
	WARN_EMULATED_SETUP(multiple),
	WARN_EMULATED_SETUP(popcntb),
	WARN_EMULATED_SETUP(spe),
	WARN_EMULATED_SETUP(string),
	WARN_EMULATED_SETUP(sync),
	WARN_EMULATED_SETUP(unaligned),
#ifdef CONFIG_MATH_EMULATION
	WARN_EMULATED_SETUP(math),
#endif
#ifdef CONFIG_VSX
	WARN_EMULATED_SETUP(vsx),
#endif
#ifdef CONFIG_PPC64
	WARN_EMULATED_SETUP(mfdscr),
	WARN_EMULATED_SETUP(mtdscr),
	WARN_EMULATED_SETUP(lq_stq),
	WARN_EMULATED_SETUP(lxvw4x),
	WARN_EMULATED_SETUP(lxvh8x),
	WARN_EMULATED_SETUP(lxvd2x),
	WARN_EMULATED_SETUP(lxvb16x),
#endif
};

u32 ppc_warn_emulated;

void ppc_warn_emulated_print(const char *type)
{
	pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
			    type);
}
static int __init ppc_warn_emulated_init(void)
{
	struct dentry *dir, *d;
	unsigned int i;
	struct ppc_emulated_entry *entries = (void *)&ppc_emulated;

	if (!powerpc_debugfs_root)
		return -ENODEV;

	dir = debugfs_create_dir("emulated_instructions",
				 powerpc_debugfs_root);
	if (!dir)
		return -ENOMEM;

	d = debugfs_create_u32("do_warn", 0644, dir,
			       &ppc_warn_emulated);
	if (!d)
		goto fail;

	for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
		d = debugfs_create_u32(entries[i].name, 0644, dir,
				       (u32 *)&entries[i].val.counter);
		if (!d)
			goto fail;
	}

	return 0;

fail:
	debugfs_remove_recursive(dir);
	return -ENOMEM;
}

device_initcall(ppc_warn_emulated_init);

#endif /* CONFIG_PPC_EMULATED_STATS */
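/*
 * Usage note: with CONFIG_PPC_EMULATED_STATS enabled, the counters set up
 * above are exposed as u32 debugfs files, typically under
 * /sys/kernel/debug/powerpc/emulated_instructions/. Writing a non-zero
 * value to the do_warn file there enables the ratelimited message printed
 * by ppc_warn_emulated_print().
 */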