/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
 * Copyright (C) 1995, 1996 Paul M. Antoine
 * Copyright (C) 1998 Ulf Carlsson
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
 * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/cpu_pm.h>
#include <linux/kexec.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/kdb.h>
#include <linux/irq.h>
#include <linux/perf_event.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cpu-type.h>
#include <asm/fpu_emulator.h>
#include <asm/mips-r2-to-r6-emul.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/watch.h>
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/stacktrace.h>
extern void check_wait(void);
extern asmlinkage void rollback_handle_int(void);
extern asmlinkage void handle_int(void);
extern u32 handle_tlbl[];
extern u32 handle_tlbs[];
extern u32 handle_tlbm[];
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_ri_rdhwr_vivt(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_msa_fpe(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_ftlb(void);
extern asmlinkage void handle_msa(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);
extern void tlb_do_page_fault_0(void);
void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
void (*board_ebase_setup)(void);
void (*board_cache_error_setup)(void);
static void show_raw_backtrace(unsigned long reg29)
{
	unsigned long *sp = (unsigned long *)(reg29 & ~3);
	unsigned long addr;

	printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif
	while (!kstack_end(sp)) {
		unsigned long __user *p =
			(unsigned long __user *)(unsigned long)sp++;
		if (__get_user(addr, p)) {
			printk(" (Bad stack address)");
			break;
		}
		if (__kernel_text_address(addr))
			print_ip_sym(addr);
	}
	printk("\n");
}

#ifdef CONFIG_KALLSYMS
int raw_show_trace;
static int __init set_raw_show_trace(char *str)
{
	raw_show_trace = 1;
	return 1;
}
__setup("raw_show_trace", set_raw_show_trace);
#endif
static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
{
	unsigned long sp = regs->regs[29];
	unsigned long ra = regs->regs[31];
	unsigned long pc = regs->cp0_epc;

	if (!task)
		task = current;

	if (raw_show_trace || !__kernel_text_address(pc)) {
		show_raw_backtrace(sp);
		return;
	}
	printk("Call Trace:\n");
	do {
		print_ip_sym(pc);
		pc = unwind_stack(task, &sp, pc, &ra);
	} while (pc);
	printk("\n");
}
/*
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
 */
static void show_stacktrace(struct task_struct *task,
	const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	long stackdata;
	int i;
	unsigned long __user *sp = (unsigned long __user *)regs->regs[29];

	printk("Stack :");
	i = 0;
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0))
			printk("\n	 ");
		if (i > 39) {
			printk(" ...");
			break;
		}

		if (__get_user(stackdata, sp++)) {
			printk(" (Bad stack address)");
			break;
		}

		printk(" %0*lx", field, stackdata);
		i++;
	}
	printk("\n");
	show_backtrace(task, regs);
}
void show_stack(struct task_struct *task, unsigned long *sp)
{
	struct pt_regs regs;
	mm_segment_t old_fs = get_fs();
	if (sp) {
		regs.regs[29] = (unsigned long)sp;
		regs.regs[31] = 0;
		regs.cp0_epc = 0;
	} else {
		if (task && task != current) {
			regs.regs[29] = task->thread.reg29;
			regs.regs[31] = 0;
			regs.cp0_epc = task->thread.reg31;
#ifdef CONFIG_KGDB_KDB
		} else if (atomic_read(&kgdb_active) != -1 &&
			   kdb_current_regs) {
			memcpy(&regs, kdb_current_regs, sizeof(regs));
#endif /* CONFIG_KGDB_KDB */
		} else {
			prepare_frametrace(&regs);
		}
	}
	/*
	 * show_stack() deals exclusively with kernel mode, so be sure to access
	 * the stack in the kernel (not user) address space.
	 */
	set_fs(KERNEL_DS);
	show_stacktrace(task, &regs);
	set_fs(old_fs);
}
static void show_code(unsigned int __user *pc)
{
	long i;
	unsigned short __user *pc16 = NULL;

	printk("Code:");

	if ((unsigned long)pc & 1)
		pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
	for (i = -3; i < 6; i++) {
		unsigned int insn;
		if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
			printk(" (Bad address in epc)\n");
			break;
		}
		printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
	}
}
static void __show_regs(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int cause = regs->cp0_cause;
	unsigned int exccode;
	int i;

	show_regs_print_info(KERN_DEFAULT);

	/*
	 * Saved main processor registers
	 */
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			printk("$%2d   :", i);
		if (i == 0)
			printk(" %0*lx", field, 0UL);
		else if (i == 26 || i == 27)
			printk(" %*s", field, "");
		else
			printk(" %0*lx", field, regs->regs[i]);

		i++;
		if ((i % 4) == 0)
			printk("\n");
	}

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	printk("Acx    : %0*lx\n", field, regs->acx);
#endif
	printk("Hi    : %0*lx\n", field, regs->hi);
	printk("Lo    : %0*lx\n", field, regs->lo);

	/*
	 * Saved cp0 registers
	 */
	printk("epc   : %0*lx %pS\n", field, regs->cp0_epc,
	       (void *) regs->cp0_epc);
	printk("ra    : %0*lx %pS\n", field, regs->regs[31],
	       (void *) regs->regs[31]);

	printk("Status: %08x	", (uint32_t) regs->cp0_status);

	if (cpu_has_3kex) {
		if (regs->cp0_status & ST0_KUO)
			printk("KUo ");
		if (regs->cp0_status & ST0_IEO)
			printk("IEo ");
		if (regs->cp0_status & ST0_KUP)
			printk("KUp ");
		if (regs->cp0_status & ST0_IEP)
			printk("IEp ");
		if (regs->cp0_status & ST0_KUC)
			printk("KUc ");
		if (regs->cp0_status & ST0_IEC)
			printk("IEc ");
	} else if (cpu_has_4kex) {
		if (regs->cp0_status & ST0_KX)
			printk("KX ");
		if (regs->cp0_status & ST0_SX)
			printk("SX ");
		if (regs->cp0_status & ST0_UX)
			printk("UX ");
		switch (regs->cp0_status & ST0_KSU) {
		case KSU_USER:
			printk("USER ");
			break;
		case KSU_SUPERVISOR:
			printk("SUPERVISOR ");
			break;
		case KSU_KERNEL:
			printk("KERNEL ");
			break;
		default:
			printk("BAD_MODE ");
			break;
		}
		if (regs->cp0_status & ST0_ERL)
			printk("ERL ");
		if (regs->cp0_status & ST0_EXL)
			printk("EXL ");
		if (regs->cp0_status & ST0_IE)
			printk("IE ");
	}
	printk("\n");

	exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);

	if (1 <= exccode && exccode <= 5)
		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);

	printk("PrId  : %08x (%s)\n", read_c0_prid(),
	       cpu_name_string());
}
/*
 * FIXME: really the generic show_regs should take a const pointer argument.
 */
void show_regs(struct pt_regs *regs)
{
	__show_regs((struct pt_regs *)regs);
}
void show_registers(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	mm_segment_t old_fs = get_fs();

	__show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
	       current->comm, current->pid, current_thread_info(), current,
	       field, current_thread_info()->tp_value);
	if (cpu_has_userlocal) {
		unsigned long tls;

		tls = read_c0_userlocal();
		if (tls != current_thread_info()->tp_value)
			printk("*HwTLS: %0*lx\n", field, tls);
	}

	if (!user_mode(regs))
		/* Necessary for getting the correct stack content */
		set_fs(KERNEL_DS);
	show_stacktrace(current, regs);
	show_code((unsigned int __user *) regs->cp0_epc);
	printk("\n");
	set_fs(old_fs);
}
static DEFINE_RAW_SPINLOCK(die_lock);

void __noreturn die(const char *str, struct pt_regs *regs)
{
	static int die_counter;
	int sig = SIGSEGV;

	oops_enter();

	if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
		       SIGSEGV) == NOTIFY_STOP)
		sig = 0;

	console_verbose();
	raw_spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);

	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops) {
		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
		ssleep(5);
		panic("Fatal exception");
	}

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	do_exit(sig);
}
extern struct exception_table_entry __start___dbe_table[];
extern struct exception_table_entry __stop___dbe_table[];

__asm__(
"	.section	__dbe_table, \"a\"\n"
"	.previous			\n");

/* Given an address, look for it in the exception tables. */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
	if (!e)
		e = search_module_dbetables(addr);
	return e;
}
asmlinkage void do_be(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	const struct exception_table_entry *fixup = NULL;
	int data = regs->cp0_cause & 4;
	int action = MIPS_BE_FATAL;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	/* XXX For now.	 Fixme, this searches the wrong table ...  */
	if (data && !user_mode(regs))
		fixup = search_dbe_tables(exception_epc(regs));

	if (fixup)
		action = MIPS_BE_FIXUP;

	if (board_be_handler)
		action = board_be_handler(regs, fixup != NULL);

	switch (action) {
	case MIPS_BE_DISCARD:
		goto out;
	case MIPS_BE_FIXUP:
		if (fixup) {
			regs->cp0_epc = fixup->nextinsn;
			goto out;
		}
		break;
	default:
		break;
	}

	/*
	 * Assume it would be too dangerous to continue ...
	 */
	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
	       data ? "Data" : "Instruction",
	       field, regs->cp0_epc, field, regs->regs[31]);
	if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
		       SIGBUS) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Oops", regs);
	force_sig(SIGBUS, current);

out:
	exception_exit(prev_state);
}
/*
 * ll/sc, rdhwr, sync emulation
 */

#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC0  0x00000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
#define SYNC   0x0000000f
#define RDHWR  0x0000003b

/*  microMIPS definitions   */
#define MM_POOL32A_FUNC	0xfc00ffff
#define MM_RDHWR	0x00006b3c
#define MM_RS		0x001f0000
#define MM_RT		0x03e00000
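
/*
 * Illustrative decode (added, not part of the original source): assuming
 * the classic MIPS32 encoding, "ll $2, 16($3)" assembles to 0xc0620010.
 * With the masks above: (insn & OPCODE) == LL, (insn & BASE) >> 21 == 3
 * (the base register), (insn & RT) >> 16 == 2 (the destination) and
 * (insn & OFFSET) == 16 (the signed 16-bit displacement).
 */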
/*
 * The ll_bit is cleared by r*_switch.S
 */

unsigned int ll_bit;
struct task_struct *ll_task;
static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long value, __user *vaddr;
	long offset;

	/*
	 * analyse the ll instruction that just caused a ri exception
	 * and put the referenced address into vaddr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);

	if ((unsigned long)vaddr & 3)
		return SIGBUS;
	if (get_user(value, vaddr))
		return SIGSEGV;

	preempt_disable();

	if (ll_task == NULL || ll_task == current) {
		ll_bit = 1;
	} else {
		ll_bit = 0;
	}
	ll_task = current;

	preempt_enable();

	regs->regs[(opcode & RT) >> 16] = value;

	return 0;
}
static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long __user *vaddr;
	unsigned long reg;
	long offset;

	/*
	 * analyse the sc instruction that just caused a ri exception
	 * and put the referenced address into vaddr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
	reg = (opcode & RT) >> 16;

	if ((unsigned long)vaddr & 3)
		return SIGBUS;

	preempt_disable();

	if (ll_bit == 0 || ll_task != current) {
		regs->regs[reg] = 0;
		preempt_enable();
		return 0;
	}

	preempt_enable();

	if (put_user(regs->regs[reg], vaddr))
		return SIGSEGV;

	regs->regs[reg] = 1;

	return 0;
}
/*
 * ll uses the opcode of lwc0 and sc uses the opcode of swc0. That is both
 * opcodes are supposed to result in coprocessor unusable exceptions if
 * executed on ll/sc-less processors. That's the theory. In practice a
 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 * instead, so we're doing the emulation thing in both exception handlers.
 */
static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == LL) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return simulate_ll(regs, opcode);
	}
	if ((opcode & OPCODE) == SC) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return simulate_sc(regs, opcode);
	}

	return -1;			/* Must be something else ... */
}
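
/*
 * Note on the convention shared by the simulate_*() helpers (added for
 * readability, derived from the code): a negative return value means
 * "not an instruction I handle, try the next emulator", 0 means the
 * instruction was emulated, and a positive value is a signal number
 * (e.g. SIGBUS, SIGSEGV) delivered by the caller; see the status
 * handling in do_ri() and do_cpu() below.
 */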
/*
 * Simulate trapping 'rdhwr' instructions to provide user accessible
 * registers not implemented in hardware.
 */
static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
{
	struct thread_info *ti = task_thread_info(current);

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
			1, regs, 0);
	switch (rd) {
	case 0:		/* CPU number */
		regs->regs[rt] = smp_processor_id();
		return 0;
	case 1:		/* SYNCI length */
		regs->regs[rt] = min(current_cpu_data.dcache.linesz,
				     current_cpu_data.icache.linesz);
		return 0;
	case 2:		/* Read count register */
		regs->regs[rt] = read_c0_count();
		return 0;
	case 3:		/* Count register resolution */
		switch (current_cpu_type()) {
		case CPU_20KC:
		case CPU_25KF:
			regs->regs[rt] = 1;
			break;
		default:
			regs->regs[rt] = 2;
		}
		return 0;
	case 29:
		regs->regs[rt] = ti->tp_value;
		return 0;
	default:
		return -1;
	}
}
static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
		int rd = (opcode & RD) >> 11;
		int rt = (opcode & RT) >> 16;

		simulate_rdhwr(regs, rd, rt);
		return 0;
	}

	/* Not ours.  */
	return -1;
}
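
/*
 * Worked example (added, not from the original source): the TLS read
 * commonly emitted for userland, "rdhwr $3, $29", encodes as 0x7c03e83b.
 * Both (opcode & OPCODE) == SPEC3 and (opcode & FUNC) == RDHWR hold,
 * (opcode & RD) >> 11 == 29 selects the ULR/TLS case in simulate_rdhwr()
 * and (opcode & RT) >> 16 == 3 names the result register.
 */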
static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
		int rd = (opcode & MM_RS) >> 16;
		int rt = (opcode & MM_RT) >> 21;

		simulate_rdhwr(regs, rd, rt);
		return 0;
	}

	/* Not ours.  */
	return -1;
}
static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return 0;
	}

	return -1;			/* Must be something else ... */
}
asmlinkage void do_ov(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	siginfo_t info;

	prev_state = exception_enter();
	die_if_kernel("Integer overflow", regs);

	info.si_code = FPE_INTOVF;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void __user *) regs->cp0_epc;
	force_sig_info(SIGFPE, &info, current);
	exception_exit(prev_state);
}
int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
{
	struct siginfo si = { 0 };

	switch (sig) {
	case 0:
		return 0;

	case SIGFPE:
		si.si_addr = fault_addr;
		si.si_signo = sig;
		/*
		 * Inexact can happen together with Overflow or Underflow.
		 * Respect the mask to deliver the correct exception.
		 */
		fcr31 &= (fcr31 & FPU_CSR_ALL_E) <<
			 (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E));
		if (fcr31 & FPU_CSR_INV_X)
			si.si_code = FPE_FLTINV;
		else if (fcr31 & FPU_CSR_DIV_X)
			si.si_code = FPE_FLTDIV;
		else if (fcr31 & FPU_CSR_OVF_X)
			si.si_code = FPE_FLTOVF;
		else if (fcr31 & FPU_CSR_UDF_X)
			si.si_code = FPE_FLTUND;
		else if (fcr31 & FPU_CSR_INE_X)
			si.si_code = FPE_FLTRES;
		else
			si.si_code = __SI_FAULT;
		force_sig_info(sig, &si, current);
		return 1;

	case SIGBUS:
		si.si_addr = fault_addr;
		si.si_signo = sig;
		si.si_code = BUS_ADRERR;
		force_sig_info(sig, &si, current);
		return 1;

	case SIGSEGV:
		si.si_addr = fault_addr;
		si.si_signo = sig;
		down_read(&current->mm->mmap_sem);
		if (find_vma(current->mm, (unsigned long)fault_addr))
			si.si_code = SEGV_ACCERR;
		else
			si.si_code = SEGV_MAPERR;
		up_read(&current->mm->mmap_sem);
		force_sig_info(sig, &si, current);
		return 1;

	case SIGILL:
		force_sig(sig, current);
		return 1;

	default:
		return 0;
	}
}
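
/*
 * Worked example for the enable->cause alignment above (added, based on
 * the standard FCSR layout): the enable bits FPU_CSR_ALL_E occupy bits
 * 11:7 and the cause bits FPU_CSR_ALL_X bits 17:12, so
 * ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E) == 13 - 8 == 5 and shifting
 * the enables left by 5 lines each one up with its cause bit.	E.g. with
 * the Inexact and Overflow causes both raised but only the Overflow
 * enable set, the mask keeps just the Overflow cause, so si_code becomes
 * FPE_FLTOVF rather than FPE_FLTRES.
 */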
static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
		       unsigned long old_epc, unsigned long old_ra)
{
	union mips_instruction inst = { .word = opcode };
	void __user *fault_addr;
	unsigned long fcr31;
	int sig;

	/* If it's obviously not an FP instruction, skip it */
	switch (inst.i_format.opcode) {
	case cop1_op:
	case cop1x_op:
	case lwc1_op:
	case ldc1_op:
	case swc1_op:
	case sdc1_op:
		break;

	default:
		return -1;
	}

	/*
	 * do_ri skipped over the instruction via compute_return_epc, undo
	 * that for the FPU emulator.
	 */
	regs->cp0_epc = old_epc;
	regs->regs[31] = old_ra;

	/* Save the FP context to struct thread_struct */
	lose_fpu(1);

	/* Run the emulator */
	sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
				       &fault_addr);
	fcr31 = current->thread.fpu.fcr31;

	/*
	 * We can't allow the emulated instruction to leave any of
	 * the cause bits set in $fcr31.
	 */
	current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;

	/* Restore the hardware register state */
	own_fpu(1);

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcr31);

	return 0;
}
/*
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 */
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
	enum ctx_state prev_state;
	void __user *fault_addr;
	int sig;

	prev_state = exception_enter();
	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
		       SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear FCSR.Cause before enabling interrupts */
	write_32bit_cp1_register(CP1_STATUS, fcr31 & ~FPU_CSR_ALL_X);
	local_irq_enable();

	die_if_kernel("FP exception in kernel code", regs);

	if (fcr31 & FPU_CSR_UNI_X) {
		/*
		 * Unimplemented operation exception.  If we've got the full
		 * software emulator on-board, let's use it...
		 *
		 * Force FPU to dump state into task/thread context.  We're
		 * moving a lot of data here for what is probably a single
		 * instruction, but the alternative is to pre-decode the FP
		 * register operands before invoking the emulator, which seems
		 * a bit extreme for what should be an infrequent event.
		 */
		/* Ensure 'resume' not overwrite saved fp context again. */
		lose_fpu(1);

		/* Run the emulator */
		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
					       &fault_addr);
		fcr31 = current->thread.fpu.fcr31;

		/*
		 * We can't allow the emulated instruction to leave any of
		 * the cause bits set in $fcr31.
		 */
		current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;

		/* Restore the hardware register state */
		own_fpu(1);	/* Using the FPU again.	 */
	} else {
		sig = SIGFPE;
		fault_addr = (void __user *) regs->cp0_epc;
	}

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcr31);

out:
	exception_exit(prev_state);
}
void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
	const char *str)
{
	siginfo_t info;
	char b[40];

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
			 SIGTRAP) == NOTIFY_STOP)
		return;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

	if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
		       SIGTRAP) == NOTIFY_STOP)
		return;

	/*
	 * A short test says that IRIX 5.3 sends SIGTRAP for all trap
	 * insns, even for trap and break codes that indicate arithmetic
	 * failures.  Weird ...
	 * But should we continue the brokenness???  --macro
	 */
	switch (code) {
	case BRK_OVERFLOW:
	case BRK_DIVZERO:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		if (code == BRK_DIVZERO)
			info.si_code = FPE_INTDIV;
		else
			info.si_code = FPE_INTOVF;
		info.si_signo = SIGFPE;
		info.si_addr = (void __user *) regs->cp0_epc;
		force_sig_info(SIGFPE, &info, current);
		break;
	case BRK_BUG:
		die_if_kernel("Kernel bug detected", regs);
		force_sig(SIGTRAP, current);
		break;
	case BRK_MEMU:
		/*
		 * This breakpoint code is used by the FPU emulator to retake
		 * control of the CPU after executing the instruction from the
		 * delay slot of an emulated branch.
		 *
		 * Terminate if exception was recognized as a delay slot return
		 * otherwise handle as normal.
		 */
		if (do_dsemulret(regs))
			return;

		die_if_kernel("Math emu break/trap", regs);
		force_sig(SIGTRAP, current);
		break;
	default:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		force_sig(SIGTRAP, current);
	}
}
asmlinkage void do_bp(struct pt_regs *regs)
{
	unsigned long epc = msk_isa16_mode(exception_epc(regs));
	unsigned int opcode, bcode;
	enum ctx_state prev_state;
	mm_segment_t seg;

	seg = get_fs();
	if (!user_mode(regs))
		set_fs(KERNEL_DS);

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (get_isa16_mode(regs->cp0_epc)) {
		u16 instr[2];

		if (__get_user(instr[0], (u16 __user *)epc))
			goto out_sigsegv;

		if (!cpu_has_mmips) {
			/* MIPS16e mode */
			bcode = (instr[0] >> 5) & 0x3f;
		} else if (mm_insn_16bit(instr[0])) {
			/* 16-bit microMIPS BREAK */
			bcode = instr[0] & 0xf;
		} else {
			/* 32-bit microMIPS BREAK */
			if (__get_user(instr[1], (u16 __user *)(epc + 2)))
				goto out_sigsegv;
			opcode = (instr[0] << 16) | instr[1];
			bcode = (opcode >> 6) & ((1 << 20) - 1);
		}
	} else {
		if (__get_user(opcode, (unsigned int __user *)epc))
			goto out_sigsegv;
		bcode = (opcode >> 6) & ((1 << 20) - 1);
	}

	/*
	 * There is the ancient bug in the MIPS assemblers that the break
	 * code starts at bit 16 instead of bit 6 in the opcode. Gas is
	 * bug-compatible, but not always, grrr...
	 * We handle both cases with a simple heuristic.  --macro
	 */
	if (bcode >= (1 << 10))
		bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);

	/*
	 * notify the kprobe handlers, if instruction is likely to
	 * pertain to them.
	 */
	switch (bcode) {
	case BRK_UPROBE:
		if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_UPROBE_XOL:
		if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_BP:
		if (notify_die(DIE_BREAK, "debug", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_SSTEPBP:
		if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	default:
		break;
	}

	do_trap_or_bp(regs, bcode, "Break");

out:
	set_fs(seg);
	exception_exit(prev_state);
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
	goto out;
}
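
/*
 * Illustrative case for the bcode heuristic above (added, not from the
 * original source): "break 7" correctly encoded carries 7 in bits 25:6,
 * so bcode == 7 and is used as-is.  An assembler with the ancient bug
 * instead places the code at bit 16, which reads back here as
 * bcode == 7 << 10 == 0x1c00; since that is >= 1 << 10, swapping the two
 * 10-bit halves recovers the intended code 7.
 */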
asmlinkage void do_tr(struct pt_regs *regs)
{
	u32 opcode, tcode = 0;
	enum ctx_state prev_state;
	u16 instr[2];
	mm_segment_t seg;
	unsigned long epc = msk_isa16_mode(exception_epc(regs));

	seg = get_fs();
	if (!user_mode(regs))
		set_fs(KERNEL_DS);

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (get_isa16_mode(regs->cp0_epc)) {
		if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
		    __get_user(instr[1], (u16 __user *)(epc + 2)))
			goto out_sigsegv;
		opcode = (instr[0] << 16) | instr[1];
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 12) & ((1 << 4) - 1);
	} else {
		if (__get_user(opcode, (u32 __user *)epc))
			goto out_sigsegv;
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 6) & ((1 << 10) - 1);
	}

	do_trap_or_bp(regs, tcode, "Trap");

out:
	set_fs(seg);
	exception_exit(prev_state);
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
	goto out;
}
asmlinkage void do_ri(struct pt_regs *regs)
{
	unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
	unsigned long old_epc = regs->cp0_epc;
	unsigned long old31 = regs->regs[31];
	enum ctx_state prev_state;
	unsigned int opcode = 0;
	int status = -1;

	/*
	 * Avoid any kernel code. Just emulate the R2 instruction
	 * as quickly as possible.
	 */
	if (mipsr2_emulation && cpu_has_mips_r6 &&
	    likely(user_mode(regs)) &&
	    likely(get_user(opcode, epc) >= 0)) {
		unsigned long fcr31 = 0;

		status = mipsr2_decoder(regs, opcode, &fcr31);
		switch (status) {
		case 0:
		case SIGEMT:
			task_thread_info(current)->r2_emul_return = 1;
			return;
		case SIGILL:
			goto no_r2_instr;
		default:
			process_fpemu_return(status,
					     &current->thread.cp0_baduaddr,
					     fcr31);
			task_thread_info(current)->r2_emul_return = 1;
			return;
		}
	}

no_r2_instr:

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;

	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
		       SIGILL) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Reserved instruction in kernel code", regs);

	if (unlikely(compute_return_epc(regs) < 0))
		goto out;

	if (get_isa16_mode(regs->cp0_epc)) {
		unsigned short mmop[2] = { 0 };

		if (unlikely(get_user(mmop[0], epc) < 0))
			status = SIGSEGV;
		if (unlikely(get_user(mmop[1], epc) < 0))
			status = SIGSEGV;
		opcode = (mmop[0] << 16) | mmop[1];

		if (status < 0)
			status = simulate_rdhwr_mm(regs, opcode);
	} else {
		if (unlikely(get_user(opcode, epc) < 0))
			status = SIGSEGV;

		if (!cpu_has_llsc && status < 0)
			status = simulate_llsc(regs, opcode);

		if (status < 0)
			status = simulate_rdhwr_normal(regs, opcode);

		if (status < 0)
			status = simulate_sync(regs, opcode);

		if (status < 0)
			status = simulate_fp(regs, opcode, old_epc, old31);
	}

	if (status < 0)
		status = SIGILL;

	if (unlikely(status > 0)) {
		regs->cp0_epc = old_epc;		/* Undo skip-over.  */
		regs->regs[31] = old31;
		force_sig(status, current);
	}

out:
	exception_exit(prev_state);
}
/*
 * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
 * emulated more than some threshold number of instructions, force migration to
 * a "CPU" that has FP support.
 */
static void mt_ase_fp_affinity(void)
{
#ifdef CONFIG_MIPS_MT_FPAFF
	if (mt_fpemul_threshold > 0 &&
	    ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
		/*
		 * If there's no FPU present, or if the application has already
		 * restricted the allowed set to exclude any CPUs with FPUs,
		 * we'll skip the procedure.
		 */
		if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {
			cpumask_t tmask;

			current->thread.user_cpus_allowed
				= current->cpus_allowed;
			cpumask_and(&tmask, &current->cpus_allowed,
				    &mt_fpu_cpumask);
			set_cpus_allowed_ptr(current, &tmask);
			set_thread_flag(TIF_FPUBOUND);
		}
	}
#endif /* CONFIG_MIPS_MT_FPAFF */
}
/*
 * No lock; only written during early bootup by CPU 0.
 */
static RAW_NOTIFIER_HEAD(cu2_chain);

int __ref register_cu2_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cu2_chain, nb);
}

int cu2_notifier_call_chain(unsigned long val, void *v)
{
	return raw_notifier_call_chain(&cu2_chain, val, v);
}
static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
	void *data)
{
	struct pt_regs *regs = data;

	die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
			      "instruction", regs);
	force_sig(SIGILL, current);

	return NOTIFY_OK;
}
static int wait_on_fp_mode_switch(atomic_t *p)
{
	/*
	 * The FP mode for this task is currently being switched. That may
	 * involve modifications to the format of this task's FP context which
	 * make it unsafe to proceed with execution for the moment. Instead,
	 * schedule some other task.
	 */
	schedule();
	return 0;
}
static int enable_restore_fp_context(int msa)
{
	int err, was_fpu_owner, prior_msa;

	/*
	 * If an FP mode switch is currently underway, wait for it to
	 * complete before proceeding.
	 */
	wait_on_atomic_t(&current->mm->context.fp_mode_switching,
			 wait_on_fp_mode_switch, TASK_KILLABLE);

	if (!used_math()) {
		/* First time FP context user. */
		preempt_disable();
		err = init_fpu();
		if (msa && !err) {
			enable_msa();
			_init_msa_upper();
			set_thread_flag(TIF_USEDMSA);
			set_thread_flag(TIF_MSA_CTX_LIVE);
		}
		preempt_enable();
		if (!err)
			set_used_math();
		return err;
	}

	/*
	 * This task has formerly used the FP context.
	 *
	 * If this thread has no live MSA vector context then we can simply
	 * restore the scalar FP context. If it has live MSA vector context
	 * (that is, it has or may have used MSA since last performing a
	 * function call) then we'll need to restore the vector context. This
	 * applies even if we're currently only executing a scalar FP
	 * instruction. This is because if we were to later execute an MSA
	 * instruction then we'd either have to:
	 *
	 *  - Restore the vector context & clobber any registers modified by
	 *    scalar FP instructions between now & then.
	 *
	 * or
	 *
	 *  - Not restore the vector context & lose the most significant bits
	 *    of all vector registers.
	 *
	 * Neither of those options is acceptable. We cannot restore the least
	 * significant bits of the registers now & only restore the most
	 * significant bits later because the most significant bits of any
	 * vector registers whose aliased FP register is modified now will have
	 * been zeroed. We'd have no way to know that when restoring the vector
	 * context & thus may load an outdated value for the most significant
	 * bits of a vector register.
	 */
	if (!msa && !thread_msa_context_live())
		return own_fpu(1);

	/*
	 * This task is using or has previously used MSA. Thus we require
	 * that Status.FR == 1.
	 */
	preempt_disable();
	was_fpu_owner = is_fpu_owner();
	err = own_fpu_inatomic(0);
	if (err)
		goto out;

	enable_msa();
	write_msa_csr(current->thread.fpu.msacsr);
	set_thread_flag(TIF_USEDMSA);

	/*
	 * If this is the first time that the task is using MSA and it has
	 * previously used scalar FP in this time slice then we already have
	 * FP context which we shouldn't clobber. We do however need to clear
	 * the upper 64b of each vector register so that this task has no
	 * opportunity to see data left behind by another.
	 */
	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
	if (!prior_msa && was_fpu_owner) {
		_init_msa_upper();

		goto out;
	}

	if (!prior_msa) {
		/*
		 * Restore the least significant 64b of each vector register
		 * from the existing scalar FP context.
		 */
		_restore_fp(current);

		/*
		 * The task has not formerly used MSA, so clear the upper 64b
		 * of each vector register such that it cannot see data left
		 * behind by another task.
		 */
		_init_msa_upper();
	} else {
		/* We need to restore the vector context. */
		restore_msa(current);

		/* Restore the scalar FP control & status register */
		if (!was_fpu_owner)
			write_32bit_cp1_register(CP1_STATUS,
						 current->thread.fpu.fcr31);
	}

out:
	preempt_enable();

	return 0;
}
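
/*
 * Summary of the cases handled above (added for readability, derived
 * from the code): a first-time FP user gets a fresh context via
 * init_fpu() (plus zeroed MSA upper halves when msa is set); a task with
 * only live scalar context is simply handed the FPU via own_fpu(); a
 * task touching MSA runs with Status.FR == 1, its vector registers
 * either seeded from the scalar FP context on first MSA use or restored
 * in full by restore_msa().
 */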
asmlinkage void do_cpu(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	unsigned int __user *epc;
	unsigned long old_epc, old31;
	void __user *fault_addr;
	unsigned int opcode;
	unsigned long fcr31;
	unsigned int cpid;
	int status, err;
	unsigned long __maybe_unused flags;
	int sig;

	prev_state = exception_enter();
	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

	if (cpid != 2)
		die_if_kernel("do_cpu invoked from kernel context!", regs);

	switch (cpid) {
	case 0:
		epc = (unsigned int __user *)exception_epc(regs);
		old_epc = regs->cp0_epc;
		old31 = regs->regs[31];
		opcode = 0;
		status = -1;

		if (unlikely(compute_return_epc(regs) < 0))
			break;

		if (get_isa16_mode(regs->cp0_epc)) {
			unsigned short mmop[2] = { 0 };

			if (unlikely(get_user(mmop[0], epc) < 0))
				status = SIGSEGV;
			if (unlikely(get_user(mmop[1], epc) < 0))
				status = SIGSEGV;
			opcode = (mmop[0] << 16) | mmop[1];

			if (status < 0)
				status = simulate_rdhwr_mm(regs, opcode);
		} else {
			if (unlikely(get_user(opcode, epc) < 0))
				status = SIGSEGV;

			if (!cpu_has_llsc && status < 0)
				status = simulate_llsc(regs, opcode);

			if (status < 0)
				status = simulate_rdhwr_normal(regs, opcode);
		}

		if (status < 0)
			status = SIGILL;

		if (unlikely(status > 0)) {
			regs->cp0_epc = old_epc;	/* Undo skip-over.  */
			regs->regs[31] = old31;
			force_sig(status, current);
		}

		break;

	case 3:
		/*
		 * The COP3 opcode space and consequently the CP0.Status.CU3
		 * bit and the CP0.Cause.CE=3 encoding have been removed as
		 * of the MIPS III ISA.	 From the MIPS IV and MIPS32r2 ISAs
		 * up the space has been reused for COP1X instructions, that
		 * are enabled by the CP0.Status.CU1 bit and consequently
		 * use the CP0.Cause.CE=1 encoding for Coprocessor Unusable
		 * exceptions.	Some FPU-less processors that implement one
		 * of these ISAs however use this code erroneously for COP1X
		 * instructions.  Therefore we redirect this trap to the FP
		 * emulator too.
		 */
		if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
			force_sig(SIGILL, current);
			break;
		}
		/* Fall through.  */

	case 1:
		err = enable_restore_fp_context(0);

		if (raw_cpu_has_fpu && !err)
			break;

		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
					       &fault_addr);
		fcr31 = current->thread.fpu.fcr31;

		/*
		 * We can't allow the emulated instruction to leave
		 * any of the cause bits set in $fcr31.
		 */
		current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;

		/* Send a signal if required.  */
		if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
			mt_ase_fp_affinity();

		break;

	case 2:
		raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
		break;
	}

	exception_exit(prev_state);
}
asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
		       current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear MSACSR.Cause before enabling interrupts */
	write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
	local_irq_enable();

	die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
	force_sig(SIGFPE, current);
out:
	exception_exit(prev_state);
}
asmlinkage void do_msa(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	int err;

	prev_state = exception_enter();

	if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
		force_sig(SIGILL, current);
		goto out;
	}

	die_if_kernel("do_msa invoked from kernel context!", regs);

	err = enable_restore_fp_context(1);
	if (err)
		force_sig(SIGILL, current);
out:
	exception_exit(prev_state);
}
asmlinkage void do_mdmx(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	force_sig(SIGILL, current);
	exception_exit(prev_state);
}
/*
 * Called with interrupts disabled.
 */
asmlinkage void do_watch(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	u32 cause;

	prev_state = exception_enter();
	/*
	 * Clear WP (bit 22) bit of cause register so we don't loop
	 * forever.
	 */
	cause = read_c0_cause();
	cause &= ~(1 << 22);
	write_c0_cause(cause);

	/*
	 * If the current thread has the watch registers loaded, save
	 * their values and send SIGTRAP.  Otherwise another thread
	 * left the registers set, clear them and continue.
	 */
	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
		mips_read_watch_registers();
		local_irq_enable();
		force_sig(SIGTRAP, current);
	} else {
		mips_clear_watch_registers();
		local_irq_enable();
	}
	exception_exit(prev_state);
}
asmlinkage void do_mcheck(struct pt_regs *regs)
{
	int multi_match = regs->cp0_status & ST0_TS;
	enum ctx_state prev_state;
	mm_segment_t old_fs = get_fs();

	prev_state = exception_enter();
	show_regs(regs);

	if (multi_match) {
		dump_tlb_regs();
		pr_info("\n");
		dump_tlb_all();
	}

	if (!user_mode(regs))
		set_fs(KERNEL_DS);

	show_code((unsigned int __user *) regs->cp0_epc);
	set_fs(old_fs);

	/*
	 * Some chips may have other causes of machine check (e.g. SB1
	 * graduation timer)
	 */
	panic("Caught Machine Check exception - %scaused by multiple "
	      "matching entries in the TLB.",
	      (multi_match) ? "" : "not ");
}
asmlinkage void do_mt(struct pt_regs *regs)
{
	int subcode;

	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
			>> VPECONTROL_EXCPT_SHIFT;
	switch (subcode) {
	case 0:
		printk(KERN_DEBUG "Thread Underflow\n");
		break;
	case 1:
		printk(KERN_DEBUG "Thread Overflow\n");
		break;
	case 2:
		printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
		break;
	case 3:
		printk(KERN_DEBUG "Gating Storage Exception\n");
		break;
	case 4:
		printk(KERN_DEBUG "YIELD Scheduler Exception\n");
		break;
	case 5:
		printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
		break;
	default:
		printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
			subcode);
	}
	die_if_kernel("MIPS MT Thread exception in kernel", regs);

	force_sig(SIGILL, current);
}
asmlinkage void do_dsp(struct pt_regs *regs)
{
	if (cpu_has_dsp)
		panic("Unexpected DSP exception");

	force_sig(SIGILL, current);
}
asmlinkage void do_reserved(struct pt_regs *regs)
{
	/*
	 * Game over - no way to handle this if it ever occurs.	 Most probably
	 * caused by a new unknown cpu type or after another deadly
	 * hardware/software error.
	 */
	show_regs(regs);
	panic("Caught reserved exception %ld - should not happen.",
	      (regs->cp0_cause & 0x7f) >> 2);
}
static int __initdata l1parity = 1;
static int __init nol1parity(char *s)
{
	l1parity = 0;
	return 1;
}
__setup("nol1par", nol1parity);
static int __initdata l2parity = 1;
static int __init nol2parity(char *s)
{
	l2parity = 0;
	return 1;
}
__setup("nol2par", nol2parity);
/*
 * Some MIPS CPUs can enable/disable cache parity detection, but they
 * do it in different ways.
 */
static inline void parity_protection_init(void)
{
	switch (current_cpu_type()) {
	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
	case CPU_1074K:
	case CPU_INTERAPTIV:
	case CPU_PROAPTIV:
	case CPU_P5600:
	case CPU_QEMU_GENERIC:
		{
#define ERRCTL_PE	0x80000000
#define ERRCTL_L2P	0x00800000
			unsigned long errctl;
			unsigned int l1parity_present, l2parity_present;

			errctl = read_c0_ecc();
			errctl &= ~(ERRCTL_PE|ERRCTL_L2P);

			/* probe L1 parity support */
			write_c0_ecc(errctl | ERRCTL_PE);
			back_to_back_c0_hazard();
			l1parity_present = (read_c0_ecc() & ERRCTL_PE);

			/* probe L2 parity support */
			write_c0_ecc(errctl|ERRCTL_L2P);
			back_to_back_c0_hazard();
			l2parity_present = (read_c0_ecc() & ERRCTL_L2P);

			if (l1parity_present && l2parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
				if (l1parity ^ l2parity)
					errctl |= ERRCTL_L2P;
			} else if (l1parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
			} else if (l2parity_present) {
				if (l2parity)
					errctl |= ERRCTL_L2P;
			} else {
				/* No parity available */
			}

			printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);

			write_c0_ecc(errctl);
			back_to_back_c0_hazard();
			errctl = read_c0_ecc();
			printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);

			if (l1parity_present)
				printk(KERN_INFO "Cache parity protection %sabled\n",
				       (errctl & ERRCTL_PE) ? "en" : "dis");

			if (l2parity_present) {
				if (l1parity_present && l1parity)
					errctl ^= ERRCTL_L2P;
				printk(KERN_INFO "L2 cache parity protection %sabled\n",
				       (errctl & ERRCTL_L2P) ? "en" : "dis");
			}
		}
		break;

	case CPU_5KC:
	case CPU_5KE:
		write_c0_ecc(0x80000000);
		back_to_back_c0_hazard();
		/* Set the PE bit (bit 31) in the c0_errctl register. */
		printk(KERN_INFO "Cache parity protection %sabled\n",
		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
		break;
	case CPU_20KC:
	case CPU_25KF:
		/* Clear the DE bit (bit 16) in the c0_status register. */
		printk(KERN_INFO "Enable cache parity protection for "
		       "MIPS 20KC/25KF CPUs.\n");
		clear_c0_status(ST0_DE);
		break;
	default:
		break;
	}
}
asmlinkage void cache_parity_error(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	printk("Cache error exception:\n");
	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
	reg_val = read_c0_cacheerr();
	printk("c0_cacheerr == %08x\n", reg_val);

	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
	       reg_val & (1<<30) ? "secondary" : "primary",
	       reg_val & (1<<31) ? "data" : "insn");
	if ((cpu_has_mips_r2_r6) &&
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
		pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<27) ? "ES " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	} else {
		pr_err("Error bits: %s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	}
	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
	if (reg_val & (1<<22))
		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

	if (reg_val & (1<<23))
		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif

	panic("Can't handle the cache error!");
}
asmlinkage void do_ftlb(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	if ((cpu_has_mips_r2_r6) &&
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
		       read_c0_ecc());
		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
		reg_val = read_c0_cacheerr();
		pr_err("c0_cacheerr == %08x\n", reg_val);

		if ((reg_val & 0xc0000000) == 0xc0000000) {
			pr_err("Decoded c0_cacheerr: FTLB parity error\n");
		} else {
			pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
			       reg_val & (1<<30) ? "secondary" : "primary",
			       reg_val & (1<<31) ? "data" : "insn");
		}
	} else {
		pr_err("FTLB error exception\n");
	}
	/* Just print the cacheerr bits for now */
	cache_parity_error();
}
/*
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
 */
void ejtag_exception_handler(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned long depc, old_epc, old_ra;
	unsigned int debug;

	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
	depc = read_c0_depc();
	debug = read_c0_debug();
	printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
	if (debug & 0x80000000) {
		/*
		 * In branch delay slot.
		 * We cheat a little bit here and use EPC to calculate the
		 * debug return address (DEPC). EPC is restored after the
		 * calculation.
		 */
		old_epc = regs->cp0_epc;
		old_ra = regs->regs[31];
		regs->cp0_epc = depc;
		compute_return_epc(regs);
		depc = regs->cp0_epc;
		regs->cp0_epc = old_epc;
		regs->regs[31] = old_ra;
	} else
		depc += 4;
	write_c0_depc(depc);

#if 0
	printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
	write_c0_debug(debug | 0x100);
#endif
}
/*
 * NMI exception handler.
 * No lock; only written during early bootup by CPU 0.
 */
static RAW_NOTIFIER_HEAD(nmi_chain);

int register_nmi_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&nmi_chain, nb);
}

void __noreturn nmi_exception_handler(struct pt_regs *regs)
{
	char str[100];

	nmi_enter();
	raw_notifier_call_chain(&nmi_chain, 0, regs);
	bust_spinlocks(1);
	snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
		 smp_processor_id(), regs->cp0_epc);
	regs->cp0_epc = read_c0_errorepc();
	die(str, regs);
	nmi_exit();
}
#define VECTORSPACING 0x100	/* for EI/VI mode */

unsigned long ebase;
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];
void __init *set_except_vector(int n, void *addr)
{
	unsigned long handler = (unsigned long) addr;
	unsigned long old_handler;

#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * Only the TLB handlers are cache aligned with an even
	 * address. All other handlers are on an odd address and
	 * require no modification. Otherwise, MIPS32 mode will
	 * be entered when handling any TLB exceptions. That
	 * would be bad...since we must stay in microMIPS mode.
	 */
	if (!(handler & 0x1))
		handler |= 1;
#endif
	old_handler = xchg(&exception_handlers[n], handler);

	if (n == 0 && cpu_has_divec) {
#ifdef CONFIG_CPU_MICROMIPS
		unsigned long jump_mask = ~((1 << 27) - 1);
#else
		unsigned long jump_mask = ~((1 << 28) - 1);
#endif
		u32 *buf = (u32 *)(ebase + 0x200);
		unsigned int k0 = 26;
		if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
			uasm_i_j(&buf, handler & ~jump_mask);
		} else {
			UASM_i_LA(&buf, k0, handler);
			uasm_i_jr(&buf, k0);
		}
		local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
	}
	return (void *)old_handler;
}
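
/*
 * Range note for the jump patched above (added, based on the standard
 * j encodings): jump_mask keeps the address bits a "j" instruction
 * cannot encode, so the direct jump at ebase + 0x200 is only usable
 * when the handler lies in the same 1 << 28 byte (256 MB) region;
 * microMIPS jump targets are halfword-scaled, hence its smaller
 * 1 << 27 byte window.	 Otherwise the handler address is loaded into
 * k0 ($26) and reached via "jr".
 */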
static void do_default_vi(void)
{
	show_regs(get_irq_regs());
	panic("Caught unexpected vectored interrupt.");
}
static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
{
	unsigned long handler;
	unsigned long old_handler = vi_handlers[n];
	int srssets = current_cpu_data.srsets;
	u16 *h;
	unsigned char *b;

	BUG_ON(!cpu_has_veic && !cpu_has_vint);

	if (addr == NULL) {
		handler = (unsigned long) do_default_vi;
		srs = 0;
	} else
		handler = (unsigned long) addr;
	vi_handlers[n] = handler;

	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);

	if (srs >= srssets)
		panic("Shadow register set %d not supported", srs);

	if (cpu_has_veic) {
		if (board_bind_eic_interrupt)
			board_bind_eic_interrupt(n, srs);
	} else if (cpu_has_vint) {
		/* SRSMap is only defined if shadow sets are implemented */
		if (srssets > 1)
			change_c0_srsmap(0xf << n*4, srs << n*4);
	}

	if (srs == 0) {
		/*
		 * If no shadow set is selected then use the default handler
		 * that does normal register saving and standard interrupt exit
		 */
		extern char except_vec_vi, except_vec_vi_lui;
		extern char except_vec_vi_ori, except_vec_vi_end;
		extern char rollback_except_vec_vi;
		char *vec_start = using_rollback_handler() ?
			&rollback_except_vec_vi : &except_vec_vi;
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
		const int lui_offset = &except_vec_vi_lui - vec_start + 2;
		const int ori_offset = &except_vec_vi_ori - vec_start + 2;
#else
		const int lui_offset = &except_vec_vi_lui - vec_start;
		const int ori_offset = &except_vec_vi_ori - vec_start;
#endif
		const int handler_len = &except_vec_vi_end - vec_start;

		if (handler_len > VECTORSPACING) {
			/*
			 * Sigh... panicking won't help as the console
			 * is probably not configured :(
			 */
			panic("VECTORSPACING too small");
		}

		set_handler(((unsigned long)b - ebase), vec_start,
#ifdef CONFIG_CPU_MICROMIPS
				(handler_len - 1));
#else
				handler_len);
#endif
		h = (u16 *)(b + lui_offset);
		*h = (handler >> 16) & 0xffff;
		h = (u16 *)(b + ori_offset);
		*h = (handler & 0xffff);
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+handler_len));
	} else {
		u32 insn;

		/*
		 * In other cases jump directly to the interrupt handler. It
		 * is the handler's responsibility to save registers if required
		 * (eg hi/lo) and return from the exception using "eret".
		 */
		h = (u16 *)b;
		/* j handler */
#ifdef CONFIG_CPU_MICROMIPS
		insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
#else
		insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
#endif
		h[0] = (insn >> 16) & 0xffff;
		h[1] = insn & 0xffff;
		h[2] = 0;
		h[3] = 0;
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+8));
	}

	return (void *)old_handler;
}
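
/*
 * Encoding check for the direct-jump case above (added, not from the
 * original source): 0x08000000 is the "j" major opcode (000010 << 26),
 * and the target's bits 27:2 are packed into the 26-bit instr_index
 * field - which is why the handler must share the vector's 256 MB
 * segment.  The microMIPS variant uses the 0xd4000000 j32 major opcode
 * with a halfword-scaled target, hence the ">> 1".
 */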
void *set_vi_handler(int n, vi_handler_t addr)
{
	return set_vi_srs_handler(n, addr, 0);
}
extern void tlb_init(void);

/*
 * Timer interrupt
 */
int cp0_compare_irq;
EXPORT_SYMBOL_GPL(cp0_compare_irq);
int cp0_compare_irq_shift;

/*
 * Performance counter IRQ or -1 if shared with timer
 */
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);

/*
 * Fast debug channel IRQ or -1 if not present
 */
int cp0_fdc_irq;
EXPORT_SYMBOL_GPL(cp0_fdc_irq);
static int noulri;

static int __init ulri_disable(char *s)
{
	pr_info("Disabling ulri\n");
	noulri = 1;

	return 1;
}
__setup("noulri", ulri_disable);
/* configure STATUS register */
static void configure_status(void)
{
	/*
	 * Disable coprocessors and select 32-bit or 64-bit addressing
	 * and the 16/32 or 32/32 FPR register model.  Reset the BEV
	 * flag that some firmware may have left set and the TS bit (for
	 * IP27).  Set XX for ISA IV code to work.
	 */
	unsigned int status_set = ST0_CU0;
#ifdef CONFIG_64BIT
	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
	if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
		status_set |= ST0_XX;
	if (cpu_has_dsp)
		status_set |= ST0_MX;

	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
			 status_set);
}
/* configure HWRENA register */
static void configure_hwrena(void)
{
	unsigned int hwrena = cpu_hwrena_impl_bits;

	if (cpu_has_mips_r2_r6)
		hwrena |= 0x0000000f;

	if (!noulri && cpu_has_userlocal)
		hwrena |= (1 << 29);

	if (hwrena)
		write_c0_hwrena(hwrena);
}
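
/*
 * Note (added, based on the MIPS32 HWRENA definition): the low four bits
 * enable user-mode rdhwr access to HWRs 0-3 (CPUNum, SYNCI_Step, CC,
 * CCRes) and bit 29 enables ULR, the UserLocal/TLS register.  On cores
 * lacking hardware support, such accesses trap instead and are handled
 * by simulate_rdhwr() above.
 */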
static void configure_exception_vector(void)
{
	if (cpu_has_veic || cpu_has_vint) {
		unsigned long sr = set_c0_status(ST0_BEV);
		write_c0_ebase(ebase);
		write_c0_status(sr);
		/* Setting vector spacing enables EI/VI mode */
		change_c0_intctl(0x3e0, VECTORSPACING);
	}
	if (cpu_has_divec) {
		if (cpu_has_mipsmt) {
			unsigned int vpflags = dvpe();
			set_c0_cause(CAUSEF_IV);
			evpe(vpflags);
		} else
			set_c0_cause(CAUSEF_IV);
	}
}
void per_cpu_trap_init(bool is_boot_cpu)
{
	unsigned int cpu = smp_processor_id();

	configure_status();
	configure_hwrena();

	configure_exception_vector();

	/*
	 * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
	 *
	 *  o read IntCtl.IPTI to determine the timer interrupt
	 *  o read IntCtl.IPPCI to determine the performance counter interrupt
	 *  o read IntCtl.IPFDC to determine the fast debug channel interrupt
	 */
	if (cpu_has_mips_r2_r6) {
		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
		cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7;
		if (!cp0_fdc_irq)
			cp0_fdc_irq = -1;
	} else {
		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
		cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
		cp0_perfcount_irq = -1;
		cp0_fdc_irq = -1;
	}

	if (!cpu_data[cpu].asid_cache)
		cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	/* Boot CPU's cache setup in setup_arch(). */
	if (!is_boot_cpu)
		cpu_cache_init();
	tlb_init();
	TLBMISS_HANDLER_SETUP();
}
/* Install CPU exception handler */
void set_handler(unsigned long offset, void *addr, unsigned long size)
{
#ifdef CONFIG_CPU_MICROMIPS
	memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
#else
	memcpy((void *)(ebase + offset), addr, size);
#endif
	local_flush_icache_range(ebase + offset, ebase + offset + size);
}
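
/*
 * Note on the microMIPS "addr - 1" above (added for clarity): microMIPS
 * handler symbols carry the ISA bit, i.e. bit 0 set, so e.g. a handler
 * symbol of 0x80100001 names code starting at 0x80100000; subtracting
 * one yields the true byte address to copy from, while the odd address
 * recorded elsewhere keeps exception entry in microMIPS mode.
 */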
static char panic_null_cerr[] =
	"Trying to set NULL cache error exception handler";

/*
 * Install uncached CPU exception handler.
 * This is suitable only for the cache error exception which is the only
 * exception handler that is being run uncached.
 */
void set_uncached_handler(unsigned long offset, void *addr,
	unsigned long size)
{
	unsigned long uncached_ebase = CKSEG1ADDR(ebase);

	if (!addr)
		panic(panic_null_cerr);

	memcpy((void *)(uncached_ebase + offset), addr, size);
}
static int __initdata rdhwr_noopt;
static int __init set_rdhwr_noopt(char *str)
{
	rdhwr_noopt = 1;
	return 1;
}
__setup("rdhwr_noopt", set_rdhwr_noopt);
void __init trap_init(void)
{
	extern char except_vec3_generic;
	extern char except_vec4;
	extern char except_vec3_r4000;
	unsigned long i;

	check_wait();

	if (cpu_has_veic || cpu_has_vint) {
		unsigned long size = 0x200 + VECTORSPACING*64;
		ebase = (unsigned long)
			__alloc_bootmem(size, 1 << fls(size), 0);
	} else {
		ebase = CKSEG0;
		if (cpu_has_mips_r2_r6)
			ebase += (read_c0_ebase() & 0x3ffff000);
	}

	if (cpu_has_mmips) {
		unsigned int config3 = read_c0_config3();

		if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
			write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
		else
			write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
	}

	if (board_ebase_setup)
		board_ebase_setup();
	per_cpu_trap_init(true);

	/*
	 * Copy the generic exception handlers to their final destination.
	 * This will be overridden later as suitable for a particular
	 * configuration.
	 */
	set_handler(0x180, &except_vec3_generic, 0x80);

	/*
	 * Setup default vectors
	 */
	for (i = 0; i <= 31; i++)
		set_except_vector(i, handle_reserved);

	/*
	 * Copy the EJTAG debug exception vector handler code to its final
	 * destination.
	 */
	if (cpu_has_ejtag && board_ejtag_handler_setup)
		board_ejtag_handler_setup();

	/*
	 * Only some CPUs have the watch exceptions.
	 */
	if (cpu_has_watch)
		set_except_vector(23, handle_watch);

	/*
	 * Initialise interrupt handlers
	 */
	if (cpu_has_veic || cpu_has_vint) {
		int nvec = cpu_has_veic ? 64 : 8;
		for (i = 0; i < nvec; i++)
			set_vi_handler(i, NULL);
	}
	else if (cpu_has_divec)
		set_handler(0x200, &except_vec4, 0x8);

	/*
	 * Some CPUs can enable/disable cache parity detection, but they
	 * do it in different ways.
	 */
	parity_protection_init();

	/*
	 * The Data Bus Errors / Instruction Bus Errors are signaled
	 * by external hardware.  Therefore these two exceptions
	 * may have board specific handlers.
	 */
	if (board_be_init)
		board_be_init();

	set_except_vector(0, using_rollback_handler() ? rollback_handle_int
						      : handle_int);
	set_except_vector(1, handle_tlbm);
	set_except_vector(2, handle_tlbl);
	set_except_vector(3, handle_tlbs);

	set_except_vector(4, handle_adel);
	set_except_vector(5, handle_ades);

	set_except_vector(6, handle_ibe);
	set_except_vector(7, handle_dbe);

	set_except_vector(8, handle_sys);
	set_except_vector(9, handle_bp);
	set_except_vector(10, rdhwr_noopt ? handle_ri :
			  (cpu_has_vtag_icache ?
			   handle_ri_rdhwr_vivt : handle_ri_rdhwr));
	set_except_vector(11, handle_cpu);
	set_except_vector(12, handle_ov);
	set_except_vector(13, handle_tr);
	set_except_vector(14, handle_msa_fpe);

	if (current_cpu_type() == CPU_R6000 ||
	    current_cpu_type() == CPU_R6000A) {
		/*
		 * The R6000 is the only R-series CPU that features a machine
		 * check exception (similar to the R4000 cache error) and
		 * unaligned ldc1/sdc1 exception.  The handlers have not been
		 * written yet.	 Well, anyway there is no R6000 machine on the
		 * current list of targets for Linux/MIPS.
		 * (Duh, crap, there is someone with a triple R6k machine)
		 */
		//set_except_vector(14, handle_mc);
		//set_except_vector(15, handle_ndc);
	}

	if (board_nmi_handler_setup)
		board_nmi_handler_setup();

	if (cpu_has_fpu && !cpu_has_nofpuex)
		set_except_vector(15, handle_fpe);

	set_except_vector(16, handle_ftlb);

	if (cpu_has_rixiex) {
		set_except_vector(19, tlb_do_page_fault_0);
		set_except_vector(20, tlb_do_page_fault_0);
	}

	set_except_vector(21, handle_msa);
	set_except_vector(22, handle_mdmx);

	if (cpu_has_mcheck)
		set_except_vector(24, handle_mcheck);

	if (cpu_has_mipsmt)
		set_except_vector(25, handle_mt);

	set_except_vector(26, handle_dsp);

	if (board_cache_error_setup)
		board_cache_error_setup();

	if (cpu_has_vce)
		/* Special exception: R4[04]00 uses also the divec space. */
		set_handler(0x180, &except_vec3_r4000, 0x100);
	else if (cpu_has_4kex)
		set_handler(0x180, &except_vec3_generic, 0x80);
	else
		set_handler(0x080, &except_vec3_generic, 0x80);

	local_flush_icache_range(ebase, ebase + 0x400);

	sort_extable(__start___dbe_table, __stop___dbe_table);

	cu2_notifier(default_cu2_call, 0x80000000);	/* Run last  */
}
static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
			    void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		configure_status();
		configure_hwrena();

		configure_exception_vector();

		/* Restore register with CPU number for TLB handlers */
		TLBMISS_HANDLER_RESTORE();

		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block trap_pm_notifier_block = {
	.notifier_call = trap_pm_notifier,
};

static int __init trap_pm_init(void)
{
	return cpu_pm_register_notifier(&trap_pm_notifier_block);
}
arch_initcall(trap_pm_init);