// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/traps.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/context_tracking.h>
#include <linux/signal.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/efi.h>
#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/sizes.h>
#include <linux/syscalls.h>
#include <linux/mm_types.h>
#include <linux/kasan.h>
#include <linux/ubsan.h>
#include <linux/cfi.h>

#include <asm/atomic.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/exception.h>
#include <asm/extable.h>
#include <asm/kprobes.h>
#include <asm/text-patching.h>
#include <asm/traps.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
#include <asm/system_misc.h>
#include <asm/sysreg.h>

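/*
 * Helpers used to evaluate the condition code of a trapped AArch32
 * instruction against the NZCV bits of the saved PSTATE.
 */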
static bool __kprobes __check_eq(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) != 0;
}

static bool __kprobes __check_ne(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) == 0;
}

static bool __kprobes __check_cs(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_cc(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_mi(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_pl(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_vs(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) != 0;
}

static bool __kprobes __check_vc(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) == 0;
}

static bool __kprobes __check_hi(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_ls(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_ge(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_lt(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_gt(unsigned long pstate)
{
	/* PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/* PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) == 0;
}

static bool __kprobes __check_le(unsigned long pstate)
{
	/* PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/* PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) != 0;
}

static bool __kprobes __check_al(unsigned long pstate)
{
	return true;
}

/*
 * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
 * it behaves identically to 0b1110 ("al").
 */
pstate_check_t * const aarch32_opcode_cond_checks[16] = {
	__check_eq, __check_ne, __check_cs, __check_cc,
	__check_mi, __check_pl, __check_vs, __check_vc,
	__check_hi, __check_ls, __check_ge, __check_lt,
	__check_gt, __check_le, __check_al, __check_al
};

int show_unhandled_signals = 0;

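/*
 * Dump the instruction stream around a kernel fault: the four words before
 * the faulting PC plus the faulting word itself (shown in parentheses),
 * printing "????????" for any word that cannot be read.
 */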
static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
	int i;

	for (i = -4; i < 1; i++) {
		unsigned int val, bad;

		bad = aarch64_insn_read(&((u32 *)addr)[i], &val);

		if (!bad)
			p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
		else
			p += sprintf(p, i == 0 ? "(????????) " : "???????? ");
	}

	printk("%sCode: %s\n", lvl, str);
}

#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#elif defined(CONFIG_PREEMPT_RT)
#define S_PREEMPT " PREEMPT_RT"
#else
#define S_PREEMPT ""
#endif

#define S_SMP " SMP"

static int __die(const char *str, long err, struct pt_regs *regs)
{
	static int die_counter;
	int ret;

	pr_emerg("Internal error: %s: %016lx [#%d]" S_PREEMPT S_SMP "\n",
		 str, err, ++die_counter);

	/* trap and error numbers are mostly meaningless on ARM */
	ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV);
	if (ret == NOTIFY_STOP)
		return ret;

	print_modules();
	show_regs(regs);

	dump_kernel_instr(KERN_EMERG, regs);

	return ret;
}

static DEFINE_RAW_SPINLOCK(die_lock);

/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, long err)
{
	int ret;
	unsigned long flags;

	raw_spin_lock_irqsave(&die_lock, flags);

	oops_enter();

	console_verbose();
	bust_spinlocks(1);
	ret = __die(str, err, regs);

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	oops_exit();

	if (in_interrupt())
		panic("%s: Fatal exception in interrupt", str);
	if (panic_on_oops)
		panic("%s: Fatal exception", str);

	raw_spin_unlock_irqrestore(&die_lock, flags);

	if (ret != NOTIFY_STOP)
		make_task_dead(SIGSEGV);
}

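/*
 * Rate-limited diagnostic printed when a signal goes unhandled by a user
 * task; only emitted if show_unhandled_signals is set and the task has no
 * handler installed for the signal.
 */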
static void arm64_show_signal(int signo, const char *str)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct task_struct *tsk = current;
	unsigned long esr = tsk->thread.fault_code;
	struct pt_regs *regs = task_pt_regs(tsk);

	/* Leave if the signal won't be shown */
	if (!show_unhandled_signals ||
	    !unhandled_signal(tsk, signo) ||
	    !__ratelimit(&rs))
		return;

	pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk));
	if (esr)
		pr_cont("%s, ESR 0x%016lx, ", esr_get_class_string(esr), esr);

	pr_cont("%s", str);
	print_vma_addr(KERN_CONT " in ", regs->pc);
	pr_cont("\n");
	__show_regs(regs);
}

void arm64_force_sig_fault(int signo, int code, unsigned long far,
			   const char *str)
{
	arm64_show_signal(signo, str);
	if (signo == SIGKILL)
		force_sig(SIGKILL);
	else
		force_sig_fault(signo, code, (void __user *)far);
}

void arm64_force_sig_fault_pkey(unsigned long far, const char *str, int pkey)
{
	arm64_show_signal(SIGSEGV, str);
	force_sig_pkuerr((void __user *)far, pkey);
}

void arm64_force_sig_mceerr(int code, unsigned long far, short lsb,
			    const char *str)
{
	arm64_show_signal(SIGBUS, str);
	force_sig_mceerr(code, (void __user *)far, lsb);
}

void arm64_force_sig_ptrace_errno_trap(int errno, unsigned long far,
				       const char *str)
{
	arm64_show_signal(SIGTRAP, str);
	force_sig_ptrace_errno_trap(errno, (void __user *)far);
}

void arm64_notify_die(const char *str, struct pt_regs *regs,
		      int signo, int sicode, unsigned long far,
		      unsigned long err)
{
	if (user_mode(regs)) {
		WARN_ON(regs != current_pt_regs());
		current->thread.fault_address = 0;
		current->thread.fault_code = err;

		arm64_force_sig_fault(signo, sicode, far, str);
	} else {
		die(str, regs, err);
	}
}

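/*
 * The AArch32 ITSTATE is split across two non-contiguous fields of the
 * saved PSTATE (IT[1:0] and IT[7:2]); the helpers below reassemble and
 * write back the 8-bit value.
 */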
#ifdef CONFIG_COMPAT
#define PSTATE_IT_1_0_SHIFT	25
#define PSTATE_IT_1_0_MASK	(0x3 << PSTATE_IT_1_0_SHIFT)
#define PSTATE_IT_7_2_SHIFT	10
#define PSTATE_IT_7_2_MASK	(0x3f << PSTATE_IT_7_2_SHIFT)

static u32 compat_get_it_state(struct pt_regs *regs)
{
	u32 it, pstate = regs->pstate;

	it = (pstate & PSTATE_IT_1_0_MASK) >> PSTATE_IT_1_0_SHIFT;
	it |= ((pstate & PSTATE_IT_7_2_MASK) >> PSTATE_IT_7_2_SHIFT) << 2;

	return it;
}

static void compat_set_it_state(struct pt_regs *regs, u32 it)
{
	u32 pstate_it;

	pstate_it = (it << PSTATE_IT_1_0_SHIFT) & PSTATE_IT_1_0_MASK;
	pstate_it |= ((it >> 2) << PSTATE_IT_7_2_SHIFT) & PSTATE_IT_7_2_MASK;

	regs->pstate &= ~PSR_AA32_IT_MASK;
	regs->pstate |= pstate_it;
}

static void advance_itstate(struct pt_regs *regs)
{
	u32 it;

	/* ARM mode, or not in an IT block */
	if (!(regs->pstate & PSR_AA32_T_BIT) ||
	    !(regs->pstate & PSR_AA32_IT_MASK))
		return;

	it = compat_get_it_state(regs);

	/*
	 * If this is the last instruction of the block, wipe the IT
	 * state. Otherwise advance it.
	 */
	if (!(it & 7))
		it = 0;
	else
		it = (it & 0xe0) | ((it << 1) & 0x1f);

	compat_set_it_state(regs, it);
}
#else
static void advance_itstate(struct pt_regs *regs)
{
}
#endif

void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
{
	regs->pc += size;

	/*
	 * If we were single stepping, we want to get the step exception after
	 * we return from the trap.
	 */
	if (user_mode(regs))
		user_fastforward_single_step(current);

	if (compat_user_mode(regs))
		advance_itstate(regs);
	else
		regs->pstate &= ~PSR_BTYPE_MASK;
}

static int user_insn_read(struct pt_regs *regs, u32 *insnp)
{
	u32 instr;
	unsigned long pc = instruction_pointer(regs);

	if (compat_thumb_mode(regs)) {
		/* 16-bit Thumb instruction */
		__le16 instr_le;

		if (get_user(instr_le, (__le16 __user *)pc))
			return -EFAULT;
		instr = le16_to_cpu(instr_le);
		if (aarch32_insn_is_wide(instr)) {
			u32 instr2;

			if (get_user(instr_le, (__le16 __user *)(pc + 2)))
				return -EFAULT;
			instr2 = le16_to_cpu(instr_le);
			instr = (instr << 16) | instr2;
		}
	} else {
		/* 32-bit ARM instruction */
		__le32 instr_le;

		if (get_user(instr_le, (__le32 __user *)pc))
			return -EFAULT;
		instr = le32_to_cpu(instr_le);
	}

	*insnp = instr;
	return 0;
}

void force_signal_inject(int signal, int code, unsigned long address, unsigned long err)
{
	const char *desc;
	struct pt_regs *regs = current_pt_regs();

	if (WARN_ON(!user_mode(regs)))
		return;

	switch (signal) {
	case SIGILL:
		desc = "undefined instruction";
		break;
	case SIGSEGV:
		desc = "illegal memory access";
		break;
	default:
		desc = "unknown or unrecoverable error";
		break;
	}

	/* Force signals we don't understand to SIGKILL */
	if (WARN_ON(signal != SIGKILL &&
		    siginfo_layout(signal, code) != SIL_FAULT)) {
		signal = SIGKILL;
	}

	arm64_notify_die(desc, regs, signal, code, address, err);
}

/*
 * Set up process info to signal segmentation fault - called on access error.
 */
void arm64_notify_segfault(unsigned long addr)
{
	int code;

	mmap_read_lock(current->mm);
	if (find_vma(current->mm, untagged_addr(addr)) == NULL)
		code = SEGV_MAPERR;
	else
		code = SEGV_ACCERR;
	mmap_read_unlock(current->mm);

	force_signal_inject(SIGSEGV, code, addr, 0);
}

void do_el0_undef(struct pt_regs *regs, unsigned long esr)
{
	u32 insn;

	/* check for AArch32 breakpoint instructions */
	if (!aarch32_break_handler(regs))
		return;

	if (user_insn_read(regs, &insn))
		goto out_err;

	if (try_emulate_mrs(regs, insn))
		return;

	if (try_emulate_armv8_deprecated(regs, insn))
		return;

out_err:
	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}

void do_el1_undef(struct pt_regs *regs, unsigned long esr)
{
	u32 insn;

	if (aarch64_insn_read((void *)regs->pc, &insn))
		goto out_err;

	if (try_emulate_el1_ssbs(regs, insn))
		return;

out_err:
	die("Oops - Undefined instruction", regs, esr);
}

void do_el0_bti(struct pt_regs *regs)
{
	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}

void do_el1_bti(struct pt_regs *regs, unsigned long esr)
{
	if (efi_runtime_fixup_exception(regs, "BTI violation")) {
		regs->pstate &= ~PSR_BTYPE_MASK;
		return;
	}

	die("Oops - BTI", regs, esr);
}

void do_el0_gcs(struct pt_regs *regs, unsigned long esr)
{
	force_signal_inject(SIGSEGV, SEGV_CPERR, regs->pc, 0);
}

void do_el1_gcs(struct pt_regs *regs, unsigned long esr)
{
	die("Oops - GCS", regs, esr);
}

void do_el0_fpac(struct pt_regs *regs, unsigned long esr)
{
	force_signal_inject(SIGILL, ILL_ILLOPN, regs->pc, esr);
}

void do_el1_fpac(struct pt_regs *regs, unsigned long esr)
{
	/*
	 * Unexpected FPAC exception in the kernel: kill the task before it
	 * does any more harm.
	 */
	die("Oops - FPAC", regs, esr);
}

void do_el0_mops(struct pt_regs *regs, unsigned long esr)
{
	arm64_mops_reset_regs(&regs->user_regs, esr);

	/*
	 * If single stepping then finish the step before executing the
	 * prologue instruction.
	 */
	user_fastforward_single_step(current);
}

void do_el1_mops(struct pt_regs *regs, unsigned long esr)
{
	arm64_mops_reset_regs(&regs->user_regs, esr);

	kernel_fastforward_single_step(regs);
}

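/*
 * Issue a cache maintenance instruction on a user address with TTBR0
 * uaccess enabled; a fault is caught by the exception table entry and
 * reported through 'res'.
 */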
#define __user_cache_maint(insn, address, res)			\
	if (address >= TASK_SIZE_MAX) {				\
		res = -EFAULT;					\
	} else {						\
		uaccess_ttbr0_enable();				\
		asm volatile (					\
			"1:	" insn ", %1\n"			\
			"	mov	%w0, #0\n"		\
			"2:\n"					\
			_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0)	\
			: "=r" (res)				\
			: "r" (address));			\
		uaccess_ttbr0_disable();			\
	}

static void user_cache_maint_handler(unsigned long esr, struct pt_regs *regs)
{
	unsigned long tagged_address, address;
	int rt = ESR_ELx_SYS64_ISS_RT(esr);
	int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
	int ret = 0;

	tagged_address = pt_regs_read_reg(regs, rt);
	address = untagged_addr(tagged_address);

	switch (crm) {
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAU:	/* DC CVAU, gets promoted */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAC:	/* DC CVAC, gets promoted */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVADP:	/* DC CVADP */
		__user_cache_maint("sys 3, c7, c13, 1", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAP:	/* DC CVAP */
		__user_cache_maint("sys 3, c7, c12, 1", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CIVAC:	/* DC CIVAC */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_IC_IVAU:	/* IC IVAU */
		__user_cache_maint("ic ivau", address, ret);
		break;
	default:
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
		return;
	}

	if (ret)
		arm64_notify_segfault(tagged_address);
	else
		arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void ctr_read_handler(unsigned long esr, struct pt_regs *regs)
{
	int rt = ESR_ELx_SYS64_ISS_RT(esr);
	unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);

	if (cpus_have_final_cap(ARM64_WORKAROUND_1542419)) {
		/* Hide DIC so that we can trap the unnecessary maintenance...*/
		val &= ~BIT(CTR_EL0_DIC_SHIFT);

		/* ... and fake IminLine to reduce the number of traps. */
		val &= ~CTR_EL0_IminLine_MASK;
		val |= (PAGE_SHIFT - 2) & CTR_EL0_IminLine_MASK;
	}

	pt_regs_write_reg(regs, rt, val);

	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void cntvct_read_handler(unsigned long esr, struct pt_regs *regs)
{
	if (test_thread_flag(TIF_TSC_SIGSEGV)) {
		force_sig(SIGSEGV);
	} else {
		int rt = ESR_ELx_SYS64_ISS_RT(esr);

		pt_regs_write_reg(regs, rt, arch_timer_read_counter());
		arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	}
}

static void cntfrq_read_handler(unsigned long esr, struct pt_regs *regs)
{
	if (test_thread_flag(TIF_TSC_SIGSEGV)) {
		force_sig(SIGSEGV);
	} else {
		int rt = ESR_ELx_SYS64_ISS_RT(esr);

		pt_regs_write_reg(regs, rt, arch_timer_get_rate());
		arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	}
}

static void mrs_handler(unsigned long esr, struct pt_regs *regs)
{
	u32 sysreg, rt;

	rt = ESR_ELx_SYS64_ISS_RT(esr);
	sysreg = esr_sys64_to_sysreg(esr);

	if (do_emulate_mrs(regs, sysreg, rt) != 0)
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}

static void wfi_handler(unsigned long esr, struct pt_regs *regs)
{
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

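/*
 * A trapped EL0 system instruction is matched against these hooks by
 * masking the ESR and comparing it with the expected value.
 */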
struct sys64_hook {
	unsigned long esr_mask;
	unsigned long esr_val;
	void (*handler)(unsigned long esr, struct pt_regs *regs);
};

static const struct sys64_hook sys64_hooks[] = {
	{
		.esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL,
		.handler = user_cache_maint_handler,
	},
	{
		/* Trap read access to CTR_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CTR_READ,
		.handler = ctr_read_handler,
	},
	{
		/* Trap read access to CNTVCT_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCT,
		.handler = cntvct_read_handler,
	},
	{
		/* Trap read access to CNTVCTSS_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCTSS,
		.handler = cntvct_read_handler,
	},
	{
		/* Trap read access to CNTFRQ_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTFRQ,
		.handler = cntfrq_read_handler,
	},
	{
		/* Trap read access to CPUID registers */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_MRS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_MRS_OP_VAL,
		.handler = mrs_handler,
	},
	{
		/* Trap WFI instructions executed in userspace */
		.esr_mask = ESR_ELx_WFx_MASK,
		.esr_val = ESR_ELx_WFx_WFI_VAL,
		.handler = wfi_handler,
	},
	{},
};

#ifdef CONFIG_COMPAT
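/*
 * Trapped AArch32 CP15 accesses are conditional; evaluate the condition
 * from ESR.COND, or from the saved ITSTATE for a T32 access trapped
 * without a valid CV bit.
 */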
static bool cp15_cond_valid(unsigned long esr, struct pt_regs *regs)
{
	int cond;

	/* Only a T32 instruction can trap without CV being set */
	if (!(esr & ESR_ELx_CV)) {
		u32 it;

		it = compat_get_it_state(regs);
		if (!it)
			return true;

		cond = it >> 4;
	} else {
		cond = (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
	}

	return aarch32_opcode_cond_checks[cond](regs->pstate);
}

static void compat_cntfrq_read_handler(unsigned long esr, struct pt_regs *regs)
{
	int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT;

	pt_regs_write_reg(regs, reg, arch_timer_get_rate());
	arm64_skip_faulting_instruction(regs, 4);
}

static const struct sys64_hook cp15_32_hooks[] = {
	{
		.esr_mask = ESR_ELx_CP15_32_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_32_ISS_SYS_CNTFRQ,
		.handler = compat_cntfrq_read_handler,
	},
	{},
};

static void compat_cntvct_read_handler(unsigned long esr, struct pt_regs *regs)
{
	int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT;
	int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT;
	u64 val = arch_timer_read_counter();

	pt_regs_write_reg(regs, rt, lower_32_bits(val));
	pt_regs_write_reg(regs, rt2, upper_32_bits(val));
	arm64_skip_faulting_instruction(regs, 4);
}

static const struct sys64_hook cp15_64_hooks[] = {
	{
		.esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCT,
		.handler = compat_cntvct_read_handler,
	},
	{
		.esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCTSS,
		.handler = compat_cntvct_read_handler,
	},
	{},
};

void do_el0_cp15(unsigned long esr, struct pt_regs *regs)
{
	const struct sys64_hook *hook, *hook_base;

	if (!cp15_cond_valid(esr, regs)) {
		/*
		 * There is no T16 variant of a CP access, so we
		 * always advance PC by 4 bytes.
		 */
		arm64_skip_faulting_instruction(regs, 4);
		return;
	}

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_CP15_32:
		hook_base = cp15_32_hooks;
		break;
	case ESR_ELx_EC_CP15_64:
		hook_base = cp15_64_hooks;
		break;
	default:
		do_el0_undef(regs, esr);
		return;
	}

	for (hook = hook_base; hook->handler; hook++)
		if ((hook->esr_mask & esr) == hook->esr_val) {
			hook->handler(esr, regs);
			return;
		}

	/*
	 * New cp15 instructions may previously have been undefined at
	 * EL0. Fall back to our usual undefined instruction handler
	 * so that we handle these consistently.
	 */
	do_el0_undef(regs, esr);
}
#endif /* CONFIG_COMPAT */

void do_el0_sys(unsigned long esr, struct pt_regs *regs)
{
	const struct sys64_hook *hook;

	for (hook = sys64_hooks; hook->handler; hook++)
		if ((hook->esr_mask & esr) == hook->esr_val) {
			hook->handler(esr, regs);
			return;
		}

	/*
	 * New SYS instructions may previously have been undefined at EL0. Fall
	 * back to our usual undefined instruction handler so that we handle
	 * these consistently.
	 */
	do_el0_undef(regs, esr);
}

static const char *esr_class_str[] = {
	[0 ... ESR_ELx_EC_MAX]		= "UNRECOGNIZED EC",
	[ESR_ELx_EC_UNKNOWN]		= "Unknown/Uncategorized",
	[ESR_ELx_EC_WFx]		= "WFI/WFE",
	[ESR_ELx_EC_CP15_32]		= "CP15 MCR/MRC",
	[ESR_ELx_EC_CP15_64]		= "CP15 MCRR/MRRC",
	[ESR_ELx_EC_CP14_MR]		= "CP14 MCR/MRC",
	[ESR_ELx_EC_CP14_LS]		= "CP14 LDC/STC",
	[ESR_ELx_EC_FP_ASIMD]		= "ASIMD",
	[ESR_ELx_EC_CP10_ID]		= "CP10 MRC/VMRS",
	[ESR_ELx_EC_PAC]		= "PAC",
	[ESR_ELx_EC_CP14_64]		= "CP14 MCRR/MRRC",
	[ESR_ELx_EC_BTI]		= "BTI",
	[ESR_ELx_EC_ILL]		= "PSTATE.IL",
	[ESR_ELx_EC_SVC32]		= "SVC (AArch32)",
	[ESR_ELx_EC_HVC32]		= "HVC (AArch32)",
	[ESR_ELx_EC_SMC32]		= "SMC (AArch32)",
	[ESR_ELx_EC_SVC64]		= "SVC (AArch64)",
	[ESR_ELx_EC_HVC64]		= "HVC (AArch64)",
	[ESR_ELx_EC_SMC64]		= "SMC (AArch64)",
	[ESR_ELx_EC_SYS64]		= "MSR/MRS (AArch64)",
	[ESR_ELx_EC_SVE]		= "SVE",
	[ESR_ELx_EC_ERET]		= "ERET/ERETAA/ERETAB",
	[ESR_ELx_EC_FPAC]		= "FPAC",
	[ESR_ELx_EC_SME]		= "SME",
	[ESR_ELx_EC_IMP_DEF]		= "EL3 IMP DEF",
	[ESR_ELx_EC_IABT_LOW]		= "IABT (lower EL)",
	[ESR_ELx_EC_IABT_CUR]		= "IABT (current EL)",
	[ESR_ELx_EC_PC_ALIGN]		= "PC Alignment",
	[ESR_ELx_EC_DABT_LOW]		= "DABT (lower EL)",
	[ESR_ELx_EC_DABT_CUR]		= "DABT (current EL)",
	[ESR_ELx_EC_SP_ALIGN]		= "SP Alignment",
	[ESR_ELx_EC_MOPS]		= "MOPS",
	[ESR_ELx_EC_FP_EXC32]		= "FP (AArch32)",
	[ESR_ELx_EC_FP_EXC64]		= "FP (AArch64)",
	[ESR_ELx_EC_GCS]		= "Guarded Control Stack",
	[ESR_ELx_EC_SERROR]		= "SError",
	[ESR_ELx_EC_BREAKPT_LOW]	= "Breakpoint (lower EL)",
	[ESR_ELx_EC_BREAKPT_CUR]	= "Breakpoint (current EL)",
	[ESR_ELx_EC_SOFTSTP_LOW]	= "Software Step (lower EL)",
	[ESR_ELx_EC_SOFTSTP_CUR]	= "Software Step (current EL)",
	[ESR_ELx_EC_WATCHPT_LOW]	= "Watchpoint (lower EL)",
	[ESR_ELx_EC_WATCHPT_CUR]	= "Watchpoint (current EL)",
	[ESR_ELx_EC_BKPT32]		= "BKPT (AArch32)",
	[ESR_ELx_EC_VECTOR32]		= "Vector catch (AArch32)",
	[ESR_ELx_EC_BRK64]		= "BRK (AArch64)",
};

const char *esr_get_class_string(unsigned long esr)
{
	return esr_class_str[ESR_ELx_EC(esr)];
}

/*
 * bad_el0_sync handles unexpected, but potentially recoverable synchronous
 * exceptions taken from EL0.
 */
void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr)
{
	unsigned long pc = instruction_pointer(regs);

	current->thread.fault_address = 0;
	current->thread.fault_code = esr;

	arm64_force_sig_fault(SIGILL, ILL_ILLOPC, pc,
			      "Bad EL0 synchronous exception");
}

#ifdef CONFIG_VMAP_STACK

DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
	__aligned(16);

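/*
 * Report a kernel stack overflow: dump the ESR/FAR and the task, IRQ and
 * overflow stack extents, then panic.
 */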
void __noreturn panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far)
{
	unsigned long tsk_stk = (unsigned long)current->stack;
	unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
	unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);

	console_verbose();
	pr_emerg("Insufficient stack space to handle exception!");

	pr_emerg("ESR: 0x%016lx -- %s\n", esr, esr_get_class_string(esr));
	pr_emerg("FAR: 0x%016lx\n", far);

	pr_emerg("Task stack:     [0x%016lx..0x%016lx]\n",
		 tsk_stk, tsk_stk + THREAD_SIZE);
	pr_emerg("IRQ stack:      [0x%016lx..0x%016lx]\n",
		 irq_stk, irq_stk + IRQ_STACK_SIZE);
	pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
		 ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);

	__show_regs(regs);

	/*
	 * We use nmi_panic to limit the potential for recursive overflows, and
	 * to get a better stack trace.
	 */
	nmi_panic(NULL, "kernel stack overflow");
	cpu_park_loop();
}
#endif /* CONFIG_VMAP_STACK */

void __noreturn arm64_serror_panic(struct pt_regs *regs, unsigned long esr)
{
	console_verbose();

	pr_crit("SError Interrupt on CPU%d, code 0x%016lx -- %s\n",
		smp_processor_id(), esr, esr_get_class_string(esr));
	if (regs)
		__show_regs(regs);

	nmi_panic(regs, "Asynchronous SError Interrupt");

	cpu_park_loop();
	unreachable();
}

bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned long esr)
{
	unsigned long aet = arm64_ras_serror_get_severity(esr);

	switch (aet) {
	case ESR_ELx_AET_CE:	/* corrected error */
	case ESR_ELx_AET_UEO:	/* restartable, not yet consumed */
		/*
		 * The CPU can make progress. We may take UEO again as
		 * a more severe error.
		 */
		return false;

	case ESR_ELx_AET_UEU:	/* Uncorrected Unrecoverable */
	case ESR_ELx_AET_UER:	/* Uncorrected Recoverable */
		/*
		 * The CPU can't make progress. The exception may have
		 * been imprecise.
		 *
		 * Neoverse-N1 #1349291 means a non-KVM SError reported as
		 * Unrecoverable should be treated as Uncontainable. We
		 * call arm64_serror_panic() in both cases.
		 */
		return true;

	case ESR_ELx_AET_UC:	/* Uncontainable or Uncategorized error */
	default:
		/* Error has been silently propagated */
		arm64_serror_panic(regs, esr);
	}
}

void do_serror(struct pt_regs *regs, unsigned long esr)
{
	/* non-RAS errors are not containable */
	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
		arm64_serror_panic(regs, esr);
}

/* GENERIC_BUG traps */
#ifdef CONFIG_GENERIC_BUG
int is_valid_bugaddr(unsigned long addr)
{
	/*
	 * bug_handler() only called for BRK #BUG_BRK_IMM.
	 * So the answer is trivial -- any spurious instances with no
	 * bug table entry will be rejected by report_bug() and passed
	 * back to the debug-monitors code and handled as a fatal
	 * unexpected debug exception.
	 */
	return 1;
}
#endif

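/*
 * Handle the BRK trap raised by BUG()/WARN(): die for a BUG, otherwise
 * skip the trapping instruction and continue.
 */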
static int bug_handler(struct pt_regs *regs, unsigned long esr)
{
	switch (report_bug(regs->pc, regs)) {
	case BUG_TRAP_TYPE_BUG:
		die("Oops - BUG", regs, esr);
		break;

	case BUG_TRAP_TYPE_WARN:
		break;

	default:
		/* unknown/unrecognised bug trap type */
		return DBG_HOOK_ERROR;
	}

	/* If thread survives, skip over the BUG instruction and continue: */
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	return DBG_HOOK_HANDLED;
}

static struct break_hook bug_break_hook = {
	.fn = bug_handler,
	.imm = BUG_BRK_IMM,
};

#ifdef CONFIG_CFI_CLANG
static int cfi_handler(struct pt_regs *regs, unsigned long esr)
{
	unsigned long target;
	u32 type;

	target = pt_regs_read_reg(regs, FIELD_GET(CFI_BRK_IMM_TARGET, esr));
	type = (u32)pt_regs_read_reg(regs, FIELD_GET(CFI_BRK_IMM_TYPE, esr));

	switch (report_cfi_failure(regs, regs->pc, &target, type)) {
	case BUG_TRAP_TYPE_BUG:
		die("Oops - CFI", regs, esr);
		break;

	case BUG_TRAP_TYPE_WARN:
		break;

	default:
		return DBG_HOOK_ERROR;
	}

	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	return DBG_HOOK_HANDLED;
}

static struct break_hook cfi_break_hook = {
	.fn = cfi_handler,
	.imm = CFI_BRK_IMM_BASE,
	.mask = CFI_BRK_IMM_MASK,
};
#endif /* CONFIG_CFI_CLANG */

static int reserved_fault_handler(struct pt_regs *regs, unsigned long esr)
{
	pr_err("%s generated an invalid instruction at %pS!\n",
		"Kernel text patching",
		(void *)instruction_pointer(regs));

	/* We cannot handle this */
	return DBG_HOOK_ERROR;
}

static struct break_hook fault_break_hook = {
	.fn = reserved_fault_handler,
	.imm = FAULT_BRK_IMM,
};

#ifdef CONFIG_KASAN_SW_TAGS

#define KASAN_ESR_RECOVER	0x20
#define KASAN_ESR_WRITE	0x10
#define KASAN_ESR_SIZE_MASK	0x0f
#define KASAN_ESR_SIZE(esr)	(1 << ((esr) & KASAN_ESR_SIZE_MASK))

static int kasan_handler(struct pt_regs *regs, unsigned long esr)
{
	bool recover = esr & KASAN_ESR_RECOVER;
	bool write = esr & KASAN_ESR_WRITE;
	size_t size = KASAN_ESR_SIZE(esr);
	void *addr = (void *)regs->regs[0];
	u64 pc = regs->pc;

	kasan_report(addr, size, write, pc);

	/*
	 * The instrumentation allows us to control whether we can proceed
	 * after a crash was detected. This is done by passing the -recover
	 * flag to the compiler. Disabling recovery allows generating more
	 * compact code.
	 *
	 * Unfortunately disabling recovery doesn't work for the kernel right
	 * now. KASAN reporting is disabled in some contexts (for example when
	 * the allocator accesses slab object metadata; this is controlled by
	 * current->kasan_depth). All these accesses are detected by the tool,
	 * even though the reports for them are not printed.
	 *
	 * This is something that might be fixed at some point in the future.
	 */
	if (!recover)
		die("Oops - KASAN", regs, esr);

	/* If thread survives, skip over the brk instruction and continue: */
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	return DBG_HOOK_HANDLED;
}

static struct break_hook kasan_break_hook = {
	.fn	= kasan_handler,
	.imm	= KASAN_BRK_IMM,
	.mask	= KASAN_BRK_MASK,
};
#endif /* CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_UBSAN_TRAP
static int ubsan_handler(struct pt_regs *regs, unsigned long esr)
{
	die(report_ubsan_failure(regs, esr & UBSAN_BRK_MASK), regs, esr);
	return DBG_HOOK_HANDLED;
}

static struct break_hook ubsan_break_hook = {
	.fn	= ubsan_handler,
	.imm	= UBSAN_BRK_IMM,
	.mask	= UBSAN_BRK_MASK,
};
#endif /* CONFIG_UBSAN_TRAP */

/*
 * Initial handler for AArch64 BRK exceptions.
 * This handler is only used until debug_traps_init().
 */
int __init early_brk64(unsigned long addr, unsigned long esr,
		struct pt_regs *regs)
{
#ifdef CONFIG_CFI_CLANG
	if (esr_is_cfi_brk(esr))
		return cfi_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
#ifdef CONFIG_KASAN_SW_TAGS
	if ((esr_brk_comment(esr) & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
		return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
#ifdef CONFIG_UBSAN_TRAP
	if ((esr_brk_comment(esr) & ~UBSAN_BRK_MASK) == UBSAN_BRK_IMM)
		return ubsan_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
	return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
}

void __init trap_init(void)
{
	register_kernel_break_hook(&bug_break_hook);
#ifdef CONFIG_CFI_CLANG
	register_kernel_break_hook(&cfi_break_hook);
#endif
	register_kernel_break_hook(&fault_break_hook);
#ifdef CONFIG_KASAN_SW_TAGS
	register_kernel_break_hook(&kasan_break_hook);
#endif
#ifdef CONFIG_UBSAN_TRAP
	register_kernel_break_hook(&ubsan_break_hook);
#endif

	debug_traps_init();
}