// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/traps.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/bug.h>
#include <linux/context_tracking.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/sizes.h>
#include <linux/syscalls.h>
#include <linux/mm_types.h>
#include <linux/kasan.h>

#include <asm/atomic.h>
#include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/extable.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
#include <asm/system_misc.h>
#include <asm/sysreg.h>

static const char *handler[] = {
	"Synchronous Abort",
	"IRQ",
	"FIQ",
	"Error"
};

int show_unhandled_signals = 0;
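
/*
 * Dump the kernel instruction stream around the faulting PC: the four
 * instructions before it plus the faulting instruction itself, which is
 * printed in parentheses. Nothing is dumped for user-mode faults.
 */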
static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
	int i;

	if (user_mode(regs))
		return;

	for (i = -4; i < 1; i++) {
		unsigned int val, bad;

		bad = aarch64_insn_read(&((u32 *)addr)[i], &val);

		if (!bad)
			p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
		else {
			p += sprintf(p, "bad PC value");
			break;
		}
	}

	printk("%sCode: %s\n", lvl, str);
}

#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#elif defined(CONFIG_PREEMPT_RT)
#define S_PREEMPT " PREEMPT_RT"
#else
#define S_PREEMPT ""
#endif

#define S_SMP " SMP"

static int __die(const char *str, int err, struct pt_regs *regs)
{
	static int die_counter;
	int ret;

	pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
		 str, err, ++die_counter);

	/* trap and error numbers are mostly meaningless on ARM */
	ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV);
	if (ret == NOTIFY_STOP)
		return ret;

	print_modules();
	show_regs(regs);

	dump_kernel_instr(KERN_EMERG, regs);

	return ret;
}

static DEFINE_RAW_SPINLOCK(die_lock);

/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
	int ret;
	unsigned long flags;

	raw_spin_lock_irqsave(&die_lock, flags);

	oops_enter();

	console_verbose();
	bust_spinlocks(1);
	ret = __die(str, err, regs);

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	oops_exit();

	if (in_interrupt())
		panic("%s: Fatal exception in interrupt", str);
	if (panic_on_oops)
		panic("%s: Fatal exception", str);

	raw_spin_unlock_irqrestore(&die_lock, flags);

	if (ret != NOTIFY_STOP)
		do_exit(SIGSEGV);
}
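
/*
 * Print a rate-limited "unhandled exception" diagnostic for a signal that is
 * about to be delivered to userspace, when show_unhandled_signals is set.
 */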
static void arm64_show_signal(int signo, const char *str)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct task_struct *tsk = current;
	unsigned int esr = tsk->thread.fault_code;
	struct pt_regs *regs = task_pt_regs(tsk);

	/* Leave if the signal won't be shown */
	if (!show_unhandled_signals ||
	    !unhandled_signal(tsk, signo) ||
	    !__ratelimit(&rs))
		return;

	pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk));
	if (esr)
		pr_cont("%s, ESR 0x%08x, ", esr_get_class_string(esr), esr);

	pr_cont("%s", str);
	print_vma_addr(KERN_CONT " in ", regs->pc);
	pr_cont("\n");
	__show_regs(regs);
}

void arm64_force_sig_fault(int signo, int code, unsigned long far,
			   const char *str)
{
	arm64_show_signal(signo, str);
	if (signo == SIGKILL)
		force_sig(SIGKILL);
	else
		force_sig_fault(signo, code, (void __user *)far);
}

void arm64_force_sig_mceerr(int code, unsigned long far, short lsb,
			    const char *str)
{
	arm64_show_signal(SIGBUS, str);
	force_sig_mceerr(code, (void __user *)far, lsb);
}

void arm64_force_sig_ptrace_errno_trap(int errno, unsigned long far,
				       const char *str)
{
	arm64_show_signal(SIGTRAP, str);
	force_sig_ptrace_errno_trap(errno, (void __user *)far);
}

void arm64_notify_die(const char *str, struct pt_regs *regs,
		      int signo, int sicode, unsigned long far,
		      int err)
{
	if (user_mode(regs)) {
		WARN_ON(regs != current_pt_regs());
		current->thread.fault_address = 0;
		current->thread.fault_code = err;

		arm64_force_sig_fault(signo, sicode, far, str);
	} else {
		die(str, regs, err);
	}
}

#ifdef CONFIG_COMPAT
#define PSTATE_IT_1_0_SHIFT	25
#define PSTATE_IT_1_0_MASK	(0x3 << PSTATE_IT_1_0_SHIFT)
#define PSTATE_IT_7_2_SHIFT	10
#define PSTATE_IT_7_2_MASK	(0x3f << PSTATE_IT_7_2_SHIFT)
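
/*
 * The AArch32 ITSTATE field is split across PSTATE: bits IT[1:0] live in
 * PSTATE[26:25] and IT[7:2] in PSTATE[15:10]. These helpers reassemble and
 * update it as a single 8-bit value.
 */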
static u32 compat_get_it_state(struct pt_regs *regs)
{
	u32 it, pstate = regs->pstate;

	it  = (pstate & PSTATE_IT_1_0_MASK) >> PSTATE_IT_1_0_SHIFT;
	it |= ((pstate & PSTATE_IT_7_2_MASK) >> PSTATE_IT_7_2_SHIFT) << 2;

	return it;
}

static void compat_set_it_state(struct pt_regs *regs, u32 it)
{
	u32 pstate_it;

	pstate_it  = (it << PSTATE_IT_1_0_SHIFT) & PSTATE_IT_1_0_MASK;
	pstate_it |= ((it >> 2) << PSTATE_IT_7_2_SHIFT) & PSTATE_IT_7_2_MASK;

	regs->pstate &= ~PSR_AA32_IT_MASK;
	regs->pstate |= pstate_it;
}

static void advance_itstate(struct pt_regs *regs)
{
	u32 it;

	/* ARM mode */
	if (!(regs->pstate & PSR_AA32_T_BIT) ||
	    !(regs->pstate & PSR_AA32_IT_MASK))
		return;

	it = compat_get_it_state(regs);

	/*
	 * If this is the last instruction of the block, wipe the IT
	 * state. Otherwise advance it.
	 */
	if (!(it & 7))
		it = 0;
	else
		it = (it & 0xe0) | ((it << 1) & 0x1f);

	compat_set_it_state(regs, it);
}
#else
static void advance_itstate(struct pt_regs *regs)
{
}
#endif

void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
{
	regs->pc += size;

	/*
	 * If we were single stepping, we want to get the step exception after
	 * we return from the trap.
	 */
	if (user_mode(regs))
		user_fastforward_single_step(current);

	if (compat_user_mode(regs))
		advance_itstate(regs);
	else
		regs->pstate &= ~PSR_BTYPE_MASK;
}
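
/*
 * Hooks registered here get a chance to emulate or fix up an instruction that
 * would otherwise be treated as undefined, before a SIGILL is delivered.
 */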
static LIST_HEAD(undef_hook);
static DEFINE_RAW_SPINLOCK(undef_lock);

void register_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_add(&hook->node, &undef_hook);
	raw_spin_unlock_irqrestore(&undef_lock, flags);
}

void unregister_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_del(&hook->node);
	raw_spin_unlock_irqrestore(&undef_lock, flags);
}

static int call_undef_hook(struct pt_regs *regs)
{
	struct undef_hook *hook;
	unsigned long flags;
	u32 instr;
	int (*fn)(struct pt_regs *regs, u32 instr) = NULL;
	void __user *pc = (void __user *)instruction_pointer(regs);

	if (!user_mode(regs)) {
		__le32 instr_le;
		if (get_kernel_nofault(instr_le, (__force __le32 *)pc))
			goto exit;
		instr = le32_to_cpu(instr_le);
	} else if (compat_thumb_mode(regs)) {
		/* 16-bit Thumb instruction */
		__le16 instr_le;
		if (get_user(instr_le, (__le16 __user *)pc))
			goto exit;
		instr = le16_to_cpu(instr_le);
		if (aarch32_insn_is_wide(instr)) {
			u32 instr2;

			if (get_user(instr_le, (__le16 __user *)(pc + 2)))
				goto exit;
			instr2 = le16_to_cpu(instr_le);
			instr = (instr << 16) | instr2;
		}
	} else {
		/* 32-bit ARM instruction */
		__le32 instr_le;
		if (get_user(instr_le, (__le32 __user *)pc))
			goto exit;
		instr = le32_to_cpu(instr_le);
	}

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_for_each_entry(hook, &undef_hook, node)
		if ((instr & hook->instr_mask) == hook->instr_val &&
			(regs->pstate & hook->pstate_mask) == hook->pstate_val)
			fn = hook->fn;

	raw_spin_unlock_irqrestore(&undef_lock, flags);
exit:
	return fn ? fn(regs, instr) : 1;
}
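
/*
 * Inject a fault signal for an instruction that trapped in userspace. Signals
 * other than SIGKILL that cannot carry fault information are forced to
 * SIGKILL.
 */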
void force_signal_inject(int signal, int code, unsigned long address, unsigned int err)
{
	const char *desc;
	struct pt_regs *regs = current_pt_regs();

	if (WARN_ON(!user_mode(regs)))
		return;

	switch (signal) {
	case SIGILL:
		desc = "undefined instruction";
		break;
	case SIGSEGV:
		desc = "illegal memory access";
		break;
	default:
		desc = "unknown or unrecoverable error";
		break;
	}

	/* Force signals we don't understand to SIGKILL */
	if (WARN_ON(signal != SIGKILL &&
		    siginfo_layout(signal, code) != SIL_FAULT)) {
		signal = SIGKILL;
	}

	arm64_notify_die(desc, regs, signal, code, address, err);
}

/*
 * Set up process info to signal segmentation fault - called on access error.
 */
void arm64_notify_segfault(unsigned long addr)
{
	int code;

	mmap_read_lock(current->mm);
	if (find_vma(current->mm, untagged_addr(addr)) == NULL)
		code = SEGV_MAPERR;
	else
		code = SEGV_ACCERR;
	mmap_read_unlock(current->mm);

	force_signal_inject(SIGSEGV, code, addr, 0);
}

void do_undefinstr(struct pt_regs *regs)
{
	/* check for AArch32 breakpoint instructions */
	if (!aarch32_break_handler(regs))
		return;

	if (call_undef_hook(regs) == 0)
		return;

	BUG_ON(!user_mode(regs));
	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}
NOKPROBE_SYMBOL(do_undefinstr);

void do_bti(struct pt_regs *regs)
{
	BUG_ON(!user_mode(regs));
	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}
NOKPROBE_SYMBOL(do_bti);

void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr)
{
	/*
	 * Unexpected FPAC exception or pointer authentication failure in
	 * the kernel: kill the task before it does any more harm.
	 */
	BUG_ON(!user_mode(regs));
	force_signal_inject(SIGILL, ILL_ILLOPN, regs->pc, esr);
}
NOKPROBE_SYMBOL(do_ptrauth_fault);
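
/*
 * Perform a cache maintenance instruction on behalf of userspace on the given
 * user address, with an exception-table fixup so that a faulting access sets
 * "res" to -EFAULT instead of taking an unhandled kernel fault.
 */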
#define __user_cache_maint(insn, address, res)			\
	if (address >= user_addr_max()) {			\
		res = -EFAULT;					\
	} else {						\
		uaccess_ttbr0_enable();				\
		asm volatile (					\
			"1:	" insn ", %1\n"			\
			"	mov	%w0, #0\n"		\
			"2:\n"					\
			"	.pushsection .fixup,\"ax\"\n"	\
			"	.align	2\n"			\
			"3:	mov	%w0, %w2\n"		\
			"	b	2b\n"			\
			"	.popsection\n"			\
			_ASM_EXTABLE(1b, 3b)			\
			: "=r" (res)				\
			: "r" (address), "i" (-EFAULT));	\
		uaccess_ttbr0_disable();			\
	}

static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
{
	unsigned long tagged_address, address;
	int rt = ESR_ELx_SYS64_ISS_RT(esr);
	int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
	int ret = 0;

	tagged_address = pt_regs_read_reg(regs, rt);
	address = untagged_addr(tagged_address);

	switch (crm) {
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAU:	/* DC CVAU, gets promoted */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAC:	/* DC CVAC, gets promoted */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVADP:	/* DC CVADP */
		__user_cache_maint("sys 3, c7, c13, 1", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAP:	/* DC CVAP */
		__user_cache_maint("sys 3, c7, c12, 1", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CIVAC:	/* DC CIVAC */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_IC_IVAU:	/* IC IVAU */
		__user_cache_maint("ic ivau", address, ret);
		break;
	default:
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
		return;
	}

	if (ret)
		arm64_notify_segfault(tagged_address);
	else
		arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
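
/*
 * Emulate a userspace read of CTR_EL0, returning the sanitised system-wide
 * value, with an erratum workaround applied where needed.
 */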
static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
{
	int rt = ESR_ELx_SYS64_ISS_RT(esr);
	unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);

	if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
		/* Hide DIC so that we can trap the unnecessary maintenance...*/
		val &= ~BIT(CTR_DIC_SHIFT);

		/* ... and fake IminLine to reduce the number of traps. */
		val &= ~CTR_IMINLINE_MASK;
		val |= (PAGE_SHIFT - 2) & CTR_IMINLINE_MASK;
	}

	pt_regs_write_reg(regs, rt, val);

	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
{
	int rt = ESR_ELx_SYS64_ISS_RT(esr);

	pt_regs_write_reg(regs, rt, arch_timer_read_counter());
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
{
	int rt = ESR_ELx_SYS64_ISS_RT(esr);

	pt_regs_write_reg(regs, rt, arch_timer_get_rate());
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void mrs_handler(unsigned int esr, struct pt_regs *regs)
{
	u32 sysreg, rt;

	rt = ESR_ELx_SYS64_ISS_RT(esr);
	sysreg = esr_sys64_to_sysreg(esr);

	if (do_emulate_mrs(regs, sysreg, rt) != 0)
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}

static void wfi_handler(unsigned int esr, struct pt_regs *regs)
{
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
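
/*
 * A sys64_hook matches the ISS field of a trapped MSR/MRS/SYS instruction
 * against esr_mask/esr_val and emulates the access via the associated
 * handler.
 */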
struct sys64_hook {
	unsigned int esr_mask;
	unsigned int esr_val;
	void (*handler)(unsigned int esr, struct pt_regs *regs);
};

static const struct sys64_hook sys64_hooks[] = {
	{
		.esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL,
		.handler = user_cache_maint_handler,
	},
	{
		/* Trap read access to CTR_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CTR_READ,
		.handler = ctr_read_handler,
	},
	{
		/* Trap read access to CNTVCT_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCT,
		.handler = cntvct_read_handler,
	},
	{
		/* Trap read access to CNTFRQ_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTFRQ,
		.handler = cntfrq_read_handler,
	},
	{
		/* Trap read access to CPUID registers */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_MRS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_MRS_OP_VAL,
		.handler = mrs_handler,
	},
	{
		/* Trap WFI instructions executed in userspace */
		.esr_mask = ESR_ELx_WFx_MASK,
		.esr_val = ESR_ELx_WFx_WFI_VAL,
		.handler = wfi_handler,
	},
	{},
};
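
/*
 * AArch32 (compat) CP15 accesses trapped to EL1: the condition code recorded
 * in the ESR (or the IT state for T32) is checked before the access is
 * emulated.
 */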
#ifdef CONFIG_COMPAT
static bool cp15_cond_valid(unsigned int esr, struct pt_regs *regs)
{
	int cond;

	/* Only a T32 instruction can trap without CV being set */
	if (!(esr & ESR_ELx_CV)) {
		u32 it;

		it = compat_get_it_state(regs);
		if (!it)
			return true;

		cond = it >> 4;
	} else {
		cond = (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
	}

	return aarch32_opcode_cond_checks[cond](regs->pstate);
}

static void compat_cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
{
	int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT;

	pt_regs_write_reg(regs, reg, arch_timer_get_rate());
	arm64_skip_faulting_instruction(regs, 4);
}

static const struct sys64_hook cp15_32_hooks[] = {
	{
		.esr_mask = ESR_ELx_CP15_32_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_32_ISS_SYS_CNTFRQ,
		.handler = compat_cntfrq_read_handler,
	},
	{},
};

static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
{
	int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT;
	int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT;
	u64 val = arch_timer_read_counter();

	pt_regs_write_reg(regs, rt, lower_32_bits(val));
	pt_regs_write_reg(regs, rt2, upper_32_bits(val));
	arm64_skip_faulting_instruction(regs, 4);
}

static const struct sys64_hook cp15_64_hooks[] = {
	{
		.esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCT,
		.handler = compat_cntvct_read_handler,
	},
	{},
};

void do_cp15instr(unsigned int esr, struct pt_regs *regs)
{
	const struct sys64_hook *hook, *hook_base;

	if (!cp15_cond_valid(esr, regs)) {
		/*
		 * There is no T16 variant of a CP access, so we
		 * always advance PC by 4 bytes.
		 */
		arm64_skip_faulting_instruction(regs, 4);
		return;
	}

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_CP15_32:
		hook_base = cp15_32_hooks;
		break;
	case ESR_ELx_EC_CP15_64:
		hook_base = cp15_64_hooks;
		break;
	default:
		do_undefinstr(regs);
		return;
	}

	for (hook = hook_base; hook->handler; hook++)
		if ((hook->esr_mask & esr) == hook->esr_val) {
			hook->handler(esr, regs);
			return;
		}

	/*
	 * New cp15 instructions may previously have been undefined at
	 * EL0. Fall back to our usual undefined instruction handler
	 * so that we handle these consistently.
	 */
	do_undefinstr(regs);
}
NOKPROBE_SYMBOL(do_cp15instr);
#endif

void do_sysinstr(unsigned int esr, struct pt_regs *regs)
{
	const struct sys64_hook *hook;

	for (hook = sys64_hooks; hook->handler; hook++)
		if ((hook->esr_mask & esr) == hook->esr_val) {
			hook->handler(esr, regs);
			return;
		}

	/*
	 * New SYS instructions may previously have been undefined at EL0. Fall
	 * back to our usual undefined instruction handler so that we handle
	 * these consistently.
	 */
	do_undefinstr(regs);
}
NOKPROBE_SYMBOL(do_sysinstr);
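
/*
 * Human-readable names for the exception classes, indexed by ESR_ELx.EC.
 * Classes not listed explicitly fall back to "UNRECOGNIZED EC".
 */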
static const char *esr_class_str[] = {
	[0 ... ESR_ELx_EC_MAX]		= "UNRECOGNIZED EC",
	[ESR_ELx_EC_UNKNOWN]		= "Unknown/Uncategorized",
	[ESR_ELx_EC_WFx]		= "WFI/WFE",
	[ESR_ELx_EC_CP15_32]		= "CP15 MCR/MRC",
	[ESR_ELx_EC_CP15_64]		= "CP15 MCRR/MRRC",
	[ESR_ELx_EC_CP14_MR]		= "CP14 MCR/MRC",
	[ESR_ELx_EC_CP14_LS]		= "CP14 LDC/STC",
	[ESR_ELx_EC_FP_ASIMD]		= "ASIMD",
	[ESR_ELx_EC_CP10_ID]		= "CP10 MRC/VMRS",
	[ESR_ELx_EC_PAC]		= "PAC",
	[ESR_ELx_EC_CP14_64]		= "CP14 MCRR/MRRC",
	[ESR_ELx_EC_BTI]		= "BTI",
	[ESR_ELx_EC_ILL]		= "PSTATE.IL",
	[ESR_ELx_EC_SVC32]		= "SVC (AArch32)",
	[ESR_ELx_EC_HVC32]		= "HVC (AArch32)",
	[ESR_ELx_EC_SMC32]		= "SMC (AArch32)",
	[ESR_ELx_EC_SVC64]		= "SVC (AArch64)",
	[ESR_ELx_EC_HVC64]		= "HVC (AArch64)",
	[ESR_ELx_EC_SMC64]		= "SMC (AArch64)",
	[ESR_ELx_EC_SYS64]		= "MSR/MRS (AArch64)",
	[ESR_ELx_EC_SVE]		= "SVE",
	[ESR_ELx_EC_ERET]		= "ERET/ERETAA/ERETAB",
	[ESR_ELx_EC_FPAC]		= "FPAC",
	[ESR_ELx_EC_IMP_DEF]		= "EL3 IMP DEF",
	[ESR_ELx_EC_IABT_LOW]		= "IABT (lower EL)",
	[ESR_ELx_EC_IABT_CUR]		= "IABT (current EL)",
	[ESR_ELx_EC_PC_ALIGN]		= "PC Alignment",
	[ESR_ELx_EC_DABT_LOW]		= "DABT (lower EL)",
	[ESR_ELx_EC_DABT_CUR]		= "DABT (current EL)",
	[ESR_ELx_EC_SP_ALIGN]		= "SP Alignment",
	[ESR_ELx_EC_FP_EXC32]		= "FP (AArch32)",
	[ESR_ELx_EC_FP_EXC64]		= "FP (AArch64)",
	[ESR_ELx_EC_SERROR]		= "SError",
	[ESR_ELx_EC_BREAKPT_LOW]	= "Breakpoint (lower EL)",
	[ESR_ELx_EC_BREAKPT_CUR]	= "Breakpoint (current EL)",
	[ESR_ELx_EC_SOFTSTP_LOW]	= "Software Step (lower EL)",
	[ESR_ELx_EC_SOFTSTP_CUR]	= "Software Step (current EL)",
	[ESR_ELx_EC_WATCHPT_LOW]	= "Watchpoint (lower EL)",
	[ESR_ELx_EC_WATCHPT_CUR]	= "Watchpoint (current EL)",
	[ESR_ELx_EC_BKPT32]		= "BKPT (AArch32)",
	[ESR_ELx_EC_VECTOR32]		= "Vector catch (AArch32)",
	[ESR_ELx_EC_BRK64]		= "BRK (AArch64)",
};

const char *esr_get_class_string(u32 esr)
{
	return esr_class_str[ESR_ELx_EC(esr)];
}

/*
 * bad_mode handles the impossible case in the exception vector. This is always
 * fatal.
 */
asmlinkage void notrace bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
{
	arm64_enter_nmi(regs);

	console_verbose();

	pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
		handler[reason], smp_processor_id(), esr,
		esr_get_class_string(esr));

	__show_regs(regs);
	local_daif_mask();
	panic("bad mode");
}

/*
 * bad_el0_sync handles unexpected, but potentially recoverable synchronous
 * exceptions taken from EL0. Unlike bad_mode, this returns.
 */
void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
{
	unsigned long pc = instruction_pointer(regs);

	current->thread.fault_address = 0;
	current->thread.fault_code = esr;

	arm64_force_sig_fault(SIGILL, ILL_ILLOPC, pc,
			      "Bad EL0 synchronous exception");
}
#ifdef CONFIG_VMAP_STACK

DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
	__aligned(16);

asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs)
{
	unsigned long tsk_stk = (unsigned long)current->stack;
	unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
	unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
	unsigned int esr = read_sysreg(esr_el1);
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_nmi(regs);

	console_verbose();
	pr_emerg("Insufficient stack space to handle exception!");

	pr_emerg("ESR: 0x%08x -- %s\n", esr, esr_get_class_string(esr));
	pr_emerg("FAR: 0x%016lx\n", far);

	pr_emerg("Task stack:     [0x%016lx..0x%016lx]\n",
		 tsk_stk, tsk_stk + THREAD_SIZE);
	pr_emerg("IRQ stack:      [0x%016lx..0x%016lx]\n",
		 irq_stk, irq_stk + IRQ_STACK_SIZE);
	pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
		 ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);

	__show_regs(regs);

	/*
	 * We use nmi_panic to limit the potential for recursive overflows, and
	 * to get a better stack trace.
	 */
	nmi_panic(NULL, "kernel stack overflow");
	cpu_park_loop();
}
#endif

void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr)
{
	console_verbose();

	pr_crit("SError Interrupt on CPU%d, code 0x%08x -- %s\n",
		smp_processor_id(), esr, esr_get_class_string(esr));
	if (regs)
		__show_regs(regs);

	nmi_panic(regs, "Asynchronous SError Interrupt");

	cpu_park_loop();
	unreachable();
}

bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
{
	u32 aet = arm64_ras_serror_get_severity(esr);

	switch (aet) {
	case ESR_ELx_AET_CE:	/* corrected error */
	case ESR_ELx_AET_UEO:	/* restartable, not yet consumed */
		/*
		 * The CPU can make progress. We may take UEO again as
		 * a more severe error.
		 */
		return false;

	case ESR_ELx_AET_UEU:	/* Uncorrected Unrecoverable */
	case ESR_ELx_AET_UER:	/* Uncorrected Recoverable */
		/*
		 * The CPU can't make progress. The exception may have
		 * been imprecise.
		 *
		 * Neoverse-N1 #1349291 means a non-KVM SError reported as
		 * Unrecoverable should be treated as Uncontainable. We
		 * call arm64_serror_panic() in both cases.
		 */
		return true;

	case ESR_ELx_AET_UC:	/* Uncontainable or Uncategorized error */
	default:
		/* Error has been silently propagated */
		arm64_serror_panic(regs, esr);
	}
}

asmlinkage void noinstr do_serror(struct pt_regs *regs, unsigned int esr)
{
	arm64_enter_nmi(regs);

	/* non-RAS errors are not containable */
	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
		arm64_serror_panic(regs, esr);

	arm64_exit_nmi(regs);
}

/* GENERIC_BUG traps */

int is_valid_bugaddr(unsigned long addr)
{
	/*
	 * bug_handler() only called for BRK #BUG_BRK_IMM.
	 * So the answer is trivial -- any spurious instances with no
	 * bug table entry will be rejected by report_bug() and passed
	 * back to the debug-monitors code and handled as a fatal
	 * unexpected debug exception.
	 */
	return 1;
}

static int bug_handler(struct pt_regs *regs, unsigned int esr)
{
	switch (report_bug(regs->pc, regs)) {
	case BUG_TRAP_TYPE_BUG:
		die("Oops - BUG", regs, 0);
		break;

	case BUG_TRAP_TYPE_WARN:
		break;

	default:
		/* unknown/unrecognised bug trap type */
		return DBG_HOOK_ERROR;
	}

	/* If thread survives, skip over the BUG instruction and continue: */
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	return DBG_HOOK_HANDLED;
}

static struct break_hook bug_break_hook = {
	.fn = bug_handler,
	.imm = BUG_BRK_IMM,
};

static int reserved_fault_handler(struct pt_regs *regs, unsigned int esr)
{
	pr_err("%s generated an invalid instruction at %pS!\n",
		in_bpf_jit(regs) ? "BPF JIT" : "Kernel text patching",
		(void *)instruction_pointer(regs));

	/* We cannot handle this */
	return DBG_HOOK_ERROR;
}

static struct break_hook fault_break_hook = {
	.fn = reserved_fault_handler,
	.imm = FAULT_BRK_IMM,
};

#ifdef CONFIG_KASAN_SW_TAGS

#define KASAN_ESR_RECOVER	0x20
#define KASAN_ESR_WRITE	0x10
#define KASAN_ESR_SIZE_MASK	0x0f
#define KASAN_ESR_SIZE(esr)	(1 << ((esr) & KASAN_ESR_SIZE_MASK))
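
/*
 * For software tag-based KASAN, the compiler encodes the access size, whether
 * it was a write, and whether the report is recoverable into the BRK
 * immediate, which is decoded from the ESR comment field by the macros above.
 */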
static int kasan_handler(struct pt_regs *regs, unsigned int esr)
{
	bool recover = esr & KASAN_ESR_RECOVER;
	bool write = esr & KASAN_ESR_WRITE;
	size_t size = KASAN_ESR_SIZE(esr);
	u64 addr = regs->regs[0];
	u64 pc = regs->pc;

	kasan_report(addr, size, write, pc);

	/*
	 * The instrumentation allows us to control whether we can proceed
	 * after a crash was detected. This is done by passing the -recover
	 * flag to the compiler. Disabling recovery allows the compiler to
	 * generate more compact code.
	 *
	 * Unfortunately disabling recovery doesn't work for the kernel right
	 * now. KASAN reporting is disabled in some contexts (for example when
	 * the allocator accesses slab object metadata; this is controlled by
	 * current->kasan_depth). All these accesses are detected by the tool,
	 * even though the reports for them are not printed.
	 *
	 * This is something that might be fixed at some point in the future.
	 */
	if (!recover)
		die("Oops - KASAN", regs, 0);

	/* If thread survives, skip over the brk instruction and continue: */
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	return DBG_HOOK_HANDLED;
}

static struct break_hook kasan_break_hook = {
	.fn	= kasan_handler,
	.imm	= KASAN_BRK_IMM,
	.mask	= KASAN_BRK_MASK,
};
#endif

/*
 * Initial handler for AArch64 BRK exceptions.
 * This handler is only used until debug_traps_init().
 */
int __init early_brk64(unsigned long addr, unsigned int esr,
		struct pt_regs *regs)
{
#ifdef CONFIG_KASAN_SW_TAGS
	unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;

	if ((comment & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
		return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
	return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
}

void __init trap_init(void)
{
	register_kernel_break_hook(&bug_break_hook);
	register_kernel_break_hook(&fault_break_hook);
#ifdef CONFIG_KASAN_SW_TAGS
	register_kernel_break_hook(&kasan_break_hook);
#endif
	debug_traps_init();
}