// SPDX-License-Identifier: GPL-2.0
/*
 * Exception handling code
 *
 * Copyright (C) 2019 ARM Ltd.
 */

#include <linux/context_tracking.h>
#include <linux/kasan.h>
#include <linux/linkage.h>
#include <linux/lockdep.h>
#include <linux/ptrace.h>
#include <linux/resume_user_mode.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/irq_regs.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/sdei.h>
#include <asm/stacktrace.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>

/*
 * Handle IRQ/context state management when entering from kernel mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_enter(), handling the kernel
 * mode transitions only.
 */
static __always_inline void __enter_from_kernel_mode(struct pt_regs *regs)
{
	regs->exit_rcu = false;

	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		lockdep_hardirqs_off(CALLER_ADDR0);
		ct_irq_enter();
		trace_hardirqs_off_finish();

		regs->exit_rcu = true;
		return;
	}

	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();
}
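
/*
 * Note: regs->exit_rcu records whether this entry itself performed the RCU
 * transition (idle task with !TINY_RCU); __exit_to_kernel_mode() consumes
 * the flag so that the exit path only unwinds the state this entry set up.
 */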

static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
{
	__enter_from_kernel_mode(regs);
	mte_check_tfsr_entry();
	mte_disable_tco_entry(current);
}

/*
 * Handle IRQ/context state management when exiting to kernel mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_exit(), handling the kernel
 * mode transitions only, and with preemption handled elsewhere.
 */
static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs)
{
	lockdep_assert_irqs_disabled();

	if (interrupts_enabled(regs)) {
		if (regs->exit_rcu) {
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare();
			ct_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		trace_hardirqs_on();
	} else {
		if (regs->exit_rcu)
			ct_irq_exit();
	}
}

static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
{
	mte_check_tfsr_exit();
	__exit_to_kernel_mode(regs);
}

/*
 * Handle IRQ/context state management when entering from user mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void __enter_from_user_mode(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);
	CT_WARN_ON(ct_state() != CT_STATE_USER);
	user_exit_irqoff();
	trace_hardirqs_off_finish();
	mte_disable_tco_entry(current);
}

static __always_inline void enter_from_user_mode(struct pt_regs *regs)
{
	__enter_from_user_mode();
}

/*
 * Handle IRQ/context state management when exiting to user mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void __exit_to_user_mode(void)
{
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare();
	user_enter_irqoff();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

static void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
{
	do {
		local_irq_enable();

		if (thread_flags & _TIF_NEED_RESCHED)
			schedule();

		if (thread_flags & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		if (thread_flags & _TIF_MTE_ASYNC_FAULT) {
			clear_thread_flag(TIF_MTE_ASYNC_FAULT);
			send_sig_fault(SIGSEGV, SEGV_MTEAERR,
				       (void __user *)NULL, current);
		}

		if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
			do_signal(regs);

		if (thread_flags & _TIF_NOTIFY_RESUME)
			resume_user_mode_work(regs);

		if (thread_flags & _TIF_FOREIGN_FPSTATE)
			fpsimd_restore_current_state();

		local_irq_disable();
		thread_flags = read_thread_flags();
	} while (thread_flags & _TIF_WORK_MASK);
}
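
/*
 * The loop above re-reads the thread flags with IRQs disabled before
 * returning, so work flagged while earlier work was being handled (e.g. a
 * signal posted during rescheduling) is not lost on the way back to
 * userspace.
 */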

static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long flags;

	local_irq_disable();

	flags = read_thread_flags();
	if (unlikely(flags & _TIF_WORK_MASK))
		do_notify_resume(regs, flags);

	local_daif_mask();

	lockdep_sys_exit();
}

static __always_inline void exit_to_user_mode(struct pt_regs *regs)
{
	exit_to_user_mode_prepare(regs);
	mte_check_tfsr_exit();
	__exit_to_user_mode();
}

asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
{
	exit_to_user_mode(regs);
}
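
/*
 * asm_exit_to_user_mode() is the C counterpart of the asm return-to-user
 * path (presumably invoked from entry.S with IRQs masked); it is noinstr
 * since after __exit_to_user_mode() no instrumentable code may run.
 */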

/*
 * Handle IRQ/context state management when entering an NMI from user/kernel
 * mode. Before this function is called it is not safe to call regular kernel
 * code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_enter_nmi(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	ct_nmi_enter();

	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
}

/*
 * Handle IRQ/context state management when exiting an NMI from user/kernel
 * mode. After this function returns it is not safe to call regular kernel
 * code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_exit_nmi(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	ftrace_nmi_exit();
	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare();
	}

	ct_nmi_exit();
	lockdep_hardirq_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}
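
/*
 * arm64_enter_nmi()/arm64_exit_nmi() stash the lockdep hardirq state in
 * pt_regs and restore it conditionally, since an NMI may be taken either
 * inside or outside a region lockdep already considers hardirqs-off.
 */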

/*
 * Handle IRQ/context state management when entering a debug exception from
 * kernel mode. Before this function is called it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	lockdep_hardirqs_off(CALLER_ADDR0);
	ct_nmi_enter();

	trace_hardirqs_off_finish();
}

/*
 * Handle IRQ/context state management when exiting a debug exception from
 * kernel mode. After this function returns it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare();
	}

	ct_nmi_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
}

#ifdef CONFIG_PREEMPT_DYNAMIC
DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
#define need_irq_preemption() \
	(static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
#else
#define need_irq_preemption()	(IS_ENABLED(CONFIG_PREEMPTION))
#endif
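
/*
 * With CONFIG_PREEMPT_DYNAMIC the static key above lets the preemption model
 * be selected at boot; otherwise need_irq_preemption() collapses to the
 * build-time CONFIG_PREEMPTION setting.
 */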

static void __sched arm64_preempt_schedule_irq(void)
{
	if (!need_irq_preemption())
		return;

	/*
	 * Note: thread_info::preempt_count includes both thread_info::count
	 * and thread_info::need_resched, and is not equivalent to
	 * preempt_count().
	 */
	if (READ_ONCE(current_thread_info()->preempt_count) != 0)
		return;

	/*
	 * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
	 * priority masking is used the GIC irqchip driver will clear DAIF.IF
	 * using gic_arch_enable_irqs() for normal IRQs. If anything is set in
	 * DAIF we must have handled an NMI, so skip preemption.
	 */
	if (system_uses_irq_prio_masking() && read_sysreg(daif))
		return;

	/*
	 * Preempting a task from an IRQ means we leave copies of PSTATE
	 * on the stack. cpufeature's enable calls may modify PSTATE, but
	 * resuming one of these preempted tasks would undo those changes.
	 *
	 * Only allow a task to be preempted once cpufeatures have been
	 * enabled.
	 */
	if (system_capabilities_finalized())
		preempt_schedule_irq();
}

static void do_interrupt_handler(struct pt_regs *regs,
				 void (*handler)(struct pt_regs *))
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (on_thread_stack())
		call_on_irq_stack(regs, handler);
	else
		handler(regs);

	set_irq_regs(old_regs);
}
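
/*
 * do_interrupt_handler() switches to the per-CPU IRQ stack only when still
 * on the task stack; if the IRQ stack is already in use (e.g. an NMI taken
 * during IRQ handling), the handler runs on the current stack instead.
 */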

extern void (*handle_arch_irq)(struct pt_regs *);
extern void (*handle_arch_fiq)(struct pt_regs *);

static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector,
				      unsigned long esr)
{
	arm64_enter_nmi(regs);

	console_verbose();

	pr_crit("Unhandled %s exception on CPU%d, ESR 0x%016lx -- %s\n",
		vector, smp_processor_id(), esr,
		esr_get_class_string(esr));

	__show_regs(regs);
	panic("Unhandled exception");
}

#define UNHANDLED(el, regsize, vector)							\
asmlinkage void noinstr el##_##regsize##_##vector##_handler(struct pt_regs *regs)	\
{											\
	const char *desc = #regsize "-bit " #el " " #vector;				\
	__panic_unhandled(regs, desc, read_sysreg(esr_el1));				\
}
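
/*
 * As an illustration, UNHANDLED(el1t, 64, sync) expands to:
 *
 *	asmlinkage void noinstr el1t_64_sync_handler(struct pt_regs *regs)
 *	{
 *		const char *desc = "64-bit el1t sync";
 *		__panic_unhandled(regs, desc, read_sysreg(esr_el1));
 *	}
 */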

#ifdef CONFIG_ARM64_ERRATUM_1463225
static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

static void cortex_a76_erratum_1463225_svc_handler(void)
{
	u32 reg, val;

	if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
		return;
	if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
		return;

	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
	reg = read_sysreg(mdscr_el1);
	val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
	write_sysreg(val, mdscr_el1);
	asm volatile("msr daifclr, #8");
	isb();

	/* We will have taken a single-step exception by this point */

	write_sysreg(reg, mdscr_el1);
	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
}

static __always_inline bool
cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
		return false;

	/*
	 * We've taken a dummy step exception from the kernel to ensure
	 * that interrupts are re-enabled on the syscall path. Return back
	 * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
	 * masked so that we can safely restore the mdscr and get on with
	 * handling the syscall.
	 */
	regs->pstate |= PSR_D_BIT;

	return true;
}
#else /* CONFIG_ARM64_ERRATUM_1463225 */
static void cortex_a76_erratum_1463225_svc_handler(void) { }
static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	return false;
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */

/*
 * As per the ABI exit SME streaming mode and clear the SVE state not
 * shared with FPSIMD on syscall entry.
 */
static inline void fp_user_discard(void)
{
	/*
	 * If SME is active then exit streaming mode. If ZA is active
	 * then flush the SVE registers but leave userspace access to
	 * both SVE and SME enabled, otherwise disable SME for the
	 * task and fall through to disabling SVE too. This means
	 * that after a syscall we never have any streaming mode
	 * register state to track, if this changes the KVM code will
	 * need updating.
	 */
	if (system_supports_sme())
		sme_smstop_sm();

	if (!system_supports_sve())
		return;

	if (test_thread_flag(TIF_SVE)) {
		unsigned int sve_vq_minus_one;

		sve_vq_minus_one = sve_vq_from_vl(task_get_sve_vl(current)) - 1;
		sve_flush_live(true, sve_vq_minus_one);
	}
}

UNHANDLED(el1t, 64, sync)
UNHANDLED(el1t, 64, irq)
UNHANDLED(el1t, 64, fiq)
UNHANDLED(el1t, 64, error)

static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_mem_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}
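
/*
 * The el1_*() helpers here share one shape: enter_from_kernel_mode()
 * establishes lockdep/RCU state, local_daif_inherit() restores the
 * interrupted context's DAIF bits before the specific handler runs, and
 * local_daif_mask() re-masks everything ahead of exit_to_kernel_mode().
 */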

static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_sp_pc_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_undef(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_undef(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_bti(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_bti(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_gcs(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_gcs(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_mops(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_mops(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_el1_dbg(regs);
	if (!cortex_a76_erratum_1463225_debug_handler(regs))
		do_debug_exception(far, esr, regs);
	arm64_exit_el1_dbg(regs);
}

static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_fpac(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_DABT_CUR:
	case ESR_ELx_EC_IABT_CUR:
		el1_abort(regs, esr);
		break;
	/*
	 * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
	 * recursive exception when trying to push the initial pt_regs.
	 */
	case ESR_ELx_EC_PC_ALIGN:
		el1_pc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_UNKNOWN:
		el1_undef(regs, esr);
		break;
	case ESR_ELx_EC_BTI:
		el1_bti(regs, esr);
		break;
	case ESR_ELx_EC_GCS:
		el1_gcs(regs, esr);
		break;
	case ESR_ELx_EC_MOPS:
		el1_mops(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_CUR:
	case ESR_ELx_EC_SOFTSTP_CUR:
	case ESR_ELx_EC_WATCHPT_CUR:
	case ESR_ELx_EC_BRK64:
		el1_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el1_fpac(regs, esr);
		break;
	default:
		__panic_unhandled(regs, "64-bit el1h sync", esr);
	}
}

static __always_inline void __el1_pnmi(struct pt_regs *regs,
				       void (*handler)(struct pt_regs *))
{
	arm64_enter_nmi(regs);
	do_interrupt_handler(regs, handler);
	arm64_exit_nmi(regs);
}

static __always_inline void __el1_irq(struct pt_regs *regs,
				      void (*handler)(struct pt_regs *))
{
	enter_from_kernel_mode(regs);

	irq_enter_rcu();
	do_interrupt_handler(regs, handler);
	irq_exit_rcu();

	arm64_preempt_schedule_irq();

	exit_to_kernel_mode(regs);
}

static void noinstr el1_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
		__el1_pnmi(regs, handler);
	else
		__el1_irq(regs, handler);
}
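
/*
 * With pseudo-NMIs enabled, an EL1 interrupt taken while the interrupted
 * context had interrupts masked can only be a priority-masked NMI, so it
 * goes down the NMI path; everything else takes the normal IRQ entry/exit
 * with preemption.
 */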

asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs)
{
	el1_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el1h_64_fiq_handler(struct pt_regs *regs)
{
	el1_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	local_daif_restore(DAIF_ERRCTX);
	arm64_enter_nmi(regs);
	do_serror(regs, esr);
	arm64_exit_nmi(regs);
}

static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/*
	 * We've taken an instruction abort from userspace and not yet
	 * re-enabled IRQs. If the address is a kernel address, apply
	 * BP hardening prior to enabling IRQs and pre-emption.
	 */
	if (!is_ttbr0_addr(far))
		arm64_apply_bp_hardening();

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_acc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sve_acc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sme_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sme_acc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_exc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_sys(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	if (!is_ttbr0_addr(instruction_pointer(regs)))
		arm64_apply_bp_hardening();

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(far, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(regs->sp, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_undef(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_undef(regs, esr);
	exit_to_user_mode(regs);
}

static void noinstr el0_bti(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_bti(regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_mops(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_mops(regs, esr);
	exit_to_user_mode(regs);
}

static void noinstr el0_gcs(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_gcs(regs, esr);
	exit_to_user_mode(regs);
}

static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	bad_el0_sync(regs, 0, esr);
	exit_to_user_mode(regs);
}

static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
{
	/* Only watchpoints write FAR_EL1, otherwise it's UNKNOWN */
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode(regs);
	do_debug_exception(far, esr, regs);
	local_daif_restore(DAIF_PROCCTX);
	exit_to_user_mode(regs);
}

static void noinstr el0_svc(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	cortex_a76_erratum_1463225_svc_handler();
	fp_user_discard();
	local_daif_restore(DAIF_PROCCTX);
	do_el0_svc(regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_fpac(regs, esr);
	exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC64:
		el0_svc(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_SVE:
		el0_sve_acc(regs, esr);
		break;
	case ESR_ELx_EC_SME:
		el0_sme_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC64:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_WFx:
		el0_sys(regs, esr);
		break;
	case ESR_ELx_EC_SP_ALIGN:
		el0_sp(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
		el0_undef(regs, esr);
		break;
	case ESR_ELx_EC_BTI:
		el0_bti(regs);
		break;
	case ESR_ELx_EC_MOPS:
		el0_mops(regs, esr);
		break;
	case ESR_ELx_EC_GCS:
		el0_gcs(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BRK64:
		el0_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el0_fpac(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}

static void noinstr el0_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
	enter_from_user_mode(regs);

	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

	if (regs->pc & BIT(55))
		arm64_apply_bp_hardening();

	irq_enter_rcu();
	do_interrupt_handler(regs, handler);
	irq_exit_rcu();

	exit_to_user_mode(regs);
}
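
/*
 * The regs->pc & BIT(55) test keys off the bit which distinguishes the
 * TTBR0 (user) and TTBR1 (kernel) halves of the VA space: a userspace PC
 * in the kernel half is bogus and warrants branch-predictor hardening.
 */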

static void noinstr __el0_irq_handler_common(struct pt_regs *regs)
{
	el0_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el0t_64_irq_handler(struct pt_regs *regs)
{
	__el0_irq_handler_common(regs);
}

static void noinstr __el0_fiq_handler_common(struct pt_regs *regs)
{
	el0_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs)
{
	__el0_fiq_handler_common(regs);
}

static void noinstr __el0_error_handler_common(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_ERRCTX);
	arm64_enter_nmi(regs);
	do_serror(regs, esr);
	arm64_exit_nmi(regs);
	local_daif_restore(DAIF_PROCCTX);
	exit_to_user_mode(regs);
}
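
/*
 * SErrors from EL0 are treated as NMIs: DAIF_ERRCTX keeps lower-priority
 * exceptions masked while do_serror() runs inside the NMI bracket.
 */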

asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs)
{
	__el0_error_handler_common(regs);
}

#ifdef CONFIG_COMPAT
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_cp15(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_svc_compat(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	cortex_a76_erratum_1463225_svc_handler();
	local_daif_restore(DAIF_PROCCTX);
	do_el0_svc_compat(regs);
	exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC32:
		el0_svc_compat(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC32:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_LS:
	case ESR_ELx_EC_CP14_64:
		el0_undef(regs, esr);
		break;
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		el0_cp15(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BKPT32:
		el0_dbg(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}

asmlinkage void noinstr el0t_32_irq_handler(struct pt_regs *regs)
{
	__el0_irq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_fiq_handler(struct pt_regs *regs)
{
	__el0_fiq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_error_handler(struct pt_regs *regs)
{
	__el0_error_handler_common(regs);
}

#else /* CONFIG_COMPAT */
UNHANDLED(el0t, 32, sync)
UNHANDLED(el0t, 32, irq)
UNHANDLED(el0t, 32, fiq)
UNHANDLED(el0t, 32, error)
#endif /* CONFIG_COMPAT */

#ifdef CONFIG_VMAP_STACK
asmlinkage void noinstr __noreturn handle_bad_stack(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_nmi(regs);
	panic_bad_stack(regs, esr, far);
}
#endif /* CONFIG_VMAP_STACK */
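
/*
 * handle_bad_stack() is presumably reached from the asm stack-overflow
 * detection path, running on a separate overflow stack; it cannot return,
 * so it enters NMI context and panics with the faulting stack details.
 */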

#ifdef CONFIG_ARM_SDE_INTERFACE
asmlinkage noinstr unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
	unsigned long ret;

	/*
	 * We didn't take an exception to get here, so the HW hasn't
	 * set/cleared bits in PSTATE that we may rely on.
	 *
	 * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
	 * whether PSTATE bits are inherited unchanged or generated from
	 * scratch, and the TF-A implementation always clears PAN and always
	 * clears UAO. There are no other known implementations.
	 *
	 * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
	 * PSTATE is modified upon architectural exceptions, and so PAN is
	 * either inherited or set per SCTLR_ELx.SPAN, and UAO is always
	 * cleared.
	 *
	 * We must explicitly reset PAN to the expected state, including
	 * clearing it when the host isn't using it, in case a VM had it set.
	 */
	if (system_uses_hw_pan())
		set_pstate_pan(1);
	else if (cpu_has_pan())
		set_pstate_pan(0);

	arm64_enter_nmi(regs);
	ret = do_sdei_event(regs, arg);
	arm64_exit_nmi(regs);

	return ret;
}
#endif /* CONFIG_ARM_SDE_INTERFACE */