// SPDX-License-Identifier: GPL-2.0
/*
 * Exception handling code
 *
 * Copyright (C) 2019 ARM Ltd.
 */
8 #include <linux/context_tracking.h>
9 #include <linux/ptrace.h>
10 #include <linux/thread_info.h>
12 #include <asm/cpufeature.h>
13 #include <asm/daifflags.h>
15 #include <asm/exception.h>
16 #include <asm/kprobes.h>
18 #include <asm/sysreg.h>
21 * This is intended to match the logic in irqentry_enter(), handling the kernel
22 * mode transitions only.
24 static void noinstr
enter_from_kernel_mode(struct pt_regs
*regs
)
26 regs
->exit_rcu
= false;
28 if (!IS_ENABLED(CONFIG_TINY_RCU
) && is_idle_task(current
)) {
29 lockdep_hardirqs_off(CALLER_ADDR0
);
31 trace_hardirqs_off_finish();
33 regs
->exit_rcu
= true;
37 lockdep_hardirqs_off(CALLER_ADDR0
);
38 rcu_irq_enter_check_tick();
39 trace_hardirqs_off_finish();
43 * This is intended to match the logic in irqentry_exit(), handling the kernel
44 * mode transitions only, and with preemption handled elsewhere.
46 static void noinstr
exit_to_kernel_mode(struct pt_regs
*regs
)
48 lockdep_assert_irqs_disabled();
50 if (interrupts_enabled(regs
)) {
52 trace_hardirqs_on_prepare();
53 lockdep_hardirqs_on_prepare(CALLER_ADDR0
);
55 lockdep_hardirqs_on(CALLER_ADDR0
);
66 void noinstr
arm64_enter_nmi(struct pt_regs
*regs
)
68 regs
->lockdep_hardirqs
= lockdep_hardirqs_enabled();
71 lockdep_hardirqs_off(CALLER_ADDR0
);
72 lockdep_hardirq_enter();
75 trace_hardirqs_off_finish();
79 void noinstr
arm64_exit_nmi(struct pt_regs
*regs
)
81 bool restore
= regs
->lockdep_hardirqs
;
85 trace_hardirqs_on_prepare();
86 lockdep_hardirqs_on_prepare(CALLER_ADDR0
);
90 lockdep_hardirq_exit();
92 lockdep_hardirqs_on(CALLER_ADDR0
);
96 asmlinkage
void noinstr
enter_el1_irq_or_nmi(struct pt_regs
*regs
)
98 if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI
) && !interrupts_enabled(regs
))
99 arm64_enter_nmi(regs
);
101 enter_from_kernel_mode(regs
);
104 asmlinkage
void noinstr
exit_el1_irq_or_nmi(struct pt_regs
*regs
)
106 if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI
) && !interrupts_enabled(regs
))
107 arm64_exit_nmi(regs
);
109 exit_to_kernel_mode(regs
);
112 static void noinstr
el1_abort(struct pt_regs
*regs
, unsigned long esr
)
114 unsigned long far
= read_sysreg(far_el1
);
116 enter_from_kernel_mode(regs
);
117 local_daif_inherit(regs
);
118 do_mem_abort(far
, esr
, regs
);
120 exit_to_kernel_mode(regs
);
123 static void noinstr
el1_pc(struct pt_regs
*regs
, unsigned long esr
)
125 unsigned long far
= read_sysreg(far_el1
);
127 enter_from_kernel_mode(regs
);
128 local_daif_inherit(regs
);
129 do_sp_pc_abort(far
, esr
, regs
);
131 exit_to_kernel_mode(regs
);
134 static void noinstr
el1_undef(struct pt_regs
*regs
)
136 enter_from_kernel_mode(regs
);
137 local_daif_inherit(regs
);
140 exit_to_kernel_mode(regs
);
143 static void noinstr
el1_inv(struct pt_regs
*regs
, unsigned long esr
)
145 enter_from_kernel_mode(regs
);
146 local_daif_inherit(regs
);
147 bad_mode(regs
, 0, esr
);
149 exit_to_kernel_mode(regs
);
152 static void noinstr
arm64_enter_el1_dbg(struct pt_regs
*regs
)
154 regs
->lockdep_hardirqs
= lockdep_hardirqs_enabled();
156 lockdep_hardirqs_off(CALLER_ADDR0
);
159 trace_hardirqs_off_finish();
162 static void noinstr
arm64_exit_el1_dbg(struct pt_regs
*regs
)
164 bool restore
= regs
->lockdep_hardirqs
;
167 trace_hardirqs_on_prepare();
168 lockdep_hardirqs_on_prepare(CALLER_ADDR0
);
173 lockdep_hardirqs_on(CALLER_ADDR0
);
176 static void noinstr
el1_dbg(struct pt_regs
*regs
, unsigned long esr
)
178 unsigned long far
= read_sysreg(far_el1
);
181 * The CPU masked interrupts, and we are leaving them masked during
182 * do_debug_exception(). Update PMR as if we had called
185 if (system_uses_irq_prio_masking())
186 gic_write_pmr(GIC_PRIO_IRQON
| GIC_PRIO_PSR_I_SET
);
188 arm64_enter_el1_dbg(regs
);
189 do_debug_exception(far
, esr
, regs
);
190 arm64_exit_el1_dbg(regs
);
193 static void noinstr
el1_fpac(struct pt_regs
*regs
, unsigned long esr
)
195 enter_from_kernel_mode(regs
);
196 local_daif_inherit(regs
);
197 do_ptrauth_fault(regs
, esr
);
199 exit_to_kernel_mode(regs
);
202 asmlinkage
void noinstr
el1_sync_handler(struct pt_regs
*regs
)
204 unsigned long esr
= read_sysreg(esr_el1
);
206 switch (ESR_ELx_EC(esr
)) {
207 case ESR_ELx_EC_DABT_CUR
:
208 case ESR_ELx_EC_IABT_CUR
:
209 el1_abort(regs
, esr
);
212 * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
213 * recursive exception when trying to push the initial pt_regs.
215 case ESR_ELx_EC_PC_ALIGN
:
218 case ESR_ELx_EC_SYS64
:
219 case ESR_ELx_EC_UNKNOWN
:
222 case ESR_ELx_EC_BREAKPT_CUR
:
223 case ESR_ELx_EC_SOFTSTP_CUR
:
224 case ESR_ELx_EC_WATCHPT_CUR
:
225 case ESR_ELx_EC_BRK64
:
228 case ESR_ELx_EC_FPAC
:
236 asmlinkage
void noinstr
enter_from_user_mode(void)
238 lockdep_hardirqs_off(CALLER_ADDR0
);
239 CT_WARN_ON(ct_state() != CONTEXT_USER
);
241 trace_hardirqs_off_finish();
244 asmlinkage
void noinstr
exit_to_user_mode(void)
246 trace_hardirqs_on_prepare();
247 lockdep_hardirqs_on_prepare(CALLER_ADDR0
);
249 lockdep_hardirqs_on(CALLER_ADDR0
);
252 static void noinstr
el0_da(struct pt_regs
*regs
, unsigned long esr
)
254 unsigned long far
= read_sysreg(far_el1
);
256 enter_from_user_mode();
257 local_daif_restore(DAIF_PROCCTX
);
258 do_mem_abort(far
, esr
, regs
);
261 static void noinstr
el0_ia(struct pt_regs
*regs
, unsigned long esr
)
263 unsigned long far
= read_sysreg(far_el1
);
266 * We've taken an instruction abort from userspace and not yet
267 * re-enabled IRQs. If the address is a kernel address, apply
268 * BP hardening prior to enabling IRQs and pre-emption.
270 if (!is_ttbr0_addr(far
))
271 arm64_apply_bp_hardening();
273 enter_from_user_mode();
274 local_daif_restore(DAIF_PROCCTX
);
275 do_mem_abort(far
, esr
, regs
);
278 static void noinstr
el0_fpsimd_acc(struct pt_regs
*regs
, unsigned long esr
)
280 enter_from_user_mode();
281 local_daif_restore(DAIF_PROCCTX
);
282 do_fpsimd_acc(esr
, regs
);
285 static void noinstr
el0_sve_acc(struct pt_regs
*regs
, unsigned long esr
)
287 enter_from_user_mode();
288 local_daif_restore(DAIF_PROCCTX
);
289 do_sve_acc(esr
, regs
);
292 static void noinstr
el0_fpsimd_exc(struct pt_regs
*regs
, unsigned long esr
)
294 enter_from_user_mode();
295 local_daif_restore(DAIF_PROCCTX
);
296 do_fpsimd_exc(esr
, regs
);
299 static void noinstr
el0_sys(struct pt_regs
*regs
, unsigned long esr
)
301 enter_from_user_mode();
302 local_daif_restore(DAIF_PROCCTX
);
303 do_sysinstr(esr
, regs
);
306 static void noinstr
el0_pc(struct pt_regs
*regs
, unsigned long esr
)
308 unsigned long far
= read_sysreg(far_el1
);
310 if (!is_ttbr0_addr(instruction_pointer(regs
)))
311 arm64_apply_bp_hardening();
313 enter_from_user_mode();
314 local_daif_restore(DAIF_PROCCTX
);
315 do_sp_pc_abort(far
, esr
, regs
);
318 static void noinstr
el0_sp(struct pt_regs
*regs
, unsigned long esr
)
320 enter_from_user_mode();
321 local_daif_restore(DAIF_PROCCTX
);
322 do_sp_pc_abort(regs
->sp
, esr
, regs
);
325 static void noinstr
el0_undef(struct pt_regs
*regs
)
327 enter_from_user_mode();
328 local_daif_restore(DAIF_PROCCTX
);
332 static void noinstr
el0_bti(struct pt_regs
*regs
)
334 enter_from_user_mode();
335 local_daif_restore(DAIF_PROCCTX
);
339 static void noinstr
el0_inv(struct pt_regs
*regs
, unsigned long esr
)
341 enter_from_user_mode();
342 local_daif_restore(DAIF_PROCCTX
);
343 bad_el0_sync(regs
, 0, esr
);
346 static void noinstr
el0_dbg(struct pt_regs
*regs
, unsigned long esr
)
348 /* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */
349 unsigned long far
= read_sysreg(far_el1
);
351 if (system_uses_irq_prio_masking())
352 gic_write_pmr(GIC_PRIO_IRQON
| GIC_PRIO_PSR_I_SET
);
354 enter_from_user_mode();
355 do_debug_exception(far
, esr
, regs
);
356 local_daif_restore(DAIF_PROCCTX_NOIRQ
);
359 static void noinstr
el0_svc(struct pt_regs
*regs
)
361 if (system_uses_irq_prio_masking())
362 gic_write_pmr(GIC_PRIO_IRQON
| GIC_PRIO_PSR_I_SET
);
364 enter_from_user_mode();
368 static void noinstr
el0_fpac(struct pt_regs
*regs
, unsigned long esr
)
370 enter_from_user_mode();
371 local_daif_restore(DAIF_PROCCTX
);
372 do_ptrauth_fault(regs
, esr
);
375 asmlinkage
void noinstr
el0_sync_handler(struct pt_regs
*regs
)
377 unsigned long esr
= read_sysreg(esr_el1
);
379 switch (ESR_ELx_EC(esr
)) {
380 case ESR_ELx_EC_SVC64
:
383 case ESR_ELx_EC_DABT_LOW
:
386 case ESR_ELx_EC_IABT_LOW
:
389 case ESR_ELx_EC_FP_ASIMD
:
390 el0_fpsimd_acc(regs
, esr
);
393 el0_sve_acc(regs
, esr
);
395 case ESR_ELx_EC_FP_EXC64
:
396 el0_fpsimd_exc(regs
, esr
);
398 case ESR_ELx_EC_SYS64
:
402 case ESR_ELx_EC_SP_ALIGN
:
405 case ESR_ELx_EC_PC_ALIGN
:
408 case ESR_ELx_EC_UNKNOWN
:
414 case ESR_ELx_EC_BREAKPT_LOW
:
415 case ESR_ELx_EC_SOFTSTP_LOW
:
416 case ESR_ELx_EC_WATCHPT_LOW
:
417 case ESR_ELx_EC_BRK64
:
420 case ESR_ELx_EC_FPAC
:
429 static void noinstr
el0_cp15(struct pt_regs
*regs
, unsigned long esr
)
431 enter_from_user_mode();
432 local_daif_restore(DAIF_PROCCTX
);
433 do_cp15instr(esr
, regs
);
436 static void noinstr
el0_svc_compat(struct pt_regs
*regs
)
438 if (system_uses_irq_prio_masking())
439 gic_write_pmr(GIC_PRIO_IRQON
| GIC_PRIO_PSR_I_SET
);
441 enter_from_user_mode();
442 do_el0_svc_compat(regs
);
445 asmlinkage
void noinstr
el0_sync_compat_handler(struct pt_regs
*regs
)
447 unsigned long esr
= read_sysreg(esr_el1
);
449 switch (ESR_ELx_EC(esr
)) {
450 case ESR_ELx_EC_SVC32
:
451 el0_svc_compat(regs
);
453 case ESR_ELx_EC_DABT_LOW
:
456 case ESR_ELx_EC_IABT_LOW
:
459 case ESR_ELx_EC_FP_ASIMD
:
460 el0_fpsimd_acc(regs
, esr
);
462 case ESR_ELx_EC_FP_EXC32
:
463 el0_fpsimd_exc(regs
, esr
);
465 case ESR_ELx_EC_PC_ALIGN
:
468 case ESR_ELx_EC_UNKNOWN
:
469 case ESR_ELx_EC_CP14_MR
:
470 case ESR_ELx_EC_CP14_LS
:
471 case ESR_ELx_EC_CP14_64
:
474 case ESR_ELx_EC_CP15_32
:
475 case ESR_ELx_EC_CP15_64
:
478 case ESR_ELx_EC_BREAKPT_LOW
:
479 case ESR_ELx_EC_SOFTSTP_LOW
:
480 case ESR_ELx_EC_WATCHPT_LOW
:
481 case ESR_ELx_EC_BKPT32
:
488 #endif /* CONFIG_COMPAT */