/*
 * Imported from the Linux tree: arch/arm64/kernel/entry-common.c
 * (WIP FPC-III support branch; blob 5346953e4382e9424079ad52cffe69f4e83bc6bc)
 */
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Exception handling code
5 * Copyright (C) 2019 ARM Ltd.
6 */
8 #include <linux/context_tracking.h>
9 #include <linux/ptrace.h>
10 #include <linux/thread_info.h>
12 #include <asm/cpufeature.h>
13 #include <asm/daifflags.h>
14 #include <asm/esr.h>
15 #include <asm/exception.h>
16 #include <asm/kprobes.h>
17 #include <asm/mmu.h>
18 #include <asm/sysreg.h>
21 * This is intended to match the logic in irqentry_enter(), handling the kernel
22 * mode transitions only.
24 static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
26 regs->exit_rcu = false;
28 if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
29 lockdep_hardirqs_off(CALLER_ADDR0);
30 rcu_irq_enter();
31 trace_hardirqs_off_finish();
33 regs->exit_rcu = true;
34 return;
37 lockdep_hardirqs_off(CALLER_ADDR0);
38 rcu_irq_enter_check_tick();
39 trace_hardirqs_off_finish();
43 * This is intended to match the logic in irqentry_exit(), handling the kernel
44 * mode transitions only, and with preemption handled elsewhere.
46 static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
48 lockdep_assert_irqs_disabled();
50 if (interrupts_enabled(regs)) {
51 if (regs->exit_rcu) {
52 trace_hardirqs_on_prepare();
53 lockdep_hardirqs_on_prepare(CALLER_ADDR0);
54 rcu_irq_exit();
55 lockdep_hardirqs_on(CALLER_ADDR0);
56 return;
59 trace_hardirqs_on();
60 } else {
61 if (regs->exit_rcu)
62 rcu_irq_exit();
66 void noinstr arm64_enter_nmi(struct pt_regs *regs)
68 regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
70 __nmi_enter();
71 lockdep_hardirqs_off(CALLER_ADDR0);
72 lockdep_hardirq_enter();
73 rcu_nmi_enter();
75 trace_hardirqs_off_finish();
76 ftrace_nmi_enter();
79 void noinstr arm64_exit_nmi(struct pt_regs *regs)
81 bool restore = regs->lockdep_hardirqs;
83 ftrace_nmi_exit();
84 if (restore) {
85 trace_hardirqs_on_prepare();
86 lockdep_hardirqs_on_prepare(CALLER_ADDR0);
89 rcu_nmi_exit();
90 lockdep_hardirq_exit();
91 if (restore)
92 lockdep_hardirqs_on(CALLER_ADDR0);
93 __nmi_exit();
96 asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
98 if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
99 arm64_enter_nmi(regs);
100 else
101 enter_from_kernel_mode(regs);
104 asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
106 if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
107 arm64_exit_nmi(regs);
108 else
109 exit_to_kernel_mode(regs);
112 static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
114 unsigned long far = read_sysreg(far_el1);
116 enter_from_kernel_mode(regs);
117 local_daif_inherit(regs);
118 do_mem_abort(far, esr, regs);
119 local_daif_mask();
120 exit_to_kernel_mode(regs);
123 static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
125 unsigned long far = read_sysreg(far_el1);
127 enter_from_kernel_mode(regs);
128 local_daif_inherit(regs);
129 do_sp_pc_abort(far, esr, regs);
130 local_daif_mask();
131 exit_to_kernel_mode(regs);
134 static void noinstr el1_undef(struct pt_regs *regs)
136 enter_from_kernel_mode(regs);
137 local_daif_inherit(regs);
138 do_undefinstr(regs);
139 local_daif_mask();
140 exit_to_kernel_mode(regs);
143 static void noinstr el1_inv(struct pt_regs *regs, unsigned long esr)
145 enter_from_kernel_mode(regs);
146 local_daif_inherit(regs);
147 bad_mode(regs, 0, esr);
148 local_daif_mask();
149 exit_to_kernel_mode(regs);
152 static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
154 regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
156 lockdep_hardirqs_off(CALLER_ADDR0);
157 rcu_nmi_enter();
159 trace_hardirqs_off_finish();
162 static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
164 bool restore = regs->lockdep_hardirqs;
166 if (restore) {
167 trace_hardirqs_on_prepare();
168 lockdep_hardirqs_on_prepare(CALLER_ADDR0);
171 rcu_nmi_exit();
172 if (restore)
173 lockdep_hardirqs_on(CALLER_ADDR0);
176 static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
178 unsigned long far = read_sysreg(far_el1);
181 * The CPU masked interrupts, and we are leaving them masked during
182 * do_debug_exception(). Update PMR as if we had called
183 * local_daif_mask().
185 if (system_uses_irq_prio_masking())
186 gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
188 arm64_enter_el1_dbg(regs);
189 do_debug_exception(far, esr, regs);
190 arm64_exit_el1_dbg(regs);
193 static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
195 enter_from_kernel_mode(regs);
196 local_daif_inherit(regs);
197 do_ptrauth_fault(regs, esr);
198 local_daif_mask();
199 exit_to_kernel_mode(regs);
202 asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs)
204 unsigned long esr = read_sysreg(esr_el1);
206 switch (ESR_ELx_EC(esr)) {
207 case ESR_ELx_EC_DABT_CUR:
208 case ESR_ELx_EC_IABT_CUR:
209 el1_abort(regs, esr);
210 break;
212 * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
213 * recursive exception when trying to push the initial pt_regs.
215 case ESR_ELx_EC_PC_ALIGN:
216 el1_pc(regs, esr);
217 break;
218 case ESR_ELx_EC_SYS64:
219 case ESR_ELx_EC_UNKNOWN:
220 el1_undef(regs);
221 break;
222 case ESR_ELx_EC_BREAKPT_CUR:
223 case ESR_ELx_EC_SOFTSTP_CUR:
224 case ESR_ELx_EC_WATCHPT_CUR:
225 case ESR_ELx_EC_BRK64:
226 el1_dbg(regs, esr);
227 break;
228 case ESR_ELx_EC_FPAC:
229 el1_fpac(regs, esr);
230 break;
231 default:
232 el1_inv(regs, esr);
236 asmlinkage void noinstr enter_from_user_mode(void)
238 lockdep_hardirqs_off(CALLER_ADDR0);
239 CT_WARN_ON(ct_state() != CONTEXT_USER);
240 user_exit_irqoff();
241 trace_hardirqs_off_finish();
244 asmlinkage void noinstr exit_to_user_mode(void)
246 trace_hardirqs_on_prepare();
247 lockdep_hardirqs_on_prepare(CALLER_ADDR0);
248 user_enter_irqoff();
249 lockdep_hardirqs_on(CALLER_ADDR0);
252 static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
254 unsigned long far = read_sysreg(far_el1);
256 enter_from_user_mode();
257 local_daif_restore(DAIF_PROCCTX);
258 do_mem_abort(far, esr, regs);
261 static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
263 unsigned long far = read_sysreg(far_el1);
266 * We've taken an instruction abort from userspace and not yet
267 * re-enabled IRQs. If the address is a kernel address, apply
268 * BP hardening prior to enabling IRQs and pre-emption.
270 if (!is_ttbr0_addr(far))
271 arm64_apply_bp_hardening();
273 enter_from_user_mode();
274 local_daif_restore(DAIF_PROCCTX);
275 do_mem_abort(far, esr, regs);
278 static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
280 enter_from_user_mode();
281 local_daif_restore(DAIF_PROCCTX);
282 do_fpsimd_acc(esr, regs);
285 static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
287 enter_from_user_mode();
288 local_daif_restore(DAIF_PROCCTX);
289 do_sve_acc(esr, regs);
292 static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
294 enter_from_user_mode();
295 local_daif_restore(DAIF_PROCCTX);
296 do_fpsimd_exc(esr, regs);
299 static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
301 enter_from_user_mode();
302 local_daif_restore(DAIF_PROCCTX);
303 do_sysinstr(esr, regs);
306 static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
308 unsigned long far = read_sysreg(far_el1);
310 if (!is_ttbr0_addr(instruction_pointer(regs)))
311 arm64_apply_bp_hardening();
313 enter_from_user_mode();
314 local_daif_restore(DAIF_PROCCTX);
315 do_sp_pc_abort(far, esr, regs);
318 static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
320 enter_from_user_mode();
321 local_daif_restore(DAIF_PROCCTX);
322 do_sp_pc_abort(regs->sp, esr, regs);
325 static void noinstr el0_undef(struct pt_regs *regs)
327 enter_from_user_mode();
328 local_daif_restore(DAIF_PROCCTX);
329 do_undefinstr(regs);
332 static void noinstr el0_bti(struct pt_regs *regs)
334 enter_from_user_mode();
335 local_daif_restore(DAIF_PROCCTX);
336 do_bti(regs);
339 static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
341 enter_from_user_mode();
342 local_daif_restore(DAIF_PROCCTX);
343 bad_el0_sync(regs, 0, esr);
346 static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
348 /* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */
349 unsigned long far = read_sysreg(far_el1);
351 if (system_uses_irq_prio_masking())
352 gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
354 enter_from_user_mode();
355 do_debug_exception(far, esr, regs);
356 local_daif_restore(DAIF_PROCCTX_NOIRQ);
359 static void noinstr el0_svc(struct pt_regs *regs)
361 if (system_uses_irq_prio_masking())
362 gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
364 enter_from_user_mode();
365 do_el0_svc(regs);
368 static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
370 enter_from_user_mode();
371 local_daif_restore(DAIF_PROCCTX);
372 do_ptrauth_fault(regs, esr);
375 asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
377 unsigned long esr = read_sysreg(esr_el1);
379 switch (ESR_ELx_EC(esr)) {
380 case ESR_ELx_EC_SVC64:
381 el0_svc(regs);
382 break;
383 case ESR_ELx_EC_DABT_LOW:
384 el0_da(regs, esr);
385 break;
386 case ESR_ELx_EC_IABT_LOW:
387 el0_ia(regs, esr);
388 break;
389 case ESR_ELx_EC_FP_ASIMD:
390 el0_fpsimd_acc(regs, esr);
391 break;
392 case ESR_ELx_EC_SVE:
393 el0_sve_acc(regs, esr);
394 break;
395 case ESR_ELx_EC_FP_EXC64:
396 el0_fpsimd_exc(regs, esr);
397 break;
398 case ESR_ELx_EC_SYS64:
399 case ESR_ELx_EC_WFx:
400 el0_sys(regs, esr);
401 break;
402 case ESR_ELx_EC_SP_ALIGN:
403 el0_sp(regs, esr);
404 break;
405 case ESR_ELx_EC_PC_ALIGN:
406 el0_pc(regs, esr);
407 break;
408 case ESR_ELx_EC_UNKNOWN:
409 el0_undef(regs);
410 break;
411 case ESR_ELx_EC_BTI:
412 el0_bti(regs);
413 break;
414 case ESR_ELx_EC_BREAKPT_LOW:
415 case ESR_ELx_EC_SOFTSTP_LOW:
416 case ESR_ELx_EC_WATCHPT_LOW:
417 case ESR_ELx_EC_BRK64:
418 el0_dbg(regs, esr);
419 break;
420 case ESR_ELx_EC_FPAC:
421 el0_fpac(regs, esr);
422 break;
423 default:
424 el0_inv(regs, esr);
428 #ifdef CONFIG_COMPAT
429 static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
431 enter_from_user_mode();
432 local_daif_restore(DAIF_PROCCTX);
433 do_cp15instr(esr, regs);
436 static void noinstr el0_svc_compat(struct pt_regs *regs)
438 if (system_uses_irq_prio_masking())
439 gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
441 enter_from_user_mode();
442 do_el0_svc_compat(regs);
445 asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs)
447 unsigned long esr = read_sysreg(esr_el1);
449 switch (ESR_ELx_EC(esr)) {
450 case ESR_ELx_EC_SVC32:
451 el0_svc_compat(regs);
452 break;
453 case ESR_ELx_EC_DABT_LOW:
454 el0_da(regs, esr);
455 break;
456 case ESR_ELx_EC_IABT_LOW:
457 el0_ia(regs, esr);
458 break;
459 case ESR_ELx_EC_FP_ASIMD:
460 el0_fpsimd_acc(regs, esr);
461 break;
462 case ESR_ELx_EC_FP_EXC32:
463 el0_fpsimd_exc(regs, esr);
464 break;
465 case ESR_ELx_EC_PC_ALIGN:
466 el0_pc(regs, esr);
467 break;
468 case ESR_ELx_EC_UNKNOWN:
469 case ESR_ELx_EC_CP14_MR:
470 case ESR_ELx_EC_CP14_LS:
471 case ESR_ELx_EC_CP14_64:
472 el0_undef(regs);
473 break;
474 case ESR_ELx_EC_CP15_32:
475 case ESR_ELx_EC_CP15_64:
476 el0_cp15(regs, esr);
477 break;
478 case ESR_ELx_EC_BREAKPT_LOW:
479 case ESR_ELx_EC_SOFTSTP_LOW:
480 case ESR_ELx_EC_WATCHPT_LOW:
481 case ESR_ELx_EC_BKPT32:
482 el0_dbg(regs, esr);
483 break;
484 default:
485 el0_inv(regs, esr);
488 #endif /* CONFIG_COMPAT */