/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>
#include <asm/kernel-pgtable.h>
/*
 * Context tracking subsystem. Used to instrument transitions
 * between user and kernel mode.
 */
	.macro ct_user_exit, syscall = 0
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_exit
	.if \syscall == 1
	/*
	 * Save/restore needed during syscalls.  Restore syscall arguments from
	 * the values already saved on stack during kernel_entry.
	 */
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	.endif
#endif
	.endm

	.macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_enter
#endif
	.endm
	.macro kernel_ventry, el, label, regsize = 64
	.align 7
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
alternative_if ARM64_UNMAP_KERNEL_AT_EL0
	.if	\el == 0
	.if	\regsize == 64
	mrs	x30, tpidrro_el0
	msr	tpidrro_el0, xzr
	.else
	mov	x30, xzr
	.endif
	.endif
alternative_else_nop_endif
#endif

	sub	sp, sp, #S_FRAME_SIZE
	b	el\()\el\()_\label
	.endm

	.macro tramp_alias, dst, sym
	mov_q	\dst, TRAMP_VALIAS
	add	\dst, \dst, #(\sym - .entry.tramp.text)
	.endm
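/*
 * Usage sketch: tramp_alias turns a symbol in .entry.tramp.text into its
 * fixed trampoline alias, e.g. on the kernel_exit path below:
 *	tramp_alias	x30, tramp_exit_native
 */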
	// This macro corrupts x0-x3. It is the caller's duty
	// to save/restore them if required.
	.macro	apply_ssbd, state, targ, tmp1, tmp2
#ifdef CONFIG_ARM64_SSBD
alternative_cb	arm64_enable_wa2_handling
	b	\targ
alternative_cb_end
	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
	cbz	\tmp2, \targ
	ldr	\tmp2, [tsk, #TI_FLAGS]
	tbnz	\tmp2, #TIF_SSBD, \targ
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	mov	w1, #\state
alternative_cb	arm64_update_smccc_conduit
	nop					// Patched to SMC/HVC #0
alternative_cb_end
#endif
	.endm
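/*
 * Note: apply_ssbd is invoked with \state == 1 (enable the mitigation) in
 * kernel_entry and \state == 0 (disable it) in kernel_exit; see below.
 */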
	.macro	kernel_entry, el, regsize = 64
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
	mrs	x21, sp_el0
	mov	tsk, sp
	and	tsk, tsk, #~(THREAD_SIZE - 1)	// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TI_FLAGS]		// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.

	apply_ssbd 1, 1f, x22, x23

#ifdef CONFIG_ARM64_SSBD
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
#endif
1:
	mov	x29, xzr			// fp pointed to user-space
	.else
	add	x21, sp, #S_FRAME_SIZE
	get_thread_info tsk
	/* Save the task's original addr_limit and set USER_DS */
	ldr	x20, [tsk, #TI_ADDR_LIMIT]
	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
	mov	x20, #TASK_SIZE_64
	str	x20, [tsk, #TI_ADDR_LIMIT]
	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
	.endif /* \el == 0 */
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]
	stp	x22, x23, [sp, #S_PC]

	/*
	 * Set syscallno to -1 by default (overridden later if real syscall).
	 */
	.if	\el == 0
	mvn	x21, xzr
	str	x21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Set sp_el0 to current thread_info.
	 */
	.if	\el == 0
	msr	sp_el0, tsk
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm
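/*
 * Rough sketch of the pt_regs frame built above (offsets are the S_*
 * asm-offsets constants):
 *
 *	sp + 16 * 0..14		x0 - x29
 *	sp + S_LR		lr, then the aborted SP (x21)
 *	sp + S_PC		aborted PC (x22) and PSTATE (x23)
 */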
	.macro	kernel_exit, el
	.if	\el != 0
	/* Restore the task's original addr_limit. */
	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
	str	x20, [tsk, #TI_ADDR_LIMIT]

	/* No need to restore UAO, it will be restored from SPSR_EL1 */
	.endif

	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ct_user_enter
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
	tst	x22, #PSR_MODE32_BIT		// native task?
	b.eq	3f

#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr	contextidr_el1, xzr
#endif
alternative_else_nop_endif
#endif
3:
	apply_ssbd 0, 5f, x0, x1
5:
	.endif

	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp

	.if	\el == 0
alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	bne	4f
	msr	far_el1, x30
	tramp_alias	x30, tramp_exit_native
	br	x30
4:
	tramp_alias	x30, tramp_exit_compat
	br	x30
#endif
	.else
	eret
	.endif
	.endm
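/*
 * Note (KPTI exit path above): x30 is parked in far_el1 before branching
 * to the trampoline; tramp_exit restores it with mrs x30, far_el1 prior
 * to the final eret.
 */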
	.macro	get_thread_info, rd
	mrs	\rd, sp_el0
	.endm
	.macro	irq_stack_entry
	mov	x19, sp			// preserve the original sp

	/*
	 * Compare sp with the current thread_info, if the top
	 * ~(THREAD_SIZE - 1) bits match, we are on a task stack, and
	 * should switch to the irq stack.
	 */
	and	x25, x19, #~(THREAD_SIZE - 1)
	cmp	x25, tsk
	b.ne	9998f

	adr_this_cpu x25, irq_stack, x26
	mov	x26, #IRQ_STACK_START_SP
	add	x26, x25, x26

	/* switch to the irq stack */
	mov	sp, x26

	/*
	 * Add a dummy stack frame, this non-standard format is fixed up
	 * by unwind_frame()
	 */
	stp	x29, x19, [sp, #-16]!
	mov	x29, sp

9998:
	.endm

	/*
	 * x19 should be preserved between irq_stack_entry and
	 * irq_stack_exit.
	 */
	.macro	irq_stack_exit
	mov	sp, x19
	.endm
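/*
 * Usage sketch: the irq_handler macro below brackets the C interrupt
 * handler with irq_stack_entry/irq_stack_exit, relying on x19 surviving
 * the blr into C code (x19 is callee-saved in the AAPCS64).
 */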
/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr	.req	x25		// number of system calls
scno	.req	x26		// syscall number
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info
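/*
 * These aliases are assumed live well beyond el0_svc itself: kernel_entry,
 * kernel_exit and apply_ssbd above all dereference tsk directly.
 */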
/*
 * Interrupt handling.
 */
	.macro	irq_handler
	ldr_l	x1, handle_arch_irq
	mov	x0, sp
	irq_stack_entry
	blr	x1
	irq_stack_exit
	.endm
	.pushsection ".entry.text", "ax"
	.align	11
ENTRY(vectors)
	kernel_ventry	1, sync_invalid			// Synchronous EL1t
	kernel_ventry	1, irq_invalid			// IRQ EL1t
	kernel_ventry	1, fiq_invalid			// FIQ EL1t
	kernel_ventry	1, error_invalid		// Error EL1t

	kernel_ventry	1, sync				// Synchronous EL1h
	kernel_ventry	1, irq				// IRQ EL1h
	kernel_ventry	1, fiq_invalid			// FIQ EL1h
	kernel_ventry	1, error_invalid		// Error EL1h

	kernel_ventry	0, sync				// Synchronous 64-bit EL0
	kernel_ventry	0, irq				// IRQ 64-bit EL0
	kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
	kernel_ventry	0, error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
	kernel_ventry	0, error_invalid_compat, 32	// Error 32-bit EL0
#else
	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
	kernel_ventry	0, irq_invalid, 32		// IRQ 32-bit EL0
	kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
	kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
#endif
END(vectors)
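/*
 * Layout note: each kernel_ventry expands to one 128-byte (.align 7)
 * slot, giving the four architectural groups above: EL1t, EL1h,
 * 64-bit EL0 and 32-bit EL0, each with sync/IRQ/FIQ/error entries.
 */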
/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry \el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	b	bad_mode
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)
/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
	b.eq	el1_ia
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv

el1_ia:
	/*
	 * Fall through to the Data abort case
	 */
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x3, far_el1
	enable_dbg
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	clear_address_tag x0, x3
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	enable_dbg
	mov	x2, sp
	b	do_sp_pc_abort
el1_undef:
	/*
	 * Undefined instruction
	 */
	enable_dbg
	mov	x0, sp
	b	do_undefinstr
el1_dbg:
	/*
	 * Debug exception handling
	 */
	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception
	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	enable_dbg
	mov	x0, sp
	mov	x2, x1
	mov	x1, #BAD_SYNC
	b	bad_mode
ENDPROC(el1_sync)
	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPT
	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
	cbnz	w24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TI_FLAGS]		// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TI_FLAGS]		// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif
/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el0_sys
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adrp	stbl, compat_sys_call_table	// load compat syscall table pointer
	uxtw	scno, w7			// syscall number in w7 (r7)
	mov	sc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif
el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	clear_address_tag x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x26, far_el1
	msr	daifclr, #(8 | 4 | 1)
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_el0_ia_bp_hardening
	b	ret_to_user
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_acc
	b	ret_to_user
el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_exc
	b	ret_to_user
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x26, far_el1
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_sp_pc_abort
	b	ret_to_user
el0_undef:
	/*
	 * Undefined instruction
	 */
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, sp
	bl	do_undefinstr
	b	ret_to_user
el0_sys:
	/*
	 * System instructions, for trapped cache maintenance instructions
	 */
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_sysinstr
	b	ret_to_user
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	bl	do_debug_exception
	enable_dbg
	ct_user_exit
	b	ret_to_user
el0_inv:
	enable_dbg
	ct_user_exit
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mov	x2, x25
	bl	bad_el0_sync
	b	ret_to_user
ENDPROC(el0_sync)
	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	ct_user_exit
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	tbz	x22, #55, 1f
	bl	do_el0_irq_bp_hardening
1:
#endif
	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)
/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 */
ENTRY(cpu_switch_to)
	mov	x10, #THREAD_CPU_CONTEXT
	add	x8, x0, x10
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, x10
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	and	x9, x9, #~(THREAD_SIZE - 1)
	msr	sp_el0, x9
	ret
ENDPROC(cpu_switch_to)
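/*
 * Layout assumption: THREAD_CPU_CONTEXT is the asm-offsets offset of
 * cpu_context within task_struct, ordered x19-x28, fp, sp, pc to match
 * the stp/ldp pairs and the str/ldr of lr above.
 */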
/*
 * This is the fast syscall return path.  We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
	str	x0, [sp, #S_X0]			// returned x0
	ldr	x1, [tsk, #TI_FLAGS]		// re-check for syscall tracing
	and	x2, x1, #_TIF_SYSCALL_WORK
	cbnz	x2, ret_fast_syscall_trace
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
	kernel_exit 0
ret_fast_syscall_trace:
	enable_irq				// enable interrupts
	b	__sys_trace_return_skipped	// we already saved x0
/*
 * Ok, we need to do extra processing, enter the slow path.
 */
work_pending:
	mov	x0, sp				// 'regs'
	bl	do_notify_resume
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on		// enabled while in userspace
#endif
	ldr	x1, [tsk, #TI_FLAGS]		// re-check for single-step
	b	finish_ret_to_user
/*
 * "slow" syscall return path.
 */
ret_to_user:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
finish_ret_to_user:
	enable_step_tsk x1, x2
	kernel_exit 0
ENDPROC(ret_to_user)
/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)
/*
 * SVC handler.
 */
	.align	6
el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
	uxtw	scno, w8			// syscall number in w8
	mov	sc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	enable_dbg_and_irq
	ct_user_exit 1

	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall hooks
	tst	x16, #_TIF_SYSCALL_WORK
	b.ne	__sys_trace
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	mask_nospec64 scno, sc_nr, x19		// enforce bounds for syscall number
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine
	b	ret_fast_syscall
ni_sys:
	mov	x0, sp
	bl	do_ni_syscall
	b	ret_fast_syscall
ENDPROC(el0_svc)
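/*
 * Dispatch sketch: sys_call_table holds 64-bit function pointers, hence
 * the "lsl #3" scaled index above; mask_nospec64 clamps scno before the
 * table load so a mispredicted bounds check cannot index past the table.
 */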
	/*
	 * This is the really slow path.  We're going to be doing context
	 * switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	w0, #-1				// set default errno for
	cmp	scno, x0			// user-issued syscall(-1)
	b.ne	1f
	mov	x0, #-ENOSYS
	str	x0, [sp, #S_X0]
1:	mov	x0, sp
	bl	syscall_trace_enter
	cmp	w0, #-1				// skip the syscall?
	b.eq	__sys_trace_return_skipped
	uxtw	scno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	__ni_sys_trace
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp, #S_X0]			// save returned x0
__sys_trace_return_skipped:
	mov	x0, sp
	bl	syscall_trace_exit
	b	ret_to_user

__ni_sys_trace:
	mov	x0, sp
	bl	do_ni_syscall
	b	__sys_trace_return
	.popsection				// .entry.text
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * Exception vectors trampoline.
 */
	.pushsection ".entry.tramp.text", "ax"

	.macro tramp_map_kernel, tmp
	mrs	\tmp, ttbr1_el1
	sub	\tmp, \tmp, #SWAPPER_DIR_SIZE
	bic	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
	.endm

	.macro tramp_unmap_kernel, tmp
	mrs	\tmp, ttbr1_el1
	add	\tmp, \tmp, #SWAPPER_DIR_SIZE
	orr	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
	/*
	 * We avoid running the post_ttbr_update_workaround here because
	 * it's only needed by Cavium ThunderX, which requires KPTI to be
	 * disabled.
	 */
	.endm
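/*
 * Sketch of the mechanism: ttbr1_el1 is moved between the trampoline and
 * swapper page tables by subtracting/adding SWAPPER_DIR_SIZE, while
 * USER_ASID_FLAG flips between the user and kernel ASID of the pair.
 */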
	.macro tramp_ventry, regsize = 64
	.align	7
1:
	.if	\regsize == 64
	msr	tpidrro_el0, x30	// Restored in kernel_ventry
	.endif
	/*
	 * Defend against branch aliasing attacks by pushing a dummy
	 * entry onto the return stack and using a RET instruction to
	 * enter the full-fat kernel vectors.
	 */
	bl	2f
	b	.
2:
	tramp_map_kernel	x30
#ifdef CONFIG_RANDOMIZE_BASE
	adr	x30, tramp_vectors + PAGE_SIZE
	isb
	ldr	x30, [x30]
#else
	ldr	x30, =vectors
#endif
	prfm	plil1strm, [x30, #(1b - tramp_vectors)]
	msr	vbar_el1, x30
	add	x30, x30, #(1b - tramp_vectors)
	isb
	ret
	.endm

	.macro tramp_exit, regsize = 64
	adr	x30, tramp_vectors
	msr	vbar_el1, x30
	tramp_unmap_kernel	x30
	.if	\regsize == 64
	mrs	x30, far_el1
	.endif
	eret
	.endm
	.align	11
ENTRY(tramp_vectors)
	.space	0x400

	tramp_ventry
	tramp_ventry
	tramp_ventry
	tramp_ventry

	tramp_ventry	32
	tramp_ventry	32
	tramp_ventry	32
	tramp_ventry	32
END(tramp_vectors)

ENTRY(tramp_exit_native)
	tramp_exit
END(tramp_exit_native)

ENTRY(tramp_exit_compat)
	tramp_exit	32
END(tramp_exit_compat)

	.ltorg
	.popsection				// .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
	.pushsection ".rodata", "a"
	.align PAGE_SHIFT
	.globl	__entry_tramp_data_start
__entry_tramp_data_start:
	.quad	vectors
	.popsection				// .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)