/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
/*
 * Context tracking subsystem.  Used to instrument transitions
 * between user and kernel mode.
 */
	.macro ct_user_exit, syscall = 0
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_exit
	.if \syscall == 1
	/*
	 * Save/restore needed during syscalls.  Restore syscall arguments from
	 * the values already saved on stack during kernel_entry.
	 */
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	.endif
#endif
	.endm
	.macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_enter
#endif
	.endm
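/*
 * Illustrative only (a sketch, not part of the original flow): with
 * context tracking enabled, an EL0 exception path calls ct_user_exit
 * shortly after kernel_entry, and kernel_exit runs ct_user_enter on the
 * way back out, so RCU/NO_HZ see the user<->kernel transition, e.g.:
 *
 *	el0_da:
 *		...
 *		ct_user_exit			// left user mode
 *		bl	do_mem_abort
 *		b	ret_to_user		// kernel_exit 0 -> ct_user_enter
 */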
/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3

	.macro	kernel_entry, el, regsize = 64
	sub	sp, sp, #S_FRAME_SIZE
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]
	.if	\el == 0
	mrs	x21, sp_el0
	get_thread_info tsk			// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TI_FLAGS]		// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.
	.else
	add	x21, sp, #S_FRAME_SIZE
	.endif
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]
	stp	x22, x23, [sp, #S_PC]
	/*
	 * Set syscallno to -1 by default (overridden later if real syscall).
	 */
	.if	\el == 0
	mvn	x21, xzr
	str	x21, [sp, #S_SYSCALLNO]
	.endif
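	/*
	 * (-1 is never a valid syscall number, so anything inspecting
	 * pt_regs->syscallno, e.g. the syscall restart logic, can tell
	 * that no syscall was in progress for non-syscall exceptions.)
	 */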
	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm
	.macro	kernel_exit, el
	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ct_user_enter
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
#ifdef CONFIG_ARM64_ERRATUM_845719

#undef SEQUENCE_ORG
#undef SEQUENCE_ALT

#ifdef CONFIG_PID_IN_CONTEXTIDR
#define SEQUENCE_ORG	"nop ; nop ; nop"
#define SEQUENCE_ALT	"tbz x22, #4, 1f ; mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:"
#else

#define SEQUENCE_ORG	"nop ; nop"
#define SEQUENCE_ALT	"tbz x22, #4, 1f ; msr contextidr_el1, xzr; 1:"

#endif
	alternative_insn SEQUENCE_ORG, SEQUENCE_ALT, ARM64_WORKAROUND_845719

#endif
	.endif
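	/*
	 * alternative_insn leaves SEQUENCE_ORG (nops) in the image; on CPUs
	 * where ARM64_WORKAROUND_845719 is detected at boot, the alternatives
	 * patching rewrites it to SEQUENCE_ALT, which tests SPSR bit 4 (x22,
	 * the AArch32 execution state bit) and forces a CONTEXTIDR_EL1 write
	 * before returning to a 32-bit EL0 task.
	 */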
	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp
	eret					// return to kernel or user
	.endm
	.macro	get_thread_info, rd
	mov	\rd, sp
	and	\rd, \rd, #~(THREAD_SIZE - 1)	// top of stack
	.endm
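	/*
	 * Worked example (assuming the usual arm64 THREAD_SIZE of 16KiB):
	 * ~(16384 - 1) clears the low 14 bits of sp, rounding it down to the
	 * base of the current kernel stack, where struct thread_info lives.
	 */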
/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr	.req	x25		// number of system calls
scno	.req	x26		// syscall number
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info
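/*
 * For illustration (not part of this file): a userspace stub matching the
 * convention above places the syscall number in w8 for AArch64,
 *
 *	mov	w8, #__NR_getpid
 *	svc	#0
 *
 * while a compat AArch32 caller uses r7 instead, which is why
 * el0_svc_compat below reads scno from w7.
 */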
/*
 * Interrupt handling.
 */
	.macro	irq_handler
	adrp	x1, handle_arch_irq
	ldr	x1, [x1, #:lo12:handle_arch_irq]
	mov	x0, sp
	blr	x1
	.endm
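/*
 * handle_arch_irq is a C function pointer, installed by the irqchip
 * driver via set_handle_irq(), with roughly the signature
 *
 *	void (*handle_arch_irq)(struct pt_regs *);
 *
 * hence the single pt_regs argument passed in x0 above.
 */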
	.text

/*
 * Exception vectors.
 */

	.align	11
ENTRY(vectors)
	ventry	el1_sync_invalid		// Synchronous EL1t
	ventry	el1_irq_invalid			// IRQ EL1t
	ventry	el1_fiq_invalid			// FIQ EL1t
	ventry	el1_error_invalid		// Error EL1t
	ventry	el1_sync			// Synchronous EL1h
	ventry	el1_irq				// IRQ EL1h
	ventry	el1_fiq_invalid			// FIQ EL1h
	ventry	el1_error_invalid		// Error EL1h
	ventry	el0_sync			// Synchronous 64-bit EL0
	ventry	el0_irq				// IRQ 64-bit EL0
	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
	ventry	el0_error_invalid		// Error 64-bit EL0
#ifdef CONFIG_COMPAT
	ventry	el0_sync_compat			// Synchronous 32-bit EL0
	ventry	el0_irq_compat			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
	ventry	el0_error_invalid_compat	// Error 32-bit EL0
#else
	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
	ventry	el0_irq_invalid			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
	ventry	el0_error_invalid		// Error 32-bit EL0
#endif
END(vectors)
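/*
 * Layout note: VBAR_EL1 requires 2KB alignment (hence .align 11), and the
 * ventry macro pads each handler slot to 128 bytes, giving the 16-entry
 * table above: four slots each for EL1t, EL1h, 64-bit EL0 and 32-bit EL0.
 */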
/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	b	bad_mode
	.endm
el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)
#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif
el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)
/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv
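	/*
	 * The final b.ge works because the debug exception classes occupy
	 * the numerically highest EC encodings, so anything greater than or
	 * equal to ESR_ELx_EC_BREAKPT_CUR must be a debug exception; the
	 * EL0 dispatchers below use the same trick with BREAKPT_LOW.
	 */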
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	enable_dbg
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	enable_dbg
	mov	x2, sp
	b	do_sp_pc_abort
el1_undef:
	/*
	 * Undefined instruction
	 */
	enable_dbg
	mov	x0, sp
	b	do_undefinstr
el1_dbg:
	/*
	 * Debug exception handling
	 */
	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception
	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	enable_dbg
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el1_sync)
	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler
#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
	cbnz	w24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TI_FLAGS]		// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)
#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TI_FLAGS]		// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif
/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adrp	stbl, compat_sys_call_table	// load compat syscall table pointer
	uxtw	scno, w7			// syscall number in w7 (r7)
	mov	sc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif
el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	bic	x0, x26, #(0xff << 56)
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	orr	x1, x25, #1 << 24		// use reserved ISS bit for instruction aborts
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_acc
	b	ret_to_user
el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_exc
	b	ret_to_user
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_sp_pc_abort
	b	ret_to_user
el0_undef:
	/*
	 * Undefined instruction
	 */
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, sp
	bl	do_undefinstr
	b	ret_to_user
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	bl	do_debug_exception
	enable_dbg
	ct_user_exit
	b	ret_to_user
el0_inv:
	enable_dbg
	ct_user_exit
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el0_sync)
	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	ct_user_exit
	irq_handler
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)
/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 * x0 = previous task_struct (must be preserved across the switch)
 * x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 */
ENTRY(cpu_switch_to)
	mov	x10, #THREAD_CPU_CONTEXT
	add	x8, x0, x10
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	add	x8, x1, x10
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	mov	sp, x9
	ret
ENDPROC(cpu_switch_to)
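/*
 * Sketch of the C-side caller (the arm64 process-switch code): the
 * scheduler's switch_to() glue ends up doing roughly
 *
 *	last = cpu_switch_to(prev, next);
 *
 * with prev in x0 and next in x1, matching the register contract above.
 */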
/*
 * This is the fast syscall return path.  We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
	str	x0, [sp, #S_X0]			// returned x0
	ldr	x1, [tsk, #TI_FLAGS]		// re-check for syscall tracing
	and	x2, x1, #_TIF_SYSCALL_WORK
	cbnz	x2, ret_fast_syscall_trace
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
	kernel_exit 0
ret_fast_syscall_trace:
	enable_irq				// enable interrupts
	b	__sys_trace_return_skipped	// we already saved x0
/*
 * Ok, we need to do extra processing, enter the slow path.
 */
work_pending:
	tbnz	x1, #TIF_NEED_RESCHED, work_resched
	/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
	ldr	x2, [sp, #S_PSTATE]
	mov	x0, sp				// 'regs'
	tst	x2, #PSR_MODE_MASK		// user mode regs?
	b.ne	no_work_pending			// returning to kernel
	enable_irq				// enable interrupts for do_notify_resume()
	bl	do_notify_resume
	b	ret_to_user
work_resched:
	bl	schedule
641 * "slow" syscall return path.
644 disable_irq // disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
no_work_pending:
	kernel_exit 0
ENDPROC(ret_to_user)
/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)
/*
 * SVC handler.
 */
	.align	6
el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
	uxtw	scno, w8			// syscall number in w8
	mov	sc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	enable_dbg_and_irq
	ct_user_exit 1
	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall hooks
	tst	x16, #_TIF_SYSCALL_WORK
	b.ne	__sys_trace
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine
	b	ret_fast_syscall
ni_sys:
	mov	x0, sp
	bl	do_ni_syscall
	b	ret_fast_syscall
ENDPROC(el0_svc)
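/*
 * Table-lookup note: sys_call_table is an array of 64-bit function
 * pointers, so 'ldr x16, [stbl, scno, lsl #3]' scales the syscall number
 * by 8 to index it; the upper-limit check above keeps scno in bounds.
 */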
	/*
	 * This is the really slow path.  We're going to be doing context
	 * switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	w0, #-1				// set default errno for
	cmp	scno, x0			// user-issued syscall(-1)
	b.ne	1f
	mov	x0, #-ENOSYS
	str	x0, [sp, #S_X0]
1:	mov	x0, sp
	bl	syscall_trace_enter
	cmp	w0, #-1				// skip the syscall?
	b.eq	__sys_trace_return_skipped
	uxtw	scno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	__ni_sys_trace
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine
__sys_trace_return:
	str	x0, [sp, #S_X0]			// save returned x0
__sys_trace_return_skipped:
	mov	x0, sp
	bl	syscall_trace_exit
	b	ret_to_user

__ni_sys_trace:
	mov	x0, sp
	bl	do_ni_syscall
	b	__sys_trace_return
/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
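/*
 * The wrapper exists because sys_rt_sigreturn needs the saved user
 * register state: it takes the pt_regs pointer (the current sp here) as
 * its argument rather than ordinary syscall arguments.
 */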