// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2017 Arm Ltd.
#define pr_fmt(fmt) "sdei: " fmt

#include <linux/arm-smccc.h>
#include <linux/arm_sdei.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched/task_stack.h>
#include <linux/scs.h>
#include <linux/uaccess.h>

#include <asm/alternative.h>
#include <asm/exception.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/stacktrace.h>
#include <asm/sysreg.h>
#include <asm/vmap_stack.h>

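/* Whether firmware should be reached by HVC or SMC when the handler exits. */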
unsigned long sdei_exit_mode;

/*
 * VMAP'd stacks check for stack overflow on exception using sp as a scratch
 * register, meaning SDEI has to switch to its own stack. We need two stacks as
 * a critical event may interrupt a normal event that has just taken a
 * synchronous exception, and is using sp as scratch register. For a critical
 * event interrupting a normal event, we can't reliably tell if we were on the
 * sdei stack.
 * For now, we allocate stacks when the driver is probed.
 */
DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);

#ifdef CONFIG_VMAP_STACK
DEFINE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DEFINE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
#endif

DECLARE_PER_CPU(unsigned long *, sdei_shadow_call_stack_normal_ptr);
DECLARE_PER_CPU(unsigned long *, sdei_shadow_call_stack_critical_ptr);

#ifdef CONFIG_SHADOW_CALL_STACK
DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_normal_ptr);
DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_critical_ptr);
#endif
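
/* The SDEI event, if any, currently being handled at each priority on this CPU. */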
DEFINE_PER_CPU(struct sdei_registered_event *, sdei_active_normal_event);
DEFINE_PER_CPU(struct sdei_registered_event *, sdei_active_critical_event);
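
/* Free one CPU's VMAP'd SDEI stack and clear the per-CPU pointer. */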
static void _free_sdei_stack(unsigned long * __percpu *ptr, int cpu)
{
	unsigned long *p;

	p = per_cpu(*ptr, cpu);
	if (p) {
		per_cpu(*ptr, cpu) = NULL;
		vfree(p);
	}
}

static void free_sdei_stacks(void)
{
	int cpu;

	if (!IS_ENABLED(CONFIG_VMAP_STACK))
		return;

	for_each_possible_cpu(cpu) {
		_free_sdei_stack(&sdei_stack_normal_ptr, cpu);
		_free_sdei_stack(&sdei_stack_critical_ptr, cpu);
	}
}
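
/* Allocate one CPU's VMAP'd SDEI stack on that CPU's local NUMA node. */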
static int _init_sdei_stack(unsigned long * __percpu *ptr, int cpu)
{
	unsigned long *p;

	p = arch_alloc_vmap_stack(SDEI_STACK_SIZE, cpu_to_node(cpu));
	if (!p)
		return -ENOMEM;
	per_cpu(*ptr, cpu) = p;

	return 0;
}
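
/*
 * Allocate a normal and a critical SDEI stack for each possible CPU; on
 * failure, free whatever has already been allocated.
 */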
static int init_sdei_stacks(void)
{
	int cpu;
	int err = 0;

	if (!IS_ENABLED(CONFIG_VMAP_STACK))
		return 0;

	for_each_possible_cpu(cpu) {
		err = _init_sdei_stack(&sdei_stack_normal_ptr, cpu);
		if (err)
			break;
		err = _init_sdei_stack(&sdei_stack_critical_ptr, cpu);
		if (err)
			break;
	}

	if (err)
		free_sdei_stacks();

	return err;
}
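
/*
 * The shadow call stack helpers mirror the VMAP'd stack helpers above: each
 * priority gets its own per-CPU shadow call stack.
 */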
static void _free_sdei_scs(unsigned long * __percpu *ptr, int cpu)
{
	void *s;

	s = per_cpu(*ptr, cpu);
	if (s) {
		per_cpu(*ptr, cpu) = NULL;
		scs_free(s);
	}
}

static void free_sdei_scs(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		_free_sdei_scs(&sdei_shadow_call_stack_normal_ptr, cpu);
		_free_sdei_scs(&sdei_shadow_call_stack_critical_ptr, cpu);
	}
}

static int _init_sdei_scs(unsigned long * __percpu *ptr, int cpu)
{
	void *s;

	s = scs_alloc(cpu_to_node(cpu));
	if (!s)
		return -ENOMEM;
	per_cpu(*ptr, cpu) = s;

	return 0;
}
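
/*
 * Allocate a normal and a critical shadow call stack for each possible CPU
 * when shadow call stacks are enabled; on failure, free whatever has already
 * been allocated.
 */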
static int init_sdei_scs(void)
{
	int cpu;
	int err = 0;

	if (!scs_is_enabled())
		return 0;

	for_each_possible_cpu(cpu) {
		err = _init_sdei_scs(&sdei_shadow_call_stack_normal_ptr, cpu);
		if (err)
			break;
		err = _init_sdei_scs(&sdei_shadow_call_stack_critical_ptr, cpu);
		if (err)
			break;
	}

	if (err)
		free_sdei_scs();

	return err;
}
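
/*
 * Called by the SDEI driver to discover the entry point that firmware should
 * branch to when delivering an event. Returns 0 if SDEI cannot be supported
 * in this configuration.
 */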
unsigned long sdei_arch_get_entry_point(int conduit)
{
	/*
	 * SDEI works between adjacent exception levels. If we booted at EL1 we
	 * assume a hypervisor is marshalling events. If we booted at EL2 and
	 * dropped to EL1 because we don't support VHE, then we can't support
	 * SDEI.
	 */
	if (is_hyp_nvhe()) {
		pr_err("Not supported on this hardware/boot configuration\n");
		goto out_err;
	}

	if (init_sdei_stacks())
		goto out_err;

	if (init_sdei_scs())
		goto out_err_free_stacks;

	sdei_exit_mode = (conduit == SMCCC_CONDUIT_HVC) ? SDEI_EXIT_HVC : SDEI_EXIT_SMC;

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	if (arm64_kernel_unmapped_at_el0()) {
		unsigned long offset;

		offset = (unsigned long)__sdei_asm_entry_trampoline -
			 (unsigned long)__entry_tramp_text_start;
		return TRAMP_VALIAS + offset;
	}
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
	return (unsigned long)__sdei_asm_handler;

out_err_free_stacks:
	free_sdei_stacks();
out_err:
	return 0;
}

/*
 * do_sdei_event() returns one of:
 *  SDEI_EV_HANDLED -  success, return to the interrupted context.
 *  SDEI_EV_FAILED  -  failure, return this error code to firmware.
 *  virtual-address -  success, return to this address.
 */
unsigned long __kprobes do_sdei_event(struct pt_regs *regs,
				      struct sdei_registered_event *arg)
{
	u32 mode;
	int i, err = 0;
	int clobbered_registers = 4;
	u64 elr = read_sysreg(elr_el1);
	u32 kernel_mode = read_sysreg(CurrentEL) | 1;	/* +SPSel */
	unsigned long vbar = read_sysreg(vbar_el1);
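
	/*
	 * Firmware uses x0-x3 to pass the event number, argument, interrupted
	 * PC and PSTATE into the handler, and the KPTI entry trampoline
	 * clobbers x4 too, so the interrupted context's values of these
	 * registers have to be read back from firmware.
	 */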
	if (arm64_kernel_unmapped_at_el0())
		clobbered_registers++;

	/* Retrieve the missing registers values */
	for (i = 0; i < clobbered_registers; i++) {
		/* from within the handler, this call always succeeds */
		sdei_api_event_context(i, &regs->regs[i]);
	}

	err = sdei_event_handler(regs, arg);
	if (err)
		return SDEI_EV_FAILED;

	if (elr != read_sysreg(elr_el1)) {
		/*
		 * We took a synchronous exception from the SDEI handler.
		 * This could deadlock, and if you interrupt KVM it will
		 * hyp-panic instead.
		 */
		pr_warn("unsafe: exception during handler\n");
	}

	mode = regs->pstate & (PSR_MODE32_BIT | PSR_MODE_MASK);

	/*
	 * If we interrupted the kernel with interrupts masked, we always go
	 * back to wherever we came from.
	 */
	if (mode == kernel_mode && !interrupts_enabled(regs))
		return SDEI_EV_HANDLED;

	/*
	 * Otherwise, we pretend this was an IRQ. This lets user space tasks
	 * receive signals before we return to them, and KVM to invoke its
	 * world switch to do the same.
	 *
	 * See DDI0487B.a Table D1-7 'Vector offsets from vector table base
	 * address'.
	 */
	if (mode == kernel_mode)
		return vbar + 0x280;	/* IRQ, current EL with SPx */
	else if (mode & PSR_MODE32_BIT)
		return vbar + 0x680;	/* IRQ, lower EL using AArch32 */

	return vbar + 0x480;		/* IRQ, lower EL using AArch64 */
}