arch/arm64/kernel/sdei.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2017 Arm Ltd.
#define pr_fmt(fmt) "sdei: " fmt

#include <linux/arm-smccc.h>
#include <linux/arm_sdei.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched/task_stack.h>
#include <linux/scs.h>
#include <linux/uaccess.h>

#include <asm/alternative.h>
#include <asm/exception.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/stacktrace.h>
#include <asm/sysreg.h>
#include <asm/vmap_stack.h>

/* Whether we exit back to firmware via SMC or HVC; set from the SMCCC conduit. */
unsigned long sdei_exit_mode;

/*
 * VMAP'd stacks check for stack overflow on exception, using sp as a scratch
 * register; this means SDEI has to switch to its own stack. We need two stacks
 * as a critical event may interrupt a normal event that has just taken a
 * synchronous exception, and is using sp as scratch register. For a critical
 * event interrupting a normal event, we can't reliably tell if we were on the
 * sdei stack.
 * For now, we allocate stacks when the driver is probed.
 */
DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);

#ifdef CONFIG_VMAP_STACK
DEFINE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DEFINE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
#endif
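
/*
 * Illustrative nesting scenario behind the two-stacks requirement above
 * (a sketch, not code): a critical-priority event can preempt a
 * normal-priority handler, so each priority needs its own per-CPU stack:
 *
 *	task stack --[normal event]--> sdei_stack_normal
 *			--[critical event]--> sdei_stack_critical
 */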

DECLARE_PER_CPU(unsigned long *, sdei_shadow_call_stack_normal_ptr);
DECLARE_PER_CPU(unsigned long *, sdei_shadow_call_stack_critical_ptr);

#ifdef CONFIG_SHADOW_CALL_STACK
DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_normal_ptr);
DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_critical_ptr);
#endif

static void _free_sdei_stack(unsigned long * __percpu *ptr, int cpu)
{
	unsigned long *p;

	p = per_cpu(*ptr, cpu);
	if (p) {
		per_cpu(*ptr, cpu) = NULL;
		vfree(p);
	}
}

static void free_sdei_stacks(void)
{
	int cpu;

	if (!IS_ENABLED(CONFIG_VMAP_STACK))
		return;

	for_each_possible_cpu(cpu) {
		_free_sdei_stack(&sdei_stack_normal_ptr, cpu);
		_free_sdei_stack(&sdei_stack_critical_ptr, cpu);
	}
}

static int _init_sdei_stack(unsigned long * __percpu *ptr, int cpu)
{
	unsigned long *p;

	p = arch_alloc_vmap_stack(SDEI_STACK_SIZE, cpu_to_node(cpu));
	if (!p)
		return -ENOMEM;
	per_cpu(*ptr, cpu) = p;

	return 0;
}

static int init_sdei_stacks(void)
{
	int cpu;
	int err = 0;

	if (!IS_ENABLED(CONFIG_VMAP_STACK))
		return 0;

	for_each_possible_cpu(cpu) {
		err = _init_sdei_stack(&sdei_stack_normal_ptr, cpu);
		if (err)
			break;
		err = _init_sdei_stack(&sdei_stack_critical_ptr, cpu);
		if (err)
			break;
	}

	if (err)
		free_sdei_stacks();

	return err;
}

static void _free_sdei_scs(unsigned long * __percpu *ptr, int cpu)
{
	void *s;

	s = per_cpu(*ptr, cpu);
	if (s) {
		per_cpu(*ptr, cpu) = NULL;
		scs_free(s);
	}
}

static void free_sdei_scs(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		_free_sdei_scs(&sdei_shadow_call_stack_normal_ptr, cpu);
		_free_sdei_scs(&sdei_shadow_call_stack_critical_ptr, cpu);
	}
}

static int _init_sdei_scs(unsigned long * __percpu *ptr, int cpu)
{
	void *s;

	s = scs_alloc(cpu_to_node(cpu));
	if (!s)
		return -ENOMEM;
	per_cpu(*ptr, cpu) = s;

	return 0;
}

static int init_sdei_scs(void)
{
	int cpu;
	int err = 0;

	if (!IS_ENABLED(CONFIG_SHADOW_CALL_STACK))
		return 0;

	for_each_possible_cpu(cpu) {
		err = _init_sdei_scs(&sdei_shadow_call_stack_normal_ptr, cpu);
		if (err)
			break;
		err = _init_sdei_scs(&sdei_shadow_call_stack_critical_ptr, cpu);
		if (err)
			break;
	}

	if (err)
		free_sdei_scs();

	return err;
}

static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
	unsigned long high = low + SDEI_STACK_SIZE;

	return on_stack(sp, low, high, STACK_TYPE_SDEI_NORMAL, info);
}

static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
	unsigned long high = low + SDEI_STACK_SIZE;

	return on_stack(sp, low, high, STACK_TYPE_SDEI_CRITICAL, info);
}

bool _on_sdei_stack(unsigned long sp, struct stack_info *info)
{
	if (!IS_ENABLED(CONFIG_VMAP_STACK))
		return false;

	if (on_sdei_critical_stack(sp, info))
		return true;

	if (on_sdei_normal_stack(sp, info))
		return true;

	return false;
}
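
/*
 * Caller-side sketch (simplified, for illustration; the real check lives in
 * the unwinder's on_accessible_stack()): the unwinder only consults the SDEI
 * stacks when unwinding from an NMI-like context, e.g.:
 *
 *	if (in_nmi() && _on_sdei_stack(sp, info))
 *		return true;
 */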

unsigned long sdei_arch_get_entry_point(int conduit)
{
	/*
	 * SDEI works between adjacent exception levels. If we booted at EL1 we
	 * assume a hypervisor is marshalling events. If we booted at EL2 and
	 * dropped to EL1 because we don't support VHE, then we can't support
	 * SDEI.
	 */
	if (is_hyp_mode_available() && !is_kernel_in_hyp_mode()) {
		pr_err("Not supported on this hardware/boot configuration\n");
		goto out_err;
	}

	if (init_sdei_stacks())
		goto out_err;

	if (init_sdei_scs())
		goto out_err_free_stacks;

	sdei_exit_mode = (conduit == SMCCC_CONDUIT_HVC) ? SDEI_EXIT_HVC : SDEI_EXIT_SMC;

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	if (arm64_kernel_unmapped_at_el0()) {
		unsigned long offset;

		offset = (unsigned long)__sdei_asm_entry_trampoline -
			 (unsigned long)__entry_tramp_text_start;
		return TRAMP_VALIAS + offset;
	} else
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
		return (unsigned long)__sdei_asm_handler;

out_err_free_stacks:
	free_sdei_stacks();
out_err:
	return 0;
}
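
/*
 * Note (contract sketch, see drivers/firmware/arm_sdei.c): the SDEI core
 * passes the address returned above to firmware as the client entry point
 * when registering events; returning 0 makes the core treat SDEI as
 * unsupported on this system.
 */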

/*
 * __sdei_handler() returns one of:
 *
 * SDEI_EV_HANDLED -  success, return to the interrupted context.
 * SDEI_EV_FAILED -   failure, return this error code to firmware.
 * virtual-address -  success, return to this address.
 */
static __kprobes unsigned long _sdei_handler(struct pt_regs *regs,
					     struct sdei_registered_event *arg)
{
	u32 mode;
	int i, err = 0;
	int clobbered_registers = 4;
	u64 elr = read_sysreg(elr_el1);
	u32 kernel_mode = read_sysreg(CurrentEL) | 1;	/* +SPSel */
	unsigned long vbar = read_sysreg(vbar_el1);

	/* The KPTI entry trampoline clobbers one extra register */
	if (arm64_kernel_unmapped_at_el0())
		clobbered_registers++;

	/* Retrieve the missing register values */
	for (i = 0; i < clobbered_registers; i++) {
		/* from within the handler, this call always succeeds */
		sdei_api_event_context(i, &regs->regs[i]);
	}

	err = sdei_event_handler(regs, arg);
	if (err)
		return SDEI_EV_FAILED;

	if (elr != read_sysreg(elr_el1)) {
		/*
		 * We took a synchronous exception from the SDEI handler.
		 * This could deadlock, and if you interrupt KVM it will
		 * hyp-panic instead.
		 */
		pr_warn("unsafe: exception during handler\n");
	}

	mode = regs->pstate & (PSR_MODE32_BIT | PSR_MODE_MASK);

	/*
	 * If we interrupted the kernel with interrupts masked, we always go
	 * back to wherever we came from.
	 */
	if (mode == kernel_mode && !interrupts_enabled(regs))
		return SDEI_EV_HANDLED;

	/*
	 * Otherwise, we pretend this was an IRQ. This lets user space tasks
	 * receive signals before we return to them, and KVM to invoke its
	 * world switch to do the same.
	 *
	 * See DDI0487B.a Table D1-7 'Vector offsets from vector table base
	 * address'.
	 */
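	/*
	 * IRQ vector offsets used below (from that table):
	 *	0x280: IRQ, current EL with SP_ELx
	 *	0x480: IRQ, lower EL using AArch64
	 *	0x680: IRQ, lower EL using AArch32
	 */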
	if (mode == kernel_mode)
		return vbar + 0x280;
	else if (mode & PSR_MODE32_BIT)
		return vbar + 0x680;

	return vbar + 0x480;
}
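
/*
 * Usage sketch (illustrative only, not part of this file): a driver binds a
 * callback to a firmware-assigned event number through the SDEI core. The
 * callback is invoked from sdei_event_handler() above in NMI-like context,
 * so it must not sleep or take ordinary locks.
 */
#if 0	/* example only */
static int example_sdei_cb(u32 event_num, struct pt_regs *regs, void *arg)
{
	/* NMI-like context: keep this minimal */
	return 0;
}

static int example_bind(u32 event_num)
{
	/* event_num is assumed to come from firmware/DT */
	int err = sdei_event_register(event_num, example_sdei_cb, NULL);

	if (!err)
		err = sdei_event_enable(event_num);
	return err;
}
#endif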

static void __kprobes notrace __sdei_pstate_entry(void)
{
	/*
	 * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
	 * whether PSTATE bits are inherited unchanged or generated from
	 * scratch, and the TF-A implementation always clears PAN and always
	 * clears UAO. There are no other known implementations.
	 *
	 * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
	 * PSTATE is modified upon architectural exceptions, and so PAN is
	 * either inherited or set per SCTLR_ELx.SPAN, and UAO is always
	 * cleared.
	 *
	 * We must explicitly reset PAN to the expected state, including
	 * clearing it when the host isn't using it, in case a VM had it set.
	 */
	if (system_uses_hw_pan())
		set_pstate_pan(1);
	else if (cpu_has_pan())
		set_pstate_pan(0);
}

asmlinkage noinstr unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
	unsigned long ret;

	/*
	 * We didn't take an exception to get here, so the HW hasn't
	 * set/cleared bits in PSTATE that we may rely on. Initialize PAN.
	 */
	__sdei_pstate_entry();

	arm64_enter_nmi(regs);

	ret = _sdei_handler(regs, arg);

	arm64_exit_nmi(regs);

	return ret;
}