arch/arm64/kernel/signal32.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/signal.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 * Modified by Will Deacon <will.deacon@arm.com>
 */

#include <linux/compat.h>
#include <linux/signal.h>
#include <linux/syscalls.h>
#include <linux/ratelimit.h>

#include <asm/esr.h>
#include <asm/fpsimd.h>
#include <asm/signal32.h>
#include <asm/traps.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/vdso.h>
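
/*
 * Note: this mirrors the VFP frame laid out for native 32-bit tasks by the
 * arch/arm kernel (struct user_vfp followed by struct user_vfp_exc), so a
 * compat task sees the same layout it would get from a 32-bit kernel.
 */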
struct compat_vfp_sigframe {
        compat_ulong_t magic;
        compat_ulong_t size;
        struct compat_user_vfp {
                compat_u64 fpregs[32];
                compat_ulong_t fpscr;
        } ufp;
        struct compat_user_vfp_exc {
                compat_ulong_t fpexc;
                compat_ulong_t fpinst;
                compat_ulong_t fpinst2;
        } ufp_exc;
} __attribute__((__aligned__(8)));

#define VFP_MAGIC		0x56465001
#define VFP_STORAGE_SIZE	sizeof(struct compat_vfp_sigframe)

#define FSR_WRITE_SHIFT		(11)

struct compat_aux_sigframe {
        struct compat_vfp_sigframe vfp;

        /* Something that isn't a valid magic number for any coprocessor. */
        unsigned long end_magic;
} __attribute__((__aligned__(8)));

#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
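
/*
 * The compat sigset is an array of two 32-bit words, whereas the native
 * arm64 sigset is a single 64-bit word; these helpers split and rejoin the
 * mask, keeping the low 32 signals in sig[0].
 */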
static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
{
        compat_sigset_t cset;

        cset.sig[0] = set->sig[0] & 0xffffffffull;
        cset.sig[1] = set->sig[0] >> 32;

        return copy_to_user(uset, &cset, sizeof(*uset));
}

static inline int get_sigset_t(sigset_t *set,
                               const compat_sigset_t __user *uset)
{
        compat_sigset_t s32;

        if (copy_from_user(&s32, uset, sizeof(*uset)))
                return -EFAULT;

        set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
        return 0;
}

/*
 * VFP save/restore code.
 *
 * We have to be careful with endianness, since the fpsimd context-switch
 * code operates on 128-bit (Q) register values whereas the compat ABI
 * uses an array of 64-bit (D) registers. Consequently, we need to swap
 * the two halves of each Q register when running on a big-endian CPU.
 */
union __fpsimd_vreg {
        __uint128_t raw;
        struct {
#ifdef __AARCH64EB__
                u64 hi;
                u64 lo;
#else
                u64 lo;
                u64 hi;
#endif
        };
};

static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
{
        struct user_fpsimd_state const *fpsimd =
                &current->thread.uw.fpsimd_state;
        compat_ulong_t magic = VFP_MAGIC;
        compat_ulong_t size = VFP_STORAGE_SIZE;
        compat_ulong_t fpscr, fpexc;
        int i, err = 0;

        /*
         * Save the hardware registers to the fpsimd_state structure.
         * Note that this also saves V16-31, which aren't visible
         * in AArch32.
         */
        fpsimd_signal_preserve_current_state();

        /* Place structure header on the stack */
        __put_user_error(magic, &frame->magic, err);
        __put_user_error(size, &frame->size, err);

        /*
         * Now copy the FP registers. Since the registers are packed,
         * we can copy the prefix we want (V0-V15) as it is.
         */
        for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
                union __fpsimd_vreg vreg = {
                        .raw = fpsimd->vregs[i >> 1],
                };

                __put_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
                __put_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
        }

        /* Create an AArch32 fpscr from the fpsr and the fpcr. */
        fpscr = (fpsimd->fpsr & VFP_FPSCR_STAT_MASK) |
                (fpsimd->fpcr & VFP_FPSCR_CTRL_MASK);
        __put_user_error(fpscr, &frame->ufp.fpscr, err);

        /*
         * The exception registers aren't available, so we fake up a
         * basic FPEXC and zero everything else.
         */
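        /* FPEXC.EN is bit 30: report the FP/SIMD unit as enabled. */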
        fpexc = (1 << 30);
        __put_user_error(fpexc, &frame->ufp_exc.fpexc, err);
        __put_user_error(0, &frame->ufp_exc.fpinst, err);
        __put_user_error(0, &frame->ufp_exc.fpinst2, err);

        return err ? -EFAULT : 0;
}

static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
{
        struct user_fpsimd_state fpsimd;
        compat_ulong_t magic = VFP_MAGIC;
        compat_ulong_t size = VFP_STORAGE_SIZE;
        compat_ulong_t fpscr;
        int i, err = 0;

        __get_user_error(magic, &frame->magic, err);
        __get_user_error(size, &frame->size, err);

        if (err)
                return -EFAULT;
        if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
                return -EINVAL;

        /* Copy the FP registers into the start of the fpsimd_state. */
        for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
                union __fpsimd_vreg vreg;

                __get_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
                __get_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
                fpsimd.vregs[i >> 1] = vreg.raw;
        }

        /* Extract the fpsr and the fpcr from the fpscr */
        __get_user_error(fpscr, &frame->ufp.fpscr, err);
        fpsimd.fpsr = fpscr & VFP_FPSCR_STAT_MASK;
        fpsimd.fpcr = fpscr & VFP_FPSCR_CTRL_MASK;

        /*
         * We don't need to touch the exception register, so
         * reload the hardware state.
         */
        if (!err)
                fpsimd_update_current_state(&fpsimd);

        return err ? -EFAULT : 0;
}

static int compat_restore_sigframe(struct pt_regs *regs,
                                   struct compat_sigframe __user *sf)
{
        int err;
        sigset_t set;
        struct compat_aux_sigframe __user *aux;
        unsigned long psr;

        err = get_sigset_t(&set, &sf->uc.uc_sigmask);
        if (err == 0) {
                sigdelsetmask(&set, ~_BLOCKABLE);
                set_current_blocked(&set);
        }

        __get_user_error(regs->regs[0], &sf->uc.uc_mcontext.arm_r0, err);
        __get_user_error(regs->regs[1], &sf->uc.uc_mcontext.arm_r1, err);
        __get_user_error(regs->regs[2], &sf->uc.uc_mcontext.arm_r2, err);
        __get_user_error(regs->regs[3], &sf->uc.uc_mcontext.arm_r3, err);
        __get_user_error(regs->regs[4], &sf->uc.uc_mcontext.arm_r4, err);
        __get_user_error(regs->regs[5], &sf->uc.uc_mcontext.arm_r5, err);
        __get_user_error(regs->regs[6], &sf->uc.uc_mcontext.arm_r6, err);
        __get_user_error(regs->regs[7], &sf->uc.uc_mcontext.arm_r7, err);
        __get_user_error(regs->regs[8], &sf->uc.uc_mcontext.arm_r8, err);
        __get_user_error(regs->regs[9], &sf->uc.uc_mcontext.arm_r9, err);
        __get_user_error(regs->regs[10], &sf->uc.uc_mcontext.arm_r10, err);
        __get_user_error(regs->regs[11], &sf->uc.uc_mcontext.arm_fp, err);
        __get_user_error(regs->regs[12], &sf->uc.uc_mcontext.arm_ip, err);
        __get_user_error(regs->compat_sp, &sf->uc.uc_mcontext.arm_sp, err);
        __get_user_error(regs->compat_lr, &sf->uc.uc_mcontext.arm_lr, err);
        __get_user_error(regs->pc, &sf->uc.uc_mcontext.arm_pc, err);
        __get_user_error(psr, &sf->uc.uc_mcontext.arm_cpsr, err);

        regs->pstate = compat_psr_to_pstate(psr);

        /*
         * Avoid compat_sys_sigreturn() restarting.
         */
        forget_syscall(regs);
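
        /*
         * Fail if the restored register state isn't valid for a user task
         * (e.g. privileged PSTATE bits), as checked by valid_user_regs().
         */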
        err |= !valid_user_regs(&regs->user_regs, current);

        aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace;
        if (err == 0 && system_supports_fpsimd())
                err |= compat_restore_vfp_context(&aux->vfp);

        return err;
}

COMPAT_SYSCALL_DEFINE0(sigreturn)
{
        struct pt_regs *regs = current_pt_regs();
        struct compat_sigframe __user *frame;

        /* Always make any pending restarted system calls return -EINTR */
        current->restart_block.fn = do_no_restart_syscall;

        /*
         * Since we stacked the signal on a 64-bit boundary,
         * 'sp' should be double-word aligned here. If it's
         * not, then the user is trying to mess with us.
         */
        if (regs->compat_sp & 7)
                goto badframe;

        frame = (struct compat_sigframe __user *)regs->compat_sp;

        if (!access_ok(frame, sizeof(*frame)))
                goto badframe;

        if (compat_restore_sigframe(regs, frame))
                goto badframe;
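
        /*
         * Return the restored r0, so that writing the syscall return value
         * back to r0 preserves what the sigframe put there.
         */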
        return regs->regs[0];

badframe:
        arm64_notify_segfault(regs->compat_sp);
        return 0;
}

COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
{
        struct pt_regs *regs = current_pt_regs();
        struct compat_rt_sigframe __user *frame;

        /* Always make any pending restarted system calls return -EINTR */
        current->restart_block.fn = do_no_restart_syscall;

        /*
         * Since we stacked the signal on a 64-bit boundary,
         * 'sp' should be double-word aligned here. If it's
         * not, then the user is trying to mess with us.
         */
        if (regs->compat_sp & 7)
                goto badframe;

        frame = (struct compat_rt_sigframe __user *)regs->compat_sp;

        if (!access_ok(frame, sizeof(*frame)))
                goto badframe;

        if (compat_restore_sigframe(regs, &frame->sig))
                goto badframe;

        if (compat_restore_altstack(&frame->sig.uc.uc_stack))
                goto badframe;

        return regs->regs[0];

badframe:
        arm64_notify_segfault(regs->compat_sp);
        return 0;
}
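
/*
 * Find a usable stack for the signal frame: sigsp() honours SA_ONSTACK and
 * the alternate signal stack, and the frame is then aligned down to 8 bytes.
 */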
static void __user *compat_get_sigframe(struct ksignal *ksig,
                                        struct pt_regs *regs,
                                        int framesize)
{
        compat_ulong_t sp = sigsp(regs->compat_sp, ksig);
        void __user *frame;

        /*
         * ATPCS B01 mandates 8-byte alignment
         */
        frame = compat_ptr((compat_uptr_t)((sp - framesize) & ~7));

        /*
         * Check that we can actually write to the signal frame.
         */
        if (!access_ok(frame, framesize))
                frame = NULL;

        return frame;
}

static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
                                compat_ulong_t __user *rc, void __user *frame,
                                int usig)
{
        compat_ulong_t handler = ptr_to_compat(ka->sa.sa_handler);
        compat_ulong_t retcode;
        compat_ulong_t spsr = regs->pstate & ~(PSR_f | PSR_AA32_E_BIT);
        int thumb;

        /* Check if the handler is written for ARM or Thumb */
        thumb = handler & 1;

        if (thumb)
                spsr |= PSR_AA32_T_BIT;
        else
                spsr &= ~PSR_AA32_T_BIT;

        /* The IT state must be cleared for both ARM and Thumb-2 */
        spsr &= ~PSR_AA32_IT_MASK;

        /* Restore the original endianness */
        spsr |= PSR_AA32_ENDSTATE;

        if (ka->sa.sa_flags & SA_RESTORER) {
                retcode = ptr_to_compat(ka->sa.sa_restorer);
        } else {
                /* Set up sigreturn pointer */
                unsigned int idx = thumb << 1;

                if (ka->sa.sa_flags & SA_SIGINFO)
                        idx += 3;
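
                /*
                 * idx selects one of the sigreturn trampolines in the
                 * sigpage (ARM vs Thumb, sigreturn vs rt_sigreturn), and
                 * adding 'thumb' sets bit 0 so the trampoline is entered
                 * in Thumb state.
                 */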
                retcode = (unsigned long)current->mm->context.sigpage +
                          (idx << 2) + thumb;
        }

        regs->regs[0] = usig;
        regs->compat_sp = ptr_to_compat(frame);
        regs->compat_lr = retcode;
        regs->pc = handler;
        regs->pstate = spsr;
}

static int compat_setup_sigframe(struct compat_sigframe __user *sf,
                                 struct pt_regs *regs, sigset_t *set)
{
        struct compat_aux_sigframe __user *aux;
        unsigned long psr = pstate_to_compat_psr(regs->pstate);
        int err = 0;

        __put_user_error(regs->regs[0], &sf->uc.uc_mcontext.arm_r0, err);
        __put_user_error(regs->regs[1], &sf->uc.uc_mcontext.arm_r1, err);
        __put_user_error(regs->regs[2], &sf->uc.uc_mcontext.arm_r2, err);
        __put_user_error(regs->regs[3], &sf->uc.uc_mcontext.arm_r3, err);
        __put_user_error(regs->regs[4], &sf->uc.uc_mcontext.arm_r4, err);
        __put_user_error(regs->regs[5], &sf->uc.uc_mcontext.arm_r5, err);
        __put_user_error(regs->regs[6], &sf->uc.uc_mcontext.arm_r6, err);
        __put_user_error(regs->regs[7], &sf->uc.uc_mcontext.arm_r7, err);
        __put_user_error(regs->regs[8], &sf->uc.uc_mcontext.arm_r8, err);
        __put_user_error(regs->regs[9], &sf->uc.uc_mcontext.arm_r9, err);
        __put_user_error(regs->regs[10], &sf->uc.uc_mcontext.arm_r10, err);
        __put_user_error(regs->regs[11], &sf->uc.uc_mcontext.arm_fp, err);
        __put_user_error(regs->regs[12], &sf->uc.uc_mcontext.arm_ip, err);
        __put_user_error(regs->compat_sp, &sf->uc.uc_mcontext.arm_sp, err);
        __put_user_error(regs->compat_lr, &sf->uc.uc_mcontext.arm_lr, err);
        __put_user_error(regs->pc, &sf->uc.uc_mcontext.arm_pc, err);
        __put_user_error(psr, &sf->uc.uc_mcontext.arm_cpsr, err);

        __put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.trap_no, err);
        /* set the compat FSR WnR */
        __put_user_error(!!(current->thread.fault_code & ESR_ELx_WNR) <<
                         FSR_WRITE_SHIFT, &sf->uc.uc_mcontext.error_code, err);
        __put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);
        __put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);

        err |= put_sigset_t(&sf->uc.uc_sigmask, set);

        aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace;

        if (err == 0 && system_supports_fpsimd())
                err |= compat_preserve_vfp_context(&aux->vfp);
        __put_user_error(0, &aux->end_magic, err);

        return err;
}

/*
 * 32-bit signal handling routines called from signal.c
 */
int compat_setup_rt_frame(int usig, struct ksignal *ksig,
                          sigset_t *set, struct pt_regs *regs)
{
        struct compat_rt_sigframe __user *frame;
        int err = 0;

        frame = compat_get_sigframe(ksig, regs, sizeof(*frame));

        if (!frame)
                return 1;

        err |= copy_siginfo_to_user32(&frame->info, &ksig->info);

        __put_user_error(0, &frame->sig.uc.uc_flags, err);
        __put_user_error(0, &frame->sig.uc.uc_link, err);

        err |= __compat_save_altstack(&frame->sig.uc.uc_stack, regs->compat_sp);

        err |= compat_setup_sigframe(&frame->sig, regs, set);

        if (err == 0) {
                compat_setup_return(regs, &ksig->ka, frame->sig.retcode, frame, usig);
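                /*
                 * Together with r0 (set by compat_setup_return()), r1 and r2
                 * form the SA_SIGINFO handler arguments:
                 * (signo, siginfo_t *, ucontext *).
                 */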
                regs->regs[1] = (compat_ulong_t)(unsigned long)&frame->info;
                regs->regs[2] = (compat_ulong_t)(unsigned long)&frame->sig.uc;
        }

        return err;
}

int compat_setup_frame(int usig, struct ksignal *ksig, sigset_t *set,
                       struct pt_regs *regs)
{
        struct compat_sigframe __user *frame;
        int err = 0;

        frame = compat_get_sigframe(ksig, regs, sizeof(*frame));

        if (!frame)
                return 1;

        __put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);

        err |= compat_setup_sigframe(frame, regs, set);
        if (err == 0)
                compat_setup_return(regs, &ksig->ka, frame->retcode, frame, usig);

        return err;
}
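
/*
 * On AArch32, r7 holds the syscall number (EABI), so a restarted system
 * call is set up by loading r7 with __NR_compat_restart_syscall.
 */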
void compat_setup_restart_syscall(struct pt_regs *regs)
{
        regs->regs[7] = __NR_compat_restart_syscall;
}