arch/arm64/kernel/signal.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/signal.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/freezer.h>
#include <linux/stddef.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/rseq.h>
#include <linux/syscalls.h>
#include <linux/pkeys.h>

#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/elf.h>
#include <asm/exception.h>
#include <asm/cacheflush.h>
#include <asm/gcs.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/signal32.h>
#include <asm/traps.h>
#include <asm/vdso.h>

#ifdef CONFIG_ARM64_GCS
#define GCS_SIGNAL_CAP(addr) (((unsigned long)addr) & GCS_CAP_ADDR_MASK)

static bool gcs_signal_cap_valid(u64 addr, u64 val)
{
	return val == GCS_SIGNAL_CAP(addr);
}
#endif
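
/*
 * Note: gcs_signal_entry() below pushes a cap token (the masked address
 * of the slot it occupies) onto the shadow stack when a handler is
 * delivered; gcs_restore_signal() then uses gcs_signal_cap_valid() to
 * check that GCSPR_EL0 still points at that token before consuming it
 * on sigreturn.
 */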

/*
 * Do a signal return; undo the signal stack. These are aligned to 128-bit.
 */
struct rt_sigframe {
	struct siginfo info;
	struct ucontext uc;
};

struct rt_sigframe_user_layout {
	struct rt_sigframe __user *sigframe;
	struct frame_record __user *next_frame;

	unsigned long size;	/* size of allocated sigframe data */
	unsigned long limit;	/* largest allowed size */

	unsigned long fpsimd_offset;
	unsigned long esr_offset;
	unsigned long gcs_offset;
	unsigned long sve_offset;
	unsigned long tpidr2_offset;
	unsigned long za_offset;
	unsigned long zt_offset;
	unsigned long fpmr_offset;
	unsigned long poe_offset;
	unsigned long extra_offset;
	unsigned long end_offset;
};
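
/*
 * The *_offset fields above are byte offsets from the start of the
 * sigframe.  They are computed in a first pass by setup_sigframe_layout()
 * (via sigframe_alloc()) and then used by setup_sigframe(), through
 * apply_user_offset(), to place each record in the user frame.
 */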

/*
 * Holds any EL0-controlled state that influences unprivileged memory accesses.
 * This includes both accesses done in userspace and uaccess done in the kernel.
 *
 * This state needs to be carefully managed to ensure that it doesn't cause
 * uaccess to fail when setting up the signal frame, and the signal handler
 * itself also expects a well-defined state when entered.
 */
struct user_access_state {
	u64 por_el0;
};

#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)

/*
 * Save the user access state into ua_state and reset it to disable any
 * restrictions.
 */
static void save_reset_user_access_state(struct user_access_state *ua_state)
{
	if (system_supports_poe()) {
		u64 por_enable_all = 0;

		for (int pkey = 0; pkey < arch_max_pkey(); pkey++)
			por_enable_all |= POE_RXW << (pkey * POR_BITS_PER_PKEY);

		ua_state->por_el0 = read_sysreg_s(SYS_POR_EL0);
		write_sysreg_s(por_enable_all, SYS_POR_EL0);
		/* Ensure that any subsequent uaccess observes the updated value */
		isb();
	}
}

/*
 * Set the user access state for invoking the signal handler.
 *
 * No uaccess should be done after that function is called.
 */
static void set_handler_user_access_state(void)
{
	if (system_supports_poe())
		write_sysreg_s(POR_EL0_INIT, SYS_POR_EL0);
}

/*
 * Restore the user access state to the values saved in ua_state.
 *
 * No uaccess should be done after that function is called.
 */
static void restore_user_access_state(const struct user_access_state *ua_state)
{
	if (system_supports_poe())
		write_sysreg_s(ua_state->por_el0, SYS_POR_EL0);
}

static void init_user_layout(struct rt_sigframe_user_layout *user)
{
	const size_t reserved_size =
		sizeof(user->sigframe->uc.uc_mcontext.__reserved);

	memset(user, 0, sizeof(*user));
	user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved);

	user->limit = user->size + reserved_size;

	user->limit -= TERMINATOR_SIZE;
	user->limit -= EXTRA_CONTEXT_SIZE;
	/* Reserve space for extension and terminator ^ */
}

static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
{
	return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
}

/*
 * Sanity limit on the approximate maximum size of signal frame we'll
 * try to generate.  Stack alignment padding and the frame record are
 * not taken into account.  This limit is not a guarantee and is
 * NOT ABI.
 */
#define SIGFRAME_MAXSZ SZ_256K

static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
			    unsigned long *offset, size_t size, bool extend)
{
	size_t padded_size = round_up(size, 16);

	if (padded_size > user->limit - user->size &&
	    !user->extra_offset &&
	    extend) {
		int ret;

		user->limit += EXTRA_CONTEXT_SIZE;
		ret = __sigframe_alloc(user, &user->extra_offset,
				       sizeof(struct extra_context), false);
		if (ret) {
			user->limit -= EXTRA_CONTEXT_SIZE;
			return ret;
		}

		/* Reserve space for the __reserved[] terminator */
		user->size += TERMINATOR_SIZE;

		/*
		 * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for
		 * the terminator:
		 */
		user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE;
	}

	/* Still not enough space?  Bad luck! */
	if (padded_size > user->limit - user->size)
		return -ENOMEM;

	*offset = user->size;
	user->size += padded_size;

	return 0;
}
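
/*
 * Illustration: the base frame only has sizeof(__reserved) bytes (4096 in
 * the uapi sigcontext) for optional records.  If, say, a large SVE or ZA
 * record no longer fits, the first oversized request allocates an
 * extra_context record in the base area and all further space is
 * accounted beyond the base frame, bounded by SIGFRAME_MAXSZ.
 */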

/*
 * Allocate space for an optional record of <size> bytes in the user
 * signal frame.  The offset from the signal frame base address to the
 * allocated block is assigned to *offset.
 */
static int sigframe_alloc(struct rt_sigframe_user_layout *user,
			  unsigned long *offset, size_t size)
{
	return __sigframe_alloc(user, offset, size, true);
}

/* Allocate the null terminator record and prevent further allocations */
static int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
{
	int ret;

	/* Un-reserve the space reserved for the terminator: */
	user->limit += TERMINATOR_SIZE;

	ret = sigframe_alloc(user, &user->end_offset,
			     sizeof(struct _aarch64_ctx));
	if (ret)
		return ret;

	/* Prevent further allocation: */
	user->limit = user->size;
	return 0;
}

static void __user *apply_user_offset(
	struct rt_sigframe_user_layout const *user, unsigned long offset)
{
	char __user *base = (char __user *)user->sigframe;

	return base + offset;
}

struct user_ctxs {
	struct fpsimd_context __user *fpsimd;
	u32 fpsimd_size;
	struct sve_context __user *sve;
	u32 sve_size;
	struct tpidr2_context __user *tpidr2;
	u32 tpidr2_size;
	struct za_context __user *za;
	u32 za_size;
	struct zt_context __user *zt;
	u32 zt_size;
	struct fpmr_context __user *fpmr;
	u32 fpmr_size;
	struct poe_context __user *poe;
	u32 poe_size;
	struct gcs_context __user *gcs;
	u32 gcs_size;
};
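
/*
 * parse_user_sigframe() fills this in: each pointer is either NULL (the
 * record was not present in the frame) or points into the user frame,
 * with the corresponding *_size taken from the record header so that the
 * restore_*_context() helpers can validate it.
 */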

static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
{
	struct user_fpsimd_state const *fpsimd =
		&current->thread.uw.fpsimd_state;
	int err;

	/* copy the FP and status/control registers */
	err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
	__put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
	__put_user_error(fpsimd->fpcr, &ctx->fpcr, err);

	/* copy the magic/size information */
	__put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err);

	return err ? -EFAULT : 0;
}

static int restore_fpsimd_context(struct user_ctxs *user)
{
	struct user_fpsimd_state fpsimd;
	int err = 0;

	/* check the size information */
	if (user->fpsimd_size != sizeof(struct fpsimd_context))
		return -EINVAL;

	/* copy the FP and status/control registers */
	err = __copy_from_user(fpsimd.vregs, &(user->fpsimd->vregs),
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &(user->fpsimd->fpsr), err);
	__get_user_error(fpsimd.fpcr, &(user->fpsimd->fpcr), err);

	clear_thread_flag(TIF_SVE);
	current->thread.fp_type = FP_STATE_FPSIMD;

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}

static int preserve_fpmr_context(struct fpmr_context __user *ctx)
{
	int err = 0;

	current->thread.uw.fpmr = read_sysreg_s(SYS_FPMR);

	__put_user_error(FPMR_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(current->thread.uw.fpmr, &ctx->fpmr, err);

	return err;
}

static int restore_fpmr_context(struct user_ctxs *user)
{
	u64 fpmr;
	int err = 0;

	if (user->fpmr_size != sizeof(*user->fpmr))
		return -EINVAL;

	__get_user_error(fpmr, &user->fpmr->fpmr, err);
	if (!err)
		write_sysreg_s(fpmr, SYS_FPMR);

	return err;
}

static int preserve_poe_context(struct poe_context __user *ctx,
				const struct user_access_state *ua_state)
{
	int err = 0;

	__put_user_error(POE_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(ua_state->por_el0, &ctx->por_el0, err);

	return err;
}

static int restore_poe_context(struct user_ctxs *user,
			       struct user_access_state *ua_state)
{
	u64 por_el0;
	int err = 0;

	if (user->poe_size != sizeof(*user->poe))
		return -EINVAL;

	__get_user_error(por_el0, &(user->poe->por_el0), err);
	if (!err)
		ua_state->por_el0 = por_el0;

	return err;
}

#ifdef CONFIG_ARM64_SVE

static int preserve_sve_context(struct sve_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	u16 flags = 0;
	unsigned int vl = task_get_sve_vl(current);
	unsigned int vq = 0;

	if (thread_sm_enabled(&current->thread)) {
		vl = task_get_sme_vl(current);
		vq = sve_vq_from_vl(vl);
		flags |= SVE_SIG_FLAG_SM;
	} else if (current->thread.fp_type == FP_STATE_SVE) {
		vq = sve_vq_from_vl(vl);
	}

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(SVE_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	__put_user_error(flags, &ctx->flags, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		/*
		 * This assumes that the SVE state has already been saved to
		 * the task struct by calling the function
		 * fpsimd_signal_preserve_current_state().
		 */
		err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
				      current->thread.sve_state,
				      SVE_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	int err = 0;
	unsigned int vl, vq;
	struct user_fpsimd_state fpsimd;
	u16 user_vl, flags;

	if (user->sve_size < sizeof(*user->sve))
		return -EINVAL;

	__get_user_error(user_vl, &(user->sve->vl), err);
	__get_user_error(flags, &(user->sve->flags), err);
	if (err)
		return err;

	if (flags & SVE_SIG_FLAG_SM) {
		if (!system_supports_sme())
			return -EINVAL;

		vl = task_get_sme_vl(current);
	} else {
		/*
		 * An SME-only system uses SVE for streaming mode, so it
		 * can have an SVE-formatted context with a zero VL and
		 * no payload data.
		 */
		if (!system_supports_sve() && !system_supports_sme())
			return -EINVAL;

		vl = task_get_sve_vl(current);
	}

	if (user_vl != vl)
		return -EINVAL;

	if (user->sve_size == sizeof(*user->sve)) {
		clear_thread_flag(TIF_SVE);
		current->thread.svcr &= ~SVCR_SM_MASK;
		current->thread.fp_type = FP_STATE_FPSIMD;
		goto fpsimd_only;
	}

	vq = sve_vq_from_vl(vl);

	if (user->sve_size < SVE_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.sve_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch thread.sve_state */

	sve_alloc(current, true);
	if (!current->thread.sve_state) {
		clear_thread_flag(TIF_SVE);
		return -ENOMEM;
	}

	err = __copy_from_user(current->thread.sve_state,
			       (char __user const *)user->sve +
					SVE_SIG_REGS_OFFSET,
			       SVE_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	if (flags & SVE_SIG_FLAG_SM)
		current->thread.svcr |= SVCR_SM_MASK;
	else
		set_thread_flag(TIF_SVE);
	current->thread.fp_type = FP_STATE_SVE;

fpsimd_only:
	/* copy the FP and status/control registers */
	/* restore_sigframe() already checked that user->fpsimd != NULL. */
	err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs,
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err);
	__get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err);

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}

#else /* ! CONFIG_ARM64_SVE */

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	WARN_ON_ONCE(1);
	return -EINVAL;
}

/* Turn any non-optimised out attempts to use this into a link error: */
extern int preserve_sve_context(void __user *ctx);

#endif /* ! CONFIG_ARM64_SVE */
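
/*
 * Note: the sve_context record doubles as the container for streaming
 * mode SVE state when SME is in use.  preserve_sve_context() sets
 * SVE_SIG_FLAG_SM and reports the SME vector length in that case, and
 * restore_sve_fpsimd_context() re-enters streaming mode rather than
 * setting TIF_SVE.
 */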

#ifdef CONFIG_ARM64_SME

static int preserve_tpidr2_context(struct tpidr2_context __user *ctx)
{
	int err = 0;

	current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);

	__put_user_error(TPIDR2_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(current->thread.tpidr2_el0, &ctx->tpidr2, err);

	return err;
}

static int restore_tpidr2_context(struct user_ctxs *user)
{
	u64 tpidr2_el0;
	int err = 0;

	if (user->tpidr2_size != sizeof(*user->tpidr2))
		return -EINVAL;

	__get_user_error(tpidr2_el0, &user->tpidr2->tpidr2, err);
	if (!err)
		write_sysreg_s(tpidr2_el0, SYS_TPIDR2_EL0);

	return err;
}

static int preserve_za_context(struct za_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	unsigned int vl = task_get_sme_vl(current);
	unsigned int vq;

	if (thread_za_enabled(&current->thread))
		vq = sve_vq_from_vl(vl);
	else
		vq = 0;

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(ZA_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(ZA_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		/*
		 * This assumes that the ZA state has already been saved to
		 * the task struct by calling the function
		 * fpsimd_signal_preserve_current_state().
		 */
		err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET,
				      current->thread.sme_state,
				      ZA_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}

static int restore_za_context(struct user_ctxs *user)
{
	int err = 0;
	unsigned int vq;
	u16 user_vl;

	if (user->za_size < sizeof(*user->za))
		return -EINVAL;

	__get_user_error(user_vl, &(user->za->vl), err);
	if (err)
		return err;

	if (user_vl != task_get_sme_vl(current))
		return -EINVAL;

	if (user->za_size == sizeof(*user->za)) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		return 0;
	}

	vq = sve_vq_from_vl(user_vl);

	if (user->za_size < ZA_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.sme_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch thread.sme_state */

	sme_alloc(current, true);
	if (!current->thread.sme_state) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		clear_thread_flag(TIF_SME);
		return -ENOMEM;
	}

	err = __copy_from_user(current->thread.sme_state,
			       (char __user const *)user->za +
					ZA_SIG_REGS_OFFSET,
			       ZA_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	set_thread_flag(TIF_SME);
	current->thread.svcr |= SVCR_ZA_MASK;

	return 0;
}

static int preserve_zt_context(struct zt_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];

	if (WARN_ON(!thread_za_enabled(&current->thread)))
		return -EINVAL;

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(ZT_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(ZT_SIG_CONTEXT_SIZE(1), 16),
			 &ctx->head.size, err);
	__put_user_error(1, &ctx->nregs, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	/*
	 * This assumes that the ZT state has already been saved to
	 * the task struct by calling the function
	 * fpsimd_signal_preserve_current_state().
	 */
	err |= __copy_to_user((char __user *)ctx + ZT_SIG_REGS_OFFSET,
			      thread_zt_state(&current->thread),
			      ZT_SIG_REGS_SIZE(1));

	return err ? -EFAULT : 0;
}

static int restore_zt_context(struct user_ctxs *user)
{
	int err;
	u16 nregs;

	/* ZA must be restored first for this check to be valid */
	if (!thread_za_enabled(&current->thread))
		return -EINVAL;

	if (user->zt_size != ZT_SIG_CONTEXT_SIZE(1))
		return -EINVAL;

	if (__copy_from_user(&nregs, &(user->zt->nregs), sizeof(nregs)))
		return -EFAULT;

	if (nregs != 1)
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.zt_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch ZT in thread state */

	err = __copy_from_user(thread_zt_state(&current->thread),
			       (char __user const *)user->zt +
					ZT_SIG_REGS_OFFSET,
			       ZT_SIG_REGS_SIZE(1));
	if (err)
		return -EFAULT;

	return 0;
}

#else /* ! CONFIG_ARM64_SME */

/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_tpidr2_context(void __user *ctx);
extern int restore_tpidr2_context(struct user_ctxs *user);
extern int preserve_za_context(void __user *ctx);
extern int restore_za_context(struct user_ctxs *user);
extern int preserve_zt_context(void __user *ctx);
extern int restore_zt_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_SME */
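
/*
 * Note on the SME records: tpidr2_context carries TPIDR2_EL0, za_context
 * carries the ZA array (the payload is only written while PSTATE.ZA is
 * enabled; otherwise only the header is emitted), and zt_context carries
 * ZT0 on SME2.  On restore, ZA is handled before ZT because a ZT record
 * is only valid while ZA is enabled.
 */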

#ifdef CONFIG_ARM64_GCS

static int preserve_gcs_context(struct gcs_context __user *ctx)
{
	int err = 0;
	u64 gcspr = read_sysreg_s(SYS_GCSPR_EL0);

	/*
	 * If GCS is enabled we will add a cap token to the frame and
	 * include it in the GCSPR_EL0 we report, to support stack
	 * switching via sigreturn.  We do not allow enabling GCS via
	 * sigreturn, so the token is only relevant for threads that
	 * already have GCS enabled.
	 */
	if (task_gcs_el0_enabled(current))
		gcspr -= 8;

	__put_user_error(GCS_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(gcspr, &ctx->gcspr, err);
	__put_user_error(0, &ctx->reserved, err);
	__put_user_error(current->thread.gcs_el0_mode,
			 &ctx->features_enabled, err);

	return err;
}

static int restore_gcs_context(struct user_ctxs *user)
{
	u64 gcspr, enabled;
	int err = 0;

	if (user->gcs_size != sizeof(*user->gcs))
		return -EINVAL;

	__get_user_error(gcspr, &user->gcs->gcspr, err);
	__get_user_error(enabled, &user->gcs->features_enabled, err);
	if (err)
		return err;

	/* Don't allow unknown modes */
	if (enabled & ~PR_SHADOW_STACK_SUPPORTED_STATUS_MASK)
		return -EINVAL;

	err = gcs_check_locked(current, enabled);
	if (err != 0)
		return err;

	/* Don't allow enabling */
	if (!task_gcs_el0_enabled(current) &&
	    (enabled & PR_SHADOW_STACK_ENABLE))
		return -EINVAL;

	/* If we are disabling, disable everything */
	if (!(enabled & PR_SHADOW_STACK_ENABLE))
		enabled = 0;

	current->thread.gcs_el0_mode = enabled;

	/*
	 * We let userspace set GCSPR_EL0 to anything here, we will
	 * validate later in gcs_restore_signal().
	 */
	write_sysreg_s(gcspr, SYS_GCSPR_EL0);

	return 0;
}

#else /* ! CONFIG_ARM64_GCS */

/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_gcs_context(void __user *ctx);
extern int restore_gcs_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_GCS */

static int parse_user_sigframe(struct user_ctxs *user,
			       struct rt_sigframe __user *sf)
{
	struct sigcontext __user *const sc = &sf->uc.uc_mcontext;
	struct _aarch64_ctx __user *head;
	char __user *base = (char __user *)&sc->__reserved;
	size_t offset = 0;
	size_t limit = sizeof(sc->__reserved);
	bool have_extra_context = false;
	char const __user *const sfp = (char const __user *)sf;

	user->fpsimd = NULL;
	user->sve = NULL;
	user->tpidr2 = NULL;
	user->za = NULL;
	user->zt = NULL;
	user->fpmr = NULL;
	user->poe = NULL;
	user->gcs = NULL;

	if (!IS_ALIGNED((unsigned long)base, 16))
		goto invalid;

	while (1) {
		int err = 0;
		u32 magic, size;
		char const __user *userp;
		struct extra_context const __user *extra;
		u64 extra_datap;
		u32 extra_size;
		struct _aarch64_ctx const __user *end;
		u32 end_magic, end_size;

		if (limit - offset < sizeof(*head))
			goto invalid;

		if (!IS_ALIGNED(offset, 16))
			goto invalid;

		head = (struct _aarch64_ctx __user *)(base + offset);
		__get_user_error(magic, &head->magic, err);
		__get_user_error(size, &head->size, err);
		if (err)
			return err;

		if (limit - offset < size)
			goto invalid;

		switch (magic) {
		case 0:
			if (size)
				goto invalid;

			goto done;

		case FPSIMD_MAGIC:
			if (!system_supports_fpsimd())
				goto invalid;
			if (user->fpsimd)
				goto invalid;

			user->fpsimd = (struct fpsimd_context __user *)head;
			user->fpsimd_size = size;
			break;

		case ESR_MAGIC:
			/* ignore */
			break;

		case POE_MAGIC:
			if (!system_supports_poe())
				goto invalid;

			if (user->poe)
				goto invalid;

			user->poe = (struct poe_context __user *)head;
			user->poe_size = size;
			break;

		case SVE_MAGIC:
			if (!system_supports_sve() && !system_supports_sme())
				goto invalid;

			if (user->sve)
				goto invalid;

			user->sve = (struct sve_context __user *)head;
			user->sve_size = size;
			break;

		case TPIDR2_MAGIC:
			if (!system_supports_tpidr2())
				goto invalid;

			if (user->tpidr2)
				goto invalid;

			user->tpidr2 = (struct tpidr2_context __user *)head;
			user->tpidr2_size = size;
			break;

		case ZA_MAGIC:
			if (!system_supports_sme())
				goto invalid;

			if (user->za)
				goto invalid;

			user->za = (struct za_context __user *)head;
			user->za_size = size;
			break;

		case ZT_MAGIC:
			if (!system_supports_sme2())
				goto invalid;

			if (user->zt)
				goto invalid;

			user->zt = (struct zt_context __user *)head;
			user->zt_size = size;
			break;

		case FPMR_MAGIC:
			if (!system_supports_fpmr())
				goto invalid;

			if (user->fpmr)
				goto invalid;

			user->fpmr = (struct fpmr_context __user *)head;
			user->fpmr_size = size;
			break;

		case GCS_MAGIC:
			if (!system_supports_gcs())
				goto invalid;

			if (user->gcs)
				goto invalid;

			user->gcs = (struct gcs_context __user *)head;
			user->gcs_size = size;
			break;

		case EXTRA_MAGIC:
			if (have_extra_context)
				goto invalid;

			if (size < sizeof(*extra))
				goto invalid;

			userp = (char const __user *)head;

			extra = (struct extra_context const __user *)userp;
			userp += size;

			__get_user_error(extra_datap, &extra->datap, err);
			__get_user_error(extra_size, &extra->size, err);
			if (err)
				return err;

			/* Check for the dummy terminator in __reserved[]: */

			if (limit - offset - size < TERMINATOR_SIZE)
				goto invalid;

			end = (struct _aarch64_ctx const __user *)userp;
			userp += TERMINATOR_SIZE;

			__get_user_error(end_magic, &end->magic, err);
			__get_user_error(end_size, &end->size, err);
			if (err)
				return err;

			if (end_magic || end_size)
				goto invalid;

			/* Prevent looping/repeated parsing of extra_context */
			have_extra_context = true;

			base = (__force void __user *)extra_datap;
			if (!IS_ALIGNED((unsigned long)base, 16))
				goto invalid;

			if (!IS_ALIGNED(extra_size, 16))
				goto invalid;

			if (base != userp)
				goto invalid;

			/* Reject "unreasonably large" frames: */
			if (extra_size > sfp + SIGFRAME_MAXSZ - userp)
				goto invalid;

			/*
			 * Ignore trailing terminator in __reserved[]
			 * and start parsing extra data:
			 */
			offset = 0;
			limit = extra_size;

			if (!access_ok(base, limit))
				goto invalid;

			continue;

		default:
			goto invalid;
		}

		if (size < sizeof(*head))
			goto invalid;

		if (limit - offset < size)
			goto invalid;

		offset += size;
	}

done:
	return 0;

invalid:
	return -EINVAL;
}
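
/*
 * Worked example of a frame walk: the base __reserved[] area typically
 * holds fpsimd_context, esr_context and other fixed-size records,
 * followed either by a zero terminator or by an extra_context record.
 * When extra_context is seen, parsing resumes at extra->datap (which must
 * sit immediately after the extra record and its dummy terminator) and
 * finishes at the zero terminator found there.
 */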

static int restore_sigframe(struct pt_regs *regs,
			    struct rt_sigframe __user *sf,
			    struct user_access_state *ua_state)
{
	sigset_t set;
	int i, err;
	struct user_ctxs user;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	for (i = 0; i < 31; i++)
		__get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	/*
	 * Avoid sys_rt_sigreturn() restarting.
	 */
	forget_syscall(regs);

	err |= !valid_user_regs(&regs->user_regs, current);
	if (err == 0)
		err = parse_user_sigframe(&user, sf);

	if (err == 0 && system_supports_fpsimd()) {
		if (!user.fpsimd)
			return -EINVAL;

		if (user.sve)
			err = restore_sve_fpsimd_context(&user);
		else
			err = restore_fpsimd_context(&user);
	}

	if (err == 0 && system_supports_gcs() && user.gcs)
		err = restore_gcs_context(&user);

	if (err == 0 && system_supports_tpidr2() && user.tpidr2)
		err = restore_tpidr2_context(&user);

	if (err == 0 && system_supports_fpmr() && user.fpmr)
		err = restore_fpmr_context(&user);

	if (err == 0 && system_supports_sme() && user.za)
		err = restore_za_context(&user);

	if (err == 0 && system_supports_sme2() && user.zt)
		err = restore_zt_context(&user);

	if (err == 0 && system_supports_poe() && user.poe)
		err = restore_poe_context(&user, ua_state);

	return err;
}

#ifdef CONFIG_ARM64_GCS
static int gcs_restore_signal(void)
{
	unsigned long __user *gcspr_el0;
	u64 cap;
	int ret;

	if (!system_supports_gcs())
		return 0;

	if (!(current->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE))
		return 0;

	gcspr_el0 = (unsigned long __user *)read_sysreg_s(SYS_GCSPR_EL0);

	/*
	 * Ensure that any changes to the GCS done via GCS operations
	 * are visible to the normal reads we do to validate the
	 * token.
	 */
	gcsb_dsync();

	/*
	 * GCSPR_EL0 should be pointing at a capped GCS, read the cap.
	 * We don't enforce that this is in a GCS page, if it is not
	 * then faults will be generated on GCS operations - the main
	 * concern is to protect GCS pages.
	 */
	ret = copy_from_user(&cap, gcspr_el0, sizeof(cap));
	if (ret)
		return -EFAULT;

	/*
	 * Check that the cap is the actual GCS before replacing it.
	 */
	if (!gcs_signal_cap_valid((u64)gcspr_el0, cap))
		return -EINVAL;

	/* Invalidate the token to prevent reuse */
	put_user_gcs(0, (__user void*)gcspr_el0, &ret);
	if (ret != 0)
		return -EFAULT;

	write_sysreg_s(gcspr_el0 + 1, SYS_GCSPR_EL0);

	return 0;
}
#else
static int gcs_restore_signal(void) { return 0; }
#endif

SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe __user *frame;
	struct user_access_state ua_state;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 128-bit boundary, 'sp' should
	 * be 16-byte aligned here.
	 */
	if (regs->sp & 15)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->sp;

	if (!access_ok(frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame, &ua_state))
		goto badframe;

	if (gcs_restore_signal())
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	restore_user_access_state(&ua_state);

	return regs->regs[0];

badframe:
	arm64_notify_segfault(regs->sp);
	return 0;
}
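
/*
 * Note: rt_sigreturn() returns the restored x0 (regs->regs[0]) so that
 * the interrupted context resumes with the value recorded in the frame;
 * on any validation failure the task gets a SIGSEGV via
 * arm64_notify_segfault() instead.
 */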

/*
 * Determine the layout of optional records in the signal frame
 *
 * add_all: if true, lays out the biggest possible signal frame for
 * this task; otherwise, generates a layout for the current state
 * of the task.
 */
static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
				 bool add_all)
{
	int err;

	if (system_supports_fpsimd()) {
		err = sigframe_alloc(user, &user->fpsimd_offset,
				     sizeof(struct fpsimd_context));
		if (err)
			return err;
	}

	/* fault information, if valid */
	if (add_all || current->thread.fault_code) {
		err = sigframe_alloc(user, &user->esr_offset,
				     sizeof(struct esr_context));
		if (err)
			return err;
	}

#ifdef CONFIG_ARM64_GCS
	if (system_supports_gcs() && (add_all || current->thread.gcspr_el0)) {
		err = sigframe_alloc(user, &user->gcs_offset,
				     sizeof(struct gcs_context));
		if (err)
			return err;
	}
#endif

	if (system_supports_sve() || system_supports_sme()) {
		unsigned int vq = 0;

		if (add_all || current->thread.fp_type == FP_STATE_SVE ||
		    thread_sm_enabled(&current->thread)) {
			int vl = max(sve_max_vl(), sme_max_vl());

			if (!add_all)
				vl = thread_get_cur_vl(&current->thread);

			vq = sve_vq_from_vl(vl);
		}

		err = sigframe_alloc(user, &user->sve_offset,
				     SVE_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	if (system_supports_tpidr2()) {
		err = sigframe_alloc(user, &user->tpidr2_offset,
				     sizeof(struct tpidr2_context));
		if (err)
			return err;
	}

	if (system_supports_sme()) {
		unsigned int vl;
		unsigned int vq = 0;

		if (add_all)
			vl = sme_max_vl();
		else
			vl = task_get_sme_vl(current);

		if (thread_za_enabled(&current->thread))
			vq = sve_vq_from_vl(vl);

		err = sigframe_alloc(user, &user->za_offset,
				     ZA_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	if (system_supports_sme2()) {
		if (add_all || thread_za_enabled(&current->thread)) {
			err = sigframe_alloc(user, &user->zt_offset,
					     ZT_SIG_CONTEXT_SIZE(1));
			if (err)
				return err;
		}
	}

	if (system_supports_fpmr()) {
		err = sigframe_alloc(user, &user->fpmr_offset,
				     sizeof(struct fpmr_context));
		if (err)
			return err;
	}

	if (system_supports_poe()) {
		err = sigframe_alloc(user, &user->poe_offset,
				     sizeof(struct poe_context));
		if (err)
			return err;
	}

	return sigframe_alloc_end(user);
}
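
/*
 * setup_sigframe_layout(user, true) is also used by minsigstksz_setup()
 * below to size the largest frame this task could need, which is what
 * ends up in the AT_MINSIGSTKSZ auxv entry.
 */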

static int setup_sigframe(struct rt_sigframe_user_layout *user,
			  struct pt_regs *regs, sigset_t *set,
			  const struct user_access_state *ua_state)
{
	int i, err = 0;
	struct rt_sigframe __user *sf = user->sigframe;

	/* set up the stack frame for unwinding */
	__put_user_error(regs->regs[29], &user->next_frame->fp, err);
	__put_user_error(regs->regs[30], &user->next_frame->lr, err);

	for (i = 0; i < 31; i++)
		__put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	__put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	if (err == 0 && system_supports_fpsimd()) {
		struct fpsimd_context __user *fpsimd_ctx =
			apply_user_offset(user, user->fpsimd_offset);
		err |= preserve_fpsimd_context(fpsimd_ctx);
	}

	/* fault information, if valid */
	if (err == 0 && user->esr_offset) {
		struct esr_context __user *esr_ctx =
			apply_user_offset(user, user->esr_offset);

		__put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
		__put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
		__put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
	}

	if (system_supports_gcs() && err == 0 && user->gcs_offset) {
		struct gcs_context __user *gcs_ctx =
			apply_user_offset(user, user->gcs_offset);
		err |= preserve_gcs_context(gcs_ctx);
	}

	/* Scalable Vector Extension state (including streaming), if present */
	if ((system_supports_sve() || system_supports_sme()) &&
	    err == 0 && user->sve_offset) {
		struct sve_context __user *sve_ctx =
			apply_user_offset(user, user->sve_offset);
		err |= preserve_sve_context(sve_ctx);
	}

	/* TPIDR2 if supported */
	if (system_supports_tpidr2() && err == 0) {
		struct tpidr2_context __user *tpidr2_ctx =
			apply_user_offset(user, user->tpidr2_offset);
		err |= preserve_tpidr2_context(tpidr2_ctx);
	}

	/* FPMR if supported */
	if (system_supports_fpmr() && err == 0) {
		struct fpmr_context __user *fpmr_ctx =
			apply_user_offset(user, user->fpmr_offset);
		err |= preserve_fpmr_context(fpmr_ctx);
	}

	if (system_supports_poe() && err == 0) {
		struct poe_context __user *poe_ctx =
			apply_user_offset(user, user->poe_offset);

		err |= preserve_poe_context(poe_ctx, ua_state);
	}

	/* ZA state if present */
	if (system_supports_sme() && err == 0 && user->za_offset) {
		struct za_context __user *za_ctx =
			apply_user_offset(user, user->za_offset);
		err |= preserve_za_context(za_ctx);
	}

	/* ZT state if present */
	if (system_supports_sme2() && err == 0 && user->zt_offset) {
		struct zt_context __user *zt_ctx =
			apply_user_offset(user, user->zt_offset);
		err |= preserve_zt_context(zt_ctx);
	}

	if (err == 0 && user->extra_offset) {
		char __user *sfp = (char __user *)user->sigframe;
		char __user *userp =
			apply_user_offset(user, user->extra_offset);

		struct extra_context __user *extra;
		struct _aarch64_ctx __user *end;
		u64 extra_datap;
		u32 extra_size;

		extra = (struct extra_context __user *)userp;
		userp += EXTRA_CONTEXT_SIZE;

		end = (struct _aarch64_ctx __user *)userp;
		userp += TERMINATOR_SIZE;

		/*
		 * extra_datap is just written to the signal frame.
		 * The value gets cast back to a void __user *
		 * during sigreturn.
		 */
		extra_datap = (__force u64)userp;
		extra_size = sfp + round_up(user->size, 16) - userp;

		__put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
		__put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
		__put_user_error(extra_datap, &extra->datap, err);
		__put_user_error(extra_size, &extra->size, err);

		/* Add the terminator */
		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	/* set the "end" magic */
	if (err == 0) {
		struct _aarch64_ctx __user *end =
			apply_user_offset(user, user->end_offset);

		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	return err;
}

static int get_sigframe(struct rt_sigframe_user_layout *user,
			 struct ksignal *ksig, struct pt_regs *regs)
{
	unsigned long sp, sp_top;
	int err;

	init_user_layout(user);
	err = setup_sigframe_layout(user, false);
	if (err)
		return err;

	sp = sp_top = sigsp(regs->sp, ksig);

	sp = round_down(sp - sizeof(struct frame_record), 16);
	user->next_frame = (struct frame_record __user *)sp;

	sp = round_down(sp, 16) - sigframe_size(user);
	user->sigframe = (struct rt_sigframe __user *)sp;

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(user->sigframe, sp_top - sp))
		return -EFAULT;

	return 0;
}
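
/*
 * Resulting stack layout (growing down from the sigsp() result): the
 * frame record for the unwinder sits at the top, with the rt_sigframe
 * below it; setup_return() later points regs->sp at the rt_sigframe and
 * x29 at the frame record.
 */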

#ifdef CONFIG_ARM64_GCS

static int gcs_signal_entry(__sigrestore_t sigtramp, struct ksignal *ksig)
{
	unsigned long __user *gcspr_el0;
	int ret = 0;

	if (!system_supports_gcs())
		return 0;

	if (!task_gcs_el0_enabled(current))
		return 0;

	/*
	 * We are entering a signal handler, current register state is
	 * active.
	 */
	gcspr_el0 = (unsigned long __user *)read_sysreg_s(SYS_GCSPR_EL0);

	/*
	 * Push a cap and the GCS entry for the trampoline onto the GCS.
	 */
	put_user_gcs((unsigned long)sigtramp, gcspr_el0 - 2, &ret);
	put_user_gcs(GCS_SIGNAL_CAP(gcspr_el0 - 1), gcspr_el0 - 1, &ret);
	if (ret != 0)
		return ret;

	gcspr_el0 -= 2;
	write_sysreg_s((unsigned long)gcspr_el0, SYS_GCSPR_EL0);

	return 0;
}
#else

static int gcs_signal_entry(__sigrestore_t sigtramp, struct ksignal *ksig)
{
	return 0;
}

#endif

static int setup_return(struct pt_regs *regs, struct ksignal *ksig,
			 struct rt_sigframe_user_layout *user, int usig)
{
	__sigrestore_t sigtramp;

	regs->regs[0] = usig;
	regs->sp = (unsigned long)user->sigframe;
	regs->regs[29] = (unsigned long)&user->next_frame->fp;
	regs->pc = (unsigned long)ksig->ka.sa.sa_handler;

	/*
	 * Signal delivery is a (wacky) indirect function call in
	 * userspace, so simulate the same setting of BTYPE as a BLR
	 * <register containing the signal handler entry point>.
	 * Signal delivery to a location in a PROT_BTI guarded page
	 * that is not a function entry point will now trigger a
	 * SIGILL in userspace.
	 *
	 * If the signal handler entry point is not in a PROT_BTI
	 * guarded page, this is harmless.
	 */
	if (system_supports_bti()) {
		regs->pstate &= ~PSR_BTYPE_MASK;
		regs->pstate |= PSR_BTYPE_C;
	}

	/* TCO (Tag Check Override) always cleared for signal handlers */
	regs->pstate &= ~PSR_TCO_BIT;

	/* Signal handlers are invoked with ZA and streaming mode disabled */
	if (system_supports_sme()) {
		/*
		 * If we were in streaming mode the saved register
		 * state was SVE but we will exit SM and use the
		 * FPSIMD register state - flush the saved FPSIMD
		 * register state in case it gets loaded.
		 */
		if (current->thread.svcr & SVCR_SM_MASK) {
			memset(&current->thread.uw.fpsimd_state, 0,
			       sizeof(current->thread.uw.fpsimd_state));
			current->thread.fp_type = FP_STATE_FPSIMD;
		}

		current->thread.svcr &= ~(SVCR_ZA_MASK |
					  SVCR_SM_MASK);
		sme_smstop();
	}

	if (ksig->ka.sa.sa_flags & SA_RESTORER)
		sigtramp = ksig->ka.sa.sa_restorer;
	else
		sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);

	regs->regs[30] = (unsigned long)sigtramp;

	return gcs_signal_entry(sigtramp, ksig);
}

static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
			  struct pt_regs *regs)
{
	struct rt_sigframe_user_layout user;
	struct rt_sigframe __user *frame;
	struct user_access_state ua_state;
	int err = 0;

	fpsimd_signal_preserve_current_state();

	if (get_sigframe(&user, ksig, regs))
		return 1;

	save_reset_user_access_state(&ua_state);
	frame = user.sigframe;

	__put_user_error(0, &frame->uc.uc_flags, err);
	__put_user_error(NULL, &frame->uc.uc_link, err);

	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
	err |= setup_sigframe(&user, regs, set, &ua_state);
	if (err == 0) {
		err = setup_return(regs, ksig, &user, usig);
		if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
			err |= copy_siginfo_to_user(&frame->info, &ksig->info);
			regs->regs[1] = (unsigned long)&frame->info;
			regs->regs[2] = (unsigned long)&frame->uc;
		}
	}

	if (err == 0)
		set_handler_user_access_state();
	else
		restore_user_access_state(&ua_state);

	return err;
}
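
/*
 * Note: on entry to the handler x0 holds the signal number and, for
 * SA_SIGINFO handlers, x1 and x2 point at the siginfo and ucontext set
 * up above.  x30 is the trampoline (vDSO sigtramp unless SA_RESTORER was
 * given), which is expected to invoke rt_sigreturn when the handler
 * returns.
 */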

static void setup_restart_syscall(struct pt_regs *regs)
{
	if (is_compat_task())
		compat_setup_restart_syscall(regs);
	else
		regs->regs[8] = __NR_restart_syscall;
}

/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int usig = ksig->sig;
	int ret;

	rseq_signal_deliver(ksig, regs);

	/*
	 * Set up the stack frame
	 */
	if (is_compat_task()) {
		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			ret = compat_setup_rt_frame(usig, ksig, oldset, regs);
		else
			ret = compat_setup_frame(usig, ksig, oldset, regs);
	} else {
		ret = setup_rt_frame(usig, ksig, oldset, regs);
	}

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(&regs->user_regs, current);

	/* Step into the signal handler if we are stepping */
	signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
void do_signal(struct pt_regs *regs)
{
	unsigned long continue_addr = 0, restart_addr = 0;
	int retval = 0;
	struct ksignal ksig;
	bool syscall = in_syscall(regs);

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->pc;
		restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
		retval = regs->regs[0];

		/*
		 * Avoid additional syscall restarting via ret_to_user.
		 */
		forget_syscall(regs);

		/*
		 * Prepare for system call restart. We do this here so that a
		 * debugger will see the already changed PC.
		 */
		switch (retval) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTART_RESTARTBLOCK:
			regs->regs[0] = regs->orig_x0;
			regs->pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver. When running under ptrace, at this point
	 * the debugger may change all of our registers.
	 */
	if (get_signal(&ksig)) {
		/*
		 * Depending on the signal settings, we may need to revert the
		 * decision to restart the system call, but skip this if a
		 * debugger has chosen to restart at a different PC.
		 */
		if (regs->pc == restart_addr &&
		    (retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK ||
		     (retval == -ERESTARTSYS &&
		      !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
			syscall_set_return_value(current, regs, -EINTR, 0);
			regs->pc = continue_addr;
		}

		handle_signal(&ksig, regs);
		return;
	}

	/*
	 * Handle restarting a different system call. As above, if a debugger
	 * has chosen to restart at a different PC, ignore the restart.
	 */
	if (syscall && regs->pc == restart_addr) {
		if (retval == -ERESTART_RESTARTBLOCK)
			setup_restart_syscall(regs);
		user_rewind_single_step(current);
	}

	restore_saved_sigmask();
}

unsigned long __ro_after_init signal_minsigstksz;

/*
 * Determine the stack space required for guaranteed signal delivery.
 * This function is used to populate AT_MINSIGSTKSZ at process startup.
 * cpufeatures setup is assumed to be complete.
 */
void __init minsigstksz_setup(void)
{
	struct rt_sigframe_user_layout user;

	init_user_layout(&user);

	/*
	 * If this fails, SIGFRAME_MAXSZ needs to be enlarged.  It won't
	 * be big enough, but it's our best guess:
	 */
	if (WARN_ON(setup_sigframe_layout(&user, true)))
		return;

	signal_minsigstksz = sigframe_size(&user) +
		round_up(sizeof(struct frame_record), 16) +
		16; /* max alignment padding */
}

/*
 * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as
 * changes likely come with new fields that should be added below.
 */
static_assert(NSIGILL == 11);
static_assert(NSIGFPE == 15);
static_assert(NSIGSEGV == 10);
static_assert(NSIGBUS == 5);
static_assert(NSIGTRAP == 6);
static_assert(NSIGCHLD == 6);
static_assert(NSIGSYS == 2);
static_assert(sizeof(siginfo_t) == 128);
static_assert(__alignof__(siginfo_t) == 8);
static_assert(offsetof(siginfo_t, si_signo) == 0x00);
static_assert(offsetof(siginfo_t, si_errno) == 0x04);
static_assert(offsetof(siginfo_t, si_code) == 0x08);
static_assert(offsetof(siginfo_t, si_pid) == 0x10);
static_assert(offsetof(siginfo_t, si_uid) == 0x14);
static_assert(offsetof(siginfo_t, si_tid) == 0x10);
static_assert(offsetof(siginfo_t, si_overrun) == 0x14);
static_assert(offsetof(siginfo_t, si_status) == 0x18);
static_assert(offsetof(siginfo_t, si_utime) == 0x20);
static_assert(offsetof(siginfo_t, si_stime) == 0x28);
static_assert(offsetof(siginfo_t, si_value) == 0x18);
static_assert(offsetof(siginfo_t, si_int) == 0x18);
static_assert(offsetof(siginfo_t, si_ptr) == 0x18);
static_assert(offsetof(siginfo_t, si_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_addr_lsb) == 0x18);
static_assert(offsetof(siginfo_t, si_lower) == 0x20);
static_assert(offsetof(siginfo_t, si_upper) == 0x28);
static_assert(offsetof(siginfo_t, si_pkey) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_data) == 0x18);
static_assert(offsetof(siginfo_t, si_perf_type) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_flags) == 0x24);
static_assert(offsetof(siginfo_t, si_band) == 0x10);
static_assert(offsetof(siginfo_t, si_fd) == 0x18);
static_assert(offsetof(siginfo_t, si_call_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_syscall) == 0x18);
static_assert(offsetof(siginfo_t, si_arch) == 0x1c);