// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/signal.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
9 #include <linux/cache.h>
10 #include <linux/compat.h>
11 #include <linux/errno.h>
12 #include <linux/kernel.h>
13 #include <linux/signal.h>
14 #include <linux/freezer.h>
15 #include <linux/stddef.h>
16 #include <linux/uaccess.h>
17 #include <linux/sizes.h>
18 #include <linux/string.h>
19 #include <linux/ratelimit.h>
20 #include <linux/rseq.h>
21 #include <linux/syscalls.h>
23 #include <asm/daifflags.h>
24 #include <asm/debug-monitors.h>
26 #include <asm/exception.h>
27 #include <asm/cacheflush.h>
28 #include <asm/ucontext.h>
29 #include <asm/unistd.h>
30 #include <asm/fpsimd.h>
31 #include <asm/ptrace.h>
32 #include <asm/syscall.h>
33 #include <asm/signal32.h>
34 #include <asm/traps.h>
/*
 * Do a signal return; undo the signal stack. These are aligned to 128-bit.
 */
50 struct rt_sigframe_user_layout
{
51 struct rt_sigframe __user
*sigframe
;
52 struct frame_record __user
*next_frame
;
54 unsigned long size
; /* size of allocated sigframe data */
55 unsigned long limit
; /* largest allowed size */
57 unsigned long fpsimd_offset
;
58 unsigned long esr_offset
;
59 unsigned long sve_offset
;
60 unsigned long tpidr2_offset
;
61 unsigned long za_offset
;
62 unsigned long zt_offset
;
63 unsigned long fpmr_offset
;
64 unsigned long poe_offset
;
65 unsigned long extra_offset
;
66 unsigned long end_offset
;
69 #define BASE_SIGFRAME_SIZE round_up(sizeof(struct rt_sigframe), 16)
70 #define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
71 #define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)
73 static void init_user_layout(struct rt_sigframe_user_layout
*user
)
75 const size_t reserved_size
=
76 sizeof(user
->sigframe
->uc
.uc_mcontext
.__reserved
);
78 memset(user
, 0, sizeof(*user
));
79 user
->size
= offsetof(struct rt_sigframe
, uc
.uc_mcontext
.__reserved
);
81 user
->limit
= user
->size
+ reserved_size
;
83 user
->limit
-= TERMINATOR_SIZE
;
84 user
->limit
-= EXTRA_CONTEXT_SIZE
;
85 /* Reserve space for extension and terminator ^ */
88 static size_t sigframe_size(struct rt_sigframe_user_layout
const *user
)
90 return round_up(max(user
->size
, sizeof(struct rt_sigframe
)), 16);
/*
 * Sanity limit on the approximate maximum size of signal frame we'll
 * try to generate.  Stack alignment padding and the frame record are
 * not taken into account.  This limit is not a guarantee and is
 * NOT ABI.
 */
99 #define SIGFRAME_MAXSZ SZ_256K
101 static int __sigframe_alloc(struct rt_sigframe_user_layout
*user
,
102 unsigned long *offset
, size_t size
, bool extend
)
104 size_t padded_size
= round_up(size
, 16);
106 if (padded_size
> user
->limit
- user
->size
&&
107 !user
->extra_offset
&&
111 user
->limit
+= EXTRA_CONTEXT_SIZE
;
112 ret
= __sigframe_alloc(user
, &user
->extra_offset
,
113 sizeof(struct extra_context
), false);
115 user
->limit
-= EXTRA_CONTEXT_SIZE
;
119 /* Reserve space for the __reserved[] terminator */
120 user
->size
+= TERMINATOR_SIZE
;
123 * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for
126 user
->limit
= SIGFRAME_MAXSZ
- TERMINATOR_SIZE
;
129 /* Still not enough space? Bad luck! */
130 if (padded_size
> user
->limit
- user
->size
)
133 *offset
= user
->size
;
134 user
->size
+= padded_size
;
140 * Allocate space for an optional record of <size> bytes in the user
141 * signal frame. The offset from the signal frame base address to the
142 * allocated block is assigned to *offset.
144 static int sigframe_alloc(struct rt_sigframe_user_layout
*user
,
145 unsigned long *offset
, size_t size
)
147 return __sigframe_alloc(user
, offset
, size
, true);
150 /* Allocate the null terminator record and prevent further allocations */
151 static int sigframe_alloc_end(struct rt_sigframe_user_layout
*user
)
155 /* Un-reserve the space reserved for the terminator: */
156 user
->limit
+= TERMINATOR_SIZE
;
158 ret
= sigframe_alloc(user
, &user
->end_offset
,
159 sizeof(struct _aarch64_ctx
));
163 /* Prevent further allocation: */
164 user
->limit
= user
->size
;
168 static void __user
*apply_user_offset(
169 struct rt_sigframe_user_layout
const *user
, unsigned long offset
)
171 char __user
*base
= (char __user
*)user
->sigframe
;
173 return base
+ offset
;
177 struct fpsimd_context __user
*fpsimd
;
179 struct sve_context __user
*sve
;
181 struct tpidr2_context __user
*tpidr2
;
183 struct za_context __user
*za
;
185 struct zt_context __user
*zt
;
187 struct fpmr_context __user
*fpmr
;
189 struct poe_context __user
*poe
;
193 static int preserve_fpsimd_context(struct fpsimd_context __user
*ctx
)
195 struct user_fpsimd_state
const *fpsimd
=
196 ¤t
->thread
.uw
.fpsimd_state
;
199 /* copy the FP and status/control registers */
200 err
= __copy_to_user(ctx
->vregs
, fpsimd
->vregs
, sizeof(fpsimd
->vregs
));
201 __put_user_error(fpsimd
->fpsr
, &ctx
->fpsr
, err
);
202 __put_user_error(fpsimd
->fpcr
, &ctx
->fpcr
, err
);
204 /* copy the magic/size information */
205 __put_user_error(FPSIMD_MAGIC
, &ctx
->head
.magic
, err
);
206 __put_user_error(sizeof(struct fpsimd_context
), &ctx
->head
.size
, err
);
208 return err
? -EFAULT
: 0;
211 static int restore_fpsimd_context(struct user_ctxs
*user
)
213 struct user_fpsimd_state fpsimd
;
216 /* check the size information */
217 if (user
->fpsimd_size
!= sizeof(struct fpsimd_context
))
220 /* copy the FP and status/control registers */
221 err
= __copy_from_user(fpsimd
.vregs
, &(user
->fpsimd
->vregs
),
222 sizeof(fpsimd
.vregs
));
223 __get_user_error(fpsimd
.fpsr
, &(user
->fpsimd
->fpsr
), err
);
224 __get_user_error(fpsimd
.fpcr
, &(user
->fpsimd
->fpcr
), err
);
226 clear_thread_flag(TIF_SVE
);
227 current
->thread
.fp_type
= FP_STATE_FPSIMD
;
229 /* load the hardware registers from the fpsimd_state structure */
231 fpsimd_update_current_state(&fpsimd
);
233 return err
? -EFAULT
: 0;
236 static int preserve_fpmr_context(struct fpmr_context __user
*ctx
)
240 current
->thread
.uw
.fpmr
= read_sysreg_s(SYS_FPMR
);
242 __put_user_error(FPMR_MAGIC
, &ctx
->head
.magic
, err
);
243 __put_user_error(sizeof(*ctx
), &ctx
->head
.size
, err
);
244 __put_user_error(current
->thread
.uw
.fpmr
, &ctx
->fpmr
, err
);
249 static int restore_fpmr_context(struct user_ctxs
*user
)
254 if (user
->fpmr_size
!= sizeof(*user
->fpmr
))
257 __get_user_error(fpmr
, &user
->fpmr
->fpmr
, err
);
259 write_sysreg_s(fpmr
, SYS_FPMR
);
264 static int preserve_poe_context(struct poe_context __user
*ctx
)
268 __put_user_error(POE_MAGIC
, &ctx
->head
.magic
, err
);
269 __put_user_error(sizeof(*ctx
), &ctx
->head
.size
, err
);
270 __put_user_error(read_sysreg_s(SYS_POR_EL0
), &ctx
->por_el0
, err
);
275 static int restore_poe_context(struct user_ctxs
*user
)
280 if (user
->poe_size
!= sizeof(*user
->poe
))
283 __get_user_error(por_el0
, &(user
->poe
->por_el0
), err
);
285 write_sysreg_s(por_el0
, SYS_POR_EL0
);
290 #ifdef CONFIG_ARM64_SVE
292 static int preserve_sve_context(struct sve_context __user
*ctx
)
295 u16 reserved
[ARRAY_SIZE(ctx
->__reserved
)];
297 unsigned int vl
= task_get_sve_vl(current
);
300 if (thread_sm_enabled(¤t
->thread
)) {
301 vl
= task_get_sme_vl(current
);
302 vq
= sve_vq_from_vl(vl
);
303 flags
|= SVE_SIG_FLAG_SM
;
304 } else if (current
->thread
.fp_type
== FP_STATE_SVE
) {
305 vq
= sve_vq_from_vl(vl
);
308 memset(reserved
, 0, sizeof(reserved
));
310 __put_user_error(SVE_MAGIC
, &ctx
->head
.magic
, err
);
311 __put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq
), 16),
312 &ctx
->head
.size
, err
);
313 __put_user_error(vl
, &ctx
->vl
, err
);
314 __put_user_error(flags
, &ctx
->flags
, err
);
315 BUILD_BUG_ON(sizeof(ctx
->__reserved
) != sizeof(reserved
));
316 err
|= __copy_to_user(&ctx
->__reserved
, reserved
, sizeof(reserved
));
320 * This assumes that the SVE state has already been saved to
321 * the task struct by calling the function
322 * fpsimd_signal_preserve_current_state().
324 err
|= __copy_to_user((char __user
*)ctx
+ SVE_SIG_REGS_OFFSET
,
325 current
->thread
.sve_state
,
326 SVE_SIG_REGS_SIZE(vq
));
329 return err
? -EFAULT
: 0;
332 static int restore_sve_fpsimd_context(struct user_ctxs
*user
)
336 struct user_fpsimd_state fpsimd
;
339 if (user
->sve_size
< sizeof(*user
->sve
))
342 __get_user_error(user_vl
, &(user
->sve
->vl
), err
);
343 __get_user_error(flags
, &(user
->sve
->flags
), err
);
347 if (flags
& SVE_SIG_FLAG_SM
) {
348 if (!system_supports_sme())
351 vl
= task_get_sme_vl(current
);
354 * A SME only system use SVE for streaming mode so can
355 * have a SVE formatted context with a zero VL and no
358 if (!system_supports_sve() && !system_supports_sme())
361 vl
= task_get_sve_vl(current
);
367 if (user
->sve_size
== sizeof(*user
->sve
)) {
368 clear_thread_flag(TIF_SVE
);
369 current
->thread
.svcr
&= ~SVCR_SM_MASK
;
370 current
->thread
.fp_type
= FP_STATE_FPSIMD
;
374 vq
= sve_vq_from_vl(vl
);
376 if (user
->sve_size
< SVE_SIG_CONTEXT_SIZE(vq
))
380 * Careful: we are about __copy_from_user() directly into
381 * thread.sve_state with preemption enabled, so protection is
382 * needed to prevent a racing context switch from writing stale
383 * registers back over the new data.
386 fpsimd_flush_task_state(current
);
387 /* From now, fpsimd_thread_switch() won't touch thread.sve_state */
389 sve_alloc(current
, true);
390 if (!current
->thread
.sve_state
) {
391 clear_thread_flag(TIF_SVE
);
395 err
= __copy_from_user(current
->thread
.sve_state
,
396 (char __user
const *)user
->sve
+
398 SVE_SIG_REGS_SIZE(vq
));
402 if (flags
& SVE_SIG_FLAG_SM
)
403 current
->thread
.svcr
|= SVCR_SM_MASK
;
405 set_thread_flag(TIF_SVE
);
406 current
->thread
.fp_type
= FP_STATE_SVE
;
409 /* copy the FP and status/control registers */
410 /* restore_sigframe() already checked that user->fpsimd != NULL. */
411 err
= __copy_from_user(fpsimd
.vregs
, user
->fpsimd
->vregs
,
412 sizeof(fpsimd
.vregs
));
413 __get_user_error(fpsimd
.fpsr
, &user
->fpsimd
->fpsr
, err
);
414 __get_user_error(fpsimd
.fpcr
, &user
->fpsimd
->fpcr
, err
);
416 /* load the hardware registers from the fpsimd_state structure */
418 fpsimd_update_current_state(&fpsimd
);
420 return err
? -EFAULT
: 0;
423 #else /* ! CONFIG_ARM64_SVE */
425 static int restore_sve_fpsimd_context(struct user_ctxs
*user
)
431 /* Turn any non-optimised out attempts to use this into a link error: */
432 extern int preserve_sve_context(void __user
*ctx
);
434 #endif /* ! CONFIG_ARM64_SVE */
436 #ifdef CONFIG_ARM64_SME
438 static int preserve_tpidr2_context(struct tpidr2_context __user
*ctx
)
442 current
->thread
.tpidr2_el0
= read_sysreg_s(SYS_TPIDR2_EL0
);
444 __put_user_error(TPIDR2_MAGIC
, &ctx
->head
.magic
, err
);
445 __put_user_error(sizeof(*ctx
), &ctx
->head
.size
, err
);
446 __put_user_error(current
->thread
.tpidr2_el0
, &ctx
->tpidr2
, err
);
451 static int restore_tpidr2_context(struct user_ctxs
*user
)
456 if (user
->tpidr2_size
!= sizeof(*user
->tpidr2
))
459 __get_user_error(tpidr2_el0
, &user
->tpidr2
->tpidr2
, err
);
461 write_sysreg_s(tpidr2_el0
, SYS_TPIDR2_EL0
);
466 static int preserve_za_context(struct za_context __user
*ctx
)
469 u16 reserved
[ARRAY_SIZE(ctx
->__reserved
)];
470 unsigned int vl
= task_get_sme_vl(current
);
473 if (thread_za_enabled(¤t
->thread
))
474 vq
= sve_vq_from_vl(vl
);
478 memset(reserved
, 0, sizeof(reserved
));
480 __put_user_error(ZA_MAGIC
, &ctx
->head
.magic
, err
);
481 __put_user_error(round_up(ZA_SIG_CONTEXT_SIZE(vq
), 16),
482 &ctx
->head
.size
, err
);
483 __put_user_error(vl
, &ctx
->vl
, err
);
484 BUILD_BUG_ON(sizeof(ctx
->__reserved
) != sizeof(reserved
));
485 err
|= __copy_to_user(&ctx
->__reserved
, reserved
, sizeof(reserved
));
489 * This assumes that the ZA state has already been saved to
490 * the task struct by calling the function
491 * fpsimd_signal_preserve_current_state().
493 err
|= __copy_to_user((char __user
*)ctx
+ ZA_SIG_REGS_OFFSET
,
494 current
->thread
.sme_state
,
495 ZA_SIG_REGS_SIZE(vq
));
498 return err
? -EFAULT
: 0;
501 static int restore_za_context(struct user_ctxs
*user
)
507 if (user
->za_size
< sizeof(*user
->za
))
510 __get_user_error(user_vl
, &(user
->za
->vl
), err
);
514 if (user_vl
!= task_get_sme_vl(current
))
517 if (user
->za_size
== sizeof(*user
->za
)) {
518 current
->thread
.svcr
&= ~SVCR_ZA_MASK
;
522 vq
= sve_vq_from_vl(user_vl
);
524 if (user
->za_size
< ZA_SIG_CONTEXT_SIZE(vq
))
528 * Careful: we are about __copy_from_user() directly into
529 * thread.sme_state with preemption enabled, so protection is
530 * needed to prevent a racing context switch from writing stale
531 * registers back over the new data.
534 fpsimd_flush_task_state(current
);
535 /* From now, fpsimd_thread_switch() won't touch thread.sve_state */
537 sme_alloc(current
, true);
538 if (!current
->thread
.sme_state
) {
539 current
->thread
.svcr
&= ~SVCR_ZA_MASK
;
540 clear_thread_flag(TIF_SME
);
544 err
= __copy_from_user(current
->thread
.sme_state
,
545 (char __user
const *)user
->za
+
547 ZA_SIG_REGS_SIZE(vq
));
551 set_thread_flag(TIF_SME
);
552 current
->thread
.svcr
|= SVCR_ZA_MASK
;
557 static int preserve_zt_context(struct zt_context __user
*ctx
)
560 u16 reserved
[ARRAY_SIZE(ctx
->__reserved
)];
562 if (WARN_ON(!thread_za_enabled(¤t
->thread
)))
565 memset(reserved
, 0, sizeof(reserved
));
567 __put_user_error(ZT_MAGIC
, &ctx
->head
.magic
, err
);
568 __put_user_error(round_up(ZT_SIG_CONTEXT_SIZE(1), 16),
569 &ctx
->head
.size
, err
);
570 __put_user_error(1, &ctx
->nregs
, err
);
571 BUILD_BUG_ON(sizeof(ctx
->__reserved
) != sizeof(reserved
));
572 err
|= __copy_to_user(&ctx
->__reserved
, reserved
, sizeof(reserved
));
575 * This assumes that the ZT state has already been saved to
576 * the task struct by calling the function
577 * fpsimd_signal_preserve_current_state().
579 err
|= __copy_to_user((char __user
*)ctx
+ ZT_SIG_REGS_OFFSET
,
580 thread_zt_state(¤t
->thread
),
581 ZT_SIG_REGS_SIZE(1));
583 return err
? -EFAULT
: 0;
586 static int restore_zt_context(struct user_ctxs
*user
)
591 /* ZA must be restored first for this check to be valid */
592 if (!thread_za_enabled(¤t
->thread
))
595 if (user
->zt_size
!= ZT_SIG_CONTEXT_SIZE(1))
598 if (__copy_from_user(&nregs
, &(user
->zt
->nregs
), sizeof(nregs
)))
605 * Careful: we are about __copy_from_user() directly into
606 * thread.zt_state with preemption enabled, so protection is
607 * needed to prevent a racing context switch from writing stale
608 * registers back over the new data.
611 fpsimd_flush_task_state(current
);
612 /* From now, fpsimd_thread_switch() won't touch ZT in thread state */
614 err
= __copy_from_user(thread_zt_state(¤t
->thread
),
615 (char __user
const *)user
->zt
+
617 ZT_SIG_REGS_SIZE(1));
624 #else /* ! CONFIG_ARM64_SME */
626 /* Turn any non-optimised out attempts to use these into a link error: */
627 extern int preserve_tpidr2_context(void __user
*ctx
);
628 extern int restore_tpidr2_context(struct user_ctxs
*user
);
629 extern int preserve_za_context(void __user
*ctx
);
630 extern int restore_za_context(struct user_ctxs
*user
);
631 extern int preserve_zt_context(void __user
*ctx
);
632 extern int restore_zt_context(struct user_ctxs
*user
);
634 #endif /* ! CONFIG_ARM64_SME */
636 static int parse_user_sigframe(struct user_ctxs
*user
,
637 struct rt_sigframe __user
*sf
)
639 struct sigcontext __user
*const sc
= &sf
->uc
.uc_mcontext
;
640 struct _aarch64_ctx __user
*head
;
641 char __user
*base
= (char __user
*)&sc
->__reserved
;
643 size_t limit
= sizeof(sc
->__reserved
);
644 bool have_extra_context
= false;
645 char const __user
*const sfp
= (char const __user
*)sf
;
655 if (!IS_ALIGNED((unsigned long)base
, 16))
661 char const __user
*userp
;
662 struct extra_context
const __user
*extra
;
665 struct _aarch64_ctx
const __user
*end
;
666 u32 end_magic
, end_size
;
668 if (limit
- offset
< sizeof(*head
))
671 if (!IS_ALIGNED(offset
, 16))
674 head
= (struct _aarch64_ctx __user
*)(base
+ offset
);
675 __get_user_error(magic
, &head
->magic
, err
);
676 __get_user_error(size
, &head
->size
, err
);
680 if (limit
- offset
< size
)
691 if (!system_supports_fpsimd())
696 user
->fpsimd
= (struct fpsimd_context __user
*)head
;
697 user
->fpsimd_size
= size
;
705 if (!system_supports_poe())
711 user
->poe
= (struct poe_context __user
*)head
;
712 user
->poe_size
= size
;
716 if (!system_supports_sve() && !system_supports_sme())
722 user
->sve
= (struct sve_context __user
*)head
;
723 user
->sve_size
= size
;
727 if (!system_supports_tpidr2())
733 user
->tpidr2
= (struct tpidr2_context __user
*)head
;
734 user
->tpidr2_size
= size
;
738 if (!system_supports_sme())
744 user
->za
= (struct za_context __user
*)head
;
745 user
->za_size
= size
;
749 if (!system_supports_sme2())
755 user
->zt
= (struct zt_context __user
*)head
;
756 user
->zt_size
= size
;
760 if (!system_supports_fpmr())
766 user
->fpmr
= (struct fpmr_context __user
*)head
;
767 user
->fpmr_size
= size
;
771 if (have_extra_context
)
774 if (size
< sizeof(*extra
))
777 userp
= (char const __user
*)head
;
779 extra
= (struct extra_context
const __user
*)userp
;
782 __get_user_error(extra_datap
, &extra
->datap
, err
);
783 __get_user_error(extra_size
, &extra
->size
, err
);
787 /* Check for the dummy terminator in __reserved[]: */
789 if (limit
- offset
- size
< TERMINATOR_SIZE
)
792 end
= (struct _aarch64_ctx
const __user
*)userp
;
793 userp
+= TERMINATOR_SIZE
;
795 __get_user_error(end_magic
, &end
->magic
, err
);
796 __get_user_error(end_size
, &end
->size
, err
);
800 if (end_magic
|| end_size
)
803 /* Prevent looping/repeated parsing of extra_context */
804 have_extra_context
= true;
806 base
= (__force
void __user
*)extra_datap
;
807 if (!IS_ALIGNED((unsigned long)base
, 16))
810 if (!IS_ALIGNED(extra_size
, 16))
816 /* Reject "unreasonably large" frames: */
817 if (extra_size
> sfp
+ SIGFRAME_MAXSZ
- userp
)
821 * Ignore trailing terminator in __reserved[]
822 * and start parsing extra data:
827 if (!access_ok(base
, limit
))
836 if (size
< sizeof(*head
))
839 if (limit
- offset
< size
)
852 static int restore_sigframe(struct pt_regs
*regs
,
853 struct rt_sigframe __user
*sf
)
857 struct user_ctxs user
;
859 err
= __copy_from_user(&set
, &sf
->uc
.uc_sigmask
, sizeof(set
));
861 set_current_blocked(&set
);
863 for (i
= 0; i
< 31; i
++)
864 __get_user_error(regs
->regs
[i
], &sf
->uc
.uc_mcontext
.regs
[i
],
866 __get_user_error(regs
->sp
, &sf
->uc
.uc_mcontext
.sp
, err
);
867 __get_user_error(regs
->pc
, &sf
->uc
.uc_mcontext
.pc
, err
);
868 __get_user_error(regs
->pstate
, &sf
->uc
.uc_mcontext
.pstate
, err
);
871 * Avoid sys_rt_sigreturn() restarting.
873 forget_syscall(regs
);
875 err
|= !valid_user_regs(®s
->user_regs
, current
);
877 err
= parse_user_sigframe(&user
, sf
);
879 if (err
== 0 && system_supports_fpsimd()) {
884 err
= restore_sve_fpsimd_context(&user
);
886 err
= restore_fpsimd_context(&user
);
889 if (err
== 0 && system_supports_tpidr2() && user
.tpidr2
)
890 err
= restore_tpidr2_context(&user
);
892 if (err
== 0 && system_supports_fpmr() && user
.fpmr
)
893 err
= restore_fpmr_context(&user
);
895 if (err
== 0 && system_supports_sme() && user
.za
)
896 err
= restore_za_context(&user
);
898 if (err
== 0 && system_supports_sme2() && user
.zt
)
899 err
= restore_zt_context(&user
);
901 if (err
== 0 && system_supports_poe() && user
.poe
)
902 err
= restore_poe_context(&user
);
907 SYSCALL_DEFINE0(rt_sigreturn
)
909 struct pt_regs
*regs
= current_pt_regs();
910 struct rt_sigframe __user
*frame
;
912 /* Always make any pending restarted system calls return -EINTR */
913 current
->restart_block
.fn
= do_no_restart_syscall
;
916 * Since we stacked the signal on a 128-bit boundary, then 'sp' should
917 * be word aligned here.
922 frame
= (struct rt_sigframe __user
*)regs
->sp
;
924 if (!access_ok(frame
, sizeof (*frame
)))
927 if (restore_sigframe(regs
, frame
))
930 if (restore_altstack(&frame
->uc
.uc_stack
))
933 return regs
->regs
[0];
936 arm64_notify_segfault(regs
->sp
);
941 * Determine the layout of optional records in the signal frame
943 * add_all: if true, lays out the biggest possible signal frame for
944 * this task; otherwise, generates a layout for the current state
947 static int setup_sigframe_layout(struct rt_sigframe_user_layout
*user
,
952 if (system_supports_fpsimd()) {
953 err
= sigframe_alloc(user
, &user
->fpsimd_offset
,
954 sizeof(struct fpsimd_context
));
959 /* fault information, if valid */
960 if (add_all
|| current
->thread
.fault_code
) {
961 err
= sigframe_alloc(user
, &user
->esr_offset
,
962 sizeof(struct esr_context
));
967 if (system_supports_sve() || system_supports_sme()) {
970 if (add_all
|| current
->thread
.fp_type
== FP_STATE_SVE
||
971 thread_sm_enabled(¤t
->thread
)) {
972 int vl
= max(sve_max_vl(), sme_max_vl());
975 vl
= thread_get_cur_vl(¤t
->thread
);
977 vq
= sve_vq_from_vl(vl
);
980 err
= sigframe_alloc(user
, &user
->sve_offset
,
981 SVE_SIG_CONTEXT_SIZE(vq
));
986 if (system_supports_tpidr2()) {
987 err
= sigframe_alloc(user
, &user
->tpidr2_offset
,
988 sizeof(struct tpidr2_context
));
993 if (system_supports_sme()) {
1000 vl
= task_get_sme_vl(current
);
1002 if (thread_za_enabled(¤t
->thread
))
1003 vq
= sve_vq_from_vl(vl
);
1005 err
= sigframe_alloc(user
, &user
->za_offset
,
1006 ZA_SIG_CONTEXT_SIZE(vq
));
1011 if (system_supports_sme2()) {
1012 if (add_all
|| thread_za_enabled(¤t
->thread
)) {
1013 err
= sigframe_alloc(user
, &user
->zt_offset
,
1014 ZT_SIG_CONTEXT_SIZE(1));
1020 if (system_supports_fpmr()) {
1021 err
= sigframe_alloc(user
, &user
->fpmr_offset
,
1022 sizeof(struct fpmr_context
));
1027 if (system_supports_poe()) {
1028 err
= sigframe_alloc(user
, &user
->poe_offset
,
1029 sizeof(struct poe_context
));
1034 return sigframe_alloc_end(user
);
1037 static int setup_sigframe(struct rt_sigframe_user_layout
*user
,
1038 struct pt_regs
*regs
, sigset_t
*set
)
1041 struct rt_sigframe __user
*sf
= user
->sigframe
;
1043 /* set up the stack frame for unwinding */
1044 __put_user_error(regs
->regs
[29], &user
->next_frame
->fp
, err
);
1045 __put_user_error(regs
->regs
[30], &user
->next_frame
->lr
, err
);
1047 for (i
= 0; i
< 31; i
++)
1048 __put_user_error(regs
->regs
[i
], &sf
->uc
.uc_mcontext
.regs
[i
],
1050 __put_user_error(regs
->sp
, &sf
->uc
.uc_mcontext
.sp
, err
);
1051 __put_user_error(regs
->pc
, &sf
->uc
.uc_mcontext
.pc
, err
);
1052 __put_user_error(regs
->pstate
, &sf
->uc
.uc_mcontext
.pstate
, err
);
1054 __put_user_error(current
->thread
.fault_address
, &sf
->uc
.uc_mcontext
.fault_address
, err
);
1056 err
|= __copy_to_user(&sf
->uc
.uc_sigmask
, set
, sizeof(*set
));
1058 if (err
== 0 && system_supports_fpsimd()) {
1059 struct fpsimd_context __user
*fpsimd_ctx
=
1060 apply_user_offset(user
, user
->fpsimd_offset
);
1061 err
|= preserve_fpsimd_context(fpsimd_ctx
);
1064 /* fault information, if valid */
1065 if (err
== 0 && user
->esr_offset
) {
1066 struct esr_context __user
*esr_ctx
=
1067 apply_user_offset(user
, user
->esr_offset
);
1069 __put_user_error(ESR_MAGIC
, &esr_ctx
->head
.magic
, err
);
1070 __put_user_error(sizeof(*esr_ctx
), &esr_ctx
->head
.size
, err
);
1071 __put_user_error(current
->thread
.fault_code
, &esr_ctx
->esr
, err
);
1074 /* Scalable Vector Extension state (including streaming), if present */
1075 if ((system_supports_sve() || system_supports_sme()) &&
1076 err
== 0 && user
->sve_offset
) {
1077 struct sve_context __user
*sve_ctx
=
1078 apply_user_offset(user
, user
->sve_offset
);
1079 err
|= preserve_sve_context(sve_ctx
);
1082 /* TPIDR2 if supported */
1083 if (system_supports_tpidr2() && err
== 0) {
1084 struct tpidr2_context __user
*tpidr2_ctx
=
1085 apply_user_offset(user
, user
->tpidr2_offset
);
1086 err
|= preserve_tpidr2_context(tpidr2_ctx
);
1089 /* FPMR if supported */
1090 if (system_supports_fpmr() && err
== 0) {
1091 struct fpmr_context __user
*fpmr_ctx
=
1092 apply_user_offset(user
, user
->fpmr_offset
);
1093 err
|= preserve_fpmr_context(fpmr_ctx
);
1096 if (system_supports_poe() && err
== 0 && user
->poe_offset
) {
1097 struct poe_context __user
*poe_ctx
=
1098 apply_user_offset(user
, user
->poe_offset
);
1100 err
|= preserve_poe_context(poe_ctx
);
1104 /* ZA state if present */
1105 if (system_supports_sme() && err
== 0 && user
->za_offset
) {
1106 struct za_context __user
*za_ctx
=
1107 apply_user_offset(user
, user
->za_offset
);
1108 err
|= preserve_za_context(za_ctx
);
1111 /* ZT state if present */
1112 if (system_supports_sme2() && err
== 0 && user
->zt_offset
) {
1113 struct zt_context __user
*zt_ctx
=
1114 apply_user_offset(user
, user
->zt_offset
);
1115 err
|= preserve_zt_context(zt_ctx
);
1118 if (err
== 0 && user
->extra_offset
) {
1119 char __user
*sfp
= (char __user
*)user
->sigframe
;
1120 char __user
*userp
=
1121 apply_user_offset(user
, user
->extra_offset
);
1123 struct extra_context __user
*extra
;
1124 struct _aarch64_ctx __user
*end
;
1128 extra
= (struct extra_context __user
*)userp
;
1129 userp
+= EXTRA_CONTEXT_SIZE
;
1131 end
= (struct _aarch64_ctx __user
*)userp
;
1132 userp
+= TERMINATOR_SIZE
;
1135 * extra_datap is just written to the signal frame.
1136 * The value gets cast back to a void __user *
1139 extra_datap
= (__force u64
)userp
;
1140 extra_size
= sfp
+ round_up(user
->size
, 16) - userp
;
1142 __put_user_error(EXTRA_MAGIC
, &extra
->head
.magic
, err
);
1143 __put_user_error(EXTRA_CONTEXT_SIZE
, &extra
->head
.size
, err
);
1144 __put_user_error(extra_datap
, &extra
->datap
, err
);
1145 __put_user_error(extra_size
, &extra
->size
, err
);
1147 /* Add the terminator */
1148 __put_user_error(0, &end
->magic
, err
);
1149 __put_user_error(0, &end
->size
, err
);
1152 /* set the "end" magic */
1154 struct _aarch64_ctx __user
*end
=
1155 apply_user_offset(user
, user
->end_offset
);
1157 __put_user_error(0, &end
->magic
, err
);
1158 __put_user_error(0, &end
->size
, err
);
1164 static int get_sigframe(struct rt_sigframe_user_layout
*user
,
1165 struct ksignal
*ksig
, struct pt_regs
*regs
)
1167 unsigned long sp
, sp_top
;
1170 init_user_layout(user
);
1171 err
= setup_sigframe_layout(user
, false);
1175 sp
= sp_top
= sigsp(regs
->sp
, ksig
);
1177 sp
= round_down(sp
- sizeof(struct frame_record
), 16);
1178 user
->next_frame
= (struct frame_record __user
*)sp
;
1180 sp
= round_down(sp
, 16) - sigframe_size(user
);
1181 user
->sigframe
= (struct rt_sigframe __user
*)sp
;
1184 * Check that we can actually write to the signal frame.
1186 if (!access_ok(user
->sigframe
, sp_top
- sp
))
1192 static void setup_return(struct pt_regs
*regs
, struct k_sigaction
*ka
,
1193 struct rt_sigframe_user_layout
*user
, int usig
)
1195 __sigrestore_t sigtramp
;
1197 regs
->regs
[0] = usig
;
1198 regs
->sp
= (unsigned long)user
->sigframe
;
1199 regs
->regs
[29] = (unsigned long)&user
->next_frame
->fp
;
1200 regs
->pc
= (unsigned long)ka
->sa
.sa_handler
;
1203 * Signal delivery is a (wacky) indirect function call in
1204 * userspace, so simulate the same setting of BTYPE as a BLR
1205 * <register containing the signal handler entry point>.
1206 * Signal delivery to a location in a PROT_BTI guarded page
1207 * that is not a function entry point will now trigger a
1208 * SIGILL in userspace.
1210 * If the signal handler entry point is not in a PROT_BTI
1211 * guarded page, this is harmless.
1213 if (system_supports_bti()) {
1214 regs
->pstate
&= ~PSR_BTYPE_MASK
;
1215 regs
->pstate
|= PSR_BTYPE_C
;
1218 /* TCO (Tag Check Override) always cleared for signal handlers */
1219 regs
->pstate
&= ~PSR_TCO_BIT
;
1221 /* Signal handlers are invoked with ZA and streaming mode disabled */
1222 if (system_supports_sme()) {
1224 * If we were in streaming mode the saved register
1225 * state was SVE but we will exit SM and use the
1226 * FPSIMD register state - flush the saved FPSIMD
1227 * register state in case it gets loaded.
1229 if (current
->thread
.svcr
& SVCR_SM_MASK
) {
1230 memset(¤t
->thread
.uw
.fpsimd_state
, 0,
1231 sizeof(current
->thread
.uw
.fpsimd_state
));
1232 current
->thread
.fp_type
= FP_STATE_FPSIMD
;
1235 current
->thread
.svcr
&= ~(SVCR_ZA_MASK
|
1240 if (system_supports_poe())
1241 write_sysreg_s(POR_EL0_INIT
, SYS_POR_EL0
);
1243 if (ka
->sa
.sa_flags
& SA_RESTORER
)
1244 sigtramp
= ka
->sa
.sa_restorer
;
1246 sigtramp
= VDSO_SYMBOL(current
->mm
->context
.vdso
, sigtramp
);
1248 regs
->regs
[30] = (unsigned long)sigtramp
;
1251 static int setup_rt_frame(int usig
, struct ksignal
*ksig
, sigset_t
*set
,
1252 struct pt_regs
*regs
)
1254 struct rt_sigframe_user_layout user
;
1255 struct rt_sigframe __user
*frame
;
1258 fpsimd_signal_preserve_current_state();
1260 if (get_sigframe(&user
, ksig
, regs
))
1263 frame
= user
.sigframe
;
1265 __put_user_error(0, &frame
->uc
.uc_flags
, err
);
1266 __put_user_error(NULL
, &frame
->uc
.uc_link
, err
);
1268 err
|= __save_altstack(&frame
->uc
.uc_stack
, regs
->sp
);
1269 err
|= setup_sigframe(&user
, regs
, set
);
1271 setup_return(regs
, &ksig
->ka
, &user
, usig
);
1272 if (ksig
->ka
.sa
.sa_flags
& SA_SIGINFO
) {
1273 err
|= copy_siginfo_to_user(&frame
->info
, &ksig
->info
);
1274 regs
->regs
[1] = (unsigned long)&frame
->info
;
1275 regs
->regs
[2] = (unsigned long)&frame
->uc
;
1282 static void setup_restart_syscall(struct pt_regs
*regs
)
1284 if (is_compat_task())
1285 compat_setup_restart_syscall(regs
);
1287 regs
->regs
[8] = __NR_restart_syscall
;
1291 * OK, we're invoking a handler
1293 static void handle_signal(struct ksignal
*ksig
, struct pt_regs
*regs
)
1295 sigset_t
*oldset
= sigmask_to_save();
1296 int usig
= ksig
->sig
;
1299 rseq_signal_deliver(ksig
, regs
);
1302 * Set up the stack frame
1304 if (is_compat_task()) {
1305 if (ksig
->ka
.sa
.sa_flags
& SA_SIGINFO
)
1306 ret
= compat_setup_rt_frame(usig
, ksig
, oldset
, regs
);
1308 ret
= compat_setup_frame(usig
, ksig
, oldset
, regs
);
1310 ret
= setup_rt_frame(usig
, ksig
, oldset
, regs
);
1314 * Check that the resulting registers are actually sane.
1316 ret
|= !valid_user_regs(®s
->user_regs
, current
);
1318 /* Step into the signal handler if we are stepping */
1319 signal_setup_done(ret
, ksig
, test_thread_flag(TIF_SINGLESTEP
));
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
1331 void do_signal(struct pt_regs
*regs
)
1333 unsigned long continue_addr
= 0, restart_addr
= 0;
1335 struct ksignal ksig
;
1336 bool syscall
= in_syscall(regs
);
1339 * If we were from a system call, check for system call restarting...
1342 continue_addr
= regs
->pc
;
1343 restart_addr
= continue_addr
- (compat_thumb_mode(regs
) ? 2 : 4);
1344 retval
= regs
->regs
[0];
1347 * Avoid additional syscall restarting via ret_to_user.
1349 forget_syscall(regs
);
1352 * Prepare for system call restart. We do this here so that a
1353 * debugger will see the already changed PC.
1356 case -ERESTARTNOHAND
:
1358 case -ERESTARTNOINTR
:
1359 case -ERESTART_RESTARTBLOCK
:
1360 regs
->regs
[0] = regs
->orig_x0
;
1361 regs
->pc
= restart_addr
;
1367 * Get the signal to deliver. When running under ptrace, at this point
1368 * the debugger may change all of our registers.
1370 if (get_signal(&ksig
)) {
1372 * Depending on the signal settings, we may need to revert the
1373 * decision to restart the system call, but skip this if a
1374 * debugger has chosen to restart at a different PC.
1376 if (regs
->pc
== restart_addr
&&
1377 (retval
== -ERESTARTNOHAND
||
1378 retval
== -ERESTART_RESTARTBLOCK
||
1379 (retval
== -ERESTARTSYS
&&
1380 !(ksig
.ka
.sa
.sa_flags
& SA_RESTART
)))) {
1381 syscall_set_return_value(current
, regs
, -EINTR
, 0);
1382 regs
->pc
= continue_addr
;
1385 handle_signal(&ksig
, regs
);
1390 * Handle restarting a different system call. As above, if a debugger
1391 * has chosen to restart at a different PC, ignore the restart.
1393 if (syscall
&& regs
->pc
== restart_addr
) {
1394 if (retval
== -ERESTART_RESTARTBLOCK
)
1395 setup_restart_syscall(regs
);
1396 user_rewind_single_step(current
);
1399 restore_saved_sigmask();
1402 unsigned long __ro_after_init signal_minsigstksz
;
1405 * Determine the stack space required for guaranteed signal devliery.
1406 * This function is used to populate AT_MINSIGSTKSZ at process startup.
1407 * cpufeatures setup is assumed to be complete.
1409 void __init
minsigstksz_setup(void)
1411 struct rt_sigframe_user_layout user
;
1413 init_user_layout(&user
);
1416 * If this fails, SIGFRAME_MAXSZ needs to be enlarged. It won't
1417 * be big enough, but it's our best guess:
1419 if (WARN_ON(setup_sigframe_layout(&user
, true)))
1422 signal_minsigstksz
= sigframe_size(&user
) +
1423 round_up(sizeof(struct frame_record
), 16) +
1424 16; /* max alignment padding */
1428 * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as
1429 * changes likely come with new fields that should be added below.
1431 static_assert(NSIGILL
== 11);
1432 static_assert(NSIGFPE
== 15);
1433 static_assert(NSIGSEGV
== 10);
1434 static_assert(NSIGBUS
== 5);
1435 static_assert(NSIGTRAP
== 6);
1436 static_assert(NSIGCHLD
== 6);
1437 static_assert(NSIGSYS
== 2);
1438 static_assert(sizeof(siginfo_t
) == 128);
1439 static_assert(__alignof__(siginfo_t
) == 8);
1440 static_assert(offsetof(siginfo_t
, si_signo
) == 0x00);
1441 static_assert(offsetof(siginfo_t
, si_errno
) == 0x04);
1442 static_assert(offsetof(siginfo_t
, si_code
) == 0x08);
1443 static_assert(offsetof(siginfo_t
, si_pid
) == 0x10);
1444 static_assert(offsetof(siginfo_t
, si_uid
) == 0x14);
1445 static_assert(offsetof(siginfo_t
, si_tid
) == 0x10);
1446 static_assert(offsetof(siginfo_t
, si_overrun
) == 0x14);
1447 static_assert(offsetof(siginfo_t
, si_status
) == 0x18);
1448 static_assert(offsetof(siginfo_t
, si_utime
) == 0x20);
1449 static_assert(offsetof(siginfo_t
, si_stime
) == 0x28);
1450 static_assert(offsetof(siginfo_t
, si_value
) == 0x18);
1451 static_assert(offsetof(siginfo_t
, si_int
) == 0x18);
1452 static_assert(offsetof(siginfo_t
, si_ptr
) == 0x18);
1453 static_assert(offsetof(siginfo_t
, si_addr
) == 0x10);
1454 static_assert(offsetof(siginfo_t
, si_addr_lsb
) == 0x18);
1455 static_assert(offsetof(siginfo_t
, si_lower
) == 0x20);
1456 static_assert(offsetof(siginfo_t
, si_upper
) == 0x28);
1457 static_assert(offsetof(siginfo_t
, si_pkey
) == 0x20);
1458 static_assert(offsetof(siginfo_t
, si_perf_data
) == 0x18);
1459 static_assert(offsetof(siginfo_t
, si_perf_type
) == 0x20);
1460 static_assert(offsetof(siginfo_t
, si_perf_flags
) == 0x24);
1461 static_assert(offsetof(siginfo_t
, si_band
) == 0x10);
1462 static_assert(offsetof(siginfo_t
, si_fd
) == 0x18);
1463 static_assert(offsetof(siginfo_t
, si_call_addr
) == 0x10);
1464 static_assert(offsetof(siginfo_t
, si_syscall
) == 0x18);
1465 static_assert(offsetof(siginfo_t
, si_arch
) == 0x1c);