// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/signal.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/freezer.h>
#include <linux/stddef.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/rseq.h>
#include <linux/syscalls.h>
#include <linux/pkeys.h>

#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/elf.h>
#include <asm/exception.h>
#include <asm/cacheflush.h>
#include <asm/gcs.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/signal32.h>
#include <asm/traps.h>
#include <asm/vdso.h>

#ifdef CONFIG_ARM64_GCS
#define GCS_SIGNAL_CAP(addr) (((unsigned long)addr) & GCS_CAP_ADDR_MASK)

static bool gcs_signal_cap_valid(u64 addr, u64 val)
{
	return val == GCS_SIGNAL_CAP(addr);
}
#endif

/*
 * Do a signal return; undo the signal stack. These are aligned to 128-bit.
 */
struct rt_sigframe {
	struct siginfo info;
	struct ucontext uc;
};

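/*
 * Bookkeeping used while laying out the signal frame: the user addresses
 * of the frame and the unwinder's frame record, how much space has been
 * allocated so far (size), the most that may be allocated (limit), and
 * the offset of each optional record within the frame.
 */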
struct rt_sigframe_user_layout {
	struct rt_sigframe __user *sigframe;
	struct frame_record __user *next_frame;

	unsigned long size;	/* size of allocated sigframe data */
	unsigned long limit;	/* largest allowed size */

	unsigned long fpsimd_offset;
	unsigned long esr_offset;
	unsigned long gcs_offset;
	unsigned long sve_offset;
	unsigned long tpidr2_offset;
	unsigned long za_offset;
	unsigned long zt_offset;
	unsigned long fpmr_offset;
	unsigned long poe_offset;
	unsigned long extra_offset;
	unsigned long end_offset;
};

/*
 * Holds any EL0-controlled state that influences unprivileged memory accesses.
 * This includes both accesses done in userspace and uaccess done in the kernel.
 *
 * This state needs to be carefully managed to ensure that it doesn't cause
 * uaccess to fail when setting up the signal frame, and the signal handler
 * itself also expects a well-defined state when entered.
 */
struct user_access_state {
	u64 por_el0;
};

#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)

/*
 * Save the user access state into ua_state and reset it to disable any
 * restrictions.
 */
static void save_reset_user_access_state(struct user_access_state *ua_state)
{
	if (system_supports_poe()) {
		u64 por_enable_all = 0;

		for (int pkey = 0; pkey < arch_max_pkey(); pkey++)
			por_enable_all |= POE_RXW << (pkey * POR_BITS_PER_PKEY);

		ua_state->por_el0 = read_sysreg_s(SYS_POR_EL0);
		write_sysreg_s(por_enable_all, SYS_POR_EL0);

		/* Ensure that any subsequent uaccess observes the updated value */
		isb();
	}
}

/*
 * Set the user access state for invoking the signal handler.
 *
 * No uaccess should be done after that function is called.
 */
static void set_handler_user_access_state(void)
{
	if (system_supports_poe())
		write_sysreg_s(POR_EL0_INIT, SYS_POR_EL0);
}

/*
 * Restore the user access state to the values saved in ua_state.
 *
 * No uaccess should be done after that function is called.
 */
static void restore_user_access_state(const struct user_access_state *ua_state)
{
	if (system_supports_poe())
		write_sysreg_s(ua_state->por_el0, SYS_POR_EL0);
}

static void init_user_layout(struct rt_sigframe_user_layout *user)
{
	const size_t reserved_size =
		sizeof(user->sigframe->uc.uc_mcontext.__reserved);

	memset(user, 0, sizeof(*user));
	user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved);

	user->limit = user->size + reserved_size;

	user->limit -= TERMINATOR_SIZE;
	user->limit -= EXTRA_CONTEXT_SIZE;
	/* Reserve space for extension and terminator ^ */
}

static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
{
	return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
}

/*
 * Sanity limit on the approximate maximum size of signal frame we'll
 * try to generate.  Stack alignment padding and the frame record are
 * not taken into account.  This limit is not a guarantee and is
 * NOT ABI.
 */
#define SIGFRAME_MAXSZ SZ_256K

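/*
 * Allocate <size> bytes (padded to 16 bytes) in the frame.  If the base
 * __reserved[] area would overflow and extension is permitted, an
 * extra_context record is allocated first (via a recursive call with
 * extend == false) and the limit is raised towards SIGFRAME_MAXSZ so
 * that subsequent records land in the extra data area.
 */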
static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
			    unsigned long *offset, size_t size, bool extend)
{
	size_t padded_size = round_up(size, 16);

	if (padded_size > user->limit - user->size &&
	    !user->extra_offset &&
	    extend) {
		int ret;

		user->limit += EXTRA_CONTEXT_SIZE;
		ret = __sigframe_alloc(user, &user->extra_offset,
				       sizeof(struct extra_context), false);
		if (ret) {
			user->limit -= EXTRA_CONTEXT_SIZE;
			return ret;
		}

		/* Reserve space for the __reserved[] terminator */
		user->size += TERMINATOR_SIZE;

		/*
		 * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for
		 * the terminator:
		 */
		user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE;
	}

	/* Still not enough space?  Bad luck! */
	if (padded_size > user->limit - user->size)
		return -ENOMEM;

	*offset = user->size;
	user->size += padded_size;

	return 0;
}

/*
 * Allocate space for an optional record of <size> bytes in the user
 * signal frame.  The offset from the signal frame base address to the
 * allocated block is assigned to *offset.
 */
static int sigframe_alloc(struct rt_sigframe_user_layout *user,
			  unsigned long *offset, size_t size)
{
	return __sigframe_alloc(user, offset, size, true);
}

/* Allocate the null terminator record and prevent further allocations */
static int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
{
	int ret;

	/* Un-reserve the space reserved for the terminator: */
	user->limit += TERMINATOR_SIZE;

	ret = sigframe_alloc(user, &user->end_offset,
			     sizeof(struct _aarch64_ctx));
	if (ret)
		return ret;

	/* Prevent further allocation: */
	user->limit = user->size;

	return 0;
}

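/* Translate an offset in the layout into a user pointer within the frame */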
static void __user *apply_user_offset(
	struct rt_sigframe_user_layout const *user, unsigned long offset)
{
	char __user *base = (char __user *)user->sigframe;

	return base + offset;
}

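/*
 * Pointers to (and sizes of) the optional records located by
 * parse_user_sigframe(); a NULL pointer means the record was absent.
 */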
struct user_ctxs {
	struct fpsimd_context __user *fpsimd;
	u32 fpsimd_size;
	struct sve_context __user *sve;
	u32 sve_size;
	struct tpidr2_context __user *tpidr2;
	u32 tpidr2_size;
	struct za_context __user *za;
	u32 za_size;
	struct zt_context __user *zt;
	u32 zt_size;
	struct fpmr_context __user *fpmr;
	u32 fpmr_size;
	struct poe_context __user *poe;
	u32 poe_size;
	struct gcs_context __user *gcs;
	u32 gcs_size;
};

static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
{
	struct user_fpsimd_state const *fpsimd =
		&current->thread.uw.fpsimd_state;
	int err;

	/* copy the FP and status/control registers */
	err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
	__put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
	__put_user_error(fpsimd->fpcr, &ctx->fpcr, err);

	/* copy the magic/size information */
	__put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err);

	return err ? -EFAULT : 0;
}

static int restore_fpsimd_context(struct user_ctxs *user)
{
	struct user_fpsimd_state fpsimd;
	int err = 0;

	/* check the size information */
	if (user->fpsimd_size != sizeof(struct fpsimd_context))
		return -EINVAL;

	/* copy the FP and status/control registers */
	err = __copy_from_user(fpsimd.vregs, &(user->fpsimd->vregs),
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &(user->fpsimd->fpsr), err);
	__get_user_error(fpsimd.fpcr, &(user->fpsimd->fpcr), err);

	clear_thread_flag(TIF_SVE);
	current->thread.fp_type = FP_STATE_FPSIMD;

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}

static int preserve_fpmr_context(struct fpmr_context __user *ctx)
{
	int err = 0;

	current->thread.uw.fpmr = read_sysreg_s(SYS_FPMR);

	__put_user_error(FPMR_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(current->thread.uw.fpmr, &ctx->fpmr, err);

	return err;
}

static int restore_fpmr_context(struct user_ctxs *user)
{
	u64 fpmr;
	int err = 0;

	if (user->fpmr_size != sizeof(*user->fpmr))
		return -EINVAL;

	__get_user_error(fpmr, &user->fpmr->fpmr, err);
	if (!err)
		write_sysreg_s(fpmr, SYS_FPMR);

	return err;
}

static int preserve_poe_context(struct poe_context __user *ctx,
				const struct user_access_state *ua_state)
{
	int err = 0;

	__put_user_error(POE_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(ua_state->por_el0, &ctx->por_el0, err);

	return err;
}

static int restore_poe_context(struct user_ctxs *user,
			       struct user_access_state *ua_state)
{
	u64 por_el0;
	int err = 0;

	if (user->poe_size != sizeof(*user->poe))
		return -EINVAL;

	__get_user_error(por_el0, &(user->poe->por_el0), err);
	if (!err)
		ua_state->por_el0 = por_el0;

	return err;
}

#ifdef CONFIG_ARM64_SVE

static int preserve_sve_context(struct sve_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	u16 flags = 0;
	unsigned int vl = task_get_sve_vl(current);
	unsigned int vq = 0;

	if (thread_sm_enabled(&current->thread)) {
		vl = task_get_sme_vl(current);
		vq = sve_vq_from_vl(vl);
		flags |= SVE_SIG_FLAG_SM;
	} else if (current->thread.fp_type == FP_STATE_SVE) {
		vq = sve_vq_from_vl(vl);
	}

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(SVE_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	__put_user_error(flags, &ctx->flags, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		/*
		 * This assumes that the SVE state has already been saved to
		 * the task struct by calling the function
		 * fpsimd_signal_preserve_current_state().
		 */
		err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
				      current->thread.sve_state,
				      SVE_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	int err = 0;
	unsigned int vl, vq;
	struct user_fpsimd_state fpsimd;
	u16 user_vl, flags;

	if (user->sve_size < sizeof(*user->sve))
		return -EINVAL;

	__get_user_error(user_vl, &(user->sve->vl), err);
	__get_user_error(flags, &(user->sve->flags), err);
	if (err)
		return err;

	if (flags & SVE_SIG_FLAG_SM) {
		if (!system_supports_sme())
			return -EINVAL;

		vl = task_get_sme_vl(current);
	} else {
		/*
		 * A SME only system uses SVE for streaming mode so can
		 * have a SVE formatted context with a zero VL and no
		 * payload data.
		 */
		if (!system_supports_sve() && !system_supports_sme())
			return -EINVAL;

		vl = task_get_sve_vl(current);
	}

	if (user_vl != vl)
		return -EINVAL;

	if (user->sve_size == sizeof(*user->sve)) {
		clear_thread_flag(TIF_SVE);
		current->thread.svcr &= ~SVCR_SM_MASK;
		current->thread.fp_type = FP_STATE_FPSIMD;
		goto fpsimd_only;
	}

	vq = sve_vq_from_vl(vl);

	if (user->sve_size < SVE_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.sve_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch thread.sve_state */

	sve_alloc(current, true);
	if (!current->thread.sve_state) {
		clear_thread_flag(TIF_SVE);
		return -ENOMEM;
	}

	err = __copy_from_user(current->thread.sve_state,
			       (char __user const *)user->sve +
					SVE_SIG_REGS_OFFSET,
			       SVE_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	if (flags & SVE_SIG_FLAG_SM)
		current->thread.svcr |= SVCR_SM_MASK;
	else
		set_thread_flag(TIF_SVE);
	current->thread.fp_type = FP_STATE_SVE;

fpsimd_only:
	/* copy the FP and status/control registers */
	/* restore_sigframe() already checked that user->fpsimd != NULL. */
	err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs,
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err);
	__get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err);

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}

#else /* ! CONFIG_ARM64_SVE */

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	WARN_ON_ONCE(1);
	return -EINVAL;
}

/* Turn any non-optimised out attempts to use this into a link error: */
extern int preserve_sve_context(void __user *ctx);

#endif /* ! CONFIG_ARM64_SVE */

#ifdef CONFIG_ARM64_SME

static int preserve_tpidr2_context(struct tpidr2_context __user *ctx)
{
	int err = 0;

	current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);

	__put_user_error(TPIDR2_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(current->thread.tpidr2_el0, &ctx->tpidr2, err);

	return err;
}

static int restore_tpidr2_context(struct user_ctxs *user)
{
	u64 tpidr2_el0;
	int err = 0;

	if (user->tpidr2_size != sizeof(*user->tpidr2))
		return -EINVAL;

	__get_user_error(tpidr2_el0, &user->tpidr2->tpidr2, err);
	if (!err)
		write_sysreg_s(tpidr2_el0, SYS_TPIDR2_EL0);

	return err;
}

static int preserve_za_context(struct za_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	unsigned int vl = task_get_sme_vl(current);
	unsigned int vq;

	if (thread_za_enabled(&current->thread))
		vq = sve_vq_from_vl(vl);
	else
		vq = 0;

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(ZA_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(ZA_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		/*
		 * This assumes that the ZA state has already been saved to
		 * the task struct by calling the function
		 * fpsimd_signal_preserve_current_state().
		 */
		err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET,
				      current->thread.sme_state,
				      ZA_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}

static int restore_za_context(struct user_ctxs *user)
{
	int err = 0;
	unsigned int vq;
	u16 user_vl;

	if (user->za_size < sizeof(*user->za))
		return -EINVAL;

	__get_user_error(user_vl, &(user->za->vl), err);
	if (err)
		return err;

	if (user_vl != task_get_sme_vl(current))
		return -EINVAL;

	if (user->za_size == sizeof(*user->za)) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		return 0;
	}

	vq = sve_vq_from_vl(user_vl);

	if (user->za_size < ZA_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.sme_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch thread.sve_state */

	sme_alloc(current, true);
	if (!current->thread.sme_state) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		clear_thread_flag(TIF_SME);
		return -ENOMEM;
	}

	err = __copy_from_user(current->thread.sme_state,
			       (char __user const *)user->za +
					ZA_SIG_REGS_OFFSET,
			       ZA_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	set_thread_flag(TIF_SME);
	current->thread.svcr |= SVCR_ZA_MASK;

	return 0;
}

static int preserve_zt_context(struct zt_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];

	if (WARN_ON(!thread_za_enabled(&current->thread)))
		return -EINVAL;

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(ZT_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(ZT_SIG_CONTEXT_SIZE(1), 16),
			 &ctx->head.size, err);
	__put_user_error(1, &ctx->nregs, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	/*
	 * This assumes that the ZT state has already been saved to
	 * the task struct by calling the function
	 * fpsimd_signal_preserve_current_state().
	 */
	err |= __copy_to_user((char __user *)ctx + ZT_SIG_REGS_OFFSET,
			      thread_zt_state(&current->thread),
			      ZT_SIG_REGS_SIZE(1));

	return err ? -EFAULT : 0;
}

static int restore_zt_context(struct user_ctxs *user)
{
	int err;
	u16 nregs;

	/* ZA must be restored first for this check to be valid */
	if (!thread_za_enabled(&current->thread))
		return -EINVAL;

	if (user->zt_size != ZT_SIG_CONTEXT_SIZE(1))
		return -EINVAL;

	if (__copy_from_user(&nregs, &(user->zt->nregs), sizeof(nregs)))
		return -EFAULT;

	if (nregs != 1)
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.zt_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch ZT in thread state */

	err = __copy_from_user(thread_zt_state(&current->thread),
			       (char __user const *)user->zt +
					ZT_SIG_REGS_OFFSET,
			       ZT_SIG_REGS_SIZE(1));
	if (err)
		return -EFAULT;

	return 0;
}

#else /* ! CONFIG_ARM64_SME */

/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_tpidr2_context(void __user *ctx);
extern int restore_tpidr2_context(struct user_ctxs *user);
extern int preserve_za_context(void __user *ctx);
extern int restore_za_context(struct user_ctxs *user);
extern int preserve_zt_context(void __user *ctx);
extern int restore_zt_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_SME */

#ifdef CONFIG_ARM64_GCS

static int preserve_gcs_context(struct gcs_context __user *ctx)
{
	int err = 0;
	u64 gcspr = read_sysreg_s(SYS_GCSPR_EL0);

	/*
	 * If GCS is enabled we will add a cap token to the frame and
	 * include it in the GCSPR_EL0 we report, to support stack
	 * switching via sigreturn.  We do not allow enabling via
	 * sigreturn, so the token is only relevant for threads with
	 * GCS enabled.
	 */
	if (task_gcs_el0_enabled(current))
		gcspr -= 8;

	__put_user_error(GCS_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(gcspr, &ctx->gcspr, err);
	__put_user_error(0, &ctx->reserved, err);
	__put_user_error(current->thread.gcs_el0_mode,
			 &ctx->features_enabled, err);

	return err;
}

static int restore_gcs_context(struct user_ctxs *user)
{
	u64 gcspr, enabled;
	int err = 0;

	if (user->gcs_size != sizeof(*user->gcs))
		return -EINVAL;

	__get_user_error(gcspr, &user->gcs->gcspr, err);
	__get_user_error(enabled, &user->gcs->features_enabled, err);
	if (err)
		return err;

	/* Don't allow unknown modes */
	if (enabled & ~PR_SHADOW_STACK_SUPPORTED_STATUS_MASK)
		return -EINVAL;

	err = gcs_check_locked(current, enabled);
	if (err != 0)
		return err;

	/* Don't allow enabling */
	if (!task_gcs_el0_enabled(current) &&
	    (enabled & PR_SHADOW_STACK_ENABLE))
		return -EINVAL;

	/* If we are disabling, disable everything */
	if (!(enabled & PR_SHADOW_STACK_ENABLE))
		enabled = 0;

	current->thread.gcs_el0_mode = enabled;

	/*
	 * We let userspace set GCSPR_EL0 to anything here, we will
	 * validate later in gcs_restore_signal().
	 */
	write_sysreg_s(gcspr, SYS_GCSPR_EL0);

	return 0;
}

#else /* ! CONFIG_ARM64_GCS */

/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_gcs_context(void __user *ctx);
extern int restore_gcs_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_GCS */

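/*
 * Walk the records in the __reserved[] area of the user signal frame (and
 * in any extra_context data it chains to), checking alignment and size
 * bounds and recording in *user where each known record lives.  Unknown
 * magic values and duplicate records are rejected.
 */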
static int parse_user_sigframe(struct user_ctxs *user,
			       struct rt_sigframe __user *sf)
{
	struct sigcontext __user *const sc = &sf->uc.uc_mcontext;
	struct _aarch64_ctx __user *head;
	char __user *base = (char __user *)&sc->__reserved;
	size_t offset = 0;
	size_t limit = sizeof(sc->__reserved);
	bool have_extra_context = false;
	char const __user *const sfp = (char const __user *)sf;

	user->fpsimd = NULL;
	user->sve = NULL;
	user->tpidr2 = NULL;
	user->za = NULL;
	user->zt = NULL;
	user->fpmr = NULL;
	user->poe = NULL;
	user->gcs = NULL;

	if (!IS_ALIGNED((unsigned long)base, 16))
		goto invalid;

	while (1) {
		int err = 0;
		u32 magic, size;
		char const __user *userp;
		struct extra_context const __user *extra;
		u64 extra_datap;
		u32 extra_size;
		struct _aarch64_ctx const __user *end;
		u32 end_magic, end_size;

		if (limit - offset < sizeof(*head))
			goto invalid;

		if (!IS_ALIGNED(offset, 16))
			goto invalid;

		head = (struct _aarch64_ctx __user *)(base + offset);
		__get_user_error(magic, &head->magic, err);
		__get_user_error(size, &head->size, err);
		if (err)
			return err;

		if (limit - offset < size)
			goto invalid;

		switch (magic) {
		case 0:
			if (size)
				goto invalid;

			goto done;

		case FPSIMD_MAGIC:
			if (!system_supports_fpsimd())
				goto invalid;
			if (user->fpsimd)
				goto invalid;

			user->fpsimd = (struct fpsimd_context __user *)head;
			user->fpsimd_size = size;
			break;

		case ESR_MAGIC:
			/* ignore */
			break;

		case POE_MAGIC:
			if (!system_supports_poe())
				goto invalid;

			if (user->poe)
				goto invalid;

			user->poe = (struct poe_context __user *)head;
			user->poe_size = size;
			break;

		case SVE_MAGIC:
			if (!system_supports_sve() && !system_supports_sme())
				goto invalid;

			if (user->sve)
				goto invalid;

			user->sve = (struct sve_context __user *)head;
			user->sve_size = size;
			break;

		case TPIDR2_MAGIC:
			if (!system_supports_tpidr2())
				goto invalid;

			if (user->tpidr2)
				goto invalid;

			user->tpidr2 = (struct tpidr2_context __user *)head;
			user->tpidr2_size = size;
			break;

		case ZA_MAGIC:
			if (!system_supports_sme())
				goto invalid;

			if (user->za)
				goto invalid;

			user->za = (struct za_context __user *)head;
			user->za_size = size;
			break;

		case ZT_MAGIC:
			if (!system_supports_sme2())
				goto invalid;

			if (user->zt)
				goto invalid;

			user->zt = (struct zt_context __user *)head;
			user->zt_size = size;
			break;

		case FPMR_MAGIC:
			if (!system_supports_fpmr())
				goto invalid;

			if (user->fpmr)
				goto invalid;

			user->fpmr = (struct fpmr_context __user *)head;
			user->fpmr_size = size;
			break;

		case GCS_MAGIC:
			if (!system_supports_gcs())
				goto invalid;

			if (user->gcs)
				goto invalid;

			user->gcs = (struct gcs_context __user *)head;
			user->gcs_size = size;
			break;

		case EXTRA_MAGIC:
			if (have_extra_context)
				goto invalid;

			if (size < sizeof(*extra))
				goto invalid;

			userp = (char const __user *)head;

			extra = (struct extra_context const __user *)userp;
			userp += size;

			__get_user_error(extra_datap, &extra->datap, err);
			__get_user_error(extra_size, &extra->size, err);
			if (err)
				return err;

			/* Check for the dummy terminator in __reserved[]: */

			if (limit - offset - size < TERMINATOR_SIZE)
				goto invalid;

			end = (struct _aarch64_ctx const __user *)userp;
			userp += TERMINATOR_SIZE;

			__get_user_error(end_magic, &end->magic, err);
			__get_user_error(end_size, &end->size, err);
			if (err)
				return err;

			if (end_magic || end_size)
				goto invalid;

			/* Prevent looping/repeated parsing of extra_context */
			have_extra_context = true;

			base = (__force void __user *)extra_datap;
			if (!IS_ALIGNED((unsigned long)base, 16))
				goto invalid;

			if (!IS_ALIGNED(extra_size, 16))
				goto invalid;

			if (base != userp)
				goto invalid;

			/* Reject "unreasonably large" frames: */
			if (extra_size > sfp + SIGFRAME_MAXSZ - userp)
				goto invalid;

			/*
			 * Ignore trailing terminator in __reserved[]
			 * and start parsing extra data:
			 */
			offset = 0;
			limit = extra_size;

			if (!access_ok(base, limit))
				goto invalid;

			continue;

		default:
			goto invalid;
		}

		if (size < sizeof(*head))
			goto invalid;

		if (limit - offset < size)
			goto invalid;

		offset += size;
	}

done:
	return 0;

invalid:
	return -EINVAL;
}

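/*
 * Restore the signal mask, core registers and every optional state record
 * found in the user signal frame; called on the sigreturn path.
 */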
static int restore_sigframe(struct pt_regs *regs,
			    struct rt_sigframe __user *sf,
			    struct user_access_state *ua_state)
{
	sigset_t set;
	int i, err;
	struct user_ctxs user;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	for (i = 0; i < 31; i++)
		__get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	/*
	 * Avoid sys_rt_sigreturn() restarting.
	 */
	forget_syscall(regs);

	err |= !valid_user_regs(&regs->user_regs, current);
	if (err == 0)
		err = parse_user_sigframe(&user, sf);

	if (err == 0 && system_supports_fpsimd()) {
		if (!user.fpsimd)
			return -EINVAL;

		if (user.sve)
			err = restore_sve_fpsimd_context(&user);
		else
			err = restore_fpsimd_context(&user);
	}

	if (err == 0 && system_supports_gcs() && user.gcs)
		err = restore_gcs_context(&user);

	if (err == 0 && system_supports_tpidr2() && user.tpidr2)
		err = restore_tpidr2_context(&user);

	if (err == 0 && system_supports_fpmr() && user.fpmr)
		err = restore_fpmr_context(&user);

	if (err == 0 && system_supports_sme() && user.za)
		err = restore_za_context(&user);

	if (err == 0 && system_supports_sme2() && user.zt)
		err = restore_zt_context(&user);

	if (err == 0 && system_supports_poe() && user.poe)
		err = restore_poe_context(&user, ua_state);

	return err;
}

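/*
 * On sigreturn, validate and consume the GCS cap token pushed at signal
 * delivery by gcs_signal_entry(), then move GCSPR_EL0 past it.
 */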
#ifdef CONFIG_ARM64_GCS
static int gcs_restore_signal(void)
{
	unsigned long __user *gcspr_el0;
	u64 cap;
	int ret;

	if (!system_supports_gcs())
		return 0;

	if (!(current->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE))
		return 0;

	gcspr_el0 = (unsigned long __user *)read_sysreg_s(SYS_GCSPR_EL0);

	/*
	 * Ensure that any changes to the GCS done via GCS operations
	 * are visible to the normal reads we do to validate the
	 * token.
	 */
	gcsb_dsync();

	/*
	 * GCSPR_EL0 should be pointing at a capped GCS, read the cap.
	 * We don't enforce that this is in a GCS page, if it is not
	 * then faults will be generated on GCS operations - the main
	 * concern is to protect GCS pages.
	 */
	ret = copy_from_user(&cap, gcspr_el0, sizeof(cap));
	if (ret)
		return -EFAULT;

	/*
	 * Check that the cap is the actual GCS before replacing it.
	 */
	if (!gcs_signal_cap_valid((u64)gcspr_el0, cap))
		return -EINVAL;

	/* Invalidate the token to prevent reuse */
	put_user_gcs(0, (__user void*)gcspr_el0, &ret);
	if (ret != 0)
		return -EFAULT;

	write_sysreg_s(gcspr_el0 + 1, SYS_GCSPR_EL0);

	return 0;
}

#else
static int gcs_restore_signal(void) { return 0; }
#endif

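/*
 * sigreturn reads the frame back from regs->sp, so it only succeeds if sp
 * still points at a well-formed rt_sigframe; anything else delivers a
 * segfault via the badframe path.
 */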
SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe __user *frame;
	struct user_access_state ua_state;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 128-bit boundary, then 'sp' should
	 * be word aligned here.
	 */
	if (regs->sp & 15)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->sp;

	if (!access_ok(frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame, &ua_state))
		goto badframe;

	if (gcs_restore_signal())
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	restore_user_access_state(&ua_state);

	return regs->regs[0];

badframe:
	arm64_notify_segfault(regs->sp);
	return 0;
}

/*
 * Determine the layout of optional records in the signal frame
 *
 * add_all: if true, lays out the biggest possible signal frame for
 *	this task; otherwise, generates a layout for the current state
 *	of the task
 */
static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
				 bool add_all)
{
	int err;

	if (system_supports_fpsimd()) {
		err = sigframe_alloc(user, &user->fpsimd_offset,
				     sizeof(struct fpsimd_context));
		if (err)
			return err;
	}

	/* fault information, if valid */
	if (add_all || current->thread.fault_code) {
		err = sigframe_alloc(user, &user->esr_offset,
				     sizeof(struct esr_context));
		if (err)
			return err;
	}

#ifdef CONFIG_ARM64_GCS
	if (system_supports_gcs() && (add_all || current->thread.gcspr_el0)) {
		err = sigframe_alloc(user, &user->gcs_offset,
				     sizeof(struct gcs_context));
		if (err)
			return err;
	}
#endif

	if (system_supports_sve() || system_supports_sme()) {
		unsigned int vq = 0;

		if (add_all || current->thread.fp_type == FP_STATE_SVE ||
		    thread_sm_enabled(&current->thread)) {
			int vl = max(sve_max_vl(), sme_max_vl());

			if (!add_all)
				vl = thread_get_cur_vl(&current->thread);

			vq = sve_vq_from_vl(vl);
		}

		err = sigframe_alloc(user, &user->sve_offset,
				     SVE_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	if (system_supports_tpidr2()) {
		err = sigframe_alloc(user, &user->tpidr2_offset,
				     sizeof(struct tpidr2_context));
		if (err)
			return err;
	}

	if (system_supports_sme()) {
		unsigned int vl;
		unsigned int vq = 0;

		if (add_all)
			vl = sme_max_vl();
		else
			vl = task_get_sme_vl(current);

		if (thread_za_enabled(&current->thread))
			vq = sve_vq_from_vl(vl);

		err = sigframe_alloc(user, &user->za_offset,
				     ZA_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	if (system_supports_sme2()) {
		if (add_all || thread_za_enabled(&current->thread)) {
			err = sigframe_alloc(user, &user->zt_offset,
					     ZT_SIG_CONTEXT_SIZE(1));
			if (err)
				return err;
		}
	}

	if (system_supports_fpmr()) {
		err = sigframe_alloc(user, &user->fpmr_offset,
				     sizeof(struct fpmr_context));
		if (err)
			return err;
	}

	if (system_supports_poe()) {
		err = sigframe_alloc(user, &user->poe_offset,
				     sizeof(struct poe_context));
		if (err)
			return err;
	}

	return sigframe_alloc_end(user);
}

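/*
 * Populate the frame allocated by get_sigframe(): the unwinder's frame
 * record, core registers, signal mask, each optional record laid out by
 * setup_sigframe_layout(), the extra_context record (if any) and the
 * terminator.
 */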
static int setup_sigframe(struct rt_sigframe_user_layout *user,
			  struct pt_regs *regs, sigset_t *set,
			  const struct user_access_state *ua_state)
{
	int i, err = 0;
	struct rt_sigframe __user *sf = user->sigframe;

	/* set up the stack frame for unwinding */
	__put_user_error(regs->regs[29], &user->next_frame->fp, err);
	__put_user_error(regs->regs[30], &user->next_frame->lr, err);

	for (i = 0; i < 31; i++)
		__put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	__put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	if (err == 0 && system_supports_fpsimd()) {
		struct fpsimd_context __user *fpsimd_ctx =
			apply_user_offset(user, user->fpsimd_offset);
		err |= preserve_fpsimd_context(fpsimd_ctx);
	}

	/* fault information, if valid */
	if (err == 0 && user->esr_offset) {
		struct esr_context __user *esr_ctx =
			apply_user_offset(user, user->esr_offset);

		__put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
		__put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
		__put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
	}

	if (system_supports_gcs() && err == 0 && user->gcs_offset) {
		struct gcs_context __user *gcs_ctx =
			apply_user_offset(user, user->gcs_offset);
		err |= preserve_gcs_context(gcs_ctx);
	}

	/* Scalable Vector Extension state (including streaming), if present */
	if ((system_supports_sve() || system_supports_sme()) &&
	    err == 0 && user->sve_offset) {
		struct sve_context __user *sve_ctx =
			apply_user_offset(user, user->sve_offset);
		err |= preserve_sve_context(sve_ctx);
	}

	/* TPIDR2 if supported */
	if (system_supports_tpidr2() && err == 0) {
		struct tpidr2_context __user *tpidr2_ctx =
			apply_user_offset(user, user->tpidr2_offset);
		err |= preserve_tpidr2_context(tpidr2_ctx);
	}

	/* FPMR if supported */
	if (system_supports_fpmr() && err == 0) {
		struct fpmr_context __user *fpmr_ctx =
			apply_user_offset(user, user->fpmr_offset);
		err |= preserve_fpmr_context(fpmr_ctx);
	}

	if (system_supports_poe() && err == 0) {
		struct poe_context __user *poe_ctx =
			apply_user_offset(user, user->poe_offset);

		err |= preserve_poe_context(poe_ctx, ua_state);
	}

	/* ZA state if present */
	if (system_supports_sme() && err == 0 && user->za_offset) {
		struct za_context __user *za_ctx =
			apply_user_offset(user, user->za_offset);
		err |= preserve_za_context(za_ctx);
	}

	/* ZT state if present */
	if (system_supports_sme2() && err == 0 && user->zt_offset) {
		struct zt_context __user *zt_ctx =
			apply_user_offset(user, user->zt_offset);
		err |= preserve_zt_context(zt_ctx);
	}

	if (err == 0 && user->extra_offset) {
		char __user *sfp = (char __user *)user->sigframe;
		char __user *userp =
			apply_user_offset(user, user->extra_offset);

		struct extra_context __user *extra;
		struct _aarch64_ctx __user *end;
		u64 extra_datap;
		u32 extra_size;

		extra = (struct extra_context __user *)userp;
		userp += EXTRA_CONTEXT_SIZE;

		end = (struct _aarch64_ctx __user *)userp;
		userp += TERMINATOR_SIZE;

		/*
		 * extra_datap is just written to the signal frame.
		 * The value gets cast back to a void __user *
		 * during sigreturn.
		 */
		extra_datap = (__force u64)userp;
		extra_size = sfp + round_up(user->size, 16) - userp;

		__put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
		__put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
		__put_user_error(extra_datap, &extra->datap, err);
		__put_user_error(extra_size, &extra->size, err);

		/* Add the terminator */
		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	/* set the "end" magic */
	if (err == 0) {
		struct _aarch64_ctx __user *end =
			apply_user_offset(user, user->end_offset);

		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	return err;
}

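/*
 * Compute the frame layout and carve the frame record plus signal frame
 * out of the (possibly alternate) signal stack, checking that the result
 * is writable.
 */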
static int get_sigframe(struct rt_sigframe_user_layout *user,
			struct ksignal *ksig, struct pt_regs *regs)
{
	unsigned long sp, sp_top;
	int err;

	init_user_layout(user);
	err = setup_sigframe_layout(user, false);
	if (err)
		return err;

	sp = sp_top = sigsp(regs->sp, ksig);

	sp = round_down(sp - sizeof(struct frame_record), 16);
	user->next_frame = (struct frame_record __user *)sp;

	sp = round_down(sp, 16) - sigframe_size(user);
	user->sigframe = (struct rt_sigframe __user *)sp;

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(user->sigframe, sp_top - sp))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_ARM64_GCS

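/*
 * At signal delivery, push the trampoline address and a cap token onto
 * the task's Guarded Control Stack, so a GCS return through sigtramp is
 * permitted and gcs_restore_signal() can later validate the cap.
 */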
static int gcs_signal_entry(__sigrestore_t sigtramp, struct ksignal *ksig)
{
	unsigned long __user *gcspr_el0;
	int ret = 0;

	if (!system_supports_gcs())
		return 0;

	if (!task_gcs_el0_enabled(current))
		return 0;

	/*
	 * We are entering a signal handler, current register state is
	 * active.
	 */
	gcspr_el0 = (unsigned long __user *)read_sysreg_s(SYS_GCSPR_EL0);

	/*
	 * Push a cap and the GCS entry for the trampoline onto the GCS.
	 */
	put_user_gcs((unsigned long)sigtramp, gcspr_el0 - 2, &ret);
	put_user_gcs(GCS_SIGNAL_CAP(gcspr_el0 - 1), gcspr_el0 - 1, &ret);
	if (ret != 0)
		return ret;

	gcspr_el0 -= 2;
	write_sysreg_s((unsigned long)gcspr_el0, SYS_GCSPR_EL0);

	return 0;
}
#else

static int gcs_signal_entry(__sigrestore_t sigtramp, struct ksignal *ksig)
{
	return 0;
}

#endif

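/*
 * Redirect the saved user context to the signal handler: set up its
 * argument, stack and frame pointer and the return trampoline, and
 * establish the PSTATE (BTYPE, TCO), SME and GCS state that handlers
 * are entered with.
 */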
static int setup_return(struct pt_regs *regs, struct ksignal *ksig,
			struct rt_sigframe_user_layout *user, int usig)
{
	__sigrestore_t sigtramp;

	regs->regs[0] = usig;
	regs->sp = (unsigned long)user->sigframe;
	regs->regs[29] = (unsigned long)&user->next_frame->fp;
	regs->pc = (unsigned long)ksig->ka.sa.sa_handler;

	/*
	 * Signal delivery is a (wacky) indirect function call in
	 * userspace, so simulate the same setting of BTYPE as a BLR
	 * <register containing the signal handler entry point>.
	 * Signal delivery to a location in a PROT_BTI guarded page
	 * that is not a function entry point will now trigger a
	 * SIGILL in userspace.
	 *
	 * If the signal handler entry point is not in a PROT_BTI
	 * guarded page, this is harmless.
	 */
	if (system_supports_bti()) {
		regs->pstate &= ~PSR_BTYPE_MASK;
		regs->pstate |= PSR_BTYPE_C;
	}

	/* TCO (Tag Check Override) always cleared for signal handlers */
	regs->pstate &= ~PSR_TCO_BIT;

	/* Signal handlers are invoked with ZA and streaming mode disabled */
	if (system_supports_sme()) {
		/*
		 * If we were in streaming mode the saved register
		 * state was SVE but we will exit SM and use the
		 * FPSIMD register state - flush the saved FPSIMD
		 * register state in case it gets loaded.
		 */
		if (current->thread.svcr & SVCR_SM_MASK) {
			memset(&current->thread.uw.fpsimd_state, 0,
			       sizeof(current->thread.uw.fpsimd_state));
			current->thread.fp_type = FP_STATE_FPSIMD;
		}

		current->thread.svcr &= ~(SVCR_ZA_MASK |
					  SVCR_SM_MASK);
		sme_smstop();
	}

	if (ksig->ka.sa.sa_flags & SA_RESTORER)
		sigtramp = ksig->ka.sa.sa_restorer;
	else
		sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);

	regs->regs[30] = (unsigned long)sigtramp;

	return gcs_signal_entry(sigtramp, ksig);
}

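/*
 * Build the complete rt signal frame for a native task.  A non-zero
 * return makes signal_setup_done() force a SIGSEGV instead of running
 * the handler.
 */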
static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
			  struct pt_regs *regs)
{
	struct rt_sigframe_user_layout user;
	struct rt_sigframe __user *frame;
	struct user_access_state ua_state;
	int err = 0;

	fpsimd_signal_preserve_current_state();

	if (get_sigframe(&user, ksig, regs))
		return 1;

	save_reset_user_access_state(&ua_state);
	frame = user.sigframe;

	__put_user_error(0, &frame->uc.uc_flags, err);
	__put_user_error(NULL, &frame->uc.uc_link, err);

	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
	err |= setup_sigframe(&user, regs, set, &ua_state);
	if (err == 0) {
		err = setup_return(regs, ksig, &user, usig);
		if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
			err |= copy_siginfo_to_user(&frame->info, &ksig->info);
			regs->regs[1] = (unsigned long)&frame->info;
			regs->regs[2] = (unsigned long)&frame->uc;
		}
	}

	if (err == 0)
		set_handler_user_access_state();
	else
		restore_user_access_state(&ua_state);

	return err;
}

static void setup_restart_syscall(struct pt_regs *regs)
{
	if (is_compat_task())
		compat_setup_restart_syscall(regs);
	else
		regs->regs[8] = __NR_restart_syscall;
}

/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int usig = ksig->sig;
	int ret;

	rseq_signal_deliver(ksig, regs);

	/*
	 * Set up the stack frame
	 */
	if (is_compat_task()) {
		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			ret = compat_setup_rt_frame(usig, ksig, oldset, regs);
		else
			ret = compat_setup_frame(usig, ksig, oldset, regs);
	} else {
		ret = setup_rt_frame(usig, ksig, oldset, regs);
	}

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(&regs->user_regs, current);

	/* Step into the signal handler if we are stepping */
	signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
void do_signal(struct pt_regs *regs)
{
	unsigned long continue_addr = 0, restart_addr = 0;
	int retval = 0;
	struct ksignal ksig;
	bool syscall = in_syscall(regs);

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->pc;
		restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
		retval = regs->regs[0];

		/*
		 * Avoid additional syscall restarting via ret_to_user.
		 */
		forget_syscall(regs);

		/*
		 * Prepare for system call restart. We do this here so that a
		 * debugger will see the already changed PC.
		 */
		switch (retval) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTART_RESTARTBLOCK:
			regs->regs[0] = regs->orig_x0;
			regs->pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver. When running under ptrace, at this point
	 * the debugger may change all of our registers.
	 */
	if (get_signal(&ksig)) {
		/*
		 * Depending on the signal settings, we may need to revert the
		 * decision to restart the system call, but skip this if a
		 * debugger has chosen to restart at a different PC.
		 */
		if (regs->pc == restart_addr &&
		    (retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK ||
		     (retval == -ERESTARTSYS &&
		      !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
			syscall_set_return_value(current, regs, -EINTR, 0);
			regs->pc = continue_addr;
		}

		handle_signal(&ksig, regs);
		return;
	}

	/*
	 * Handle restarting a different system call. As above, if a debugger
	 * has chosen to restart at a different PC, ignore the restart.
	 */
	if (syscall && regs->pc == restart_addr) {
		if (retval == -ERESTART_RESTARTBLOCK)
			setup_restart_syscall(regs);
		user_rewind_single_step(current);
	}

	restore_saved_sigmask();
}

unsigned long __ro_after_init signal_minsigstksz;

/*
 * Determine the stack space required for guaranteed signal delivery.
 * This function is used to populate AT_MINSIGSTKSZ at process startup.
 * cpufeatures setup is assumed to be complete.
 */
void __init minsigstksz_setup(void)
{
	struct rt_sigframe_user_layout user;

	init_user_layout(&user);

	/*
	 * If this fails, SIGFRAME_MAXSZ needs to be enlarged.  It won't
	 * be big enough, but it's our best guess:
	 */
	if (WARN_ON(setup_sigframe_layout(&user, true)))
		return;

	signal_minsigstksz = sigframe_size(&user) +
		round_up(sizeof(struct frame_record), 16) +
		16; /* max alignment padding */
}

/*
 * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as
 * changes likely come with new fields that should be added below.
 */
static_assert(NSIGILL	== 11);
static_assert(NSIGFPE	== 15);
static_assert(NSIGSEGV	== 10);
static_assert(NSIGBUS	== 5);
static_assert(NSIGTRAP	== 6);
static_assert(NSIGCHLD	== 6);
static_assert(NSIGSYS	== 2);
static_assert(sizeof(siginfo_t) == 128);
static_assert(__alignof__(siginfo_t) == 8);
static_assert(offsetof(siginfo_t, si_signo)	== 0x00);
static_assert(offsetof(siginfo_t, si_errno)	== 0x04);
static_assert(offsetof(siginfo_t, si_code)	== 0x08);
static_assert(offsetof(siginfo_t, si_pid)	== 0x10);
static_assert(offsetof(siginfo_t, si_uid)	== 0x14);
static_assert(offsetof(siginfo_t, si_tid)	== 0x10);
static_assert(offsetof(siginfo_t, si_overrun)	== 0x14);
static_assert(offsetof(siginfo_t, si_status)	== 0x18);
static_assert(offsetof(siginfo_t, si_utime)	== 0x20);
static_assert(offsetof(siginfo_t, si_stime)	== 0x28);
static_assert(offsetof(siginfo_t, si_value)	== 0x18);
static_assert(offsetof(siginfo_t, si_int)	== 0x18);
static_assert(offsetof(siginfo_t, si_ptr)	== 0x18);
static_assert(offsetof(siginfo_t, si_addr)	== 0x10);
static_assert(offsetof(siginfo_t, si_addr_lsb)	== 0x18);
static_assert(offsetof(siginfo_t, si_lower)	== 0x20);
static_assert(offsetof(siginfo_t, si_upper)	== 0x28);
static_assert(offsetof(siginfo_t, si_pkey)	== 0x20);
static_assert(offsetof(siginfo_t, si_perf_data)	== 0x18);
static_assert(offsetof(siginfo_t, si_perf_type)	== 0x20);
static_assert(offsetof(siginfo_t, si_perf_flags) == 0x24);
static_assert(offsetof(siginfo_t, si_band)	== 0x10);
static_assert(offsetof(siginfo_t, si_fd)	== 0x18);
static_assert(offsetof(siginfo_t, si_call_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_syscall)	== 0x18);
static_assert(offsetof(siginfo_t, si_arch)	== 0x1c);
) == 0x1c);