// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>

#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
#include <asm/mte.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
#include <asm/syscall.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
struct pt_regs_offset {
        const char *name;
        int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
#define GPR_OFFSET_NAME(r) \
        {.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}

static const struct pt_regs_offset regoffset_table[] = {
        GPR_OFFSET_NAME(0),
        GPR_OFFSET_NAME(1),
        GPR_OFFSET_NAME(2),
        GPR_OFFSET_NAME(3),
        GPR_OFFSET_NAME(4),
        GPR_OFFSET_NAME(5),
        GPR_OFFSET_NAME(6),
        GPR_OFFSET_NAME(7),
        GPR_OFFSET_NAME(8),
        GPR_OFFSET_NAME(9),
        GPR_OFFSET_NAME(10),
        GPR_OFFSET_NAME(11),
        GPR_OFFSET_NAME(12),
        GPR_OFFSET_NAME(13),
        GPR_OFFSET_NAME(14),
        GPR_OFFSET_NAME(15),
        GPR_OFFSET_NAME(16),
        GPR_OFFSET_NAME(17),
        GPR_OFFSET_NAME(18),
        GPR_OFFSET_NAME(19),
        GPR_OFFSET_NAME(20),
        GPR_OFFSET_NAME(21),
        GPR_OFFSET_NAME(22),
        GPR_OFFSET_NAME(23),
        GPR_OFFSET_NAME(24),
        GPR_OFFSET_NAME(25),
        GPR_OFFSET_NAME(26),
        GPR_OFFSET_NAME(27),
        GPR_OFFSET_NAME(28),
        GPR_OFFSET_NAME(29),
        GPR_OFFSET_NAME(30),
        {.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
        REG_OFFSET_NAME(sp),
        REG_OFFSET_NAME(pc),
        REG_OFFSET_NAME(pstate),
        REG_OFFSET_END,
};
/**
 * regs_query_register_offset() - query register offset from its name
 * @name:       the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
        const struct pt_regs_offset *roff;

        for (roff = regoffset_table; roff->name != NULL; roff++)
                if (!strcmp(roff->name, name))
                        return roff->offset;
        return -EINVAL;
}
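/*
 * Illustrative use, not part of this file (error handling omitted): looking
 * up "x2" yields offsetof(struct pt_regs, regs[2]), which a caller can add
 * to a struct pt_regs pointer to read that register:
 *
 *      int off = regs_query_register_offset("x2");
 *      u64 val = *(u64 *)((char *)regs + off);
 */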
/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:       pt_regs which contains kernel stack pointer.
 * @addr:       address which is checked.
 *
 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
 * If @addr is within the kernel stack, it returns true. If not, returns false.
 */
static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
        return ((addr & ~(THREAD_SIZE - 1)) ==
                (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
                on_irq_stack(addr, NULL);
}
/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:       pt_regs which contains kernel stack pointer.
 * @n:          stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
        unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

        addr += n;
        if (regs_within_kernel_stack(regs, (unsigned long)addr))
                return *addr;
        else
                return 0;
}
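/*
 * For example, regs_get_kernel_stack_nth(regs, 0) reads the word at the
 * current kernel SP and regs_get_kernel_stack_nth(regs, 2) the word two
 * entries (16 bytes) above it, returning 0 once the computed address leaves
 * the task's stack (or the IRQ stack).
 */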
/*
 * TODO: does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
        /*
         * This would be better off in core code, but PTRACE_DETACH has
         * grown its fair share of arch-specific warts and changing it
         * is likely to cause regressions on obscure architectures.
         */
        user_disable_single_step(child);
}
#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
                                struct perf_sample_data *data,
                                struct pt_regs *regs)
{
        struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
        const char *desc = "Hardware breakpoint trap (ptrace)";

#ifdef CONFIG_COMPAT
        if (is_compat_task()) {
                int si_errno = 0;
                int i;

                for (i = 0; i < ARM_MAX_BRP; ++i) {
                        if (current->thread.debug.hbp_break[i] == bp) {
                                si_errno = (i << 1) + 1;
                                break;
                        }
                }

                for (i = 0; i < ARM_MAX_WRP; ++i) {
                        if (current->thread.debug.hbp_watch[i] == bp) {
                                si_errno = -((i << 1) + 1);
                                break;
                        }
                }
                arm64_force_sig_ptrace_errno_trap(si_errno, bkpt->trigger,
                                                  desc);
                return;
        }
#endif
        arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT, bkpt->trigger, desc);
}
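/*
 * The si_errno values computed above mirror the AArch32 virtual register
 * numbering used by compat_ptrace_gethbpregs()/sethbpregs() further down:
 * breakpoint slot i reports (i << 1) + 1 (positive), watchpoint slot i
 * reports -((i << 1) + 1) (negative), so a 32-bit debugger can map the trap
 * back to the (address, control) register pair it programmed.
 */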
/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
        int i;
        struct thread_struct *t = &tsk->thread;

        for (i = 0; i < ARM_MAX_BRP; i++) {
                if (t->debug.hbp_break[i]) {
                        unregister_hw_breakpoint(t->debug.hbp_break[i]);
                        t->debug.hbp_break[i] = NULL;
                }
        }

        for (i = 0; i < ARM_MAX_WRP; i++) {
                if (t->debug.hbp_watch[i]) {
                        unregister_hw_breakpoint(t->debug.hbp_watch[i]);
                        t->debug.hbp_watch[i] = NULL;
                }
        }
}

void ptrace_hw_copy_thread(struct task_struct *tsk)
{
        memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}
static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
                                               struct task_struct *tsk,
                                               unsigned long idx)
{
        struct perf_event *bp = ERR_PTR(-EINVAL);

        switch (note_type) {
        case NT_ARM_HW_BREAK:
                if (idx >= ARM_MAX_BRP)
                        goto out;
                idx = array_index_nospec(idx, ARM_MAX_BRP);
                bp = tsk->thread.debug.hbp_break[idx];
                break;
        case NT_ARM_HW_WATCH:
                if (idx >= ARM_MAX_WRP)
                        goto out;
                idx = array_index_nospec(idx, ARM_MAX_WRP);
                bp = tsk->thread.debug.hbp_watch[idx];
                break;
        }

out:
        return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
                                struct task_struct *tsk,
                                unsigned long idx,
                                struct perf_event *bp)
{
        int err = -EINVAL;

        switch (note_type) {
        case NT_ARM_HW_BREAK:
                if (idx >= ARM_MAX_BRP)
                        goto out;
                idx = array_index_nospec(idx, ARM_MAX_BRP);
                tsk->thread.debug.hbp_break[idx] = bp;
                err = 0;
                break;
        case NT_ARM_HW_WATCH:
                if (idx >= ARM_MAX_WRP)
                        goto out;
                idx = array_index_nospec(idx, ARM_MAX_WRP);
                tsk->thread.debug.hbp_watch[idx] = bp;
                err = 0;
                break;
        }

out:
        return err;
}
static struct perf_event *ptrace_hbp_create(unsigned int note_type,
                                            struct task_struct *tsk,
                                            unsigned long idx)
{
        struct perf_event *bp;
        struct perf_event_attr attr;
        int err, type;

        switch (note_type) {
        case NT_ARM_HW_BREAK:
                type = HW_BREAKPOINT_X;
                break;
        case NT_ARM_HW_WATCH:
                type = HW_BREAKPOINT_RW;
                break;
        default:
                return ERR_PTR(-EINVAL);
        }

        ptrace_breakpoint_init(&attr);

        /*
         * Initialise fields to sane defaults
         * (i.e. values that will pass validation).
         */
        attr.bp_addr    = 0;
        attr.bp_len     = HW_BREAKPOINT_LEN_4;
        attr.bp_type    = type;
        attr.disabled   = 1;

        bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
        if (IS_ERR(bp))
                return bp;

        err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
        if (err)
                return ERR_PTR(err);

        return bp;
}
static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
                                     struct arch_hw_breakpoint_ctrl ctrl,
                                     struct perf_event_attr *attr)
{
        int err, len, type, offset, disabled = !ctrl.enabled;

        attr->disabled = disabled;
        if (disabled)
                return 0;

        err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
        if (err)
                return err;

        switch (note_type) {
        case NT_ARM_HW_BREAK:
                if ((type & HW_BREAKPOINT_X) != type)
                        return -EINVAL;
                break;
        case NT_ARM_HW_WATCH:
                if ((type & HW_BREAKPOINT_RW) != type)
                        return -EINVAL;
                break;
        default:
                return -EINVAL;
        }

        attr->bp_len    = len;
        attr->bp_type   = type;
        attr->bp_addr   += offset;

        return 0;
}
static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
        u8 num;
        u32 reg = 0;

        switch (note_type) {
        case NT_ARM_HW_BREAK:
                num = hw_breakpoint_slots(TYPE_INST);
                break;
        case NT_ARM_HW_WATCH:
                num = hw_breakpoint_slots(TYPE_DATA);
                break;
        default:
                return -EINVAL;
        }

        reg |= debug_monitors_arch();
        reg <<= 8;
        reg |= num;

        *info = reg;
        return 0;
}
static int ptrace_hbp_get_ctrl(unsigned int note_type,
                               struct task_struct *tsk,
                               unsigned long idx,
                               u32 *ctrl)
{
        struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

        if (IS_ERR(bp))
                return PTR_ERR(bp);

        *ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
        return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
                               struct task_struct *tsk,
                               unsigned long idx,
                               u64 *addr)
{
        struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

        if (IS_ERR(bp))
                return PTR_ERR(bp);

        *addr = bp ? counter_arch_bp(bp)->address : 0;
        return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
                                                        struct task_struct *tsk,
                                                        unsigned long idx)
{
        struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

        if (!bp)
                bp = ptrace_hbp_create(note_type, tsk, idx);

        return bp;
}
static int ptrace_hbp_set_ctrl(unsigned int note_type,
                               struct task_struct *tsk,
                               unsigned long idx,
                               u32 uctrl)
{
        int err;
        struct perf_event *bp;
        struct perf_event_attr attr;
        struct arch_hw_breakpoint_ctrl ctrl;

        bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
        if (IS_ERR(bp))
                return PTR_ERR(bp);

        attr = bp->attr;
        decode_ctrl_reg(uctrl, &ctrl);
        err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
        if (err)
                return err;

        return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
                               struct task_struct *tsk,
                               unsigned long idx,
                               u64 addr)
{
        int err;
        struct perf_event *bp;
        struct perf_event_attr attr;

        bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
        if (IS_ERR(bp))
                return PTR_ERR(bp);

        attr = bp->attr;
        attr.bp_addr = addr;
        err = modify_user_hw_breakpoint(bp, &attr);
        return err;
}
#define PTRACE_HBP_ADDR_SZ      sizeof(u64)
#define PTRACE_HBP_CTRL_SZ      sizeof(u32)
#define PTRACE_HBP_PAD_SZ       sizeof(u32)

static int hw_break_get(struct task_struct *target,
                        const struct user_regset *regset,
                        struct membuf to)
{
        unsigned int note_type = regset->core_note_type;
        int ret, idx = 0;
        u32 info, ctrl;
        u64 addr;

        /* Resource info */
        ret = ptrace_hbp_get_resource_info(note_type, &info);
        if (ret)
                return ret;

        membuf_write(&to, &info, sizeof(info));
        membuf_zero(&to, sizeof(u32));
        /* (address, ctrl) registers */
        while (to.left) {
                ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
                if (ret)
                        return ret;
                ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
                if (ret)
                        return ret;
                membuf_store(&to, addr);
                membuf_store(&to, ctrl);
                membuf_zero(&to, sizeof(u32));
                idx++;
        }
        return 0;
}
static int hw_break_set(struct task_struct *target,
                        const struct user_regset *regset,
                        unsigned int pos, unsigned int count,
                        const void *kbuf, const void __user *ubuf)
{
        unsigned int note_type = regset->core_note_type;
        int ret, idx = 0, offset, limit;
        u32 ctrl;
        u64 addr;

        /* Resource info and pad */
        offset = offsetof(struct user_hwdebug_state, dbg_regs);
        ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
        if (ret)
                return ret;

        /* (address, ctrl) registers */
        limit = regset->n * regset->size;
        while (count && offset < limit) {
                if (count < PTRACE_HBP_ADDR_SZ)
                        return -EINVAL;
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
                                         offset, offset + PTRACE_HBP_ADDR_SZ);
                if (ret)
                        return ret;
                ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
                if (ret)
                        return ret;
                offset += PTRACE_HBP_ADDR_SZ;

                if (!count)
                        break;
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
                                         offset, offset + PTRACE_HBP_CTRL_SZ);
                if (ret)
                        return ret;
                ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
                if (ret)
                        return ret;
                offset += PTRACE_HBP_CTRL_SZ;

                /* Pad */
                ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
                                                offset,
                                                offset + PTRACE_HBP_PAD_SZ);
                if (ret)
                        return ret;
                offset += PTRACE_HBP_PAD_SZ;
                idx++;
        }

        return 0;
}
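/*
 * Tracer-side sketch of the layout consumed above (illustrative only; a
 * real debugger would check errors). struct user_hwdebug_state exposes the
 * resource info word followed by (address, ctrl, pad) register pairs:
 *
 *      struct user_hwdebug_state dbg;
 *      struct iovec iov = { .iov_base = &dbg, .iov_len = sizeof(dbg) };
 *
 *      ptrace(PTRACE_GETREGSET, pid, NT_ARM_HW_BREAK, &iov);
 *      dbg.dbg_regs[0].addr = addr;    // u64 address
 *      dbg.dbg_regs[0].ctrl = ctrl;    // u32 control; u32 pad follows
 *      ptrace(PTRACE_SETREGSET, pid, NT_ARM_HW_BREAK, &iov);
 */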
#endif  /* CONFIG_HAVE_HW_BREAKPOINT */
static int gpr_get(struct task_struct *target,
                   const struct user_regset *regset,
                   struct membuf to)
{
        struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;

        return membuf_write(&to, uregs, sizeof(*uregs));
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
{
        int ret;
        struct user_pt_regs newregs = task_pt_regs(target)->user_regs;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
        if (ret)
                return ret;

        if (!valid_user_regs(&newregs, target))
                return -EINVAL;

        task_pt_regs(target)->user_regs = newregs;
        return 0;
}
static int fpr_active(struct task_struct *target, const struct user_regset *regset)
{
        if (!system_supports_fpsimd())
                return -ENODEV;
        return regset->n;
}

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int __fpr_get(struct task_struct *target,
                     const struct user_regset *regset,
                     struct membuf to)
{
        struct user_fpsimd_state *uregs;

        sve_sync_to_fpsimd(target);

        uregs = &target->thread.uw.fpsimd_state;

        return membuf_write(&to, uregs, sizeof(*uregs));
}

static int fpr_get(struct task_struct *target, const struct user_regset *regset,
                   struct membuf to)
{
        if (!system_supports_fpsimd())
                return -EINVAL;

        if (target == current)
                fpsimd_preserve_current_state();

        return __fpr_get(target, regset, to);
}
static int __fpr_set(struct task_struct *target,
                     const struct user_regset *regset,
                     unsigned int pos, unsigned int count,
                     const void *kbuf, const void __user *ubuf,
                     unsigned int start_pos)
{
        int ret;
        struct user_fpsimd_state newstate;

        /*
         * Ensure target->thread.uw.fpsimd_state is up to date, so that a
         * short copyin can't resurrect stale data.
         */
        sve_sync_to_fpsimd(target);

        newstate = target->thread.uw.fpsimd_state;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
                                 start_pos, start_pos + sizeof(newstate));
        if (ret)
                return ret;

        target->thread.uw.fpsimd_state = newstate;

        return ret;
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
{
        int ret;

        if (!system_supports_fpsimd())
                return -EINVAL;

        ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
        if (ret)
                return ret;

        sve_sync_from_fpsimd_zeropad(target);
        fpsimd_flush_task_state(target);

        return ret;
}
static int tls_get(struct task_struct *target, const struct user_regset *regset,
                   struct membuf to)
{
        if (target == current)
                tls_preserve_current_state();

        return membuf_store(&to, target->thread.uw.tp_value);
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
{
        int ret;
        unsigned long tls = target->thread.uw.tp_value;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
        if (ret)
                return ret;

        target->thread.uw.tp_value = tls;
        return ret;
}
static int system_call_get(struct task_struct *target,
                           const struct user_regset *regset,
                           struct membuf to)
{
        return membuf_store(&to, task_pt_regs(target)->syscallno);
}

static int system_call_set(struct task_struct *target,
                           const struct user_regset *regset,
                           unsigned int pos, unsigned int count,
                           const void *kbuf, const void __user *ubuf)
{
        int syscallno = task_pt_regs(target)->syscallno;
        int ret;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
        if (ret)
                return ret;

        task_pt_regs(target)->syscallno = syscallno;
        return ret;
}
#ifdef CONFIG_ARM64_SVE

static void sve_init_header_from_task(struct user_sve_header *header,
                                      struct task_struct *target)
{
        unsigned int vq;

        memset(header, 0, sizeof(*header));

        header->flags = test_tsk_thread_flag(target, TIF_SVE) ?
                SVE_PT_REGS_SVE : SVE_PT_REGS_FPSIMD;
        if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
                header->flags |= SVE_PT_VL_INHERIT;

        header->vl = target->thread.sve_vl;
        vq = sve_vq_from_vl(header->vl);

        header->max_vl = sve_max_vl;
        header->size = SVE_PT_SIZE(vq, header->flags);
        header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
                                       SVE_PT_REGS_SVE);
}

static unsigned int sve_size_from_header(struct user_sve_header const *header)
{
        return ALIGN(header->size, SVE_VQ_BYTES);
}
static int sve_get(struct task_struct *target,
                   const struct user_regset *regset,
                   struct membuf to)
{
        struct user_sve_header header;
        unsigned int vq;
        unsigned long start, end;

        if (!system_supports_sve())
                return -EINVAL;

        /* Header */
        sve_init_header_from_task(&header, target);
        vq = sve_vq_from_vl(header.vl);

        membuf_write(&to, &header, sizeof(header));

        if (target == current)
                fpsimd_preserve_current_state();

        /* Registers: FPSIMD-only case */

        BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
        if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD)
                return __fpr_get(target, regset, to);

        /* Otherwise: full SVE case */

        BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
        start = SVE_PT_SVE_OFFSET;
        end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
        membuf_write(&to, target->thread.sve_state, end - start);

        start = end;
        end = SVE_PT_SVE_FPSR_OFFSET(vq);
        membuf_zero(&to, end - start);

        /*
         * Copy fpsr, and fpcr which must follow contiguously in
         * struct fpsimd_state:
         */
        start = end;
        end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
        membuf_write(&to, &target->thread.uw.fpsimd_state.fpsr, end - start);

        start = end;
        end = sve_size_from_header(&header);
        return membuf_zero(&to, end - start);
}
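/*
 * Illustrative tracer-side read of the layout produced above (not part of
 * this file; error handling omitted). The header always comes first, and
 * the register payload that follows is FPSIMD- or SVE-formatted depending
 * on header.flags & SVE_PT_REGS_MASK:
 *
 *      struct user_sve_header hdr;
 *      struct iovec iov = { .iov_base = &hdr, .iov_len = sizeof(hdr) };
 *      ptrace(PTRACE_GETREGSET, pid, NT_ARM_SVE, &iov);  // header only
 */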
static int sve_set(struct task_struct *target,
                   const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
{
        int ret;
        struct user_sve_header header;
        unsigned int vq;
        unsigned long start, end;

        if (!system_supports_sve())
                return -EINVAL;

        /* Header */
        if (count < sizeof(header))
                return -EINVAL;
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
                                 0, sizeof(header));
        if (ret)
                goto out;

        /*
         * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
         * sve_set_vector_length(), which will also validate them for us:
         */
        ret = sve_set_vector_length(target, header.vl,
                ((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
        if (ret)
                goto out;

        /* Actual VL set may be less than the user asked for: */
        vq = sve_vq_from_vl(target->thread.sve_vl);

        /* Registers: FPSIMD-only case */

        BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
        if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) {
                ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
                                SVE_PT_FPSIMD_OFFSET);
                clear_tsk_thread_flag(target, TIF_SVE);
                goto out;
        }

        /* Otherwise: full SVE case */

        /*
         * If setting a different VL from the requested VL and there is
         * register data, the data layout will be wrong: don't even
         * try to set the registers in this case.
         */
        if (count && vq != sve_vq_from_vl(header.vl)) {
                ret = -EIO;
                goto out;
        }

        sve_alloc(target);

        /*
         * Ensure target->thread.sve_state is up to date with target's
         * FPSIMD regs, so that a short copyin leaves trailing registers
         * unmodified.
         */
        fpsimd_sync_to_sve(target);
        set_tsk_thread_flag(target, TIF_SVE);

        BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
        start = SVE_PT_SVE_OFFSET;
        end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 target->thread.sve_state,
                                 start, end);
        if (ret)
                goto out;

        start = end;
        end = SVE_PT_SVE_FPSR_OFFSET(vq);
        ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
                                        start, end);
        if (ret)
                goto out;

        /*
         * Copy fpsr, and fpcr which must follow contiguously in
         * struct fpsimd_state:
         */
        start = end;
        end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 &target->thread.uw.fpsimd_state.fpsr,
                                 start, end);

out:
        fpsimd_flush_task_state(target);
        return ret;
}

#endif /* CONFIG_ARM64_SVE */
#ifdef CONFIG_ARM64_PTR_AUTH
static int pac_mask_get(struct task_struct *target,
                        const struct user_regset *regset,
                        struct membuf to)
{
        /*
         * The PAC bits can differ across data and instruction pointers
         * depending on TCR_EL1.TBID*, which we may make use of in future, so
         * we expose separate masks.
         */
        unsigned long mask = ptrauth_user_pac_mask();
        struct user_pac_mask uregs = {
                .data_mask = mask,
                .insn_mask = mask,
        };

        if (!system_supports_address_auth())
                return -EINVAL;

        return membuf_write(&to, &uregs, sizeof(uregs));
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static __uint128_t pac_key_to_user(const struct ptrauth_key *key)
{
        return (__uint128_t)key->hi << 64 | key->lo;
}
static struct ptrauth_key pac_key_from_user(__uint128_t ukey)
{
        struct ptrauth_key key = {
                .lo = (unsigned long)ukey,
                .hi = (unsigned long)(ukey >> 64),
        };

        return key;
}
*ukeys
,
928 const struct ptrauth_keys_user
*keys
)
930 ukeys
->apiakey
= pac_key_to_user(&keys
->apia
);
931 ukeys
->apibkey
= pac_key_to_user(&keys
->apib
);
932 ukeys
->apdakey
= pac_key_to_user(&keys
->apda
);
933 ukeys
->apdbkey
= pac_key_to_user(&keys
->apdb
);
936 static void pac_address_keys_from_user(struct ptrauth_keys_user
*keys
,
937 const struct user_pac_address_keys
*ukeys
)
939 keys
->apia
= pac_key_from_user(ukeys
->apiakey
);
940 keys
->apib
= pac_key_from_user(ukeys
->apibkey
);
941 keys
->apda
= pac_key_from_user(ukeys
->apdakey
);
942 keys
->apdb
= pac_key_from_user(ukeys
->apdbkey
);
static int pac_address_keys_get(struct task_struct *target,
                                const struct user_regset *regset,
                                struct membuf to)
{
        struct ptrauth_keys_user *keys = &target->thread.keys_user;
        struct user_pac_address_keys user_keys;

        if (!system_supports_address_auth())
                return -EINVAL;

        pac_address_keys_to_user(&user_keys, keys);

        return membuf_write(&to, &user_keys, sizeof(user_keys));
}

static int pac_address_keys_set(struct task_struct *target,
                                const struct user_regset *regset,
                                unsigned int pos, unsigned int count,
                                const void *kbuf, const void __user *ubuf)
{
        struct ptrauth_keys_user *keys = &target->thread.keys_user;
        struct user_pac_address_keys user_keys;
        int ret;

        if (!system_supports_address_auth())
                return -EINVAL;

        pac_address_keys_to_user(&user_keys, keys);
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 &user_keys, 0, -1);
        if (ret)
                return ret;
        pac_address_keys_from_user(keys, &user_keys);

        return 0;
}
static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys,
                                     const struct ptrauth_keys_user *keys)
{
        ukeys->apgakey = pac_key_to_user(&keys->apga);
}

static void pac_generic_keys_from_user(struct ptrauth_keys_user *keys,
                                       const struct user_pac_generic_keys *ukeys)
{
        keys->apga = pac_key_from_user(ukeys->apgakey);
}
static int pac_generic_keys_get(struct task_struct *target,
                                const struct user_regset *regset,
                                struct membuf to)
{
        struct ptrauth_keys_user *keys = &target->thread.keys_user;
        struct user_pac_generic_keys user_keys;

        if (!system_supports_generic_auth())
                return -EINVAL;

        pac_generic_keys_to_user(&user_keys, keys);

        return membuf_write(&to, &user_keys, sizeof(user_keys));
}

static int pac_generic_keys_set(struct task_struct *target,
                                const struct user_regset *regset,
                                unsigned int pos, unsigned int count,
                                const void *kbuf, const void __user *ubuf)
{
        struct ptrauth_keys_user *keys = &target->thread.keys_user;
        struct user_pac_generic_keys user_keys;
        int ret;

        if (!system_supports_generic_auth())
                return -EINVAL;

        pac_generic_keys_to_user(&user_keys, keys);
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 &user_keys, 0, -1);
        if (ret)
                return ret;
        pac_generic_keys_from_user(keys, &user_keys);

        return 0;
}
#endif /* CONFIG_CHECKPOINT_RESTORE */
#endif /* CONFIG_ARM64_PTR_AUTH */
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
static int tagged_addr_ctrl_get(struct task_struct *target,
                                const struct user_regset *regset,
                                struct membuf to)
{
        long ctrl = get_tagged_addr_ctrl(target);

        if (IS_ERR_VALUE(ctrl))
                return ctrl;

        return membuf_write(&to, &ctrl, sizeof(ctrl));
}

static int tagged_addr_ctrl_set(struct task_struct *target,
                                const struct user_regset *regset,
                                unsigned int pos, unsigned int count,
                                const void *kbuf, const void __user *ubuf)
{
        int ret;
        long ctrl;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1);
        if (ret)
                return ret;

        return set_tagged_addr_ctrl(target, ctrl);
}
#endif
enum aarch64_regset {
        REGSET_GPR,
        REGSET_FPR,
        REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
        REGSET_HW_BREAK,
        REGSET_HW_WATCH,
#endif
        REGSET_SYSTEM_CALL,
#ifdef CONFIG_ARM64_SVE
        REGSET_SVE,
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
        REGSET_PAC_MASK,
#ifdef CONFIG_CHECKPOINT_RESTORE
        REGSET_PACA_KEYS,
        REGSET_PACG_KEYS,
#endif
#endif
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
        REGSET_TAGGED_ADDR_CTRL,
#endif
};
static const struct user_regset aarch64_regsets[] = {
        [REGSET_GPR] = {
                .core_note_type = NT_PRSTATUS,
                .n = sizeof(struct user_pt_regs) / sizeof(u64),
                .size = sizeof(u64),
                .align = sizeof(u64),
                .regset_get = gpr_get,
                .set = gpr_set
        },
        [REGSET_FPR] = {
                .core_note_type = NT_PRFPREG,
                .n = sizeof(struct user_fpsimd_state) / sizeof(u32),
                /*
                 * We pretend we have 32-bit registers because the fpsr and
                 * fpcr are 32-bits wide.
                 */
                .size = sizeof(u32),
                .align = sizeof(u32),
                .active = fpr_active,
                .regset_get = fpr_get,
                .set = fpr_set
        },
        [REGSET_TLS] = {
                .core_note_type = NT_ARM_TLS,
                .n = 1,
                .size = sizeof(void *),
                .align = sizeof(void *),
                .regset_get = tls_get,
                .set = tls_set,
        },
#ifdef CONFIG_HAVE_HW_BREAKPOINT
        [REGSET_HW_BREAK] = {
                .core_note_type = NT_ARM_HW_BREAK,
                .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
                .size = sizeof(u32),
                .align = sizeof(u32),
                .regset_get = hw_break_get,
                .set = hw_break_set,
        },
        [REGSET_HW_WATCH] = {
                .core_note_type = NT_ARM_HW_WATCH,
                .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
                .size = sizeof(u32),
                .align = sizeof(u32),
                .regset_get = hw_break_get,
                .set = hw_break_set,
        },
#endif
        [REGSET_SYSTEM_CALL] = {
                .core_note_type = NT_ARM_SYSTEM_CALL,
                .n = 1,
                .size = sizeof(int),
                .align = sizeof(int),
                .regset_get = system_call_get,
                .set = system_call_set,
        },
#ifdef CONFIG_ARM64_SVE
        [REGSET_SVE] = { /* Scalable Vector Extension */
                .core_note_type = NT_ARM_SVE,
                .n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),
                                  SVE_VQ_BYTES),
                .size = SVE_VQ_BYTES,
                .align = SVE_VQ_BYTES,
                .regset_get = sve_get,
                .set = sve_set,
        },
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
        [REGSET_PAC_MASK] = {
                .core_note_type = NT_ARM_PAC_MASK,
                .n = sizeof(struct user_pac_mask) / sizeof(u64),
                .size = sizeof(u64),
                .align = sizeof(u64),
                .regset_get = pac_mask_get,
                /* this cannot be set dynamically */
        },
#ifdef CONFIG_CHECKPOINT_RESTORE
        [REGSET_PACA_KEYS] = {
                .core_note_type = NT_ARM_PACA_KEYS,
                .n = sizeof(struct user_pac_address_keys) / sizeof(__uint128_t),
                .size = sizeof(__uint128_t),
                .align = sizeof(__uint128_t),
                .regset_get = pac_address_keys_get,
                .set = pac_address_keys_set,
        },
        [REGSET_PACG_KEYS] = {
                .core_note_type = NT_ARM_PACG_KEYS,
                .n = sizeof(struct user_pac_generic_keys) / sizeof(__uint128_t),
                .size = sizeof(__uint128_t),
                .align = sizeof(__uint128_t),
                .regset_get = pac_generic_keys_get,
                .set = pac_generic_keys_set,
        },
#endif
#endif
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
        [REGSET_TAGGED_ADDR_CTRL] = {
                .core_note_type = NT_ARM_TAGGED_ADDR_CTRL,
                .n = 1,
                .size = sizeof(long),
                .align = sizeof(long),
                .regset_get = tagged_addr_ctrl_get,
                .set = tagged_addr_ctrl_set,
        },
#endif
};
static const struct user_regset_view user_aarch64_view = {
        .name = "aarch64", .e_machine = EM_AARCH64,
        .regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};
#ifdef CONFIG_COMPAT
enum compat_regset {
        REGSET_COMPAT_GPR,
        REGSET_COMPAT_VFP,
};
static inline compat_ulong_t compat_get_user_reg(struct task_struct *task, int idx)
{
        struct pt_regs *regs = task_pt_regs(task);

        switch (idx) {
        case 15:
                return regs->pc;
        case 16:
                return pstate_to_compat_psr(regs->pstate);
        case 17:
                return regs->orig_x0;
        default:
                return regs->regs[idx];
        }
}
static int compat_gpr_get(struct task_struct *target,
                          const struct user_regset *regset,
                          struct membuf to)
{
        int i = 0;

        while (to.left)
                membuf_store(&to, compat_get_user_reg(target, i++));
        return 0;
}
static int compat_gpr_set(struct task_struct *target,
                          const struct user_regset *regset,
                          unsigned int pos, unsigned int count,
                          const void *kbuf, const void __user *ubuf)
{
        struct pt_regs newregs;
        int ret = 0;
        unsigned int i, start, num_regs;

        /* Calculate the number of AArch32 registers contained in count */
        num_regs = count / regset->size;

        /* Convert pos into a register number */
        start = pos / regset->size;

        if (start + num_regs > regset->n)
                return -EIO;

        newregs = *task_pt_regs(target);

        for (i = 0; i < num_regs; ++i) {
                unsigned int idx = start + i;
                compat_ulong_t reg;

                if (kbuf) {
                        memcpy(&reg, kbuf, sizeof(reg));
                        kbuf += sizeof(reg);
                } else {
                        ret = copy_from_user(&reg, ubuf, sizeof(reg));
                        if (ret) {
                                ret = -EFAULT;
                                break;
                        }

                        ubuf += sizeof(reg);
                }

                switch (idx) {
                case 15:
                        newregs.pc = reg;
                        break;
                case 16:
                        reg = compat_psr_to_pstate(reg);
                        newregs.pstate = reg;
                        break;
                case 17:
                        newregs.orig_x0 = reg;
                        break;
                default:
                        newregs.regs[idx] = reg;
                }
        }

        if (valid_user_regs(&newregs.user_regs, target))
                *task_pt_regs(target) = newregs;
        else
                ret = -EINVAL;

        return ret;
}
*target
,
1294 const struct user_regset
*regset
,
1297 struct user_fpsimd_state
*uregs
;
1298 compat_ulong_t fpscr
;
1300 if (!system_supports_fpsimd())
1303 uregs
= &target
->thread
.uw
.fpsimd_state
;
1305 if (target
== current
)
1306 fpsimd_preserve_current_state();
1309 * The VFP registers are packed into the fpsimd_state, so they all sit
1310 * nicely together for us. We just need to create the fpscr separately.
1312 membuf_write(&to
, uregs
, VFP_STATE_SIZE
- sizeof(compat_ulong_t
));
1313 fpscr
= (uregs
->fpsr
& VFP_FPSCR_STAT_MASK
) |
1314 (uregs
->fpcr
& VFP_FPSCR_CTRL_MASK
);
1315 return membuf_store(&to
, fpscr
);
1318 static int compat_vfp_set(struct task_struct
*target
,
1319 const struct user_regset
*regset
,
1320 unsigned int pos
, unsigned int count
,
1321 const void *kbuf
, const void __user
*ubuf
)
1323 struct user_fpsimd_state
*uregs
;
1324 compat_ulong_t fpscr
;
1325 int ret
, vregs_end_pos
;
1327 if (!system_supports_fpsimd())
1330 uregs
= &target
->thread
.uw
.fpsimd_state
;
1332 vregs_end_pos
= VFP_STATE_SIZE
- sizeof(compat_ulong_t
);
1333 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, uregs
, 0,
1336 if (count
&& !ret
) {
1337 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &fpscr
,
1338 vregs_end_pos
, VFP_STATE_SIZE
);
1340 uregs
->fpsr
= fpscr
& VFP_FPSCR_STAT_MASK
;
1341 uregs
->fpcr
= fpscr
& VFP_FPSCR_CTRL_MASK
;
1345 fpsimd_flush_task_state(target
);
1349 static int compat_tls_get(struct task_struct
*target
,
1350 const struct user_regset
*regset
,
1353 return membuf_store(&to
, (compat_ulong_t
)target
->thread
.uw
.tp_value
);
1356 static int compat_tls_set(struct task_struct
*target
,
1357 const struct user_regset
*regset
, unsigned int pos
,
1358 unsigned int count
, const void *kbuf
,
1359 const void __user
*ubuf
)
1362 compat_ulong_t tls
= target
->thread
.uw
.tp_value
;
1364 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &tls
, 0, -1);
1368 target
->thread
.uw
.tp_value
= tls
;
static const struct user_regset aarch32_regsets[] = {
        [REGSET_COMPAT_GPR] = {
                .core_note_type = NT_PRSTATUS,
                .n = COMPAT_ELF_NGREG,
                .size = sizeof(compat_elf_greg_t),
                .align = sizeof(compat_elf_greg_t),
                .regset_get = compat_gpr_get,
                .set = compat_gpr_set
        },
        [REGSET_COMPAT_VFP] = {
                .core_note_type = NT_ARM_VFP,
                .n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
                .size = sizeof(compat_ulong_t),
                .align = sizeof(compat_ulong_t),
                .active = fpr_active,
                .regset_get = compat_vfp_get,
                .set = compat_vfp_set
        },
};

static const struct user_regset_view user_aarch32_view = {
        .name = "aarch32", .e_machine = EM_ARM,
        .regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};
static const struct user_regset aarch32_ptrace_regsets[] = {
        [REGSET_GPR] = {
                .core_note_type = NT_PRSTATUS,
                .n = COMPAT_ELF_NGREG,
                .size = sizeof(compat_elf_greg_t),
                .align = sizeof(compat_elf_greg_t),
                .regset_get = compat_gpr_get,
                .set = compat_gpr_set
        },
        [REGSET_FPR] = {
                .core_note_type = NT_ARM_VFP,
                .n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
                .size = sizeof(compat_ulong_t),
                .align = sizeof(compat_ulong_t),
                .regset_get = compat_vfp_get,
                .set = compat_vfp_set
        },
        [REGSET_TLS] = {
                .core_note_type = NT_ARM_TLS,
                .n = 1,
                .size = sizeof(compat_ulong_t),
                .align = sizeof(compat_ulong_t),
                .regset_get = compat_tls_get,
                .set = compat_tls_set,
        },
#ifdef CONFIG_HAVE_HW_BREAKPOINT
        [REGSET_HW_BREAK] = {
                .core_note_type = NT_ARM_HW_BREAK,
                .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
                .size = sizeof(u32),
                .align = sizeof(u32),
                .regset_get = hw_break_get,
                .set = hw_break_set,
        },
        [REGSET_HW_WATCH] = {
                .core_note_type = NT_ARM_HW_WATCH,
                .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
                .size = sizeof(u32),
                .align = sizeof(u32),
                .regset_get = hw_break_get,
                .set = hw_break_set,
        },
#endif
        [REGSET_SYSTEM_CALL] = {
                .core_note_type = NT_ARM_SYSTEM_CALL,
                .n = 1,
                .size = sizeof(int),
                .align = sizeof(int),
                .regset_get = system_call_get,
                .set = system_call_set,
        },
};

static const struct user_regset_view user_aarch32_ptrace_view = {
        .name = "aarch32", .e_machine = EM_ARM,
        .regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
};
static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
                                   compat_ulong_t __user *ret)
{
        compat_ulong_t tmp;

        if (off & 3)
                return -EIO;

        if (off == COMPAT_PT_TEXT_ADDR)
                tmp = tsk->mm->start_code;
        else if (off == COMPAT_PT_DATA_ADDR)
                tmp = tsk->mm->start_data;
        else if (off == COMPAT_PT_TEXT_END_ADDR)
                tmp = tsk->mm->end_code;
        else if (off < sizeof(compat_elf_gregset_t))
                tmp = compat_get_user_reg(tsk, off >> 2);
        else if (off >= COMPAT_USER_SZ)
                return -EIO;
        else
                tmp = 0;

        return put_user(tmp, ret);
}
static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
                                    compat_ulong_t val)
{
        struct pt_regs newregs = *task_pt_regs(tsk);
        unsigned int idx = off / 4;

        if (off & 3 || off >= COMPAT_USER_SZ)
                return -EIO;

        if (off >= sizeof(compat_elf_gregset_t))
                return 0;

        switch (idx) {
        case 15:
                newregs.pc = val;
                break;
        case 16:
                newregs.pstate = compat_psr_to_pstate(val);
                break;
        case 17:
                newregs.orig_x0 = val;
                break;
        default:
                newregs.regs[idx] = val;
        }

        if (!valid_user_regs(&newregs.user_regs, tsk))
                return -EINVAL;

        *task_pt_regs(tsk) = newregs;
        return 0;
}
#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
        return (abs(num) - 1) >> 1;
}
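/*
 * E.g. num 1 and 2 (breakpoint 0 address/control) and num -1 and -2
 * (watchpoint 0 address/control) all map to idx 0; num 3/4 and -3/-4 map
 * to idx 1, and so on.
 */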
static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
{
        u8 num_brps, num_wrps, debug_arch, wp_len;
        u32 reg = 0;

        num_brps        = hw_breakpoint_slots(TYPE_INST);
        num_wrps        = hw_breakpoint_slots(TYPE_DATA);

        debug_arch      = debug_monitors_arch();
        wp_len          = 8;
        reg             |= debug_arch;
        reg             <<= 8;
        reg             |= wp_len;
        reg             <<= 8;
        reg             |= num_wrps;
        reg             <<= 8;
        reg             |= num_brps;

        *kdata = reg;
        return 0;
}

static int compat_ptrace_hbp_get(unsigned int note_type,
                                 struct task_struct *tsk,
                                 compat_long_t num,
                                 u32 *kdata)
{
        u64 addr = 0;
        u32 ctrl = 0;

        int err, idx = compat_ptrace_hbp_num_to_idx(num);

        if (num & 1) {
                err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
                *kdata = (u32)addr;
        } else {
                err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
                *kdata = ctrl;
        }

        return err;
}

static int compat_ptrace_hbp_set(unsigned int note_type,
                                 struct task_struct *tsk,
                                 compat_long_t num,
                                 u32 *kdata)
{
        u64 addr;
        u32 ctrl;

        int err, idx = compat_ptrace_hbp_num_to_idx(num);

        if (num & 1) {
                addr = *kdata;
                err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
        } else {
                ctrl = *kdata;
                err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
        }

        return err;
}
static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
                                    compat_ulong_t __user *data)
{
        int ret;
        u32 kdata;

        /* Watchpoint */
        if (num < 0) {
                ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
        /* Resource info */
        } else if (num == 0) {
                ret = compat_ptrace_hbp_get_resource_info(&kdata);
        /* Breakpoint */
        } else {
                ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
        }

        if (!ret)
                ret = put_user(kdata, data);

        return ret;
}

static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
                                    compat_ulong_t __user *data)
{
        int ret;
        u32 kdata = 0;

        if (num == 0)
                return 0;

        ret = get_user(kdata, data);
        if (ret)
                return ret;

        if (num < 0)
                ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
        else
                ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);

        return ret;
}
#endif  /* CONFIG_HAVE_HW_BREAKPOINT */
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                        compat_ulong_t caddr, compat_ulong_t cdata)
{
        unsigned long addr = caddr;
        unsigned long data = cdata;
        void __user *datap = compat_ptr(data);
        int ret;

        switch (request) {
        case PTRACE_PEEKUSR:
                ret = compat_ptrace_read_user(child, addr, datap);
                break;

        case PTRACE_POKEUSR:
                ret = compat_ptrace_write_user(child, addr, data);
                break;

        case COMPAT_PTRACE_GETREGS:
                ret = copy_regset_to_user(child,
                                          &user_aarch32_view,
                                          REGSET_COMPAT_GPR,
                                          0, sizeof(compat_elf_gregset_t),
                                          datap);
                break;

        case COMPAT_PTRACE_SETREGS:
                ret = copy_regset_from_user(child,
                                            &user_aarch32_view,
                                            REGSET_COMPAT_GPR,
                                            0, sizeof(compat_elf_gregset_t),
                                            datap);
                break;

        case COMPAT_PTRACE_GET_THREAD_AREA:
                ret = put_user((compat_ulong_t)child->thread.uw.tp_value,
                               (compat_ulong_t __user *)datap);
                break;

        case COMPAT_PTRACE_SET_SYSCALL:
                task_pt_regs(child)->syscallno = data;
                ret = 0;
                break;

        case COMPAT_PTRACE_GETVFPREGS:
                ret = copy_regset_to_user(child,
                                          &user_aarch32_view,
                                          REGSET_COMPAT_VFP,
                                          0, VFP_STATE_SIZE,
                                          datap);
                break;

        case COMPAT_PTRACE_SETVFPREGS:
                ret = copy_regset_from_user(child,
                                            &user_aarch32_view,
                                            REGSET_COMPAT_VFP,
                                            0, VFP_STATE_SIZE,
                                            datap);
                break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
        case COMPAT_PTRACE_GETHBPREGS:
                ret = compat_ptrace_gethbpregs(child, addr, datap);
                break;

        case COMPAT_PTRACE_SETHBPREGS:
                ret = compat_ptrace_sethbpregs(child, addr, datap);
                break;
#endif

        default:
                ret = compat_ptrace_request(child, request, addr,
                                            data);
                break;
        }

        return ret;
}
#endif /* CONFIG_COMPAT */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
        /*
         * Core dumping of 32-bit tasks or compat ptrace requests must use the
         * user_aarch32_view compatible with arm32. Native ptrace requests on
         * 32-bit children use an extended user_aarch32_ptrace_view to allow
         * access to the TLS register.
         */
        if (is_compat_task())
                return &user_aarch32_view;
        else if (is_compat_thread(task_thread_info(task)))
                return &user_aarch32_ptrace_view;
#endif
        return &user_aarch64_view;
}
long arch_ptrace(struct task_struct *child, long request,
                 unsigned long addr, unsigned long data)
{
        switch (request) {
        case PTRACE_PEEKMTETAGS:
        case PTRACE_POKEMTETAGS:
                return mte_ptrace_copy_tags(child, request, addr, data);
        }

        return ptrace_request(child, request, addr, data);
}
enum ptrace_syscall_dir {
        PTRACE_SYSCALL_ENTER = 0,
        PTRACE_SYSCALL_EXIT,
};

static void tracehook_report_syscall(struct pt_regs *regs,
                                     enum ptrace_syscall_dir dir)
{
        int regno;
        unsigned long saved_reg;

        /*
         * We have some ABI weirdness here in the way that we handle syscall
         * exit stops because we indicate whether or not the stop has been
         * signalled from syscall entry or syscall exit by clobbering a general
         * purpose register (ip/r12 for AArch32, x7 for AArch64) in the tracee
         * and restoring its old value after the stop. This means that:
         *
         * - Any writes by the tracer to this register during the stop are
         *   ignored/discarded.
         *
         * - The actual value of the register is not available during the stop,
         *   so the tracer cannot save it and restore it later.
         *
         * - Syscall stops behave differently to seccomp and pseudo-step traps
         *   (the latter do not nobble any registers).
         */
        regno = (is_compat_task() ? 12 : 7);
        saved_reg = regs->regs[regno];
        regs->regs[regno] = dir;

        if (dir == PTRACE_SYSCALL_ENTER) {
                if (tracehook_report_syscall_entry(regs))
                        forget_syscall(regs);
                regs->regs[regno] = saved_reg;
        } else if (!test_thread_flag(TIF_SINGLESTEP)) {
                tracehook_report_syscall_exit(regs, 0);
                regs->regs[regno] = saved_reg;
        } else {
                regs->regs[regno] = saved_reg;

                /*
                 * Signal a pseudo-step exception since we are stepping but
                 * tracer modifications to the registers may have rewound the
                 * state machine.
                 */
                tracehook_report_syscall_exit(regs, 1);
        }
}
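/*
 * Tracer-side sketch of the ABI described above (illustrative only, not
 * part of this file): at a syscall stop, x7 (r12 for AArch32) reads back
 * the direction, and tracer writes to it are discarded:
 *
 *      struct user_pt_regs regs;
 *      struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *      ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 *      bool sys_entry = (regs.regs[7] == 0);   // PTRACE_SYSCALL_ENTER
 */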
int syscall_trace_enter(struct pt_regs *regs)
{
        unsigned long flags = READ_ONCE(current_thread_info()->flags);

        if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
                tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
                if (!in_syscall(regs) || (flags & _TIF_SYSCALL_EMU))
                        return NO_SYSCALL;
        }

        /* Do the secure computing after ptrace; failures should be fast. */
        if (secure_computing() == -1)
                return NO_SYSCALL;

        if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
                trace_sys_enter(regs, regs->syscallno);

        audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
                            regs->regs[2], regs->regs[3]);

        return regs->syscallno;
}
void syscall_trace_exit(struct pt_regs *regs)
{
        unsigned long flags = READ_ONCE(current_thread_info()->flags);

        audit_syscall_exit(regs);

        if (flags & _TIF_SYSCALL_TRACEPOINT)
                trace_sys_exit(regs, regs_return_value(regs));

        if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
                tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);

        rseq_syscall(regs);
}
/*
 * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
 * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
 * not described in ARM DDI 0487D.a.
 * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
 * be allocated an EL0 meaning in future.
 * Userspace cannot use these until they have an architectural meaning.
 * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
 * We also reserve IL for the kernel; SS is handled dynamically.
 */
#define SPSR_EL1_AARCH64_RES0_BITS \
        (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 26) | GENMASK_ULL(23, 22) | \
         GENMASK_ULL(20, 13) | GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
        (GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))
static int valid_compat_regs(struct user_pt_regs *regs)
{
        regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;

        if (!system_supports_mixed_endian_el0()) {
                if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
                        regs->pstate |= PSR_AA32_E_BIT;
                else
                        regs->pstate &= ~PSR_AA32_E_BIT;
        }

        if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
            (regs->pstate & PSR_AA32_A_BIT) == 0 &&
            (regs->pstate & PSR_AA32_I_BIT) == 0 &&
            (regs->pstate & PSR_AA32_F_BIT) == 0) {
                return 1;
        }

        /*
         * Force PSR to a valid 32-bit EL0t, preserving the same bits as
         * arch/arm.
         */
        regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT |
                        PSR_AA32_C_BIT | PSR_AA32_V_BIT |
                        PSR_AA32_Q_BIT | PSR_AA32_IT_MASK |
                        PSR_AA32_GE_MASK | PSR_AA32_E_BIT |
                        PSR_AA32_A_BIT;
        regs->pstate |= PSR_MODE32_BIT;

        return 0;
}
*regs
)
1881 regs
->pstate
&= ~SPSR_EL1_AARCH64_RES0_BITS
;
1883 if (user_mode(regs
) && !(regs
->pstate
& PSR_MODE32_BIT
) &&
1884 (regs
->pstate
& PSR_D_BIT
) == 0 &&
1885 (regs
->pstate
& PSR_A_BIT
) == 0 &&
1886 (regs
->pstate
& PSR_I_BIT
) == 0 &&
1887 (regs
->pstate
& PSR_F_BIT
) == 0) {
1891 /* Force PSR to a valid 64-bit EL0t */
1892 regs
->pstate
&= PSR_N_BIT
| PSR_Z_BIT
| PSR_C_BIT
| PSR_V_BIT
;
1898 * Are the current registers suitable for user mode? (used to maintain
1899 * security in signal handlers)
1901 int valid_user_regs(struct user_pt_regs
*regs
, struct task_struct
*task
)
1903 /* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */
1904 user_regs_reset_single_step(regs
, task
);
1906 if (is_compat_thread(task_thread_info(task
)))
1907 return valid_compat_regs(regs
);
1909 return valid_native_regs(regs
);