// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>

#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
#include <asm/pgtable.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
#include <asm/syscall.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
#define GPR_OFFSET_NAME(r) \
	{.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}

static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	{.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pstate),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

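/*
 * Illustrative example (not part of the original file): kprobes-style
 * consumers resolve register names at probe-definition time, e.g.
 * regs_query_register_offset("lr") yields
 * offsetof(struct pt_regs, regs[30]) via the table above, while an
 * unknown name such as "x99" yields -EINVAL.
 */
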
/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
 * If @addr is within the kernel stack, it returns true. If not, returns false.
 */
static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
		on_irq_stack(addr, NULL);
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns the @n-th entry of the kernel stack
 * which is specified by @regs. If the @n-th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}

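/*
 * Illustrative example (not part of the original file): with n == 0 this
 * returns the word at the kernel stack pointer itself, and with n == 2
 * the word at kernel_stack_pointer(regs) + 16; once the computed address
 * walks off the stack page(s), the result is 0.
 */
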
/*
 * TODO: does not yet catch signals sent when the child dies,
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
	/*
	 * This would be better off in core code, but PTRACE_DETACH has
	 * grown its fair share of arch-specific warts and changing it
	 * is likely to cause regressions on obscure architectures.
	 */
	user_disable_single_step(child);
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	const char *desc = "Hardware breakpoint trap (ptrace)";

#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		int si_errno = 0;
		int i;

		for (i = 0; i < ARM_MAX_BRP; ++i) {
			if (current->thread.debug.hbp_break[i] == bp) {
				si_errno = (i << 1) + 1;
				break;
			}
		}

		for (i = 0; i < ARM_MAX_WRP; ++i) {
			if (current->thread.debug.hbp_watch[i] == bp) {
				si_errno = -((i << 1) + 1);
				break;
			}
		}
		arm64_force_sig_ptrace_errno_trap(si_errno,
						  (void __user *)bkpt->trigger,
						  desc);
		/* Don't fall through and queue a duplicate native SIGTRAP. */
		return;
	}
#endif
	arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT,
			      (void __user *)(bkpt->trigger),
			      desc);
}

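/*
 * Illustrative example (not part of the original file): the si_errno
 * encoding above mirrors the 32-bit ABI's (address, control) register
 * pairs, so breakpoint slot 0 reports si_errno == 1, slot 1 reports 3,
 * and watchpoint slot 0 reports -1; compat_ptrace_hbp_num_to_idx()
 * later in this file inverts the mapping.
 */
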
/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_BRP; i++) {
		if (t->debug.hbp_break[i]) {
			unregister_hw_breakpoint(t->debug.hbp_break[i]);
			t->debug.hbp_break[i] = NULL;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; i++) {
		if (t->debug.hbp_watch[i]) {
			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
			t->debug.hbp_watch[i] = NULL;
		}
	}
}

void ptrace_hw_copy_thread(struct task_struct *tsk)
{
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}

static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp = ERR_PTR(-EINVAL);

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		bp = tsk->thread.debug.hbp_break[idx];
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		bp = tsk->thread.debug.hbp_watch[idx];
		break;
	}

out:
	return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	int err = -EINVAL;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		tsk->thread.debug.hbp_break[idx] = bp;
		err = 0;
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		tsk->thread.debug.hbp_watch[idx] = bp;
		err = 0;
		break;
	}

out:
	return err;
}

static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	int err, type;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_ARM_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr	= 0;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;
	attr.bp_type	= type;
	attr.disabled	= 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, offset, disabled = !ctrl.enabled;

	attr->disabled = disabled;
	if (disabled)
		return 0;

	err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
	if (err)
		return err;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if ((type & HW_BREAKPOINT_X) != type)
			return -EINVAL;
		break;
	case NT_ARM_HW_WATCH:
		if ((type & HW_BREAKPOINT_RW) != type)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	attr->bp_len	= len;
	attr->bp_type	= type;
	attr->bp_addr	+= offset;

	return 0;
}

static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
	u8 num;
	u32 reg = 0;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_ARM_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	reg |= debug_monitors_arch();
	reg <<= 8;
	reg |= num;

	*info = reg;
	return 0;
}

static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
	return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? counter_arch_bp(bp)->address : 0;
	return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 addr)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	attr = bp->attr;
	attr.bp_addr = addr;
	err = modify_user_hw_breakpoint(bp, &attr);
	return err;
}

#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)

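/*
 * Illustrative layout sketch (not part of the original file): each slot
 * exported via NT_ARM_HW_BREAK/NT_ARM_HW_WATCH occupies
 * PTRACE_HBP_ADDR_SZ + PTRACE_HBP_CTRL_SZ + PTRACE_HBP_PAD_SZ = 16 bytes
 * after the leading (info, pad) header, matching the dbg_regs[] array of
 * struct user_hwdebug_state that hw_break_get()/hw_break_set() walk below.
 */
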
static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0,
				  sizeof(info));
	if (ret)
		return ret;

	/* Pad */
	offset = offsetof(struct user_hwdebug_state, pad);
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, offset,
				       offset + PTRACE_HBP_PAD_SZ);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr,
					  offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl,
					  offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		/* Pad */
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       offset,
					       offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}

static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		if (!count)
			break;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		/* Pad */
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						offset,
						offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs = task_pt_regs(target)->user_regs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs, target))
		return -EINVAL;

	task_pt_regs(target)->user_regs = newregs;
	return 0;
}

static int fpr_active(struct task_struct *target, const struct user_regset *regset)
{
	if (!system_supports_fpsimd())
		return 0;
	return regset->n;
}

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int __fpr_get(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     void *kbuf, void __user *ubuf, unsigned int start_pos)
{
	struct user_fpsimd_state *uregs;

	sve_sync_to_fpsimd(target);

	uregs = &target->thread.uw.fpsimd_state;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
				   start_pos, start_pos + sizeof(*uregs));
}

static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	if (!system_supports_fpsimd())
		return -EINVAL;

	if (target == current)
		fpsimd_preserve_current_state();

	return __fpr_get(target, regset, pos, count, kbuf, ubuf, 0);
}

static int __fpr_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf,
		     unsigned int start_pos)
{
	int ret;
	struct user_fpsimd_state newstate;

	/*
	 * Ensure target->thread.uw.fpsimd_state is up to date, so that a
	 * short copyin can't resurrect stale data.
	 */
	sve_sync_to_fpsimd(target);

	newstate = target->thread.uw.fpsimd_state;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
				 start_pos, start_pos + sizeof(newstate));
	if (ret)
		return ret;

	target->thread.uw.fpsimd_state = newstate;

	return ret;
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (!system_supports_fpsimd())
		return -EINVAL;

	ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
	if (ret)
		return ret;

	sve_sync_from_fpsimd_zeropad(target);
	fpsimd_flush_task_state(target);

	return ret;
}

static int tls_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	unsigned long *tls = &target->thread.uw.tp_value;

	if (target == current)
		tls_preserve_current_state();

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long tls = target->thread.uw.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls;
	return ret;
}

static int system_call_get(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   void *kbuf, void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &syscallno, 0, -1);
}

static int system_call_set(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   const void *kbuf, const void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
	if (ret)
		return ret;

	task_pt_regs(target)->syscallno = syscallno;
	return ret;
}

#ifdef CONFIG_ARM64_SVE

static void sve_init_header_from_task(struct user_sve_header *header,
				      struct task_struct *target)
{
	unsigned int vq;

	memset(header, 0, sizeof(*header));

	header->flags = test_tsk_thread_flag(target, TIF_SVE) ?
		SVE_PT_REGS_SVE : SVE_PT_REGS_FPSIMD;
	if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
		header->flags |= SVE_PT_VL_INHERIT;

	header->vl = target->thread.sve_vl;
	vq = sve_vq_from_vl(header->vl);

	header->max_vl = sve_max_vl;
	header->size = SVE_PT_SIZE(vq, header->flags);
	header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
				      SVE_PT_REGS_SVE);
}

static unsigned int sve_size_from_header(struct user_sve_header const *header)
{
	return ALIGN(header->size, SVE_VQ_BYTES);
}

static unsigned int sve_get_size(struct task_struct *target,
				 const struct user_regset *regset)
{
	struct user_sve_header header;

	if (!system_supports_sve())
		return 0;

	sve_init_header_from_task(&header, target);
	return sve_size_from_header(&header);
}

static int sve_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sve())
		return -EINVAL;

	/* Header */
	sve_init_header_from_task(&header, target);
	vq = sve_vq_from_vl(header.vl);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &header,
				  0, sizeof(header));
	if (ret)
		return ret;

	if (target == current)
		fpsimd_preserve_current_state();

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD)
		return __fpr_get(target, regset, pos, count, kbuf, ubuf,
				 SVE_PT_FPSIMD_OFFSET);

	/* Otherwise: full SVE case */

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  target->thread.sve_state,
				  start, end);
	if (ret)
		return ret;

	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
				       start, end);
	if (ret)
		return ret;

	/*
	 * Copy fpsr, and fpcr which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.uw.fpsimd_state.fpsr,
				  start, end);
	if (ret)
		return ret;

	start = end;
	end = sve_size_from_header(&header);
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					start, end);
}

static int sve_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sve())
		return -EINVAL;

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		goto out;

	/*
	 * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
	 * sve_set_vector_length(), which will also validate them for us:
	 */
	ret = sve_set_vector_length(target, header.vl,
		((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
	if (ret)
		goto out;

	/* Actual VL set may be less than the user asked for: */
	vq = sve_vq_from_vl(target->thread.sve_vl);

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) {
		ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
				SVE_PT_FPSIMD_OFFSET);
		clear_tsk_thread_flag(target, TIF_SVE);
		goto out;
	}

	/* Otherwise: full SVE case */

	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (count && vq != sve_vq_from_vl(header.vl)) {
		ret = -EIO;
		goto out;
	}

	sve_alloc(target);

	/*
	 * Ensure target->thread.sve_state is up to date with target's
	 * FPSIMD regs, so that a short copyin leaves trailing registers
	 * unmodified.
	 */
	fpsimd_sync_to_sve(target);
	set_tsk_thread_flag(target, TIF_SVE);

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.sve_state,
				 start, end);
	if (ret)
		goto out;

	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					start, end);
	if (ret)
		goto out;

	/*
	 * Copy fpsr, and fpcr which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.uw.fpsimd_state.fpsr,
				 start, end);

out:
	fpsimd_flush_task_state(target);
	return ret;
}

#endif /* CONFIG_ARM64_SVE */

#ifdef CONFIG_ARM64_PTR_AUTH
static int pac_mask_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	/*
	 * The PAC bits can differ across data and instruction pointers
	 * depending on TCR_EL1.TBID*, which we may make use of in future, so
	 * we expose separate masks.
	 */
	unsigned long mask = ptrauth_user_pac_mask();
	struct user_pac_mask uregs = {
		.data_mask = mask,
		.insn_mask = mask,
	};

	if (!system_supports_address_auth())
		return -EINVAL;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &uregs, 0, -1);
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static __uint128_t pac_key_to_user(const struct ptrauth_key *key)
{
	return (__uint128_t)key->hi << 64 | key->lo;
}

static struct ptrauth_key pac_key_from_user(__uint128_t ukey)
{
	struct ptrauth_key key = {
		.lo = (unsigned long)ukey,
		.hi = (unsigned long)(ukey >> 64),
	};

	return key;
}

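/*
 * Illustrative example (not part of the original file): a key whose
 * halves hold hi == 0x0123456789abcdef and lo == 0xfedcba9876543210
 * round-trips through pac_key_to_user()/pac_key_from_user() as the
 * single __uint128_t value 0x0123456789abcdeffedcba9876543210.
 */
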
static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys,
				     const struct ptrauth_keys_user *keys)
{
	ukeys->apiakey = pac_key_to_user(&keys->apia);
	ukeys->apibkey = pac_key_to_user(&keys->apib);
	ukeys->apdakey = pac_key_to_user(&keys->apda);
	ukeys->apdbkey = pac_key_to_user(&keys->apdb);
}

static void pac_address_keys_from_user(struct ptrauth_keys_user *keys,
				       const struct user_pac_address_keys *ukeys)
{
	keys->apia = pac_key_from_user(ukeys->apiakey);
	keys->apib = pac_key_from_user(ukeys->apibkey);
	keys->apda = pac_key_from_user(ukeys->apdakey);
	keys->apdb = pac_key_from_user(ukeys->apdbkey);
}

static int pac_address_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				void *kbuf, void __user *ubuf)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_address_keys user_keys;

	if (!system_supports_address_auth())
		return -EINVAL;

	pac_address_keys_to_user(&user_keys, keys);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &user_keys, 0, -1);
}

static int pac_address_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_address_keys user_keys;
	int ret;

	if (!system_supports_address_auth())
		return -EINVAL;

	pac_address_keys_to_user(&user_keys, keys);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &user_keys, 0, -1);
	if (ret)
		return ret;
	pac_address_keys_from_user(keys, &user_keys);

	return 0;
}

static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys,
				     const struct ptrauth_keys_user *keys)
{
	ukeys->apgakey = pac_key_to_user(&keys->apga);
}

static void pac_generic_keys_from_user(struct ptrauth_keys_user *keys,
				       const struct user_pac_generic_keys *ukeys)
{
	keys->apga = pac_key_from_user(ukeys->apgakey);
}

static int pac_generic_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				void *kbuf, void __user *ubuf)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_generic_keys user_keys;

	if (!system_supports_generic_auth())
		return -EINVAL;

	pac_generic_keys_to_user(&user_keys, keys);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &user_keys, 0, -1);
}

static int pac_generic_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_generic_keys user_keys;
	int ret;

	if (!system_supports_generic_auth())
		return -EINVAL;

	pac_generic_keys_to_user(&user_keys, keys);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &user_keys, 0, -1);
	if (ret)
		return ret;
	pac_generic_keys_from_user(keys, &user_keys);

	return 0;
}

#endif /* CONFIG_CHECKPOINT_RESTORE */
#endif /* CONFIG_ARM64_PTR_AUTH */

enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
	REGSET_SYSTEM_CALL,
#ifdef CONFIG_ARM64_SVE
	REGSET_SVE,
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	REGSET_PAC_MASK,
#ifdef CONFIG_CHECKPOINT_RESTORE
	REGSET_PACA_KEYS,
	REGSET_PACG_KEYS,
#endif
#endif
};

static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32 bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.active = fpr_active,
		.get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.get = system_call_get,
		.set = system_call_set,
	},
#ifdef CONFIG_ARM64_SVE
	[REGSET_SVE] = { /* Scalable Vector Extension */
		.core_note_type = NT_ARM_SVE,
		.n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.get = sve_get,
		.set = sve_set,
		.get_size = sve_get_size,
	},
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	[REGSET_PAC_MASK] = {
		.core_note_type = NT_ARM_PAC_MASK,
		.n = sizeof(struct user_pac_mask) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.get = pac_mask_get,
		/* this cannot be set dynamically */
	},
#ifdef CONFIG_CHECKPOINT_RESTORE
	[REGSET_PACA_KEYS] = {
		.core_note_type = NT_ARM_PACA_KEYS,
		.n = sizeof(struct user_pac_address_keys) / sizeof(__uint128_t),
		.size = sizeof(__uint128_t),
		.align = sizeof(__uint128_t),
		.get = pac_address_keys_get,
		.set = pac_address_keys_set,
	},
	[REGSET_PACG_KEYS] = {
		.core_note_type = NT_ARM_PACG_KEYS,
		.n = sizeof(struct user_pac_generic_keys) / sizeof(__uint128_t),
		.size = sizeof(__uint128_t),
		.align = sizeof(__uint128_t),
		.get = pac_generic_keys_get,
		.set = pac_generic_keys_set,
	},
#endif
#endif
};

static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};

#ifdef CONFIG_COMPAT
enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};

static int compat_gpr_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		switch (idx) {
		case 15:
			reg = task_pt_regs(target)->pc;
			break;
		case 16:
			reg = task_pt_regs(target)->pstate;
			reg = pstate_to_compat_psr(reg);
			break;
		case 17:
			reg = task_pt_regs(target)->orig_x0;
			break;
		default:
			reg = task_pt_regs(target)->regs[idx];
		}

		if (kbuf) {
			memcpy(kbuf, &reg, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_to_user(ubuf, &reg, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}
	}

	return ret;
}

static int compat_gpr_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	newregs = *task_pt_regs(target);

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		if (kbuf) {
			memcpy(&reg, kbuf, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_from_user(&reg, ubuf, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}

		switch (idx) {
		case 15:
			newregs.pc = reg;
			break;
		case 16:
			reg = compat_psr_to_pstate(reg);
			newregs.pstate = reg;
			break;
		case 17:
			newregs.orig_x0 = reg;
			break;
		default:
			newregs.regs[idx] = reg;
		}
	}

	if (valid_user_regs(&newregs.user_regs, target))
		*task_pt_regs(target) = newregs;
	else
		ret = -EINVAL;

	return ret;
}

static int compat_vfp_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	if (!system_supports_fpsimd())
		return -EINVAL;

	uregs = &target->thread.uw.fpsimd_state;

	if (target == current)
		fpsimd_preserve_current_state();

	/*
	 * The VFP registers are packed into the fpsimd_state, so they all sit
	 * nicely together for us. We just need to create the fpscr separately.
	 */
	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
				  0, vregs_end_pos);

	if (count && !ret) {
		fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
			(uregs->fpcr & VFP_FPSCR_CTRL_MASK);

		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &fpscr,
					  vregs_end_pos, VFP_STATE_SIZE);
	}

	return ret;
}

static int compat_vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	if (!system_supports_fpsimd())
		return -EINVAL;

	uregs = &target->thread.uw.fpsimd_state;

	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 vregs_end_pos);

	if (count && !ret) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
					 vregs_end_pos, VFP_STATE_SIZE);
		if (!ret) {
			uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
			uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
		}
	}

	fpsimd_flush_task_state(target);
	return ret;
}

static int compat_tls_get(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, void *kbuf, void __user *ubuf)
{
	compat_ulong_t tls = (compat_ulong_t)target->thread.uw.tp_value;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
}

static int compat_tls_set(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, const void *kbuf,
			  const void __user *ubuf)
{
	int ret;
	compat_ulong_t tls = target->thread.uw.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls;
	return ret;
}

static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.active = fpr_active,
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
};

static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};

static const struct user_regset aarch32_ptrace_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_tls_get,
		.set = compat_tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.get = system_call_get,
		.set = system_call_set,
	},
};

static const struct user_regset_view user_aarch32_ptrace_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
};

static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
				   compat_ulong_t __user *ret)
{
	compat_ulong_t tmp;

	if (off & 3)
		return -EIO;

	if (off == COMPAT_PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == COMPAT_PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == COMPAT_PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(compat_elf_gregset_t))
		return copy_regset_to_user(tsk, &user_aarch32_view,
					   REGSET_COMPAT_GPR, off,
					   sizeof(compat_ulong_t), ret);
	else if (off >= COMPAT_USER_SZ)
		return -EIO;
	else
		tmp = 0;

	return put_user(tmp, ret);
}

static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
				    compat_ulong_t val)
{
	int ret;
	mm_segment_t old_fs = get_fs();

	if (off & 3 || off >= COMPAT_USER_SZ)
		return -EIO;

	if (off >= sizeof(compat_elf_gregset_t))
		return 0;

	set_fs(KERNEL_DS);
	ret = copy_regset_from_user(tsk, &user_aarch32_view,
				    REGSET_COMPAT_GPR, off,
				    sizeof(compat_ulong_t),
				    &val);
	set_fs(old_fs);

	return ret;
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
	return (abs(num) - 1) >> 1;
}

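/*
 * Illustrative example (not part of the original file): register numbers
 * 1 and 2 address breakpoint slot 0's address and control words, 3 and 4
 * slot 1's, and -1 and -2 watchpoint slot 0's; so num == 4 yields
 * idx == 1 here, and the low bit of num selects address vs. control in
 * the accessors below.
 */
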
static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps	= hw_breakpoint_slots(TYPE_INST);
	num_wrps	= hw_breakpoint_slots(TYPE_DATA);

	debug_arch	= debug_monitors_arch();
	wp_len		= 8;
	reg		|= debug_arch;
	reg		<<= 8;
	reg		|= wp_len;
	reg		<<= 8;
	reg		|= num_wrps;
	reg		<<= 8;
	reg		|= num_brps;

	*kdata = reg;
	return 0;
}

static int compat_ptrace_hbp_get(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr = 0;
	u32 ctrl = 0;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
		*kdata = (u32)addr;
	} else {
		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
		*kdata = ctrl;
	}

	return err;
}

static int compat_ptrace_hbp_set(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr;
	u32 ctrl;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		addr = *kdata;
		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
	} else {
		ctrl = *kdata;
		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
	}

	return err;
}

static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata;

	/* Watchpoint */
	if (num < 0) {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	/* Resource info */
	} else if (num == 0) {
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
	/* Breakpoint */
	} else {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
	}

	if (!ret)
		ret = put_user(kdata, data);

	return ret;
}

static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata = 0;

	if (num == 0)
		return 0;

	ret = get_user(kdata, data);
	if (ret)
		return ret;

	if (num < 0)
		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
	else
		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);

	return ret;
}
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
		case PTRACE_PEEKUSR:
			ret = compat_ptrace_read_user(child, addr, datap);
			break;

		case PTRACE_POKEUSR:
			ret = compat_ptrace_write_user(child, addr, data);
			break;

		case COMPAT_PTRACE_GETREGS:
			ret = copy_regset_to_user(child,
						  &user_aarch32_view,
						  REGSET_COMPAT_GPR,
						  0, sizeof(compat_elf_gregset_t),
						  datap);
			break;

		case COMPAT_PTRACE_SETREGS:
			ret = copy_regset_from_user(child,
						    &user_aarch32_view,
						    REGSET_COMPAT_GPR,
						    0, sizeof(compat_elf_gregset_t),
						    datap);
			break;

		case COMPAT_PTRACE_GET_THREAD_AREA:
			ret = put_user((compat_ulong_t)child->thread.uw.tp_value,
				       (compat_ulong_t __user *)datap);
			break;

		case COMPAT_PTRACE_SET_SYSCALL:
			task_pt_regs(child)->syscallno = data;
			ret = 0;
			break;

		case COMPAT_PTRACE_GETVFPREGS:
			ret = copy_regset_to_user(child,
						  &user_aarch32_view,
						  REGSET_COMPAT_VFP,
						  0, VFP_STATE_SIZE,
						  datap);
			break;

		case COMPAT_PTRACE_SETVFPREGS:
			ret = copy_regset_from_user(child,
						    &user_aarch32_view,
						    REGSET_COMPAT_VFP,
						    0, VFP_STATE_SIZE,
						    datap);
			break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
		case COMPAT_PTRACE_GETHBPREGS:
			ret = compat_ptrace_gethbpregs(child, addr, datap);
			break;

		case COMPAT_PTRACE_SETHBPREGS:
			ret = compat_ptrace_sethbpregs(child, addr, datap);
			break;
#endif

		default:
			ret = compat_ptrace_request(child, request, addr,
						    data);
			break;
	}

	return ret;
}
#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	/*
	 * Core dumping of 32-bit tasks or compat ptrace requests must use the
	 * user_aarch32_view compatible with arm32. Native ptrace requests on
	 * 32-bit children use an extended user_aarch32_ptrace_view to allow
	 * access to the TLS register.
	 */
	if (is_compat_task())
		return &user_aarch32_view;
	else if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_ptrace_view;
#endif
	return &user_aarch64_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	return ptrace_request(child, request, addr, data);
}

enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};

static void tracehook_report_syscall(struct pt_regs *regs,
				     enum ptrace_syscall_dir dir)
{
	int regno;
	unsigned long saved_reg;

	/*
	 * A scratch register (ip (r12) on AArch32, x7 on AArch64) is
	 * used to denote syscall entry/exit:
	 */
	regno = (is_compat_task() ? 12 : 7);
	saved_reg = regs->regs[regno];
	regs->regs[regno] = dir;

	if (dir == PTRACE_SYSCALL_EXIT)
		tracehook_report_syscall_exit(regs, 0);
	else if (tracehook_report_syscall_entry(regs))
		forget_syscall(regs);

	regs->regs[regno] = saved_reg;
}

int syscall_trace_enter(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE) ||
		test_thread_flag(TIF_SYSCALL_EMU)) {
		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
		if (!in_syscall(regs) || test_thread_flag(TIF_SYSCALL_EMU))
			return -1;
	}

	/* Do the secure computing after ptrace; failures should be fast. */
	if (secure_computing() == -1)
		return -1;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, regs->syscallno);

	audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
			    regs->regs[2], regs->regs[3]);

	return regs->syscallno;
}

void syscall_trace_exit(struct pt_regs *regs)
{
	audit_syscall_exit(regs);

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);

	rseq_syscall(regs);
}

/*
 * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
 * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23), which is
 * not described in ARM DDI 0487D.a.
 * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
 * be allocated an EL0 meaning in future.
 * Userspace cannot use these until they have an architectural meaning.
 * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
 * We also reserve IL for the kernel; SS is handled dynamically.
 */
#define SPSR_EL1_AARCH64_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
	 GENMASK_ULL(20, 13) | GENMASK_ULL(11, 10) | GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))

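/*
 * Illustrative example (not part of the original file, assuming the DDI
 * 0487D.a bit assignments): after clearing SPSR_EL1_AARCH64_RES0_BITS,
 * userspace can only influence NZCV (bits 31:28), DIT (24), SS (21),
 * SSBS (12), DAIF (9:6) and the mode field M[4:0]; valid_native_regs()
 * below then forces anything other than a clean 64-bit EL0t state back
 * to one.
 */
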
static int valid_compat_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;

	if (!system_supports_mixed_endian_el0()) {
		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
			regs->pstate |= PSR_AA32_E_BIT;
		else
			regs->pstate &= ~PSR_AA32_E_BIT;
	}

	if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_AA32_A_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_I_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_F_BIT) == 0) {
		return 1;
	}

	/*
	 * Force PSR to a valid 32-bit EL0t, preserving the same bits as
	 * arch/arm.
	 */
	regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT |
			PSR_AA32_C_BIT | PSR_AA32_V_BIT |
			PSR_AA32_Q_BIT | PSR_AA32_IT_MASK |
			PSR_AA32_GE_MASK | PSR_AA32_E_BIT |
			PSR_AA32_DIT_BIT;
	regs->pstate |= PSR_MODE32_BIT;

	return 0;
}

static int valid_native_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;

	if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_D_BIT) == 0 &&
	    (regs->pstate & PSR_A_BIT) == 0 &&
	    (regs->pstate & PSR_I_BIT) == 0 &&
	    (regs->pstate & PSR_F_BIT) == 0) {
		return 1;
	}

	/* Force PSR to a valid 64-bit EL0t */
	regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;

	return 0;
}

/*
 * Are the current registers suitable for user mode? (used to maintain
 * security in signal handlers)
 */
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
{
	if (!test_tsk_thread_flag(task, TIF_SINGLESTEP))
		regs->pstate &= ~DBG_SPSR_SS;

	if (is_compat_thread(task_thread_info(task)))
		return valid_compat_regs(regs);
	else
		return valid_native_regs(regs);
}