/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>

#include <asm/compat.h>
#include <asm/debug-monitors.h>
#include <asm/pgtable.h>
#include <asm/syscall.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
/*
 * TODO: does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching..
 */
void ptrace_disable(struct task_struct *child)
{
}
#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	siginfo_t info = {
		.si_code	= TRAP_HWBKPT,
		.si_addr	= (void __user *)(bkpt->trigger),
	};

#ifdef CONFIG_COMPAT
	int i;

	if (!is_compat_task())
		goto send_sig;

	for (i = 0; i < ARM_MAX_BRP; ++i) {
		if (current->thread.debug.hbp_break[i] == bp) {
			info.si_errno = (i << 1) + 1;
			break;
		}
	}

	for (i = ARM_MAX_BRP; i < ARM_MAX_HBP_SLOTS && !bp; ++i) {
		if (current->thread.debug.hbp_watch[i] == bp) {
			info.si_errno = -((i << 1) + 1);
			break;
		}
	}

send_sig:
#endif
	force_sig_info(SIGTRAP, &info, current);
}
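/*
 * Note (editorial): for compat tasks the si_errno value set above encodes
 * which debug register fired, mirroring the compat PTRACE_GETHBPREGS
 * numbering used later in this file: positive values identify breakpoint
 * registers, negative values identify watchpoint registers.
 */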
/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_BRP; i++) {
		if (t->debug.hbp_break[i]) {
			unregister_hw_breakpoint(t->debug.hbp_break[i]);
			t->debug.hbp_break[i] = NULL;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; i++) {
		if (t->debug.hbp_watch[i]) {
			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
			t->debug.hbp_watch[i] = NULL;
		}
	}
}
void ptrace_hw_copy_thread(struct task_struct *tsk)
{
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}
static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp = ERR_PTR(-EINVAL);

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx < ARM_MAX_BRP)
			bp = tsk->thread.debug.hbp_break[idx];
		break;
	case NT_ARM_HW_WATCH:
		if (idx < ARM_MAX_WRP)
			bp = tsk->thread.debug.hbp_watch[idx];
		break;
	}

	return bp;
}
static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	int err = -EINVAL;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx < ARM_MAX_BRP) {
			tsk->thread.debug.hbp_break[idx] = bp;
			err = 0;
		}
		break;
	case NT_ARM_HW_WATCH:
		if (idx < ARM_MAX_WRP) {
			tsk->thread.debug.hbp_watch[idx] = bp;
			err = 0;
		}
		break;
	}

	return err;
}
static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	int err, type;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_ARM_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr	= 0;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;
	attr.bp_type	= type;
	attr.disabled	= 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}
static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, disabled = !ctrl.enabled;

	attr->disabled = disabled;
	if (disabled)
		return 0;

	err = arch_bp_generic_fields(ctrl, &len, &type);
	if (err)
		return err;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if ((type & HW_BREAKPOINT_X) != type)
			return -EINVAL;
		break;
	case NT_ARM_HW_WATCH:
		if ((type & HW_BREAKPOINT_RW) != type)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	attr->bp_len	= len;
	attr->bp_type	= type;

	return 0;
}
static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
	u8 num;
	u32 reg = 0;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_ARM_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	reg |= debug_monitors_arch();
	reg <<= 8;
	reg |= num;

	*info = reg;
	return 0;
}
static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
	return 0;
}
static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? bp->attr.bp_addr : 0;
	return 0;
}
static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}
static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}
static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 addr)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	attr.bp_addr = addr;
	err = modify_user_hw_breakpoint(bp, &attr);
	return err;
}
#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)
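/*
 * Layout note (editorial, as implied by hw_break_get()/hw_break_set() below;
 * struct user_hwdebug_state in the uapi headers is authoritative): the regset
 * starts with a 32-bit resource-info word followed by 32 bits of padding, and
 * then carries one (64-bit address, 32-bit control, 32-bit pad) triple per
 * debug register.
 */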
static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0,
				  sizeof(info));
	if (ret)
		return ret;

	/* Pad */
	offset = offsetof(struct user_hwdebug_state, pad);
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, offset,
				       offset + PTRACE_HBP_PAD_SZ);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr,
					  offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl,
					  offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       offset,
					       offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						offset,
						offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}

#endif	/* CONFIG_HAVE_HW_BREAKPOINT */
static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}
static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs))
		return -EINVAL;

	task_pt_regs(target)->user_regs = newregs;
	return 0;
}
/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	uregs = &target->thread.fpsimd_state.user_fpsimd;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}
static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_fpsimd_state newstate;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
	if (ret)
		return ret;

	target->thread.fpsimd_state.user_fpsimd = newstate;
	fpsimd_flush_task_state(target);
	return ret;
}
static int tls_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	unsigned long *tls = &target->thread.tp_value;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
}
static int tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long tls;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.tp_value = tls;
	return ret;
}
enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
};
static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32-bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
};

static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};
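/*
 * For reference (editorial, not used by this file): userspace reaches these
 * regsets through PTRACE_GETREGSET/PTRACE_SETREGSET with the corresponding
 * note type and an iovec describing the buffer, e.g. to read the AArch64
 * GPRs of a stopped tracee:
 *
 *	struct user_pt_regs regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *	ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
 */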
#ifdef CONFIG_COMPAT
#include <linux/compat.h>

enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};

static int compat_gpr_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		switch (idx) {
		case 15:
			reg = task_pt_regs(target)->pc;
			break;
		case 16:
			reg = task_pt_regs(target)->pstate;
			break;
		case 17:
			reg = task_pt_regs(target)->orig_x0;
			break;
		default:
			reg = task_pt_regs(target)->regs[idx];
		}

		if (kbuf) {
			memcpy(kbuf, &reg, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_to_user(ubuf, &reg, sizeof(reg));
			if (ret)
				break;

			ubuf += sizeof(reg);
		}
	}

	return ret;
}
static int compat_gpr_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	newregs = *task_pt_regs(target);

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		if (kbuf) {
			memcpy(&reg, kbuf, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_from_user(&reg, ubuf, sizeof(reg));
			if (ret)
				return ret;

			ubuf += sizeof(reg);
		}

		switch (idx) {
		case 15:
			newregs.pc = reg;
			break;
		case 16:
			newregs.pstate = reg;
			break;
		case 17:
			newregs.orig_x0 = reg;
			break;
		default:
			newregs.regs[idx] = reg;
			break;
		}
	}

	if (valid_user_regs(&newregs.user_regs))
		*task_pt_regs(target) = newregs;
	else
		ret = -EINVAL;

	return ret;
}
static int compat_vfp_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret;

	uregs = &target->thread.fpsimd_state.user_fpsimd;

	/*
	 * The VFP registers are packed into the fpsimd_state, so they all sit
	 * nicely together for us. We just need to create the fpscr separately.
	 */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
				  VFP_STATE_SIZE - sizeof(compat_ulong_t));

	if (count && !ret) {
		fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
			(uregs->fpcr & VFP_FPSCR_CTRL_MASK);
		ret = put_user(fpscr, (compat_ulong_t *)ubuf);
	}

	return ret;
}
static int compat_vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret;

	if (pos + count > VFP_STATE_SIZE)
		return -EIO;

	uregs = &target->thread.fpsimd_state.user_fpsimd;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 VFP_STATE_SIZE - sizeof(compat_ulong_t));

	if (count && !ret) {
		ret = get_user(fpscr, (compat_ulong_t *)ubuf);
		uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
		uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
	}

	fpsimd_flush_task_state(target);
	return ret;
}
static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
};

static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};
static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
				   compat_ulong_t __user *ret)
{
	compat_ulong_t tmp;

	if (off & 3)
		return -EIO;

	if (off == COMPAT_PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == COMPAT_PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == COMPAT_PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(compat_elf_gregset_t))
		return copy_regset_to_user(tsk, &user_aarch32_view,
					   REGSET_COMPAT_GPR, off,
					   sizeof(compat_ulong_t), ret);
	else if (off >= COMPAT_USER_SZ)
		return -EIO;
	else
		tmp = 0;

	return put_user(tmp, ret);
}
static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
				    compat_ulong_t val)
{
	int ret;
	mm_segment_t old_fs = get_fs();

	if (off & 3 || off >= COMPAT_USER_SZ)
		return -EIO;

	if (off >= sizeof(compat_elf_gregset_t))
		return 0;

	set_fs(KERNEL_DS);
	ret = copy_regset_from_user(tsk, &user_aarch32_view,
				    REGSET_COMPAT_GPR, off,
				    sizeof(compat_ulong_t),
				    &val);
	set_fs(old_fs);

	return ret;
}
#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
	return (abs(num) - 1) >> 1;
}
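/*
 * For example (editorial, following the numbering described above): register
 * numbers 1 and 2 (or -1 and -2 for watchpoints) are the address/control pair
 * of slot 0, so they map to idx 0; numbers 3 and 4 map to idx 1, and so on.
 */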
static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps	= hw_breakpoint_slots(TYPE_INST);
	num_wrps	= hw_breakpoint_slots(TYPE_DATA);

	debug_arch	= debug_monitors_arch();
	wp_len		= 8;
	reg		|= debug_arch;
	reg		<<= 8;
	reg		|= wp_len;
	reg		<<= 8;
	reg		|= num_wrps;
	reg		<<= 8;
	reg		|= num_brps;

	*kdata = reg;
	return 0;
}
static int compat_ptrace_hbp_get(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr = 0;
	u32 ctrl = 0;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
		*kdata = (u32)addr;
	} else {
		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
		*kdata = ctrl;
	}

	return err;
}
static int compat_ptrace_hbp_set(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr;
	u32 ctrl;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		addr = *kdata;
		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
	} else {
		ctrl = *kdata;
		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
	}

	return err;
}
static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	/* Watchpoint */
	if (num < 0) {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	/* Resource info */
	} else if (num == 0) {
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
	/* Breakpoint */
	} else {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
	}
	set_fs(old_fs);

	if (!ret)
		ret = put_user(kdata, data);

	return ret;
}
static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata = 0;
	mm_segment_t old_fs = get_fs();

	if (num == 0)
		return 0;

	ret = get_user(kdata, data);
	if (ret)
		return ret;

	set_fs(KERNEL_DS);
	if (num < 0)
		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
	else
		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
	set_fs(old_fs);

	return ret;
}

#endif	/* CONFIG_HAVE_HW_BREAKPOINT */
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = compat_ptrace_read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = compat_ptrace_write_user(child, addr, data);
		break;

	case COMPAT_PTRACE_GETREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_GPR,
					  0, sizeof(compat_elf_gregset_t),
					  datap);
		break;

	case COMPAT_PTRACE_SETREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_GPR,
					    0, sizeof(compat_elf_gregset_t),
					    datap);
		break;

	case COMPAT_PTRACE_GET_THREAD_AREA:
		ret = put_user((compat_ulong_t)child->thread.tp_value,
			       (compat_ulong_t __user *)datap);
		break;

	case COMPAT_PTRACE_SET_SYSCALL:
		task_pt_regs(child)->syscallno = data;
		ret = 0;
		break;

	case COMPAT_PTRACE_GETVFPREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_VFP,
					  0, VFP_STATE_SIZE,
					  datap);
		break;

	case COMPAT_PTRACE_SETVFPREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_VFP,
					    0, VFP_STATE_SIZE,
					    datap);
		break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case COMPAT_PTRACE_GETHBPREGS:
		ret = compat_ptrace_gethbpregs(child, addr, datap);
		break;

	case COMPAT_PTRACE_SETHBPREGS:
		ret = compat_ptrace_sethbpregs(child, addr, datap);
		break;
#endif

	default:
		ret = compat_ptrace_request(child, request, addr,
					    data);
		break;
	}

	return ret;
}
#endif /* CONFIG_COMPAT */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_view;
#endif
	return &user_aarch64_view;
}
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	return ptrace_request(child, request, addr, data);
}
enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};
static void tracehook_report_syscall(struct pt_regs *regs,
				     enum ptrace_syscall_dir dir)
{
	int regno;
	unsigned long saved_reg;

	/*
	 * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
	 * used to denote syscall entry/exit:
	 */
	regno = (is_compat_task() ? 12 : 7);
	saved_reg = regs->regs[regno];
	regs->regs[regno] = dir;

	if (dir == PTRACE_SYSCALL_EXIT)
		tracehook_report_syscall_exit(regs, 0);
	else if (tracehook_report_syscall_entry(regs))
		regs->syscallno = ~0UL;

	regs->regs[regno] = saved_reg;
}
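/*
 * From the tracer's point of view (editorial note): at the syscall stop,
 * reading x7 (or r12 for a compat task) yields PTRACE_SYSCALL_ENTER (0) on
 * entry and PTRACE_SYSCALL_EXIT (1) on exit; the register's original value
 * is restored before the traced task resumes.
 */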
asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, regs->syscallno);

	audit_syscall_entry(syscall_get_arch(), regs->syscallno,
			    regs->orig_x0, regs->regs[1], regs->regs[2], regs->regs[3]);

	return regs->syscallno;
}
asmlinkage void syscall_trace_exit(struct pt_regs *regs)
{
	audit_syscall_exit(regs);

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
}