// SPDX-License-Identifier: GPL-2.0
/*
 * SuperH process tracing
 *
 * Copyright (C) 1999, 2000  Kaz Kojima & Niibe Yutaka
 * Copyright (C) 2002 - 2009  Paul Mundt
 *
 * Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp>
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/io.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
#include <asm/fpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/*
 * This routine will get a word off of the process kernel stack.
 */
static inline int get_stack_long(struct task_struct *task, int offset)
{
	unsigned char *stack;

	stack = (unsigned char *)task_pt_regs(task);
	stack += offset;
	return (*((int *)stack));
}

/*
 * This routine will put a word on the process kernel stack.
 */
static inline int put_stack_long(struct task_struct *task, int offset,
				 unsigned long data)
{
	unsigned char *stack;

	stack = (unsigned char *)task_pt_regs(task);
	stack += offset;
	*(unsigned long *) stack = data;

	return 0;
}

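/*
 * Overflow handler attached to the ptrace hardware breakpoint; it just
 * disarms the breakpoint again so that it fires only once.
 */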
void ptrace_triggered(struct perf_event *bp,
		      struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event_attr attr;

	/*
	 * Disable the breakpoint request here since ptrace has defined a
	 * one-shot behaviour for breakpoint exceptions.
	 */
	attr = bp->attr;
	attr.disabled = true;
	modify_user_hw_breakpoint(bp, &attr);
}

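/*
 * Install or re-arm the hardware breakpoint used for single-stepping:
 * the first call registers a breakpoint for @tsk at @addr, later calls
 * just move the existing breakpoint there and re-enable it.
 */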
static int set_single_step(struct task_struct *tsk, unsigned long addr)
{
	struct thread_struct *thread = &tsk->thread;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = thread->ptrace_bps[0];
	if (!bp) {
		ptrace_breakpoint_init(&attr);

		attr.bp_addr = addr;
		attr.bp_len = HW_BREAKPOINT_LEN_2;
		attr.bp_type = HW_BREAKPOINT_R;

		bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
						 NULL, tsk);
		if (IS_ERR(bp))
			return PTR_ERR(bp);

		thread->ptrace_bps[0] = bp;
	} else {
		int err;

		attr = bp->attr;
		attr.bp_addr = addr;
		/* reenable breakpoint */
		attr.disabled = false;
		err = modify_user_hw_breakpoint(bp, &attr);
		if (unlikely(err))
			return err;
	}

	return 0;
}

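/*
 * Enable/disable single-stepping: TIF_SINGLESTEP is tracked in the
 * thread flags, while the step itself is implemented with the hardware
 * breakpoint set up by set_single_step() on the task's current pc.
 */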
void user_enable_single_step(struct task_struct *child)
{
	unsigned long pc = get_stack_long(child, offsetof(struct pt_regs, pc));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);

	set_single_step(child, pc);
}

void user_disable_single_step(struct task_struct *child)
{
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
}

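/*
 * Regset get/set handlers for the general purpose registers; both
 * operate directly on the saved pt_regs of the traced task.
 */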
static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       struct membuf to)
{
	const struct pt_regs *regs = task_pt_regs(target);

	return membuf_write(&to, regs, sizeof(struct pt_regs));
}

static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 regs->regs,
				 0, 16 * sizeof(unsigned long));
	if (!ret && count > 0)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &regs->pc,
					 offsetof(struct pt_regs, pc),
					 sizeof(struct pt_regs));

	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
				  sizeof(struct pt_regs), -1);

	return ret;
}

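/*
 * FPU regset handlers (CONFIG_SH_FPU): the FPU state lives in
 * thread.xstate, so init_fpu() is called first to make sure it exists
 * before copying in either direction.
 */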
#ifdef CONFIG_SH_FPU
static int fpregs_get(struct task_struct *target,
		      const struct user_regset *regset,
		      struct membuf to)
{
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	return membuf_write(&to, target->thread.xstate,
			    sizeof(struct user_fpu_struct));
}

static int fpregs_set(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      const void *kbuf, const void __user *ubuf)
{
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	set_stopped_child_used_math(target);

	if ((boot_cpu_data.flags & CPU_HAS_FPU))
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &target->thread.xstate->hardfpu, 0, -1);

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.xstate->softfpu, 0, -1);
}

static int fpregs_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	return tsk_used_math(target) ? regset->n : 0;
}
#endif

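/*
 * DSP regset handlers (CONFIG_SH_DSP): the DSP registers are kept in
 * thread.dsp_status and are only reported as active while SR.DSP is set.
 */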
#ifdef CONFIG_SH_DSP
static int dspregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       struct membuf to)
{
	const struct pt_dspregs *regs =
		(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;

	return membuf_write(&to, regs, sizeof(struct pt_dspregs));
}

static int dspregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	struct pt_dspregs *regs =
		(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs,
				 0, sizeof(struct pt_dspregs));
	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
				  sizeof(struct pt_dspregs), -1);

	return ret;
}

static int dspregs_active(struct task_struct *target,
			  const struct user_regset *regset)
{
	struct pt_regs *regs = task_pt_regs(target);

	return regs->sr & SR_DSP ? regset->n : 0;
}
#endif

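/*
 * Name/offset table for the user-visible registers held in struct pt_regs.
 */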
const struct pt_regs_offset regoffset_table[] = {
	REGS_OFFSET_NAME(0),
	REGS_OFFSET_NAME(1),
	REGS_OFFSET_NAME(2),
	REGS_OFFSET_NAME(3),
	REGS_OFFSET_NAME(4),
	REGS_OFFSET_NAME(5),
	REGS_OFFSET_NAME(6),
	REGS_OFFSET_NAME(7),
	REGS_OFFSET_NAME(8),
	REGS_OFFSET_NAME(9),
	REGS_OFFSET_NAME(10),
	REGS_OFFSET_NAME(11),
	REGS_OFFSET_NAME(12),
	REGS_OFFSET_NAME(13),
	REGS_OFFSET_NAME(14),
	REGS_OFFSET_NAME(15),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pr),
	REG_OFFSET_NAME(sr),
	REG_OFFSET_NAME(gbr),
	REG_OFFSET_NAME(mach),
	REG_OFFSET_NAME(macl),
	REG_OFFSET_NAME(tra),
	REG_OFFSET_END,
};

/*
 * These are our native regset flavours.
 */
enum sh_regset {
	REGSET_GENERAL,
#ifdef CONFIG_SH_FPU
	REGSET_FPU,
#endif
#ifdef CONFIG_SH_DSP
	REGSET_DSP,
#endif
};

static const struct user_regset sh_regsets[] = {
	/*
	 * Format is:
	 *	R0 --> R15
	 *	PC, PR, SR, GBR, MACH, MACL, TRA
	 */
	[REGSET_GENERAL] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(long),
		.align		= sizeof(long),
		.regset_get	= genregs_get,
		.set		= genregs_set,
	},

#ifdef CONFIG_SH_FPU
	[REGSET_FPU] = {
		.core_note_type	= NT_PRFPREG,
		.n		= sizeof(struct user_fpu_struct) / sizeof(long),
		.size		= sizeof(long),
		.align		= sizeof(long),
		.regset_get	= fpregs_get,
		.set		= fpregs_set,
		.active		= fpregs_active,
	},
#endif

#ifdef CONFIG_SH_DSP
	[REGSET_DSP] = {
		.n		= sizeof(struct pt_dspregs) / sizeof(long),
		.size		= sizeof(long),
		.align		= sizeof(long),
		.regset_get	= dspregs_get,
		.set		= dspregs_set,
		.active		= dspregs_active,
	},
#endif
};

static const struct user_regset_view user_sh_native_view = {
	.name		= "sh",
	.e_machine	= EM_SH,
	.regsets	= sh_regsets,
	.n		= ARRAY_SIZE(sh_regsets),
};

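/*
 * Return the native regset view; used for core dumps and the generic
 * regset based ptrace requests.
 */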
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_sh_native_view;
}

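/*
 * Architecture-specific ptrace requests: peek/poke of the USER area plus
 * the [GS]ET{REGS,FPREGS,DSPREGS} register block requests; everything
 * else is passed on to the generic ptrace_request().
 */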
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	unsigned long __user *datap = (unsigned long __user *)data;
	int ret;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		if (addr < sizeof(struct pt_regs))
			tmp = get_stack_long(child, addr);
		else if (addr >= offsetof(struct user, fpu) &&
			 addr < offsetof(struct user, u_fpvalid)) {
			if (!tsk_used_math(child)) {
				if (addr == offsetof(struct user, fpu.fpscr))
					tmp = FPSCR_INIT;
				else
					tmp = 0;
			} else {
				unsigned long index;
				ret = init_fpu(child);
				if (ret)
					break;
				index = addr - offsetof(struct user, fpu);
				tmp = ((unsigned long *)child->thread.xstate)
					[index >> 2];
			}
		} else if (addr == offsetof(struct user, u_fpvalid))
			tmp = !!tsk_used_math(child);
		else if (addr == PT_TEXT_ADDR)
			tmp = child->mm->start_code;
		else if (addr == PT_DATA_ADDR)
			tmp = child->mm->start_data;
		else if (addr == PT_TEXT_END_ADDR)
			tmp = child->mm->end_code;
		else if (addr == PT_TEXT_LEN)
			tmp = child->mm->end_code - child->mm->start_code;
		else
			tmp = 0;
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		if (addr < sizeof(struct pt_regs))
			ret = put_stack_long(child, addr, data);
		else if (addr >= offsetof(struct user, fpu) &&
			 addr < offsetof(struct user, u_fpvalid)) {
			unsigned long index;
			ret = init_fpu(child);
			if (ret)
				break;
			index = addr - offsetof(struct user, fpu);
			set_stopped_child_used_math(child);
			((unsigned long *)child->thread.xstate)
				[index >> 2] = data;
			ret = 0;
		} else if (addr == offsetof(struct user, u_fpvalid)) {
			conditional_stopped_child_used_math(data, child);
			ret = 0;
		}
		break;

	case PTRACE_GETREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_GENERAL,
					   0, sizeof(struct pt_regs),
					   datap);
	case PTRACE_SETREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_GENERAL,
					     0, sizeof(struct pt_regs),
					     datap);
#ifdef CONFIG_SH_FPU
	case PTRACE_GETFPREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_FPU,
					   0, sizeof(struct user_fpu_struct),
					   datap);
	case PTRACE_SETFPREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_FPU,
					     0, sizeof(struct user_fpu_struct),
					     datap);
#endif
#ifdef CONFIG_SH_DSP
	case PTRACE_GETDSPREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_DSP,
					   0, sizeof(struct pt_dspregs),
					   datap);
	case PTRACE_SETDSPREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_DSP,
					     0, sizeof(struct pt_dspregs),
					     datap);
#endif
	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

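/*
 * Called on syscall entry from the assembly glue: handles the ptrace
 * syscall-entry stop, seccomp, the sys_enter tracepoint and syscall
 * auditing. A return value of -1 indicates the syscall should not run.
 */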
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    ptrace_report_syscall_entry(regs)) {
		regs->regs[0] = -ENOSYS;
		return -1;
	}

	if (secure_computing() == -1)
		return -1;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->regs[0]);

	audit_syscall_entry(regs->regs[3], regs->regs[4], regs->regs[5],
			    regs->regs[6], regs->regs[7]);

	return 0;
}

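/*
 * Called on syscall exit: reports to audit and the sys_exit tracepoint,
 * then delivers the ptrace syscall-exit/single-step stop if needed.
 */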
asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
{
	int step;

	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->regs[0]);

	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		ptrace_report_syscall_exit(regs, step);
}