treewide: remove redundant IS_ERR() before error code check
[linux/fpc-iii.git] arch/sh/kernel/ptrace_32.c
// SPDX-License-Identifier: GPL-2.0
/*
 * SuperH process tracing
 *
 * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
 * Copyright (C) 2002 - 2009 Paul Mundt
 *
 * Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp>
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/io.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/tracehook.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
#include <asm/fpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
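
/*
 * CREATE_TRACE_POINTS must be defined in exactly one translation unit before
 * including <trace/events/syscalls.h>; it makes this file emit the
 * sys_enter/sys_exit tracepoint bodies used by the trace_sys_enter() and
 * trace_sys_exit() calls further down, while every other user of the header
 * only gets the declarations.
 */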
/*
 * This routine will get a word off of the process kernel stack.
 */
static inline int get_stack_long(struct task_struct *task, int offset)
{
        unsigned char *stack;

        stack = (unsigned char *)task_pt_regs(task);
        stack += offset;
        return (*((int *)stack));
}
/*
 * This routine will put a word on the process kernel stack.
 */
static inline int put_stack_long(struct task_struct *task, int offset,
                                 unsigned long data)
{
        unsigned char *stack;

        stack = (unsigned char *)task_pt_regs(task);
        stack += offset;
        *(unsigned long *) stack = data;
        return 0;
}
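
/*
 * Both helpers treat "offset" as a byte offset into the traced task's saved
 * struct pt_regs, so callers index individual registers with offsetof(),
 * e.g. get_stack_long(child, offsetof(struct pt_regs, pc)) as done in
 * user_enable_single_step() below.
 */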
void ptrace_triggered(struct perf_event *bp,
                      struct perf_sample_data *data, struct pt_regs *regs)
{
        struct perf_event_attr attr;

        /*
         * Disable the breakpoint request here since ptrace has defined a
         * one-shot behaviour for breakpoint exceptions.
         */
        attr = bp->attr;
        attr.disabled = true;
        modify_user_hw_breakpoint(bp, &attr);
}
static int set_single_step(struct task_struct *tsk, unsigned long addr)
{
        struct thread_struct *thread = &tsk->thread;
        struct perf_event *bp;
        struct perf_event_attr attr;

        bp = thread->ptrace_bps[0];
        if (!bp) {
                ptrace_breakpoint_init(&attr);

                attr.bp_addr = addr;
                attr.bp_len = HW_BREAKPOINT_LEN_2;
                attr.bp_type = HW_BREAKPOINT_R;

                bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
                                                 NULL, tsk);
                if (IS_ERR(bp))
                        return PTR_ERR(bp);

                thread->ptrace_bps[0] = bp;
        } else {
                int err;

                attr = bp->attr;
                attr.bp_addr = addr;
                /* reenable breakpoint */
                attr.disabled = false;
                err = modify_user_hw_breakpoint(bp, &attr);
                if (unlikely(err))
                        return err;
        }

        return 0;
}
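
/*
 * Single-stepping is built on top of a single user hardware breakpoint
 * (thread->ptrace_bps[0]): the first request registers it, later requests
 * just retarget and re-enable it, since ptrace_triggered() above disables
 * it again after every hit (one-shot behaviour).
 */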
void user_enable_single_step(struct task_struct *child)
{
        unsigned long pc = get_stack_long(child, offsetof(struct pt_regs, pc));

        set_tsk_thread_flag(child, TIF_SINGLESTEP);

        set_single_step(child, pc);
}

void user_disable_single_step(struct task_struct *child)
{
        clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
        user_disable_single_step(child);
}
static int genregs_get(struct task_struct *target,
                       const struct user_regset *regset,
                       unsigned int pos, unsigned int count,
                       void *kbuf, void __user *ubuf)
{
        const struct pt_regs *regs = task_pt_regs(target);
        int ret;

        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                  regs->regs,
                                  0, 16 * sizeof(unsigned long));
        if (!ret)
                /* PC, PR, SR, GBR, MACH, MACL, TRA */
                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                          &regs->pc,
                                          offsetof(struct pt_regs, pc),
                                          sizeof(struct pt_regs));
        if (!ret)
                ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
                                               sizeof(struct pt_regs), -1);

        return ret;
}
static int genregs_set(struct task_struct *target,
                       const struct user_regset *regset,
                       unsigned int pos, unsigned int count,
                       const void *kbuf, const void __user *ubuf)
{
        struct pt_regs *regs = task_pt_regs(target);
        int ret;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 regs->regs,
                                 0, 16 * sizeof(unsigned long));
        if (!ret && count > 0)
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                         &regs->pc,
                                         offsetof(struct pt_regs, pc),
                                         sizeof(struct pt_regs));
        if (!ret)
                ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
                                                sizeof(struct pt_regs), -1);

        return ret;
}
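
/*
 * The general regset image therefore follows the struct pt_regs layout:
 * regs[0..15] first, then pc, pr, sr, gbr, mach, macl and tra; any remaining
 * space in the caller's buffer is zero-filled on read and ignored on write.
 */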
#ifdef CONFIG_SH_FPU
int fpregs_get(struct task_struct *target,
               const struct user_regset *regset,
               unsigned int pos, unsigned int count,
               void *kbuf, void __user *ubuf)
{
        int ret;

        ret = init_fpu(target);
        if (ret)
                return ret;

        if ((boot_cpu_data.flags & CPU_HAS_FPU))
                return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                           &target->thread.xstate->hardfpu, 0, -1);

        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                   &target->thread.xstate->softfpu, 0, -1);
}

static int fpregs_set(struct task_struct *target,
                      const struct user_regset *regset,
                      unsigned int pos, unsigned int count,
                      const void *kbuf, const void __user *ubuf)
{
        int ret;

        ret = init_fpu(target);
        if (ret)
                return ret;

        set_stopped_child_used_math(target);

        if ((boot_cpu_data.flags & CPU_HAS_FPU))
                return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                          &target->thread.xstate->hardfpu, 0, -1);

        return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                  &target->thread.xstate->softfpu, 0, -1);
}

static int fpregs_active(struct task_struct *target,
                         const struct user_regset *regset)
{
        return tsk_used_math(target) ? regset->n : 0;
}
#endif
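
/*
 * Whether the hardware FPU context (hardfpu) or the emulated one (softfpu)
 * is exposed depends on CPU_HAS_FPU at run time; either way init_fpu() must
 * have set up target->thread.xstate before the copy, which is why both
 * accessors bail out early when it fails.
 */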
#ifdef CONFIG_SH_DSP
static int dspregs_get(struct task_struct *target,
                       const struct user_regset *regset,
                       unsigned int pos, unsigned int count,
                       void *kbuf, void __user *ubuf)
{
        const struct pt_dspregs *regs =
                (struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
        int ret;

        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs,
                                  0, sizeof(struct pt_dspregs));
        if (!ret)
                ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
                                               sizeof(struct pt_dspregs), -1);

        return ret;
}

static int dspregs_set(struct task_struct *target,
                       const struct user_regset *regset,
                       unsigned int pos, unsigned int count,
                       const void *kbuf, const void __user *ubuf)
{
        struct pt_dspregs *regs =
                (struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
        int ret;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs,
                                 0, sizeof(struct pt_dspregs));
        if (!ret)
                ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
                                                sizeof(struct pt_dspregs), -1);

        return ret;
}

static int dspregs_active(struct task_struct *target,
                          const struct user_regset *regset)
{
        struct pt_regs *regs = task_pt_regs(target);

        return regs->sr & SR_DSP ? regset->n : 0;
}
#endif
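
/*
 * The DSP regset is only reported as active while the task has the DSP
 * enabled (SR.DSP set in its saved status register), so the regset core can
 * skip it entirely for tasks that never used the DSP.
 */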
const struct pt_regs_offset regoffset_table[] = {
        REGS_OFFSET_NAME(0),
        REGS_OFFSET_NAME(1),
        REGS_OFFSET_NAME(2),
        REGS_OFFSET_NAME(3),
        REGS_OFFSET_NAME(4),
        REGS_OFFSET_NAME(5),
        REGS_OFFSET_NAME(6),
        REGS_OFFSET_NAME(7),
        REGS_OFFSET_NAME(8),
        REGS_OFFSET_NAME(9),
        REGS_OFFSET_NAME(10),
        REGS_OFFSET_NAME(11),
        REGS_OFFSET_NAME(12),
        REGS_OFFSET_NAME(13),
        REGS_OFFSET_NAME(14),
        REGS_OFFSET_NAME(15),
        REG_OFFSET_NAME(pc),
        REG_OFFSET_NAME(pr),
        REG_OFFSET_NAME(sr),
        REG_OFFSET_NAME(gbr),
        REG_OFFSET_NAME(mach),
        REG_OFFSET_NAME(macl),
        REG_OFFSET_NAME(tra),
        REG_OFFSET_END,
};
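
/*
 * regoffset_table maps register names ("r0".."r15", "pc", "pr", ...) to
 * their byte offsets within struct pt_regs, terminated by REG_OFFSET_END.
 * A name lookup over it is, in essence (illustrative sketch only, not the
 * kernel's own helper):
 *
 *	static int lookup_reg_offset(const char *name)
 *	{
 *		const struct pt_regs_offset *roff;
 *
 *		for (roff = regoffset_table; roff->name != NULL; roff++)
 *			if (!strcmp(roff->name, name))
 *				return roff->offset;
 *		return -EINVAL;
 *	}
 */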
/*
 * These are our native regset flavours.
 */
enum sh_regset {
        REGSET_GENERAL,
#ifdef CONFIG_SH_FPU
        REGSET_FPU,
#endif
#ifdef CONFIG_SH_DSP
        REGSET_DSP,
#endif
};
static const struct user_regset sh_regsets[] = {
        /*
         * Format is:
         *      R0 --> R15
         *      PC, PR, SR, GBR, MACH, MACL, TRA
         */
        [REGSET_GENERAL] = {
                .core_note_type = NT_PRSTATUS,
                .n = ELF_NGREG,
                .size = sizeof(long),
                .align = sizeof(long),
                .get = genregs_get,
                .set = genregs_set,
        },

#ifdef CONFIG_SH_FPU
        [REGSET_FPU] = {
                .core_note_type = NT_PRFPREG,
                .n = sizeof(struct user_fpu_struct) / sizeof(long),
                .size = sizeof(long),
                .align = sizeof(long),
                .get = fpregs_get,
                .set = fpregs_set,
                .active = fpregs_active,
        },
#endif

#ifdef CONFIG_SH_DSP
        [REGSET_DSP] = {
                .n = sizeof(struct pt_dspregs) / sizeof(long),
                .size = sizeof(long),
                .align = sizeof(long),
                .get = dspregs_get,
                .set = dspregs_set,
                .active = dspregs_active,
        },
#endif
};
static const struct user_regset_view user_sh_native_view = {
        .name = "sh",
        .e_machine = EM_SH,
        .regsets = sh_regsets,
        .n = ARRAY_SIZE(sh_regsets),
};

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
        return &user_sh_native_view;
}
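
/*
 * task_user_regset_view() is the hook the generic regset code uses to find
 * these regsets, e.g. for ELF core dumps and for the PTRACE_GETREGSET /
 * PTRACE_SETREGSET requests handled in kernel/ptrace.c.
 */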
long arch_ptrace(struct task_struct *child, long request,
                 unsigned long addr, unsigned long data)
{
        unsigned long __user *datap = (unsigned long __user *)data;
        int ret;

        switch (request) {
        /* read the word at location addr in the USER area. */
        case PTRACE_PEEKUSR: {
                unsigned long tmp;

                ret = -EIO;
                if ((addr & 3) || addr < 0 ||
                    addr > sizeof(struct user) - 3)
                        break;

                if (addr < sizeof(struct pt_regs))
                        tmp = get_stack_long(child, addr);
                else if (addr >= offsetof(struct user, fpu) &&
                         addr < offsetof(struct user, u_fpvalid)) {
                        if (!tsk_used_math(child)) {
                                if (addr == offsetof(struct user, fpu.fpscr))
                                        tmp = FPSCR_INIT;
                                else
                                        tmp = 0;
                        } else {
                                unsigned long index;
                                ret = init_fpu(child);
                                if (ret)
                                        break;
                                index = addr - offsetof(struct user, fpu);
                                tmp = ((unsigned long *)child->thread.xstate)
                                        [index >> 2];
                        }
                } else if (addr == offsetof(struct user, u_fpvalid))
                        tmp = !!tsk_used_math(child);
                else if (addr == PT_TEXT_ADDR)
                        tmp = child->mm->start_code;
                else if (addr == PT_DATA_ADDR)
                        tmp = child->mm->start_data;
                else if (addr == PT_TEXT_END_ADDR)
                        tmp = child->mm->end_code;
                else if (addr == PT_TEXT_LEN)
                        tmp = child->mm->end_code - child->mm->start_code;
                else
                        tmp = 0;
                ret = put_user(tmp, datap);
                break;
        }

        case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
                ret = -EIO;
                if ((addr & 3) || addr < 0 ||
                    addr > sizeof(struct user) - 3)
                        break;

                if (addr < sizeof(struct pt_regs))
                        ret = put_stack_long(child, addr, data);
                else if (addr >= offsetof(struct user, fpu) &&
                         addr < offsetof(struct user, u_fpvalid)) {
                        unsigned long index;
                        ret = init_fpu(child);
                        if (ret)
                                break;
                        index = addr - offsetof(struct user, fpu);
                        set_stopped_child_used_math(child);
                        ((unsigned long *)child->thread.xstate)
                                [index >> 2] = data;
                        ret = 0;
                } else if (addr == offsetof(struct user, u_fpvalid)) {
                        conditional_stopped_child_used_math(data, child);
                        ret = 0;
                }
                break;

        case PTRACE_GETREGS:
                return copy_regset_to_user(child, &user_sh_native_view,
                                           REGSET_GENERAL,
                                           0, sizeof(struct pt_regs),
                                           datap);
        case PTRACE_SETREGS:
                return copy_regset_from_user(child, &user_sh_native_view,
                                             REGSET_GENERAL,
                                             0, sizeof(struct pt_regs),
                                             datap);
#ifdef CONFIG_SH_FPU
        case PTRACE_GETFPREGS:
                return copy_regset_to_user(child, &user_sh_native_view,
                                           REGSET_FPU,
                                           0, sizeof(struct user_fpu_struct),
                                           datap);
        case PTRACE_SETFPREGS:
                return copy_regset_from_user(child, &user_sh_native_view,
                                             REGSET_FPU,
                                             0, sizeof(struct user_fpu_struct),
                                             datap);
#endif
#ifdef CONFIG_SH_DSP
        case PTRACE_GETDSPREGS:
                return copy_regset_to_user(child, &user_sh_native_view,
                                           REGSET_DSP,
                                           0, sizeof(struct pt_dspregs),
                                           datap);
        case PTRACE_SETDSPREGS:
                return copy_regset_from_user(child, &user_sh_native_view,
                                             REGSET_DSP,
                                             0, sizeof(struct pt_dspregs),
                                             datap);
#endif
        default:
                ret = ptrace_request(child, request, addr, data);
                break;
        }

        return ret;
}
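
/*
 * From the tracer's side the PEEKUSR path above is reached with something
 * along these lines (illustrative userspace sketch, error handling omitted):
 *
 *	#include <sys/ptrace.h>
 *	#include <asm/ptrace.h>
 *	#include <stddef.h>
 *
 *	errno = 0;
 *	long pc = ptrace(PTRACE_PEEKUSER, pid,
 *			 (void *)offsetof(struct pt_regs, pc), NULL);
 *
 * since offsets below sizeof(struct pt_regs) are read straight out of the
 * child's saved register frame via get_stack_long().
 */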
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
        long ret = 0;

        secure_computing_strict(regs->regs[0]);

        if (test_thread_flag(TIF_SYSCALL_TRACE) &&
            tracehook_report_syscall_entry(regs))
                /*
                 * Tracing decided this syscall should not happen.
                 * We'll return a bogus call number to get an ENOSYS
                 * error, but leave the original number in regs->regs[0].
                 */
                ret = -1L;

        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_enter(regs, regs->regs[0]);

        audit_syscall_entry(regs->regs[3], regs->regs[4], regs->regs[5],
                            regs->regs[6], regs->regs[7]);

        return ret ?: regs->regs[0];
}
asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
{
        int step;

        audit_syscall_exit(regs);

        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_exit(regs, regs->regs[0]);

        step = test_thread_flag(TIF_SINGLESTEP);
        if (step || test_thread_flag(TIF_SYSCALL_TRACE))
                tracehook_report_syscall_exit(regs, step);
}
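
/*
 * The syscall entry path calls do_syscall_trace_enter() before dispatching
 * the system call and do_syscall_trace_leave() after it returns; the enter
 * hook's -1L return forces an ENOSYS failure when the tracer rejects the
 * call, and a non-zero "step" on exit makes tracehook_report_syscall_exit()
 * report a single-step trap to the tracer rather than a plain syscall-exit
 * stop.
 */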