/*
 * NOTE(extraction): the three lines below this file's SPDX header were
 * gitweb page residue (commit subject "treewide: remove redundant
 * IS_ERR() before error code check", repo path, and blob hash for
 * arch/hexagon/kernel/ptrace.c); preserved here as a comment.
 */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Ptrace support for Hexagon
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/user.h>
#include <linux/elf.h>

#include <asm/user.h>
21 #if arch_has_single_step()
22 /* Both called from ptrace_resume */
23 void user_enable_single_step(struct task_struct *child)
25 pt_set_singlestep(task_pt_regs(child));
26 set_tsk_thread_flag(child, TIF_SINGLESTEP);
29 void user_disable_single_step(struct task_struct *child)
31 pt_clr_singlestep(task_pt_regs(child));
32 clear_tsk_thread_flag(child, TIF_SINGLESTEP);
34 #endif
36 static int genregs_get(struct task_struct *target,
37 const struct user_regset *regset,
38 unsigned int pos, unsigned int count,
39 void *kbuf, void __user *ubuf)
41 int ret;
42 unsigned int dummy;
43 struct pt_regs *regs = task_pt_regs(target);
46 if (!regs)
47 return -EIO;
49 /* The general idea here is that the copyout must happen in
50 * exactly the same order in which the userspace expects these
51 * regs. Now, the sequence in userspace does not match the
52 * sequence in the kernel, so everything past the 32 gprs
53 * happens one at a time.
55 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
56 &regs->r00, 0, 32*sizeof(unsigned long));
58 #define ONEXT(KPT_REG, USR_REG) \
59 if (!ret) \
60 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, \
61 KPT_REG, offsetof(struct user_regs_struct, USR_REG), \
62 offsetof(struct user_regs_struct, USR_REG) + \
63 sizeof(unsigned long));
65 /* Must be exactly same sequence as struct user_regs_struct */
66 ONEXT(&regs->sa0, sa0);
67 ONEXT(&regs->lc0, lc0);
68 ONEXT(&regs->sa1, sa1);
69 ONEXT(&regs->lc1, lc1);
70 ONEXT(&regs->m0, m0);
71 ONEXT(&regs->m1, m1);
72 ONEXT(&regs->usr, usr);
73 ONEXT(&regs->preds, p3_0);
74 ONEXT(&regs->gp, gp);
75 ONEXT(&regs->ugp, ugp);
76 ONEXT(&pt_elr(regs), pc);
77 dummy = pt_cause(regs);
78 ONEXT(&dummy, cause);
79 ONEXT(&pt_badva(regs), badva);
80 #if CONFIG_HEXAGON_ARCH_VERSION >=4
81 ONEXT(&regs->cs0, cs0);
82 ONEXT(&regs->cs1, cs1);
83 #endif
85 /* Pad the rest with zeros, if needed */
86 if (!ret)
87 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
88 offsetof(struct user_regs_struct, pad1), -1);
89 return ret;
92 static int genregs_set(struct task_struct *target,
93 const struct user_regset *regset,
94 unsigned int pos, unsigned int count,
95 const void *kbuf, const void __user *ubuf)
97 int ret;
98 unsigned long bucket;
99 struct pt_regs *regs = task_pt_regs(target);
101 if (!regs)
102 return -EIO;
104 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
105 &regs->r00, 0, 32*sizeof(unsigned long));
107 #define INEXT(KPT_REG, USR_REG) \
108 if (!ret) \
109 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, \
110 KPT_REG, offsetof(struct user_regs_struct, USR_REG), \
111 offsetof(struct user_regs_struct, USR_REG) + \
112 sizeof(unsigned long));
114 /* Must be exactly same sequence as struct user_regs_struct */
115 INEXT(&regs->sa0, sa0);
116 INEXT(&regs->lc0, lc0);
117 INEXT(&regs->sa1, sa1);
118 INEXT(&regs->lc1, lc1);
119 INEXT(&regs->m0, m0);
120 INEXT(&regs->m1, m1);
121 INEXT(&regs->usr, usr);
122 INEXT(&regs->preds, p3_0);
123 INEXT(&regs->gp, gp);
124 INEXT(&regs->ugp, ugp);
125 INEXT(&pt_elr(regs), pc);
127 /* CAUSE and BADVA aren't writeable. */
128 INEXT(&bucket, cause);
129 INEXT(&bucket, badva);
131 #if CONFIG_HEXAGON_ARCH_VERSION >=4
132 INEXT(&regs->cs0, cs0);
133 INEXT(&regs->cs1, cs1);
134 #endif
136 /* Ignore the rest, if needed */
137 if (!ret)
138 ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
139 offsetof(struct user_regs_struct, pad1), -1);
141 if (ret)
142 return ret;
145 * This is special; SP is actually restored by the VM via the
146 * special event record which is set by the special trap.
148 regs->hvmer.vmpsp = regs->r29;
149 return 0;
152 enum hexagon_regset {
153 REGSET_GENERAL,
156 static const struct user_regset hexagon_regsets[] = {
157 [REGSET_GENERAL] = {
158 .core_note_type = NT_PRSTATUS,
159 .n = ELF_NGREG,
160 .size = sizeof(unsigned long),
161 .align = sizeof(unsigned long),
162 .get = genregs_get,
163 .set = genregs_set,
167 static const struct user_regset_view hexagon_user_view = {
168 .name = "hexagon",
169 .e_machine = ELF_ARCH,
170 .ei_osabi = ELF_OSABI,
171 .regsets = hexagon_regsets,
172 .e_flags = ELF_CORE_EFLAGS,
173 .n = ARRAY_SIZE(hexagon_regsets)
176 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
178 return &hexagon_user_view;
181 void ptrace_disable(struct task_struct *child)
183 /* Boilerplate - resolves to null inline if no HW single-step */
184 user_disable_single_step(child);
187 long arch_ptrace(struct task_struct *child, long request,
188 unsigned long addr, unsigned long data)
190 return ptrace_request(child, request, addr, data);