/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This struct defines the way the registers are stored on the
 * kernel stack during a system call or other kernel entry.
 *
 * this should only contain volatile regs
 * since we can keep non-volatile in the thread_struct
 * should set this up when only volatiles are saved
 * by intr code.
 *
 * Since this is going on the stack, *CARE MUST BE TAKEN* to ensure
 * that the overall structure is a multiple of 16 bytes in length.
 *
 * Note that the offsets of the fields in this struct correspond with
 * the PT_* values below.  This simplifies arch/powerpc/kernel/ptrace.c.
 */
#ifndef _ASM_POWERPC_PTRACE_H
#define _ASM_POWERPC_PTRACE_H

#include <uapi/asm/ptrace.h>
#include <asm/asm-const.h>

#ifndef __ASSEMBLY__
struct pt_regs
{
	union {
		struct user_pt_regs user_regs;
		struct {
			unsigned long gpr[32];
			unsigned long nip;
			unsigned long msr;
			unsigned long orig_gpr3;
			unsigned long ctr;
			unsigned long link;
			unsigned long xer;
			unsigned long ccr;
#ifdef CONFIG_PPC64
			unsigned long softe;
#else
			unsigned long mq;
#endif
			unsigned long trap;
			unsigned long dar;
			unsigned long dsisr;
			unsigned long result;
		};
	};

	union {
		struct {
#ifdef CONFIG_PPC64
			unsigned long ppr;
#endif
#ifdef CONFIG_PPC_KUAP
			unsigned long kuap;
#endif
		};
		unsigned long __pad[2];	/* Maintain 16 byte interrupt stack alignment */
	};
};
#endif
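/*
 * Illustrative sketch (not part of the original header): the layout rules
 * in the comment at the top of this file require sizeof(struct pt_regs)
 * to stay a multiple of 16 bytes.  Assuming BUILD_BUG_ON() from
 * <linux/build_bug.h> is visible, a compile-time check placed in any
 * function could look like:
 *
 *	BUILD_BUG_ON(sizeof(struct pt_regs) % 16 != 0);
 */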
#ifdef __powerpc64__

/*
 * Size of redzone that userspace is allowed to use below the stack
 * pointer.  This is 288 in the 64-bit big-endian ELF ABI, and 512 in
 * the new ELFv2 little-endian ABI, so we allow the larger amount.
 *
 * For kernel code we allow a 288-byte redzone, in order to conserve
 * kernel stack space; gcc currently only uses 288 bytes, and will
 * hopefully allow explicit control of the redzone size in future.
 */
#define USER_REDZONE_SIZE	512
#define KERNEL_REDZONE_SIZE	288

#define STACK_FRAME_OVERHEAD	112	/* size of minimum stack frame */
#define STACK_FRAME_LR_SAVE	2	/* Location of LR in stack frame */
#define STACK_FRAME_REGS_MARKER	ASM_CONST(0x7265677368657265)
#define STACK_INT_FRAME_SIZE	(sizeof(struct pt_regs) + \
				 STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE)
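/*
 * In other words, an interrupt/exception entry reserves room below the
 * interrupted kernel stack pointer for the saved register frame
 * (struct pt_regs), a minimal ABI stack frame for calling into C code
 * (STACK_FRAME_OVERHEAD), and KERNEL_REDZONE_SIZE so that any redzone the
 * interrupted kernel code is still using below its stack pointer survives.
 */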
#define STACK_FRAME_MARKER	12

#ifdef PPC64_ELF_ABI_v2
#define STACK_FRAME_MIN_SIZE	32
#else
#define STACK_FRAME_MIN_SIZE	STACK_FRAME_OVERHEAD
#endif

/* Size of dummy stack frame allocated when calling signal handler. */
#define __SIGNAL_FRAMESIZE	128
#define __SIGNAL_FRAMESIZE32	64

#else /* __powerpc64__ */

#define USER_REDZONE_SIZE	0
#define KERNEL_REDZONE_SIZE	0

#define STACK_FRAME_OVERHEAD	16	/* size of minimum stack frame */
#define STACK_FRAME_LR_SAVE	1	/* Location of LR in stack frame */
#define STACK_FRAME_REGS_MARKER	ASM_CONST(0x72656773)
#define STACK_INT_FRAME_SIZE	(sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD)
#define STACK_FRAME_MARKER	2
#define STACK_FRAME_MIN_SIZE	STACK_FRAME_OVERHEAD

/* Size of stack frame allocated when calling signal handler. */
#define __SIGNAL_FRAMESIZE	64

#endif /* __powerpc64__ */

#ifndef __ASSEMBLY__
static inline unsigned long instruction_pointer(struct pt_regs *regs)
{
	return regs->nip;
}

static inline void instruction_pointer_set(struct pt_regs *regs,
		unsigned long val)
{
	regs->nip = val;
}

static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
	return regs->gpr[1];
}

static inline unsigned long frame_pointer(struct pt_regs *regs)
{
	return 0;
}

#ifdef CONFIG_SMP
extern unsigned long profile_pc(struct pt_regs *regs);
#else
#define profile_pc(regs) instruction_pointer(regs)
#endif

#define kernel_stack_pointer(regs) ((regs)->gpr[1])
static inline int is_syscall_success(struct pt_regs *regs)
{
	return !(regs->ccr & 0x10000000);
}

static inline long regs_return_value(struct pt_regs *regs)
{
	if (is_syscall_success(regs))
		return regs->gpr[3];
	else
		return -regs->gpr[3];
}
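/*
 * Background note: the powerpc syscall ABI flags an error by setting the
 * summary-overflow bit of CR0 (the 0x10000000 bit tested in
 * is_syscall_success() above) and leaving the positive error number in r3,
 * which is why regs_return_value() negates gpr[3] on failure.  A hedged
 * usage sketch, e.g. from a tracing callback:
 *
 *	long ret = regs_return_value(regs);
 *	if (ret < 0)
 *		pr_debug("syscall failed: %ld\n", ret);
 */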
static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
{
	regs->gpr[3] = rc;
}

#ifdef __powerpc64__
#define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
#else
#define user_mode(regs) (((regs)->msr & MSR_PR) != 0)
#endif

#define force_successful_syscall_return()	\
	do {					\
		set_thread_flag(TIF_NOERROR);	\
	} while(0)

struct task_struct;
extern int ptrace_get_reg(struct task_struct *task, int regno,
			  unsigned long *data);
extern int ptrace_put_reg(struct task_struct *task, int regno,
			  unsigned long data);
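/*
 * The register frame saved on kernel entry for the current task lives at
 * the very top of its kernel stack, so current_pt_regs() below takes the
 * address THREAD_SIZE bytes above the stack page and steps back by one
 * struct pt_regs.
 */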
#define current_pt_regs() \
	((struct pt_regs *)((unsigned long)task_stack_page(current) + THREAD_SIZE) - 1)
/*
 * We use the least-significant bit of the trap field to indicate
 * whether we have saved the full set of registers, or only a
 * partial set.  A 1 there means the partial set.
 * On 4xx we use the next bit to indicate whether the exception
 * is a critical exception (1 means it is).
 */
#define FULL_REGS(regs)	(((regs)->trap & 1) == 0)
#ifndef __powerpc64__
#define IS_CRITICAL_EXC(regs)	(((regs)->trap & 2) != 0)
#define IS_MCHECK_EXC(regs)	(((regs)->trap & 4) != 0)
#define IS_DEBUG_EXC(regs)	(((regs)->trap & 8) != 0)
#endif /* ! __powerpc64__ */
#define TRAP(regs)		((regs)->trap & ~0xF)
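/*
 * Illustrative usage sketch (assumption, not from the original header):
 * since the low four bits of the trap word carry flags rather than part of
 * the exception vector, callers compare the masked value, e.g.
 *
 *	if (TRAP(regs) == 0x300)
 *		handle_dsi(regs);	(0x300 is the data storage interrupt;
 *					 handle_dsi() is a hypothetical helper)
 *
 * while FULL_REGS(regs) reports whether the non-volatile GPRs were saved.
 */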
#ifdef __powerpc64__
#define NV_REG_POISON		0xdeadbeefdeadbeefUL
#define CHECK_FULL_REGS(regs)	BUG_ON(regs->trap & 1)
#else
#define NV_REG_POISON		0xdeadbeef
#define CHECK_FULL_REGS(regs)						      \
do {									      \
	if ((regs)->trap & 1)						      \
		printk(KERN_CRIT "%s: partial register set\n", __func__);    \
} while (0)
#endif /* __powerpc64__ */

#define arch_has_single_step()	(1)

#ifndef CONFIG_BOOK3S_601
#define arch_has_block_step()	(true)
#else
#define arch_has_block_step()	(false)
#endif

#define ARCH_HAS_USER_SINGLE_STEP_REPORT
/*
 * kprobe-based event tracer support
 */

#include <linux/stddef.h>
#include <linux/thread_info.h>
extern int regs_query_register_offset(const char *name);
extern const char *regs_query_register_name(unsigned int offset);
#define MAX_REG_OFFSET (offsetof(struct pt_regs, dsisr))
/**
 * regs_get_register() - get register value from its offset
 * @regs:	pt_regs from which the register value is read.
 * @offset:	byte offset of the register within struct pt_regs.
 *
 * regs_get_register() returns the value of the register located @offset
 * bytes from the start of @regs.
 * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
 */
static inline unsigned long regs_get_register(struct pt_regs *regs,
						unsigned int offset)
{
	if (unlikely(offset > MAX_REG_OFFSET))
		return 0;
	return *(unsigned long *)((unsigned long)regs + offset);
}
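/*
 * Usage sketch (assumption, not part of the original header): together with
 * regs_query_register_offset(), a probe handler could fetch a register by
 * name, e.g. GPR3 (the register name "gpr3" is assumed here):
 *
 *	int off = regs_query_register_offset("gpr3");
 *	unsigned long val = (off >= 0) ? regs_get_register(regs, off) : 0;
 */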
/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains the kernel stack pointer.
 * @addr:	address to be checked.
 *
 * regs_within_kernel_stack() checks whether @addr is within the kernel
 * stack page(s).  If so, it returns true; otherwise it returns false.
 */
static inline bool regs_within_kernel_stack(struct pt_regs *regs,
						unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
}
/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains the kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns the @n-th entry of the kernel stack
 * pointed to by @regs.  If the @n-th entry lies outside the kernel stack,
 * this returns 0.
 */
static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
						      unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}
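/*
 * Usage sketch (assumption, not part of the original header): a fetch
 * routine could read a value spilled to the interrupted kernel stack, e.g.
 * the third word above the saved stack pointer:
 *
 *	unsigned long word = regs_get_kernel_stack_nth(regs, 3);
 *
 * A result of 0 means either that the entry really is zero or that it lies
 * outside the kernel stack.
 */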
#endif /* __ASSEMBLY__ */

#ifndef __powerpc64__
#else /* __powerpc64__ */
#define PT_FPSCR32 (PT_FPR0 + 2*32 + 1)	/* each FP reg occupies 2 32-bit userspace slots */
#define PT_VR0_32 164	/* each Vector reg occupies 4 slots in 32-bit */
#define PT_VSCR_32 (PT_VR0 + 32*4 + 3)
#define PT_VRSAVE_32 (PT_VR0 + 33*4)
#define PT_VSR0_32 300	/* each VSR reg occupies 4 slots in 32-bit */
#endif /* __powerpc64__ */
#endif /* _ASM_POWERPC_PTRACE_H */