#ifndef _ASM_X86_PTRACE_H
#define _ASM_X86_PTRACE_H

#include <asm/segment.h>
#include <asm/page_types.h>
#include <uapi/asm/ptrace.h>

#ifndef __ASSEMBLY__
#ifdef __i386__

struct pt_regs {
	unsigned long bx;
	unsigned long cx;
	unsigned long dx;
	unsigned long si;
	unsigned long di;
	unsigned long bp;
	unsigned long ax;
	unsigned long ds;
	unsigned long es;
	unsigned long fs;
	unsigned long gs;
	unsigned long orig_ax;
	unsigned long ip;
	unsigned long cs;
	unsigned long flags;
	unsigned long sp;
	unsigned long ss;
};

#else /* __i386__ */

struct pt_regs {
/*
 * C ABI says these regs are callee-preserved. They aren't saved on kernel entry
 * unless syscall needs a complete, fully filled "struct pt_regs".
 */
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
	unsigned long bp;
	unsigned long bx;
/* These regs are callee-clobbered. Always saved on kernel entry. */
	unsigned long r11;
	unsigned long r10;
	unsigned long r9;
	unsigned long r8;
	unsigned long ax;
	unsigned long cx;
	unsigned long dx;
	unsigned long si;
	unsigned long di;
/*
 * On syscall entry, this is syscall#. On CPU exception, this is error code.
 * On hw interrupt, it's IRQ number:
 */
	unsigned long orig_ax;
/* Return frame for iretq */
	unsigned long ip;
	unsigned long cs;
	unsigned long flags;
	unsigned long sp;
	unsigned long ss;
/* top of stack page */
};

#endif /* !__i386__ */
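
/*
 * Illustrative sketch (editorial addition, not part of the upstream
 * header): entry code reuses ax for the syscall return value, so code
 * that wants to know how the kernel was entered reads orig_ax instead.
 * Its meaning follows the struct comment above: syscall number on
 * syscall entry, error code on a CPU exception, IRQ number on a hardware
 * interrupt.  The helper name below is hypothetical.
 */
static inline unsigned long example_entry_orig_ax(const struct pt_regs *regs)
{
	return regs->orig_ax;
}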

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt_types.h>
#endif

struct cpuinfo_x86;
struct task_struct;

extern unsigned long profile_pc(struct pt_regs *regs);
#define profile_pc profile_pc

extern unsigned long
convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
			 int error_code, int si_code);

extern unsigned long syscall_trace_enter_phase1(struct pt_regs *, u32 arch);
extern long syscall_trace_enter_phase2(struct pt_regs *, u32 arch,
				       unsigned long phase1_result);

extern long syscall_trace_enter(struct pt_regs *);

static inline unsigned long regs_return_value(struct pt_regs *regs)
{
	return regs->ax;
}

/*
 * user_mode(regs) determines whether a register set came from user
 * mode. On x86_32, this is true if V8086 mode was enabled OR if the
 * register set was from protected mode with RPL-3 CS value. This
 * tricky test checks that with one comparison.
 *
 * On x86_64, vm86 mode is mercifully nonexistent, and we don't need
 * the extra check.
 */
static inline int user_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >= USER_RPL;
#else
	return !!(regs->cs & 3);
#endif
}
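
/*
 * Illustrative sketch (editorial addition, not part of the upstream
 * header): a typical caller of user_mode() is a trap handler deciding
 * whether the saved frame belongs to user space before turning the trap
 * into a signal.  The helper below is hypothetical and only shows the
 * intended call pattern with send_sigtrap() declared above.
 */
static inline void example_report_user_trap(struct task_struct *tsk,
					    struct pt_regs *regs,
					    int error_code, int si_code)
{
	/* Kernel-mode frames are not signalled; they are handled elsewhere. */
	if (user_mode(regs))
		send_sigtrap(tsk, regs, error_code, si_code);
}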

static inline int v8086_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return (regs->flags & X86_VM_MASK);
#else
	return 0;	/* No V86 mode support in long mode */
#endif
}

#ifdef CONFIG_X86_64
static inline bool user_64bit_mode(struct pt_regs *regs)
{
#ifndef CONFIG_PARAVIRT
	/*
	 * On non-paravirt systems, this is the only long mode CPL 3
	 * selector. We do not allow long mode selectors in the LDT.
	 */
	return regs->cs == __USER_CS;
#else
	/* Headers are too twisted for this to go in paravirt.h. */
	return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
#endif
}
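
/*
 * Illustrative sketch (editorial addition): user_64bit_mode() is the
 * check to use when saved user state must be interpreted differently
 * for 64-bit and 32-bit (compat) tasks, for example when sizing a word
 * on the user stack.  The helper below is hypothetical.
 */
static inline unsigned int example_user_word_size(struct pt_regs *regs)
{
	/* Bytes per word on the user stack for this frame. */
	return user_64bit_mode(regs) ? 8 : 4;
}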

#define current_user_stack_pointer()	current_pt_regs()->sp
#define compat_user_stack_pointer()	current_pt_regs()->sp
#endif /* CONFIG_X86_64 */

#ifdef CONFIG_X86_32
extern unsigned long kernel_stack_pointer(struct pt_regs *regs);
#else
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
	return regs->sp;
}
#endif

#define GET_IP(regs) ((regs)->ip)
#define GET_FP(regs) ((regs)->bp)
#define GET_USP(regs) ((regs)->sp)

#include <asm-generic/ptrace.h>

/* Query offset/name of register from its name/offset */
extern int regs_query_register_offset(const char *name);
extern const char *regs_query_register_name(unsigned int offset);
#define MAX_REG_OFFSET (offsetof(struct pt_regs, ss))

/**
 * regs_get_register() - get register value from its offset
 * @regs:	pt_regs from which register value is gotten.
 * @offset:	offset number of the register.
 *
 * regs_get_register returns the value of a register. The @offset is the
 * offset of the register within the struct pt_regs addressed by @regs.
 * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
 */
static inline unsigned long regs_get_register(struct pt_regs *regs,
					      unsigned int offset)
{
	if (unlikely(offset > MAX_REG_OFFSET))
		return 0;
#ifdef CONFIG_X86_32
	/*
	 * Traps from the kernel do not save sp and ss.
	 * Use the helper function to retrieve sp.
	 */
	if (offset == offsetof(struct pt_regs, sp) &&
	    regs->cs == __KERNEL_CS)
		return kernel_stack_pointer(regs);
#endif
	return *(unsigned long *)((unsigned long)regs + offset);
}
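
/*
 * Illustrative sketch (editorial addition): regs_query_register_offset()
 * and regs_get_register() are meant to be paired, e.g. by kprobes-style
 * code that receives a register *name* and needs its value from a saved
 * frame.  The helper below is hypothetical.
 */
static inline unsigned long example_named_register(struct pt_regs *regs,
						   const char *name)
{
	int offset = regs_query_register_offset(name);

	/* A negative offset means the name is unknown. */
	if (offset < 0)
		return 0;
	return regs_get_register(regs, (unsigned int)offset);
}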

/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks whether @addr is within the kernel
 * stack page(s). If @addr is within the kernel stack, it returns true.
 * If not, returns false.
 */
static inline int regs_within_kernel_stack(struct pt_regs *regs,
					   unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
}
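
/*
 * Illustrative sketch (editorial addition): a common use of
 * regs_within_kernel_stack() is to sanity-check an address before
 * dereferencing it during ad-hoc stack inspection, e.g. a saved frame
 * pointer.  The helper below is hypothetical.
 */
static inline unsigned long example_deref_stack_word(struct pt_regs *regs,
						     unsigned long addr)
{
	/* Refuse to dereference anything outside the current kernel stack. */
	if (!regs_within_kernel_stack(regs, addr))
		return 0;
	return *(unsigned long *)addr;
}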

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns the @n th entry of the kernel stack
 * which is specified by @regs. If the @n th entry is NOT in the kernel
 * stack, this returns 0.
 */
static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
						      unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}
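
/*
 * Illustrative sketch (editorial addition): probe and tracing code uses
 * regs_get_kernel_stack_nth() to fetch values passed on the stack rather
 * than in registers.  The helper and its slot numbering are hypothetical;
 * real callers derive the slot from the calling convention in use.
 */
static inline unsigned long example_stack_slot(struct pt_regs *regs,
					       unsigned int slot)
{
	/* Returns 0 when the requested slot is outside the kernel stack. */
	return regs_get_kernel_stack_nth(regs, slot);
}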

#define arch_has_single_step()	(1)
#ifdef CONFIG_X86_DEBUGCTLMSR
#define arch_has_block_step()	(1)
#else
#define arch_has_block_step()	(boot_cpu_data.x86 >= 6)
#endif

#define ARCH_HAS_USER_SINGLE_STEP_INFO

/*
 * When hitting ptrace_stop(), we cannot return using SYSRET because
 * that does not restore the full CPU state, only a minimal set. The
 * ptracer can change arbitrary register values, which is usually okay
 * because the usual ptrace stops run off the signal delivery path which
 * forces IRET; however, ptrace_event() stops happen in arbitrary places
 * in the kernel and don't force IRET path.
 *
 * So force IRET path after a ptrace stop.
 */
#define arch_ptrace_stop_needed(code, info)				\
({									\
	force_iret();							\
	false;								\
})

struct user_desc;
extern int do_get_thread_area(struct task_struct *p, int idx,
			      struct user_desc __user *info);
extern int do_set_thread_area(struct task_struct *p, int idx,
			      struct user_desc __user *info, int can_allocate);

#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PTRACE_H */