#ifndef _ASM_X86_PTRACE_H
#define _ASM_X86_PTRACE_H

#include <linux/compiler.h>	/* For __user */
#include <asm/ptrace-abi.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
/*
 * This struct defines the way the registers are stored on the
 * stack during a system call.
 */
#else /* __KERNEL__ */
        unsigned long orig_ax;
#endif /* __KERNEL__ */
/* arguments: non-interrupt/non-tracing syscalls only save up to here */
        unsigned long orig_rax;
/* end of arguments */
/* cpu exception frame or undefined */
/* top of stack page */
#else /* __KERNEL__ */
/* arguments: non-interrupt/non-tracing syscalls only save up to here */
        unsigned long orig_ax;
/* end of arguments */
/* cpu exception frame or undefined */
/* top of stack page */
#endif /* __KERNEL__ */
#endif /* !__i386__ */
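/*
 * Illustrative only: a minimal sketch (not part of this header) of how a
 * syscall-entry hook might read the saved state described above. The hook
 * function is hypothetical; it touches only fields named in this file
 * (orig_ax carries the syscall number at entry, ip/sp the trapping context).
 */
#if 0
static void example_syscall_entry_hook(struct pt_regs *regs)
{
        unsigned long nr = regs->orig_ax;	/* syscall number at entry */

        pr_debug("syscall %lu from ip=%lx sp=%lx\n",
                 nr, regs->ip, regs->sp);
}
#endif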
#include <linux/init.h>
extern unsigned long profile_pc(struct pt_regs *regs);
unsigned long convert_ip_to_linear(struct task_struct *child,
				   struct pt_regs *regs);
extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
			 int error_code, int si_code);
void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
extern long syscall_trace_enter(struct pt_regs *);
extern void syscall_trace_leave(struct pt_regs *);
static inline unsigned long regs_return_value(struct pt_regs *regs)
{
        return regs->ax;
}
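/*
 * Illustrative only: a hypothetical syscall-exit hook showing how
 * regs_return_value() is meant to be read once the return register has been
 * filled in. The hook name is made up for the example.
 */
#if 0
static void example_syscall_exit_hook(struct pt_regs *regs)
{
        long ret = (long)regs_return_value(regs);

        if (ret < 0)
                pr_debug("syscall failed with %ld\n", ret);
}
#endif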
/*
 * user_mode_vm(regs) determines whether a register set came from user mode.
 * This is true if V8086 mode was enabled OR if the register set was from
 * protected mode with RPL-3 CS value. This tricky test checks that with
 * one comparison. Many places in the kernel can bypass this full check
 * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
 */
static inline int user_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
        return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
#else
        return !!(regs->cs & 3);
#endif
}
static inline int user_mode_vm(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
        return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
                USER_RPL;
#else
        return user_mode(regs);
#endif
}
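/*
 * Worked example of the single-comparison trick above, assuming the standard
 * x86 definitions (USER_RPL == 3, SEGMENT_RPL_MASK == 3, and X86_VM_MASK is
 * the EFLAGS VM bit, 1 << 17):
 *
 *   - protected-mode user code:  (cs & 3) == 3, VM clear -> (3 | 0) >= 3
 *   - kernel code:               (cs & 3) == 0, VM clear -> (0 | 0) >= 3 fails
 *   - vm86 code:                 VM set, so the OR is at least 1 << 17 >= 3
 *
 * So one compare covers both the RPL test and the V8086 test.
 */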
static inline int v8086_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
        return (regs->flags & X86_VM_MASK);
#else
        return 0;	/* No V86 mode support in long mode */
#endif
}
/*
 * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
 * when it traps. The previous stack will be directly underneath the saved
 * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
 *
 * This is valid only for kernel mode traps.
 */
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
        return (unsigned long)(&regs->sp);
}
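/*
 * Illustrative only: a hypothetical 32-bit stack-dump helper showing why
 * kernel_stack_pointer() exists. A trap taken while already in kernel mode
 * pushes no sp/ss, so the interrupted stack begins right after the saved
 * registers; a trap from user mode did save sp and can be read directly.
 */
#if 0
static unsigned long example_trap_stack_pointer(struct pt_regs *regs)
{
        if (user_mode_vm(regs))
                return regs->sp;		/* sp was pushed by the CPU */

        return kernel_stack_pointer(regs);	/* stack starts past the frame */
}
#endif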
static inline unsigned long instruction_pointer(struct pt_regs *regs)
{
        return regs->ip;
}
static inline unsigned long frame_pointer(struct pt_regs *regs)
{
        return regs->bp;
}
static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
        return regs->sp;
}
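/*
 * Illustrative only: a hypothetical sampling hook using the accessors above
 * instead of reaching into pt_regs fields directly, so the caller need not
 * know the x86 register layout.
 */
#if 0
static void example_sample_regs(struct pt_regs *regs)
{
        pr_debug("ip=%lx bp=%lx user sp=%lx\n",
                 instruction_pointer(regs),
                 frame_pointer(regs),
                 user_mode_vm(regs) ? user_stack_pointer(regs) : 0UL);
}
#endif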
/*
 * These are defined as per linux/ptrace.h, which see.
 */
#define arch_has_single_step()	(1)
extern void user_enable_single_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);

extern void user_enable_block_step(struct task_struct *);
#ifdef CONFIG_X86_DEBUGCTLMSR
#define arch_has_block_step()	(1)
#else
#define arch_has_block_step()	(boot_cpu_data.x86 >= 6)
#endif
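/*
 * Illustrative only: a rough sketch of how arch-independent ptrace code is
 * expected to use the hooks above - check the arch_has_*() predicate, then
 * call the matching enable function on the tracee. The function below is
 * hypothetical, not the actual generic implementation.
 */
#if 0
static int example_request_step(struct task_struct *child, bool block)
{
        if (block) {
                if (!arch_has_block_step())
                        return -EIO;
                user_enable_block_step(child);
        } else {
                user_enable_single_step(child);
        }
        return 0;
}
#endif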
extern int do_get_thread_area(struct task_struct *p, int idx,
			      struct user_desc __user *info);
extern int do_set_thread_area(struct task_struct *p, int idx,
			      struct user_desc __user *info, int can_allocate);
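/*
 * Illustrative only: a rough sketch of the kind of ptrace request dispatch
 * that calls into the thread-area helpers above; the real handling lives in
 * arch/x86/kernel/ptrace.c. The function below is hypothetical.
 */
#if 0
static long example_thread_area_request(struct task_struct *child, long request,
					int idx, struct user_desc __user *info)
{
        switch (request) {
        case PTRACE_GET_THREAD_AREA:
                return do_get_thread_area(child, idx, info);
        case PTRACE_SET_THREAD_AREA:
                return do_set_thread_area(child, idx, info, 0);
        default:
                return -EIO;
        }
}
#endif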
#ifdef CONFIG_X86_PTRACE_BTS
extern void ptrace_bts_untrace(struct task_struct *tsk);

#define arch_ptrace_untrace(tsk)	ptrace_bts_untrace(tsk)
#endif /* CONFIG_X86_PTRACE_BTS */
#endif /* __KERNEL__ */

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_X86_PTRACE_H */