/* include/linux/ptrace.h (cris-mirror.git, Linux 4.6-rc6) */
#ifndef _LINUX_PTRACE_H
#define _LINUX_PTRACE_H

#include <linux/compiler.h>		/* For unlikely. */
#include <linux/sched.h>		/* For struct task_struct. */
#include <linux/err.h>			/* for IS_ERR_VALUE */
#include <linux/bug.h>			/* For BUG_ON. */
#include <linux/pid_namespace.h>	/* For task_active_pid_ns. */
#include <uapi/linux/ptrace.h>

/*
 * Ptrace flags
 *
 * The ownership rules for task->ptrace, which holds the ptrace
 * flags, are simple.  When a task is running it owns its task->ptrace
 * flags.  When a task is stopped the ptracer owns task->ptrace.
 */

#define PT_SEIZED	0x00010000	/* SEIZE used, enable new behavior */
#define PT_PTRACED	0x00000001
#define PT_DTRACE	0x00000002	/* delayed trace (used on m68k, i386) */
#define PT_PTRACE_CAP	0x00000004	/* ptracer can follow suid-exec */

#define PT_OPT_FLAG_SHIFT	3
/* PT_TRACE_* event enable flags */
#define PT_EVENT_FLAG(event)	(1 << (PT_OPT_FLAG_SHIFT + (event)))
#define PT_TRACESYSGOOD		PT_EVENT_FLAG(0)
#define PT_TRACE_FORK		PT_EVENT_FLAG(PTRACE_EVENT_FORK)
#define PT_TRACE_VFORK		PT_EVENT_FLAG(PTRACE_EVENT_VFORK)
#define PT_TRACE_CLONE		PT_EVENT_FLAG(PTRACE_EVENT_CLONE)
#define PT_TRACE_EXEC		PT_EVENT_FLAG(PTRACE_EVENT_EXEC)
#define PT_TRACE_VFORK_DONE	PT_EVENT_FLAG(PTRACE_EVENT_VFORK_DONE)
#define PT_TRACE_EXIT		PT_EVENT_FLAG(PTRACE_EVENT_EXIT)
#define PT_TRACE_SECCOMP	PT_EVENT_FLAG(PTRACE_EVENT_SECCOMP)

#define PT_EXITKILL		(PTRACE_O_EXITKILL << PT_OPT_FLAG_SHIFT)
#define PT_SUSPEND_SECCOMP	(PTRACE_O_SUSPEND_SECCOMP << PT_OPT_FLAG_SHIFT)
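
/*
 * Illustrative sketch (not part of the original header): each PTRACE_O_*
 * option bit is (1 << PTRACE_EVENT_*), so a tracer's option mask maps onto
 * the PT_* event flags above with a single shift, roughly the way the
 * PTRACE_SETOPTIONS handling applies it.  The helper name is made up for
 * the example.
 */
#if 0	/* example only, not built */
static unsigned long pt_event_flags_from_options(unsigned long options)
{
	/* e.g. PTRACE_O_TRACEFORK << PT_OPT_FLAG_SHIFT == PT_TRACE_FORK */
	return options << PT_OPT_FLAG_SHIFT;
}
#endif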

/* single stepping state bits (used on ARM and PA-RISC) */
#define PT_SINGLESTEP_BIT	31
#define PT_SINGLESTEP		(1<<PT_SINGLESTEP_BIT)
#define PT_BLOCKSTEP_BIT	30
#define PT_BLOCKSTEP		(1<<PT_BLOCKSTEP_BIT)

extern long arch_ptrace(struct task_struct *child, long request,
			unsigned long addr, unsigned long data);
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
extern void ptrace_disable(struct task_struct *);
extern int ptrace_request(struct task_struct *child, long request,
			  unsigned long addr, unsigned long data);
extern void ptrace_notify(int exit_code);
extern void __ptrace_link(struct task_struct *child,
			  struct task_struct *new_parent);
extern void __ptrace_unlink(struct task_struct *child);
extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
#define PTRACE_MODE_READ	0x01
#define PTRACE_MODE_ATTACH	0x02
#define PTRACE_MODE_NOAUDIT	0x04
#define PTRACE_MODE_FSCREDS	0x08
#define PTRACE_MODE_REALCREDS	0x10

/* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */
#define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
#define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS)
#define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS)
#define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS)

/**
 * ptrace_may_access - check whether the caller is permitted to access
 * a target task.
 * @task: target task
 * @mode: selects type of access and caller credentials
 *
 * Returns true on success, false on denial.
 *
 * One of the flags PTRACE_MODE_FSCREDS and PTRACE_MODE_REALCREDS must
 * be set in @mode to specify whether the access was requested through
 * a filesystem syscall (should use effective capabilities and fsuid
 * of the caller) or through an explicit syscall such as
 * process_vm_writev or ptrace (and should use the real credentials).
 */
extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
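
/*
 * Illustrative sketch (not part of the original header): a caller of
 * ptrace_may_access() picks the credential flavour explicitly, e.g.
 * PTRACE_MODE_READ_FSCREDS for a procfs-style access path and
 * PTRACE_MODE_READ_REALCREDS for an explicit syscall.  The helper below
 * is hypothetical.
 */
#if 0	/* example only, not built */
static bool example_may_read_target(struct task_struct *target, bool via_fs)
{
	unsigned int mode = via_fs ? PTRACE_MODE_READ_FSCREDS
				   : PTRACE_MODE_READ_REALCREDS;

	return ptrace_may_access(target, mode);
}
#endif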

static inline int ptrace_reparented(struct task_struct *child)
{
	return !same_thread_group(child->real_parent, child->parent);
}

static inline void ptrace_unlink(struct task_struct *child)
{
	if (unlikely(child->ptrace))
		__ptrace_unlink(child);
}

int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data);
int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data);

/**
 * ptrace_parent - return the task that is tracing the given task
 * @task: task to consider
 *
 * Returns %NULL if no one is tracing @task, or the &struct task_struct
 * pointer to its tracer.
 *
 * Must be called under rcu_read_lock().  The pointer returned might be kept
 * live only by RCU.  During exec, this may be called with task_lock() held
 * on @task, still held from when check_unsafe_exec() was called.
 */
static inline struct task_struct *ptrace_parent(struct task_struct *task)
{
	if (unlikely(task->ptrace))
		return rcu_dereference(task->parent);
	return NULL;
}
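
/*
 * Illustrative sketch (not part of the original header): ptrace_parent()
 * must be used inside an RCU read-side critical section, and the returned
 * pointer is only guaranteed to stay valid while that section is held.
 * The helper below is hypothetical.
 */
#if 0	/* example only, not built */
static pid_t example_tracer_pid(struct task_struct *task)
{
	struct task_struct *tracer;
	pid_t pid = 0;

	rcu_read_lock();
	tracer = ptrace_parent(task);
	if (tracer)
		pid = task_pid_nr(tracer);
	rcu_read_unlock();

	return pid;
}
#endif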

/**
 * ptrace_event_enabled - test whether a ptrace event is enabled
 * @task: ptracee of interest
 * @event: %PTRACE_EVENT_* to test
 *
 * Test whether @event is enabled for ptracee @task.
 *
 * Returns %true if @event is enabled, %false otherwise.
 */
static inline bool ptrace_event_enabled(struct task_struct *task, int event)
{
	return task->ptrace & PT_EVENT_FLAG(event);
}

/**
 * ptrace_event - possibly stop for a ptrace event notification
 * @event: %PTRACE_EVENT_* value to report
 * @message: value for %PTRACE_GETEVENTMSG to return
 *
 * Check whether @event is enabled and, if so, report @event and @message
 * to the ptrace parent.
 *
 * Called without locks.
 */
static inline void ptrace_event(int event, unsigned long message)
{
	if (unlikely(ptrace_event_enabled(current, event))) {
		current->ptrace_message = message;
		ptrace_notify((event << 8) | SIGTRAP);
	} else if (event == PTRACE_EVENT_EXEC) {
		/* legacy EXEC report via SIGTRAP */
		if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED)
			send_sig(SIGTRAP, current, 0);
	}
}
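
/*
 * Illustrative sketch (not part of the original header): a reporting site
 * just calls ptrace_event() with the event number; whether the task stops
 * depends on the PT_TRACE_* flag the tracer enabled.  The message value 0
 * below is a placeholder for the example, and the function is hypothetical.
 */
#if 0	/* example only, not built */
static void example_report_exec(void)
{
	/* stops only if PT_TRACE_EXEC is set; otherwise falls back to the
	 * legacy SIGTRAP for old non-SEIZE tracers */
	ptrace_event(PTRACE_EVENT_EXEC, 0);
}
#endif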

/**
 * ptrace_event_pid - possibly stop for a ptrace event notification
 * @event: %PTRACE_EVENT_* value to report
 * @pid: process identifier for %PTRACE_GETEVENTMSG to return
 *
 * Check whether @event is enabled and, if so, report @event and @pid
 * to the ptrace parent.  @pid is reported as the pid_t seen from the
 * ptrace parent's pid namespace.
 *
 * Called without locks.
 */
static inline void ptrace_event_pid(int event, struct pid *pid)
{
	/*
	 * FIXME: There's a potential race if a ptracer in a different pid
	 * namespace than parent attaches between computing message below and
	 * when we acquire tasklist_lock in ptrace_stop(). If this happens,
	 * the ptracer will get a bogus pid from PTRACE_GETEVENTMSG.
	 */
	unsigned long message = 0;
	struct pid_namespace *ns;

	rcu_read_lock();
	ns = task_active_pid_ns(rcu_dereference(current->parent));
	if (ns)
		message = pid_nr_ns(pid, ns);
	rcu_read_unlock();

	ptrace_event(event, message);
}
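
/*
 * Illustrative sketch (not part of the original header): the fork/clone
 * paths report the new child through ptrace_event_pid() so the tracer
 * reads, via PTRACE_GETEVENTMSG, a pid valid in its own namespace.  The
 * wrapper below is hypothetical.
 */
#if 0	/* example only, not built */
static void example_report_fork(struct pid *child_pid)
{
	ptrace_event_pid(PTRACE_EVENT_FORK, child_pid);
}
#endif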

/**
 * ptrace_init_task - initialize ptrace state for a new child
 * @child: new child task
 * @ptrace: true if child should be ptrace'd by parent's tracer
 *
 * This is called immediately after adding @child to its parent's children
 * list.  @ptrace is false in the normal case, and true to ptrace @child.
 *
 * Called with current's siglock and write_lock_irq(&tasklist_lock) held.
 */
static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
{
	INIT_LIST_HEAD(&child->ptrace_entry);
	INIT_LIST_HEAD(&child->ptraced);
	child->jobctl = 0;
	child->ptrace = 0;
	child->parent = child->real_parent;

	if (unlikely(ptrace) && current->ptrace) {
		child->ptrace = current->ptrace;
		__ptrace_link(child, current->parent);

		if (child->ptrace & PT_SEIZED)
			task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
		else
			sigaddset(&child->pending.signal, SIGSTOP);

		set_tsk_thread_flag(child, TIF_SIGPENDING);
	}
}

/**
 * ptrace_release_task - final ptrace-related cleanup of a zombie being reaped
 * @task: task in %EXIT_DEAD state
 *
 * Called with write_lock(&tasklist_lock) held.
 */
static inline void ptrace_release_task(struct task_struct *task)
{
	BUG_ON(!list_empty(&task->ptraced));
	ptrace_unlink(task);
	BUG_ON(!list_empty(&task->ptrace_entry));
}

#ifndef force_successful_syscall_return
/*
 * System call handlers that, upon successful completion, need to return a
 * negative value should call force_successful_syscall_return() right before
 * returning.  On architectures where the syscall convention provides for a
 * separate error flag (e.g., alpha, ia64, ppc{,64}, sparc{,64}, possibly
 * others), this macro can be used to ensure that the error flag will not get
 * set.  On architectures which do not support a separate error flag, the macro
 * is a no-op and the spurious error condition needs to be filtered out by some
 * other means (e.g., in user-level, by passing an extra argument to the
 * syscall handler, or something along those lines).
 */
#define force_successful_syscall_return() do { } while (0)
#endif
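
/*
 * Illustrative sketch (not part of the original header): a system call
 * whose legitimate result can look like -errno marks the return as
 * successful so error-flag architectures do not misreport it.  The handler
 * below is entirely hypothetical.
 */
#if 0	/* example only, not built */
static long example_sys_query(long offset)
{
	/* a return value below zero here is valid data, not an errno */
	force_successful_syscall_return();
	return offset;
}
#endif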

#ifndef is_syscall_success
/*
 * On most systems we can tell if a syscall is a success based on whether the
 * return value is an error value.  Some systems, such as ia64 and powerpc,
 * have different indicators of success/failure and must define their own.
 */
#define is_syscall_success(regs) (!IS_ERR_VALUE((unsigned long)(regs_return_value(regs))))
#endif
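
/*
 * Illustrative sketch (not part of the original header): syscall-exit
 * consumers (the audit exit path, for instance) pair is_syscall_success()
 * with regs_return_value() to classify the result.  The function below is
 * hypothetical.
 */
#if 0	/* example only, not built */
static void example_syscall_exit(struct pt_regs *regs)
{
	int success = is_syscall_success(regs);
	long retval = regs_return_value(regs);

	pr_debug("syscall %s, retval=%ld\n",
		 success ? "succeeded" : "failed", retval);
}
#endif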

/*
 * <asm/ptrace.h> should define the following things inside #ifdef __KERNEL__.
 *
 * These do-nothing inlines are used when the arch does not
 * implement single-step.  The kerneldoc comments are here
 * to document the interface for all arch definitions.
 */

#ifndef arch_has_single_step
/**
 * arch_has_single_step - does this CPU support user-mode single-step?
 *
 * If this is defined, then there must be function declarations or
 * inlines for user_enable_single_step() and user_disable_single_step().
 * arch_has_single_step() should evaluate to nonzero iff the machine
 * supports instruction single-step for user mode.
 * It can be a constant or it can test a CPU feature bit.
 */
#define arch_has_single_step()		(0)

/**
 * user_enable_single_step - single-step in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_single_step() has returned nonzero.
 * Set @task so that when it returns to user mode, it will trap after the
 * next single instruction executes.  If arch_has_block_step() is defined,
 * this must clear the effects of user_enable_block_step() too.
 */
static inline void user_enable_single_step(struct task_struct *task)
{
	BUG();			/* This can never be called. */
}

/**
 * user_disable_single_step - cancel user-mode single-step
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * Clear @task of the effects of user_enable_single_step() and
 * user_enable_block_step().  This can be called whether or not either
 * of those was ever called on @task, and even if arch_has_single_step()
 * returned zero.
 */
static inline void user_disable_single_step(struct task_struct *task)
{
}
#else
extern void user_enable_single_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);
#endif	/* arch_has_single_step */
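
/*
 * Illustrative sketch (not part of the original header): the generic
 * ptrace resume path uses these hooks roughly as below; when the
 * architecture lacks single-step support, PTRACE_SINGLESTEP is rejected
 * before user_enable_single_step() can run, which is why the stub above
 * may BUG().  The function below is hypothetical.
 */
#if 0	/* example only, not built */
static int example_resume_singlestep(struct task_struct *child)
{
	if (!arch_has_single_step())
		return -EIO;

	user_enable_single_step(child);
	return 0;
}
#endif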

#ifndef arch_has_block_step
/**
 * arch_has_block_step - does this CPU support user-mode block-step?
 *
 * If this is defined, then there must be a function declaration or inline
 * for user_enable_block_step(), and arch_has_single_step() must be defined
 * too.  arch_has_block_step() should evaluate to nonzero iff the machine
 * supports step-until-branch for user mode.  It can be a constant or it
 * can test a CPU feature bit.
 */
#define arch_has_block_step()		(0)

/**
 * user_enable_block_step - step until branch in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_block_step() has returned nonzero,
 * and will never be called when single-instruction stepping is being used.
 * Set @task so that when it returns to user mode, it will trap after the
 * next branch or trap taken.
 */
static inline void user_enable_block_step(struct task_struct *task)
{
	BUG();			/* This can never be called. */
}
#else
extern void user_enable_block_step(struct task_struct *);
#endif	/* arch_has_block_step */

#ifdef ARCH_HAS_USER_SINGLE_STEP_INFO
extern void user_single_step_siginfo(struct task_struct *tsk,
				struct pt_regs *regs, siginfo_t *info);
#else
static inline void user_single_step_siginfo(struct task_struct *tsk,
				struct pt_regs *regs, siginfo_t *info)
{
	memset(info, 0, sizeof(*info));
	info->si_signo = SIGTRAP;
}
#endif

#ifndef arch_ptrace_stop_needed
/**
 * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
 * @code: current->exit_code value ptrace will stop with
 * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with
 *
 * This is called with the siglock held, to decide whether or not it's
 * necessary to release the siglock and call arch_ptrace_stop() with the
 * same @code and @info arguments.  It can be defined to a constant if
 * arch_ptrace_stop() is never required, or always is.  On machines where
 * this makes sense, it should be defined to a quick test to optimize out
 * calling arch_ptrace_stop() when it would be superfluous.  For example,
 * if the thread has not been back to user mode since the last stop, the
 * thread state might indicate that nothing needs to be done.
 *
 * This is guaranteed to be invoked once before a task stops for ptrace and
 * may include arch-specific operations necessary prior to a ptrace stop.
 */
#define arch_ptrace_stop_needed(code, info)	(0)
#endif

#ifndef arch_ptrace_stop
/**
 * arch_ptrace_stop - Do machine-specific work before stopping for ptrace
 * @code: current->exit_code value ptrace will stop with
 * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with
 *
 * This is called with no locks held when arch_ptrace_stop_needed() has
 * just returned nonzero.  It is allowed to block, e.g. for user memory
 * access.  The arch can have machine-specific work to be done before
 * ptrace stops.  On ia64, register backing store gets written back to user
 * memory here.  Since this can be costly (requires dropping the siglock),
 * we only do it when the arch requires it for this particular stop, as
 * indicated by arch_ptrace_stop_needed().
 */
#define arch_ptrace_stop(code, info)		do { } while (0)
#endif

#ifndef current_pt_regs
#define current_pt_regs() task_pt_regs(current)
#endif

#ifndef ptrace_signal_deliver
#define ptrace_signal_deliver() ((void)0)
#endif

/*
 * unlike current_pt_regs(), this one is equal to task_pt_regs(current)
 * on *all* architectures; the only reason to have a per-arch definition
 * is optimisation.
 */
#ifndef signal_pt_regs
#define signal_pt_regs() task_pt_regs(current)
#endif

#ifndef current_user_stack_pointer
#define current_user_stack_pointer() user_stack_pointer(current_pt_regs())
#endif

extern int task_current_syscall(struct task_struct *target, long *callno,
				unsigned long args[6], unsigned int maxargs,
				unsigned long *sp, unsigned long *pc);
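
/*
 * Illustrative sketch (not part of the original header): this is the helper
 * behind /proc/<pid>/syscall.  The caller supplies buffers for the syscall
 * number, up to six arguments and the stack/instruction pointers; a nonzero
 * return means a consistent snapshot could not be taken (the target was
 * running).  The function below is hypothetical.
 */
#if 0	/* example only, not built */
static void example_dump_syscall(struct task_struct *target)
{
	unsigned long args[6], sp, pc;
	long nr;

	if (task_current_syscall(target, &nr, args, 6, &sp, &pc))
		pr_debug("target is running\n");
	else if (nr < 0)
		pr_debug("not in a syscall, sp=0x%lx pc=0x%lx\n", sp, pc);
	else
		pr_debug("in syscall %ld, sp=0x%lx pc=0x%lx\n", nr, sp, pc);
}
#endif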

#endif	/* _LINUX_PTRACE_H */