/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PTRACE_H
#define _LINUX_PTRACE_H

#include <linux/compiler.h>		/* For unlikely.  */
#include <linux/sched.h>		/* For struct task_struct.  */
#include <linux/sched/signal.h>		/* For send_sig(), same_thread_group(), etc. */
#include <linux/err.h>			/* for IS_ERR_VALUE */
#include <linux/bug.h>			/* For BUG_ON.  */
#include <linux/pid_namespace.h>	/* For task_active_pid_ns.  */
#include <uapi/linux/ptrace.h>
extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
			    void *buf, int len, unsigned int gup_flags);
/*
 * Ptrace flags
 *
 * The ownership rules for task->ptrace, which holds the ptrace flags,
 * are simple: when a task is running it owns its task->ptrace flags;
 * when a task is stopped the ptracer owns task->ptrace.
 */
#define PT_SEIZED	0x00010000	/* SEIZE used, enable new behavior */
#define PT_PTRACED	0x00000001
#define PT_DTRACE	0x00000002	/* delayed trace (used on m68k, i386) */

#define PT_OPT_FLAG_SHIFT	3
/* PT_TRACE_* event enable flags */
#define PT_EVENT_FLAG(event)	(1 << (PT_OPT_FLAG_SHIFT + (event)))
#define PT_TRACESYSGOOD		PT_EVENT_FLAG(0)
#define PT_TRACE_FORK		PT_EVENT_FLAG(PTRACE_EVENT_FORK)
#define PT_TRACE_VFORK		PT_EVENT_FLAG(PTRACE_EVENT_VFORK)
#define PT_TRACE_CLONE		PT_EVENT_FLAG(PTRACE_EVENT_CLONE)
#define PT_TRACE_EXEC		PT_EVENT_FLAG(PTRACE_EVENT_EXEC)
#define PT_TRACE_VFORK_DONE	PT_EVENT_FLAG(PTRACE_EVENT_VFORK_DONE)
#define PT_TRACE_EXIT		PT_EVENT_FLAG(PTRACE_EVENT_EXIT)
#define PT_TRACE_SECCOMP	PT_EVENT_FLAG(PTRACE_EVENT_SECCOMP)

#define PT_EXITKILL		(PTRACE_O_EXITKILL << PT_OPT_FLAG_SHIFT)
#define PT_SUSPEND_SECCOMP	(PTRACE_O_SUSPEND_SECCOMP << PT_OPT_FLAG_SHIFT)
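
/*
 * Illustrative sketch, not part of this header: the PTRACE_O_* option bits
 * requested by a tracer are shifted into the PT_* flags above (compare
 * ptrace_setoptions() in kernel/ptrace.c).  The helper name
 * example_opts_to_pt_flags() is hypothetical and only shows the mapping.
 */
static inline unsigned long example_opts_to_pt_flags(unsigned long ptrace_o_flags)
{
	/* e.g. PTRACE_O_TRACEFORK (1 << PTRACE_EVENT_FORK) becomes PT_TRACE_FORK */
	return ptrace_o_flags << PT_OPT_FLAG_SHIFT;
}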
/* single stepping state bits (used on ARM and PA-RISC) */
#define PT_SINGLESTEP_BIT	31
#define PT_SINGLESTEP		(1<<PT_SINGLESTEP_BIT)
#define PT_BLOCKSTEP_BIT	30
#define PT_BLOCKSTEP		(1<<PT_BLOCKSTEP_BIT)
extern long arch_ptrace(struct task_struct *child, long request,
			unsigned long addr, unsigned long data);
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src,
			   char __user *dst, int len);
extern int ptrace_writedata(struct task_struct *tsk, char __user *src,
			    unsigned long dst, int len);
extern void ptrace_disable(struct task_struct *);
extern int ptrace_request(struct task_struct *child, long request,
			  unsigned long addr, unsigned long data);
extern void ptrace_notify(int exit_code);
extern void __ptrace_link(struct task_struct *child,
			  struct task_struct *new_parent,
			  const struct cred *ptracer_cred);
extern void __ptrace_unlink(struct task_struct *child);
extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
#define PTRACE_MODE_READ	0x01
#define PTRACE_MODE_ATTACH	0x02
#define PTRACE_MODE_NOAUDIT	0x04
#define PTRACE_MODE_FSCREDS	0x08
#define PTRACE_MODE_REALCREDS	0x10

/* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */
#define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
#define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS)
#define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS)
#define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS)
/**
 * ptrace_may_access - check whether the caller is permitted to access
 * a target task.
 * @task: target task
 * @mode: selects type of access and caller credentials
 *
 * Returns true on success, false on denial.
 *
 * One of the flags PTRACE_MODE_FSCREDS and PTRACE_MODE_REALCREDS must
 * be set in @mode to specify whether the access was requested through
 * a filesystem syscall (should use effective capabilities and fsuid
 * of the caller) or through an explicit syscall such as
 * process_vm_writev or ptrace (and should use the real credentials).
 */
extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
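
/*
 * Illustrative sketch, not part of this header: a hypothetical caller,
 * example_can_read_task(), deciding between the FSCREDS and REALCREDS
 * variants.  A filesystem-style interface (e.g. a /proc read handler)
 * would pass PTRACE_MODE_READ_FSCREDS; an explicit syscall such as
 * process_vm_readv() would pass PTRACE_MODE_READ_REALCREDS.
 */
static inline bool example_can_read_task(struct task_struct *task, bool via_filesystem)
{
	unsigned int mode = via_filesystem ? PTRACE_MODE_READ_FSCREDS
					   : PTRACE_MODE_READ_REALCREDS;

	return ptrace_may_access(task, mode);
}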
static inline int ptrace_reparented(struct task_struct *child)
{
	return !same_thread_group(child->real_parent, child->parent);
}
static inline void ptrace_unlink(struct task_struct *child)
{
	if (unlikely(child->ptrace))
		__ptrace_unlink(child);
}
int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data);
int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data);
/**
 * ptrace_parent - return the task that is tracing the given task
 * @task: task to consider
 *
 * Returns %NULL if no one is tracing @task, or the &struct task_struct
 * pointer to its tracer.
 *
 * Must be called under rcu_read_lock().  The pointer returned might be kept
 * live only by RCU.  During exec, this may be called with task_lock() held
 * on @task, still held from when check_unsafe_exec() was called.
 */
static inline struct task_struct *ptrace_parent(struct task_struct *task)
{
	if (unlikely(task->ptrace))
		return rcu_dereference(task->parent);
	return NULL;
}
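
/*
 * Illustrative sketch, not part of this header: looking up the tracer's pid
 * under rcu_read_lock(), as the comment above requires.  The helper name
 * example_tracer_pid() is hypothetical.
 */
static inline pid_t example_tracer_pid(struct task_struct *task)
{
	struct task_struct *tracer;
	pid_t pid = 0;

	rcu_read_lock();
	tracer = ptrace_parent(task);
	if (tracer)
		pid = task_pid_nr(tracer);
	rcu_read_unlock();

	return pid;
}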
/**
 * ptrace_event_enabled - test whether a ptrace event is enabled
 * @task: ptracee of interest
 * @event: %PTRACE_EVENT_* to test
 *
 * Test whether @event is enabled for ptracee @task.
 *
 * Returns %true if @event is enabled, %false otherwise.
 */
static inline bool ptrace_event_enabled(struct task_struct *task, int event)
{
	return task->ptrace & PT_EVENT_FLAG(event);
}
/**
 * ptrace_event - possibly stop for a ptrace event notification
 * @event:	%PTRACE_EVENT_* value to report
 * @message:	value for %PTRACE_GETEVENTMSG to return
 *
 * Check whether @event is enabled and, if so, report @event and @message
 * to the ptrace parent.
 *
 * Called without locks.
 */
static inline void ptrace_event(int event, unsigned long message)
{
	if (unlikely(ptrace_event_enabled(current, event))) {
		current->ptrace_message = message;
		ptrace_notify((event << 8) | SIGTRAP);
	} else if (event == PTRACE_EVENT_EXEC) {
		/* legacy EXEC report via SIGTRAP */
		if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED)
			send_sig(SIGTRAP, current, 0);
	}
}
/**
 * ptrace_event_pid - possibly stop for a ptrace event notification
 * @event:	%PTRACE_EVENT_* value to report
 * @pid:	process identifier for %PTRACE_GETEVENTMSG to return
 *
 * Check whether @event is enabled and, if so, report @event and @pid
 * to the ptrace parent.  @pid is reported as the pid_t seen from the
 * ptrace parent's pid namespace.
 *
 * Called without locks.
 */
static inline void ptrace_event_pid(int event, struct pid *pid)
{
	/*
	 * FIXME: There's a potential race if a ptracer in a different pid
	 * namespace than parent attaches between computing message below and
	 * when we acquire tasklist_lock in ptrace_stop().  If this happens,
	 * the ptracer will get a bogus pid from PTRACE_GETEVENTMSG.
	 */
	unsigned long message = 0;
	struct pid_namespace *ns;

	rcu_read_lock();
	ns = task_active_pid_ns(rcu_dereference(current->parent));
	if (ns)
		message = pid_nr_ns(pid, ns);
	rcu_read_unlock();

	ptrace_event(event, message);
}
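
/*
 * Illustrative sketch, not part of this header: how a fork-style path
 * reports a new child to the tracer (the real call sites are in
 * kernel/fork.c).  The helper name example_report_fork() is hypothetical.
 */
static inline void example_report_fork(struct pid *child_pid)
{
	/* Only stops in ptrace_notify() if PT_TRACE_FORK is enabled. */
	ptrace_event_pid(PTRACE_EVENT_FORK, child_pid);
}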
/**
 * ptrace_init_task - initialize ptrace state for a new child
 * @child:	new child task
 * @ptrace:	true if child should be ptrace'd by parent's tracer
 *
 * This is called immediately after adding @child to its parent's children
 * list.  @ptrace is false in the normal case, and true to ptrace @child.
 *
 * Called with current's siglock and write_lock_irq(&tasklist_lock) held.
 */
static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
{
	INIT_LIST_HEAD(&child->ptrace_entry);
	INIT_LIST_HEAD(&child->ptraced);
	child->jobctl = 0;
	child->ptrace = 0;
	child->parent = child->real_parent;

	if (unlikely(ptrace) && current->ptrace) {
		child->ptrace = current->ptrace;
		__ptrace_link(child, current->parent, current->ptracer_cred);

		if (child->ptrace & PT_SEIZED)
			task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
		else
			sigaddset(&child->pending.signal, SIGSTOP);

		set_tsk_thread_flag(child, TIF_SIGPENDING);
	}
	else
		child->ptracer_cred = NULL;
}
/**
 * ptrace_release_task - final ptrace-related cleanup of a zombie being reaped
 * @task:	task in %EXIT_DEAD state
 *
 * Called with write_lock(&tasklist_lock) held.
 */
static inline void ptrace_release_task(struct task_struct *task)
{
	BUG_ON(!list_empty(&task->ptraced));
	ptrace_unlink(task);
	BUG_ON(!list_empty(&task->ptrace_entry));
}
#ifndef force_successful_syscall_return
/*
 * System call handlers that, upon successful completion, need to return a
 * negative value should call force_successful_syscall_return() right before
 * returning.  On architectures where the syscall convention provides for a
 * separate error flag (e.g., alpha, ia64, ppc{,64}, sparc{,64}, possibly
 * others), this macro can be used to ensure that the error flag will not get
 * set.  On architectures which do not support a separate error flag, the macro
 * is a no-op and the spurious error condition needs to be filtered out by some
 * other means (e.g., in user-level, by passing an extra argument to the
 * syscall handler, or something along those lines).
 */
#define force_successful_syscall_return() do { } while (0)
#endif
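
/*
 * Illustrative sketch, not part of this header: a hypothetical syscall
 * whose successful result is a negative value.  Calling
 * force_successful_syscall_return() keeps architectures with a separate
 * error flag from misreporting that value as an error.
 */
static inline long example_syscall_with_negative_result(void)
{
	long cookie = -12345;	/* a valid result that happens to be negative */

	force_successful_syscall_return();
	return cookie;
}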
#ifndef is_syscall_success
/*
 * On most systems we can tell if a syscall is a success based on if the retval
 * is an error value. On some systems like ia64 and powerpc they have different
 * indicators of success/failure and must define their own.
 */
#define is_syscall_success(regs) (!IS_ERR_VALUE((unsigned long)(regs_return_value(regs))))
#endif
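
/*
 * Illustrative sketch, not part of this header: a hypothetical syscall-exit
 * helper combining is_syscall_success() with regs_return_value(), in the
 * spirit of the audit and tracing exit paths.
 */
static inline long example_syscall_error_or_zero(struct pt_regs *regs)
{
	/* Negative errno on failure, 0 on success. */
	return is_syscall_success(regs) ? 0 : regs_return_value(regs);
}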
/*
 * <asm/ptrace.h> should define the following things inside #ifdef __KERNEL__.
 *
 * These do-nothing inlines are used when the arch does not
 * implement single-step.  The kerneldoc comments are here
 * to document the interface for all arch definitions.
 */
#ifndef arch_has_single_step

/**
 * arch_has_single_step - does this CPU support user-mode single-step?
 *
 * If this is defined, then there must be function declarations or
 * inlines for user_enable_single_step() and user_disable_single_step().
 * arch_has_single_step() should evaluate to nonzero iff the machine
 * supports instruction single-step for user mode.
 * It can be a constant or it can test a CPU feature bit.
 */
#define arch_has_single_step()		(0)

/**
 * user_enable_single_step - single-step in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_single_step() has returned nonzero.
 * Set @task so that when it returns to user mode, it will trap after the
 * next single instruction executes.  If arch_has_block_step() is defined,
 * this must clear the effects of user_enable_block_step() too.
 */
static inline void user_enable_single_step(struct task_struct *task)
{
	BUG();			/* This can never be called.  */
}

/**
 * user_disable_single_step - cancel user-mode single-step
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * Clear @task of the effects of user_enable_single_step() and
 * user_enable_block_step().  This can be called whether or not either
 * of those was ever called on @task, and even if arch_has_single_step()
 * returned zero.
 */
static inline void user_disable_single_step(struct task_struct *task)
{
}

#else
extern void user_enable_single_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);
#endif	/* arch_has_single_step */
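
/*
 * Illustrative sketch, not part of this header: generic code is expected to
 * check arch_has_single_step() before enabling stepping on a traced task
 * (compare ptrace_resume() in kernel/ptrace.c).  The helper name
 * example_enable_step() is hypothetical.
 */
static inline int example_enable_step(struct task_struct *child)
{
	if (!arch_has_single_step())
		return -EIO;	/* this machine cannot single-step user mode */

	user_enable_single_step(child);
	return 0;
}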
#ifndef arch_has_block_step

/**
 * arch_has_block_step - does this CPU support user-mode block-step?
 *
 * If this is defined, then there must be a function declaration or inline
 * for user_enable_block_step(), and arch_has_single_step() must be defined
 * too.  arch_has_block_step() should evaluate to nonzero iff the machine
 * supports step-until-branch for user mode.  It can be a constant or it
 * can test a CPU feature bit.
 */
#define arch_has_block_step()		(0)

/**
 * user_enable_block_step - step until branch in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_block_step() has returned nonzero,
 * and will never be called when single-instruction stepping is being used.
 * Set @task so that when it returns to user mode, it will trap after the
 * next branch or trap taken.
 */
static inline void user_enable_block_step(struct task_struct *task)
{
	BUG();			/* This can never be called.  */
}

#else
extern void user_enable_block_step(struct task_struct *);
#endif	/* arch_has_block_step */
#ifdef ARCH_HAS_USER_SINGLE_STEP_INFO
extern void user_single_step_siginfo(struct task_struct *tsk,
				struct pt_regs *regs, siginfo_t *info);
#else
static inline void user_single_step_siginfo(struct task_struct *tsk,
				struct pt_regs *regs, siginfo_t *info)
{
	memset(info, 0, sizeof(*info));
	info->si_signo = SIGTRAP;
}
#endif
#ifndef arch_ptrace_stop_needed
/**
 * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
 * @code:	current->exit_code value ptrace will stop with
 * @info:	siginfo_t pointer (or %NULL) for signal ptrace will stop with
 *
 * This is called with the siglock held, to decide whether or not it's
 * necessary to release the siglock and call arch_ptrace_stop() with the
 * same @code and @info arguments.  It can be defined to a constant if
 * arch_ptrace_stop() is never required, or always is.  On machines where
 * this makes sense, it should be defined to a quick test to optimize out
 * calling arch_ptrace_stop() when it would be superfluous.  For example,
 * if the thread has not been back to user mode since the last stop, the
 * thread state might indicate that nothing needs to be done.
 *
 * This is guaranteed to be invoked once before a task stops for ptrace and
 * may include arch-specific operations necessary prior to a ptrace stop.
 */
#define arch_ptrace_stop_needed(code, info)	(0)
#endif
#ifndef arch_ptrace_stop
/**
 * arch_ptrace_stop - Do machine-specific work before stopping for ptrace
 * @code:	current->exit_code value ptrace will stop with
 * @info:	siginfo_t pointer (or %NULL) for signal ptrace will stop with
 *
 * This is called with no locks held when arch_ptrace_stop_needed() has
 * just returned nonzero.  It is allowed to block, e.g. for user memory
 * access.  The arch can have machine-specific work to be done before
 * ptrace stops.  On ia64, register backing store gets written back to user
 * memory here.  Since this can be costly (requires dropping the siglock),
 * we only do it when the arch requires it for this particular stop, as
 * indicated by arch_ptrace_stop_needed().
 */
#define arch_ptrace_stop(code, info)		do { } while (0)
#endif
#ifndef current_pt_regs
#define current_pt_regs() task_pt_regs(current)
#endif

/*
 * unlike current_pt_regs(), this one is equal to task_pt_regs(current)
 * on *all* architectures; the only reason to have a per-arch definition
 * is optimisation.
 */
#ifndef signal_pt_regs
#define signal_pt_regs() task_pt_regs(current)
#endif

#ifndef current_user_stack_pointer
#define current_user_stack_pointer() user_stack_pointer(current_pt_regs())
#endif
extern int task_current_syscall(struct task_struct *target, long *callno,
				unsigned long args[6], unsigned int maxargs,
				unsigned long *sp, unsigned long *pc);

#endif	/* _LINUX_PTRACE_H */