mm-only debug patch...
[mmotm.git] / include / linux / tracehook.h
blobad7b31b0bf3d0a01bf4ed03f61716603f9d7715d
1 /*
2 * Tracing hooks
4 * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved.
6 * This copyrighted material is made available to anyone wishing to use,
7 * modify, copy, or redistribute it subject to the terms and conditions
8 * of the GNU General Public License v.2.
10 * This file defines hook entry points called by core code where
11 * user tracing/debugging support might need to do something. These
12 * entry points are called tracehook_*(). Each hook declared below
13 * has a detailed kerneldoc comment giving the context (locking et
14 * al) from which it is called, and the meaning of its return value.
16 * Each function here typically has only one call site, so it is ok
17 * to have some nontrivial tracehook_*() inlines. In all cases, the
18 * fast path when no tracing is enabled should be very short.
20 * The purpose of this file and the tracehook_* layer is to consolidate
21 * the interface that the kernel core and arch code uses to enable any
22 * user debugging or tracing facility (such as ptrace). The interfaces
23 * here are carefully documented so that maintainers of core and arch
24 * code do not need to think about the implementation details of the
25 * tracing facilities. Likewise, maintainers of the tracing code do not
26 * need to understand all the calling core or arch code in detail, just
27 * documented circumstances of each call, such as locking conditions.
29 * If the calling core code changes so that locking is different, then
30 * it is ok to change the interface documented here. The maintainer of
31 * core code changing should notify the maintainers of the tracing code
32 * that they need to work out the change.
34 * Some tracehook_*() inlines take arguments that the current tracing
35 * implementations might not necessarily use. These function signatures
36 * are chosen to pass in all the information that is on hand in the
37 * caller and might conceivably be relevant to a tracer, so that the
38 * core code won't have to be updated when tracing adds more features.
39 * If a call site changes so that some of those parameters are no longer
40 * already on hand without extra work, then the tracehook_* interface
41 * can change so there is no make-work burden on the core code. The
42 * maintainer of core code changing should notify the maintainers of the
43 * tracing code that they need to work out the change.
46 #ifndef _LINUX_TRACEHOOK_H
47 #define _LINUX_TRACEHOOK_H 1
49 #include <linux/sched.h>
50 #include <linux/ptrace.h>
51 #include <linux/security.h>
52 #include <linux/utrace.h>
53 struct linux_binprm;
55 /**
56 * tracehook_expect_breakpoints - guess if task memory might be touched
57 * @task: current task, making a new mapping
59 * Return nonzero if @task is expected to want breakpoint insertion in
60 * its memory at some point. A zero return is no guarantee it won't
61 * be done, but this is a hint that it's known to be likely.
63 * May be called with @task->mm->mmap_sem held for writing.
65 static inline int tracehook_expect_breakpoints(struct task_struct *task)
67 if (unlikely(task_utrace_flags(task) & UTRACE_EVENT(SIGNAL_CORE)))
68 return 1;
69 return (task_ptrace(task) & PT_PTRACED) != 0;
73 * ptrace report for syscall entry and exit looks identical.
75 static inline void ptrace_report_syscall(struct pt_regs *regs)
77 int ptrace = task_ptrace(current);
79 if (!(ptrace & PT_PTRACED))
80 return;
82 ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
85 * this isn't the same as continuing with a signal, but it will do
86 * for normal use. strace only continues with a signal if the
87 * stopping signal is not SIGTRAP. -brl
89 if (current->exit_code) {
90 send_sig(current->exit_code, current, 1);
91 current->exit_code = 0;
95 /**
96 * tracehook_report_syscall_entry - task is about to attempt a system call
97 * @regs: user register state of current task
99 * This will be called if %TIF_SYSCALL_TRACE has been set, when the
100 * current task has just entered the kernel for a system call.
101 * Full user register state is available here. Changing the values
102 * in @regs can affect the system call number and arguments to be tried.
103 * It is safe to block here, preventing the system call from beginning.
105 * Returns zero normally, or nonzero if the calling arch code should abort
106 * the system call. That must prevent normal entry so no system call is
107 * made. If @task ever returns to user mode after this, its register state
108 * is unspecified, but should be something harmless like an %ENOSYS error
109 * return. It should preserve enough information so that syscall_rollback()
110 * can work (see asm-generic/syscall.h).
112 * Called without locks, just after entering kernel mode.
114 static inline __must_check int tracehook_report_syscall_entry(
115 struct pt_regs *regs)
117 if ((task_utrace_flags(current) & UTRACE_EVENT(SYSCALL_ENTRY)) &&
118 utrace_report_syscall_entry(regs))
119 return 1;
120 ptrace_report_syscall(regs);
121 return 0;
125 * tracehook_report_syscall_exit - task has just finished a system call
126 * @regs: user register state of current task
127 * @step: nonzero if simulating single-step or block-step
129 * This will be called if %TIF_SYSCALL_TRACE has been set, when the
130 * current task has just finished an attempted system call. Full
131 * user register state is available here. It is safe to block here,
132 * preventing signals from being processed.
134 * If @step is nonzero, this report is also in lieu of the normal
135 * trap that would follow the system call instruction because
136 * user_enable_block_step() or user_enable_single_step() was used.
137 * In this case, %TIF_SYSCALL_TRACE might not be set.
139 * Called without locks, just before checking for pending signals.
141 static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
143 if (task_utrace_flags(current) & UTRACE_EVENT(SYSCALL_EXIT))
144 utrace_report_syscall_exit(regs);
145 ptrace_report_syscall(regs);
149 * tracehook_unsafe_exec - check for exec declared unsafe due to tracing
150 * @task: current task doing exec
152 * Return %LSM_UNSAFE_* bits applied to an exec because of tracing.
154 * @task->cred_guard_mutex is held by the caller through the do_execve().
156 static inline int tracehook_unsafe_exec(struct task_struct *task)
158 int unsafe = 0;
159 int ptrace = task_ptrace(task);
160 if (ptrace & PT_PTRACED) {
161 if (ptrace & PT_PTRACE_CAP)
162 unsafe |= LSM_UNSAFE_PTRACE_CAP;
163 else
164 unsafe |= LSM_UNSAFE_PTRACE;
166 return unsafe;
170 * tracehook_tracer_task - return the task that is tracing the given task
171 * @tsk: task to consider
173 * Returns NULL if noone is tracing @task, or the &struct task_struct
174 * pointer to its tracer.
176 * Must called under rcu_read_lock(). The pointer returned might be kept
177 * live only by RCU. During exec, this may be called with task_lock()
178 * held on @task, still held from when tracehook_unsafe_exec() was called.
180 static inline struct task_struct *tracehook_tracer_task(struct task_struct *tsk)
182 if (task_ptrace(tsk) & PT_PTRACED)
183 return rcu_dereference(tsk->parent);
184 return NULL;
188 * tracehook_report_exec - a successful exec was completed
189 * @fmt: &struct linux_binfmt that performed the exec
190 * @bprm: &struct linux_binprm containing exec details
191 * @regs: user-mode register state
193 * An exec just completed, we are shortly going to return to user mode.
194 * The freshly initialized register state can be seen and changed in @regs.
195 * The name, file and other pointers in @bprm are still on hand to be
196 * inspected, but will be freed as soon as this returns.
198 * Called with no locks, but with some kernel resources held live
199 * and a reference on @fmt->module.
201 static inline void tracehook_report_exec(struct linux_binfmt *fmt,
202 struct linux_binprm *bprm,
203 struct pt_regs *regs)
205 if (unlikely(task_utrace_flags(current) & UTRACE_EVENT(EXEC)))
206 utrace_report_exec(fmt, bprm, regs);
207 if (!ptrace_event(PT_TRACE_EXEC, PTRACE_EVENT_EXEC, 0) &&
208 unlikely(task_ptrace(current) & PT_PTRACED))
209 send_sig(SIGTRAP, current, 0);
213 * tracehook_report_exit - task has begun to exit
214 * @exit_code: pointer to value destined for @current->exit_code
216 * @exit_code points to the value passed to do_exit(), which tracing
217 * might change here. This is almost the first thing in do_exit(),
218 * before freeing any resources or setting the %PF_EXITING flag.
220 * Called with no locks held.
222 static inline void tracehook_report_exit(long *exit_code)
224 if (unlikely(task_utrace_flags(current) & UTRACE_EVENT(EXIT)))
225 utrace_report_exit(exit_code);
226 ptrace_event(PT_TRACE_EXIT, PTRACE_EVENT_EXIT, *exit_code);
230 * tracehook_prepare_clone - prepare for new child to be cloned
231 * @clone_flags: %CLONE_* flags from clone/fork/vfork system call
233 * This is called before a new user task is to be cloned.
234 * Its return value will be passed to tracehook_finish_clone().
236 * Called with no locks held.
238 static inline int tracehook_prepare_clone(unsigned clone_flags)
240 if (clone_flags & CLONE_UNTRACED)
241 return 0;
243 if (clone_flags & CLONE_VFORK) {
244 if (current->ptrace & PT_TRACE_VFORK)
245 return PTRACE_EVENT_VFORK;
246 } else if ((clone_flags & CSIGNAL) != SIGCHLD) {
247 if (current->ptrace & PT_TRACE_CLONE)
248 return PTRACE_EVENT_CLONE;
249 } else if (current->ptrace & PT_TRACE_FORK)
250 return PTRACE_EVENT_FORK;
252 return 0;
/**
 * tracehook_finish_clone - new child created and being attached
 * @child: new child task
 * @clone_flags: %CLONE_* flags from clone/fork/vfork system call
 * @trace: return value from tracehook_prepare_clone()
 *
 * This is called immediately after adding @child to its parent's children list.
 * The @trace value is that returned by tracehook_prepare_clone().
 *
 * Called with current's siglock and write_lock_irq(&tasklist_lock) held.
 */
static inline void tracehook_finish_clone(struct task_struct *child,
					  unsigned long clone_flags, int trace)
{
	/* Initialize utrace state first, then let ptrace auto-attach when
	 * CLONE_PTRACE was passed or a ptrace clone event was requested. */
	utrace_init_task(child);
	ptrace_init_task(child, (clone_flags & CLONE_PTRACE) || trace);
}
/**
 * tracehook_report_clone - in parent, new child is about to start running
 * @regs: parent's user register state
 * @clone_flags: flags from parent's system call
 * @pid: new child's PID in the parent's namespace
 * @child: new child task
 *
 * Called after a child is set up, but before it has been started running.
 * This is not a good place to block, because the child has not started
 * yet.  Suspend the child here if desired, and then block in
 * tracehook_report_clone_complete().  This must prevent the child from
 * self-reaping if tracehook_report_clone_complete() uses the @child
 * pointer; otherwise it might have died and been released by the time
 * tracehook_report_clone_complete() is called.
 *
 * Called with no locks held, but the child cannot run until this returns.
 */
static inline void tracehook_report_clone(struct pt_regs *regs,
					  unsigned long clone_flags,
					  pid_t pid, struct task_struct *child)
{
	if (unlikely(task_utrace_flags(current) & UTRACE_EVENT(CLONE)))
		utrace_report_clone(clone_flags, child);
	/* A traced child starts life with a pending SIGSTOP queued, so it
	 * stops before running any user code. */
	if (unlikely(task_ptrace(child))) {
		/*
		 * It doesn't matter who attached/attaching to this
		 * task, the pending SIGSTOP is right in any case.
		 */
		sigaddset(&child->pending.signal, SIGSTOP);
		set_tsk_thread_flag(child, TIF_SIGPENDING);
	}
}
307 * tracehook_report_clone_complete - new child is running
308 * @trace: return value from tracehook_prepare_clone()
309 * @regs: parent's user register state
310 * @clone_flags: flags from parent's system call
311 * @pid: new child's PID in the parent's namespace
312 * @child: child task, already running
314 * This is called just after the child has started running. This is
315 * just before the clone/fork syscall returns, or blocks for vfork
316 * child completion if @clone_flags has the %CLONE_VFORK bit set.
317 * The @child pointer may be invalid if a self-reaping child died and
318 * tracehook_report_clone() took no action to prevent it from self-reaping.
320 * Called with no locks held.
322 static inline void tracehook_report_clone_complete(int trace,
323 struct pt_regs *regs,
324 unsigned long clone_flags,
325 pid_t pid,
326 struct task_struct *child)
328 if (unlikely(task_utrace_flags(current) & UTRACE_EVENT(CLONE)) &&
329 (clone_flags & CLONE_VFORK))
330 utrace_finish_vfork(current);
331 if (unlikely(trace))
332 ptrace_event(0, trace, pid);
/**
 * tracehook_report_vfork_done - vfork parent's child has exited or exec'd
 * @child: child task, already running
 * @pid: new child's PID in the parent's namespace
 *
 * Called after a %CLONE_VFORK parent has waited for the child to complete.
 * The clone/vfork system call will return immediately after this.
 * The @child pointer may be invalid if a self-reaping child died and
 * tracehook_report_clone() took no action to prevent it from self-reaping.
 *
 * Called with no locks held.
 */
static inline void tracehook_report_vfork_done(struct task_struct *child,
					       pid_t pid)
{
	/* Only reports when PT_TRACE_VFORK_DONE was requested by a tracer. */
	ptrace_event(PT_TRACE_VFORK_DONE, PTRACE_EVENT_VFORK_DONE, pid);
}
/**
 * tracehook_prepare_release_task - task is being reaped, clean up tracing
 * @task: task in %EXIT_DEAD state
 *
 * This is called in release_task() just before @task gets finally reaped
 * and freed.  This would be the ideal place to remove and clean up any
 * tracing-related state for @task.
 *
 * Called with no locks held.
 */
static inline void tracehook_prepare_release_task(struct task_struct *task)
{
	/* Drop any utrace state attached to @task before it is freed. */
	utrace_release_task(task);
}
/**
 * tracehook_finish_release_task - final tracing clean-up
 * @task: task in %EXIT_DEAD state
 *
 * This is called in release_task() when @task is being in the middle of
 * being reaped.  After this, there must be no tracing entanglements.
 *
 * Called with write_lock_irq(&tasklist_lock) held.
 */
static inline void tracehook_finish_release_task(struct task_struct *task)
{
	ptrace_release_task(task);
	/* Reaping a task that is not yet EXIT_DEAD is a caller bug. */
	BUG_ON(task->exit_state != EXIT_DEAD);
}
384 * tracehook_signal_handler - signal handler setup is complete
385 * @sig: number of signal being delivered
386 * @info: siginfo_t of signal being delivered
387 * @ka: sigaction setting that chose the handler
388 * @regs: user register state
389 * @stepping: nonzero if debugger single-step or block-step in use
391 * Called by the arch code after a signal handler has been set up.
392 * Register and stack state reflects the user handler about to run.
393 * Signal mask changes have already been made.
395 * Called without locks, shortly before returning to user mode
396 * (or handling more signals).
398 static inline void tracehook_signal_handler(int sig, siginfo_t *info,
399 const struct k_sigaction *ka,
400 struct pt_regs *regs, int stepping)
402 if (task_utrace_flags(current))
403 utrace_signal_handler(current, stepping);
404 if (stepping)
405 ptrace_notify(SIGTRAP);
409 * tracehook_consider_ignored_signal - suppress short-circuit of ignored signal
410 * @task: task receiving the signal
411 * @sig: signal number being sent
413 * Return zero iff tracing doesn't care to examine this ignored signal,
414 * so it can short-circuit normal delivery and never even get queued.
416 * Called with @task->sighand->siglock held.
418 static inline int tracehook_consider_ignored_signal(struct task_struct *task,
419 int sig)
421 if (unlikely(task_utrace_flags(task) & UTRACE_EVENT(SIGNAL_IGN)))
422 return 1;
423 return (task_ptrace(task) & PT_PTRACED) != 0;
427 * tracehook_consider_fatal_signal - suppress special handling of fatal signal
428 * @task: task receiving the signal
429 * @sig: signal number being sent
431 * Return nonzero to prevent special handling of this termination signal.
432 * Normally handler for signal is %SIG_DFL. It can be %SIG_IGN if @sig is
433 * ignored, in which case force_sig() is about to reset it to %SIG_DFL.
434 * When this returns zero, this signal might cause a quick termination
435 * that does not give the debugger a chance to intercept the signal.
437 * Called with or without @task->sighand->siglock held.
439 static inline int tracehook_consider_fatal_signal(struct task_struct *task,
440 int sig)
442 if (unlikely(task_utrace_flags(task) & (UTRACE_EVENT(SIGNAL_TERM) |
443 UTRACE_EVENT(SIGNAL_CORE))))
444 return 1;
445 return (task_ptrace(task) & PT_PTRACED) != 0;
449 * tracehook_force_sigpending - let tracing force signal_pending(current) on
451 * Called when recomputing our signal_pending() flag. Return nonzero
452 * to force the signal_pending() flag on, so that tracehook_get_signal()
453 * will be called before the next return to user mode.
455 * Called with @current->sighand->siglock held.
457 static inline int tracehook_force_sigpending(void)
459 if (unlikely(task_utrace_flags(current)))
460 return utrace_interrupt_pending();
461 return 0;
465 * tracehook_get_signal - deliver synthetic signal to traced task
466 * @task: @current
467 * @regs: task_pt_regs(@current)
468 * @info: details of synthetic signal
469 * @return_ka: sigaction for synthetic signal
471 * Return zero to check for a real pending signal normally.
472 * Return -1 after releasing the siglock to repeat the check.
473 * Return a signal number to induce an artifical signal delivery,
474 * setting *@info and *@return_ka to specify its details and behavior.
476 * The @return_ka->sa_handler value controls the disposition of the
477 * signal, no matter the signal number. For %SIG_DFL, the return value
478 * is a representative signal to indicate the behavior (e.g. %SIGTERM
479 * for death, %SIGQUIT for core dump, %SIGSTOP for job control stop,
480 * %SIGTSTP for stop unless in an orphaned pgrp), but the signal number
481 * reported will be @info->si_signo instead.
483 * Called with @task->sighand->siglock held, before dequeuing pending signals.
485 static inline int tracehook_get_signal(struct task_struct *task,
486 struct pt_regs *regs,
487 siginfo_t *info,
488 struct k_sigaction *return_ka)
490 if (unlikely(task_utrace_flags(task)))
491 return utrace_get_signal(task, regs, info, return_ka);
492 return 0;
496 * tracehook_notify_jctl - report about job control stop/continue
497 * @notify: zero, %CLD_STOPPED or %CLD_CONTINUED
498 * @why: %CLD_STOPPED or %CLD_CONTINUED
500 * This is called when we might call do_notify_parent_cldstop().
502 * @notify is zero if we would not ordinarily send a %SIGCHLD,
503 * or is the %CLD_STOPPED or %CLD_CONTINUED .si_code for %SIGCHLD.
505 * @why is %CLD_STOPPED when about to stop for job control;
506 * we are already in %TASK_STOPPED state, about to call schedule().
507 * It might also be that we have just exited (check %PF_EXITING),
508 * but need to report that a group-wide stop is complete.
510 * @why is %CLD_CONTINUED when waking up after job control stop and
511 * ready to make a delayed @notify report.
513 * Return the %CLD_* value for %SIGCHLD, or zero to generate no signal.
515 * Called with the siglock held.
517 static inline int tracehook_notify_jctl(int notify, int why)
519 if (task_utrace_flags(current) & UTRACE_EVENT(JCTL))
520 utrace_report_jctl(notify, why);
521 return notify ?: (current->ptrace & PT_PTRACED) ? why : 0;
/**
 * tracehook_finish_jctl - report about return from job control stop
 *
 * This is called by do_signal_stop() after wakeup.
 */
static inline void tracehook_finish_jctl(void)
{
	/* Intentionally empty: hook point for tracing implementations. */
}
/* Special negative return values for tracehook_notify_death(): */
#define DEATH_REAP			-1	/* self-reap right now */
#define DEATH_DELAYED_GROUP_LEADER	-2	/* zombie, don't notify parent */

/**
 * tracehook_notify_death - task is dead, ready to notify parent
 * @task: @current task now exiting
 * @death_cookie: value to pass to tracehook_report_death()
 * @group_dead: nonzero if this was the last thread in the group to die
 *
 * A return value >= 0 means call do_notify_parent() with that signal
 * number.  Negative return value can be %DEATH_REAP to self-reap right
 * now, or %DEATH_DELAYED_GROUP_LEADER to a zombie without notifying our
 * parent.  Note that a return value of 0 means a do_notify_parent() call
 * that sends no signal, but still wakes up a parent blocked in wait*().
 *
 * Called with write_lock_irq(&tasklist_lock) held.
 */
static inline int tracehook_notify_death(struct task_struct *task,
					 void **death_cookie, int group_dead)
{
	/* The cookie is handed back later to tracehook_report_death(). */
	*death_cookie = task_utrace_struct(task);

	/* A detached task self-reaps unless a tracer still wants SIGCHLD. */
	if (task_detached(task))
		return task->ptrace ? SIGCHLD : DEATH_REAP;

	/*
	 * If something other than our normal parent is ptracing us, then
	 * send it a SIGCHLD instead of honoring exit_signal.  exit_signal
	 * only has special meaning to our real parent.
	 */
	if (thread_group_empty(task) && !ptrace_reparented(task))
		return task->exit_signal;

	return task->ptrace ? SIGCHLD : DEATH_DELAYED_GROUP_LEADER;
}
/**
 * tracehook_report_death - task is dead and ready to be reaped
 * @task: @current task now exiting
 * @signal: return value from tracehook_notify_death()
 * @death_cookie: value passed back from tracehook_notify_death()
 * @group_dead: nonzero if this was the last thread in the group to die
 *
 * Thread has just become a zombie or is about to self-reap.  If positive,
 * @signal is the signal number just sent to the parent (usually %SIGCHLD).
 * If @signal is %DEATH_REAP, this thread will self-reap.  If @signal is
 * %DEATH_DELAYED_GROUP_LEADER, this is a delayed_group_leader() zombie.
 * The @death_cookie was passed back by tracehook_notify_death().
 *
 * If normal reaping is not inhibited, @task->exit_state might be changing
 * in parallel.
 *
 * Called without locks.
 */
static inline void tracehook_report_death(struct task_struct *task,
					  int signal, void *death_cookie,
					  int group_dead)
{
	/*
	 * Full barrier before reading utrace_flags.
	 * NOTE(review): presumably this orders the caller's exit-state
	 * update against the flags check, pairing with a barrier on the
	 * utrace side -- confirm against the utrace core.
	 */
	smp_mb();
	if (task_utrace_flags(task) & _UTRACE_DEATH_EVENTS)
		utrace_report_death(task, death_cookie, group_dead, signal);
}
596 #ifdef TIF_NOTIFY_RESUME
/**
 * set_notify_resume - cause tracehook_notify_resume() to be called
 * @task: task that will call tracehook_notify_resume()
 *
 * Calling this arranges that @task will call tracehook_notify_resume()
 * before returning to user mode.  If it's already running in user mode,
 * it will enter the kernel and call tracehook_notify_resume() soon.
 * If it's blocked, it will not be woken.
 */
static inline void set_notify_resume(struct task_struct *task)
{
	/* Only kick the task's CPU when we are the one setting the flag;
	 * if it was already set, the kick is already in flight. */
	if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_RESUME))
		kick_process(task);
}
/**
 * tracehook_notify_resume - report when about to return to user mode
 * @regs: user-mode registers of @current task
 *
 * This is called when %TIF_NOTIFY_RESUME has been set.  Now we are
 * about to return to user mode, and the user state in @regs can be
 * inspected or adjusted.  The caller in arch code has cleared
 * %TIF_NOTIFY_RESUME before the call.  If the flag gets set again
 * asynchronously, this will be called again before we return to
 * user mode.
 *
 * Called without locks.  However, on some machines this may be
 * called with interrupts disabled.
 */
static inline void tracehook_notify_resume(struct pt_regs *regs)
{
	struct task_struct *task = current;
	/*
	 * This pairs with the barrier implicit in set_notify_resume().
	 * It ensures that we read the nonzero utrace_flags set before
	 * set_notify_resume() was called by utrace setup.
	 */
	smp_rmb();
	if (task_utrace_flags(task))
		utrace_resume(task, regs);
}
638 #endif /* TIF_NOTIFY_RESUME */
640 #endif /* <linux/tracehook.h> */