/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
/*
 * Initialize a new task whose father had been ptraced.
 *
 * Called from copy_process().
 */
void ptrace_fork(struct task_struct *child, unsigned long clone_flags)
{
        arch_ptrace_fork(child, clone_flags);
}
/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
        BUG_ON(!list_empty(&child->ptrace_entry));
        list_add(&child->ptrace_entry, &new_parent->ptraced);
        child->parent = new_parent;
}
/*
 * Turn a tracing stop into a normal stop now, since with no tracer there
 * would be no way to wake it up with SIGCONT or SIGKILL. If there was a
 * signal sent that would resume the child, but didn't because it was in
 * TASK_TRACED, resume it now.
 * Requires that irqs be disabled.
 */
static void ptrace_untrace(struct task_struct *child)
{
        spin_lock(&child->sighand->siglock);
        if (task_is_traced(child)) {
                /*
                 * If the group stop is completed or in progress,
                 * this thread was already counted as stopped.
                 */
                if (child->signal->flags & SIGNAL_STOP_STOPPED ||
                    child->signal->group_stop_count)
                        __set_task_state(child, TASK_STOPPED);
                else
                        signal_wake_up(child, 1);
        }
        spin_unlock(&child->sighand->siglock);
}
/*
 * unptrace a task: move it back to its original parent and
 * remove it from the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_unlink(struct task_struct *child)
{
        BUG_ON(!child->ptrace);

        child->ptrace = 0;
        child->parent = child->real_parent;
        list_del_init(&child->ptrace_entry);

        arch_ptrace_untrace(child);
        if (task_is_traced(child))
                ptrace_untrace(child);
}
/*
 * Check that we have indeed attached to the thing..
 */
int ptrace_check_attach(struct task_struct *child, int kill)
{
        int ret = -ESRCH;

        /*
         * We take the read lock around doing both checks to close a
         * possible race where someone else was tracing our child and
         * detached between these two checks. After this locked check,
         * we are sure that this is our traced child and that can only
         * be changed by us so it's not changing right after this.
         */
        read_lock(&tasklist_lock);
        if ((child->ptrace & PT_PTRACED) && child->parent == current) {
                ret = 0;
                /*
                 * child->sighand can't be NULL, release_task()
                 * does ptrace_unlink() before __exit_signal().
                 */
                spin_lock_irq(&child->sighand->siglock);
                if (task_is_stopped(child))
                        child->state = TASK_TRACED;
                else if (!task_is_traced(child) && !kill)
                        ret = -ESRCH;
                spin_unlock_irq(&child->sighand->siglock);
        }
        read_unlock(&tasklist_lock);

        if (!ret && !kill)
                ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;

        /* All systems go.. */
        return ret;
}
int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
        const struct cred *cred = current_cred(), *tcred;

        /* May we inspect the given task?
         * This check is used both for attaching with ptrace
         * and for allowing access to sensitive information in /proc.
         *
         * ptrace_attach denies several cases that /proc allows
         * because setting up the necessary parent/child relationship
         * or halting the specified task is impossible.
         */
        int dumpable = 0;
        /* Don't let security modules deny introspection */
        if (task == current)
                return 0;
        rcu_read_lock();
        tcred = __task_cred(task);
        if ((cred->uid != tcred->euid ||
             cred->uid != tcred->suid ||
             cred->uid != tcred->uid  ||
             cred->gid != tcred->egid ||
             cred->gid != tcred->sgid ||
             cred->gid != tcred->gid) &&
            !capable(CAP_SYS_PTRACE)) {
                rcu_read_unlock();
                return -EPERM;
        }
        rcu_read_unlock();
        smp_rmb();
        if (task->mm)
                dumpable = get_dumpable(task->mm);
        if (!dumpable && !capable(CAP_SYS_PTRACE))
                return -EPERM;

        return security_ptrace_may_access(task, mode);
}
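/*
 * Note: callers pass PTRACE_MODE_READ for /proc-style introspection and
 * PTRACE_MODE_ATTACH for a full ptrace attach; the credential and
 * dumpability checks above are shared by both paths.
 */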
bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
        int err;
        task_lock(task);
        err = __ptrace_may_access(task, mode);
        task_unlock(task);
        return !err;
}
int ptrace_attach(struct task_struct *task)
{
        int retval;
        unsigned long flags;

        audit_ptrace(task);

        retval = -EPERM;
        if (same_thread_group(task, current))
                goto out;

        /* Protect the target's credential calculations against our
         * interference; SUID, SGID and LSM creds get determined differently
         * under ptrace.
         */
        retval = mutex_lock_interruptible(&task->cred_guard_mutex);
        if (retval < 0)
                goto out;

        retval = -EPERM;
repeat:
        /*
         * We want to hold both the task-lock and the
         * tasklist_lock for writing at the same time.
         * But that's against the rules (tasklist_lock
         * is taken for reading by interrupts on other
         * cpu's that may have task_lock).
         */
        task_lock(task);
        if (!write_trylock_irqsave(&tasklist_lock, flags)) {
                task_unlock(task);
                do {
                        cpu_relax();
                } while (!write_can_lock(&tasklist_lock));
                goto repeat;
        }

        if (!task->mm)
                goto bad;
        /* the same process cannot be attached many times */
        if (task->ptrace & PT_PTRACED)
                goto bad;
        retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
        if (retval)
                goto bad;

        /* Go */
        task->ptrace |= PT_PTRACED;
        if (capable(CAP_SYS_PTRACE))
                task->ptrace |= PT_PTRACE_CAP;

        __ptrace_link(task, current);

        send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
bad:
        write_unlock_irqrestore(&tasklist_lock, flags);
        task_unlock(task);
        mutex_unlock(&task->cred_guard_mutex);
out:
        return retval;
}
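/*
 * For orientation, a sketch of the userspace side of an attach (not part
 * of this file; error handling omitted).  The tracer must wait for the
 * SIGSTOP queued above before issuing further requests:
 *
 *      ptrace(PTRACE_ATTACH, pid, NULL, NULL);
 *      waitpid(pid, &status, 0);       // child enters the trace stop
 *      ptrace(PTRACE_CONT, pid, NULL, NULL);
 */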
/*
 * Called with irqs disabled, returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
        int ret;

        spin_lock(&sigh->siglock);
        ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
              (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
        spin_unlock(&sigh->siglock);

        return ret;
}
/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping. Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do.
 * But if our normal children self-reap, then this child
 * was prevented by ptrace and we must reap it now.
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
        __ptrace_unlink(p);

        if (p->exit_state == EXIT_ZOMBIE) {
                if (!task_detached(p) && thread_group_empty(p)) {
                        if (!same_thread_group(p->real_parent, tracer))
                                do_notify_parent(p, p->exit_signal);
                        else if (ignoring_children(tracer->sighand))
                                p->exit_signal = -1;
                }
                if (task_detached(p)) {
                        /* Mark it as in the process of being reaped. */
                        p->exit_state = EXIT_DEAD;
                        return true;
                }
        }

        return false;
}
int ptrace_detach(struct task_struct *child, unsigned int data)
{
        bool dead = false;

        if (!valid_signal(data))
                return -EIO;

        /* Architecture-specific hardware disable .. */
        ptrace_disable(child);
        clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

        write_lock_irq(&tasklist_lock);
        /*
         * This child can be already killed. Make sure de_thread() or
         * our sub-thread doing do_wait() didn't do release_task() yet.
         */
        if (child->ptrace) {
                child->exit_code = data;
                dead = __ptrace_detach(current, child);
        }
        write_unlock_irq(&tasklist_lock);

        if (unlikely(dead))
                release_task(child);

        return 0;
}
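/*
 * Userspace sketch (illustrative only): data carries an optional signal
 * to deliver to the child on detach, 0 meaning none:
 *
 *      ptrace(PTRACE_DETACH, pid, NULL, (void *)(long)SIGCONT);
 */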
/*
 * Detach all tasks we were using ptrace on.
 */
void exit_ptrace(struct task_struct *tracer)
{
        struct task_struct *p, *n;
        LIST_HEAD(ptrace_dead);

        write_lock_irq(&tasklist_lock);
        list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
                if (__ptrace_detach(tracer, p))
                        list_add(&p->ptrace_entry, &ptrace_dead);
        }
        write_unlock_irq(&tasklist_lock);

        BUG_ON(!list_empty(&tracer->ptraced));

        list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
                list_del_init(&p->ptrace_entry);
                release_task(p);
        }
}
int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
        int copied = 0;

        while (len > 0) {
                char buf[128];
                int this_len, retval;

                this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
                retval = access_process_vm(tsk, src, buf, this_len, 0);
                if (!retval) {
                        if (copied)
                                break;
                        return -EIO;
                }
                if (copy_to_user(dst, buf, retval))
                        return -EFAULT;
                copied += retval;
                src += retval;
                dst += retval;
                len -= retval;
        }

        return copied;
}
int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
        int copied = 0;

        while (len > 0) {
                char buf[128];
                int this_len, retval;

                this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
                if (copy_from_user(buf, src, this_len))
                        return -EFAULT;
                retval = access_process_vm(tsk, dst, buf, this_len, 1);
                if (!retval) {
                        if (copied)
                                break;
                        return -EIO;
                }
                copied += retval;
                src += retval;
                dst += retval;
                len -= retval;
        }

        return copied;
}
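/*
 * Both helpers above deliberately stream the transfer through a small
 * on-stack buffer, so a large request never needs a large kernel
 * allocation; and once some bytes have been copied, a short transfer
 * returns the byte count so far instead of an error.
 */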
static int ptrace_setoptions(struct task_struct *child, long data)
{
        child->ptrace &= ~PT_TRACE_MASK;

        if (data & PTRACE_O_TRACESYSGOOD)
                child->ptrace |= PT_TRACESYSGOOD;

        if (data & PTRACE_O_TRACEFORK)
                child->ptrace |= PT_TRACE_FORK;

        if (data & PTRACE_O_TRACEVFORK)
                child->ptrace |= PT_TRACE_VFORK;

        if (data & PTRACE_O_TRACECLONE)
                child->ptrace |= PT_TRACE_CLONE;

        if (data & PTRACE_O_TRACEEXEC)
                child->ptrace |= PT_TRACE_EXEC;

        if (data & PTRACE_O_TRACEVFORKDONE)
                child->ptrace |= PT_TRACE_VFORK_DONE;

        if (data & PTRACE_O_TRACEEXIT)
                child->ptrace |= PT_TRACE_EXIT;

        return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
}
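/*
 * Userspace sketch of requesting these events (illustrative only):
 *
 *      ptrace(PTRACE_SETOPTIONS, pid, NULL,
 *             PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEFORK);
 *
 * With TRACESYSGOOD set, syscall stops are reported as SIGTRAP | 0x80
 * so the tracer can tell them apart from real SIGTRAPs.
 */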
static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
        int error = -ESRCH;

        read_lock(&tasklist_lock);
        if (likely(child->sighand != NULL)) {
                error = -EINVAL;
                spin_lock_irq(&child->sighand->siglock);
                if (likely(child->last_siginfo != NULL)) {
                        *info = *child->last_siginfo;
                        error = 0;
                }
                spin_unlock_irq(&child->sighand->siglock);
        }
        read_unlock(&tasklist_lock);

        return error;
}
static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
        int error = -ESRCH;

        read_lock(&tasklist_lock);
        if (likely(child->sighand != NULL)) {
                error = -EINVAL;
                spin_lock_irq(&child->sighand->siglock);
                if (likely(child->last_siginfo != NULL)) {
                        *child->last_siginfo = *info;
                        error = 0;
                }
                spin_unlock_irq(&child->sighand->siglock);
        }
        read_unlock(&tasklist_lock);

        return error;
}
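/*
 * Userspace sketch of the siginfo requests served by the two helpers
 * above (illustrative only):
 *
 *      siginfo_t si;
 *      ptrace(PTRACE_GETSIGINFO, pid, NULL, &si);
 *      // inspect or modify si, e.g. si.si_signo
 *      ptrace(PTRACE_SETSIGINFO, pid, NULL, &si);
 */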
#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)          ((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)          0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)         ((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)         0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)   ((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)   0
#endif
static int ptrace_resume(struct task_struct *child, long request, long data)
{
        if (!valid_signal(data))
                return -EIO;

        if (request == PTRACE_SYSCALL)
                set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
        else
                clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
        if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
                set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
        else
                clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

        if (is_singleblock(request)) {
                if (unlikely(!arch_has_block_step()))
                        return -EIO;
                user_enable_block_step(child);
        } else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
                if (unlikely(!arch_has_single_step()))
                        return -EIO;
                user_enable_single_step(child);
        } else {
                user_disable_single_step(child);
        }

        child->exit_code = data;
        wake_up_process(child);

        return 0;
}
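/*
 * Userspace sketch of a syscall-tracing loop driven by the resume logic
 * above (illustrative only; assumes the child is already attached):
 *
 *      for (;;) {
 *              if (ptrace(PTRACE_SYSCALL, pid, NULL, NULL) < 0)
 *                      break;
 *              waitpid(pid, &status, 0);
 *              if (WIFEXITED(status))
 *                      break;
 *              // child is stopped at syscall entry or exit here
 *      }
 */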
int ptrace_request(struct task_struct *child, long request,
                   long addr, long data)
{
        int ret = -EIO;
        siginfo_t siginfo;

        switch (request) {
        case PTRACE_PEEKTEXT:
        case PTRACE_PEEKDATA:
                return generic_ptrace_peekdata(child, addr, data);
        case PTRACE_POKETEXT:
        case PTRACE_POKEDATA:
                return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
        case PTRACE_OLDSETOPTIONS:
#endif
        case PTRACE_SETOPTIONS:
                ret = ptrace_setoptions(child, data);
                break;
        case PTRACE_GETEVENTMSG:
                ret = put_user(child->ptrace_message, (unsigned long __user *) data);
                break;

        case PTRACE_GETSIGINFO:
                ret = ptrace_getsiginfo(child, &siginfo);
                if (!ret)
                        ret = copy_siginfo_to_user((siginfo_t __user *) data,
                                                   &siginfo);
                break;

        case PTRACE_SETSIGINFO:
                if (copy_from_user(&siginfo, (siginfo_t __user *) data,
                                   sizeof siginfo))
                        ret = -EFAULT;
                else
                        ret = ptrace_setsiginfo(child, &siginfo);
                break;

        case PTRACE_DETACH:      /* detach a process that was attached. */
                ret = ptrace_detach(child, data);
                break;

#ifdef PTRACE_SINGLESTEP
        case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
        case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
        case PTRACE_SYSEMU:
        case PTRACE_SYSEMU_SINGLESTEP:
#endif
        case PTRACE_SYSCALL:
        case PTRACE_CONT:
                return ptrace_resume(child, request, data);

        case PTRACE_KILL:
                if (child->exit_state) /* already dead */
                        return 0;
                return ptrace_resume(child, request, SIGKILL);

        default:
                break;
        }

        return ret;
}
/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
int ptrace_traceme(void)
{
        int ret = -EPERM;
        unsigned long flags;

        /*
         * Are we already being traced?
         */
repeat:
        task_lock(current);
        if (!(current->ptrace & PT_PTRACED)) {
                /*
                 * See ptrace_attach() comments about the locking here.
                 */
                if (!write_trylock_irqsave(&tasklist_lock, flags)) {
                        task_unlock(current);
                        do {
                                cpu_relax();
                        } while (!write_can_lock(&tasklist_lock));
                        goto repeat;
                }

                ret = security_ptrace_traceme(current->parent);

                /*
                 * Check PF_EXITING to ensure ->real_parent has not passed
                 * exit_ptrace(). Otherwise we don't report the error but
                 * pretend ->real_parent untraces us right after return.
                 */
                if (!ret && !(current->real_parent->flags & PF_EXITING)) {
                        current->ptrace |= PT_PTRACED;
                        __ptrace_link(current, current->real_parent);
                }

                write_unlock_irqrestore(&tasklist_lock, flags);
        }
        task_unlock(current);

        return ret;
}
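/*
 * Userspace sketch of the canonical PTRACE_TRACEME sequence
 * (illustrative only; error handling omitted):
 *
 *      pid_t pid = fork();
 *      if (pid == 0) {
 *              ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *              execv(path, argv);      // stops with SIGTRAP at exec
 *      }
 *      waitpid(pid, &status, 0);
 */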
/**
 * ptrace_get_task_struct  --  grab a task struct reference for ptrace
 * @pid:       process id to grab a task_struct reference of
 *
 * This function is a helper for ptrace implementations.  It checks
 * permissions and then grabs a task struct for use of the actual
 * ptrace implementation.
 *
 * Returns the task_struct for @pid or an ERR_PTR() on failure.
 */
struct task_struct *ptrace_get_task_struct(pid_t pid)
{
        struct task_struct *child;

        read_lock(&tasklist_lock);
        child = find_task_by_vpid(pid);
        if (child)
                get_task_struct(child);
        read_unlock(&tasklist_lock);

        if (!child)
                return ERR_PTR(-ESRCH);
        return child;
}
#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)       do { } while (0)
#endif
SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
{
        struct task_struct *child;
        long ret;

        /*
         * This lock_kernel fixes a subtle race with suid exec
         */
        lock_kernel();
        if (request == PTRACE_TRACEME) {
                ret = ptrace_traceme();
                if (!ret)
                        arch_ptrace_attach(current);
                goto out;
        }

        child = ptrace_get_task_struct(pid);
        if (IS_ERR(child)) {
                ret = PTR_ERR(child);
                goto out;
        }

        if (request == PTRACE_ATTACH) {
                ret = ptrace_attach(child);
                /*
                 * Some architectures need to do book-keeping after
                 * a ptrace attach.
                 */
                if (!ret)
                        arch_ptrace_attach(child);
                goto out_put_task_struct;
        }

        ret = ptrace_check_attach(child, request == PTRACE_KILL);
        if (ret < 0)
                goto out_put_task_struct;

        ret = arch_ptrace(child, request, addr, data);

 out_put_task_struct:
        put_task_struct(child);
 out:
        unlock_kernel();
        return ret;
}
int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
{
        unsigned long tmp;
        int copied;

        copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
        if (copied != sizeof(tmp))
                return -EIO;
        return put_user(tmp, (unsigned long __user *)data);
}
int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
{
        int copied;

        copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
        return (copied == sizeof(data)) ? 0 : -EIO;
}
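/*
 * Userspace sketch of the word-sized transfers served by the two
 * helpers above (illustrative only; PEEKDATA returns the word itself,
 * so errno must be cleared and checked to detect failure):
 *
 *      errno = 0;
 *      long word = ptrace(PTRACE_PEEKDATA, pid, addr, NULL);
 *      if (word == -1 && errno != 0)
 *              return -1;
 *      ptrace(PTRACE_POKEDATA, pid, addr, word);
 */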
#if defined CONFIG_COMPAT
#include <linux/compat.h>

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
                          compat_ulong_t addr, compat_ulong_t data)
{
        compat_ulong_t __user *datap = compat_ptr(data);
        compat_ulong_t word;
        siginfo_t siginfo;
        int ret;

        switch (request) {
        case PTRACE_PEEKTEXT:
        case PTRACE_PEEKDATA:
                ret = access_process_vm(child, addr, &word, sizeof(word), 0);
                if (ret != sizeof(word))
                        ret = -EIO;
                else
                        ret = put_user(word, datap);
                break;

        case PTRACE_POKETEXT:
        case PTRACE_POKEDATA:
                ret = access_process_vm(child, addr, &data, sizeof(data), 1);
                ret = (ret != sizeof(data) ? -EIO : 0);
                break;

        case PTRACE_GETEVENTMSG:
                ret = put_user((compat_ulong_t) child->ptrace_message, datap);
                break;

        case PTRACE_GETSIGINFO:
                ret = ptrace_getsiginfo(child, &siginfo);
                if (!ret)
                        ret = copy_siginfo_to_user32(
                                (struct compat_siginfo __user *) datap,
                                &siginfo);
                break;

        case PTRACE_SETSIGINFO:
                memset(&siginfo, 0, sizeof siginfo);
                if (copy_siginfo_from_user32(
                            &siginfo, (struct compat_siginfo __user *) datap))
                        ret = -EFAULT;
                else
                        ret = ptrace_setsiginfo(child, &siginfo);
                break;

        default:
                ret = ptrace_request(child, request, addr, data);
        }

        return ret;
}
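/*
 * Note: compat_ptr() above widens the 32-bit user pointer for the
 * 64-bit kernel, and the PEEK/POKE paths transfer compat_ulong_t sized
 * words rather than the native long, matching what a 32-bit tracer
 * expects.
 */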
asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
                                  compat_long_t addr, compat_long_t data)
{
        struct task_struct *child;
        long ret;

        /*
         * This lock_kernel fixes a subtle race with suid exec
         */
        lock_kernel();
        if (request == PTRACE_TRACEME) {
                ret = ptrace_traceme();
                goto out;
        }

        child = ptrace_get_task_struct(pid);
        if (IS_ERR(child)) {
                ret = PTR_ERR(child);
                goto out;
        }

        if (request == PTRACE_ATTACH) {
                ret = ptrace_attach(child);
                /*
                 * Some architectures need to do book-keeping after
                 * a ptrace attach.
                 */
                if (!ret)
                        arch_ptrace_attach(child);
                goto out_put_task_struct;
        }

        ret = ptrace_check_attach(child, request == PTRACE_KILL);
        if (!ret)
                ret = compat_arch_ptrace(child, request, addr, data);

 out_put_task_struct:
        put_task_struct(child);
 out:
        unlock_kernel();
        return ret;
}