// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/uio.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/cn_proc.h>
#include <linux/compat.h>
#include <linux/sched/signal.h>
#include <linux/minmax.h>
#include <linux/syscall_user_dispatch.h>

#include <asm/syscall.h>	/* for syscall_get_* */

/*
 * Access another process' address space via ptrace.
 * Source/target buffer must be kernel space,
 * Do not walk the page table directly, use get_user_pages
 */
int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
		     void *buf, int len, unsigned int gup_flags)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	if (!tsk->ptrace ||
	    (current != tsk->parent) ||
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptracer_capable(tsk, mm->user_ns))) {
		mmput(mm);
		return 0;
	}

	ret = access_remote_vm(mm, addr, buf, len, gup_flags);
	mmput(mm);

	return ret;
}

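/*
 * Illustrative userspace counterpart (a sketch, not part of this file):
 * a tracer's PTRACE_PEEKDATA lands here via generic_ptrace_peekdata().
 * Assuming an already-attached, stopped tracee:
 *
 *	#include <sys/ptrace.h>
 *	#include <errno.h>
 *
 *	errno = 0;
 *	long word = ptrace(PTRACE_PEEKDATA, pid, (void *)addr, NULL);
 *	if (word == -1 && errno != 0)
 *		perror("PTRACE_PEEKDATA");	// e.g. EIO on a bad address
 */
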
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
		   const struct cred *ptracer_cred)
{
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	child->parent = new_parent;
	child->ptracer_cred = get_cred(ptracer_cred);
}

/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	__ptrace_link(child, new_parent, current_cred());
}

/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Remove @child from the ptrace list, move it back to the original parent,
 * and restore the execution state so that it conforms to the group stop
 * state.
 *
 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
 * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
 * If the ptracer is exiting, the ptracee can be in any state.
 *
 * After detach, the ptracee should be in a state which conforms to the
 * group stop.  If the group is stopped or in the process of stopping, the
 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 * up from TASK_TRACED.
 *
 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
 * to but in the opposite direction of what happens while attaching to a
 * stopped task.  However, in this direction, the intermediate RUNNING
 * state is not hidden even from the current ptracer and if it immediately
 * re-attaches and performs a WNOHANG wait(2), it may fail.
 *
 * CONTEXT:
 * write_lock_irq(tasklist_lock)
 */
void __ptrace_unlink(struct task_struct *child)
{
	const struct cred *old_cred;
	BUG_ON(!child->ptrace);

	clear_task_syscall_work(child, SYSCALL_TRACE);
#if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU)
	clear_task_syscall_work(child, SYSCALL_EMU);
#endif

	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);
	old_cred = child->ptracer_cred;
	child->ptracer_cred = NULL;
	put_cred(old_cred);

	spin_lock(&child->sighand->siglock);
	child->ptrace = 0;
	/*
	 * Clear all pending traps and TRAPPING.  TRAPPING should be
	 * cleared regardless of JOBCTL_STOP_PENDING.  Do it explicitly.
	 */
	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
	task_clear_jobctl_trapping(child);

	/*
	 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
	 * @child isn't dead.
	 */
	if (!(child->flags & PF_EXITING) &&
	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
	     child->signal->group_stop_count))
		child->jobctl |= JOBCTL_STOP_PENDING;

	/*
	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
	 * @child in the butt.  Note that @resume should be used iff @child
	 * is in TASK_TRACED; otherwise, we might unduly disrupt
	 * TASK_KILLABLE sleeps.
	 */
	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
		ptrace_signal_wake_up(child, true);

	spin_unlock(&child->sighand->siglock);
}

static bool looks_like_a_spurious_pid(struct task_struct *task)
{
	if (task->exit_code != ((PTRACE_EVENT_EXEC << 8) | SIGTRAP))
		return false;

	if (task_pid_vnr(task) == task->ptrace_message)
		return false;

	/*
	 * The tracee changed its pid but the PTRACE_EVENT_EXEC event
	 * was not wait()'ed, most probably debugger targets the old
	 * leader which was destroyed in de_thread().
	 */
	return true;
}

/*
 * Ensure that nothing can wake it up, even SIGKILL
 *
 * A task is switched to this state while a ptrace operation is in progress;
 * such that the ptrace operation is uninterruptible.
 */
static bool ptrace_freeze_traced(struct task_struct *task)
{
	bool ret = false;

	/* Lockless, nobody but us can set this flag */
	if (task->jobctl & JOBCTL_LISTENING)
		return ret;

	spin_lock_irq(&task->sighand->siglock);
	if (task_is_traced(task) && !looks_like_a_spurious_pid(task) &&
	    !__fatal_signal_pending(task)) {
		task->jobctl |= JOBCTL_PTRACE_FROZEN;
		ret = true;
	}
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}

static void ptrace_unfreeze_traced(struct task_struct *task)
{
	unsigned long flags;

	/*
	 * The child may be awake and may have cleared
	 * JOBCTL_PTRACE_FROZEN (see ptrace_resume).  The child will
	 * not set JOBCTL_PTRACE_FROZEN or enter __TASK_TRACED anew.
	 */
	if (lock_task_sighand(task, &flags)) {
		task->jobctl &= ~JOBCTL_PTRACE_FROZEN;
		if (__fatal_signal_pending(task)) {
			task->jobctl &= ~JOBCTL_TRACED;
			wake_up_state(task, __TASK_TRACED);
		}
		unlock_task_sighand(task, &flags);
	}
}

/**
 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 * @child: ptracee to check for
 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 *
 * Check whether @child is being ptraced by %current and ready for further
 * ptrace operations.  If @ignore_state is %false, @child also should be in
 * %TASK_TRACED state and on return the child is guaranteed to be traced
 * and not executing.  If @ignore_state is %true, @child can be in any
 * state.
 *
 * CONTEXT:
 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 *
 * RETURNS:
 * 0 on success, -ESRCH if %child is not ready.
 */
static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks.  After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if (child->ptrace && child->parent == current) {
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		if (ignore_state || ptrace_freeze_traced(child))
			ret = 0;
	}
	read_unlock(&tasklist_lock);

	if (!ret && !ignore_state &&
	    WARN_ON_ONCE(!wait_task_inactive(child, __TASK_TRACED|TASK_FROZEN)))
		ret = -ESRCH;

	return ret;
}

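/*
 * For reference, the caller pairs a successful check with
 * ptrace_unfreeze_traced(); the bracket in the syscall path (see
 * SYSCALL_DEFINE4(ptrace, ...) below) is shaped like:
 *
 *	ret = ptrace_check_attach(child, ...);	// may freeze the tracee
 *	if (!ret) {
 *		ret = arch_ptrace(child, request, addr, data);
 *		if (ret || request != PTRACE_DETACH)
 *			ptrace_unfreeze_traced(child);
 *	}
 */
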
static bool ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
{
	if (mode & PTRACE_MODE_NOAUDIT)
		return ns_capable_noaudit(ns, CAP_SYS_PTRACE);
	return ns_capable(ns, CAP_SYS_PTRACE);
}

/* Returns 0 on success, -errno on denial. */
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	const struct cred *cred = current_cred(), *tcred;
	struct mm_struct *mm;
	kuid_t caller_uid;
	kgid_t caller_gid;

	if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
		WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
		return -EPERM;
	}

	/* May we inspect the given task?
	 * This check is used both for attaching with ptrace
	 * and for allowing access to sensitive information in /proc.
	 *
	 * ptrace_attach denies several cases that /proc allows
	 * because setting up the necessary parent/child relationship
	 * or halting the specified task is impossible.
	 */

	/* Don't let security modules deny introspection */
	if (same_thread_group(task, current))
		return 0;
	rcu_read_lock();
	if (mode & PTRACE_MODE_FSCREDS) {
		caller_uid = cred->fsuid;
		caller_gid = cred->fsgid;
	} else {
		/*
		 * Using the euid would make more sense here, but something
		 * in userland might rely on the old behavior, and this
		 * shouldn't be a security problem since
		 * PTRACE_MODE_REALCREDS implies that the caller explicitly
		 * used a syscall that requests access to another process
		 * (and not a filesystem syscall to procfs).
		 */
		caller_uid = cred->uid;
		caller_gid = cred->gid;
	}
	tcred = __task_cred(task);
	if (uid_eq(caller_uid, tcred->euid) &&
	    uid_eq(caller_uid, tcred->suid) &&
	    uid_eq(caller_uid, tcred->uid)  &&
	    gid_eq(caller_gid, tcred->egid) &&
	    gid_eq(caller_gid, tcred->sgid) &&
	    gid_eq(caller_gid, tcred->gid))
		goto ok;
	if (ptrace_has_cap(tcred->user_ns, mode))
		goto ok;
	rcu_read_unlock();
	return -EPERM;
ok:
	rcu_read_unlock();
	/*
	 * If a task drops privileges and becomes nondumpable (through a syscall
	 * like setresuid()) while we are trying to access it, we must ensure
	 * that the dumpability is read after the credentials; otherwise,
	 * we may be able to attach to a task that we shouldn't be able to
	 * attach to (as if the task had dropped privileges without becoming
	 * nondumpable).
	 * Pairs with a write barrier in commit_creds().
	 */
	smp_rmb();
	mm = task->mm;
	if (mm &&
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptrace_has_cap(mm->user_ns, mode)))
		return -EPERM;

	return security_ptrace_access_check(task, mode);
}

bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	int err;

	task_lock(task);
	err = __ptrace_may_access(task, mode);
	task_unlock(task);
	return !err;
}

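/*
 * Typical in-kernel usage elsewhere (a sketch, not from this file): a
 * /proc handler gating access to sensitive per-task state would do:
 *
 *	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
 *		return -EACCES;
 */
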
static int check_ptrace_options(unsigned long data)
{
	if (data & ~(unsigned long)PTRACE_O_MASK)
		return -EINVAL;

	if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
		if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) ||
		    !IS_ENABLED(CONFIG_SECCOMP))
			return -EINVAL;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (seccomp_mode(&current->seccomp) != SECCOMP_MODE_DISABLED ||
		    current->ptrace & PT_SUSPEND_SECCOMP)
			return -EPERM;
	}
	return 0;
}

static inline void ptrace_set_stopped(struct task_struct *task, bool seize)
{
	guard(spinlock)(&task->sighand->siglock);

	/* SEIZE doesn't trap tracee on attach */
	if (!seize)
		send_signal_locked(SIGSTOP, SEND_SIG_PRIV, task, PIDTYPE_PID);

	/*
	 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
	 * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
	 * will be cleared if the child completes the transition or any
	 * event which clears the group stop states happens.  We'll wait
	 * for the transition to complete before returning from this
	 * function.
	 *
	 * This hides STOPPED -> RUNNING -> TRACED transition from the
	 * attaching thread but a different thread in the same group can
	 * still observe the transient RUNNING state.  IOW, if another
	 * thread's WNOHANG wait(2) on the stopped tracee races against
	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
	 *
	 * The following task_is_stopped() test is safe as both transitions
	 * in and out of STOPPED are protected by siglock.
	 */
	if (task_is_stopped(task) &&
	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_STOPPED;
		signal_wake_up_state(task, __TASK_STOPPED);
	}
}

static int ptrace_attach(struct task_struct *task, long request,
			 unsigned long addr,
			 unsigned long flags)
{
	bool seize = (request == PTRACE_SEIZE);
	int retval;

	if (seize) {
		if (addr != 0)
			return -EIO;
		/*
		 * This duplicates the check in check_ptrace_options() because
		 * ptrace_attach() and ptrace_setoptions() have historically
		 * used different error codes for unknown ptrace options.
		 */
		if (flags & ~(unsigned long)PTRACE_O_MASK)
			return -EIO;

		retval = check_ptrace_options(flags);
		if (retval)
			return retval;

		flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
	} else {
		flags = PT_PTRACED;
	}

	audit_ptrace(task);

	if (unlikely(task->flags & PF_KTHREAD))
		return -EPERM;
	if (same_thread_group(task, current))
		return -EPERM;

	/*
	 * Protect exec's credential calculations against our interference;
	 * SUID, SGID and LSM creds get determined differently
	 * under ptrace.
	 */
	scoped_cond_guard (mutex_intr, return -ERESTARTNOINTR,
			   &task->signal->cred_guard_mutex) {

		scoped_guard (task_lock, task) {
			retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
			if (retval)
				return retval;
		}

		scoped_guard (write_lock_irq, &tasklist_lock) {
			if (unlikely(task->exit_state))
				return -EPERM;
			if (task->ptrace)
				return -EPERM;

			task->ptrace = flags;
			ptrace_link(task, current);
			ptrace_set_stopped(task, seize);
		}
	}

	/*
	 * We do not bother to change retval or clear JOBCTL_TRAPPING
	 * if wait_on_bit() was interrupted by SIGKILL. The tracer will
	 * not return to user-mode, it will exit and clear this bit in
	 * __ptrace_unlink() if it wasn't already cleared by the tracee;
	 * and until then nobody can ptrace this task.
	 */
	wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
	proc_ptrace_connector(task, PTRACE_ATTACH);

	return 0;
}

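/*
 * Illustrative userspace counterpart (a sketch, not compiled here):
 * PTRACE_SEIZE passes its options in 'data' and, unlike PTRACE_ATTACH,
 * does not send SIGSTOP to the tracee:
 *
 *	#include <sys/ptrace.h>
 *
 *	if (ptrace(PTRACE_SEIZE, pid, NULL,
 *		   (void *)(PTRACE_O_TRACEEXIT | PTRACE_O_EXITKILL)) == -1)
 *		perror("PTRACE_SEIZE");
 *	// stop the tracee on demand with PTRACE_INTERRUPT instead
 */
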
/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
	int ret = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Are we already being traced? */
	if (!current->ptrace) {
		ret = security_ptrace_traceme(current->parent);
		/*
		 * Check PF_EXITING to ensure ->real_parent has not passed
		 * exit_ptrace().  Otherwise we don't report the error but
		 * pretend ->real_parent untraces us right after return.
		 */
		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
			current->ptrace = PT_PTRACED;
			ptrace_link(current, current->real_parent);
		}
	}
	write_unlock_irq(&tasklist_lock);

	return ret;
}

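/*
 * Illustrative userspace counterpart (a sketch, not compiled here): the
 * canonical PTRACE_TRACEME pattern in a child about to exec the debuggee:
 *
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *		execvp(path, args);	// tracee stops with SIGTRAP at exec
 *		_exit(127);
 *	}
 *	waitpid(pid, &status, 0);	// tracer observes the exec stop
 */
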
/*
 * Called with irqs disabled, returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
	int ret;

	spin_lock(&sigh->siglock);
	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
	spin_unlock(&sigh->siglock);

	return ret;
}

/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping.  Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now, in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
	bool dead;

	__ptrace_unlink(p);

	if (p->exit_state != EXIT_ZOMBIE)
		return false;

	dead = !thread_group_leader(p);

	if (!dead && thread_group_empty(p)) {
		if (!same_thread_group(p->real_parent, tracer))
			dead = do_notify_parent(p, p->exit_signal);
		else if (ignoring_children(tracer->sighand)) {
			__wake_up_parent(p, tracer);
			dead = true;
		}
	}
	/* Mark it as in the process of being reaped. */
	if (dead)
		p->exit_state = EXIT_DEAD;
	return dead;
}

static int ptrace_detach(struct task_struct *child, unsigned int data)
{
	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);

	write_lock_irq(&tasklist_lock);
	/*
	 * We rely on ptrace_freeze_traced().  It can't be killed and
	 * untraced by another thread, it can't be a zombie.
	 */
	WARN_ON(!child->ptrace || child->exit_state);
	/*
	 * tasklist_lock avoids the race with wait_task_stopped(), see
	 * the comment in ptrace_resume().
	 */
	child->exit_code = data;
	__ptrace_detach(current, child);
	write_unlock_irq(&tasklist_lock);

	proc_ptrace_connector(child, PTRACE_DETACH);

	return 0;
}

/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing.
 */
void exit_ptrace(struct task_struct *tracer, struct list_head *dead)
{
	struct task_struct *p, *n;

	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		if (unlikely(p->ptrace & PT_EXITKILL))
			send_sig_info(SIGKILL, SEND_SIG_PRIV, p);

		if (__ptrace_detach(tracer, p))
			list_add(&p->ptrace_entry, dead);
	}
}

int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = ptrace_access_vm(tsk, src, buf, this_len, FOLL_FORCE);

		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = ptrace_access_vm(tsk, dst, buf, this_len,
				FOLL_FORCE | FOLL_WRITE);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
	unsigned long flags;
	int ret;

	ret = check_ptrace_options(data);
	if (ret)
		return ret;

	/* Avoid intermediate state when all opts are cleared */
	flags = child->ptrace;
	flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
	flags |= (data << PT_OPT_FLAG_SHIFT);
	child->ptrace = flags;

	return 0;
}

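/*
 * Illustrative userspace usage (a sketch, not compiled here):
 *
 *	ptrace(PTRACE_SETOPTIONS, pid, NULL,
 *	       (void *)(PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEEXEC));
 *	// with TRACESYSGOOD, syscall stops report SIGTRAP | 0x80, the
 *	// si_code that ptrace_get_syscall_info() below keys on
 */
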
static int ptrace_getsiginfo(struct task_struct *child, kernel_siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			copy_siginfo(info, child->last_siginfo);
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const kernel_siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			copy_siginfo(child->last_siginfo, info);
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

static int ptrace_peek_siginfo(struct task_struct *child,
				unsigned long addr,
				unsigned long data)
{
	struct ptrace_peeksiginfo_args arg;
	struct sigpending *pending;
	struct sigqueue *q;
	int ret, i;

	ret = copy_from_user(&arg, (void __user *) addr,
				sizeof(struct ptrace_peeksiginfo_args));
	if (ret)
		return -EFAULT;

	if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
		return -EINVAL; /* unknown flags */

	if (arg.nr < 0)
		return -EINVAL;

	/* Ensure arg.off fits in an unsigned long */
	if (arg.off > ULONG_MAX)
		return 0;

	if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
		pending = &child->signal->shared_pending;
	else
		pending = &child->pending;

	for (i = 0; i < arg.nr; ) {
		kernel_siginfo_t info;
		unsigned long off = arg.off + i;
		bool found = false;

		spin_lock_irq(&child->sighand->siglock);
		list_for_each_entry(q, &pending->list, list) {
			if (!off--) {
				found = true;
				copy_siginfo(&info, &q->info);
				break;
			}
		}
		spin_unlock_irq(&child->sighand->siglock);

		if (!found) /* beyond the end of the list */
			break;

#ifdef CONFIG_COMPAT
		if (unlikely(in_compat_syscall())) {
			compat_siginfo_t __user *uinfo = compat_ptr(data);

			if (copy_siginfo_to_user32(uinfo, &info)) {
				ret = -EFAULT;
				break;
			}

		} else
#endif
		{
			siginfo_t __user *uinfo = (siginfo_t __user *) data;

			if (copy_siginfo_to_user(uinfo, &info)) {
				ret = -EFAULT;
				break;
			}
		}

		data += sizeof(siginfo_t);
		i++;

		if (signal_pending(current))
			break;

		cond_resched();
	}

	if (i > 0)
		return i;

	return ret;
}

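/*
 * Illustrative userspace usage (a sketch, not compiled here): peek the
 * first pending signal of a stopped tracee without dequeueing it:
 *
 *	struct ptrace_peeksiginfo_args args = {
 *		.off = 0, .flags = 0, .nr = 1,
 *	};
 *	siginfo_t si;
 *	long n = ptrace(PTRACE_PEEKSIGINFO, pid, &args, &si);
 *	// n is the number of siginfo records copied out, or -1 on error
 */
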
#ifdef CONFIG_RSEQ
static long ptrace_get_rseq_configuration(struct task_struct *task,
					  unsigned long size, void __user *data)
{
	struct ptrace_rseq_configuration conf = {
		.rseq_abi_pointer = (u64)(uintptr_t)task->rseq,
		.rseq_abi_size = task->rseq_len,
		.signature = task->rseq_sig,
		.flags = 0,
	};

	size = min_t(unsigned long, size, sizeof(conf));
	if (copy_to_user(data, &conf, size))
		return -EFAULT;
	return sizeof(conf);
}
#endif

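/*
 * Illustrative userspace usage (a sketch, not compiled here); note that
 * 'addr' carries the buffer size for this request:
 *
 *	struct ptrace_rseq_configuration conf;
 *	long ret = ptrace(PTRACE_GET_RSEQ_CONFIGURATION, pid,
 *			  sizeof(conf), &conf);
 *	// on success ret is sizeof(conf); conf.rseq_abi_pointer is 0
 *	// if the tracee has no registered rseq area
 */
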
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)	((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)	0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif

static int ptrace_resume(struct task_struct *child, long request,
			 unsigned long data)
{
	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_task_syscall_work(child, SYSCALL_TRACE);
	else
		clear_task_syscall_work(child, SYSCALL_TRACE);

#if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU)
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_task_syscall_work(child, SYSCALL_EMU);
	else
		clear_task_syscall_work(child, SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		user_disable_single_step(child);
	}

	/*
	 * Change ->exit_code and ->state under siglock to avoid the race
	 * with wait_task_stopped() in between; a non-zero ->exit_code will
	 * wrongly look like another report from tracee.
	 *
	 * Note that we need siglock even if ->exit_code == data and/or this
	 * status was not reported yet, the new status must not be cleared by
	 * wait_task_stopped() after resume.
	 */
	spin_lock_irq(&child->sighand->siglock);
	child->exit_code = data;
	child->jobctl &= ~JOBCTL_TRACED;
	wake_up_state(child, __TASK_TRACED);
	spin_unlock_irq(&child->sighand->siglock);

	return 0;
}

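/*
 * Illustrative userspace counterpart (a sketch, not compiled here): the
 * usual resume-and-wait loop that drives these syscall stops:
 *
 *	for (;;) {
 *		if (ptrace(PTRACE_SYSCALL, pid, NULL, 0) == -1)
 *			break;			// 0: deliver no signal
 *		if (waitpid(pid, &status, 0) == -1 || WIFEXITED(status))
 *			break;
 *		// tracee is now in a syscall-entry or syscall-exit stop
 *	}
 */
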
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
	const struct user_regset *regset;
	int n;

	for (n = 0; n < view->n; ++n) {
		regset = view->regsets + n;
		if (regset->core_note_type == type)
			return regset;
	}

	return NULL;
}

static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
			 struct iovec *kiov)
{
	const struct user_regset_view *view = task_user_regset_view(task);
	const struct user_regset *regset = find_regset(view, type);
	int regset_no;

	if (!regset || (kiov->iov_len % regset->size) != 0)
		return -EINVAL;

	regset_no = regset - view->regsets;
	kiov->iov_len = min(kiov->iov_len,
			    (__kernel_size_t) (regset->n * regset->size));

	if (req == PTRACE_GETREGSET)
		return copy_regset_to_user(task, view, regset_no, 0,
					   kiov->iov_len, kiov->iov_base);
	else
		return copy_regset_from_user(task, view, regset_no, 0,
					     kiov->iov_len, kiov->iov_base);
}

/*
 * This is declared in linux/regset.h and defined in machine-dependent
 * code.  We put the export here, near the primary machine-neutral use,
 * to ensure no machine forgets it.
 */
EXPORT_SYMBOL_GPL(task_user_regset_view);

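/*
 * Illustrative userspace usage (a sketch, not compiled here; the regset
 * struct is per-architecture, user_regs_struct shown for x86-64):
 *
 *	#include <elf.h>
 *	#include <sys/uio.h>
 *	#include <sys/user.h>
 *
 *	struct user_regs_struct gpr;
 *	struct iovec iov = { .iov_base = &gpr, .iov_len = sizeof(gpr) };
 *	if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov) == 0)
 *		;	// iov.iov_len was clamped to the bytes filled in
 */
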
static unsigned long
ptrace_get_syscall_info_entry(struct task_struct *child, struct pt_regs *regs,
			      struct ptrace_syscall_info *info)
{
	unsigned long args[ARRAY_SIZE(info->entry.args)];
	int i;

	info->op = PTRACE_SYSCALL_INFO_ENTRY;
	info->entry.nr = syscall_get_nr(child, regs);
	syscall_get_arguments(child, regs, args);
	for (i = 0; i < ARRAY_SIZE(args); i++)
		info->entry.args[i] = args[i];

	/* args is the last field in struct ptrace_syscall_info.entry */
	return offsetofend(struct ptrace_syscall_info, entry.args);
}

static unsigned long
ptrace_get_syscall_info_seccomp(struct task_struct *child, struct pt_regs *regs,
				struct ptrace_syscall_info *info)
{
	/*
	 * As struct ptrace_syscall_info.entry is currently a subset
	 * of struct ptrace_syscall_info.seccomp, it makes sense to
	 * initialize that subset using ptrace_get_syscall_info_entry().
	 * This can be reconsidered in the future if these structures
	 * diverge significantly enough.
	 */
	ptrace_get_syscall_info_entry(child, regs, info);
	info->op = PTRACE_SYSCALL_INFO_SECCOMP;
	info->seccomp.ret_data = child->ptrace_message;

	/* ret_data is the last field in struct ptrace_syscall_info.seccomp */
	return offsetofend(struct ptrace_syscall_info, seccomp.ret_data);
}

static unsigned long
ptrace_get_syscall_info_exit(struct task_struct *child, struct pt_regs *regs,
			     struct ptrace_syscall_info *info)
{
	info->op = PTRACE_SYSCALL_INFO_EXIT;
	info->exit.rval = syscall_get_error(child, regs);
	info->exit.is_error = !!info->exit.rval;
	if (!info->exit.is_error)
		info->exit.rval = syscall_get_return_value(child, regs);

	/* is_error is the last field in struct ptrace_syscall_info.exit */
	return offsetofend(struct ptrace_syscall_info, exit.is_error);
}

static int
ptrace_get_syscall_info(struct task_struct *child, unsigned long user_size,
			void __user *datavp)
{
	struct pt_regs *regs = task_pt_regs(child);
	struct ptrace_syscall_info info = {
		.op = PTRACE_SYSCALL_INFO_NONE,
		.arch = syscall_get_arch(child),
		.instruction_pointer = instruction_pointer(regs),
		.stack_pointer = user_stack_pointer(regs),
	};
	unsigned long actual_size = offsetof(struct ptrace_syscall_info, entry);
	unsigned long write_size;

	/*
	 * This does not need lock_task_sighand() to access
	 * child->last_siginfo because ptrace_freeze_traced()
	 * called earlier by ptrace_check_attach() ensures that
	 * the tracee cannot go away and clear its last_siginfo.
	 */
	switch (child->last_siginfo ? child->last_siginfo->si_code : 0) {
	case SIGTRAP | 0x80:
		switch (child->ptrace_message) {
		case PTRACE_EVENTMSG_SYSCALL_ENTRY:
			actual_size = ptrace_get_syscall_info_entry(child, regs,
								    &info);
			break;
		case PTRACE_EVENTMSG_SYSCALL_EXIT:
			actual_size = ptrace_get_syscall_info_exit(child, regs,
								   &info);
			break;
		}
		break;
	case SIGTRAP | (PTRACE_EVENT_SECCOMP << 8):
		actual_size = ptrace_get_syscall_info_seccomp(child, regs,
							      &info);
		break;
	}

	write_size = min(actual_size, user_size);
	return copy_to_user(datavp, &info, write_size) ? -EFAULT : actual_size;
}
#endif /* CONFIG_HAVE_ARCH_TRACEHOOK */

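/*
 * Illustrative userspace usage (a sketch, not compiled here): at a
 * syscall or seccomp stop, 'addr' carries the buffer size:
 *
 *	struct ptrace_syscall_info info;
 *	long sz = ptrace(PTRACE_GET_SYSCALL_INFO, pid,
 *			 sizeof(info), &info);
 *	if (sz > 0 && info.op == PTRACE_SYSCALL_INFO_ENTRY)
 *		printf("syscall %lld\n", (long long)info.entry.nr);
 */
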
int ptrace_request(struct task_struct *child, long request,
		   unsigned long addr, unsigned long data)
{
	bool seized = child->ptrace & PT_SEIZED;
	int ret = -EIO;
	kernel_siginfo_t siginfo, *si;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;
	unsigned long flags;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, datalp);
		break;

	case PTRACE_PEEKSIGINFO:
		ret = ptrace_peek_siginfo(child, addr, data);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user(datavp, &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		ret = copy_siginfo_from_user(&siginfo, datavp);
		if (!ret)
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_GETSIGMASK: {
		sigset_t *mask;

		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (test_tsk_restore_sigmask(child))
			mask = &child->saved_sigmask;
		else
			mask = &child->blocked;

		if (copy_to_user(datavp, mask, sizeof(sigset_t)))
			ret = -EFAULT;
		else
			ret = 0;

		break;
	}

	case PTRACE_SETSIGMASK: {
		sigset_t new_set;

		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) {
			ret = -EFAULT;
			break;
		}

		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		/*
		 * Every thread does recalc_sigpending() after resume, so
		 * retarget_shared_pending() and recalc_sigpending() are not
		 * called here.
		 */
		spin_lock_irq(&child->sighand->siglock);
		child->blocked = new_set;
		spin_unlock_irq(&child->sighand->siglock);

		clear_tsk_restore_sigmask(child);

		ret = 0;
		break;
	}

	case PTRACE_INTERRUPT:
		/*
		 * Stop tracee without any side-effect on signal or job
		 * control.  At least one trap is guaranteed to happen
		 * after this request.  If @child is already trapped, the
		 * current trap is not disturbed and another trap will
		 * happen after the current trap is ended with PTRACE_CONT.
		 *
		 * The actual trap might not be PTRACE_EVENT_STOP trap but
		 * the pending condition is cleared regardless.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		/*
		 * INTERRUPT doesn't disturb existing trap sans one
		 * exception.  If ptracer issued LISTEN for the current
		 * STOP, this INTERRUPT should clear LISTEN and re-trap
		 * tracee into STOP.
		 */
		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);

		unlock_task_sighand(child, &flags);
		ret = 0;
		break;

	case PTRACE_LISTEN:
		/*
		 * Listen for events.  Tracee must be in STOP.  It's not
		 * resumed per-se but is not considered to be in TRACED by
		 * wait(2) or ptrace(2).  If an async event (e.g. group
		 * stop state change) happens, tracee will enter STOP trap
		 * again.  Alternatively, ptracer can issue INTERRUPT to
		 * finish listening and re-trap tracee into STOP.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		si = child->last_siginfo;
		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
			child->jobctl |= JOBCTL_LISTENING;
			/*
			 * If NOTIFY is set, it means event happened between
			 * start of this trap and now.  Trigger re-trap.
			 */
			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
				ptrace_signal_wake_up(child, true);
			ret = 0;
		}
		unlock_task_sighand(child, &flags);
		break;

	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		struct mm_struct *mm = get_task_mm(child);
		unsigned long tmp = 0;

		ret = -ESRCH;
		if (!mm)
			break;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = mm->context.interp_fdpic_loadmap;
			break;
		default:
			break;
		}
		mmput(mm);

		ret = put_user(tmp, datalp);
		break;
	}
#endif

	case PTRACE_SINGLESTEP:
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		send_sig_info(SIGKILL, SEND_SIG_NOINFO, child);
		return 0;

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET: {
		struct iovec kiov;
		struct iovec __user *uiov = datavp;

		if (!access_ok(uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
		    __get_user(kiov.iov_len, &uiov->iov_len))
			return -EFAULT;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}

	case PTRACE_GET_SYSCALL_INFO:
		ret = ptrace_get_syscall_info(child, addr, datavp);
		break;
#endif

	case PTRACE_SECCOMP_GET_FILTER:
		ret = seccomp_get_filter(child, addr, datavp);
		break;

	case PTRACE_SECCOMP_GET_METADATA:
		ret = seccomp_get_metadata(child, addr, datavp);
		break;

#ifdef CONFIG_RSEQ
	case PTRACE_GET_RSEQ_CONFIGURATION:
		ret = ptrace_get_rseq_configuration(child, addr, datavp);
		break;
#endif

	case PTRACE_SET_SYSCALL_USER_DISPATCH_CONFIG:
		ret = syscall_user_dispatch_set_config(child, addr, datavp);
		break;

	case PTRACE_GET_SYSCALL_USER_DISPATCH_CONFIG:
		ret = syscall_user_dispatch_get_config(child, addr, datavp);
		break;

	default:
		break;
	}

	return ret;
}

SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		unsigned long, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = find_get_task_by_vpid(pid);
	if (!child) {
		ret = -ESRCH;
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
	if (ret || request != PTRACE_DETACH)
		ptrace_unfreeze_traced(child);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}

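/*
 * Illustrative end-to-end session against this syscall (a sketch, not
 * compiled here):
 *
 *	ptrace(PTRACE_ATTACH, pid, NULL, NULL);
 *	waitpid(pid, &status, 0);		// signal-delivery stop
 *	long word = ptrace(PTRACE_PEEKDATA, pid, (void *)addr, NULL);
 *	ptrace(PTRACE_DETACH, pid, NULL, 0);	// resume with no signal
 */
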
int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	unsigned long tmp;
	int copied;

	copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
	if (copied != sizeof(tmp))
		return -EIO;
	return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	int copied;

	copied = ptrace_access_vm(tsk, addr, &data, sizeof(data),
			FOLL_FORCE | FOLL_WRITE);
	return (copied == sizeof(data)) ? 0 : -EIO;
}

#if defined CONFIG_COMPAT

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	kernel_siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = ptrace_access_vm(child, addr, &word, sizeof(word),
				FOLL_FORCE);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = ptrace_access_vm(child, addr, &data, sizeof(data),
				FOLL_FORCE | FOLL_WRITE);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		ret = copy_siginfo_from_user32(
			&siginfo, (struct compat_siginfo __user *) datap);
		if (!ret)
			ret = ptrace_setsiginfo(child, &siginfo);
		break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct compat_iovec __user *uiov =
			(struct compat_iovec __user *) datap;
		compat_uptr_t ptr;
		compat_size_t len;

		if (!access_ok(uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(ptr, &uiov->iov_base) ||
		    __get_user(len, &uiov->iov_len))
			return -EFAULT;

		kiov.iov_base = compat_ptr(ptr);
		kiov.iov_len = len;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}

COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
		       compat_long_t, addr, compat_long_t, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = find_get_task_by_vpid(pid);
	if (!child) {
		ret = -ESRCH;
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (!ret) {
		ret = compat_arch_ptrace(child, request, addr, data);
		if (ret || request != PTRACE_DETACH)
			ptrace_unfreeze_traced(child);
	}

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
#endif	/* CONFIG_COMPAT */