// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/uio.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/cn_proc.h>
#include <linux/compat.h>
#include <linux/sched/signal.h>

#include <asm/syscall.h>	/* for syscall_get_* */
/*
 * Access another process' address space via ptrace.
 * Source/target buffer must be in kernel space.
 * Do not walk the page table directly; use get_user_pages().
 */
int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
		     void *buf, int len, unsigned int gup_flags)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	if (!tsk->ptrace ||
	    (current != tsk->parent) ||
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptracer_capable(tsk, mm->user_ns))) {
		mmput(mm);
		return 0;
	}

	ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
	mmput(mm);

	return ret;
}
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
		   const struct cred *ptracer_cred)
{
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	child->parent = new_parent;
	child->ptracer_cred = get_cred(ptracer_cred);
}
/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	__ptrace_link(child, new_parent, current_cred());
}
/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Remove @child from the ptrace list, move it back to the original parent,
 * and restore the execution state so that it conforms to the group stop
 * state.
 *
 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
 * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
 * If the ptracer is exiting, the ptracee can be in any state.
 *
 * After detach, the ptracee should be in a state which conforms to the
 * group stop.  If the group is stopped or in the process of stopping, the
 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 * up from TASK_TRACED.
 *
 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
 * to but in the opposite direction of what happens while attaching to a
 * stopped task.  However, in this direction, the intermediate RUNNING
 * state is not hidden even from the current ptracer and if it immediately
 * re-attaches and performs a WNOHANG wait(2), it may fail.
 *
 * CONTEXT:
 * write_lock_irq(tasklist_lock)
 */
void __ptrace_unlink(struct task_struct *child)
{
	const struct cred *old_cred;
	BUG_ON(!child->ptrace);

	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);
	old_cred = child->ptracer_cred;
	child->ptracer_cred = NULL;
	put_cred(old_cred);

	spin_lock(&child->sighand->siglock);
	child->ptrace = 0;
	/*
	 * Clear all pending traps and TRAPPING.  TRAPPING should be
	 * cleared regardless of JOBCTL_STOP_PENDING.  Do it explicitly.
	 */
	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
	task_clear_jobctl_trapping(child);

	/*
	 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
	 * @child isn't dead.
	 */
	if (!(child->flags & PF_EXITING) &&
	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
	     child->signal->group_stop_count)) {
		child->jobctl |= JOBCTL_STOP_PENDING;

		/*
		 * This is only possible if this thread was cloned by the
		 * traced task running in the stopped group, set the signal
		 * for the future reports.
		 * FIXME: we should change ptrace_init_task() to handle this
		 * case.
		 */
		if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
			child->jobctl |= SIGSTOP;
	}

	/*
	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
	 * @child in the butt.  Note that @resume should be used iff @child
	 * is in TASK_TRACED; otherwise, we might unduly disrupt
	 * TASK_KILLABLE sleeps.
	 */
	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
		ptrace_signal_wake_up(child, true);

	spin_unlock(&child->sighand->siglock);
}
/* Ensure that nothing can wake it up, even SIGKILL */
static bool ptrace_freeze_traced(struct task_struct *task)
{
	bool ret = false;

	/* Lockless, nobody but us can set this flag */
	if (task->jobctl & JOBCTL_LISTENING)
		return ret;

	spin_lock_irq(&task->sighand->siglock);
	if (task_is_traced(task) && !__fatal_signal_pending(task)) {
		task->state = __TASK_TRACED;
		ret = true;
	}
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}
static void ptrace_unfreeze_traced(struct task_struct *task)
{
	if (task->state != __TASK_TRACED)
		return;

	WARN_ON(!task->ptrace || task->parent != current);

	/*
	 * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
	 * Recheck state under the lock to close this race.
	 */
	spin_lock_irq(&task->sighand->siglock);
	if (task->state == __TASK_TRACED) {
		if (__fatal_signal_pending(task))
			wake_up_state(task, __TASK_TRACED);
		else
			task->state = TASK_TRACED;
	}
	spin_unlock_irq(&task->sighand->siglock);
}
/**
 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 * @child: ptracee to check for
 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 *
 * Check whether @child is being ptraced by %current and ready for further
 * ptrace operations.  If @ignore_state is %false, @child also should be in
 * %TASK_TRACED state and on return the child is guaranteed to be traced
 * and not executing.  If @ignore_state is %true, @child can be in any
 * state.
 *
 * CONTEXT:
 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 *
 * RETURNS:
 * 0 on success, -ESRCH if %child is not ready.
 */
static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks.  After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if (child->ptrace && child->parent == current) {
		WARN_ON(child->state == __TASK_TRACED);
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		if (ignore_state || ptrace_freeze_traced(child))
			ret = 0;
	}
	read_unlock(&tasklist_lock);

	if (!ret && !ignore_state) {
		if (!wait_task_inactive(child, __TASK_TRACED)) {
			/*
			 * This can only happen if may_ptrace_stop() fails and
			 * ptrace_stop() changes ->state back to TASK_RUNNING,
			 * so we should not worry about leaking __TASK_TRACED.
			 */
			WARN_ON(child->state == __TASK_TRACED);
			ret = -ESRCH;
		}
	}

	return ret;
}
static bool ptrace_has_cap(const struct cred *cred, struct user_namespace *ns,
			   unsigned int mode)
{
	int ret;

	if (mode & PTRACE_MODE_NOAUDIT)
		ret = security_capable(cred, ns, CAP_SYS_PTRACE, CAP_OPT_NOAUDIT);
	else
		ret = security_capable(cred, ns, CAP_SYS_PTRACE, CAP_OPT_NONE);

	return ret == 0;
}
/* Returns 0 on success, -errno on denial. */
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	const struct cred *cred = current_cred(), *tcred;
	struct mm_struct *mm;
	kuid_t caller_uid;
	kgid_t caller_gid;

	if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
		WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
		return -EPERM;
	}

	/* May we inspect the given task?
	 * This check is used both for attaching with ptrace
	 * and for allowing access to sensitive information in /proc.
	 *
	 * ptrace_attach denies several cases that /proc allows
	 * because setting up the necessary parent/child relationship
	 * or halting the specified task is impossible.
	 */

	/* Don't let security modules deny introspection */
	if (same_thread_group(task, current))
		return 0;
	rcu_read_lock();
	if (mode & PTRACE_MODE_FSCREDS) {
		caller_uid = cred->fsuid;
		caller_gid = cred->fsgid;
	} else {
		/*
		 * Using the euid would make more sense here, but something
		 * in userland might rely on the old behavior, and this
		 * shouldn't be a security problem since
		 * PTRACE_MODE_REALCREDS implies that the caller explicitly
		 * used a syscall that requests access to another process
		 * (and not a filesystem syscall to procfs).
		 */
		caller_uid = cred->uid;
		caller_gid = cred->gid;
	}
	tcred = __task_cred(task);
	if (uid_eq(caller_uid, tcred->euid) &&
	    uid_eq(caller_uid, tcred->suid) &&
	    uid_eq(caller_uid, tcred->uid)  &&
	    gid_eq(caller_gid, tcred->egid) &&
	    gid_eq(caller_gid, tcred->sgid) &&
	    gid_eq(caller_gid, tcred->gid))
		goto ok;
	if (ptrace_has_cap(cred, tcred->user_ns, mode))
		goto ok;
	rcu_read_unlock();
	return -EPERM;
ok:
	rcu_read_unlock();
	/*
	 * If a task drops privileges and becomes nondumpable (through a syscall
	 * like setresuid()) while we are trying to access it, we must ensure
	 * that the dumpability is read after the credentials; otherwise,
	 * we may be able to attach to a task that we shouldn't be able to
	 * attach to (as if the task had dropped privileges without becoming
	 * nondumpable).
	 * Pairs with a write barrier in commit_creds().
	 */
	smp_rmb();
	mm = task->mm;
	if (mm &&
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptrace_has_cap(cred, mm->user_ns, mode)))
		return -EPERM;

	return security_ptrace_access_check(task, mode);
}
bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	int err;

	task_lock(task);
	err = __ptrace_may_access(task, mode);
	task_unlock(task);
	return !err;
}
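
/*
 * Illustrative sketch (not kernel code): how userspace ends up in the
 * access checks above.  A direct ptrace(2) request is checked with a
 * *_REALCREDS mode, whereas reads of another task's /proc files go
 * through a *_FSCREDS mode.
 *
 *	#include <sys/ptrace.h>
 *
 *	// Fails with EPERM when __ptrace_may_access() denies the caller.
 *	long err = ptrace(PTRACE_ATTACH, pid, NULL, NULL);
 */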
static int ptrace_attach(struct task_struct *task, long request,
			 unsigned long addr,
			 unsigned long flags)
{
	bool seize = (request == PTRACE_SEIZE);
	int retval;

	retval = -EIO;
	if (seize) {
		if (addr != 0)
			goto out;
		if (flags & ~(unsigned long)PTRACE_O_MASK)
			goto out;
		flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
	} else {
		flags = PT_PTRACED;
	}

	audit_ptrace(task);

	retval = -EPERM;
	if (unlikely(task->flags & PF_KTHREAD))
		goto out;
	if (same_thread_group(task, current))
		goto out;

	/*
	 * Protect exec's credential calculations against our interference;
	 * SUID, SGID and LSM creds get determined differently
	 * under ptrace.
	 */
	retval = -ERESTARTNOINTR;
	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
		goto out;

	task_lock(task);
	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
	task_unlock(task);
	if (retval)
		goto unlock_creds;

	write_lock_irq(&tasklist_lock);
	retval = -EPERM;
	if (unlikely(task->exit_state))
		goto unlock_tasklist;
	if (task->ptrace)
		goto unlock_tasklist;

	task->ptrace = flags;

	ptrace_link(task, current);

	/* SEIZE doesn't trap tracee on attach */
	if (!seize)
		send_sig_info(SIGSTOP, SEND_SIG_PRIV, task);

	spin_lock(&task->sighand->siglock);

	/*
	 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
	 * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
	 * will be cleared if the child completes the transition or any
	 * event which clears the group stop states happens.  We'll wait
	 * for the transition to complete before returning from this
	 * function.
	 *
	 * This hides STOPPED -> RUNNING -> TRACED transition from the
	 * attaching thread but a different thread in the same group can
	 * still observe the transient RUNNING state.  IOW, if another
	 * thread's WNOHANG wait(2) on the stopped tracee races against
	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
	 *
	 * The following task_is_stopped() test is safe as both transitions
	 * in and out of STOPPED are protected by siglock.
	 */
	if (task_is_stopped(task) &&
	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
		signal_wake_up_state(task, __TASK_STOPPED);

	spin_unlock(&task->sighand->siglock);

	retval = 0;
unlock_tasklist:
	write_unlock_irq(&tasklist_lock);
unlock_creds:
	mutex_unlock(&task->signal->cred_guard_mutex);
out:
	if (!retval) {
		/*
		 * We do not bother to change retval or clear JOBCTL_TRAPPING
		 * if wait_on_bit() was interrupted by SIGKILL. The tracer will
		 * not return to user-mode, it will exit and clear this bit in
		 * __ptrace_unlink() if it wasn't already cleared by the tracee;
		 * and until then nobody can ptrace this task.
		 */
		wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
		proc_ptrace_connector(task, PTRACE_ATTACH);
	}

	return retval;
}
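
/*
 * Userspace sketch (illustrative only, not part of this file): the attach
 * handshake implemented above, as seen by a tracer.  PTRACE_ATTACH sends
 * SIGSTOP, so the tracer must wait for the resulting stop; PTRACE_SEIZE
 * attaches without trapping the tracee.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	int attach_and_wait(pid_t pid)
 *	{
 *		int status;
 *
 *		if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1)
 *			return -1;
 *		if (waitpid(pid, &status, 0) == -1)
 *			return -1;
 *		return WIFSTOPPED(status) ? 0 : -1;
 *	}
 */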
/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
	int ret = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Are we already being traced? */
	if (!current->ptrace) {
		ret = security_ptrace_traceme(current->parent);
		/*
		 * Check PF_EXITING to ensure ->real_parent has not passed
		 * exit_ptrace(). Otherwise we don't report the error but
		 * pretend ->real_parent untraces us right after return.
		 */
		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
			current->ptrace = PT_PTRACED;
			ptrace_link(current, current->real_parent);
		}
	}
	write_unlock_irq(&tasklist_lock);

	return ret;
}
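
/*
 * Userspace sketch (illustrative only): the usual PTRACE_TRACEME pattern.
 * The child requests tracing by its parent, then raises SIGSTOP so the
 * parent can take control; a subsequent execve() stops it with SIGTRAP.
 *
 *	#include <sys/ptrace.h>
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *		raise(SIGSTOP);		// parent waitpid()s for this stop
 *		execlp("ls", "ls", NULL);
 *	}
 */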
/*
 * Called with irqs disabled, returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
	int ret;

	spin_lock(&sigh->siglock);
	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
	spin_unlock(&sigh->siglock);

	return ret;
}
/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping.  Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now, in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
	bool dead;

	__ptrace_unlink(p);

	if (p->exit_state != EXIT_ZOMBIE)
		return false;

	dead = !thread_group_leader(p);

	if (!dead && thread_group_empty(p)) {
		if (!same_thread_group(p->real_parent, tracer))
			dead = do_notify_parent(p, p->exit_signal);
		else if (ignoring_children(tracer->sighand)) {
			__wake_up_parent(p, tracer);
			dead = true;
		}
	}
	/* Mark it as in the process of being reaped. */
	if (dead)
		p->exit_state = EXIT_DEAD;
	return dead;
}
static int ptrace_detach(struct task_struct *child, unsigned int data)
{
	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);

	write_lock_irq(&tasklist_lock);
	/*
	 * We rely on ptrace_freeze_traced(). It can't be killed and
	 * untraced by another thread, it can't be a zombie.
	 */
	WARN_ON(!child->ptrace || child->exit_state);
	/*
	 * tasklist_lock avoids the race with wait_task_stopped(), see
	 * the comment in ptrace_resume().
	 */
	child->exit_code = data;
	__ptrace_detach(current, child);
	write_unlock_irq(&tasklist_lock);

	proc_ptrace_connector(child, PTRACE_DETACH);

	return 0;
}
/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing.
 */
void exit_ptrace(struct task_struct *tracer, struct list_head *dead)
{
	struct task_struct *p, *n;

	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		if (unlikely(p->ptrace & PT_EXITKILL))
			send_sig_info(SIGKILL, SEND_SIG_PRIV, p);

		if (__ptrace_detach(tracer, p))
			list_add(&p->ptrace_entry, dead);
	}
}
int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = ptrace_access_vm(tsk, src, buf, this_len, FOLL_FORCE);

		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}
int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = ptrace_access_vm(tsk, dst, buf, this_len,
					  FOLL_FORCE | FOLL_WRITE);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}
static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
	unsigned flags;

	if (data & ~(unsigned long)PTRACE_O_MASK)
		return -EINVAL;

	if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
		if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) ||
		    !IS_ENABLED(CONFIG_SECCOMP))
			return -EINVAL;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (seccomp_mode(&current->seccomp) != SECCOMP_MODE_DISABLED ||
		    current->ptrace & PT_SUSPEND_SECCOMP)
			return -EPERM;
	}

	/* Avoid intermediate state when all opts are cleared */
	flags = child->ptrace;
	flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
	flags |= (data << PT_OPT_FLAG_SHIFT);
	child->ptrace = flags;

	return 0;
}
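
/*
 * Userspace sketch (illustrative only): options arrive in 'data' and are
 * stored shifted by PT_OPT_FLAG_SHIFT above.  A tracer typically sets
 * them right after the first stop, e.g.:
 *
 *	#include <sys/ptrace.h>
 *
 *	ptrace(PTRACE_SETOPTIONS, pid, NULL,
 *	       (void *)(PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEEXIT));
 */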
static int ptrace_getsiginfo(struct task_struct *child, kernel_siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			copy_siginfo(info, child->last_siginfo);
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}
static int ptrace_setsiginfo(struct task_struct *child, const kernel_siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			copy_siginfo(child->last_siginfo, info);
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}
static int ptrace_peek_siginfo(struct task_struct *child,
				unsigned long addr,
				unsigned long data)
{
	struct ptrace_peeksiginfo_args arg;
	struct sigpending *pending;
	struct sigqueue *q;
	int ret, i;

	ret = copy_from_user(&arg, (void __user *) addr,
				sizeof(struct ptrace_peeksiginfo_args));
	if (ret)
		return -EFAULT;

	if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
		return -EINVAL; /* unknown flags */

	if (arg.nr < 0)
		return -EINVAL;

	/* Ensure arg.off fits in an unsigned long */
	if (arg.off > ULONG_MAX)
		return 0;

	if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
		pending = &child->signal->shared_pending;
	else
		pending = &child->pending;

	for (i = 0; i < arg.nr; ) {
		kernel_siginfo_t info;
		unsigned long off = arg.off + i;
		bool found = false;

		spin_lock_irq(&child->sighand->siglock);
		list_for_each_entry(q, &pending->list, list) {
			if (!off--) {
				found = true;
				copy_siginfo(&info, &q->info);
				break;
			}
		}
		spin_unlock_irq(&child->sighand->siglock);

		if (!found) /* beyond the end of the list */
			break;

#ifdef CONFIG_COMPAT
		if (unlikely(in_compat_syscall())) {
			compat_siginfo_t __user *uinfo = compat_ptr(data);

			if (copy_siginfo_to_user32(uinfo, &info)) {
				ret = -EFAULT;
				break;
			}

		} else
#endif
		{
			siginfo_t __user *uinfo = (siginfo_t __user *) data;

			if (copy_siginfo_to_user(uinfo, &info)) {
				ret = -EFAULT;
				break;
			}
		}

		data += sizeof(siginfo_t);
		i++;

		if (signal_pending(current))
			break;

		cond_resched();
	}

	if (i > 0)
		return i;

	return ret;
}
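
/*
 * Userspace sketch (illustrative only): peeking at the first few pending
 * signals of a stopped tracee.  The kernel copies at most 'nr' siginfo
 * records and returns how many were written.
 *
 *	#include <sys/ptrace.h>
 *	#include <linux/ptrace.h>
 *	#include <signal.h>
 *
 *	struct ptrace_peeksiginfo_args args = {
 *		.off = 0, .flags = 0, .nr = 4,
 *	};
 *	siginfo_t infos[4];
 *	long n = ptrace(PTRACE_PEEKSIGINFO, pid, &args, infos);
 */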
#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)		0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif
static int ptrace_resume(struct task_struct *child, long request,
			 unsigned long data)
{
	bool need_siglock;

	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		user_disable_single_step(child);
	}

	/*
	 * Change ->exit_code and ->state under siglock to avoid the race
	 * with wait_task_stopped() in between; a non-zero ->exit_code will
	 * wrongly look like another report from tracee.
	 *
	 * Note that we need siglock even if ->exit_code == data and/or this
	 * status was not reported yet, the new status must not be cleared by
	 * wait_task_stopped() after resume.
	 *
	 * If data == 0 we do not care if wait_task_stopped() reports the old
	 * status and clears the code too; this can't race with the tracee, it
	 * takes siglock after resume.
	 */
	need_siglock = data && !thread_group_empty(current);
	if (need_siglock)
		spin_lock_irq(&child->sighand->siglock);
	child->exit_code = data;
	wake_up_state(child, __TASK_TRACED);
	if (need_siglock)
		spin_unlock_irq(&child->sighand->siglock);

	return 0;
}
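
/*
 * Userspace sketch (illustrative only): the classic syscall-tracing loop
 * that ptrace_resume() serves.  PTRACE_SYSCALL resumes the tracee until
 * the next syscall entry or exit; a nonzero 'data' would instead inject
 * that signal on resume.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	int status;
 *	for (;;) {
 *		if (ptrace(PTRACE_SYSCALL, pid, NULL, NULL) == -1)
 *			break;
 *		if (waitpid(pid, &status, 0) == -1 || WIFEXITED(status))
 *			break;
 *		// inspect registers here, once per entry/exit stop
 *	}
 */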
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
	const struct user_regset *regset;
	int n;

	for (n = 0; n < view->n; ++n) {
		regset = view->regsets + n;
		if (regset->core_note_type == type)
			return regset;
	}

	return NULL;
}
static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
			 struct iovec *kiov)
{
	const struct user_regset_view *view = task_user_regset_view(task);
	const struct user_regset *regset = find_regset(view, type);
	int regset_no;

	if (!regset || (kiov->iov_len % regset->size) != 0)
		return -EINVAL;

	regset_no = regset - view->regsets;
	kiov->iov_len = min(kiov->iov_len,
			    (__kernel_size_t) (regset->n * regset->size));

	if (req == PTRACE_GETREGSET)
		return copy_regset_to_user(task, view, regset_no, 0,
					   kiov->iov_len, kiov->iov_base);
	else
		return copy_regset_from_user(task, view, regset_no, 0,
					     kiov->iov_len, kiov->iov_base);
}
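
/*
 * Userspace sketch (illustrative only): PTRACE_GETREGSET as served by
 * ptrace_regset() above.  'addr' selects the regset by its ELF core note
 * type and 'data' points to an iovec whose length the kernel may shrink.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/uio.h>
 *	#include <sys/user.h>
 *	#include <elf.h>
 *
 *	struct user_regs_struct regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *	ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
 */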
/*
 * This is declared in linux/regset.h and defined in machine-dependent
 * code.  We put the export here, near the primary machine-neutral use,
 * to ensure no machine forgets it.
 */
EXPORT_SYMBOL_GPL(task_user_regset_view);
static unsigned long
ptrace_get_syscall_info_entry(struct task_struct *child, struct pt_regs *regs,
			      struct ptrace_syscall_info *info)
{
	unsigned long args[ARRAY_SIZE(info->entry.args)];
	int i;

	info->op = PTRACE_SYSCALL_INFO_ENTRY;
	info->entry.nr = syscall_get_nr(child, regs);
	syscall_get_arguments(child, regs, args);
	for (i = 0; i < ARRAY_SIZE(args); i++)
		info->entry.args[i] = args[i];

	/* args is the last field in struct ptrace_syscall_info.entry */
	return offsetofend(struct ptrace_syscall_info, entry.args);
}
static unsigned long
ptrace_get_syscall_info_seccomp(struct task_struct *child, struct pt_regs *regs,
				struct ptrace_syscall_info *info)
{
	/*
	 * As struct ptrace_syscall_info.entry is currently a subset
	 * of struct ptrace_syscall_info.seccomp, it makes sense to
	 * initialize that subset using ptrace_get_syscall_info_entry().
	 * This can be reconsidered in the future if these structures
	 * diverge significantly enough.
	 */
	ptrace_get_syscall_info_entry(child, regs, info);
	info->op = PTRACE_SYSCALL_INFO_SECCOMP;
	info->seccomp.ret_data = child->ptrace_message;

	/* ret_data is the last field in struct ptrace_syscall_info.seccomp */
	return offsetofend(struct ptrace_syscall_info, seccomp.ret_data);
}
static unsigned long
ptrace_get_syscall_info_exit(struct task_struct *child, struct pt_regs *regs,
			     struct ptrace_syscall_info *info)
{
	info->op = PTRACE_SYSCALL_INFO_EXIT;
	info->exit.rval = syscall_get_error(child, regs);
	info->exit.is_error = !!info->exit.rval;
	if (!info->exit.is_error)
		info->exit.rval = syscall_get_return_value(child, regs);

	/* is_error is the last field in struct ptrace_syscall_info.exit */
	return offsetofend(struct ptrace_syscall_info, exit.is_error);
}
static int
ptrace_get_syscall_info(struct task_struct *child, unsigned long user_size,
			void __user *datavp)
{
	struct pt_regs *regs = task_pt_regs(child);
	struct ptrace_syscall_info info = {
		.op = PTRACE_SYSCALL_INFO_NONE,
		.arch = syscall_get_arch(child),
		.instruction_pointer = instruction_pointer(regs),
		.stack_pointer = user_stack_pointer(regs),
	};
	unsigned long actual_size = offsetof(struct ptrace_syscall_info, entry);
	unsigned long write_size;

	/*
	 * This does not need lock_task_sighand() to access
	 * child->last_siginfo because ptrace_freeze_traced()
	 * called earlier by ptrace_check_attach() ensures that
	 * the tracee cannot go away and clear its last_siginfo.
	 */
	switch (child->last_siginfo ? child->last_siginfo->si_code : 0) {
	case SIGTRAP | 0x80:
		switch (child->ptrace_message) {
		case PTRACE_EVENTMSG_SYSCALL_ENTRY:
			actual_size = ptrace_get_syscall_info_entry(child, regs,
								    &info);
			break;
		case PTRACE_EVENTMSG_SYSCALL_EXIT:
			actual_size = ptrace_get_syscall_info_exit(child, regs,
								   &info);
			break;
		}
		break;
	case SIGTRAP | (PTRACE_EVENT_SECCOMP << 8):
		actual_size = ptrace_get_syscall_info_seccomp(child, regs,
							      &info);
		break;
	}

	write_size = min(actual_size, user_size);
	return copy_to_user(datavp, &info, write_size) ? -EFAULT : actual_size;
}
#endif /* CONFIG_HAVE_ARCH_TRACEHOOK */
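
/*
 * Userspace sketch (illustrative only): PTRACE_GET_SYSCALL_INFO.  'addr'
 * carries the buffer size; the return value is the size the kernel
 * actually wanted to write, which may exceed it on newer kernels.
 *
 *	#include <sys/ptrace.h>
 *	#include <linux/ptrace.h>
 *
 *	struct ptrace_syscall_info info;
 *	long sz = ptrace(PTRACE_GET_SYSCALL_INFO, pid,
 *			 (void *)sizeof(info), &info);
 *	if (sz > 0 && info.op == PTRACE_SYSCALL_INFO_ENTRY)
 *		handle_entry(info.entry.nr);	// hypothetical handler
 */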
int ptrace_request(struct task_struct *child, long request,
		   unsigned long addr, unsigned long data)
{
	bool seized = child->ptrace & PT_SEIZED;
	int ret = -EIO;
	kernel_siginfo_t siginfo, *si;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;
	unsigned long flags;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, datalp);
		break;

	case PTRACE_PEEKSIGINFO:
		ret = ptrace_peek_siginfo(child, addr, data);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user(datavp, &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		ret = copy_siginfo_from_user(&siginfo, datavp);
		if (!ret)
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_GETSIGMASK: {
		sigset_t *mask;

		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (test_tsk_restore_sigmask(child))
			mask = &child->saved_sigmask;
		else
			mask = &child->blocked;

		if (copy_to_user(datavp, mask, sizeof(sigset_t)))
			ret = -EFAULT;
		else
			ret = 0;

		break;
	}

	case PTRACE_SETSIGMASK: {
		sigset_t new_set;

		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) {
			ret = -EFAULT;
			break;
		}

		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		/*
		 * Every thread does recalc_sigpending() after resume, so
		 * retarget_shared_pending() and recalc_sigpending() are not
		 * called here.
		 */
		spin_lock_irq(&child->sighand->siglock);
		child->blocked = new_set;
		spin_unlock_irq(&child->sighand->siglock);

		clear_tsk_restore_sigmask(child);

		ret = 0;
		break;
	}

	case PTRACE_INTERRUPT:
		/*
		 * Stop tracee without any side-effect on signal or job
		 * control.  At least one trap is guaranteed to happen
		 * after this request.  If @child is already trapped, the
		 * current trap is not disturbed and another trap will
		 * happen after the current trap is ended with PTRACE_CONT.
		 *
		 * The actual trap might not be PTRACE_EVENT_STOP trap but
		 * the pending condition is cleared regardless.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		/*
		 * INTERRUPT doesn't disturb existing trap sans one
		 * exception.  If ptracer issued LISTEN for the current
		 * STOP, this INTERRUPT should clear LISTEN and re-trap
		 * tracee into STOP.
		 */
		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);

		unlock_task_sighand(child, &flags);
		ret = 0;
		break;

	case PTRACE_LISTEN:
		/*
		 * Listen for events.  Tracee must be in STOP.  It's not
		 * resumed per se but is not considered to be in TRACED by
		 * wait(2) or ptrace(2).  If an async event (e.g. group
		 * stop state change) happens, tracee will enter STOP trap
		 * again.  Alternatively, ptracer can issue INTERRUPT to
		 * finish listening and re-trap tracee into STOP.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		si = child->last_siginfo;
		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
			child->jobctl |= JOBCTL_LISTENING;
			/*
			 * If NOTIFY is set, it means event happened between
			 * start of this trap and now.  Trigger re-trap.
			 */
			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
				ptrace_signal_wake_up(child, true);
			ret = 0;
		}
		unlock_task_sighand(child, &flags);
		break;

	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		struct mm_struct *mm = get_task_mm(child);
		unsigned long tmp = 0;

		ret = -ESRCH;
		if (!mm)
			break;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = mm->context.interp_fdpic_loadmap;
			break;
		default:
			break;
		}
		mmput(mm);

		ret = put_user(tmp, datalp);
		break;
	}
#endif

#ifdef PTRACE_SINGLESTEP
	case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		if (child->exit_state)	/* already dead */
			return 0;
		return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET: {
		struct iovec kiov;
		struct iovec __user *uiov = datavp;

		if (!access_ok(uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
		    __get_user(kiov.iov_len, &uiov->iov_len))
			return -EFAULT;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}

	case PTRACE_GET_SYSCALL_INFO:
		ret = ptrace_get_syscall_info(child, addr, datavp);
		break;
#endif

	case PTRACE_SECCOMP_GET_FILTER:
		ret = seccomp_get_filter(child, addr, datavp);
		break;

	case PTRACE_SECCOMP_GET_METADATA:
		ret = seccomp_get_metadata(child, addr, datavp);
		break;

	default:
		break;
	}

	return ret;
}
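
/*
 * Userspace sketch (illustrative only): PTRACE_INTERRUPT and PTRACE_LISTEN
 * only work on tracees attached with PTRACE_SEIZE, as enforced by the
 * 'seized' checks above.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	int status;
 *
 *	ptrace(PTRACE_SEIZE, pid, NULL, NULL);
 *	ptrace(PTRACE_INTERRUPT, pid, NULL, NULL);	// trap it into STOP
 *	waitpid(pid, &status, 0);
 *	ptrace(PTRACE_LISTEN, pid, NULL, NULL);		// wait for async events
 */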
#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)	do { } while (0)
#endif

SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		unsigned long, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		if (!ret)
			arch_ptrace_attach(current);
		goto out;
	}

	child = find_get_task_by_vpid(pid);
	if (!child) {
		ret = -ESRCH;
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
	if (ret || request != PTRACE_DETACH)
		ptrace_unfreeze_traced(child);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	unsigned long tmp;
	int copied;

	copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
	if (copied != sizeof(tmp))
		return -EIO;
	return put_user(tmp, (unsigned long __user *)data);
}
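
/*
 * Userspace sketch (illustrative only): PTRACE_PEEKDATA returns the word
 * itself, so callers must clear errno to distinguish a legitimate -1
 * word from an error.
 *
 *	#include <sys/ptrace.h>
 *	#include <errno.h>
 *	#include <stdio.h>
 *
 *	errno = 0;
 *	long word = ptrace(PTRACE_PEEKDATA, pid, (void *)addr, NULL);
 *	if (word == -1 && errno != 0)
 *		perror("PTRACE_PEEKDATA");
 */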
int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	int copied;

	copied = ptrace_access_vm(tsk, addr, &data, sizeof(data),
			FOLL_FORCE | FOLL_WRITE);
	return (copied == sizeof(data)) ? 0 : -EIO;
}
#if defined CONFIG_COMPAT

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	kernel_siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = ptrace_access_vm(child, addr, &word, sizeof(word),
				       FOLL_FORCE);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = ptrace_access_vm(child, addr, &data, sizeof(data),
				       FOLL_FORCE | FOLL_WRITE);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		ret = copy_siginfo_from_user32(
			&siginfo, (struct compat_siginfo __user *) datap);
		if (!ret)
			ret = ptrace_setsiginfo(child, &siginfo);
		break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct compat_iovec __user *uiov =
			(struct compat_iovec __user *) datap;
		compat_uptr_t ptr;
		compat_size_t len;

		if (!access_ok(uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(ptr, &uiov->iov_base) ||
		    __get_user(len, &uiov->iov_len))
			return -EFAULT;

		kiov.iov_base = compat_ptr(ptr);
		kiov.iov_len = len;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}
COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
		       compat_long_t, addr, compat_long_t, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = find_get_task_by_vpid(pid);
	if (!child) {
		ret = -ESRCH;
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (!ret) {
		ret = compat_arch_ptrace(child, request, addr, data);
		if (ret || request != PTRACE_DETACH)
			ptrace_unfreeze_traced(child);
	}

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
#endif	/* CONFIG_COMPAT */