1 /* Copyright (c) 2005-2007, Google Inc.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above
11 * copyright notice, this list of conditions and the following disclaimer
12 * in the documentation and/or other materials provided with the
14 * * Neither the name of Google Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 * Author: Markus Gutschke
#include "base/linuxthreads.h"

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdarg.h>
#include <stddef.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/wait.h>

#include "base/linux_syscall_support.h"
#include "base/thread_lister.h"
#ifndef CLONE_UNTRACED
#define CLONE_UNTRACED 0x00800000
#endif

/* Synchronous signals that should not be blocked while in the lister thread.
 * The lister installs its own handlers for these so that a fault inside it
 * can still wake up and resume the traced threads before dying.
 */
static const int sync_signals[]  = { SIGABRT, SIGILL, SIGFPE, SIGSEGV, SIGBUS,
                                     SIGXCPU, SIGXFSZ };
/* itoa() is not a standard function, and we cannot safely call printf()
 * after suspending threads. So, we just implement our own copy. A
 * recursive approach is the easiest here.
 * Writes the decimal representation of "i" (NUL-terminated) into "buf"
 * and returns a pointer to the terminating NUL.
 */

/* Helper on the unsigned magnitude; recursion emits most-significant
 * digits first.
 */
static char *local_itoa_unsigned(char *buf, unsigned int u) {
  if (u >= 10)
    buf = local_itoa_unsigned(buf, u/10);
  *buf++ = (u % 10) + '0';
  *buf   = '\000';
  return buf;
}

static char *local_itoa(char *buf, int i) {
  if (i < 0) {
    *buf++ = '-';
    /* Negate in unsigned arithmetic: plain "-i" would be undefined
     * behavior (signed overflow) for INT_MIN.
     */
    return local_itoa_unsigned(buf, 0u - (unsigned int)i);
  }
  return local_itoa_unsigned(buf, (unsigned int)i);
}
80 /* Wrapper around clone() that runs "fn" on the same stack as the
81 * caller! Unlike fork(), the cloned thread shares the same address space.
82 * The caller must be careful to use only minimal amounts of stack until
83 * the cloned thread has returned.
84 * There is a good chance that the cloned thread and the caller will share
85 * the same copy of errno!
88 #if __GNUC__ == 3 && __GNUC_MINOR__ >= 1 || __GNUC__ > 3
89 /* Try to force this function into a separate stack frame, and make sure
90 * that arguments are passed on the stack.
92 static int local_clone (int (*fn
)(void *), void *arg
, ...)
93 __attribute__ ((noinline
));
97 static int local_clone (int (*fn
)(void *), void *arg
, ...) {
98 /* Leave 4kB of gap between the callers stack and the new clone. This
99 * should be more than sufficient for the caller to call waitpid() until
100 * the cloned thread terminates.
102 * It is important that we set the CLONE_UNTRACED flag, because newer
103 * versions of "gdb" otherwise attempt to attach to our thread, and will
104 * attempt to reap its status codes. This subsequently results in the
105 * caller hanging indefinitely in waitpid(), waiting for a change in
106 * status that will never happen. By setting the CLONE_UNTRACED flag, we
107 * prevent "gdb" from stealing events, but we still expect the thread
108 * lister to fail, because it cannot PTRACE_ATTACH to the process that
109 * is being debugged. This is OK and the error code will be reported
112 return sys_clone(fn
, (char *)&arg
- 4096,
113 CLONE_VM
|CLONE_FS
|CLONE_FILES
|CLONE_UNTRACED
, arg
, 0, 0, 0);
/* Local substitute for the atoi() function, which is not necessarily safe
 * to call once threads are suspended (depending on whether libc looks up
 * locale information, when executing atoi()).
 * Parses an optional leading '-' followed by decimal digits; stops at the
 * first non-digit. No overflow checking (inputs here are /proc pids).
 */
static int local_atoi(const char *s) {
  int n   = 0;
  int neg = 0;
  if (*s == '-') {
    neg = 1;
    s++;
  }
  while (*s >= '0' && *s <= '9')
    n = 10*n + (*s++ - '0');
  return neg ? -n : n;
}
/* Re-runs fn until it doesn't cause EINTR. The result of "fn" is left in
 * whatever lvalue the caller assigns inside the macro argument.
 */
#define NO_INTR(fn) do {} while ((fn) < 0 && errno == EINTR)
/* Wrap a class around system calls, in order to give us access to
 * a private copy of errno. This only works in C++, but it has the
 * advantage of not needing nested functions, which are a non-standard
 * language extension.
 * NOTE(review): this region appears truncated in the extraction — the
 * enclosing "#ifdef __cplusplus ... class SysCalls { ... };" scaffolding
 * and its "#else" branch are not visible here; the two conflicting ERRNO
 * defines below presumably live in the C++ and C branches respectively.
 * Confirm against the full file before editing.
 */
#define SYS_CPLUSPLUS
#define SYS_ERRNO my_errno
#define SYS_INLINE inline
/* SYS_PREFIX -1 makes linux_syscall_support.h emit unprefixed members */
#define SYS_PREFIX -1
#undef SYS_LINUX_SYSCALL_SUPPORT_H
#include "linux_syscall_support.h"
/* Constructor: start with a clean private errno */
SysCalls() : my_errno(0) { }
/* ERRNO resolves to the private errno copy (C++ uses the sys object) */
#define ERRNO sys.my_errno
#define ERRNO my_errno
/* Wrapper for open() which is guaranteed to never return EINTR.
 * Returns the new file descriptor, or a negative value on error.
 */
static int c_open(const char *fname, int flags, int mode) {
  int rc;
  NO_INTR(rc = sys_open(fname, flags, mode));
  return rc;
}
/* abort() is not safely reentrant, and changes its behavior each time
 * it is called. This means, if the main application ever called abort()
 * we cannot safely call it again. This would happen if we were called
 * from a SIGABRT signal handler in the main application. So, document
 * that calling SIGABRT from the thread lister makes it not signal safe.
 * Also, since we share address space with the main application, we
 * cannot call abort() from the callback and expect the main application
 * to behave correctly afterwards. In fact, the only thing we can do, is
 * to terminate the main application with extreme prejudice (aka
 * PTRACE_KILL).
 * We set up our own SIGABRT handler to do this.
 * In order to find the main application from the signal handler, we
 * need to store information about it in global variables. This is
 * safe, because the main application should be suspended at this
 * time. If the callback ever called ResumeAllProcessThreads(), then
 * we are running a higher risk, though. So, try to avoid calling
 * abort() after calling ResumeAllProcessThreads.
 */
/* sig_pids: array of attached thread pids; sig_num_threads: its length;
 * sig_proc / sig_marker: fds the handler must close before exiting.
 */
static volatile int *sig_pids, sig_num_threads, sig_proc, sig_marker;
193 /* Signal handler to help us recover from dying while we are attached to
196 static void SignalHandler(int signum
, siginfo_t
*si
, void *data
) {
197 if (sig_pids
!= NULL
) {
198 if (signum
== SIGABRT
) {
199 while (sig_num_threads
-- > 0) {
200 /* Not sure if sched_yield is really necessary here, but it does not */
201 /* hurt, and it might be necessary for the same reasons that we have */
202 /* to do so in sys_ptrace_detach(). */
204 sys_ptrace(PTRACE_KILL
, sig_pids
[sig_num_threads
], 0, 0);
206 } else if (sig_num_threads
> 0) {
207 ResumeAllProcessThreads(sig_num_threads
, (int *)sig_pids
);
212 NO_INTR(sys_close(sig_marker
));
215 NO_INTR(sys_close(sig_proc
));
218 sys__exit(signum
== SIGABRT
? 1 : 2);
/* Try to dirty the stack, and hope that the compiler is not smart enough
 * to optimize this function away. Or worse, the compiler could inline the
 * function and permanently allocate the data on the stack.
 * The sys_read() on an invalid fd forces the buffer to be "used" so the
 * memset cannot be elided.
 */
static void DirtyStack(size_t amount) {
  char buf[amount];
  memset(buf, 0, amount);
  sys_read(-1, buf, amount);
}
233 /* Data structure for passing arguments to the lister thread.
235 #define ALT_STACKSIZE (MINSIGSTKSZ + 4096)
237 struct ListerParams
{
240 ListAllProcessThreadsCallBack callback
;
246 static void ListerThread(struct ListerParams
*args
) {
247 int found_parent
= 0;
248 pid_t clone_pid
= sys_gettid(), ppid
= sys_getppid();
249 char proc_self_task
[80], marker_name
[48], *marker_path
;
250 const char *proc_paths
[3];
251 const char *const *proc_path
= proc_paths
;
252 int proc
= -1, marker
= -1, num_threads
= 0;
253 int max_threads
= 0, sig
;
254 struct kernel_stat marker_sb
, proc_sb
;
257 /* Create "marker" that we can use to detect threads sharing the same
258 * address space and the same file handles. By setting the FD_CLOEXEC flag
259 * we minimize the risk of misidentifying child processes as threads;
260 * and since there is still a race condition, we will filter those out
263 if ((marker
= sys_socket(PF_LOCAL
, SOCK_DGRAM
, 0)) < 0 ||
264 sys_fcntl(marker
, F_SETFD
, FD_CLOEXEC
) < 0) {
269 NO_INTR(sys_close(marker
));
270 sig_marker
= marker
= -1;
272 NO_INTR(sys_close(proc
));
273 sig_proc
= proc
= -1;
277 /* Compute search paths for finding thread directories in /proc */
278 local_itoa(strrchr(strcpy(proc_self_task
, "/proc/"), '\000'), ppid
);
279 strcpy(marker_name
, proc_self_task
);
280 marker_path
= marker_name
+ strlen(marker_name
);
281 strcat(proc_self_task
, "/task/");
282 proc_paths
[0] = proc_self_task
; /* /proc/$$/task/ */
283 proc_paths
[1] = "/proc/"; /* /proc/ */
284 proc_paths
[2] = NULL
;
286 /* Compute path for marker socket in /proc */
287 local_itoa(strcpy(marker_path
, "/fd/") + 4, marker
);
288 if (sys_stat(marker_name
, &marker_sb
) < 0) {
292 /* Catch signals on an alternate pre-allocated stack. This way, we can
293 * safely execute the signal handler even if we ran out of memory.
295 memset(&altstack
, 0, sizeof(altstack
));
296 altstack
.ss_sp
= args
->altstack_mem
;
297 altstack
.ss_flags
= 0;
298 altstack
.ss_size
= ALT_STACKSIZE
;
299 sys_sigaltstack(&altstack
, (const stack_t
*)NULL
);
301 /* Some kernels forget to wake up traced processes, when the
302 * tracer dies. So, intercept synchronous signals and make sure
303 * that we wake up our tracees before dying. It is the caller's
304 * responsibility to ensure that asynchronous signals do not
305 * interfere with this function.
309 for (sig
= 0; sig
< sizeof(sync_signals
)/sizeof(*sync_signals
); sig
++) {
310 struct kernel_sigaction sa
;
311 memset(&sa
, 0, sizeof(sa
));
312 sa
.sa_sigaction_
= SignalHandler
;
313 sys_sigfillset(&sa
.sa_mask
);
314 sa
.sa_flags
= SA_ONSTACK
|SA_SIGINFO
|SA_RESETHAND
;
315 sys_sigaction(sync_signals
[sig
], &sa
, (struct kernel_sigaction
*)NULL
);
318 /* Read process directories in /proc/... */
320 /* Some kernels know about threads, and hide them in "/proc"
321 * (although they are still there, if you know the process
322 * id). Threads are moved into a separate "task" directory. We
323 * check there first, and then fall back on the older naming
324 * convention if necessary.
326 if ((sig_proc
= proc
= c_open(*proc_path
, O_RDONLY
|O_DIRECTORY
, 0)) < 0) {
327 if (*++proc_path
!= NULL
)
331 if (sys_fstat(proc
, &proc_sb
) < 0)
334 /* Since we are suspending threads, we cannot call any libc
335 * functions that might acquire locks. Most notably, we cannot
336 * call malloc(). So, we have to allocate memory on the stack,
337 * instead. Since we do not know how much memory we need, we
338 * make a best guess. And if we guessed incorrectly we retry on
339 * a second iteration (by jumping to "detach_threads").
341 * Unless the number of threads is increasing very rapidly, we
342 * should never need to do so, though, as our guestimate is very
345 if (max_threads
< proc_sb
.st_nlink
+ 100)
346 max_threads
= proc_sb
.st_nlink
+ 100;
349 pid_t pids
[max_threads
];
350 int added_entries
= 0;
351 sig_num_threads
= num_threads
;
354 struct kernel_dirent
*entry
;
356 ssize_t nbytes
= sys_getdents(proc
, (struct kernel_dirent
*)buf
,
360 else if (nbytes
== 0) {
362 /* Need to keep iterating over "/proc" in multiple
363 * passes until we no longer find any more threads. This
364 * algorithm eventually completes, when all threads have
368 sys_lseek(proc
, 0, SEEK_SET
);
373 for (entry
= (struct kernel_dirent
*)buf
;
374 entry
< (struct kernel_dirent
*)&buf
[nbytes
];
375 entry
= (struct kernel_dirent
*)((char *)entry
+entry
->d_reclen
)) {
376 if (entry
->d_ino
!= 0) {
377 const char *ptr
= entry
->d_name
;
380 /* Some kernels hide threads by preceding the pid with a '.' */
384 /* If the directory is not numeric, it cannot be a
387 if (*ptr
< '0' || *ptr
> '9')
389 pid
= local_atoi(ptr
);
391 /* Attach (and suspend) all threads */
392 if (pid
&& pid
!= clone_pid
) {
393 struct kernel_stat tmp_sb
;
394 char fname
[entry
->d_reclen
+ 48];
395 strcat(strcat(strcpy(fname
, "/proc/"),
396 entry
->d_name
), marker_path
);
398 /* Check if the marker is identical to the one we created */
399 if (sys_stat(fname
, &tmp_sb
) >= 0 &&
400 marker_sb
.st_ino
== tmp_sb
.st_ino
) {
403 /* Found one of our threads, make sure it is no duplicate */
404 for (i
= 0; i
< num_threads
; i
++) {
405 /* Linear search is slow, but should not matter much for
406 * the typically small number of threads.
408 if (pids
[i
] == pid
) {
409 /* Found a duplicate; most likely on second pass */
414 /* Check whether data structure needs growing */
415 if (num_threads
>= max_threads
) {
416 /* Back to square one, this time with more memory */
417 NO_INTR(sys_close(proc
));
421 /* Attaching to thread suspends it */
422 pids
[num_threads
++] = pid
;
423 sig_num_threads
= num_threads
;
424 if (sys_ptrace(PTRACE_ATTACH
, pid
, (void *)0,
426 /* If operation failed, ignore thread. Maybe it
427 * just died? There might also be a race
428 * condition with a concurrent core dumper or
429 * with a debugger. In that case, we will just
430 * make a best effort, rather than failing
434 sig_num_threads
= num_threads
;
437 while (sys_waitpid(pid
, (int *)0, __WALL
) < 0) {
438 if (errno
!= EINTR
) {
439 sys_ptrace_detach(pid
);
441 sig_num_threads
= num_threads
;
446 if (sys_ptrace(PTRACE_PEEKDATA
, pid
, &i
, &j
) || i
++ != j
||
447 sys_ptrace(PTRACE_PEEKDATA
, pid
, &i
, &j
) || i
!= j
) {
448 /* Address spaces are distinct, even though both
449 * processes show the "marker". This is probably
450 * a forked child process rather than a thread.
452 sys_ptrace_detach(pid
);
454 sig_num_threads
= num_threads
;
456 found_parent
|= pid
== ppid
;
465 NO_INTR(sys_close(proc
));
466 sig_proc
= proc
= -1;
468 /* If we failed to find any threads, try looking somewhere else in
469 * /proc. Maybe, threads are reported differently on this system.
471 if (num_threads
> 1 || !*++proc_path
) {
472 NO_INTR(sys_close(marker
));
473 sig_marker
= marker
= -1;
475 /* If we never found the parent process, something is very wrong.
476 * Most likely, we are running in debugger. Any attempt to operate
477 * on the threads would be very incomplete. Let's just report an
478 * error to the caller.
481 ResumeAllProcessThreads(num_threads
, pids
);
485 /* Now we are ready to call the callback,
486 * which takes care of resuming the threads for us.
488 args
->result
= args
->callback(args
->parameter
, num_threads
,
492 /* Callback should have resumed threads, but better safe than sorry */
493 if (ResumeAllProcessThreads(num_threads
, pids
)) {
494 /* Callback forgot to resume at least one thread, report error */
502 /* Resume all threads prior to retrying the operation */
503 ResumeAllProcessThreads(num_threads
, pids
);
506 sig_num_threads
= num_threads
;
513 /* This function gets the list of all linux threads of the current process
514 * passes them to the 'callback' along with the 'parameter' pointer; at the
515 * call back call time all the threads are paused via
517 * The callback is executed from a separate thread which shares only the
518 * address space, the filesystem, and the filehandles with the caller. Most
519 * notably, it does not share the same pid and ppid; and if it terminates,
520 * the rest of the application is still there. 'callback' is supposed to do
521 * or arrange for ResumeAllProcessThreads. This happens automatically, if
522 * the thread raises a synchronous signal (e.g. SIGSEGV); asynchronous
523 * signals are blocked. If the 'callback' decides to unblock them, it must
524 * ensure that they cannot terminate the application, or that
525 * ResumeAllProcessThreads will get called.
526 * It is an error for the 'callback' to make any library calls that could
527 * acquire locks. Most notably, this means that most system calls have to
528 * avoid going through libc. Also, this means that it is not legal to call
530 * We return -1 on error and the return value of 'callback' on success.
532 int ListAllProcessThreads(void *parameter
,
533 ListAllProcessThreadsCallBack callback
, ...) {
534 char altstack_mem
[ALT_STACKSIZE
];
535 struct ListerParams args
;
537 int dumpable
= 1, sig
;
538 struct kernel_sigset_t sig_blocked
, sig_old
;
540 va_start(args
.ap
, callback
);
542 /* If we are short on virtual memory, initializing the alternate stack
543 * might trigger a SIGSEGV. Let's do this early, before it could get us
544 * into more trouble (i.e. before signal handlers try to use the alternate
545 * stack, and before we attach to other threads).
547 memset(altstack_mem
, 0, sizeof(altstack_mem
));
549 /* Some of our cleanup functions could conceivable use more stack space.
550 * Try to touch the stack right now. This could be defeated by the compiler
551 * being too smart for it's own good, so try really hard.
555 /* Make this process "dumpable". This is necessary in order to ptrace()
556 * after having called setuid().
558 dumpable
= sys_prctl(PR_GET_DUMPABLE
, 0);
560 sys_prctl(PR_SET_DUMPABLE
, 1);
562 /* Fill in argument block for dumper thread */
565 args
.altstack_mem
= altstack_mem
;
566 args
.parameter
= parameter
;
567 args
.callback
= callback
;
569 /* Before cloning the thread lister, block all asynchronous signals, as we */
570 /* are not prepared to handle them. */
571 sys_sigfillset(&sig_blocked
);
572 for (sig
= 0; sig
< sizeof(sync_signals
)/sizeof(*sync_signals
); sig
++) {
573 sys_sigdelset(&sig_blocked
, sync_signals
[sig
]);
575 if (sys_sigprocmask(SIG_BLOCK
, &sig_blocked
, &sig_old
)) {
582 /* After cloning, both the parent and the child share the same instance
583 * of errno. We must make sure that at least one of these processes
584 * (in our case, the parent) uses modified syscall macros that update
585 * a local copy of errno, instead.
588 #define sys0_sigprocmask sys.sigprocmask
589 #define sys0_waitpid sys.waitpid
593 #define SYS_ERRNO my_errno
594 #define SYS_INLINE inline
596 #undef SYS_LINUX_SYSCALL_SUPPORT_H
597 #include "linux_syscall_support.h"
601 clone_pid
= local_clone((int (*)(void *))ListerThread
, &args
);
604 sys_sigprocmask(SIG_SETMASK
, &sig_old
, &sig_old
);
606 if (clone_pid
>= 0) {
608 while ((rc
= sys0_waitpid(clone_pid
, &status
, __WALL
)) < 0 &&
615 } else if (WIFEXITED(status
)) {
616 switch (WEXITSTATUS(status
)) {
617 case 0: break; /* Normal process termination */
618 case 2: args
.err
= EFAULT
; /* Some fault (e.g. SIGSEGV) detected */
621 case 3: args
.err
= EPERM
; /* Process is already being traced */
624 default:args
.err
= ECHILD
; /* Child died unexpectedly */
628 } else if (!WIFEXITED(status
)) {
629 args
.err
= EFAULT
; /* Terminated due to an unhandled signal*/
634 args
.err
= clone_errno
;
638 /* Restore the "dumpable" state of the process */
641 sys_prctl(PR_SET_DUMPABLE
, dumpable
);
649 /* This function resumes the list of all linux threads that
650 * ListAllProcessThreads pauses before giving to its callback.
651 * The function returns non-zero if at least one thread was
652 * suspended and has now been resumed.
654 int ResumeAllProcessThreads(int num_threads
, pid_t
*thread_pids
) {
655 int detached_at_least_one
= 0;
656 while (num_threads
-- > 0) {
657 detached_at_least_one
|= sys_ptrace_detach(thread_pids
[num_threads
]) >= 0;
659 return detached_at_least_one
;