/* Scraped-page header, not part of the original source:
   repository: binutils-gdb.git; file: gdbserver/linux-low.cc;
   blob: 7b1ec61212db8aedd45ddb38a197be9588ee527d.
   (An unrelated commit title was also captured by the scrape:
   "arm: Support pac_key_* register operand for MRS/MSR in
   Armv8.1-M Mainline".)  */
/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2024 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "gdbsupport/event-loop.h"
#include "gdbsupport/event-pipe.h"
#include "gdbsupport/rsp-low.h"
#include "gdbsupport/signals-state-save-restore.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdbsupport/gdb_wait.h"
#include "nat/gdb_ptrace.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include <langinfo.h>
#include <iconv.h>
#include "gdbsupport/filestuff.h"
#include "gdbsupport/gdb-safe-ctype.h"
#include "tracepoint.h"
#include <inttypes.h>
#include "gdbsupport/common-inferior.h"
#include "nat/fork-inferior.h"
#include "gdbsupport/environ.h"
#include "gdbsupport/gdb-sigmask.h"
#include "gdbsupport/scoped_restore.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef AT_HWCAP2
#define AT_HWCAP2 26
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR     (0x10000*4)
#define PT_DATA_ADDR     (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#if (defined(__UCLIBC__) \
     && defined(HAS_NOMMU) \
     && defined(PT_TEXT_ADDR) \
     && defined(PT_DATA_ADDR) \
     && defined(PT_TEXT_END_ADDR))
#define SUPPORTS_READ_OFFSETS
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "gdbsupport/btrace-common.h"
#endif
#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  Fallback definition of a 32-bit auxv
   entry for systems whose headers lack it.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We use to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif
#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  Fallback definition of a 64-bit auxv
   entry for systems whose headers lack it.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We use to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif
136 /* See nat/linux-nat.h. */
137 enum tribool have_ptrace_getregset = TRIBOOL_UNKNOWN;
139 /* Return TRUE if THREAD is the leader thread of the process. */
141 static bool
142 is_leader (thread_info *thread)
144 return thread->id.pid () == thread->id.lwp ();
147 /* Return true if we should report thread exit events to GDB, for
148 THR. */
150 static bool
151 report_exit_events_for (thread_info *thr)
153 client_state &cs = get_client_state ();
155 return (cs.report_thread_events
156 || (thr->thread_options & GDB_THREAD_OPTION_EXIT) != 0);
159 /* LWP accessors. */
161 /* See nat/linux-nat.h. */
163 ptid_t
164 ptid_of_lwp (struct lwp_info *lwp)
166 return get_lwp_thread (lwp)->id;
169 /* See nat/linux-nat.h. */
171 void
172 lwp_set_arch_private_info (struct lwp_info *lwp,
173 struct arch_lwp_info *info)
175 lwp->arch_private = info;
178 /* See nat/linux-nat.h. */
180 struct arch_lwp_info *
181 lwp_arch_private_info (struct lwp_info *lwp)
183 return lwp->arch_private;
186 /* See nat/linux-nat.h. */
189 lwp_is_stopped (struct lwp_info *lwp)
191 return lwp->stopped;
194 /* See nat/linux-nat.h. */
196 enum target_stop_reason
197 lwp_stop_reason (struct lwp_info *lwp)
199 return lwp->stop_reason;
202 /* See nat/linux-nat.h. */
205 lwp_is_stepping (struct lwp_info *lwp)
207 return lwp->stepping;
210 /* A list of all unknown processes which receive stop signals. Some
211 other process will presumably claim each of these as forked
212 children momentarily. */
214 struct simple_pid_list
216 /* The process ID. */
217 int pid;
219 /* The status as reported by waitpid. */
220 int status;
222 /* Next in chain. */
223 struct simple_pid_list *next;
225 static struct simple_pid_list *stopped_pids;
227 /* Trivial list manipulation functions to keep track of a list of new
228 stopped processes. */
230 static void
231 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
233 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
235 new_pid->pid = pid;
236 new_pid->status = status;
237 new_pid->next = *listp;
238 *listp = new_pid;
241 static int
242 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
244 struct simple_pid_list **p;
246 for (p = listp; *p != NULL; p = &(*p)->next)
247 if ((*p)->pid == pid)
249 struct simple_pid_list *next = (*p)->next;
251 *statusp = (*p)->status;
252 xfree (*p);
253 *p = next;
254 return 1;
256 return 0;
/* What stage of all-stop we are currently in, if any.  */

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
static stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
274 /* FIXME make into a target method? */
275 int using_threads = 1;
277 /* True if we're presently stabilizing threads (moving them out of
278 jump pads). */
279 static int stabilizing_threads;
281 static void unsuspend_all_lwps (struct lwp_info *except);
282 static void mark_lwp_dead (struct lwp_info *lwp, int wstat,
283 bool thread_event);
284 static int lwp_is_marked_dead (struct lwp_info *lwp);
285 static int kill_lwp (unsigned long lwpid, int signo);
286 static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
287 static int linux_low_ptrace_options (int attached);
288 static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
290 /* When the event-loop is doing a step-over, this points at the thread
291 being stepped. */
292 static ptid_t step_over_bkpt;
294 bool
295 linux_process_target::low_supports_breakpoints ()
297 return false;
300 CORE_ADDR
301 linux_process_target::low_get_pc (regcache *regcache)
303 return 0;
306 void
307 linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
309 gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
312 std::vector<CORE_ADDR>
313 linux_process_target::low_get_next_pcs (regcache *regcache)
315 gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
316 "implemented");
320 linux_process_target::low_decr_pc_after_break ()
322 return 0;
325 /* True if LWP is stopped in its stepping range. */
327 static int
328 lwp_in_step_range (struct lwp_info *lwp)
330 CORE_ADDR pc = lwp->stop_pc;
332 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
335 /* The event pipe registered as a waitable file in the event loop. */
336 static event_pipe linux_event_pipe;
338 /* True if we're currently in async mode. */
339 #define target_is_async_p() (linux_event_pipe.is_open ())
341 static void send_sigstop (struct lwp_info *lwp);
343 /* Return non-zero if HEADER is a 64-bit ELF file. */
345 static int
346 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
348 if (header->e_ident[EI_MAG0] == ELFMAG0
349 && header->e_ident[EI_MAG1] == ELFMAG1
350 && header->e_ident[EI_MAG2] == ELFMAG2
351 && header->e_ident[EI_MAG3] == ELFMAG3)
353 *machine = header->e_machine;
354 return header->e_ident[EI_CLASS] == ELFCLASS64;
357 *machine = EM_NONE;
358 return -1;
361 /* Return non-zero if FILE is a 64-bit ELF file,
362 zero if the file is not a 64-bit ELF file,
363 and -1 if the file is not accessible or doesn't exist. */
365 static int
366 elf_64_file_p (const char *file, unsigned int *machine)
368 Elf64_Ehdr header;
369 int fd;
371 fd = open (file, O_RDONLY);
372 if (fd < 0)
373 return -1;
375 if (read (fd, &header, sizeof (header)) != sizeof (header))
377 close (fd);
378 return 0;
380 close (fd);
382 return elf_64_header_p (&header, machine);
385 /* Accepts an integer PID; Returns true if the executable PID is
386 running is a 64-bit ELF file.. */
389 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
391 char file[PATH_MAX];
393 sprintf (file, "/proc/%d/exe", pid);
394 return elf_64_file_p (file, machine);
397 void
398 linux_process_target::delete_lwp (lwp_info *lwp)
400 thread_info *thr = get_lwp_thread (lwp);
402 threads_debug_printf ("deleting %ld", thr->id.lwp ());
404 thr->process ()->remove_thread (thr);
406 low_delete_thread (lwp->arch_private);
408 delete lwp;
411 void
412 linux_process_target::low_delete_thread (arch_lwp_info *info)
414 /* Default implementation should be overridden if architecture-specific
415 info is being used. */
416 gdb_assert (info == nullptr);
419 /* Open the /proc/PID/mem file for PROC. */
421 static void
422 open_proc_mem_file (process_info *proc)
424 gdb_assert (proc->priv->mem_fd == -1);
426 char filename[64];
427 xsnprintf (filename, sizeof filename, "/proc/%d/mem", proc->pid);
429 proc->priv->mem_fd
430 = gdb_open_cloexec (filename, O_RDWR | O_LARGEFILE, 0).release ();
433 process_info *
434 linux_process_target::add_linux_process_no_mem_file (int pid, int attached)
436 struct process_info *proc;
438 proc = add_process (pid, attached);
439 proc->priv = XCNEW (struct process_info_private);
441 proc->priv->arch_private = low_new_process ();
442 proc->priv->mem_fd = -1;
444 return proc;
448 process_info *
449 linux_process_target::add_linux_process (int pid, int attached)
451 process_info *proc = add_linux_process_no_mem_file (pid, attached);
452 open_proc_mem_file (proc);
453 return proc;
456 void
457 linux_process_target::remove_linux_process (process_info *proc)
459 if (proc->priv->mem_fd >= 0)
460 close (proc->priv->mem_fd);
462 this->low_delete_process (proc->priv->arch_private);
464 xfree (proc->priv);
465 proc->priv = nullptr;
467 remove_process (proc);
470 arch_process_info *
471 linux_process_target::low_new_process ()
473 return nullptr;
476 void
477 linux_process_target::low_delete_process (arch_process_info *info)
479 /* Default implementation must be overridden if architecture-specific
480 info exists. */
481 gdb_assert (info == nullptr);
484 void
485 linux_process_target::low_new_fork (process_info *parent, process_info *child)
487 /* Nop. */
490 void
491 linux_process_target::arch_setup_thread (thread_info *thread)
493 scoped_restore_current_thread restore_thread;
494 switch_to_thread (thread);
496 low_arch_setup ();
500 linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
501 int wstat)
503 client_state &cs = get_client_state ();
504 struct lwp_info *event_lwp = *orig_event_lwp;
505 int event = linux_ptrace_get_extended_event (wstat);
506 struct thread_info *event_thr = get_lwp_thread (event_lwp);
508 gdb_assert (event_lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);
510 /* All extended events we currently use are mid-syscall. Only
511 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
512 you have to be using PTRACE_SEIZE to get that. */
513 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
515 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
516 || (event == PTRACE_EVENT_CLONE))
518 unsigned long new_pid;
519 int ret, status;
521 /* Get the pid of the new lwp. */
522 ptrace (PTRACE_GETEVENTMSG, event_thr->id.lwp (), (PTRACE_TYPE_ARG3) 0,
523 &new_pid);
525 /* If we haven't already seen the new PID stop, wait for it now. */
526 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
528 /* The new child has a pending SIGSTOP. We can't affect it until it
529 hits the SIGSTOP, but we're already attached. */
531 ret = my_waitpid (new_pid, &status, __WALL);
533 if (ret == -1)
534 perror_with_name ("waiting for new child");
535 else if (ret != new_pid)
536 warning ("wait returned unexpected PID %d", ret);
537 else if (!WIFSTOPPED (status))
538 warning ("wait returned unexpected status 0x%x", status);
541 if (debug_threads)
543 debug_printf ("HEW: Got %s event from LWP %ld, new child is %ld\n",
544 (event == PTRACE_EVENT_FORK ? "fork"
545 : event == PTRACE_EVENT_VFORK ? "vfork"
546 : event == PTRACE_EVENT_CLONE ? "clone"
547 : "???"),
548 event_thr->id.lwp (),
549 new_pid);
552 ptid_t child_ptid = (event != PTRACE_EVENT_CLONE
553 ? ptid_t (new_pid, new_pid)
554 : ptid_t (event_thr->id.pid (), new_pid));
556 process_info *child_proc = nullptr;
558 if (event != PTRACE_EVENT_CLONE)
560 /* Add the new process to the tables before we add the LWP.
561 We need to do this even if the new process will be
562 detached. See breakpoint cloning code further below. */
563 child_proc = add_linux_process (new_pid, 0);
566 lwp_info *child_lwp = add_lwp (child_ptid);
567 gdb_assert (child_lwp != NULL);
568 child_lwp->stopped = 1;
569 if (event != PTRACE_EVENT_CLONE)
570 child_lwp->must_set_ptrace_flags = 1;
571 child_lwp->status_pending_p = 0;
573 thread_info *child_thr = get_lwp_thread (child_lwp);
575 /* If we're suspending all threads, leave this one suspended
576 too. If the fork/clone parent is stepping over a breakpoint,
577 all other threads have been suspended already. Leave the
578 child suspended too. */
579 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
580 || event_lwp->bp_reinsert != 0)
582 threads_debug_printf ("leaving child suspended");
583 child_lwp->suspended = 1;
586 if (event_lwp->bp_reinsert != 0
587 && supports_software_single_step ()
588 && event == PTRACE_EVENT_VFORK)
590 /* If we leave single-step breakpoints there, child will
591 hit it, so uninsert single-step breakpoints from parent
592 (and child). Once vfork child is done, reinsert
593 them back to parent. */
594 uninsert_single_step_breakpoints (event_thr);
597 if (event != PTRACE_EVENT_CLONE)
599 /* Clone the breakpoint lists of the parent. We need to do
600 this even if the new process will be detached, since we
601 will need the process object and the breakpoints to
602 remove any breakpoints from memory when we detach, and
603 the client side will access registers. */
604 gdb_assert (child_proc != NULL);
606 process_info *parent_proc = event_thr->process ();
607 child_proc->attached = parent_proc->attached;
609 clone_all_breakpoints (child_thr, event_thr);
611 target_desc_up tdesc = allocate_target_description ();
612 copy_target_description (tdesc.get (), parent_proc->tdesc);
613 child_proc->tdesc = tdesc.release ();
615 /* Clone arch-specific process data. */
616 low_new_fork (parent_proc, child_proc);
619 /* Save fork/clone info in the parent thread. */
620 if (event == PTRACE_EVENT_FORK)
621 event_lwp->waitstatus.set_forked (child_ptid);
622 else if (event == PTRACE_EVENT_VFORK)
623 event_lwp->waitstatus.set_vforked (child_ptid);
624 else if (event == PTRACE_EVENT_CLONE
625 && (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) != 0)
626 event_lwp->waitstatus.set_thread_cloned (child_ptid);
628 if (event != PTRACE_EVENT_CLONE
629 || (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) != 0)
631 /* The status_pending field contains bits denoting the
632 extended event, so when the pending event is handled, the
633 handler will look at lwp->waitstatus. */
634 event_lwp->status_pending_p = 1;
635 event_lwp->status_pending = wstat;
637 /* Link the threads until the parent's event is passed on to
638 GDB. */
639 event_lwp->relative = child_lwp;
640 child_lwp->relative = event_lwp;
643 /* If the parent thread is doing step-over with single-step
644 breakpoints, the list of single-step breakpoints are cloned
645 from the parent's. Remove them from the child process.
646 In case of vfork, we'll reinsert them back once vforked
647 child is done. */
648 if (event_lwp->bp_reinsert != 0
649 && supports_software_single_step ())
651 /* The child process is forked and stopped, so it is safe
652 to access its memory without stopping all other threads
653 from other processes. */
654 delete_single_step_breakpoints (child_thr);
656 gdb_assert (has_single_step_breakpoints (event_thr));
657 gdb_assert (!has_single_step_breakpoints (child_thr));
660 /* Normally we will get the pending SIGSTOP. But in some cases
661 we might get another signal delivered to the group first.
662 If we do get another signal, be sure not to lose it. */
663 if (WSTOPSIG (status) != SIGSTOP)
665 child_lwp->stop_expected = 1;
666 child_lwp->status_pending_p = 1;
667 child_lwp->status_pending = status;
669 else if (event == PTRACE_EVENT_CLONE && cs.report_thread_events)
671 child_lwp->waitstatus.set_thread_created ();
672 child_lwp->status_pending_p = 1;
673 child_lwp->status_pending = status;
676 if (event == PTRACE_EVENT_CLONE)
678 #ifdef USE_THREAD_DB
679 thread_db_notice_clone (event_thr, child_ptid);
680 #endif
683 if (event == PTRACE_EVENT_CLONE
684 && (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) == 0)
686 threads_debug_printf
687 ("not reporting clone event from LWP %ld, new child is %ld\n",
688 event_thr->id.lwp (),
689 new_pid);
690 return 1;
693 /* Leave the child stopped until GDB processes the parent
694 event. */
695 child_thr->last_resume_kind = resume_stop;
696 child_thr->last_status.set_stopped (GDB_SIGNAL_0);
698 /* Report the event. */
699 threads_debug_printf
700 ("reporting %s event from LWP %ld, new child is %ld\n",
701 (event == PTRACE_EVENT_FORK ? "fork"
702 : event == PTRACE_EVENT_VFORK ? "vfork"
703 : event == PTRACE_EVENT_CLONE ? "clone"
704 : "???"),
705 event_thr->id.lwp (),
706 new_pid);
707 return 0;
709 else if (event == PTRACE_EVENT_VFORK_DONE)
711 event_lwp->waitstatus.set_vfork_done ();
713 if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
715 reinsert_single_step_breakpoints (event_thr);
717 gdb_assert (has_single_step_breakpoints (event_thr));
720 /* Report the event. */
721 return 0;
723 else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
725 std::vector<int> syscalls_to_catch;
726 ptid_t event_ptid;
727 pid_t event_pid;
729 threads_debug_printf ("Got exec event from LWP %ld",
730 event_thr->id.lwp ());
732 /* Get the event ptid. */
733 event_ptid = event_thr->id;
734 event_pid = event_ptid.pid ();
736 /* Save the syscall list from the execing process. */
737 process_info *proc = event_thr->process ();
738 syscalls_to_catch = std::move (proc->syscalls_to_catch);
740 /* Delete the execing process and all its threads. */
741 mourn (proc);
742 switch_to_thread (nullptr);
744 /* Create a new process/lwp/thread. */
745 proc = add_linux_process (event_pid, 0);
746 event_lwp = add_lwp (event_ptid);
747 event_thr = get_lwp_thread (event_lwp);
748 gdb_assert (current_thread == event_thr);
749 arch_setup_thread (event_thr);
751 /* Set the event status. */
752 event_lwp->waitstatus.set_execd
753 (make_unique_xstrdup
754 (linux_proc_pid_to_exec_file (event_thr->id.lwp ())));
756 /* Mark the exec status as pending. */
757 event_lwp->stopped = 1;
758 event_lwp->status_pending_p = 1;
759 event_lwp->status_pending = wstat;
760 event_thr->last_resume_kind = resume_continue;
761 event_thr->last_status.set_ignore ();
763 /* Update syscall state in the new lwp, effectively mid-syscall too. */
764 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
766 /* Restore the list to catch. Don't rely on the client, which is free
767 to avoid sending a new list when the architecture doesn't change.
768 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
769 proc->syscalls_to_catch = std::move (syscalls_to_catch);
771 /* Report the event. */
772 *orig_event_lwp = event_lwp;
773 return 0;
776 internal_error (_("unknown ptrace event %d"), event);
779 CORE_ADDR
780 linux_process_target::get_pc (lwp_info *lwp)
782 process_info *proc = get_lwp_thread (lwp)->process ();
783 gdb_assert (!proc->starting_up);
785 if (!low_supports_breakpoints ())
786 return 0;
788 scoped_restore_current_thread restore_thread;
789 switch_to_thread (get_lwp_thread (lwp));
791 struct regcache *regcache = get_thread_regcache (current_thread, 1);
792 CORE_ADDR pc = low_get_pc (regcache);
794 threads_debug_printf ("pc is 0x%lx", (long) pc);
796 return pc;
799 void
800 linux_process_target::get_syscall_trapinfo (lwp_info *lwp, int *sysno)
802 struct regcache *regcache;
804 scoped_restore_current_thread restore_thread;
805 switch_to_thread (get_lwp_thread (lwp));
807 regcache = get_thread_regcache (current_thread, 1);
808 low_get_syscall_trapinfo (regcache, sysno);
810 threads_debug_printf ("get_syscall_trapinfo sysno %d", *sysno);
813 void
814 linux_process_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
816 /* By default, report an unknown system call number. */
817 *sysno = UNKNOWN_SYSCALL;
820 bool
821 linux_process_target::save_stop_reason (lwp_info *lwp)
823 CORE_ADDR pc;
824 CORE_ADDR sw_breakpoint_pc;
825 siginfo_t siginfo;
827 if (!low_supports_breakpoints ())
828 return false;
830 process_info *proc = get_lwp_thread (lwp)->process ();
831 if (proc->starting_up)
833 /* Claim we have the stop PC so that the caller doesn't try to
834 fetch it itself. */
835 return true;
838 pc = get_pc (lwp);
839 sw_breakpoint_pc = pc - low_decr_pc_after_break ();
841 /* breakpoint_at reads from the current thread. */
842 scoped_restore_current_thread restore_thread;
843 switch_to_thread (get_lwp_thread (lwp));
845 if (ptrace (PTRACE_GETSIGINFO, current_thread->id.lwp (),
846 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
848 if (siginfo.si_signo == SIGTRAP)
850 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
851 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
853 /* The si_code is ambiguous on this arch -- check debug
854 registers. */
855 if (!check_stopped_by_watchpoint (lwp))
856 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
858 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
860 /* If we determine the LWP stopped for a SW breakpoint,
861 trust it. Particularly don't check watchpoint
862 registers, because at least on s390, we'd find
863 stopped-by-watchpoint as long as there's a watchpoint
864 set. */
865 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
867 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
869 /* This can indicate either a hardware breakpoint or
870 hardware watchpoint. Check debug registers. */
871 if (!check_stopped_by_watchpoint (lwp))
872 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
874 else if (siginfo.si_code == TRAP_TRACE)
876 /* We may have single stepped an instruction that
877 triggered a watchpoint. In that case, on some
878 architectures (such as x86), instead of TRAP_HWBKPT,
879 si_code indicates TRAP_TRACE, and we need to check
880 the debug registers separately. */
881 if (!check_stopped_by_watchpoint (lwp))
882 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
887 if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
889 threads_debug_printf
890 ("%s stopped by software breakpoint",
891 target_pid_to_str (get_lwp_thread (lwp)->id).c_str ());
893 /* Back up the PC if necessary. */
894 if (pc != sw_breakpoint_pc)
896 struct regcache *regcache
897 = get_thread_regcache (current_thread, 1);
898 low_set_pc (regcache, sw_breakpoint_pc);
901 /* Update this so we record the correct stop PC below. */
902 pc = sw_breakpoint_pc;
904 else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
905 threads_debug_printf
906 ("%s stopped by hardware breakpoint",
907 target_pid_to_str (get_lwp_thread (lwp)->id).c_str ());
908 else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
909 threads_debug_printf
910 ("%s stopped by hardware watchpoint",
911 target_pid_to_str (get_lwp_thread (lwp)->id).c_str ());
912 else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
913 threads_debug_printf
914 ("%s stopped by trace",
915 target_pid_to_str (get_lwp_thread (lwp)->id).c_str ());
917 lwp->stop_pc = pc;
918 return true;
921 lwp_info *
922 linux_process_target::add_lwp (ptid_t ptid)
924 lwp_info *lwp = new lwp_info;
926 lwp->thread = find_process_pid (ptid.pid ())->add_thread (ptid, lwp);
928 low_new_thread (lwp);
930 return lwp;
933 void
934 linux_process_target::low_new_thread (lwp_info *info)
936 /* Nop. */
939 /* Callback to be used when calling fork_inferior, responsible for
940 actually initiating the tracing of the inferior. */
942 static void
943 linux_ptrace_fun ()
945 if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
946 (PTRACE_TYPE_ARG4) 0) < 0)
947 trace_start_error_with_name ("ptrace");
949 if (setpgid (0, 0) < 0)
950 trace_start_error_with_name ("setpgid");
952 /* If GDBserver is connected to gdb via stdio, redirect the inferior's
953 stdout to stderr so that inferior i/o doesn't corrupt the connection.
954 Also, redirect stdin to /dev/null. */
955 if (remote_connection_is_stdio ())
957 if (close (0) < 0)
958 trace_start_error_with_name ("close");
959 if (open ("/dev/null", O_RDONLY) < 0)
960 trace_start_error_with_name ("open");
961 if (dup2 (2, 1) < 0)
962 trace_start_error_with_name ("dup2");
963 if (write (2, "stdin/stdout redirected\n",
964 sizeof ("stdin/stdout redirected\n") - 1) < 0)
966 /* Errors ignored. */;
971 /* Start an inferior process and returns its pid.
972 PROGRAM is the name of the program to be started, and PROGRAM_ARGS
973 are its arguments. */
976 linux_process_target::create_inferior (const char *program,
977 const std::vector<char *> &program_args)
979 client_state &cs = get_client_state ();
980 struct lwp_info *new_lwp;
981 int pid;
982 ptid_t ptid;
985 maybe_disable_address_space_randomization restore_personality
986 (cs.disable_randomization);
987 std::string str_program_args = construct_inferior_arguments (program_args);
989 pid = fork_inferior (program,
990 str_program_args.c_str (),
991 get_environ ()->envp (), linux_ptrace_fun,
992 NULL, NULL, NULL, NULL);
995 /* When spawning a new process, we can't open the mem file yet. We
996 still have to nurse the process through the shell, and that execs
997 a couple times. The address space a /proc/PID/mem file is
998 accessing is destroyed on exec. */
999 process_info *proc = add_linux_process_no_mem_file (pid, 0);
1001 ptid = ptid_t (pid, pid);
1002 new_lwp = add_lwp (ptid);
1003 new_lwp->must_set_ptrace_flags = 1;
1005 post_fork_inferior (pid, program);
1007 /* PROC is now past the shell running the program we want, so we can
1008 open the /proc/PID/mem file. */
1009 open_proc_mem_file (proc);
1011 return pid;
1014 /* Implement the post_create_inferior target_ops method. */
1016 void
1017 linux_process_target::post_create_inferior ()
1019 struct lwp_info *lwp = get_thread_lwp (current_thread);
1021 low_arch_setup ();
1023 if (lwp->must_set_ptrace_flags)
1025 struct process_info *proc = current_process ();
1026 int options = linux_low_ptrace_options (proc->attached);
1028 linux_enable_event_reporting (current_thread->id.lwp (), options);
1029 lwp->must_set_ptrace_flags = 0;
1034 linux_process_target::attach_lwp (ptid_t ptid)
1036 struct lwp_info *new_lwp;
1037 int lwpid = ptid.lwp ();
1039 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
1040 != 0)
1041 return errno;
1043 new_lwp = add_lwp (ptid);
1045 /* We need to wait for SIGSTOP before being able to make the next
1046 ptrace call on this LWP. */
1047 new_lwp->must_set_ptrace_flags = 1;
1049 if (linux_proc_pid_is_stopped (lwpid))
1051 threads_debug_printf ("Attached to a stopped process");
1053 /* The process is definitely stopped. It is in a job control
1054 stop, unless the kernel predates the TASK_STOPPED /
1055 TASK_TRACED distinction, in which case it might be in a
1056 ptrace stop. Make sure it is in a ptrace stop; from there we
1057 can kill it, signal it, et cetera.
1059 First make sure there is a pending SIGSTOP. Since we are
1060 already attached, the process can not transition from stopped
1061 to running without a PTRACE_CONT; so we know this signal will
1062 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1063 probably already in the queue (unless this kernel is old
1064 enough to use TASK_STOPPED for ptrace stops); but since
1065 SIGSTOP is not an RT signal, it can only be queued once. */
1066 kill_lwp (lwpid, SIGSTOP);
1068 /* Finally, resume the stopped process. This will deliver the
1069 SIGSTOP (or a higher priority signal, just like normal
1070 PTRACE_ATTACH), which we'll catch later on. */
1071 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1074 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
1075 brings it to a halt.
1077 There are several cases to consider here:
1079 1) gdbserver has already attached to the process and is being notified
1080 of a new thread that is being created.
1081 In this case we should ignore that SIGSTOP and resume the
1082 process. This is handled below by setting stop_expected = 1,
1083 and the fact that add_thread sets last_resume_kind ==
1084 resume_continue.
1086 2) This is the first thread (the process thread), and we're attaching
1087 to it via attach_inferior.
1088 In this case we want the process thread to stop.
1089 This is handled by having linux_attach set last_resume_kind ==
1090 resume_stop after we return.
1092 If the pid we are attaching to is also the tgid, we attach to and
1093 stop all the existing threads. Otherwise, we attach to pid and
1094 ignore any other threads in the same group as this pid.
1096 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1097 existing threads.
1098 In this case we want the thread to stop.
1099 FIXME: This case is currently not properly handled.
1100 We should wait for the SIGSTOP but don't. Things work apparently
1101 because enough time passes between when we ptrace (ATTACH) and when
1102 gdb makes the next ptrace call on the thread.
1104 On the other hand, if we are currently trying to stop all threads, we
1105 should treat the new thread as if we had sent it a SIGSTOP. This works
1106 because we are guaranteed that the add_lwp call above added us to the
1107 end of the list, and so the new thread has not yet reached
1108 wait_for_sigstop (but will). */
1109 new_lwp->stop_expected = 1;
1111 return 0;
1114 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1115 already attached. Returns true if a new LWP is found, false
1116 otherwise. */
1118 static int
1119 attach_proc_task_lwp_callback (ptid_t ptid)
1121 /* Is this a new thread? */
1122 if (find_thread_ptid (ptid) == NULL)
1124 int lwpid = ptid.lwp ();
1125 int err;
1127 threads_debug_printf ("Found new lwp %d", lwpid);
1129 err = the_linux_target->attach_lwp (ptid);
1131 /* Be quiet if we simply raced with the thread exiting. EPERM
1132 is returned if the thread's task still exists, and is marked
1133 as exited or zombie, as well as other conditions, so in that
1134 case, confirm the status in /proc/PID/status. */
1135 if (err == ESRCH
1136 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1137 threads_debug_printf
1138 ("Cannot attach to lwp %d: thread is gone (%d: %s)",
1139 lwpid, err, safe_strerror (err));
1140 else if (err != 0)
1142 std::string reason
1143 = linux_ptrace_attach_fail_reason_string (ptid, err);
1145 error (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
1148 return 1;
1150 return 0;
1153 static void async_file_mark (void);
/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  Returns 0 on success (errors are reported via
   error/throw).  */

int
linux_process_target::attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid);
  int err;

  /* Delay opening the /proc/PID/mem file until we've successfully
     attached.  */
  proc = add_linux_process_no_mem_file (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = attach_lwp (ptid);
  if (err != 0)
    {
      /* Undo the process bookkeeping added above before reporting
	 the failure to the caller.  */
      this->remove_linux_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  open_proc_mem_file (proc);

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid));
  gdb_assert (initial_thread != nullptr);
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  try
    {
      linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
    }
  catch (const gdb_exception_error &)
    {
      /* Make sure we do not deliver the SIGSTOP to the process.  */
      initial_thread->last_resume_kind = resume_continue;

      this->detach (proc);
      throw;
    }

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      /* Wait for the first stop of any LWP in the process.  */
      lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));
      gdb_assert (lwp != nullptr);

      /* If the stop was for something other than the expected
	 SIGSTOP, leave it pending so it gets reported to GDB.  */
      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      /* Kick the event loop: there may be events to process.  */
      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}
1245 static int
1246 last_thread_of_process_p (int pid)
1248 bool seen_one = false;
1250 thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
1252 if (!seen_one)
1254 /* This is the first thread of this process we see. */
1255 seen_one = true;
1256 return false;
1258 else
1260 /* This is the second thread of this process we see. */
1261 return true;
1265 return thread == NULL;
/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = thr->id.lwp ();

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still
     support debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      /* Save errno before the debug print machinery can clobber it.  */
      int save_errno = errno;

      threads_debug_printf ("kill_lwp (SIGKILL) %s, 0, 0 (%s)",
			    target_pid_to_str (thr->id).c_str (),
			    save_errno ? safe_strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      threads_debug_printf ("PTRACE_KILL %s, 0, 0 (%s)",
			    target_pid_to_str (thr->id).c_str (),
			    save_errno ? safe_strerror (save_errno) : "OK");
    }
}
/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = thr->id.pid ();
  int lwpid = thr->id.lwp ();
  int wstat;
  int res;

  threads_debug_printf ("killing lwp %d, for pid: %d", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}
1354 /* Callback for `for_each_thread'. Kills an lwp of a given process,
1355 except the leader. */
1357 static void
1358 kill_one_lwp_callback (thread_info *thread, int pid)
1360 struct lwp_info *lwp = get_thread_lwp (thread);
1362 /* We avoid killing the first thread here, because of a Linux kernel (at
1363 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1364 the children get a chance to be reaped, it will remain a zombie
1365 forever. */
1367 if (thread->id.lwp () == pid)
1369 threads_debug_printf ("is last of process %s",
1370 target_pid_to_str (thread->id).c_str ());
1371 return;
1374 kill_wait_lwp (lwp);
/* Kill PROCESS: SIGKILL every LWP, reap them all, and mourn the
   process.  Always returns 0.  */

int
linux_process_target::kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  process->for_each_thread ([&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    threads_debug_printf ("cannot find lwp for pid: %d", pid);
  else
    kill_wait_lwp (lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  Returns
   0 when there is no signal to deliver.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind () != TARGET_WAITKIND_STOPPED
	  || thread->last_status.sig () == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      threads_debug_printf ("lwp %s hasn't stopped: no pending signal",
			    target_pid_to_str (thread->id).c_str ());
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      threads_debug_printf ("lwp %s had stopped with extended "
			    "status: no pending signal",
			    target_pid_to_str (thread->id).c_str ());
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  /* Honor GDB's pass/nopass signal table when we know it.  */
  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      threads_debug_printf ("lwp %s had signal %s, but it is in nopass state",
			    target_pid_to_str (thread->id).c_str (),
			    gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      threads_debug_printf ("lwp %s had signal %s, "
			    "but we don't know if we should pass it. "
			    "Default to not.",
			    target_pid_to_str (thread->id).c_str (),
			    gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      threads_debug_printf ("lwp %s has pending signal %s: delivering it",
			    target_pid_to_str (thread->id).c_str (),
			    gdb_signal_to_string (signo));

      /* Return the host signal number, suitable for PTRACE_DETACH.  */
      return WSTOPSIG (status);
    }
}
/* Detach from LWP: clear any pending SIGSTOP, re-deliver the signal
   it last stopped for (if any), PTRACE_DETACH, and remove the LWP
   from our lists.  */

void
linux_process_target::detach_one_lwp (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      threads_debug_printf ("Sending SIGCONT to %s",
			    target_pid_to_str (thread->id).c_str ());

      kill_lwp (thread->id.lwp (), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      low_prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw;
    }

  int lwpid = thread->id.lwp ();
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (lwpid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       lwpid, safe_strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       lwpid, status);
	    }
	}
      else
	{
	  error (_("Can't detach %s: %s"),
		 target_pid_to_str (thread->id).c_str (),
		 safe_strerror (save_errno));
	}
    }
  else
    threads_debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)",
			  target_pid_to_str (thread->id).c_str (),
			  strsignal (sig));

  delete_lwp (lwp);
}
/* Detach from PROCESS and all of its LWPs, clone LWPs first and the
   thread-group leader last.  Always returns 0.  */

int
linux_process_target::detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* As there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  target_stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  process->for_each_thread ([this] (thread_info *thread)
    {
      /* We don't actually detach from the thread group leader just yet.
	 If the thread group exits, we must reap the zombie clone lwps
	 before we're able to reap the leader.  */
      if (thread->id.pid () == thread->id.lwp ())
	return;

      lwp_info *lwp = get_thread_lwp (thread);
      detach_one_lwp (lwp);
    });

  /* Now that the clones are gone, detach from the leader itself.  */
  main_lwp = find_lwp_pid (ptid_t (process->pid));
  gdb_assert (main_lwp != nullptr);
  detach_one_lwp (main_lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
/* Mourn PROCESS: let thread_db mourn it, remove all LWPs that belong
   to it from the lwp list, and finally delete the process entry
   itself.  */

void
linux_process_target::mourn (process_info *process)
{
#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  process->for_each_thread ([this] (thread_info *thread)
    {
      delete_lwp (get_thread_lwp (thread));
    });

  this->remove_linux_process (process);
}
1629 void
1630 linux_process_target::join (int pid)
1632 int status, ret;
1634 do {
1635 ret = my_waitpid (pid, &status, 0);
1636 if (WIFEXITED (status) || WIFSIGNALED (status))
1637 break;
1638 } while (ret != -1 || errno != ECHILD);
1641 /* Return true if the given thread is still alive. */
1643 bool
1644 linux_process_target::thread_alive (ptid_t ptid)
1646 struct lwp_info *lwp = find_lwp_pid (ptid);
1648 /* We assume we always know if a thread exits. If a whole process
1649 exited but we still haven't been able to report it to GDB, we'll
1650 hold on to the last lwp of the dead process. */
1651 if (lwp != NULL)
1652 return !lwp_is_marked_dead (lwp);
1653 else
1654 return 0;
/* Return true if THREAD's pending status is still worth reporting.
   A pending breakpoint stop is discarded if the thread has since
   moved (its PC no longer matches the recorded stop PC).  */

bool
linux_process_target::thread_still_has_status_pending (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      /* get_pc and the debug prints below operate on the current
	 thread, so temporarily make THREAD current.  */
      scoped_restore_current_thread restore_thread;
      switch_to_thread (thread);

      if (pc != lp->stop_pc)
	{
	  threads_debug_printf ("PC of %ld changed",
				thread->id.lwp ());
	  discard = 1;
	}

      if (discard)
	{
	  threads_debug_printf ("discarding pending breakpoint status");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}
1697 /* Returns true if LWP is resumed from the client's perspective. */
1699 static int
1700 lwp_resumed (struct lwp_info *lwp)
1702 struct thread_info *thread = get_lwp_thread (lwp);
1704 if (thread->last_resume_kind != resume_stop)
1705 return 1;
1707 /* Did gdb send us a `vCont;t', but we haven't reported the
1708 corresponding stop to gdb yet? If so, the thread is still
1709 resumed/running from gdb's perspective. */
1710 if (thread->last_resume_kind == resume_stop
1711 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
1712 return 1;
1714 return 0;
/* Return true if THREAD matches PTID and has a reportable pending
   status.  As a side effect, a stale pending breakpoint status causes
   the LWP to be re-resumed the way it was going before.  */

bool
linux_process_target::status_pending_p_callback (thread_info *thread,
						 ptid_t ptid)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!thread->id.matches (ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending (thread))
    {
      /* The pending status went stale; resume the LWP, preserving
	 whether it was stepping.  */
      resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}
1741 struct lwp_info *
1742 find_lwp_pid (ptid_t ptid)
1744 long lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
1745 thread_info *thread = find_thread ([lwp] (thread_info *thr_arg)
1747 return thr_arg->id.lwp () == lwp;
1750 if (thread == NULL)
1751 return NULL;
1753 return get_thread_lwp (thread);
1756 /* Return the number of known LWPs in PROCESS. */
1758 static int
1759 num_lwps (process_info *process)
1761 int count = 0;
1763 process->for_each_thread ([&] (thread_info *thread)
1765 count++;
1768 return count;
1771 /* See nat/linux-nat.h. */
1773 struct lwp_info *
1774 iterate_over_lwps (ptid_t filter,
1775 gdb::function_view<iterate_over_lwps_ftype> callback)
1777 thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
1779 lwp_info *lwp = get_thread_lwp (thr_arg);
1781 return callback (lwp);
1784 if (thread == NULL)
1785 return NULL;
1787 return get_thread_lwp (thread);
/* Check all processes for a zombie, un-stopped thread-group leader
   and delete it (or mark it dead) — see the long comment below for
   why.  Returns true if a new pending exit event was created.  */

bool
linux_process_target::check_zombie_leaders ()
{
  bool new_pending_event = false;

  for_each_process ([&] (process_info *proc)
    {
      pid_t leader_pid = proc->pid;
      lwp_info *leader_lp = find_lwp_pid (ptid_t (leader_pid));

      threads_debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
			    "num_lwps=%d, zombie=%d",
			    leader_pid, leader_lp!= NULL, num_lwps (proc),
			    linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL && !leader_lp->stopped
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  Note this
	     isn't a watertight check.  If the inferior is
	     multi-threaded and is exiting, it may be we see the
	     leader as zombie before we reap all the non-leader
	     threads.  See comments below.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A zombie leader in a multi-threaded program can mean one
	     of three things:

	     #1 - Only the leader exited, not the whole program, e.g.,
	     with pthread_exit.  Since we can't reap the leader's exit
	     status until all other threads are gone and reaped too,
	     we want to delete the zombie leader right away, as it
	     can't be debugged, we can't read its registers, etc.
	     This is the main reason we check for zombie leaders
	     disappearing.

	     #2 - The whole thread-group/process exited (a group exit,
	     via e.g. exit(3), and there is (or will be shortly) an
	     exit reported for each thread in the process, and then
	     finally an exit for the leader once the non-leaders are
	     reaped.

	     #3 - There are 3 or more threads in the group, and a
	     thread other than the leader exec'd.  See comments on
	     exec events at the top of the file.

	     Ideally we would never delete the leader for case #2.
	     Instead, we want to collect the exit status of each
	     non-leader thread, and then finally collect the exit
	     status of the leader as normal and use its exit code as
	     whole-process exit code.  Unfortunately, there's no
	     race-free way to distinguish cases #1 and #2.  We can't
	     assume the exit events for the non-leaders threads are
	     already pending in the kernel, nor can we assume the
	     non-leader threads are in zombie state already.  Between
	     the leader becoming zombie and the non-leaders exiting
	     and becoming zombie themselves, there's a small time
	     window, so such a check would be racy.  Temporarily
	     pausing all threads and checking to see if all threads
	     exit or not before re-resuming them would work in the
	     case that all threads are running right now, but it
	     wouldn't work if some thread is currently already
	     ptrace-stopped, e.g., due to scheduler-locking.

	     So what we do is we delete the leader anyhow, and then
	     later on when we see its exit status, we re-add it back.
	     We also make sure that we only report a whole-process
	     exit when we see the leader exiting, as opposed to when
	     the last LWP in the LWP list exits, which can be a
	     non-leader if we deleted the leader here.  */
	  threads_debug_printf ("Thread group leader %d zombie "
				"(it exited, or another thread execd), "
				"deleting it.",
				leader_pid);

	  thread_info *leader_thread = get_lwp_thread (leader_lp);
	  if (report_exit_events_for (leader_thread))
	    {
	      mark_lwp_dead (leader_lp, W_EXITCODE (0, 0), true);
	      new_pending_event = true;
	    }
	  else
	    delete_lwp (leader_lp);
	}
    });

  return new_pending_event;
}
1879 /* Callback for `find_thread'. Returns the first LWP that is not
1880 stopped. */
1882 static bool
1883 not_stopped_callback (thread_info *thread, ptid_t filter)
1885 if (!thread->id.matches (filter))
1886 return false;
1888 lwp_info *lwp = get_thread_lwp (thread);
1890 return !lwp->stopped;
1893 /* Increment LWP's suspend count. */
1895 static void
1896 lwp_suspended_inc (struct lwp_info *lwp)
1898 lwp->suspended++;
1900 if (lwp->suspended > 4)
1901 threads_debug_printf
1902 ("LWP %ld has a suspiciously high suspend count, suspended=%d",
1903 get_lwp_thread (lwp)->id.lwp (), lwp->suspended);
1906 /* Decrement LWP's suspend count. */
1908 static void
1909 lwp_suspended_decr (struct lwp_info *lwp)
1911 lwp->suspended--;
1913 if (lwp->suspended < 0)
1915 struct thread_info *thread = get_lwp_thread (lwp);
1917 internal_error ("unsuspend LWP %ld, suspended=%d\n", thread->id.lwp (),
1918 lwp->suspended);
/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return true if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp_suspended_inc (lwp);

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp_suspended_decr (lwp);

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads
	      || (lwp->collecting_fast_tracepoint
		  != fast_tpoint_collect_result::not_collecting));

  if (tpoint_related_event)
    {
      threads_debug_printf ("got a tracepoint event");
      return 1;
    }

  return 0;
}
/* Return whether LWP is currently collecting a fast tracepoint in a
   jump pad, filling in *STATUS with the collection details.  */

fast_tpoint_collect_result
linux_process_target::linux_fast_tracepoint_collecting
  (lwp_info *lwp, fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if (low_get_thread_area (thread->id.lwp (), &thread_area) == -1)
    return fast_tpoint_collect_result::not_collecting;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}
/* Default implementation of the low_get_thread_area hook: no thread
   area support on this target, so always fail with -1.  Architecture
   backends that support the in-process agent override this.  */

int
linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  return -1;
}
/* Decide whether LWP, possibly stopped with wait status *WSTAT (or
   NULL), needs to be moved out of a fast tracepoint jump pad before
   its stop can be reported to GDB.  Returns true if the LWP should be
   kept running until it leaves the jump pad.  */

bool
linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
{
  /* The checks below work on the current thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;

      threads_debug_printf
	("Checking whether LWP %ld needs to move out of the jump pad.",
	 current_thread->id.lwp ());

      fast_tpoint_collect_result r
	= linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != fast_tpoint_collect_result::not_collecting)
	    {
	      if (r == fast_tpoint_collect_result::before_insn
		  && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      threads_debug_printf
		("Checking whether LWP %ld needs to move out of the jump pad..."
		 " it does", current_thread->id.lwp ());

	      return true;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint
	    = fast_tpoint_collect_result::not_collecting;

	  if (r != fast_tpoint_collect_result::not_collecting
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, current_thread->id.lwp (),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, current_thread->id.lwp (),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      regcache = get_thread_regcache (current_thread, 1);
	      low_set_pc (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      threads_debug_printf
		("Cancelling fast exit-jump-pad: removing bkpt."
		 "stopping all threads momentarily.");

	      stop_all_lwps (1, lwp);

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  threads_debug_printf
    ("Checking whether LWP %ld needs to move out of the jump pad... no",
     current_thread->id.lwp ());

  return false;
}
/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  threads_debug_printf ("Deferring signal %d for LWP %ld.",
			WSTOPSIG (*wstat), thread->id.lwp ());

  if (debug_threads)
    {
      for (const auto &sig : lwp->pending_signals_to_report)
	threads_debug_printf ("   Already queued %d", sig.signal);

      threads_debug_printf ("   (no more currently queued signals)");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      for (const auto &sig : lwp->pending_signals_to_report)
	{
	  if (sig.signal == WSTOPSIG (*wstat))
	    {
	      threads_debug_printf
		("Not requeuing already queued non-RT signal %d for LWP %ld",
		 sig.signal, thread->id.lwp ());
	      return;
	    }
	}
    }

  lwp->pending_signals_to_report.emplace_back (WSTOPSIG (*wstat));

  /* Capture the signal's siginfo now so it can be restored with
     PTRACE_SETSIGINFO when the signal is finally reported.  */
  ptrace (PTRACE_GETSIGINFO, thread->id.lwp (), (PTRACE_TYPE_ARG3) 0,
	  &lwp->pending_signals_to_report.back ().info);
}
/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  Returns 1 and stores a stop status in *WSTAT
   if a signal was dequeued, 0 if the queue was empty.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (!lwp->pending_signals_to_report.empty ())
    {
      const pending_signal &p_sig = lwp->pending_signals_to_report.front ();

      *wstat = W_STOPCODE (p_sig.signal);
      /* Restore the siginfo captured when the signal was deferred.  */
      if (p_sig.info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, thread->id.lwp (), (PTRACE_TYPE_ARG3) 0,
		&p_sig.info);

      lwp->pending_signals_to_report.pop_front ();

      threads_debug_printf ("Reporting deferred signal %d for LWP %ld.",
			    WSTOPSIG (*wstat), thread->id.lwp ());

      if (debug_threads)
	{
	  for (const auto &sig : lwp->pending_signals_to_report)
	    threads_debug_printf ("   Still queued %d", sig.signal);

	  threads_debug_printf ("   (no more queued signals)");
	}

      return 1;
    }

  return 0;
}
2189 bool
2190 linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
2192 scoped_restore_current_thread restore_thread;
2193 switch_to_thread (get_lwp_thread (child));
2195 if (low_stopped_by_watchpoint ())
2197 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2198 child->stopped_data_address = low_stopped_data_address ();
2201 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
/* Default implementation of the low_stopped_by_watchpoint hook: no
   hardware watchpoint support, so never report a watchpoint stop.
   Architecture backends override this.  */

bool
linux_process_target::low_stopped_by_watchpoint ()
{
  return false;
}
/* Default implementation of the low_stopped_data_address hook: no
   watchpoint support, so there is no data address to report.
   Architecture backends override this.  */

CORE_ADDR
linux_process_target::low_stopped_data_address ()
{
  return 0;
}
2216 /* Return the ptrace options that we want to try to enable. */
2218 static int
2219 linux_low_ptrace_options (int attached)
2221 client_state &cs = get_client_state ();
2222 int options = 0;
2224 if (!attached)
2225 options |= PTRACE_O_EXITKILL;
2227 if (cs.report_fork_events)
2228 options |= PTRACE_O_TRACEFORK;
2230 if (cs.report_vfork_events)
2231 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2233 if (cs.report_exec_events)
2234 options |= PTRACE_O_TRACEEXEC;
2236 options |= PTRACE_O_TRACESYSGOOD;
2238 return options;
/* Do low-level handling of one wait status WSTAT reported by waitpid
   for LWPID.  The event is either recorded as pending on the LWP (to
   be picked later by wait_for_event_filtered), consumed internally,
   or used to re-add an LWP that had been removed from our lists.  */

void
linux_process_target::filter_event (int lwpid, int wstat)
{
  struct lwp_info *child;
  struct thread_info *thread;
  int have_stop_pc = 0;

  child = find_lwp_pid (ptid_t (lwpid));

  /* Check for events reported by anything not in our LWP list.  */
  if (child == nullptr)
    {
      if (WIFSTOPPED (wstat))
	{
	  if (WSTOPSIG (wstat) == SIGTRAP
	      && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
	    {
	      /* A non-leader thread exec'ed after we've seen the
		 leader zombie, and removed it from our lists (in
		 check_zombie_leaders).  The non-leader thread changes
		 its tid to the tgid.  */
	      threads_debug_printf
		("Re-adding thread group leader LWP %d after exec.",
		 lwpid);

	      child = add_lwp (ptid_t (lwpid, lwpid));
	      child->stopped = 1;
	      switch_to_thread (child->thread);
	    }
	  else
	    {
	      /* A process we are controlling has forked and the new
		 child's stop was reported to us by the kernel.  Save
		 its PID and go back to waiting for the fork event to
		 be reported - the stopped process might be returned
		 from waitpid before or after the fork event is.  */
	      threads_debug_printf
		("Saving LWP %d status %s in stopped_pids list",
		 lwpid, status_to_str (wstat).c_str ());
	      add_to_pid_list (&stopped_pids, lwpid, wstat);
	    }
	}
      else
	{
	  /* Don't report an event for the exit of an LWP not in our
	     list, i.e. not part of any inferior we're debugging.
	     This can happen if we detach from a program we originally
	     forked and then it exits.  However, note that we may have
	     earlier deleted a leader of an inferior we're debugging,
	     in check_zombie_leaders.  Re-add it back here if so.  */
	  find_process ([&] (process_info *proc)
	    {
	      if (proc->pid == lwpid)
		{
		  threads_debug_printf
		    ("Re-adding thread group leader LWP %d after exit.",
		     lwpid);

		  child = add_lwp (ptid_t (lwpid, lwpid));
		  return true;
		}
	      return false;
	    });
	}

      /* Still unknown after the checks above: nothing to do.  */
      if (child == nullptr)
	return;
    }

  thread = get_lwp_thread (child);

  child->stopped = 1;

  child->last_status = wstat;

  /* Check if the thread has exited.  */
  if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
    {
      threads_debug_printf ("%d exited", lwpid);

      if (finish_step_over (child))
	{
	  /* Unsuspend all other LWPs, and set them back running again.  */
	  unsuspend_all_lwps (child);
	}

      /* If this is not the leader LWP, then the exit signal was not
	 the end of the debugged application and should be ignored,
	 unless GDB wants to hear about thread exits.  */
      if (report_exit_events_for (thread) || is_leader (thread))
	{
	  /* Since events are serialized to GDB core, and we can't
	     report this one right now.  Leave the status pending for
	     the next time we're able to report it.  */
	  mark_lwp_dead (child, wstat, false);
	  return;
	}
      else
	{
	  delete_lwp (child);
	  return;
	}
    }

  /* Exits were handled above; from here on the status must be a
     ptrace stop.  (The WIFSTOPPED tests below are therefore
     redundant with this assert, but kept byte-for-byte.)  */
  gdb_assert (WIFSTOPPED (wstat));

  if (WIFSTOPPED (wstat))
    {
      /* Architecture-specific setup after inferior is running.  */
      process_info *proc = find_process_pid (thread->id.pid ());

      if (proc->tdesc == NULL)
	{
	  if (proc->attached)
	    {
	      /* This needs to happen after we have attached to the
		 inferior and it is stopped for the first time, but
		 before we access any inferior registers.  */
	      arch_setup_thread (thread);
	    }
	  else
	    {
	      /* The process is started, but GDBserver will do
		 architecture-specific setup after the program stops at
		 the first instruction.  */
	      child->status_pending_p = 1;
	      child->status_pending = wstat;
	      return;
	    }
	}
    }

  /* First stop after spawn/attach: turn on the ptrace event options
     we want (fork/exec/syscall reporting etc.).  */
  if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
    {
      process_info *proc = find_process_pid (thread->id.pid ());
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid, options);
      child->must_set_ptrace_flags = 0;
    }

  /* Always update syscall_state, even if it will be filtered later.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
    {
      /* Syscall stops alternate: an entry stop is followed by a
	 return stop.  */
      child->syscall_state
	= (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
	   ? TARGET_WAITKIND_SYSCALL_RETURN
	   : TARGET_WAITKIND_SYSCALL_ENTRY);
    }
  else
    {
      /* Almost all other ptrace-stops are known to be outside of system
	 calls, with further exceptions in handle_extended_wait.  */
      child->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* Be careful to not overwrite stop_pc until save_stop_reason is
     called.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && linux_is_extended_waitstatus (wstat))
    {
      child->stop_pc = get_pc (child);
      if (handle_extended_wait (&child, wstat))
	{
	  /* The event has been handled, so just return without
	     reporting it.  */
	  return;
	}
    }

  if (linux_wstatus_maybe_breakpoint (wstat))
    {
      if (save_stop_reason (child))
	have_stop_pc = 1;
    }

  if (!have_stop_pc)
    child->stop_pc = get_pc (child);

  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
      && child->stop_expected)
    {
      threads_debug_printf ("Expected stop.");

      child->stop_expected = 0;

      if (thread->last_resume_kind == resume_stop)
	{
	  /* We want to report the stop to the core.  Treat the
	     SIGSTOP as a normal event.  */
	  threads_debug_printf ("resume_stop SIGSTOP caught for %s.",
				target_pid_to_str (thread->id).c_str ());
	}
      else if (stopping_threads != NOT_STOPPING_THREADS)
	{
	  /* Stopping threads.  We don't want this SIGSTOP to end up
	     pending.  */
	  threads_debug_printf ("SIGSTOP caught for %s while stopping threads.",
				target_pid_to_str (thread->id).c_str ());
	  return;
	}
      else
	{
	  /* This is a delayed SIGSTOP.  Filter out the event.  */
	  threads_debug_printf ("%s %s, 0, 0 (discard delayed SIGSTOP)",
				child->stepping ? "step" : "continue",
				target_pid_to_str (thread->id).c_str ());

	  resume_one_lwp (child, child->stepping, 0, NULL);
	  return;
	}
    }

  /* Anything not consumed above is left pending on the LWP for the
     caller to pick up.  */
  child->status_pending_p = 1;
  child->status_pending = wstat;
  return;
}
2459 bool
2460 linux_process_target::maybe_hw_step (thread_info *thread)
2462 if (supports_hardware_single_step ())
2463 return true;
2464 else
2466 /* GDBserver must insert single-step breakpoint for software
2467 single step. */
2468 gdb_assert (has_single_step_breakpoints (thread));
2469 return false;
/* Resume THREAD's LWP if it is stopped but has nothing interesting to
   report: not suspended by a step-over, no pending status, and the
   core has not been told of any event yet (last_status is
   TARGET_WAITKIND_IGNORE).  If the core asked for a step, arrange for
   hardware or software single-stepping as appropriate.  */

void
linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->stopped
      && !lp->suspended
      && !lp->status_pending_p
      && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
    {
      int step = 0;

      if (thread->last_resume_kind == resume_step)
	{
	  if (supports_software_single_step ())
	    install_software_single_step_breakpoints (lp);

	  /* maybe_hw_step returns whether to use a hardware step;
	     with software stepping the breakpoints above do the
	     work.  */
	  step = maybe_hw_step (thread);
	}

      threads_debug_printf ("resuming stopped-resumed LWP %s at %s: step=%d",
			    target_pid_to_str (thread->id).c_str (),
			    paddress (lp->stop_pc), step);

      resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
    }
}
/* Wait for an event from any child of WAIT_PTID, but only report
   events matching FILTER_PTID.  Stores the wait status through
   WSTATP; OPTIONS is passed to waitpid.  Returns the LWP id of the
   event thread, 0 if WNOHANG was set and no event was found, or -1 if
   there are no unwaited-for (resumed) children left.  All kernel
   events are drained and left pending on their LWPs; one matching
   event is then selected at random to avoid starvation.  */

int
linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
					       ptid_t filter_ptid,
					       int *wstatp, int options)
{
  struct thread_info *event_thread;
  struct lwp_info *event_child, *requested_child;
  sigset_t block_mask, prev_mask;

 retry:
  /* N.B. event_thread points to the thread_info struct that contains
     event_child.  Keep them in sync.  */
  event_thread = NULL;
  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
    {
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  threads_debug_printf ("Got a pending child %ld", event_thread->id.lwp ());
	}
    }
  else if (filter_ptid != null_ptid)
    {
      /* Waiting for one specific LWP.  */
      requested_child = find_lwp_pid (filter_ptid);
      gdb_assert (requested_child != nullptr);

      if (stopping_threads == NOT_STOPPING_THREADS
	  && requested_child->status_pending_p
	  && (requested_child->collecting_fast_tracepoint
	      != fast_tpoint_collect_result::not_collecting))
	{
	  /* Mid fast-tracepoint collection: defer the signal and let
	     the LWP finish getting out of the jump pad first.  */
	  enqueue_one_deferred_signal (requested_child,
				       &requested_child->status_pending);
	  requested_child->status_pending_p = 0;
	  requested_child->status_pending = 0;
	  resume_one_lwp (requested_child, 0, 0, NULL);
	}

      if (requested_child->suspended
	  && requested_child->status_pending_p)
	{
	  internal_error ("requesting an event out of a"
			  " suspended child?");
	}

      if (requested_child->status_pending_p)
	{
	  event_child = requested_child;
	  event_thread = get_lwp_thread (event_child);
	}
    }

  if (event_child != NULL)
    {
      threads_debug_printf ("Got an event from pending child %ld (%04x)",
			    event_thread->id.lwp (),
			    event_child->status_pending);

      *wstatp = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      switch_to_thread (event_thread);
      return event_thread->id.lwp ();
    }

  /* But if we don't find a pending event, we'll have to wait.

     We only enter this loop if no process has a pending wait status.
     Thus any action taken in response to a wait status inside this
     loop is responding as soon as we detect the status, not after any
     pending events.  */

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  Block
     all signals while here.  */
  sigfillset (&block_mask);
  gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);

  /* Always pull all events out of the kernel.  We'll randomly select
     an event LWP out of all that have events, to prevent
     starvation.  */
  while (event_child == NULL)
    {
      pid_t ret = 0;

      /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	   thread group still exist, waitpid(TGID, ...) hangs.  That
	   waitpid won't return an exit status until the other threads
	   in the group are reaped.

	 - When a non-leader thread execs, that thread just vanishes
	   without reporting an exit (so we'd hang if we waited for it
	   explicitly in that case).  The exec event is reported to
	   the TGID pid.  */
      errno = 0;
      ret = my_waitpid (-1, wstatp, options | WNOHANG);

      threads_debug_printf ("waitpid(-1, ...) returned %d, %s",
			    ret, errno ? safe_strerror (errno) : "ERRNO-OK");

      if (ret > 0)
	{
	  threads_debug_printf ("waitpid %ld received %s",
				(long) ret, status_to_str (*wstatp).c_str ());

	  /* Filter all events.  IOW, leave all events pending.  We'll
	     randomly select an event LWP out of all that have events
	     below.  */
	  filter_event (ret, *wstatp);
	  /* Retry until nothing comes out of waitpid.  A single
	     SIGCHLD can indicate more than one child stopped.  */
	  continue;
	}

      /* Now that we've pulled all events out of the kernel, resume
	 LWPs that don't have an interesting event to report.  */
      if (stopping_threads == NOT_STOPPING_THREADS)
	for_each_thread ([this] (thread_info *thread)
	  {
	    resume_stopped_resumed_lwps (thread);
	  });

      /* ... and find an LWP with a status to report to the core, if
	 any.  */
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  *wstatp = event_child->status_pending;
	  event_child->status_pending_p = 0;
	  event_child->status_pending = 0;
	  break;
	}

      /* Check for zombie thread group leaders.  Those can't be reaped
	 until all other threads in the thread group are.  */
      if (check_zombie_leaders ())
	goto retry;

      auto not_stopped = [&] (thread_info *thread)
	{
	  return not_stopped_callback (thread, wait_ptid);
	};

      /* If there are no resumed children left in the set of LWPs we
	 want to wait for, bail.  We can't just block in
	 waitpid/sigsuspend, because lwps might have been left stopped
	 in trace-stop state, and we'd be stuck forever waiting for
	 their status to change (which would only happen if we resumed
	 them).  Even if WNOHANG is set, this return code is preferred
	 over 0 (below), as it is more detailed.  */
      if (find_thread (not_stopped) == NULL)
	{
	  threads_debug_printf ("exit (no unwaited-for LWP)");

	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return -1;
	}

      /* No interesting event to report to the caller.  */
      if ((options & WNOHANG))
	{
	  threads_debug_printf ("WNOHANG set, no event found");

	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return 0;
	}

      /* Block until we get an event reported with SIGCHLD.  */
      threads_debug_printf ("sigsuspend'ing");

      sigsuspend (&prev_mask);
      gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
      goto retry;
    }

  /* Restore the caller's signal mask before reporting the event.  */
  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);

  switch_to_thread (event_thread);

  return event_thread->id.lwp ();
}
/* Wait for an event from any child of PTID, reporting events from the
   same set.  Equivalent to wait_for_event_filtered with both the wait
   set and the filter set equal to PTID; see that function for the
   meaning of the return value and of WSTATP/OPTIONS.  */

int
linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
{
  return wait_for_event_filtered (ptid, ptid, wstatp, options);
}
/* Select one LWP out of those that have events pending, and store it
   through ORIG_LP.  If no suitable LWP is found, *ORIG_LP is left
   untouched.  */

static void
select_event_lwp (struct lwp_info **orig_lp)
{
  struct thread_info *event_thread = NULL;

  /* In all-stop, give preference to the LWP that is being
     single-stepped.  There will be at most one, and it's the LWP that
     the core is most interested in.  If we didn't do this, then we'd
     have to handle pending step SIGTRAPs somehow in case the core
     later continues the previously-stepped thread, otherwise we'd
     report the pending SIGTRAP, and the core, not having stepped the
     thread, wouldn't understand what the trap was for, and therefore
     would report it to the user as a random signal.  */
  if (!non_stop)
    {
      event_thread = find_thread ([] (thread_info *thread)
	{
	  lwp_info *lp = get_thread_lwp (thread);

	  /* A stepped thread whose event hasn't been reported yet.  */
	  return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
		  && thread->last_resume_kind == resume_step
		  && lp->status_pending_p);
	});

      if (event_thread != NULL)
	threads_debug_printf
	  ("Select single-step %s",
	   target_pid_to_str (event_thread->id).c_str ());
    }
  if (event_thread == NULL)
    {
      /* No single-stepping LWP.  Select one at random, out of those
	 which have had events.  */
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  lwp_info *lp = get_thread_lwp (thread);

	  /* Only resumed LWPs that have an event pending.  */
	  return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
		  && lp->status_pending_p);
	});
    }

  if (event_thread != NULL)
    {
      struct lwp_info *event_lp = get_thread_lwp (event_thread);

      /* Switch the event LWP.  */
      *orig_lp = event_lp;
    }
}
2761 /* Decrement the suspend count of all LWPs, except EXCEPT, if non
2762 NULL. */
2764 static void
2765 unsuspend_all_lwps (struct lwp_info *except)
2767 for_each_thread ([&] (thread_info *thread)
2769 lwp_info *lwp = get_thread_lwp (thread);
2771 if (lwp != except)
2772 lwp_suspended_decr (lwp);
2776 static bool lwp_running (thread_info *thread);
2778 /* Stabilize threads (move out of jump pads).
2780 If a thread is midway collecting a fast tracepoint, we need to
2781 finish the collection and move it out of the jump pad before
2782 reporting the signal.
2784 This avoids recursion while collecting (when a signal arrives
2785 midway, and the signal handler itself collects), which would trash
2786 the trace buffer. In case the user set a breakpoint in a signal
2787 handler, this avoids the backtrace showing the jump pad, etc..
2788 Most importantly, there are certain things we can't do safely if
2789 threads are stopped in a jump pad (or in its callee's). For
2790 example:
2792 - starting a new trace run. A thread still collecting the
2793 previous run, could trash the trace buffer when resumed. The trace
2794 buffer control structures would have been reset but the thread had
   no way to tell.  The thread could even be midway through memcpy'ing to the
2796 buffer, which would mean that when resumed, it would clobber the
2797 trace buffer that had been set for a new run.
2799 - we can't rewrite/reuse the jump pads for new tracepoints
2800 safely. Say you do tstart while a thread is stopped midway while
2801 collecting. When the thread is later resumed, it finishes the
2802 collection, and returns to the jump pad, to execute the original
2803 instruction that was under the tracepoint jump at the time the
2804 older run had been started. If the jump pad had been rewritten
2805 since for something else in the new run, the thread would now
2806 execute the wrong / random instructions. */
/* Move all threads out of the fast-tracepoint jump pads (see the
   comment above).  Bails out early if some thread is stuck stepping
   over a breakpoint inside a jump pad; otherwise keeps pumping the
   event loop until no thread is running inside a pad, locking each
   stopped thread with a suspend count and deferring any interesting
   signal it got meanwhile.  */

void
linux_process_target::stabilize_threads ()
{
  thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
    {
      return stuck_in_jump_pad (thread);
    });

  if (thread_stuck != NULL)
    {
      threads_debug_printf ("can't stabilize, LWP %ld is stuck in jump pad",
			    thread_stuck->id.lwp ());
      return;
    }

  scoped_restore_current_thread restore_thread;

  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_thread ([this] (thread_info *thread)
    {
      move_out_of_jump_pad (thread);
    });

  /* Loop until all are stopped out of the jump pads.  */
  while (find_thread (lwp_running) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      int wstat;

      /* Note that we go through the full wait event loop.  While
	 moving threads out of jump pad, we need to be able to step
	 over internal breakpoints and such.  */
      wait_1 (minus_one_ptid, &ourstatus, 0);

      if (ourstatus.kind () == TARGET_WAITKIND_STOPPED)
	{
	  lwp = get_thread_lwp (current_thread);

	  /* Lock it.  */
	  lwp_suspended_inc (lwp);

	  /* Signals the core would have seen are deferred so they can
	     be reported once the thread is out of the pad.  */
	  if (ourstatus.sig () != GDB_SIGNAL_0
	      || current_thread->last_resume_kind == resume_stop)
	    {
	      wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.sig ()));
	      enqueue_one_deferred_signal (lwp, &wstat);
	    }
	}
    }

  /* Undo the per-thread locks taken above.  */
  unsuspend_all_lwps (NULL);

  stabilizing_threads = 0;

  if (debug_threads)
    {
      /* Sanity check: by now no thread should be stuck in a pad.  */
      thread_stuck = find_thread ([this] (thread_info *thread)
	{
	  return stuck_in_jump_pad (thread);
	});

      if (thread_stuck != NULL)
	threads_debug_printf
	  ("couldn't stabilize, LWP %ld got stuck in jump pad",
	   thread_stuck->id.lwp ());
    }
}
2879 /* Convenience function that is called when the kernel reports an
2880 event that is not passed out to GDB. */
2882 static ptid_t
2883 ignore_event (struct target_waitstatus *ourstatus)
2885 /* If we got an event, there may still be others, as a single
2886 SIGCHLD can indicate more than one child stopped. This forces
2887 another target_wait call. */
2888 async_file_mark ();
2890 ourstatus->set_ignore ();
2891 return null_ptid;
/* Filter an exit event for EVENT_CHILD before it reaches the core.
   Non-leader thread exits must not be reported as whole-process exits
   (they are rewritten to thread-exit or dropped), while a leader
   thread-exit detected by check_zombie_leaders passes through.
   Returns the ptid of the thread the (possibly rewritten) OURSTATUS
   applies to.  May delete EVENT_CHILD.  */

ptid_t
linux_process_target::filter_exit_event (lwp_info *event_child,
					 target_waitstatus *ourstatus)
{
  struct thread_info *thread = get_lwp_thread (event_child);
  ptid_t ptid = thread->id;

  if (ourstatus->kind () == TARGET_WAITKIND_THREAD_EXITED)
    {
      /* We're reporting a thread exit for the leader.  The exit was
	 detected by check_zombie_leaders.  */
      gdb_assert (is_leader (thread));
      gdb_assert (report_exit_events_for (thread));

      delete_lwp (event_child);
      return ptid;
    }

  /* Note we must filter TARGET_WAITKIND_SIGNALLED as well, otherwise
     if a non-leader thread exits with a signal, we'd report it to the
     core which would interpret it as the whole-process exiting.
     There is no TARGET_WAITKIND_THREAD_SIGNALLED event kind.  */
  if (ourstatus->kind () != TARGET_WAITKIND_EXITED
      && ourstatus->kind () != TARGET_WAITKIND_SIGNALLED)
    return ptid;

  if (!is_leader (thread))
    {
      /* Non-leader exit: either downgrade to a thread-exit event, or
	 swallow it entirely if GDB did not ask for thread exits.  */
      if (report_exit_events_for (thread))
	ourstatus->set_thread_exited (0);
      else
	ourstatus->set_ignore ();

      delete_lwp (event_child);
    }
  return ptid;
}
/* Returns 1 if GDB is interested in any event_child syscalls.  A
   non-empty syscalls_to_catch list on the owning process means a
   "catch syscall" is in effect.  */

static int
gdb_catching_syscalls_p (struct lwp_info *event_child)
{
  return !get_lwp_thread (event_child)->process ()->syscalls_to_catch.empty ();
}
2940 bool
2941 linux_process_target::gdb_catch_this_syscall (lwp_info *event_child)
2943 int sysno;
2944 struct thread_info *thread = get_lwp_thread (event_child);
2945 process_info *proc = thread->process ();
2947 if (proc->syscalls_to_catch.empty ())
2948 return false;
2950 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
2951 return true;
2953 get_syscall_trapinfo (event_child, &sysno);
2955 for (int iter : proc->syscalls_to_catch)
2956 if (iter == sysno)
2957 return true;
2959 return false;
2962 ptid_t
2963 linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
2964 target_wait_flags target_options)
2966 THREADS_SCOPED_DEBUG_ENTER_EXIT;
2968 client_state &cs = get_client_state ();
2969 int w;
2970 struct lwp_info *event_child;
2971 int options;
2972 int pid;
2973 int step_over_finished;
2974 int bp_explains_trap;
2975 int maybe_internal_trap;
2976 int report_to_gdb;
2977 int trace_event;
2978 int in_step_range;
2980 threads_debug_printf ("[%s]", target_pid_to_str (ptid).c_str ());
2982 /* Translate generic target options into linux options. */
2983 options = __WALL;
2984 if (target_options & TARGET_WNOHANG)
2985 options |= WNOHANG;
2987 bp_explains_trap = 0;
2988 trace_event = 0;
2989 in_step_range = 0;
2990 ourstatus->set_ignore ();
2992 bool was_any_resumed = any_resumed ();
2994 if (step_over_bkpt == null_ptid)
2995 pid = wait_for_event (ptid, &w, options);
2996 else
2998 threads_debug_printf ("step_over_bkpt set [%s], doing a blocking wait",
2999 target_pid_to_str (step_over_bkpt).c_str ());
3000 pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3003 if (pid == 0 || (pid == -1 && !was_any_resumed))
3005 gdb_assert (target_options & TARGET_WNOHANG);
3007 threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_IGNORE");
3009 ourstatus->set_ignore ();
3010 return null_ptid;
3012 else if (pid == -1)
3014 threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_NO_RESUMED");
3016 ourstatus->set_no_resumed ();
3017 return null_ptid;
3020 event_child = get_thread_lwp (current_thread);
3022 /* wait_for_event only returns an exit status for the last
3023 child of a process. Report it. */
3024 if (WIFEXITED (w) || WIFSIGNALED (w))
3026 if (WIFEXITED (w))
3028 /* If we already have the exit recorded in waitstatus, use
3029 it. This will happen when we detect a zombie leader,
3030 when we had GDB_THREAD_OPTION_EXIT enabled for it. We
3031 want to report its exit as TARGET_WAITKIND_THREAD_EXITED,
3032 as the whole process hasn't exited yet. */
3033 const target_waitstatus &ws = event_child->waitstatus;
3034 if (ws.kind () != TARGET_WAITKIND_IGNORE)
3036 gdb_assert (ws.kind () == TARGET_WAITKIND_EXITED
3037 || ws.kind () == TARGET_WAITKIND_THREAD_EXITED);
3038 *ourstatus = ws;
3040 else
3041 ourstatus->set_exited (WEXITSTATUS (w));
3043 threads_debug_printf
3044 ("ret = %s, exited with retcode %d",
3045 target_pid_to_str (current_thread->id).c_str (),
3046 WEXITSTATUS (w));
3048 else
3050 ourstatus->set_signalled (gdb_signal_from_host (WTERMSIG (w)));
3052 threads_debug_printf
3053 ("ret = %s, terminated with signal %d",
3054 target_pid_to_str (current_thread->id).c_str (),
3055 WTERMSIG (w));
3058 return filter_exit_event (event_child, ourstatus);
3061 /* If step-over executes a breakpoint instruction, in the case of a
3062 hardware single step it means a gdb/gdbserver breakpoint had been
3063 planted on top of a permanent breakpoint, in the case of a software
3064 single step it may just mean that gdbserver hit the reinsert breakpoint.
3065 The PC has been adjusted by save_stop_reason to point at
3066 the breakpoint address.
3067 So in the case of the hardware single step advance the PC manually
3068 past the breakpoint and in the case of software single step advance only
3069 if it's not the single_step_breakpoint we are hitting.
3070 This avoids that a program would keep trapping a permanent breakpoint
3071 forever. */
3072 if (step_over_bkpt != null_ptid
3073 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3074 && (event_child->stepping
3075 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3077 int increment_pc = 0;
3078 int breakpoint_kind = 0;
3079 CORE_ADDR stop_pc = event_child->stop_pc;
3081 breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
3082 sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3084 threads_debug_printf
3085 ("step-over for %s executed software breakpoint",
3086 target_pid_to_str (current_thread->id).c_str ());
3088 if (increment_pc != 0)
3090 struct regcache *regcache
3091 = get_thread_regcache (current_thread, 1);
3093 event_child->stop_pc += increment_pc;
3094 low_set_pc (regcache, event_child->stop_pc);
3096 if (!low_breakpoint_at (event_child->stop_pc))
3097 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3101 /* If this event was not handled before, and is not a SIGTRAP, we
3102 report it. SIGILL and SIGSEGV are also treated as traps in case
3103 a breakpoint is inserted at the current PC. If this target does
3104 not support internal breakpoints at all, we also report the
3105 SIGTRAP without further processing; it's of no concern to us. */
3106 maybe_internal_trap
3107 = (low_supports_breakpoints ()
3108 && (WSTOPSIG (w) == SIGTRAP
3109 || ((WSTOPSIG (w) == SIGILL
3110 || WSTOPSIG (w) == SIGSEGV)
3111 && low_breakpoint_at (event_child->stop_pc))));
3113 if (maybe_internal_trap)
3115 /* Handle anything that requires bookkeeping before deciding to
3116 report the event or continue waiting. */
3118 /* First check if we can explain the SIGTRAP with an internal
3119 breakpoint, or if we should possibly report the event to GDB.
3120 Do this before anything that may remove or insert a
3121 breakpoint. */
3122 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3124 /* We have a SIGTRAP, possibly a step-over dance has just
3125 finished. If so, tweak the state machine accordingly,
3126 reinsert breakpoints and delete any single-step
3127 breakpoints. */
3128 step_over_finished = finish_step_over (event_child);
3130 /* Now invoke the callbacks of any internal breakpoints there. */
3131 check_breakpoints (event_child->stop_pc);
3133 /* Handle tracepoint data collecting. This may overflow the
3134 trace buffer, and cause a tracing stop, removing
3135 breakpoints. */
3136 trace_event = handle_tracepoints (event_child);
3138 if (bp_explains_trap)
3139 threads_debug_printf ("Hit a gdbserver breakpoint.");
3141 else
3143 /* We have some other signal, possibly a step-over dance was in
3144 progress, and it should be cancelled too. */
3145 step_over_finished = finish_step_over (event_child);
3148 /* We have all the data we need. Either report the event to GDB, or
3149 resume threads and keep waiting for more. */
3151 /* If we're collecting a fast tracepoint, finish the collection and
3152 move out of the jump pad before delivering a signal. See
3153 linux_stabilize_threads. */
3155 if (WIFSTOPPED (w)
3156 && WSTOPSIG (w) != SIGTRAP
3157 && supports_fast_tracepoints ()
3158 && agent_loaded_p ())
3160 threads_debug_printf ("Got signal %d for LWP %ld. Check if we need "
3161 "to defer or adjust it.",
3162 WSTOPSIG (w), current_thread->id.lwp ());
3164 /* Allow debugging the jump pad itself. */
3165 if (current_thread->last_resume_kind != resume_step
3166 && maybe_move_out_of_jump_pad (event_child, &w))
3168 enqueue_one_deferred_signal (event_child, &w);
3170 threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad)",
3171 WSTOPSIG (w), current_thread->id.lwp ());
3173 resume_one_lwp (event_child, 0, 0, NULL);
3175 return ignore_event (ourstatus);
3179 if (event_child->collecting_fast_tracepoint
3180 != fast_tpoint_collect_result::not_collecting)
3182 threads_debug_printf
3183 ("LWP %ld was trying to move out of the jump pad (%d). "
3184 "Check if we're already there.",
3185 current_thread->id.lwp (),
3186 (int) event_child->collecting_fast_tracepoint);
3188 trace_event = 1;
3190 event_child->collecting_fast_tracepoint
3191 = linux_fast_tracepoint_collecting (event_child, NULL);
3193 if (event_child->collecting_fast_tracepoint
3194 != fast_tpoint_collect_result::before_insn)
3196 /* No longer need this breakpoint. */
3197 if (event_child->exit_jump_pad_bkpt != NULL)
3199 threads_debug_printf
3200 ("No longer need exit-jump-pad bkpt; removing it."
3201 "stopping all threads momentarily.");
3203 /* Other running threads could hit this breakpoint.
3204 We don't handle moribund locations like GDB does,
3205 instead we always pause all threads when removing
3206 breakpoints, so that any step-over or
3207 decr_pc_after_break adjustment is always taken
3208 care of while the breakpoint is still
3209 inserted. */
3210 stop_all_lwps (1, event_child);
3212 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3213 event_child->exit_jump_pad_bkpt = NULL;
3215 unstop_all_lwps (1, event_child);
3217 gdb_assert (event_child->suspended >= 0);
3221 if (event_child->collecting_fast_tracepoint
3222 == fast_tpoint_collect_result::not_collecting)
3224 threads_debug_printf
3225 ("fast tracepoint finished collecting successfully.");
3227 /* We may have a deferred signal to report. */
3228 if (dequeue_one_deferred_signal (event_child, &w))
3229 threads_debug_printf ("dequeued one signal.");
3230 else
3232 threads_debug_printf ("no deferred signals.");
3234 if (stabilizing_threads)
3236 ourstatus->set_stopped (GDB_SIGNAL_0);
3238 threads_debug_printf
3239 ("ret = %s, stopped while stabilizing threads",
3240 target_pid_to_str (current_thread->id).c_str ());
3242 return current_thread->id;
3248 /* Check whether GDB would be interested in this event. */
3250 /* Check if GDB is interested in this syscall. */
3251 if (WIFSTOPPED (w)
3252 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3253 && !gdb_catch_this_syscall (event_child))
3255 threads_debug_printf ("Ignored syscall for LWP %ld.",
3256 current_thread->id.lwp ());
3258 resume_one_lwp (event_child, event_child->stepping, 0, NULL);
3260 return ignore_event (ourstatus);
3263 /* If GDB is not interested in this signal, don't stop other
3264 threads, and don't report it to GDB. Just resume the inferior
3265 right away. We do this for threading-related signals as well as
3266 any that GDB specifically requested we ignore. But never ignore
3267 SIGSTOP if we sent it ourselves, and do not ignore signals when
3268 stepping - they may require special handling to skip the signal
3269 handler. Also never ignore signals that could be caused by a
3270 breakpoint. */
3271 if (WIFSTOPPED (w)
3272 && current_thread->last_resume_kind != resume_step
3273 && (
3274 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3275 (current_process ()->priv->thread_db != NULL
3276 && (WSTOPSIG (w) == __SIGRTMIN
3277 || WSTOPSIG (w) == __SIGRTMIN + 1))
3279 #endif
3280 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3281 && !(WSTOPSIG (w) == SIGSTOP
3282 && current_thread->last_resume_kind == resume_stop)
3283 && !linux_wstatus_maybe_breakpoint (w))))
3285 siginfo_t info, *info_p;
3287 threads_debug_printf ("Ignored signal %d for LWP %ld.",
3288 WSTOPSIG (w), current_thread->id.lwp ());
3290 if (ptrace (PTRACE_GETSIGINFO, current_thread->id.lwp (),
3291 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3292 info_p = &info;
3293 else
3294 info_p = NULL;
3296 if (step_over_finished)
3298 /* We cancelled this thread's step-over above. We still
3299 need to unsuspend all other LWPs, and set them back
3300 running again while the signal handler runs. */
3301 unsuspend_all_lwps (event_child);
3303 /* Enqueue the pending signal info so that proceed_all_lwps
3304 doesn't lose it. */
3305 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3307 proceed_all_lwps ();
3309 else
3311 resume_one_lwp (event_child, event_child->stepping,
3312 WSTOPSIG (w), info_p);
3315 return ignore_event (ourstatus);
3318 /* Note that all addresses are always "out of the step range" when
3319 there's no range to begin with. */
3320 in_step_range = lwp_in_step_range (event_child);
3322 /* If GDB wanted this thread to single step, and the thread is out
3323 of the step range, we always want to report the SIGTRAP, and let
3324 GDB handle it. Watchpoints should always be reported. So should
3325 signals we can't explain. A SIGTRAP we can't explain could be a
3326 GDB breakpoint --- we may or not support Z0 breakpoints. If we
3327 do, we're be able to handle GDB breakpoints on top of internal
3328 breakpoints, by handling the internal breakpoint and still
3329 reporting the event to GDB. If we don't, we're out of luck, GDB
3330 won't see the breakpoint hit. If we see a single-step event but
3331 the thread should be continuing, don't pass the trap to gdb.
3332 That indicates that we had previously finished a single-step but
3333 left the single-step pending -- see
3334 complete_ongoing_step_over. */
3335 report_to_gdb = (!maybe_internal_trap
3336 || (current_thread->last_resume_kind == resume_step
3337 && !in_step_range)
3338 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3339 || (!in_step_range
3340 && !bp_explains_trap
3341 && !trace_event
3342 && !step_over_finished
3343 && !(current_thread->last_resume_kind == resume_continue
3344 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3345 || (gdb_breakpoint_here (event_child->stop_pc)
3346 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3347 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3348 || event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE);
3350 run_breakpoint_commands (event_child->stop_pc);
3352 /* We found no reason GDB would want us to stop. We either hit one
3353 of our own breakpoints, or finished an internal step GDB
3354 shouldn't know about. */
3355 if (!report_to_gdb)
3357 if (bp_explains_trap)
3358 threads_debug_printf ("Hit a gdbserver breakpoint.");
3360 if (step_over_finished)
3361 threads_debug_printf ("Step-over finished.");
3363 if (trace_event)
3364 threads_debug_printf ("Tracepoint event.");
3366 if (lwp_in_step_range (event_child))
3367 threads_debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).",
3368 paddress (event_child->stop_pc),
3369 paddress (event_child->step_range_start),
3370 paddress (event_child->step_range_end));
3372 /* We're not reporting this breakpoint to GDB, so apply the
3373 decr_pc_after_break adjustment to the inferior's regcache
3374 ourselves. */
3376 if (low_supports_breakpoints ())
3378 struct regcache *regcache
3379 = get_thread_regcache (current_thread, 1);
3380 low_set_pc (regcache, event_child->stop_pc);
3383 if (step_over_finished)
3385 /* If we have finished stepping over a breakpoint, we've
3386 stopped and suspended all LWPs momentarily except the
3387 stepping one. This is where we resume them all again.
3388 We're going to keep waiting, so use proceed, which
3389 handles stepping over the next breakpoint. */
3390 unsuspend_all_lwps (event_child);
3392 else
3394 /* Remove the single-step breakpoints if any. Note that
3395 there isn't single-step breakpoint if we finished stepping
3396 over. */
3397 if (supports_software_single_step ()
3398 && has_single_step_breakpoints (current_thread))
3400 stop_all_lwps (0, event_child);
3401 delete_single_step_breakpoints (current_thread);
3402 unstop_all_lwps (0, event_child);
3406 threads_debug_printf ("proceeding all threads.");
3408 proceed_all_lwps ();
3410 return ignore_event (ourstatus);
3413 if (debug_threads)
3415 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3416 threads_debug_printf ("LWP %ld: extended event with waitstatus %s",
3417 get_lwp_thread (event_child)->id.lwp (),
3418 event_child->waitstatus.to_string ().c_str ());
3420 if (current_thread->last_resume_kind == resume_step)
3422 if (event_child->step_range_start == event_child->step_range_end)
3423 threads_debug_printf
3424 ("GDB wanted to single-step, reporting event.");
3425 else if (!lwp_in_step_range (event_child))
3426 threads_debug_printf ("Out of step range, reporting event.");
3429 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3430 threads_debug_printf ("Stopped by watchpoint.");
3431 else if (gdb_breakpoint_here (event_child->stop_pc))
3432 threads_debug_printf ("Stopped by GDB breakpoint.");
3435 threads_debug_printf ("Hit a non-gdbserver trap event.");
3437 /* Alright, we're going to report a stop. */
3439 /* Remove single-step breakpoints. */
3440 if (supports_software_single_step ())
3442 /* Remove single-step breakpoints or not. It it is true, stop all
3443 lwps, so that other threads won't hit the breakpoint in the
3444 staled memory. */
3445 int remove_single_step_breakpoints_p = 0;
3447 if (non_stop)
3449 remove_single_step_breakpoints_p
3450 = has_single_step_breakpoints (current_thread);
3452 else
3454 /* In all-stop, a stop reply cancels all previous resume
3455 requests. Delete all single-step breakpoints. */
3457 find_thread ([&] (thread_info *thread) {
3458 if (has_single_step_breakpoints (thread))
3460 remove_single_step_breakpoints_p = 1;
3461 return true;
3464 return false;
3468 if (remove_single_step_breakpoints_p)
3470 /* If we remove single-step breakpoints from memory, stop all lwps,
3471 so that other threads won't hit the breakpoint in the staled
3472 memory. */
3473 stop_all_lwps (0, event_child);
3475 if (non_stop)
3477 gdb_assert (has_single_step_breakpoints (current_thread));
3478 delete_single_step_breakpoints (current_thread);
3480 else
3482 for_each_thread ([] (thread_info *thread){
3483 if (has_single_step_breakpoints (thread))
3484 delete_single_step_breakpoints (thread);
3488 unstop_all_lwps (0, event_child);
3492 if (!stabilizing_threads)
3494 /* In all-stop, stop all threads. */
3495 if (!non_stop)
3496 stop_all_lwps (0, NULL);
3498 if (step_over_finished)
3500 if (!non_stop)
3502 /* If we were doing a step-over, all other threads but
3503 the stepping one had been paused in start_step_over,
3504 with their suspend counts incremented. We don't want
3505 to do a full unstop/unpause, because we're in
3506 all-stop mode (so we want threads stopped), but we
3507 still need to unsuspend the other threads, to
3508 decrement their `suspended' count back. */
3509 unsuspend_all_lwps (event_child);
3511 else
3513 /* If we just finished a step-over, then all threads had
3514 been momentarily paused. In all-stop, that's fine,
3515 we want threads stopped by now anyway. In non-stop,
3516 we need to re-resume threads that GDB wanted to be
3517 running. */
3518 unstop_all_lwps (1, event_child);
3522 /* If we're not waiting for a specific LWP, choose an event LWP
3523 from among those that have had events. Giving equal priority
3524 to all LWPs that have had events helps prevent
3525 starvation. */
3526 if (ptid == minus_one_ptid)
3528 event_child->status_pending_p = 1;
3529 event_child->status_pending = w;
3531 select_event_lwp (&event_child);
3533 /* current_thread and event_child must stay in sync. */
3534 switch_to_thread (get_lwp_thread (event_child));
3536 event_child->status_pending_p = 0;
3537 w = event_child->status_pending;
3541 /* Stabilize threads (move out of jump pads). */
3542 if (!non_stop)
3543 target_stabilize_threads ();
3545 else
3547 /* If we just finished a step-over, then all threads had been
3548 momentarily paused. In all-stop, that's fine, we want
3549 threads stopped by now anyway. In non-stop, we need to
3550 re-resume threads that GDB wanted to be running. */
3551 if (step_over_finished)
3552 unstop_all_lwps (1, event_child);
3555 /* At this point, we haven't set OURSTATUS. This is where we do it. */
3556 gdb_assert (ourstatus->kind () == TARGET_WAITKIND_IGNORE);
3558 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3560 /* If the reported event is an exit, fork, vfork, clone or exec,
3561 let GDB know. */
3563 /* Break the unreported fork/vfork/clone relationship chain. */
3564 if (is_new_child_status (event_child->waitstatus.kind ()))
3566 event_child->relative->relative = NULL;
3567 event_child->relative = NULL;
3570 *ourstatus = event_child->waitstatus;
3571 /* Clear the event lwp's waitstatus since we handled it already. */
3572 event_child->waitstatus.set_ignore ();
3574 else
3576 /* The LWP stopped due to a plain signal or a syscall signal. Either way,
3577 event_child->waitstatus wasn't filled in with the details, so look at
3578 the wait status W. */
3579 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3581 int syscall_number;
3583 get_syscall_trapinfo (event_child, &syscall_number);
3584 if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
3585 ourstatus->set_syscall_entry (syscall_number);
3586 else if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
3587 ourstatus->set_syscall_return (syscall_number);
3588 else
3589 gdb_assert_not_reached ("unexpected syscall state");
3591 else if (current_thread->last_resume_kind == resume_stop
3592 && WSTOPSIG (w) == SIGSTOP)
3594 /* A thread that has been requested to stop by GDB with vCont;t,
3595 and it stopped cleanly, so report as SIG0. The use of
3596 SIGSTOP is an implementation detail. */
3597 ourstatus->set_stopped (GDB_SIGNAL_0);
3599 else
3600 ourstatus->set_stopped (gdb_signal_from_host (WSTOPSIG (w)));
3603 /* Now that we've selected our final event LWP, un-adjust its PC if
3604 it was a software breakpoint, and the client doesn't know we can
3605 adjust the breakpoint ourselves. */
3606 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3607 && !cs.swbreak_feature)
3609 int decr_pc = low_decr_pc_after_break ();
3611 if (decr_pc != 0)
3613 struct regcache *regcache
3614 = get_thread_regcache (current_thread, 1);
3615 low_set_pc (regcache, event_child->stop_pc + decr_pc);
3619 gdb_assert (step_over_bkpt == null_ptid);
3621 threads_debug_printf ("ret = %s, %s",
3622 target_pid_to_str (current_thread->id).c_str (),
3623 ourstatus->to_string ().c_str ());
3625 return filter_exit_event (event_child, ourstatus);
3628 /* Get rid of any pending event in the pipe. */
3629 static void
3630 async_file_flush (void)
3632 linux_event_pipe.flush ();
3635 /* Put something in the pipe, so the event loop wakes up. */
3636 static void
3637 async_file_mark (void)
3639 linux_event_pipe.mark ();
3642 ptid_t
3643 linux_process_target::wait (ptid_t ptid,
3644 target_waitstatus *ourstatus,
3645 target_wait_flags target_options)
3647 ptid_t event_ptid;
3649 /* Flush the async file first. */
3650 if (target_is_async_p ())
3651 async_file_flush ();
3655 event_ptid = wait_1 (ptid, ourstatus, target_options);
3657 while ((target_options & TARGET_WNOHANG) == 0
3658 && ourstatus->kind () == TARGET_WAITKIND_IGNORE);
3660 /* If at least one stop was reported, there may be more. A single
3661 SIGCHLD can signal more than one child stop. */
3662 if (target_is_async_p ()
3663 && (target_options & TARGET_WNOHANG) != 0
3664 && event_ptid != null_ptid)
3665 async_file_mark ();
3667 return event_ptid;
3670 /* Send a signal to an LWP. */
3672 static int
3673 kill_lwp (unsigned long lwpid, int signo)
3675 int ret;
3677 errno = 0;
3678 ret = syscall (__NR_tkill, lwpid, signo);
3679 if (errno == ENOSYS)
3681 /* If tkill fails, then we are not using nptl threads, a
3682 configuration we no longer support. */
3683 perror_with_name (("tkill"));
3685 return ret;
3688 void
3689 linux_stop_lwp (struct lwp_info *lwp)
3691 send_sigstop (lwp);
3694 static void
3695 send_sigstop (struct lwp_info *lwp)
3697 int pid = get_lwp_thread (lwp)->id.lwp ();
3699 /* If we already have a pending stop signal for this process, don't
3700 send another. */
3701 if (lwp->stop_expected)
3703 threads_debug_printf ("Have pending sigstop for lwp %d", pid);
3705 return;
3708 threads_debug_printf ("Sending sigstop to lwp %d", pid);
3710 lwp->stop_expected = 1;
3711 kill_lwp (pid, SIGSTOP);
3714 static void
3715 send_sigstop (thread_info *thread, lwp_info *except)
3717 struct lwp_info *lwp = get_thread_lwp (thread);
3719 /* Ignore EXCEPT. */
3720 if (lwp == except)
3721 return;
3723 if (lwp->stopped)
3724 return;
3726 send_sigstop (lwp);
3729 /* Increment the suspend count of an LWP, and stop it, if not stopped
3730 yet. */
3731 static void
3732 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3734 struct lwp_info *lwp = get_thread_lwp (thread);
3736 /* Ignore EXCEPT. */
3737 if (lwp == except)
3738 return;
3740 lwp_suspended_inc (lwp);
3742 send_sigstop (thread, except);
/* Mark LWP dead, with WSTAT as exit status pending to report later.
   If THREAD_EVENT is true, interpret WSTAT as a thread exit event
   instead of a process exit event.  This is meaningful for the leader
   thread, as we normally report a process-wide exit event when we see
   the leader exit, and a thread exit event when we see any other
   thread exit.  */

static void
mark_lwp_dead (struct lwp_info *lwp, int wstat, bool thread_event)
{
  /* Store the exit status for later.  */
  lwp->status_pending_p = 1;
  lwp->status_pending = wstat;

  /* Store in waitstatus as well, as there's nothing else to process
     for this event.  */
  if (WIFEXITED (wstat))
    {
      if (thread_event)
	lwp->waitstatus.set_thread_exited (WEXITSTATUS (wstat));
      else
	lwp->waitstatus.set_exited (WEXITSTATUS (wstat));
    }
  else if (WIFSIGNALED (wstat))
    {
      /* Signalled exits are only ever reported process-wide, never as
	 a per-thread event.  */
      gdb_assert (!thread_event);
      lwp->waitstatus.set_signalled (gdb_signal_from_host (WTERMSIG (wstat)));
    }
  else
    gdb_assert_not_reached ("unknown status kind");

  /* Prevent trying to stop it.  */
  lwp->stopped = 1;

  /* No further stops are expected from a dead lwp.  */
  lwp->stop_expected = 0;
}
3783 /* Return true if LWP has exited already, and has a pending exit event
3784 to report to GDB. */
3786 static int
3787 lwp_is_marked_dead (struct lwp_info *lwp)
3789 return (lwp->status_pending_p
3790 && (WIFEXITED (lwp->status_pending)
3791 || WIFSIGNALED (lwp->status_pending)));
/* Pull events from all children until there are no unwaited-for
   children left, leaving the events pending; used after SIGSTOPs have
   been sent to all LWPs (see stop_all_lwps).  If the previously
   current thread died in the process, leave no thread selected.  */

void
linux_process_target::wait_for_sigstop ()
{
  struct thread_info *saved_thread;
  ptid_t saved_tid;
  int wstat;
  int ret;

  saved_thread = current_thread;
  if (saved_thread != NULL)
    saved_tid = saved_thread->id;
  else
    saved_tid = null_ptid; /* avoid bogus unused warning */

  scoped_restore_current_thread restore_thread;

  threads_debug_printf ("pulling events");

  /* Passing NULL_PTID as filter indicates we want all events to be
     left pending.  Eventually this returns when there are no
     unwaited-for children left.  */
  ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
  gdb_assert (ret == -1);

  if (saved_thread == NULL || mythread_alive (saved_tid))
    return;
  else
    {
      threads_debug_printf ("Previously current thread died.");

      /* We can't change the current inferior behind GDB's back,
	 otherwise, a subsequent command may apply to the wrong
	 process.  */
      restore_thread.dont_restore ();
      switch_to_thread (nullptr);
    }
}
3832 bool
3833 linux_process_target::stuck_in_jump_pad (thread_info *thread)
3835 struct lwp_info *lwp = get_thread_lwp (thread);
3837 if (lwp->suspended != 0)
3839 internal_error ("LWP %ld is suspended, suspended=%d\n",
3840 thread->id.lwp (), lwp->suspended);
3842 gdb_assert (lwp->stopped);
3844 /* Allow debugging the jump pad, gdb_collect, etc.. */
3845 return (supports_fast_tracepoints ()
3846 && agent_loaded_p ()
3847 && (gdb_breakpoint_here (lwp->stop_pc)
3848 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3849 || thread->last_resume_kind == resume_step)
3850 && (linux_fast_tracepoint_collecting (lwp, NULL)
3851 != fast_tpoint_collect_result::not_collecting));
3854 void
3855 linux_process_target::move_out_of_jump_pad (thread_info *thread)
3857 struct lwp_info *lwp = get_thread_lwp (thread);
3858 int *wstat;
3860 if (lwp->suspended != 0)
3862 internal_error ("LWP %ld is suspended, suspended=%d\n",
3863 thread->id.lwp (), lwp->suspended);
3865 gdb_assert (lwp->stopped);
3867 /* For gdb_breakpoint_here. */
3868 scoped_restore_current_thread restore_thread;
3869 switch_to_thread (thread);
3871 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3873 /* Allow debugging the jump pad, gdb_collect, etc. */
3874 if (!gdb_breakpoint_here (lwp->stop_pc)
3875 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3876 && thread->last_resume_kind != resume_step
3877 && maybe_move_out_of_jump_pad (lwp, wstat))
3879 threads_debug_printf ("LWP %ld needs stabilizing (in jump pad)",
3880 thread->id.lwp ());
3882 if (wstat)
3884 lwp->status_pending_p = 0;
3885 enqueue_one_deferred_signal (lwp, wstat);
3887 threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad",
3888 WSTOPSIG (*wstat), thread->id.lwp ());
3891 resume_one_lwp (lwp, 0, 0, NULL);
3893 else
3894 lwp_suspended_inc (lwp);
3897 static bool
3898 lwp_running (thread_info *thread)
3900 struct lwp_info *lwp = get_thread_lwp (thread);
3902 if (lwp_is_marked_dead (lwp))
3903 return false;
3905 return !lwp->stopped;
/* Stop (and, if SUSPEND, also suspend) all LWPs except EXCEPT, then
   wait until they have all reported their stop.  */

void
linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
{
  /* Should not be called recursively.  */
  gdb_assert (stopping_threads == NOT_STOPPING_THREADS);

  THREADS_SCOPED_DEBUG_ENTER_EXIT;

  threads_debug_printf
    ("%s, except=%s", suspend ? "stop-and-suspend" : "stop",
     (except != NULL
      ? target_pid_to_str (get_lwp_thread (except)->id).c_str ()
      : "none"));

  /* Record which flavor of stop is in progress, so that other code
     can tell we are mid-stop.  */
  stopping_threads = (suspend
		      ? STOPPING_AND_SUSPENDING_THREADS
		      : STOPPING_THREADS);

  if (suspend)
    for_each_thread ([&] (thread_info *thread)
      {
	suspend_and_send_sigstop (thread, except);
      });
  else
    for_each_thread ([&] (thread_info *thread)
      {
	send_sigstop (thread, except);
      });

  wait_for_sigstop ();
  stopping_threads = NOT_STOPPING_THREADS;

  threads_debug_printf ("setting stopping_threads back to !stopping");
}
3943 /* Enqueue one signal in the chain of signals which need to be
3944 delivered to this process on next resume. */
3946 static void
3947 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
3949 lwp->pending_signals.emplace_back (signal);
3950 if (info == nullptr)
3951 memset (&lwp->pending_signals.back ().info, 0, sizeof (siginfo_t));
3952 else
3953 lwp->pending_signals.back ().info = *info;
3956 void
3957 linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
3959 struct thread_info *thread = get_lwp_thread (lwp);
3960 struct regcache *regcache = get_thread_regcache (thread, 1);
3962 scoped_restore_current_thread restore_thread;
3964 switch_to_thread (thread);
3965 std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);
3967 for (CORE_ADDR pc : next_pcs)
3968 set_single_step_breakpoint (pc, current_thread->id);
3972 linux_process_target::single_step (lwp_info* lwp)
3974 int step = 0;
3976 if (supports_hardware_single_step ())
3978 step = 1;
3980 else if (supports_software_single_step ())
3982 install_software_single_step_breakpoints (lwp);
3983 step = 0;
3985 else
3986 threads_debug_printf ("stepping is not implemented on this target");
3988 return step;
3991 /* The signal can be delivered to the inferior if we are not trying to
3992 finish a fast tracepoint collect. Since signal can be delivered in
3993 the step-over, the program may go to signal handler and trap again
3994 after return from the signal handler. We can live with the spurious
3995 double traps. */
3997 static int
3998 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4000 return (lwp->collecting_fast_tracepoint
4001 == fast_tpoint_collect_result::not_collecting);
/* Resume execution of LWP, delivering SIGNAL (0 for none, with INFO
   as its optional siginfo) and single-stepping if STEP.  Throws on
   ptrace failure; see resume_one_lwp for the catching wrapper.  */

void
linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
					    int signal, siginfo_t *info)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int ptrace_request;

  /* Note that target description may not be initialised
     (proc->tdesc == NULL) at this point because the program hasn't
     stopped at the first instruction yet.  It means GDBserver skips
     the extra traps from the wrapper program (see option --wrapper).
     Code in this function that requires register access should be
     guarded by proc->tdesc == NULL or something else.  */

  /* Nothing to do if the LWP is already running.  */
  if (lwp->stopped == 0)
    return;

  gdb_assert (lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);

  fast_tpoint_collect_result fast_tp_collecting
    = lwp->collecting_fast_tracepoint;

  gdb_assert (!stabilizing_threads
	      || (fast_tp_collecting
		  != fast_tpoint_collect_result::not_collecting));

  /* Cancel actions that rely on GDB not changing the PC (e.g., the
     user used the "jump" command, or "set $pc = foo").  */
  if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
    {
      /* Collecting 'while-stepping' actions doesn't make sense
	 anymore.  */
      release_while_stepping_state_list (thread);
    }

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if it can't be delivered to the
     inferior right now.  */
  if (signal != 0
      && (lwp->status_pending_p
	  || !lwp->pending_signals.empty ()
	  || !lwp_signal_can_be_delivered (lwp)))
    {
      enqueue_pending_signal (lwp, signal, info);

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  if (lwp->status_pending_p)
    {
      threads_debug_printf
	("Not resuming lwp %ld (%s, stop %s); has pending status",
	 thread->id.lwp (), step ? "step" : "continue",
	 lwp->stop_expected ? "expected" : "not expected");
      return;
    }

  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      threads_debug_printf (" pending reinsert at 0x%s",
			    paddress (lwp->bp_reinsert));

      if (supports_hardware_single_step ())
	{
	  if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
	    {
	      if (step == 0)
		warning ("BAD - reinserting but not stepping.");
	      if (lwp->suspended)
		warning ("BAD - reinserting and suspended(%d).",
			 lwp->suspended);
	    }
	}

      step = maybe_hw_step (thread);
    }

  if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
    threads_debug_printf
      ("lwp %ld wants to get out of fast tracepoint jump pad "
       "(exit-jump-pad-bkpt)", thread->id.lwp ());

  else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
    {
      threads_debug_printf
	("lwp %ld wants to get out of fast tracepoint jump pad single-stepping",
	 thread->id.lwp ());

      if (supports_hardware_single_step ())
	step = 1;
      else
	{
	  internal_error ("moving out of jump pad single-stepping"
			  " not implemented on this target");
	}
    }

  /* If we have while-stepping actions in this thread set it stepping.
     If we have a signal to deliver, it may or may not be set to
     SIG_IGN, we don't know.  Assume so, and allow collecting
     while-stepping into a signal handler.  A possible smart thing to
     do would be to set an internal breakpoint at the signal return
     address, continue, and carry on catching this while-stepping
     action only when that breakpoint is hit.  A future
     enhancement.  */
  if (thread->while_stepping != NULL)
    {
      threads_debug_printf
	("lwp %ld has a while-stepping action -> forcing step.",
	 thread->id.lwp ());

      step = single_step (lwp);
    }

  if (thread->process ()->tdesc != nullptr && low_supports_breakpoints ())
    {
      struct regcache *regcache = get_thread_regcache (current_thread, 1);

      lwp->stop_pc = low_get_pc (regcache);

      threads_debug_printf (" %s from pc 0x%lx", step ? "step" : "continue",
			    (long) lwp->stop_pc);
    }

  /* If we have pending signals, consume one if it can be delivered to
     the inferior.  */
  if (!lwp->pending_signals.empty () && lwp_signal_can_be_delivered (lwp))
    {
      const pending_signal &p_sig = lwp->pending_signals.front ();

      signal = p_sig.signal;
      if (p_sig.info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, thread->id.lwp (), (PTRACE_TYPE_ARG3) 0,
		&p_sig.info);

      lwp->pending_signals.pop_front ();
    }

  threads_debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)",
			thread->id.lwp (), step ? "step" : "continue", signal,
			lwp->stop_expected ? "expected" : "not expected");

  low_prepare_to_resume (lwp);

  /* Cached registers are stale once the inferior runs again.  */
  regcache_invalidate_thread (thread);
  errno = 0;
  lwp->stepping = step;
  if (step)
    ptrace_request = PTRACE_SINGLESTEP;
  else if (gdb_catching_syscalls_p (lwp))
    ptrace_request = PTRACE_SYSCALL;
  else
    ptrace_request = PTRACE_CONT;
  ptrace (ptrace_request,
	  thread->id.lwp (),
	  (PTRACE_TYPE_ARG3) 0,
	  /* Coerce to a uintptr_t first to avoid potential gcc warning
	     of coercing an 8 byte integer to a 4 byte pointer.  */
	  (PTRACE_TYPE_ARG4) (uintptr_t) signal);

  if (errno)
    {
      int saved_errno = errno;

      threads_debug_printf ("ptrace errno = %d (%s)",
			    saved_errno, strerror (saved_errno));

      errno = saved_errno;
      perror_with_name ("resuming thread");
    }

  /* Successfully resumed.  Clear state that no longer makes sense,
     and mark the LWP as running.  Must not do this before resuming
     otherwise if that fails other code will be confused.  E.g., we'd
     later try to stop the LWP and hang forever waiting for a stop
     status.  Note that we must not throw after this is cleared,
     otherwise handle_zombie_lwp_error would get confused.  */
  lwp->stopped = 0;
  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
}
4199 void
4200 linux_process_target::low_prepare_to_resume (lwp_info *lwp)
4202 /* Nop. */
4205 /* Called when we try to resume a stopped LWP and that errors out. If
4206 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4207 or about to become), discard the error, clear any pending status
4208 the LWP may have, and return true (we'll collect the exit status
4209 soon enough). Otherwise, return false. */
4211 static int
4212 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4214 struct thread_info *thread = get_lwp_thread (lp);
4216 /* If we get an error after resuming the LWP successfully, we'd
4217 confuse !T state for the LWP being gone. */
4218 gdb_assert (lp->stopped);
4220 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4221 because even if ptrace failed with ESRCH, the tracee may be "not
4222 yet fully dead", but already refusing ptrace requests. In that
4223 case the tracee has 'R (Running)' state for a little bit
4224 (observed in Linux 3.18). See also the note on ESRCH in the
4225 ptrace(2) man page. Instead, check whether the LWP has any state
4226 other than ptrace-stopped. */
4228 /* Don't assume anything if /proc/PID/status can't be read. */
4229 if (linux_proc_pid_is_trace_stopped_nowarn (thread->id.lwp ()) == 0)
4231 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4232 lp->status_pending_p = 0;
4233 return 1;
4235 return 0;
/* Like resume_one_lwp_throw, but swallows the error when the LWP has
   already become (or is about to become) a zombie; in that case the
   LWP is marked running so its exit event can still be collected.  */

void
linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
				      siginfo_t *info)
{
  try
    {
      resume_one_lwp_throw (lwp, step, signal, info);
    }
  catch (const gdb_exception_error &ex)
    {
      if (check_ptrace_stopped_lwp_gone (lwp))
	{
	  /* This could be because we tried to resume an LWP after its leader
	     exited.  Mark it as resumed, so we can collect an exit event
	     from it.  */
	  lwp->stopped = 0;
	  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
	}
      else
	throw;
    }
}
/* This function is called once per thread via for_each_thread.
   We look up which resume request applies to THREAD and mark it with a
   pointer to the appropriate resume request.

   This algorithm is O(threads * resume elements), but resume elements
   is small (and will remain small at least until GDB supports thread
   suspension).  */

static void
linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* NOTE(review): NDX is 'int' but N is 'size_t'; the comparison is
     signed/unsigned.  Harmless for realistic resume-element counts,
     but worth confirming/cleaning up.  */
  for (int ndx = 0; ndx < n; ndx++)
    {
      ptid_t ptid = resume[ndx].thread;
      if (ptid == minus_one_ptid
	  || ptid == thread->id
	  /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
	     of PID'.  */
	  || (ptid.pid () == thread->id.pid ()
	      && (ptid.is_pid ()
		  || ptid.lwp () == -1)))
	{
	  /* Ignore stop requests for threads already stopped (or
	     stopping) at GDB's request.  */
	  if (resume[ndx].kind == resume_stop
	      && thread->last_resume_kind == resume_stop)
	    {
	      threads_debug_printf
		("already %s LWP %ld at GDB's request",
		 (thread->last_status.kind () == TARGET_WAITKIND_STOPPED
		  ? "stopped" : "stopping"),
		 thread->id.lwp ());

	      continue;
	    }

	  /* Ignore (wildcard) resume requests for already-resumed
	     threads.  */
	  if (resume[ndx].kind != resume_stop
	      && thread->last_resume_kind != resume_stop)
	    {
	      threads_debug_printf
		("already %s LWP %ld at GDB's request",
		 (thread->last_resume_kind == resume_step
		  ? "stepping" : "continuing"),
		 thread->id.lwp ());
	      continue;
	    }

	  /* Don't let wildcard resumes resume fork/vfork/clone
	     children that GDB does not yet know are new children.  */
	  if (lwp->relative != NULL)
	    {
	      struct lwp_info *rel = lwp->relative;

	      if (rel->status_pending_p
		  && is_new_child_status (rel->waitstatus.kind ()))
		{
		  threads_debug_printf
		    ("not resuming LWP %ld: has queued stop reply",
		     thread->id.lwp ());
		  continue;
		}
	    }

	  /* If the thread has a pending event that has already been
	     reported to GDBserver core, but GDB has not pulled the
	     event out of the vStopped queue yet, likewise, ignore the
	     (wildcard) resume request.  */
	  if (in_queued_stop_replies (thread->id))
	    {
	      threads_debug_printf
		("not resuming LWP %ld: has queued stop reply",
		 thread->id.lwp ());
	      continue;
	    }

	  /* Record the matching request; first match wins.  */
	  lwp->resume = &resume[ndx];
	  thread->last_resume_kind = lwp->resume->kind;

	  lwp->step_range_start = lwp->resume->step_range_start;
	  lwp->step_range_end = lwp->resume->step_range_end;

	  /* If we had a deferred signal to report, dequeue one now.
	     This can happen if LWP gets more than one signal while
	     trying to get out of a jump pad.  */
	  if (lwp->stopped
	      && !lwp->status_pending_p
	      && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
	    {
	      lwp->status_pending_p = 1;

	      threads_debug_printf
		("Dequeueing deferred signal %d for LWP %ld, "
		 "leaving status pending.",
		 WSTOPSIG (lwp->status_pending),
		 thread->id.lwp ());
	    }

	  return;
	}
    }

  /* No resume action for this thread.  */
  lwp->resume = NULL;
}
4368 bool
4369 linux_process_target::resume_status_pending (thread_info *thread)
4371 struct lwp_info *lwp = get_thread_lwp (thread);
4373 /* LWPs which will not be resumed are not interesting, because
4374 we might not wait for them next time through linux_wait. */
4375 if (lwp->resume == NULL)
4376 return false;
4378 return thread_still_has_status_pending (thread);
4381 bool
4382 linux_process_target::thread_needs_step_over (thread_info *thread)
4384 struct lwp_info *lwp = get_thread_lwp (thread);
4385 CORE_ADDR pc;
4387 /* GDBserver is skipping the extra traps from the wrapper program,
4388 don't have to do step over. */
4389 if (thread->process ()->tdesc == nullptr)
4390 return false;
4392 /* LWPs which will not be resumed are not interesting, because we
4393 might not wait for them next time through linux_wait. */
4395 if (!lwp->stopped)
4397 threads_debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped",
4398 thread->id.lwp ());
4399 return false;
4402 if (thread->last_resume_kind == resume_stop)
4404 threads_debug_printf
4405 ("Need step over [LWP %ld]? Ignoring, should remain stopped",
4406 thread->id.lwp ());
4407 return false;
4410 gdb_assert (lwp->suspended >= 0);
4412 if (lwp->suspended)
4414 threads_debug_printf ("Need step over [LWP %ld]? Ignoring, suspended",
4415 thread->id.lwp ());
4416 return false;
4419 if (lwp->status_pending_p)
4421 threads_debug_printf
4422 ("Need step over [LWP %ld]? Ignoring, has pending status.",
4423 thread->id.lwp ());
4424 return false;
4427 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4428 or we have. */
4429 pc = get_pc (lwp);
4431 /* If the PC has changed since we stopped, then don't do anything,
4432 and let the breakpoint/tracepoint be hit. This happens if, for
4433 instance, GDB handled the decr_pc_after_break subtraction itself,
4434 GDB is OOL stepping this thread, or the user has issued a "jump"
4435 command, or poked thread's registers herself. */
4436 if (pc != lwp->stop_pc)
4438 threads_debug_printf
4439 ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4440 "Old stop_pc was 0x%s, PC is now 0x%s", thread->id.lwp (),
4441 paddress (lwp->stop_pc), paddress (pc));
4442 return false;
4445 /* On software single step target, resume the inferior with signal
4446 rather than stepping over. */
4447 if (supports_software_single_step ()
4448 && !lwp->pending_signals.empty ()
4449 && lwp_signal_can_be_delivered (lwp))
4451 threads_debug_printf
4452 ("Need step over [LWP %ld]? Ignoring, has pending signals.",
4453 thread->id.lwp ());
4455 return false;
4458 scoped_restore_current_thread restore_thread;
4459 switch_to_thread (thread);
4461 /* We can only step over breakpoints we know about. */
4462 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4464 /* Don't step over a breakpoint that GDB expects to hit
4465 though. If the condition is being evaluated on the target's side
4466 and it evaluate to false, step over this breakpoint as well. */
4467 if (gdb_breakpoint_here (pc)
4468 && gdb_condition_true_at_breakpoint (pc)
4469 && gdb_no_commands_at_breakpoint (pc))
4471 threads_debug_printf ("Need step over [LWP %ld]? yes, but found"
4472 " GDB breakpoint at 0x%s; skipping step over",
4473 thread->id.lwp (), paddress (pc));
4475 return false;
4477 else
4479 threads_debug_printf ("Need step over [LWP %ld]? yes, "
4480 "found breakpoint at 0x%s",
4481 thread->id.lwp (), paddress (pc));
4483 /* We've found an lwp that needs stepping over --- return 1 so
4484 that find_thread stops looking. */
4485 return true;
4489 threads_debug_printf
4490 ("Need step over [LWP %ld]? No, no breakpoint found at 0x%s",
4491 thread->id.lwp (), paddress (pc));
4493 return false;
4496 void
4497 linux_process_target::start_step_over (lwp_info *lwp)
4499 struct thread_info *thread = get_lwp_thread (lwp);
4500 CORE_ADDR pc;
4502 threads_debug_printf ("Starting step-over on LWP %ld. Stopping all threads",
4503 thread->id.lwp ());
4505 stop_all_lwps (1, lwp);
4507 if (lwp->suspended != 0)
4509 internal_error ("LWP %ld suspended=%d\n", thread->id.lwp (),
4510 lwp->suspended);
4513 threads_debug_printf ("Done stopping all threads for step-over.");
4515 /* Note, we should always reach here with an already adjusted PC,
4516 either by GDB (if we're resuming due to GDB's request), or by our
4517 caller, if we just finished handling an internal breakpoint GDB
4518 shouldn't care about. */
4519 pc = get_pc (lwp);
4521 bool step = false;
4523 scoped_restore_current_thread restore_thread;
4524 switch_to_thread (thread);
4526 lwp->bp_reinsert = pc;
4527 uninsert_breakpoints_at (pc);
4528 uninsert_fast_tracepoint_jumps_at (pc);
4530 step = single_step (lwp);
4533 resume_one_lwp (lwp, step, 0, NULL);
4535 /* Require next event from this LWP. */
4536 step_over_bkpt = thread->id;
4539 bool
4540 linux_process_target::finish_step_over (lwp_info *lwp)
4542 if (lwp->bp_reinsert != 0)
4544 scoped_restore_current_thread restore_thread;
4546 threads_debug_printf ("Finished step over.");
4548 switch_to_thread (get_lwp_thread (lwp));
4550 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4551 may be no breakpoint to reinsert there by now. */
4552 reinsert_breakpoints_at (lwp->bp_reinsert);
4553 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4555 lwp->bp_reinsert = 0;
4557 /* Delete any single-step breakpoints. No longer needed. We
4558 don't have to worry about other threads hitting this trap,
4559 and later not being able to explain it, because we were
4560 stepping over a breakpoint, and we hold all threads but
4561 LWP stopped while doing that. */
4562 if (!supports_hardware_single_step ())
4564 gdb_assert (has_single_step_breakpoints (current_thread));
4565 delete_single_step_breakpoints (current_thread);
4568 step_over_bkpt = null_ptid;
4569 return true;
4571 else
4572 return false;
4575 void
4576 linux_process_target::complete_ongoing_step_over ()
4578 if (step_over_bkpt != null_ptid)
4580 struct lwp_info *lwp;
4581 int wstat;
4582 int ret;
4584 threads_debug_printf ("detach: step over in progress, finish it first");
4586 /* Passing NULL_PTID as filter indicates we want all events to
4587 be left pending. Eventually this returns when there are no
4588 unwaited-for children left. */
4589 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
4590 __WALL);
4591 gdb_assert (ret == -1);
4593 lwp = find_lwp_pid (step_over_bkpt);
4594 if (lwp != NULL)
4596 finish_step_over (lwp);
4598 /* If we got our step SIGTRAP, don't leave it pending,
4599 otherwise we would report it to GDB as a spurious
4600 SIGTRAP. */
4601 gdb_assert (lwp->status_pending_p);
4602 if (WIFSTOPPED (lwp->status_pending)
4603 && WSTOPSIG (lwp->status_pending) == SIGTRAP)
4605 thread_info *thread = get_lwp_thread (lwp);
4606 if (thread->last_resume_kind != resume_step)
4608 threads_debug_printf ("detach: discard step-over SIGTRAP");
4610 lwp->status_pending_p = 0;
4611 lwp->status_pending = 0;
4612 resume_one_lwp (lwp, lwp->stepping, 0, NULL);
4614 else
4615 threads_debug_printf
4616 ("detach: resume_step, not discarding step-over SIGTRAP");
4619 step_over_bkpt = null_ptid;
4620 unsuspend_all_lwps (lwp);
4624 void
4625 linux_process_target::resume_one_thread (thread_info *thread,
4626 bool leave_all_stopped)
4628 struct lwp_info *lwp = get_thread_lwp (thread);
4629 int leave_pending;
4631 if (lwp->resume == NULL)
4632 return;
4634 if (lwp->resume->kind == resume_stop)
4636 threads_debug_printf ("resume_stop request for LWP %ld",
4637 thread->id.lwp ());
4639 if (!lwp->stopped)
4641 threads_debug_printf ("stopping LWP %ld", thread->id.lwp ());
4643 /* Stop the thread, and wait for the event asynchronously,
4644 through the event loop. */
4645 send_sigstop (lwp);
4647 else
4649 threads_debug_printf ("already stopped LWP %ld", thread->id.lwp ());
4651 /* The LWP may have been stopped in an internal event that
4652 was not meant to be notified back to GDB (e.g., gdbserver
4653 breakpoint), so we should be reporting a stop event in
4654 this case too. */
4656 /* If the thread already has a pending SIGSTOP, this is a
4657 no-op. Otherwise, something later will presumably resume
4658 the thread and this will cause it to cancel any pending
4659 operation, due to last_resume_kind == resume_stop. If
4660 the thread already has a pending status to report, we
4661 will still report it the next time we wait - see
4662 status_pending_p_callback. */
4664 /* If we already have a pending signal to report, then
4665 there's no need to queue a SIGSTOP, as this means we're
4666 midway through moving the LWP out of the jumppad, and we
4667 will report the pending signal as soon as that is
4668 finished. */
4669 if (lwp->pending_signals_to_report.empty ())
4670 send_sigstop (lwp);
4673 /* For stop requests, we're done. */
4674 lwp->resume = NULL;
4675 thread->last_status.set_ignore ();
4676 return;
4679 /* If this thread which is about to be resumed has a pending status,
4680 then don't resume it - we can just report the pending status.
4681 Likewise if it is suspended, because e.g., another thread is
4682 stepping past a breakpoint. Make sure to queue any signals that
4683 would otherwise be sent. In all-stop mode, we do this decision
4684 based on if *any* thread has a pending status. If there's a
4685 thread that needs the step-over-breakpoint dance, then don't
4686 resume any other thread but that particular one. */
4687 leave_pending = (lwp->suspended
4688 || lwp->status_pending_p
4689 || leave_all_stopped);
4691 /* If we have a new signal, enqueue the signal. */
4692 if (lwp->resume->sig != 0)
4694 siginfo_t info, *info_p;
4696 /* If this is the same signal we were previously stopped by,
4697 make sure to queue its siginfo. */
4698 if (WIFSTOPPED (lwp->last_status)
4699 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
4700 && ptrace (PTRACE_GETSIGINFO, thread->id.lwp (),
4701 (PTRACE_TYPE_ARG3) 0, &info) == 0)
4702 info_p = &info;
4703 else
4704 info_p = NULL;
4706 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
4709 if (!leave_pending)
4711 threads_debug_printf ("resuming LWP %ld", thread->id.lwp ());
4713 proceed_one_lwp (thread, NULL);
4715 else
4716 threads_debug_printf ("leaving LWP %ld stopped", thread->id.lwp ());
4718 thread->last_status.set_ignore ();
4719 lwp->resume = NULL;
4722 void
4723 linux_process_target::resume (thread_resume *resume_info, size_t n)
4725 struct thread_info *need_step_over = NULL;
4727 THREADS_SCOPED_DEBUG_ENTER_EXIT;
4729 for_each_thread ([&] (thread_info *thread)
4731 linux_set_resume_request (thread, resume_info, n);
4734 /* If there is a thread which would otherwise be resumed, which has
4735 a pending status, then don't resume any threads - we can just
4736 report the pending status. Make sure to queue any signals that
4737 would otherwise be sent. In non-stop mode, we'll apply this
4738 logic to each thread individually. We consume all pending events
4739 before considering to start a step-over (in all-stop). */
4740 bool any_pending = false;
4741 if (!non_stop)
4742 any_pending = find_thread ([this] (thread_info *thread)
4744 return resume_status_pending (thread);
4745 }) != nullptr;
4747 /* If there is a thread which would otherwise be resumed, which is
4748 stopped at a breakpoint that needs stepping over, then don't
4749 resume any threads - have it step over the breakpoint with all
4750 other threads stopped, then resume all threads again. Make sure
4751 to queue any signals that would otherwise be delivered or
4752 queued. */
4753 if (!any_pending && low_supports_breakpoints ())
4754 need_step_over = find_thread ([this] (thread_info *thread)
4756 return thread_needs_step_over (thread);
4759 bool leave_all_stopped = (need_step_over != NULL || any_pending);
4761 if (need_step_over != NULL)
4762 threads_debug_printf ("Not resuming all, need step over");
4763 else if (any_pending)
4764 threads_debug_printf ("Not resuming, all-stop and found "
4765 "an LWP with pending status");
4766 else
4767 threads_debug_printf ("Resuming, no pending status or step over needed");
4769 /* Even if we're leaving threads stopped, queue all signals we'd
4770 otherwise deliver. */
4771 for_each_thread ([&] (thread_info *thread)
4773 resume_one_thread (thread, leave_all_stopped);
4776 if (need_step_over)
4777 start_step_over (get_thread_lwp (need_step_over));
4779 /* We may have events that were pending that can/should be sent to
4780 the client now. Trigger a linux_wait call. */
4781 if (target_is_async_p ())
4782 async_file_mark ();
4785 void
4786 linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
4788 struct lwp_info *lwp = get_thread_lwp (thread);
4789 int step;
4791 if (lwp == except)
4792 return;
4794 threads_debug_printf ("lwp %ld", thread->id.lwp ());
4796 if (!lwp->stopped)
4798 threads_debug_printf (" LWP %ld already running", thread->id.lwp ());
4799 return;
4802 if (thread->last_resume_kind == resume_stop
4803 && thread->last_status.kind () != TARGET_WAITKIND_IGNORE)
4805 threads_debug_printf (" client wants LWP to remain %ld stopped",
4806 thread->id.lwp ());
4807 return;
4810 if (lwp->status_pending_p)
4812 threads_debug_printf (" LWP %ld has pending status, leaving stopped",
4813 thread->id.lwp ());
4814 return;
4817 gdb_assert (lwp->suspended >= 0);
4819 if (lwp->suspended)
4821 threads_debug_printf (" LWP %ld is suspended", thread->id.lwp ());
4822 return;
4825 if (thread->last_resume_kind == resume_stop
4826 && lwp->pending_signals_to_report.empty ()
4827 && (lwp->collecting_fast_tracepoint
4828 == fast_tpoint_collect_result::not_collecting))
4830 /* We haven't reported this LWP as stopped yet (otherwise, the
4831 last_status.kind check above would catch it, and we wouldn't
4832 reach here. This LWP may have been momentarily paused by a
4833 stop_all_lwps call while handling for example, another LWP's
4834 step-over. In that case, the pending expected SIGSTOP signal
4835 that was queued at vCont;t handling time will have already
4836 been consumed by wait_for_sigstop, and so we need to requeue
4837 another one here. Note that if the LWP already has a SIGSTOP
4838 pending, this is a no-op. */
4840 threads_debug_printf
4841 ("Client wants LWP %ld to stop. Making sure it has a SIGSTOP pending",
4842 thread->id.lwp ());
4844 send_sigstop (lwp);
4847 if (thread->last_resume_kind == resume_step)
4849 threads_debug_printf (" stepping LWP %ld, client wants it stepping",
4850 thread->id.lwp ());
4852 /* If resume_step is requested by GDB, install single-step
4853 breakpoints when the thread is about to be actually resumed if
4854 the single-step breakpoints weren't removed. */
4855 if (supports_software_single_step ()
4856 && !has_single_step_breakpoints (thread))
4857 install_software_single_step_breakpoints (lwp);
4859 step = maybe_hw_step (thread);
4861 else if (lwp->bp_reinsert != 0)
4863 threads_debug_printf (" stepping LWP %ld, reinsert set",
4864 thread->id.lwp ());
4866 step = maybe_hw_step (thread);
4868 else
4869 step = 0;
4871 resume_one_lwp (lwp, step, 0, NULL);
4874 void
4875 linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
4876 lwp_info *except)
4878 struct lwp_info *lwp = get_thread_lwp (thread);
4880 if (lwp == except)
4881 return;
4883 lwp_suspended_decr (lwp);
4885 proceed_one_lwp (thread, except);
4888 void
4889 linux_process_target::proceed_all_lwps ()
4891 struct thread_info *need_step_over;
4893 /* If there is a thread which would otherwise be resumed, which is
4894 stopped at a breakpoint that needs stepping over, then don't
4895 resume any threads - have it step over the breakpoint with all
4896 other threads stopped, then resume all threads again. */
4898 if (low_supports_breakpoints ())
4900 need_step_over = find_thread ([this] (thread_info *thread)
4902 return thread_needs_step_over (thread);
4905 if (need_step_over != NULL)
4907 threads_debug_printf ("found thread %ld needing a step-over",
4908 need_step_over->id.lwp ());
4910 start_step_over (get_thread_lwp (need_step_over));
4911 return;
4915 threads_debug_printf ("Proceeding, no step-over needed");
4917 for_each_thread ([this] (thread_info *thread)
4919 proceed_one_lwp (thread, NULL);
4923 void
4924 linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
4926 THREADS_SCOPED_DEBUG_ENTER_EXIT;
4928 if (except)
4929 threads_debug_printf ("except=(LWP %ld)",
4930 get_lwp_thread (except)->id.lwp ());
4931 else
4932 threads_debug_printf ("except=nullptr");
4934 if (unsuspend)
4935 for_each_thread ([&] (thread_info *thread)
4937 unsuspend_and_proceed_one_lwp (thread, except);
4939 else
4940 for_each_thread ([&] (thread_info *thread)
4942 proceed_one_lwp (thread, except);
4947 #ifdef HAVE_LINUX_REGSETS
4949 #define use_linux_regsets 1
4951 /* Returns true if REGSET has been disabled. */
4953 static int
4954 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4956 return (info->disabled_regsets != NULL
4957 && info->disabled_regsets[regset - info->regsets]);
4960 /* Disable REGSET. */
4962 static void
4963 disable_regset (struct regsets_info *info, struct regset_info *regset)
4965 int dr_offset;
4967 dr_offset = regset - info->regsets;
4968 if (info->disabled_regsets == NULL)
4969 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
4970 info->disabled_regsets[dr_offset] = 1;
4973 static int
4974 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4975 struct regcache *regcache)
4977 struct regset_info *regset;
4978 int saw_general_regs = 0;
4979 int pid = current_thread->id.lwp ();
4980 struct iovec iov;
4982 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4984 void *buf, *data;
4985 int nt_type, res;
4987 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4988 continue;
4990 buf = xmalloc (regset->size);
4992 nt_type = regset->nt_type;
4993 if (nt_type)
4995 iov.iov_base = buf;
4996 iov.iov_len = regset->size;
4997 data = (void *) &iov;
4999 else
5000 data = buf;
5002 #ifndef __sparc__
5003 res = ptrace (regset->get_request, pid,
5004 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5005 #else
5006 res = ptrace (regset->get_request, pid, data, nt_type);
5007 #endif
5008 if (res < 0)
5010 if (errno == EIO
5011 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5013 /* If we get EIO on a regset, or an EINVAL and the regset is
5014 optional, do not try it again for this process mode. */
5015 disable_regset (regsets_info, regset);
5017 else if (errno == ENODATA)
5019 /* ENODATA may be returned if the regset is currently
5020 not "active". This can happen in normal operation,
5021 so suppress the warning in this case. */
5023 else if (errno == ESRCH)
5025 /* At this point, ESRCH should mean the process is
5026 already gone, in which case we simply ignore attempts
5027 to read its registers. */
5029 else
5031 char s[256];
5032 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5033 pid);
5034 perror (s);
5037 else
5039 if (regset->type == GENERAL_REGS)
5040 saw_general_regs = 1;
5041 regset->store_function (regcache, buf);
5043 free (buf);
5045 if (saw_general_regs)
5046 return 0;
5047 else
5048 return 1;
5051 static int
5052 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5053 struct regcache *regcache)
5055 struct regset_info *regset;
5056 int saw_general_regs = 0;
5057 int pid = current_thread->id.lwp ();
5058 struct iovec iov;
5060 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5062 void *buf, *data;
5063 int nt_type, res;
5065 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5066 || regset->fill_function == NULL)
5067 continue;
5069 buf = xmalloc (regset->size);
5071 /* First fill the buffer with the current register set contents,
5072 in case there are any items in the kernel's regset that are
5073 not in gdbserver's regcache. */
5075 nt_type = regset->nt_type;
5076 if (nt_type)
5078 iov.iov_base = buf;
5079 iov.iov_len = regset->size;
5080 data = (void *) &iov;
5082 else
5083 data = buf;
5085 #ifndef __sparc__
5086 res = ptrace (regset->get_request, pid,
5087 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5088 #else
5089 res = ptrace (regset->get_request, pid, data, nt_type);
5090 #endif
5092 if (res == 0)
5094 /* Then overlay our cached registers on that. */
5095 regset->fill_function (regcache, buf);
5097 /* Only now do we write the register set. */
5098 #ifndef __sparc__
5099 res = ptrace (regset->set_request, pid,
5100 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5101 #else
5102 res = ptrace (regset->set_request, pid, data, nt_type);
5103 #endif
5106 if (res < 0)
5108 if (errno == EIO
5109 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5111 /* If we get EIO on a regset, or an EINVAL and the regset is
5112 optional, do not try it again for this process mode. */
5113 disable_regset (regsets_info, regset);
5115 else if (errno == ESRCH)
5117 /* At this point, ESRCH should mean the process is
5118 already gone, in which case we simply ignore attempts
5119 to change its registers. See also the related
5120 comment in resume_one_lwp. */
5121 free (buf);
5122 return 0;
5124 else
5126 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5129 else if (regset->type == GENERAL_REGS)
5130 saw_general_regs = 1;
5131 free (buf);
5133 if (saw_general_regs)
5134 return 0;
5135 else
5136 return 1;
5139 #else /* !HAVE_LINUX_REGSETS */
5141 #define use_linux_regsets 0
5142 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5143 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5145 #endif
5147 /* Return 1 if register REGNO is supported by one of the regset ptrace
5148 calls or 0 if it has to be transferred individually. */
5150 static int
5151 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5153 unsigned char mask = 1 << (regno % 8);
5154 size_t index = regno / 8;
5156 return (use_linux_regsets
5157 && (regs_info->regset_bitmap == NULL
5158 || (regs_info->regset_bitmap[index] & mask) != 0));
5161 #ifdef HAVE_LINUX_USRREGS
5163 static int
5164 register_addr (const struct usrregs_info *usrregs, int regnum)
5166 int addr;
5168 if (regnum < 0 || regnum >= usrregs->num_regs)
5169 error ("Invalid register number %d.", regnum);
5171 addr = usrregs->regmap[regnum];
5173 return addr;
5177 void
5178 linux_process_target::fetch_register (const usrregs_info *usrregs,
5179 regcache *regcache, int regno)
5181 CORE_ADDR regaddr;
5182 int i, size;
5183 char *buf;
5185 if (regno >= usrregs->num_regs)
5186 return;
5187 if (low_cannot_fetch_register (regno))
5188 return;
5190 regaddr = register_addr (usrregs, regno);
5191 if (regaddr == -1)
5192 return;
5194 size = ((register_size (regcache->tdesc, regno)
5195 + sizeof (PTRACE_XFER_TYPE) - 1)
5196 & -sizeof (PTRACE_XFER_TYPE));
5197 buf = (char *) alloca (size);
5199 int pid = current_thread->id.lwp ();
5201 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5203 errno = 0;
5204 *(PTRACE_XFER_TYPE *) (buf + i) =
5205 ptrace (PTRACE_PEEKUSER, pid,
5206 /* Coerce to a uintptr_t first to avoid potential gcc warning
5207 of coercing an 8 byte integer to a 4 byte pointer. */
5208 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5209 regaddr += sizeof (PTRACE_XFER_TYPE);
5210 if (errno != 0)
5212 /* Mark register REGNO unavailable. */
5213 supply_register (regcache, regno, NULL);
5214 return;
5218 low_supply_ptrace_register (regcache, regno, buf);
5221 void
5222 linux_process_target::store_register (const usrregs_info *usrregs,
5223 regcache *regcache, int regno)
5225 CORE_ADDR regaddr;
5226 int i, size;
5227 char *buf;
5229 if (regno >= usrregs->num_regs)
5230 return;
5231 if (low_cannot_store_register (regno))
5232 return;
5234 regaddr = register_addr (usrregs, regno);
5235 if (regaddr == -1)
5236 return;
5238 size = ((register_size (regcache->tdesc, regno)
5239 + sizeof (PTRACE_XFER_TYPE) - 1)
5240 & -sizeof (PTRACE_XFER_TYPE));
5241 buf = (char *) alloca (size);
5242 memset (buf, 0, size);
5244 low_collect_ptrace_register (regcache, regno, buf);
5246 int pid = current_thread->id.lwp ();
5248 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5250 errno = 0;
5251 ptrace (PTRACE_POKEUSER, pid,
5252 /* Coerce to a uintptr_t first to avoid potential gcc warning
5253 about coercing an 8 byte integer to a 4 byte pointer. */
5254 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5255 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5256 if (errno != 0)
5258 /* At this point, ESRCH should mean the process is
5259 already gone, in which case we simply ignore attempts
5260 to change its registers. See also the related
5261 comment in resume_one_lwp. */
5262 if (errno == ESRCH)
5263 return;
5266 if (!low_cannot_store_register (regno))
5267 error ("writing register %d: %s", regno, safe_strerror (errno));
5269 regaddr += sizeof (PTRACE_XFER_TYPE);
5272 #endif /* HAVE_LINUX_USRREGS */
5274 void
5275 linux_process_target::low_collect_ptrace_register (regcache *regcache,
5276 int regno, char *buf)
5278 collect_register (regcache, regno, buf);
5281 void
5282 linux_process_target::low_supply_ptrace_register (regcache *regcache,
5283 int regno, const char *buf)
5285 supply_register (regcache, regno, buf);
5288 void
5289 linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5290 regcache *regcache,
5291 int regno, int all)
5293 #ifdef HAVE_LINUX_USRREGS
5294 struct usrregs_info *usr = regs_info->usrregs;
5296 if (regno == -1)
5298 for (regno = 0; regno < usr->num_regs; regno++)
5299 if (all || !linux_register_in_regsets (regs_info, regno))
5300 fetch_register (usr, regcache, regno);
5302 else
5303 fetch_register (usr, regcache, regno);
5304 #endif
5307 void
5308 linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5309 regcache *regcache,
5310 int regno, int all)
5312 #ifdef HAVE_LINUX_USRREGS
5313 struct usrregs_info *usr = regs_info->usrregs;
5315 if (regno == -1)
5317 for (regno = 0; regno < usr->num_regs; regno++)
5318 if (all || !linux_register_in_regsets (regs_info, regno))
5319 store_register (usr, regcache, regno);
5321 else
5322 store_register (usr, regcache, regno);
5323 #endif
5326 void
5327 linux_process_target::fetch_registers (regcache *regcache, int regno)
5329 int use_regsets;
5330 int all = 0;
5331 const regs_info *regs_info = get_regs_info ();
5333 if (regno == -1)
5335 if (regs_info->usrregs != NULL)
5336 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5337 low_fetch_register (regcache, regno);
5339 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5340 if (regs_info->usrregs != NULL)
5341 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5343 else
5345 if (low_fetch_register (regcache, regno))
5346 return;
5348 use_regsets = linux_register_in_regsets (regs_info, regno);
5349 if (use_regsets)
5350 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5351 regcache);
5352 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5353 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5357 void
5358 linux_process_target::store_registers (regcache *regcache, int regno)
5360 int use_regsets;
5361 int all = 0;
5362 const regs_info *regs_info = get_regs_info ();
5364 if (regno == -1)
5366 all = regsets_store_inferior_registers (regs_info->regsets_info,
5367 regcache);
5368 if (regs_info->usrregs != NULL)
5369 usr_store_inferior_registers (regs_info, regcache, regno, all);
5371 else
5373 use_regsets = linux_register_in_regsets (regs_info, regno);
5374 if (use_regsets)
5375 all = regsets_store_inferior_registers (regs_info->regsets_info,
5376 regcache);
5377 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5378 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5382 bool
5383 linux_process_target::low_fetch_register (regcache *regcache, int regno)
5385 return false;
5388 /* A wrapper for the read_memory target op. */
5390 static int
5391 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5393 return the_target->read_memory (memaddr, myaddr, len);
5397 /* Helper for read_memory/write_memory using /proc/PID/mem. Because
5398 we can use a single read/write call, this can be much more
5399 efficient than banging away at PTRACE_PEEKTEXT. Also, unlike
5400 PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running threads.
   One and only one of READBUF and WRITEBUF is non-null.  If READBUF is
5402 not null, then we're reading, otherwise we're writing. */
5404 static int
5405 proc_xfer_memory (CORE_ADDR memaddr, unsigned char *readbuf,
5406 const gdb_byte *writebuf, int len)
5408 gdb_assert ((readbuf == nullptr) != (writebuf == nullptr));
5410 process_info *proc = current_process ();
5412 int fd = proc->priv->mem_fd;
5413 if (fd == -1)
5414 return EIO;
5416 while (len > 0)
5418 int bytes;
5420 /* Use pread64/pwrite64 if available, since they save a syscall
5421 and can handle 64-bit offsets even on 32-bit platforms (for
5422 instance, SPARC debugging a SPARC64 application). But only
5423 use them if the offset isn't so high that when cast to off_t
5424 it'd be negative, as seen on SPARC64. pread64/pwrite64
5425 outright reject such offsets. lseek does not. */
5426 #ifdef HAVE_PREAD64
5427 if ((off_t) memaddr >= 0)
5428 bytes = (readbuf != nullptr
5429 ? pread64 (fd, readbuf, len, memaddr)
5430 : pwrite64 (fd, writebuf, len, memaddr));
5431 else
5432 #endif
5434 bytes = -1;
5435 if (lseek (fd, memaddr, SEEK_SET) != -1)
5436 bytes = (readbuf != nullptr
5437 ? read (fd, readbuf, len)
5438 : write (fd, writebuf, len));
5441 if (bytes < 0)
5442 return errno;
5443 else if (bytes == 0)
5445 /* EOF means the address space is gone, the whole process
5446 exited or execed. */
5447 return EIO;
5450 memaddr += bytes;
5451 if (readbuf != nullptr)
5452 readbuf += bytes;
5453 else
5454 writebuf += bytes;
5455 len -= bytes;
5458 return 0;
5462 linux_process_target::read_memory (CORE_ADDR memaddr,
5463 unsigned char *myaddr, int len)
5465 return proc_xfer_memory (memaddr, myaddr, nullptr, len);
5468 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5469 memory at MEMADDR. On failure (cannot write to the inferior)
5470 returns the value of errno. Always succeeds if LEN is zero. */
5473 linux_process_target::write_memory (CORE_ADDR memaddr,
5474 const unsigned char *myaddr, int len)
5476 if (debug_threads)
5478 /* Dump up to four bytes. */
5479 char str[4 * 2 + 1];
5480 char *p = str;
5481 int dump = len < 4 ? len : 4;
5483 for (int i = 0; i < dump; i++)
5485 sprintf (p, "%02x", myaddr[i]);
5486 p += 2;
5488 *p = '\0';
5490 threads_debug_printf ("Writing %s to 0x%08lx in process %d",
5491 str, (long) memaddr, current_process ()->pid);
5494 return proc_xfer_memory (memaddr, nullptr, myaddr, len);
5497 void
5498 linux_process_target::look_up_symbols ()
5500 #ifdef USE_THREAD_DB
5501 struct process_info *proc = current_process ();
5503 if (proc->priv->thread_db != NULL)
5504 return;
5506 thread_db_init ();
5507 #endif
5510 void
5511 linux_process_target::request_interrupt ()
5513 /* Send a SIGINT to the process group. This acts just like the user
5514 typed a ^C on the controlling terminal. */
5515 int res = ::kill (-signal_pid, SIGINT);
5516 if (res == -1)
5517 warning (_("Sending SIGINT to process group of pid %ld failed: %s"),
5518 signal_pid, safe_strerror (errno));
5521 bool
5522 linux_process_target::supports_read_auxv ()
5524 return true;
5527 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5528 to debugger memory starting at MYADDR. */
5531 linux_process_target::read_auxv (int pid, CORE_ADDR offset,
5532 unsigned char *myaddr, unsigned int len)
5534 char filename[PATH_MAX];
5535 int fd, n;
5537 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5539 fd = open (filename, O_RDONLY);
5540 if (fd < 0)
5541 return -1;
5543 if (offset != (CORE_ADDR) 0
5544 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5545 n = -1;
5546 else
5547 n = read (fd, myaddr, len);
5549 close (fd);
5551 return n;
5555 linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5556 int size, raw_breakpoint *bp)
5558 if (type == raw_bkpt_type_sw)
5559 return insert_memory_breakpoint (bp);
5560 else
5561 return low_insert_point (type, addr, size, bp);
5565 linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
5566 int size, raw_breakpoint *bp)
5568 /* Unsupported (see target.h). */
5569 return 1;
5573 linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5574 int size, raw_breakpoint *bp)
5576 if (type == raw_bkpt_type_sw)
5577 return remove_memory_breakpoint (bp);
5578 else
5579 return low_remove_point (type, addr, size, bp);
5583 linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
5584 int size, raw_breakpoint *bp)
5586 /* Unsupported (see target.h). */
5587 return 1;
5590 /* Implement the stopped_by_sw_breakpoint target_ops
5591 method. */
5593 bool
5594 linux_process_target::stopped_by_sw_breakpoint ()
5596 struct lwp_info *lwp = get_thread_lwp (current_thread);
5598 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5601 /* Implement the supports_stopped_by_sw_breakpoint target_ops
5602 method. */
5604 bool
5605 linux_process_target::supports_stopped_by_sw_breakpoint ()
5607 return true;
5610 /* Implement the stopped_by_hw_breakpoint target_ops
5611 method. */
5613 bool
5614 linux_process_target::stopped_by_hw_breakpoint ()
5616 struct lwp_info *lwp = get_thread_lwp (current_thread);
5618 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5621 /* Implement the supports_stopped_by_hw_breakpoint target_ops
5622 method. */
5624 bool
5625 linux_process_target::supports_stopped_by_hw_breakpoint ()
5627 return true;
5630 /* Implement the supports_hardware_single_step target_ops method. */
5632 bool
5633 linux_process_target::supports_hardware_single_step ()
5635 return true;
5638 bool
5639 linux_process_target::stopped_by_watchpoint ()
5641 struct lwp_info *lwp = get_thread_lwp (current_thread);
5643 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5646 CORE_ADDR
5647 linux_process_target::stopped_data_address ()
5649 struct lwp_info *lwp = get_thread_lwp (current_thread);
5651 return lwp->stopped_data_address;
5654 /* This is only used for targets that define PT_TEXT_ADDR,
5655 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5656 the target has different ways of acquiring this information, like
5657 loadmaps. */
5659 bool
5660 linux_process_target::supports_read_offsets ()
5662 #ifdef SUPPORTS_READ_OFFSETS
5663 return true;
5664 #else
5665 return false;
5666 #endif
5669 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5670 to tell gdb about. */
5673 linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5675 #ifdef SUPPORTS_READ_OFFSETS
5676 unsigned long text, text_end, data;
5677 int pid = current_thread->id.lwp ();
5679 errno = 0;
5681 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5682 (PTRACE_TYPE_ARG4) 0);
5683 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5684 (PTRACE_TYPE_ARG4) 0);
5685 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5686 (PTRACE_TYPE_ARG4) 0);
5688 if (errno == 0)
5690 /* Both text and data offsets produced at compile-time (and so
5691 used by gdb) are relative to the beginning of the program,
5692 with the data segment immediately following the text segment.
5693 However, the actual runtime layout in memory may put the data
5694 somewhere else, so when we send gdb a data base-address, we
5695 use the real data base address and subtract the compile-time
5696 data base-address from it (which is just the length of the
5697 text segment). BSS immediately follows data in both
5698 cases. */
5699 *text_p = text;
5700 *data_p = data - (text_end - text);
5702 return 1;
5704 return 0;
5705 #else
5706 gdb_assert_not_reached ("target op read_offsets not supported");
5707 #endif
5710 bool
5711 linux_process_target::supports_get_tls_address ()
5713 #ifdef USE_THREAD_DB
5714 return true;
5715 #else
5716 return false;
5717 #endif
5721 linux_process_target::get_tls_address (thread_info *thread,
5722 CORE_ADDR offset,
5723 CORE_ADDR load_module,
5724 CORE_ADDR *address)
5726 #ifdef USE_THREAD_DB
5727 return thread_db_get_tls_address (thread, offset, load_module, address);
5728 #else
5729 return -1;
5730 #endif
5733 bool
5734 linux_process_target::supports_qxfer_osdata ()
5736 return true;
5740 linux_process_target::qxfer_osdata (const char *annex,
5741 unsigned char *readbuf,
5742 unsigned const char *writebuf,
5743 CORE_ADDR offset, int len)
5745 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5748 void
5749 linux_process_target::siginfo_fixup (siginfo_t *siginfo,
5750 gdb_byte *inf_siginfo, int direction)
5752 bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);
5754 /* If there was no callback, or the callback didn't do anything,
5755 then just do a straight memcpy. */
5756 if (!done)
5758 if (direction == 1)
5759 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5760 else
5761 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5765 bool
5766 linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
5767 int direction)
5769 return false;
5772 bool
5773 linux_process_target::supports_qxfer_siginfo ()
5775 return true;
5779 linux_process_target::qxfer_siginfo (const char *annex,
5780 unsigned char *readbuf,
5781 unsigned const char *writebuf,
5782 CORE_ADDR offset, int len)
5784 siginfo_t siginfo;
5785 gdb_byte inf_siginfo[sizeof (siginfo_t)];
5787 if (current_thread == NULL)
5788 return -1;
5790 int pid = current_thread->id.lwp ();
5792 threads_debug_printf ("%s siginfo for lwp %d.",
5793 readbuf != NULL ? "Reading" : "Writing",
5794 pid);
5796 if (offset >= sizeof (siginfo))
5797 return -1;
5799 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5800 return -1;
5802 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5803 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5804 inferior with a 64-bit GDBSERVER should look the same as debugging it
5805 with a 32-bit GDBSERVER, we need to convert it. */
5806 siginfo_fixup (&siginfo, inf_siginfo, 0);
5808 if (offset + len > sizeof (siginfo))
5809 len = sizeof (siginfo) - offset;
5811 if (readbuf != NULL)
5812 memcpy (readbuf, inf_siginfo + offset, len);
5813 else
5815 memcpy (inf_siginfo + offset, writebuf, len);
5817 /* Convert back to ptrace layout before flushing it out. */
5818 siginfo_fixup (&siginfo, inf_siginfo, 1);
5820 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5821 return -1;
5824 return len;
5827 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
5828 so we notice when children change state; as the handler for the
5829 sigsuspend in my_waitpid. */
5831 static void
5832 sigchld_handler (int signo)
5834 int old_errno = errno;
5836 if (debug_threads)
5840 /* Use the async signal safe debug function. */
5841 if (debug_write ("sigchld_handler\n",
5842 sizeof ("sigchld_handler\n") - 1) < 0)
5843 break; /* just ignore */
5844 } while (0);
5847 if (target_is_async_p ())
5848 async_file_mark (); /* trigger a linux_wait */
5850 errno = old_errno;
5853 bool
5854 linux_process_target::supports_non_stop ()
5856 return true;
5859 bool
5860 linux_process_target::async (bool enable)
5862 bool previous = target_is_async_p ();
5864 threads_debug_printf ("async (%d), previous=%d",
5865 enable, previous);
5867 if (previous != enable)
5869 sigset_t mask;
5870 sigemptyset (&mask);
5871 sigaddset (&mask, SIGCHLD);
5873 gdb_sigmask (SIG_BLOCK, &mask, NULL);
5875 if (enable)
5877 if (!linux_event_pipe.open_pipe ())
5879 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
5881 warning ("creating event pipe failed.");
5882 return previous;
5885 /* Register the event loop handler. */
5886 add_file_handler (linux_event_pipe.event_fd (),
5887 handle_target_event, NULL,
5888 "linux-low");
5890 /* Always trigger a linux_wait. */
5891 async_file_mark ();
5893 else
5895 delete_file_handler (linux_event_pipe.event_fd ());
5897 linux_event_pipe.close_pipe ();
5900 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
5903 return previous;
5907 linux_process_target::start_non_stop (bool nonstop)
5909 /* Register or unregister from event-loop accordingly. */
5910 target_async (nonstop);
5912 if (target_is_async_p () != (nonstop != false))
5913 return -1;
5915 return 0;
5918 bool
5919 linux_process_target::supports_multi_process ()
5921 return true;
5924 /* Check if fork events are supported. */
5926 bool
5927 linux_process_target::supports_fork_events ()
5929 return true;
5932 /* Check if vfork events are supported. */
5934 bool
5935 linux_process_target::supports_vfork_events ()
5937 return true;
5940 /* Return the set of supported thread options. */
5942 gdb_thread_options
5943 linux_process_target::supported_thread_options ()
5945 return GDB_THREAD_OPTION_CLONE | GDB_THREAD_OPTION_EXIT;
5948 /* Check if exec events are supported. */
5950 bool
5951 linux_process_target::supports_exec_events ()
5953 return true;
5956 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5957 ptrace flags for all inferiors. This is in case the new GDB connection
5958 doesn't support the same set of events that the previous one did. */
5960 void
5961 linux_process_target::handle_new_gdb_connection ()
5963 /* Request that all the lwps reset their ptrace options. */
5964 for_each_thread ([] (thread_info *thread)
5966 struct lwp_info *lwp = get_thread_lwp (thread);
5968 if (!lwp->stopped)
5970 /* Stop the lwp so we can modify its ptrace options. */
5971 lwp->must_set_ptrace_flags = 1;
5972 linux_stop_lwp (lwp);
5974 else
5976 /* Already stopped; go ahead and set the ptrace options. */
5977 process_info *proc = find_process_pid (thread->id.pid ());
5978 int options = linux_low_ptrace_options (proc->attached);
5980 linux_enable_event_reporting (thread->id.lwp (), options);
5981 lwp->must_set_ptrace_flags = 0;
5987 linux_process_target::handle_monitor_command (char *mon)
5989 #ifdef USE_THREAD_DB
5990 return thread_db_handle_monitor_command (mon);
5991 #else
5992 return 0;
5993 #endif
5997 linux_process_target::core_of_thread (ptid_t ptid)
5999 return linux_common_core_of_thread (ptid);
6002 bool
6003 linux_process_target::supports_disable_randomization ()
6005 return true;
6008 bool
6009 linux_process_target::supports_agent ()
6011 return true;
6014 bool
6015 linux_process_target::supports_range_stepping ()
6017 if (supports_software_single_step ())
6018 return true;
6020 return low_supports_range_stepping ();
6023 bool
6024 linux_process_target::low_supports_range_stepping ()
6026 return false;
6029 bool
6030 linux_process_target::supports_pid_to_exec_file ()
6032 return true;
6035 const char *
6036 linux_process_target::pid_to_exec_file (int pid)
6038 return linux_proc_pid_to_exec_file (pid);
6041 bool
6042 linux_process_target::supports_multifs ()
6044 return true;
6048 linux_process_target::multifs_open (int pid, const char *filename,
6049 int flags, mode_t mode)
6051 return linux_mntns_open_cloexec (pid, filename, flags, mode);
6055 linux_process_target::multifs_unlink (int pid, const char *filename)
6057 return linux_mntns_unlink (pid, filename);
6060 ssize_t
6061 linux_process_target::multifs_readlink (int pid, const char *filename,
6062 char *buf, size_t bufsiz)
6064 return linux_mntns_readlink (pid, filename, buf, bufsiz);
6067 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6068 struct target_loadseg
6070 /* Core address to which the segment is mapped. */
6071 Elf32_Addr addr;
6072 /* VMA recorded in the program header. */
6073 Elf32_Addr p_vaddr;
6074 /* Size of this segment in memory. */
6075 Elf32_Word p_memsz;
6078 # if defined PT_GETDSBT
6079 struct target_loadmap
6081 /* Protocol version number, must be zero. */
6082 Elf32_Word version;
6083 /* Pointer to the DSBT table, its size, and the DSBT index. */
6084 unsigned *dsbt_table;
6085 unsigned dsbt_size, dsbt_index;
6086 /* Number of segments in this map. */
6087 Elf32_Word nsegs;
6088 /* The actual memory map. */
6089 struct target_loadseg segs[/*nsegs*/];
6091 # define LINUX_LOADMAP PT_GETDSBT
6092 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6093 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6094 # else
6095 struct target_loadmap
6097 /* Protocol version number, must be zero. */
6098 Elf32_Half version;
6099 /* Number of segments in this map. */
6100 Elf32_Half nsegs;
6101 /* The actual memory map. */
6102 struct target_loadseg segs[/*nsegs*/];
6104 # define LINUX_LOADMAP PTRACE_GETFDPIC
6105 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6106 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6107 # endif
6109 bool
6110 linux_process_target::supports_read_loadmap ()
6112 return true;
6116 linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
6117 unsigned char *myaddr, unsigned int len)
6119 int pid = current_thread->id.lwp ();
6120 int addr = -1;
6121 struct target_loadmap *data = NULL;
6122 unsigned int actual_length, copy_length;
6124 if (strcmp (annex, "exec") == 0)
6125 addr = (int) LINUX_LOADMAP_EXEC;
6126 else if (strcmp (annex, "interp") == 0)
6127 addr = (int) LINUX_LOADMAP_INTERP;
6128 else
6129 return -1;
6131 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6132 return -1;
6134 if (data == NULL)
6135 return -1;
6137 actual_length = sizeof (struct target_loadmap)
6138 + sizeof (struct target_loadseg) * data->nsegs;
6140 if (offset < 0 || offset > actual_length)
6141 return -1;
6143 copy_length = actual_length - offset < len ? actual_length - offset : len;
6144 memcpy (myaddr, (char *) data + offset, copy_length);
6145 return copy_length;
6147 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6149 bool
6150 linux_process_target::supports_catch_syscall ()
6152 return low_supports_catch_syscall ();
6155 bool
6156 linux_process_target::low_supports_catch_syscall ()
6158 return false;
6161 CORE_ADDR
6162 linux_process_target::read_pc (regcache *regcache)
6164 if (!low_supports_breakpoints ())
6165 return 0;
6167 return low_get_pc (regcache);
6170 void
6171 linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
6173 gdb_assert (low_supports_breakpoints ());
6175 low_set_pc (regcache, pc);
6178 bool
6179 linux_process_target::supports_thread_stopped ()
6181 return true;
6184 bool
6185 linux_process_target::thread_stopped (thread_info *thread)
6187 return get_thread_lwp (thread)->stopped;
6190 bool
6191 linux_process_target::any_resumed ()
6193 bool any_resumed;
6195 auto status_pending_p_any = [&] (thread_info *thread)
6197 return status_pending_p_callback (thread, minus_one_ptid);
6200 auto not_stopped = [&] (thread_info *thread)
6202 return not_stopped_callback (thread, minus_one_ptid);
6205 /* Find a resumed LWP, if any. */
6206 if (find_thread (status_pending_p_any) != NULL)
6207 any_resumed = 1;
6208 else if (find_thread (not_stopped) != NULL)
6209 any_resumed = 1;
6210 else
6211 any_resumed = 0;
6213 return any_resumed;
6216 /* This exposes stop-all-threads functionality to other modules. */
6218 void
6219 linux_process_target::pause_all (bool freeze)
6221 stop_all_lwps (freeze, NULL);
6224 /* This exposes unstop-all-threads functionality to other gdbserver
6225 modules. */
6227 void
6228 linux_process_target::unpause_all (bool unfreeze)
6230 unstop_all_lwps (unfreeze, NULL);
6233 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6235 static int
6236 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6237 CORE_ADDR *phdr_memaddr, int *num_phdr)
6239 char filename[PATH_MAX];
6240 int fd;
6241 const int auxv_size = is_elf64
6242 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6243 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6245 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6247 fd = open (filename, O_RDONLY);
6248 if (fd < 0)
6249 return 1;
6251 *phdr_memaddr = 0;
6252 *num_phdr = 0;
6253 while (read (fd, buf, auxv_size) == auxv_size
6254 && (*phdr_memaddr == 0 || *num_phdr == 0))
6256 if (is_elf64)
6258 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6260 switch (aux->a_type)
6262 case AT_PHDR:
6263 *phdr_memaddr = aux->a_un.a_val;
6264 break;
6265 case AT_PHNUM:
6266 *num_phdr = aux->a_un.a_val;
6267 break;
6270 else
6272 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6274 switch (aux->a_type)
6276 case AT_PHDR:
6277 *phdr_memaddr = aux->a_un.a_val;
6278 break;
6279 case AT_PHNUM:
6280 *num_phdr = aux->a_un.a_val;
6281 break;
6286 close (fd);
6288 if (*phdr_memaddr == 0 || *num_phdr == 0)
6290 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6291 "phdr_memaddr = %ld, phdr_num = %d",
6292 (long) *phdr_memaddr, *num_phdr);
6293 return 2;
6296 return 0;
6299 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6301 static CORE_ADDR
6302 get_dynamic (const int pid, const int is_elf64)
6304 CORE_ADDR phdr_memaddr, relocation;
6305 int num_phdr, i;
6306 unsigned char *phdr_buf;
6307 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6309 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6310 return 0;
6312 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6313 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6315 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6316 return 0;
6318 /* Compute relocation: it is expected to be 0 for "regular" executables,
6319 non-zero for PIE ones. */
6320 relocation = -1;
6321 for (i = 0; relocation == -1 && i < num_phdr; i++)
6322 if (is_elf64)
6324 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6326 if (p->p_type == PT_PHDR)
6327 relocation = phdr_memaddr - p->p_vaddr;
6329 else
6331 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6333 if (p->p_type == PT_PHDR)
6334 relocation = phdr_memaddr - p->p_vaddr;
6337 if (relocation == -1)
6339 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6340 any real world executables, including PIE executables, have always
6341 PT_PHDR present. PT_PHDR is not present in some shared libraries or
6342 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
6343 or present DT_DEBUG anyway (fpc binaries are statically linked).
6345 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6347 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6349 return 0;
6352 for (i = 0; i < num_phdr; i++)
6354 if (is_elf64)
6356 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6358 if (p->p_type == PT_DYNAMIC)
6359 return p->p_vaddr + relocation;
6361 else
6363 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6365 if (p->p_type == PT_DYNAMIC)
6366 return p->p_vaddr + relocation;
6370 return 0;
6373 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6374 can be 0 if the inferior does not yet have the library list initialized.
6375 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6376 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6378 static CORE_ADDR
6379 get_r_debug (const int pid, const int is_elf64)
6381 CORE_ADDR dynamic_memaddr;
6382 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6383 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6384 CORE_ADDR map = -1;
6386 dynamic_memaddr = get_dynamic (pid, is_elf64);
6387 if (dynamic_memaddr == 0)
6388 return map;
6390 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6392 if (is_elf64)
6394 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6395 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6396 union
6398 Elf64_Xword map;
6399 unsigned char buf[sizeof (Elf64_Xword)];
6401 rld_map;
6402 #endif
6403 #ifdef DT_MIPS_RLD_MAP
6404 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6406 if (linux_read_memory (dyn->d_un.d_val,
6407 rld_map.buf, sizeof (rld_map.buf)) == 0)
6408 return rld_map.map;
6409 else
6410 break;
6412 #endif /* DT_MIPS_RLD_MAP */
6413 #ifdef DT_MIPS_RLD_MAP_REL
6414 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6416 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6417 rld_map.buf, sizeof (rld_map.buf)) == 0)
6418 return rld_map.map;
6419 else
6420 break;
6422 #endif /* DT_MIPS_RLD_MAP_REL */
6424 if (dyn->d_tag == DT_DEBUG && map == -1)
6425 map = dyn->d_un.d_val;
6427 if (dyn->d_tag == DT_NULL)
6428 break;
6430 else
6432 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6433 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6434 union
6436 Elf32_Word map;
6437 unsigned char buf[sizeof (Elf32_Word)];
6439 rld_map;
6440 #endif
6441 #ifdef DT_MIPS_RLD_MAP
6442 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6444 if (linux_read_memory (dyn->d_un.d_val,
6445 rld_map.buf, sizeof (rld_map.buf)) == 0)
6446 return rld_map.map;
6447 else
6448 break;
6450 #endif /* DT_MIPS_RLD_MAP */
6451 #ifdef DT_MIPS_RLD_MAP_REL
6452 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6454 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6455 rld_map.buf, sizeof (rld_map.buf)) == 0)
6456 return rld_map.map;
6457 else
6458 break;
6460 #endif /* DT_MIPS_RLD_MAP_REL */
6462 if (dyn->d_tag == DT_DEBUG && map == -1)
6463 map = dyn->d_un.d_val;
6465 if (dyn->d_tag == DT_NULL)
6466 break;
6469 dynamic_memaddr += dyn_size;
6472 return map;
6475 /* Read one pointer from MEMADDR in the inferior. */
6477 static int
6478 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6480 int ret;
6482 /* Go through a union so this works on either big or little endian
6483 hosts, when the inferior's pointer size is smaller than the size
6484 of CORE_ADDR. It is assumed the inferior's endianness is the
6485 same of the superior's. */
6486 union
6488 CORE_ADDR core_addr;
6489 unsigned int ui;
6490 unsigned char uc;
6491 } addr;
6493 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6494 if (ret == 0)
6496 if (ptr_size == sizeof (CORE_ADDR))
6497 *ptr = addr.core_addr;
6498 else if (ptr_size == sizeof (unsigned int))
6499 *ptr = addr.ui;
6500 else
6501 gdb_assert_not_reached ("unhandled pointer size");
6503 return ret;
6506 bool
6507 linux_process_target::supports_qxfer_libraries_svr4 ()
6509 return true;
6512 struct link_map_offsets
6514 /* Offset and size of r_debug.r_version. */
6515 int r_version_offset;
6517 /* Offset and size of r_debug.r_map. */
6518 int r_map_offset;
6520 /* Offset of r_debug_extended.r_next. */
6521 int r_next_offset;
6523 /* Offset to l_addr field in struct link_map. */
6524 int l_addr_offset;
6526 /* Offset to l_name field in struct link_map. */
6527 int l_name_offset;
6529 /* Offset to l_ld field in struct link_map. */
6530 int l_ld_offset;
6532 /* Offset to l_next field in struct link_map. */
6533 int l_next_offset;
6535 /* Offset to l_prev field in struct link_map. */
6536 int l_prev_offset;
6539 static const link_map_offsets lmo_32bit_offsets =
6541 0, /* r_version offset. */
6542 4, /* r_debug.r_map offset. */
6543 20, /* r_debug_extended.r_next. */
6544 0, /* l_addr offset in link_map. */
6545 4, /* l_name offset in link_map. */
6546 8, /* l_ld offset in link_map. */
6547 12, /* l_next offset in link_map. */
6548 16 /* l_prev offset in link_map. */
6551 static const link_map_offsets lmo_64bit_offsets =
6553 0, /* r_version offset. */
6554 8, /* r_debug.r_map offset. */
6555 40, /* r_debug_extended.r_next. */
6556 0, /* l_addr offset in link_map. */
6557 8, /* l_name offset in link_map. */
6558 16, /* l_ld offset in link_map. */
6559 24, /* l_next offset in link_map. */
6560 32 /* l_prev offset in link_map. */
6563 /* Get the loaded shared libraries from one namespace. */
6565 static void
6566 read_link_map (std::string &document, CORE_ADDR lmid, CORE_ADDR lm_addr,
6567 CORE_ADDR lm_prev, int ptr_size, const link_map_offsets *lmo)
6569 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6571 while (lm_addr
6572 && read_one_ptr (lm_addr + lmo->l_name_offset,
6573 &l_name, ptr_size) == 0
6574 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6575 &l_addr, ptr_size) == 0
6576 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6577 &l_ld, ptr_size) == 0
6578 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6579 &l_prev, ptr_size) == 0
6580 && read_one_ptr (lm_addr + lmo->l_next_offset,
6581 &l_next, ptr_size) == 0)
6583 unsigned char libname[PATH_MAX];
6585 if (lm_prev != l_prev)
6587 warning ("Corrupted shared library list: 0x%s != 0x%s",
6588 paddress (lm_prev), paddress (l_prev));
6589 break;
6592 /* Not checking for error because reading may stop before we've got
6593 PATH_MAX worth of characters. */
6594 libname[0] = '\0';
6595 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6596 libname[sizeof (libname) - 1] = '\0';
6597 if (libname[0] != '\0')
6599 string_appendf (document, "<library name=\"");
6600 xml_escape_text_append (document, (char *) libname);
6601 string_appendf (document, "\" lm=\"0x%s\" l_addr=\"0x%s\" "
6602 "l_ld=\"0x%s\" lmid=\"0x%s\"/>",
6603 paddress (lm_addr), paddress (l_addr),
6604 paddress (l_ld), paddress (lmid));
6607 lm_prev = lm_addr;
6608 lm_addr = l_next;
6612 /* Construct qXfer:libraries-svr4:read reply. */
6615 linux_process_target::qxfer_libraries_svr4 (const char *annex,
6616 unsigned char *readbuf,
6617 unsigned const char *writebuf,
6618 CORE_ADDR offset, int len)
6620 struct process_info_private *const priv = current_process ()->priv;
6621 char filename[PATH_MAX];
6622 int is_elf64;
6623 unsigned int machine;
6624 CORE_ADDR lmid = 0, lm_addr = 0, lm_prev = 0;
6626 if (writebuf != NULL)
6627 return -2;
6628 if (readbuf == NULL)
6629 return -1;
6631 int pid = current_thread->id.lwp ();
6632 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6633 is_elf64 = elf_64_file_p (filename, &machine);
6634 const link_map_offsets *lmo;
6635 int ptr_size;
6636 if (is_elf64)
6638 lmo = &lmo_64bit_offsets;
6639 ptr_size = 8;
6641 else
6643 lmo = &lmo_32bit_offsets;
6644 ptr_size = 4;
6647 while (annex[0] != '\0')
6649 const char *sep;
6650 CORE_ADDR *addrp;
6651 int name_len;
6653 sep = strchr (annex, '=');
6654 if (sep == NULL)
6655 break;
6657 name_len = sep - annex;
6658 if (name_len == 4 && startswith (annex, "lmid"))
6659 addrp = &lmid;
6660 else if (name_len == 5 && startswith (annex, "start"))
6661 addrp = &lm_addr;
6662 else if (name_len == 4 && startswith (annex, "prev"))
6663 addrp = &lm_prev;
6664 else
6666 annex = strchr (sep, ';');
6667 if (annex == NULL)
6668 break;
6669 annex++;
6670 continue;
6673 annex = decode_address_to_semicolon (addrp, sep + 1);
6676 std::string document = "<library-list-svr4 version=\"1.0\"";
6678 /* When the starting LM_ADDR is passed in the annex, only traverse that
6679 namespace, which is assumed to be identified by LMID.
6681 Otherwise, start with R_DEBUG and traverse all namespaces we find. */
6682 if (lm_addr != 0)
6684 document += ">";
6685 read_link_map (document, lmid, lm_addr, lm_prev, ptr_size, lmo);
6687 else
6689 if (lm_prev != 0)
6690 warning ("ignoring prev=0x%s without start", paddress (lm_prev));
6692 /* We could interpret LMID as 'provide only the libraries for this
6693 namespace' but GDB is currently only providing lmid, start, and
6694 prev, or nothing. */
6695 if (lmid != 0)
6696 warning ("ignoring lmid=0x%s without start", paddress (lmid));
6698 CORE_ADDR r_debug = priv->r_debug;
6699 if (r_debug == 0)
6700 r_debug = priv->r_debug = get_r_debug (pid, is_elf64);
6702 /* We failed to find DT_DEBUG. Such situation will not change
6703 for this inferior - do not retry it. Report it to GDB as
6704 E01, see for the reasons at the GDB solib-svr4.c side. */
6705 if (r_debug == (CORE_ADDR) -1)
6706 return -1;
6708 /* Terminate the header if we end up with an empty list. */
6709 if (r_debug == 0)
6710 document += ">";
6712 while (r_debug != 0)
6714 int r_version = 0;
6715 if (linux_read_memory (r_debug + lmo->r_version_offset,
6716 (unsigned char *) &r_version,
6717 sizeof (r_version)) != 0)
6719 warning ("unable to read r_version from 0x%s",
6720 paddress (r_debug + lmo->r_version_offset));
6721 break;
6724 if (r_version < 1)
6726 warning ("unexpected r_debug version %d", r_version);
6727 break;
6730 if (read_one_ptr (r_debug + lmo->r_map_offset, &lm_addr,
6731 ptr_size) != 0)
6733 warning ("unable to read r_map from 0x%s",
6734 paddress (r_debug + lmo->r_map_offset));
6735 break;
6738 /* We read the entire namespace. */
6739 lm_prev = 0;
6741 /* The first entry corresponds to the main executable unless the
6742 dynamic loader was loaded late by a static executable. But
6743 in such case the main executable does not have PT_DYNAMIC
6744 present and we would not have gotten here. */
6745 if (r_debug == priv->r_debug)
6747 if (lm_addr != 0)
6748 string_appendf (document, " main-lm=\"0x%s\">",
6749 paddress (lm_addr));
6750 else
6751 document += ">";
6753 lm_prev = lm_addr;
6754 if (read_one_ptr (lm_addr + lmo->l_next_offset,
6755 &lm_addr, ptr_size) != 0)
6757 warning ("unable to read l_next from 0x%s",
6758 paddress (lm_addr + lmo->l_next_offset));
6759 break;
6763 read_link_map (document, r_debug, lm_addr, lm_prev, ptr_size, lmo);
6765 if (r_version < 2)
6766 break;
6768 if (read_one_ptr (r_debug + lmo->r_next_offset, &r_debug,
6769 ptr_size) != 0)
6771 warning ("unable to read r_next from 0x%s",
6772 paddress (r_debug + lmo->r_next_offset));
6773 break;
6778 document += "</library-list-svr4>";
6780 int document_len = document.length ();
6781 if (offset < document_len)
6782 document_len -= offset;
6783 else
6784 document_len = 0;
6785 if (len > document_len)
6786 len = document_len;
6788 memcpy (readbuf, document.data () + offset, len);
6790 return len;
6793 #ifdef HAVE_LINUX_BTRACE
6795 bool
6796 linux_process_target::supports_btrace ()
6798 return true;
6801 btrace_target_info *
6802 linux_process_target::enable_btrace (thread_info *tp,
6803 const btrace_config *conf)
6805 return linux_enable_btrace (tp->id, conf);
6808 /* See to_disable_btrace target method. */
6811 linux_process_target::disable_btrace (btrace_target_info *tinfo)
6813 enum btrace_error err;
6815 err = linux_disable_btrace (tinfo);
6816 return (err == BTRACE_ERR_NONE ? 0 : -1);
6819 /* Encode an Intel Processor Trace configuration. */
6821 static void
6822 linux_low_encode_pt_config (std::string *buffer,
6823 const struct btrace_data_pt_config *config)
6825 *buffer += "<pt-config>\n";
6827 switch (config->cpu.vendor)
6829 case CV_INTEL:
6830 string_xml_appendf (*buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6831 "model=\"%u\" stepping=\"%u\"/>\n",
6832 config->cpu.family, config->cpu.model,
6833 config->cpu.stepping);
6834 break;
6836 default:
6837 break;
6840 *buffer += "</pt-config>\n";
6843 /* Encode a raw buffer. */
6845 static void
6846 linux_low_encode_raw (std::string *buffer, const gdb_byte *data,
6847 unsigned int size)
6849 if (size == 0)
6850 return;
6852 /* We use hex encoding - see gdbsupport/rsp-low.h. */
6853 *buffer += "<raw>\n";
6855 while (size-- > 0)
6857 char elem[2];
6859 elem[0] = tohex ((*data >> 4) & 0xf);
6860 elem[1] = tohex (*data++ & 0xf);
6862 buffer->append (elem, 2);
6865 *buffer += "</raw>\n";
6868 /* See to_read_btrace target method. */
6871 linux_process_target::read_btrace (btrace_target_info *tinfo,
6872 std::string *buffer,
6873 enum btrace_read_type type)
6875 struct btrace_data btrace;
6876 enum btrace_error err;
6878 err = linux_read_btrace (&btrace, tinfo, type);
6879 if (err != BTRACE_ERR_NONE)
6881 if (err == BTRACE_ERR_OVERFLOW)
6882 *buffer += "E.Overflow.";
6883 else
6884 *buffer += "E.Generic Error.";
6886 return -1;
6889 switch (btrace.format)
6891 case BTRACE_FORMAT_NONE:
6892 *buffer += "E.No Trace.";
6893 return -1;
6895 case BTRACE_FORMAT_BTS:
6896 *buffer += "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n";
6897 *buffer += "<btrace version=\"1.0\">\n";
6899 for (const btrace_block &block : *btrace.variant.bts.blocks)
6900 string_xml_appendf (*buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6901 paddress (block.begin), paddress (block.end));
6903 *buffer += "</btrace>\n";
6904 break;
6906 case BTRACE_FORMAT_PT:
6907 *buffer += "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n";
6908 *buffer += "<btrace version=\"1.0\">\n";
6909 *buffer += "<pt>\n";
6911 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
6913 linux_low_encode_raw (buffer, btrace.variant.pt.data,
6914 btrace.variant.pt.size);
6916 *buffer += "</pt>\n";
6917 *buffer += "</btrace>\n";
6918 break;
6920 default:
6921 *buffer += "E.Unsupported Trace Format.";
6922 return -1;
6925 return 0;
6928 /* See to_btrace_conf target method. */
6931 linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
6932 std::string *buffer)
6934 const struct btrace_config *conf;
6936 *buffer += "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n";
6937 *buffer += "<btrace-conf version=\"1.0\">\n";
6939 conf = linux_btrace_conf (tinfo);
6940 if (conf != NULL)
6942 switch (conf->format)
6944 case BTRACE_FORMAT_NONE:
6945 break;
6947 case BTRACE_FORMAT_BTS:
6948 string_xml_appendf (*buffer, "<bts");
6949 string_xml_appendf (*buffer, " size=\"0x%x\"", conf->bts.size);
6950 string_xml_appendf (*buffer, " />\n");
6951 break;
6953 case BTRACE_FORMAT_PT:
6954 string_xml_appendf (*buffer, "<pt");
6955 string_xml_appendf (*buffer, " size=\"0x%x\"", conf->pt.size);
6956 string_xml_appendf (*buffer, "/>\n");
6957 string_xml_appendf (*buffer, " ptwrite=\"%s\"",
6958 conf->pt.ptwrite ? "yes" : "no");
6959 string_xml_appendf (*buffer, " event-tracing=\"%s\"",
6960 conf->pt.event_tracing ? "yes" : "no");
6961 string_xml_appendf (*buffer, "/>\n");
6962 break;
6966 *buffer += "</btrace-conf>\n";
6967 return 0;
6969 #endif /* HAVE_LINUX_BTRACE */
/* See nat/linux-nat.h.  */

ptid_t
current_lwp_ptid (void)
{
  /* In gdbserver a thread's id is already a ptid, so no translation
     is needed.  */
  return current_thread->id;
}
6979 /* A helper function that copies NAME to DEST, replacing non-printable
6980 characters with '?'. Returns the original DEST as a
6981 convenience. */
6983 static const char *
6984 replace_non_ascii (char *dest, const char *name)
6986 const char *result = dest;
6987 while (*name != '\0')
6989 if (!ISPRINT (*name))
6990 *dest++ = '?';
6991 else
6992 *dest++ = *name;
6993 ++name;
6995 *dest = '\0';
6996 return result;
6999 const char *
7000 linux_process_target::thread_name (ptid_t thread)
7002 static char dest[100];
7004 const char *name = linux_proc_tid_get_name (thread);
7005 if (name == nullptr)
7006 return nullptr;
7008 /* Linux limits the comm file to 16 bytes (including the trailing
7009 \0. If the program or thread name is set when using a multi-byte
7010 encoding, this might cause it to be truncated mid-character. In
7011 this situation, sending the truncated form in an XML <thread>
7012 response will cause a parse error in gdb. So, instead convert
7013 from the locale's encoding (we can't be sure this is the correct
7014 encoding, but it's as good a guess as we have) to UTF-8, but in a
7015 way that ignores any encoding errors. See PR remote/30618. */
7016 const char *cset = nl_langinfo (CODESET);
7017 iconv_t handle = iconv_open ("UTF-8//IGNORE", cset);
7018 if (handle == (iconv_t) -1)
7019 return replace_non_ascii (dest, name);
7021 size_t inbytes = strlen (name);
7022 char *inbuf = const_cast<char *> (name);
7023 size_t outbytes = sizeof (dest);
7024 char *outbuf = dest;
7025 size_t result = iconv (handle, &inbuf, &inbytes, &outbuf, &outbytes);
7027 if (result == (size_t) -1)
7029 if (errno == E2BIG)
7030 outbuf = &dest[sizeof (dest) - 1];
7031 else if ((errno == EILSEQ || errno == EINVAL)
7032 && outbuf < &dest[sizeof (dest) - 2])
7033 *outbuf++ = '?';
7035 *outbuf = '\0';
7037 iconv_close (handle);
7038 return *dest == '\0' ? nullptr : dest;
#if USE_THREAD_DB
/* Return in *HANDLE / *HANDLE_LEN the libthread_db handle for PTID;
   delegates entirely to the thread_db layer.  */
bool
linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
				     int *handle_len)
{
  return thread_db_thread_handle (ptid, handle, handle_len);
}
#endif
7050 thread_info *
7051 linux_process_target::thread_pending_parent (thread_info *thread)
7053 lwp_info *parent = get_thread_lwp (thread)->pending_parent ();
7055 if (parent == nullptr)
7056 return nullptr;
7058 return get_lwp_thread (parent);
7061 thread_info *
7062 linux_process_target::thread_pending_child (thread_info *thread,
7063 target_waitkind *kind)
7065 lwp_info *child = get_thread_lwp (thread)->pending_child (kind);
7067 if (child == nullptr)
7068 return nullptr;
7070 return get_lwp_thread (child);
7073 /* Default implementation of linux_target_ops method "set_pc" for
7074 32-bit pc register which is literally named "pc". */
7076 void
7077 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7079 uint32_t newpc = pc;
7081 supply_register_by_name (regcache, "pc", &newpc);
7084 /* Default implementation of linux_target_ops method "get_pc" for
7085 32-bit pc register which is literally named "pc". */
7087 CORE_ADDR
7088 linux_get_pc_32bit (struct regcache *regcache)
7090 uint32_t pc;
7092 collect_register_by_name (regcache, "pc", &pc);
7093 threads_debug_printf ("stop pc is 0x%" PRIx32, pc);
7094 return pc;
7097 /* Default implementation of linux_target_ops method "set_pc" for
7098 64-bit pc register which is literally named "pc". */
7100 void
7101 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7103 uint64_t newpc = pc;
7105 supply_register_by_name (regcache, "pc", &newpc);
7108 /* Default implementation of linux_target_ops method "get_pc" for
7109 64-bit pc register which is literally named "pc". */
7111 CORE_ADDR
7112 linux_get_pc_64bit (struct regcache *regcache)
7114 uint64_t pc;
7116 collect_register_by_name (regcache, "pc", &pc);
7117 threads_debug_printf ("stop pc is 0x%" PRIx64, pc);
7118 return pc;
7121 /* See linux-low.h. */
7124 linux_get_auxv (int pid, int wordsize, CORE_ADDR match, CORE_ADDR *valp)
7126 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7127 int offset = 0;
7129 gdb_assert (wordsize == 4 || wordsize == 8);
7131 while (the_target->read_auxv (pid, offset, data, 2 * wordsize)
7132 == 2 * wordsize)
7134 if (wordsize == 4)
7136 uint32_t *data_p = (uint32_t *) data;
7137 if (data_p[0] == match)
7139 *valp = data_p[1];
7140 return 1;
7143 else
7145 uint64_t *data_p = (uint64_t *) data;
7146 if (data_p[0] == match)
7148 *valp = data_p[1];
7149 return 1;
7153 offset += 2 * wordsize;
7156 return 0;
7159 /* See linux-low.h. */
7161 CORE_ADDR
7162 linux_get_hwcap (int pid, int wordsize)
7164 CORE_ADDR hwcap = 0;
7165 linux_get_auxv (pid, wordsize, AT_HWCAP, &hwcap);
7166 return hwcap;
7169 /* See linux-low.h. */
7171 CORE_ADDR
7172 linux_get_hwcap2 (int pid, int wordsize)
7174 CORE_ADDR hwcap2 = 0;
7175 linux_get_auxv (pid, wordsize, AT_HWCAP2, &hwcap2);
7176 return hwcap2;
#ifdef HAVE_LINUX_REGSETS
/* Count the regsets in INFO's table (the table is terminated by an
   entry with a negative size) and record the count.  */
void
initialize_regsets_info (struct regsets_info *info)
{
  int count = 0;

  while (info->regsets[count].size >= 0)
    count++;

  info->num_regsets = count;
}
#endif
7190 void
7191 initialize_low (void)
7193 struct sigaction sigchld_action;
7195 memset (&sigchld_action, 0, sizeof (sigchld_action));
7196 set_target_ops (the_linux_target);
7198 linux_ptrace_init_warnings ();
7199 linux_proc_init_warnings ();
7201 sigchld_action.sa_handler = sigchld_handler;
7202 sigemptyset (&sigchld_action.sa_mask);
7203 sigchld_action.sa_flags = SA_RESTART;
7204 sigaction (SIGCHLD, &sigchld_action, NULL);
7206 initialize_low_arch ();
7208 linux_check_ptrace_features ();