/* binutils-gdb.git: gdbserver/linux-low.cc
   (blob 50ce2b449270465c97d734b1310e26885dd18feb)  */
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2024 Free Software Foundation, Inc.
4 This file is part of GDB.
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19 #include "linux-low.h"
20 #include "nat/linux-osdata.h"
21 #include "gdbsupport/agent.h"
22 #include "tdesc.h"
23 #include "gdbsupport/event-loop.h"
24 #include "gdbsupport/event-pipe.h"
25 #include "gdbsupport/rsp-low.h"
26 #include "gdbsupport/signals-state-save-restore.h"
27 #include "nat/linux-nat.h"
28 #include "nat/linux-waitpid.h"
29 #include "gdbsupport/gdb_wait.h"
30 #include "nat/gdb_ptrace.h"
31 #include "nat/linux-ptrace.h"
32 #include "nat/linux-procfs.h"
33 #include "nat/linux-personality.h"
34 #include <signal.h>
35 #include <sys/ioctl.h>
36 #include <fcntl.h>
37 #include <unistd.h>
38 #include <sys/syscall.h>
39 #include <sched.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include <sys/stat.h>
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #include <langinfo.h>
47 #include <iconv.h>
48 #include "gdbsupport/filestuff.h"
49 #include "gdbsupport/gdb-safe-ctype.h"
50 #include "tracepoint.h"
51 #include <inttypes.h>
52 #include "gdbsupport/common-inferior.h"
53 #include "nat/fork-inferior.h"
54 #include "gdbsupport/environ.h"
55 #include "gdbsupport/gdb-sigmask.h"
56 #include "gdbsupport/scoped_restore.h"
57 #ifndef ELFMAG0
58 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
59 then ELFMAG0 will have been defined. If it didn't get included by
60 gdb_proc_service.h then including it will likely introduce a duplicate
61 definition of elf_fpregset_t. */
62 #include <elf.h>
63 #endif
64 #include "nat/linux-namespaces.h"
66 #ifndef O_LARGEFILE
67 #define O_LARGEFILE 0
68 #endif
70 #ifndef AT_HWCAP2
71 #define AT_HWCAP2 26
72 #endif
74 /* Some targets did not define these ptrace constants from the start,
75 so gdbserver defines them locally here. In the future, these may
76 be removed after they are added to asm/ptrace.h. */
77 #if !(defined(PT_TEXT_ADDR) \
78 || defined(PT_DATA_ADDR) \
79 || defined(PT_TEXT_END_ADDR))
80 #if defined(__mcoldfire__)
81 /* These are still undefined in 3.10 kernels. */
82 #define PT_TEXT_ADDR 49*4
83 #define PT_DATA_ADDR 50*4
84 #define PT_TEXT_END_ADDR 51*4
85 /* These are still undefined in 3.10 kernels. */
86 #elif defined(__TMS320C6X__)
87 #define PT_TEXT_ADDR (0x10000*4)
88 #define PT_DATA_ADDR (0x10004*4)
89 #define PT_TEXT_END_ADDR (0x10008*4)
90 #endif
91 #endif
93 #if (defined(__UCLIBC__) \
94 && defined(HAS_NOMMU) \
95 && defined(PT_TEXT_ADDR) \
96 && defined(PT_DATA_ADDR) \
97 && defined(PT_TEXT_END_ADDR))
98 #define SUPPORTS_READ_OFFSETS
99 #endif
101 #ifdef HAVE_LINUX_BTRACE
102 # include "nat/linux-btrace.h"
103 # include "gdbsupport/btrace-common.h"
104 #endif
#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  Auxiliary vector entry for 32-bit
   inferiors, used when the host's <elf.h> does not provide it.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We use to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif
#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  Auxiliary vector entry for 64-bit
   inferiors, used when the host's <elf.h> does not provide it.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We use to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif
136 /* See nat/linux-nat.h. */
137 enum tribool have_ptrace_getregset = TRIBOOL_UNKNOWN;
139 /* Return TRUE if THREAD is the leader thread of the process. */
141 static bool
142 is_leader (thread_info *thread)
144 return thread->id.pid () == thread->id.lwp ();
147 /* Return true if we should report thread exit events to GDB, for
148 THR. */
150 static bool
151 report_exit_events_for (thread_info *thr)
153 client_state &cs = get_client_state ();
155 return (cs.report_thread_events
156 || (thr->thread_options & GDB_THREAD_OPTION_EXIT) != 0);
159 /* LWP accessors. */
161 /* See nat/linux-nat.h. */
163 ptid_t
164 ptid_of_lwp (struct lwp_info *lwp)
166 return lwp->thread->id;
169 /* See nat/linux-nat.h. */
171 void
172 lwp_set_arch_private_info (struct lwp_info *lwp,
173 struct arch_lwp_info *info)
175 lwp->arch_private = info;
178 /* See nat/linux-nat.h. */
180 struct arch_lwp_info *
181 lwp_arch_private_info (struct lwp_info *lwp)
183 return lwp->arch_private;
186 /* See nat/linux-nat.h. */
189 lwp_is_stopped (struct lwp_info *lwp)
191 return lwp->stopped;
194 /* See nat/linux-nat.h. */
196 enum target_stop_reason
197 lwp_stop_reason (struct lwp_info *lwp)
199 return lwp->stop_reason;
202 /* See nat/linux-nat.h. */
205 lwp_is_stepping (struct lwp_info *lwp)
207 return lwp->stepping;
/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};

/* Head of the list; NULL when empty.  */
static struct simple_pid_list *stopped_pids;
227 /* Trivial list manipulation functions to keep track of a list of new
228 stopped processes. */
230 static void
231 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
233 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
235 new_pid->pid = pid;
236 new_pid->status = status;
237 new_pid->next = *listp;
238 *listp = new_pid;
241 static int
242 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
244 struct simple_pid_list **p;
246 for (p = listp; *p != NULL; p = &(*p)->next)
247 if ((*p)->pid == pid)
249 struct simple_pid_list *next = (*p)->next;
251 *statusp = (*p)->status;
252 xfree (*p);
253 *p = next;
254 return 1;
256 return 0;
/* What kind of all-stop operation is in progress, if any.  */

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
static stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
274 /* FIXME make into a target method? */
275 int using_threads = 1;
277 /* True if we're presently stabilizing threads (moving them out of
278 jump pads). */
279 static int stabilizing_threads;
281 static void unsuspend_all_lwps (struct lwp_info *except);
282 static void mark_lwp_dead (struct lwp_info *lwp, int wstat,
283 bool thread_event);
284 static int lwp_is_marked_dead (struct lwp_info *lwp);
285 static int kill_lwp (unsigned long lwpid, int signo);
286 static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
287 static int linux_low_ptrace_options (int attached);
288 static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
290 /* When the event-loop is doing a step-over, this points at the thread
291 being stepped. */
292 static ptid_t step_over_bkpt;
294 bool
295 linux_process_target::low_supports_breakpoints ()
297 return false;
300 CORE_ADDR
301 linux_process_target::low_get_pc (regcache *regcache)
303 return 0;
306 void
307 linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
309 gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
312 std::vector<CORE_ADDR>
313 linux_process_target::low_get_next_pcs (regcache *regcache)
315 gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
316 "implemented");
320 linux_process_target::low_decr_pc_after_break ()
322 return 0;
325 /* True if LWP is stopped in its stepping range. */
327 static int
328 lwp_in_step_range (struct lwp_info *lwp)
330 CORE_ADDR pc = lwp->stop_pc;
332 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
335 /* The event pipe registered as a waitable file in the event loop. */
336 static event_pipe linux_event_pipe;
338 /* True if we're currently in async mode. */
339 #define target_is_async_p() (linux_event_pipe.is_open ())
341 static void send_sigstop (struct lwp_info *lwp);
/* Return non-zero if HEADER is a 64-bit ELF file.  On a magic-number
   match, store the machine in *MACHINE and return whether the class
   is ELFCLASS64; otherwise store EM_NONE and return -1.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}
361 /* Return non-zero if FILE is a 64-bit ELF file,
362 zero if the file is not a 64-bit ELF file,
363 and -1 if the file is not accessible or doesn't exist. */
365 static int
366 elf_64_file_p (const char *file, unsigned int *machine)
368 Elf64_Ehdr header;
369 int fd;
371 fd = open (file, O_RDONLY);
372 if (fd < 0)
373 return -1;
375 if (read (fd, &header, sizeof (header)) != sizeof (header))
377 close (fd);
378 return 0;
380 close (fd);
382 return elf_64_header_p (&header, machine);
385 /* Accepts an integer PID; Returns true if the executable PID is
386 running is a 64-bit ELF file.. */
389 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
391 char file[PATH_MAX];
393 sprintf (file, "/proc/%d/exe", pid);
394 return elf_64_file_p (file, machine);
397 void
398 linux_process_target::delete_lwp (lwp_info *lwp)
400 thread_info *thr = lwp->thread;
402 threads_debug_printf ("deleting %ld", thr->id.lwp ());
404 thr->process ()->remove_thread (thr);
406 low_delete_thread (lwp->arch_private);
408 delete lwp;
411 void
412 linux_process_target::low_delete_thread (arch_lwp_info *info)
414 /* Default implementation should be overridden if architecture-specific
415 info is being used. */
416 gdb_assert (info == nullptr);
419 /* Open the /proc/PID/mem file for PROC. */
421 static void
422 open_proc_mem_file (process_info *proc)
424 gdb_assert (proc->priv->mem_fd == -1);
426 char filename[64];
427 xsnprintf (filename, sizeof filename, "/proc/%d/mem", proc->pid);
429 proc->priv->mem_fd
430 = gdb_open_cloexec (filename, O_RDWR | O_LARGEFILE, 0).release ();
433 process_info *
434 linux_process_target::add_linux_process_no_mem_file (int pid, int attached)
436 struct process_info *proc;
438 proc = add_process (pid, attached);
439 proc->priv = XCNEW (struct process_info_private);
441 proc->priv->arch_private = low_new_process ();
442 proc->priv->mem_fd = -1;
444 return proc;
448 process_info *
449 linux_process_target::add_linux_process (int pid, int attached)
451 process_info *proc = add_linux_process_no_mem_file (pid, attached);
452 open_proc_mem_file (proc);
453 return proc;
456 void
457 linux_process_target::remove_linux_process (process_info *proc)
459 if (proc->priv->mem_fd >= 0)
460 close (proc->priv->mem_fd);
462 this->low_delete_process (proc->priv->arch_private);
464 xfree (proc->priv);
465 proc->priv = nullptr;
467 remove_process (proc);
470 arch_process_info *
471 linux_process_target::low_new_process ()
473 return nullptr;
476 void
477 linux_process_target::low_delete_process (arch_process_info *info)
479 /* Default implementation must be overridden if architecture-specific
480 info exists. */
481 gdb_assert (info == nullptr);
484 void
485 linux_process_target::low_new_fork (process_info *parent, process_info *child)
487 /* Nop. */
490 void
491 linux_process_target::arch_setup_thread (thread_info *thread)
493 scoped_restore_current_thread restore_thread;
494 switch_to_thread (thread);
496 low_arch_setup ();
500 linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
501 int wstat)
503 client_state &cs = get_client_state ();
504 struct lwp_info *event_lwp = *orig_event_lwp;
505 int event = linux_ptrace_get_extended_event (wstat);
506 thread_info *event_thr = event_lwp->thread;
508 gdb_assert (event_lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);
510 /* All extended events we currently use are mid-syscall. Only
511 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
512 you have to be using PTRACE_SEIZE to get that. */
513 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
515 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
516 || (event == PTRACE_EVENT_CLONE))
518 unsigned long new_pid;
519 int ret, status;
521 /* Get the pid of the new lwp. */
522 ptrace (PTRACE_GETEVENTMSG, event_thr->id.lwp (), (PTRACE_TYPE_ARG3) 0,
523 &new_pid);
525 /* If we haven't already seen the new PID stop, wait for it now. */
526 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
528 /* The new child has a pending SIGSTOP. We can't affect it until it
529 hits the SIGSTOP, but we're already attached. */
531 ret = my_waitpid (new_pid, &status, __WALL);
533 if (ret == -1)
534 perror_with_name ("waiting for new child");
535 else if (ret != new_pid)
536 warning ("wait returned unexpected PID %d", ret);
537 else if (!WIFSTOPPED (status))
538 warning ("wait returned unexpected status 0x%x", status);
541 if (debug_threads)
543 debug_printf ("HEW: Got %s event from LWP %ld, new child is %ld\n",
544 (event == PTRACE_EVENT_FORK ? "fork"
545 : event == PTRACE_EVENT_VFORK ? "vfork"
546 : event == PTRACE_EVENT_CLONE ? "clone"
547 : "???"),
548 event_thr->id.lwp (),
549 new_pid);
552 ptid_t child_ptid = (event != PTRACE_EVENT_CLONE
553 ? ptid_t (new_pid, new_pid)
554 : ptid_t (event_thr->id.pid (), new_pid));
556 process_info *child_proc = nullptr;
558 if (event != PTRACE_EVENT_CLONE)
560 /* Add the new process to the tables before we add the LWP.
561 We need to do this even if the new process will be
562 detached. See breakpoint cloning code further below. */
563 child_proc = add_linux_process (new_pid, 0);
566 lwp_info *child_lwp = add_lwp (child_ptid);
567 gdb_assert (child_lwp != NULL);
568 child_lwp->stopped = 1;
569 if (event != PTRACE_EVENT_CLONE)
570 child_lwp->must_set_ptrace_flags = 1;
571 child_lwp->status_pending_p = 0;
573 thread_info *child_thr = child_lwp->thread;
575 /* If we're suspending all threads, leave this one suspended
576 too. If the fork/clone parent is stepping over a breakpoint,
577 all other threads have been suspended already. Leave the
578 child suspended too. */
579 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
580 || event_lwp->bp_reinsert != 0)
582 threads_debug_printf ("leaving child suspended");
583 child_lwp->suspended = 1;
586 if (event_lwp->bp_reinsert != 0
587 && supports_software_single_step ()
588 && event == PTRACE_EVENT_VFORK)
590 /* If we leave single-step breakpoints there, child will
591 hit it, so uninsert single-step breakpoints from parent
592 (and child). Once vfork child is done, reinsert
593 them back to parent. */
594 uninsert_single_step_breakpoints (event_thr);
597 if (event != PTRACE_EVENT_CLONE)
599 /* Clone the breakpoint lists of the parent. We need to do
600 this even if the new process will be detached, since we
601 will need the process object and the breakpoints to
602 remove any breakpoints from memory when we detach, and
603 the client side will access registers. */
604 gdb_assert (child_proc != NULL);
606 process_info *parent_proc = event_thr->process ();
607 child_proc->attached = parent_proc->attached;
609 clone_all_breakpoints (child_thr, event_thr);
611 target_desc_up tdesc = allocate_target_description ();
612 copy_target_description (tdesc.get (), parent_proc->tdesc);
613 child_proc->tdesc = tdesc.release ();
615 /* Clone arch-specific process data. */
616 low_new_fork (parent_proc, child_proc);
619 /* Save fork/clone info in the parent thread. */
620 if (event == PTRACE_EVENT_FORK)
621 event_lwp->waitstatus.set_forked (child_ptid);
622 else if (event == PTRACE_EVENT_VFORK)
623 event_lwp->waitstatus.set_vforked (child_ptid);
624 else if (event == PTRACE_EVENT_CLONE
625 && (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) != 0)
626 event_lwp->waitstatus.set_thread_cloned (child_ptid);
628 if (event != PTRACE_EVENT_CLONE
629 || (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) != 0)
631 /* The status_pending field contains bits denoting the
632 extended event, so when the pending event is handled, the
633 handler will look at lwp->waitstatus. */
634 event_lwp->status_pending_p = 1;
635 event_lwp->status_pending = wstat;
637 /* Link the threads until the parent's event is passed on to
638 GDB. */
639 event_lwp->relative = child_lwp;
640 child_lwp->relative = event_lwp;
643 /* If the parent thread is doing step-over with single-step
644 breakpoints, the list of single-step breakpoints are cloned
645 from the parent's. Remove them from the child process.
646 In case of vfork, we'll reinsert them back once vforked
647 child is done. */
648 if (event_lwp->bp_reinsert != 0
649 && supports_software_single_step ())
651 /* The child process is forked and stopped, so it is safe
652 to access its memory without stopping all other threads
653 from other processes. */
654 delete_single_step_breakpoints (child_thr);
656 gdb_assert (has_single_step_breakpoints (event_thr));
657 gdb_assert (!has_single_step_breakpoints (child_thr));
660 /* Normally we will get the pending SIGSTOP. But in some cases
661 we might get another signal delivered to the group first.
662 If we do get another signal, be sure not to lose it. */
663 if (WSTOPSIG (status) != SIGSTOP)
665 child_lwp->stop_expected = 1;
666 child_lwp->status_pending_p = 1;
667 child_lwp->status_pending = status;
669 else if (event == PTRACE_EVENT_CLONE && cs.report_thread_events)
671 child_lwp->waitstatus.set_thread_created ();
672 child_lwp->status_pending_p = 1;
673 child_lwp->status_pending = status;
676 if (event == PTRACE_EVENT_CLONE)
678 #ifdef USE_THREAD_DB
679 thread_db_notice_clone (event_thr, child_ptid);
680 #endif
683 if (event == PTRACE_EVENT_CLONE
684 && (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) == 0)
686 threads_debug_printf
687 ("not reporting clone event from LWP %ld, new child is %ld\n",
688 event_thr->id.lwp (),
689 new_pid);
690 return 1;
693 /* Leave the child stopped until GDB processes the parent
694 event. */
695 child_thr->last_resume_kind = resume_stop;
696 child_thr->last_status.set_stopped (GDB_SIGNAL_0);
698 /* Report the event. */
699 threads_debug_printf
700 ("reporting %s event from LWP %ld, new child is %ld\n",
701 (event == PTRACE_EVENT_FORK ? "fork"
702 : event == PTRACE_EVENT_VFORK ? "vfork"
703 : event == PTRACE_EVENT_CLONE ? "clone"
704 : "???"),
705 event_thr->id.lwp (),
706 new_pid);
707 return 0;
709 else if (event == PTRACE_EVENT_VFORK_DONE)
711 event_lwp->waitstatus.set_vfork_done ();
713 if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
715 reinsert_single_step_breakpoints (event_thr);
717 gdb_assert (has_single_step_breakpoints (event_thr));
720 /* Report the event. */
721 return 0;
723 else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
725 std::vector<int> syscalls_to_catch;
726 ptid_t event_ptid;
727 pid_t event_pid;
729 threads_debug_printf ("Got exec event from LWP %ld",
730 event_thr->id.lwp ());
732 /* Get the event ptid. */
733 event_ptid = event_thr->id;
734 event_pid = event_ptid.pid ();
736 /* Save the syscall list from the execing process. */
737 process_info *proc = event_thr->process ();
738 syscalls_to_catch = std::move (proc->syscalls_to_catch);
740 /* Delete the execing process and all its threads. */
741 mourn (proc);
742 switch_to_thread (nullptr);
744 /* Create a new process/lwp/thread. */
745 proc = add_linux_process (event_pid, 0);
746 event_lwp = add_lwp (event_ptid);
747 event_thr = event_lwp->thread;
748 gdb_assert (current_thread == event_thr);
749 arch_setup_thread (event_thr);
751 /* Set the event status. */
752 event_lwp->waitstatus.set_execd
753 (make_unique_xstrdup
754 (linux_proc_pid_to_exec_file (event_thr->id.lwp ())));
756 /* Mark the exec status as pending. */
757 event_lwp->stopped = 1;
758 event_lwp->status_pending_p = 1;
759 event_lwp->status_pending = wstat;
760 event_thr->last_resume_kind = resume_continue;
761 event_thr->last_status.set_ignore ();
763 /* Update syscall state in the new lwp, effectively mid-syscall too. */
764 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
766 /* Restore the list to catch. Don't rely on the client, which is free
767 to avoid sending a new list when the architecture doesn't change.
768 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
769 proc->syscalls_to_catch = std::move (syscalls_to_catch);
771 /* Report the event. */
772 *orig_event_lwp = event_lwp;
773 return 0;
776 internal_error (_("unknown ptrace event %d"), event);
779 CORE_ADDR
780 linux_process_target::get_pc (lwp_info *lwp)
782 process_info *proc = lwp->thread->process ();
783 gdb_assert (!proc->starting_up);
785 if (!low_supports_breakpoints ())
786 return 0;
788 scoped_restore_current_thread restore_thread;
789 switch_to_thread (lwp->thread);
791 regcache *regcache = get_thread_regcache (current_thread);
792 CORE_ADDR pc = low_get_pc (regcache);
794 threads_debug_printf ("pc is 0x%lx", (long) pc);
796 return pc;
799 void
800 linux_process_target::get_syscall_trapinfo (lwp_info *lwp, int *sysno)
802 struct regcache *regcache;
804 scoped_restore_current_thread restore_thread;
805 switch_to_thread (lwp->thread);
807 regcache = get_thread_regcache (current_thread);
808 low_get_syscall_trapinfo (regcache, sysno);
810 threads_debug_printf ("get_syscall_trapinfo sysno %d", *sysno);
813 void
814 linux_process_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
816 /* By default, report an unknown system call number. */
817 *sysno = UNKNOWN_SYSCALL;
820 bool
821 linux_process_target::save_stop_reason (lwp_info *lwp)
823 CORE_ADDR pc;
824 CORE_ADDR sw_breakpoint_pc;
825 siginfo_t siginfo;
827 if (!low_supports_breakpoints ())
828 return false;
830 process_info *proc = lwp->thread->process ();
831 if (proc->starting_up)
833 /* Claim we have the stop PC so that the caller doesn't try to
834 fetch it itself. */
835 return true;
838 pc = get_pc (lwp);
839 sw_breakpoint_pc = pc - low_decr_pc_after_break ();
841 /* breakpoint_at reads from the current thread. */
842 scoped_restore_current_thread restore_thread;
843 switch_to_thread (lwp->thread);
845 if (ptrace (PTRACE_GETSIGINFO, current_thread->id.lwp (),
846 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
848 if (siginfo.si_signo == SIGTRAP)
850 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
851 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
853 /* The si_code is ambiguous on this arch -- check debug
854 registers. */
855 if (!check_stopped_by_watchpoint (lwp))
856 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
858 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
860 /* If we determine the LWP stopped for a SW breakpoint,
861 trust it. Particularly don't check watchpoint
862 registers, because at least on s390, we'd find
863 stopped-by-watchpoint as long as there's a watchpoint
864 set. */
865 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
867 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
869 /* This can indicate either a hardware breakpoint or
870 hardware watchpoint. Check debug registers. */
871 if (!check_stopped_by_watchpoint (lwp))
872 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
874 else if (siginfo.si_code == TRAP_TRACE)
876 /* We may have single stepped an instruction that
877 triggered a watchpoint. In that case, on some
878 architectures (such as x86), instead of TRAP_HWBKPT,
879 si_code indicates TRAP_TRACE, and we need to check
880 the debug registers separately. */
881 if (!check_stopped_by_watchpoint (lwp))
882 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
887 if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
889 threads_debug_printf
890 ("%s stopped by software breakpoint",
891 target_pid_to_str (lwp->thread->id).c_str ());
893 /* Back up the PC if necessary. */
894 if (pc != sw_breakpoint_pc)
896 struct regcache *regcache
897 = get_thread_regcache (current_thread);
898 low_set_pc (regcache, sw_breakpoint_pc);
901 /* Update this so we record the correct stop PC below. */
902 pc = sw_breakpoint_pc;
904 else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
905 threads_debug_printf
906 ("%s stopped by hardware breakpoint",
907 target_pid_to_str (lwp->thread->id).c_str ());
908 else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
909 threads_debug_printf
910 ("%s stopped by hardware watchpoint",
911 target_pid_to_str (lwp->thread->id).c_str ());
912 else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
913 threads_debug_printf
914 ("%s stopped by trace",
915 target_pid_to_str (lwp->thread->id).c_str ());
917 lwp->stop_pc = pc;
918 return true;
921 lwp_info *
922 linux_process_target::add_lwp (ptid_t ptid)
924 lwp_info *lwp = new lwp_info;
926 lwp->thread = find_process_pid (ptid.pid ())->add_thread (ptid, lwp);
928 low_new_thread (lwp);
930 return lwp;
933 void
934 linux_process_target::low_new_thread (lwp_info *info)
936 /* Nop. */
939 /* Callback to be used when calling fork_inferior, responsible for
940 actually initiating the tracing of the inferior. */
942 static void
943 linux_ptrace_fun ()
945 if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
946 (PTRACE_TYPE_ARG4) 0) < 0)
947 trace_start_error_with_name ("ptrace");
949 if (setpgid (0, 0) < 0)
950 trace_start_error_with_name ("setpgid");
952 /* If GDBserver is connected to gdb via stdio, redirect the inferior's
953 stdout to stderr so that inferior i/o doesn't corrupt the connection.
954 Also, redirect stdin to /dev/null. */
955 if (remote_connection_is_stdio ())
957 if (close (0) < 0)
958 trace_start_error_with_name ("close");
959 if (open ("/dev/null", O_RDONLY) < 0)
960 trace_start_error_with_name ("open");
961 if (dup2 (2, 1) < 0)
962 trace_start_error_with_name ("dup2");
963 if (write (2, "stdin/stdout redirected\n",
964 sizeof ("stdin/stdout redirected\n") - 1) < 0)
966 /* Errors ignored. */;
971 /* Start an inferior process and returns its pid.
972 PROGRAM is the name of the program to be started, and PROGRAM_ARGS
973 are its arguments. */
976 linux_process_target::create_inferior (const char *program,
977 const std::vector<char *> &program_args)
979 client_state &cs = get_client_state ();
980 struct lwp_info *new_lwp;
981 int pid;
982 ptid_t ptid;
985 maybe_disable_address_space_randomization restore_personality
986 (cs.disable_randomization);
987 std::string str_program_args = construct_inferior_arguments (program_args);
989 pid = fork_inferior (program,
990 str_program_args.c_str (),
991 get_environ ()->envp (), linux_ptrace_fun,
992 NULL, NULL, NULL, NULL);
995 /* When spawning a new process, we can't open the mem file yet. We
996 still have to nurse the process through the shell, and that execs
997 a couple times. The address space a /proc/PID/mem file is
998 accessing is destroyed on exec. */
999 process_info *proc = add_linux_process_no_mem_file (pid, 0);
1001 ptid = ptid_t (pid, pid);
1002 new_lwp = add_lwp (ptid);
1003 new_lwp->must_set_ptrace_flags = 1;
1005 post_fork_inferior (pid, program);
1007 /* PROC is now past the shell running the program we want, so we can
1008 open the /proc/PID/mem file. */
1009 open_proc_mem_file (proc);
1011 return pid;
1014 /* Implement the post_create_inferior target_ops method. */
1016 void
1017 linux_process_target::post_create_inferior ()
1019 struct lwp_info *lwp = get_thread_lwp (current_thread);
1021 low_arch_setup ();
1023 if (lwp->must_set_ptrace_flags)
1025 struct process_info *proc = current_process ();
1026 int options = linux_low_ptrace_options (proc->attached);
1028 linux_enable_event_reporting (current_thread->id.lwp (), options);
1029 lwp->must_set_ptrace_flags = 0;
1034 linux_process_target::attach_lwp (ptid_t ptid)
1036 struct lwp_info *new_lwp;
1037 int lwpid = ptid.lwp ();
1039 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
1040 != 0)
1041 return errno;
1043 new_lwp = add_lwp (ptid);
1045 /* We need to wait for SIGSTOP before being able to make the next
1046 ptrace call on this LWP. */
1047 new_lwp->must_set_ptrace_flags = 1;
1049 if (linux_proc_pid_is_stopped (lwpid))
1051 threads_debug_printf ("Attached to a stopped process");
1053 /* The process is definitely stopped. It is in a job control
1054 stop, unless the kernel predates the TASK_STOPPED /
1055 TASK_TRACED distinction, in which case it might be in a
1056 ptrace stop. Make sure it is in a ptrace stop; from there we
1057 can kill it, signal it, et cetera.
1059 First make sure there is a pending SIGSTOP. Since we are
1060 already attached, the process can not transition from stopped
1061 to running without a PTRACE_CONT; so we know this signal will
1062 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1063 probably already in the queue (unless this kernel is old
1064 enough to use TASK_STOPPED for ptrace stops); but since
1065 SIGSTOP is not an RT signal, it can only be queued once. */
1066 kill_lwp (lwpid, SIGSTOP);
1068 /* Finally, resume the stopped process. This will deliver the
1069 SIGSTOP (or a higher priority signal, just like normal
1070 PTRACE_ATTACH), which we'll catch later on. */
1071 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1074 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
1075 brings it to a halt.
1077 There are several cases to consider here:
1079 1) gdbserver has already attached to the process and is being notified
1080 of a new thread that is being created.
1081 In this case we should ignore that SIGSTOP and resume the
1082 process. This is handled below by setting stop_expected = 1,
1083 and the fact that add_thread sets last_resume_kind ==
1084 resume_continue.
1086 2) This is the first thread (the process thread), and we're attaching
1087 to it via attach_inferior.
1088 In this case we want the process thread to stop.
1089 This is handled by having linux_attach set last_resume_kind ==
1090 resume_stop after we return.
1092 If the pid we are attaching to is also the tgid, we attach to and
1093 stop all the existing threads. Otherwise, we attach to pid and
1094 ignore any other threads in the same group as this pid.
1096 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1097 existing threads.
1098 In this case we want the thread to stop.
1099 FIXME: This case is currently not properly handled.
1100 We should wait for the SIGSTOP but don't. Things work apparently
1101 because enough time passes between when we ptrace (ATTACH) and when
1102 gdb makes the next ptrace call on the thread.
1104 On the other hand, if we are currently trying to stop all threads, we
1105 should treat the new thread as if we had sent it a SIGSTOP. This works
1106 because we are guaranteed that the add_lwp call above added us to the
1107 end of the list, and so the new thread has not yet reached
1108 wait_for_sigstop (but will). */
1109 new_lwp->stop_expected = 1;
1111 return 0;
1114 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1115 already attached. Returns true if a new LWP is found, false
1116 otherwise. */
1118 static int
1119 attach_proc_task_lwp_callback (ptid_t ptid)
1121 /* Is this a new thread? */
1122 if (find_thread_ptid (ptid) == NULL)
1124 int lwpid = ptid.lwp ();
1125 int err;
1127 threads_debug_printf ("Found new lwp %d", lwpid);
1129 err = the_linux_target->attach_lwp (ptid);
1131 /* Be quiet if we simply raced with the thread exiting. EPERM
1132 is returned if the thread's task still exists, and is marked
1133 as exited or zombie, as well as other conditions, so in that
1134 case, confirm the status in /proc/PID/status. */
1135 if (err == ESRCH
1136 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1137 threads_debug_printf
1138 ("Cannot attach to lwp %d: thread is gone (%d: %s)",
1139 lwpid, err, safe_strerror (err));
1140 else if (err != 0)
1142 std::string reason
1143 = linux_ptrace_attach_fail_reason_string (ptid, err);
1145 error (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
1148 return 1;
1150 return 0;
1153 static void async_file_mark (void);
1155 /* Attach to PID. If PID is the tgid, attach to it and all
1156 of its threads. */
/* Returns 0 on success; on initial-attach failure the process entry is
   removed and error () is raised instead.  */
1159 linux_process_target::attach (unsigned long pid)
1161 struct process_info *proc;
1162 thread_info *initial_thread;
1163 ptid_t ptid = ptid_t (pid, pid);
1164 int err;
1166 /* Delay opening the /proc/PID/mem file until we've successfully
1167 attached. */
1168 proc = add_linux_process_no_mem_file (pid, 1);
1170 /* Attach to PID. We will check for other threads
1171 soon. */
1172 err = attach_lwp (ptid);
1173 if (err != 0)
/* Attach failed: undo the process registration before reporting.  */
1175 this->remove_linux_process (proc);
1177 std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
1178 error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
1181 open_proc_mem_file (proc);
1183 /* Don't ignore the initial SIGSTOP if we just attached to this
1184 process. It will be collected by wait shortly. */
1185 initial_thread = find_thread_ptid (ptid_t (pid, pid));
1186 gdb_assert (initial_thread != nullptr);
1187 initial_thread->last_resume_kind = resume_stop;
1189 /* We must attach to every LWP. If /proc is mounted, use that to
1190 find them now. On the one hand, the inferior may be using raw
1191 clone instead of using pthreads. On the other hand, even if it
1192 is using pthreads, GDB may not be connected yet (thread_db needs
1193 to do symbol lookups, through qSymbol). Also, thread_db walks
1194 structures in the inferior's address space to find the list of
1195 threads/LWPs, and those structures may well be corrupted. Note
1196 that once thread_db is loaded, we'll still use it to list threads
1197 and associate pthread info with each LWP. */
/* NOTE(review): in the full source a `try' block encloses the next call;
   the `catch' just below detaches and rethrows if the thread walk fails.  */
1200 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
1202 catch (const gdb_exception_error &)
1204 /* Make sure we do not deliver the SIGSTOP to the process. */
1205 initial_thread->last_resume_kind = resume_continue;
1207 this->detach (proc);
1208 throw;
1211 /* GDB will shortly read the xml target description for this
1212 process, to figure out the process' architecture. But the target
1213 description is only filled in when the first process/thread in
1214 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1215 that now, otherwise, if GDB is fast enough, it could read the
1216 target description _before_ that initial stop. */
1217 if (non_stop)
1219 struct lwp_info *lwp;
1220 int wstat, lwpid;
1221 ptid_t pid_ptid = ptid_t (pid);
/* Consume the first stop of any LWP in the group; __WALL covers both
   regular children and clones.  */
1223 lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
1224 gdb_assert (lwpid > 0);
1226 lwp = find_lwp_pid (ptid_t (lwpid));
1227 gdb_assert (lwp != nullptr);
/* Anything other than the expected SIGSTOP is kept pending so it is
   reported to GDB later rather than lost.  */
1229 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
1231 lwp->status_pending_p = 1;
1232 lwp->status_pending = wstat;
1235 initial_thread->last_resume_kind = resume_continue;
1237 async_file_mark ();
1239 gdb_assert (proc->tdesc != NULL);
1242 return 0;
1245 static int
1246 last_thread_of_process_p (int pid)
1248 bool seen_one = false;
1250 thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
1252 if (!seen_one)
1254 /* This is the first thread of this process we see. */
1255 seen_one = true;
1256 return false;
1258 else
1260 /* This is the second thread of this process we see. */
1261 return true;
1265 return thread == NULL;
1268 /* Kill LWP. */
1270 static void
1271 linux_kill_one_lwp (struct lwp_info *lwp)
1273 thread_info *thr = lwp->thread;
/* PID here is really the LWP id — the kill targets this one thread.  */
1274 int pid = thr->id.lwp ();
1276 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1277 there is no signal context, and ptrace(PTRACE_KILL) (or
1278 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1279 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1280 alternative is to kill with SIGKILL. We only need one SIGKILL
1281 per process, not one for each thread. But since we still support
1282 debugging programs using raw clone without CLONE_THREAD,
1283 we send one for each thread. For years, we used PTRACE_KILL
1284 only, so we're being a bit paranoid about some old kernels where
1285 PTRACE_KILL might work better (dubious if there are any such, but
1286 that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
1287 second, and so we're fine everywhere. */
/* errno is snapshotted right after each call, before any other call
   could overwrite it, and only the snapshot is logged.  */
1289 errno = 0;
1290 kill_lwp (pid, SIGKILL);
1291 if (debug_threads)
1293 int save_errno = errno;
1295 threads_debug_printf ("kill_lwp (SIGKILL) %s, 0, 0 (%s)",
1296 target_pid_to_str (thr->id).c_str (),
1297 save_errno ? safe_strerror (save_errno) : "OK");
1300 errno = 0;
1301 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1302 if (debug_threads)
1304 int save_errno = errno;
1306 threads_debug_printf ("PTRACE_KILL %s, 0, 0 (%s)",
1307 target_pid_to_str (thr->id).c_str (),
1308 save_errno ? safe_strerror (save_errno) : "OK");
1312 /* Kill LWP and wait for it to die. */
1314 static void
1315 kill_wait_lwp (struct lwp_info *lwp)
1317 thread_info *thr = lwp->thread;
1318 int pid = thr->id.pid ();
1319 int lwpid = thr->id.lwp ();
1320 int wstat;
1321 int res;
1323 threads_debug_printf ("killing lwp %d, for pid: %d", lwpid, pid);
/* NOTE(review): a `do' opens the loop here in the full source (the
   short line was lost in extraction); its condition is the `while'
   at the end below.  */
1327 linux_kill_one_lwp (lwp);
1329 /* Make sure it died. Notes:
1331 - The loop is most likely unnecessary.
1333 - We don't use wait_for_event as that could delete lwps
1334 while we're iterating over them. We're not interested in
1335 any pending status at this point, only in making sure all
1336 wait status on the kernel side are collected until the
1337 process is reaped.
1339 - We don't use __WALL here as the __WALL emulation relies on
1340 SIGCHLD, and killing a stopped process doesn't generate
1341 one, nor an exit status.
1343 res = my_waitpid (lwpid, &wstat, 0);
/* On ECHILD, retry with __WCLONE: presumably the LWP is a clone not
   covered by a plain wait — TODO confirm against my_waitpid.  */
1344 if (res == -1 && errno == ECHILD)
1345 res = my_waitpid (lwpid, &wstat, __WCLONE);
1346 } while (res > 0 && WIFSTOPPED (wstat));
1348 /* Even if it was stopped, the child may have already disappeared.
1349 E.g., if it was killed by SIGKILL. */
1350 if (res < 0 && errno != ECHILD)
1351 perror_with_name ("kill_wait_lwp");
1354 /* Callback for `for_each_thread'. Kills an lwp of a given process,
1355 except the leader. */
1357 static void
1358 kill_one_lwp_callback (thread_info *thread, int pid)
1360 struct lwp_info *lwp = get_thread_lwp (thread);
1362 /* We avoid killing the first thread here, because of a Linux kernel (at
1363 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1364 the children get a chance to be reaped, it will remain a zombie
1365 forever. */
1367 if (thread->id.lwp () == pid)
1369 threads_debug_printf ("is last of process %s",
1370 target_pid_to_str (thread->id).c_str ());
1371 return;
1374 kill_wait_lwp (lwp);
/* Kill PROCESS: every non-leader LWP first, then the leader, then
   mourn it.  Always returns 0.  (The `int' return-type line precedes
   this in the full source.)  */
1378 linux_process_target::kill (process_info *process)
1380 int pid = process->pid;
1382 /* If we're killing a running inferior, make sure it is stopped
1383 first, as PTRACE_KILL will not work otherwise. */
1384 stop_all_lwps (0, NULL);
1386 process->for_each_thread ([&] (thread_info *thread)
1388 kill_one_lwp_callback (thread, pid);
1391 /* See the comment in linux_kill_one_lwp. We did not kill the first
1392 thread in the list, so do so now. */
1393 lwp_info *lwp = find_lwp_pid (ptid_t (pid));
1395 if (lwp == NULL)
1396 threads_debug_printf ("cannot find lwp for pid: %d", pid);
1397 else
1398 kill_wait_lwp (lwp);
1400 mourn (process);
1402 /* Since we presently can only stop all lwps of all processes, we
1403 need to unstop lwps of other processes. */
1404 unstop_all_lwps (0, NULL);
1405 return 0;
1408 /* Get pending signal of THREAD, for detaching purposes. This is the
1409 signal the thread last stopped for, which we need to deliver to the
1410 thread when detaching, otherwise, it'd be suppressed/lost. */
/* Returns a host signal number suitable for PTRACE_DETACH, or 0 when
   nothing should be delivered.  */
1412 static int
1413 get_detach_signal (thread_info *thread)
1415 client_state &cs = get_client_state ();
1416 enum gdb_signal signo = GDB_SIGNAL_0;
1417 int status;
1418 struct lwp_info *lp = get_thread_lwp (thread);
/* Prefer a status we collected but have not yet reported to GDB.  */
1420 if (lp->status_pending_p)
1421 status = lp->status_pending;
1422 else
1424 /* If the thread had been suspended by gdbserver, and it stopped
1425 cleanly, then it'll have stopped with SIGSTOP. But we don't
1426 want to deliver that SIGSTOP. */
1427 if (thread->last_status.kind () != TARGET_WAITKIND_STOPPED
1428 || thread->last_status.sig () == GDB_SIGNAL_0)
1429 return 0;
1431 /* Otherwise, we may need to deliver the signal we
1432 intercepted. */
1433 status = lp->last_status;
1436 if (!WIFSTOPPED (status))
1438 threads_debug_printf ("lwp %s hasn't stopped: no pending signal",
1439 target_pid_to_str (thread->id).c_str ());
1440 return 0;
1443 /* Extended wait statuses aren't real SIGTRAPs. */
1444 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1446 threads_debug_printf ("lwp %s had stopped with extended "
1447 "status: no pending signal",
1448 target_pid_to_str (thread->id).c_str ());
1449 return 0;
1452 signo = gdb_signal_from_host (WSTOPSIG (status));
/* Honor GDB's pass/nopass signal table when we have one; otherwise fall
   back to suppressing only SIGTRAP/SIGINT, GDB's default nopass set.  */
1454 if (cs.program_signals_p && !cs.program_signals[signo])
1456 threads_debug_printf ("lwp %s had signal %s, but it is in nopass state",
1457 target_pid_to_str (thread->id).c_str (),
1458 gdb_signal_to_string (signo));
1459 return 0;
1461 else if (!cs.program_signals_p
1462 /* If we have no way to know which signals GDB does not
1463 want to have passed to the program, assume
1464 SIGTRAP/SIGINT, which is GDB's default. */
1465 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1467 threads_debug_printf ("lwp %s had signal %s, "
1468 "but we don't know if we should pass it. "
1469 "Default to not.",
1470 target_pid_to_str (thread->id).c_str (),
1471 gdb_signal_to_string (signo));
1472 return 0;
1474 else
1476 threads_debug_printf ("lwp %s has pending signal %s: delivering it",
1477 target_pid_to_str (thread->id).c_str (),
1478 gdb_signal_to_string (signo));
1480 return WSTOPSIG (status);
/* Detach from a single LWP: clear any pending SIGSTOP, compute the
   signal to forward, flush registers, then PTRACE_DETACH, reaping the
   lwp instead if it turns out to already be zombie.  */
1484 void
1485 linux_process_target::detach_one_lwp (lwp_info *lwp)
1487 thread_info *thread = lwp->thread;
1488 int sig;
1490 /* If there is a pending SIGSTOP, get rid of it. */
1491 if (lwp->stop_expected)
1493 threads_debug_printf ("Sending SIGCONT to %s",
1494 target_pid_to_str (thread->id).c_str ());
1496 kill_lwp (thread->id.lwp (), SIGCONT);
1497 lwp->stop_expected = 0;
1500 /* Pass on any pending signal for this thread. */
1501 sig = get_detach_signal (thread);
1503 /* Preparing to resume may try to write registers, and fail if the
1504 lwp is zombie. If that happens, ignore the error. We'll handle
1505 it below, when detach fails with ESRCH. */
/* NOTE(review): in the full source a `try' wraps the next two calls;
   the `catch' below swallows the error when the lwp is already gone.  */
1508 /* Flush any pending changes to the process's registers. */
1509 regcache_invalidate_thread (thread);
1511 /* Finally, let it resume. */
1512 low_prepare_to_resume (lwp);
1514 catch (const gdb_exception_error &ex)
1516 if (!check_ptrace_stopped_lwp_gone (lwp))
1517 throw;
1520 int lwpid = thread->id.lwp ();
1521 if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
1522 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1524 int save_errno = errno;
1526 /* We know the thread exists, so ESRCH must mean the lwp is
1527 zombie. This can happen if one of the already-detached
1528 threads exits the whole thread group. In that case we're
1529 still attached, and must reap the lwp. */
1530 if (save_errno == ESRCH)
1532 int ret, status;
1534 ret = my_waitpid (lwpid, &status, __WALL);
1535 if (ret == -1)
1537 warning (_("Couldn't reap LWP %d while detaching: %s"),
1538 lwpid, safe_strerror (errno));
/* A reaped status that is neither an exit nor a signal death is
   unexpected; report it but carry on.  */
1540 else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1542 warning (_("Reaping LWP %d while detaching "
1543 "returned unexpected status 0x%x"),
1544 lwpid, status);
1547 else
1549 error (_("Can't detach %s: %s"),
1550 target_pid_to_str (thread->id).c_str (),
1551 safe_strerror (save_errno));
1554 else
1555 threads_debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)",
1556 target_pid_to_str (thread->id).c_str (),
1557 strsignal (sig));
1559 delete_lwp (lwp);
/* Detach from PROCESS and all of its LWPs, clone lwps before the
   leader, then mourn it.  Always returns 0.  (The `int' return-type
   line precedes this in the full source.)  */
1563 linux_process_target::detach (process_info *process)
1565 struct lwp_info *main_lwp;
1567 /* As there's a step over already in progress, let it finish first,
1568 otherwise nesting a stabilize_threads operation on top gets real
1569 messy. */
1570 complete_ongoing_step_over ();
1572 /* Stop all threads before detaching. First, ptrace requires that
1573 the thread is stopped to successfully detach. Second, thread_db
1574 may need to uninstall thread event breakpoints from memory, which
1575 only works with a stopped process anyway. */
1576 stop_all_lwps (0, NULL);
1578 #ifdef USE_THREAD_DB
1579 thread_db_detach (process);
1580 #endif
1582 /* Stabilize threads (move out of jump pads). */
1583 target_stabilize_threads ();
1585 /* Detach from the clone lwps first. If the thread group exits just
1586 while we're detaching, we must reap the clone lwps before we're
1587 able to reap the leader. */
1588 process->for_each_thread ([this] (thread_info *thread)
1590 /* We don't actually detach from the thread group leader just yet.
1591 If the thread group exits, we must reap the zombie clone lwps
1592 before we're able to reap the leader. */
1593 if (thread->id.pid () == thread->id.lwp ())
1594 return;
1596 lwp_info *lwp = get_thread_lwp (thread);
1597 detach_one_lwp (lwp);
/* Now the leader, skipped by the loop above.  */
1600 main_lwp = find_lwp_pid (ptid_t (process->pid));
1601 gdb_assert (main_lwp != nullptr);
1602 detach_one_lwp (main_lwp);
1604 mourn (process);
1606 /* Since we presently can only stop all lwps of all processes, we
1607 need to unstop lwps of other processes. */
1608 unstop_all_lwps (0, NULL);
1609 return 0;
1612 /* Remove all LWPs that belong to process PROC from the lwp list. */
1614 void
1615 linux_process_target::mourn (process_info *process)
1617 #ifdef USE_THREAD_DB
1618 thread_db_mourn (process);
1619 #endif
1621 process->for_each_thread ([this] (thread_info *thread)
1623 delete_lwp (get_thread_lwp (thread));
1626 this->remove_linux_process (process);
1629 void
1630 linux_process_target::join (int pid)
1632 int status, ret;
1634 do {
1635 ret = my_waitpid (pid, &status, 0);
1636 if (WIFEXITED (status) || WIFSIGNALED (status))
1637 break;
1638 } while (ret != -1 || errno != ECHILD);
1641 /* Return true if the given thread is still alive. */
1643 bool
1644 linux_process_target::thread_alive (ptid_t ptid)
1646 struct lwp_info *lwp = find_lwp_pid (ptid);
1648 /* We assume we always know if a thread exits. If a whole process
1649 exited but we still haven't been able to report it to GDB, we'll
1650 hold on to the last lwp of the dead process. */
1651 if (lwp != NULL)
1652 return !lwp_is_marked_dead (lwp);
1653 else
1654 return 0;
/* Return true if THREAD's pending stop status is still worth
   reporting.  A pending breakpoint stop is discarded when the PC has
   since moved away from the recorded stop PC.  */
1657 bool
1658 linux_process_target::thread_still_has_status_pending (thread_info *thread)
1660 struct lwp_info *lp = get_thread_lwp (thread);
1662 if (!lp->status_pending_p)
1663 return 0;
1665 if (thread->last_resume_kind != resume_stop
1666 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1667 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1669 CORE_ADDR pc;
1670 int discard = 0;
1672 gdb_assert (lp->last_status != 0);
1674 pc = get_pc (lp);
/* Temporarily make THREAD current; the restore object switches back
   when it goes out of scope.  */
1676 scoped_restore_current_thread restore_thread;
1677 switch_to_thread (thread);
1679 if (pc != lp->stop_pc)
1681 threads_debug_printf ("PC of %ld changed",
1682 thread->id.lwp ());
1683 discard = 1;
1686 if (discard)
1688 threads_debug_printf ("discarding pending breakpoint status");
/* Drop the stale status so the caller can re-resume the lwp.  */
1689 lp->status_pending_p = 0;
1690 return 0;
1694 return 1;
1697 /* Returns true if LWP is resumed from the client's perspective. */
1699 static int
1700 lwp_resumed (struct lwp_info *lwp)
1702 thread_info *thread = lwp->thread;
1704 if (thread->last_resume_kind != resume_stop)
1705 return 1;
1707 /* Did gdb send us a `vCont;t', but we haven't reported the
1708 corresponding stop to gdb yet? If so, the thread is still
1709 resumed/running from gdb's perspective. */
1710 if (thread->last_resume_kind == resume_stop
1711 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
1712 return 1;
1714 return 0;
/* find_thread predicate: true iff THREAD matches PTID, is resumed,
   and has a pending status that is still valid.  As a side effect, an
   lwp whose pending breakpoint status went stale is quietly
   re-resumed here.  */
1717 bool
1718 linux_process_target::status_pending_p_callback (thread_info *thread,
1719 ptid_t ptid)
1721 struct lwp_info *lp = get_thread_lwp (thread);
1723 /* Check if we're only interested in events from a specific process
1724 or a specific LWP. */
1725 if (!thread->id.matches (ptid))
1726 return 0;
1728 if (!lwp_resumed (lp))
1729 return 0;
1731 if (lp->status_pending_p
1732 && !thread_still_has_status_pending (thread))
/* The pending status was discarded as stale; set the lwp going
   again (preserving whether it was stepping).  */
1734 resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1735 return 0;
1738 return lp->status_pending_p;
1741 struct lwp_info *
1742 find_lwp_pid (ptid_t ptid)
1744 long lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
1745 thread_info *thread = find_thread ([lwp] (thread_info *thr_arg)
1747 return thr_arg->id.lwp () == lwp;
1750 if (thread == NULL)
1751 return NULL;
1753 return get_thread_lwp (thread);
1756 /* Return the number of known LWPs in PROCESS. */
1758 static int
1759 num_lwps (process_info *process)
1761 int count = 0;
1763 process->for_each_thread ([&] (thread_info *thread)
1765 count++;
1768 return count;
1771 /* See nat/linux-nat.h. */
1773 struct lwp_info *
1774 iterate_over_lwps (ptid_t filter,
1775 gdb::function_view<iterate_over_lwps_ftype> callback)
1777 thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
1779 lwp_info *lwp = get_thread_lwp (thr_arg);
1781 return callback (lwp);
1784 if (thread == NULL)
1785 return NULL;
1787 return get_thread_lwp (thread);
/* Scan all processes for thread-group leaders that have become
   zombie, and delete their lwp (or mark it dead if GDB wants exit
   events).  Returns true if a new pending event was created.  */
1790 bool
1791 linux_process_target::check_zombie_leaders ()
1793 bool new_pending_event = false;
1795 for_each_process ([&] (process_info *proc)
1797 pid_t leader_pid = proc->pid;
1798 lwp_info *leader_lp = find_lwp_pid (ptid_t (leader_pid));
1800 threads_debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1801 "num_lwps=%d, zombie=%d",
1802 leader_pid, leader_lp!= NULL, num_lwps (proc),
1803 linux_proc_pid_is_zombie (leader_pid));
/* Only consider leaders we believe are running (not ptrace-stopped);
   a stopped leader can't have silently turned zombie on us.  */
1805 if (leader_lp != NULL && !leader_lp->stopped
1806 /* Check if there are other threads in the group, as we may
1807 have raced with the inferior simply exiting. Note this
1808 isn't a watertight check. If the inferior is
1809 multi-threaded and is exiting, it may be we see the
1810 leader as zombie before we reap all the non-leader
1811 threads. See comments below. */
1812 && !last_thread_of_process_p (leader_pid)
1813 && linux_proc_pid_is_zombie (leader_pid))
1815 /* A zombie leader in a multi-threaded program can mean one
1816 of three things:
1818 #1 - Only the leader exited, not the whole program, e.g.,
1819 with pthread_exit. Since we can't reap the leader's exit
1820 status until all other threads are gone and reaped too,
1821 we want to delete the zombie leader right away, as it
1822 can't be debugged, we can't read its registers, etc.
1823 This is the main reason we check for zombie leaders
1824 disappearing.
1826 #2 - The whole thread-group/process exited (a group exit,
1827 via e.g. exit(3), and there is (or will be shortly) an
1828 exit reported for each thread in the process, and then
1829 finally an exit for the leader once the non-leaders are
1830 reaped.
1832 #3 - There are 3 or more threads in the group, and a
1833 thread other than the leader exec'd. See comments on
1834 exec events at the top of the file.
1836 Ideally we would never delete the leader for case #2.
1837 Instead, we want to collect the exit status of each
1838 non-leader thread, and then finally collect the exit
1839 status of the leader as normal and use its exit code as
1840 whole-process exit code. Unfortunately, there's no
1841 race-free way to distinguish cases #1 and #2. We can't
1842 assume the exit events for the non-leaders threads are
1843 already pending in the kernel, nor can we assume the
1844 non-leader threads are in zombie state already. Between
1845 the leader becoming zombie and the non-leaders exiting
1846 and becoming zombie themselves, there's a small time
1847 window, so such a check would be racy. Temporarily
1848 pausing all threads and checking to see if all threads
1849 exit or not before re-resuming them would work in the
1850 case that all threads are running right now, but it
1851 wouldn't work if some thread is currently already
1852 ptrace-stopped, e.g., due to scheduler-locking.
1854 So what we do is we delete the leader anyhow, and then
1855 later on when we see its exit status, we re-add it back.
1856 We also make sure that we only report a whole-process
1857 exit when we see the leader exiting, as opposed to when
1858 the last LWP in the LWP list exits, which can be a
1859 non-leader if we deleted the leader here. */
1860 threads_debug_printf ("Thread group leader %d zombie "
1861 "(it exited, or another thread execd), "
1862 "deleting it.",
1863 leader_pid);
1865 thread_info *leader_thread = leader_lp->thread;
1866 if (report_exit_events_for (leader_thread))
/* GDB asked for thread-exit events: keep the lwp around, marked
   dead with a synthesized exit status, so the event is reported.  */
1868 mark_lwp_dead (leader_lp, W_EXITCODE (0, 0), true);
1869 new_pending_event = true;
1871 else
1872 delete_lwp (leader_lp);
1876 return new_pending_event;
1879 /* Callback for `find_thread'. Returns the first LWP that is not
1880 stopped. */
1882 static bool
1883 not_stopped_callback (thread_info *thread, ptid_t filter)
1885 if (!thread->id.matches (filter))
1886 return false;
1888 lwp_info *lwp = get_thread_lwp (thread);
1890 return !lwp->stopped;
1893 /* Increment LWP's suspend count. */
1895 static void
1896 lwp_suspended_inc (struct lwp_info *lwp)
1898 lwp->suspended++;
1900 if (lwp->suspended > 4)
1901 threads_debug_printf
1902 ("LWP %ld has a suspiciously high suspend count, suspended=%d",
1903 lwp->thread->id.lwp (), lwp->suspended);
1906 /* Decrement LWP's suspend count. */
1908 static void
1909 lwp_suspended_decr (struct lwp_info *lwp)
1911 lwp->suspended--;
1913 if (lwp->suspended < 0)
1915 thread_info *thread = lwp->thread;
1917 internal_error ("unsuspend LWP %ld, suspended=%d\n", thread->id.lwp (),
1918 lwp->suspended);
1922 /* This function should only be called if the LWP got a SIGTRAP.
1924 Handle any tracepoint steps or hits. Return true if a tracepoint
1925 event was handled, 0 otherwise. */
1927 static int
1928 handle_tracepoints (struct lwp_info *lwp)
1930 thread_info *tinfo = lwp->thread;
1931 int tpoint_related_event = 0;
1933 gdb_assert (lwp->suspended == 0);
1935 /* If this tracepoint hit causes a tracing stop, we'll immediately
1936 uninsert tracepoints. To do this, we temporarily pause all
1937 threads, unpatch away, and then unpause threads. We need to make
1938 sure the unpausing doesn't resume LWP too. */
1939 lwp_suspended_inc (lwp);
1941 /* And we need to be sure that any all-threads-stopping doesn't try
1942 to move threads out of the jump pads, as it could deadlock the
1943 inferior (LWP could be in the jump pad, maybe even holding the
1944 lock.) */
1946 /* Do any necessary step collect actions. */
1947 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
/* Handle internal tracepoint-related breakpoints at the stop PC.  */
1949 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1951 /* See if we just hit a tracepoint and do its main collect
1952 actions. */
1953 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
/* Balance the suspend-count bump from above.  */
1955 lwp_suspended_decr (lwp);
1957 gdb_assert (lwp->suspended == 0);
1958 gdb_assert (!stabilizing_threads
1959 || (lwp->collecting_fast_tracepoint
1960 != fast_tpoint_collect_result::not_collecting));
1962 if (tpoint_related_event)
1964 threads_debug_printf ("got a tracepoint event");
1965 return 1;
1968 return 0;
1971 fast_tpoint_collect_result
1972 linux_process_target::linux_fast_tracepoint_collecting
1973 (lwp_info *lwp, fast_tpoint_collect_status *status)
1975 CORE_ADDR thread_area;
1976 thread_info *thread = lwp->thread;
1978 /* Get the thread area address. This is used to recognize which
1979 thread is which when tracing with the in-process agent library.
1980 We don't read anything from the address, and treat it as opaque;
1981 it's the address itself that we assume is unique per-thread. */
1982 if (low_get_thread_area (thread->id.lwp (), &thread_area) == -1)
1983 return fast_tpoint_collect_result::not_collecting;
1985 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1989 linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
1991 return -1;
/* Check whether LWP stopped inside a fast tracepoint jump pad and, if
   so, arrange for it to finish the collect before its stop is
   reported.  Returns true when the lwp still needs to move out of the
   jump pad, false when its stop can be reported as-is.  WSTAT may be
   NULL, or point to the lwp's wait status.  */
1994 bool
1995 linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
1997 scoped_restore_current_thread restore_thread;
1998 switch_to_thread (lwp->thread);
/* Only worth checking when the stop isn't a SIGTRAP (breakpoints are
   handled elsewhere) and fast tracepoints are in use at all.  */
2000 if ((wstat == NULL
2001 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2002 && supports_fast_tracepoints ()
2003 && agent_loaded_p ())
2005 struct fast_tpoint_collect_status status;
2007 threads_debug_printf
2008 ("Checking whether LWP %ld needs to move out of the jump pad.",
2009 current_thread->id.lwp ());
2011 fast_tpoint_collect_result r
2012 = linux_fast_tracepoint_collecting (lwp, &status);
/* Synchronous fault signals (SIGILL/SIGFPE/SIGSEGV/SIGBUS) take the
   `else' path below; anything else lets the collect finish.  */
2014 if (wstat == NULL
2015 || (WSTOPSIG (*wstat) != SIGILL
2016 && WSTOPSIG (*wstat) != SIGFPE
2017 && WSTOPSIG (*wstat) != SIGSEGV
2018 && WSTOPSIG (*wstat) != SIGBUS))
2020 lwp->collecting_fast_tracepoint = r;
2022 if (r != fast_tpoint_collect_result::not_collecting)
2024 if (r == fast_tpoint_collect_result::before_insn
2025 && lwp->exit_jump_pad_bkpt == NULL)
2027 /* Haven't executed the original instruction yet.
2028 Set breakpoint there, and wait till it's hit,
2029 then single-step until exiting the jump pad. */
2030 lwp->exit_jump_pad_bkpt
2031 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2034 threads_debug_printf
2035 ("Checking whether LWP %ld needs to move out of the jump pad..."
2036 " it does", current_thread->id.lwp ());
2038 return true;
2041 else
2043 /* If we get a synchronous signal while collecting, *and*
2044 while executing the (relocated) original instruction,
2045 reset the PC to point at the tpoint address, before
2046 reporting to GDB. Otherwise, it's an IPA lib bug: just
2047 report the signal to GDB, and pray for the best. */
2049 lwp->collecting_fast_tracepoint
2050 = fast_tpoint_collect_result::not_collecting;
2052 if (r != fast_tpoint_collect_result::not_collecting
2053 && (status.adjusted_insn_addr <= lwp->stop_pc
2054 && lwp->stop_pc < status.adjusted_insn_addr_end))
2056 siginfo_t info;
2057 struct regcache *regcache;
2059 /* The si_addr on a few signals references the address
2060 of the faulting instruction. Adjust that as
2061 well. */
2062 if ((WSTOPSIG (*wstat) == SIGILL
2063 || WSTOPSIG (*wstat) == SIGFPE
2064 || WSTOPSIG (*wstat) == SIGBUS
2065 || WSTOPSIG (*wstat) == SIGSEGV)
2066 && ptrace (PTRACE_GETSIGINFO, current_thread->id.lwp (),
2067 (PTRACE_TYPE_ARG3) 0, &info) == 0
2068 /* Final check just to make sure we don't clobber
2069 the siginfo of non-kernel-sent signals. */
2070 && (uintptr_t) info.si_addr == lwp->stop_pc)
2072 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2073 ptrace (PTRACE_SETSIGINFO, current_thread->id.lwp (),
2074 (PTRACE_TYPE_ARG3) 0, &info);
/* Rewind the PC from the jump pad back to the tracepoint address, so
   GDB reports the stop at a meaningful location.  */
2077 regcache = get_thread_regcache (current_thread);
2078 low_set_pc (regcache, status.tpoint_addr);
2079 lwp->stop_pc = status.tpoint_addr;
2081 /* Cancel any fast tracepoint lock this thread was
2082 holding. */
2083 force_unlock_trace_buffer ();
2086 if (lwp->exit_jump_pad_bkpt != NULL)
2088 threads_debug_printf
2089 ("Cancelling fast exit-jump-pad: removing bkpt."
2090 "stopping all threads momentarily.");
/* Breakpoint removal requires all threads paused; pair the stop with
   the unstop below, excluding LWP itself from both.  */
2092 stop_all_lwps (1, lwp);
2094 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2095 lwp->exit_jump_pad_bkpt = NULL;
2097 unstop_all_lwps (1, lwp);
2099 gdb_assert (lwp->suspended >= 0);
2104 threads_debug_printf
2105 ("Checking whether LWP %ld needs to move out of the jump pad... no",
2106 current_thread->id.lwp ());
2108 return false;
2111 /* Enqueue one signal in the "signals to report later when out of the
2112 jump pad" list. */
2114 static void
2115 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2117 thread_info *thread = lwp->thread;
2119 threads_debug_printf ("Deferring signal %d for LWP %ld.",
2120 WSTOPSIG (*wstat), thread->id.lwp ());
2122 if (debug_threads)
2124 for (const auto &sig : lwp->pending_signals_to_report)
2125 threads_debug_printf (" Already queued %d", sig.signal);
2127 threads_debug_printf (" (no more currently queued signals)");
2130 /* Don't enqueue non-RT signals if they are already in the deferred
2131 queue. (SIGSTOP being the easiest signal to see ending up here
2132 twice) */
2133 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2135 for (const auto &sig : lwp->pending_signals_to_report)
2137 if (sig.signal == WSTOPSIG (*wstat))
2139 threads_debug_printf
2140 ("Not requeuing already queued non-RT signal %d for LWP %ld",
2141 sig.signal, thread->id.lwp ());
2142 return;
/* Record the signal number along with its full siginfo (fetched from
   the kernel), so it can be re-injected verbatim on dequeue.  */
2147 lwp->pending_signals_to_report.emplace_back (WSTOPSIG (*wstat));
2149 ptrace (PTRACE_GETSIGINFO, thread->id.lwp (), (PTRACE_TYPE_ARG3) 0,
2150 &lwp->pending_signals_to_report.back ().info);
2153 /* Dequeue one signal from the "signals to report later when out of
2154 the jump pad" list. */
/* Returns 1 if a signal was dequeued into *WSTAT, 0 if the queue was
   empty.  */
2156 static int
2157 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2159 thread_info *thread = lwp->thread;
2161 if (!lwp->pending_signals_to_report.empty ())
2163 const pending_signal &p_sig = lwp->pending_signals_to_report.front ();
/* Synthesize a wait status for the deferred signal and restore its
   original siginfo into the kernel (when one was captured).  */
2165 *wstat = W_STOPCODE (p_sig.signal);
2166 if (p_sig.info.si_signo != 0)
2167 ptrace (PTRACE_SETSIGINFO, thread->id.lwp (), (PTRACE_TYPE_ARG3) 0,
2168 &p_sig.info);
2170 lwp->pending_signals_to_report.pop_front ();
2172 threads_debug_printf ("Reporting deferred signal %d for LWP %ld.",
2173 WSTOPSIG (*wstat), thread->id.lwp ());
2175 if (debug_threads)
2177 for (const auto &sig : lwp->pending_signals_to_report)
2178 threads_debug_printf (" Still queued %d", sig.signal);
2180 threads_debug_printf (" (no more queued signals)");
2183 return 1;
2186 return 0;
2189 bool
2190 linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
2192 scoped_restore_current_thread restore_thread;
2193 switch_to_thread (child->thread);
2195 if (low_stopped_by_watchpoint ())
2197 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2198 child->stopped_data_address = low_stopped_data_address ();
2201 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2204 bool
2205 linux_process_target::low_stopped_by_watchpoint ()
2207 return false;
2210 CORE_ADDR
2211 linux_process_target::low_stopped_data_address ()
2213 return 0;
2216 /* Return the ptrace options that we want to try to enable. */
2218 static int
2219 linux_low_ptrace_options (int attached)
2221 client_state &cs = get_client_state ();
2222 int options = 0;
2224 if (!attached)
2225 options |= PTRACE_O_EXITKILL;
2227 if (cs.report_fork_events)
2228 options |= PTRACE_O_TRACEFORK;
2230 if (cs.report_vfork_events)
2231 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2233 if (cs.report_exec_events)
2234 options |= PTRACE_O_TRACEEXEC;
2236 options |= PTRACE_O_TRACESYSGOOD;
2238 return options;
/* Process a wait status WSTAT just pulled out of waitpid for LWPID.
   Updates the matching lwp_info's bookkeeping (stopped state, last
   status, syscall state, stop pc, stop reason).  Events that are
   completely handled here (extended events, expected/delayed
   SIGSTOPs, exits of uninteresting non-leader threads) are filtered
   out; anything else is left pending on the LWP for a later report
   to GDB.  */

void
linux_process_target::filter_event (int lwpid, int wstat)
{
  struct lwp_info *child;
  thread_info *thread;
  int have_stop_pc = 0;

  child = find_lwp_pid (ptid_t (lwpid));

  /* Check for events reported by anything not in our LWP list.  */
  if (child == nullptr)
    {
      if (WIFSTOPPED (wstat))
	{
	  if (WSTOPSIG (wstat) == SIGTRAP
	      && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
	    {
	      /* A non-leader thread exec'ed after we've seen the
		 leader zombie, and removed it from our lists (in
		 check_zombie_leaders).  The non-leader thread changes
		 its tid to the tgid.  */
	      threads_debug_printf
		("Re-adding thread group leader LWP %d after exec.",
		 lwpid);

	      child = add_lwp (ptid_t (lwpid, lwpid));
	      child->stopped = 1;
	      switch_to_thread (child->thread);
	    }
	  else
	    {
	      /* A process we are controlling has forked and the new
		 child's stop was reported to us by the kernel.  Save
		 its PID and go back to waiting for the fork event to
		 be reported - the stopped process might be returned
		 from waitpid before or after the fork event is.  */
	      threads_debug_printf
		("Saving LWP %d status %s in stopped_pids list",
		 lwpid, status_to_str (wstat).c_str ());
	      add_to_pid_list (&stopped_pids, lwpid, wstat);
	    }
	}
      else
	{
	  /* Don't report an event for the exit of an LWP not in our
	     list, i.e. not part of any inferior we're debugging.
	     This can happen if we detach from a program we originally
	     forked and then it exits.  However, note that we may have
	     earlier deleted a leader of an inferior we're debugging,
	     in check_zombie_leaders.  Re-add it back here if so.  */
	  find_process ([&] (process_info *proc)
	    {
	      if (proc->pid == lwpid)
		{
		  threads_debug_printf
		    ("Re-adding thread group leader LWP %d after exit.",
		     lwpid);

		  child = add_lwp (ptid_t (lwpid, lwpid));
		  return true;
		}
	      return false;
	    });
	}

      /* Still unknown after the special cases above: ignore it.  */
      if (child == nullptr)
	return;
    }

  thread = child->thread;

  child->stopped = 1;

  child->last_status = wstat;

  /* Check if the thread has exited.  */
  if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
    {
      threads_debug_printf ("%d exited", lwpid);

      if (finish_step_over (child))
	{
	  /* Unsuspend all other LWPs, and set them back running again.  */
	  unsuspend_all_lwps (child);
	}

      /* If this is not the leader LWP, then the exit signal was not
	 the end of the debugged application and should be ignored,
	 unless GDB wants to hear about thread exits.  */
      if (report_exit_events_for (thread) || is_leader (thread))
	{
	  /* Since events are serialized to GDB core, and we can't
	     report this one right now.  Leave the status pending for
	     the next time we're able to report it.  */
	  mark_lwp_dead (child, wstat, false);
	  return;
	}
      else
	{
	  delete_lwp (child);
	  return;
	}
    }

  /* NOTE(review): the WIFSTOPPED check just below is redundant given
     this assert; kept byte-for-byte as found.  */
  gdb_assert (WIFSTOPPED (wstat));

  if (WIFSTOPPED (wstat))
    {
      /* Architecture-specific setup after inferior is running.  */
      process_info *proc = find_process_pid (thread->id.pid ());

      if (proc->tdesc == NULL)
	{
	  if (proc->attached)
	    {
	      /* This needs to happen after we have attached to the
		 inferior and it is stopped for the first time, but
		 before we access any inferior registers.  */
	      arch_setup_thread (thread);
	    }
	  else
	    {
	      /* The process is started, but GDBserver will do
		 architecture-specific setup after the program stops at
		 the first instruction.  */
	      child->status_pending_p = 1;
	      child->status_pending = wstat;
	      return;
	    }
	}
    }

  if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
    {
      process_info *proc = find_process_pid (thread->id.pid ());
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid, options);
      child->must_set_ptrace_flags = 0;
    }

  /* Always update syscall_state, even if it will be filtered later.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
    {
      /* Syscall stops alternate: entry, then return.  */
      child->syscall_state
	= (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
	   ? TARGET_WAITKIND_SYSCALL_RETURN
	   : TARGET_WAITKIND_SYSCALL_ENTRY);
    }
  else
    {
      /* Almost all other ptrace-stops are known to be outside of system
	 calls, with further exceptions in handle_extended_wait.  */
      child->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* Be careful to not overwrite stop_pc until save_stop_reason is
     called.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && linux_is_extended_waitstatus (wstat))
    {
      child->stop_pc = get_pc (child);
      if (handle_extended_wait (&child, wstat))
	{
	  /* The event has been handled, so just return without
	     reporting it.  */
	  return;
	}
    }

  if (linux_wstatus_maybe_breakpoint (wstat))
    {
      if (save_stop_reason (child))
	have_stop_pc = 1;
    }

  if (!have_stop_pc)
    child->stop_pc = get_pc (child);

  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
      && child->stop_expected)
    {
      threads_debug_printf ("Expected stop.");

      child->stop_expected = 0;

      if (thread->last_resume_kind == resume_stop)
	{
	  /* We want to report the stop to the core.  Treat the
	     SIGSTOP as a normal event.  */
	  threads_debug_printf ("resume_stop SIGSTOP caught for %s.",
				target_pid_to_str (thread->id).c_str ());
	}
      else if (stopping_threads != NOT_STOPPING_THREADS)
	{
	  /* Stopping threads.  We don't want this SIGSTOP to end up
	     pending.  */
	  threads_debug_printf ("SIGSTOP caught for %s while stopping threads.",
				target_pid_to_str (thread->id).c_str ());
	  return;
	}
      else
	{
	  /* This is a delayed SIGSTOP.  Filter out the event.  */
	  threads_debug_printf ("%s %s, 0, 0 (discard delayed SIGSTOP)",
				child->stepping ? "step" : "continue",
				target_pid_to_str (thread->id).c_str ());

	  resume_one_lwp (child, child->stepping, 0, NULL);
	  return;
	}
    }

  /* Nothing filtered the event: leave it pending on the LWP so it can
     be reported to GDB.  */
  child->status_pending_p = 1;
  child->status_pending = wstat;
  return;
}
2459 bool
2460 linux_process_target::maybe_hw_step (thread_info *thread)
2462 if (supports_hardware_single_step ())
2463 return true;
2464 else
2466 /* GDBserver must insert single-step breakpoint for software
2467 single step. */
2468 gdb_assert (has_single_step_breakpoints (thread));
2469 return false;
/* If THREAD's LWP is stopped with nothing interesting to report — not
   suspended, no pending status, and its last reported status already
   consumed (TARGET_WAITKIND_IGNORE) — set it running again,
   single-stepping it if that is what the client last requested.  */

void
linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->stopped
      && !lp->suspended
      && !lp->status_pending_p
      && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
    {
      int step = 0;

      if (thread->last_resume_kind == resume_step)
	{
	  /* Software single-step breakpoints must be in place before
	     the LWP is resumed below.  */
	  if (supports_software_single_step ())
	    install_software_single_step_breakpoints (lp);

	  step = maybe_hw_step (thread);
	}

      threads_debug_printf ("resuming stopped-resumed LWP %s at %s: step=%d",
			    target_pid_to_str (thread->id).c_str (),
			    paddress (lp->stop_pc), step);

      resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
    }
}
/* Wait for an event from the LWPs matched by FILTER_PTID, pulling all
   events out of the kernel and leaving those from other LWPs pending.
   WAIT_PTID selects which LWPs count as "unwaited-for" when deciding
   whether to keep blocking.  On success, stores the raw wait status
   in *WSTATP, makes the event LWP's thread current, and returns its
   LWP id.  Returns -1 if no resumed LWP remains to wait for, or 0 if
   OPTIONS includes WNOHANG and no event is available.  */

int
linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
					       ptid_t filter_ptid,
					       int *wstatp, int options)
{
  thread_info *event_thread;
  struct lwp_info *event_child, *requested_child;
  sigset_t block_mask, prev_mask;

 retry:
  /* N.B. event_thread points to the thread_info struct that contains
     event_child.  Keep them in sync.  */
  event_thread = NULL;
  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
    {
      /* Wildcard wait: pick randomly among pending events to avoid
	 starving any one LWP.  */
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  threads_debug_printf ("Got a pending child %ld", event_thread->id.lwp ());
	}
    }
  else if (filter_ptid != null_ptid)
    {
      /* Waiting for a specific LWP.  */
      requested_child = find_lwp_pid (filter_ptid);
      gdb_assert (requested_child != nullptr);

      if (stopping_threads == NOT_STOPPING_THREADS
	  && requested_child->status_pending_p
	  && (requested_child->collecting_fast_tracepoint
	      != fast_tpoint_collect_result::not_collecting))
	{
	  /* Mid fast-tracepoint collection: defer the pending signal
	     and let the LWP finish getting out of the jump pad.  */
	  enqueue_one_deferred_signal (requested_child,
				       &requested_child->status_pending);
	  requested_child->status_pending_p = 0;
	  requested_child->status_pending = 0;
	  resume_one_lwp (requested_child, 0, 0, NULL);
	}

      if (requested_child->suspended
	  && requested_child->status_pending_p)
	{
	  internal_error ("requesting an event out of a"
			  " suspended child?");
	}

      if (requested_child->status_pending_p)
	{
	  event_child = requested_child;
	  event_thread = event_child->thread;
	}
    }

  if (event_child != NULL)
    {
      threads_debug_printf ("Got an event from pending child %ld (%04x)",
			    event_thread->id.lwp (),
			    event_child->status_pending);

      *wstatp = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      switch_to_thread (event_thread);
      return event_thread->id.lwp ();
    }

  /* But if we don't find a pending event, we'll have to wait.

     We only enter this loop if no process has a pending wait status.
     Thus any action taken in response to a wait status inside this
     loop is responding as soon as we detect the status, not after any
     pending events.  */

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  Block
     all signals while here.  */
  sigfillset (&block_mask);
  gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);

  /* Always pull all events out of the kernel.  We'll randomly select
     an event LWP out of all that have events, to prevent
     starvation.  */
  while (event_child == NULL)
    {
      pid_t ret = 0;

      /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	   thread group still exist, waitpid(TGID, ...) hangs.  That
	   waitpid won't return an exit status until the other threads
	   in the group are reaped.

	 - When a non-leader thread execs, that thread just vanishes
	   without reporting an exit (so we'd hang if we waited for it
	   explicitly in that case).  The exec event is reported to
	   the TGID pid.  */
      errno = 0;
      ret = my_waitpid (-1, wstatp, options | WNOHANG);

      threads_debug_printf ("waitpid(-1, ...) returned %d, %s",
			    ret, errno ? safe_strerror (errno) : "ERRNO-OK");

      if (ret > 0)
	{
	  threads_debug_printf ("waitpid %ld received %s",
				(long) ret, status_to_str (*wstatp).c_str ());

	  /* Filter all events.  IOW, leave all events pending.  We'll
	     randomly select an event LWP out of all that have events
	     below.  */
	  filter_event (ret, *wstatp);
	  /* Retry until nothing comes out of waitpid.  A single
	     SIGCHLD can indicate more than one child stopped.  */
	  continue;
	}

      /* Now that we've pulled all events out of the kernel, resume
	 LWPs that don't have an interesting event to report.  */
      if (stopping_threads == NOT_STOPPING_THREADS)
	for_each_thread ([this] (thread_info *thread)
	  {
	    resume_stopped_resumed_lwps (thread);
	  });

      /* ... and find an LWP with a status to report to the core, if
	 any.  */
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  *wstatp = event_child->status_pending;
	  event_child->status_pending_p = 0;
	  event_child->status_pending = 0;
	  break;
	}

      /* Check for zombie thread group leaders.  Those can't be reaped
	 until all other threads in the thread group are.  */
      if (check_zombie_leaders ())
	goto retry;

      auto not_stopped = [&] (thread_info *thread)
	{
	  return not_stopped_callback (thread, wait_ptid);
	};

      /* If there are no resumed children left in the set of LWPs we
	 want to wait for, bail.  We can't just block in
	 waitpid/sigsuspend, because lwps might have been left stopped
	 in trace-stop state, and we'd be stuck forever waiting for
	 their status to change (which would only happen if we resumed
	 them).  Even if WNOHANG is set, this return code is preferred
	 over 0 (below), as it is more detailed.  */
      if (find_thread (not_stopped) == NULL)
	{
	  threads_debug_printf ("exit (no unwaited-for LWP)");

	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return -1;
	}

      /* No interesting event to report to the caller.  */
      if ((options & WNOHANG))
	{
	  threads_debug_printf ("WNOHANG set, no event found");

	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return 0;
	}

      /* Block until we get an event reported with SIGCHLD.  */
      threads_debug_printf ("sigsuspend'ing");

      /* sigsuspend atomically unblocks signals per PREV_MASK and
	 waits; SIGCHLD delivery wakes us up to retry.  */
      sigsuspend (&prev_mask);
      gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
      goto retry;
    }

  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);

  switch_to_thread (event_thread);

  return event_thread->id.lwp ();
}
/* Wait for an event from any LWP matched by PTID.  Convenience
   wrapper around wait_for_event_filtered using PTID both as the wait
   set and the filter set.  */

int
linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
{
  return wait_for_event_filtered (ptid, ptid, wstatp, options);
}
/* Select one LWP out of those that have events pending, and store it
   in *ORIG_LP.  If no candidate is found, *ORIG_LP is left
   untouched.  */

static void
select_event_lwp (struct lwp_info **orig_lp)
{
  thread_info *event_thread = NULL;

  /* In all-stop, give preference to the LWP that is being
     single-stepped.  There will be at most one, and it's the LWP that
     the core is most interested in.  If we didn't do this, then we'd
     have to handle pending step SIGTRAPs somehow in case the core
     later continues the previously-stepped thread, otherwise we'd
     report the pending SIGTRAP, and the core, not having stepped the
     thread, wouldn't understand what the trap was for, and therefore
     would report it to the user as a random signal.  */
  if (!non_stop)
    {
      event_thread = find_thread ([] (thread_info *thread)
	{
	  lwp_info *lp = get_thread_lwp (thread);

	  return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
		  && thread->last_resume_kind == resume_step
		  && lp->status_pending_p);
	});

      if (event_thread != NULL)
	threads_debug_printf
	  ("Select single-step %s",
	   target_pid_to_str (event_thread->id).c_str ());
    }

  if (event_thread == NULL)
    {
      /* No single-stepping LWP.  Select one at random, out of those
	 which have had events.  */
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  lwp_info *lp = get_thread_lwp (thread);

	  /* Only resumed LWPs that have an event pending.  */
	  return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
		  && lp->status_pending_p);
	});
    }

  if (event_thread != NULL)
    {
      struct lwp_info *event_lp = get_thread_lwp (event_thread);

      /* Switch the event LWP.  */
      *orig_lp = event_lp;
    }
}
2761 /* Decrement the suspend count of all LWPs, except EXCEPT, if non
2762 NULL. */
2764 static void
2765 unsuspend_all_lwps (struct lwp_info *except)
2767 for_each_thread ([&] (thread_info *thread)
2769 lwp_info *lwp = get_thread_lwp (thread);
2771 if (lwp != except)
2772 lwp_suspended_decr (lwp);
2776 static bool lwp_running (thread_info *thread);
2778 /* Stabilize threads (move out of jump pads).
2780 If a thread is midway collecting a fast tracepoint, we need to
2781 finish the collection and move it out of the jump pad before
2782 reporting the signal.
2784 This avoids recursion while collecting (when a signal arrives
2785 midway, and the signal handler itself collects), which would trash
2786 the trace buffer. In case the user set a breakpoint in a signal
2787 handler, this avoids the backtrace showing the jump pad, etc..
2788 Most importantly, there are certain things we can't do safely if
2789 threads are stopped in a jump pad (or in its callee's). For
2790 example:
2792 - starting a new trace run. A thread still collecting the
2793 previous run, could trash the trace buffer when resumed. The trace
2794 buffer control structures would have been reset but the thread had
2795 no way to tell. The thread could even midway memcpy'ing to the
2796 buffer, which would mean that when resumed, it would clobber the
2797 trace buffer that had been set for a new run.
2799 - we can't rewrite/reuse the jump pads for new tracepoints
2800 safely. Say you do tstart while a thread is stopped midway while
2801 collecting. When the thread is later resumed, it finishes the
2802 collection, and returns to the jump pad, to execute the original
2803 instruction that was under the tracepoint jump at the time the
2804 older run had been started. If the jump pad had been rewritten
2805 since for something else in the new run, the thread would now
2806 execute the wrong / random instructions. */
/* Move all threads out of fast-tracepoint jump pads before reporting
   events; see the long explanation just above.  Bails out early if
   any thread is already unrecoverably stuck in a jump pad.  */

void
linux_process_target::stabilize_threads ()
{
  thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
    {
      return stuck_in_jump_pad (thread);
    });

  if (thread_stuck != NULL)
    {
      threads_debug_printf ("can't stabilize, LWP %ld is stuck in jump pad",
			    thread_stuck->id.lwp ());
      return;
    }

  /* wait_1 below may switch the current thread; restore it on exit.  */
  scoped_restore_current_thread restore_thread;

  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_thread ([this] (thread_info *thread)
    {
      move_out_of_jump_pad (thread);
    });

  /* Loop until all are stopped out of the jump pads.  */
  while (find_thread (lwp_running) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      int wstat;

      /* Note that we go through the full wait even loop.  While
	 moving threads out of jump pad, we need to be able to step
	 over internal breakpoints and such.  */
      wait_1 (minus_one_ptid, &ourstatus, 0);

      if (ourstatus.kind () == TARGET_WAITKIND_STOPPED)
	{
	  lwp = get_thread_lwp (current_thread);

	  /* Lock it.  */
	  lwp_suspended_inc (lwp);

	  /* Defer any signal the stop carried so it isn't lost; it
	     will be delivered once stabilization is done.  */
	  if (ourstatus.sig () != GDB_SIGNAL_0
	      || current_thread->last_resume_kind == resume_stop)
	    {
	      wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.sig ()));
	      enqueue_one_deferred_signal (lwp, &wstat);
	    }
	}
    }

  /* Release the suspensions taken in the loop above.  */
  unsuspend_all_lwps (NULL);

  stabilizing_threads = 0;

  /* Paranoia check (debug builds of the protocol traffic only).  */
  if (debug_threads)
    {
      thread_stuck = find_thread ([this] (thread_info *thread)
	{
	  return stuck_in_jump_pad (thread);
	});

      if (thread_stuck != NULL)
	threads_debug_printf
	  ("couldn't stabilize, LWP %ld got stuck in jump pad",
	   thread_stuck->id.lwp ());
    }
}
/* Convenience function that is called when the kernel reports an
   event that is not passed out to GDB.  Returns null_ptid with
   *OURSTATUS set to ignore, after scheduling another target_wait
   call.  */

static ptid_t
ignore_event (struct target_waitstatus *ourstatus)
{
  /* If we got an event, there may still be others, as a single
     SIGCHLD can indicate more than one child stopped.  This forces
     another target_wait call.  */
  async_file_mark ();

  ourstatus->set_ignore ();
  return null_ptid;
}
/* Filter an exit event for EVENT_CHILD before it reaches GDB.  Exits
   of non-leader threads must not be reported as whole-process exits;
   they are either downgraded to thread-exit events (when requested)
   or suppressed entirely.  Returns the ptid to report, with
   *OURSTATUS possibly rewritten.  */

ptid_t
linux_process_target::filter_exit_event (lwp_info *event_child,
					 target_waitstatus *ourstatus)
{
  thread_info *thread = event_child->thread;
  ptid_t ptid = thread->id;

  if (ourstatus->kind () == TARGET_WAITKIND_THREAD_EXITED)
    {
      /* We're reporting a thread exit for the leader.  The exit was
	 detected by check_zombie_leaders.  */
      gdb_assert (is_leader (thread));
      gdb_assert (report_exit_events_for (thread));

      delete_lwp (event_child);
      return ptid;
    }

  /* Note we must filter TARGET_WAITKIND_SIGNALLED as well, otherwise
     if a non-leader thread exits with a signal, we'd report it to the
     core which would interpret it as the whole-process exiting.
     There is no TARGET_WAITKIND_THREAD_SIGNALLED event kind.  */
  if (ourstatus->kind () != TARGET_WAITKIND_EXITED
      && ourstatus->kind () != TARGET_WAITKIND_SIGNALLED)
    return ptid;

  if (!is_leader (thread))
    {
      if (report_exit_events_for (thread))
	ourstatus->set_thread_exited (0);
      else
	ourstatus->set_ignore ();

      delete_lwp (event_child);
    }
  return ptid;
}
2932 /* Returns 1 if GDB is interested in any event_child syscalls. */
2934 static int
2935 gdb_catching_syscalls_p (struct lwp_info *event_child)
2937 return !event_child->thread->process ()->syscalls_to_catch.empty ();
2940 bool
2941 linux_process_target::gdb_catch_this_syscall (lwp_info *event_child)
2943 int sysno;
2944 thread_info *thread = event_child->thread;
2945 process_info *proc = thread->process ();
2947 if (proc->syscalls_to_catch.empty ())
2948 return false;
2950 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
2951 return true;
2953 get_syscall_trapinfo (event_child, &sysno);
2955 for (int iter : proc->syscalls_to_catch)
2956 if (iter == sysno)
2957 return true;
2959 return false;
2962 ptid_t
2963 linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
2964 target_wait_flags target_options)
2966 THREADS_SCOPED_DEBUG_ENTER_EXIT;
2968 client_state &cs = get_client_state ();
2969 int w;
2970 struct lwp_info *event_child;
2971 int options;
2972 int pid;
2973 int step_over_finished;
2974 int bp_explains_trap;
2975 int maybe_internal_trap;
2976 int report_to_gdb;
2977 int trace_event;
2978 int in_step_range;
2980 threads_debug_printf ("[%s]", target_pid_to_str (ptid).c_str ());
2982 /* Translate generic target options into linux options. */
2983 options = __WALL;
2984 if (target_options & TARGET_WNOHANG)
2985 options |= WNOHANG;
2987 bp_explains_trap = 0;
2988 trace_event = 0;
2989 in_step_range = 0;
2990 ourstatus->set_ignore ();
2992 bool was_any_resumed = any_resumed ();
2994 if (step_over_bkpt == null_ptid)
2995 pid = wait_for_event (ptid, &w, options);
2996 else
2998 threads_debug_printf ("step_over_bkpt set [%s], doing a blocking wait",
2999 target_pid_to_str (step_over_bkpt).c_str ());
3000 pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3003 if (pid == 0 || (pid == -1 && !was_any_resumed))
3005 gdb_assert (target_options & TARGET_WNOHANG);
3007 threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_IGNORE");
3009 ourstatus->set_ignore ();
3010 return null_ptid;
3012 else if (pid == -1)
3014 threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_NO_RESUMED");
3016 ourstatus->set_no_resumed ();
3017 return null_ptid;
3020 event_child = get_thread_lwp (current_thread);
3022 /* wait_for_event only returns an exit status for the last
3023 child of a process. Report it. */
3024 if (WIFEXITED (w) || WIFSIGNALED (w))
3026 if (WIFEXITED (w))
3028 /* If we already have the exit recorded in waitstatus, use
3029 it. This will happen when we detect a zombie leader,
3030 when we had GDB_THREAD_OPTION_EXIT enabled for it. We
3031 want to report its exit as TARGET_WAITKIND_THREAD_EXITED,
3032 as the whole process hasn't exited yet. */
3033 const target_waitstatus &ws = event_child->waitstatus;
3034 if (ws.kind () != TARGET_WAITKIND_IGNORE)
3036 gdb_assert (ws.kind () == TARGET_WAITKIND_EXITED
3037 || ws.kind () == TARGET_WAITKIND_THREAD_EXITED);
3038 *ourstatus = ws;
3040 else
3041 ourstatus->set_exited (WEXITSTATUS (w));
3043 threads_debug_printf
3044 ("ret = %s, exited with retcode %d",
3045 target_pid_to_str (current_thread->id).c_str (),
3046 WEXITSTATUS (w));
3048 else
3050 ourstatus->set_signalled (gdb_signal_from_host (WTERMSIG (w)));
3052 threads_debug_printf
3053 ("ret = %s, terminated with signal %d",
3054 target_pid_to_str (current_thread->id).c_str (),
3055 WTERMSIG (w));
3058 return filter_exit_event (event_child, ourstatus);
3061 /* If step-over executes a breakpoint instruction, in the case of a
3062 hardware single step it means a gdb/gdbserver breakpoint had been
3063 planted on top of a permanent breakpoint, in the case of a software
3064 single step it may just mean that gdbserver hit the reinsert breakpoint.
3065 The PC has been adjusted by save_stop_reason to point at
3066 the breakpoint address.
3067 So in the case of the hardware single step advance the PC manually
3068 past the breakpoint and in the case of software single step advance only
3069 if it's not the single_step_breakpoint we are hitting.
3070 This avoids that a program would keep trapping a permanent breakpoint
3071 forever. */
3072 if (step_over_bkpt != null_ptid
3073 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3074 && (event_child->stepping
3075 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3077 int increment_pc = 0;
3078 int breakpoint_kind = 0;
3079 CORE_ADDR stop_pc = event_child->stop_pc;
3081 breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
3082 sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3084 threads_debug_printf
3085 ("step-over for %s executed software breakpoint",
3086 target_pid_to_str (current_thread->id).c_str ());
3088 if (increment_pc != 0)
3090 struct regcache *regcache
3091 = get_thread_regcache (current_thread);
3093 event_child->stop_pc += increment_pc;
3094 low_set_pc (regcache, event_child->stop_pc);
3096 if (!low_breakpoint_at (event_child->stop_pc))
3097 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3101 /* If this event was not handled before, and is not a SIGTRAP, we
3102 report it. SIGILL and SIGSEGV are also treated as traps in case
3103 a breakpoint is inserted at the current PC. If this target does
3104 not support internal breakpoints at all, we also report the
3105 SIGTRAP without further processing; it's of no concern to us. */
3106 maybe_internal_trap
3107 = (low_supports_breakpoints ()
3108 && (WSTOPSIG (w) == SIGTRAP
3109 || ((WSTOPSIG (w) == SIGILL
3110 || WSTOPSIG (w) == SIGSEGV)
3111 && low_breakpoint_at (event_child->stop_pc))));
3113 if (maybe_internal_trap)
3115 /* Handle anything that requires bookkeeping before deciding to
3116 report the event or continue waiting. */
3118 /* First check if we can explain the SIGTRAP with an internal
3119 breakpoint, or if we should possibly report the event to GDB.
3120 Do this before anything that may remove or insert a
3121 breakpoint. */
3122 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3124 /* We have a SIGTRAP, possibly a step-over dance has just
3125 finished. If so, tweak the state machine accordingly,
3126 reinsert breakpoints and delete any single-step
3127 breakpoints. */
3128 step_over_finished = finish_step_over (event_child);
3130 /* Now invoke the callbacks of any internal breakpoints there. */
3131 check_breakpoints (event_child->stop_pc);
3133 /* Handle tracepoint data collecting. This may overflow the
3134 trace buffer, and cause a tracing stop, removing
3135 breakpoints. */
3136 trace_event = handle_tracepoints (event_child);
3138 if (bp_explains_trap)
3139 threads_debug_printf ("Hit a gdbserver breakpoint.");
3141 else
3143 /* We have some other signal, possibly a step-over dance was in
3144 progress, and it should be cancelled too. */
3145 step_over_finished = finish_step_over (event_child);
3148 /* We have all the data we need. Either report the event to GDB, or
3149 resume threads and keep waiting for more. */
3151 /* If we're collecting a fast tracepoint, finish the collection and
3152 move out of the jump pad before delivering a signal. See
3153 linux_stabilize_threads. */
3155 if (WIFSTOPPED (w)
3156 && WSTOPSIG (w) != SIGTRAP
3157 && supports_fast_tracepoints ()
3158 && agent_loaded_p ())
3160 threads_debug_printf ("Got signal %d for LWP %ld. Check if we need "
3161 "to defer or adjust it.",
3162 WSTOPSIG (w), current_thread->id.lwp ());
3164 /* Allow debugging the jump pad itself. */
3165 if (current_thread->last_resume_kind != resume_step
3166 && maybe_move_out_of_jump_pad (event_child, &w))
3168 enqueue_one_deferred_signal (event_child, &w);
3170 threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad)",
3171 WSTOPSIG (w), current_thread->id.lwp ());
3173 resume_one_lwp (event_child, 0, 0, NULL);
3175 return ignore_event (ourstatus);
3179 if (event_child->collecting_fast_tracepoint
3180 != fast_tpoint_collect_result::not_collecting)
3182 threads_debug_printf
3183 ("LWP %ld was trying to move out of the jump pad (%d). "
3184 "Check if we're already there.",
3185 current_thread->id.lwp (),
3186 (int) event_child->collecting_fast_tracepoint);
3188 trace_event = 1;
3190 event_child->collecting_fast_tracepoint
3191 = linux_fast_tracepoint_collecting (event_child, NULL);
3193 if (event_child->collecting_fast_tracepoint
3194 != fast_tpoint_collect_result::before_insn)
3196 /* No longer need this breakpoint. */
3197 if (event_child->exit_jump_pad_bkpt != NULL)
3199 threads_debug_printf
3200 ("No longer need exit-jump-pad bkpt; removing it."
3201 "stopping all threads momentarily.");
3203 /* Other running threads could hit this breakpoint.
3204 We don't handle moribund locations like GDB does,
3205 instead we always pause all threads when removing
3206 breakpoints, so that any step-over or
3207 decr_pc_after_break adjustment is always taken
3208 care of while the breakpoint is still
3209 inserted. */
3210 stop_all_lwps (1, event_child);
3212 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3213 event_child->exit_jump_pad_bkpt = NULL;
3215 unstop_all_lwps (1, event_child);
3217 gdb_assert (event_child->suspended >= 0);
3221 if (event_child->collecting_fast_tracepoint
3222 == fast_tpoint_collect_result::not_collecting)
3224 threads_debug_printf
3225 ("fast tracepoint finished collecting successfully.");
3227 /* We may have a deferred signal to report. */
3228 if (dequeue_one_deferred_signal (event_child, &w))
3229 threads_debug_printf ("dequeued one signal.");
3230 else
3232 threads_debug_printf ("no deferred signals.");
3234 if (stabilizing_threads)
3236 ourstatus->set_stopped (GDB_SIGNAL_0);
3238 threads_debug_printf
3239 ("ret = %s, stopped while stabilizing threads",
3240 target_pid_to_str (current_thread->id).c_str ());
3242 return current_thread->id;
3248 /* Check whether GDB would be interested in this event. */
3250 /* Check if GDB is interested in this syscall. */
3251 if (WIFSTOPPED (w)
3252 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3253 && !gdb_catch_this_syscall (event_child))
3255 threads_debug_printf ("Ignored syscall for LWP %ld.",
3256 current_thread->id.lwp ());
3258 resume_one_lwp (event_child, event_child->stepping, 0, NULL);
3260 return ignore_event (ourstatus);
3263 /* If GDB is not interested in this signal, don't stop other
3264 threads, and don't report it to GDB. Just resume the inferior
3265 right away. We do this for threading-related signals as well as
3266 any that GDB specifically requested we ignore. But never ignore
3267 SIGSTOP if we sent it ourselves, and do not ignore signals when
3268 stepping - they may require special handling to skip the signal
3269 handler. Also never ignore signals that could be caused by a
3270 breakpoint. */
3271 if (WIFSTOPPED (w)
3272 && current_thread->last_resume_kind != resume_step
3273 && (
3274 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3275 (current_process ()->priv->thread_db != NULL
3276 && (WSTOPSIG (w) == __SIGRTMIN
3277 || WSTOPSIG (w) == __SIGRTMIN + 1))
3279 #endif
3280 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3281 && !(WSTOPSIG (w) == SIGSTOP
3282 && current_thread->last_resume_kind == resume_stop)
3283 && !linux_wstatus_maybe_breakpoint (w))))
3285 siginfo_t info, *info_p;
3287 threads_debug_printf ("Ignored signal %d for LWP %ld.",
3288 WSTOPSIG (w), current_thread->id.lwp ());
3290 if (ptrace (PTRACE_GETSIGINFO, current_thread->id.lwp (),
3291 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3292 info_p = &info;
3293 else
3294 info_p = NULL;
3296 if (step_over_finished)
3298 /* We cancelled this thread's step-over above. We still
3299 need to unsuspend all other LWPs, and set them back
3300 running again while the signal handler runs. */
3301 unsuspend_all_lwps (event_child);
3303 /* Enqueue the pending signal info so that proceed_all_lwps
3304 doesn't lose it. */
3305 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3307 proceed_all_lwps ();
3309 else
3311 resume_one_lwp (event_child, event_child->stepping,
3312 WSTOPSIG (w), info_p);
3315 return ignore_event (ourstatus);
3318 /* Note that all addresses are always "out of the step range" when
3319 there's no range to begin with. */
3320 in_step_range = lwp_in_step_range (event_child);
3322 /* If GDB wanted this thread to single step, and the thread is out
3323 of the step range, we always want to report the SIGTRAP, and let
3324 GDB handle it. Watchpoints should always be reported. So should
3325 signals we can't explain. A SIGTRAP we can't explain could be a
3326 GDB breakpoint --- we may or not support Z0 breakpoints. If we
3327 do, we're be able to handle GDB breakpoints on top of internal
3328 breakpoints, by handling the internal breakpoint and still
3329 reporting the event to GDB. If we don't, we're out of luck, GDB
3330 won't see the breakpoint hit. If we see a single-step event but
3331 the thread should be continuing, don't pass the trap to gdb.
3332 That indicates that we had previously finished a single-step but
3333 left the single-step pending -- see
3334 complete_ongoing_step_over. */
3335 report_to_gdb = (!maybe_internal_trap
3336 || (current_thread->last_resume_kind == resume_step
3337 && !in_step_range)
3338 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3339 || (!in_step_range
3340 && !bp_explains_trap
3341 && !trace_event
3342 && !step_over_finished
3343 && !(current_thread->last_resume_kind == resume_continue
3344 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3345 || (gdb_breakpoint_here (event_child->stop_pc)
3346 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3347 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3348 || event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE);
3350 run_breakpoint_commands (event_child->stop_pc);
3352 /* We found no reason GDB would want us to stop. We either hit one
3353 of our own breakpoints, or finished an internal step GDB
3354 shouldn't know about. */
3355 if (!report_to_gdb)
3357 if (bp_explains_trap)
3358 threads_debug_printf ("Hit a gdbserver breakpoint.");
3360 if (step_over_finished)
3361 threads_debug_printf ("Step-over finished.");
3363 if (trace_event)
3364 threads_debug_printf ("Tracepoint event.");
3366 if (lwp_in_step_range (event_child))
3367 threads_debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).",
3368 paddress (event_child->stop_pc),
3369 paddress (event_child->step_range_start),
3370 paddress (event_child->step_range_end));
3372 /* We're not reporting this breakpoint to GDB, so apply the
3373 decr_pc_after_break adjustment to the inferior's regcache
3374 ourselves. */
3376 if (low_supports_breakpoints ())
3378 struct regcache *regcache
3379 = get_thread_regcache (current_thread);
3380 low_set_pc (regcache, event_child->stop_pc);
3383 if (step_over_finished)
3385 /* If we have finished stepping over a breakpoint, we've
3386 stopped and suspended all LWPs momentarily except the
3387 stepping one. This is where we resume them all again.
3388 We're going to keep waiting, so use proceed, which
3389 handles stepping over the next breakpoint. */
3390 unsuspend_all_lwps (event_child);
3392 else
3394 /* Remove the single-step breakpoints if any. Note that
3395 there isn't single-step breakpoint if we finished stepping
3396 over. */
3397 if (supports_software_single_step ()
3398 && has_single_step_breakpoints (current_thread))
3400 stop_all_lwps (0, event_child);
3401 delete_single_step_breakpoints (current_thread);
3402 unstop_all_lwps (0, event_child);
3406 threads_debug_printf ("proceeding all threads.");
3408 proceed_all_lwps ();
3410 return ignore_event (ourstatus);
3413 if (debug_threads)
3415 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3416 threads_debug_printf ("LWP %ld: extended event with waitstatus %s",
3417 event_child->thread->id.lwp (),
3418 event_child->waitstatus.to_string ().c_str ());
3420 if (current_thread->last_resume_kind == resume_step)
3422 if (event_child->step_range_start == event_child->step_range_end)
3423 threads_debug_printf
3424 ("GDB wanted to single-step, reporting event.");
3425 else if (!lwp_in_step_range (event_child))
3426 threads_debug_printf ("Out of step range, reporting event.");
3429 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3430 threads_debug_printf ("Stopped by watchpoint.");
3431 else if (gdb_breakpoint_here (event_child->stop_pc))
3432 threads_debug_printf ("Stopped by GDB breakpoint.");
3435 threads_debug_printf ("Hit a non-gdbserver trap event.");
3437 /* Alright, we're going to report a stop. */
3439 /* Remove single-step breakpoints. */
3440 if (supports_software_single_step ())
3442 /* Remove single-step breakpoints or not. It it is true, stop all
3443 lwps, so that other threads won't hit the breakpoint in the
3444 staled memory. */
3445 int remove_single_step_breakpoints_p = 0;
3447 if (non_stop)
3449 remove_single_step_breakpoints_p
3450 = has_single_step_breakpoints (current_thread);
3452 else
3454 /* In all-stop, a stop reply cancels all previous resume
3455 requests. Delete all single-step breakpoints. */
3457 find_thread ([&] (thread_info *thread) {
3458 if (has_single_step_breakpoints (thread))
3460 remove_single_step_breakpoints_p = 1;
3461 return true;
3464 return false;
3468 if (remove_single_step_breakpoints_p)
3470 /* If we remove single-step breakpoints from memory, stop all lwps,
3471 so that other threads won't hit the breakpoint in the staled
3472 memory. */
3473 stop_all_lwps (0, event_child);
3475 if (non_stop)
3477 gdb_assert (has_single_step_breakpoints (current_thread));
3478 delete_single_step_breakpoints (current_thread);
3480 else
3482 for_each_thread ([] (thread_info *thread){
3483 if (has_single_step_breakpoints (thread))
3484 delete_single_step_breakpoints (thread);
3488 unstop_all_lwps (0, event_child);
3492 if (!stabilizing_threads)
3494 /* In all-stop, stop all threads. */
3495 if (!non_stop)
3496 stop_all_lwps (0, NULL);
3498 if (step_over_finished)
3500 if (!non_stop)
3502 /* If we were doing a step-over, all other threads but
3503 the stepping one had been paused in start_step_over,
3504 with their suspend counts incremented. We don't want
3505 to do a full unstop/unpause, because we're in
3506 all-stop mode (so we want threads stopped), but we
3507 still need to unsuspend the other threads, to
3508 decrement their `suspended' count back. */
3509 unsuspend_all_lwps (event_child);
3511 else
3513 /* If we just finished a step-over, then all threads had
3514 been momentarily paused. In all-stop, that's fine,
3515 we want threads stopped by now anyway. In non-stop,
3516 we need to re-resume threads that GDB wanted to be
3517 running. */
3518 unstop_all_lwps (1, event_child);
3522 /* If we're not waiting for a specific LWP, choose an event LWP
3523 from among those that have had events. Giving equal priority
3524 to all LWPs that have had events helps prevent
3525 starvation. */
3526 if (ptid == minus_one_ptid)
3528 event_child->status_pending_p = 1;
3529 event_child->status_pending = w;
3531 select_event_lwp (&event_child);
3533 /* current_thread and event_child must stay in sync. */
3534 switch_to_thread (event_child->thread);
3536 event_child->status_pending_p = 0;
3537 w = event_child->status_pending;
3541 /* Stabilize threads (move out of jump pads). */
3542 if (!non_stop)
3543 target_stabilize_threads ();
3545 else
3547 /* If we just finished a step-over, then all threads had been
3548 momentarily paused. In all-stop, that's fine, we want
3549 threads stopped by now anyway. In non-stop, we need to
3550 re-resume threads that GDB wanted to be running. */
3551 if (step_over_finished)
3552 unstop_all_lwps (1, event_child);
3555 /* At this point, we haven't set OURSTATUS. This is where we do it. */
3556 gdb_assert (ourstatus->kind () == TARGET_WAITKIND_IGNORE);
3558 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3560 /* If the reported event is an exit, fork, vfork, clone or exec,
3561 let GDB know. */
3563 /* Break the unreported fork/vfork/clone relationship chain. */
3564 if (is_new_child_status (event_child->waitstatus.kind ()))
3566 event_child->relative->relative = NULL;
3567 event_child->relative = NULL;
3570 *ourstatus = event_child->waitstatus;
3571 /* Clear the event lwp's waitstatus since we handled it already. */
3572 event_child->waitstatus.set_ignore ();
3574 else
3576 /* The LWP stopped due to a plain signal or a syscall signal. Either way,
3577 event_child->waitstatus wasn't filled in with the details, so look at
3578 the wait status W. */
3579 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3581 int syscall_number;
3583 get_syscall_trapinfo (event_child, &syscall_number);
3584 if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
3585 ourstatus->set_syscall_entry (syscall_number);
3586 else if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
3587 ourstatus->set_syscall_return (syscall_number);
3588 else
3589 gdb_assert_not_reached ("unexpected syscall state");
3591 else if (current_thread->last_resume_kind == resume_stop
3592 && WSTOPSIG (w) == SIGSTOP)
3594 /* A thread that has been requested to stop by GDB with vCont;t,
3595 and it stopped cleanly, so report as SIG0. The use of
3596 SIGSTOP is an implementation detail. */
3597 ourstatus->set_stopped (GDB_SIGNAL_0);
3599 else
3600 ourstatus->set_stopped (gdb_signal_from_host (WSTOPSIG (w)));
3603 /* Now that we've selected our final event LWP, un-adjust its PC if
3604 it was a software breakpoint, and the client doesn't know we can
3605 adjust the breakpoint ourselves. */
3606 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3607 && !cs.swbreak_feature)
3609 int decr_pc = low_decr_pc_after_break ();
3611 if (decr_pc != 0)
3613 struct regcache *regcache
3614 = get_thread_regcache (current_thread);
3615 low_set_pc (regcache, event_child->stop_pc + decr_pc);
3619 gdb_assert (step_over_bkpt == null_ptid);
3621 threads_debug_printf ("ret = %s, %s",
3622 target_pid_to_str (current_thread->id).c_str (),
3623 ourstatus->to_string ().c_str ());
3625 return filter_exit_event (event_child, ourstatus);
3628 /* Get rid of any pending event in the pipe. */
3629 static void
3630 async_file_flush (void)
3632 linux_event_pipe.flush ();
3635 /* Put something in the pipe, so the event loop wakes up. */
3636 static void
3637 async_file_mark (void)
3639 linux_event_pipe.mark ();
/* Wait for an event from the inferior.  In synchronous mode (no
   TARGET_WNOHANG), loop until wait_1 produces a real event; in
   asynchronous mode, poll once and re-arm the event loop if an event
   was reported.  Returns the ptid of the LWP the event is for, and
   fills in OURSTATUS.  */

ptid_t
linux_process_target::wait (ptid_t ptid,
			    target_waitstatus *ourstatus,
			    target_wait_flags target_options)
{
  ptid_t event_ptid;

  /* Flush the async file first.  */
  if (target_is_async_p ())
    async_file_flush ();

  /* In synchronous mode, keep pulling until wait_1 reports something
     other than TARGET_WAITKIND_IGNORE.  */
  do
    {
      event_ptid = wait_1 (ptid, ourstatus, target_options);
    }
  while ((target_options & TARGET_WNOHANG) == 0
	 && ourstatus->kind () == TARGET_WAITKIND_IGNORE);

  /* If at least one stop was reported, there may be more.  A single
     SIGCHLD can signal more than one child stop.  */
  if (target_is_async_p ()
      && (target_options & TARGET_WNOHANG) != 0
      && event_ptid != null_ptid)
    async_file_mark ();

  return event_ptid;
}
3670 /* Send a signal to an LWP. */
3672 static int
3673 kill_lwp (unsigned long lwpid, int signo)
3675 int ret;
3677 errno = 0;
3678 ret = syscall (__NR_tkill, lwpid, signo);
3679 if (errno == ENOSYS)
3681 /* If tkill fails, then we are not using nptl threads, a
3682 configuration we no longer support. */
3683 perror_with_name (("tkill"));
3685 return ret;
3688 void
3689 linux_stop_lwp (struct lwp_info *lwp)
3691 send_sigstop (lwp);
3694 static void
3695 send_sigstop (struct lwp_info *lwp)
3697 int pid = lwp->thread->id.lwp ();
3699 /* If we already have a pending stop signal for this process, don't
3700 send another. */
3701 if (lwp->stop_expected)
3703 threads_debug_printf ("Have pending sigstop for lwp %d", pid);
3705 return;
3708 threads_debug_printf ("Sending sigstop to lwp %d", pid);
3710 lwp->stop_expected = 1;
3711 kill_lwp (pid, SIGSTOP);
3714 static void
3715 send_sigstop (thread_info *thread, lwp_info *except)
3717 struct lwp_info *lwp = get_thread_lwp (thread);
3719 /* Ignore EXCEPT. */
3720 if (lwp == except)
3721 return;
3723 if (lwp->stopped)
3724 return;
3726 send_sigstop (lwp);
3729 /* Increment the suspend count of an LWP, and stop it, if not stopped
3730 yet. */
3731 static void
3732 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3734 struct lwp_info *lwp = get_thread_lwp (thread);
3736 /* Ignore EXCEPT. */
3737 if (lwp == except)
3738 return;
3740 lwp_suspended_inc (lwp);
3742 send_sigstop (thread, except);
3745 /* Mark LWP dead, with WSTAT as exit status pending to report later.
3746 If THREAD_EVENT is true, interpret WSTAT as a thread exit event
3747 instead of a process exit event. This is meaningful for the leader
3748 thread, as we normally report a process-wide exit event when we see
3749 the leader exit, and a thread exit event when we see any other
3750 thread exit. */
3752 static void
3753 mark_lwp_dead (struct lwp_info *lwp, int wstat, bool thread_event)
3755 /* Store the exit status for later. */
3756 lwp->status_pending_p = 1;
3757 lwp->status_pending = wstat;
3759 /* Store in waitstatus as well, as there's nothing else to process
3760 for this event. */
3761 if (WIFEXITED (wstat))
3763 if (thread_event)
3764 lwp->waitstatus.set_thread_exited (WEXITSTATUS (wstat));
3765 else
3766 lwp->waitstatus.set_exited (WEXITSTATUS (wstat));
3768 else if (WIFSIGNALED (wstat))
3770 gdb_assert (!thread_event);
3771 lwp->waitstatus.set_signalled (gdb_signal_from_host (WTERMSIG (wstat)));
3773 else
3774 gdb_assert_not_reached ("unknown status kind");
3776 /* Prevent trying to stop it. */
3777 lwp->stopped = 1;
3779 /* No further stops are expected from a dead lwp. */
3780 lwp->stop_expected = 0;
3783 /* Return true if LWP has exited already, and has a pending exit event
3784 to report to GDB. */
3786 static int
3787 lwp_is_marked_dead (struct lwp_info *lwp)
3789 return (lwp->status_pending_p
3790 && (WIFEXITED (lwp->status_pending)
3791 || WIFSIGNALED (lwp->status_pending)));
/* Wait until every LWP has reported a stop, leaving all events
   pending.  If the previously-current thread died in the process,
   switch away from it so a subsequent command doesn't apply to the
   wrong process.  */

void
linux_process_target::wait_for_sigstop ()
{
  thread_info *saved_thread;
  ptid_t saved_tid;
  int wstat;
  int ret;

  /* Remember which thread was current, so we can detect below whether
     it survived the wait.  */
  saved_thread = current_thread;
  if (saved_thread != NULL)
    saved_tid = saved_thread->id;
  else
    saved_tid = null_ptid; /* avoid bogus unused warning */

  scoped_restore_current_thread restore_thread;

  threads_debug_printf ("pulling events");

  /* Passing NULL_PTID as filter indicates we want all events to be
     left pending.  Eventually this returns when there are no
     unwaited-for children left.  */
  ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
  gdb_assert (ret == -1);

  if (saved_thread == NULL || mythread_alive (saved_tid))
    return;
  else
    {
      threads_debug_printf ("Previously current thread died.");

      /* We can't change the current inferior behind GDB's back,
	 otherwise, a subsequent command may apply to the wrong
	 process.  */
      restore_thread.dont_restore ();
      switch_to_thread (nullptr);
    }
}
3832 bool
3833 linux_process_target::stuck_in_jump_pad (thread_info *thread)
3835 struct lwp_info *lwp = get_thread_lwp (thread);
3837 if (lwp->suspended != 0)
3839 internal_error ("LWP %ld is suspended, suspended=%d\n",
3840 thread->id.lwp (), lwp->suspended);
3842 gdb_assert (lwp->stopped);
3844 /* Allow debugging the jump pad, gdb_collect, etc.. */
3845 return (supports_fast_tracepoints ()
3846 && agent_loaded_p ()
3847 && (gdb_breakpoint_here (lwp->stop_pc)
3848 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3849 || thread->last_resume_kind == resume_step)
3850 && (linux_fast_tracepoint_collecting (lwp, NULL)
3851 != fast_tpoint_collect_result::not_collecting));
3854 void
3855 linux_process_target::move_out_of_jump_pad (thread_info *thread)
3857 struct lwp_info *lwp = get_thread_lwp (thread);
3858 int *wstat;
3860 if (lwp->suspended != 0)
3862 internal_error ("LWP %ld is suspended, suspended=%d\n",
3863 thread->id.lwp (), lwp->suspended);
3865 gdb_assert (lwp->stopped);
3867 /* For gdb_breakpoint_here. */
3868 scoped_restore_current_thread restore_thread;
3869 switch_to_thread (thread);
3871 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3873 /* Allow debugging the jump pad, gdb_collect, etc. */
3874 if (!gdb_breakpoint_here (lwp->stop_pc)
3875 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3876 && thread->last_resume_kind != resume_step
3877 && maybe_move_out_of_jump_pad (lwp, wstat))
3879 threads_debug_printf ("LWP %ld needs stabilizing (in jump pad)",
3880 thread->id.lwp ());
3882 if (wstat)
3884 lwp->status_pending_p = 0;
3885 enqueue_one_deferred_signal (lwp, wstat);
3887 threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad",
3888 WSTOPSIG (*wstat), thread->id.lwp ());
3891 resume_one_lwp (lwp, 0, 0, NULL);
3893 else
3894 lwp_suspended_inc (lwp);
3897 static bool
3898 lwp_running (thread_info *thread)
3900 struct lwp_info *lwp = get_thread_lwp (thread);
3902 if (lwp_is_marked_dead (lwp))
3903 return false;
3905 return !lwp->stopped;
/* Stop all LWPs (except EXCEPT, if non-NULL) by sending them SIGSTOP,
   and wait until they have all reported a stop.  If SUSPEND is
   non-zero, also increment each LWP's suspend count.  The global
   stopping_threads records the mode for the duration.  */

void
linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
{
  /* Should not be called recursively.  */
  gdb_assert (stopping_threads == NOT_STOPPING_THREADS);

  THREADS_SCOPED_DEBUG_ENTER_EXIT;

  threads_debug_printf
    ("%s, except=%s", suspend ? "stop-and-suspend" : "stop",
     (except != NULL
      ? target_pid_to_str (except->thread->id).c_str ()
      : "none"));

  stopping_threads = (suspend
		      ? STOPPING_AND_SUSPENDING_THREADS
		      : STOPPING_THREADS);

  if (suspend)
    for_each_thread ([&] (thread_info *thread)
      {
	suspend_and_send_sigstop (thread, except);
      });
  else
    for_each_thread ([&] (thread_info *thread)
      {
	send_sigstop (thread, except);
      });

  /* Block until every LWP has reported its stop, leaving all events
     pending.  */
  wait_for_sigstop ();
  stopping_threads = NOT_STOPPING_THREADS;

  threads_debug_printf ("setting stopping_threads back to !stopping");
}
3943 /* Enqueue one signal in the chain of signals which need to be
3944 delivered to this process on next resume. */
3946 static void
3947 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
3949 lwp->pending_signals.emplace_back (signal);
3950 if (info == nullptr)
3951 memset (&lwp->pending_signals.back ().info, 0, sizeof (siginfo_t));
3952 else
3953 lwp->pending_signals.back ().info = *info;
3956 void
3957 linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
3959 thread_info *thread = lwp->thread;
3960 regcache *regcache = get_thread_regcache (thread);
3962 scoped_restore_current_thread restore_thread;
3964 switch_to_thread (thread);
3965 std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);
3967 for (CORE_ADDR pc : next_pcs)
3968 set_single_step_breakpoint (pc, current_thread->id);
3972 linux_process_target::single_step (lwp_info* lwp)
3974 int step = 0;
3976 if (supports_hardware_single_step ())
3978 step = 1;
3980 else if (supports_software_single_step ())
3982 install_software_single_step_breakpoints (lwp);
3983 step = 0;
3985 else
3986 threads_debug_printf ("stepping is not implemented on this target");
3988 return step;
3991 /* The signal can be delivered to the inferior if we are not trying to
3992 finish a fast tracepoint collect. Since signal can be delivered in
3993 the step-over, the program may go to signal handler and trap again
3994 after return from the signal handler. We can live with the spurious
3995 double traps. */
3997 static int
3998 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4000 return (lwp->collecting_fast_tracepoint
4001 == fast_tpoint_collect_result::not_collecting);
/* Resume LWP with ptrace, single-stepping if STEP, delivering SIGNAL
   (with optional siginfo INFO).  May enqueue the signal for later
   instead of delivering it, and may refuse to resume at all if the
   LWP has a pending status.  Throws on ptrace failure (see
   resume_one_lwp for the catching wrapper).  */

void
linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
					    int signal, siginfo_t *info)
{
  thread_info *thread = lwp->thread;
  int ptrace_request;

  /* Note that target description may not be initialised
     (proc->tdesc == NULL) at this point because the program hasn't
     stopped at the first instruction yet.  It means GDBserver skips
     the extra traps from the wrapper program (see option --wrapper).
     Code in this function that requires register access should be
     guarded by proc->tdesc == NULL or something else.  */

  /* Nothing to do if the LWP is already running.  */
  if (lwp->stopped == 0)
    return;

  gdb_assert (lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);

  fast_tpoint_collect_result fast_tp_collecting
    = lwp->collecting_fast_tracepoint;

  gdb_assert (!stabilizing_threads
	      || (fast_tp_collecting
		  != fast_tpoint_collect_result::not_collecting));

  /* Cancel actions that rely on GDB not changing the PC (e.g., the
     user used the "jump" command, or "set $pc = foo").  */
  if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
    {
      /* Collecting 'while-stepping' actions doesn't make sense
	 anymore.  */
      release_while_stepping_state_list (thread);
    }

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if it can't be delivered to the
     inferior right now.  */
  if (signal != 0
      && (lwp->status_pending_p
	  || !lwp->pending_signals.empty ()
	  || !lwp_signal_can_be_delivered (lwp)))
    {
      enqueue_pending_signal (lwp, signal, info);

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  /* A pending status must be reported before the LWP can run again.  */
  if (lwp->status_pending_p)
    {
      threads_debug_printf
	("Not resuming lwp %ld (%s, stop %s); has pending status",
	 thread->id.lwp (), step ? "step" : "continue",
	 lwp->stop_expected ? "expected" : "not expected");
      return;
    }

  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      threads_debug_printf (" pending reinsert at 0x%s",
			    paddress (lwp->bp_reinsert));

      if (supports_hardware_single_step ())
	{
	  if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
	    {
	      if (step == 0)
		warning ("BAD - reinserting but not stepping.");
	      if (lwp->suspended)
		warning ("BAD - reinserting and suspended(%d).",
			 lwp->suspended);
	    }
	}

      step = maybe_hw_step (thread);
    }

  if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
    threads_debug_printf
      ("lwp %ld wants to get out of fast tracepoint jump pad "
       "(exit-jump-pad-bkpt)", thread->id.lwp ());

  else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
    {
      threads_debug_printf
	("lwp %ld wants to get out of fast tracepoint jump pad single-stepping",
	 thread->id.lwp ());

      if (supports_hardware_single_step ())
	step = 1;
      else
	{
	  internal_error ("moving out of jump pad single-stepping"
			  " not implemented on this target");
	}
    }

  /* If we have while-stepping actions in this thread set it stepping.
     If we have a signal to deliver, it may or may not be set to
     SIG_IGN, we don't know.  Assume so, and allow collecting
     while-stepping into a signal handler.  A possible smart thing to
     do would be to set an internal breakpoint at the signal return
     address, continue, and carry on catching this while-stepping
     action only when that breakpoint is hit.  A future
     enhancement.  */
  if (thread->while_stepping != NULL)
    {
      threads_debug_printf
	("lwp %ld has a while-stepping action -> forcing step.",
	 thread->id.lwp ());

      step = single_step (lwp);
    }

  /* Record the PC we are resuming from, when registers are readable
     (guarded by tdesc != nullptr; see the note at the top).  */
  if (thread->process ()->tdesc != nullptr && low_supports_breakpoints ())
    {
      regcache *regcache = get_thread_regcache (current_thread);

      lwp->stop_pc = low_get_pc (regcache);

      threads_debug_printf (" %s from pc 0x%lx", step ? "step" : "continue",
			    (long) lwp->stop_pc);
    }

  /* If we have pending signals, consume one if it can be delivered to
     the inferior.  */
  if (!lwp->pending_signals.empty () && lwp_signal_can_be_delivered (lwp))
    {
      const pending_signal &p_sig = lwp->pending_signals.front ();

      signal = p_sig.signal;
      if (p_sig.info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, thread->id.lwp (), (PTRACE_TYPE_ARG3) 0,
		&p_sig.info);

      lwp->pending_signals.pop_front ();
    }

  threads_debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)",
			thread->id.lwp (), step ? "step" : "continue", signal,
			lwp->stop_expected ? "expected" : "not expected");

  low_prepare_to_resume (lwp);

  regcache_invalidate_thread (thread);
  errno = 0;
  lwp->stepping = step;
  if (step)
    ptrace_request = PTRACE_SINGLESTEP;
  else if (gdb_catching_syscalls_p (lwp))
    ptrace_request = PTRACE_SYSCALL;
  else
    ptrace_request = PTRACE_CONT;
  ptrace (ptrace_request,
	  thread->id.lwp (),
	  (PTRACE_TYPE_ARG3) 0,
	  /* Coerce to a uintptr_t first to avoid potential gcc warning
	     of coercing an 8 byte integer to a 4 byte pointer.  */
	  (PTRACE_TYPE_ARG4) (uintptr_t) signal);

  if (errno)
    {
      int saved_errno = errno;

      threads_debug_printf ("ptrace errno = %d (%s)",
			    saved_errno, strerror (saved_errno));

      errno = saved_errno;
      perror_with_name ("resuming thread");
    }

  /* Successfully resumed.  Clear state that no longer makes sense,
     and mark the LWP as running.  Must not do this before resuming
     otherwise if that fails other code will be confused.  E.g., we'd
     later try to stop the LWP and hang forever waiting for a stop
     status.  Note that we must not throw after this is cleared,
     otherwise handle_zombie_lwp_error would get confused.  */
  lwp->stopped = 0;
  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
}
4199 void
4200 linux_process_target::low_prepare_to_resume (lwp_info *lwp)
4202 /* Nop. */
4205 /* Called when we try to resume a stopped LWP and that errors out. If
4206 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4207 or about to become), discard the error, clear any pending status
4208 the LWP may have, and return true (we'll collect the exit status
4209 soon enough). Otherwise, return false. */
4211 static int
4212 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4214 thread_info *thread = lp->thread;
4216 /* If we get an error after resuming the LWP successfully, we'd
4217 confuse !T state for the LWP being gone. */
4218 gdb_assert (lp->stopped);
4220 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4221 because even if ptrace failed with ESRCH, the tracee may be "not
4222 yet fully dead", but already refusing ptrace requests. In that
4223 case the tracee has 'R (Running)' state for a little bit
4224 (observed in Linux 3.18). See also the note on ESRCH in the
4225 ptrace(2) man page. Instead, check whether the LWP has any state
4226 other than ptrace-stopped. */
4228 /* Don't assume anything if /proc/PID/status can't be read. */
4229 if (linux_proc_pid_is_trace_stopped_nowarn (thread->id.lwp ()) == 0)
4231 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4232 lp->status_pending_p = 0;
4233 return 1;
4235 return 0;
4238 void
4239 linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
4240 siginfo_t *info)
4244 resume_one_lwp_throw (lwp, step, signal, info);
4246 catch (const gdb_exception_error &ex)
4248 if (check_ptrace_stopped_lwp_gone (lwp))
4250 /* This could because we tried to resume an LWP after its leader
4251 exited. Mark it as resumed, so we can collect an exit event
4252 from it. */
4253 lwp->stopped = 0;
4254 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4256 else
4257 throw;
/* This function is called once per thread via for_each_thread.
   We look up which resume request applies to THREAD and mark it with a
   pointer to the appropriate resume request.

   This algorithm is O(threads * resume elements), but resume elements
   is small (and will remain small at least until GDB supports thread
   suspension).  */

static void
linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  for (int ndx = 0; ndx < n; ndx++)
    {
      ptid_t ptid = resume[ndx].thread;
      if (ptid == minus_one_ptid
	  || ptid == thread->id
	  /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
	     of PID'.  */
	  || (ptid.pid () == thread->id.pid ()
	      && (ptid.is_pid ()
		  || ptid.lwp () == -1)))
	{
	  /* Ignore stop requests for a thread already stopping (or
	     stopped) at GDB's request.  */
	  if (resume[ndx].kind == resume_stop
	      && thread->last_resume_kind == resume_stop)
	    {
	      threads_debug_printf
		("already %s LWP %ld at GDB's request",
		 (thread->last_status.kind () == TARGET_WAITKIND_STOPPED
		  ? "stopped" : "stopping"),
		 thread->id.lwp ());

	      continue;
	    }

	  /* Ignore (wildcard) resume requests for already-resumed
	     threads.  */
	  if (resume[ndx].kind != resume_stop
	      && thread->last_resume_kind != resume_stop)
	    {
	      threads_debug_printf
		("already %s LWP %ld at GDB's request",
		 (thread->last_resume_kind == resume_step
		  ? "stepping" : "continuing"),
		 thread->id.lwp ());
	      continue;
	    }

	  /* Don't let wildcard resumes resume fork/vfork/clone
	     children that GDB does not yet know are new children.  */
	  if (lwp->relative != NULL)
	    {
	      struct lwp_info *rel = lwp->relative;

	      if (rel->status_pending_p
		  && is_new_child_status (rel->waitstatus.kind ()))
		{
		  threads_debug_printf
		    ("not resuming LWP %ld: has queued stop reply",
		     thread->id.lwp ());
		  continue;
		}
	    }

	  /* If the thread has a pending event that has already been
	     reported to GDBserver core, but GDB has not pulled the
	     event out of the vStopped queue yet, likewise, ignore the
	     (wildcard) resume request.  */
	  if (in_queued_stop_replies (thread->id))
	    {
	      threads_debug_printf
		("not resuming LWP %ld: has queued stop reply",
		 thread->id.lwp ());
	      continue;
	    }

	  /* Found the applicable request; record it on the LWP.  */
	  lwp->resume = &resume[ndx];
	  thread->last_resume_kind = lwp->resume->kind;

	  lwp->step_range_start = lwp->resume->step_range_start;
	  lwp->step_range_end = lwp->resume->step_range_end;

	  /* If we had a deferred signal to report, dequeue one now.
	     This can happen if LWP gets more than one signal while
	     trying to get out of a jump pad.  */
	  if (lwp->stopped
	      && !lwp->status_pending_p
	      && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
	    {
	      lwp->status_pending_p = 1;

	      threads_debug_printf
		("Dequeueing deferred signal %d for LWP %ld, "
		 "leaving status pending.",
		 WSTOPSIG (lwp->status_pending),
		 thread->id.lwp ());
	    }

	  return;
	}
    }

  /* No resume action for this thread.  */
  lwp->resume = NULL;
}
4368 bool
4369 linux_process_target::resume_status_pending (thread_info *thread)
4371 struct lwp_info *lwp = get_thread_lwp (thread);
4373 /* LWPs which will not be resumed are not interesting, because
4374 we might not wait for them next time through linux_wait. */
4375 if (lwp->resume == NULL)
4376 return false;
4378 return thread_still_has_status_pending (thread);
/* Return true if THREAD is stopped at a breakpoint (or fast
   tracepoint jump) that we inserted and that must be stepped over
   before the thread can be resumed.  Returns false for threads that
   won't run, already have a pending status, moved their PC, or are
   sitting on a breakpoint GDB itself expects to hit.  */

bool
linux_process_target::thread_needs_step_over (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  CORE_ADDR pc;

  /* GDBserver is skipping the extra traps from the wrapper program,
     don't have to do step over.  */
  if (thread->process ()->tdesc == nullptr)
    return false;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      threads_debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped",
			    thread->id.lwp ());
      return false;
    }

  if (thread->last_resume_kind == resume_stop)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, should remain stopped",
	 thread->id.lwp ());
      return false;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      threads_debug_printf ("Need step over [LWP %ld]? Ignoring, suspended",
			    thread->id.lwp ());
      return false;
    }

  if (lwp->status_pending_p)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, has pending status.",
	 thread->id.lwp ());
      return false;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Cancelling, PC was changed. "
	 "Old stop_pc was 0x%s, PC is now 0x%s", thread->id.lwp (),
	 paddress (lwp->stop_pc), paddress (pc));
      return false;
    }

  /* On software single step target, resume the inferior with signal
     rather than stepping over.  */
  if (supports_software_single_step ()
      && !lwp->pending_signals.empty ()
      && lwp_signal_can_be_delivered (lwp))
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, has pending signals.",
	 thread->id.lwp ());

      return false;
    }

  /* breakpoint_here / gdb_breakpoint_here below operate on the
     current thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  If the condition is being evaluated on the target's side
	 and it evaluate to false, step over this breakpoint as well.  */
      if (gdb_breakpoint_here (pc)
	  && gdb_condition_true_at_breakpoint (pc)
	  && gdb_no_commands_at_breakpoint (pc))
	{
	  threads_debug_printf ("Need step over [LWP %ld]? yes, but found"
				" GDB breakpoint at 0x%s; skipping step over",
				thread->id.lwp (), paddress (pc));

	  return false;
	}
      else
	{
	  threads_debug_printf ("Need step over [LWP %ld]? yes, "
				"found breakpoint at 0x%s",
				thread->id.lwp (), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_thread stops looking.  */
	  return true;
	}
    }

  threads_debug_printf
    ("Need step over [LWP %ld]? No, no breakpoint found at 0x%s",
     thread->id.lwp (), paddress (pc));

  return false;
}
4496 void
4497 linux_process_target::start_step_over (lwp_info *lwp)
4499 thread_info *thread = lwp->thread;
4500 CORE_ADDR pc;
4502 threads_debug_printf ("Starting step-over on LWP %ld. Stopping all threads",
4503 thread->id.lwp ());
4505 stop_all_lwps (1, lwp);
4507 if (lwp->suspended != 0)
4509 internal_error ("LWP %ld suspended=%d\n", thread->id.lwp (),
4510 lwp->suspended);
4513 threads_debug_printf ("Done stopping all threads for step-over.");
4515 /* Note, we should always reach here with an already adjusted PC,
4516 either by GDB (if we're resuming due to GDB's request), or by our
4517 caller, if we just finished handling an internal breakpoint GDB
4518 shouldn't care about. */
4519 pc = get_pc (lwp);
4521 bool step = false;
4523 scoped_restore_current_thread restore_thread;
4524 switch_to_thread (thread);
4526 lwp->bp_reinsert = pc;
4527 uninsert_breakpoints_at (pc);
4528 uninsert_fast_tracepoint_jumps_at (pc);
4530 step = single_step (lwp);
4533 resume_one_lwp (lwp, step, 0, NULL);
4535 /* Require next event from this LWP. */
4536 step_over_bkpt = thread->id;
4539 bool
4540 linux_process_target::finish_step_over (lwp_info *lwp)
4542 if (lwp->bp_reinsert != 0)
4544 scoped_restore_current_thread restore_thread;
4546 threads_debug_printf ("Finished step over.");
4548 switch_to_thread (lwp->thread);
4550 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4551 may be no breakpoint to reinsert there by now. */
4552 reinsert_breakpoints_at (lwp->bp_reinsert);
4553 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4555 lwp->bp_reinsert = 0;
4557 /* Delete any single-step breakpoints. No longer needed. We
4558 don't have to worry about other threads hitting this trap,
4559 and later not being able to explain it, because we were
4560 stepping over a breakpoint, and we hold all threads but
4561 LWP stopped while doing that. */
4562 if (!supports_hardware_single_step ())
4564 gdb_assert (has_single_step_breakpoints (current_thread));
4565 delete_single_step_breakpoints (current_thread);
4568 step_over_bkpt = null_ptid;
4569 return true;
4571 else
4572 return false;
4575 void
4576 linux_process_target::complete_ongoing_step_over ()
4578 if (step_over_bkpt != null_ptid)
4580 struct lwp_info *lwp;
4581 int wstat;
4582 int ret;
4584 threads_debug_printf ("detach: step over in progress, finish it first");
4586 /* Passing NULL_PTID as filter indicates we want all events to
4587 be left pending. Eventually this returns when there are no
4588 unwaited-for children left. */
4589 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
4590 __WALL);
4591 gdb_assert (ret == -1);
4593 lwp = find_lwp_pid (step_over_bkpt);
4594 if (lwp != NULL)
4596 finish_step_over (lwp);
4598 /* If we got our step SIGTRAP, don't leave it pending,
4599 otherwise we would report it to GDB as a spurious
4600 SIGTRAP. */
4601 gdb_assert (lwp->status_pending_p);
4602 if (WIFSTOPPED (lwp->status_pending)
4603 && WSTOPSIG (lwp->status_pending) == SIGTRAP)
4605 thread_info *thread = lwp->thread;
4606 if (thread->last_resume_kind != resume_step)
4608 threads_debug_printf ("detach: discard step-over SIGTRAP");
4610 lwp->status_pending_p = 0;
4611 lwp->status_pending = 0;
4612 resume_one_lwp (lwp, lwp->stepping, 0, NULL);
4614 else
4615 threads_debug_printf
4616 ("detach: resume_step, not discarding step-over SIGTRAP");
4619 step_over_bkpt = null_ptid;
4620 unsuspend_all_lwps (lwp);
4624 void
4625 linux_process_target::resume_one_thread (thread_info *thread,
4626 bool leave_all_stopped)
4628 struct lwp_info *lwp = get_thread_lwp (thread);
4629 int leave_pending;
4631 if (lwp->resume == NULL)
4632 return;
4634 if (lwp->resume->kind == resume_stop)
4636 threads_debug_printf ("resume_stop request for LWP %ld",
4637 thread->id.lwp ());
4639 if (!lwp->stopped)
4641 threads_debug_printf ("stopping LWP %ld", thread->id.lwp ());
4643 /* Stop the thread, and wait for the event asynchronously,
4644 through the event loop. */
4645 send_sigstop (lwp);
4647 else
4649 threads_debug_printf ("already stopped LWP %ld", thread->id.lwp ());
4651 /* The LWP may have been stopped in an internal event that
4652 was not meant to be notified back to GDB (e.g., gdbserver
4653 breakpoint), so we should be reporting a stop event in
4654 this case too. */
4656 /* If the thread already has a pending SIGSTOP, this is a
4657 no-op. Otherwise, something later will presumably resume
4658 the thread and this will cause it to cancel any pending
4659 operation, due to last_resume_kind == resume_stop. If
4660 the thread already has a pending status to report, we
4661 will still report it the next time we wait - see
4662 status_pending_p_callback. */
4664 /* If we already have a pending signal to report, then
4665 there's no need to queue a SIGSTOP, as this means we're
4666 midway through moving the LWP out of the jumppad, and we
4667 will report the pending signal as soon as that is
4668 finished. */
4669 if (lwp->pending_signals_to_report.empty ())
4670 send_sigstop (lwp);
4673 /* For stop requests, we're done. */
4674 lwp->resume = NULL;
4675 thread->last_status.set_ignore ();
4676 return;
4679 /* If this thread which is about to be resumed has a pending status,
4680 then don't resume it - we can just report the pending status.
4681 Likewise if it is suspended, because e.g., another thread is
4682 stepping past a breakpoint. Make sure to queue any signals that
4683 would otherwise be sent. In all-stop mode, we do this decision
4684 based on if *any* thread has a pending status. If there's a
4685 thread that needs the step-over-breakpoint dance, then don't
4686 resume any other thread but that particular one. */
4687 leave_pending = (lwp->suspended
4688 || lwp->status_pending_p
4689 || leave_all_stopped);
4691 /* If we have a new signal, enqueue the signal. */
4692 if (lwp->resume->sig != 0)
4694 siginfo_t info, *info_p;
4696 /* If this is the same signal we were previously stopped by,
4697 make sure to queue its siginfo. */
4698 if (WIFSTOPPED (lwp->last_status)
4699 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
4700 && ptrace (PTRACE_GETSIGINFO, thread->id.lwp (),
4701 (PTRACE_TYPE_ARG3) 0, &info) == 0)
4702 info_p = &info;
4703 else
4704 info_p = NULL;
4706 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
4709 if (!leave_pending)
4711 threads_debug_printf ("resuming LWP %ld", thread->id.lwp ());
4713 proceed_one_lwp (thread, NULL);
4715 else
4716 threads_debug_printf ("leaving LWP %ld stopped", thread->id.lwp ());
4718 thread->last_status.set_ignore ();
4719 lwp->resume = NULL;
4722 void
4723 linux_process_target::resume (thread_resume *resume_info, size_t n)
4725 thread_info *need_step_over = NULL;
4727 THREADS_SCOPED_DEBUG_ENTER_EXIT;
4729 for_each_thread ([&] (thread_info *thread)
4731 linux_set_resume_request (thread, resume_info, n);
4734 /* If there is a thread which would otherwise be resumed, which has
4735 a pending status, then don't resume any threads - we can just
4736 report the pending status. Make sure to queue any signals that
4737 would otherwise be sent. In non-stop mode, we'll apply this
4738 logic to each thread individually. We consume all pending events
4739 before considering to start a step-over (in all-stop). */
4740 bool any_pending = false;
4741 if (!non_stop)
4742 any_pending = find_thread ([this] (thread_info *thread)
4744 return resume_status_pending (thread);
4745 }) != nullptr;
4747 /* If there is a thread which would otherwise be resumed, which is
4748 stopped at a breakpoint that needs stepping over, then don't
4749 resume any threads - have it step over the breakpoint with all
4750 other threads stopped, then resume all threads again. Make sure
4751 to queue any signals that would otherwise be delivered or
4752 queued. */
4753 if (!any_pending && low_supports_breakpoints ())
4754 need_step_over = find_thread ([this] (thread_info *thread)
4756 return thread_needs_step_over (thread);
4759 bool leave_all_stopped = (need_step_over != NULL || any_pending);
4761 if (need_step_over != NULL)
4762 threads_debug_printf ("Not resuming all, need step over");
4763 else if (any_pending)
4764 threads_debug_printf ("Not resuming, all-stop and found "
4765 "an LWP with pending status");
4766 else
4767 threads_debug_printf ("Resuming, no pending status or step over needed");
4769 /* Even if we're leaving threads stopped, queue all signals we'd
4770 otherwise deliver. */
4771 for_each_thread ([&] (thread_info *thread)
4773 resume_one_thread (thread, leave_all_stopped);
4776 if (need_step_over)
4777 start_step_over (get_thread_lwp (need_step_over));
4779 /* We may have events that were pending that can/should be sent to
4780 the client now. Trigger a linux_wait call. */
4781 if (target_is_async_p ())
4782 async_file_mark ();
4785 void
4786 linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
4788 struct lwp_info *lwp = get_thread_lwp (thread);
4789 int step;
4791 if (lwp == except)
4792 return;
4794 threads_debug_printf ("lwp %ld", thread->id.lwp ());
4796 if (!lwp->stopped)
4798 threads_debug_printf (" LWP %ld already running", thread->id.lwp ());
4799 return;
4802 if (thread->last_resume_kind == resume_stop
4803 && thread->last_status.kind () != TARGET_WAITKIND_IGNORE)
4805 threads_debug_printf (" client wants LWP to remain %ld stopped",
4806 thread->id.lwp ());
4807 return;
4810 if (lwp->status_pending_p)
4812 threads_debug_printf (" LWP %ld has pending status, leaving stopped",
4813 thread->id.lwp ());
4814 return;
4817 gdb_assert (lwp->suspended >= 0);
4819 if (lwp->suspended)
4821 threads_debug_printf (" LWP %ld is suspended", thread->id.lwp ());
4822 return;
4825 if (thread->last_resume_kind == resume_stop
4826 && lwp->pending_signals_to_report.empty ()
4827 && (lwp->collecting_fast_tracepoint
4828 == fast_tpoint_collect_result::not_collecting))
4830 /* We haven't reported this LWP as stopped yet (otherwise, the
4831 last_status.kind check above would catch it, and we wouldn't
4832 reach here. This LWP may have been momentarily paused by a
4833 stop_all_lwps call while handling for example, another LWP's
4834 step-over. In that case, the pending expected SIGSTOP signal
4835 that was queued at vCont;t handling time will have already
4836 been consumed by wait_for_sigstop, and so we need to requeue
4837 another one here. Note that if the LWP already has a SIGSTOP
4838 pending, this is a no-op. */
4840 threads_debug_printf
4841 ("Client wants LWP %ld to stop. Making sure it has a SIGSTOP pending",
4842 thread->id.lwp ());
4844 send_sigstop (lwp);
4847 if (thread->last_resume_kind == resume_step)
4849 threads_debug_printf (" stepping LWP %ld, client wants it stepping",
4850 thread->id.lwp ());
4852 /* If resume_step is requested by GDB, install single-step
4853 breakpoints when the thread is about to be actually resumed if
4854 the single-step breakpoints weren't removed. */
4855 if (supports_software_single_step ()
4856 && !has_single_step_breakpoints (thread))
4857 install_software_single_step_breakpoints (lwp);
4859 step = maybe_hw_step (thread);
4861 else if (lwp->bp_reinsert != 0)
4863 threads_debug_printf (" stepping LWP %ld, reinsert set",
4864 thread->id.lwp ());
4866 step = maybe_hw_step (thread);
4868 else
4869 step = 0;
4871 resume_one_lwp (lwp, step, 0, NULL);
4874 void
4875 linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
4876 lwp_info *except)
4878 struct lwp_info *lwp = get_thread_lwp (thread);
4880 if (lwp == except)
4881 return;
4883 lwp_suspended_decr (lwp);
4885 proceed_one_lwp (thread, except);
4888 void
4889 linux_process_target::proceed_all_lwps ()
4891 thread_info *need_step_over;
4893 /* If there is a thread which would otherwise be resumed, which is
4894 stopped at a breakpoint that needs stepping over, then don't
4895 resume any threads - have it step over the breakpoint with all
4896 other threads stopped, then resume all threads again. */
4898 if (low_supports_breakpoints ())
4900 need_step_over = find_thread ([this] (thread_info *thread)
4902 return thread_needs_step_over (thread);
4905 if (need_step_over != NULL)
4907 threads_debug_printf ("found thread %ld needing a step-over",
4908 need_step_over->id.lwp ());
4910 start_step_over (get_thread_lwp (need_step_over));
4911 return;
4915 threads_debug_printf ("Proceeding, no step-over needed");
4917 for_each_thread ([this] (thread_info *thread)
4919 proceed_one_lwp (thread, NULL);
4923 void
4924 linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
4926 THREADS_SCOPED_DEBUG_ENTER_EXIT;
4928 if (except)
4929 threads_debug_printf ("except=(LWP %ld)", except->thread->id.lwp ());
4930 else
4931 threads_debug_printf ("except=nullptr");
4933 if (unsuspend)
4934 for_each_thread ([&] (thread_info *thread)
4936 unsuspend_and_proceed_one_lwp (thread, except);
4938 else
4939 for_each_thread ([&] (thread_info *thread)
4941 proceed_one_lwp (thread, except);
4946 #ifdef HAVE_LINUX_REGSETS
4948 #define use_linux_regsets 1
4950 /* Returns true if REGSET has been disabled. */
4952 static int
4953 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4955 return (info->disabled_regsets != NULL
4956 && info->disabled_regsets[regset - info->regsets]);
4959 /* Disable REGSET. */
4961 static void
4962 disable_regset (struct regsets_info *info, struct regset_info *regset)
4964 int dr_offset;
4966 dr_offset = regset - info->regsets;
4967 if (info->disabled_regsets == NULL)
4968 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
4969 info->disabled_regsets[dr_offset] = 1;
4972 static int
4973 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4974 struct regcache *regcache)
4976 struct regset_info *regset;
4977 int saw_general_regs = 0;
4978 int pid = current_thread->id.lwp ();
4979 struct iovec iov;
4981 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4983 void *buf, *data;
4984 int nt_type, res;
4986 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4987 continue;
4989 buf = xmalloc (regset->size);
4991 nt_type = regset->nt_type;
4992 if (nt_type)
4994 iov.iov_base = buf;
4995 iov.iov_len = regset->size;
4996 data = (void *) &iov;
4998 else
4999 data = buf;
5001 #ifndef __sparc__
5002 res = ptrace (regset->get_request, pid,
5003 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5004 #else
5005 res = ptrace (regset->get_request, pid, data, nt_type);
5006 #endif
5007 if (res < 0)
5009 if (errno == EIO
5010 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5012 /* If we get EIO on a regset, or an EINVAL and the regset is
5013 optional, do not try it again for this process mode. */
5014 disable_regset (regsets_info, regset);
5016 else if (errno == ENODATA)
5018 /* ENODATA may be returned if the regset is currently
5019 not "active". This can happen in normal operation,
5020 so suppress the warning in this case. */
5022 else if (errno == ESRCH)
5024 /* At this point, ESRCH should mean the process is
5025 already gone, in which case we simply ignore attempts
5026 to read its registers. */
5028 else
5030 char s[256];
5031 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5032 pid);
5033 perror (s);
5036 else
5038 if (regset->type == GENERAL_REGS)
5039 saw_general_regs = 1;
5040 regset->store_function (regcache, buf);
5042 free (buf);
5044 if (saw_general_regs)
5045 return 0;
5046 else
5047 return 1;
5050 static int
5051 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5052 struct regcache *regcache)
5054 struct regset_info *regset;
5055 int saw_general_regs = 0;
5056 int pid = current_thread->id.lwp ();
5057 struct iovec iov;
5059 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5061 void *buf, *data;
5062 int nt_type, res;
5064 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5065 || regset->fill_function == NULL)
5066 continue;
5068 buf = xmalloc (regset->size);
5070 /* First fill the buffer with the current register set contents,
5071 in case there are any items in the kernel's regset that are
5072 not in gdbserver's regcache. */
5074 nt_type = regset->nt_type;
5075 if (nt_type)
5077 iov.iov_base = buf;
5078 iov.iov_len = regset->size;
5079 data = (void *) &iov;
5081 else
5082 data = buf;
5084 #ifndef __sparc__
5085 res = ptrace (regset->get_request, pid,
5086 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5087 #else
5088 res = ptrace (regset->get_request, pid, data, nt_type);
5089 #endif
5091 if (res == 0)
5093 /* Then overlay our cached registers on that. */
5094 regset->fill_function (regcache, buf);
5096 /* Only now do we write the register set. */
5097 #ifndef __sparc__
5098 res = ptrace (regset->set_request, pid,
5099 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5100 #else
5101 res = ptrace (regset->set_request, pid, data, nt_type);
5102 #endif
5105 if (res < 0)
5107 if (errno == EIO
5108 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5110 /* If we get EIO on a regset, or an EINVAL and the regset is
5111 optional, do not try it again for this process mode. */
5112 disable_regset (regsets_info, regset);
5114 else if (errno == ESRCH)
5116 /* At this point, ESRCH should mean the process is
5117 already gone, in which case we simply ignore attempts
5118 to change its registers. See also the related
5119 comment in resume_one_lwp. */
5120 free (buf);
5121 return 0;
5123 else
5125 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5128 else if (regset->type == GENERAL_REGS)
5129 saw_general_regs = 1;
5130 free (buf);
5132 if (saw_general_regs)
5133 return 0;
5134 else
5135 return 1;
5138 #else /* !HAVE_LINUX_REGSETS */
5140 #define use_linux_regsets 0
5141 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5142 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5144 #endif
5146 /* Return 1 if register REGNO is supported by one of the regset ptrace
5147 calls or 0 if it has to be transferred individually. */
5149 static int
5150 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5152 unsigned char mask = 1 << (regno % 8);
5153 size_t index = regno / 8;
5155 return (use_linux_regsets
5156 && (regs_info->regset_bitmap == NULL
5157 || (regs_info->regset_bitmap[index] & mask) != 0));
5160 #ifdef HAVE_LINUX_USRREGS
5162 static int
5163 register_addr (const struct usrregs_info *usrregs, int regnum)
5165 int addr;
5167 if (regnum < 0 || regnum >= usrregs->num_regs)
5168 error ("Invalid register number %d.", regnum);
5170 addr = usrregs->regmap[regnum];
5172 return addr;
5176 void
5177 linux_process_target::fetch_register (const usrregs_info *usrregs,
5178 regcache *regcache, int regno)
5180 CORE_ADDR regaddr;
5181 int i, size;
5182 char *buf;
5184 if (regno >= usrregs->num_regs)
5185 return;
5186 if (low_cannot_fetch_register (regno))
5187 return;
5189 regaddr = register_addr (usrregs, regno);
5190 if (regaddr == -1)
5191 return;
5193 size = ((register_size (regcache->tdesc, regno)
5194 + sizeof (PTRACE_XFER_TYPE) - 1)
5195 & -sizeof (PTRACE_XFER_TYPE));
5196 buf = (char *) alloca (size);
5198 int pid = current_thread->id.lwp ();
5200 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5202 errno = 0;
5203 *(PTRACE_XFER_TYPE *) (buf + i) =
5204 ptrace (PTRACE_PEEKUSER, pid,
5205 /* Coerce to a uintptr_t first to avoid potential gcc warning
5206 of coercing an 8 byte integer to a 4 byte pointer. */
5207 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5208 regaddr += sizeof (PTRACE_XFER_TYPE);
5209 if (errno != 0)
5211 /* Mark register REGNO unavailable. */
5212 supply_register (regcache, regno, NULL);
5213 return;
5217 low_supply_ptrace_register (regcache, regno, buf);
5220 void
5221 linux_process_target::store_register (const usrregs_info *usrregs,
5222 regcache *regcache, int regno)
5224 CORE_ADDR regaddr;
5225 int i, size;
5226 char *buf;
5228 if (regno >= usrregs->num_regs)
5229 return;
5230 if (low_cannot_store_register (regno))
5231 return;
5233 regaddr = register_addr (usrregs, regno);
5234 if (regaddr == -1)
5235 return;
5237 size = ((register_size (regcache->tdesc, regno)
5238 + sizeof (PTRACE_XFER_TYPE) - 1)
5239 & -sizeof (PTRACE_XFER_TYPE));
5240 buf = (char *) alloca (size);
5241 memset (buf, 0, size);
5243 low_collect_ptrace_register (regcache, regno, buf);
5245 int pid = current_thread->id.lwp ();
5247 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5249 errno = 0;
5250 ptrace (PTRACE_POKEUSER, pid,
5251 /* Coerce to a uintptr_t first to avoid potential gcc warning
5252 about coercing an 8 byte integer to a 4 byte pointer. */
5253 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5254 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5255 if (errno != 0)
5257 /* At this point, ESRCH should mean the process is
5258 already gone, in which case we simply ignore attempts
5259 to change its registers. See also the related
5260 comment in resume_one_lwp. */
5261 if (errno == ESRCH)
5262 return;
5265 if (!low_cannot_store_register (regno))
5266 error ("writing register %d: %s", regno, safe_strerror (errno));
5268 regaddr += sizeof (PTRACE_XFER_TYPE);
5271 #endif /* HAVE_LINUX_USRREGS */
5273 void
5274 linux_process_target::low_collect_ptrace_register (regcache *regcache,
5275 int regno, char *buf)
5277 collect_register (regcache, regno, buf);
5280 void
5281 linux_process_target::low_supply_ptrace_register (regcache *regcache,
5282 int regno, const char *buf)
5284 supply_register (regcache, regno, buf);
5287 void
5288 linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5289 regcache *regcache,
5290 int regno, int all)
5292 #ifdef HAVE_LINUX_USRREGS
5293 struct usrregs_info *usr = regs_info->usrregs;
5295 if (regno == -1)
5297 for (regno = 0; regno < usr->num_regs; regno++)
5298 if (all || !linux_register_in_regsets (regs_info, regno))
5299 fetch_register (usr, regcache, regno);
5301 else
5302 fetch_register (usr, regcache, regno);
5303 #endif
5306 void
5307 linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5308 regcache *regcache,
5309 int regno, int all)
5311 #ifdef HAVE_LINUX_USRREGS
5312 struct usrregs_info *usr = regs_info->usrregs;
5314 if (regno == -1)
5316 for (regno = 0; regno < usr->num_regs; regno++)
5317 if (all || !linux_register_in_regsets (regs_info, regno))
5318 store_register (usr, regcache, regno);
5320 else
5321 store_register (usr, regcache, regno);
5322 #endif
5325 void
5326 linux_process_target::fetch_registers (regcache *regcache, int regno)
5328 int use_regsets;
5329 int all = 0;
5330 const regs_info *regs_info = get_regs_info ();
5332 if (regno == -1)
5334 if (regs_info->usrregs != NULL)
5335 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5336 low_fetch_register (regcache, regno);
5338 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5339 if (regs_info->usrregs != NULL)
5340 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5342 else
5344 if (low_fetch_register (regcache, regno))
5345 return;
5347 use_regsets = linux_register_in_regsets (regs_info, regno);
5348 if (use_regsets)
5349 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5350 regcache);
5351 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5352 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5356 void
5357 linux_process_target::store_registers (regcache *regcache, int regno)
5359 int use_regsets;
5360 int all = 0;
5361 const regs_info *regs_info = get_regs_info ();
5363 if (regno == -1)
5365 all = regsets_store_inferior_registers (regs_info->regsets_info,
5366 regcache);
5367 if (regs_info->usrregs != NULL)
5368 usr_store_inferior_registers (regs_info, regcache, regno, all);
5370 else
5372 use_regsets = linux_register_in_regsets (regs_info, regno);
5373 if (use_regsets)
5374 all = regsets_store_inferior_registers (regs_info->regsets_info,
5375 regcache);
5376 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5377 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5381 bool
5382 linux_process_target::low_fetch_register (regcache *regcache, int regno)
5384 return false;
5387 /* A wrapper for the read_memory target op. */
5389 static int
5390 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5392 return the_target->read_memory (memaddr, myaddr, len);
5396 /* Helper for read_memory/write_memory using /proc/PID/mem. Because
5397 we can use a single read/write call, this can be much more
5398 efficient than banging away at PTRACE_PEEKTEXT. Also, unlike
5399 PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running threads.
5400 One an only one of READBUF and WRITEBUF is non-null. If READBUF is
5401 not null, then we're reading, otherwise we're writing. */
5403 static int
5404 proc_xfer_memory (CORE_ADDR memaddr, unsigned char *readbuf,
5405 const gdb_byte *writebuf, int len)
5407 gdb_assert ((readbuf == nullptr) != (writebuf == nullptr));
5409 process_info *proc = current_process ();
5411 int fd = proc->priv->mem_fd;
5412 if (fd == -1)
5413 return EIO;
5415 while (len > 0)
5417 int bytes;
5419 /* Use pread64/pwrite64 if available, since they save a syscall
5420 and can handle 64-bit offsets even on 32-bit platforms (for
5421 instance, SPARC debugging a SPARC64 application). But only
5422 use them if the offset isn't so high that when cast to off_t
5423 it'd be negative, as seen on SPARC64. pread64/pwrite64
5424 outright reject such offsets. lseek does not. */
5425 #ifdef HAVE_PREAD64
5426 if ((off_t) memaddr >= 0)
5427 bytes = (readbuf != nullptr
5428 ? pread64 (fd, readbuf, len, memaddr)
5429 : pwrite64 (fd, writebuf, len, memaddr));
5430 else
5431 #endif
5433 bytes = -1;
5434 if (lseek (fd, memaddr, SEEK_SET) != -1)
5435 bytes = (readbuf != nullptr
5436 ? read (fd, readbuf, len)
5437 : write (fd, writebuf, len));
5440 if (bytes < 0)
5441 return errno;
5442 else if (bytes == 0)
5444 /* EOF means the address space is gone, the whole process
5445 exited or execed. */
5446 return EIO;
5449 memaddr += bytes;
5450 if (readbuf != nullptr)
5451 readbuf += bytes;
5452 else
5453 writebuf += bytes;
5454 len -= bytes;
5457 return 0;
5461 linux_process_target::read_memory (CORE_ADDR memaddr,
5462 unsigned char *myaddr, int len)
5464 return proc_xfer_memory (memaddr, myaddr, nullptr, len);
5467 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5468 memory at MEMADDR. On failure (cannot write to the inferior)
5469 returns the value of errno. Always succeeds if LEN is zero. */
5472 linux_process_target::write_memory (CORE_ADDR memaddr,
5473 const unsigned char *myaddr, int len)
5475 if (debug_threads)
5477 /* Dump up to four bytes. */
5478 char str[4 * 2 + 1];
5479 char *p = str;
5480 int dump = len < 4 ? len : 4;
5482 for (int i = 0; i < dump; i++)
5484 sprintf (p, "%02x", myaddr[i]);
5485 p += 2;
5487 *p = '\0';
5489 threads_debug_printf ("Writing %s to 0x%08lx in process %d",
5490 str, (long) memaddr, current_process ()->pid);
5493 return proc_xfer_memory (memaddr, nullptr, myaddr, len);
5496 void
5497 linux_process_target::look_up_symbols ()
5499 #ifdef USE_THREAD_DB
5500 struct process_info *proc = current_process ();
5502 if (proc->priv->thread_db != NULL)
5503 return;
5505 thread_db_init ();
5506 #endif
5509 void
5510 linux_process_target::request_interrupt ()
5512 /* Send a SIGINT to the process group. This acts just like the user
5513 typed a ^C on the controlling terminal. */
5514 int res = ::kill (-signal_pid, SIGINT);
5515 if (res == -1)
5516 warning (_("Sending SIGINT to process group of pid %ld failed: %s"),
5517 signal_pid, safe_strerror (errno));
5520 bool
5521 linux_process_target::supports_read_auxv ()
5523 return true;
5526 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5527 to debugger memory starting at MYADDR. */
5530 linux_process_target::read_auxv (int pid, CORE_ADDR offset,
5531 unsigned char *myaddr, unsigned int len)
5533 char filename[PATH_MAX];
5534 int fd, n;
5536 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5538 fd = open (filename, O_RDONLY);
5539 if (fd < 0)
5540 return -1;
5542 if (offset != (CORE_ADDR) 0
5543 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5544 n = -1;
5545 else
5546 n = read (fd, myaddr, len);
5548 close (fd);
5550 return n;
5554 linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5555 int size, raw_breakpoint *bp)
5557 if (type == raw_bkpt_type_sw)
5558 return insert_memory_breakpoint (bp);
5559 else
5560 return low_insert_point (type, addr, size, bp);
5564 linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
5565 int size, raw_breakpoint *bp)
5567 /* Unsupported (see target.h). */
5568 return 1;
5572 linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5573 int size, raw_breakpoint *bp)
5575 if (type == raw_bkpt_type_sw)
5576 return remove_memory_breakpoint (bp);
5577 else
5578 return low_remove_point (type, addr, size, bp);
5582 linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
5583 int size, raw_breakpoint *bp)
5585 /* Unsupported (see target.h). */
5586 return 1;
5589 /* Implement the stopped_by_sw_breakpoint target_ops
5590 method. */
5592 bool
5593 linux_process_target::stopped_by_sw_breakpoint ()
5595 struct lwp_info *lwp = get_thread_lwp (current_thread);
5597 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5600 /* Implement the supports_stopped_by_sw_breakpoint target_ops
5601 method. */
5603 bool
5604 linux_process_target::supports_stopped_by_sw_breakpoint ()
5606 return true;
5609 /* Implement the stopped_by_hw_breakpoint target_ops
5610 method. */
5612 bool
5613 linux_process_target::stopped_by_hw_breakpoint ()
5615 struct lwp_info *lwp = get_thread_lwp (current_thread);
5617 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5620 /* Implement the supports_stopped_by_hw_breakpoint target_ops
5621 method. */
5623 bool
5624 linux_process_target::supports_stopped_by_hw_breakpoint ()
5626 return true;
5629 /* Implement the supports_hardware_single_step target_ops method. */
5631 bool
5632 linux_process_target::supports_hardware_single_step ()
5634 return true;
5637 bool
5638 linux_process_target::stopped_by_watchpoint ()
5640 struct lwp_info *lwp = get_thread_lwp (current_thread);
5642 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5645 CORE_ADDR
5646 linux_process_target::stopped_data_address ()
5648 struct lwp_info *lwp = get_thread_lwp (current_thread);
5650 return lwp->stopped_data_address;
5653 /* This is only used for targets that define PT_TEXT_ADDR,
5654 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5655 the target has different ways of acquiring this information, like
5656 loadmaps. */
5658 bool
5659 linux_process_target::supports_read_offsets ()
5661 #ifdef SUPPORTS_READ_OFFSETS
5662 return true;
5663 #else
5664 return false;
5665 #endif
5668 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5669 to tell gdb about. */
5672 linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5674 #ifdef SUPPORTS_READ_OFFSETS
5675 unsigned long text, text_end, data;
5676 int pid = current_thread->id.lwp ();
5678 errno = 0;
5680 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5681 (PTRACE_TYPE_ARG4) 0);
5682 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5683 (PTRACE_TYPE_ARG4) 0);
5684 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5685 (PTRACE_TYPE_ARG4) 0);
5687 if (errno == 0)
5689 /* Both text and data offsets produced at compile-time (and so
5690 used by gdb) are relative to the beginning of the program,
5691 with the data segment immediately following the text segment.
5692 However, the actual runtime layout in memory may put the data
5693 somewhere else, so when we send gdb a data base-address, we
5694 use the real data base address and subtract the compile-time
5695 data base-address from it (which is just the length of the
5696 text segment). BSS immediately follows data in both
5697 cases. */
5698 *text_p = text;
5699 *data_p = data - (text_end - text);
5701 return 1;
5703 return 0;
5704 #else
5705 gdb_assert_not_reached ("target op read_offsets not supported");
5706 #endif
5709 bool
5710 linux_process_target::supports_get_tls_address ()
5712 #ifdef USE_THREAD_DB
5713 return true;
5714 #else
5715 return false;
5716 #endif
5720 linux_process_target::get_tls_address (thread_info *thread,
5721 CORE_ADDR offset,
5722 CORE_ADDR load_module,
5723 CORE_ADDR *address)
5725 #ifdef USE_THREAD_DB
5726 return thread_db_get_tls_address (thread, offset, load_module, address);
5727 #else
5728 return -1;
5729 #endif
5732 bool
5733 linux_process_target::supports_qxfer_osdata ()
5735 return true;
5739 linux_process_target::qxfer_osdata (const char *annex,
5740 unsigned char *readbuf,
5741 unsigned const char *writebuf,
5742 CORE_ADDR offset, int len)
5744 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5747 void
5748 linux_process_target::siginfo_fixup (siginfo_t *siginfo,
5749 gdb_byte *inf_siginfo, int direction)
5751 bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);
5753 /* If there was no callback, or the callback didn't do anything,
5754 then just do a straight memcpy. */
5755 if (!done)
5757 if (direction == 1)
5758 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5759 else
5760 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5764 bool
5765 linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
5766 int direction)
5768 return false;
5771 bool
5772 linux_process_target::supports_qxfer_siginfo ()
5774 return true;
5778 linux_process_target::qxfer_siginfo (const char *annex,
5779 unsigned char *readbuf,
5780 unsigned const char *writebuf,
5781 CORE_ADDR offset, int len)
5783 siginfo_t siginfo;
5784 gdb_byte inf_siginfo[sizeof (siginfo_t)];
5786 if (current_thread == NULL)
5787 return -1;
5789 int pid = current_thread->id.lwp ();
5791 threads_debug_printf ("%s siginfo for lwp %d.",
5792 readbuf != NULL ? "Reading" : "Writing",
5793 pid);
5795 if (offset >= sizeof (siginfo))
5796 return -1;
5798 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5799 return -1;
5801 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5802 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5803 inferior with a 64-bit GDBSERVER should look the same as debugging it
5804 with a 32-bit GDBSERVER, we need to convert it. */
5805 siginfo_fixup (&siginfo, inf_siginfo, 0);
5807 if (offset + len > sizeof (siginfo))
5808 len = sizeof (siginfo) - offset;
5810 if (readbuf != NULL)
5811 memcpy (readbuf, inf_siginfo + offset, len);
5812 else
5814 memcpy (inf_siginfo + offset, writebuf, len);
5816 /* Convert back to ptrace layout before flushing it out. */
5817 siginfo_fixup (&siginfo, inf_siginfo, 1);
5819 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5820 return -1;
5823 return len;
5826 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
5827 so we notice when children change state; as the handler for the
5828 sigsuspend in my_waitpid. */
5830 static void
5831 sigchld_handler (int signo)
5833 int old_errno = errno;
5835 if (debug_threads)
5839 /* Use the async signal safe debug function. */
5840 if (debug_write ("sigchld_handler\n",
5841 sizeof ("sigchld_handler\n") - 1) < 0)
5842 break; /* just ignore */
5843 } while (0);
5846 if (target_is_async_p ())
5847 async_file_mark (); /* trigger a linux_wait */
5849 errno = old_errno;
5852 bool
5853 linux_process_target::supports_non_stop ()
5855 return true;
5858 bool
5859 linux_process_target::async (bool enable)
5861 bool previous = target_is_async_p ();
5863 threads_debug_printf ("async (%d), previous=%d",
5864 enable, previous);
5866 if (previous != enable)
5868 sigset_t mask;
5869 sigemptyset (&mask);
5870 sigaddset (&mask, SIGCHLD);
5872 gdb_sigmask (SIG_BLOCK, &mask, NULL);
5874 if (enable)
5876 if (!linux_event_pipe.open_pipe ())
5878 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
5880 warning ("creating event pipe failed.");
5881 return previous;
5884 /* Register the event loop handler. */
5885 add_file_handler (linux_event_pipe.event_fd (),
5886 handle_target_event, NULL,
5887 "linux-low");
5889 /* Always trigger a linux_wait. */
5890 async_file_mark ();
5892 else
5894 delete_file_handler (linux_event_pipe.event_fd ());
5896 linux_event_pipe.close_pipe ();
5899 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
5902 return previous;
5906 linux_process_target::start_non_stop (bool nonstop)
5908 /* Register or unregister from event-loop accordingly. */
5909 target_async (nonstop);
5911 if (target_is_async_p () != (nonstop != false))
5912 return -1;
5914 return 0;
5917 bool
5918 linux_process_target::supports_multi_process ()
5920 return true;
5923 /* Check if fork events are supported. */
5925 bool
5926 linux_process_target::supports_fork_events ()
5928 return true;
5931 /* Check if vfork events are supported. */
5933 bool
5934 linux_process_target::supports_vfork_events ()
5936 return true;
5939 /* Return the set of supported thread options. */
5941 gdb_thread_options
5942 linux_process_target::supported_thread_options ()
5944 return GDB_THREAD_OPTION_CLONE | GDB_THREAD_OPTION_EXIT;
5947 /* Check if exec events are supported. */
5949 bool
5950 linux_process_target::supports_exec_events ()
5952 return true;
5955 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5956 ptrace flags for all inferiors. This is in case the new GDB connection
5957 doesn't support the same set of events that the previous one did. */
5959 void
5960 linux_process_target::handle_new_gdb_connection ()
5962 /* Request that all the lwps reset their ptrace options. */
5963 for_each_thread ([] (thread_info *thread)
5965 struct lwp_info *lwp = get_thread_lwp (thread);
5967 if (!lwp->stopped)
5969 /* Stop the lwp so we can modify its ptrace options. */
5970 lwp->must_set_ptrace_flags = 1;
5971 linux_stop_lwp (lwp);
5973 else
5975 /* Already stopped; go ahead and set the ptrace options. */
5976 process_info *proc = find_process_pid (thread->id.pid ());
5977 int options = linux_low_ptrace_options (proc->attached);
5979 linux_enable_event_reporting (thread->id.lwp (), options);
5980 lwp->must_set_ptrace_flags = 0;
5986 linux_process_target::handle_monitor_command (char *mon)
5988 #ifdef USE_THREAD_DB
5989 return thread_db_handle_monitor_command (mon);
5990 #else
5991 return 0;
5992 #endif
5996 linux_process_target::core_of_thread (ptid_t ptid)
5998 return linux_common_core_of_thread (ptid);
6001 bool
6002 linux_process_target::supports_disable_randomization ()
6004 return true;
6007 bool
6008 linux_process_target::supports_agent ()
6010 return true;
6013 bool
6014 linux_process_target::supports_range_stepping ()
6016 if (supports_software_single_step ())
6017 return true;
6019 return low_supports_range_stepping ();
6022 bool
6023 linux_process_target::low_supports_range_stepping ()
6025 return false;
6028 bool
6029 linux_process_target::supports_pid_to_exec_file ()
6031 return true;
6034 const char *
6035 linux_process_target::pid_to_exec_file (int pid)
6037 return linux_proc_pid_to_exec_file (pid);
6040 bool
6041 linux_process_target::supports_multifs ()
6043 return true;
6047 linux_process_target::multifs_open (int pid, const char *filename,
6048 int flags, mode_t mode)
6050 return linux_mntns_open_cloexec (pid, filename, flags, mode);
6054 linux_process_target::multifs_unlink (int pid, const char *filename)
6056 return linux_mntns_unlink (pid, filename);
6059 ssize_t
6060 linux_process_target::multifs_readlink (int pid, const char *filename,
6061 char *buf, size_t bufsiz)
6063 return linux_mntns_readlink (pid, filename, buf, bufsiz);
6066 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6067 struct target_loadseg
6069 /* Core address to which the segment is mapped. */
6070 Elf32_Addr addr;
6071 /* VMA recorded in the program header. */
6072 Elf32_Addr p_vaddr;
6073 /* Size of this segment in memory. */
6074 Elf32_Word p_memsz;
6077 # if defined PT_GETDSBT
6078 struct target_loadmap
6080 /* Protocol version number, must be zero. */
6081 Elf32_Word version;
6082 /* Pointer to the DSBT table, its size, and the DSBT index. */
6083 unsigned *dsbt_table;
6084 unsigned dsbt_size, dsbt_index;
6085 /* Number of segments in this map. */
6086 Elf32_Word nsegs;
6087 /* The actual memory map. */
6088 struct target_loadseg segs[/*nsegs*/];
6090 # define LINUX_LOADMAP PT_GETDSBT
6091 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6092 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6093 # else
6094 struct target_loadmap
6096 /* Protocol version number, must be zero. */
6097 Elf32_Half version;
6098 /* Number of segments in this map. */
6099 Elf32_Half nsegs;
6100 /* The actual memory map. */
6101 struct target_loadseg segs[/*nsegs*/];
6103 # define LINUX_LOADMAP PTRACE_GETFDPIC
6104 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6105 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6106 # endif
6108 bool
6109 linux_process_target::supports_read_loadmap ()
6111 return true;
6115 linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
6116 unsigned char *myaddr, unsigned int len)
6118 int pid = current_thread->id.lwp ();
6119 int addr = -1;
6120 struct target_loadmap *data = NULL;
6121 unsigned int actual_length, copy_length;
6123 if (strcmp (annex, "exec") == 0)
6124 addr = (int) LINUX_LOADMAP_EXEC;
6125 else if (strcmp (annex, "interp") == 0)
6126 addr = (int) LINUX_LOADMAP_INTERP;
6127 else
6128 return -1;
6130 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6131 return -1;
6133 if (data == NULL)
6134 return -1;
6136 actual_length = sizeof (struct target_loadmap)
6137 + sizeof (struct target_loadseg) * data->nsegs;
6139 if (offset < 0 || offset > actual_length)
6140 return -1;
6142 copy_length = actual_length - offset < len ? actual_length - offset : len;
6143 memcpy (myaddr, (char *) data + offset, copy_length);
6144 return copy_length;
6146 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6148 bool
6149 linux_process_target::supports_catch_syscall ()
6151 return low_supports_catch_syscall ();
6154 bool
6155 linux_process_target::low_supports_catch_syscall ()
6157 return false;
6160 CORE_ADDR
6161 linux_process_target::read_pc (regcache *regcache)
6163 if (!low_supports_breakpoints ())
6164 return 0;
6166 return low_get_pc (regcache);
6169 void
6170 linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
6172 gdb_assert (low_supports_breakpoints ());
6174 low_set_pc (regcache, pc);
6177 bool
6178 linux_process_target::supports_thread_stopped ()
6180 return true;
6183 bool
6184 linux_process_target::thread_stopped (thread_info *thread)
6186 return get_thread_lwp (thread)->stopped;
6189 bool
6190 linux_process_target::any_resumed ()
6192 bool any_resumed;
6194 auto status_pending_p_any = [&] (thread_info *thread)
6196 return status_pending_p_callback (thread, minus_one_ptid);
6199 auto not_stopped = [&] (thread_info *thread)
6201 return not_stopped_callback (thread, minus_one_ptid);
6204 /* Find a resumed LWP, if any. */
6205 if (find_thread (status_pending_p_any) != NULL)
6206 any_resumed = 1;
6207 else if (find_thread (not_stopped) != NULL)
6208 any_resumed = 1;
6209 else
6210 any_resumed = 0;
6212 return any_resumed;
6215 /* This exposes stop-all-threads functionality to other modules. */
6217 void
6218 linux_process_target::pause_all (bool freeze)
6220 stop_all_lwps (freeze, NULL);
6223 /* This exposes unstop-all-threads functionality to other gdbserver
6224 modules. */
6226 void
6227 linux_process_target::unpause_all (bool unfreeze)
6229 unstop_all_lwps (unfreeze, NULL);
6232 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6234 static int
6235 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6236 CORE_ADDR *phdr_memaddr, int *num_phdr)
6238 char filename[PATH_MAX];
6239 int fd;
6240 const int auxv_size = is_elf64
6241 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6242 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6244 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6246 fd = open (filename, O_RDONLY);
6247 if (fd < 0)
6248 return 1;
6250 *phdr_memaddr = 0;
6251 *num_phdr = 0;
6252 while (read (fd, buf, auxv_size) == auxv_size
6253 && (*phdr_memaddr == 0 || *num_phdr == 0))
6255 if (is_elf64)
6257 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6259 switch (aux->a_type)
6261 case AT_PHDR:
6262 *phdr_memaddr = aux->a_un.a_val;
6263 break;
6264 case AT_PHNUM:
6265 *num_phdr = aux->a_un.a_val;
6266 break;
6269 else
6271 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6273 switch (aux->a_type)
6275 case AT_PHDR:
6276 *phdr_memaddr = aux->a_un.a_val;
6277 break;
6278 case AT_PHNUM:
6279 *num_phdr = aux->a_un.a_val;
6280 break;
6285 close (fd);
6287 if (*phdr_memaddr == 0 || *num_phdr == 0)
6289 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6290 "phdr_memaddr = %ld, phdr_num = %d",
6291 (long) *phdr_memaddr, *num_phdr);
6292 return 2;
6295 return 0;
6298 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6300 static CORE_ADDR
6301 get_dynamic (const int pid, const int is_elf64)
6303 CORE_ADDR phdr_memaddr, relocation;
6304 int num_phdr, i;
6305 unsigned char *phdr_buf;
6306 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6308 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6309 return 0;
6311 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6312 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6314 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6315 return 0;
6317 /* Compute relocation: it is expected to be 0 for "regular" executables,
6318 non-zero for PIE ones. */
6319 relocation = -1;
6320 for (i = 0; relocation == -1 && i < num_phdr; i++)
6321 if (is_elf64)
6323 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6325 if (p->p_type == PT_PHDR)
6326 relocation = phdr_memaddr - p->p_vaddr;
6328 else
6330 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6332 if (p->p_type == PT_PHDR)
6333 relocation = phdr_memaddr - p->p_vaddr;
6336 if (relocation == -1)
6338 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6339 any real world executables, including PIE executables, have always
6340 PT_PHDR present. PT_PHDR is not present in some shared libraries or
6341 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
6342 or present DT_DEBUG anyway (fpc binaries are statically linked).
6344 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6346 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6348 return 0;
6351 for (i = 0; i < num_phdr; i++)
6353 if (is_elf64)
6355 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6357 if (p->p_type == PT_DYNAMIC)
6358 return p->p_vaddr + relocation;
6360 else
6362 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6364 if (p->p_type == PT_DYNAMIC)
6365 return p->p_vaddr + relocation;
6369 return 0;
6372 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6373 can be 0 if the inferior does not yet have the library list initialized.
6374 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6375 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6377 static CORE_ADDR
6378 get_r_debug (const int pid, const int is_elf64)
6380 CORE_ADDR dynamic_memaddr;
6381 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6382 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6383 CORE_ADDR map = -1;
6385 dynamic_memaddr = get_dynamic (pid, is_elf64);
6386 if (dynamic_memaddr == 0)
6387 return map;
6389 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6391 if (is_elf64)
6393 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6394 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6395 union
6397 Elf64_Xword map;
6398 unsigned char buf[sizeof (Elf64_Xword)];
6400 rld_map;
6401 #endif
6402 #ifdef DT_MIPS_RLD_MAP
6403 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6405 if (linux_read_memory (dyn->d_un.d_val,
6406 rld_map.buf, sizeof (rld_map.buf)) == 0)
6407 return rld_map.map;
6408 else
6409 break;
6411 #endif /* DT_MIPS_RLD_MAP */
6412 #ifdef DT_MIPS_RLD_MAP_REL
6413 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6415 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6416 rld_map.buf, sizeof (rld_map.buf)) == 0)
6417 return rld_map.map;
6418 else
6419 break;
6421 #endif /* DT_MIPS_RLD_MAP_REL */
6423 if (dyn->d_tag == DT_DEBUG && map == -1)
6424 map = dyn->d_un.d_val;
6426 if (dyn->d_tag == DT_NULL)
6427 break;
6429 else
6431 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6432 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6433 union
6435 Elf32_Word map;
6436 unsigned char buf[sizeof (Elf32_Word)];
6438 rld_map;
6439 #endif
6440 #ifdef DT_MIPS_RLD_MAP
6441 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6443 if (linux_read_memory (dyn->d_un.d_val,
6444 rld_map.buf, sizeof (rld_map.buf)) == 0)
6445 return rld_map.map;
6446 else
6447 break;
6449 #endif /* DT_MIPS_RLD_MAP */
6450 #ifdef DT_MIPS_RLD_MAP_REL
6451 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6453 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6454 rld_map.buf, sizeof (rld_map.buf)) == 0)
6455 return rld_map.map;
6456 else
6457 break;
6459 #endif /* DT_MIPS_RLD_MAP_REL */
6461 if (dyn->d_tag == DT_DEBUG && map == -1)
6462 map = dyn->d_un.d_val;
6464 if (dyn->d_tag == DT_NULL)
6465 break;
6468 dynamic_memaddr += dyn_size;
6471 return map;
6474 /* Read one pointer from MEMADDR in the inferior. */
6476 static int
6477 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6479 int ret;
6481 /* Go through a union so this works on either big or little endian
6482 hosts, when the inferior's pointer size is smaller than the size
6483 of CORE_ADDR. It is assumed the inferior's endianness is the
6484 same of the superior's. */
6485 union
6487 CORE_ADDR core_addr;
6488 unsigned int ui;
6489 unsigned char uc;
6490 } addr;
6492 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6493 if (ret == 0)
6495 if (ptr_size == sizeof (CORE_ADDR))
6496 *ptr = addr.core_addr;
6497 else if (ptr_size == sizeof (unsigned int))
6498 *ptr = addr.ui;
6499 else
6500 gdb_assert_not_reached ("unhandled pointer size");
6502 return ret;
6505 bool
6506 linux_process_target::supports_qxfer_libraries_svr4 ()
6508 return true;
6511 struct link_map_offsets
6513 /* Offset and size of r_debug.r_version. */
6514 int r_version_offset;
6516 /* Offset and size of r_debug.r_map. */
6517 int r_map_offset;
6519 /* Offset of r_debug_extended.r_next. */
6520 int r_next_offset;
6522 /* Offset to l_addr field in struct link_map. */
6523 int l_addr_offset;
6525 /* Offset to l_name field in struct link_map. */
6526 int l_name_offset;
6528 /* Offset to l_ld field in struct link_map. */
6529 int l_ld_offset;
6531 /* Offset to l_next field in struct link_map. */
6532 int l_next_offset;
6534 /* Offset to l_prev field in struct link_map. */
6535 int l_prev_offset;
6538 static const link_map_offsets lmo_32bit_offsets =
6540 0, /* r_version offset. */
6541 4, /* r_debug.r_map offset. */
6542 20, /* r_debug_extended.r_next. */
6543 0, /* l_addr offset in link_map. */
6544 4, /* l_name offset in link_map. */
6545 8, /* l_ld offset in link_map. */
6546 12, /* l_next offset in link_map. */
6547 16 /* l_prev offset in link_map. */
6550 static const link_map_offsets lmo_64bit_offsets =
6552 0, /* r_version offset. */
6553 8, /* r_debug.r_map offset. */
6554 40, /* r_debug_extended.r_next. */
6555 0, /* l_addr offset in link_map. */
6556 8, /* l_name offset in link_map. */
6557 16, /* l_ld offset in link_map. */
6558 24, /* l_next offset in link_map. */
6559 32 /* l_prev offset in link_map. */
6562 /* Get the loaded shared libraries from one namespace. */
6564 static void
6565 read_link_map (std::string &document, CORE_ADDR lmid, CORE_ADDR lm_addr,
6566 CORE_ADDR lm_prev, int ptr_size, const link_map_offsets *lmo)
6568 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6570 while (lm_addr
6571 && read_one_ptr (lm_addr + lmo->l_name_offset,
6572 &l_name, ptr_size) == 0
6573 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6574 &l_addr, ptr_size) == 0
6575 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6576 &l_ld, ptr_size) == 0
6577 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6578 &l_prev, ptr_size) == 0
6579 && read_one_ptr (lm_addr + lmo->l_next_offset,
6580 &l_next, ptr_size) == 0)
6582 unsigned char libname[PATH_MAX];
6584 if (lm_prev != l_prev)
6586 warning ("Corrupted shared library list: 0x%s != 0x%s",
6587 paddress (lm_prev), paddress (l_prev));
6588 break;
6591 /* Not checking for error because reading may stop before we've got
6592 PATH_MAX worth of characters. */
6593 libname[0] = '\0';
6594 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6595 libname[sizeof (libname) - 1] = '\0';
6596 if (libname[0] != '\0')
6598 string_appendf (document, "<library name=\"");
6599 xml_escape_text_append (document, (char *) libname);
6600 string_appendf (document, "\" lm=\"0x%s\" l_addr=\"0x%s\" "
6601 "l_ld=\"0x%s\" lmid=\"0x%s\"/>",
6602 paddress (lm_addr), paddress (l_addr),
6603 paddress (l_ld), paddress (lmid));
6606 lm_prev = lm_addr;
6607 lm_addr = l_next;
6611 /* Construct qXfer:libraries-svr4:read reply. */
6614 linux_process_target::qxfer_libraries_svr4 (const char *annex,
6615 unsigned char *readbuf,
6616 unsigned const char *writebuf,
6617 CORE_ADDR offset, int len)
6619 struct process_info_private *const priv = current_process ()->priv;
6620 char filename[PATH_MAX];
6621 int is_elf64;
6622 unsigned int machine;
6623 CORE_ADDR lmid = 0, lm_addr = 0, lm_prev = 0;
6625 if (writebuf != NULL)
6626 return -2;
6627 if (readbuf == NULL)
6628 return -1;
6630 int pid = current_thread->id.lwp ();
6631 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6632 is_elf64 = elf_64_file_p (filename, &machine);
6633 const link_map_offsets *lmo;
6634 int ptr_size;
6635 if (is_elf64)
6637 lmo = &lmo_64bit_offsets;
6638 ptr_size = 8;
6640 else
6642 lmo = &lmo_32bit_offsets;
6643 ptr_size = 4;
6646 while (annex[0] != '\0')
6648 const char *sep;
6649 CORE_ADDR *addrp;
6650 int name_len;
6652 sep = strchr (annex, '=');
6653 if (sep == NULL)
6654 break;
6656 name_len = sep - annex;
6657 if (name_len == 4 && startswith (annex, "lmid"))
6658 addrp = &lmid;
6659 else if (name_len == 5 && startswith (annex, "start"))
6660 addrp = &lm_addr;
6661 else if (name_len == 4 && startswith (annex, "prev"))
6662 addrp = &lm_prev;
6663 else
6665 annex = strchr (sep, ';');
6666 if (annex == NULL)
6667 break;
6668 annex++;
6669 continue;
6672 annex = decode_address_to_semicolon (addrp, sep + 1);
6675 std::string document = "<library-list-svr4 version=\"1.0\"";
6677 /* When the starting LM_ADDR is passed in the annex, only traverse that
6678 namespace, which is assumed to be identified by LMID.
6680 Otherwise, start with R_DEBUG and traverse all namespaces we find. */
6681 if (lm_addr != 0)
6683 document += ">";
6684 read_link_map (document, lmid, lm_addr, lm_prev, ptr_size, lmo);
6686 else
6688 if (lm_prev != 0)
6689 warning ("ignoring prev=0x%s without start", paddress (lm_prev));
6691 /* We could interpret LMID as 'provide only the libraries for this
6692 namespace' but GDB is currently only providing lmid, start, and
6693 prev, or nothing. */
6694 if (lmid != 0)
6695 warning ("ignoring lmid=0x%s without start", paddress (lmid));
6697 CORE_ADDR r_debug = priv->r_debug;
6698 if (r_debug == 0)
6699 r_debug = priv->r_debug = get_r_debug (pid, is_elf64);
6701 /* We failed to find DT_DEBUG. Such situation will not change
6702 for this inferior - do not retry it. Report it to GDB as
6703 E01, see for the reasons at the GDB solib-svr4.c side. */
6704 if (r_debug == (CORE_ADDR) -1)
6705 return -1;
6707 /* Terminate the header if we end up with an empty list. */
6708 if (r_debug == 0)
6709 document += ">";
6711 while (r_debug != 0)
6713 int r_version = 0;
6714 if (linux_read_memory (r_debug + lmo->r_version_offset,
6715 (unsigned char *) &r_version,
6716 sizeof (r_version)) != 0)
6718 warning ("unable to read r_version from 0x%s",
6719 paddress (r_debug + lmo->r_version_offset));
6720 break;
6723 if (r_version < 1)
6725 warning ("unexpected r_debug version %d", r_version);
6726 break;
6729 if (read_one_ptr (r_debug + lmo->r_map_offset, &lm_addr,
6730 ptr_size) != 0)
6732 warning ("unable to read r_map from 0x%s",
6733 paddress (r_debug + lmo->r_map_offset));
6734 break;
6737 /* We read the entire namespace. */
6738 lm_prev = 0;
6740 /* The first entry corresponds to the main executable unless the
6741 dynamic loader was loaded late by a static executable. But
6742 in such case the main executable does not have PT_DYNAMIC
6743 present and we would not have gotten here. */
6744 if (r_debug == priv->r_debug)
6746 if (lm_addr != 0)
6747 string_appendf (document, " main-lm=\"0x%s\">",
6748 paddress (lm_addr));
6749 else
6750 document += ">";
6752 lm_prev = lm_addr;
6753 if (read_one_ptr (lm_addr + lmo->l_next_offset,
6754 &lm_addr, ptr_size) != 0)
6756 warning ("unable to read l_next from 0x%s",
6757 paddress (lm_addr + lmo->l_next_offset));
6758 break;
6762 read_link_map (document, r_debug, lm_addr, lm_prev, ptr_size, lmo);
6764 if (r_version < 2)
6765 break;
6767 if (read_one_ptr (r_debug + lmo->r_next_offset, &r_debug,
6768 ptr_size) != 0)
6770 warning ("unable to read r_next from 0x%s",
6771 paddress (r_debug + lmo->r_next_offset));
6772 break;
6777 document += "</library-list-svr4>";
6779 int document_len = document.length ();
6780 if (offset < document_len)
6781 document_len -= offset;
6782 else
6783 document_len = 0;
6784 if (len > document_len)
6785 len = document_len;
6787 memcpy (readbuf, document.data () + offset, len);
6789 return len;
6792 #ifdef HAVE_LINUX_BTRACE
6794 bool
6795 linux_process_target::supports_btrace ()
6797 return true;
6800 btrace_target_info *
6801 linux_process_target::enable_btrace (thread_info *tp,
6802 const btrace_config *conf)
6804 return linux_enable_btrace (tp->id, conf);
6807 /* See to_disable_btrace target method. */
6810 linux_process_target::disable_btrace (btrace_target_info *tinfo)
6812 enum btrace_error err;
6814 err = linux_disable_btrace (tinfo);
6815 return (err == BTRACE_ERR_NONE ? 0 : -1);
6818 /* Encode an Intel Processor Trace configuration. */
6820 static void
6821 linux_low_encode_pt_config (std::string *buffer,
6822 const struct btrace_data_pt_config *config)
6824 *buffer += "<pt-config>\n";
6826 switch (config->cpu.vendor)
6828 case CV_INTEL:
6829 string_xml_appendf (*buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6830 "model=\"%u\" stepping=\"%u\"/>\n",
6831 config->cpu.family, config->cpu.model,
6832 config->cpu.stepping);
6833 break;
6835 default:
6836 break;
6839 *buffer += "</pt-config>\n";
6842 /* Encode a raw buffer. */
6844 static void
6845 linux_low_encode_raw (std::string *buffer, const gdb_byte *data,
6846 unsigned int size)
6848 if (size == 0)
6849 return;
6851 /* We use hex encoding - see gdbsupport/rsp-low.h. */
6852 *buffer += "<raw>\n";
6854 while (size-- > 0)
6856 char elem[2];
6858 elem[0] = tohex ((*data >> 4) & 0xf);
6859 elem[1] = tohex (*data++ & 0xf);
6861 buffer->append (elem, 2);
6864 *buffer += "</raw>\n";
6867 /* See to_read_btrace target method. */
6870 linux_process_target::read_btrace (btrace_target_info *tinfo,
6871 std::string *buffer,
6872 enum btrace_read_type type)
6874 struct btrace_data btrace;
6875 enum btrace_error err;
6877 err = linux_read_btrace (&btrace, tinfo, type);
6878 if (err != BTRACE_ERR_NONE)
6880 if (err == BTRACE_ERR_OVERFLOW)
6881 *buffer += "E.Overflow.";
6882 else
6883 *buffer += "E.Generic Error.";
6885 return -1;
6888 switch (btrace.format)
6890 case BTRACE_FORMAT_NONE:
6891 *buffer += "E.No Trace.";
6892 return -1;
6894 case BTRACE_FORMAT_BTS:
6895 *buffer += "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n";
6896 *buffer += "<btrace version=\"1.0\">\n";
6898 for (const btrace_block &block : *btrace.variant.bts.blocks)
6899 string_xml_appendf (*buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6900 paddress (block.begin), paddress (block.end));
6902 *buffer += "</btrace>\n";
6903 break;
6905 case BTRACE_FORMAT_PT:
6906 *buffer += "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n";
6907 *buffer += "<btrace version=\"1.0\">\n";
6908 *buffer += "<pt>\n";
6910 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
6912 linux_low_encode_raw (buffer, btrace.variant.pt.data,
6913 btrace.variant.pt.size);
6915 *buffer += "</pt>\n";
6916 *buffer += "</btrace>\n";
6917 break;
6919 default:
6920 *buffer += "E.Unsupported Trace Format.";
6921 return -1;
6924 return 0;
6927 /* See to_btrace_conf target method. */
6930 linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
6931 std::string *buffer)
6933 const struct btrace_config *conf;
6935 *buffer += "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n";
6936 *buffer += "<btrace-conf version=\"1.0\">\n";
6938 conf = linux_btrace_conf (tinfo);
6939 if (conf != NULL)
6941 switch (conf->format)
6943 case BTRACE_FORMAT_NONE:
6944 break;
6946 case BTRACE_FORMAT_BTS:
6947 string_xml_appendf (*buffer, "<bts");
6948 string_xml_appendf (*buffer, " size=\"0x%x\"", conf->bts.size);
6949 string_xml_appendf (*buffer, " />\n");
6950 break;
6952 case BTRACE_FORMAT_PT:
6953 string_xml_appendf (*buffer, "<pt");
6954 string_xml_appendf (*buffer, " size=\"0x%x\"", conf->pt.size);
6955 string_xml_appendf (*buffer, "/>\n");
6956 string_xml_appendf (*buffer, " ptwrite=\"%s\"",
6957 conf->pt.ptwrite ? "yes" : "no");
6958 string_xml_appendf (*buffer, " event-tracing=\"%s\"",
6959 conf->pt.event_tracing ? "yes" : "no");
6960 string_xml_appendf (*buffer, "/>\n");
6961 break;
6965 *buffer += "</btrace-conf>\n";
6966 return 0;
6968 #endif /* HAVE_LINUX_BTRACE */
6970 /* See nat/linux-nat.h. */
6972 ptid_t
6973 current_lwp_ptid (void)
6975 return current_thread->id;
6978 /* A helper function that copies NAME to DEST, replacing non-printable
6979 characters with '?'. Returns the original DEST as a
6980 convenience. */
6982 static const char *
6983 replace_non_ascii (char *dest, const char *name)
6985 const char *result = dest;
6986 while (*name != '\0')
6988 if (!ISPRINT (*name))
6989 *dest++ = '?';
6990 else
6991 *dest++ = *name;
6992 ++name;
6994 *dest = '\0';
6995 return result;
6998 const char *
6999 linux_process_target::thread_name (ptid_t thread)
7001 static char dest[100];
7003 const char *name = linux_proc_tid_get_name (thread);
7004 if (name == nullptr)
7005 return nullptr;
7007 /* Linux limits the comm file to 16 bytes (including the trailing
7008 \0. If the program or thread name is set when using a multi-byte
7009 encoding, this might cause it to be truncated mid-character. In
7010 this situation, sending the truncated form in an XML <thread>
7011 response will cause a parse error in gdb. So, instead convert
7012 from the locale's encoding (we can't be sure this is the correct
7013 encoding, but it's as good a guess as we have) to UTF-8, but in a
7014 way that ignores any encoding errors. See PR remote/30618. */
7015 const char *cset = nl_langinfo (CODESET);
7016 iconv_t handle = iconv_open ("UTF-8//IGNORE", cset);
7017 if (handle == (iconv_t) -1)
7018 return replace_non_ascii (dest, name);
7020 size_t inbytes = strlen (name);
7021 char *inbuf = const_cast<char *> (name);
7022 size_t outbytes = sizeof (dest);
7023 char *outbuf = dest;
7024 size_t result = iconv (handle, &inbuf, &inbytes, &outbuf, &outbytes);
7026 if (result == (size_t) -1)
7028 if (errno == E2BIG)
7029 outbuf = &dest[sizeof (dest) - 1];
7030 else if ((errno == EILSEQ || errno == EINVAL)
7031 && outbuf < &dest[sizeof (dest) - 2])
7032 *outbuf++ = '?';
7034 *outbuf = '\0';
7036 iconv_close (handle);
7037 return *dest == '\0' ? nullptr : dest;
#if USE_THREAD_DB
bool
linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
				     int *handle_len)
{
  /* The libthread_db layer owns the notion of a thread handle; simply
     delegate the lookup to it.  */
  return thread_db_thread_handle (ptid, handle, handle_len);
}
#endif
7049 thread_info *
7050 linux_process_target::thread_pending_parent (thread_info *thread)
7052 lwp_info *parent = get_thread_lwp (thread)->pending_parent ();
7054 if (parent == nullptr)
7055 return nullptr;
7057 return parent->thread;
7060 thread_info *
7061 linux_process_target::thread_pending_child (thread_info *thread,
7062 target_waitkind *kind)
7064 lwp_info *child = get_thread_lwp (thread)->pending_child (kind);
7066 if (child == nullptr)
7067 return nullptr;
7069 return child->thread;
7072 /* Default implementation of linux_target_ops method "set_pc" for
7073 32-bit pc register which is literally named "pc". */
7075 void
7076 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7078 uint32_t newpc = pc;
7080 supply_register_by_name (regcache, "pc", &newpc);
7083 /* Default implementation of linux_target_ops method "get_pc" for
7084 32-bit pc register which is literally named "pc". */
7086 CORE_ADDR
7087 linux_get_pc_32bit (struct regcache *regcache)
7089 uint32_t pc;
7091 collect_register_by_name (regcache, "pc", &pc);
7092 threads_debug_printf ("stop pc is 0x%" PRIx32, pc);
7093 return pc;
7096 /* Default implementation of linux_target_ops method "set_pc" for
7097 64-bit pc register which is literally named "pc". */
7099 void
7100 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7102 uint64_t newpc = pc;
7104 supply_register_by_name (regcache, "pc", &newpc);
7107 /* Default implementation of linux_target_ops method "get_pc" for
7108 64-bit pc register which is literally named "pc". */
7110 CORE_ADDR
7111 linux_get_pc_64bit (struct regcache *regcache)
7113 uint64_t pc;
7115 collect_register_by_name (regcache, "pc", &pc);
7116 threads_debug_printf ("stop pc is 0x%" PRIx64, pc);
7117 return pc;
7120 /* See linux-low.h. */
7123 linux_get_auxv (int pid, int wordsize, CORE_ADDR match, CORE_ADDR *valp)
7125 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7126 int offset = 0;
7128 gdb_assert (wordsize == 4 || wordsize == 8);
7130 while (the_target->read_auxv (pid, offset, data, 2 * wordsize)
7131 == 2 * wordsize)
7133 if (wordsize == 4)
7135 uint32_t *data_p = (uint32_t *) data;
7136 if (data_p[0] == match)
7138 *valp = data_p[1];
7139 return 1;
7142 else
7144 uint64_t *data_p = (uint64_t *) data;
7145 if (data_p[0] == match)
7147 *valp = data_p[1];
7148 return 1;
7152 offset += 2 * wordsize;
7155 return 0;
7158 /* See linux-low.h. */
7160 CORE_ADDR
7161 linux_get_hwcap (int pid, int wordsize)
7163 CORE_ADDR hwcap = 0;
7164 linux_get_auxv (pid, wordsize, AT_HWCAP, &hwcap);
7165 return hwcap;
7168 /* See linux-low.h. */
7170 CORE_ADDR
7171 linux_get_hwcap2 (int pid, int wordsize)
7173 CORE_ADDR hwcap2 = 0;
7174 linux_get_auxv (pid, wordsize, AT_HWCAP2, &hwcap2);
7175 return hwcap2;
#ifdef HAVE_LINUX_REGSETS
void
initialize_regsets_info (struct regsets_info *info)
{
  /* The regsets array is terminated by an entry with a negative size;
     count the entries up to that sentinel.  */
  int count = 0;

  while (info->regsets[count].size >= 0)
    ++count;

  info->num_regsets = count;
}
#endif
7189 void
7190 initialize_low (void)
7192 struct sigaction sigchld_action;
7194 memset (&sigchld_action, 0, sizeof (sigchld_action));
7195 set_target_ops (the_linux_target);
7197 linux_ptrace_init_warnings ();
7198 linux_proc_init_warnings ();
7200 sigchld_action.sa_handler = sigchld_handler;
7201 sigemptyset (&sigchld_action.sa_mask);
7202 sigchld_action.sa_flags = SA_RESTART;
7203 sigaction (SIGCHLD, &sigchld_action, NULL);
7205 initialize_low_arch ();
7207 linux_check_ptrace_features ();