Automatic date update in version.in
[binutils-gdb.git] / gdbserver / linux-low.cc
blob65268a6ee6cd6906bcb0605d303777e7e75dbdbd
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2024 Free Software Foundation, Inc.
4 This file is part of GDB.
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19 #include "linux-low.h"
20 #include "nat/linux-osdata.h"
21 #include "gdbsupport/agent.h"
22 #include "tdesc.h"
23 #include "gdbsupport/event-loop.h"
24 #include "gdbsupport/event-pipe.h"
25 #include "gdbsupport/rsp-low.h"
26 #include "gdbsupport/signals-state-save-restore.h"
27 #include "nat/linux-nat.h"
28 #include "nat/linux-waitpid.h"
29 #include "gdbsupport/gdb_wait.h"
30 #include "nat/gdb_ptrace.h"
31 #include "nat/linux-ptrace.h"
32 #include "nat/linux-procfs.h"
33 #include "nat/linux-personality.h"
34 #include <signal.h>
35 #include <sys/ioctl.h>
36 #include <fcntl.h>
37 #include <unistd.h>
38 #include <sys/syscall.h>
39 #include <sched.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include <sys/stat.h>
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #include <langinfo.h>
47 #include <iconv.h>
48 #include "gdbsupport/filestuff.h"
49 #include "gdbsupport/gdb-safe-ctype.h"
50 #include "tracepoint.h"
51 #include <inttypes.h>
52 #include "gdbsupport/common-inferior.h"
53 #include "nat/fork-inferior.h"
54 #include "gdbsupport/environ.h"
55 #include "gdbsupport/gdb-sigmask.h"
56 #include "gdbsupport/scoped_restore.h"
57 #ifndef ELFMAG0
58 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
59 then ELFMAG0 will have been defined. If it didn't get included by
60 gdb_proc_service.h then including it will likely introduce a duplicate
61 definition of elf_fpregset_t. */
62 #include <elf.h>
63 #endif
64 #include "nat/linux-namespaces.h"
66 #ifndef O_LARGEFILE
67 #define O_LARGEFILE 0
68 #endif
70 #ifndef AT_HWCAP2
71 #define AT_HWCAP2 26
72 #endif
74 /* Some targets did not define these ptrace constants from the start,
75 so gdbserver defines them locally here. In the future, these may
76 be removed after they are added to asm/ptrace.h. */
77 #if !(defined(PT_TEXT_ADDR) \
78 || defined(PT_DATA_ADDR) \
79 || defined(PT_TEXT_END_ADDR))
80 #if defined(__mcoldfire__)
81 /* These are still undefined in 3.10 kernels. */
82 #define PT_TEXT_ADDR 49*4
83 #define PT_DATA_ADDR 50*4
84 #define PT_TEXT_END_ADDR 51*4
85 /* These are still undefined in 3.10 kernels. */
86 #elif defined(__TMS320C6X__)
87 #define PT_TEXT_ADDR (0x10000*4)
88 #define PT_DATA_ADDR (0x10004*4)
89 #define PT_TEXT_END_ADDR (0x10008*4)
90 #endif
91 #endif
93 #if (defined(__UCLIBC__) \
94 && defined(HAS_NOMMU) \
95 && defined(PT_TEXT_ADDR) \
96 && defined(PT_DATA_ADDR) \
97 && defined(PT_TEXT_END_ADDR))
98 #define SUPPORTS_READ_OFFSETS
99 #endif
101 #ifdef HAVE_LINUX_BTRACE
102 # include "nat/linux-btrace.h"
103 # include "gdbsupport/btrace-common.h"
104 #endif
106 #ifndef HAVE_ELF32_AUXV_T
107 /* Copied from glibc's elf.h. */
108 typedef struct
110 uint32_t a_type; /* Entry type */
111 union
113 uint32_t a_val; /* Integer value */
114 /* We use to have pointer elements added here. We cannot do that,
115 though, since it does not work when using 32-bit definitions
116 on 64-bit platforms and vice versa. */
117 } a_un;
118 } Elf32_auxv_t;
119 #endif
121 #ifndef HAVE_ELF64_AUXV_T
122 /* Copied from glibc's elf.h. */
123 typedef struct
125 uint64_t a_type; /* Entry type */
126 union
128 uint64_t a_val; /* Integer value */
129 /* We use to have pointer elements added here. We cannot do that,
130 though, since it does not work when using 32-bit definitions
131 on 64-bit platforms and vice versa. */
132 } a_un;
133 } Elf64_auxv_t;
134 #endif
136 /* See nat/linux-nat.h. */
137 enum tribool have_ptrace_getregset = TRIBOOL_UNKNOWN;
139 /* Return TRUE if THREAD is the leader thread of the process. */
141 static bool
142 is_leader (thread_info *thread)
144 return thread->id.pid () == thread->id.lwp ();
147 /* Return true if we should report thread exit events to GDB, for
148 THR. */
150 static bool
151 report_exit_events_for (thread_info *thr)
153 client_state &cs = get_client_state ();
155 return (cs.report_thread_events
156 || (thr->thread_options & GDB_THREAD_OPTION_EXIT) != 0);
159 /* LWP accessors. */
161 /* See nat/linux-nat.h. */
163 ptid_t
164 ptid_of_lwp (struct lwp_info *lwp)
166 return lwp->thread->id;
169 /* See nat/linux-nat.h. */
171 void
172 lwp_set_arch_private_info (struct lwp_info *lwp,
173 struct arch_lwp_info *info)
175 lwp->arch_private = info;
178 /* See nat/linux-nat.h. */
180 struct arch_lwp_info *
181 lwp_arch_private_info (struct lwp_info *lwp)
183 return lwp->arch_private;
186 /* See nat/linux-nat.h. */
189 lwp_is_stopped (struct lwp_info *lwp)
191 return lwp->stopped;
194 /* See nat/linux-nat.h. */
196 enum target_stop_reason
197 lwp_stop_reason (struct lwp_info *lwp)
199 return lwp->stop_reason;
202 /* See nat/linux-nat.h. */
205 lwp_is_stepping (struct lwp_info *lwp)
207 return lwp->stepping;
210 /* A list of all unknown processes which receive stop signals. Some
211 other process will presumably claim each of these as forked
212 children momentarily. */
214 struct simple_pid_list
216 /* The process ID. */
217 int pid;
219 /* The status as reported by waitpid. */
220 int status;
222 /* Next in chain. */
223 struct simple_pid_list *next;
225 static struct simple_pid_list *stopped_pids;
227 /* Trivial list manipulation functions to keep track of a list of new
228 stopped processes. */
230 static void
231 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
233 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
235 new_pid->pid = pid;
236 new_pid->status = status;
237 new_pid->next = *listp;
238 *listp = new_pid;
241 static int
242 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
244 struct simple_pid_list **p;
246 for (p = listp; *p != NULL; p = &(*p)->next)
247 if ((*p)->pid == pid)
249 struct simple_pid_list *next = (*p)->next;
251 *statusp = (*p)->status;
252 xfree (*p);
253 *p = next;
254 return 1;
256 return 0;
enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
static stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
274 /* FIXME make into a target method? */
275 int using_threads = 1;
277 /* True if we're presently stabilizing threads (moving them out of
278 jump pads). */
279 static int stabilizing_threads;
281 static void unsuspend_all_lwps (struct lwp_info *except);
282 static void mark_lwp_dead (struct lwp_info *lwp, int wstat,
283 bool thread_event);
284 static int lwp_is_marked_dead (struct lwp_info *lwp);
285 static int kill_lwp (unsigned long lwpid, int signo);
286 static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
287 static int linux_low_ptrace_options (int attached);
288 static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
290 /* When the event-loop is doing a step-over, this points at the thread
291 being stepped. */
292 static ptid_t step_over_bkpt;
294 bool
295 linux_process_target::low_supports_breakpoints ()
297 return false;
300 CORE_ADDR
301 linux_process_target::low_get_pc (regcache *regcache)
303 return 0;
306 void
307 linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
309 gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
312 std::vector<CORE_ADDR>
313 linux_process_target::low_get_next_pcs (regcache *regcache)
315 gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
316 "implemented");
320 linux_process_target::low_decr_pc_after_break ()
322 return 0;
325 /* True if LWP is stopped in its stepping range. */
327 static int
328 lwp_in_step_range (struct lwp_info *lwp)
330 CORE_ADDR pc = lwp->stop_pc;
332 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
335 /* The event pipe registered as a waitable file in the event loop. */
336 static event_pipe linux_event_pipe;
338 /* True if we're currently in async mode. */
339 #define target_is_async_p() (linux_event_pipe.is_open ())
341 static void send_sigstop (struct lwp_info *lwp);
/* Return non-zero if HEADER is a 64-bit ELF file, zero if it is a
   32-bit ELF file, and -1 if it is not an ELF file at all.  On
   success, store the machine (e_machine) in *MACHINE; otherwise store
   EM_NONE there.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  /* Anything without the ELF magic cannot be an ELF file of any
     class.  */
  if (header->e_ident[EI_MAG0] != ELFMAG0
      || header->e_ident[EI_MAG1] != ELFMAG1
      || header->e_ident[EI_MAG2] != ELFMAG2
      || header->e_ident[EI_MAG3] != ELFMAG3)
    {
      *machine = EM_NONE;
      return -1;
    }

  *machine = header->e_machine;
  return header->e_ident[EI_CLASS] == ELFCLASS64;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;

  int fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  ssize_t nread = read (fd, &header, sizeof (header));
  close (fd);

  /* A file too short to contain an ELF header is not a 64-bit ELF
     file (matching the historical behavior of treating short reads
     as "not 64-bit" rather than as an error).  */
  if (nread != sizeof (header))
    return 0;

  return elf_64_header_p (&header, machine);
}
385 /* Accepts an integer PID; Returns true if the executable PID is
386 running is a 64-bit ELF file.. */
389 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
391 char file[PATH_MAX];
393 sprintf (file, "/proc/%d/exe", pid);
394 return elf_64_file_p (file, machine);
397 void
398 linux_process_target::delete_lwp (lwp_info *lwp)
400 thread_info *thr = lwp->thread;
402 threads_debug_printf ("deleting %ld", thr->id.lwp ());
404 thr->process ()->remove_thread (thr);
406 low_delete_thread (lwp->arch_private);
408 delete lwp;
411 void
412 linux_process_target::low_delete_thread (arch_lwp_info *info)
414 /* Default implementation should be overridden if architecture-specific
415 info is being used. */
416 gdb_assert (info == nullptr);
419 /* Open the /proc/PID/mem file for PROC. */
421 static void
422 open_proc_mem_file (process_info *proc)
424 gdb_assert (proc->priv->mem_fd == -1);
426 char filename[64];
427 xsnprintf (filename, sizeof filename, "/proc/%d/mem", proc->pid);
429 proc->priv->mem_fd
430 = gdb_open_cloexec (filename, O_RDWR | O_LARGEFILE, 0).release ();
433 process_info *
434 linux_process_target::add_linux_process_no_mem_file (int pid, int attached)
436 struct process_info *proc;
438 proc = add_process (pid, attached);
439 proc->priv = XCNEW (struct process_info_private);
441 proc->priv->arch_private = low_new_process ();
442 proc->priv->mem_fd = -1;
444 return proc;
448 process_info *
449 linux_process_target::add_linux_process (int pid, int attached)
451 process_info *proc = add_linux_process_no_mem_file (pid, attached);
452 open_proc_mem_file (proc);
453 return proc;
456 void
457 linux_process_target::remove_linux_process (process_info *proc)
459 if (proc->priv->mem_fd >= 0)
460 close (proc->priv->mem_fd);
462 this->low_delete_process (proc->priv->arch_private);
464 xfree (proc->priv);
465 proc->priv = nullptr;
467 remove_process (proc);
470 arch_process_info *
471 linux_process_target::low_new_process ()
473 return nullptr;
476 void
477 linux_process_target::low_delete_process (arch_process_info *info)
479 /* Default implementation must be overridden if architecture-specific
480 info exists. */
481 gdb_assert (info == nullptr);
484 void
485 linux_process_target::low_new_fork (process_info *parent, process_info *child)
487 /* Nop. */
490 void
491 linux_process_target::arch_setup_thread (thread_info *thread)
493 scoped_restore_current_thread restore_thread;
494 switch_to_thread (thread);
496 low_arch_setup ();
500 linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
501 int wstat)
503 client_state &cs = get_client_state ();
504 struct lwp_info *event_lwp = *orig_event_lwp;
505 int event = linux_ptrace_get_extended_event (wstat);
506 thread_info *event_thr = event_lwp->thread;
508 gdb_assert (event_lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);
510 /* All extended events we currently use are mid-syscall. Only
511 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
512 you have to be using PTRACE_SEIZE to get that. */
513 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
515 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
516 || (event == PTRACE_EVENT_CLONE))
518 unsigned long new_pid;
519 int ret, status;
521 /* Get the pid of the new lwp. */
522 ptrace (PTRACE_GETEVENTMSG, event_thr->id.lwp (), (PTRACE_TYPE_ARG3) 0,
523 &new_pid);
525 /* If we haven't already seen the new PID stop, wait for it now. */
526 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
528 /* The new child has a pending SIGSTOP. We can't affect it until it
529 hits the SIGSTOP, but we're already attached. */
531 ret = my_waitpid (new_pid, &status, __WALL);
533 if (ret == -1)
534 perror_with_name ("waiting for new child");
535 else if (ret != new_pid)
536 warning ("wait returned unexpected PID %d", ret);
537 else if (!WIFSTOPPED (status))
538 warning ("wait returned unexpected status 0x%x", status);
541 if (debug_threads)
543 debug_printf ("HEW: Got %s event from LWP %ld, new child is %ld\n",
544 (event == PTRACE_EVENT_FORK ? "fork"
545 : event == PTRACE_EVENT_VFORK ? "vfork"
546 : event == PTRACE_EVENT_CLONE ? "clone"
547 : "???"),
548 event_thr->id.lwp (),
549 new_pid);
552 ptid_t child_ptid = (event != PTRACE_EVENT_CLONE
553 ? ptid_t (new_pid, new_pid)
554 : ptid_t (event_thr->id.pid (), new_pid));
556 process_info *child_proc = nullptr;
558 if (event != PTRACE_EVENT_CLONE)
560 /* Add the new process to the tables before we add the LWP.
561 We need to do this even if the new process will be
562 detached. See breakpoint cloning code further below. */
563 child_proc = add_linux_process (new_pid, 0);
566 lwp_info *child_lwp = add_lwp (child_ptid);
567 gdb_assert (child_lwp != NULL);
568 child_lwp->stopped = 1;
569 if (event != PTRACE_EVENT_CLONE)
570 child_lwp->must_set_ptrace_flags = 1;
571 child_lwp->status_pending_p = 0;
573 thread_info *child_thr = child_lwp->thread;
575 /* If we're suspending all threads, leave this one suspended
576 too. If the fork/clone parent is stepping over a breakpoint,
577 all other threads have been suspended already. Leave the
578 child suspended too. */
579 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
580 || event_lwp->bp_reinsert != 0)
582 threads_debug_printf ("leaving child suspended");
583 child_lwp->suspended = 1;
586 if (event_lwp->bp_reinsert != 0
587 && supports_software_single_step ()
588 && event == PTRACE_EVENT_VFORK)
590 /* If we leave single-step breakpoints there, child will
591 hit it, so uninsert single-step breakpoints from parent
592 (and child). Once vfork child is done, reinsert
593 them back to parent. */
594 uninsert_single_step_breakpoints (event_thr);
597 if (event != PTRACE_EVENT_CLONE)
599 /* Clone the breakpoint lists of the parent. We need to do
600 this even if the new process will be detached, since we
601 will need the process object and the breakpoints to
602 remove any breakpoints from memory when we detach, and
603 the client side will access registers. */
604 gdb_assert (child_proc != NULL);
606 process_info *parent_proc = event_thr->process ();
607 child_proc->attached = parent_proc->attached;
609 clone_all_breakpoints (child_thr, event_thr);
611 target_desc_up tdesc = allocate_target_description ();
612 copy_target_description (tdesc.get (), parent_proc->tdesc);
613 child_proc->tdesc = tdesc.release ();
615 /* Clone arch-specific process data. */
616 low_new_fork (parent_proc, child_proc);
619 /* Save fork/clone info in the parent thread. */
620 if (event == PTRACE_EVENT_FORK)
621 event_lwp->waitstatus.set_forked (child_ptid);
622 else if (event == PTRACE_EVENT_VFORK)
623 event_lwp->waitstatus.set_vforked (child_ptid);
624 else if (event == PTRACE_EVENT_CLONE
625 && (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) != 0)
626 event_lwp->waitstatus.set_thread_cloned (child_ptid);
628 if (event != PTRACE_EVENT_CLONE
629 || (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) != 0)
631 /* The status_pending field contains bits denoting the
632 extended event, so when the pending event is handled, the
633 handler will look at lwp->waitstatus. */
634 event_lwp->status_pending_p = 1;
635 event_lwp->status_pending = wstat;
637 /* Link the threads until the parent's event is passed on to
638 GDB. */
639 event_lwp->relative = child_lwp;
640 child_lwp->relative = event_lwp;
643 /* If the parent thread is doing step-over with single-step
644 breakpoints, the list of single-step breakpoints are cloned
645 from the parent's. Remove them from the child process.
646 In case of vfork, we'll reinsert them back once vforked
647 child is done. */
648 if (event_lwp->bp_reinsert != 0
649 && supports_software_single_step ())
651 /* The child process is forked and stopped, so it is safe
652 to access its memory without stopping all other threads
653 from other processes. */
654 delete_single_step_breakpoints (child_thr);
656 gdb_assert (has_single_step_breakpoints (event_thr));
657 gdb_assert (!has_single_step_breakpoints (child_thr));
660 /* Normally we will get the pending SIGSTOP. But in some cases
661 we might get another signal delivered to the group first.
662 If we do get another signal, be sure not to lose it. */
663 if (WSTOPSIG (status) != SIGSTOP)
665 child_lwp->stop_expected = 1;
666 child_lwp->status_pending_p = 1;
667 child_lwp->status_pending = status;
669 else if (event == PTRACE_EVENT_CLONE && cs.report_thread_events)
671 child_lwp->waitstatus.set_thread_created ();
672 child_lwp->status_pending_p = 1;
673 child_lwp->status_pending = status;
676 if (event == PTRACE_EVENT_CLONE)
678 #ifdef USE_THREAD_DB
679 thread_db_notice_clone (event_thr, child_ptid);
680 #endif
683 if (event == PTRACE_EVENT_CLONE
684 && (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) == 0)
686 threads_debug_printf
687 ("not reporting clone event from LWP %ld, new child is %ld\n",
688 event_thr->id.lwp (),
689 new_pid);
690 return 1;
693 /* Leave the child stopped until GDB processes the parent
694 event. */
695 child_thr->last_resume_kind = resume_stop;
696 child_thr->last_status.set_stopped (GDB_SIGNAL_0);
698 /* Report the event. */
699 threads_debug_printf
700 ("reporting %s event from LWP %ld, new child is %ld\n",
701 (event == PTRACE_EVENT_FORK ? "fork"
702 : event == PTRACE_EVENT_VFORK ? "vfork"
703 : event == PTRACE_EVENT_CLONE ? "clone"
704 : "???"),
705 event_thr->id.lwp (),
706 new_pid);
707 return 0;
709 else if (event == PTRACE_EVENT_VFORK_DONE)
711 event_lwp->waitstatus.set_vfork_done ();
713 if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
715 reinsert_single_step_breakpoints (event_thr);
717 gdb_assert (has_single_step_breakpoints (event_thr));
720 /* Report the event. */
721 return 0;
723 else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
725 std::vector<int> syscalls_to_catch;
726 ptid_t event_ptid;
727 pid_t event_pid;
729 threads_debug_printf ("Got exec event from LWP %ld",
730 event_thr->id.lwp ());
732 /* Get the event ptid. */
733 event_ptid = event_thr->id;
734 event_pid = event_ptid.pid ();
736 /* Save the syscall list from the execing process. */
737 process_info *proc = event_thr->process ();
738 syscalls_to_catch = std::move (proc->syscalls_to_catch);
740 /* Delete the execing process and all its threads. */
741 mourn (proc);
742 switch_to_thread (nullptr);
744 /* Create a new process/lwp/thread. */
745 proc = add_linux_process (event_pid, 0);
746 event_lwp = add_lwp (event_ptid);
747 event_thr = event_lwp->thread;
748 gdb_assert (current_thread == event_thr);
749 arch_setup_thread (event_thr);
751 /* Set the event status. */
752 event_lwp->waitstatus.set_execd
753 (make_unique_xstrdup
754 (linux_proc_pid_to_exec_file (event_thr->id.lwp ())));
756 /* Mark the exec status as pending. */
757 event_lwp->stopped = 1;
758 event_lwp->status_pending_p = 1;
759 event_lwp->status_pending = wstat;
760 event_thr->last_resume_kind = resume_continue;
761 event_thr->last_status.set_ignore ();
763 /* Update syscall state in the new lwp, effectively mid-syscall too. */
764 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
766 /* Restore the list to catch. Don't rely on the client, which is free
767 to avoid sending a new list when the architecture doesn't change.
768 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
769 proc->syscalls_to_catch = std::move (syscalls_to_catch);
771 /* Report the event. */
772 *orig_event_lwp = event_lwp;
773 return 0;
776 internal_error (_("unknown ptrace event %d"), event);
779 CORE_ADDR
780 linux_process_target::get_pc (lwp_info *lwp)
782 process_info *proc = lwp->thread->process ();
783 gdb_assert (!proc->starting_up);
785 if (!low_supports_breakpoints ())
786 return 0;
788 scoped_restore_current_thread restore_thread;
789 switch_to_thread (lwp->thread);
791 regcache *regcache = get_thread_regcache (current_thread);
792 CORE_ADDR pc = low_get_pc (regcache);
794 threads_debug_printf ("pc is 0x%lx", (long) pc);
796 return pc;
799 void
800 linux_process_target::get_syscall_trapinfo (lwp_info *lwp, int *sysno)
802 struct regcache *regcache;
804 scoped_restore_current_thread restore_thread;
805 switch_to_thread (lwp->thread);
807 regcache = get_thread_regcache (current_thread);
808 low_get_syscall_trapinfo (regcache, sysno);
810 threads_debug_printf ("get_syscall_trapinfo sysno %d", *sysno);
813 void
814 linux_process_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
816 /* By default, report an unknown system call number. */
817 *sysno = UNKNOWN_SYSCALL;
820 bool
821 linux_process_target::save_stop_reason (lwp_info *lwp)
823 CORE_ADDR pc;
824 CORE_ADDR sw_breakpoint_pc;
825 siginfo_t siginfo;
827 if (!low_supports_breakpoints ())
828 return false;
830 process_info *proc = lwp->thread->process ();
831 if (proc->starting_up)
833 /* Claim we have the stop PC so that the caller doesn't try to
834 fetch it itself. */
835 return true;
838 pc = get_pc (lwp);
839 sw_breakpoint_pc = pc - low_decr_pc_after_break ();
841 /* breakpoint_at reads from the current thread. */
842 scoped_restore_current_thread restore_thread;
843 switch_to_thread (lwp->thread);
845 if (ptrace (PTRACE_GETSIGINFO, current_thread->id.lwp (),
846 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
848 if (siginfo.si_signo == SIGTRAP)
850 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
851 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
853 /* The si_code is ambiguous on this arch -- check debug
854 registers. */
855 if (!check_stopped_by_watchpoint (lwp))
856 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
858 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
860 /* If we determine the LWP stopped for a SW breakpoint,
861 trust it. Particularly don't check watchpoint
862 registers, because at least on s390, we'd find
863 stopped-by-watchpoint as long as there's a watchpoint
864 set. */
865 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
867 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
869 /* This can indicate either a hardware breakpoint or
870 hardware watchpoint. Check debug registers. */
871 if (!check_stopped_by_watchpoint (lwp))
872 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
874 else if (siginfo.si_code == TRAP_TRACE)
876 /* We may have single stepped an instruction that
877 triggered a watchpoint. In that case, on some
878 architectures (such as x86), instead of TRAP_HWBKPT,
879 si_code indicates TRAP_TRACE, and we need to check
880 the debug registers separately. */
881 if (!check_stopped_by_watchpoint (lwp))
882 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
887 if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
889 threads_debug_printf
890 ("%s stopped by software breakpoint",
891 target_pid_to_str (lwp->thread->id).c_str ());
893 /* Back up the PC if necessary. */
894 if (pc != sw_breakpoint_pc)
896 struct regcache *regcache
897 = get_thread_regcache (current_thread);
898 low_set_pc (regcache, sw_breakpoint_pc);
901 /* Update this so we record the correct stop PC below. */
902 pc = sw_breakpoint_pc;
904 else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
905 threads_debug_printf
906 ("%s stopped by hardware breakpoint",
907 target_pid_to_str (lwp->thread->id).c_str ());
908 else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
909 threads_debug_printf
910 ("%s stopped by hardware watchpoint",
911 target_pid_to_str (lwp->thread->id).c_str ());
912 else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
913 threads_debug_printf
914 ("%s stopped by trace",
915 target_pid_to_str (lwp->thread->id).c_str ());
917 lwp->stop_pc = pc;
918 return true;
921 lwp_info *
922 linux_process_target::add_lwp (ptid_t ptid)
924 lwp_info *lwp = new lwp_info;
926 lwp->thread = find_process_pid (ptid.pid ())->add_thread (ptid, lwp);
928 low_new_thread (lwp);
930 return lwp;
933 void
934 linux_process_target::low_new_thread (lwp_info *info)
936 /* Nop. */
939 /* Callback to be used when calling fork_inferior, responsible for
940 actually initiating the tracing of the inferior. */
942 static void
943 linux_ptrace_fun ()
945 if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
946 (PTRACE_TYPE_ARG4) 0) < 0)
947 trace_start_error_with_name ("ptrace");
949 if (setpgid (0, 0) < 0)
950 trace_start_error_with_name ("setpgid");
952 /* If GDBserver is connected to gdb via stdio, redirect the inferior's
953 stdout to stderr so that inferior i/o doesn't corrupt the connection.
954 Also, redirect stdin to /dev/null. */
955 if (remote_connection_is_stdio ())
957 if (close (0) < 0)
958 trace_start_error_with_name ("close");
959 if (open ("/dev/null", O_RDONLY) < 0)
960 trace_start_error_with_name ("open");
961 if (dup2 (2, 1) < 0)
962 trace_start_error_with_name ("dup2");
963 if (write (2, "stdin/stdout redirected\n",
964 sizeof ("stdin/stdout redirected\n") - 1) < 0)
966 /* Errors ignored. */;
971 /* Start an inferior process and returns its pid.
972 PROGRAM is the name of the program to be started, and PROGRAM_ARGS
973 are its arguments. */
976 linux_process_target::create_inferior (const char *program,
977 const std::string &program_args)
979 client_state &cs = get_client_state ();
980 struct lwp_info *new_lwp;
981 int pid;
982 ptid_t ptid;
985 maybe_disable_address_space_randomization restore_personality
986 (cs.disable_randomization);
988 pid = fork_inferior (program,
989 program_args.c_str (),
990 get_environ ()->envp (), linux_ptrace_fun,
991 NULL, NULL, NULL, NULL);
994 /* When spawning a new process, we can't open the mem file yet. We
995 still have to nurse the process through the shell, and that execs
996 a couple times. The address space a /proc/PID/mem file is
997 accessing is destroyed on exec. */
998 process_info *proc = add_linux_process_no_mem_file (pid, 0);
1000 ptid = ptid_t (pid, pid);
1001 new_lwp = add_lwp (ptid);
1002 new_lwp->must_set_ptrace_flags = 1;
1004 post_fork_inferior (pid, program);
1006 /* PROC is now past the shell running the program we want, so we can
1007 open the /proc/PID/mem file. */
1008 open_proc_mem_file (proc);
1010 return pid;
1013 /* Implement the post_create_inferior target_ops method. */
1015 void
1016 linux_process_target::post_create_inferior ()
1018 struct lwp_info *lwp = get_thread_lwp (current_thread);
1020 low_arch_setup ();
1022 if (lwp->must_set_ptrace_flags)
1024 struct process_info *proc = current_process ();
1025 int options = linux_low_ptrace_options (proc->attached);
1027 linux_enable_event_reporting (current_thread->id.lwp (), options);
1028 lwp->must_set_ptrace_flags = 0;
1033 linux_process_target::attach_lwp (ptid_t ptid)
1035 struct lwp_info *new_lwp;
1036 int lwpid = ptid.lwp ();
1038 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
1039 != 0)
1040 return errno;
1042 new_lwp = add_lwp (ptid);
1044 /* We need to wait for SIGSTOP before being able to make the next
1045 ptrace call on this LWP. */
1046 new_lwp->must_set_ptrace_flags = 1;
1048 if (linux_proc_pid_is_stopped (lwpid))
1050 threads_debug_printf ("Attached to a stopped process");
1052 /* The process is definitely stopped. It is in a job control
1053 stop, unless the kernel predates the TASK_STOPPED /
1054 TASK_TRACED distinction, in which case it might be in a
1055 ptrace stop. Make sure it is in a ptrace stop; from there we
1056 can kill it, signal it, et cetera.
1058 First make sure there is a pending SIGSTOP. Since we are
1059 already attached, the process can not transition from stopped
1060 to running without a PTRACE_CONT; so we know this signal will
1061 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1062 probably already in the queue (unless this kernel is old
1063 enough to use TASK_STOPPED for ptrace stops); but since
1064 SIGSTOP is not an RT signal, it can only be queued once. */
1065 kill_lwp (lwpid, SIGSTOP);
1067 /* Finally, resume the stopped process. This will deliver the
1068 SIGSTOP (or a higher priority signal, just like normal
1069 PTRACE_ATTACH), which we'll catch later on. */
1070 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1073 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
1074 brings it to a halt.
1076 There are several cases to consider here:
1078 1) gdbserver has already attached to the process and is being notified
1079 of a new thread that is being created.
1080 In this case we should ignore that SIGSTOP and resume the
1081 process. This is handled below by setting stop_expected = 1,
1082 and the fact that add_thread sets last_resume_kind ==
1083 resume_continue.
1085 2) This is the first thread (the process thread), and we're attaching
1086 to it via attach_inferior.
1087 In this case we want the process thread to stop.
1088 This is handled by having linux_attach set last_resume_kind ==
1089 resume_stop after we return.
1091 If the pid we are attaching to is also the tgid, we attach to and
1092 stop all the existing threads. Otherwise, we attach to pid and
1093 ignore any other threads in the same group as this pid.
1095 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1096 existing threads.
1097 In this case we want the thread to stop.
1098 FIXME: This case is currently not properly handled.
1099 We should wait for the SIGSTOP but don't. Things work apparently
1100 because enough time passes between when we ptrace (ATTACH) and when
1101 gdb makes the next ptrace call on the thread.
1103 On the other hand, if we are currently trying to stop all threads, we
1104 should treat the new thread as if we had sent it a SIGSTOP. This works
1105 because we are guaranteed that the add_lwp call above added us to the
1106 end of the list, and so the new thread has not yet reached
1107 wait_for_sigstop (but will). */
1108 new_lwp->stop_expected = 1;
1110 return 0;
/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid.lwp ();
      int err;

      threads_debug_printf ("Found new lwp %d", lwpid);

      err = the_linux_target->attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	threads_debug_printf
	  ("Cannot attach to lwp %d: thread is gone (%d: %s)",
	   lwpid, err, safe_strerror (err));
      else if (err != 0)
	{
	  /* Any other failure is fatal for the attach as a whole;
	     report it with a human-readable reason.  */
	  std::string reason
	    = linux_ptrace_attach_fail_reason_string (ptid, err);

	  error (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
	}

      /* A new LWP was found (even if attaching to it failed benignly);
	 tell linux_proc_attach_tgid_threads to rescan, as new clones
	 may have been spawned meanwhile.  */
      return 1;
    }
  return 0;
}
1152 static void async_file_mark (void);
/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

int
linux_process_target::attach (unsigned long pid)
{
  struct process_info *proc;
  thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid);
  int err;

  /* Delay opening the /proc/PID/mem file until we've successfully
     attached.  */
  proc = add_linux_process_no_mem_file (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = attach_lwp (ptid);
  if (err != 0)
    {
      /* Undo the process bookkeeping added above before reporting.  */
      this->remove_linux_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  open_proc_mem_file (proc);

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid));
  gdb_assert (initial_thread != nullptr);
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  try
    {
      linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
    }
  catch (const gdb_exception_error &)
    {
      /* Make sure we do not deliver the SIGSTOP to the process.  */
      initial_thread->last_resume_kind = resume_continue;

      /* Roll back the partial attach before re-raising.  */
      this->detach (proc);
      throw;
    }

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));
      gdb_assert (lwp != nullptr);

      /* Keep any stop other than the expected initial SIGSTOP pending,
	 so it is reported to GDB later.  */
      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      /* The initial stop has been consumed, so the tdesc must be
	 filled in by now.  */
      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}
1244 static int
1245 last_thread_of_process_p (int pid)
1247 bool seen_one = false;
1249 thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
1251 if (!seen_one)
1253 /* This is the first thread of this process we see. */
1254 seen_one = true;
1255 return false;
1257 else
1259 /* This is the second thread of this process we see. */
1260 return true;
1264 return thread == NULL;
/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  thread_info *thr = lwp->thread;
  int pid = thr->id.lwp ();

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     support debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      /* Save errno before the debug call can clobber it.  */
      int save_errno = errno;

      threads_debug_printf ("kill_lwp (SIGKILL) %s, 0, 0 (%s)",
			    target_pid_to_str (thr->id).c_str (),
			    save_errno ? safe_strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      threads_debug_printf ("PTRACE_KILL %s, 0, 0 (%s)",
			    target_pid_to_str (thr->id).c_str (),
			    save_errno ? safe_strerror (save_errno) : "OK");
    }
}
/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  thread_info *thr = lwp->thread;
  int pid = thr->id.pid ();
  int lwpid = thr->id.lwp ();
  int wstat;
  int res;

  threads_debug_printf ("killing lwp %d, for pid: %d", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}
1353 /* Callback for `for_each_thread'. Kills an lwp of a given process,
1354 except the leader. */
1356 static void
1357 kill_one_lwp_callback (thread_info *thread, int pid)
1359 struct lwp_info *lwp = get_thread_lwp (thread);
1361 /* We avoid killing the first thread here, because of a Linux kernel (at
1362 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1363 the children get a chance to be reaped, it will remain a zombie
1364 forever. */
1366 if (thread->id.lwp () == pid)
1368 threads_debug_printf ("is last of process %s",
1369 target_pid_to_str (thread->id).c_str ());
1370 return;
1373 kill_wait_lwp (lwp);
/* Kill all LWPs of PROCESS (leader last, see kill_one_lwp_callback),
   mourn it, and re-resume the LWPs of the other debugged processes.  */

int
linux_process_target::kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  process->for_each_thread ([&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    threads_debug_printf ("cannot find lwp for pid: %d", pid);
  else
    kill_wait_lwp (lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind () != TARGET_WAITKIND_STOPPED
	  || thread->last_status.sig () == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      threads_debug_printf ("lwp %s hasn't stopped: no pending signal",
			    target_pid_to_str (thread->id).c_str ());
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      threads_debug_printf ("lwp %s had stopped with extended "
			    "status: no pending signal",
			    target_pid_to_str (thread->id).c_str ());
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  /* Honor GDB's pass/nopass signal table when we have one.  */
  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      threads_debug_printf ("lwp %s had signal %s, but it is in nopass state",
			    target_pid_to_str (thread->id).c_str (),
			    gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      threads_debug_printf ("lwp %s had signal %s, "
			    "but we don't know if we should pass it. "
			    "Default to not.",
			    target_pid_to_str (thread->id).c_str (),
			    gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      threads_debug_printf ("lwp %s has pending signal %s: delivering it",
			    target_pid_to_str (thread->id).c_str (),
			    gdb_signal_to_string (signo));

      /* Return the host signal number, suitable for PTRACE_DETACH.  */
      return WSTOPSIG (status);
    }
}
/* Detach from a single LWP: clear any pending SIGSTOP, deliver any
   pending signal via PTRACE_DETACH, and delete the lwp entry.  If the
   lwp turned zombie under us, reap it instead.  */

void
linux_process_target::detach_one_lwp (lwp_info *lwp)
{
  thread_info *thread = lwp->thread;
  int sig;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      threads_debug_printf ("Sending SIGCONT to %s",
			    target_pid_to_str (thread->id).c_str ());

      kill_lwp (thread->id.lwp (), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      low_prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw;
    }

  int lwpid = thread->id.lwp ();
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (lwpid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       lwpid, safe_strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       lwpid, status);
	    }
	}
      else
	{
	  error (_("Can't detach %s: %s"),
		 target_pid_to_str (thread->id).c_str (),
		 safe_strerror (save_errno));
	}
    }
  else
    threads_debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)",
			  target_pid_to_str (thread->id).c_str (),
			  strsignal (sig));

  delete_lwp (lwp);
}
/* Detach from PROCESS: finish any in-flight step-over, stop and
   stabilize all threads, detach the clone LWPs first and the leader
   last, then mourn the process.  */

int
linux_process_target::detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* As there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  target_stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  process->for_each_thread ([this] (thread_info *thread)
    {
      /* We don't actually detach from the thread group leader just yet.
	 If the thread group exits, we must reap the zombie clone lwps
	 before we're able to reap the leader.  */
      if (thread->id.pid () == thread->id.lwp ())
	return;

      lwp_info *lwp = get_thread_lwp (thread);
      detach_one_lwp (lwp);
    });

  /* Now that all non-leader lwps are gone, detach from the leader.  */
  main_lwp = find_lwp_pid (ptid_t (process->pid));
  gdb_assert (main_lwp != nullptr);
  detach_one_lwp (main_lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1611 /* Remove all LWPs that belong to process PROC from the lwp list. */
1613 void
1614 linux_process_target::mourn (process_info *process)
1616 #ifdef USE_THREAD_DB
1617 thread_db_mourn (process);
1618 #endif
1620 process->for_each_thread ([this] (thread_info *thread)
1622 delete_lwp (get_thread_lwp (thread));
1625 this->remove_linux_process (process);
1628 void
1629 linux_process_target::join (int pid)
1631 int status, ret;
1633 do {
1634 ret = my_waitpid (pid, &status, 0);
1635 if (WIFEXITED (status) || WIFSIGNALED (status))
1636 break;
1637 } while (ret != -1 || errno != ECHILD);
1640 /* Return true if the given thread is still alive. */
1642 bool
1643 linux_process_target::thread_alive (ptid_t ptid)
1645 struct lwp_info *lwp = find_lwp_pid (ptid);
1647 /* We assume we always know if a thread exits. If a whole process
1648 exited but we still haven't been able to report it to GDB, we'll
1649 hold on to the last lwp of the dead process. */
1650 if (lwp != NULL)
1651 return !lwp_is_marked_dead (lwp);
1652 else
1653 return 0;
/* Return true if THREAD's pending status is still valid to report.
   A pending breakpoint stop is discarded if the thread has since
   moved (PC changed), e.g. because the breakpoint was removed.  */

bool
linux_process_target::thread_still_has_status_pending (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      scoped_restore_current_thread restore_thread;
      switch_to_thread (thread);

      /* If the PC moved since the breakpoint stop was recorded, the
	 stop no longer describes the thread's current state.  */
      if (pc != lp->stop_pc)
	{
	  threads_debug_printf ("PC of %ld changed",
				thread->id.lwp ());
	  discard = 1;
	}

      if (discard)
	{
	  threads_debug_printf ("discarding pending breakpoint status");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}
1696 /* Returns true if LWP is resumed from the client's perspective. */
1698 static int
1699 lwp_resumed (struct lwp_info *lwp)
1701 thread_info *thread = lwp->thread;
1703 if (thread->last_resume_kind != resume_stop)
1704 return 1;
1706 /* Did gdb send us a `vCont;t', but we haven't reported the
1707 corresponding stop to gdb yet? If so, the thread is still
1708 resumed/running from gdb's perspective. */
1709 if (thread->last_resume_kind == resume_stop
1710 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
1711 return 1;
1713 return 0;
/* Return true if THREAD matches PTID and has a valid pending status
   to report.  A stale pending breakpoint status causes the LWP to be
   quietly re-resumed instead.  */

bool
linux_process_target::status_pending_p_callback (thread_info *thread,
						 ptid_t ptid)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!thread->id.matches (ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending (thread))
    {
      /* The pending status went stale; resume the LWP the way it was
	 going (stepping or continuing) and report nothing.  */
      resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}
1740 struct lwp_info *
1741 find_lwp_pid (ptid_t ptid)
1743 long lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
1744 thread_info *thread = find_thread ([lwp] (thread_info *thr_arg)
1746 return thr_arg->id.lwp () == lwp;
1749 if (thread == NULL)
1750 return NULL;
1752 return get_thread_lwp (thread);
1755 /* Return the number of known LWPs in PROCESS. */
1757 static int
1758 num_lwps (process_info *process)
1760 int count = 0;
1762 process->for_each_thread ([&] (thread_info *thread)
1764 count++;
1767 return count;
1770 /* See nat/linux-nat.h. */
1772 struct lwp_info *
1773 iterate_over_lwps (ptid_t filter,
1774 gdb::function_view<iterate_over_lwps_ftype> callback)
1776 thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
1778 lwp_info *lwp = get_thread_lwp (thr_arg);
1780 return callback (lwp);
1783 if (thread == NULL)
1784 return NULL;
1786 return get_thread_lwp (thread);
/* Delete (or mark dead) the lwp of any thread-group leader that has
   turned zombie while other threads of its group are still alive.
   Returns true if this created a new pending (exit) event.  */

bool
linux_process_target::check_zombie_leaders ()
{
  bool new_pending_event = false;

  for_each_process ([&] (process_info *proc)
    {
      pid_t leader_pid = proc->pid;
      lwp_info *leader_lp = find_lwp_pid (ptid_t (leader_pid));

      threads_debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
			    "num_lwps=%d, zombie=%d",
			    leader_pid, leader_lp!= NULL, num_lwps (proc),
			    linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL && !leader_lp->stopped
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  Note this
	     isn't a watertight check.  If the inferior is
	     multi-threaded and is exiting, it may be we see the
	     leader as zombie before we reap all the non-leader
	     threads.  See comments below.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A zombie leader in a multi-threaded program can mean one
	     of three things:

	     #1 - Only the leader exited, not the whole program, e.g.,
	     with pthread_exit.  Since we can't reap the leader's exit
	     status until all other threads are gone and reaped too,
	     we want to delete the zombie leader right away, as it
	     can't be debugged, we can't read its registers, etc.
	     This is the main reason we check for zombie leaders
	     disappearing.

	     #2 - The whole thread-group/process exited (a group exit,
	     via e.g. exit(3), and there is (or will be shortly) an
	     exit reported for each thread in the process, and then
	     finally an exit for the leader once the non-leaders are
	     reaped.

	     #3 - There are 3 or more threads in the group, and a
	     thread other than the leader exec'd.  See comments on
	     exec events at the top of the file.

	     Ideally we would never delete the leader for case #2.
	     Instead, we want to collect the exit status of each
	     non-leader thread, and then finally collect the exit
	     status of the leader as normal and use its exit code as
	     whole-process exit code.  Unfortunately, there's no
	     race-free way to distinguish cases #1 and #2.  We can't
	     assume the exit events for the non-leaders threads are
	     already pending in the kernel, nor can we assume the
	     non-leader threads are in zombie state already.  Between
	     the leader becoming zombie and the non-leaders exiting
	     and becoming zombie themselves, there's a small time
	     window, so such a check would be racy.  Temporarily
	     pausing all threads and checking to see if all threads
	     exit or not before re-resuming them would work in the
	     case that all threads are running right now, but it
	     wouldn't work if some thread is currently already
	     ptrace-stopped, e.g., due to scheduler-locking.

	     So what we do is we delete the leader anyhow, and then
	     later on when we see its exit status, we re-add it back.
	     We also make sure that we only report a whole-process
	     exit when we see the leader exiting, as opposed to when
	     the last LWP in the LWP list exits, which can be a
	     non-leader if we deleted the leader here.  */
	  threads_debug_printf ("Thread group leader %d zombie "
				"(it exited, or another thread execd), "
				"deleting it.",
				leader_pid);

	  thread_info *leader_thread = leader_lp->thread;
	  if (report_exit_events_for (leader_thread))
	    {
	      /* GDB wants thread exit events: keep the lwp around,
		 marked dead, so the exit gets reported.  */
	      mark_lwp_dead (leader_lp, W_EXITCODE (0, 0), true);
	      new_pending_event = true;
	    }
	  else
	    delete_lwp (leader_lp);
	}
    });

  return new_pending_event;
}
1878 /* Callback for `find_thread'. Returns the first LWP that is not
1879 stopped. */
1881 static bool
1882 not_stopped_callback (thread_info *thread, ptid_t filter)
1884 if (!thread->id.matches (filter))
1885 return false;
1887 lwp_info *lwp = get_thread_lwp (thread);
1889 return !lwp->stopped;
1892 /* Increment LWP's suspend count. */
1894 static void
1895 lwp_suspended_inc (struct lwp_info *lwp)
1897 lwp->suspended++;
1899 if (lwp->suspended > 4)
1900 threads_debug_printf
1901 ("LWP %ld has a suspiciously high suspend count, suspended=%d",
1902 lwp->thread->id.lwp (), lwp->suspended);
1905 /* Decrement LWP's suspend count. */
1907 static void
1908 lwp_suspended_decr (struct lwp_info *lwp)
1910 lwp->suspended--;
1912 if (lwp->suspended < 0)
1914 thread_info *thread = lwp->thread;
1916 internal_error ("unsuspend LWP %ld, suspended=%d\n", thread->id.lwp (),
1917 lwp->suspended);
/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return true if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  thread_info *tinfo = lwp->thread;
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp_suspended_inc (lwp);

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  /* Balance the increment above; suspend count must return to 0.  */
  lwp_suspended_decr (lwp);

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads
	      || (lwp->collecting_fast_tracepoint
		  != fast_tpoint_collect_result::not_collecting));

  if (tpoint_related_event)
    {
      threads_debug_printf ("got a tracepoint event");
      return 1;
    }

  return 0;
}
/* Return whether LWP is currently inside a fast tracepoint jump pad,
   filling in *STATUS with the collection state.  Returns
   not_collecting if the arch provides no thread area lookup.  */

fast_tpoint_collect_result
linux_process_target::linux_fast_tracepoint_collecting
  (lwp_info *lwp, fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  thread_info *thread = lwp->thread;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if (low_get_thread_area (thread->id.lwp (), &thread_area) == -1)
    return fast_tpoint_collect_result::not_collecting;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}
/* Default implementation of the low_get_thread_area hook: -1 means
   "not supported".  Architectures with fast tracepoint support
   override this.  */

int
linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  return -1;
}
/* If LWP stopped inside a fast tracepoint jump pad, arrange for it to
   finish collecting before its stop is reported.  Returns true if the
   stop should be deferred (LWP needs to move out of the jump pad
   first), false if the event can be reported now.  WSTAT may be NULL,
   or point at the wait status that stopped LWP.  */

bool
linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
{
  scoped_restore_current_thread restore_thread;
  switch_to_thread (lwp->thread);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;

      threads_debug_printf
	("Checking whether LWP %ld needs to move out of the jump pad.",
	 current_thread->id.lwp ());

      fast_tpoint_collect_result r
	= linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != fast_tpoint_collect_result::not_collecting)
	    {
	      if (r == fast_tpoint_collect_result::before_insn
		  && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      threads_debug_printf
		("Checking whether LWP %ld needs to move out of the jump pad..."
		 " it does", current_thread->id.lwp ());

	      /* Defer reporting; LWP must leave the jump pad first.  */
	      return true;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint
	    = fast_tpoint_collect_result::not_collecting;

	  if (r != fast_tpoint_collect_result::not_collecting
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, current_thread->id.lwp (),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, current_thread->id.lwp (),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      regcache = get_thread_regcache (current_thread);
	      low_set_pc (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      threads_debug_printf
		("Cancelling fast exit-jump-pad: removing bkpt."
		 "stopping all threads momentarily.");

	      stop_all_lwps (1, lwp);

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  threads_debug_printf
    ("Checking whether LWP %ld needs to move out of the jump pad... no",
     current_thread->id.lwp ());

  return false;
}
/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  thread_info *thread = lwp->thread;

  threads_debug_printf ("Deferring signal %d for LWP %ld.",
			WSTOPSIG (*wstat), thread->id.lwp ());

  if (debug_threads)
    {
      for (const auto &sig : lwp->pending_signals_to_report)
	threads_debug_printf ("   Already queued %d", sig.signal);

      threads_debug_printf ("   (no more currently queued signals)");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      for (const auto &sig : lwp->pending_signals_to_report)
	{
	  if (sig.signal == WSTOPSIG (*wstat))
	    {
	      threads_debug_printf
		("Not requeuing already queued non-RT signal %d for LWP %ld",
		 sig.signal, thread->id.lwp ());
	      return;
	    }
	}
    }

  lwp->pending_signals_to_report.emplace_back (WSTOPSIG (*wstat));

  /* Capture the siginfo too, so it can be restored verbatim when the
     signal is eventually re-reported.  */
  ptrace (PTRACE_GETSIGINFO, thread->id.lwp (), (PTRACE_TYPE_ARG3) 0,
	  &lwp->pending_signals_to_report.back ().info);
}
/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  thread_info *thread = lwp->thread;

  if (!lwp->pending_signals_to_report.empty ())
    {
      const pending_signal &p_sig = lwp->pending_signals_to_report.front ();

      *wstat = W_STOPCODE (p_sig.signal);
      /* Restore the siginfo recorded at enqueue time, unless none was
	 captured (si_signo == 0).  */
      if (p_sig.info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, thread->id.lwp (), (PTRACE_TYPE_ARG3) 0,
		&p_sig.info);

      lwp->pending_signals_to_report.pop_front ();

      threads_debug_printf ("Reporting deferred signal %d for LWP %ld.",
			    WSTOPSIG (*wstat), thread->id.lwp ());

      if (debug_threads)
	{
	  for (const auto &sig : lwp->pending_signals_to_report)
	    threads_debug_printf ("   Still queued %d", sig.signal);

	  threads_debug_printf ("   (no more queued signals)");
	}

      return 1;
    }

  return 0;
}
/* Check whether CHILD stopped because of a watchpoint hit, recording
   the stop reason and data address in CHILD if so.  Returns true if
   the stop was (or was already known to be) a watchpoint stop.  */

bool
linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
{
  scoped_restore_current_thread restore_thread;
  switch_to_thread (child->thread);

  if (low_stopped_by_watchpoint ())
    {
      child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
      child->stopped_data_address = low_stopped_data_address ();
    }

  return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
}
/* Default implementation of the low_stopped_by_watchpoint hook:
   no hardware watchpoint support.  */

bool
linux_process_target::low_stopped_by_watchpoint ()
{
  return false;
}
/* Default implementation of the low_stopped_data_address hook: no
   watchpoint data address available.  */

CORE_ADDR
linux_process_target::low_stopped_data_address ()
{
  return 0;
}
2215 /* Return the ptrace options that we want to try to enable. */
2217 static int
2218 linux_low_ptrace_options (int attached)
2220 client_state &cs = get_client_state ();
2221 int options = 0;
2223 if (!attached)
2224 options |= PTRACE_O_EXITKILL;
2226 if (cs.report_fork_events)
2227 options |= PTRACE_O_TRACEFORK;
2229 if (cs.report_vfork_events)
2230 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2232 if (cs.report_exec_events)
2233 options |= PTRACE_O_TRACEEXEC;
2235 options |= PTRACE_O_TRACESYSGOOD;
2237 return options;
/* Do low-level bookkeeping for one raw waitpid event (LWPID, WSTAT)
   pulled out of the kernel, before deciding whether to report it.
   Handles events for LWPs not (yet) in our list, thread exits,
   deferred architecture setup, ptrace-option enabling, syscall-state
   tracking, extended ptrace events, and expected SIGSTOPs.  Events
   that survive filtering are left pending on the LWP
   (status_pending_p/status_pending) for later selection.  */

void
linux_process_target::filter_event (int lwpid, int wstat)
{
  struct lwp_info *child;
  thread_info *thread;
  int have_stop_pc = 0;

  child = find_lwp_pid (ptid_t (lwpid));

  /* Check for events reported by anything not in our LWP list.  */
  if (child == nullptr)
    {
      if (WIFSTOPPED (wstat))
	{
	  if (WSTOPSIG (wstat) == SIGTRAP
	      && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
	    {
	      /* A non-leader thread exec'ed after we've seen the
		 leader zombie, and removed it from our lists (in
		 check_zombie_leaders).  The non-leader thread changes
		 its tid to the tgid.  */
	      threads_debug_printf
		("Re-adding thread group leader LWP %d after exec.",
		 lwpid);

	      child = add_lwp (ptid_t (lwpid, lwpid));
	      child->stopped = 1;
	      switch_to_thread (child->thread);
	    }
	  else
	    {
	      /* A process we are controlling has forked and the new
		 child's stop was reported to us by the kernel.  Save
		 its PID and go back to waiting for the fork event to
		 be reported - the stopped process might be returned
		 from waitpid before or after the fork event is.  */
	      threads_debug_printf
		("Saving LWP %d status %s in stopped_pids list",
		 lwpid, status_to_str (wstat).c_str ());
	      add_to_pid_list (&stopped_pids, lwpid, wstat);
	    }
	}
      else
	{
	  /* Don't report an event for the exit of an LWP not in our
	     list, i.e. not part of any inferior we're debugging.
	     This can happen if we detach from a program we originally
	     forked and then it exits.  However, note that we may have
	     earlier deleted a leader of an inferior we're debugging,
	     in check_zombie_leaders.  Re-add it back here if so.  */
	  find_process ([&] (process_info *proc)
	    {
	      if (proc->pid == lwpid)
		{
		  threads_debug_printf
		    ("Re-adding thread group leader LWP %d after exit.",
		     lwpid);

		  child = add_lwp (ptid_t (lwpid, lwpid));
		  return true;
		}
	      return false;
	    });
	}

      /* Still unknown after the special cases above: ignore it.  */
      if (child == nullptr)
	return;
    }

  thread = child->thread;

  child->stopped = 1;

  child->last_status = wstat;

  /* Check if the thread has exited.  */
  if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
    {
      threads_debug_printf ("%d exited", lwpid);

      if (finish_step_over (child))
	{
	  /* Unsuspend all other LWPs, and set them back running again.  */
	  unsuspend_all_lwps (child);
	}

      /* If this is not the leader LWP, then the exit signal was not
	 the end of the debugged application and should be ignored,
	 unless GDB wants to hear about thread exits.  */
      if (report_exit_events_for (thread) || is_leader (thread))
	{
	  /* Since events are serialized to GDB core, and we can't
	     report this one right now.  Leave the status pending for
	     the next time we're able to report it.  */
	  mark_lwp_dead (child, wstat, false);
	  return;
	}
      else
	{
	  delete_lwp (child);
	  return;
	}
    }

  gdb_assert (WIFSTOPPED (wstat));

  if (WIFSTOPPED (wstat))
    {
      /* Architecture-specific setup after inferior is running.  */
      process_info *proc = find_process_pid (thread->id.pid ());

      if (proc->tdesc == NULL)
	{
	  if (proc->attached)
	    {
	      /* This needs to happen after we have attached to the
		 inferior and it is stopped for the first time, but
		 before we access any inferior registers.  */
	      arch_setup_thread (thread);
	    }
	  else
	    {
	      /* The process is started, but GDBserver will do
		 architecture-specific setup after the program stops at
		 the first instruction.  */
	      child->status_pending_p = 1;
	      child->status_pending = wstat;
	      return;
	    }
	}
    }

  if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
    {
      process_info *proc = find_process_pid (thread->id.pid ());
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid, options);
      child->must_set_ptrace_flags = 0;
    }

  /* Always update syscall_state, even if it will be filtered later.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
    {
      /* Syscall stops come in entry/return pairs; toggle between
	 them.  */
      child->syscall_state
	= (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
	   ? TARGET_WAITKIND_SYSCALL_RETURN
	   : TARGET_WAITKIND_SYSCALL_ENTRY);
    }
  else
    {
      /* Almost all other ptrace-stops are known to be outside of system
	 calls, with further exceptions in handle_extended_wait.  */
      child->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* Be careful to not overwrite stop_pc until save_stop_reason is
     called.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && linux_is_extended_waitstatus (wstat))
    {
      child->stop_pc = get_pc (child);
      if (handle_extended_wait (&child, wstat))
	{
	  /* The event has been handled, so just return without
	     reporting it.  */
	  return;
	}
    }

  if (linux_wstatus_maybe_breakpoint (wstat))
    {
      if (save_stop_reason (child))
	have_stop_pc = 1;
    }

  if (!have_stop_pc)
    child->stop_pc = get_pc (child);

  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
      && child->stop_expected)
    {
      threads_debug_printf ("Expected stop.");

      child->stop_expected = 0;

      if (thread->last_resume_kind == resume_stop)
	{
	  /* We want to report the stop to the core.  Treat the
	     SIGSTOP as a normal event.  */
	  threads_debug_printf ("resume_stop SIGSTOP caught for %s.",
				target_pid_to_str (thread->id).c_str ());
	}
      else if (stopping_threads != NOT_STOPPING_THREADS)
	{
	  /* Stopping threads.  We don't want this SIGSTOP to end up
	     pending.  */
	  threads_debug_printf ("SIGSTOP caught for %s while stopping threads.",
				target_pid_to_str (thread->id).c_str ());
	  return;
	}
      else
	{
	  /* This is a delayed SIGSTOP.  Filter out the event.  */
	  threads_debug_printf ("%s %s, 0, 0 (discard delayed SIGSTOP)",
				child->stepping ? "step" : "continue",
				target_pid_to_str (thread->id).c_str ());

	  resume_one_lwp (child, child->stepping, 0, NULL);
	  return;
	}
    }

  /* The event was not filtered out; leave it pending on the LWP.  */
  child->status_pending_p = 1;
  child->status_pending = wstat;
  return;
}
2458 bool
2459 linux_process_target::maybe_hw_step (thread_info *thread)
2461 if (supports_hardware_single_step ())
2462 return true;
2463 else
2465 /* GDBserver must insert single-step breakpoint for software
2466 single step. */
2467 gdb_assert (has_single_step_breakpoints (thread));
2468 return false;
2472 void
2473 linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
2475 struct lwp_info *lp = get_thread_lwp (thread);
2477 if (lp->stopped
2478 && !lp->suspended
2479 && !lp->status_pending_p
2480 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
2482 int step = 0;
2484 if (thread->last_resume_kind == resume_step)
2486 if (supports_software_single_step ())
2487 install_software_single_step_breakpoints (lp);
2489 step = maybe_hw_step (thread);
2492 threads_debug_printf ("resuming stopped-resumed LWP %s at %s: step=%d",
2493 target_pid_to_str (thread->id).c_str (),
2494 paddress (lp->stop_pc), step);
2496 resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
/* Wait for an event from any child of WAIT_PTID, but only
   report/consume pending statuses of children matching FILTER_PTID.
   The raw wait status is stored in *WSTATP; OPTIONS are waitpid
   flags (WNOHANG makes this non-blocking).  Returns the LWP id of
   the child with the event (also made the current thread), 0 when
   WNOHANG was set and no event is available, or -1 when no
   unwaited-for children remain.  */

int
linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
					       ptid_t filter_ptid,
					       int *wstatp, int options)
{
  thread_info *event_thread;
  struct lwp_info *event_child, *requested_child;
  sigset_t block_mask, prev_mask;

 retry:
  /* N.B. event_thread points to the thread_info struct that contains
     event_child.  Keep them in sync.  */
  event_thread = NULL;
  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
    {
      /* Pick at random among all matching threads with a pending
	 status, to avoid starving any of them.  */
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  threads_debug_printf ("Got a pending child %ld", event_thread->id.lwp ());
	}
    }
  else if (filter_ptid != null_ptid)
    {
      requested_child = find_lwp_pid (filter_ptid);
      gdb_assert (requested_child != nullptr);

      if (stopping_threads == NOT_STOPPING_THREADS
	  && requested_child->status_pending_p
	  && (requested_child->collecting_fast_tracepoint
	      != fast_tpoint_collect_result::not_collecting))
	{
	  /* The LWP is midway through a fast tracepoint collection;
	     defer the signal and let it finish moving out of the
	     jump pad first.  */
	  enqueue_one_deferred_signal (requested_child,
				       &requested_child->status_pending);
	  requested_child->status_pending_p = 0;
	  requested_child->status_pending = 0;
	  resume_one_lwp (requested_child, 0, 0, NULL);
	}

      if (requested_child->suspended
	  && requested_child->status_pending_p)
	{
	  internal_error ("requesting an event out of a"
			  " suspended child?");
	}

      if (requested_child->status_pending_p)
	{
	  event_child = requested_child;
	  event_thread = event_child->thread;
	}
    }

  if (event_child != NULL)
    {
      threads_debug_printf ("Got an event from pending child %ld (%04x)",
			    event_thread->id.lwp (),
			    event_child->status_pending);

      *wstatp = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      switch_to_thread (event_thread);
      return event_thread->id.lwp ();
    }

  /* But if we don't find a pending event, we'll have to wait.

     We only enter this loop if no process has a pending wait status.
     Thus any action taken in response to a wait status inside this
     loop is responding as soon as we detect the status, not after any
     pending events.  */

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  Block
     all signals while here.  */
  sigfillset (&block_mask);
  gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);

  /* Always pull all events out of the kernel.  We'll randomly select
     an event LWP out of all that have events, to prevent
     starvation.  */
  while (event_child == NULL)
    {
      pid_t ret = 0;

      /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	   thread group still exist, waitpid(TGID, ...) hangs.  That
	   waitpid won't return an exit status until the other threads
	   in the group are reaped.

	 - When a non-leader thread execs, that thread just vanishes
	   without reporting an exit (so we'd hang if we waited for it
	   explicitly in that case).  The exec event is reported to
	   the TGID pid.  */
      errno = 0;
      ret = my_waitpid (-1, wstatp, options | WNOHANG);

      threads_debug_printf ("waitpid(-1, ...) returned %d, %s",
			    ret, errno ? safe_strerror (errno) : "ERRNO-OK");

      if (ret > 0)
	{
	  threads_debug_printf ("waitpid %ld received %s",
				(long) ret, status_to_str (*wstatp).c_str ());

	  /* Filter all events.  IOW, leave all events pending.  We'll
	     randomly select an event LWP out of all that have events
	     below.  */
	  filter_event (ret, *wstatp);
	  /* Retry until nothing comes out of waitpid.  A single
	     SIGCHLD can indicate more than one child stopped.  */
	  continue;
	}

      /* Now that we've pulled all events out of the kernel, resume
	 LWPs that don't have an interesting event to report.  */
      if (stopping_threads == NOT_STOPPING_THREADS)
	for_each_thread ([this] (thread_info *thread)
	  {
	    resume_stopped_resumed_lwps (thread);
	  });

      /* ... and find an LWP with a status to report to the core, if
	 any.  */
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  *wstatp = event_child->status_pending;
	  event_child->status_pending_p = 0;
	  event_child->status_pending = 0;
	  break;
	}

      /* Check for zombie thread group leaders.  Those can't be reaped
	 until all other threads in the thread group are.  */
      if (check_zombie_leaders ())
	goto retry;

      auto not_stopped = [&] (thread_info *thread)
	{
	  return not_stopped_callback (thread, wait_ptid);
	};

      /* If there are no resumed children left in the set of LWPs we
	 want to wait for, bail.  We can't just block in
	 waitpid/sigsuspend, because lwps might have been left stopped
	 in trace-stop state, and we'd be stuck forever waiting for
	 their status to change (which would only happen if we resumed
	 them).  Even if WNOHANG is set, this return code is preferred
	 over 0 (below), as it is more detailed.  */
      if (find_thread (not_stopped) == NULL)
	{
	  threads_debug_printf ("exit (no unwaited-for LWP)");

	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return -1;
	}

      /* No interesting event to report to the caller.  */
      if ((options & WNOHANG))
	{
	  threads_debug_printf ("WNOHANG set, no event found");

	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return 0;
	}

      /* Block until we get an event reported with SIGCHLD.  */
      threads_debug_printf ("sigsuspend'ing");

      sigsuspend (&prev_mask);
      gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
      goto retry;
    }

  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);

  switch_to_thread (event_thread);

  return event_thread->id.lwp ();
}
/* Wait for an event from any child of PTID, reporting statuses for
   PTID itself.  Thin wrapper over wait_for_event_filtered with
   WAIT_PTID == FILTER_PTID == PTID; same return convention.  */

int
linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
{
  return wait_for_event_filtered (ptid, ptid, wstatp, options);
}
2705 /* Select one LWP out of those that have events pending. */
2707 static void
2708 select_event_lwp (struct lwp_info **orig_lp)
2710 thread_info *event_thread = NULL;
2712 /* In all-stop, give preference to the LWP that is being
2713 single-stepped. There will be at most one, and it's the LWP that
2714 the core is most interested in. If we didn't do this, then we'd
2715 have to handle pending step SIGTRAPs somehow in case the core
2716 later continues the previously-stepped thread, otherwise we'd
2717 report the pending SIGTRAP, and the core, not having stepped the
2718 thread, wouldn't understand what the trap was for, and therefore
2719 would report it to the user as a random signal. */
2720 if (!non_stop)
2722 event_thread = find_thread ([] (thread_info *thread)
2724 lwp_info *lp = get_thread_lwp (thread);
2726 return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
2727 && thread->last_resume_kind == resume_step
2728 && lp->status_pending_p);
2731 if (event_thread != NULL)
2732 threads_debug_printf
2733 ("Select single-step %s",
2734 target_pid_to_str (event_thread->id).c_str ());
2736 if (event_thread == NULL)
2738 /* No single-stepping LWP. Select one at random, out of those
2739 which have had events. */
2741 event_thread = find_thread_in_random ([&] (thread_info *thread)
2743 lwp_info *lp = get_thread_lwp (thread);
2745 /* Only resumed LWPs that have an event pending. */
2746 return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
2747 && lp->status_pending_p);
2751 if (event_thread != NULL)
2753 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2755 /* Switch the event LWP. */
2756 *orig_lp = event_lp;
2760 /* Decrement the suspend count of all LWPs, except EXCEPT, if non
2761 NULL. */
2763 static void
2764 unsuspend_all_lwps (struct lwp_info *except)
2766 for_each_thread ([&] (thread_info *thread)
2768 lwp_info *lwp = get_thread_lwp (thread);
2770 if (lwp != except)
2771 lwp_suspended_decr (lwp);
2775 static bool lwp_running (thread_info *thread);
/* Stabilize threads (move out of jump pads).

   If a thread is midway collecting a fast tracepoint, we need to
   finish the collection and move it out of the jump pad before
   reporting the signal.

   This avoids recursion while collecting (when a signal arrives
   midway, and the signal handler itself collects), which would trash
   the trace buffer.  In case the user set a breakpoint in a signal
   handler, this avoids the backtrace showing the jump pad, etc..
   Most importantly, there are certain things we can't do safely if
   threads are stopped in a jump pad (or in its callee's).  For
   example:

     - starting a new trace run.  A thread still collecting the
   previous run, could trash the trace buffer when resumed.  The trace
   buffer control structures would have been reset but the thread had
   no way to tell.  The thread could even midway memcpy'ing to the
   buffer, which would mean that when resumed, it would clobber the
   trace buffer that had been set for a new run.

     - we can't rewrite/reuse the jump pads for new tracepoints
   safely.  Say you do tstart while a thread is stopped midway while
   collecting.  When the thread is later resumed, it finishes the
   collection, and returns to the jump pad, to execute the original
   instruction that was under the tracepoint jump at the time the
   older run had been started.  If the jump pad had been rewritten
   since for something else in the new run, the thread would now
   execute the wrong / random instructions.  */

void
linux_process_target::stabilize_threads ()
{
  /* If any thread is already parked in the jump pad with a signal
     pending, we cannot stabilize; bail out.  */
  thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
    {
      return stuck_in_jump_pad (thread);
    });

  if (thread_stuck != NULL)
    {
      threads_debug_printf ("can't stabilize, LWP %ld is stuck in jump pad",
			    thread_stuck->id.lwp ());
      return;
    }

  scoped_restore_current_thread restore_thread;

  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_thread ([this] (thread_info *thread)
    {
      move_out_of_jump_pad (thread);
    });

  /* Loop until all are stopped out of the jump pads.  */
  while (find_thread (lwp_running) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      int wstat;

      /* Note that we go through the full wait even loop.  While
	 moving threads out of jump pad, we need to be able to step
	 over internal breakpoints and such.  */
      wait_1 (minus_one_ptid, &ourstatus, 0);

      if (ourstatus.kind () == TARGET_WAITKIND_STOPPED)
	{
	  lwp = get_thread_lwp (current_thread);

	  /* Lock it.  */
	  lwp_suspended_inc (lwp);

	  /* Re-queue any signal that caused the stop, so it is
	     delivered once the thread is resumed for real.  */
	  if (ourstatus.sig () != GDB_SIGNAL_0
	      || current_thread->last_resume_kind == resume_stop)
	    {
	      wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.sig ()));
	      enqueue_one_deferred_signal (lwp, &wstat);
	    }
	}
    }

  unsuspend_all_lwps (NULL);

  stabilizing_threads = 0;

  /* Sanity check: with debug output enabled, verify nothing ended up
     stuck in a jump pad after all.  */
  if (debug_threads)
    {
      thread_stuck = find_thread ([this] (thread_info *thread)
	{
	  return stuck_in_jump_pad (thread);
	});

      if (thread_stuck != NULL)
	threads_debug_printf
	  ("couldn't stabilize, LWP %ld got stuck in jump pad",
	   thread_stuck->id.lwp ());
    }
}
2878 /* Convenience function that is called when the kernel reports an
2879 event that is not passed out to GDB. */
2881 static ptid_t
2882 ignore_event (struct target_waitstatus *ourstatus)
2884 /* If we got an event, there may still be others, as a single
2885 SIGCHLD can indicate more than one child stopped. This forces
2886 another target_wait call. */
2887 async_file_mark ();
2889 ourstatus->set_ignore ();
2890 return null_ptid;
2893 ptid_t
2894 linux_process_target::filter_exit_event (lwp_info *event_child,
2895 target_waitstatus *ourstatus)
2897 thread_info *thread = event_child->thread;
2898 ptid_t ptid = thread->id;
2900 if (ourstatus->kind () == TARGET_WAITKIND_THREAD_EXITED)
2902 /* We're reporting a thread exit for the leader. The exit was
2903 detected by check_zombie_leaders. */
2904 gdb_assert (is_leader (thread));
2905 gdb_assert (report_exit_events_for (thread));
2907 delete_lwp (event_child);
2908 return ptid;
2911 /* Note we must filter TARGET_WAITKIND_SIGNALLED as well, otherwise
2912 if a non-leader thread exits with a signal, we'd report it to the
2913 core which would interpret it as the whole-process exiting.
2914 There is no TARGET_WAITKIND_THREAD_SIGNALLED event kind. */
2915 if (ourstatus->kind () != TARGET_WAITKIND_EXITED
2916 && ourstatus->kind () != TARGET_WAITKIND_SIGNALLED)
2917 return ptid;
2919 if (!is_leader (thread))
2921 if (report_exit_events_for (thread))
2922 ourstatus->set_thread_exited (0);
2923 else
2924 ourstatus->set_ignore ();
2926 delete_lwp (event_child);
2928 return ptid;
2931 /* Returns 1 if GDB is interested in any event_child syscalls. */
2933 static int
2934 gdb_catching_syscalls_p (struct lwp_info *event_child)
2936 return !event_child->thread->process ()->syscalls_to_catch.empty ();
2939 bool
2940 linux_process_target::gdb_catch_this_syscall (lwp_info *event_child)
2942 int sysno;
2943 thread_info *thread = event_child->thread;
2944 process_info *proc = thread->process ();
2946 if (proc->syscalls_to_catch.empty ())
2947 return false;
2949 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
2950 return true;
2952 get_syscall_trapinfo (event_child, &sysno);
2954 for (int iter : proc->syscalls_to_catch)
2955 if (iter == sysno)
2956 return true;
2958 return false;
2961 ptid_t
2962 linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
2963 target_wait_flags target_options)
2965 THREADS_SCOPED_DEBUG_ENTER_EXIT;
2967 client_state &cs = get_client_state ();
2968 int w;
2969 struct lwp_info *event_child;
2970 int options;
2971 int pid;
2972 int step_over_finished;
2973 int bp_explains_trap;
2974 int maybe_internal_trap;
2975 int report_to_gdb;
2976 int trace_event;
2977 int in_step_range;
2979 threads_debug_printf ("[%s]", target_pid_to_str (ptid).c_str ());
2981 /* Translate generic target options into linux options. */
2982 options = __WALL;
2983 if (target_options & TARGET_WNOHANG)
2984 options |= WNOHANG;
2986 bp_explains_trap = 0;
2987 trace_event = 0;
2988 in_step_range = 0;
2989 ourstatus->set_ignore ();
2991 bool was_any_resumed = any_resumed ();
2993 if (step_over_bkpt == null_ptid)
2994 pid = wait_for_event (ptid, &w, options);
2995 else
2997 threads_debug_printf ("step_over_bkpt set [%s], doing a blocking wait",
2998 target_pid_to_str (step_over_bkpt).c_str ());
2999 pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3002 if (pid == 0 || (pid == -1 && !was_any_resumed))
3004 gdb_assert (target_options & TARGET_WNOHANG);
3006 threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_IGNORE");
3008 ourstatus->set_ignore ();
3009 return null_ptid;
3011 else if (pid == -1)
3013 threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_NO_RESUMED");
3015 ourstatus->set_no_resumed ();
3016 return null_ptid;
3019 event_child = get_thread_lwp (current_thread);
3021 /* wait_for_event only returns an exit status for the last
3022 child of a process. Report it. */
3023 if (WIFEXITED (w) || WIFSIGNALED (w))
3025 if (WIFEXITED (w))
3027 /* If we already have the exit recorded in waitstatus, use
3028 it. This will happen when we detect a zombie leader,
3029 when we had GDB_THREAD_OPTION_EXIT enabled for it. We
3030 want to report its exit as TARGET_WAITKIND_THREAD_EXITED,
3031 as the whole process hasn't exited yet. */
3032 const target_waitstatus &ws = event_child->waitstatus;
3033 if (ws.kind () != TARGET_WAITKIND_IGNORE)
3035 gdb_assert (ws.kind () == TARGET_WAITKIND_EXITED
3036 || ws.kind () == TARGET_WAITKIND_THREAD_EXITED);
3037 *ourstatus = ws;
3039 else
3040 ourstatus->set_exited (WEXITSTATUS (w));
3042 threads_debug_printf
3043 ("ret = %s, exited with retcode %d",
3044 target_pid_to_str (current_thread->id).c_str (),
3045 WEXITSTATUS (w));
3047 else
3049 ourstatus->set_signalled (gdb_signal_from_host (WTERMSIG (w)));
3051 threads_debug_printf
3052 ("ret = %s, terminated with signal %d",
3053 target_pid_to_str (current_thread->id).c_str (),
3054 WTERMSIG (w));
3057 return filter_exit_event (event_child, ourstatus);
3060 /* If step-over executes a breakpoint instruction, in the case of a
3061 hardware single step it means a gdb/gdbserver breakpoint had been
3062 planted on top of a permanent breakpoint, in the case of a software
3063 single step it may just mean that gdbserver hit the reinsert breakpoint.
3064 The PC has been adjusted by save_stop_reason to point at
3065 the breakpoint address.
3066 So in the case of the hardware single step advance the PC manually
3067 past the breakpoint and in the case of software single step advance only
3068 if it's not the single_step_breakpoint we are hitting.
3069 This avoids that a program would keep trapping a permanent breakpoint
3070 forever. */
3071 if (step_over_bkpt != null_ptid
3072 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3073 && (event_child->stepping
3074 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3076 int increment_pc = 0;
3077 int breakpoint_kind = 0;
3078 CORE_ADDR stop_pc = event_child->stop_pc;
3080 breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
3081 sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3083 threads_debug_printf
3084 ("step-over for %s executed software breakpoint",
3085 target_pid_to_str (current_thread->id).c_str ());
3087 if (increment_pc != 0)
3089 struct regcache *regcache
3090 = get_thread_regcache (current_thread);
3092 event_child->stop_pc += increment_pc;
3093 low_set_pc (regcache, event_child->stop_pc);
3095 if (!low_breakpoint_at (event_child->stop_pc))
3096 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3100 /* If this event was not handled before, and is not a SIGTRAP, we
3101 report it. SIGILL and SIGSEGV are also treated as traps in case
3102 a breakpoint is inserted at the current PC. If this target does
3103 not support internal breakpoints at all, we also report the
3104 SIGTRAP without further processing; it's of no concern to us. */
3105 maybe_internal_trap
3106 = (low_supports_breakpoints ()
3107 && (WSTOPSIG (w) == SIGTRAP
3108 || ((WSTOPSIG (w) == SIGILL
3109 || WSTOPSIG (w) == SIGSEGV)
3110 && low_breakpoint_at (event_child->stop_pc))));
3112 if (maybe_internal_trap)
3114 /* Handle anything that requires bookkeeping before deciding to
3115 report the event or continue waiting. */
3117 /* First check if we can explain the SIGTRAP with an internal
3118 breakpoint, or if we should possibly report the event to GDB.
3119 Do this before anything that may remove or insert a
3120 breakpoint. */
3121 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3123 /* We have a SIGTRAP, possibly a step-over dance has just
3124 finished. If so, tweak the state machine accordingly,
3125 reinsert breakpoints and delete any single-step
3126 breakpoints. */
3127 step_over_finished = finish_step_over (event_child);
3129 /* Now invoke the callbacks of any internal breakpoints there. */
3130 check_breakpoints (event_child->stop_pc);
3132 /* Handle tracepoint data collecting. This may overflow the
3133 trace buffer, and cause a tracing stop, removing
3134 breakpoints. */
3135 trace_event = handle_tracepoints (event_child);
3137 if (bp_explains_trap)
3138 threads_debug_printf ("Hit a gdbserver breakpoint.");
3140 else
3142 /* We have some other signal, possibly a step-over dance was in
3143 progress, and it should be cancelled too. */
3144 step_over_finished = finish_step_over (event_child);
3147 /* We have all the data we need. Either report the event to GDB, or
3148 resume threads and keep waiting for more. */
3150 /* If we're collecting a fast tracepoint, finish the collection and
3151 move out of the jump pad before delivering a signal. See
3152 linux_stabilize_threads. */
3154 if (WIFSTOPPED (w)
3155 && WSTOPSIG (w) != SIGTRAP
3156 && supports_fast_tracepoints ()
3157 && agent_loaded_p ())
3159 threads_debug_printf ("Got signal %d for LWP %ld. Check if we need "
3160 "to defer or adjust it.",
3161 WSTOPSIG (w), current_thread->id.lwp ());
3163 /* Allow debugging the jump pad itself. */
3164 if (current_thread->last_resume_kind != resume_step
3165 && maybe_move_out_of_jump_pad (event_child, &w))
3167 enqueue_one_deferred_signal (event_child, &w);
3169 threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad)",
3170 WSTOPSIG (w), current_thread->id.lwp ());
3172 resume_one_lwp (event_child, 0, 0, NULL);
3174 return ignore_event (ourstatus);
3178 if (event_child->collecting_fast_tracepoint
3179 != fast_tpoint_collect_result::not_collecting)
3181 threads_debug_printf
3182 ("LWP %ld was trying to move out of the jump pad (%d). "
3183 "Check if we're already there.",
3184 current_thread->id.lwp (),
3185 (int) event_child->collecting_fast_tracepoint);
3187 trace_event = 1;
3189 event_child->collecting_fast_tracepoint
3190 = linux_fast_tracepoint_collecting (event_child, NULL);
3192 if (event_child->collecting_fast_tracepoint
3193 != fast_tpoint_collect_result::before_insn)
3195 /* No longer need this breakpoint. */
3196 if (event_child->exit_jump_pad_bkpt != NULL)
3198 threads_debug_printf
3199 ("No longer need exit-jump-pad bkpt; removing it."
3200 "stopping all threads momentarily.");
3202 /* Other running threads could hit this breakpoint.
3203 We don't handle moribund locations like GDB does,
3204 instead we always pause all threads when removing
3205 breakpoints, so that any step-over or
3206 decr_pc_after_break adjustment is always taken
3207 care of while the breakpoint is still
3208 inserted. */
3209 stop_all_lwps (1, event_child);
3211 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3212 event_child->exit_jump_pad_bkpt = NULL;
3214 unstop_all_lwps (1, event_child);
3216 gdb_assert (event_child->suspended >= 0);
3220 if (event_child->collecting_fast_tracepoint
3221 == fast_tpoint_collect_result::not_collecting)
3223 threads_debug_printf
3224 ("fast tracepoint finished collecting successfully.");
3226 /* We may have a deferred signal to report. */
3227 if (dequeue_one_deferred_signal (event_child, &w))
3228 threads_debug_printf ("dequeued one signal.");
3229 else
3231 threads_debug_printf ("no deferred signals.");
3233 if (stabilizing_threads)
3235 ourstatus->set_stopped (GDB_SIGNAL_0);
3237 threads_debug_printf
3238 ("ret = %s, stopped while stabilizing threads",
3239 target_pid_to_str (current_thread->id).c_str ());
3241 return current_thread->id;
3247 /* Check whether GDB would be interested in this event. */
3249 /* Check if GDB is interested in this syscall. */
3250 if (WIFSTOPPED (w)
3251 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3252 && !gdb_catch_this_syscall (event_child))
3254 threads_debug_printf ("Ignored syscall for LWP %ld.",
3255 current_thread->id.lwp ());
3257 resume_one_lwp (event_child, event_child->stepping, 0, NULL);
3259 return ignore_event (ourstatus);
3262 /* If GDB is not interested in this signal, don't stop other
3263 threads, and don't report it to GDB. Just resume the inferior
3264 right away. We do this for threading-related signals as well as
3265 any that GDB specifically requested we ignore. But never ignore
3266 SIGSTOP if we sent it ourselves, and do not ignore signals when
3267 stepping - they may require special handling to skip the signal
3268 handler. Also never ignore signals that could be caused by a
3269 breakpoint. */
3270 if (WIFSTOPPED (w)
3271 && current_thread->last_resume_kind != resume_step
3272 && (
3273 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3274 (current_process ()->priv->thread_db != NULL
3275 && (WSTOPSIG (w) == __SIGRTMIN
3276 || WSTOPSIG (w) == __SIGRTMIN + 1))
3278 #endif
3279 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3280 && !(WSTOPSIG (w) == SIGSTOP
3281 && current_thread->last_resume_kind == resume_stop)
3282 && !linux_wstatus_maybe_breakpoint (w))))
3284 siginfo_t info, *info_p;
3286 threads_debug_printf ("Ignored signal %d for LWP %ld.",
3287 WSTOPSIG (w), current_thread->id.lwp ());
3289 if (ptrace (PTRACE_GETSIGINFO, current_thread->id.lwp (),
3290 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3291 info_p = &info;
3292 else
3293 info_p = NULL;
3295 if (step_over_finished)
3297 /* We cancelled this thread's step-over above. We still
3298 need to unsuspend all other LWPs, and set them back
3299 running again while the signal handler runs. */
3300 unsuspend_all_lwps (event_child);
3302 /* Enqueue the pending signal info so that proceed_all_lwps
3303 doesn't lose it. */
3304 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3306 proceed_all_lwps ();
3308 else
3310 resume_one_lwp (event_child, event_child->stepping,
3311 WSTOPSIG (w), info_p);
3314 return ignore_event (ourstatus);
3317 /* Note that all addresses are always "out of the step range" when
3318 there's no range to begin with. */
3319 in_step_range = lwp_in_step_range (event_child);
3321 /* If GDB wanted this thread to single step, and the thread is out
3322 of the step range, we always want to report the SIGTRAP, and let
3323 GDB handle it. Watchpoints should always be reported. So should
3324 signals we can't explain. A SIGTRAP we can't explain could be a
3325 GDB breakpoint --- we may or not support Z0 breakpoints. If we
3326 do, we're be able to handle GDB breakpoints on top of internal
3327 breakpoints, by handling the internal breakpoint and still
3328 reporting the event to GDB. If we don't, we're out of luck, GDB
3329 won't see the breakpoint hit. If we see a single-step event but
3330 the thread should be continuing, don't pass the trap to gdb.
3331 That indicates that we had previously finished a single-step but
3332 left the single-step pending -- see
3333 complete_ongoing_step_over. */
3334 report_to_gdb = (!maybe_internal_trap
3335 || (current_thread->last_resume_kind == resume_step
3336 && !in_step_range)
3337 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3338 || (!in_step_range
3339 && !bp_explains_trap
3340 && !trace_event
3341 && !step_over_finished
3342 && !(current_thread->last_resume_kind == resume_continue
3343 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3344 || (gdb_breakpoint_here (event_child->stop_pc)
3345 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3346 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3347 || event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE);
3349 run_breakpoint_commands (event_child->stop_pc);
3351 /* We found no reason GDB would want us to stop. We either hit one
3352 of our own breakpoints, or finished an internal step GDB
3353 shouldn't know about. */
3354 if (!report_to_gdb)
3356 if (bp_explains_trap)
3357 threads_debug_printf ("Hit a gdbserver breakpoint.");
3359 if (step_over_finished)
3360 threads_debug_printf ("Step-over finished.");
3362 if (trace_event)
3363 threads_debug_printf ("Tracepoint event.");
3365 if (lwp_in_step_range (event_child))
3366 threads_debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).",
3367 paddress (event_child->stop_pc),
3368 paddress (event_child->step_range_start),
3369 paddress (event_child->step_range_end));
3371 /* We're not reporting this breakpoint to GDB, so apply the
3372 decr_pc_after_break adjustment to the inferior's regcache
3373 ourselves. */
3375 if (low_supports_breakpoints ())
3377 struct regcache *regcache
3378 = get_thread_regcache (current_thread);
3379 low_set_pc (regcache, event_child->stop_pc);
3382 if (step_over_finished)
3384 /* If we have finished stepping over a breakpoint, we've
3385 stopped and suspended all LWPs momentarily except the
3386 stepping one. This is where we resume them all again.
3387 We're going to keep waiting, so use proceed, which
3388 handles stepping over the next breakpoint. */
3389 unsuspend_all_lwps (event_child);
3391 else
3393 /* Remove the single-step breakpoints if any. Note that
3394 there isn't single-step breakpoint if we finished stepping
3395 over. */
3396 if (supports_software_single_step ()
3397 && has_single_step_breakpoints (current_thread))
3399 stop_all_lwps (0, event_child);
3400 delete_single_step_breakpoints (current_thread);
3401 unstop_all_lwps (0, event_child);
3405 threads_debug_printf ("proceeding all threads.");
3407 proceed_all_lwps ();
3409 return ignore_event (ourstatus);
3412 if (debug_threads)
3414 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3415 threads_debug_printf ("LWP %ld: extended event with waitstatus %s",
3416 event_child->thread->id.lwp (),
3417 event_child->waitstatus.to_string ().c_str ());
3419 if (current_thread->last_resume_kind == resume_step)
3421 if (event_child->step_range_start == event_child->step_range_end)
3422 threads_debug_printf
3423 ("GDB wanted to single-step, reporting event.");
3424 else if (!lwp_in_step_range (event_child))
3425 threads_debug_printf ("Out of step range, reporting event.");
3428 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3429 threads_debug_printf ("Stopped by watchpoint.");
3430 else if (gdb_breakpoint_here (event_child->stop_pc))
3431 threads_debug_printf ("Stopped by GDB breakpoint.");
3434 threads_debug_printf ("Hit a non-gdbserver trap event.");
3436 /* Alright, we're going to report a stop. */
3438 /* Remove single-step breakpoints. */
3439 if (supports_software_single_step ())
3441 /* Remove single-step breakpoints or not. It it is true, stop all
3442 lwps, so that other threads won't hit the breakpoint in the
3443 staled memory. */
3444 int remove_single_step_breakpoints_p = 0;
3446 if (non_stop)
3448 remove_single_step_breakpoints_p
3449 = has_single_step_breakpoints (current_thread);
3451 else
3453 /* In all-stop, a stop reply cancels all previous resume
3454 requests. Delete all single-step breakpoints. */
3456 find_thread ([&] (thread_info *thread) {
3457 if (has_single_step_breakpoints (thread))
3459 remove_single_step_breakpoints_p = 1;
3460 return true;
3463 return false;
3467 if (remove_single_step_breakpoints_p)
3469 /* If we remove single-step breakpoints from memory, stop all lwps,
3470 so that other threads won't hit the breakpoint in the staled
3471 memory. */
3472 stop_all_lwps (0, event_child);
3474 if (non_stop)
3476 gdb_assert (has_single_step_breakpoints (current_thread));
3477 delete_single_step_breakpoints (current_thread);
3479 else
3481 for_each_thread ([] (thread_info *thread){
3482 if (has_single_step_breakpoints (thread))
3483 delete_single_step_breakpoints (thread);
3487 unstop_all_lwps (0, event_child);
3491 if (!stabilizing_threads)
3493 /* In all-stop, stop all threads. */
3494 if (!non_stop)
3495 stop_all_lwps (0, NULL);
3497 if (step_over_finished)
3499 if (!non_stop)
3501 /* If we were doing a step-over, all other threads but
3502 the stepping one had been paused in start_step_over,
3503 with their suspend counts incremented. We don't want
3504 to do a full unstop/unpause, because we're in
3505 all-stop mode (so we want threads stopped), but we
3506 still need to unsuspend the other threads, to
3507 decrement their `suspended' count back. */
3508 unsuspend_all_lwps (event_child);
3510 else
3512 /* If we just finished a step-over, then all threads had
3513 been momentarily paused. In all-stop, that's fine,
3514 we want threads stopped by now anyway. In non-stop,
3515 we need to re-resume threads that GDB wanted to be
3516 running. */
3517 unstop_all_lwps (1, event_child);
3521 /* If we're not waiting for a specific LWP, choose an event LWP
3522 from among those that have had events. Giving equal priority
3523 to all LWPs that have had events helps prevent
3524 starvation. */
3525 if (ptid == minus_one_ptid)
3527 event_child->status_pending_p = 1;
3528 event_child->status_pending = w;
3530 select_event_lwp (&event_child);
3532 /* current_thread and event_child must stay in sync. */
3533 switch_to_thread (event_child->thread);
3535 event_child->status_pending_p = 0;
3536 w = event_child->status_pending;
3540 /* Stabilize threads (move out of jump pads). */
3541 if (!non_stop)
3542 target_stabilize_threads ();
3544 else
3546 /* If we just finished a step-over, then all threads had been
3547 momentarily paused. In all-stop, that's fine, we want
3548 threads stopped by now anyway. In non-stop, we need to
3549 re-resume threads that GDB wanted to be running. */
3550 if (step_over_finished)
3551 unstop_all_lwps (1, event_child);
3554 /* At this point, we haven't set OURSTATUS. This is where we do it. */
3555 gdb_assert (ourstatus->kind () == TARGET_WAITKIND_IGNORE);
3557 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3559 /* If the reported event is an exit, fork, vfork, clone or exec,
3560 let GDB know. */
3562 /* Break the unreported fork/vfork/clone relationship chain. */
3563 if (is_new_child_status (event_child->waitstatus.kind ()))
3565 event_child->relative->relative = NULL;
3566 event_child->relative = NULL;
3569 *ourstatus = event_child->waitstatus;
3570 /* Clear the event lwp's waitstatus since we handled it already. */
3571 event_child->waitstatus.set_ignore ();
3573 else
3575 /* The LWP stopped due to a plain signal or a syscall signal. Either way,
3576 event_child->waitstatus wasn't filled in with the details, so look at
3577 the wait status W. */
3578 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3580 int syscall_number;
3582 get_syscall_trapinfo (event_child, &syscall_number);
3583 if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
3584 ourstatus->set_syscall_entry (syscall_number);
3585 else if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
3586 ourstatus->set_syscall_return (syscall_number);
3587 else
3588 gdb_assert_not_reached ("unexpected syscall state");
3590 else if (current_thread->last_resume_kind == resume_stop
3591 && WSTOPSIG (w) == SIGSTOP)
3593 /* A thread that has been requested to stop by GDB with vCont;t,
3594 and it stopped cleanly, so report as SIG0. The use of
3595 SIGSTOP is an implementation detail. */
3596 ourstatus->set_stopped (GDB_SIGNAL_0);
3598 else
3599 ourstatus->set_stopped (gdb_signal_from_host (WSTOPSIG (w)));
3602 /* Now that we've selected our final event LWP, un-adjust its PC if
3603 it was a software breakpoint, and the client doesn't know we can
3604 adjust the breakpoint ourselves. */
3605 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3606 && !cs.swbreak_feature)
3608 int decr_pc = low_decr_pc_after_break ();
3610 if (decr_pc != 0)
3612 struct regcache *regcache
3613 = get_thread_regcache (current_thread);
3614 low_set_pc (regcache, event_child->stop_pc + decr_pc);
3618 gdb_assert (step_over_bkpt == null_ptid);
3620 threads_debug_printf ("ret = %s, %s",
3621 target_pid_to_str (current_thread->id).c_str (),
3622 ourstatus->to_string ().c_str ());
3624 return filter_exit_event (event_child, ourstatus);
/* Get rid of any pending event in the pipe.  Called before waiting,
   so that a stale wakeup token does not make the event loop spin.  */

static void
async_file_flush (void)
{
  linux_event_pipe.flush ();
}
/* Put something in the pipe, so the event loop wakes up.  Used to
   signal that there may be an event to report.  */

static void
async_file_mark (void)
{
  linux_event_pipe.mark ();
}
/* Target wait entry point.  Loops over wait_1 until it reports a real
   event (unless TARGET_WNOHANG was requested), and manages the async
   wakeup pipe around the wait.  Returns the ptid of the LWP the event
   is for, with the event details stored in OURSTATUS.  */

ptid_t
linux_process_target::wait (ptid_t ptid,
			    target_waitstatus *ourstatus,
			    target_wait_flags target_options)
{
  ptid_t event_ptid;

  /* Flush the async file first.  */
  if (target_is_async_p ())
    async_file_flush ();

  do
    {
      event_ptid = wait_1 (ptid, ourstatus, target_options);
    }
  while ((target_options & TARGET_WNOHANG) == 0
	 && ourstatus->kind () == TARGET_WAITKIND_IGNORE);

  /* If at least one stop was reported, there may be more.  A single
     SIGCHLD can signal more than one child stop.  */
  if (target_is_async_p ()
      && (target_options & TARGET_WNOHANG) != 0
      && event_ptid != null_ptid)
    async_file_mark ();

  return event_ptid;
}
3669 /* Send a signal to an LWP. */
3671 static int
3672 kill_lwp (unsigned long lwpid, int signo)
3674 int ret;
3676 errno = 0;
3677 ret = syscall (__NR_tkill, lwpid, signo);
3678 if (errno == ENOSYS)
3680 /* If tkill fails, then we are not using nptl threads, a
3681 configuration we no longer support. */
3682 perror_with_name (("tkill"));
3684 return ret;
/* Externally visible entry point to request that LWP be stopped; just
   forwards to send_sigstop.  */

void
linux_stop_lwp (struct lwp_info *lwp)
{
  send_sigstop (lwp);
}
3693 static void
3694 send_sigstop (struct lwp_info *lwp)
3696 int pid = lwp->thread->id.lwp ();
3698 /* If we already have a pending stop signal for this process, don't
3699 send another. */
3700 if (lwp->stop_expected)
3702 threads_debug_printf ("Have pending sigstop for lwp %d", pid);
3704 return;
3707 threads_debug_printf ("Sending sigstop to lwp %d", pid);
3709 lwp->stop_expected = 1;
3710 kill_lwp (pid, SIGSTOP);
3713 static void
3714 send_sigstop (thread_info *thread, lwp_info *except)
3716 struct lwp_info *lwp = get_thread_lwp (thread);
3718 /* Ignore EXCEPT. */
3719 if (lwp == except)
3720 return;
3722 if (lwp->stopped)
3723 return;
3725 send_sigstop (lwp);
3728 /* Increment the suspend count of an LWP, and stop it, if not stopped
3729 yet. */
3730 static void
3731 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3733 struct lwp_info *lwp = get_thread_lwp (thread);
3735 /* Ignore EXCEPT. */
3736 if (lwp == except)
3737 return;
3739 lwp_suspended_inc (lwp);
3741 send_sigstop (thread, except);
/* Mark LWP dead, with WSTAT as exit status pending to report later.
   If THREAD_EVENT is true, interpret WSTAT as a thread exit event
   instead of a process exit event.  This is meaningful for the leader
   thread, as we normally report a process-wide exit event when we see
   the leader exit, and a thread exit event when we see any other
   thread exit.  */

static void
mark_lwp_dead (struct lwp_info *lwp, int wstat, bool thread_event)
{
  /* Store the exit status for later.  */
  lwp->status_pending_p = 1;
  lwp->status_pending = wstat;

  /* Store in waitstatus as well, as there's nothing else to process
     for this event.  */
  if (WIFEXITED (wstat))
    {
      if (thread_event)
	lwp->waitstatus.set_thread_exited (WEXITSTATUS (wstat));
      else
	lwp->waitstatus.set_exited (WEXITSTATUS (wstat));
    }
  else if (WIFSIGNALED (wstat))
    {
      /* A signal-terminated LWP is only reported as a whole-process
	 event, never a thread exit (enforced by the assert).  */
      gdb_assert (!thread_event);
      lwp->waitstatus.set_signalled (gdb_signal_from_host (WTERMSIG (wstat)));
    }
  else
    gdb_assert_not_reached ("unknown status kind");

  /* Prevent trying to stop it.  */
  lwp->stopped = 1;

  /* No further stops are expected from a dead lwp.  */
  lwp->stop_expected = 0;
}
3782 /* Return true if LWP has exited already, and has a pending exit event
3783 to report to GDB. */
3785 static int
3786 lwp_is_marked_dead (struct lwp_info *lwp)
3788 return (lwp->status_pending_p
3789 && (WIFEXITED (lwp->status_pending)
3790 || WIFSIGNALED (lwp->status_pending)));
/* Wait until every LWP has reported its SIGSTOP stop, leaving all
   other events pending.  If the previously current thread dies while
   we wait, switch to no thread so GDB is not left pointing at a dead
   one.  */

void
linux_process_target::wait_for_sigstop ()
{
  thread_info *saved_thread;
  ptid_t saved_tid;
  int wstat;
  int ret;

  /* Remember the current thread by id, not pointer, since the thread
     object may be deleted while we pull events below.  */
  saved_thread = current_thread;
  if (saved_thread != NULL)
    saved_tid = saved_thread->id;
  else
    saved_tid = null_ptid; /* avoid bogus unused warning */

  scoped_restore_current_thread restore_thread;

  threads_debug_printf ("pulling events");

  /* Passing NULL_PTID as filter indicates we want all events to be
     left pending.  Eventually this returns when there are no
     unwaited-for children left.  */
  ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
  gdb_assert (ret == -1);

  if (saved_thread == NULL || mythread_alive (saved_tid))
    return;
  else
    {
      threads_debug_printf ("Previously current thread died.");

      /* We can't change the current inferior behind GDB's back,
	 otherwise, a subsequent command may apply to the wrong
	 process.  */
      restore_thread.dont_restore ();
      switch_to_thread (nullptr);
    }
}
/* Return true if THREAD is stopped inside a fast tracepoint jump pad
   while still collecting, and thus needs to be moved out before we
   can report a stop for it.  */

bool
linux_process_target::stuck_in_jump_pad (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* Callers must only pass non-suspended, stopped LWPs.  */
  if (lwp->suspended != 0)
    {
      internal_error ("LWP %ld is suspended, suspended=%d\n",
		      thread->id.lwp (), lwp->suspended);
    }
  gdb_assert (lwp->stopped);

  /* Allow debugging the jump pad, gdb_collect, etc..  */
  return (supports_fast_tracepoints ()
	  && agent_loaded_p ()
	  && (gdb_breakpoint_here (lwp->stop_pc)
	      || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
	      || thread->last_resume_kind == resume_step)
	  && (linux_fast_tracepoint_collecting (lwp, NULL)
	      != fast_tpoint_collect_result::not_collecting));
}
3853 void
3854 linux_process_target::move_out_of_jump_pad (thread_info *thread)
3856 struct lwp_info *lwp = get_thread_lwp (thread);
3857 int *wstat;
3859 if (lwp->suspended != 0)
3861 internal_error ("LWP %ld is suspended, suspended=%d\n",
3862 thread->id.lwp (), lwp->suspended);
3864 gdb_assert (lwp->stopped);
3866 /* For gdb_breakpoint_here. */
3867 scoped_restore_current_thread restore_thread;
3868 switch_to_thread (thread);
3870 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3872 /* Allow debugging the jump pad, gdb_collect, etc. */
3873 if (!gdb_breakpoint_here (lwp->stop_pc)
3874 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3875 && thread->last_resume_kind != resume_step
3876 && maybe_move_out_of_jump_pad (lwp, wstat))
3878 threads_debug_printf ("LWP %ld needs stabilizing (in jump pad)",
3879 thread->id.lwp ());
3881 if (wstat)
3883 lwp->status_pending_p = 0;
3884 enqueue_one_deferred_signal (lwp, wstat);
3886 threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad",
3887 WSTOPSIG (*wstat), thread->id.lwp ());
3890 resume_one_lwp (lwp, 0, 0, NULL);
3892 else
3893 lwp_suspended_inc (lwp);
3896 static bool
3897 lwp_running (thread_info *thread)
3899 struct lwp_info *lwp = get_thread_lwp (thread);
3901 if (lwp_is_marked_dead (lwp))
3902 return false;
3904 return !lwp->stopped;
3907 void
3908 linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
3910 /* Should not be called recursively. */
3911 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3913 THREADS_SCOPED_DEBUG_ENTER_EXIT;
3915 threads_debug_printf
3916 ("%s, except=%s", suspend ? "stop-and-suspend" : "stop",
3917 (except != NULL
3918 ? target_pid_to_str (except->thread->id).c_str ()
3919 : "none"));
3921 stopping_threads = (suspend
3922 ? STOPPING_AND_SUSPENDING_THREADS
3923 : STOPPING_THREADS);
3925 if (suspend)
3926 for_each_thread ([&] (thread_info *thread)
3928 suspend_and_send_sigstop (thread, except);
3930 else
3931 for_each_thread ([&] (thread_info *thread)
3933 send_sigstop (thread, except);
3936 wait_for_sigstop ();
3937 stopping_threads = NOT_STOPPING_THREADS;
3939 threads_debug_printf ("setting stopping_threads back to !stopping");
3942 /* Enqueue one signal in the chain of signals which need to be
3943 delivered to this process on next resume. */
3945 static void
3946 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
3948 lwp->pending_signals.emplace_back (signal);
3949 if (info == nullptr)
3950 memset (&lwp->pending_signals.back ().info, 0, sizeof (siginfo_t));
3951 else
3952 lwp->pending_signals.back ().info = *info;
3955 void
3956 linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
3958 thread_info *thread = lwp->thread;
3959 regcache *regcache = get_thread_regcache (thread);
3961 scoped_restore_current_thread restore_thread;
3963 switch_to_thread (thread);
3964 std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);
3966 for (CORE_ADDR pc : next_pcs)
3967 set_single_step_breakpoint (pc, current_thread->id);
3971 linux_process_target::single_step (lwp_info* lwp)
3973 int step = 0;
3975 if (supports_hardware_single_step ())
3977 step = 1;
3979 else if (supports_software_single_step ())
3981 install_software_single_step_breakpoints (lwp);
3982 step = 0;
3984 else
3985 threads_debug_printf ("stepping is not implemented on this target");
3987 return step;
3990 /* The signal can be delivered to the inferior if we are not trying to
3991 finish a fast tracepoint collect. Since signal can be delivered in
3992 the step-over, the program may go to signal handler and trap again
3993 after return from the signal handler. We can live with the spurious
3994 double traps. */
3996 static int
3997 lwp_signal_can_be_delivered (struct lwp_info *lwp)
3999 return (lwp->collecting_fast_tracepoint
4000 == fast_tpoint_collect_result::not_collecting);
/* Resume LWP, optionally single-stepping (STEP) and delivering SIGNAL
   with siginfo INFO.  May defer the signal, skip resuming entirely if
   a status is already pending, and throws (via perror_with_name) if
   the final ptrace resume request fails.  */

void
linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
					    int signal, siginfo_t *info)
{
  thread_info *thread = lwp->thread;
  int ptrace_request;

  /* Note that target description may not be initialised
     (proc->tdesc == NULL) at this point because the program hasn't
     stopped at the first instruction yet.  It means GDBserver skips
     the extra traps from the wrapper program (see option --wrapper).
     Code in this function that requires register access should be
     guarded by proc->tdesc == NULL or something else.  */

  if (lwp->stopped == 0)
    return;

  gdb_assert (lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);

  fast_tpoint_collect_result fast_tp_collecting
    = lwp->collecting_fast_tracepoint;

  gdb_assert (!stabilizing_threads
	      || (fast_tp_collecting
		  != fast_tpoint_collect_result::not_collecting));

  /* Cancel actions that rely on GDB not changing the PC (e.g., the
     user used the "jump" command, or "set $pc = foo").  */
  if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
    {
      /* Collecting 'while-stepping' actions doesn't make sense
	 anymore.  */
      release_while_stepping_state_list (thread);
    }

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if it can't be delivered to the
     inferior right now.  */
  if (signal != 0
      && (lwp->status_pending_p
	  || !lwp->pending_signals.empty ()
	  || !lwp_signal_can_be_delivered (lwp)))
    {
      enqueue_pending_signal (lwp, signal, info);

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  if (lwp->status_pending_p)
    {
      threads_debug_printf
	("Not resuming lwp %ld (%s, stop %s); has pending status",
	 thread->id.lwp (), step ? "step" : "continue",
	 lwp->stop_expected ? "expected" : "not expected");
      return;
    }

  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      threads_debug_printf ("  pending reinsert at 0x%s",
			    paddress (lwp->bp_reinsert));

      if (supports_hardware_single_step ())
	{
	  if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
	    {
	      if (step == 0)
		warning ("BAD - reinserting but not stepping.");
	      if (lwp->suspended)
		warning ("BAD - reinserting and suspended(%d).",
			 lwp->suspended);
	    }
	}

      step = maybe_hw_step (thread);
    }

  if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
    threads_debug_printf
      ("lwp %ld wants to get out of fast tracepoint jump pad "
       "(exit-jump-pad-bkpt)", thread->id.lwp ());

  else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
    {
      threads_debug_printf
	("lwp %ld wants to get out of fast tracepoint jump pad single-stepping",
	 thread->id.lwp ());

      if (supports_hardware_single_step ())
	step = 1;
      else
	{
	  internal_error ("moving out of jump pad single-stepping"
			  " not implemented on this target");
	}
    }

  /* If we have while-stepping actions in this thread set it stepping.
     If we have a signal to deliver, it may or may not be set to
     SIG_IGN, we don't know.  Assume so, and allow collecting
     while-stepping into a signal handler.  A possible smart thing to
     do would be to set an internal breakpoint at the signal return
     address, continue, and carry on catching this while-stepping
     action only when that breakpoint is hit.  A future
     enhancement.  */
  if (thread->while_stepping != NULL)
    {
      threads_debug_printf
	("lwp %ld has a while-stepping action -> forcing step.",
	 thread->id.lwp ());

      step = single_step (lwp);
    }

  /* Record the pre-resume PC, guarded against the no-tdesc startup
     window described at the top of this function.  */
  if (thread->process ()->tdesc != nullptr && low_supports_breakpoints ())
    {
      regcache *regcache = get_thread_regcache (current_thread);

      lwp->stop_pc = low_get_pc (regcache);

      threads_debug_printf ("  %s from pc 0x%lx", step ? "step" : "continue",
			    (long) lwp->stop_pc);
    }

  /* If we have pending signals, consume one if it can be delivered to
     the inferior.  */
  if (!lwp->pending_signals.empty () && lwp_signal_can_be_delivered (lwp))
    {
      const pending_signal &p_sig = lwp->pending_signals.front ();

      signal = p_sig.signal;
      if (p_sig.info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, thread->id.lwp (), (PTRACE_TYPE_ARG3) 0,
		&p_sig.info);

      lwp->pending_signals.pop_front ();
    }

  threads_debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)",
			thread->id.lwp (), step ? "step" : "continue", signal,
			lwp->stop_expected ? "expected" : "not expected");

  low_prepare_to_resume (lwp);

  regcache_invalidate_thread (thread);
  errno = 0;
  lwp->stepping = step;
  if (step)
    ptrace_request = PTRACE_SINGLESTEP;
  else if (gdb_catching_syscalls_p (lwp))
    ptrace_request = PTRACE_SYSCALL;
  else
    ptrace_request = PTRACE_CONT;
  ptrace (ptrace_request,
	  thread->id.lwp (),
	  (PTRACE_TYPE_ARG3) 0,
	  /* Coerce to a uintptr_t first to avoid potential gcc warning
	     of coercing an 8 byte integer to a 4 byte pointer.  */
	  (PTRACE_TYPE_ARG4) (uintptr_t) signal);

  if (errno)
    {
      int saved_errno = errno;

      threads_debug_printf ("ptrace errno = %d (%s)",
			    saved_errno, strerror (saved_errno));

      errno = saved_errno;
      perror_with_name ("resuming thread");
    }

  /* Successfully resumed.  Clear state that no longer makes sense,
     and mark the LWP as running.  Must not do this before resuming
     otherwise if that fails other code will be confused.  E.g., we'd
     later try to stop the LWP and hang forever waiting for a stop
     status.  Note that we must not throw after this is cleared,
     otherwise handle_zombie_lwp_error would get confused.  */
  lwp->stopped = 0;
  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
}
/* Default implementation of the low-target hook invoked just before
   an LWP is resumed; architecture backends override this.  */

void
linux_process_target::low_prepare_to_resume (lwp_info *lwp)
{
  /* Nop.  */
}
4204 /* Called when we try to resume a stopped LWP and that errors out. If
4205 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4206 or about to become), discard the error, clear any pending status
4207 the LWP may have, and return true (we'll collect the exit status
4208 soon enough). Otherwise, return false. */
4210 static int
4211 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4213 thread_info *thread = lp->thread;
4215 /* If we get an error after resuming the LWP successfully, we'd
4216 confuse !T state for the LWP being gone. */
4217 gdb_assert (lp->stopped);
4219 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4220 because even if ptrace failed with ESRCH, the tracee may be "not
4221 yet fully dead", but already refusing ptrace requests. In that
4222 case the tracee has 'R (Running)' state for a little bit
4223 (observed in Linux 3.18). See also the note on ESRCH in the
4224 ptrace(2) man page. Instead, check whether the LWP has any state
4225 other than ptrace-stopped. */
4227 /* Don't assume anything if /proc/PID/status can't be read. */
4228 if (linux_proc_pid_is_trace_stopped_nowarn (thread->id.lwp ()) == 0)
4230 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4231 lp->status_pending_p = 0;
4232 return 1;
4234 return 0;
/* Like resume_one_lwp_throw, but swallow the resume error when it
   merely means the LWP is already gone (zombie/exiting); rethrow
   otherwise.  */

void
linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
				      siginfo_t *info)
{
  try
    {
      resume_one_lwp_throw (lwp, step, signal, info);
    }
  catch (const gdb_exception_error &ex)
    {
      if (check_ptrace_stopped_lwp_gone (lwp))
	{
	  /* This could be because we tried to resume an LWP after its
	     leader exited.  Mark it as resumed, so we can collect an
	     exit event from it.  */
	  lwp->stopped = 0;
	  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
	}
      else
	throw;
    }
}
/* This function is called once per thread via for_each_thread.
   We look up which resume request applies to THREAD and mark it with a
   pointer to the appropriate resume request.

   This algorithm is O(threads * resume elements), but resume elements
   is small (and will remain small at least until GDB supports thread
   suspension).  */

static void
linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  for (int ndx = 0; ndx < n; ndx++)
    {
      ptid_t ptid = resume[ndx].thread;
      if (ptid == minus_one_ptid
	  || ptid == thread->id
	  /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
	     of PID'.  */
	  || (ptid.pid () == thread->id.pid ()
	      && (ptid.is_pid ()
		  || ptid.lwp () == -1)))
	{
	  /* Ignore (wildcard) stop requests for already-stopped (or
	     stopping) threads.  */
	  if (resume[ndx].kind == resume_stop
	      && thread->last_resume_kind == resume_stop)
	    {
	      threads_debug_printf
		("already %s LWP %ld at GDB's request",
		 (thread->last_status.kind () == TARGET_WAITKIND_STOPPED
		  ? "stopped" : "stopping"),
		 thread->id.lwp ());

	      continue;
	    }

	  /* Ignore (wildcard) resume requests for already-resumed
	     threads.  */
	  if (resume[ndx].kind != resume_stop
	      && thread->last_resume_kind != resume_stop)
	    {
	      threads_debug_printf
		("already %s LWP %ld at GDB's request",
		 (thread->last_resume_kind == resume_step
		  ? "stepping" : "continuing"),
		 thread->id.lwp ());
	      continue;
	    }

	  /* Don't let wildcard resumes resume fork/vfork/clone
	     children that GDB does not yet know are new children.  */
	  if (lwp->relative != NULL)
	    {
	      struct lwp_info *rel = lwp->relative;

	      if (rel->status_pending_p
		  && is_new_child_status (rel->waitstatus.kind ()))
		{
		  threads_debug_printf
		    ("not resuming LWP %ld: has queued stop reply",
		     thread->id.lwp ());
		  continue;
		}
	    }

	  /* If the thread has a pending event that has already been
	     reported to GDBserver core, but GDB has not pulled the
	     event out of the vStopped queue yet, likewise, ignore the
	     (wildcard) resume request.  */
	  if (in_queued_stop_replies (thread->id))
	    {
	      threads_debug_printf
		("not resuming LWP %ld: has queued stop reply",
		 thread->id.lwp ());
	      continue;
	    }

	  /* Found the applicable request; record it on the LWP.  */
	  lwp->resume = &resume[ndx];
	  thread->last_resume_kind = lwp->resume->kind;

	  lwp->step_range_start = lwp->resume->step_range_start;
	  lwp->step_range_end = lwp->resume->step_range_end;

	  /* If we had a deferred signal to report, dequeue one now.
	     This can happen if LWP gets more than one signal while
	     trying to get out of a jump pad.  */
	  if (lwp->stopped
	      && !lwp->status_pending_p
	      && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
	    {
	      lwp->status_pending_p = 1;

	      threads_debug_printf
		("Dequeueing deferred signal %d for LWP %ld, "
		 "leaving status pending.",
		 WSTOPSIG (lwp->status_pending),
		 thread->id.lwp ());
	    }

	  /* First matching request wins.  */
	  return;
	}
    }

  /* No resume action for this thread.  */
  lwp->resume = NULL;
}
4367 bool
4368 linux_process_target::resume_status_pending (thread_info *thread)
4370 struct lwp_info *lwp = get_thread_lwp (thread);
4372 /* LWPs which will not be resumed are not interesting, because
4373 we might not wait for them next time through linux_wait. */
4374 if (lwp->resume == NULL)
4375 return false;
4377 return thread_still_has_status_pending (thread);
/* Return true if THREAD is stopped at an internal breakpoint (or fast
   tracepoint jump) that gdbserver itself must step over before the
   thread can be resumed.  */

bool
linux_process_target::thread_needs_step_over (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  CORE_ADDR pc;

  /* GDBserver is skipping the extra traps from the wrapper program,
     don't have to do step over.  */
  if (thread->process ()->tdesc == nullptr)
    return false;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      threads_debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped",
			    thread->id.lwp ());
      return false;
    }

  if (thread->last_resume_kind == resume_stop)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, should remain stopped",
	 thread->id.lwp ());
      return false;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      threads_debug_printf ("Need step over [LWP %ld]? Ignoring, suspended",
			    thread->id.lwp ());
      return false;
    }

  if (lwp->status_pending_p)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, has pending status.",
	 thread->id.lwp ());
      return false;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Cancelling, PC was changed. "
	 "Old stop_pc was 0x%s, PC is now 0x%s", thread->id.lwp (),
	 paddress (lwp->stop_pc), paddress (pc));
      return false;
    }

  /* On software single step target, resume the inferior with signal
     rather than stepping over.  */
  if (supports_software_single_step ()
      && !lwp->pending_signals.empty ()
      && lwp_signal_can_be_delivered (lwp))
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, has pending signals.",
	 thread->id.lwp ());

      return false;
    }

  /* breakpoint_here / gdb_breakpoint_here consult the current
     thread's process, so switch temporarily.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  If the condition is being evaluated on the target's side
	 and it evaluate to false, step over this breakpoint as well.  */
      if (gdb_breakpoint_here (pc)
	  && gdb_condition_true_at_breakpoint (pc)
	  && gdb_no_commands_at_breakpoint (pc))
	{
	  threads_debug_printf ("Need step over [LWP %ld]? yes, but found"
				" GDB breakpoint at 0x%s; skipping step over",
				thread->id.lwp (), paddress (pc));

	  return false;
	}
      else
	{
	  threads_debug_printf ("Need step over [LWP %ld]? yes, "
				"found breakpoint at 0x%s",
				thread->id.lwp (), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_thread stops looking.  */
	  return true;
	}
    }

  threads_debug_printf
    ("Need step over [LWP %ld]? No, no breakpoint found at 0x%s",
     thread->id.lwp (), paddress (pc));

  return false;
}
4495 void
4496 linux_process_target::start_step_over (lwp_info *lwp)
4498 thread_info *thread = lwp->thread;
4499 CORE_ADDR pc;
4501 threads_debug_printf ("Starting step-over on LWP %ld. Stopping all threads",
4502 thread->id.lwp ());
4504 stop_all_lwps (1, lwp);
4506 if (lwp->suspended != 0)
4508 internal_error ("LWP %ld suspended=%d\n", thread->id.lwp (),
4509 lwp->suspended);
4512 threads_debug_printf ("Done stopping all threads for step-over.");
4514 /* Note, we should always reach here with an already adjusted PC,
4515 either by GDB (if we're resuming due to GDB's request), or by our
4516 caller, if we just finished handling an internal breakpoint GDB
4517 shouldn't care about. */
4518 pc = get_pc (lwp);
4520 bool step = false;
4522 scoped_restore_current_thread restore_thread;
4523 switch_to_thread (thread);
4525 lwp->bp_reinsert = pc;
4526 uninsert_breakpoints_at (pc);
4527 uninsert_fast_tracepoint_jumps_at (pc);
4529 step = single_step (lwp);
4532 resume_one_lwp (lwp, step, 0, NULL);
4534 /* Require next event from this LWP. */
4535 step_over_bkpt = thread->id;
4538 bool
4539 linux_process_target::finish_step_over (lwp_info *lwp)
4541 if (lwp->bp_reinsert != 0)
4543 scoped_restore_current_thread restore_thread;
4545 threads_debug_printf ("Finished step over.");
4547 switch_to_thread (lwp->thread);
4549 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4550 may be no breakpoint to reinsert there by now. */
4551 reinsert_breakpoints_at (lwp->bp_reinsert);
4552 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4554 lwp->bp_reinsert = 0;
4556 /* Delete any single-step breakpoints. No longer needed. We
4557 don't have to worry about other threads hitting this trap,
4558 and later not being able to explain it, because we were
4559 stepping over a breakpoint, and we hold all threads but
4560 LWP stopped while doing that. */
4561 if (!supports_hardware_single_step ())
4563 gdb_assert (has_single_step_breakpoints (current_thread));
4564 delete_single_step_breakpoints (current_thread);
4567 step_over_bkpt = null_ptid;
4568 return true;
4570 else
4571 return false;
4574 void
4575 linux_process_target::complete_ongoing_step_over ()
4577 if (step_over_bkpt != null_ptid)
4579 struct lwp_info *lwp;
4580 int wstat;
4581 int ret;
4583 threads_debug_printf ("detach: step over in progress, finish it first");
4585 /* Passing NULL_PTID as filter indicates we want all events to
4586 be left pending. Eventually this returns when there are no
4587 unwaited-for children left. */
4588 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
4589 __WALL);
4590 gdb_assert (ret == -1);
4592 lwp = find_lwp_pid (step_over_bkpt);
4593 if (lwp != NULL)
4595 finish_step_over (lwp);
4597 /* If we got our step SIGTRAP, don't leave it pending,
4598 otherwise we would report it to GDB as a spurious
4599 SIGTRAP. */
4600 gdb_assert (lwp->status_pending_p);
4601 if (WIFSTOPPED (lwp->status_pending)
4602 && WSTOPSIG (lwp->status_pending) == SIGTRAP)
4604 thread_info *thread = lwp->thread;
4605 if (thread->last_resume_kind != resume_step)
4607 threads_debug_printf ("detach: discard step-over SIGTRAP");
4609 lwp->status_pending_p = 0;
4610 lwp->status_pending = 0;
4611 resume_one_lwp (lwp, lwp->stepping, 0, NULL);
4613 else
4614 threads_debug_printf
4615 ("detach: resume_step, not discarding step-over SIGTRAP");
4618 step_over_bkpt = null_ptid;
4619 unsuspend_all_lwps (lwp);
4623 void
4624 linux_process_target::resume_one_thread (thread_info *thread,
4625 bool leave_all_stopped)
4627 struct lwp_info *lwp = get_thread_lwp (thread);
4628 int leave_pending;
4630 if (lwp->resume == NULL)
4631 return;
4633 if (lwp->resume->kind == resume_stop)
4635 threads_debug_printf ("resume_stop request for LWP %ld",
4636 thread->id.lwp ());
4638 if (!lwp->stopped)
4640 threads_debug_printf ("stopping LWP %ld", thread->id.lwp ());
4642 /* Stop the thread, and wait for the event asynchronously,
4643 through the event loop. */
4644 send_sigstop (lwp);
4646 else
4648 threads_debug_printf ("already stopped LWP %ld", thread->id.lwp ());
4650 /* The LWP may have been stopped in an internal event that
4651 was not meant to be notified back to GDB (e.g., gdbserver
4652 breakpoint), so we should be reporting a stop event in
4653 this case too. */
4655 /* If the thread already has a pending SIGSTOP, this is a
4656 no-op. Otherwise, something later will presumably resume
4657 the thread and this will cause it to cancel any pending
4658 operation, due to last_resume_kind == resume_stop. If
4659 the thread already has a pending status to report, we
4660 will still report it the next time we wait - see
4661 status_pending_p_callback. */
4663 /* If we already have a pending signal to report, then
4664 there's no need to queue a SIGSTOP, as this means we're
4665 midway through moving the LWP out of the jumppad, and we
4666 will report the pending signal as soon as that is
4667 finished. */
4668 if (lwp->pending_signals_to_report.empty ())
4669 send_sigstop (lwp);
4672 /* For stop requests, we're done. */
4673 lwp->resume = NULL;
4674 thread->last_status.set_ignore ();
4675 return;
4678 /* If this thread which is about to be resumed has a pending status,
4679 then don't resume it - we can just report the pending status.
4680 Likewise if it is suspended, because e.g., another thread is
4681 stepping past a breakpoint. Make sure to queue any signals that
4682 would otherwise be sent. In all-stop mode, we do this decision
4683 based on if *any* thread has a pending status. If there's a
4684 thread that needs the step-over-breakpoint dance, then don't
4685 resume any other thread but that particular one. */
4686 leave_pending = (lwp->suspended
4687 || lwp->status_pending_p
4688 || leave_all_stopped);
4690 /* If we have a new signal, enqueue the signal. */
4691 if (lwp->resume->sig != 0)
4693 siginfo_t info, *info_p;
4695 /* If this is the same signal we were previously stopped by,
4696 make sure to queue its siginfo. */
4697 if (WIFSTOPPED (lwp->last_status)
4698 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
4699 && ptrace (PTRACE_GETSIGINFO, thread->id.lwp (),
4700 (PTRACE_TYPE_ARG3) 0, &info) == 0)
4701 info_p = &info;
4702 else
4703 info_p = NULL;
4705 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
4708 if (!leave_pending)
4710 threads_debug_printf ("resuming LWP %ld", thread->id.lwp ());
4712 proceed_one_lwp (thread, NULL);
4714 else
4715 threads_debug_printf ("leaving LWP %ld stopped", thread->id.lwp ());
4717 thread->last_status.set_ignore ();
4718 lwp->resume = NULL;
4721 void
4722 linux_process_target::resume (thread_resume *resume_info, size_t n)
4724 thread_info *need_step_over = NULL;
4726 THREADS_SCOPED_DEBUG_ENTER_EXIT;
4728 for_each_thread ([&] (thread_info *thread)
4730 linux_set_resume_request (thread, resume_info, n);
4733 /* If there is a thread which would otherwise be resumed, which has
4734 a pending status, then don't resume any threads - we can just
4735 report the pending status. Make sure to queue any signals that
4736 would otherwise be sent. In non-stop mode, we'll apply this
4737 logic to each thread individually. We consume all pending events
4738 before considering to start a step-over (in all-stop). */
4739 bool any_pending = false;
4740 if (!non_stop)
4741 any_pending = find_thread ([this] (thread_info *thread)
4743 return resume_status_pending (thread);
4744 }) != nullptr;
4746 /* If there is a thread which would otherwise be resumed, which is
4747 stopped at a breakpoint that needs stepping over, then don't
4748 resume any threads - have it step over the breakpoint with all
4749 other threads stopped, then resume all threads again. Make sure
4750 to queue any signals that would otherwise be delivered or
4751 queued. */
4752 if (!any_pending && low_supports_breakpoints ())
4753 need_step_over = find_thread ([this] (thread_info *thread)
4755 return thread_needs_step_over (thread);
4758 bool leave_all_stopped = (need_step_over != NULL || any_pending);
4760 if (need_step_over != NULL)
4761 threads_debug_printf ("Not resuming all, need step over");
4762 else if (any_pending)
4763 threads_debug_printf ("Not resuming, all-stop and found "
4764 "an LWP with pending status");
4765 else
4766 threads_debug_printf ("Resuming, no pending status or step over needed");
4768 /* Even if we're leaving threads stopped, queue all signals we'd
4769 otherwise deliver. */
4770 for_each_thread ([&] (thread_info *thread)
4772 resume_one_thread (thread, leave_all_stopped);
4775 if (need_step_over)
4776 start_step_over (get_thread_lwp (need_step_over));
4778 /* We may have events that were pending that can/should be sent to
4779 the client now. Trigger a linux_wait call. */
4780 if (target_is_async_p ())
4781 async_file_mark ();
4784 void
4785 linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
4787 struct lwp_info *lwp = get_thread_lwp (thread);
4788 int step;
4790 if (lwp == except)
4791 return;
4793 threads_debug_printf ("lwp %ld", thread->id.lwp ());
4795 if (!lwp->stopped)
4797 threads_debug_printf (" LWP %ld already running", thread->id.lwp ());
4798 return;
4801 if (thread->last_resume_kind == resume_stop
4802 && thread->last_status.kind () != TARGET_WAITKIND_IGNORE)
4804 threads_debug_printf (" client wants LWP to remain %ld stopped",
4805 thread->id.lwp ());
4806 return;
4809 if (lwp->status_pending_p)
4811 threads_debug_printf (" LWP %ld has pending status, leaving stopped",
4812 thread->id.lwp ());
4813 return;
4816 gdb_assert (lwp->suspended >= 0);
4818 if (lwp->suspended)
4820 threads_debug_printf (" LWP %ld is suspended", thread->id.lwp ());
4821 return;
4824 if (thread->last_resume_kind == resume_stop
4825 && lwp->pending_signals_to_report.empty ()
4826 && (lwp->collecting_fast_tracepoint
4827 == fast_tpoint_collect_result::not_collecting))
4829 /* We haven't reported this LWP as stopped yet (otherwise, the
4830 last_status.kind check above would catch it, and we wouldn't
4831 reach here. This LWP may have been momentarily paused by a
4832 stop_all_lwps call while handling for example, another LWP's
4833 step-over. In that case, the pending expected SIGSTOP signal
4834 that was queued at vCont;t handling time will have already
4835 been consumed by wait_for_sigstop, and so we need to requeue
4836 another one here. Note that if the LWP already has a SIGSTOP
4837 pending, this is a no-op. */
4839 threads_debug_printf
4840 ("Client wants LWP %ld to stop. Making sure it has a SIGSTOP pending",
4841 thread->id.lwp ());
4843 send_sigstop (lwp);
4846 if (thread->last_resume_kind == resume_step)
4848 threads_debug_printf (" stepping LWP %ld, client wants it stepping",
4849 thread->id.lwp ());
4851 /* If resume_step is requested by GDB, install single-step
4852 breakpoints when the thread is about to be actually resumed if
4853 the single-step breakpoints weren't removed. */
4854 if (supports_software_single_step ()
4855 && !has_single_step_breakpoints (thread))
4856 install_software_single_step_breakpoints (lwp);
4858 step = maybe_hw_step (thread);
4860 else if (lwp->bp_reinsert != 0)
4862 threads_debug_printf (" stepping LWP %ld, reinsert set",
4863 thread->id.lwp ());
4865 step = maybe_hw_step (thread);
4867 else
4868 step = 0;
4870 resume_one_lwp (lwp, step, 0, NULL);
4873 void
4874 linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
4875 lwp_info *except)
4877 struct lwp_info *lwp = get_thread_lwp (thread);
4879 if (lwp == except)
4880 return;
4882 lwp_suspended_decr (lwp);
4884 proceed_one_lwp (thread, except);
4887 void
4888 linux_process_target::proceed_all_lwps ()
4890 thread_info *need_step_over;
4892 /* If there is a thread which would otherwise be resumed, which is
4893 stopped at a breakpoint that needs stepping over, then don't
4894 resume any threads - have it step over the breakpoint with all
4895 other threads stopped, then resume all threads again. */
4897 if (low_supports_breakpoints ())
4899 need_step_over = find_thread ([this] (thread_info *thread)
4901 return thread_needs_step_over (thread);
4904 if (need_step_over != NULL)
4906 threads_debug_printf ("found thread %ld needing a step-over",
4907 need_step_over->id.lwp ());
4909 start_step_over (get_thread_lwp (need_step_over));
4910 return;
4914 threads_debug_printf ("Proceeding, no step-over needed");
4916 for_each_thread ([this] (thread_info *thread)
4918 proceed_one_lwp (thread, NULL);
4922 void
4923 linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
4925 THREADS_SCOPED_DEBUG_ENTER_EXIT;
4927 if (except)
4928 threads_debug_printf ("except=(LWP %ld)", except->thread->id.lwp ());
4929 else
4930 threads_debug_printf ("except=nullptr");
4932 if (unsuspend)
4933 for_each_thread ([&] (thread_info *thread)
4935 unsuspend_and_proceed_one_lwp (thread, except);
4937 else
4938 for_each_thread ([&] (thread_info *thread)
4940 proceed_one_lwp (thread, except);
4945 #ifdef HAVE_LINUX_REGSETS
4947 #define use_linux_regsets 1
4949 /* Returns true if REGSET has been disabled. */
4951 static int
4952 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4954 return (info->disabled_regsets != NULL
4955 && info->disabled_regsets[regset - info->regsets]);
4958 /* Disable REGSET. */
4960 static void
4961 disable_regset (struct regsets_info *info, struct regset_info *regset)
4963 int dr_offset;
4965 dr_offset = regset - info->regsets;
4966 if (info->disabled_regsets == NULL)
4967 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
4968 info->disabled_regsets[dr_offset] = 1;
4971 static int
4972 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4973 struct regcache *regcache)
4975 struct regset_info *regset;
4976 int saw_general_regs = 0;
4977 int pid = current_thread->id.lwp ();
4978 struct iovec iov;
4980 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4982 void *buf, *data;
4983 int nt_type, res;
4985 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4986 continue;
4988 buf = xmalloc (regset->size);
4990 nt_type = regset->nt_type;
4991 if (nt_type)
4993 iov.iov_base = buf;
4994 iov.iov_len = regset->size;
4995 data = (void *) &iov;
4997 else
4998 data = buf;
5000 #ifndef __sparc__
5001 res = ptrace (regset->get_request, pid,
5002 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5003 #else
5004 res = ptrace (regset->get_request, pid, data, nt_type);
5005 #endif
5006 if (res < 0)
5008 if (errno == EIO
5009 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5011 /* If we get EIO on a regset, or an EINVAL and the regset is
5012 optional, do not try it again for this process mode. */
5013 disable_regset (regsets_info, regset);
5015 else if (errno == ENODATA)
5017 /* ENODATA may be returned if the regset is currently
5018 not "active". This can happen in normal operation,
5019 so suppress the warning in this case. */
5021 else if (errno == ESRCH)
5023 /* At this point, ESRCH should mean the process is
5024 already gone, in which case we simply ignore attempts
5025 to read its registers. */
5027 else
5029 char s[256];
5030 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5031 pid);
5032 perror (s);
5035 else
5037 if (regset->type == GENERAL_REGS)
5038 saw_general_regs = 1;
5039 regset->store_function (regcache, buf);
5041 free (buf);
5043 if (saw_general_regs)
5044 return 0;
5045 else
5046 return 1;
5049 static int
5050 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5051 struct regcache *regcache)
5053 struct regset_info *regset;
5054 int saw_general_regs = 0;
5055 int pid = current_thread->id.lwp ();
5056 struct iovec iov;
5058 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5060 void *buf, *data;
5061 int nt_type, res;
5063 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5064 || regset->fill_function == NULL)
5065 continue;
5067 buf = xmalloc (regset->size);
5069 /* First fill the buffer with the current register set contents,
5070 in case there are any items in the kernel's regset that are
5071 not in gdbserver's regcache. */
5073 nt_type = regset->nt_type;
5074 if (nt_type)
5076 iov.iov_base = buf;
5077 iov.iov_len = regset->size;
5078 data = (void *) &iov;
5080 else
5081 data = buf;
5083 #ifndef __sparc__
5084 res = ptrace (regset->get_request, pid,
5085 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5086 #else
5087 res = ptrace (regset->get_request, pid, data, nt_type);
5088 #endif
5090 if (res == 0)
5092 /* Then overlay our cached registers on that. */
5093 regset->fill_function (regcache, buf);
5095 /* Only now do we write the register set. */
5096 #ifndef __sparc__
5097 res = ptrace (regset->set_request, pid,
5098 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5099 #else
5100 res = ptrace (regset->set_request, pid, data, nt_type);
5101 #endif
5104 if (res < 0)
5106 if (errno == EIO
5107 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5109 /* If we get EIO on a regset, or an EINVAL and the regset is
5110 optional, do not try it again for this process mode. */
5111 disable_regset (regsets_info, regset);
5113 else if (errno == ESRCH)
5115 /* At this point, ESRCH should mean the process is
5116 already gone, in which case we simply ignore attempts
5117 to change its registers. See also the related
5118 comment in resume_one_lwp. */
5119 free (buf);
5120 return 0;
5122 else
5124 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5127 else if (regset->type == GENERAL_REGS)
5128 saw_general_regs = 1;
5129 free (buf);
5131 if (saw_general_regs)
5132 return 0;
5133 else
5134 return 1;
5137 #else /* !HAVE_LINUX_REGSETS */
5139 #define use_linux_regsets 0
5140 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5141 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5143 #endif
5145 /* Return 1 if register REGNO is supported by one of the regset ptrace
5146 calls or 0 if it has to be transferred individually. */
5148 static int
5149 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5151 unsigned char mask = 1 << (regno % 8);
5152 size_t index = regno / 8;
5154 return (use_linux_regsets
5155 && (regs_info->regset_bitmap == NULL
5156 || (regs_info->regset_bitmap[index] & mask) != 0));
5159 #ifdef HAVE_LINUX_USRREGS
5161 static int
5162 register_addr (const struct usrregs_info *usrregs, int regnum)
5164 int addr;
5166 if (regnum < 0 || regnum >= usrregs->num_regs)
5167 error ("Invalid register number %d.", regnum);
5169 addr = usrregs->regmap[regnum];
5171 return addr;
5175 void
5176 linux_process_target::fetch_register (const usrregs_info *usrregs,
5177 regcache *regcache, int regno)
5179 CORE_ADDR regaddr;
5180 int i, size;
5181 char *buf;
5183 if (regno >= usrregs->num_regs)
5184 return;
5185 if (low_cannot_fetch_register (regno))
5186 return;
5188 regaddr = register_addr (usrregs, regno);
5189 if (regaddr == -1)
5190 return;
5192 size = ((register_size (regcache->tdesc, regno)
5193 + sizeof (PTRACE_XFER_TYPE) - 1)
5194 & -sizeof (PTRACE_XFER_TYPE));
5195 buf = (char *) alloca (size);
5197 int pid = current_thread->id.lwp ();
5199 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5201 errno = 0;
5202 *(PTRACE_XFER_TYPE *) (buf + i) =
5203 ptrace (PTRACE_PEEKUSER, pid,
5204 /* Coerce to a uintptr_t first to avoid potential gcc warning
5205 of coercing an 8 byte integer to a 4 byte pointer. */
5206 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5207 regaddr += sizeof (PTRACE_XFER_TYPE);
5208 if (errno != 0)
5210 /* Mark register REGNO unavailable. */
5211 supply_register (regcache, regno, NULL);
5212 return;
5216 low_supply_ptrace_register (regcache, regno, buf);
5219 void
5220 linux_process_target::store_register (const usrregs_info *usrregs,
5221 regcache *regcache, int regno)
5223 CORE_ADDR regaddr;
5224 int i, size;
5225 char *buf;
5227 if (regno >= usrregs->num_regs)
5228 return;
5229 if (low_cannot_store_register (regno))
5230 return;
5232 regaddr = register_addr (usrregs, regno);
5233 if (regaddr == -1)
5234 return;
5236 size = ((register_size (regcache->tdesc, regno)
5237 + sizeof (PTRACE_XFER_TYPE) - 1)
5238 & -sizeof (PTRACE_XFER_TYPE));
5239 buf = (char *) alloca (size);
5240 memset (buf, 0, size);
5242 low_collect_ptrace_register (regcache, regno, buf);
5244 int pid = current_thread->id.lwp ();
5246 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5248 errno = 0;
5249 ptrace (PTRACE_POKEUSER, pid,
5250 /* Coerce to a uintptr_t first to avoid potential gcc warning
5251 about coercing an 8 byte integer to a 4 byte pointer. */
5252 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5253 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5254 if (errno != 0)
5256 /* At this point, ESRCH should mean the process is
5257 already gone, in which case we simply ignore attempts
5258 to change its registers. See also the related
5259 comment in resume_one_lwp. */
5260 if (errno == ESRCH)
5261 return;
5264 if (!low_cannot_store_register (regno))
5265 error ("writing register %d: %s", regno, safe_strerror (errno));
5267 regaddr += sizeof (PTRACE_XFER_TYPE);
5270 #endif /* HAVE_LINUX_USRREGS */
5272 void
5273 linux_process_target::low_collect_ptrace_register (regcache *regcache,
5274 int regno, char *buf)
5276 collect_register (regcache, regno, buf);
5279 void
5280 linux_process_target::low_supply_ptrace_register (regcache *regcache,
5281 int regno, const char *buf)
5283 supply_register (regcache, regno, buf);
5286 void
5287 linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5288 regcache *regcache,
5289 int regno, int all)
5291 #ifdef HAVE_LINUX_USRREGS
5292 struct usrregs_info *usr = regs_info->usrregs;
5294 if (regno == -1)
5296 for (regno = 0; regno < usr->num_regs; regno++)
5297 if (all || !linux_register_in_regsets (regs_info, regno))
5298 fetch_register (usr, regcache, regno);
5300 else
5301 fetch_register (usr, regcache, regno);
5302 #endif
5305 void
5306 linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5307 regcache *regcache,
5308 int regno, int all)
5310 #ifdef HAVE_LINUX_USRREGS
5311 struct usrregs_info *usr = regs_info->usrregs;
5313 if (regno == -1)
5315 for (regno = 0; regno < usr->num_regs; regno++)
5316 if (all || !linux_register_in_regsets (regs_info, regno))
5317 store_register (usr, regcache, regno);
5319 else
5320 store_register (usr, regcache, regno);
5321 #endif
5324 void
5325 linux_process_target::fetch_registers (regcache *regcache, int regno)
5327 int use_regsets;
5328 int all = 0;
5329 const regs_info *regs_info = get_regs_info ();
5331 if (regno == -1)
5333 if (regs_info->usrregs != NULL)
5334 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5335 low_fetch_register (regcache, regno);
5337 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5338 if (regs_info->usrregs != NULL)
5339 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5341 else
5343 if (low_fetch_register (regcache, regno))
5344 return;
5346 use_regsets = linux_register_in_regsets (regs_info, regno);
5347 if (use_regsets)
5348 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5349 regcache);
5350 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5351 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5355 void
5356 linux_process_target::store_registers (regcache *regcache, int regno)
5358 int use_regsets;
5359 int all = 0;
5360 const regs_info *regs_info = get_regs_info ();
5362 if (regno == -1)
5364 all = regsets_store_inferior_registers (regs_info->regsets_info,
5365 regcache);
5366 if (regs_info->usrregs != NULL)
5367 usr_store_inferior_registers (regs_info, regcache, regno, all);
5369 else
5371 use_regsets = linux_register_in_regsets (regs_info, regno);
5372 if (use_regsets)
5373 all = regsets_store_inferior_registers (regs_info->regsets_info,
5374 regcache);
5375 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5376 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5380 bool
5381 linux_process_target::low_fetch_register (regcache *regcache, int regno)
5383 return false;
5386 /* A wrapper for the read_memory target op. */
5388 static int
5389 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5391 return the_target->read_memory (memaddr, myaddr, len);
5395 /* Helper for read_memory/write_memory using /proc/PID/mem. Because
5396 we can use a single read/write call, this can be much more
5397 efficient than banging away at PTRACE_PEEKTEXT. Also, unlike
5398 PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running threads.
5399 One an only one of READBUF and WRITEBUF is non-null. If READBUF is
5400 not null, then we're reading, otherwise we're writing. */
5402 static int
5403 proc_xfer_memory (CORE_ADDR memaddr, unsigned char *readbuf,
5404 const gdb_byte *writebuf, int len)
5406 gdb_assert ((readbuf == nullptr) != (writebuf == nullptr));
5408 process_info *proc = current_process ();
5410 int fd = proc->priv->mem_fd;
5411 if (fd == -1)
5412 return EIO;
5414 while (len > 0)
5416 int bytes;
5418 /* Use pread64/pwrite64 if available, since they save a syscall
5419 and can handle 64-bit offsets even on 32-bit platforms (for
5420 instance, SPARC debugging a SPARC64 application). But only
5421 use them if the offset isn't so high that when cast to off_t
5422 it'd be negative, as seen on SPARC64. pread64/pwrite64
5423 outright reject such offsets. lseek does not. */
5424 #ifdef HAVE_PREAD64
5425 if ((off_t) memaddr >= 0)
5426 bytes = (readbuf != nullptr
5427 ? pread64 (fd, readbuf, len, memaddr)
5428 : pwrite64 (fd, writebuf, len, memaddr));
5429 else
5430 #endif
5432 bytes = -1;
5433 if (lseek (fd, memaddr, SEEK_SET) != -1)
5434 bytes = (readbuf != nullptr
5435 ? read (fd, readbuf, len)
5436 : write (fd, writebuf, len));
5439 if (bytes < 0)
5440 return errno;
5441 else if (bytes == 0)
5443 /* EOF means the address space is gone, the whole process
5444 exited or execed. */
5445 return EIO;
5448 memaddr += bytes;
5449 if (readbuf != nullptr)
5450 readbuf += bytes;
5451 else
5452 writebuf += bytes;
5453 len -= bytes;
5456 return 0;
5460 linux_process_target::read_memory (CORE_ADDR memaddr,
5461 unsigned char *myaddr, int len)
5463 return proc_xfer_memory (memaddr, myaddr, nullptr, len);
5466 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5467 memory at MEMADDR. On failure (cannot write to the inferior)
5468 returns the value of errno. Always succeeds if LEN is zero. */
5471 linux_process_target::write_memory (CORE_ADDR memaddr,
5472 const unsigned char *myaddr, int len)
5474 if (debug_threads)
5476 /* Dump up to four bytes. */
5477 char str[4 * 2 + 1];
5478 char *p = str;
5479 int dump = len < 4 ? len : 4;
5481 for (int i = 0; i < dump; i++)
5483 sprintf (p, "%02x", myaddr[i]);
5484 p += 2;
5486 *p = '\0';
5488 threads_debug_printf ("Writing %s to 0x%08lx in process %d",
5489 str, (long) memaddr, current_process ()->pid);
5492 return proc_xfer_memory (memaddr, nullptr, myaddr, len);
5495 void
5496 linux_process_target::look_up_symbols ()
5498 #ifdef USE_THREAD_DB
5499 struct process_info *proc = current_process ();
5501 if (proc->priv->thread_db != NULL)
5502 return;
5504 thread_db_init ();
5505 #endif
5508 void
5509 linux_process_target::request_interrupt ()
5511 /* Send a SIGINT to the process group. This acts just like the user
5512 typed a ^C on the controlling terminal. */
5513 int res = ::kill (-signal_pid, SIGINT);
5514 if (res == -1)
5515 warning (_("Sending SIGINT to process group of pid %ld failed: %s"),
5516 signal_pid, safe_strerror (errno));
5519 bool
5520 linux_process_target::supports_read_auxv ()
5522 return true;
5525 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5526 to debugger memory starting at MYADDR. */
5529 linux_process_target::read_auxv (int pid, CORE_ADDR offset,
5530 unsigned char *myaddr, unsigned int len)
5532 char filename[PATH_MAX];
5533 int fd, n;
5535 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5537 fd = open (filename, O_RDONLY);
5538 if (fd < 0)
5539 return -1;
5541 if (offset != (CORE_ADDR) 0
5542 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5543 n = -1;
5544 else
5545 n = read (fd, myaddr, len);
5547 close (fd);
5549 return n;
5553 linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5554 int size, raw_breakpoint *bp)
5556 if (type == raw_bkpt_type_sw)
5557 return insert_memory_breakpoint (bp);
5558 else
5559 return low_insert_point (type, addr, size, bp);
5563 linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
5564 int size, raw_breakpoint *bp)
5566 /* Unsupported (see target.h). */
5567 return 1;
5571 linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5572 int size, raw_breakpoint *bp)
5574 if (type == raw_bkpt_type_sw)
5575 return remove_memory_breakpoint (bp);
5576 else
5577 return low_remove_point (type, addr, size, bp);
5581 linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
5582 int size, raw_breakpoint *bp)
5584 /* Unsupported (see target.h). */
5585 return 1;
5588 /* Implement the stopped_by_sw_breakpoint target_ops
5589 method. */
5591 bool
5592 linux_process_target::stopped_by_sw_breakpoint ()
5594 struct lwp_info *lwp = get_thread_lwp (current_thread);
5596 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5599 /* Implement the supports_stopped_by_sw_breakpoint target_ops
5600 method. */
5602 bool
5603 linux_process_target::supports_stopped_by_sw_breakpoint ()
5605 return true;
5608 /* Implement the stopped_by_hw_breakpoint target_ops
5609 method. */
5611 bool
5612 linux_process_target::stopped_by_hw_breakpoint ()
5614 struct lwp_info *lwp = get_thread_lwp (current_thread);
5616 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5619 /* Implement the supports_stopped_by_hw_breakpoint target_ops
5620 method. */
5622 bool
5623 linux_process_target::supports_stopped_by_hw_breakpoint ()
5625 return true;
5628 /* Implement the supports_hardware_single_step target_ops method. */
5630 bool
5631 linux_process_target::supports_hardware_single_step ()
5633 return true;
5636 bool
5637 linux_process_target::stopped_by_watchpoint ()
5639 struct lwp_info *lwp = get_thread_lwp (current_thread);
5641 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5644 CORE_ADDR
5645 linux_process_target::stopped_data_address ()
5647 struct lwp_info *lwp = get_thread_lwp (current_thread);
5649 return lwp->stopped_data_address;
5652 /* This is only used for targets that define PT_TEXT_ADDR,
5653 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5654 the target has different ways of acquiring this information, like
5655 loadmaps. */
5657 bool
5658 linux_process_target::supports_read_offsets ()
5660 #ifdef SUPPORTS_READ_OFFSETS
5661 return true;
5662 #else
5663 return false;
5664 #endif
5667 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5668 to tell gdb about. */
5671 linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5673 #ifdef SUPPORTS_READ_OFFSETS
5674 unsigned long text, text_end, data;
5675 int pid = current_thread->id.lwp ();
5677 errno = 0;
5679 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5680 (PTRACE_TYPE_ARG4) 0);
5681 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5682 (PTRACE_TYPE_ARG4) 0);
5683 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5684 (PTRACE_TYPE_ARG4) 0);
5686 if (errno == 0)
5688 /* Both text and data offsets produced at compile-time (and so
5689 used by gdb) are relative to the beginning of the program,
5690 with the data segment immediately following the text segment.
5691 However, the actual runtime layout in memory may put the data
5692 somewhere else, so when we send gdb a data base-address, we
5693 use the real data base address and subtract the compile-time
5694 data base-address from it (which is just the length of the
5695 text segment). BSS immediately follows data in both
5696 cases. */
5697 *text_p = text;
5698 *data_p = data - (text_end - text);
5700 return 1;
5702 return 0;
5703 #else
5704 gdb_assert_not_reached ("target op read_offsets not supported");
5705 #endif
5708 bool
5709 linux_process_target::supports_get_tls_address ()
5711 #ifdef USE_THREAD_DB
5712 return true;
5713 #else
5714 return false;
5715 #endif
5719 linux_process_target::get_tls_address (thread_info *thread,
5720 CORE_ADDR offset,
5721 CORE_ADDR load_module,
5722 CORE_ADDR *address)
5724 #ifdef USE_THREAD_DB
5725 return thread_db_get_tls_address (thread, offset, load_module, address);
5726 #else
5727 return -1;
5728 #endif
5731 bool
5732 linux_process_target::supports_qxfer_osdata ()
5734 return true;
5738 linux_process_target::qxfer_osdata (const char *annex,
5739 unsigned char *readbuf,
5740 unsigned const char *writebuf,
5741 CORE_ADDR offset, int len)
5743 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5746 void
5747 linux_process_target::siginfo_fixup (siginfo_t *siginfo,
5748 gdb_byte *inf_siginfo, int direction)
5750 bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);
5752 /* If there was no callback, or the callback didn't do anything,
5753 then just do a straight memcpy. */
5754 if (!done)
5756 if (direction == 1)
5757 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5758 else
5759 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5763 bool
5764 linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
5765 int direction)
5767 return false;
5770 bool
5771 linux_process_target::supports_qxfer_siginfo ()
5773 return true;
5777 linux_process_target::qxfer_siginfo (const char *annex,
5778 unsigned char *readbuf,
5779 unsigned const char *writebuf,
5780 CORE_ADDR offset, int len)
5782 siginfo_t siginfo;
5783 gdb_byte inf_siginfo[sizeof (siginfo_t)];
5785 if (current_thread == NULL)
5786 return -1;
5788 int pid = current_thread->id.lwp ();
5790 threads_debug_printf ("%s siginfo for lwp %d.",
5791 readbuf != NULL ? "Reading" : "Writing",
5792 pid);
5794 if (offset >= sizeof (siginfo))
5795 return -1;
5797 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5798 return -1;
5800 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5801 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5802 inferior with a 64-bit GDBSERVER should look the same as debugging it
5803 with a 32-bit GDBSERVER, we need to convert it. */
5804 siginfo_fixup (&siginfo, inf_siginfo, 0);
5806 if (offset + len > sizeof (siginfo))
5807 len = sizeof (siginfo) - offset;
5809 if (readbuf != NULL)
5810 memcpy (readbuf, inf_siginfo + offset, len);
5811 else
5813 memcpy (inf_siginfo + offset, writebuf, len);
5815 /* Convert back to ptrace layout before flushing it out. */
5816 siginfo_fixup (&siginfo, inf_siginfo, 1);
5818 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5819 return -1;
5822 return len;
5825 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
5826 so we notice when children change state; as the handler for the
5827 sigsuspend in my_waitpid. */
5829 static void
5830 sigchld_handler (int signo)
5832 int old_errno = errno;
5834 if (debug_threads)
5838 /* Use the async signal safe debug function. */
5839 if (debug_write ("sigchld_handler\n",
5840 sizeof ("sigchld_handler\n") - 1) < 0)
5841 break; /* just ignore */
5842 } while (0);
5845 if (target_is_async_p ())
5846 async_file_mark (); /* trigger a linux_wait */
5848 errno = old_errno;
5851 bool
5852 linux_process_target::supports_non_stop ()
5854 return true;
/* Enable or disable async (non-stop) operation, registering or
   unregistering the SIGCHLD event pipe with the event loop.  Returns
   the previous async state.  */

bool
linux_process_target::async (bool enable)
{
  bool previous = target_is_async_p ();

  threads_debug_printf ("async (%d), previous=%d",
			enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      /* Block SIGCHLD while the event pipe is being set up or torn
	 down, so sigchld_handler cannot observe it half-initialized.  */
      gdb_sigmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (!linux_event_pipe.open_pipe ())
	    {
	      gdb_sigmask (SIG_UNBLOCK, &mask, NULL);

	      warning ("creating event pipe failed.");
	      return previous;
	    }

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe.event_fd (),
			    handle_target_event, NULL,
			    "linux-low");

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe.event_fd ());

	  linux_event_pipe.close_pipe ();
	}

      gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
5905 linux_process_target::start_non_stop (bool nonstop)
5907 /* Register or unregister from event-loop accordingly. */
5908 target_async (nonstop);
5910 if (target_is_async_p () != (nonstop != false))
5911 return -1;
5913 return 0;
5916 bool
5917 linux_process_target::supports_multi_process ()
5919 return true;
5922 /* Check if fork events are supported. */
5924 bool
5925 linux_process_target::supports_fork_events ()
5927 return true;
5930 /* Check if vfork events are supported. */
5932 bool
5933 linux_process_target::supports_vfork_events ()
5935 return true;
5938 /* Return the set of supported thread options. */
5940 gdb_thread_options
5941 linux_process_target::supported_thread_options ()
5943 return GDB_THREAD_OPTION_CLONE | GDB_THREAD_OPTION_EXIT;
5946 /* Check if exec events are supported. */
5948 bool
5949 linux_process_target::supports_exec_events ()
5951 return true;
5954 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5955 ptrace flags for all inferiors. This is in case the new GDB connection
5956 doesn't support the same set of events that the previous one did. */
5958 void
5959 linux_process_target::handle_new_gdb_connection ()
5961 /* Request that all the lwps reset their ptrace options. */
5962 for_each_thread ([] (thread_info *thread)
5964 struct lwp_info *lwp = get_thread_lwp (thread);
5966 if (!lwp->stopped)
5968 /* Stop the lwp so we can modify its ptrace options. */
5969 lwp->must_set_ptrace_flags = 1;
5970 linux_stop_lwp (lwp);
5972 else
5974 /* Already stopped; go ahead and set the ptrace options. */
5975 process_info *proc = find_process_pid (thread->id.pid ());
5976 int options = linux_low_ptrace_options (proc->attached);
5978 linux_enable_event_reporting (thread->id.lwp (), options);
5979 lwp->must_set_ptrace_flags = 0;
5985 linux_process_target::handle_monitor_command (char *mon)
5987 #ifdef USE_THREAD_DB
5988 return thread_db_handle_monitor_command (mon);
5989 #else
5990 return 0;
5991 #endif
5995 linux_process_target::core_of_thread (ptid_t ptid)
5997 return linux_common_core_of_thread (ptid);
6000 bool
6001 linux_process_target::supports_disable_randomization ()
6003 return true;
6006 bool
6007 linux_process_target::supports_agent ()
6009 return true;
6012 bool
6013 linux_process_target::supports_range_stepping ()
6015 if (supports_software_single_step ())
6016 return true;
6018 return low_supports_range_stepping ();
6021 bool
6022 linux_process_target::low_supports_range_stepping ()
6024 return false;
6027 bool
6028 linux_process_target::supports_pid_to_exec_file ()
6030 return true;
6033 const char *
6034 linux_process_target::pid_to_exec_file (int pid)
6036 return linux_proc_pid_to_exec_file (pid);
6039 bool
6040 linux_process_target::supports_multifs ()
6042 return true;
6046 linux_process_target::multifs_open (int pid, const char *filename,
6047 int flags, mode_t mode)
6049 return linux_mntns_open_cloexec (pid, filename, flags, mode);
6053 linux_process_target::multifs_unlink (int pid, const char *filename)
6055 return linux_mntns_unlink (pid, filename);
6058 ssize_t
6059 linux_process_target::multifs_readlink (int pid, const char *filename,
6060 char *buf, size_t bufsiz)
6062 return linux_mntns_readlink (pid, filename, buf, bufsiz);
#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
/* Layout of the data returned by the PT_GETDSBT / PTRACE_GETFDPIC
   ptrace requests on no-MMU targets; it describes where each program
   segment was actually mapped.  The layout must match the kernel's.  */

struct target_loadseg
{
  /* Core address to which the segment is mapped.  */
  Elf32_Addr addr;
  /* VMA recorded in the program header.  */
  Elf32_Addr p_vaddr;
  /* Size of this segment in memory.  */
  Elf32_Word p_memsz;
};

# if defined PT_GETDSBT
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Word version;
  /* Pointer to the DSBT table, its size, and the DSBT index.  */
  unsigned *dsbt_table;
  unsigned dsbt_size, dsbt_index;
  /* Number of segments in this map.  */
  Elf32_Word nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PT_GETDSBT
# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
# else
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Half version;
  /* Number of segments in this map.  */
  Elf32_Half nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PTRACE_GETFDPIC
# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
# endif
6107 bool
6108 linux_process_target::supports_read_loadmap ()
6110 return true;
6114 linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
6115 unsigned char *myaddr, unsigned int len)
6117 int pid = current_thread->id.lwp ();
6118 int addr = -1;
6119 struct target_loadmap *data = NULL;
6120 unsigned int actual_length, copy_length;
6122 if (strcmp (annex, "exec") == 0)
6123 addr = (int) LINUX_LOADMAP_EXEC;
6124 else if (strcmp (annex, "interp") == 0)
6125 addr = (int) LINUX_LOADMAP_INTERP;
6126 else
6127 return -1;
6129 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6130 return -1;
6132 if (data == NULL)
6133 return -1;
6135 actual_length = sizeof (struct target_loadmap)
6136 + sizeof (struct target_loadseg) * data->nsegs;
6138 if (offset < 0 || offset > actual_length)
6139 return -1;
6141 copy_length = actual_length - offset < len ? actual_length - offset : len;
6142 memcpy (myaddr, (char *) data + offset, copy_length);
6143 return copy_length;
6145 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6147 bool
6148 linux_process_target::supports_catch_syscall ()
6150 return low_supports_catch_syscall ();
6153 bool
6154 linux_process_target::low_supports_catch_syscall ()
6156 return false;
6159 CORE_ADDR
6160 linux_process_target::read_pc (regcache *regcache)
6162 if (!low_supports_breakpoints ())
6163 return 0;
6165 return low_get_pc (regcache);
6168 void
6169 linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
6171 gdb_assert (low_supports_breakpoints ());
6173 low_set_pc (regcache, pc);
6176 bool
6177 linux_process_target::supports_thread_stopped ()
6179 return true;
6182 bool
6183 linux_process_target::thread_stopped (thread_info *thread)
6185 return get_thread_lwp (thread)->stopped;
6188 bool
6189 linux_process_target::any_resumed ()
6191 bool any_resumed;
6193 auto status_pending_p_any = [&] (thread_info *thread)
6195 return status_pending_p_callback (thread, minus_one_ptid);
6198 auto not_stopped = [&] (thread_info *thread)
6200 return not_stopped_callback (thread, minus_one_ptid);
6203 /* Find a resumed LWP, if any. */
6204 if (find_thread (status_pending_p_any) != NULL)
6205 any_resumed = 1;
6206 else if (find_thread (not_stopped) != NULL)
6207 any_resumed = 1;
6208 else
6209 any_resumed = 0;
6211 return any_resumed;
6214 /* This exposes stop-all-threads functionality to other modules. */
6216 void
6217 linux_process_target::pause_all (bool freeze)
6219 stop_all_lwps (freeze, NULL);
6222 /* This exposes unstop-all-threads functionality to other gdbserver
6223 modules. */
6225 void
6226 linux_process_target::unpause_all (bool unfreeze)
6228 unstop_all_lwps (unfreeze, NULL);
6231 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6233 static int
6234 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6235 CORE_ADDR *phdr_memaddr, int *num_phdr)
6237 char filename[PATH_MAX];
6238 int fd;
6239 const int auxv_size = is_elf64
6240 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6241 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6243 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6245 fd = open (filename, O_RDONLY);
6246 if (fd < 0)
6247 return 1;
6249 *phdr_memaddr = 0;
6250 *num_phdr = 0;
6251 while (read (fd, buf, auxv_size) == auxv_size
6252 && (*phdr_memaddr == 0 || *num_phdr == 0))
6254 if (is_elf64)
6256 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6258 switch (aux->a_type)
6260 case AT_PHDR:
6261 *phdr_memaddr = aux->a_un.a_val;
6262 break;
6263 case AT_PHNUM:
6264 *num_phdr = aux->a_un.a_val;
6265 break;
6268 else
6270 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6272 switch (aux->a_type)
6274 case AT_PHDR:
6275 *phdr_memaddr = aux->a_un.a_val;
6276 break;
6277 case AT_PHNUM:
6278 *num_phdr = aux->a_un.a_val;
6279 break;
6284 close (fd);
6286 if (*phdr_memaddr == 0 || *num_phdr == 0)
6288 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6289 "phdr_memaddr = %ld, phdr_num = %d",
6290 (long) *phdr_memaddr, *num_phdr);
6291 return 2;
6294 return 0;
6297 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6299 static CORE_ADDR
6300 get_dynamic (const int pid, const int is_elf64)
6302 CORE_ADDR phdr_memaddr, relocation;
6303 int num_phdr, i;
6304 unsigned char *phdr_buf;
6305 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6307 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6308 return 0;
6310 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6311 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6313 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6314 return 0;
6316 /* Compute relocation: it is expected to be 0 for "regular" executables,
6317 non-zero for PIE ones. */
6318 relocation = -1;
6319 for (i = 0; relocation == -1 && i < num_phdr; i++)
6320 if (is_elf64)
6322 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6324 if (p->p_type == PT_PHDR)
6325 relocation = phdr_memaddr - p->p_vaddr;
6327 else
6329 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6331 if (p->p_type == PT_PHDR)
6332 relocation = phdr_memaddr - p->p_vaddr;
6335 if (relocation == -1)
6337 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6338 any real world executables, including PIE executables, have always
6339 PT_PHDR present. PT_PHDR is not present in some shared libraries or
6340 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
6341 or present DT_DEBUG anyway (fpc binaries are statically linked).
6343 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6345 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6347 return 0;
6350 for (i = 0; i < num_phdr; i++)
6352 if (is_elf64)
6354 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6356 if (p->p_type == PT_DYNAMIC)
6357 return p->p_vaddr + relocation;
6359 else
6361 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6363 if (p->p_type == PT_DYNAMIC)
6364 return p->p_vaddr + relocation;
6368 return 0;
/* Return &_r_debug in the inferior, or -1 if not present.  Return value
   can be 0 if the inferior does not yet have the library list initialized.
   We look for DT_MIPS_RLD_MAP first.  MIPS executables use this instead of
   DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too.  */

static CORE_ADDR
get_r_debug (const int pid, const int is_elf64)
{
  CORE_ADDR dynamic_memaddr;
  const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
  unsigned char buf[sizeof (Elf64_Dyn)];  /* The larger of the two.  */
  CORE_ADDR map = -1;

  dynamic_memaddr = get_dynamic (pid, is_elf64);
  if (dynamic_memaddr == 0)
    return map;

  /* Scan the .dynamic section entry by entry until DT_NULL or a read
     failure, remembering any DT_DEBUG value seen.  */
  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
    {
      if (is_elf64)
	{
	  Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  /* Union to reinterpret the bytes read from the inferior as
	     the pointer value DT_MIPS_RLD_MAP* points at.  */
	  union
	    {
	      Elf64_Xword map;
	      unsigned char buf[sizeof (Elf64_Xword)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  /* DT_MIPS_RLD_MAP holds the absolute address of a word
	     containing &_r_debug.  */
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  /* DT_MIPS_RLD_MAP_REL is the same, but relative to the
	     dynamic entry's own address.  */
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}
      else
	{
	  Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  union
	    {
	      Elf32_Word map;
	      unsigned char buf[sizeof (Elf32_Word)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}

      dynamic_memaddr += dyn_size;
    }

  return map;
}
6473 /* Read one pointer from MEMADDR in the inferior. */
6475 static int
6476 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6478 int ret;
6480 /* Go through a union so this works on either big or little endian
6481 hosts, when the inferior's pointer size is smaller than the size
6482 of CORE_ADDR. It is assumed the inferior's endianness is the
6483 same of the superior's. */
6484 union
6486 CORE_ADDR core_addr;
6487 unsigned int ui;
6488 unsigned char uc;
6489 } addr;
6491 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6492 if (ret == 0)
6494 if (ptr_size == sizeof (CORE_ADDR))
6495 *ptr = addr.core_addr;
6496 else if (ptr_size == sizeof (unsigned int))
6497 *ptr = addr.ui;
6498 else
6499 gdb_assert_not_reached ("unhandled pointer size");
6501 return ret;
6504 bool
6505 linux_process_target::supports_qxfer_libraries_svr4 ()
6507 return true;
/* Byte offsets of the fields gdbserver needs within the inferior's
   r_debug / r_debug_extended and link_map structures, for a given
   pointer size.  */

struct link_map_offsets
{
  /* Offset and size of r_debug.r_version.  */
  int r_version_offset;

  /* Offset and size of r_debug.r_map.  */
  int r_map_offset;

  /* Offset of r_debug_extended.r_next.  */
  int r_next_offset;

  /* Offset to l_addr field in struct link_map.  */
  int l_addr_offset;

  /* Offset to l_name field in struct link_map.  */
  int l_name_offset;

  /* Offset to l_ld field in struct link_map.  */
  int l_ld_offset;

  /* Offset to l_next field in struct link_map.  */
  int l_next_offset;

  /* Offset to l_prev field in struct link_map.  */
  int l_prev_offset;
};
/* Field offsets for a 32-bit (ILP32) inferior.  */

static const link_map_offsets lmo_32bit_offsets =
  {
    0,     /* r_version offset.  */
    4,     /* r_debug.r_map offset.  */
    20,    /* r_debug_extended.r_next.  */
    0,     /* l_addr offset in link_map.  */
    4,     /* l_name offset in link_map.  */
    8,     /* l_ld offset in link_map.  */
    12,    /* l_next offset in link_map.  */
    16     /* l_prev offset in link_map.  */
  };

/* Field offsets for a 64-bit (LP64) inferior.  */

static const link_map_offsets lmo_64bit_offsets =
  {
    0,     /* r_version offset.  */
    8,     /* r_debug.r_map offset.  */
    40,    /* r_debug_extended.r_next.  */
    0,     /* l_addr offset in link_map.  */
    8,     /* l_name offset in link_map.  */
    16,    /* l_ld offset in link_map.  */
    24,    /* l_next offset in link_map.  */
    32     /* l_prev offset in link_map.  */
  };
/* Get the loaded shared libraries from one namespace, appending one
   <library> XML element per entry to DOCUMENT.  LMID identifies the
   namespace, LM_ADDR is the first link_map entry to visit, and LM_PREV
   is the expected l_prev back-pointer of that entry, used to detect a
   corrupted list.  */

static void
read_link_map (std::string &document, CORE_ADDR lmid, CORE_ADDR lm_addr,
	       CORE_ADDR lm_prev, int ptr_size, const link_map_offsets *lmo)
{
  CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;

  /* Walk the link_map list; stop at the end of the list or on the
     first failed read.  */
  while (lm_addr
	 && read_one_ptr (lm_addr + lmo->l_name_offset,
			  &l_name, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_addr_offset,
			  &l_addr, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_ld_offset,
			  &l_ld, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_prev_offset,
			  &l_prev, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_next_offset,
			  &l_next, ptr_size) == 0)
    {
      unsigned char libname[PATH_MAX];

      /* A mismatched back-pointer means the list is corrupted; bail
	 out rather than risk looping forever.  */
      if (lm_prev != l_prev)
	{
	  warning ("Corrupted shared library list: 0x%s != 0x%s",
		   paddress (lm_prev), paddress (l_prev));
	  break;
	}

      /* Not checking for error because reading may stop before we've got
	 PATH_MAX worth of characters.  */
      libname[0] = '\0';
      linux_read_memory (l_name, libname, sizeof (libname) - 1);
      libname[sizeof (libname) - 1] = '\0';
      /* Entries with an empty name (e.g. the main executable) are
	 skipped.  */
      if (libname[0] != '\0')
	{
	  string_appendf (document, "<library name=\"");
	  xml_escape_text_append (document, (char *) libname);
	  string_appendf (document, "\" lm=\"0x%s\" l_addr=\"0x%s\" "
			  "l_ld=\"0x%s\" lmid=\"0x%s\"/>",
			  paddress (lm_addr), paddress (l_addr),
			  paddress (l_ld), paddress (lmid));
	}

      lm_prev = lm_addr;
      lm_addr = l_next;
    }
}
/* Construct qXfer:libraries-svr4:read reply.  The annex may carry
   "lmid", "start" and "prev" hex addresses (';'-separated NAME=VALUE
   pairs); with "start" present only that namespace is walked,
   otherwise all namespaces reachable from r_debug are reported.  */

int
linux_process_target::qxfer_libraries_svr4 (const char *annex,
					    unsigned char *readbuf,
					    unsigned const char *writebuf,
					    CORE_ADDR offset, int len)
{
  struct process_info_private *const priv = current_process ()->priv;
  char filename[PATH_MAX];
  int is_elf64;
  unsigned int machine;
  CORE_ADDR lmid = 0, lm_addr = 0, lm_prev = 0;

  /* This object is read-only.  */
  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  /* The inferior's ELF class selects the link_map field offsets and
     pointer size.  */
  int pid = current_thread->id.lwp ();
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  const link_map_offsets *lmo;
  int ptr_size;
  if (is_elf64)
    {
      lmo = &lmo_64bit_offsets;
      ptr_size = 8;
    }
  else
    {
      lmo = &lmo_32bit_offsets;
      ptr_size = 4;
    }

  /* Parse the annex; unknown NAME=VALUE pairs are skipped.  */
  while (annex[0] != '\0')
    {
      const char *sep;
      CORE_ADDR *addrp;
      int name_len;

      sep = strchr (annex, '=');
      if (sep == NULL)
	break;

      name_len = sep - annex;
      if (name_len == 4 && startswith (annex, "lmid"))
	addrp = &lmid;
      else if (name_len == 5 && startswith (annex, "start"))
	addrp = &lm_addr;
      else if (name_len == 4 && startswith (annex, "prev"))
	addrp = &lm_prev;
      else
	{
	  annex = strchr (sep, ';');
	  if (annex == NULL)
	    break;
	  annex++;
	  continue;
	}

      annex = decode_address_to_semicolon (addrp, sep + 1);
    }

  std::string document = "<library-list-svr4 version=\"1.0\"";

  /* When the starting LM_ADDR is passed in the annex, only traverse that
     namespace, which is assumed to be identified by LMID.

     Otherwise, start with R_DEBUG and traverse all namespaces we find.  */
  if (lm_addr != 0)
    {
      document += ">";
      read_link_map (document, lmid, lm_addr, lm_prev, ptr_size, lmo);
    }
  else
    {
      if (lm_prev != 0)
	warning ("ignoring prev=0x%s without start", paddress (lm_prev));

      /* We could interpret LMID as 'provide only the libraries for this
	 namespace' but GDB is currently only providing lmid, start, and
	 prev, or nothing.  */
      if (lmid != 0)
	warning ("ignoring lmid=0x%s without start", paddress (lmid));

      /* The address of r_debug is looked up once per inferior and
	 cached in the process private data.  */
      CORE_ADDR r_debug = priv->r_debug;
      if (r_debug == 0)
	r_debug = priv->r_debug = get_r_debug (pid, is_elf64);

      /* We failed to find DT_DEBUG.  Such situation will not change
	 for this inferior - do not retry it.  Report it to GDB as
	 E01, see for the reasons at the GDB solib-svr4.c side.  */
      if (r_debug == (CORE_ADDR) -1)
	return -1;

      /* Terminate the header if we end up with an empty list.  */
      if (r_debug == 0)
	document += ">";

      /* Walk the chain of r_debug_extended structures, one per
	 namespace.  */
      while (r_debug != 0)
	{
	  int r_version = 0;
	  if (linux_read_memory (r_debug + lmo->r_version_offset,
				 (unsigned char *) &r_version,
				 sizeof (r_version)) != 0)
	    {
	      warning ("unable to read r_version from 0x%s",
		       paddress (r_debug + lmo->r_version_offset));
	      break;
	    }

	  if (r_version < 1)
	    {
	      warning ("unexpected r_debug version %d", r_version);
	      break;
	    }

	  if (read_one_ptr (r_debug + lmo->r_map_offset, &lm_addr,
			    ptr_size) != 0)
	    {
	      warning ("unable to read r_map from 0x%s",
		       paddress (r_debug + lmo->r_map_offset));
	      break;
	    }

	  /* We read the entire namespace.  */
	  lm_prev = 0;

	  /* The first entry corresponds to the main executable unless the
	     dynamic loader was loaded late by a static executable.  But
	     in such case the main executable does not have PT_DYNAMIC
	     present and we would not have gotten here.  */
	  if (r_debug == priv->r_debug)
	    {
	      if (lm_addr != 0)
		string_appendf (document, " main-lm=\"0x%s\">",
				paddress (lm_addr));
	      else
		document += ">";

	      lm_prev = lm_addr;
	      if (read_one_ptr (lm_addr + lmo->l_next_offset,
				&lm_addr, ptr_size) != 0)
		{
		  warning ("unable to read l_next from 0x%s",
			   paddress (lm_addr + lmo->l_next_offset));
		  break;
		}
	    }

	  read_link_map (document, r_debug, lm_addr, lm_prev, ptr_size, lmo);

	  /* r_next only exists in r_debug_extended (version >= 2).  */
	  if (r_version < 2)
	    break;

	  if (read_one_ptr (r_debug + lmo->r_next_offset, &r_debug,
			    ptr_size) != 0)
	    {
	      warning ("unable to read r_next from 0x%s",
		       paddress (r_debug + lmo->r_next_offset));
	      break;
	    }
	}
    }

  document += "</library-list-svr4>";

  /* Clamp [OFFSET, OFFSET + LEN) to the document size.  */
  int document_len = document.length ();
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document.data () + offset, len);

  return len;
}
6791 #ifdef HAVE_LINUX_BTRACE
6793 bool
6794 linux_process_target::supports_btrace ()
6796 return true;
6799 btrace_target_info *
6800 linux_process_target::enable_btrace (thread_info *tp,
6801 const btrace_config *conf)
6803 return linux_enable_btrace (tp->id, conf);
6806 /* See to_disable_btrace target method. */
6809 linux_process_target::disable_btrace (btrace_target_info *tinfo)
6811 enum btrace_error err;
6813 err = linux_disable_btrace (tinfo);
6814 return (err == BTRACE_ERR_NONE ? 0 : -1);
6817 /* Encode an Intel Processor Trace configuration. */
6819 static void
6820 linux_low_encode_pt_config (std::string *buffer,
6821 const struct btrace_data_pt_config *config)
6823 *buffer += "<pt-config>\n";
6825 switch (config->cpu.vendor)
6827 case CV_INTEL:
6828 string_xml_appendf (*buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6829 "model=\"%u\" stepping=\"%u\"/>\n",
6830 config->cpu.family, config->cpu.model,
6831 config->cpu.stepping);
6832 break;
6834 default:
6835 break;
6838 *buffer += "</pt-config>\n";
6841 /* Encode a raw buffer. */
6843 static void
6844 linux_low_encode_raw (std::string *buffer, const gdb_byte *data,
6845 unsigned int size)
6847 if (size == 0)
6848 return;
6850 /* We use hex encoding - see gdbsupport/rsp-low.h. */
6851 *buffer += "<raw>\n";
6853 while (size-- > 0)
6855 char elem[2];
6857 elem[0] = tohex ((*data >> 4) & 0xf);
6858 elem[1] = tohex (*data++ & 0xf);
6860 buffer->append (elem, 2);
6863 *buffer += "</raw>\n";
/* See to_read_btrace target method.  Reads the branch trace for TINFO
   into BUFFER as XML ("btrace.dtd").  Returns 0 on success, -1 on
   error (with an "E." text message appended to BUFFER instead).  */

int
linux_process_target::read_btrace (btrace_target_info *tinfo,
				   std::string *buffer,
				   enum btrace_read_type type)
{
  struct btrace_data btrace;
  enum btrace_error err;

  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      /* Report the failure to GDB as an "E." text reply.  */
      if (err == BTRACE_ERR_OVERFLOW)
	*buffer += "E.Overflow.";
      else
	*buffer += "E.Generic Error.";

      return -1;
    }

  switch (btrace.format)
    {
    case BTRACE_FORMAT_NONE:
      *buffer += "E.No Trace.";
      return -1;

    case BTRACE_FORMAT_BTS:
      /* BTS trace is a list of begin/end address blocks.  */
      *buffer += "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n";
      *buffer += "<btrace version=\"1.0\">\n";

      for (const btrace_block &block : *btrace.variant.bts.blocks)
	string_xml_appendf (*buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
			    paddress (block.begin), paddress (block.end));

      *buffer += "</btrace>\n";
      break;

    case BTRACE_FORMAT_PT:
      /* PT trace is opaque to gdbserver: emit the CPU configuration
	 plus the raw trace bytes, hex-encoded.  */
      *buffer += "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n";
      *buffer += "<btrace version=\"1.0\">\n";
      *buffer += "<pt>\n";

      linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);

      linux_low_encode_raw (buffer, btrace.variant.pt.data,
			    btrace.variant.pt.size);

      *buffer += "</pt>\n";
      *buffer += "</btrace>\n";
      break;

    default:
      *buffer += "E.Unsupported Trace Format.";
      return -1;
    }

  return 0;
}
6926 /* See to_btrace_conf target method. */
6929 linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
6930 std::string *buffer)
6932 const struct btrace_config *conf;
6934 *buffer += "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n";
6935 *buffer += "<btrace-conf version=\"1.0\">\n";
6937 conf = linux_btrace_conf (tinfo);
6938 if (conf != NULL)
6940 switch (conf->format)
6942 case BTRACE_FORMAT_NONE:
6943 break;
6945 case BTRACE_FORMAT_BTS:
6946 string_xml_appendf (*buffer, "<bts");
6947 string_xml_appendf (*buffer, " size=\"0x%x\"", conf->bts.size);
6948 string_xml_appendf (*buffer, " />\n");
6949 break;
6951 case BTRACE_FORMAT_PT:
6952 string_xml_appendf (*buffer, "<pt");
6953 string_xml_appendf (*buffer, " size=\"0x%x\"", conf->pt.size);
6954 string_xml_appendf (*buffer, "/>\n");
6955 string_xml_appendf (*buffer, " ptwrite=\"%s\"",
6956 conf->pt.ptwrite ? "yes" : "no");
6957 string_xml_appendf (*buffer, " event-tracing=\"%s\"",
6958 conf->pt.event_tracing ? "yes" : "no");
6959 string_xml_appendf (*buffer, "/>\n");
6960 break;
6964 *buffer += "</btrace-conf>\n";
6965 return 0;
6967 #endif /* HAVE_LINUX_BTRACE */
6969 /* See nat/linux-nat.h. */
6971 ptid_t
6972 current_lwp_ptid (void)
6974 return current_thread->id;
6977 /* A helper function that copies NAME to DEST, replacing non-printable
6978 characters with '?'. Returns the original DEST as a
6979 convenience. */
6981 static const char *
6982 replace_non_ascii (char *dest, const char *name)
6984 const char *result = dest;
6985 while (*name != '\0')
6987 if (!ISPRINT (*name))
6988 *dest++ = '?';
6989 else
6990 *dest++ = *name;
6991 ++name;
6993 *dest = '\0';
6994 return result;
/* Return a printable name for THREAD, or nullptr if no usable name is
   available.  The result lives in a static buffer and is overwritten
   by the next call (not reentrant).  */

const char *
linux_process_target::thread_name (ptid_t thread)
{
  static char dest[100];

  const char *name = linux_proc_tid_get_name (thread);
  if (name == nullptr)
    return nullptr;

  /* Linux limits the comm file to 16 bytes (including the trailing
     \0.  If the program or thread name is set when using a multi-byte
     encoding, this might cause it to be truncated mid-character.  In
     this situation, sending the truncated form in an XML <thread>
     response will cause a parse error in gdb.  So, instead convert
     from the locale's encoding (we can't be sure this is the correct
     encoding, but it's as good a guess as we have) to UTF-8, but in a
     way that ignores any encoding errors.  See PR remote/30618.  */
  const char *cset = nl_langinfo (CODESET);
  iconv_t handle = iconv_open ("UTF-8//IGNORE", cset);
  if (handle == (iconv_t) -1)
    /* No converter for this locale; fall back to replacing any
       non-printable bytes with '?'.  */
    return replace_non_ascii (dest, name);

  size_t inbytes = strlen (name);
  char *inbuf = const_cast<char *> (name);
  size_t outbytes = sizeof (dest);
  char *outbuf = dest;
  /* iconv advances INBUF/OUTBUF and decrements the byte counts as it
     converts.  */
  size_t result = iconv (handle, &inbuf, &inbytes, &outbuf, &outbytes);

  if (result == (size_t) -1)
    {
      /* E2BIG: output buffer full -- keep what fit, terminating at
	 the last byte.  EILSEQ/EINVAL: invalid or truncated multi-byte
	 sequence -- append a single '?' if there is room for it plus
	 the terminator.  Either way the partial conversion already in
	 DEST is kept.  */
      if (errno == E2BIG)
	outbuf = &dest[sizeof (dest) - 1];
      else if ((errno == EILSEQ || errno == EINVAL)
	       && outbuf < &dest[sizeof (dest) - 2])
	*outbuf++ = '?';
      *outbuf = '\0';
    }

  iconv_close (handle);
  /* An empty conversion result is treated as "no name".  */
  return *dest == '\0' ? nullptr : dest;
}
7039 #if USE_THREAD_DB
7040 bool
7041 linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
7042 int *handle_len)
7044 return thread_db_thread_handle (ptid, handle, handle_len);
7046 #endif
7048 thread_info *
7049 linux_process_target::thread_pending_parent (thread_info *thread)
7051 lwp_info *parent = get_thread_lwp (thread)->pending_parent ();
7053 if (parent == nullptr)
7054 return nullptr;
7056 return parent->thread;
7059 thread_info *
7060 linux_process_target::thread_pending_child (thread_info *thread,
7061 target_waitkind *kind)
7063 lwp_info *child = get_thread_lwp (thread)->pending_child (kind);
7065 if (child == nullptr)
7066 return nullptr;
7068 return child->thread;
7071 /* Default implementation of linux_target_ops method "set_pc" for
7072 32-bit pc register which is literally named "pc". */
7074 void
7075 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7077 uint32_t newpc = pc;
7079 supply_register_by_name (regcache, "pc", &newpc);
7082 /* Default implementation of linux_target_ops method "get_pc" for
7083 32-bit pc register which is literally named "pc". */
7085 CORE_ADDR
7086 linux_get_pc_32bit (struct regcache *regcache)
7088 uint32_t pc;
7090 collect_register_by_name (regcache, "pc", &pc);
7091 threads_debug_printf ("stop pc is 0x%" PRIx32, pc);
7092 return pc;
7095 /* Default implementation of linux_target_ops method "set_pc" for
7096 64-bit pc register which is literally named "pc". */
7098 void
7099 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7101 uint64_t newpc = pc;
7103 supply_register_by_name (regcache, "pc", &newpc);
7106 /* Default implementation of linux_target_ops method "get_pc" for
7107 64-bit pc register which is literally named "pc". */
7109 CORE_ADDR
7110 linux_get_pc_64bit (struct regcache *regcache)
7112 uint64_t pc;
7114 collect_register_by_name (regcache, "pc", &pc);
7115 threads_debug_printf ("stop pc is 0x%" PRIx64, pc);
7116 return pc;
7119 /* See linux-low.h. */
7122 linux_get_auxv (int pid, int wordsize, CORE_ADDR match, CORE_ADDR *valp)
7124 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7125 int offset = 0;
7127 gdb_assert (wordsize == 4 || wordsize == 8);
7129 while (the_target->read_auxv (pid, offset, data, 2 * wordsize)
7130 == 2 * wordsize)
7132 if (wordsize == 4)
7134 uint32_t *data_p = (uint32_t *) data;
7135 if (data_p[0] == match)
7137 *valp = data_p[1];
7138 return 1;
7141 else
7143 uint64_t *data_p = (uint64_t *) data;
7144 if (data_p[0] == match)
7146 *valp = data_p[1];
7147 return 1;
7151 offset += 2 * wordsize;
7154 return 0;
7157 /* See linux-low.h. */
7159 CORE_ADDR
7160 linux_get_hwcap (int pid, int wordsize)
7162 CORE_ADDR hwcap = 0;
7163 linux_get_auxv (pid, wordsize, AT_HWCAP, &hwcap);
7164 return hwcap;
7167 /* See linux-low.h. */
7169 CORE_ADDR
7170 linux_get_hwcap2 (int pid, int wordsize)
7172 CORE_ADDR hwcap2 = 0;
7173 linux_get_auxv (pid, wordsize, AT_HWCAP2, &hwcap2);
7174 return hwcap2;
7177 #ifdef HAVE_LINUX_REGSETS
7178 void
7179 initialize_regsets_info (struct regsets_info *info)
7181 for (info->num_regsets = 0;
7182 info->regsets[info->num_regsets].size >= 0;
7183 info->num_regsets++)
7186 #endif
7188 void
7189 initialize_low (void)
7191 struct sigaction sigchld_action;
7193 memset (&sigchld_action, 0, sizeof (sigchld_action));
7194 set_target_ops (the_linux_target);
7196 linux_ptrace_init_warnings ();
7197 linux_proc_init_warnings ();
7199 sigchld_action.sa_handler = sigchld_handler;
7200 sigemptyset (&sigchld_action.sa_mask);
7201 sigchld_action.sa_flags = SA_RESTART;
7202 sigaction (SIGCHLD, &sigchld_action, NULL);
7204 initialize_low_arch ();
7206 linux_check_ptrace_features ();