1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2021 Free Software Foundation, Inc.
4 This file is part of GDB.
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "gdbsupport/agent.h"
24 #include "gdbsupport/rsp-low.h"
25 #include "gdbsupport/signals-state-save-restore.h"
26 #include "nat/linux-nat.h"
27 #include "nat/linux-waitpid.h"
28 #include "gdbsupport/gdb_wait.h"
29 #include "nat/gdb_ptrace.h"
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
34 #include <sys/ioctl.h>
37 #include <sys/syscall.h>
41 #include <sys/types.h>
46 #include "gdbsupport/filestuff.h"
47 #include "tracepoint.h"
49 #include "gdbsupport/common-inferior.h"
50 #include "nat/fork-inferior.h"
51 #include "gdbsupport/environ.h"
52 #include "gdbsupport/gdb-sigmask.h"
53 #include "gdbsupport/scoped_restore.h"
55 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
56 then ELFMAG0 will have been defined. If it didn't get included by
57 gdb_proc_service.h then including it will likely introduce a duplicate
58 definition of elf_fpregset_t. */
61 #include "nat/linux-namespaces.h"
71 /* Some targets did not define these ptrace constants from the start,
72 so gdbserver defines them locally here. In the future, these may
73 be removed after they are added to asm/ptrace.h. */
74 #if !(defined(PT_TEXT_ADDR) \
75 || defined(PT_DATA_ADDR) \
76 || defined(PT_TEXT_END_ADDR))
77 #if defined(__mcoldfire__)
78 /* These are still undefined in 3.10 kernels. */
79 #define PT_TEXT_ADDR 49*4
80 #define PT_DATA_ADDR 50*4
81 #define PT_TEXT_END_ADDR 51*4
82 /* These are still undefined in 3.10 kernels. */
83 #elif defined(__TMS320C6X__)
84 #define PT_TEXT_ADDR (0x10000*4)
85 #define PT_DATA_ADDR (0x10004*4)
86 #define PT_TEXT_END_ADDR (0x10008*4)
90 #if (defined(__UCLIBC__) \
91 && defined(HAS_NOMMU) \
92 && defined(PT_TEXT_ADDR) \
93 && defined(PT_DATA_ADDR) \
94 && defined(PT_TEXT_END_ADDR))
95 #define SUPPORTS_READ_OFFSETS
98 #ifdef HAVE_LINUX_BTRACE
99 # include "nat/linux-btrace.h"
100 # include "gdbsupport/btrace-common.h"
#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We use to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif
#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We use to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif
/* Does the current host support PTRACE_GETREGSET?  -1 means unknown
   (not probed yet), 0 means no, 1 means yes.  */
int have_ptrace_getregset = -1;
138 /* See nat/linux-nat.h. */
141 ptid_of_lwp (struct lwp_info
*lwp
)
143 return ptid_of (get_lwp_thread (lwp
));
146 /* See nat/linux-nat.h. */
149 lwp_set_arch_private_info (struct lwp_info
*lwp
,
150 struct arch_lwp_info
*info
)
152 lwp
->arch_private
= info
;
155 /* See nat/linux-nat.h. */
157 struct arch_lwp_info
*
158 lwp_arch_private_info (struct lwp_info
*lwp
)
160 return lwp
->arch_private
;
163 /* See nat/linux-nat.h. */
166 lwp_is_stopped (struct lwp_info
*lwp
)
171 /* See nat/linux-nat.h. */
173 enum target_stop_reason
174 lwp_stop_reason (struct lwp_info
*lwp
)
176 return lwp
->stop_reason
;
179 /* See nat/linux-nat.h. */
182 lwp_is_stepping (struct lwp_info
*lwp
)
184 return lwp
->stepping
;
/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};

/* Head of the list of not-yet-claimed stopped children.  */
static struct simple_pid_list *stopped_pids;
204 /* Trivial list manipulation functions to keep track of a list of new
205 stopped processes. */
208 add_to_pid_list (struct simple_pid_list
**listp
, int pid
, int status
)
210 struct simple_pid_list
*new_pid
= XNEW (struct simple_pid_list
);
213 new_pid
->status
= status
;
214 new_pid
->next
= *listp
;
219 pull_pid_from_list (struct simple_pid_list
**listp
, int pid
, int *statusp
)
221 struct simple_pid_list
**p
;
223 for (p
= listp
; *p
!= NULL
; p
= &(*p
)->next
)
224 if ((*p
)->pid
== pid
)
226 struct simple_pid_list
*next
= (*p
)->next
;
228 *statusp
= (*p
)->status
;
/* What kind of all-stop operation, if any, is in progress.  */

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
static stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
251 /* FIXME make into a target method? */
252 int using_threads
= 1;
254 /* True if we're presently stabilizing threads (moving them out of
256 static int stabilizing_threads
;
258 static void unsuspend_all_lwps (struct lwp_info
*except
);
259 static void mark_lwp_dead (struct lwp_info
*lwp
, int wstat
);
260 static int lwp_is_marked_dead (struct lwp_info
*lwp
);
261 static int kill_lwp (unsigned long lwpid
, int signo
);
262 static void enqueue_pending_signal (struct lwp_info
*lwp
, int signal
, siginfo_t
*info
);
263 static int linux_low_ptrace_options (int attached
);
264 static int check_ptrace_stopped_lwp_gone (struct lwp_info
*lp
);
266 /* When the event-loop is doing a step-over, this points at the thread
268 static ptid_t step_over_bkpt
;
271 linux_process_target::low_supports_breakpoints ()
277 linux_process_target::low_get_pc (regcache
*regcache
)
283 linux_process_target::low_set_pc (regcache
*regcache
, CORE_ADDR newpc
)
285 gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
288 std::vector
<CORE_ADDR
>
289 linux_process_target::low_get_next_pcs (regcache
*regcache
)
291 gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
296 linux_process_target::low_decr_pc_after_break ()
301 /* True if LWP is stopped in its stepping range. */
304 lwp_in_step_range (struct lwp_info
*lwp
)
306 CORE_ADDR pc
= lwp
->stop_pc
;
308 return (pc
>= lwp
->step_range_start
&& pc
< lwp
->step_range_end
);
/* The read/write ends of the pipe registered as waitable file in the
   event loop.  -1 means async mode is off.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
/* Return non-zero if HEADER is a 64-bit ELF file.  On a valid ELF
   magic, also store the machine (e_machine) in *MACHINE; otherwise
   store EM_NONE there.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return 0;
}
338 /* Return non-zero if FILE is a 64-bit ELF file,
339 zero if the file is not a 64-bit ELF file,
340 and -1 if the file is not accessible or doesn't exist. */
343 elf_64_file_p (const char *file
, unsigned int *machine
)
348 fd
= open (file
, O_RDONLY
);
352 if (read (fd
, &header
, sizeof (header
)) != sizeof (header
))
359 return elf_64_header_p (&header
, machine
);
362 /* Accepts an integer PID; Returns true if the executable PID is
363 running is a 64-bit ELF file.. */
366 linux_pid_exe_is_elf_64_file (int pid
, unsigned int *machine
)
370 sprintf (file
, "/proc/%d/exe", pid
);
371 return elf_64_file_p (file
, machine
);
375 linux_process_target::delete_lwp (lwp_info
*lwp
)
377 struct thread_info
*thr
= get_lwp_thread (lwp
);
380 debug_printf ("deleting %ld\n", lwpid_of (thr
));
384 low_delete_thread (lwp
->arch_private
);
390 linux_process_target::low_delete_thread (arch_lwp_info
*info
)
392 /* Default implementation should be overridden if architecture-specific
393 info is being used. */
394 gdb_assert (info
== nullptr);
398 linux_process_target::add_linux_process (int pid
, int attached
)
400 struct process_info
*proc
;
402 proc
= add_process (pid
, attached
);
403 proc
->priv
= XCNEW (struct process_info_private
);
405 proc
->priv
->arch_private
= low_new_process ();
411 linux_process_target::low_new_process ()
417 linux_process_target::low_delete_process (arch_process_info
*info
)
419 /* Default implementation must be overridden if architecture-specific
421 gdb_assert (info
== nullptr);
425 linux_process_target::low_new_fork (process_info
*parent
, process_info
*child
)
431 linux_process_target::arch_setup_thread (thread_info
*thread
)
433 struct thread_info
*saved_thread
;
435 saved_thread
= current_thread
;
436 current_thread
= thread
;
440 current_thread
= saved_thread
;
444 linux_process_target::handle_extended_wait (lwp_info
**orig_event_lwp
,
447 client_state
&cs
= get_client_state ();
448 struct lwp_info
*event_lwp
= *orig_event_lwp
;
449 int event
= linux_ptrace_get_extended_event (wstat
);
450 struct thread_info
*event_thr
= get_lwp_thread (event_lwp
);
451 struct lwp_info
*new_lwp
;
453 gdb_assert (event_lwp
->waitstatus
.kind
== TARGET_WAITKIND_IGNORE
);
455 /* All extended events we currently use are mid-syscall. Only
456 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
457 you have to be using PTRACE_SEIZE to get that. */
458 event_lwp
->syscall_state
= TARGET_WAITKIND_SYSCALL_ENTRY
;
460 if ((event
== PTRACE_EVENT_FORK
) || (event
== PTRACE_EVENT_VFORK
)
461 || (event
== PTRACE_EVENT_CLONE
))
464 unsigned long new_pid
;
467 /* Get the pid of the new lwp. */
468 ptrace (PTRACE_GETEVENTMSG
, lwpid_of (event_thr
), (PTRACE_TYPE_ARG3
) 0,
471 /* If we haven't already seen the new PID stop, wait for it now. */
472 if (!pull_pid_from_list (&stopped_pids
, new_pid
, &status
))
474 /* The new child has a pending SIGSTOP. We can't affect it until it
475 hits the SIGSTOP, but we're already attached. */
477 ret
= my_waitpid (new_pid
, &status
, __WALL
);
480 perror_with_name ("waiting for new child");
481 else if (ret
!= new_pid
)
482 warning ("wait returned unexpected PID %d", ret
);
483 else if (!WIFSTOPPED (status
))
484 warning ("wait returned unexpected status 0x%x", status
);
487 if (event
== PTRACE_EVENT_FORK
|| event
== PTRACE_EVENT_VFORK
)
489 struct process_info
*parent_proc
;
490 struct process_info
*child_proc
;
491 struct lwp_info
*child_lwp
;
492 struct thread_info
*child_thr
;
494 ptid
= ptid_t (new_pid
, new_pid
);
498 debug_printf ("HEW: Got fork event from LWP %ld, "
500 ptid_of (event_thr
).lwp (),
504 /* Add the new process to the tables and clone the breakpoint
505 lists of the parent. We need to do this even if the new process
506 will be detached, since we will need the process object and the
507 breakpoints to remove any breakpoints from memory when we
508 detach, and the client side will access registers. */
509 child_proc
= add_linux_process (new_pid
, 0);
510 gdb_assert (child_proc
!= NULL
);
511 child_lwp
= add_lwp (ptid
);
512 gdb_assert (child_lwp
!= NULL
);
513 child_lwp
->stopped
= 1;
514 child_lwp
->must_set_ptrace_flags
= 1;
515 child_lwp
->status_pending_p
= 0;
516 child_thr
= get_lwp_thread (child_lwp
);
517 child_thr
->last_resume_kind
= resume_stop
;
518 child_thr
->last_status
.kind
= TARGET_WAITKIND_STOPPED
;
520 /* If we're suspending all threads, leave this one suspended
521 too. If the fork/clone parent is stepping over a breakpoint,
522 all other threads have been suspended already. Leave the
523 child suspended too. */
524 if (stopping_threads
== STOPPING_AND_SUSPENDING_THREADS
525 || event_lwp
->bp_reinsert
!= 0)
528 debug_printf ("HEW: leaving child suspended\n");
529 child_lwp
->suspended
= 1;
532 parent_proc
= get_thread_process (event_thr
);
533 child_proc
->attached
= parent_proc
->attached
;
535 if (event_lwp
->bp_reinsert
!= 0
536 && supports_software_single_step ()
537 && event
== PTRACE_EVENT_VFORK
)
539 /* If we leave single-step breakpoints there, child will
540 hit it, so uninsert single-step breakpoints from parent
541 (and child). Once vfork child is done, reinsert
542 them back to parent. */
543 uninsert_single_step_breakpoints (event_thr
);
546 clone_all_breakpoints (child_thr
, event_thr
);
548 target_desc_up tdesc
= allocate_target_description ();
549 copy_target_description (tdesc
.get (), parent_proc
->tdesc
);
550 child_proc
->tdesc
= tdesc
.release ();
552 /* Clone arch-specific process data. */
553 low_new_fork (parent_proc
, child_proc
);
555 /* Save fork info in the parent thread. */
556 if (event
== PTRACE_EVENT_FORK
)
557 event_lwp
->waitstatus
.kind
= TARGET_WAITKIND_FORKED
;
558 else if (event
== PTRACE_EVENT_VFORK
)
559 event_lwp
->waitstatus
.kind
= TARGET_WAITKIND_VFORKED
;
561 event_lwp
->waitstatus
.value
.related_pid
= ptid
;
563 /* The status_pending field contains bits denoting the
564 extended event, so when the pending event is handled,
565 the handler will look at lwp->waitstatus. */
566 event_lwp
->status_pending_p
= 1;
567 event_lwp
->status_pending
= wstat
;
569 /* Link the threads until the parent event is passed on to
571 event_lwp
->fork_relative
= child_lwp
;
572 child_lwp
->fork_relative
= event_lwp
;
574 /* If the parent thread is doing step-over with single-step
575 breakpoints, the list of single-step breakpoints are cloned
576 from the parent's. Remove them from the child process.
577 In case of vfork, we'll reinsert them back once vforked
579 if (event_lwp
->bp_reinsert
!= 0
580 && supports_software_single_step ())
582 /* The child process is forked and stopped, so it is safe
583 to access its memory without stopping all other threads
584 from other processes. */
585 delete_single_step_breakpoints (child_thr
);
587 gdb_assert (has_single_step_breakpoints (event_thr
));
588 gdb_assert (!has_single_step_breakpoints (child_thr
));
591 /* Report the event. */
596 debug_printf ("HEW: Got clone event "
597 "from LWP %ld, new child is LWP %ld\n",
598 lwpid_of (event_thr
), new_pid
);
600 ptid
= ptid_t (pid_of (event_thr
), new_pid
);
601 new_lwp
= add_lwp (ptid
);
603 /* Either we're going to immediately resume the new thread
604 or leave it stopped. resume_one_lwp is a nop if it
605 thinks the thread is currently running, so set this first
606 before calling resume_one_lwp. */
607 new_lwp
->stopped
= 1;
609 /* If we're suspending all threads, leave this one suspended
610 too. If the fork/clone parent is stepping over a breakpoint,
611 all other threads have been suspended already. Leave the
612 child suspended too. */
613 if (stopping_threads
== STOPPING_AND_SUSPENDING_THREADS
614 || event_lwp
->bp_reinsert
!= 0)
615 new_lwp
->suspended
= 1;
617 /* Normally we will get the pending SIGSTOP. But in some cases
618 we might get another signal delivered to the group first.
619 If we do get another signal, be sure not to lose it. */
620 if (WSTOPSIG (status
) != SIGSTOP
)
622 new_lwp
->stop_expected
= 1;
623 new_lwp
->status_pending_p
= 1;
624 new_lwp
->status_pending
= status
;
626 else if (cs
.report_thread_events
)
628 new_lwp
->waitstatus
.kind
= TARGET_WAITKIND_THREAD_CREATED
;
629 new_lwp
->status_pending_p
= 1;
630 new_lwp
->status_pending
= status
;
634 thread_db_notice_clone (event_thr
, ptid
);
637 /* Don't report the event. */
640 else if (event
== PTRACE_EVENT_VFORK_DONE
)
642 event_lwp
->waitstatus
.kind
= TARGET_WAITKIND_VFORK_DONE
;
644 if (event_lwp
->bp_reinsert
!= 0 && supports_software_single_step ())
646 reinsert_single_step_breakpoints (event_thr
);
648 gdb_assert (has_single_step_breakpoints (event_thr
));
651 /* Report the event. */
654 else if (event
== PTRACE_EVENT_EXEC
&& cs
.report_exec_events
)
656 struct process_info
*proc
;
657 std::vector
<int> syscalls_to_catch
;
663 debug_printf ("HEW: Got exec event from LWP %ld\n",
664 lwpid_of (event_thr
));
667 /* Get the event ptid. */
668 event_ptid
= ptid_of (event_thr
);
669 event_pid
= event_ptid
.pid ();
671 /* Save the syscall list from the execing process. */
672 proc
= get_thread_process (event_thr
);
673 syscalls_to_catch
= std::move (proc
->syscalls_to_catch
);
675 /* Delete the execing process and all its threads. */
677 current_thread
= NULL
;
679 /* Create a new process/lwp/thread. */
680 proc
= add_linux_process (event_pid
, 0);
681 event_lwp
= add_lwp (event_ptid
);
682 event_thr
= get_lwp_thread (event_lwp
);
683 gdb_assert (current_thread
== event_thr
);
684 arch_setup_thread (event_thr
);
686 /* Set the event status. */
687 event_lwp
->waitstatus
.kind
= TARGET_WAITKIND_EXECD
;
688 event_lwp
->waitstatus
.value
.execd_pathname
689 = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr
)));
691 /* Mark the exec status as pending. */
692 event_lwp
->stopped
= 1;
693 event_lwp
->status_pending_p
= 1;
694 event_lwp
->status_pending
= wstat
;
695 event_thr
->last_resume_kind
= resume_continue
;
696 event_thr
->last_status
.kind
= TARGET_WAITKIND_IGNORE
;
698 /* Update syscall state in the new lwp, effectively mid-syscall too. */
699 event_lwp
->syscall_state
= TARGET_WAITKIND_SYSCALL_ENTRY
;
701 /* Restore the list to catch. Don't rely on the client, which is free
702 to avoid sending a new list when the architecture doesn't change.
703 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
704 proc
->syscalls_to_catch
= std::move (syscalls_to_catch
);
706 /* Report the event. */
707 *orig_event_lwp
= event_lwp
;
711 internal_error (__FILE__
, __LINE__
, _("unknown ptrace event %d"), event
);
715 linux_process_target::get_pc (lwp_info
*lwp
)
717 struct thread_info
*saved_thread
;
718 struct regcache
*regcache
;
721 if (!low_supports_breakpoints ())
724 saved_thread
= current_thread
;
725 current_thread
= get_lwp_thread (lwp
);
727 regcache
= get_thread_regcache (current_thread
, 1);
728 pc
= low_get_pc (regcache
);
731 debug_printf ("pc is 0x%lx\n", (long) pc
);
733 current_thread
= saved_thread
;
738 linux_process_target::get_syscall_trapinfo (lwp_info
*lwp
, int *sysno
)
740 struct thread_info
*saved_thread
;
741 struct regcache
*regcache
;
743 saved_thread
= current_thread
;
744 current_thread
= get_lwp_thread (lwp
);
746 regcache
= get_thread_regcache (current_thread
, 1);
747 low_get_syscall_trapinfo (regcache
, sysno
);
750 debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno
);
752 current_thread
= saved_thread
;
756 linux_process_target::low_get_syscall_trapinfo (regcache
*regcache
, int *sysno
)
758 /* By default, report an unknown system call number. */
759 *sysno
= UNKNOWN_SYSCALL
;
763 linux_process_target::save_stop_reason (lwp_info
*lwp
)
766 CORE_ADDR sw_breakpoint_pc
;
767 struct thread_info
*saved_thread
;
768 #if USE_SIGTRAP_SIGINFO
772 if (!low_supports_breakpoints ())
776 sw_breakpoint_pc
= pc
- low_decr_pc_after_break ();
778 /* breakpoint_at reads from the current thread. */
779 saved_thread
= current_thread
;
780 current_thread
= get_lwp_thread (lwp
);
782 #if USE_SIGTRAP_SIGINFO
783 if (ptrace (PTRACE_GETSIGINFO
, lwpid_of (current_thread
),
784 (PTRACE_TYPE_ARG3
) 0, &siginfo
) == 0)
786 if (siginfo
.si_signo
== SIGTRAP
)
788 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo
.si_code
)
789 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo
.si_code
))
791 /* The si_code is ambiguous on this arch -- check debug
793 if (!check_stopped_by_watchpoint (lwp
))
794 lwp
->stop_reason
= TARGET_STOPPED_BY_SW_BREAKPOINT
;
796 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo
.si_code
))
798 /* If we determine the LWP stopped for a SW breakpoint,
799 trust it. Particularly don't check watchpoint
800 registers, because at least on s390, we'd find
801 stopped-by-watchpoint as long as there's a watchpoint
803 lwp
->stop_reason
= TARGET_STOPPED_BY_SW_BREAKPOINT
;
805 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo
.si_code
))
807 /* This can indicate either a hardware breakpoint or
808 hardware watchpoint. Check debug registers. */
809 if (!check_stopped_by_watchpoint (lwp
))
810 lwp
->stop_reason
= TARGET_STOPPED_BY_HW_BREAKPOINT
;
812 else if (siginfo
.si_code
== TRAP_TRACE
)
814 /* We may have single stepped an instruction that
815 triggered a watchpoint. In that case, on some
816 architectures (such as x86), instead of TRAP_HWBKPT,
817 si_code indicates TRAP_TRACE, and we need to check
818 the debug registers separately. */
819 if (!check_stopped_by_watchpoint (lwp
))
820 lwp
->stop_reason
= TARGET_STOPPED_BY_SINGLE_STEP
;
825 /* We may have just stepped a breakpoint instruction. E.g., in
826 non-stop mode, GDB first tells the thread A to step a range, and
827 then the user inserts a breakpoint inside the range. In that
828 case we need to report the breakpoint PC. */
829 if ((!lwp
->stepping
|| lwp
->stop_pc
== sw_breakpoint_pc
)
830 && low_breakpoint_at (sw_breakpoint_pc
))
831 lwp
->stop_reason
= TARGET_STOPPED_BY_SW_BREAKPOINT
;
833 if (hardware_breakpoint_inserted_here (pc
))
834 lwp
->stop_reason
= TARGET_STOPPED_BY_HW_BREAKPOINT
;
836 if (lwp
->stop_reason
== TARGET_STOPPED_BY_NO_REASON
)
837 check_stopped_by_watchpoint (lwp
);
840 if (lwp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
)
844 struct thread_info
*thr
= get_lwp_thread (lwp
);
846 debug_printf ("CSBB: %s stopped by software breakpoint\n",
847 target_pid_to_str (ptid_of (thr
)));
850 /* Back up the PC if necessary. */
851 if (pc
!= sw_breakpoint_pc
)
853 struct regcache
*regcache
854 = get_thread_regcache (current_thread
, 1);
855 low_set_pc (regcache
, sw_breakpoint_pc
);
858 /* Update this so we record the correct stop PC below. */
859 pc
= sw_breakpoint_pc
;
861 else if (lwp
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
)
865 struct thread_info
*thr
= get_lwp_thread (lwp
);
867 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
868 target_pid_to_str (ptid_of (thr
)));
871 else if (lwp
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
)
875 struct thread_info
*thr
= get_lwp_thread (lwp
);
877 debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
878 target_pid_to_str (ptid_of (thr
)));
881 else if (lwp
->stop_reason
== TARGET_STOPPED_BY_SINGLE_STEP
)
885 struct thread_info
*thr
= get_lwp_thread (lwp
);
887 debug_printf ("CSBB: %s stopped by trace\n",
888 target_pid_to_str (ptid_of (thr
)));
893 current_thread
= saved_thread
;
898 linux_process_target::add_lwp (ptid_t ptid
)
900 struct lwp_info
*lwp
;
902 lwp
= new lwp_info
{};
904 lwp
->waitstatus
.kind
= TARGET_WAITKIND_IGNORE
;
906 lwp
->thread
= add_thread (ptid
, lwp
);
908 low_new_thread (lwp
);
914 linux_process_target::low_new_thread (lwp_info
*info
)
919 /* Callback to be used when calling fork_inferior, responsible for
920 actually initiating the tracing of the inferior. */
925 if (ptrace (PTRACE_TRACEME
, 0, (PTRACE_TYPE_ARG3
) 0,
926 (PTRACE_TYPE_ARG4
) 0) < 0)
927 trace_start_error_with_name ("ptrace");
929 if (setpgid (0, 0) < 0)
930 trace_start_error_with_name ("setpgid");
932 /* If GDBserver is connected to gdb via stdio, redirect the inferior's
933 stdout to stderr so that inferior i/o doesn't corrupt the connection.
934 Also, redirect stdin to /dev/null. */
935 if (remote_connection_is_stdio ())
938 trace_start_error_with_name ("close");
939 if (open ("/dev/null", O_RDONLY
) < 0)
940 trace_start_error_with_name ("open");
942 trace_start_error_with_name ("dup2");
943 if (write (2, "stdin/stdout redirected\n",
944 sizeof ("stdin/stdout redirected\n") - 1) < 0)
946 /* Errors ignored. */;
951 /* Start an inferior process and returns its pid.
952 PROGRAM is the name of the program to be started, and PROGRAM_ARGS
953 are its arguments. */
956 linux_process_target::create_inferior (const char *program
,
957 const std::vector
<char *> &program_args
)
959 client_state
&cs
= get_client_state ();
960 struct lwp_info
*new_lwp
;
965 maybe_disable_address_space_randomization restore_personality
966 (cs
.disable_randomization
);
967 std::string str_program_args
= construct_inferior_arguments (program_args
);
969 pid
= fork_inferior (program
,
970 str_program_args
.c_str (),
971 get_environ ()->envp (), linux_ptrace_fun
,
972 NULL
, NULL
, NULL
, NULL
);
975 add_linux_process (pid
, 0);
977 ptid
= ptid_t (pid
, pid
);
978 new_lwp
= add_lwp (ptid
);
979 new_lwp
->must_set_ptrace_flags
= 1;
981 post_fork_inferior (pid
, program
);
986 /* Implement the post_create_inferior target_ops method. */
989 linux_process_target::post_create_inferior ()
991 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
995 if (lwp
->must_set_ptrace_flags
)
997 struct process_info
*proc
= current_process ();
998 int options
= linux_low_ptrace_options (proc
->attached
);
1000 linux_enable_event_reporting (lwpid_of (current_thread
), options
);
1001 lwp
->must_set_ptrace_flags
= 0;
1006 linux_process_target::attach_lwp (ptid_t ptid
)
1008 struct lwp_info
*new_lwp
;
1009 int lwpid
= ptid
.lwp ();
1011 if (ptrace (PTRACE_ATTACH
, lwpid
, (PTRACE_TYPE_ARG3
) 0, (PTRACE_TYPE_ARG4
) 0)
1015 new_lwp
= add_lwp (ptid
);
1017 /* We need to wait for SIGSTOP before being able to make the next
1018 ptrace call on this LWP. */
1019 new_lwp
->must_set_ptrace_flags
= 1;
1021 if (linux_proc_pid_is_stopped (lwpid
))
1024 debug_printf ("Attached to a stopped process\n");
1026 /* The process is definitely stopped. It is in a job control
1027 stop, unless the kernel predates the TASK_STOPPED /
1028 TASK_TRACED distinction, in which case it might be in a
1029 ptrace stop. Make sure it is in a ptrace stop; from there we
1030 can kill it, signal it, et cetera.
1032 First make sure there is a pending SIGSTOP. Since we are
1033 already attached, the process can not transition from stopped
1034 to running without a PTRACE_CONT; so we know this signal will
1035 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1036 probably already in the queue (unless this kernel is old
1037 enough to use TASK_STOPPED for ptrace stops); but since
1038 SIGSTOP is not an RT signal, it can only be queued once. */
1039 kill_lwp (lwpid
, SIGSTOP
);
1041 /* Finally, resume the stopped process. This will deliver the
1042 SIGSTOP (or a higher priority signal, just like normal
1043 PTRACE_ATTACH), which we'll catch later on. */
1044 ptrace (PTRACE_CONT
, lwpid
, (PTRACE_TYPE_ARG3
) 0, (PTRACE_TYPE_ARG4
) 0);
1047 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
1048 brings it to a halt.
1050 There are several cases to consider here:
1052 1) gdbserver has already attached to the process and is being notified
1053 of a new thread that is being created.
1054 In this case we should ignore that SIGSTOP and resume the
1055 process. This is handled below by setting stop_expected = 1,
1056 and the fact that add_thread sets last_resume_kind ==
1059 2) This is the first thread (the process thread), and we're attaching
1060 to it via attach_inferior.
1061 In this case we want the process thread to stop.
1062 This is handled by having linux_attach set last_resume_kind ==
1063 resume_stop after we return.
1065 If the pid we are attaching to is also the tgid, we attach to and
1066 stop all the existing threads. Otherwise, we attach to pid and
1067 ignore any other threads in the same group as this pid.
1069 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1071 In this case we want the thread to stop.
1072 FIXME: This case is currently not properly handled.
1073 We should wait for the SIGSTOP but don't. Things work apparently
1074 because enough time passes between when we ptrace (ATTACH) and when
1075 gdb makes the next ptrace call on the thread.
1077 On the other hand, if we are currently trying to stop all threads, we
1078 should treat the new thread as if we had sent it a SIGSTOP. This works
1079 because we are guaranteed that the add_lwp call above added us to the
1080 end of the list, and so the new thread has not yet reached
1081 wait_for_sigstop (but will). */
1082 new_lwp
->stop_expected
= 1;
1087 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1088 already attached. Returns true if a new LWP is found, false
1092 attach_proc_task_lwp_callback (ptid_t ptid
)
1094 /* Is this a new thread? */
1095 if (find_thread_ptid (ptid
) == NULL
)
1097 int lwpid
= ptid
.lwp ();
1101 debug_printf ("Found new lwp %d\n", lwpid
);
1103 err
= the_linux_target
->attach_lwp (ptid
);
1105 /* Be quiet if we simply raced with the thread exiting. EPERM
1106 is returned if the thread's task still exists, and is marked
1107 as exited or zombie, as well as other conditions, so in that
1108 case, confirm the status in /proc/PID/status. */
1110 || (err
== EPERM
&& linux_proc_pid_is_gone (lwpid
)))
1114 debug_printf ("Cannot attach to lwp %d: "
1115 "thread is gone (%d: %s)\n",
1116 lwpid
, err
, safe_strerror (err
));
1122 = linux_ptrace_attach_fail_reason_string (ptid
, err
);
1124 warning (_("Cannot attach to lwp %d: %s"), lwpid
, reason
.c_str ());
1132 static void async_file_mark (void);
1134 /* Attach to PID. If PID is the tgid, attach to it and all
1138 linux_process_target::attach (unsigned long pid
)
1140 struct process_info
*proc
;
1141 struct thread_info
*initial_thread
;
1142 ptid_t ptid
= ptid_t (pid
, pid
);
1145 proc
= add_linux_process (pid
, 1);
1147 /* Attach to PID. We will check for other threads
1149 err
= attach_lwp (ptid
);
1152 remove_process (proc
);
1154 std::string reason
= linux_ptrace_attach_fail_reason_string (ptid
, err
);
1155 error ("Cannot attach to process %ld: %s", pid
, reason
.c_str ());
1158 /* Don't ignore the initial SIGSTOP if we just attached to this
1159 process. It will be collected by wait shortly. */
1160 initial_thread
= find_thread_ptid (ptid_t (pid
, pid
));
1161 initial_thread
->last_resume_kind
= resume_stop
;
1163 /* We must attach to every LWP. If /proc is mounted, use that to
1164 find them now. On the one hand, the inferior may be using raw
1165 clone instead of using pthreads. On the other hand, even if it
1166 is using pthreads, GDB may not be connected yet (thread_db needs
1167 to do symbol lookups, through qSymbol). Also, thread_db walks
1168 structures in the inferior's address space to find the list of
1169 threads/LWPs, and those structures may well be corrupted. Note
1170 that once thread_db is loaded, we'll still use it to list threads
1171 and associate pthread info with each LWP. */
1172 linux_proc_attach_tgid_threads (pid
, attach_proc_task_lwp_callback
);
1174 /* GDB will shortly read the xml target description for this
1175 process, to figure out the process' architecture. But the target
1176 description is only filled in when the first process/thread in
1177 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1178 that now, otherwise, if GDB is fast enough, it could read the
1179 target description _before_ that initial stop. */
1182 struct lwp_info
*lwp
;
1184 ptid_t pid_ptid
= ptid_t (pid
);
1186 lwpid
= wait_for_event_filtered (pid_ptid
, pid_ptid
, &wstat
, __WALL
);
1187 gdb_assert (lwpid
> 0);
1189 lwp
= find_lwp_pid (ptid_t (lwpid
));
1191 if (!WIFSTOPPED (wstat
) || WSTOPSIG (wstat
) != SIGSTOP
)
1193 lwp
->status_pending_p
= 1;
1194 lwp
->status_pending
= wstat
;
1197 initial_thread
->last_resume_kind
= resume_continue
;
1201 gdb_assert (proc
->tdesc
!= NULL
);
/* Return true if PID's thread group has no thread other than the one
   being considered, i.e. the leader is the last thread standing.  The
   search lambda flags the second thread of PID it sees; find_thread
   then returns NULL exactly when no second thread exists.
   NOTE(review): extraction dropped this function's return-type line,
   braces and the lambda's seen_one updates; text kept verbatim.  */
1208 last_thread_of_process_p (int pid
)
1210 bool seen_one
= false;
1212 thread_info
*thread
= find_thread (pid
, [&] (thread_info
*thr_arg
)
1216 /* This is the first thread of this process we see. */
1222 /* This is the second thread of this process we see. */
/* NULL here means the lambda never matched a second thread.  */
1227 return thread
== NULL
;
/* Kill the single LWP.  Sends SIGKILL via kill_lwp first, then also
   issues PTRACE_KILL as a belt-and-braces fallback; see the long
   comment below for the rationale.  Does not wait for the LWP to die
   (kill_wait_lwp does that).
   NOTE(review): extraction dropped the errno=0 resets and the
   debug_threads guards around the debug_printf calls.  */
1233 linux_kill_one_lwp (struct lwp_info
*lwp
)
1235 struct thread_info
*thr
= get_lwp_thread (lwp
);
1236 int pid
= lwpid_of (thr
);
1238 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1239 there is no signal context, and ptrace(PTRACE_KILL) (or
1240 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1241 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1242 alternative is to kill with SIGKILL. We only need one SIGKILL
1243 per process, not one for each thread. But since we still support
1244 support debugging programs using raw clone without CLONE_THREAD,
1245 we send one for each thread. For years, we used PTRACE_KILL
1246 only, so we're being a bit paranoid about some old kernels where
1247 PTRACE_KILL might work better (dubious if there are any such, but
1248 that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
1249 second, and so we're fine everywhere. */
1252 kill_lwp (pid
, SIGKILL
);
/* Capture errno immediately; debug_printf below may clobber it.  */
1255 int save_errno
= errno
;
1257 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1258 target_pid_to_str (ptid_of (thr
)),
1259 save_errno
? safe_strerror (save_errno
) : "OK");
1263 ptrace (PTRACE_KILL
, pid
, (PTRACE_TYPE_ARG3
) 0, (PTRACE_TYPE_ARG4
) 0);
1266 int save_errno
= errno
;
1268 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1269 target_pid_to_str (ptid_of (thr
)),
1270 save_errno
? safe_strerror (save_errno
) : "OK");
1274 /* Kill LWP and wait for it to die. */
/* Calls linux_kill_one_lwp, then reaps the LWP's wait status with
   my_waitpid in a loop until it stops reporting stop events.
   NOTE(review): extraction dropped the res/wstat declarations and the
   do-loop header; text kept verbatim.  */
1277 kill_wait_lwp (struct lwp_info
*lwp
)
1279 struct thread_info
*thr
= get_lwp_thread (lwp
);
1280 int pid
= ptid_of (thr
).pid ();
1281 int lwpid
= ptid_of (thr
).lwp ();
1286 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid
, pid
);
1290 linux_kill_one_lwp (lwp
);
1292 /* Make sure it died. Notes:
1294 - The loop is most likely unnecessary.
1296 - We don't use wait_for_event as that could delete lwps
1297 while we're iterating over them. We're not interested in
1298 any pending status at this point, only in making sure all
1299 wait status on the kernel side are collected until the
1302 - We don't use __WALL here as the __WALL emulation relies on
1303 SIGCHLD, and killing a stopped process doesn't generate
1304 one, nor an exit status.
/* Try plain waitpid first; on ECHILD retry with __WCLONE, since a
   clone LWP is not reported by a plain wait.  */
1306 res
= my_waitpid (lwpid
, &wstat
, 0);
1307 if (res
== -1 && errno
== ECHILD
)
1308 res
= my_waitpid (lwpid
, &wstat
, __WCLONE
);
1309 } while (res
> 0 && WIFSTOPPED (wstat
));
1311 /* Even if it was stopped, the child may have already disappeared.
1312 E.g., if it was killed by SIGKILL. */
1313 if (res
< 0 && errno
!= ECHILD
)
1314 perror_with_name ("kill_wait_lwp");
1317 /* Callback for `for_each_thread'. Kills an lwp of a given process,
1318 except the leader. */
/* The leader (lwpid == pid) is skipped here and reaped last by the
   caller, to work around the kernel bug described below.  */
1321 kill_one_lwp_callback (thread_info
*thread
, int pid
)
1323 struct lwp_info
*lwp
= get_thread_lwp (thread
);
1325 /* We avoid killing the first thread here, because of a Linux kernel (at
1326 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1327 the children get a chance to be reaped, it will remain a zombie
1330 if (lwpid_of (thread
) == pid
)
1333 debug_printf ("lkop: is last of process %s\n",
1334 target_pid_to_str (thread
->id
));
1338 kill_wait_lwp (lwp
);
/* Kill the whole inferior PROCESS: stop all LWPs, kill every non-leader
   thread via kill_one_lwp_callback, then kill the leader last, and
   finally unstop LWPs of other processes.
   NOTE(review): extraction dropped the return-value handling and the
   `if (lwp == NULL)' guard around the "cannot find lwp" message.  */
1342 linux_process_target::kill (process_info
*process
)
1344 int pid
= process
->pid
;
1346 /* If we're killing a running inferior, make sure it is stopped
1347 first, as PTRACE_KILL will not work otherwise. */
1348 stop_all_lwps (0, NULL
);
1350 for_each_thread (pid
, [&] (thread_info
*thread
)
1352 kill_one_lwp_callback (thread
, pid
);
1355 /* See the comment in linux_kill_one_lwp. We did not kill the first
1356 thread in the list, so do so now. */
1357 lwp_info
*lwp
= find_lwp_pid (ptid_t (pid
));
1362 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1366 kill_wait_lwp (lwp
);
1370 /* Since we presently can only stop all lwps of all processes, we
1371 need to unstop lwps of other processes. */
1372 unstop_all_lwps (0, NULL
);
1376 /* Get pending signal of THREAD, for detaching purposes. This is the
1377 signal the thread last stopped for, which we need to deliver to the
1378 thread when detaching, otherwise, it'd be suppressed/lost. */
/* Returns a host signal number suitable as the PTRACE_DETACH data
   argument, or (presumably) 0 when nothing should be delivered.
   NOTE(review): extraction dropped the `status' declaration, several
   braces/else arms and the `return 0' exits; text kept verbatim.  */
1381 get_detach_signal (struct thread_info
*thread
)
1383 client_state
&cs
= get_client_state ();
1384 enum gdb_signal signo
= GDB_SIGNAL_0
;
1386 struct lwp_info
*lp
= get_thread_lwp (thread
);
/* Prefer a status we have queued but not yet reported to GDB.  */
1388 if (lp
->status_pending_p
)
1389 status
= lp
->status_pending
;
1392 /* If the thread had been suspended by gdbserver, and it stopped
1393 cleanly, then it'll have stopped with SIGSTOP. But we don't
1394 want to deliver that SIGSTOP. */
1395 if (thread
->last_status
.kind
!= TARGET_WAITKIND_STOPPED
1396 || thread
->last_status
.value
.sig
== GDB_SIGNAL_0
)
1399 /* Otherwise, we may need to deliver the signal we
1401 status
= lp
->last_status
;
1404 if (!WIFSTOPPED (status
))
1407 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1408 target_pid_to_str (ptid_of (thread
)));
1412 /* Extended wait statuses aren't real SIGTRAPs. */
1413 if (WSTOPSIG (status
) == SIGTRAP
&& linux_is_extended_waitstatus (status
))
1416 debug_printf ("GPS: lwp %s had stopped with extended "
1417 "status: no pending signal\n",
1418 target_pid_to_str (ptid_of (thread
)));
/* Map the host stop signal to GDB's signal enumeration so it can be
   checked against the client's pass/nopass table.  */
1422 signo
= gdb_signal_from_host (WSTOPSIG (status
));
1424 if (cs
.program_signals_p
&& !cs
.program_signals
[signo
])
1427 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1428 target_pid_to_str (ptid_of (thread
)),
1429 gdb_signal_to_string (signo
));
1432 else if (!cs
.program_signals_p
1433 /* If we have no way to know which signals GDB does not
1434 want to have passed to the program, assume
1435 SIGTRAP/SIGINT, which is GDB's default. */
1436 && (signo
== GDB_SIGNAL_TRAP
|| signo
== GDB_SIGNAL_INT
))
1439 debug_printf ("GPS: lwp %s had signal %s, "
1440 "but we don't know if we should pass it. "
1441 "Default to not.\n",
1442 target_pid_to_str (ptid_of (thread
)),
1443 gdb_signal_to_string (signo
));
1449 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1450 target_pid_to_str (ptid_of (thread
)),
1451 gdb_signal_to_string (signo
));
/* Deliver the raw host signal number, not the gdb_signal value.  */
1453 return WSTOPSIG (status
);
/* Detach from a single LWP: cancel any pending SIGSTOP, compute the
   signal to deliver on detach, flush registers, and PTRACE_DETACH.
   ESRCH from PTRACE_DETACH means the LWP turned zombie, in which case
   it is reaped here instead.
   NOTE(review): extraction dropped the sig/lwpid/status declarations,
   the try block header and several braces; text kept verbatim.  */
1458 linux_process_target::detach_one_lwp (lwp_info
*lwp
)
1460 struct thread_info
*thread
= get_lwp_thread (lwp
);
1464 /* If there is a pending SIGSTOP, get rid of it. */
1465 if (lwp
->stop_expected
)
1468 debug_printf ("Sending SIGCONT to %s\n",
1469 target_pid_to_str (ptid_of (thread
)));
1471 kill_lwp (lwpid_of (thread
), SIGCONT
);
1472 lwp
->stop_expected
= 0;
1475 /* Pass on any pending signal for this thread. */
1476 sig
= get_detach_signal (thread
);
1478 /* Preparing to resume may try to write registers, and fail if the
1479 lwp is zombie. If that happens, ignore the error. We'll handle
1480 it below, when detach fails with ESRCH. */
1483 /* Flush any pending changes to the process's registers. */
1484 regcache_invalidate_thread (thread
);
1486 /* Finally, let it resume. */
1487 low_prepare_to_resume (lwp
);
1489 catch (const gdb_exception_error
&ex
)
1491 if (!check_ptrace_stopped_lwp_gone (lwp
))
1495 lwpid
= lwpid_of (thread
);
1496 if (ptrace (PTRACE_DETACH
, lwpid
, (PTRACE_TYPE_ARG3
) 0,
1497 (PTRACE_TYPE_ARG4
) (long) sig
) < 0)
1499 int save_errno
= errno
;
1501 /* We know the thread exists, so ESRCH must mean the lwp is
1502 zombie. This can happen if one of the already-detached
1503 threads exits the whole thread group. In that case we're
1504 still attached, and must reap the lwp. */
1505 if (save_errno
== ESRCH
)
1509 ret
= my_waitpid (lwpid
, &status
, __WALL
);
1512 warning (_("Couldn't reap LWP %d while detaching: %s"),
1513 lwpid
, safe_strerror (errno
));
1515 else if (!WIFEXITED (status
) && !WIFSIGNALED (status
))
1517 warning (_("Reaping LWP %d while detaching "
1518 "returned unexpected status 0x%x"),
/* Any other errno than ESRCH is a hard failure.  */
1524 error (_("Can't detach %s: %s"),
1525 target_pid_to_str (ptid_of (thread
)),
1526 safe_strerror (save_errno
));
1529 else if (debug_threads
)
1531 debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
1532 target_pid_to_str (ptid_of (thread
)),
/* Detach from the whole PROCESS: finish any in-flight step-over, stop
   all LWPs, detach thread_db, stabilize threads out of jump pads,
   detach every clone LWP, then the leader last, and unstop LWPs of
   other processes.
   NOTE(review): extraction dropped the #endif after thread_db_detach
   and the return statement; text kept verbatim.  */
1540 linux_process_target::detach (process_info
*process
)
1542 struct lwp_info
*main_lwp
;
1544 /* As there's a step over already in progress, let it finish first,
1545 otherwise nesting a stabilize_threads operation on top gets real
1547 complete_ongoing_step_over ();
1549 /* Stop all threads before detaching. First, ptrace requires that
1550 the thread is stopped to successfully detach. Second, thread_db
1551 may need to uninstall thread event breakpoints from memory, which
1552 only works with a stopped process anyway. */
1553 stop_all_lwps (0, NULL
);
1555 #ifdef USE_THREAD_DB
1556 thread_db_detach (process
);
1559 /* Stabilize threads (move out of jump pads). */
1560 target_stabilize_threads ();
1562 /* Detach from the clone lwps first. If the thread group exits just
1563 while we're detaching, we must reap the clone lwps before we're
1564 able to reap the leader. */
1565 for_each_thread (process
->pid
, [this] (thread_info
*thread
)
1567 /* We don't actually detach from the thread group leader just yet.
1568 If the thread group exits, we must reap the zombie clone lwps
1569 before we're able to reap the leader. */
1570 if (thread
->id
.pid () == thread
->id
.lwp ())
1573 lwp_info
*lwp
= get_thread_lwp (thread
);
1574 detach_one_lwp (lwp
);
/* Now the leader itself.  */
1577 main_lwp
= find_lwp_pid (ptid_t (process
->pid
));
1578 detach_one_lwp (main_lwp
);
1582 /* Since we presently can only stop all lwps of all processes, we
1583 need to unstop lwps of other processes. */
1584 unstop_all_lwps (0, NULL
);
1588 /* Remove all LWPs that belong to process PROC from the lwp list. */
/* Tears down bookkeeping for an already-dead PROCESS: thread_db state,
   every lwp_info, the arch-private data, and the process entry itself.
   NOTE(review): extraction dropped the #endif after thread_db_mourn
   and the free (priv) line; text kept verbatim.  */
1591 linux_process_target::mourn (process_info
*process
)
1593 struct process_info_private
*priv
;
1595 #ifdef USE_THREAD_DB
1596 thread_db_mourn (process
);
1599 for_each_thread (process
->pid
, [this] (thread_info
*thread
)
1601 delete_lwp (get_thread_lwp (thread
));
1604 /* Freeing all private data. */
1605 priv
= process
->priv
;
1606 low_delete_process (priv
->arch_private
);
1608 process
->priv
= NULL
;
1610 remove_process (process
);
/* Wait for process PID to exit: loop on my_waitpid until either an
   exit/signal status is seen or waitpid reports ECHILD (no child).
   NOTE(review): extraction dropped the ret/status declarations and the
   do-loop header; text kept verbatim.  */
1614 linux_process_target::join (int pid
)
1619 ret
= my_waitpid (pid
, &status
, 0);
1620 if (WIFEXITED (status
) || WIFSIGNALED (status
))
1622 } while (ret
!= -1 || errno
!= ECHILD
);
1625 /* Return true if the given thread is still alive. */
/* Looks up the lwp_info for PTID and reports alive unless it has been
   marked dead.  NOTE(review): the `lwp == NULL' early-return visible
   upstream was dropped by extraction.  */
1628 linux_process_target::thread_alive (ptid_t ptid
)
1630 struct lwp_info
*lwp
= find_lwp_pid (ptid
);
1632 /* We assume we always know if a thread exits. If a whole process
1633 exited but we still haven't been able to report it to GDB, we'll
1634 hold on to the last lwp of the dead process. */
1636 return !lwp_is_marked_dead (lwp
);
/* Check whether THREAD's previously-recorded pending stop is still
   valid to report.  A breakpoint stop becomes stale if the PC moved or
   the breakpoint at the stop PC was removed; in that case the pending
   status is discarded.
   NOTE(review): extraction dropped the pc/discard declarations, the
   return statements and several braces; text kept verbatim.  */
1642 linux_process_target::thread_still_has_status_pending (thread_info
*thread
)
1644 struct lwp_info
*lp
= get_thread_lwp (thread
);
1646 if (!lp
->status_pending_p
)
1649 if (thread
->last_resume_kind
!= resume_stop
1650 && (lp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
1651 || lp
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
))
1653 struct thread_info
*saved_thread
;
1657 gdb_assert (lp
->last_status
!= 0);
/* Temporarily switch threads: get_pc and the breakpoint checks
   operate on current_thread.  */
1661 saved_thread
= current_thread
;
1662 current_thread
= thread
;
1664 if (pc
!= lp
->stop_pc
)
1667 debug_printf ("PC of %ld changed\n",
1672 #if !USE_SIGTRAP_SIGINFO
1673 else if (lp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
1674 && !low_breakpoint_at (pc
))
1677 debug_printf ("previous SW breakpoint of %ld gone\n",
1681 else if (lp
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
1682 && !hardware_breakpoint_inserted_here (pc
))
1685 debug_printf ("previous HW breakpoint of %ld gone\n",
1691 current_thread
= saved_thread
;
1696 debug_printf ("discarding pending breakpoint status\n");
1697 lp
->status_pending_p
= 0;
1705 /* Returns true if LWP is resumed from the client's perspective. */
/* True when the last resume request was not `stop', or when a stop
   was requested but its corresponding stop event has not yet been
   reported to GDB (last_status still TARGET_WAITKIND_IGNORE).
   NOTE(review): the `return 1'/`return 0' lines were dropped by
   extraction; text kept verbatim.  */
1708 lwp_resumed (struct lwp_info
*lwp
)
1710 struct thread_info
*thread
= get_lwp_thread (lwp
);
1712 if (thread
->last_resume_kind
!= resume_stop
)
1715 /* Did gdb send us a `vCont;t', but we haven't reported the
1716 corresponding stop to gdb yet? If so, the thread is still
1717 resumed/running from gdb's perspective. */
1718 if (thread
->last_resume_kind
== resume_stop
1719 && thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
)
/* Predicate used when searching for a thread with a reportable event:
   THREAD must match PTID, be resumed from the client's view, and have
   a still-valid pending status.  A stale pending breakpoint status is
   cleared and the LWP quietly re-resumed.
   NOTE(review): the second parameter's declaration line (ptid) and the
   early `return 0's were dropped by extraction.  */
1726 linux_process_target::status_pending_p_callback (thread_info
*thread
,
1729 struct lwp_info
*lp
= get_thread_lwp (thread
);
1731 /* Check if we're only interested in events from a specific process
1732 or a specific LWP. */
1733 if (!thread
->id
.matches (ptid
))
1736 if (!lwp_resumed (lp
))
1739 if (lp
->status_pending_p
1740 && !thread_still_has_status_pending (thread
))
/* Pending status went stale; resume the LWP the way it was going
   (stepping or continuing), with no signal.  */
1742 resume_one_lwp (lp
, lp
->stepping
, GDB_SIGNAL_0
, NULL
);
1746 return lp
->status_pending_p
;
/* Find the lwp_info whose lwpid matches PTID.  If PTID carries no lwp
   component, its pid is used as the lwpid (the leader).  Presumably
   returns NULL when no thread matches -- the NULL-check line was
   dropped by extraction.  */
1750 find_lwp_pid (ptid_t ptid
)
1752 thread_info
*thread
= find_thread ([&] (thread_info
*thr_arg
)
1754 int lwp
= ptid
.lwp () != 0 ? ptid
.lwp () : ptid
.pid ();
1755 return thr_arg
->id
.lwp () == lwp
;
1761 return get_thread_lwp (thread
);
1764 /* Return the number of known LWPs in the tgid given by PID. */
/* NOTE(review): the signature and counter of num_lwps were lost in
   extraction; only its header comment and the for_each_thread loop
   opening remain visible here.  */
1771 for_each_thread (pid
, [&] (thread_info
*thread
)
1779 /* See nat/linux-nat.h. */
/* Run CALLBACK over every LWP matching FILTER until it returns true;
   return that LWP's lwp_info (presumably NULL if none matched -- the
   NULL-check line was dropped by extraction).  */
1782 iterate_over_lwps (ptid_t filter
,
1783 gdb::function_view
<iterate_over_lwps_ftype
> callback
)
1785 thread_info
*thread
= find_thread (filter
, [&] (thread_info
*thr_arg
)
1787 lwp_info
*lwp
= get_thread_lwp (thr_arg
);
1789 return callback (lwp
);
1795 return get_thread_lwp (thread
);
/* For each debugged process, detect a thread-group leader that has
   become zombie while other threads remain (exit-only-leader, or an
   exec by a non-leader), and delete its stale lwp_info.  See the long
   in-body comment for the two scenarios.  */
1799 linux_process_target::check_zombie_leaders ()
1801 for_each_process ([this] (process_info
*proc
) {
1802 pid_t leader_pid
= pid_of (proc
);
1803 struct lwp_info
*leader_lp
;
1805 leader_lp
= find_lwp_pid (ptid_t (leader_pid
));
1808 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1809 "num_lwps=%d, zombie=%d\n",
1810 leader_pid
, leader_lp
!= NULL
, num_lwps (leader_pid
),
1811 linux_proc_pid_is_zombie (leader_pid
));
/* Only act when the leader is known, running (not stopped by us),
   not the last thread, and /proc says it is zombie.  */
1813 if (leader_lp
!= NULL
&& !leader_lp
->stopped
1814 /* Check if there are other threads in the group, as we may
1815 have raced with the inferior simply exiting. */
1816 && !last_thread_of_process_p (leader_pid
)
1817 && linux_proc_pid_is_zombie (leader_pid
))
1819 /* A leader zombie can mean one of two things:
1821 - It exited, and there's an exit status pending
1822 available, or only the leader exited (not the whole
1823 program). In the latter case, we can't waitpid the
1824 leader's exit status until all other threads are gone.
1826 - There are 3 or more threads in the group, and a thread
1827 other than the leader exec'd. On an exec, the Linux
1828 kernel destroys all other threads (except the execing
1829 one) in the thread group, and resets the execing thread's
1830 tid to the tgid. No exit notification is sent for the
1831 execing thread -- from the ptracer's perspective, it
1832 appears as though the execing thread just vanishes.
1833 Until we reap all other threads except the leader and the
1834 execing thread, the leader will be zombie, and the
1835 execing thread will be in `D (disc sleep)'. As soon as
1836 all other threads are reaped, the execing thread changes
1837 it's tid to the tgid, and the previous (zombie) leader
1838 vanishes, giving place to the "new" leader. We could try
1839 distinguishing the exit and exec cases, by waiting once
1840 more, and seeing if something comes out, but it doesn't
1841 sound useful. The previous leader _does_ go away, and
1842 we'll re-add the new one once we see the exec event
1843 (which is just the same as what would happen if the
1844 previous leader did exit voluntarily before some other
1848 debug_printf ("CZL: Thread group leader %d zombie "
1849 "(it exited, or another thread execd).\n",
1852 delete_lwp (leader_lp
);
1857 /* Callback for `find_thread'. Returns the first LWP that is not
/* Predicate: THREAD matches FILTER and its LWP is not stopped.
   NOTE(review): the tail of the header comment and the `return false'
   after the filter check were dropped by extraction.  */
1861 not_stopped_callback (thread_info
*thread
, ptid_t filter
)
1863 if (!thread
->id
.matches (filter
))
1866 lwp_info
*lwp
= get_thread_lwp (thread
);
1868 return !lwp
->stopped
;
1871 /* Increment LWP's suspend count. */
/* Warns (under debug_threads) when the count climbs suspiciously high
   (> 4), which would suggest unbalanced suspend/unsuspend calls.
   NOTE(review): the `lwp->suspended++' line itself was dropped by
   extraction; text kept verbatim.  */
1874 lwp_suspended_inc (struct lwp_info
*lwp
)
1878 if (debug_threads
&& lwp
->suspended
> 4)
1880 struct thread_info
*thread
= get_lwp_thread (lwp
);
1882 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1883 " suspended=%d\n", lwpid_of (thread
), lwp
->suspended
);
1887 /* Decrement LWP's suspend count. */
/* Going negative indicates an unsuspend without a matching suspend --
   treated as an internal error.
   NOTE(review): the `lwp->suspended--' line itself was dropped by
   extraction; text kept verbatim.  */
1890 lwp_suspended_decr (struct lwp_info
*lwp
)
1894 if (lwp
->suspended
< 0)
1896 struct thread_info
*thread
= get_lwp_thread (lwp
);
1898 internal_error (__FILE__
, __LINE__
,
1899 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread
),
1904 /* This function should only be called if the LWP got a SIGTRAP.
1906 Handle any tracepoint steps or hits. Return true if a tracepoint
1907 event was handled, 0 otherwise. */
/* Suspends LWP around the tracepoint processing so that any
   stop-all/unstop-all triggered by collection does not resume it.
   NOTE(review): the `return 1'/`return 0' tail was dropped by
   extraction; text kept verbatim.  */
1910 handle_tracepoints (struct lwp_info
*lwp
)
1912 struct thread_info
*tinfo
= get_lwp_thread (lwp
);
1913 int tpoint_related_event
= 0;
1915 gdb_assert (lwp
->suspended
== 0);
1917 /* If this tracepoint hit causes a tracing stop, we'll immediately
1918 uninsert tracepoints. To do this, we temporarily pause all
1919 threads, unpatch away, and then unpause threads. We need to make
1920 sure the unpausing doesn't resume LWP too. */
1921 lwp_suspended_inc (lwp
);
1923 /* And we need to be sure that any all-threads-stopping doesn't try
1924 to move threads out of the jump pads, as it could deadlock the
1925 inferior (LWP could be in the jump pad, maybe even holding the
1928 /* Do any necessary step collect actions. */
1929 tpoint_related_event
|= tracepoint_finished_step (tinfo
, lwp
->stop_pc
);
1931 tpoint_related_event
|= handle_tracepoint_bkpts (tinfo
, lwp
->stop_pc
);
1933 /* See if we just hit a tracepoint and do its main collect
1935 tpoint_related_event
|= tracepoint_was_hit (tinfo
, lwp
->stop_pc
);
1937 lwp_suspended_decr (lwp
);
1939 gdb_assert (lwp
->suspended
== 0);
1940 gdb_assert (!stabilizing_threads
1941 || (lwp
->collecting_fast_tracepoint
1942 != fast_tpoint_collect_result::not_collecting
));
1944 if (tpoint_related_event
)
1947 debug_printf ("got a tracepoint event\n");
/* Query the in-process agent: is LWP currently inside a fast
   tracepoint jump pad?  The per-thread thread-area address is used as
   an opaque thread identity; on failure to obtain it, report
   not_collecting.  */
1954 fast_tpoint_collect_result
1955 linux_process_target::linux_fast_tracepoint_collecting
1956 (lwp_info
*lwp
, fast_tpoint_collect_status
*status
)
1958 CORE_ADDR thread_area
;
1959 struct thread_info
*thread
= get_lwp_thread (lwp
);
1961 /* Get the thread area address. This is used to recognize which
1962 thread is which when tracing with the in-process agent library.
1963 We don't read anything from the address, and treat it as opaque;
1964 it's the address itself that we assume is unique per-thread. */
1965 if (low_get_thread_area (lwpid_of (thread
), &thread_area
) == -1)
1966 return fast_tpoint_collect_result::not_collecting
;
1968 return fast_tracepoint_collecting (thread_area
, lwp
->stop_pc
, status
);
/* Base implementation of the thread-area hook; arch-specific targets
   override it.  NOTE(review): the body (presumably `return -1;') was
   dropped by extraction -- confirm against upstream.  */
1972 linux_process_target::low_get_thread_area (int lwpid
, CORE_ADDR
*addrp
)
/* Decide whether LWP, which just stopped with *WSTAT, is inside a fast
   tracepoint jump pad and must be moved out before its stop can be
   reported.  If it has not yet executed the relocated instruction, a
   breakpoint is planted at the pad exit; if it took a synchronous
   fault inside the relocated instruction, the PC and siginfo are
   rewound to the tracepoint address.
   NOTE(review): extraction dropped the outer `if' header, the siginfo
   declaration, several braces and the return statements; the original
   returns true when the LWP needs moving -- confirm upstream.  */
1978 linux_process_target::maybe_move_out_of_jump_pad (lwp_info
*lwp
, int *wstat
)
1980 struct thread_info
*saved_thread
;
1982 saved_thread
= current_thread
;
1983 current_thread
= get_lwp_thread (lwp
);
1986 || (WIFSTOPPED (*wstat
) && WSTOPSIG (*wstat
) != SIGTRAP
))
1987 && supports_fast_tracepoints ()
1988 && agent_loaded_p ())
1990 struct fast_tpoint_collect_status status
;
1993 debug_printf ("Checking whether LWP %ld needs to move out of the "
1995 lwpid_of (current_thread
));
1997 fast_tpoint_collect_result r
1998 = linux_fast_tracepoint_collecting (lwp
, &status
);
/* Only non-fatal stops are deferred; the listed synchronous
   signals must be reported (after PC fixup below).  */
2001 || (WSTOPSIG (*wstat
) != SIGILL
2002 && WSTOPSIG (*wstat
) != SIGFPE
2003 && WSTOPSIG (*wstat
) != SIGSEGV
2004 && WSTOPSIG (*wstat
) != SIGBUS
))
2006 lwp
->collecting_fast_tracepoint
= r
;
2008 if (r
!= fast_tpoint_collect_result::not_collecting
)
2010 if (r
== fast_tpoint_collect_result::before_insn
2011 && lwp
->exit_jump_pad_bkpt
== NULL
)
2013 /* Haven't executed the original instruction yet.
2014 Set breakpoint there, and wait till it's hit,
2015 then single-step until exiting the jump pad. */
2016 lwp
->exit_jump_pad_bkpt
2017 = set_breakpoint_at (status
.adjusted_insn_addr
, NULL
);
2021 debug_printf ("Checking whether LWP %ld needs to move out of "
2022 "the jump pad...it does\n",
2023 lwpid_of (current_thread
));
2024 current_thread
= saved_thread
;
2031 /* If we get a synchronous signal while collecting, *and*
2032 while executing the (relocated) original instruction,
2033 reset the PC to point at the tpoint address, before
2034 reporting to GDB. Otherwise, it's an IPA lib bug: just
2035 report the signal to GDB, and pray for the best. */
2037 lwp
->collecting_fast_tracepoint
2038 = fast_tpoint_collect_result::not_collecting
;
2040 if (r
!= fast_tpoint_collect_result::not_collecting
2041 && (status
.adjusted_insn_addr
<= lwp
->stop_pc
2042 && lwp
->stop_pc
< status
.adjusted_insn_addr_end
))
2045 struct regcache
*regcache
;
2047 /* The si_addr on a few signals references the address
2048 of the faulting instruction. Adjust that as
2050 if ((WSTOPSIG (*wstat
) == SIGILL
2051 || WSTOPSIG (*wstat
) == SIGFPE
2052 || WSTOPSIG (*wstat
) == SIGBUS
2053 || WSTOPSIG (*wstat
) == SIGSEGV
)
2054 && ptrace (PTRACE_GETSIGINFO
, lwpid_of (current_thread
),
2055 (PTRACE_TYPE_ARG3
) 0, &info
) == 0
2056 /* Final check just to make sure we don't clobber
2057 the siginfo of non-kernel-sent signals. */
2058 && (uintptr_t) info
.si_addr
== lwp
->stop_pc
)
2060 info
.si_addr
= (void *) (uintptr_t) status
.tpoint_addr
;
2061 ptrace (PTRACE_SETSIGINFO
, lwpid_of (current_thread
),
2062 (PTRACE_TYPE_ARG3
) 0, &info
);
/* Rewind the PC to the tracepoint itself.  */
2065 regcache
= get_thread_regcache (current_thread
, 1);
2066 low_set_pc (regcache
, status
.tpoint_addr
);
2067 lwp
->stop_pc
= status
.tpoint_addr
;
2069 /* Cancel any fast tracepoint lock this thread was
2071 force_unlock_trace_buffer ();
2074 if (lwp
->exit_jump_pad_bkpt
!= NULL
)
2077 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2078 "stopping all threads momentarily.\n");
2080 stop_all_lwps (1, lwp
);
2082 delete_breakpoint (lwp
->exit_jump_pad_bkpt
);
2083 lwp
->exit_jump_pad_bkpt
= NULL
;
2085 unstop_all_lwps (1, lwp
);
2087 gdb_assert (lwp
->suspended
>= 0);
2093 debug_printf ("Checking whether LWP %ld needs to move out of the "
2095 lwpid_of (current_thread
));
2097 current_thread
= saved_thread
;
2101 /* Enqueue one signal in the "signals to report later when out of the
/* Records the stop signal in *WSTAT on LWP's deferred-signal queue,
   capturing its siginfo via PTRACE_GETSIGINFO.  Non-RT signals are
   deduplicated since they do not queue at the kernel level.
   NOTE(review): the tail of the header comment, the debug braces and
   the early `return' on duplicates were dropped by extraction.  */
2105 enqueue_one_deferred_signal (struct lwp_info
*lwp
, int *wstat
)
2107 struct thread_info
*thread
= get_lwp_thread (lwp
);
2110 debug_printf ("Deferring signal %d for LWP %ld.\n",
2111 WSTOPSIG (*wstat
), lwpid_of (thread
));
2115 for (const auto &sig
: lwp
->pending_signals_to_report
)
2116 debug_printf (" Already queued %d\n",
2119 debug_printf (" (no more currently queued signals)\n");
2122 /* Don't enqueue non-RT signals if they are already in the deferred
2123 queue. (SIGSTOP being the easiest signal to see ending up here
2125 if (WSTOPSIG (*wstat
) < __SIGRTMIN
)
2127 for (const auto &sig
: lwp
->pending_signals_to_report
)
2129 if (sig
.signal
== WSTOPSIG (*wstat
))
2132 debug_printf ("Not requeuing already queued non-RT signal %d"
2141 lwp
->pending_signals_to_report
.emplace_back (WSTOPSIG (*wstat
));
/* Stash the kernel's siginfo alongside, so it can be restored by
   dequeue_one_deferred_signal before re-delivery.  */
2143 ptrace (PTRACE_GETSIGINFO
, lwpid_of (thread
), (PTRACE_TYPE_ARG3
) 0,
2144 &lwp
->pending_signals_to_report
.back ().info
);
2147 /* Dequeue one signal from the "signals to report later when out of
2148 the jump pad" list. */
/* Pops the head of LWP's deferred queue into *WSTAT (as a stop code)
   and restores its siginfo with PTRACE_SETSIGINFO when one was saved.
   Presumably returns nonzero on success, zero when the queue is empty
   -- the return lines were dropped by extraction.  */
2151 dequeue_one_deferred_signal (struct lwp_info
*lwp
, int *wstat
)
2153 struct thread_info
*thread
= get_lwp_thread (lwp
);
2155 if (!lwp
->pending_signals_to_report
.empty ())
2157 const pending_signal
&p_sig
= lwp
->pending_signals_to_report
.front ();
/* Re-encode as a WIFSTOPPED status for the caller.  */
2159 *wstat
= W_STOPCODE (p_sig
.signal
);
2160 if (p_sig
.info
.si_signo
!= 0)
2161 ptrace (PTRACE_SETSIGINFO
, lwpid_of (thread
), (PTRACE_TYPE_ARG3
) 0,
2164 lwp
->pending_signals_to_report
.pop_front ();
2167 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2168 WSTOPSIG (*wstat
), lwpid_of (thread
));
2172 for (const auto &sig
: lwp
->pending_signals_to_report
)
2173 debug_printf (" Still queued %d\n",
2176 debug_printf (" (no more queued signals)\n");
/* Ask the low target whether CHILD stopped because of a watchpoint;
   if so, record the stop reason and the faulting data address on the
   lwp.  Temporarily switches current_thread because the low_* hooks
   operate on it.  */
2186 linux_process_target::check_stopped_by_watchpoint (lwp_info
*child
)
2188 struct thread_info
*saved_thread
= current_thread
;
2189 current_thread
= get_lwp_thread (child
);
2191 if (low_stopped_by_watchpoint ())
2193 child
->stop_reason
= TARGET_STOPPED_BY_WATCHPOINT
;
2194 child
->stopped_data_address
= low_stopped_data_address ();
2197 current_thread
= saved_thread
;
2199 return child
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
;
/* Default (overridable) low-target watchpoint hooks.  NOTE(review):
   their bodies (presumably `return false;' / `return 0;') were
   dropped by extraction -- confirm against upstream.  */
2203 linux_process_target::low_stopped_by_watchpoint ()
2209 linux_process_target::low_stopped_data_address ()
2214 /* Return the ptrace options that we want to try to enable. */
/* Builds the PTRACE_O_* bitmask from the client's reporting settings.
   ATTACHED presumably gates PTRACE_O_EXITKILL (only for processes we
   spawned) -- the `if (!attached)' line and the options declaration /
   return were dropped by extraction.  */
2217 linux_low_ptrace_options (int attached
)
2219 client_state
&cs
= get_client_state ();
2223 options
|= PTRACE_O_EXITKILL
;
2225 if (cs
.report_fork_events
)
2226 options
|= PTRACE_O_TRACEFORK
;
2228 if (cs
.report_vfork_events
)
2229 options
|= (PTRACE_O_TRACEVFORK
| PTRACE_O_TRACEVFORKDONE
);
2231 if (cs
.report_exec_events
)
2232 options
|= PTRACE_O_TRACEEXEC
;
/* Always distinguish syscall stops (SIGTRAP | 0x80).  */
2234 options
|= PTRACE_O_TRACESYSGOOD
;
/* Central sieve for raw waitpid results: given LWPID and raw status
   WSTAT, update (or create) the matching lwp_info -- handling
   unknown-LWP stops, thread exit, first-stop arch setup, ptrace-flag
   arming, syscall-stop state, extended events, breakpoint stop
   reasons, and expected/delayed SIGSTOPs -- and leave any reportable
   status pending on the child.
   NOTE(review): many structural lines (braces, `return's, the
   `if (!debug_threads)' guards) were dropped by extraction; the code
   text below is preserved verbatim.  */
2240 linux_process_target::filter_event (int lwpid
, int wstat
)
2242 client_state
&cs
= get_client_state ();
2243 struct lwp_info
*child
;
2244 struct thread_info
*thread
;
2245 int have_stop_pc
= 0;
2247 child
= find_lwp_pid (ptid_t (lwpid
));
2249 /* Check for stop events reported by a process we didn't already
2250 know about - anything not already in our LWP list.
2252 If we're expecting to receive stopped processes after
2253 fork, vfork, and clone events, then we'll just add the
2254 new one to our list and go back to waiting for the event
2255 to be reported - the stopped process might be returned
2256 from waitpid before or after the event is.
2258 But note the case of a non-leader thread exec'ing after the
2259 leader having exited, and gone from our lists (because
2260 check_zombie_leaders deleted it). The non-leader thread
2261 changes its tid to the tgid. */
2263 if (WIFSTOPPED (wstat
) && child
== NULL
&& WSTOPSIG (wstat
) == SIGTRAP
2264 && linux_ptrace_get_extended_event (wstat
) == PTRACE_EVENT_EXEC
)
2268 /* A multi-thread exec after we had seen the leader exiting. */
2271 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2272 "after exec.\n", lwpid
);
2275 child_ptid
= ptid_t (lwpid
, lwpid
);
2276 child
= add_lwp (child_ptid
);
2278 current_thread
= child
->thread
;
2281 /* If we didn't find a process, one of two things presumably happened:
2282 - A process we started and then detached from has exited. Ignore it.
2283 - A process we are controlling has forked and the new child's stop
2284 was reported to us by the kernel. Save its PID. */
2285 if (child
== NULL
&& WIFSTOPPED (wstat
))
2287 add_to_pid_list (&stopped_pids
, lwpid
, wstat
);
2290 else if (child
== NULL
)
2293 thread
= get_lwp_thread (child
);
2297 child
->last_status
= wstat
;
2299 /* Check if the thread has exited. */
2300 if ((WIFEXITED (wstat
) || WIFSIGNALED (wstat
)))
2303 debug_printf ("LLFE: %d exited.\n", lwpid
);
2305 if (finish_step_over (child
))
2307 /* Unsuspend all other LWPs, and set them back running again. */
2308 unsuspend_all_lwps (child
);
2311 /* If there is at least one more LWP, then the exit signal was
2312 not the end of the debugged application and should be
2313 ignored, unless GDB wants to hear about thread exits. */
2314 if (cs
.report_thread_events
2315 || last_thread_of_process_p (pid_of (thread
)))
2317 /* Since events are serialized to GDB core, and we can't
2318 report this one right now. Leave the status pending for
2319 the next time we're able to report it. */
2320 mark_lwp_dead (child
, wstat
);
2330 gdb_assert (WIFSTOPPED (wstat
));
2332 if (WIFSTOPPED (wstat
))
2334 struct process_info
*proc
;
2336 /* Architecture-specific setup after inferior is running. */
2337 proc
= find_process_pid (pid_of (thread
));
2338 if (proc
->tdesc
== NULL
)
2342 /* This needs to happen after we have attached to the
2343 inferior and it is stopped for the first time, but
2344 before we access any inferior registers. */
2345 arch_setup_thread (thread
);
2349 /* The process is started, but GDBserver will do
2350 architecture-specific setup after the program stops at
2351 the first instruction. */
2352 child
->status_pending_p
= 1;
2353 child
->status_pending
= wstat
;
/* Arm the PTRACE_O_* event options on the first stop.  */
2359 if (WIFSTOPPED (wstat
) && child
->must_set_ptrace_flags
)
2361 struct process_info
*proc
= find_process_pid (pid_of (thread
));
2362 int options
= linux_low_ptrace_options (proc
->attached
);
2364 linux_enable_event_reporting (lwpid
, options
);
2365 child
->must_set_ptrace_flags
= 0;
2368 /* Always update syscall_state, even if it will be filtered later. */
2369 if (WIFSTOPPED (wstat
) && WSTOPSIG (wstat
) == SYSCALL_SIGTRAP
)
/* Syscall stops alternate entry/return.  */
2371 child
->syscall_state
2372 = (child
->syscall_state
== TARGET_WAITKIND_SYSCALL_ENTRY
2373 ? TARGET_WAITKIND_SYSCALL_RETURN
2374 : TARGET_WAITKIND_SYSCALL_ENTRY
);
2378 /* Almost all other ptrace-stops are known to be outside of system
2379 calls, with further exceptions in handle_extended_wait. */
2380 child
->syscall_state
= TARGET_WAITKIND_IGNORE
;
2383 /* Be careful to not overwrite stop_pc until save_stop_reason is
2385 if (WIFSTOPPED (wstat
) && WSTOPSIG (wstat
) == SIGTRAP
2386 && linux_is_extended_waitstatus (wstat
))
2388 child
->stop_pc
= get_pc (child
);
2389 if (handle_extended_wait (&child
, wstat
))
2391 /* The event has been handled, so just return without
2397 if (linux_wstatus_maybe_breakpoint (wstat
))
2399 if (save_stop_reason (child
))
2404 child
->stop_pc
= get_pc (child
);
2406 if (WIFSTOPPED (wstat
) && WSTOPSIG (wstat
) == SIGSTOP
2407 && child
->stop_expected
)
2410 debug_printf ("Expected stop.\n");
2411 child
->stop_expected
= 0;
2413 if (thread
->last_resume_kind
== resume_stop
)
2415 /* We want to report the stop to the core. Treat the
2416 SIGSTOP as a normal event. */
2418 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2419 target_pid_to_str (ptid_of (thread
)));
2421 else if (stopping_threads
!= NOT_STOPPING_THREADS
)
2423 /* Stopping threads. We don't want this SIGSTOP to end up
2426 debug_printf ("LLW: SIGSTOP caught for %s "
2427 "while stopping threads.\n",
2428 target_pid_to_str (ptid_of (thread
)));
2433 /* This is a delayed SIGSTOP. Filter out the event. */
2435 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2436 child
->stepping
? "step" : "continue",
2437 target_pid_to_str (ptid_of (thread
)));
2439 resume_one_lwp (child
, child
->stepping
, 0, NULL
);
/* Anything not filtered out above is left pending for the core.  */
2444 child
->status_pending_p
= 1;
2445 child
->status_pending
= wstat
;
/* Return whether hardware single-step should be used for THREAD; when
   the target lacks it, software single-step breakpoints must already
   be in place.  NOTE(review): the `return 1'/`return 0' lines were
   dropped by extraction.  */
2450 linux_process_target::maybe_hw_step (thread_info
*thread
)
2452 if (supports_hardware_single_step ())
2456 /* GDBserver must insert single-step breakpoint for software
2458 gdb_assert (has_single_step_breakpoints (thread
));
/* for_each_thread callback: re-resume THREAD if it is stopped on our
   side but still considered running by GDB (no pending status, last
   reported status is IGNORE).  Stepping is re-armed when the last
   resume request was a step.
   NOTE(review): the outer `if (lp->stopped' condition line and the
   `step' declaration were dropped by extraction.  */
2464 linux_process_target::resume_stopped_resumed_lwps (thread_info
*thread
)
2466 struct lwp_info
*lp
= get_thread_lwp (thread
);
2470 && !lp
->status_pending_p
2471 && thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
)
2475 if (thread
->last_resume_kind
== resume_step
)
2476 step
= maybe_hw_step (thread
);
2479 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2480 target_pid_to_str (ptid_of (thread
)),
2481 paddress (lp
->stop_pc
),
2484 resume_one_lwp (lp
, step
, GDB_SIGNAL_0
, NULL
);
2489 linux_process_target::wait_for_event_filtered (ptid_t wait_ptid
,
2491 int *wstatp
, int options
)
2493 struct thread_info
*event_thread
;
2494 struct lwp_info
*event_child
, *requested_child
;
2495 sigset_t block_mask
, prev_mask
;
2498 /* N.B. event_thread points to the thread_info struct that contains
2499 event_child. Keep them in sync. */
2500 event_thread
= NULL
;
2502 requested_child
= NULL
;
2504 /* Check for a lwp with a pending status. */
2506 if (filter_ptid
== minus_one_ptid
|| filter_ptid
.is_pid ())
2508 event_thread
= find_thread_in_random ([&] (thread_info
*thread
)
2510 return status_pending_p_callback (thread
, filter_ptid
);
2513 if (event_thread
!= NULL
)
2514 event_child
= get_thread_lwp (event_thread
);
2515 if (debug_threads
&& event_thread
)
2516 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread
));
2518 else if (filter_ptid
!= null_ptid
)
2520 requested_child
= find_lwp_pid (filter_ptid
);
2522 if (stopping_threads
== NOT_STOPPING_THREADS
2523 && requested_child
->status_pending_p
2524 && (requested_child
->collecting_fast_tracepoint
2525 != fast_tpoint_collect_result::not_collecting
))
2527 enqueue_one_deferred_signal (requested_child
,
2528 &requested_child
->status_pending
);
2529 requested_child
->status_pending_p
= 0;
2530 requested_child
->status_pending
= 0;
2531 resume_one_lwp (requested_child
, 0, 0, NULL
);
2534 if (requested_child
->suspended
2535 && requested_child
->status_pending_p
)
2537 internal_error (__FILE__
, __LINE__
,
2538 "requesting an event out of a"
2539 " suspended child?");
2542 if (requested_child
->status_pending_p
)
2544 event_child
= requested_child
;
2545 event_thread
= get_lwp_thread (event_child
);
2549 if (event_child
!= NULL
)
2552 debug_printf ("Got an event from pending child %ld (%04x)\n",
2553 lwpid_of (event_thread
), event_child
->status_pending
);
2554 *wstatp
= event_child
->status_pending
;
2555 event_child
->status_pending_p
= 0;
2556 event_child
->status_pending
= 0;
2557 current_thread
= event_thread
;
2558 return lwpid_of (event_thread
);
2561 /* But if we don't find a pending event, we'll have to wait.
2563 We only enter this loop if no process has a pending wait status.
2564 Thus any action taken in response to a wait status inside this
2565 loop is responding as soon as we detect the status, not after any
2568 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2569 all signals while here. */
2570 sigfillset (&block_mask
);
2571 gdb_sigmask (SIG_BLOCK
, &block_mask
, &prev_mask
);
2573 /* Always pull all events out of the kernel. We'll randomly select
2574 an event LWP out of all that have events, to prevent
2576 while (event_child
== NULL
)
2580 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
2583 - If the thread group leader exits while other threads in the
2584 thread group still exist, waitpid(TGID, ...) hangs. That
2585 waitpid won't return an exit status until the other threads
2586 in the group are reaped.
2588 - When a non-leader thread execs, that thread just vanishes
2589 without reporting an exit (so we'd hang if we waited for it
2590 explicitly in that case). The exec event is reported to
2593 ret
= my_waitpid (-1, wstatp
, options
| WNOHANG
);
2596 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2597 ret
, errno
? safe_strerror (errno
) : "ERRNO-OK");
2603 debug_printf ("LLW: waitpid %ld received %s\n",
2604 (long) ret
, status_to_str (*wstatp
).c_str ());
2607 /* Filter all events. IOW, leave all events pending. We'll
2608 randomly select an event LWP out of all that have events
2610 filter_event (ret
, *wstatp
);
2611 /* Retry until nothing comes out of waitpid. A single
2612 SIGCHLD can indicate more than one child stopped. */
2616 /* Now that we've pulled all events out of the kernel, resume
2617 LWPs that don't have an interesting event to report. */
2618 if (stopping_threads
== NOT_STOPPING_THREADS
)
2619 for_each_thread ([this] (thread_info
*thread
)
2621 resume_stopped_resumed_lwps (thread
);
2624 /* ... and find an LWP with a status to report to the core, if
2626 event_thread
= find_thread_in_random ([&] (thread_info
*thread
)
2628 return status_pending_p_callback (thread
, filter_ptid
);
2631 if (event_thread
!= NULL
)
2633 event_child
= get_thread_lwp (event_thread
);
2634 *wstatp
= event_child
->status_pending
;
2635 event_child
->status_pending_p
= 0;
2636 event_child
->status_pending
= 0;
2640 /* Check for zombie thread group leaders. Those can't be reaped
2641 until all other threads in the thread group are. */
2642 check_zombie_leaders ();
2644 auto not_stopped
= [&] (thread_info
*thread
)
2646 return not_stopped_callback (thread
, wait_ptid
);
2649 /* If there are no resumed children left in the set of LWPs we
2650 want to wait for, bail. We can't just block in
2651 waitpid/sigsuspend, because lwps might have been left stopped
2652 in trace-stop state, and we'd be stuck forever waiting for
2653 their status to change (which would only happen if we resumed
2654 them). Even if WNOHANG is set, this return code is preferred
2655 over 0 (below), as it is more detailed. */
2656 if (find_thread (not_stopped
) == NULL
)
2659 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2660 gdb_sigmask (SIG_SETMASK
, &prev_mask
, NULL
);
2664 /* No interesting event to report to the caller. */
2665 if ((options
& WNOHANG
))
2668 debug_printf ("WNOHANG set, no event found\n");
2670 gdb_sigmask (SIG_SETMASK
, &prev_mask
, NULL
);
2674 /* Block until we get an event reported with SIGCHLD. */
2676 debug_printf ("sigsuspend'ing\n");
2678 sigsuspend (&prev_mask
);
2679 gdb_sigmask (SIG_SETMASK
, &prev_mask
, NULL
);
2683 gdb_sigmask (SIG_SETMASK
, &prev_mask
, NULL
);
2685 current_thread
= event_thread
;
2687 return lwpid_of (event_thread
);
2691 linux_process_target::wait_for_event (ptid_t ptid
, int *wstatp
, int options
)
2693 return wait_for_event_filtered (ptid
, ptid
, wstatp
, options
);
2696 /* Select one LWP out of those that have events pending. */
2699 select_event_lwp (struct lwp_info
**orig_lp
)
2701 struct thread_info
*event_thread
= NULL
;
2703 /* In all-stop, give preference to the LWP that is being
2704 single-stepped. There will be at most one, and it's the LWP that
2705 the core is most interested in. If we didn't do this, then we'd
2706 have to handle pending step SIGTRAPs somehow in case the core
2707 later continues the previously-stepped thread, otherwise we'd
2708 report the pending SIGTRAP, and the core, not having stepped the
2709 thread, wouldn't understand what the trap was for, and therefore
2710 would report it to the user as a random signal. */
2713 event_thread
= find_thread ([] (thread_info
*thread
)
2715 lwp_info
*lp
= get_thread_lwp (thread
);
2717 return (thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
2718 && thread
->last_resume_kind
== resume_step
2719 && lp
->status_pending_p
);
2722 if (event_thread
!= NULL
)
2725 debug_printf ("SEL: Select single-step %s\n",
2726 target_pid_to_str (ptid_of (event_thread
)));
2729 if (event_thread
== NULL
)
2731 /* No single-stepping LWP. Select one at random, out of those
2732 which have had events. */
2734 event_thread
= find_thread_in_random ([&] (thread_info
*thread
)
2736 lwp_info
*lp
= get_thread_lwp (thread
);
2738 /* Only resumed LWPs that have an event pending. */
2739 return (thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
2740 && lp
->status_pending_p
);
2744 if (event_thread
!= NULL
)
2746 struct lwp_info
*event_lp
= get_thread_lwp (event_thread
);
2748 /* Switch the event LWP. */
2749 *orig_lp
= event_lp
;
2753 /* Decrement the suspend count of all LWPs, except EXCEPT, if non
2757 unsuspend_all_lwps (struct lwp_info
*except
)
2759 for_each_thread ([&] (thread_info
*thread
)
2761 lwp_info
*lwp
= get_thread_lwp (thread
);
2764 lwp_suspended_decr (lwp
);
2768 static bool lwp_running (thread_info
*thread
);
2770 /* Stabilize threads (move out of jump pads).
2772 If a thread is midway collecting a fast tracepoint, we need to
2773 finish the collection and move it out of the jump pad before
2774 reporting the signal.
2776 This avoids recursion while collecting (when a signal arrives
2777 midway, and the signal handler itself collects), which would trash
2778 the trace buffer. In case the user set a breakpoint in a signal
2779 handler, this avoids the backtrace showing the jump pad, etc..
2780 Most importantly, there are certain things we can't do safely if
2781 threads are stopped in a jump pad (or in its callee's). For
2784 - starting a new trace run. A thread still collecting the
2785 previous run, could trash the trace buffer when resumed. The trace
2786 buffer control structures would have been reset but the thread had
2787 no way to tell. The thread could even midway memcpy'ing to the
2788 buffer, which would mean that when resumed, it would clobber the
2789 trace buffer that had been set for a new run.
2791 - we can't rewrite/reuse the jump pads for new tracepoints
2792 safely. Say you do tstart while a thread is stopped midway while
2793 collecting. When the thread is later resumed, it finishes the
2794 collection, and returns to the jump pad, to execute the original
2795 instruction that was under the tracepoint jump at the time the
2796 older run had been started. If the jump pad had been rewritten
2797 since for something else in the new run, the thread would now
2798 execute the wrong / random instructions. */
2801 linux_process_target::stabilize_threads ()
2803 thread_info
*thread_stuck
= find_thread ([this] (thread_info
*thread
)
2805 return stuck_in_jump_pad (thread
);
2808 if (thread_stuck
!= NULL
)
2811 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2812 lwpid_of (thread_stuck
));
2816 thread_info
*saved_thread
= current_thread
;
2818 stabilizing_threads
= 1;
2821 for_each_thread ([this] (thread_info
*thread
)
2823 move_out_of_jump_pad (thread
);
2826 /* Loop until all are stopped out of the jump pads. */
2827 while (find_thread (lwp_running
) != NULL
)
2829 struct target_waitstatus ourstatus
;
2830 struct lwp_info
*lwp
;
2833 /* Note that we go through the full wait even loop. While
2834 moving threads out of jump pad, we need to be able to step
2835 over internal breakpoints and such. */
2836 wait_1 (minus_one_ptid
, &ourstatus
, 0);
2838 if (ourstatus
.kind
== TARGET_WAITKIND_STOPPED
)
2840 lwp
= get_thread_lwp (current_thread
);
2843 lwp_suspended_inc (lwp
);
2845 if (ourstatus
.value
.sig
!= GDB_SIGNAL_0
2846 || current_thread
->last_resume_kind
== resume_stop
)
2848 wstat
= W_STOPCODE (gdb_signal_to_host (ourstatus
.value
.sig
));
2849 enqueue_one_deferred_signal (lwp
, &wstat
);
2854 unsuspend_all_lwps (NULL
);
2856 stabilizing_threads
= 0;
2858 current_thread
= saved_thread
;
2862 thread_stuck
= find_thread ([this] (thread_info
*thread
)
2864 return stuck_in_jump_pad (thread
);
2867 if (thread_stuck
!= NULL
)
2868 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2869 lwpid_of (thread_stuck
));
2873 /* Convenience function that is called when the kernel reports an
2874 event that is not passed out to GDB. */
2877 ignore_event (struct target_waitstatus
*ourstatus
)
2879 /* If we got an event, there may still be others, as a single
2880 SIGCHLD can indicate more than one child stopped. This forces
2881 another target_wait call. */
2884 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
2889 linux_process_target::filter_exit_event (lwp_info
*event_child
,
2890 target_waitstatus
*ourstatus
)
2892 client_state
&cs
= get_client_state ();
2893 struct thread_info
*thread
= get_lwp_thread (event_child
);
2894 ptid_t ptid
= ptid_of (thread
);
2896 if (!last_thread_of_process_p (pid_of (thread
)))
2898 if (cs
.report_thread_events
)
2899 ourstatus
->kind
= TARGET_WAITKIND_THREAD_EXITED
;
2901 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
2903 delete_lwp (event_child
);
2908 /* Returns 1 if GDB is interested in any event_child syscalls. */
2911 gdb_catching_syscalls_p (struct lwp_info
*event_child
)
2913 struct thread_info
*thread
= get_lwp_thread (event_child
);
2914 struct process_info
*proc
= get_thread_process (thread
);
2916 return !proc
->syscalls_to_catch
.empty ();
2920 linux_process_target::gdb_catch_this_syscall (lwp_info
*event_child
)
2923 struct thread_info
*thread
= get_lwp_thread (event_child
);
2924 struct process_info
*proc
= get_thread_process (thread
);
2926 if (proc
->syscalls_to_catch
.empty ())
2929 if (proc
->syscalls_to_catch
[0] == ANY_SYSCALL
)
2932 get_syscall_trapinfo (event_child
, &sysno
);
2934 for (int iter
: proc
->syscalls_to_catch
)
2942 linux_process_target::wait_1 (ptid_t ptid
, target_waitstatus
*ourstatus
,
2943 target_wait_flags target_options
)
2945 client_state
&cs
= get_client_state ();
2947 struct lwp_info
*event_child
;
2950 int step_over_finished
;
2951 int bp_explains_trap
;
2952 int maybe_internal_trap
;
2961 debug_printf ("wait_1: [%s]\n", target_pid_to_str (ptid
));
2964 /* Translate generic target options into linux options. */
2966 if (target_options
& TARGET_WNOHANG
)
2969 bp_explains_trap
= 0;
2972 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
2974 auto status_pending_p_any
= [&] (thread_info
*thread
)
2976 return status_pending_p_callback (thread
, minus_one_ptid
);
2979 auto not_stopped
= [&] (thread_info
*thread
)
2981 return not_stopped_callback (thread
, minus_one_ptid
);
2984 /* Find a resumed LWP, if any. */
2985 if (find_thread (status_pending_p_any
) != NULL
)
2987 else if (find_thread (not_stopped
) != NULL
)
2992 if (step_over_bkpt
== null_ptid
)
2993 pid
= wait_for_event (ptid
, &w
, options
);
2997 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2998 target_pid_to_str (step_over_bkpt
));
2999 pid
= wait_for_event (step_over_bkpt
, &w
, options
& ~WNOHANG
);
3002 if (pid
== 0 || (pid
== -1 && !any_resumed
))
3004 gdb_assert (target_options
& TARGET_WNOHANG
);
3008 debug_printf ("wait_1 ret = null_ptid, "
3009 "TARGET_WAITKIND_IGNORE\n");
3013 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
3020 debug_printf ("wait_1 ret = null_ptid, "
3021 "TARGET_WAITKIND_NO_RESUMED\n");
3025 ourstatus
->kind
= TARGET_WAITKIND_NO_RESUMED
;
3029 event_child
= get_thread_lwp (current_thread
);
3031 /* wait_for_event only returns an exit status for the last
3032 child of a process. Report it. */
3033 if (WIFEXITED (w
) || WIFSIGNALED (w
))
3037 ourstatus
->kind
= TARGET_WAITKIND_EXITED
;
3038 ourstatus
->value
.integer
= WEXITSTATUS (w
);
3042 debug_printf ("wait_1 ret = %s, exited with "
3044 target_pid_to_str (ptid_of (current_thread
)),
3051 ourstatus
->kind
= TARGET_WAITKIND_SIGNALLED
;
3052 ourstatus
->value
.sig
= gdb_signal_from_host (WTERMSIG (w
));
3056 debug_printf ("wait_1 ret = %s, terminated with "
3058 target_pid_to_str (ptid_of (current_thread
)),
3064 if (ourstatus
->kind
== TARGET_WAITKIND_EXITED
)
3065 return filter_exit_event (event_child
, ourstatus
);
3067 return ptid_of (current_thread
);
3070 /* If step-over executes a breakpoint instruction, in the case of a
3071 hardware single step it means a gdb/gdbserver breakpoint had been
3072 planted on top of a permanent breakpoint, in the case of a software
3073 single step it may just mean that gdbserver hit the reinsert breakpoint.
3074 The PC has been adjusted by save_stop_reason to point at
3075 the breakpoint address.
3076 So in the case of the hardware single step advance the PC manually
3077 past the breakpoint and in the case of software single step advance only
3078 if it's not the single_step_breakpoint we are hitting.
3079 This avoids that a program would keep trapping a permanent breakpoint
3081 if (step_over_bkpt
!= null_ptid
3082 && event_child
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
3083 && (event_child
->stepping
3084 || !single_step_breakpoint_inserted_here (event_child
->stop_pc
)))
3086 int increment_pc
= 0;
3087 int breakpoint_kind
= 0;
3088 CORE_ADDR stop_pc
= event_child
->stop_pc
;
3090 breakpoint_kind
= breakpoint_kind_from_current_state (&stop_pc
);
3091 sw_breakpoint_from_kind (breakpoint_kind
, &increment_pc
);
3095 debug_printf ("step-over for %s executed software breakpoint\n",
3096 target_pid_to_str (ptid_of (current_thread
)));
3099 if (increment_pc
!= 0)
3101 struct regcache
*regcache
3102 = get_thread_regcache (current_thread
, 1);
3104 event_child
->stop_pc
+= increment_pc
;
3105 low_set_pc (regcache
, event_child
->stop_pc
);
3107 if (!low_breakpoint_at (event_child
->stop_pc
))
3108 event_child
->stop_reason
= TARGET_STOPPED_BY_NO_REASON
;
3112 /* If this event was not handled before, and is not a SIGTRAP, we
3113 report it. SIGILL and SIGSEGV are also treated as traps in case
3114 a breakpoint is inserted at the current PC. If this target does
3115 not support internal breakpoints at all, we also report the
3116 SIGTRAP without further processing; it's of no concern to us. */
3118 = (low_supports_breakpoints ()
3119 && (WSTOPSIG (w
) == SIGTRAP
3120 || ((WSTOPSIG (w
) == SIGILL
3121 || WSTOPSIG (w
) == SIGSEGV
)
3122 && low_breakpoint_at (event_child
->stop_pc
))));
3124 if (maybe_internal_trap
)
3126 /* Handle anything that requires bookkeeping before deciding to
3127 report the event or continue waiting. */
3129 /* First check if we can explain the SIGTRAP with an internal
3130 breakpoint, or if we should possibly report the event to GDB.
3131 Do this before anything that may remove or insert a
3133 bp_explains_trap
= breakpoint_inserted_here (event_child
->stop_pc
);
3135 /* We have a SIGTRAP, possibly a step-over dance has just
3136 finished. If so, tweak the state machine accordingly,
3137 reinsert breakpoints and delete any single-step
3139 step_over_finished
= finish_step_over (event_child
);
3141 /* Now invoke the callbacks of any internal breakpoints there. */
3142 check_breakpoints (event_child
->stop_pc
);
3144 /* Handle tracepoint data collecting. This may overflow the
3145 trace buffer, and cause a tracing stop, removing
3147 trace_event
= handle_tracepoints (event_child
);
3149 if (bp_explains_trap
)
3152 debug_printf ("Hit a gdbserver breakpoint.\n");
3157 /* We have some other signal, possibly a step-over dance was in
3158 progress, and it should be cancelled too. */
3159 step_over_finished
= finish_step_over (event_child
);
3162 /* We have all the data we need. Either report the event to GDB, or
3163 resume threads and keep waiting for more. */
3165 /* If we're collecting a fast tracepoint, finish the collection and
3166 move out of the jump pad before delivering a signal. See
3167 linux_stabilize_threads. */
3170 && WSTOPSIG (w
) != SIGTRAP
3171 && supports_fast_tracepoints ()
3172 && agent_loaded_p ())
3175 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3176 "to defer or adjust it.\n",
3177 WSTOPSIG (w
), lwpid_of (current_thread
));
3179 /* Allow debugging the jump pad itself. */
3180 if (current_thread
->last_resume_kind
!= resume_step
3181 && maybe_move_out_of_jump_pad (event_child
, &w
))
3183 enqueue_one_deferred_signal (event_child
, &w
);
3186 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3187 WSTOPSIG (w
), lwpid_of (current_thread
));
3189 resume_one_lwp (event_child
, 0, 0, NULL
);
3193 return ignore_event (ourstatus
);
3197 if (event_child
->collecting_fast_tracepoint
3198 != fast_tpoint_collect_result::not_collecting
)
3201 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3202 "Check if we're already there.\n",
3203 lwpid_of (current_thread
),
3204 (int) event_child
->collecting_fast_tracepoint
);
3208 event_child
->collecting_fast_tracepoint
3209 = linux_fast_tracepoint_collecting (event_child
, NULL
);
3211 if (event_child
->collecting_fast_tracepoint
3212 != fast_tpoint_collect_result::before_insn
)
3214 /* No longer need this breakpoint. */
3215 if (event_child
->exit_jump_pad_bkpt
!= NULL
)
3218 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3219 "stopping all threads momentarily.\n");
3221 /* Other running threads could hit this breakpoint.
3222 We don't handle moribund locations like GDB does,
3223 instead we always pause all threads when removing
3224 breakpoints, so that any step-over or
3225 decr_pc_after_break adjustment is always taken
3226 care of while the breakpoint is still
3228 stop_all_lwps (1, event_child
);
3230 delete_breakpoint (event_child
->exit_jump_pad_bkpt
);
3231 event_child
->exit_jump_pad_bkpt
= NULL
;
3233 unstop_all_lwps (1, event_child
);
3235 gdb_assert (event_child
->suspended
>= 0);
3239 if (event_child
->collecting_fast_tracepoint
3240 == fast_tpoint_collect_result::not_collecting
)
3243 debug_printf ("fast tracepoint finished "
3244 "collecting successfully.\n");
3246 /* We may have a deferred signal to report. */
3247 if (dequeue_one_deferred_signal (event_child
, &w
))
3250 debug_printf ("dequeued one signal.\n");
3255 debug_printf ("no deferred signals.\n");
3257 if (stabilizing_threads
)
3259 ourstatus
->kind
= TARGET_WAITKIND_STOPPED
;
3260 ourstatus
->value
.sig
= GDB_SIGNAL_0
;
3264 debug_printf ("wait_1 ret = %s, stopped "
3265 "while stabilizing threads\n",
3266 target_pid_to_str (ptid_of (current_thread
)));
3270 return ptid_of (current_thread
);
3276 /* Check whether GDB would be interested in this event. */
3278 /* Check if GDB is interested in this syscall. */
3280 && WSTOPSIG (w
) == SYSCALL_SIGTRAP
3281 && !gdb_catch_this_syscall (event_child
))
3285 debug_printf ("Ignored syscall for LWP %ld.\n",
3286 lwpid_of (current_thread
));
3289 resume_one_lwp (event_child
, event_child
->stepping
, 0, NULL
);
3293 return ignore_event (ourstatus
);
3296 /* If GDB is not interested in this signal, don't stop other
3297 threads, and don't report it to GDB. Just resume the inferior
3298 right away. We do this for threading-related signals as well as
3299 any that GDB specifically requested we ignore. But never ignore
3300 SIGSTOP if we sent it ourselves, and do not ignore signals when
3301 stepping - they may require special handling to skip the signal
3302 handler. Also never ignore signals that could be caused by a
3305 && current_thread
->last_resume_kind
!= resume_step
3307 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3308 (current_process ()->priv
->thread_db
!= NULL
3309 && (WSTOPSIG (w
) == __SIGRTMIN
3310 || WSTOPSIG (w
) == __SIGRTMIN
+ 1))
3313 (cs
.pass_signals
[gdb_signal_from_host (WSTOPSIG (w
))]
3314 && !(WSTOPSIG (w
) == SIGSTOP
3315 && current_thread
->last_resume_kind
== resume_stop
)
3316 && !linux_wstatus_maybe_breakpoint (w
))))
3318 siginfo_t info
, *info_p
;
3321 debug_printf ("Ignored signal %d for LWP %ld.\n",
3322 WSTOPSIG (w
), lwpid_of (current_thread
));
3324 if (ptrace (PTRACE_GETSIGINFO
, lwpid_of (current_thread
),
3325 (PTRACE_TYPE_ARG3
) 0, &info
) == 0)
3330 if (step_over_finished
)
3332 /* We cancelled this thread's step-over above. We still
3333 need to unsuspend all other LWPs, and set them back
3334 running again while the signal handler runs. */
3335 unsuspend_all_lwps (event_child
);
3337 /* Enqueue the pending signal info so that proceed_all_lwps
3339 enqueue_pending_signal (event_child
, WSTOPSIG (w
), info_p
);
3341 proceed_all_lwps ();
3345 resume_one_lwp (event_child
, event_child
->stepping
,
3346 WSTOPSIG (w
), info_p
);
3352 return ignore_event (ourstatus
);
3355 /* Note that all addresses are always "out of the step range" when
3356 there's no range to begin with. */
3357 in_step_range
= lwp_in_step_range (event_child
);
3359 /* If GDB wanted this thread to single step, and the thread is out
3360 of the step range, we always want to report the SIGTRAP, and let
3361 GDB handle it. Watchpoints should always be reported. So should
3362 signals we can't explain. A SIGTRAP we can't explain could be a
3363 GDB breakpoint --- we may or not support Z0 breakpoints. If we
3364 do, we're be able to handle GDB breakpoints on top of internal
3365 breakpoints, by handling the internal breakpoint and still
3366 reporting the event to GDB. If we don't, we're out of luck, GDB
3367 won't see the breakpoint hit. If we see a single-step event but
3368 the thread should be continuing, don't pass the trap to gdb.
3369 That indicates that we had previously finished a single-step but
3370 left the single-step pending -- see
3371 complete_ongoing_step_over. */
3372 report_to_gdb
= (!maybe_internal_trap
3373 || (current_thread
->last_resume_kind
== resume_step
3375 || event_child
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
3377 && !bp_explains_trap
3379 && !step_over_finished
3380 && !(current_thread
->last_resume_kind
== resume_continue
3381 && event_child
->stop_reason
== TARGET_STOPPED_BY_SINGLE_STEP
))
3382 || (gdb_breakpoint_here (event_child
->stop_pc
)
3383 && gdb_condition_true_at_breakpoint (event_child
->stop_pc
)
3384 && gdb_no_commands_at_breakpoint (event_child
->stop_pc
))
3385 || event_child
->waitstatus
.kind
!= TARGET_WAITKIND_IGNORE
);
3387 run_breakpoint_commands (event_child
->stop_pc
);
3389 /* We found no reason GDB would want us to stop. We either hit one
3390 of our own breakpoints, or finished an internal step GDB
3391 shouldn't know about. */
3396 if (bp_explains_trap
)
3397 debug_printf ("Hit a gdbserver breakpoint.\n");
3398 if (step_over_finished
)
3399 debug_printf ("Step-over finished.\n");
3401 debug_printf ("Tracepoint event.\n");
3402 if (lwp_in_step_range (event_child
))
3403 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3404 paddress (event_child
->stop_pc
),
3405 paddress (event_child
->step_range_start
),
3406 paddress (event_child
->step_range_end
));
3409 /* We're not reporting this breakpoint to GDB, so apply the
3410 decr_pc_after_break adjustment to the inferior's regcache
3413 if (low_supports_breakpoints ())
3415 struct regcache
*regcache
3416 = get_thread_regcache (current_thread
, 1);
3417 low_set_pc (regcache
, event_child
->stop_pc
);
3420 if (step_over_finished
)
3422 /* If we have finished stepping over a breakpoint, we've
3423 stopped and suspended all LWPs momentarily except the
3424 stepping one. This is where we resume them all again.
3425 We're going to keep waiting, so use proceed, which
3426 handles stepping over the next breakpoint. */
3427 unsuspend_all_lwps (event_child
);
3431 /* Remove the single-step breakpoints if any. Note that
3432 there isn't single-step breakpoint if we finished stepping
3434 if (supports_software_single_step ()
3435 && has_single_step_breakpoints (current_thread
))
3437 stop_all_lwps (0, event_child
);
3438 delete_single_step_breakpoints (current_thread
);
3439 unstop_all_lwps (0, event_child
);
3444 debug_printf ("proceeding all threads.\n");
3445 proceed_all_lwps ();
3450 return ignore_event (ourstatus
);
3455 if (event_child
->waitstatus
.kind
!= TARGET_WAITKIND_IGNORE
)
3458 = target_waitstatus_to_string (&event_child
->waitstatus
);
3460 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3461 lwpid_of (get_lwp_thread (event_child
)), str
.c_str ());
3463 if (current_thread
->last_resume_kind
== resume_step
)
3465 if (event_child
->step_range_start
== event_child
->step_range_end
)
3466 debug_printf ("GDB wanted to single-step, reporting event.\n");
3467 else if (!lwp_in_step_range (event_child
))
3468 debug_printf ("Out of step range, reporting event.\n");
3470 if (event_child
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
)
3471 debug_printf ("Stopped by watchpoint.\n");
3472 else if (gdb_breakpoint_here (event_child
->stop_pc
))
3473 debug_printf ("Stopped by GDB breakpoint.\n");
3475 debug_printf ("Hit a non-gdbserver trap event.\n");
3478 /* Alright, we're going to report a stop. */
3480 /* Remove single-step breakpoints. */
3481 if (supports_software_single_step ())
3483 /* Remove single-step breakpoints or not. It it is true, stop all
3484 lwps, so that other threads won't hit the breakpoint in the
3486 int remove_single_step_breakpoints_p
= 0;
3490 remove_single_step_breakpoints_p
3491 = has_single_step_breakpoints (current_thread
);
3495 /* In all-stop, a stop reply cancels all previous resume
3496 requests. Delete all single-step breakpoints. */
3498 find_thread ([&] (thread_info
*thread
) {
3499 if (has_single_step_breakpoints (thread
))
3501 remove_single_step_breakpoints_p
= 1;
3509 if (remove_single_step_breakpoints_p
)
3511 /* If we remove single-step breakpoints from memory, stop all lwps,
3512 so that other threads won't hit the breakpoint in the staled
3514 stop_all_lwps (0, event_child
);
3518 gdb_assert (has_single_step_breakpoints (current_thread
));
3519 delete_single_step_breakpoints (current_thread
);
3523 for_each_thread ([] (thread_info
*thread
){
3524 if (has_single_step_breakpoints (thread
))
3525 delete_single_step_breakpoints (thread
);
3529 unstop_all_lwps (0, event_child
);
3533 if (!stabilizing_threads
)
3535 /* In all-stop, stop all threads. */
3537 stop_all_lwps (0, NULL
);
3539 if (step_over_finished
)
3543 /* If we were doing a step-over, all other threads but
3544 the stepping one had been paused in start_step_over,
3545 with their suspend counts incremented. We don't want
3546 to do a full unstop/unpause, because we're in
3547 all-stop mode (so we want threads stopped), but we
3548 still need to unsuspend the other threads, to
3549 decrement their `suspended' count back. */
3550 unsuspend_all_lwps (event_child
);
3554 /* If we just finished a step-over, then all threads had
3555 been momentarily paused. In all-stop, that's fine,
3556 we want threads stopped by now anyway. In non-stop,
3557 we need to re-resume threads that GDB wanted to be
3559 unstop_all_lwps (1, event_child
);
3563 /* If we're not waiting for a specific LWP, choose an event LWP
3564 from among those that have had events. Giving equal priority
3565 to all LWPs that have had events helps prevent
3567 if (ptid
== minus_one_ptid
)
3569 event_child
->status_pending_p
= 1;
3570 event_child
->status_pending
= w
;
3572 select_event_lwp (&event_child
);
3574 /* current_thread and event_child must stay in sync. */
3575 current_thread
= get_lwp_thread (event_child
);
3577 event_child
->status_pending_p
= 0;
3578 w
= event_child
->status_pending
;
3582 /* Stabilize threads (move out of jump pads). */
3584 target_stabilize_threads ();
3588 /* If we just finished a step-over, then all threads had been
3589 momentarily paused. In all-stop, that's fine, we want
3590 threads stopped by now anyway. In non-stop, we need to
3591 re-resume threads that GDB wanted to be running. */
3592 if (step_over_finished
)
3593 unstop_all_lwps (1, event_child
);
3596 if (event_child
->waitstatus
.kind
!= TARGET_WAITKIND_IGNORE
)
3598 /* If the reported event is an exit, fork, vfork or exec, let
3601 /* Break the unreported fork relationship chain. */
3602 if (event_child
->waitstatus
.kind
== TARGET_WAITKIND_FORKED
3603 || event_child
->waitstatus
.kind
== TARGET_WAITKIND_VFORKED
)
3605 event_child
->fork_relative
->fork_relative
= NULL
;
3606 event_child
->fork_relative
= NULL
;
3609 *ourstatus
= event_child
->waitstatus
;
3610 /* Clear the event lwp's waitstatus since we handled it already. */
3611 event_child
->waitstatus
.kind
= TARGET_WAITKIND_IGNORE
;
3614 ourstatus
->kind
= TARGET_WAITKIND_STOPPED
;
3616 /* Now that we've selected our final event LWP, un-adjust its PC if
3617 it was a software breakpoint, and the client doesn't know we can
3618 adjust the breakpoint ourselves. */
3619 if (event_child
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
3620 && !cs
.swbreak_feature
)
3622 int decr_pc
= low_decr_pc_after_break ();
3626 struct regcache
*regcache
3627 = get_thread_regcache (current_thread
, 1);
3628 low_set_pc (regcache
, event_child
->stop_pc
+ decr_pc
);
3632 if (WSTOPSIG (w
) == SYSCALL_SIGTRAP
)
3634 get_syscall_trapinfo (event_child
,
3635 &ourstatus
->value
.syscall_number
);
3636 ourstatus
->kind
= event_child
->syscall_state
;
3638 else if (current_thread
->last_resume_kind
== resume_stop
3639 && WSTOPSIG (w
) == SIGSTOP
)
3641 /* A thread that has been requested to stop by GDB with vCont;t,
3642 and it stopped cleanly, so report as SIG0. The use of
3643 SIGSTOP is an implementation detail. */
3644 ourstatus
->value
.sig
= GDB_SIGNAL_0
;
3646 else if (current_thread
->last_resume_kind
== resume_stop
3647 && WSTOPSIG (w
) != SIGSTOP
)
3649 /* A thread that has been requested to stop by GDB with vCont;t,
3650 but, it stopped for other reasons. */
3651 ourstatus
->value
.sig
= gdb_signal_from_host (WSTOPSIG (w
));
3653 else if (ourstatus
->kind
== TARGET_WAITKIND_STOPPED
)
3655 ourstatus
->value
.sig
= gdb_signal_from_host (WSTOPSIG (w
));
3658 gdb_assert (step_over_bkpt
== null_ptid
);
3662 debug_printf ("wait_1 ret = %s, %d, %d\n",
3663 target_pid_to_str (ptid_of (current_thread
)),
3664 ourstatus
->kind
, ourstatus
->value
.sig
);
3668 if (ourstatus
->kind
== TARGET_WAITKIND_EXITED
)
3669 return filter_exit_event (event_child
, ourstatus
);
3671 return ptid_of (current_thread
);
3674 /* Get rid of any pending event in the pipe. */
3676 async_file_flush (void)
3682 ret
= read (linux_event_pipe
[0], &buf
, 1);
3683 while (ret
>= 0 || (ret
== -1 && errno
== EINTR
));
3686 /* Put something in the pipe, so the event loop wakes up. */
3688 async_file_mark (void)
3692 async_file_flush ();
3695 ret
= write (linux_event_pipe
[1], "+", 1);
3696 while (ret
== 0 || (ret
== -1 && errno
== EINTR
));
3698 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3699 be awakened anyway. */
3703 linux_process_target::wait (ptid_t ptid
,
3704 target_waitstatus
*ourstatus
,
3705 target_wait_flags target_options
)
3709 /* Flush the async file first. */
3710 if (target_is_async_p ())
3711 async_file_flush ();
3715 event_ptid
= wait_1 (ptid
, ourstatus
, target_options
);
3717 while ((target_options
& TARGET_WNOHANG
) == 0
3718 && event_ptid
== null_ptid
3719 && ourstatus
->kind
== TARGET_WAITKIND_IGNORE
);
3721 /* If at least one stop was reported, there may be more. A single
3722 SIGCHLD can signal more than one child stop. */
3723 if (target_is_async_p ()
3724 && (target_options
& TARGET_WNOHANG
) != 0
3725 && event_ptid
!= null_ptid
)
3731 /* Send a signal to an LWP. */
3734 kill_lwp (unsigned long lwpid
, int signo
)
3739 ret
= syscall (__NR_tkill
, lwpid
, signo
);
3740 if (errno
== ENOSYS
)
3742 /* If tkill fails, then we are not using nptl threads, a
3743 configuration we no longer support. */
3744 perror_with_name (("tkill"));
3750 linux_stop_lwp (struct lwp_info
*lwp
)
3756 send_sigstop (struct lwp_info
*lwp
)
3760 pid
= lwpid_of (get_lwp_thread (lwp
));
3762 /* If we already have a pending stop signal for this process, don't
3764 if (lwp
->stop_expected
)
3767 debug_printf ("Have pending sigstop for lwp %d\n", pid
);
3773 debug_printf ("Sending sigstop to lwp %d\n", pid
);
3775 lwp
->stop_expected
= 1;
3776 kill_lwp (pid
, SIGSTOP
);
3780 send_sigstop (thread_info
*thread
, lwp_info
*except
)
3782 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3784 /* Ignore EXCEPT. */
3794 /* Increment the suspend count of an LWP, and stop it, if not stopped
3797 suspend_and_send_sigstop (thread_info
*thread
, lwp_info
*except
)
3799 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3801 /* Ignore EXCEPT. */
3805 lwp_suspended_inc (lwp
);
3807 send_sigstop (thread
, except
);
3811 mark_lwp_dead (struct lwp_info
*lwp
, int wstat
)
3813 /* Store the exit status for later. */
3814 lwp
->status_pending_p
= 1;
3815 lwp
->status_pending
= wstat
;
3817 /* Store in waitstatus as well, as there's nothing else to process
3819 if (WIFEXITED (wstat
))
3821 lwp
->waitstatus
.kind
= TARGET_WAITKIND_EXITED
;
3822 lwp
->waitstatus
.value
.integer
= WEXITSTATUS (wstat
);
3824 else if (WIFSIGNALED (wstat
))
3826 lwp
->waitstatus
.kind
= TARGET_WAITKIND_SIGNALLED
;
3827 lwp
->waitstatus
.value
.sig
= gdb_signal_from_host (WTERMSIG (wstat
));
3830 /* Prevent trying to stop it. */
3833 /* No further stops are expected from a dead lwp. */
3834 lwp
->stop_expected
= 0;
3837 /* Return true if LWP has exited already, and has a pending exit event
3838 to report to GDB. */
3841 lwp_is_marked_dead (struct lwp_info
*lwp
)
3843 return (lwp
->status_pending_p
3844 && (WIFEXITED (lwp
->status_pending
)
3845 || WIFSIGNALED (lwp
->status_pending
)));
3849 linux_process_target::wait_for_sigstop ()
3851 struct thread_info
*saved_thread
;
3856 saved_thread
= current_thread
;
3857 if (saved_thread
!= NULL
)
3858 saved_tid
= saved_thread
->id
;
3860 saved_tid
= null_ptid
; /* avoid bogus unused warning */
3863 debug_printf ("wait_for_sigstop: pulling events\n");
3865 /* Passing NULL_PTID as filter indicates we want all events to be
3866 left pending. Eventually this returns when there are no
3867 unwaited-for children left. */
3868 ret
= wait_for_event_filtered (minus_one_ptid
, null_ptid
, &wstat
, __WALL
);
3869 gdb_assert (ret
== -1);
3871 if (saved_thread
== NULL
|| mythread_alive (saved_tid
))
3872 current_thread
= saved_thread
;
3876 debug_printf ("Previously current thread died.\n");
3878 /* We can't change the current inferior behind GDB's back,
3879 otherwise, a subsequent command may apply to the wrong
3881 current_thread
= NULL
;
3886 linux_process_target::stuck_in_jump_pad (thread_info
*thread
)
3888 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3890 if (lwp
->suspended
!= 0)
3892 internal_error (__FILE__
, __LINE__
,
3893 "LWP %ld is suspended, suspended=%d\n",
3894 lwpid_of (thread
), lwp
->suspended
);
3896 gdb_assert (lwp
->stopped
);
3898 /* Allow debugging the jump pad, gdb_collect, etc.. */
3899 return (supports_fast_tracepoints ()
3900 && agent_loaded_p ()
3901 && (gdb_breakpoint_here (lwp
->stop_pc
)
3902 || lwp
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
3903 || thread
->last_resume_kind
== resume_step
)
3904 && (linux_fast_tracepoint_collecting (lwp
, NULL
)
3905 != fast_tpoint_collect_result::not_collecting
));
3909 linux_process_target::move_out_of_jump_pad (thread_info
*thread
)
3911 struct thread_info
*saved_thread
;
3912 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3915 if (lwp
->suspended
!= 0)
3917 internal_error (__FILE__
, __LINE__
,
3918 "LWP %ld is suspended, suspended=%d\n",
3919 lwpid_of (thread
), lwp
->suspended
);
3921 gdb_assert (lwp
->stopped
);
3923 /* For gdb_breakpoint_here. */
3924 saved_thread
= current_thread
;
3925 current_thread
= thread
;
3927 wstat
= lwp
->status_pending_p
? &lwp
->status_pending
: NULL
;
3929 /* Allow debugging the jump pad, gdb_collect, etc. */
3930 if (!gdb_breakpoint_here (lwp
->stop_pc
)
3931 && lwp
->stop_reason
!= TARGET_STOPPED_BY_WATCHPOINT
3932 && thread
->last_resume_kind
!= resume_step
3933 && maybe_move_out_of_jump_pad (lwp
, wstat
))
3936 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3941 lwp
->status_pending_p
= 0;
3942 enqueue_one_deferred_signal (lwp
, wstat
);
3945 debug_printf ("Signal %d for LWP %ld deferred "
3947 WSTOPSIG (*wstat
), lwpid_of (thread
));
3950 resume_one_lwp (lwp
, 0, 0, NULL
);
3953 lwp_suspended_inc (lwp
);
3955 current_thread
= saved_thread
;
3959 lwp_running (thread_info
*thread
)
3961 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3963 if (lwp_is_marked_dead (lwp
))
3966 return !lwp
->stopped
;
3970 linux_process_target::stop_all_lwps (int suspend
, lwp_info
*except
)
3972 /* Should not be called recursively. */
3973 gdb_assert (stopping_threads
== NOT_STOPPING_THREADS
);
3978 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3979 suspend
? "stop-and-suspend" : "stop",
3981 ? target_pid_to_str (ptid_of (get_lwp_thread (except
)))
3985 stopping_threads
= (suspend
3986 ? STOPPING_AND_SUSPENDING_THREADS
3987 : STOPPING_THREADS
);
3990 for_each_thread ([&] (thread_info
*thread
)
3992 suspend_and_send_sigstop (thread
, except
);
3995 for_each_thread ([&] (thread_info
*thread
)
3997 send_sigstop (thread
, except
);
4000 wait_for_sigstop ();
4001 stopping_threads
= NOT_STOPPING_THREADS
;
4005 debug_printf ("stop_all_lwps done, setting stopping_threads "
4006 "back to !stopping\n");
4011 /* Enqueue one signal in the chain of signals which need to be
4012 delivered to this process on next resume. */
4015 enqueue_pending_signal (struct lwp_info
*lwp
, int signal
, siginfo_t
*info
)
4017 lwp
->pending_signals
.emplace_back (signal
);
4018 if (info
== nullptr)
4019 memset (&lwp
->pending_signals
.back ().info
, 0, sizeof (siginfo_t
));
4021 lwp
->pending_signals
.back ().info
= *info
;
4025 linux_process_target::install_software_single_step_breakpoints (lwp_info
*lwp
)
4027 struct thread_info
*thread
= get_lwp_thread (lwp
);
4028 struct regcache
*regcache
= get_thread_regcache (thread
, 1);
4030 scoped_restore save_current_thread
= make_scoped_restore (¤t_thread
);
4032 current_thread
= thread
;
4033 std::vector
<CORE_ADDR
> next_pcs
= low_get_next_pcs (regcache
);
4035 for (CORE_ADDR pc
: next_pcs
)
4036 set_single_step_breakpoint (pc
, current_ptid
);
4040 linux_process_target::single_step (lwp_info
* lwp
)
4044 if (supports_hardware_single_step ())
4048 else if (supports_software_single_step ())
4050 install_software_single_step_breakpoints (lwp
);
4056 debug_printf ("stepping is not implemented on this target");
4062 /* The signal can be delivered to the inferior if we are not trying to
4063 finish a fast tracepoint collect. Since signal can be delivered in
4064 the step-over, the program may go to signal handler and trap again
4065 after return from the signal handler. We can live with the spurious
4069 lwp_signal_can_be_delivered (struct lwp_info
*lwp
)
4071 return (lwp
->collecting_fast_tracepoint
4072 == fast_tpoint_collect_result::not_collecting
);
4076 linux_process_target::resume_one_lwp_throw (lwp_info
*lwp
, int step
,
4077 int signal
, siginfo_t
*info
)
4079 struct thread_info
*thread
= get_lwp_thread (lwp
);
4080 struct thread_info
*saved_thread
;
4082 struct process_info
*proc
= get_thread_process (thread
);
4084 /* Note that target description may not be initialised
4085 (proc->tdesc == NULL) at this point because the program hasn't
4086 stopped at the first instruction yet. It means GDBserver skips
4087 the extra traps from the wrapper program (see option --wrapper).
4088 Code in this function that requires register access should be
4089 guarded by proc->tdesc == NULL or something else. */
4091 if (lwp
->stopped
== 0)
4094 gdb_assert (lwp
->waitstatus
.kind
== TARGET_WAITKIND_IGNORE
);
4096 fast_tpoint_collect_result fast_tp_collecting
4097 = lwp
->collecting_fast_tracepoint
;
4099 gdb_assert (!stabilizing_threads
4100 || (fast_tp_collecting
4101 != fast_tpoint_collect_result::not_collecting
));
4103 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4104 user used the "jump" command, or "set $pc = foo"). */
4105 if (thread
->while_stepping
!= NULL
&& lwp
->stop_pc
!= get_pc (lwp
))
4107 /* Collecting 'while-stepping' actions doesn't make sense
4109 release_while_stepping_state_list (thread
);
4112 /* If we have pending signals or status, and a new signal, enqueue the
4113 signal. Also enqueue the signal if it can't be delivered to the
4114 inferior right now. */
4116 && (lwp
->status_pending_p
4117 || !lwp
->pending_signals
.empty ()
4118 || !lwp_signal_can_be_delivered (lwp
)))
4120 enqueue_pending_signal (lwp
, signal
, info
);
4122 /* Postpone any pending signal. It was enqueued above. */
4126 if (lwp
->status_pending_p
)
4129 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4130 " has pending status\n",
4131 lwpid_of (thread
), step
? "step" : "continue",
4132 lwp
->stop_expected
? "expected" : "not expected");
4136 saved_thread
= current_thread
;
4137 current_thread
= thread
;
4139 /* This bit needs some thinking about. If we get a signal that
4140 we must report while a single-step reinsert is still pending,
4141 we often end up resuming the thread. It might be better to
4142 (ew) allow a stack of pending events; then we could be sure that
4143 the reinsert happened right away and not lose any signals.
4145 Making this stack would also shrink the window in which breakpoints are
4146 uninserted (see comment in linux_wait_for_lwp) but not enough for
4147 complete correctness, so it won't solve that problem. It may be
4148 worthwhile just to solve this one, however. */
4149 if (lwp
->bp_reinsert
!= 0)
4152 debug_printf (" pending reinsert at 0x%s\n",
4153 paddress (lwp
->bp_reinsert
));
4155 if (supports_hardware_single_step ())
4157 if (fast_tp_collecting
== fast_tpoint_collect_result::not_collecting
)
4160 warning ("BAD - reinserting but not stepping.");
4162 warning ("BAD - reinserting and suspended(%d).",
4167 step
= maybe_hw_step (thread
);
4170 if (fast_tp_collecting
== fast_tpoint_collect_result::before_insn
)
4173 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4174 " (exit-jump-pad-bkpt)\n",
4177 else if (fast_tp_collecting
== fast_tpoint_collect_result::at_insn
)
4180 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4181 " single-stepping\n",
4184 if (supports_hardware_single_step ())
4188 internal_error (__FILE__
, __LINE__
,
4189 "moving out of jump pad single-stepping"
4190 " not implemented on this target");
4194 /* If we have while-stepping actions in this thread set it stepping.
4195 If we have a signal to deliver, it may or may not be set to
4196 SIG_IGN, we don't know. Assume so, and allow collecting
4197 while-stepping into a signal handler. A possible smart thing to
4198 do would be to set an internal breakpoint at the signal return
4199 address, continue, and carry on catching this while-stepping
4200 action only when that breakpoint is hit. A future
4202 if (thread
->while_stepping
!= NULL
)
4205 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4208 step
= single_step (lwp
);
4211 if (proc
->tdesc
!= NULL
&& low_supports_breakpoints ())
4213 struct regcache
*regcache
= get_thread_regcache (current_thread
, 1);
4215 lwp
->stop_pc
= low_get_pc (regcache
);
4219 debug_printf (" %s from pc 0x%lx\n", step
? "step" : "continue",
4220 (long) lwp
->stop_pc
);
4224 /* If we have pending signals, consume one if it can be delivered to
4226 if (!lwp
->pending_signals
.empty () && lwp_signal_can_be_delivered (lwp
))
4228 const pending_signal
&p_sig
= lwp
->pending_signals
.front ();
4230 signal
= p_sig
.signal
;
4231 if (p_sig
.info
.si_signo
!= 0)
4232 ptrace (PTRACE_SETSIGINFO
, lwpid_of (thread
), (PTRACE_TYPE_ARG3
) 0,
4235 lwp
->pending_signals
.pop_front ();
4239 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4240 lwpid_of (thread
), step
? "step" : "continue", signal
,
4241 lwp
->stop_expected
? "expected" : "not expected");
4243 low_prepare_to_resume (lwp
);
4245 regcache_invalidate_thread (thread
);
4247 lwp
->stepping
= step
;
4249 ptrace_request
= PTRACE_SINGLESTEP
;
4250 else if (gdb_catching_syscalls_p (lwp
))
4251 ptrace_request
= PTRACE_SYSCALL
;
4253 ptrace_request
= PTRACE_CONT
;
4254 ptrace (ptrace_request
,
4256 (PTRACE_TYPE_ARG3
) 0,
4257 /* Coerce to a uintptr_t first to avoid potential gcc warning
4258 of coercing an 8 byte integer to a 4 byte pointer. */
4259 (PTRACE_TYPE_ARG4
) (uintptr_t) signal
);
4261 current_thread
= saved_thread
;
4263 perror_with_name ("resuming thread");
4265 /* Successfully resumed. Clear state that no longer makes sense,
4266 and mark the LWP as running. Must not do this before resuming
4267 otherwise if that fails other code will be confused. E.g., we'd
4268 later try to stop the LWP and hang forever waiting for a stop
4269 status. Note that we must not throw after this is cleared,
4270 otherwise handle_zombie_lwp_error would get confused. */
4272 lwp
->stop_reason
= TARGET_STOPPED_BY_NO_REASON
;
4276 linux_process_target::low_prepare_to_resume (lwp_info
*lwp
)
4281 /* Called when we try to resume a stopped LWP and that errors out. If
4282 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4283 or about to become), discard the error, clear any pending status
4284 the LWP may have, and return true (we'll collect the exit status
4285 soon enough). Otherwise, return false. */
4288 check_ptrace_stopped_lwp_gone (struct lwp_info
*lp
)
4290 struct thread_info
*thread
= get_lwp_thread (lp
);
4292 /* If we get an error after resuming the LWP successfully, we'd
4293 confuse !T state for the LWP being gone. */
4294 gdb_assert (lp
->stopped
);
4296 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4297 because even if ptrace failed with ESRCH, the tracee may be "not
4298 yet fully dead", but already refusing ptrace requests. In that
4299 case the tracee has 'R (Running)' state for a little bit
4300 (observed in Linux 3.18). See also the note on ESRCH in the
4301 ptrace(2) man page. Instead, check whether the LWP has any state
4302 other than ptrace-stopped. */
4304 /* Don't assume anything if /proc/PID/status can't be read. */
4305 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread
)) == 0)
4307 lp
->stop_reason
= TARGET_STOPPED_BY_NO_REASON
;
4308 lp
->status_pending_p
= 0;
4315 linux_process_target::resume_one_lwp (lwp_info
*lwp
, int step
, int signal
,
4320 resume_one_lwp_throw (lwp
, step
, signal
, info
);
4322 catch (const gdb_exception_error
&ex
)
4324 if (!check_ptrace_stopped_lwp_gone (lwp
))
4329 /* This function is called once per thread via for_each_thread.
4330 We look up which resume request applies to THREAD and mark it with a
4331 pointer to the appropriate resume request.
4333 This algorithm is O(threads * resume elements), but resume elements
4334 is small (and will remain small at least until GDB supports thread
4338 linux_set_resume_request (thread_info
*thread
, thread_resume
*resume
, size_t n
)
4340 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4342 for (int ndx
= 0; ndx
< n
; ndx
++)
4344 ptid_t ptid
= resume
[ndx
].thread
;
4345 if (ptid
== minus_one_ptid
4346 || ptid
== thread
->id
4347 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4349 || (ptid
.pid () == pid_of (thread
)
4351 || ptid
.lwp () == -1)))
4353 if (resume
[ndx
].kind
== resume_stop
4354 && thread
->last_resume_kind
== resume_stop
)
4357 debug_printf ("already %s LWP %ld at GDB's request\n",
4358 (thread
->last_status
.kind
4359 == TARGET_WAITKIND_STOPPED
)
4367 /* Ignore (wildcard) resume requests for already-resumed
4369 if (resume
[ndx
].kind
!= resume_stop
4370 && thread
->last_resume_kind
!= resume_stop
)
4373 debug_printf ("already %s LWP %ld at GDB's request\n",
4374 (thread
->last_resume_kind
4382 /* Don't let wildcard resumes resume fork children that GDB
4383 does not yet know are new fork children. */
4384 if (lwp
->fork_relative
!= NULL
)
4386 struct lwp_info
*rel
= lwp
->fork_relative
;
4388 if (rel
->status_pending_p
4389 && (rel
->waitstatus
.kind
== TARGET_WAITKIND_FORKED
4390 || rel
->waitstatus
.kind
== TARGET_WAITKIND_VFORKED
))
4393 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4399 /* If the thread has a pending event that has already been
4400 reported to GDBserver core, but GDB has not pulled the
4401 event out of the vStopped queue yet, likewise, ignore the
4402 (wildcard) resume request. */
4403 if (in_queued_stop_replies (thread
->id
))
4406 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4411 lwp
->resume
= &resume
[ndx
];
4412 thread
->last_resume_kind
= lwp
->resume
->kind
;
4414 lwp
->step_range_start
= lwp
->resume
->step_range_start
;
4415 lwp
->step_range_end
= lwp
->resume
->step_range_end
;
4417 /* If we had a deferred signal to report, dequeue one now.
4418 This can happen if LWP gets more than one signal while
4419 trying to get out of a jump pad. */
4421 && !lwp
->status_pending_p
4422 && dequeue_one_deferred_signal (lwp
, &lwp
->status_pending
))
4424 lwp
->status_pending_p
= 1;
4427 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4428 "leaving status pending.\n",
4429 WSTOPSIG (lwp
->status_pending
),
4437 /* No resume action for this thread. */
4442 linux_process_target::resume_status_pending (thread_info
*thread
)
4444 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4446 /* LWPs which will not be resumed are not interesting, because
4447 we might not wait for them next time through linux_wait. */
4448 if (lwp
->resume
== NULL
)
4451 return thread_still_has_status_pending (thread
);
4455 linux_process_target::thread_needs_step_over (thread_info
*thread
)
4457 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4458 struct thread_info
*saved_thread
;
4460 struct process_info
*proc
= get_thread_process (thread
);
4462 /* GDBserver is skipping the extra traps from the wrapper program,
4463 don't have to do step over. */
4464 if (proc
->tdesc
== NULL
)
4467 /* LWPs which will not be resumed are not interesting, because we
4468 might not wait for them next time through linux_wait. */
4473 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4478 if (thread
->last_resume_kind
== resume_stop
)
4481 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4487 gdb_assert (lwp
->suspended
>= 0);
4492 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4497 if (lwp
->status_pending_p
)
4500 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4506 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4510 /* If the PC has changed since we stopped, then don't do anything,
4511 and let the breakpoint/tracepoint be hit. This happens if, for
4512 instance, GDB handled the decr_pc_after_break subtraction itself,
4513 GDB is OOL stepping this thread, or the user has issued a "jump"
4514 command, or poked thread's registers herself. */
4515 if (pc
!= lwp
->stop_pc
)
4518 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4519 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4521 paddress (lwp
->stop_pc
), paddress (pc
));
4525 /* On software single step target, resume the inferior with signal
4526 rather than stepping over. */
4527 if (supports_software_single_step ()
4528 && !lwp
->pending_signals
.empty ()
4529 && lwp_signal_can_be_delivered (lwp
))
4532 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4539 saved_thread
= current_thread
;
4540 current_thread
= thread
;
4542 /* We can only step over breakpoints we know about. */
4543 if (breakpoint_here (pc
) || fast_tracepoint_jump_here (pc
))
4545 /* Don't step over a breakpoint that GDB expects to hit
4546 though. If the condition is being evaluated on the target's side
4547 and it evaluate to false, step over this breakpoint as well. */
4548 if (gdb_breakpoint_here (pc
)
4549 && gdb_condition_true_at_breakpoint (pc
)
4550 && gdb_no_commands_at_breakpoint (pc
))
4553 debug_printf ("Need step over [LWP %ld]? yes, but found"
4554 " GDB breakpoint at 0x%s; skipping step over\n",
4555 lwpid_of (thread
), paddress (pc
));
4557 current_thread
= saved_thread
;
4563 debug_printf ("Need step over [LWP %ld]? yes, "
4564 "found breakpoint at 0x%s\n",
4565 lwpid_of (thread
), paddress (pc
));
4567 /* We've found an lwp that needs stepping over --- return 1 so
4568 that find_thread stops looking. */
4569 current_thread
= saved_thread
;
4575 current_thread
= saved_thread
;
4578 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4580 lwpid_of (thread
), paddress (pc
));
4586 linux_process_target::start_step_over (lwp_info
*lwp
)
4588 struct thread_info
*thread
= get_lwp_thread (lwp
);
4589 struct thread_info
*saved_thread
;
4594 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4597 stop_all_lwps (1, lwp
);
4599 if (lwp
->suspended
!= 0)
4601 internal_error (__FILE__
, __LINE__
,
4602 "LWP %ld suspended=%d\n", lwpid_of (thread
),
4607 debug_printf ("Done stopping all threads for step-over.\n");
4609 /* Note, we should always reach here with an already adjusted PC,
4610 either by GDB (if we're resuming due to GDB's request), or by our
4611 caller, if we just finished handling an internal breakpoint GDB
4612 shouldn't care about. */
4615 saved_thread
= current_thread
;
4616 current_thread
= thread
;
4618 lwp
->bp_reinsert
= pc
;
4619 uninsert_breakpoints_at (pc
);
4620 uninsert_fast_tracepoint_jumps_at (pc
);
4622 step
= single_step (lwp
);
4624 current_thread
= saved_thread
;
4626 resume_one_lwp (lwp
, step
, 0, NULL
);
4628 /* Require next event from this LWP. */
4629 step_over_bkpt
= thread
->id
;
4633 linux_process_target::finish_step_over (lwp_info
*lwp
)
4635 if (lwp
->bp_reinsert
!= 0)
4637 struct thread_info
*saved_thread
= current_thread
;
4640 debug_printf ("Finished step over.\n");
4642 current_thread
= get_lwp_thread (lwp
);
4644 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4645 may be no breakpoint to reinsert there by now. */
4646 reinsert_breakpoints_at (lwp
->bp_reinsert
);
4647 reinsert_fast_tracepoint_jumps_at (lwp
->bp_reinsert
);
4649 lwp
->bp_reinsert
= 0;
4651 /* Delete any single-step breakpoints. No longer needed. We
4652 don't have to worry about other threads hitting this trap,
4653 and later not being able to explain it, because we were
4654 stepping over a breakpoint, and we hold all threads but
4655 LWP stopped while doing that. */
4656 if (!supports_hardware_single_step ())
4658 gdb_assert (has_single_step_breakpoints (current_thread
));
4659 delete_single_step_breakpoints (current_thread
);
4662 step_over_bkpt
= null_ptid
;
4663 current_thread
= saved_thread
;
4671 linux_process_target::complete_ongoing_step_over ()
4673 if (step_over_bkpt
!= null_ptid
)
4675 struct lwp_info
*lwp
;
4680 debug_printf ("detach: step over in progress, finish it first\n");
4682 /* Passing NULL_PTID as filter indicates we want all events to
4683 be left pending. Eventually this returns when there are no
4684 unwaited-for children left. */
4685 ret
= wait_for_event_filtered (minus_one_ptid
, null_ptid
, &wstat
,
4687 gdb_assert (ret
== -1);
4689 lwp
= find_lwp_pid (step_over_bkpt
);
4692 finish_step_over (lwp
);
4694 /* If we got our step SIGTRAP, don't leave it pending,
4695 otherwise we would report it to GDB as a spurious
4697 gdb_assert (lwp
->status_pending_p
);
4698 if (WIFSTOPPED (lwp
->status_pending
)
4699 && WSTOPSIG (lwp
->status_pending
) == SIGTRAP
)
4701 thread_info
*thread
= get_lwp_thread (lwp
);
4702 if (thread
->last_resume_kind
!= resume_step
)
4705 debug_printf ("detach: discard step-over SIGTRAP\n");
4707 lwp
->status_pending_p
= 0;
4708 lwp
->status_pending
= 0;
4709 resume_one_lwp (lwp
, lwp
->stepping
, 0, NULL
);
4714 debug_printf ("detach: resume_step, "
4715 "not discarding step-over SIGTRAP\n");
4719 step_over_bkpt
= null_ptid
;
4720 unsuspend_all_lwps (lwp
);
4725 linux_process_target::resume_one_thread (thread_info
*thread
,
4726 bool leave_all_stopped
)
4728 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4731 if (lwp
->resume
== NULL
)
4734 if (lwp
->resume
->kind
== resume_stop
)
4737 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread
));
4742 debug_printf ("stopping LWP %ld\n", lwpid_of (thread
));
4744 /* Stop the thread, and wait for the event asynchronously,
4745 through the event loop. */
4751 debug_printf ("already stopped LWP %ld\n",
4754 /* The LWP may have been stopped in an internal event that
4755 was not meant to be notified back to GDB (e.g., gdbserver
4756 breakpoint), so we should be reporting a stop event in
4759 /* If the thread already has a pending SIGSTOP, this is a
4760 no-op. Otherwise, something later will presumably resume
4761 the thread and this will cause it to cancel any pending
4762 operation, due to last_resume_kind == resume_stop. If
4763 the thread already has a pending status to report, we
4764 will still report it the next time we wait - see
4765 status_pending_p_callback. */
4767 /* If we already have a pending signal to report, then
4768 there's no need to queue a SIGSTOP, as this means we're
4769 midway through moving the LWP out of the jumppad, and we
4770 will report the pending signal as soon as that is
4772 if (lwp
->pending_signals_to_report
.empty ())
4776 /* For stop requests, we're done. */
4778 thread
->last_status
.kind
= TARGET_WAITKIND_IGNORE
;
4782 /* If this thread which is about to be resumed has a pending status,
4783 then don't resume it - we can just report the pending status.
4784 Likewise if it is suspended, because e.g., another thread is
4785 stepping past a breakpoint. Make sure to queue any signals that
4786 would otherwise be sent. In all-stop mode, we do this decision
4787 based on if *any* thread has a pending status. If there's a
4788 thread that needs the step-over-breakpoint dance, then don't
4789 resume any other thread but that particular one. */
4790 leave_pending
= (lwp
->suspended
4791 || lwp
->status_pending_p
4792 || leave_all_stopped
);
4794 /* If we have a new signal, enqueue the signal. */
4795 if (lwp
->resume
->sig
!= 0)
4797 siginfo_t info
, *info_p
;
4799 /* If this is the same signal we were previously stopped by,
4800 make sure to queue its siginfo. */
4801 if (WIFSTOPPED (lwp
->last_status
)
4802 && WSTOPSIG (lwp
->last_status
) == lwp
->resume
->sig
4803 && ptrace (PTRACE_GETSIGINFO
, lwpid_of (thread
),
4804 (PTRACE_TYPE_ARG3
) 0, &info
) == 0)
4809 enqueue_pending_signal (lwp
, lwp
->resume
->sig
, info_p
);
4815 debug_printf ("resuming LWP %ld\n", lwpid_of (thread
));
4817 proceed_one_lwp (thread
, NULL
);
4822 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread
));
4825 thread
->last_status
.kind
= TARGET_WAITKIND_IGNORE
;
4830 linux_process_target::resume (thread_resume
*resume_info
, size_t n
)
4832 struct thread_info
*need_step_over
= NULL
;
4837 debug_printf ("linux_resume:\n");
4840 for_each_thread ([&] (thread_info
*thread
)
4842 linux_set_resume_request (thread
, resume_info
, n
);
4845 /* If there is a thread which would otherwise be resumed, which has
4846 a pending status, then don't resume any threads - we can just
4847 report the pending status. Make sure to queue any signals that
4848 would otherwise be sent. In non-stop mode, we'll apply this
4849 logic to each thread individually. We consume all pending events
4850 before considering to start a step-over (in all-stop). */
4851 bool any_pending
= false;
4853 any_pending
= find_thread ([this] (thread_info
*thread
)
4855 return resume_status_pending (thread
);
4858 /* If there is a thread which would otherwise be resumed, which is
4859 stopped at a breakpoint that needs stepping over, then don't
4860 resume any threads - have it step over the breakpoint with all
4861 other threads stopped, then resume all threads again. Make sure
4862 to queue any signals that would otherwise be delivered or
4864 if (!any_pending
&& low_supports_breakpoints ())
4865 need_step_over
= find_thread ([this] (thread_info
*thread
)
4867 return thread_needs_step_over (thread
);
4870 bool leave_all_stopped
= (need_step_over
!= NULL
|| any_pending
);
4874 if (need_step_over
!= NULL
)
4875 debug_printf ("Not resuming all, need step over\n");
4876 else if (any_pending
)
4877 debug_printf ("Not resuming, all-stop and found "
4878 "an LWP with pending status\n");
4880 debug_printf ("Resuming, no pending status or step over needed\n");
4883 /* Even if we're leaving threads stopped, queue all signals we'd
4884 otherwise deliver. */
4885 for_each_thread ([&] (thread_info
*thread
)
4887 resume_one_thread (thread
, leave_all_stopped
);
4891 start_step_over (get_thread_lwp (need_step_over
));
4895 debug_printf ("linux_resume done\n");
4899 /* We may have events that were pending that can/should be sent to
4900 the client now. Trigger a linux_wait call. */
4901 if (target_is_async_p ())
4906 linux_process_target::proceed_one_lwp (thread_info
*thread
, lwp_info
*except
)
4908 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4915 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread
));
4920 debug_printf (" LWP %ld already running\n", lwpid_of (thread
));
4924 if (thread
->last_resume_kind
== resume_stop
4925 && thread
->last_status
.kind
!= TARGET_WAITKIND_IGNORE
)
4928 debug_printf (" client wants LWP to remain %ld stopped\n",
4933 if (lwp
->status_pending_p
)
4936 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4941 gdb_assert (lwp
->suspended
>= 0);
4946 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread
));
4950 if (thread
->last_resume_kind
== resume_stop
4951 && lwp
->pending_signals_to_report
.empty ()
4952 && (lwp
->collecting_fast_tracepoint
4953 == fast_tpoint_collect_result::not_collecting
))
4955 /* We haven't reported this LWP as stopped yet (otherwise, the
4956 last_status.kind check above would catch it, and we wouldn't
4957 reach here. This LWP may have been momentarily paused by a
4958 stop_all_lwps call while handling for example, another LWP's
4959 step-over. In that case, the pending expected SIGSTOP signal
4960 that was queued at vCont;t handling time will have already
4961 been consumed by wait_for_sigstop, and so we need to requeue
4962 another one here. Note that if the LWP already has a SIGSTOP
4963 pending, this is a no-op. */
4966 debug_printf ("Client wants LWP %ld to stop. "
4967 "Making sure it has a SIGSTOP pending\n",
4973 if (thread
->last_resume_kind
== resume_step
)
4976 debug_printf (" stepping LWP %ld, client wants it stepping\n",
4979 /* If resume_step is requested by GDB, install single-step
4980 breakpoints when the thread is about to be actually resumed if
4981 the single-step breakpoints weren't removed. */
4982 if (supports_software_single_step ()
4983 && !has_single_step_breakpoints (thread
))
4984 install_software_single_step_breakpoints (lwp
);
4986 step
= maybe_hw_step (thread
);
4988 else if (lwp
->bp_reinsert
!= 0)
4991 debug_printf (" stepping LWP %ld, reinsert set\n",
4994 step
= maybe_hw_step (thread
);
4999 resume_one_lwp (lwp
, step
, 0, NULL
);
5003 linux_process_target::unsuspend_and_proceed_one_lwp (thread_info
*thread
,
5006 struct lwp_info
*lwp
= get_thread_lwp (thread
);
5011 lwp_suspended_decr (lwp
);
5013 proceed_one_lwp (thread
, except
);
5017 linux_process_target::proceed_all_lwps ()
5019 struct thread_info
*need_step_over
;
5021 /* If there is a thread which would otherwise be resumed, which is
5022 stopped at a breakpoint that needs stepping over, then don't
5023 resume any threads - have it step over the breakpoint with all
5024 other threads stopped, then resume all threads again. */
5026 if (low_supports_breakpoints ())
5028 need_step_over
= find_thread ([this] (thread_info
*thread
)
5030 return thread_needs_step_over (thread
);
5033 if (need_step_over
!= NULL
)
5036 debug_printf ("proceed_all_lwps: found "
5037 "thread %ld needing a step-over\n",
5038 lwpid_of (need_step_over
));
5040 start_step_over (get_thread_lwp (need_step_over
));
5046 debug_printf ("Proceeding, no step-over needed\n");
5048 for_each_thread ([this] (thread_info
*thread
)
5050 proceed_one_lwp (thread
, NULL
);
5055 linux_process_target::unstop_all_lwps (int unsuspend
, lwp_info
*except
)
5061 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5062 lwpid_of (get_lwp_thread (except
)));
5064 debug_printf ("unstopping all lwps\n");
5068 for_each_thread ([&] (thread_info
*thread
)
5070 unsuspend_and_proceed_one_lwp (thread
, except
);
5073 for_each_thread ([&] (thread_info
*thread
)
5075 proceed_one_lwp (thread
, except
);
5080 debug_printf ("unstop_all_lwps done\n");
5086 #ifdef HAVE_LINUX_REGSETS
5088 #define use_linux_regsets 1
5090 /* Returns true if REGSET has been disabled. */
5093 regset_disabled (struct regsets_info
*info
, struct regset_info
*regset
)
5095 return (info
->disabled_regsets
!= NULL
5096 && info
->disabled_regsets
[regset
- info
->regsets
]);
5099 /* Disable REGSET. */
5102 disable_regset (struct regsets_info
*info
, struct regset_info
*regset
)
5106 dr_offset
= regset
- info
->regsets
;
5107 if (info
->disabled_regsets
== NULL
)
5108 info
->disabled_regsets
= (char *) xcalloc (1, info
->num_regsets
);
5109 info
->disabled_regsets
[dr_offset
] = 1;
5113 regsets_fetch_inferior_registers (struct regsets_info
*regsets_info
,
5114 struct regcache
*regcache
)
5116 struct regset_info
*regset
;
5117 int saw_general_regs
= 0;
5121 pid
= lwpid_of (current_thread
);
5122 for (regset
= regsets_info
->regsets
; regset
->size
>= 0; regset
++)
5127 if (regset
->size
== 0 || regset_disabled (regsets_info
, regset
))
5130 buf
= xmalloc (regset
->size
);
5132 nt_type
= regset
->nt_type
;
5136 iov
.iov_len
= regset
->size
;
5137 data
= (void *) &iov
;
5143 res
= ptrace (regset
->get_request
, pid
,
5144 (PTRACE_TYPE_ARG3
) (long) nt_type
, data
);
5146 res
= ptrace (regset
->get_request
, pid
, data
, nt_type
);
5151 || (errno
== EINVAL
&& regset
->type
== OPTIONAL_REGS
))
5153 /* If we get EIO on a regset, or an EINVAL and the regset is
5154 optional, do not try it again for this process mode. */
5155 disable_regset (regsets_info
, regset
);
5157 else if (errno
== ENODATA
)
5159 /* ENODATA may be returned if the regset is currently
5160 not "active". This can happen in normal operation,
5161 so suppress the warning in this case. */
5163 else if (errno
== ESRCH
)
5165 /* At this point, ESRCH should mean the process is
5166 already gone, in which case we simply ignore attempts
5167 to read its registers. */
5172 sprintf (s
, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5179 if (regset
->type
== GENERAL_REGS
)
5180 saw_general_regs
= 1;
5181 regset
->store_function (regcache
, buf
);
5185 if (saw_general_regs
)
5192 regsets_store_inferior_registers (struct regsets_info
*regsets_info
,
5193 struct regcache
*regcache
)
5195 struct regset_info
*regset
;
5196 int saw_general_regs
= 0;
5200 pid
= lwpid_of (current_thread
);
5201 for (regset
= regsets_info
->regsets
; regset
->size
>= 0; regset
++)
5206 if (regset
->size
== 0 || regset_disabled (regsets_info
, regset
)
5207 || regset
->fill_function
== NULL
)
5210 buf
= xmalloc (regset
->size
);
5212 /* First fill the buffer with the current register set contents,
5213 in case there are any items in the kernel's regset that are
5214 not in gdbserver's regcache. */
5216 nt_type
= regset
->nt_type
;
5220 iov
.iov_len
= regset
->size
;
5221 data
= (void *) &iov
;
5227 res
= ptrace (regset
->get_request
, pid
,
5228 (PTRACE_TYPE_ARG3
) (long) nt_type
, data
);
5230 res
= ptrace (regset
->get_request
, pid
, data
, nt_type
);
5235 /* Then overlay our cached registers on that. */
5236 regset
->fill_function (regcache
, buf
);
5238 /* Only now do we write the register set. */
5240 res
= ptrace (regset
->set_request
, pid
,
5241 (PTRACE_TYPE_ARG3
) (long) nt_type
, data
);
5243 res
= ptrace (regset
->set_request
, pid
, data
, nt_type
);
5250 || (errno
== EINVAL
&& regset
->type
== OPTIONAL_REGS
))
5252 /* If we get EIO on a regset, or an EINVAL and the regset is
5253 optional, do not try it again for this process mode. */
5254 disable_regset (regsets_info
, regset
);
5256 else if (errno
== ESRCH
)
5258 /* At this point, ESRCH should mean the process is
5259 already gone, in which case we simply ignore attempts
5260 to change its registers. See also the related
5261 comment in resume_one_lwp. */
5267 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5270 else if (regset
->type
== GENERAL_REGS
)
5271 saw_general_regs
= 1;
5274 if (saw_general_regs
)
5280 #else /* !HAVE_LINUX_REGSETS */
5282 #define use_linux_regsets 0
5283 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5284 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5288 /* Return 1 if register REGNO is supported by one of the regset ptrace
5289 calls or 0 if it has to be transferred individually. */
5292 linux_register_in_regsets (const struct regs_info
*regs_info
, int regno
)
5294 unsigned char mask
= 1 << (regno
% 8);
5295 size_t index
= regno
/ 8;
5297 return (use_linux_regsets
5298 && (regs_info
->regset_bitmap
== NULL
5299 || (regs_info
->regset_bitmap
[index
] & mask
) != 0));
5302 #ifdef HAVE_LINUX_USRREGS
5305 register_addr (const struct usrregs_info
*usrregs
, int regnum
)
5309 if (regnum
< 0 || regnum
>= usrregs
->num_regs
)
5310 error ("Invalid register number %d.", regnum
);
5312 addr
= usrregs
->regmap
[regnum
];
5319 linux_process_target::fetch_register (const usrregs_info
*usrregs
,
5320 regcache
*regcache
, int regno
)
5327 if (regno
>= usrregs
->num_regs
)
5329 if (low_cannot_fetch_register (regno
))
5332 regaddr
= register_addr (usrregs
, regno
);
5336 size
= ((register_size (regcache
->tdesc
, regno
)
5337 + sizeof (PTRACE_XFER_TYPE
) - 1)
5338 & -sizeof (PTRACE_XFER_TYPE
));
5339 buf
= (char *) alloca (size
);
5341 pid
= lwpid_of (current_thread
);
5342 for (i
= 0; i
< size
; i
+= sizeof (PTRACE_XFER_TYPE
))
5345 *(PTRACE_XFER_TYPE
*) (buf
+ i
) =
5346 ptrace (PTRACE_PEEKUSER
, pid
,
5347 /* Coerce to a uintptr_t first to avoid potential gcc warning
5348 of coercing an 8 byte integer to a 4 byte pointer. */
5349 (PTRACE_TYPE_ARG3
) (uintptr_t) regaddr
, (PTRACE_TYPE_ARG4
) 0);
5350 regaddr
+= sizeof (PTRACE_XFER_TYPE
);
5353 /* Mark register REGNO unavailable. */
5354 supply_register (regcache
, regno
, NULL
);
5359 low_supply_ptrace_register (regcache
, regno
, buf
);
5363 linux_process_target::store_register (const usrregs_info
*usrregs
,
5364 regcache
*regcache
, int regno
)
5371 if (regno
>= usrregs
->num_regs
)
5373 if (low_cannot_store_register (regno
))
5376 regaddr
= register_addr (usrregs
, regno
);
5380 size
= ((register_size (regcache
->tdesc
, regno
)
5381 + sizeof (PTRACE_XFER_TYPE
) - 1)
5382 & -sizeof (PTRACE_XFER_TYPE
));
5383 buf
= (char *) alloca (size
);
5384 memset (buf
, 0, size
);
5386 low_collect_ptrace_register (regcache
, regno
, buf
);
5388 pid
= lwpid_of (current_thread
);
5389 for (i
= 0; i
< size
; i
+= sizeof (PTRACE_XFER_TYPE
))
5392 ptrace (PTRACE_POKEUSER
, pid
,
5393 /* Coerce to a uintptr_t first to avoid potential gcc warning
5394 about coercing an 8 byte integer to a 4 byte pointer. */
5395 (PTRACE_TYPE_ARG3
) (uintptr_t) regaddr
,
5396 (PTRACE_TYPE_ARG4
) *(PTRACE_XFER_TYPE
*) (buf
+ i
));
5399 /* At this point, ESRCH should mean the process is
5400 already gone, in which case we simply ignore attempts
5401 to change its registers. See also the related
5402 comment in resume_one_lwp. */
5407 if (!low_cannot_store_register (regno
))
5408 error ("writing register %d: %s", regno
, safe_strerror (errno
));
5410 regaddr
+= sizeof (PTRACE_XFER_TYPE
);
5413 #endif /* HAVE_LINUX_USRREGS */
5416 linux_process_target::low_collect_ptrace_register (regcache
*regcache
,
5417 int regno
, char *buf
)
5419 collect_register (regcache
, regno
, buf
);
5423 linux_process_target::low_supply_ptrace_register (regcache
*regcache
,
5424 int regno
, const char *buf
)
5426 supply_register (regcache
, regno
, buf
);
5430 linux_process_target::usr_fetch_inferior_registers (const regs_info
*regs_info
,
5434 #ifdef HAVE_LINUX_USRREGS
5435 struct usrregs_info
*usr
= regs_info
->usrregs
;
5439 for (regno
= 0; regno
< usr
->num_regs
; regno
++)
5440 if (all
|| !linux_register_in_regsets (regs_info
, regno
))
5441 fetch_register (usr
, regcache
, regno
);
5444 fetch_register (usr
, regcache
, regno
);
5449 linux_process_target::usr_store_inferior_registers (const regs_info
*regs_info
,
5453 #ifdef HAVE_LINUX_USRREGS
5454 struct usrregs_info
*usr
= regs_info
->usrregs
;
5458 for (regno
= 0; regno
< usr
->num_regs
; regno
++)
5459 if (all
|| !linux_register_in_regsets (regs_info
, regno
))
5460 store_register (usr
, regcache
, regno
);
5463 store_register (usr
, regcache
, regno
);
5468 linux_process_target::fetch_registers (regcache
*regcache
, int regno
)
5472 const regs_info
*regs_info
= get_regs_info ();
5476 if (regs_info
->usrregs
!= NULL
)
5477 for (regno
= 0; regno
< regs_info
->usrregs
->num_regs
; regno
++)
5478 low_fetch_register (regcache
, regno
);
5480 all
= regsets_fetch_inferior_registers (regs_info
->regsets_info
, regcache
);
5481 if (regs_info
->usrregs
!= NULL
)
5482 usr_fetch_inferior_registers (regs_info
, regcache
, -1, all
);
5486 if (low_fetch_register (regcache
, regno
))
5489 use_regsets
= linux_register_in_regsets (regs_info
, regno
);
5491 all
= regsets_fetch_inferior_registers (regs_info
->regsets_info
,
5493 if ((!use_regsets
|| all
) && regs_info
->usrregs
!= NULL
)
5494 usr_fetch_inferior_registers (regs_info
, regcache
, regno
, 1);
5499 linux_process_target::store_registers (regcache
*regcache
, int regno
)
5503 const regs_info
*regs_info
= get_regs_info ();
5507 all
= regsets_store_inferior_registers (regs_info
->regsets_info
,
5509 if (regs_info
->usrregs
!= NULL
)
5510 usr_store_inferior_registers (regs_info
, regcache
, regno
, all
);
5514 use_regsets
= linux_register_in_regsets (regs_info
, regno
);
5516 all
= regsets_store_inferior_registers (regs_info
->regsets_info
,
5518 if ((!use_regsets
|| all
) && regs_info
->usrregs
!= NULL
)
5519 usr_store_inferior_registers (regs_info
, regcache
, regno
, 1);
5524 linux_process_target::low_fetch_register (regcache
*regcache
, int regno
)
5529 /* A wrapper for the read_memory target op. */
5532 linux_read_memory (CORE_ADDR memaddr
, unsigned char *myaddr
, int len
)
5534 return the_target
->read_memory (memaddr
, myaddr
, len
);
5537 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5538 to debugger memory starting at MYADDR. */
5541 linux_process_target::read_memory (CORE_ADDR memaddr
,
5542 unsigned char *myaddr
, int len
)
5544 int pid
= lwpid_of (current_thread
);
5545 PTRACE_XFER_TYPE
*buffer
;
5553 /* Try using /proc. Don't bother for one word. */
5554 if (len
>= 3 * sizeof (long))
5558 /* We could keep this file open and cache it - possibly one per
5559 thread. That requires some juggling, but is even faster. */
5560 sprintf (filename
, "/proc/%d/mem", pid
);
5561 fd
= open (filename
, O_RDONLY
| O_LARGEFILE
);
5565 /* If pread64 is available, use it. It's faster if the kernel
5566 supports it (only one syscall), and it's 64-bit safe even on
5567 32-bit platforms (for instance, SPARC debugging a SPARC64
5570 bytes
= pread64 (fd
, myaddr
, len
, memaddr
);
5573 if (lseek (fd
, memaddr
, SEEK_SET
) != -1)
5574 bytes
= read (fd
, myaddr
, len
);
5581 /* Some data was read, we'll try to get the rest with ptrace. */
5591 /* Round starting address down to longword boundary. */
5592 addr
= memaddr
& -(CORE_ADDR
) sizeof (PTRACE_XFER_TYPE
);
5593 /* Round ending address up; get number of longwords that makes. */
5594 count
= ((((memaddr
+ len
) - addr
) + sizeof (PTRACE_XFER_TYPE
) - 1)
5595 / sizeof (PTRACE_XFER_TYPE
));
5596 /* Allocate buffer of that many longwords. */
5597 buffer
= XALLOCAVEC (PTRACE_XFER_TYPE
, count
);
5599 /* Read all the longwords */
5601 for (i
= 0; i
< count
; i
++, addr
+= sizeof (PTRACE_XFER_TYPE
))
5603 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5604 about coercing an 8 byte integer to a 4 byte pointer. */
5605 buffer
[i
] = ptrace (PTRACE_PEEKTEXT
, pid
,
5606 (PTRACE_TYPE_ARG3
) (uintptr_t) addr
,
5607 (PTRACE_TYPE_ARG4
) 0);
5613 /* Copy appropriate bytes out of the buffer. */
5616 i
*= sizeof (PTRACE_XFER_TYPE
);
5617 i
-= memaddr
& (sizeof (PTRACE_XFER_TYPE
) - 1);
5619 (char *) buffer
+ (memaddr
& (sizeof (PTRACE_XFER_TYPE
) - 1)),
5626 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5627 memory at MEMADDR. On failure (cannot write to the inferior)
5628 returns the value of errno. Always succeeds if LEN is zero. */
5631 linux_process_target::write_memory (CORE_ADDR memaddr
,
5632 const unsigned char *myaddr
, int len
)
5635 /* Round starting address down to longword boundary. */
5636 CORE_ADDR addr
= memaddr
& -(CORE_ADDR
) sizeof (PTRACE_XFER_TYPE
);
5637 /* Round ending address up; get number of longwords that makes. */
5639 = (((memaddr
+ len
) - addr
) + sizeof (PTRACE_XFER_TYPE
) - 1)
5640 / sizeof (PTRACE_XFER_TYPE
);
5642 /* Allocate buffer of that many longwords. */
5643 PTRACE_XFER_TYPE
*buffer
= XALLOCAVEC (PTRACE_XFER_TYPE
, count
);
5645 int pid
= lwpid_of (current_thread
);
5649 /* Zero length write always succeeds. */
5655 /* Dump up to four bytes. */
5656 char str
[4 * 2 + 1];
5658 int dump
= len
< 4 ? len
: 4;
5660 for (i
= 0; i
< dump
; i
++)
5662 sprintf (p
, "%02x", myaddr
[i
]);
5667 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5668 str
, (long) memaddr
, pid
);
5671 /* Fill start and end extra bytes of buffer with existing memory data. */
5674 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5675 about coercing an 8 byte integer to a 4 byte pointer. */
5676 buffer
[0] = ptrace (PTRACE_PEEKTEXT
, pid
,
5677 (PTRACE_TYPE_ARG3
) (uintptr_t) addr
,
5678 (PTRACE_TYPE_ARG4
) 0);
5686 = ptrace (PTRACE_PEEKTEXT
, pid
,
5687 /* Coerce to a uintptr_t first to avoid potential gcc warning
5688 about coercing an 8 byte integer to a 4 byte pointer. */
5689 (PTRACE_TYPE_ARG3
) (uintptr_t) (addr
+ (count
- 1)
5690 * sizeof (PTRACE_XFER_TYPE
)),
5691 (PTRACE_TYPE_ARG4
) 0);
5696 /* Copy data to be written over corresponding part of buffer. */
5698 memcpy ((char *) buffer
+ (memaddr
& (sizeof (PTRACE_XFER_TYPE
) - 1)),
5701 /* Write the entire buffer. */
5703 for (i
= 0; i
< count
; i
++, addr
+= sizeof (PTRACE_XFER_TYPE
))
5706 ptrace (PTRACE_POKETEXT
, pid
,
5707 /* Coerce to a uintptr_t first to avoid potential gcc warning
5708 about coercing an 8 byte integer to a 4 byte pointer. */
5709 (PTRACE_TYPE_ARG3
) (uintptr_t) addr
,
5710 (PTRACE_TYPE_ARG4
) buffer
[i
]);
5719 linux_process_target::look_up_symbols ()
5721 #ifdef USE_THREAD_DB
5722 struct process_info
*proc
= current_process ();
5724 if (proc
->priv
->thread_db
!= NULL
)
5732 linux_process_target::request_interrupt ()
5734 /* Send a SIGINT to the process group. This acts just like the user
5735 typed a ^C on the controlling terminal. */
5736 ::kill (-signal_pid
, SIGINT
);
5740 linux_process_target::supports_read_auxv ()
5745 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5746 to debugger memory starting at MYADDR. */
5749 linux_process_target::read_auxv (CORE_ADDR offset
, unsigned char *myaddr
,
5752 char filename
[PATH_MAX
];
5754 int pid
= lwpid_of (current_thread
);
5756 xsnprintf (filename
, sizeof filename
, "/proc/%d/auxv", pid
);
5758 fd
= open (filename
, O_RDONLY
);
5762 if (offset
!= (CORE_ADDR
) 0
5763 && lseek (fd
, (off_t
) offset
, SEEK_SET
) != (off_t
) offset
)
5766 n
= read (fd
, myaddr
, len
);
5774 linux_process_target::insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
5775 int size
, raw_breakpoint
*bp
)
5777 if (type
== raw_bkpt_type_sw
)
5778 return insert_memory_breakpoint (bp
);
5780 return low_insert_point (type
, addr
, size
, bp
);
5784 linux_process_target::low_insert_point (raw_bkpt_type type
, CORE_ADDR addr
,
5785 int size
, raw_breakpoint
*bp
)
5787 /* Unsupported (see target.h). */
5792 linux_process_target::remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
5793 int size
, raw_breakpoint
*bp
)
5795 if (type
== raw_bkpt_type_sw
)
5796 return remove_memory_breakpoint (bp
);
5798 return low_remove_point (type
, addr
, size
, bp
);
5802 linux_process_target::low_remove_point (raw_bkpt_type type
, CORE_ADDR addr
,
5803 int size
, raw_breakpoint
*bp
)
5805 /* Unsupported (see target.h). */
5809 /* Implement the stopped_by_sw_breakpoint target_ops
5813 linux_process_target::stopped_by_sw_breakpoint ()
5815 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
5817 return (lwp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
);
5820 /* Implement the supports_stopped_by_sw_breakpoint target_ops
5824 linux_process_target::supports_stopped_by_sw_breakpoint ()
5826 return USE_SIGTRAP_SIGINFO
;
5829 /* Implement the stopped_by_hw_breakpoint target_ops
5833 linux_process_target::stopped_by_hw_breakpoint ()
5835 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
5837 return (lwp
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
);
5840 /* Implement the supports_stopped_by_hw_breakpoint target_ops
5844 linux_process_target::supports_stopped_by_hw_breakpoint ()
5846 return USE_SIGTRAP_SIGINFO
;
5849 /* Implement the supports_hardware_single_step target_ops method. */
5852 linux_process_target::supports_hardware_single_step ()
5858 linux_process_target::stopped_by_watchpoint ()
5860 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
5862 return lwp
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
;
5866 linux_process_target::stopped_data_address ()
5868 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
5870 return lwp
->stopped_data_address
;
5873 /* This is only used for targets that define PT_TEXT_ADDR,
5874 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5875 the target has different ways of acquiring this information, like
5879 linux_process_target::supports_read_offsets ()
5881 #ifdef SUPPORTS_READ_OFFSETS
5888 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5889 to tell gdb about. */
5892 linux_process_target::read_offsets (CORE_ADDR
*text_p
, CORE_ADDR
*data_p
)
5894 #ifdef SUPPORTS_READ_OFFSETS
5895 unsigned long text
, text_end
, data
;
5896 int pid
= lwpid_of (current_thread
);
5900 text
= ptrace (PTRACE_PEEKUSER
, pid
, (PTRACE_TYPE_ARG3
) PT_TEXT_ADDR
,
5901 (PTRACE_TYPE_ARG4
) 0);
5902 text_end
= ptrace (PTRACE_PEEKUSER
, pid
, (PTRACE_TYPE_ARG3
) PT_TEXT_END_ADDR
,
5903 (PTRACE_TYPE_ARG4
) 0);
5904 data
= ptrace (PTRACE_PEEKUSER
, pid
, (PTRACE_TYPE_ARG3
) PT_DATA_ADDR
,
5905 (PTRACE_TYPE_ARG4
) 0);
5909 /* Both text and data offsets produced at compile-time (and so
5910 used by gdb) are relative to the beginning of the program,
5911 with the data segment immediately following the text segment.
5912 However, the actual runtime layout in memory may put the data
5913 somewhere else, so when we send gdb a data base-address, we
5914 use the real data base address and subtract the compile-time
5915 data base-address from it (which is just the length of the
5916 text segment). BSS immediately follows data in both
5919 *data_p
= data
- (text_end
- text
);
5925 gdb_assert_not_reached ("target op read_offsets not supported");
5930 linux_process_target::supports_get_tls_address ()
5932 #ifdef USE_THREAD_DB
5940 linux_process_target::get_tls_address (thread_info
*thread
,
5942 CORE_ADDR load_module
,
5945 #ifdef USE_THREAD_DB
5946 return thread_db_get_tls_address (thread
, offset
, load_module
, address
);
5953 linux_process_target::supports_qxfer_osdata ()
5959 linux_process_target::qxfer_osdata (const char *annex
,
5960 unsigned char *readbuf
,
5961 unsigned const char *writebuf
,
5962 CORE_ADDR offset
, int len
)
5964 return linux_common_xfer_osdata (annex
, readbuf
, offset
, len
);
5968 linux_process_target::siginfo_fixup (siginfo_t
*siginfo
,
5969 gdb_byte
*inf_siginfo
, int direction
)
5971 bool done
= low_siginfo_fixup (siginfo
, inf_siginfo
, direction
);
5973 /* If there was no callback, or the callback didn't do anything,
5974 then just do a straight memcpy. */
5978 memcpy (siginfo
, inf_siginfo
, sizeof (siginfo_t
));
5980 memcpy (inf_siginfo
, siginfo
, sizeof (siginfo_t
));
5985 linux_process_target::low_siginfo_fixup (siginfo_t
*native
, gdb_byte
*inf
,
5992 linux_process_target::supports_qxfer_siginfo ()
5998 linux_process_target::qxfer_siginfo (const char *annex
,
5999 unsigned char *readbuf
,
6000 unsigned const char *writebuf
,
6001 CORE_ADDR offset
, int len
)
6005 gdb_byte inf_siginfo
[sizeof (siginfo_t
)];
6007 if (current_thread
== NULL
)
6010 pid
= lwpid_of (current_thread
);
6013 debug_printf ("%s siginfo for lwp %d.\n",
6014 readbuf
!= NULL
? "Reading" : "Writing",
6017 if (offset
>= sizeof (siginfo
))
6020 if (ptrace (PTRACE_GETSIGINFO
, pid
, (PTRACE_TYPE_ARG3
) 0, &siginfo
) != 0)
6023 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6024 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6025 inferior with a 64-bit GDBSERVER should look the same as debugging it
6026 with a 32-bit GDBSERVER, we need to convert it. */
6027 siginfo_fixup (&siginfo
, inf_siginfo
, 0);
6029 if (offset
+ len
> sizeof (siginfo
))
6030 len
= sizeof (siginfo
) - offset
;
6032 if (readbuf
!= NULL
)
6033 memcpy (readbuf
, inf_siginfo
+ offset
, len
);
6036 memcpy (inf_siginfo
+ offset
, writebuf
, len
);
6038 /* Convert back to ptrace layout before flushing it out. */
6039 siginfo_fixup (&siginfo
, inf_siginfo
, 1);
6041 if (ptrace (PTRACE_SETSIGINFO
, pid
, (PTRACE_TYPE_ARG3
) 0, &siginfo
) != 0)
6048 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
6049 so we notice when children change state; as the handler for the
6050 sigsuspend in my_waitpid. */
6053 sigchld_handler (int signo
)
6055 int old_errno
= errno
;
6061 /* Use the async signal safe debug function. */
6062 if (debug_write ("sigchld_handler\n",
6063 sizeof ("sigchld_handler\n") - 1) < 0)
6064 break; /* just ignore */
6068 if (target_is_async_p ())
6069 async_file_mark (); /* trigger a linux_wait */
6075 linux_process_target::supports_non_stop ()
6081 linux_process_target::async (bool enable
)
6083 bool previous
= target_is_async_p ();
6086 debug_printf ("linux_async (%d), previous=%d\n",
6089 if (previous
!= enable
)
6092 sigemptyset (&mask
);
6093 sigaddset (&mask
, SIGCHLD
);
6095 gdb_sigmask (SIG_BLOCK
, &mask
, NULL
);
6099 if (pipe (linux_event_pipe
) == -1)
6101 linux_event_pipe
[0] = -1;
6102 linux_event_pipe
[1] = -1;
6103 gdb_sigmask (SIG_UNBLOCK
, &mask
, NULL
);
6105 warning ("creating event pipe failed.");
6109 fcntl (linux_event_pipe
[0], F_SETFL
, O_NONBLOCK
);
6110 fcntl (linux_event_pipe
[1], F_SETFL
, O_NONBLOCK
);
6112 /* Register the event loop handler. */
6113 add_file_handler (linux_event_pipe
[0],
6114 handle_target_event
, NULL
,
6117 /* Always trigger a linux_wait. */
6122 delete_file_handler (linux_event_pipe
[0]);
6124 close (linux_event_pipe
[0]);
6125 close (linux_event_pipe
[1]);
6126 linux_event_pipe
[0] = -1;
6127 linux_event_pipe
[1] = -1;
6130 gdb_sigmask (SIG_UNBLOCK
, &mask
, NULL
);
6137 linux_process_target::start_non_stop (bool nonstop
)
6139 /* Register or unregister from event-loop accordingly. */
6140 target_async (nonstop
);
6142 if (target_is_async_p () != (nonstop
!= false))
6149 linux_process_target::supports_multi_process ()
6154 /* Check if fork events are supported. */
6157 linux_process_target::supports_fork_events ()
6159 return linux_supports_tracefork ();
6162 /* Check if vfork events are supported. */
6165 linux_process_target::supports_vfork_events ()
6167 return linux_supports_tracefork ();
6170 /* Check if exec events are supported. */
6173 linux_process_target::supports_exec_events ()
6175 return linux_supports_traceexec ();
6178 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6179 ptrace flags for all inferiors. This is in case the new GDB connection
6180 doesn't support the same set of events that the previous one did. */
6183 linux_process_target::handle_new_gdb_connection ()
6185 /* Request that all the lwps reset their ptrace options. */
6186 for_each_thread ([] (thread_info
*thread
)
6188 struct lwp_info
*lwp
= get_thread_lwp (thread
);
6192 /* Stop the lwp so we can modify its ptrace options. */
6193 lwp
->must_set_ptrace_flags
= 1;
6194 linux_stop_lwp (lwp
);
6198 /* Already stopped; go ahead and set the ptrace options. */
6199 struct process_info
*proc
= find_process_pid (pid_of (thread
));
6200 int options
= linux_low_ptrace_options (proc
->attached
);
6202 linux_enable_event_reporting (lwpid_of (thread
), options
);
6203 lwp
->must_set_ptrace_flags
= 0;
6209 linux_process_target::handle_monitor_command (char *mon
)
6211 #ifdef USE_THREAD_DB
6212 return thread_db_handle_monitor_command (mon
);
6219 linux_process_target::core_of_thread (ptid_t ptid
)
6221 return linux_common_core_of_thread (ptid
);
6225 linux_process_target::supports_disable_randomization ()
6231 linux_process_target::supports_agent ()
6237 linux_process_target::supports_range_stepping ()
6239 if (supports_software_single_step ())
6242 return low_supports_range_stepping ();
6246 linux_process_target::low_supports_range_stepping ()
6252 linux_process_target::supports_pid_to_exec_file ()
6258 linux_process_target::pid_to_exec_file (int pid
)
6260 return linux_proc_pid_to_exec_file (pid
);
6264 linux_process_target::supports_multifs ()
6270 linux_process_target::multifs_open (int pid
, const char *filename
,
6271 int flags
, mode_t mode
)
6273 return linux_mntns_open_cloexec (pid
, filename
, flags
, mode
);
6277 linux_process_target::multifs_unlink (int pid
, const char *filename
)
6279 return linux_mntns_unlink (pid
, filename
);
6283 linux_process_target::multifs_readlink (int pid
, const char *filename
,
6284 char *buf
, size_t bufsiz
)
6286 return linux_mntns_readlink (pid
, filename
, buf
, bufsiz
);
6289 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6290 struct target_loadseg
6292 /* Core address to which the segment is mapped. */
6294 /* VMA recorded in the program header. */
6296 /* Size of this segment in memory. */
6300 # if defined PT_GETDSBT
6301 struct target_loadmap
6303 /* Protocol version number, must be zero. */
6305 /* Pointer to the DSBT table, its size, and the DSBT index. */
6306 unsigned *dsbt_table
;
6307 unsigned dsbt_size
, dsbt_index
;
6308 /* Number of segments in this map. */
6310 /* The actual memory map. */
6311 struct target_loadseg segs
[/*nsegs*/];
6313 # define LINUX_LOADMAP PT_GETDSBT
6314 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6315 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6317 struct target_loadmap
6319 /* Protocol version number, must be zero. */
6321 /* Number of segments in this map. */
6323 /* The actual memory map. */
6324 struct target_loadseg segs
[/*nsegs*/];
6326 # define LINUX_LOADMAP PTRACE_GETFDPIC
6327 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6328 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6332 linux_process_target::supports_read_loadmap ()
6338 linux_process_target::read_loadmap (const char *annex
, CORE_ADDR offset
,
6339 unsigned char *myaddr
, unsigned int len
)
6341 int pid
= lwpid_of (current_thread
);
6343 struct target_loadmap
*data
= NULL
;
6344 unsigned int actual_length
, copy_length
;
6346 if (strcmp (annex
, "exec") == 0)
6347 addr
= (int) LINUX_LOADMAP_EXEC
;
6348 else if (strcmp (annex
, "interp") == 0)
6349 addr
= (int) LINUX_LOADMAP_INTERP
;
6353 if (ptrace (LINUX_LOADMAP
, pid
, addr
, &data
) != 0)
6359 actual_length
= sizeof (struct target_loadmap
)
6360 + sizeof (struct target_loadseg
) * data
->nsegs
;
6362 if (offset
< 0 || offset
> actual_length
)
6365 copy_length
= actual_length
- offset
< len
? actual_length
- offset
: len
;
6366 memcpy (myaddr
, (char *) data
+ offset
, copy_length
);
6369 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6372 linux_process_target::supports_catch_syscall ()
6374 return (low_supports_catch_syscall ()
6375 && linux_supports_tracesysgood ());
6379 linux_process_target::low_supports_catch_syscall ()
6385 linux_process_target::read_pc (regcache
*regcache
)
6387 if (!low_supports_breakpoints ())
6390 return low_get_pc (regcache
);
6394 linux_process_target::write_pc (regcache
*regcache
, CORE_ADDR pc
)
6396 gdb_assert (low_supports_breakpoints ());
6398 low_set_pc (regcache
, pc
);
6402 linux_process_target::supports_thread_stopped ()
6408 linux_process_target::thread_stopped (thread_info
*thread
)
6410 return get_thread_lwp (thread
)->stopped
;
6413 /* This exposes stop-all-threads functionality to other modules. */
6416 linux_process_target::pause_all (bool freeze
)
6418 stop_all_lwps (freeze
, NULL
);
6421 /* This exposes unstop-all-threads functionality to other gdbserver
6425 linux_process_target::unpause_all (bool unfreeze
)
6427 unstop_all_lwps (unfreeze
, NULL
);
6431 linux_process_target::prepare_to_access_memory ()
6433 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6436 target_pause_all (true);
6441 linux_process_target::done_accessing_memory ()
6443 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6446 target_unpause_all (true);
6449 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6452 get_phdr_phnum_from_proc_auxv (const int pid
, const int is_elf64
,
6453 CORE_ADDR
*phdr_memaddr
, int *num_phdr
)
6455 char filename
[PATH_MAX
];
6457 const int auxv_size
= is_elf64
6458 ? sizeof (Elf64_auxv_t
) : sizeof (Elf32_auxv_t
);
6459 char buf
[sizeof (Elf64_auxv_t
)]; /* The larger of the two. */
6461 xsnprintf (filename
, sizeof filename
, "/proc/%d/auxv", pid
);
6463 fd
= open (filename
, O_RDONLY
);
6469 while (read (fd
, buf
, auxv_size
) == auxv_size
6470 && (*phdr_memaddr
== 0 || *num_phdr
== 0))
6474 Elf64_auxv_t
*const aux
= (Elf64_auxv_t
*) buf
;
6476 switch (aux
->a_type
)
6479 *phdr_memaddr
= aux
->a_un
.a_val
;
6482 *num_phdr
= aux
->a_un
.a_val
;
6488 Elf32_auxv_t
*const aux
= (Elf32_auxv_t
*) buf
;
6490 switch (aux
->a_type
)
6493 *phdr_memaddr
= aux
->a_un
.a_val
;
6496 *num_phdr
= aux
->a_un
.a_val
;
6504 if (*phdr_memaddr
== 0 || *num_phdr
== 0)
6506 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6507 "phdr_memaddr = %ld, phdr_num = %d",
6508 (long) *phdr_memaddr
, *num_phdr
);
6515 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6518 get_dynamic (const int pid
, const int is_elf64
)
6520 CORE_ADDR phdr_memaddr
, relocation
;
6522 unsigned char *phdr_buf
;
6523 const int phdr_size
= is_elf64
? sizeof (Elf64_Phdr
) : sizeof (Elf32_Phdr
);
6525 if (get_phdr_phnum_from_proc_auxv (pid
, is_elf64
, &phdr_memaddr
, &num_phdr
))
6528 gdb_assert (num_phdr
< 100); /* Basic sanity check. */
6529 phdr_buf
= (unsigned char *) alloca (num_phdr
* phdr_size
);
6531 if (linux_read_memory (phdr_memaddr
, phdr_buf
, num_phdr
* phdr_size
))
6534 /* Compute relocation: it is expected to be 0 for "regular" executables,
6535 non-zero for PIE ones. */
6537 for (i
= 0; relocation
== -1 && i
< num_phdr
; i
++)
6540 Elf64_Phdr
*const p
= (Elf64_Phdr
*) (phdr_buf
+ i
* phdr_size
);
6542 if (p
->p_type
== PT_PHDR
)
6543 relocation
= phdr_memaddr
- p
->p_vaddr
;
6547 Elf32_Phdr
*const p
= (Elf32_Phdr
*) (phdr_buf
+ i
* phdr_size
);
6549 if (p
->p_type
== PT_PHDR
)
6550 relocation
= phdr_memaddr
- p
->p_vaddr
;
6553 if (relocation
== -1)
6555 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6556 any real world executables, including PIE executables, have always
6557 PT_PHDR present. PT_PHDR is not present in some shared libraries or
6558 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
6559 or present DT_DEBUG anyway (fpc binaries are statically linked).
6561 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6563 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6568 for (i
= 0; i
< num_phdr
; i
++)
6572 Elf64_Phdr
*const p
= (Elf64_Phdr
*) (phdr_buf
+ i
* phdr_size
);
6574 if (p
->p_type
== PT_DYNAMIC
)
6575 return p
->p_vaddr
+ relocation
;
6579 Elf32_Phdr
*const p
= (Elf32_Phdr
*) (phdr_buf
+ i
* phdr_size
);
6581 if (p
->p_type
== PT_DYNAMIC
)
6582 return p
->p_vaddr
+ relocation
;
6589 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6590 can be 0 if the inferior does not yet have the library list initialized.
6591 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6592 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6595 get_r_debug (const int pid
, const int is_elf64
)
6597 CORE_ADDR dynamic_memaddr
;
6598 const int dyn_size
= is_elf64
? sizeof (Elf64_Dyn
) : sizeof (Elf32_Dyn
);
6599 unsigned char buf
[sizeof (Elf64_Dyn
)]; /* The larger of the two. */
6602 dynamic_memaddr
= get_dynamic (pid
, is_elf64
);
6603 if (dynamic_memaddr
== 0)
6606 while (linux_read_memory (dynamic_memaddr
, buf
, dyn_size
) == 0)
6610 Elf64_Dyn
*const dyn
= (Elf64_Dyn
*) buf
;
6611 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6615 unsigned char buf
[sizeof (Elf64_Xword
)];
6619 #ifdef DT_MIPS_RLD_MAP
6620 if (dyn
->d_tag
== DT_MIPS_RLD_MAP
)
6622 if (linux_read_memory (dyn
->d_un
.d_val
,
6623 rld_map
.buf
, sizeof (rld_map
.buf
)) == 0)
6628 #endif /* DT_MIPS_RLD_MAP */
6629 #ifdef DT_MIPS_RLD_MAP_REL
6630 if (dyn
->d_tag
== DT_MIPS_RLD_MAP_REL
)
6632 if (linux_read_memory (dyn
->d_un
.d_val
+ dynamic_memaddr
,
6633 rld_map
.buf
, sizeof (rld_map
.buf
)) == 0)
6638 #endif /* DT_MIPS_RLD_MAP_REL */
6640 if (dyn
->d_tag
== DT_DEBUG
&& map
== -1)
6641 map
= dyn
->d_un
.d_val
;
6643 if (dyn
->d_tag
== DT_NULL
)
6648 Elf32_Dyn
*const dyn
= (Elf32_Dyn
*) buf
;
6649 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6653 unsigned char buf
[sizeof (Elf32_Word
)];
6657 #ifdef DT_MIPS_RLD_MAP
6658 if (dyn
->d_tag
== DT_MIPS_RLD_MAP
)
6660 if (linux_read_memory (dyn
->d_un
.d_val
,
6661 rld_map
.buf
, sizeof (rld_map
.buf
)) == 0)
6666 #endif /* DT_MIPS_RLD_MAP */
6667 #ifdef DT_MIPS_RLD_MAP_REL
6668 if (dyn
->d_tag
== DT_MIPS_RLD_MAP_REL
)
6670 if (linux_read_memory (dyn
->d_un
.d_val
+ dynamic_memaddr
,
6671 rld_map
.buf
, sizeof (rld_map
.buf
)) == 0)
6676 #endif /* DT_MIPS_RLD_MAP_REL */
6678 if (dyn
->d_tag
== DT_DEBUG
&& map
== -1)
6679 map
= dyn
->d_un
.d_val
;
6681 if (dyn
->d_tag
== DT_NULL
)
6685 dynamic_memaddr
+= dyn_size
;
6691 /* Read one pointer from MEMADDR in the inferior. */
6694 read_one_ptr (CORE_ADDR memaddr
, CORE_ADDR
*ptr
, int ptr_size
)
6698 /* Go through a union so this works on either big or little endian
6699 hosts, when the inferior's pointer size is smaller than the size
6700 of CORE_ADDR. It is assumed the inferior's endianness is the
6701 same of the superior's. */
6704 CORE_ADDR core_addr
;
6709 ret
= linux_read_memory (memaddr
, &addr
.uc
, ptr_size
);
6712 if (ptr_size
== sizeof (CORE_ADDR
))
6713 *ptr
= addr
.core_addr
;
6714 else if (ptr_size
== sizeof (unsigned int))
6717 gdb_assert_not_reached ("unhandled pointer size");
6723 linux_process_target::supports_qxfer_libraries_svr4 ()
/* Offsets of fields within the SVR4 dynamic linker's r_debug and
   link_map structures, parameterized for 32- vs 64-bit inferiors.  */

struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
6752 /* Construct qXfer:libraries-svr4:read reply. */
6755 linux_process_target::qxfer_libraries_svr4 (const char *annex
,
6756 unsigned char *readbuf
,
6757 unsigned const char *writebuf
,
6758 CORE_ADDR offset
, int len
)
6760 struct process_info_private
*const priv
= current_process ()->priv
;
6761 char filename
[PATH_MAX
];
6764 static const struct link_map_offsets lmo_32bit_offsets
=
6766 0, /* r_version offset. */
6767 4, /* r_debug.r_map offset. */
6768 0, /* l_addr offset in link_map. */
6769 4, /* l_name offset in link_map. */
6770 8, /* l_ld offset in link_map. */
6771 12, /* l_next offset in link_map. */
6772 16 /* l_prev offset in link_map. */
6775 static const struct link_map_offsets lmo_64bit_offsets
=
6777 0, /* r_version offset. */
6778 8, /* r_debug.r_map offset. */
6779 0, /* l_addr offset in link_map. */
6780 8, /* l_name offset in link_map. */
6781 16, /* l_ld offset in link_map. */
6782 24, /* l_next offset in link_map. */
6783 32 /* l_prev offset in link_map. */
6785 const struct link_map_offsets
*lmo
;
6786 unsigned int machine
;
6788 CORE_ADDR lm_addr
= 0, lm_prev
= 0;
6789 CORE_ADDR l_name
, l_addr
, l_ld
, l_next
, l_prev
;
6790 int header_done
= 0;
6792 if (writebuf
!= NULL
)
6794 if (readbuf
== NULL
)
6797 pid
= lwpid_of (current_thread
);
6798 xsnprintf (filename
, sizeof filename
, "/proc/%d/exe", pid
);
6799 is_elf64
= elf_64_file_p (filename
, &machine
);
6800 lmo
= is_elf64
? &lmo_64bit_offsets
: &lmo_32bit_offsets
;
6801 ptr_size
= is_elf64
? 8 : 4;
6803 while (annex
[0] != '\0')
6809 sep
= strchr (annex
, '=');
6813 name_len
= sep
- annex
;
6814 if (name_len
== 5 && startswith (annex
, "start"))
6816 else if (name_len
== 4 && startswith (annex
, "prev"))
6820 annex
= strchr (sep
, ';');
6827 annex
= decode_address_to_semicolon (addrp
, sep
+ 1);
6834 if (priv
->r_debug
== 0)
6835 priv
->r_debug
= get_r_debug (pid
, is_elf64
);
6837 /* We failed to find DT_DEBUG. Such situation will not change
6838 for this inferior - do not retry it. Report it to GDB as
6839 E01, see for the reasons at the GDB solib-svr4.c side. */
6840 if (priv
->r_debug
== (CORE_ADDR
) -1)
6843 if (priv
->r_debug
!= 0)
6845 if (linux_read_memory (priv
->r_debug
+ lmo
->r_version_offset
,
6846 (unsigned char *) &r_version
,
6847 sizeof (r_version
)) != 0
6850 warning ("unexpected r_debug version %d", r_version
);
6852 else if (read_one_ptr (priv
->r_debug
+ lmo
->r_map_offset
,
6853 &lm_addr
, ptr_size
) != 0)
6855 warning ("unable to read r_map from 0x%lx",
6856 (long) priv
->r_debug
+ lmo
->r_map_offset
);
6861 std::string document
= "<library-list-svr4 version=\"1.0\"";
6864 && read_one_ptr (lm_addr
+ lmo
->l_name_offset
,
6865 &l_name
, ptr_size
) == 0
6866 && read_one_ptr (lm_addr
+ lmo
->l_addr_offset
,
6867 &l_addr
, ptr_size
) == 0
6868 && read_one_ptr (lm_addr
+ lmo
->l_ld_offset
,
6869 &l_ld
, ptr_size
) == 0
6870 && read_one_ptr (lm_addr
+ lmo
->l_prev_offset
,
6871 &l_prev
, ptr_size
) == 0
6872 && read_one_ptr (lm_addr
+ lmo
->l_next_offset
,
6873 &l_next
, ptr_size
) == 0)
6875 unsigned char libname
[PATH_MAX
];
6877 if (lm_prev
!= l_prev
)
6879 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6880 (long) lm_prev
, (long) l_prev
);
6884 /* Ignore the first entry even if it has valid name as the first entry
6885 corresponds to the main executable. The first entry should not be
6886 skipped if the dynamic loader was loaded late by a static executable
6887 (see solib-svr4.c parameter ignore_first). But in such case the main
6888 executable does not have PT_DYNAMIC present and this function already
6889 exited above due to failed get_r_debug. */
6891 string_appendf (document
, " main-lm=\"0x%lx\"", (unsigned long) lm_addr
);
6894 /* Not checking for error because reading may stop before
6895 we've got PATH_MAX worth of characters. */
6897 linux_read_memory (l_name
, libname
, sizeof (libname
) - 1);
6898 libname
[sizeof (libname
) - 1] = '\0';
6899 if (libname
[0] != '\0')
6903 /* Terminate `<library-list-svr4'. */
6908 string_appendf (document
, "<library name=\"");
6909 xml_escape_text_append (&document
, (char *) libname
);
6910 string_appendf (document
, "\" lm=\"0x%lx\" "
6911 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6912 (unsigned long) lm_addr
, (unsigned long) l_addr
,
6913 (unsigned long) l_ld
);
6923 /* Empty list; terminate `<library-list-svr4'. */
6927 document
+= "</library-list-svr4>";
6929 int document_len
= document
.length ();
6930 if (offset
< document_len
)
6931 document_len
-= offset
;
6934 if (len
> document_len
)
6937 memcpy (readbuf
, document
.data () + offset
, len
);
6942 #ifdef HAVE_LINUX_BTRACE
6944 btrace_target_info
*
6945 linux_process_target::enable_btrace (ptid_t ptid
,
6946 const btrace_config
*conf
)
6948 return linux_enable_btrace (ptid
, conf
);
6951 /* See to_disable_btrace target method. */
6954 linux_process_target::disable_btrace (btrace_target_info
*tinfo
)
6956 enum btrace_error err
;
6958 err
= linux_disable_btrace (tinfo
);
6959 return (err
== BTRACE_ERR_NONE
? 0 : -1);
6962 /* Encode an Intel Processor Trace configuration. */
6965 linux_low_encode_pt_config (struct buffer
*buffer
,
6966 const struct btrace_data_pt_config
*config
)
6968 buffer_grow_str (buffer
, "<pt-config>\n");
6970 switch (config
->cpu
.vendor
)
6973 buffer_xml_printf (buffer
, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6974 "model=\"%u\" stepping=\"%u\"/>\n",
6975 config
->cpu
.family
, config
->cpu
.model
,
6976 config
->cpu
.stepping
);
6983 buffer_grow_str (buffer
, "</pt-config>\n");
6986 /* Encode a raw buffer. */
6989 linux_low_encode_raw (struct buffer
*buffer
, const gdb_byte
*data
,
6995 /* We use hex encoding - see gdbsupport/rsp-low.h. */
6996 buffer_grow_str (buffer
, "<raw>\n");
7002 elem
[0] = tohex ((*data
>> 4) & 0xf);
7003 elem
[1] = tohex (*data
++ & 0xf);
7005 buffer_grow (buffer
, elem
, 2);
7008 buffer_grow_str (buffer
, "</raw>\n");
7011 /* See to_read_btrace target method. */
7014 linux_process_target::read_btrace (btrace_target_info
*tinfo
,
7016 enum btrace_read_type type
)
7018 struct btrace_data btrace
;
7019 enum btrace_error err
;
7021 err
= linux_read_btrace (&btrace
, tinfo
, type
);
7022 if (err
!= BTRACE_ERR_NONE
)
7024 if (err
== BTRACE_ERR_OVERFLOW
)
7025 buffer_grow_str0 (buffer
, "E.Overflow.");
7027 buffer_grow_str0 (buffer
, "E.Generic Error.");
7032 switch (btrace
.format
)
7034 case BTRACE_FORMAT_NONE
:
7035 buffer_grow_str0 (buffer
, "E.No Trace.");
7038 case BTRACE_FORMAT_BTS
:
7039 buffer_grow_str (buffer
, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7040 buffer_grow_str (buffer
, "<btrace version=\"1.0\">\n");
7042 for (const btrace_block
&block
: *btrace
.variant
.bts
.blocks
)
7043 buffer_xml_printf (buffer
, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7044 paddress (block
.begin
), paddress (block
.end
));
7046 buffer_grow_str0 (buffer
, "</btrace>\n");
7049 case BTRACE_FORMAT_PT
:
7050 buffer_grow_str (buffer
, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7051 buffer_grow_str (buffer
, "<btrace version=\"1.0\">\n");
7052 buffer_grow_str (buffer
, "<pt>\n");
7054 linux_low_encode_pt_config (buffer
, &btrace
.variant
.pt
.config
);
7056 linux_low_encode_raw (buffer
, btrace
.variant
.pt
.data
,
7057 btrace
.variant
.pt
.size
);
7059 buffer_grow_str (buffer
, "</pt>\n");
7060 buffer_grow_str0 (buffer
, "</btrace>\n");
7064 buffer_grow_str0 (buffer
, "E.Unsupported Trace Format.");
7071 /* See to_btrace_conf target method. */
7074 linux_process_target::read_btrace_conf (const btrace_target_info
*tinfo
,
7077 const struct btrace_config
*conf
;
7079 buffer_grow_str (buffer
, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7080 buffer_grow_str (buffer
, "<btrace-conf version=\"1.0\">\n");
7082 conf
= linux_btrace_conf (tinfo
);
7085 switch (conf
->format
)
7087 case BTRACE_FORMAT_NONE
:
7090 case BTRACE_FORMAT_BTS
:
7091 buffer_xml_printf (buffer
, "<bts");
7092 buffer_xml_printf (buffer
, " size=\"0x%x\"", conf
->bts
.size
);
7093 buffer_xml_printf (buffer
, " />\n");
7096 case BTRACE_FORMAT_PT
:
7097 buffer_xml_printf (buffer
, "<pt");
7098 buffer_xml_printf (buffer
, " size=\"0x%x\"", conf
->pt
.size
);
7099 buffer_xml_printf (buffer
, "/>\n");
7104 buffer_grow_str0 (buffer
, "</btrace-conf>\n");
7107 #endif /* HAVE_LINUX_BTRACE */
7109 /* See nat/linux-nat.h. */
7112 current_lwp_ptid (void)
7114 return ptid_of (current_thread
);
7118 linux_process_target::thread_name (ptid_t thread
)
7120 return linux_proc_tid_get_name (thread
);
#if USE_THREAD_DB
/* Fetch the libthread_db handle for PTID via thread_db.  */

bool
linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
				     int *handle_len)
{
  return thread_db_thread_handle (ptid, handle, handle_len);
}
#endif
7132 /* Default implementation of linux_target_ops method "set_pc" for
7133 32-bit pc register which is literally named "pc". */
7136 linux_set_pc_32bit (struct regcache
*regcache
, CORE_ADDR pc
)
7138 uint32_t newpc
= pc
;
7140 supply_register_by_name (regcache
, "pc", &newpc
);
7143 /* Default implementation of linux_target_ops method "get_pc" for
7144 32-bit pc register which is literally named "pc". */
7147 linux_get_pc_32bit (struct regcache
*regcache
)
7151 collect_register_by_name (regcache
, "pc", &pc
);
7153 debug_printf ("stop pc is 0x%" PRIx32
"\n", pc
);
7157 /* Default implementation of linux_target_ops method "set_pc" for
7158 64-bit pc register which is literally named "pc". */
7161 linux_set_pc_64bit (struct regcache
*regcache
, CORE_ADDR pc
)
7163 uint64_t newpc
= pc
;
7165 supply_register_by_name (regcache
, "pc", &newpc
);
7168 /* Default implementation of linux_target_ops method "get_pc" for
7169 64-bit pc register which is literally named "pc". */
7172 linux_get_pc_64bit (struct regcache
*regcache
)
7176 collect_register_by_name (regcache
, "pc", &pc
);
7178 debug_printf ("stop pc is 0x%" PRIx64
"\n", pc
);
7182 /* See linux-low.h. */
7185 linux_get_auxv (int wordsize
, CORE_ADDR match
, CORE_ADDR
*valp
)
7187 gdb_byte
*data
= (gdb_byte
*) alloca (2 * wordsize
);
7190 gdb_assert (wordsize
== 4 || wordsize
== 8);
7192 while (the_target
->read_auxv (offset
, data
, 2 * wordsize
) == 2 * wordsize
)
7196 uint32_t *data_p
= (uint32_t *) data
;
7197 if (data_p
[0] == match
)
7205 uint64_t *data_p
= (uint64_t *) data
;
7206 if (data_p
[0] == match
)
7213 offset
+= 2 * wordsize
;
7219 /* See linux-low.h. */
7222 linux_get_hwcap (int wordsize
)
7224 CORE_ADDR hwcap
= 0;
7225 linux_get_auxv (wordsize
, AT_HWCAP
, &hwcap
);
7229 /* See linux-low.h. */
7232 linux_get_hwcap2 (int wordsize
)
7234 CORE_ADDR hwcap2
= 0;
7235 linux_get_auxv (wordsize
, AT_HWCAP2
, &hwcap2
);
#ifdef HAVE_LINUX_REGSETS
/* Count the regsets in INFO's sentinel-terminated (size < 0) table and
   cache the count in INFO->num_regsets.  */

void
initialize_regsets_info (struct regsets_info *info)
{
  for (info->num_regsets = 0;
       info->regsets[info->num_regsets].size >= 0;
       info->num_regsets++)
    ;
}
#endif
7251 initialize_low (void)
7253 struct sigaction sigchld_action
;
7255 memset (&sigchld_action
, 0, sizeof (sigchld_action
));
7256 set_target_ops (the_linux_target
);
7258 linux_ptrace_init_warnings ();
7259 linux_proc_init_warnings ();
7261 sigchld_action
.sa_handler
= sigchld_handler
;
7262 sigemptyset (&sigchld_action
.sa_mask
);
7263 sigchld_action
.sa_flags
= SA_RESTART
;
7264 sigaction (SIGCHLD
, &sigchld_action
, NULL
);
7266 initialize_low_arch ();
7268 linux_check_ptrace_features ();