1 /* Target-struct-independent code to start (run) and stop an inferior
4 Copyright (C) 1986-2024 Free Software Foundation, Inc.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21 #include "cli/cli-cmds.h"
22 #include "displaced-stepping.h"
25 #include "exceptions.h"
29 #include "breakpoint.h"
32 #include "target-connection.h"
33 #include "gdbthread.h"
41 #include "observable.h"
46 #include "mi/mi-common.h"
47 #include "event-top.h"
49 #include "record-full.h"
50 #include "inline-frame.h"
52 #include "tracepoint.h"
56 #include "completer.h"
57 #include "target-descriptions.h"
58 #include "target-dcache.h"
61 #include "gdbsupport/event-loop.h"
62 #include "thread-fsm.h"
63 #include "gdbsupport/enum-flags.h"
64 #include "progspace-and-thread.h"
66 #include "arch-utils.h"
67 #include "gdbsupport/scope-exit.h"
68 #include "gdbsupport/forward-scope-exit.h"
69 #include "gdbsupport/gdb_select.h"
70 #include <unordered_map>
71 #include "async-event.h"
72 #include "gdbsupport/selftest.h"
73 #include "scoped-mock-context.h"
74 #include "test-target.h"
75 #include "gdbsupport/common-debug.h"
76 #include "gdbsupport/buildargv.h"
77 #include "extension.h"
81 /* Prototypes for local functions */
83 static void sig_print_info (enum gdb_signal
);
85 static void sig_print_header (void);
87 static void follow_inferior_reset_breakpoints (void);
89 static bool currently_stepping (struct thread_info
*tp
);
91 static void insert_hp_step_resume_breakpoint_at_frame (const frame_info_ptr
&);
93 static void insert_step_resume_breakpoint_at_caller (const frame_info_ptr
&);
95 static void insert_longjmp_resume_breakpoint (struct gdbarch
*, CORE_ADDR
);
97 static bool maybe_software_singlestep (struct gdbarch
*gdbarch
);
99 static void resume (gdb_signal sig
);
101 static void wait_for_inferior (inferior
*inf
);
103 static void restart_threads (struct thread_info
*event_thread
,
104 inferior
*inf
= nullptr);
106 static bool start_step_over (void);
108 static bool step_over_info_valid_p (void);
110 static bool schedlock_applies (struct thread_info
*tp
);
112 /* Asynchronous signal handler registered as event loop source for
113 when we have pending events ready to be passed to the core. */
114 static struct async_event_handler
*infrun_async_inferior_event_token
;
116 /* Stores whether infrun_async was previously enabled or disabled.
117 Starts off as -1, indicating "never enabled/disabled". */
118 static int infrun_is_async
= -1;
119 static CORE_ADDR
update_line_range_start (CORE_ADDR pc
,
120 struct execution_control_state
*ecs
);
125 infrun_async (int enable
)
127 if (infrun_is_async
!= enable
)
129 infrun_is_async
= enable
;
131 infrun_debug_printf ("enable=%d", enable
);
134 mark_async_event_handler (infrun_async_inferior_event_token
);
136 clear_async_event_handler (infrun_async_inferior_event_token
);
143 mark_infrun_async_event_handler (void)
145 mark_async_event_handler (infrun_async_inferior_event_token
);
/* When set, stop the 'step' command if we enter a function which has
   no line number information.  The normal behavior is that we step
   over such function.  */
bool step_stop_if_no_debug = false;
153 show_step_stop_if_no_debug (struct ui_file
*file
, int from_tty
,
154 struct cmd_list_element
*c
, const char *value
)
156 gdb_printf (file
, _("Mode of the step operation is %s.\n"), value
);
159 /* proceed and normal_stop use this to notify the user when the
160 inferior stopped in a different thread than it had been running in.
161 It can also be used to find for which thread normal_stop last
163 static thread_info_ref previous_thread
;
168 update_previous_thread ()
170 if (inferior_ptid
== null_ptid
)
171 previous_thread
= nullptr;
173 previous_thread
= thread_info_ref::new_reference (inferior_thread ());
179 get_previous_thread ()
181 return previous_thread
.get ();
/* If set (default for legacy reasons), when following a fork, GDB
   will detach from one of the fork branches, child or parent.
   Exactly which branch is detached depends on 'set follow-fork-mode'
   setting.  */

static bool detach_fork = true;

bool debug_infrun = false;
193 show_debug_infrun (struct ui_file
*file
, int from_tty
,
194 struct cmd_list_element
*c
, const char *value
)
196 gdb_printf (file
, _("Inferior debugging is %s.\n"), value
);
/* Support for disabling address space randomization.  */

bool disable_randomization = true;
204 show_disable_randomization (struct ui_file
*file
, int from_tty
,
205 struct cmd_list_element
*c
, const char *value
)
207 if (target_supports_disable_randomization ())
209 _("Disabling randomization of debuggee's "
210 "virtual address space is %s.\n"),
213 gdb_puts (_("Disabling randomization of debuggee's "
214 "virtual address space is unsupported on\n"
215 "this platform.\n"), file
);
219 set_disable_randomization (const char *args
, int from_tty
,
220 struct cmd_list_element
*c
)
222 if (!target_supports_disable_randomization ())
223 error (_("Disabling randomization of debuggee's "
224 "virtual address space is unsupported on\n"
/* User interface for non-stop mode.  */

bool non_stop = false;
/* Staging value: "set non-stop" writes here first; it only takes
   effect (copied into NON_STOP) when no inferior is running.  */
static bool non_stop_1 = false;
234 set_non_stop (const char *args
, int from_tty
,
235 struct cmd_list_element
*c
)
237 if (target_has_execution ())
239 non_stop_1
= non_stop
;
240 error (_("Cannot change this setting while the inferior is running."));
243 non_stop
= non_stop_1
;
247 show_non_stop (struct ui_file
*file
, int from_tty
,
248 struct cmd_list_element
*c
, const char *value
)
251 _("Controlling the inferior in non-stop mode is %s.\n"),
/* "Observer mode" is somewhat like a more extreme version of
   non-stop, in which all GDB operations that might affect the
   target's execution have been disabled.  */

static bool observer_mode = false;
/* Staging value, same pattern as non_stop_1.  */
static bool observer_mode_1 = false;
263 set_observer_mode (const char *args
, int from_tty
,
264 struct cmd_list_element
*c
)
266 if (target_has_execution ())
268 observer_mode_1
= observer_mode
;
269 error (_("Cannot change this setting while the inferior is running."));
272 observer_mode
= observer_mode_1
;
274 may_write_registers
= !observer_mode
;
275 may_write_memory
= !observer_mode
;
276 may_insert_breakpoints
= !observer_mode
;
277 may_insert_tracepoints
= !observer_mode
;
278 /* We can insert fast tracepoints in or out of observer mode,
279 but enable them if we're going into this mode. */
281 may_insert_fast_tracepoints
= true;
282 may_stop
= !observer_mode
;
283 update_target_permissions ();
285 /* Going *into* observer mode we must force non-stop, then
286 going out we leave it that way. */
289 pagination_enabled
= false;
290 non_stop
= non_stop_1
= true;
294 gdb_printf (_("Observer mode is now %s.\n"),
295 (observer_mode
? "on" : "off"));
299 show_observer_mode (struct ui_file
*file
, int from_tty
,
300 struct cmd_list_element
*c
, const char *value
)
302 gdb_printf (file
, _("Observer mode is %s.\n"), value
);
305 /* This updates the value of observer mode based on changes in
306 permissions. Note that we are deliberately ignoring the values of
307 may-write-registers and may-write-memory, since the user may have
308 reason to enable these during a session, for instance to turn on a
309 debugging-related global. */
312 update_observer_mode (void)
314 bool newval
= (!may_insert_breakpoints
315 && !may_insert_tracepoints
316 && may_insert_fast_tracepoints
320 /* Let the user know if things change. */
321 if (newval
!= observer_mode
)
322 gdb_printf (_("Observer mode is now %s.\n"),
323 (newval
? "on" : "off"));
325 observer_mode
= observer_mode_1
= newval
;
328 /* Tables of how to react to signals; the user sets them. */
330 static unsigned char signal_stop
[GDB_SIGNAL_LAST
];
331 static unsigned char signal_print
[GDB_SIGNAL_LAST
];
332 static unsigned char signal_program
[GDB_SIGNAL_LAST
];
334 /* Table of signals that are registered with "catch signal". A
335 non-zero entry indicates that the signal is caught by some "catch
337 static unsigned char signal_catch
[GDB_SIGNAL_LAST
];
339 /* Table of signals that the target may silently handle.
340 This is automatically determined from the flags above,
341 and simply cached here. */
342 static unsigned char signal_pass
[GDB_SIGNAL_LAST
];
/* Set FLAGS[i] = 1 for every signal i < NSIGS with SIGS[i] non-zero.
   do/while(0) makes the macro safe as a single statement.  */
#define SET_SIGS(nsigs,sigs,flags) \
  do \
    { \
      int signum = (nsigs); \
      while (signum-- > 0) \
	if ((sigs)[signum]) \
	  (flags)[signum] = 1; \
    } \
  while (0)

/* Set FLAGS[i] = 0 for every signal i < NSIGS with SIGS[i] non-zero.  */
#define UNSET_SIGS(nsigs,sigs,flags) \
  do \
    { \
      int signum = (nsigs); \
      while (signum-- > 0) \
	if ((sigs)[signum]) \
	  (flags)[signum] = 0; \
    } \
  while (0)
360 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
361 this function is to avoid exporting `signal_program'. */
364 update_signals_program_target (void)
366 target_program_signals (signal_program
);
/* Value to pass to target_resume() to cause all threads to resume.  */

#define RESUME_ALL minus_one_ptid

/* Command list pointer for the "stop" placeholder.  */

static struct cmd_list_element *stop_command;

/* Nonzero if we want to give control to the user when we're notified
   of shared library events by the dynamic linker.  */
int stop_on_solib_events;
381 /* Enable or disable optional shared library event breakpoints
382 as appropriate when the above flag is changed. */
385 set_stop_on_solib_events (const char *args
,
386 int from_tty
, struct cmd_list_element
*c
)
388 update_solib_breakpoints ();
392 show_stop_on_solib_events (struct ui_file
*file
, int from_tty
,
393 struct cmd_list_element
*c
, const char *value
)
395 gdb_printf (file
, _("Stopping for shared library events is %s.\n"),
399 /* True after stop if current stack frame should be printed. */
401 static bool stop_print_frame
;
403 /* This is a cached copy of the target/ptid/waitstatus of the last
404 event returned by target_wait().
405 This information is returned by get_last_target_status(). */
406 static process_stratum_target
*target_last_proc_target
;
407 static ptid_t target_last_wait_ptid
;
408 static struct target_waitstatus target_last_waitstatus
;
410 void init_thread_stepping_state (struct thread_info
*tss
);
412 static const char follow_fork_mode_child
[] = "child";
413 static const char follow_fork_mode_parent
[] = "parent";
415 static const char *const follow_fork_mode_kind_names
[] = {
416 follow_fork_mode_child
,
417 follow_fork_mode_parent
,
421 static const char *follow_fork_mode_string
= follow_fork_mode_parent
;
423 show_follow_fork_mode_string (struct ui_file
*file
, int from_tty
,
424 struct cmd_list_element
*c
, const char *value
)
427 _("Debugger response to a program "
428 "call of fork or vfork is \"%s\".\n"),
433 /* Handle changes to the inferior list based on the type of fork,
434 which process is being followed, and whether the other process
435 should be detached. On entry inferior_ptid must be the ptid of
436 the fork parent. At return inferior_ptid is the ptid of the
437 followed inferior. */
440 follow_fork_inferior (bool follow_child
, bool detach_fork
)
442 INFRUN_SCOPED_DEBUG_ENTER_EXIT
;
444 infrun_debug_printf ("follow_child = %d, detach_fork = %d",
445 follow_child
, detach_fork
);
447 target_waitkind fork_kind
= inferior_thread ()->pending_follow
.kind ();
448 gdb_assert (fork_kind
== TARGET_WAITKIND_FORKED
449 || fork_kind
== TARGET_WAITKIND_VFORKED
);
450 bool has_vforked
= fork_kind
== TARGET_WAITKIND_VFORKED
;
451 ptid_t parent_ptid
= inferior_ptid
;
452 ptid_t child_ptid
= inferior_thread ()->pending_follow
.child_ptid ();
455 && !non_stop
/* Non-stop always resumes both branches. */
456 && current_ui
->prompt_state
== PROMPT_BLOCKED
457 && !(follow_child
|| detach_fork
|| sched_multi
))
459 /* The parent stays blocked inside the vfork syscall until the
460 child execs or exits. If we don't let the child run, then
461 the parent stays blocked. If we're telling the parent to run
462 in the foreground, the user will not be able to ctrl-c to get
463 back the terminal, effectively hanging the debug session. */
464 gdb_printf (gdb_stderr
, _("\
465 Can not resume the parent process over vfork in the foreground while\n\
466 holding the child stopped. Try \"set detach-on-fork\" or \
467 \"set schedule-multiple\".\n"));
471 inferior
*parent_inf
= current_inferior ();
472 inferior
*child_inf
= nullptr;
474 gdb_assert (parent_inf
->thread_waiting_for_vfork_done
== nullptr);
478 /* Detach new forked process? */
481 /* Before detaching from the child, remove all breakpoints
482 from it. If we forked, then this has already been taken
483 care of by infrun.c. If we vforked however, any
484 breakpoint inserted in the parent is visible in the
485 child, even those added while stopped in a vfork
486 catchpoint. This will remove the breakpoints from the
487 parent also, but they'll be reinserted below. */
490 /* Keep breakpoints list in sync. */
491 remove_breakpoints_inf (current_inferior ());
494 if (print_inferior_events
)
496 /* Ensure that we have a process ptid. */
497 ptid_t process_ptid
= ptid_t (child_ptid
.pid ());
499 target_terminal::ours_for_output ();
500 gdb_printf (_("[Detaching after %s from child %s]\n"),
501 has_vforked
? "vfork" : "fork",
502 target_pid_to_str (process_ptid
).c_str ());
507 /* Add process to GDB's tables. */
508 child_inf
= add_inferior (child_ptid
.pid ());
510 child_inf
->attach_flag
= parent_inf
->attach_flag
;
511 copy_terminal_info (child_inf
, parent_inf
);
512 child_inf
->set_arch (parent_inf
->arch ());
513 child_inf
->tdesc_info
= parent_inf
->tdesc_info
;
515 child_inf
->symfile_flags
= SYMFILE_NO_READ
;
517 /* If this is a vfork child, then the address-space is
518 shared with the parent. */
521 child_inf
->pspace
= parent_inf
->pspace
;
522 child_inf
->aspace
= parent_inf
->aspace
;
524 exec_on_vfork (child_inf
);
526 /* The parent will be frozen until the child is done
527 with the shared region. Keep track of the
529 child_inf
->vfork_parent
= parent_inf
;
530 child_inf
->pending_detach
= false;
531 parent_inf
->vfork_child
= child_inf
;
532 parent_inf
->pending_detach
= false;
536 child_inf
->pspace
= new program_space (new_address_space ());
537 child_inf
->aspace
= child_inf
->pspace
->aspace
;
538 child_inf
->removable
= true;
539 clone_program_space (child_inf
->pspace
, parent_inf
->pspace
);
545 /* If we detached from the child, then we have to be careful
546 to not insert breakpoints in the parent until the child
547 is done with the shared memory region. However, if we're
548 staying attached to the child, then we can and should
549 insert breakpoints, so that we can debug it. A
550 subsequent child exec or exit is enough to know when does
551 the child stops using the parent's address space. */
552 parent_inf
->thread_waiting_for_vfork_done
553 = detach_fork
? inferior_thread () : nullptr;
554 parent_inf
->pspace
->breakpoints_not_allowed
= detach_fork
;
557 ("parent_inf->thread_waiting_for_vfork_done == %s",
558 (parent_inf
->thread_waiting_for_vfork_done
== nullptr
560 : (parent_inf
->thread_waiting_for_vfork_done
561 ->ptid
.to_string ().c_str ())));
566 /* Follow the child. */
568 if (print_inferior_events
)
570 std::string parent_pid
= target_pid_to_str (parent_ptid
);
571 std::string child_pid
= target_pid_to_str (child_ptid
);
573 target_terminal::ours_for_output ();
574 gdb_printf (_("[Attaching after %s %s to child %s]\n"),
576 has_vforked
? "vfork" : "fork",
580 /* Add the new inferior first, so that the target_detach below
581 doesn't unpush the target. */
583 child_inf
= add_inferior (child_ptid
.pid ());
585 child_inf
->attach_flag
= parent_inf
->attach_flag
;
586 copy_terminal_info (child_inf
, parent_inf
);
587 child_inf
->set_arch (parent_inf
->arch ());
588 child_inf
->tdesc_info
= parent_inf
->tdesc_info
;
592 /* If this is a vfork child, then the address-space is shared
594 child_inf
->aspace
= parent_inf
->aspace
;
595 child_inf
->pspace
= parent_inf
->pspace
;
597 exec_on_vfork (child_inf
);
599 else if (detach_fork
)
601 /* We follow the child and detach from the parent: move the parent's
602 program space to the child. This simplifies some things, like
603 doing "next" over fork() and landing on the expected line in the
604 child (note, that is broken with "set detach-on-fork off").
606 Before assigning brand new spaces for the parent, remove
607 breakpoints from it: because the new pspace won't match
608 currently inserted locations, the normal detach procedure
609 wouldn't remove them, and we would leave them inserted when
611 remove_breakpoints_inf (parent_inf
);
613 child_inf
->aspace
= parent_inf
->aspace
;
614 child_inf
->pspace
= parent_inf
->pspace
;
615 parent_inf
->pspace
= new program_space (new_address_space ());
616 parent_inf
->aspace
= parent_inf
->pspace
->aspace
;
617 clone_program_space (parent_inf
->pspace
, child_inf
->pspace
);
619 /* The parent inferior is still the current one, so keep things
621 set_current_program_space (parent_inf
->pspace
);
625 child_inf
->pspace
= new program_space (new_address_space ());
626 child_inf
->aspace
= child_inf
->pspace
->aspace
;
627 child_inf
->removable
= true;
628 child_inf
->symfile_flags
= SYMFILE_NO_READ
;
629 clone_program_space (child_inf
->pspace
, parent_inf
->pspace
);
633 gdb_assert (current_inferior () == parent_inf
);
635 /* If we are setting up an inferior for the child, target_follow_fork is
636 responsible for pushing the appropriate targets on the new inferior's
637 target stack and adding the initial thread (with ptid CHILD_PTID).
639 If we are not setting up an inferior for the child (because following
640 the parent and detach_fork is true), it is responsible for detaching
642 target_follow_fork (child_inf
, child_ptid
, fork_kind
, follow_child
,
645 gdb::observers::inferior_forked
.notify (parent_inf
, child_inf
, fork_kind
);
647 /* target_follow_fork must leave the parent as the current inferior. If we
648 want to follow the child, we make it the current one below. */
649 gdb_assert (current_inferior () == parent_inf
);
651 /* If there is a child inferior, target_follow_fork must have created a thread
653 if (child_inf
!= nullptr)
654 gdb_assert (!child_inf
->thread_list
.empty ());
656 /* Clear the parent thread's pending follow field. Do this before calling
657 target_detach, so that the target can differentiate the two following
660 - We continue past a fork with "follow-fork-mode == child" &&
661 "detach-on-fork on", and therefore detach the parent. In that
662 case the target should not detach the fork child.
663 - We run to a fork catchpoint and the user types "detach". In that
664 case, the target should detach the fork child in addition to the
667 The former case will have pending_follow cleared, the later will have
668 pending_follow set. */
669 thread_info
*parent_thread
= parent_inf
->find_thread (parent_ptid
);
670 gdb_assert (parent_thread
!= nullptr);
671 parent_thread
->pending_follow
.set_spurious ();
673 /* Detach the parent if needed. */
676 /* If we're vforking, we want to hold on to the parent until
677 the child exits or execs. At child exec or exit time we
678 can remove the old breakpoints from the parent and detach
679 or resume debugging it. Otherwise, detach the parent now;
680 we'll want to reuse it's program/address spaces, but we
681 can't set them to the child before removing breakpoints
682 from the parent, otherwise, the breakpoints module could
683 decide to remove breakpoints from the wrong process (since
684 they'd be assigned to the same address space). */
688 gdb_assert (child_inf
->vfork_parent
== nullptr);
689 gdb_assert (parent_inf
->vfork_child
== nullptr);
690 child_inf
->vfork_parent
= parent_inf
;
691 child_inf
->pending_detach
= false;
692 parent_inf
->vfork_child
= child_inf
;
693 parent_inf
->pending_detach
= detach_fork
;
695 else if (detach_fork
)
697 if (print_inferior_events
)
699 /* Ensure that we have a process ptid. */
700 ptid_t process_ptid
= ptid_t (parent_ptid
.pid ());
702 target_terminal::ours_for_output ();
703 gdb_printf (_("[Detaching after fork from "
705 target_pid_to_str (process_ptid
).c_str ());
708 target_detach (parent_inf
, 0);
712 /* If we ended up creating a new inferior, call post_create_inferior to inform
713 the various subcomponents. */
714 if (child_inf
!= nullptr)
716 /* If FOLLOW_CHILD, we leave CHILD_INF as the current inferior
717 (do not restore the parent as the current inferior). */
718 std::optional
<scoped_restore_current_thread
> maybe_restore
;
720 if (!follow_child
&& !sched_multi
)
721 maybe_restore
.emplace ();
723 switch_to_thread (*child_inf
->threads ().begin ());
724 post_create_inferior (0);
730 /* Set the last target status as TP having stopped. */
733 set_last_target_status_stopped (thread_info
*tp
)
735 set_last_target_status (tp
->inf
->process_target (), tp
->ptid
,
736 target_waitstatus
{}.set_stopped (GDB_SIGNAL_0
));
739 /* Tell the target to follow the fork we're stopped at. Returns true
740 if the inferior should be resumed; false, if the target for some
741 reason decided it's best not to resume. */
746 INFRUN_SCOPED_DEBUG_ENTER_EXIT
;
748 bool follow_child
= (follow_fork_mode_string
== follow_fork_mode_child
);
749 bool should_resume
= true;
751 /* Copy user stepping state to the new inferior thread. FIXME: the
752 followed fork child thread should have a copy of most of the
753 parent thread structure's run control related fields, not just these.
754 Initialized to avoid "may be used uninitialized" warnings from gcc. */
755 struct breakpoint
*step_resume_breakpoint
= nullptr;
756 struct breakpoint
*exception_resume_breakpoint
= nullptr;
757 CORE_ADDR step_range_start
= 0;
758 CORE_ADDR step_range_end
= 0;
759 int current_line
= 0;
760 symtab
*current_symtab
= nullptr;
761 struct frame_id step_frame_id
= { 0 };
765 thread_info
*cur_thr
= inferior_thread ();
768 = user_visible_resume_ptid (cur_thr
->control
.stepping_command
);
769 process_stratum_target
*resume_target
770 = user_visible_resume_target (resume_ptid
);
772 /* Check if there's a thread that we're about to resume, other
773 than the current, with an unfollowed fork/vfork. If so,
774 switch back to it, to tell the target to follow it (in either
775 direction). We'll afterwards refuse to resume, and inform
776 the user what happened. */
777 for (thread_info
*tp
: all_non_exited_threads (resume_target
,
783 /* follow_fork_inferior clears tp->pending_follow, and below
784 we'll need the value after the follow_fork_inferior
786 target_waitkind kind
= tp
->pending_follow
.kind ();
788 if (kind
!= TARGET_WAITKIND_SPURIOUS
)
790 infrun_debug_printf ("need to follow-fork [%s] first",
791 tp
->ptid
.to_string ().c_str ());
793 switch_to_thread (tp
);
795 /* Set up inferior(s) as specified by the caller, and
796 tell the target to do whatever is necessary to follow
797 either parent or child. */
800 /* The thread that started the execution command
801 won't exist in the child. Abort the command and
802 immediately stop in this thread, in the child,
804 should_resume
= false;
808 /* Following the parent, so let the thread fork its
809 child freely, it won't influence the current
810 execution command. */
811 if (follow_fork_inferior (follow_child
, detach_fork
))
813 /* Target refused to follow, or there's some
814 other reason we shouldn't resume. */
815 switch_to_thread (cur_thr
);
816 set_last_target_status_stopped (cur_thr
);
820 /* If we're following a vfork, when we need to leave
821 the just-forked thread as selected, as we need to
822 solo-resume it to collect the VFORK_DONE event.
823 If we're following a fork, however, switch back
824 to the original thread that we continue stepping
826 if (kind
!= TARGET_WAITKIND_VFORKED
)
828 gdb_assert (kind
== TARGET_WAITKIND_FORKED
);
829 switch_to_thread (cur_thr
);
838 thread_info
*tp
= inferior_thread ();
840 /* If there were any forks/vforks that were caught and are now to be
841 followed, then do so now. */
842 switch (tp
->pending_follow
.kind ())
844 case TARGET_WAITKIND_FORKED
:
845 case TARGET_WAITKIND_VFORKED
:
847 ptid_t parent
, child
;
848 std::unique_ptr
<struct thread_fsm
> thread_fsm
;
850 /* If the user did a next/step, etc, over a fork call,
851 preserve the stepping state in the fork child. */
852 if (follow_child
&& should_resume
)
854 step_resume_breakpoint
= clone_momentary_breakpoint
855 (tp
->control
.step_resume_breakpoint
);
856 step_range_start
= tp
->control
.step_range_start
;
857 step_range_end
= tp
->control
.step_range_end
;
858 current_line
= tp
->current_line
;
859 current_symtab
= tp
->current_symtab
;
860 step_frame_id
= tp
->control
.step_frame_id
;
861 exception_resume_breakpoint
862 = clone_momentary_breakpoint (tp
->control
.exception_resume_breakpoint
);
863 thread_fsm
= tp
->release_thread_fsm ();
865 /* For now, delete the parent's sr breakpoint, otherwise,
866 parent/child sr breakpoints are considered duplicates,
867 and the child version will not be installed. Remove
868 this when the breakpoints module becomes aware of
869 inferiors and address spaces. */
870 delete_step_resume_breakpoint (tp
);
871 tp
->control
.step_range_start
= 0;
872 tp
->control
.step_range_end
= 0;
873 tp
->control
.step_frame_id
= null_frame_id
;
874 delete_exception_resume_breakpoint (tp
);
877 parent
= inferior_ptid
;
878 child
= tp
->pending_follow
.child_ptid ();
880 /* If handling a vfork, stop all the inferior's threads, they will be
881 restarted when the vfork shared region is complete. */
882 if (tp
->pending_follow
.kind () == TARGET_WAITKIND_VFORKED
883 && target_is_non_stop_p ())
884 stop_all_threads ("handling vfork", tp
->inf
);
886 process_stratum_target
*parent_targ
= tp
->inf
->process_target ();
887 /* Set up inferior(s) as specified by the caller, and tell the
888 target to do whatever is necessary to follow either parent
890 if (follow_fork_inferior (follow_child
, detach_fork
))
892 /* Target refused to follow, or there's some other reason
893 we shouldn't resume. */
898 /* If we followed the child, switch to it... */
901 tp
= parent_targ
->find_thread (child
);
902 switch_to_thread (tp
);
904 /* ... and preserve the stepping state, in case the
905 user was stepping over the fork call. */
908 tp
->control
.step_resume_breakpoint
909 = step_resume_breakpoint
;
910 tp
->control
.step_range_start
= step_range_start
;
911 tp
->control
.step_range_end
= step_range_end
;
912 tp
->current_line
= current_line
;
913 tp
->current_symtab
= current_symtab
;
914 tp
->control
.step_frame_id
= step_frame_id
;
915 tp
->control
.exception_resume_breakpoint
916 = exception_resume_breakpoint
;
917 tp
->set_thread_fsm (std::move (thread_fsm
));
921 /* If we get here, it was because we're trying to
922 resume from a fork catchpoint, but, the user
923 has switched threads away from the thread that
924 forked. In that case, the resume command
925 issued is most likely not applicable to the
926 child, so just warn, and refuse to resume. */
927 warning (_("Not resuming: switched threads "
928 "before following fork child."));
931 /* Reset breakpoints in the child as appropriate. */
932 follow_inferior_reset_breakpoints ();
937 case TARGET_WAITKIND_SPURIOUS
:
938 /* Nothing to follow. */
941 internal_error ("Unexpected pending_follow.kind %d\n",
942 tp
->pending_follow
.kind ());
947 set_last_target_status_stopped (tp
);
948 return should_resume
;
952 follow_inferior_reset_breakpoints (void)
954 struct thread_info
*tp
= inferior_thread ();
956 /* Was there a step_resume breakpoint? (There was if the user
957 did a "next" at the fork() call.) If so, explicitly reset its
958 thread number. Cloned step_resume breakpoints are disabled on
959 creation, so enable it here now that it is associated with the
962 step_resumes are a form of bp that are made to be per-thread.
963 Since we created the step_resume bp when the parent process
964 was being debugged, and now are switching to the child process,
965 from the breakpoint package's viewpoint, that's a switch of
966 "threads". We must update the bp's notion of which thread
967 it is for, or it'll be ignored when it triggers. */
969 if (tp
->control
.step_resume_breakpoint
)
971 breakpoint_re_set_thread (tp
->control
.step_resume_breakpoint
);
972 tp
->control
.step_resume_breakpoint
->first_loc ().enabled
= 1;
975 /* Treat exception_resume breakpoints like step_resume breakpoints. */
976 if (tp
->control
.exception_resume_breakpoint
)
978 breakpoint_re_set_thread (tp
->control
.exception_resume_breakpoint
);
979 tp
->control
.exception_resume_breakpoint
->first_loc ().enabled
= 1;
982 /* Reinsert all breakpoints in the child. The user may have set
983 breakpoints after catching the fork, in which case those
984 were never set in the child, but only in the parent. This makes
985 sure the inserted breakpoints match the breakpoint list. */
987 breakpoint_re_set ();
988 insert_breakpoints ();
991 /* The child has exited or execed: resume THREAD, a thread of the parent,
992 if it was meant to be executing. */
995 proceed_after_vfork_done (thread_info
*thread
)
997 if (thread
->state
== THREAD_RUNNING
998 && !thread
->executing ()
999 && !thread
->stop_requested
1000 && thread
->stop_signal () == GDB_SIGNAL_0
)
1002 infrun_debug_printf ("resuming vfork parent thread %s",
1003 thread
->ptid
.to_string ().c_str ());
1005 switch_to_thread (thread
);
1006 clear_proceed_status (0);
1007 proceed ((CORE_ADDR
) -1, GDB_SIGNAL_DEFAULT
);
1011 /* Called whenever we notice an exec or exit event, to handle
1012 detaching or resuming a vfork parent. */
1015 handle_vfork_child_exec_or_exit (int exec
)
1017 INFRUN_SCOPED_DEBUG_ENTER_EXIT
;
1019 struct inferior
*inf
= current_inferior ();
1021 if (inf
->vfork_parent
)
1023 inferior
*resume_parent
= nullptr;
1025 /* This exec or exit marks the end of the shared memory region
1026 between the parent and the child. Break the bonds. */
1027 inferior
*vfork_parent
= inf
->vfork_parent
;
1028 inf
->vfork_parent
->vfork_child
= nullptr;
1029 inf
->vfork_parent
= nullptr;
1031 /* If the user wanted to detach from the parent, now is the
1033 if (vfork_parent
->pending_detach
)
1035 struct program_space
*pspace
;
1037 /* follow-fork child, detach-on-fork on. */
1039 vfork_parent
->pending_detach
= false;
1041 scoped_restore_current_pspace_and_thread restore_thread
;
1043 /* We're letting loose of the parent. */
1044 thread_info
*tp
= any_live_thread_of_inferior (vfork_parent
);
1045 switch_to_thread (tp
);
1047 /* We're about to detach from the parent, which implicitly
1048 removes breakpoints from its address space. There's a
1049 catch here: we want to reuse the spaces for the child,
1050 but, parent/child are still sharing the pspace at this
1051 point, although the exec in reality makes the kernel give
1052 the child a fresh set of new pages. The problem here is
1053 that the breakpoints module being unaware of this, would
1054 likely chose the child process to write to the parent
1055 address space. Swapping the child temporarily away from
1056 the spaces has the desired effect. Yes, this is "sort
1059 pspace
= inf
->pspace
;
1060 inf
->pspace
= nullptr;
1061 address_space_ref_ptr aspace
= std::move (inf
->aspace
);
1063 if (print_inferior_events
)
1066 = target_pid_to_str (ptid_t (vfork_parent
->pid
));
1068 target_terminal::ours_for_output ();
1072 gdb_printf (_("[Detaching vfork parent %s "
1073 "after child exec]\n"), pidstr
.c_str ());
1077 gdb_printf (_("[Detaching vfork parent %s "
1078 "after child exit]\n"), pidstr
.c_str ());
1082 target_detach (vfork_parent
, 0);
1085 inf
->pspace
= pspace
;
1086 inf
->aspace
= aspace
;
1090 /* We're staying attached to the parent, so, really give the
1091 child a new address space. */
1092 inf
->pspace
= new program_space (maybe_new_address_space ());
1093 inf
->aspace
= inf
->pspace
->aspace
;
1094 inf
->removable
= true;
1095 set_current_program_space (inf
->pspace
);
1097 resume_parent
= vfork_parent
;
1101 /* If this is a vfork child exiting, then the pspace and
1102 aspaces were shared with the parent. Since we're
1103 reporting the process exit, we'll be mourning all that is
1104 found in the address space, and switching to null_ptid,
1105 preparing to start a new inferior. But, since we don't
1106 want to clobber the parent's address/program spaces, we
1107 go ahead and create a new one for this exiting
1110 scoped_restore_current_thread restore_thread
;
1112 /* Temporarily switch to the vfork parent, to facilitate ptrace
1113 calls done during maybe_new_address_space. */
1114 switch_to_thread (any_live_thread_of_inferior (vfork_parent
));
1115 address_space_ref_ptr aspace
= maybe_new_address_space ();
1117 /* Switch back to the vfork child inferior. Switch to no-thread
1118 while running clone_program_space, so that clone_program_space
1119 doesn't want to read the selected frame of a dead process. */
1120 switch_to_inferior_no_thread (inf
);
1122 inf
->pspace
= new program_space (std::move (aspace
));
1123 inf
->aspace
= inf
->pspace
->aspace
;
1124 set_current_program_space (inf
->pspace
);
1125 inf
->removable
= true;
1126 inf
->symfile_flags
= SYMFILE_NO_READ
;
1127 clone_program_space (inf
->pspace
, vfork_parent
->pspace
);
1129 resume_parent
= vfork_parent
;
1132 gdb_assert (current_program_space
== inf
->pspace
);
1134 if (non_stop
&& resume_parent
!= nullptr)
1136 /* If the user wanted the parent to be running, let it go
1138 scoped_restore_current_thread restore_thread
;
1140 infrun_debug_printf ("resuming vfork parent process %d",
1141 resume_parent
->pid
);
1143 for (thread_info
*thread
: resume_parent
->threads ())
1144 proceed_after_vfork_done (thread
);
1149 /* Handle TARGET_WAITKIND_VFORK_DONE. */
1152 handle_vfork_done (thread_info
*event_thread
)
1154 INFRUN_SCOPED_DEBUG_ENTER_EXIT
;
1156 /* We only care about this event if inferior::thread_waiting_for_vfork_done is
1157 set, that is if we are waiting for a vfork child not under our control
1158 (because we detached it) to exec or exit.
1160 If an inferior has vforked and we are debugging the child, we don't use
1161 the vfork-done event to get notified about the end of the shared address
1162 space window. We rely instead on the child's exec or exit event, and the
1163 inferior::vfork_{parent,child} fields are used instead. See
1164 handle_vfork_child_exec_or_exit for that. */
1165 if (event_thread
->inf
->thread_waiting_for_vfork_done
== nullptr)
1167 infrun_debug_printf ("not waiting for a vfork-done event");
1171 /* We stopped all threads (other than the vforking thread) of the inferior in
1172 follow_fork and kept them stopped until now. It should therefore not be
1173 possible for another thread to have reported a vfork during that window.
1174 If THREAD_WAITING_FOR_VFORK_DONE is set, it has to be the same thread whose
1175 vfork-done we are handling right now. */
1176 gdb_assert (event_thread
->inf
->thread_waiting_for_vfork_done
== event_thread
);
1178 event_thread
->inf
->thread_waiting_for_vfork_done
= nullptr;
1179 event_thread
->inf
->pspace
->breakpoints_not_allowed
= 0;
1181 /* On non-stop targets, we stopped all the inferior's threads in follow_fork,
1182 resume them now. On all-stop targets, everything that needs to be resumed
1183 will be when we resume the event thread. */
1184 if (target_is_non_stop_p ())
1186 /* restart_threads and start_step_over may change the current thread, make
1187 sure we leave the event thread as the current thread. */
1188 scoped_restore_current_thread restore_thread
;
1190 insert_breakpoints ();
1193 if (!step_over_info_valid_p ())
1194 restart_threads (event_thread
, event_thread
->inf
);
/* Enum strings for "set|show follow-exec-mode".  */

static const char follow_exec_mode_new[] = "new";
static const char follow_exec_mode_same[] = "same";
static const char *const follow_exec_mode_names[] =
{
  follow_exec_mode_new,
  follow_exec_mode_same,
  nullptr,
};

/* The currently selected mode; defaults to following the exec in the
   same inferior.  */
static const char *follow_exec_mode_string = follow_exec_mode_same;
1211 show_follow_exec_mode_string (struct ui_file
*file
, int from_tty
,
1212 struct cmd_list_element
*c
, const char *value
)
1214 gdb_printf (file
, _("Follow exec mode is \"%s\".\n"), value
);
1217 /* EXEC_FILE_TARGET is assumed to be non-NULL. */
1220 follow_exec (ptid_t ptid
, const char *exec_file_target
)
1222 int pid
= ptid
.pid ();
1223 ptid_t process_ptid
;
1225 /* Switch terminal for any messages produced e.g. by
1226 breakpoint_re_set. */
1227 target_terminal::ours_for_output ();
1229 /* This is an exec event that we actually wish to pay attention to.
1230 Refresh our symbol table to the newly exec'd program, remove any
1231 momentary bp's, etc.
1233 If there are breakpoints, they aren't really inserted now,
1234 since the exec() transformed our inferior into a fresh set
1237 We want to preserve symbolic breakpoints on the list, since
1238 we have hopes that they can be reset after the new a.out's
1239 symbol table is read.
1241 However, any "raw" breakpoints must be removed from the list
1242 (e.g., the solib bp's), since their address is probably invalid
1245 And, we DON'T want to call delete_breakpoints() here, since
1246 that may write the bp's "shadow contents" (the instruction
1247 value that was overwritten with a TRAP instruction). Since
1248 we now have a new a.out, those shadow contents aren't valid. */
1250 mark_breakpoints_out (current_program_space
);
1252 /* The target reports the exec event to the main thread, even if
1253 some other thread does the exec, and even if the main thread was
1254 stopped or already gone. We may still have non-leader threads of
1255 the process on our list. E.g., on targets that don't have thread
1256 exit events (like remote) and nothing forces an update of the
1257 thread list up to here. When debugging remotely, it's best to
1258 avoid extra traffic, when possible, so avoid syncing the thread
1259 list with the target, and instead go ahead and delete all threads
1260 of the process but the one that reported the event. Note this must
1261 be done before calling update_breakpoints_after_exec, as
1262 otherwise clearing the threads' resources would reference stale
1263 thread breakpoints -- it may have been one of these threads that
1264 stepped across the exec. We could just clear their stepping
1265 states, but as long as we're iterating, might as well delete
1266 them. Deleting them now rather than at the next user-visible
1267 stop provides a nicer sequence of events for user and MI
1269 for (thread_info
*th
: all_threads_safe ())
1270 if (th
->ptid
.pid () == pid
&& th
->ptid
!= ptid
)
1273 /* We also need to clear any left over stale state for the
1274 leader/event thread. E.g., if there was any step-resume
1275 breakpoint or similar, it's gone now. We cannot truly
1276 step-to-next statement through an exec(). */
1277 thread_info
*th
= inferior_thread ();
1278 th
->control
.step_resume_breakpoint
= nullptr;
1279 th
->control
.exception_resume_breakpoint
= nullptr;
1280 th
->control
.single_step_breakpoints
= nullptr;
1281 th
->control
.step_range_start
= 0;
1282 th
->control
.step_range_end
= 0;
1284 /* The user may have had the main thread held stopped in the
1285 previous image (e.g., schedlock on, or non-stop). Release
1287 th
->stop_requested
= 0;
1289 update_breakpoints_after_exec ();
1291 /* What is this a.out's name? */
1292 process_ptid
= ptid_t (pid
);
1293 gdb_printf (_("%s is executing new program: %s\n"),
1294 target_pid_to_str (process_ptid
).c_str (),
1297 /* We've followed the inferior through an exec. Therefore, the
1298 inferior has essentially been killed & reborn. */
1300 breakpoint_init_inferior (current_inferior (), inf_execd
);
1302 gdb::unique_xmalloc_ptr
<char> exec_file_host
1303 = exec_file_find (exec_file_target
, nullptr);
1305 /* If we were unable to map the executable target pathname onto a host
1306 pathname, tell the user that. Otherwise GDB's subsequent behavior
1307 is confusing. Maybe it would even be better to stop at this point
1308 so that the user can specify a file manually before continuing. */
1309 if (exec_file_host
== nullptr)
1310 warning (_("Could not load symbols for executable %s.\n"
1311 "Do you need \"set sysroot\"?"),
1314 /* Reset the shared library package. This ensures that we get a
1315 shlib event when the child reaches "_start", at which point the
1316 dld will have had a chance to initialize the child. */
1317 /* Also, loading a symbol file below may trigger symbol lookups, and
1318 we don't want those to be satisfied by the libraries of the
1319 previous incarnation of this process. */
1320 no_shared_libraries (current_program_space
);
1322 inferior
*execing_inferior
= current_inferior ();
1323 inferior
*following_inferior
;
1325 if (follow_exec_mode_string
== follow_exec_mode_new
)
1327 /* The user wants to keep the old inferior and program spaces
1328 around. Create a new fresh one, and switch to it. */
1330 /* Do exit processing for the original inferior before setting the new
1331 inferior's pid. Having two inferiors with the same pid would confuse
1332 find_inferior_p(t)id. Transfer the terminal state and info from the
1333 old to the new inferior. */
1334 following_inferior
= add_inferior_with_spaces ();
1336 swap_terminal_info (following_inferior
, execing_inferior
);
1337 exit_inferior (execing_inferior
);
1339 following_inferior
->pid
= pid
;
1343 /* follow-exec-mode is "same", we continue execution in the execing
1345 following_inferior
= execing_inferior
;
1347 /* The old description may no longer be fit for the new image.
1348 E.g, a 64-bit process exec'ed a 32-bit process. Clear the
1349 old description; we'll read a new one below. No need to do
1350 this on "follow-exec-mode new", as the old inferior stays
1351 around (its description is later cleared/refetched on
1353 target_clear_description ();
1356 target_follow_exec (following_inferior
, ptid
, exec_file_target
);
1358 gdb_assert (current_inferior () == following_inferior
);
1359 gdb_assert (current_program_space
== following_inferior
->pspace
);
1361 /* Attempt to open the exec file. SYMFILE_DEFER_BP_RESET is used
1362 because the proper displacement for a PIE (Position Independent
1363 Executable) main symbol file will only be computed by
1364 solib_create_inferior_hook below. breakpoint_re_set would fail
1365 to insert the breakpoints with the zero displacement. */
1366 try_open_exec_file (exec_file_host
.get (), following_inferior
,
1367 SYMFILE_DEFER_BP_RESET
);
1369 /* If the target can specify a description, read it. Must do this
1370 after flipping to the new executable (because the target supplied
1371 description must be compatible with the executable's
1372 architecture, and the old executable may e.g., be 32-bit, while
1373 the new one 64-bit), and before anything involving memory or
1375 target_find_description ();
1377 gdb::observers::inferior_execd
.notify (execing_inferior
, following_inferior
);
1379 breakpoint_re_set ();
1381 /* Reinsert all breakpoints. (Those which were symbolic have
1382 been reset to the proper address in the new a.out, thanks
1383 to symbol_file_command...). */
1384 insert_breakpoints ();
1386 /* The next resume of this inferior should bring it to the shlib
1387 startup breakpoints. (If the user had also set bp's on
1388 "main" from the old (parent) process, then they'll auto-
1389 matically get reset there in the new process.). */
1392 /* The chain of threads that need to do a step-over operation to get
1393 past e.g., a breakpoint. What technique is used to step over the
1394 breakpoint/watchpoint does not matter -- all threads end up in the
1395 same queue, to maintain rough temporal order of execution, in order
1396 to avoid starvation, otherwise, we could e.g., find ourselves
1397 constantly stepping the same couple threads past their breakpoints
1398 over and over, if the single-step finish fast enough. */
1399 thread_step_over_list global_thread_step_over_list
;
1401 /* Bit flags indicating what the thread needs to step over. */
1403 enum step_over_what_flag
1405 /* Step over a breakpoint. */
1406 STEP_OVER_BREAKPOINT
= 1,
1408 /* Step past a non-continuable watchpoint, in order to let the
1409 instruction execute so we can evaluate the watchpoint
1411 STEP_OVER_WATCHPOINT
= 2
1413 DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag
, step_over_what
);
1415 /* Info about an instruction that is being stepped over. */
1417 struct step_over_info
1419 /* If we're stepping past a breakpoint, this is the address space
1420 and address of the instruction the breakpoint is set at. We'll
1421 skip inserting all breakpoints here. Valid iff ASPACE is
1423 const address_space
*aspace
= nullptr;
1424 CORE_ADDR address
= 0;
1426 /* The instruction being stepped over triggers a nonsteppable
1427 watchpoint. If true, we'll skip inserting watchpoints. */
1428 int nonsteppable_watchpoint_p
= 0;
1430 /* The thread's global number. */
1434 /* The step-over info of the location that is being stepped over.
1436 Note that with async/breakpoint always-inserted mode, a user might
1437 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
1438 being stepped over. As setting a new breakpoint inserts all
1439 breakpoints, we need to make sure the breakpoint being stepped over
1440 isn't inserted then. We do that by only clearing the step-over
1441 info when the step-over is actually finished (or aborted).
1443 Presently GDB can only step over one breakpoint at any given time.
1444 Given threads that can't run code in the same address space as the
1445 breakpoint's can't really miss the breakpoint, GDB could be taught
1446 to step-over at most one breakpoint per address space (so this info
1447 could move to the address space object if/when GDB is extended).
1448 The set of breakpoints being stepped over will normally be much
1449 smaller than the set of all breakpoints, so a flag in the
1450 breakpoint location structure would be wasteful. A separate list
1451 also saves complexity and run-time, as otherwise we'd have to go
1452 through all breakpoint locations clearing their flag whenever we
1453 start a new sequence. Similar considerations weigh against storing
1454 this info in the thread object. Plus, not all step overs actually
1455 have breakpoint locations -- e.g., stepping past a single-step
1456 breakpoint, or stepping to complete a non-continuable
1458 static struct step_over_info step_over_info
;
1460 /* Record the address of the breakpoint/instruction we're currently
1462 N.B. We record the aspace and address now, instead of say just the thread,
1463 because when we need the info later the thread may be running. */
1466 set_step_over_info (const address_space
*aspace
, CORE_ADDR address
,
1467 int nonsteppable_watchpoint_p
,
1470 step_over_info
.aspace
= aspace
;
1471 step_over_info
.address
= address
;
1472 step_over_info
.nonsteppable_watchpoint_p
= nonsteppable_watchpoint_p
;
1473 step_over_info
.thread
= thread
;
1476 /* Called when we're not longer stepping over a breakpoint / an
1477 instruction, so all breakpoints are free to be (re)inserted. */
1480 clear_step_over_info (void)
1482 infrun_debug_printf ("clearing step over info");
1483 step_over_info
.aspace
= nullptr;
1484 step_over_info
.address
= 0;
1485 step_over_info
.nonsteppable_watchpoint_p
= 0;
1486 step_over_info
.thread
= -1;
1492 stepping_past_instruction_at (struct address_space
*aspace
,
1495 return (step_over_info
.aspace
!= nullptr
1496 && breakpoint_address_match (aspace
, address
,
1497 step_over_info
.aspace
,
1498 step_over_info
.address
));
1504 thread_is_stepping_over_breakpoint (int thread
)
1506 return (step_over_info
.thread
!= -1
1507 && thread
== step_over_info
.thread
);
1513 stepping_past_nonsteppable_watchpoint (void)
1515 return step_over_info
.nonsteppable_watchpoint_p
;
1518 /* Returns true if step-over info is valid. */
1521 step_over_info_valid_p (void)
1523 return (step_over_info
.aspace
!= nullptr
1524 || stepping_past_nonsteppable_watchpoint ());
1528 /* Displaced stepping. */
1530 /* In non-stop debugging mode, we must take special care to manage
1531 breakpoints properly; in particular, the traditional strategy for
1532 stepping a thread past a breakpoint it has hit is unsuitable.
1533 'Displaced stepping' is a tactic for stepping one thread past a
1534 breakpoint it has hit while ensuring that other threads running
1535 concurrently will hit the breakpoint as they should.
1537 The traditional way to step a thread T off a breakpoint in a
1538 multi-threaded program in all-stop mode is as follows:
1540 a0) Initially, all threads are stopped, and breakpoints are not
1542 a1) We single-step T, leaving breakpoints uninserted.
1543 a2) We insert breakpoints, and resume all threads.
1545 In non-stop debugging, however, this strategy is unsuitable: we
1546 don't want to have to stop all threads in the system in order to
1547 continue or step T past a breakpoint. Instead, we use displaced
1550 n0) Initially, T is stopped, other threads are running, and
1551 breakpoints are inserted.
1552 n1) We copy the instruction "under" the breakpoint to a separate
1553 location, outside the main code stream, making any adjustments
1554 to the instruction, register, and memory state as directed by
1556 n2) We single-step T over the instruction at its new location.
1557 n3) We adjust the resulting register and memory state as directed
1558 by T's architecture. This includes resetting T's PC to point
1559 back into the main instruction stream.
1562 This approach depends on the following gdbarch methods:
1564 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1565 indicate where to copy the instruction, and how much space must
1566 be reserved there. We use these in step n1.
1568 - gdbarch_displaced_step_copy_insn copies a instruction to a new
1569 address, and makes any necessary adjustments to the instruction,
1570 register contents, and memory. We use this in step n1.
1572 - gdbarch_displaced_step_fixup adjusts registers and memory after
1573 we have successfully single-stepped the instruction, to yield the
1574 same effect the instruction would have had if we had executed it
1575 at its original address. We use this in step n3.
1577 The gdbarch_displaced_step_copy_insn and
1578 gdbarch_displaced_step_fixup functions must be written so that
1579 copying an instruction with gdbarch_displaced_step_copy_insn,
1580 single-stepping across the copied instruction, and then applying
1581 gdbarch_displaced_insn_fixup should have the same effects on the
1582 thread's memory and registers as stepping the instruction in place
1583 would have. Exactly which responsibilities fall to the copy and
1584 which fall to the fixup is up to the author of those functions.
1586 See the comments in gdbarch.sh for details.
1588 Note that displaced stepping and software single-step cannot
1589 currently be used in combination, although with some care I think
1590 they could be made to. Software single-step works by placing
1591 breakpoints on all possible subsequent instructions; if the
1592 displaced instruction is a PC-relative jump, those breakpoints
1593 could fall in very strange places --- on pages that aren't
1594 executable, or at addresses that are not proper instruction
1595 boundaries. (We do generally let other threads run while we wait
1596 to hit the software single-step breakpoint, and they might
1597 encounter such a corrupted instruction.) One way to work around
1598 this would be to have gdbarch_displaced_step_copy_insn fully
1599 simulate the effect of PC-relative instructions (and return NULL)
1600 on architectures that use software single-stepping.
1602 In non-stop mode, we can have independent and simultaneous step
1603 requests, so more than one thread may need to simultaneously step
1604 over a breakpoint. The current implementation assumes there is
1605 only one scratch space per process. In this case, we have to
1606 serialize access to the scratch space. If thread A wants to step
1607 over a breakpoint, but we are currently waiting for some other
1608 thread to complete a displaced step, we leave thread A stopped and
1609 place it in the displaced_step_request_queue. Whenever a displaced
1610 step finishes, we pick the next thread in the queue and start a new
1611 displaced step operation on it. See displaced_step_prepare and
1612 displaced_step_finish for details. */
1614 /* Return true if THREAD is doing a displaced step. */
1617 displaced_step_in_progress_thread (thread_info
*thread
)
1619 gdb_assert (thread
!= nullptr);
1621 return thread
->displaced_step_state
.in_progress ();
1624 /* Return true if INF has a thread doing a displaced step. */
1627 displaced_step_in_progress (inferior
*inf
)
1629 return inf
->displaced_step_state
.in_progress_count
> 0;
1632 /* Return true if any thread is doing a displaced step. */
1635 displaced_step_in_progress_any_thread ()
1637 for (inferior
*inf
: all_non_exited_inferiors ())
1639 if (displaced_step_in_progress (inf
))
1647 infrun_inferior_exit (struct inferior
*inf
)
1649 inf
->displaced_step_state
.reset ();
1650 inf
->thread_waiting_for_vfork_done
= nullptr;
1654 infrun_inferior_execd (inferior
*exec_inf
, inferior
*follow_inf
)
1656 /* If some threads where was doing a displaced step in this inferior at the
1657 moment of the exec, they no longer exist. Even if the exec'ing thread
1658 doing a displaced step, we don't want to to any fixup nor restore displaced
1659 stepping buffer bytes. */
1660 follow_inf
->displaced_step_state
.reset ();
1662 for (thread_info
*thread
: follow_inf
->threads ())
1663 thread
->displaced_step_state
.reset ();
1665 /* Since an in-line step is done with everything else stopped, if there was
1666 one in progress at the time of the exec, it must have been the exec'ing
1668 clear_step_over_info ();
1670 follow_inf
->thread_waiting_for_vfork_done
= nullptr;
1673 /* If ON, and the architecture supports it, GDB will use displaced
1674 stepping to step over breakpoints. If OFF, or if the architecture
1675 doesn't support it, GDB will instead use the traditional
1676 hold-and-step approach. If AUTO (which is the default), GDB will
1677 decide which technique to use to step over breakpoints depending on
1678 whether the target works in a non-stop way (see use_displaced_stepping). */
1680 static enum auto_boolean can_use_displaced_stepping
= AUTO_BOOLEAN_AUTO
;
1683 show_can_use_displaced_stepping (struct ui_file
*file
, int from_tty
,
1684 struct cmd_list_element
*c
,
1687 if (can_use_displaced_stepping
== AUTO_BOOLEAN_AUTO
)
1689 _("Debugger's willingness to use displaced stepping "
1690 "to step over breakpoints is %s (currently %s).\n"),
1691 value
, target_is_non_stop_p () ? "on" : "off");
1694 _("Debugger's willingness to use displaced stepping "
1695 "to step over breakpoints is %s.\n"), value
);
1698 /* Return true if the gdbarch implements the required methods to use
1699 displaced stepping. */
1702 gdbarch_supports_displaced_stepping (gdbarch
*arch
)
1704 /* Only check for the presence of `prepare`. The gdbarch verification ensures
1705 that if `prepare` is provided, so is `finish`. */
1706 return gdbarch_displaced_step_prepare_p (arch
);
1709 /* Return non-zero if displaced stepping can/should be used to step
1710 over breakpoints of thread TP. */
1713 use_displaced_stepping (thread_info
*tp
)
1715 /* If the user disabled it explicitly, don't use displaced stepping. */
1716 if (can_use_displaced_stepping
== AUTO_BOOLEAN_FALSE
)
1719 /* If "auto", only use displaced stepping if the target operates in a non-stop
1721 if (can_use_displaced_stepping
== AUTO_BOOLEAN_AUTO
1722 && !target_is_non_stop_p ())
1725 gdbarch
*gdbarch
= get_thread_regcache (tp
)->arch ();
1727 /* If the architecture doesn't implement displaced stepping, don't use
1729 if (!gdbarch_supports_displaced_stepping (gdbarch
))
1732 /* If recording, don't use displaced stepping. */
1733 if (find_record_target () != nullptr)
1736 /* If displaced stepping failed before for this inferior, don't bother trying
1738 if (tp
->inf
->displaced_step_state
.failed_before
)
1744 /* Simple function wrapper around displaced_step_thread_state::reset. */
1747 displaced_step_reset (displaced_step_thread_state
*displaced
)
1749 displaced
->reset ();
1752 /* A cleanup that wraps displaced_step_reset. We use this instead of, say,
1753 SCOPE_EXIT, because it needs to be discardable with "cleanup.release ()". */
1755 using displaced_step_reset_cleanup
= FORWARD_SCOPE_EXIT (displaced_step_reset
);
1757 /* Prepare to single-step, using displaced stepping.
1759 Note that we cannot use displaced stepping when we have a signal to
1760 deliver. If we have a signal to deliver and an instruction to step
1761 over, then after the step, there will be no indication from the
1762 target whether the thread entered a signal handler or ignored the
1763 signal and stepped over the instruction successfully --- both cases
1764 result in a simple SIGTRAP. In the first case we mustn't do a
1765 fixup, and in the second case we must --- but we can't tell which.
1766 Comments in the code for 'random signals' in handle_inferior_event
1767 explain how we handle this case instead.
1769 Returns DISPLACED_STEP_PREPARE_STATUS_OK if preparing was successful -- this
1770 thread is going to be stepped now; DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
1771 if displaced stepping this thread got queued; or
1772 DISPLACED_STEP_PREPARE_STATUS_CANT if this instruction can't be displaced
1775 static displaced_step_prepare_status
1776 displaced_step_prepare_throw (thread_info
*tp
)
1778 regcache
*regcache
= get_thread_regcache (tp
);
1779 struct gdbarch
*gdbarch
= regcache
->arch ();
1780 displaced_step_thread_state
&disp_step_thread_state
1781 = tp
->displaced_step_state
;
1783 /* We should never reach this function if the architecture does not
1784 support displaced stepping. */
1785 gdb_assert (gdbarch_supports_displaced_stepping (gdbarch
));
1787 /* Nor if the thread isn't meant to step over a breakpoint. */
1788 gdb_assert (tp
->control
.trap_expected
);
1790 /* Disable range stepping while executing in the scratch pad. We
1791 want a single-step even if executing the displaced instruction in
1792 the scratch buffer lands within the stepping range (e.g., a
1794 tp
->control
.may_range_step
= 0;
1796 /* We are about to start a displaced step for this thread. If one is already
1797 in progress, something's wrong. */
1798 gdb_assert (!disp_step_thread_state
.in_progress ());
1800 if (tp
->inf
->displaced_step_state
.unavailable
)
1802 /* The gdbarch tells us it's not worth asking to try a prepare because
1803 it is likely that it will return unavailable, so don't bother asking. */
1805 displaced_debug_printf ("deferring step of %s",
1806 tp
->ptid
.to_string ().c_str ());
1808 global_thread_step_over_chain_enqueue (tp
);
1809 return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
;
1812 displaced_debug_printf ("displaced-stepping %s now",
1813 tp
->ptid
.to_string ().c_str ());
1815 scoped_restore_current_thread restore_thread
;
1817 switch_to_thread (tp
);
1819 CORE_ADDR original_pc
= regcache_read_pc (regcache
);
1820 CORE_ADDR displaced_pc
;
1822 /* Display the instruction we are going to displaced step. */
1823 if (debug_displaced
)
1825 string_file tmp_stream
;
1826 int dislen
= gdb_print_insn (gdbarch
, original_pc
, &tmp_stream
,
1831 gdb::byte_vector
insn_buf (dislen
);
1832 read_memory (original_pc
, insn_buf
.data (), insn_buf
.size ());
1834 std::string insn_bytes
= bytes_to_string (insn_buf
);
1836 displaced_debug_printf ("original insn %s: %s \t %s",
1837 paddress (gdbarch
, original_pc
),
1838 insn_bytes
.c_str (),
1839 tmp_stream
.string ().c_str ());
1842 displaced_debug_printf ("original insn %s: invalid length: %d",
1843 paddress (gdbarch
, original_pc
), dislen
);
1846 displaced_step_prepare_status status
1847 = gdbarch_displaced_step_prepare (gdbarch
, tp
, displaced_pc
);
1849 if (status
== DISPLACED_STEP_PREPARE_STATUS_CANT
)
1851 displaced_debug_printf ("failed to prepare (%s)",
1852 tp
->ptid
.to_string ().c_str ());
1854 return DISPLACED_STEP_PREPARE_STATUS_CANT
;
1856 else if (status
== DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
)
1858 /* Not enough displaced stepping resources available, defer this
1859 request by placing it the queue. */
1861 displaced_debug_printf ("not enough resources available, "
1862 "deferring step of %s",
1863 tp
->ptid
.to_string ().c_str ());
1865 global_thread_step_over_chain_enqueue (tp
);
1867 return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
;
1870 gdb_assert (status
== DISPLACED_STEP_PREPARE_STATUS_OK
);
1872 /* Save the information we need to fix things up if the step
1874 disp_step_thread_state
.set (gdbarch
);
1876 tp
->inf
->displaced_step_state
.in_progress_count
++;
1878 displaced_debug_printf ("prepared successfully thread=%s, "
1879 "original_pc=%s, displaced_pc=%s",
1880 tp
->ptid
.to_string ().c_str (),
1881 paddress (gdbarch
, original_pc
),
1882 paddress (gdbarch
, displaced_pc
));
1884 /* Display the new displaced instruction(s). */
1885 if (debug_displaced
)
1887 string_file tmp_stream
;
1888 CORE_ADDR addr
= displaced_pc
;
1890 /* If displaced stepping is going to use h/w single step then we know
1891 that the replacement instruction can only be a single instruction,
1892 in that case set the end address at the next byte.
1894 Otherwise the displaced stepping copy instruction routine could
1895 have generated multiple instructions, and all we know is that they
1896 must fit within the LEN bytes of the buffer. */
1898 = addr
+ (gdbarch_displaced_step_hw_singlestep (gdbarch
)
1899 ? 1 : gdbarch_displaced_step_buffer_length (gdbarch
));
1903 int dislen
= gdb_print_insn (gdbarch
, addr
, &tmp_stream
, nullptr);
1906 displaced_debug_printf
1907 ("replacement insn %s: invalid length: %d",
1908 paddress (gdbarch
, addr
), dislen
);
1912 gdb::byte_vector
insn_buf (dislen
);
1913 read_memory (addr
, insn_buf
.data (), insn_buf
.size ());
1915 std::string insn_bytes
= bytes_to_string (insn_buf
);
1916 std::string insn_str
= tmp_stream
.release ();
1917 displaced_debug_printf ("replacement insn %s: %s \t %s",
1918 paddress (gdbarch
, addr
),
1919 insn_bytes
.c_str (),
1925 return DISPLACED_STEP_PREPARE_STATUS_OK
;
1928 /* Wrapper for displaced_step_prepare_throw that disabled further
1929 attempts at displaced stepping if we get a memory error. */
1931 static displaced_step_prepare_status
1932 displaced_step_prepare (thread_info
*thread
)
1934 displaced_step_prepare_status status
1935 = DISPLACED_STEP_PREPARE_STATUS_CANT
;
1939 status
= displaced_step_prepare_throw (thread
);
1941 catch (const gdb_exception_error
&ex
)
1943 if (ex
.error
!= MEMORY_ERROR
1944 && ex
.error
!= NOT_SUPPORTED_ERROR
)
1947 infrun_debug_printf ("caught exception, disabling displaced stepping: %s",
1950 /* Be verbose if "set displaced-stepping" is "on", silent if
1952 if (can_use_displaced_stepping
== AUTO_BOOLEAN_TRUE
)
1954 warning (_("disabling displaced stepping: %s"),
1958 /* Disable further displaced stepping attempts. */
1959 thread
->inf
->displaced_step_state
.failed_before
= 1;
1965 /* True if any thread of TARGET that matches RESUME_PTID requires
1966 target_thread_events enabled. This assumes TARGET does not support
1967 target thread options. */
1970 any_thread_needs_target_thread_events (process_stratum_target
*target
,
1973 for (thread_info
*tp
: all_non_exited_threads (target
, resume_ptid
))
1974 if (displaced_step_in_progress_thread (tp
)
1975 || schedlock_applies (tp
)
1976 || tp
->thread_fsm () != nullptr)
1981 /* Maybe disable thread-{cloned,created,exited} event reporting after
1982 a step-over (either in-line or displaced) finishes. */
1985 update_thread_events_after_step_over (thread_info
*event_thread
,
1986 const target_waitstatus
&event_status
)
1988 if (schedlock_applies (event_thread
))
1990 /* If scheduler-locking applies, continue reporting
1991 thread-created/thread-cloned events. */
1994 else if (target_supports_set_thread_options (0))
1996 /* We can control per-thread options. Disable events for the
1997 event thread, unless the thread is gone. */
1998 if (event_status
.kind () != TARGET_WAITKIND_THREAD_EXITED
)
1999 event_thread
->set_thread_options (0);
2003 /* We can only control the target-wide target_thread_events
2004 setting. Disable it, but only if other threads in the target
2005 don't need it enabled. */
2006 process_stratum_target
*target
= event_thread
->inf
->process_target ();
2007 if (!any_thread_needs_target_thread_events (target
, minus_one_ptid
))
2008 target_thread_events (false);
2012 /* If we displaced stepped an instruction successfully, adjust registers and
2013 memory to yield the same effect the instruction would have had if we had
2014 executed it at its original address, and return
2015 DISPLACED_STEP_FINISH_STATUS_OK. If the instruction didn't complete,
2016 relocate the PC and return DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED.
2018 If the thread wasn't displaced stepping, return
2019 DISPLACED_STEP_FINISH_STATUS_OK as well. */
2021 static displaced_step_finish_status
2022 displaced_step_finish (thread_info
*event_thread
,
2023 const target_waitstatus
&event_status
)
2025 /* Check whether the parent is displaced stepping. */
2026 inferior
*parent_inf
= event_thread
->inf
;
2028 /* If this was a fork/vfork/clone, this event indicates that the
2029 displaced stepping of the syscall instruction has been done, so
2030 we perform cleanup for parent here. Also note that this
2031 operation also cleans up the child for vfork, because their pages
2034 /* If this is a fork (child gets its own address space copy) and
2035 some displaced step buffers were in use at the time of the fork,
2036 restore the displaced step buffer bytes in the child process.
2038 Architectures which support displaced stepping and fork events
2039 must supply an implementation of
2040 gdbarch_displaced_step_restore_all_in_ptid. This is not enforced
2041 during gdbarch validation to support architectures which support
2042 displaced stepping but not forks. */
2043 if (event_status
.kind () == TARGET_WAITKIND_FORKED
)
2045 struct regcache
*parent_regcache
= get_thread_regcache (event_thread
);
2046 struct gdbarch
*gdbarch
= parent_regcache
->arch ();
2048 if (gdbarch_supports_displaced_stepping (gdbarch
))
2049 gdbarch_displaced_step_restore_all_in_ptid
2050 (gdbarch
, parent_inf
, event_status
.child_ptid ());
2053 displaced_step_thread_state
*displaced
= &event_thread
->displaced_step_state
;
2055 /* Was this thread performing a displaced step? */
2056 if (!displaced
->in_progress ())
2057 return DISPLACED_STEP_FINISH_STATUS_OK
;
2059 update_thread_events_after_step_over (event_thread
, event_status
);
2061 gdb_assert (event_thread
->inf
->displaced_step_state
.in_progress_count
> 0);
2062 event_thread
->inf
->displaced_step_state
.in_progress_count
--;
2064 /* Fixup may need to read memory/registers. Switch to the thread
2065 that we're fixing up. Also, target_stopped_by_watchpoint checks
2066 the current thread, and displaced_step_restore performs ptid-dependent
2067 memory accesses using current_inferior(). */
2068 switch_to_thread (event_thread
);
2070 displaced_step_reset_cleanup
cleanup (displaced
);
2072 /* Do the fixup, and release the resources acquired to do the displaced
2074 displaced_step_finish_status status
2075 = gdbarch_displaced_step_finish (displaced
->get_original_gdbarch (),
2076 event_thread
, event_status
);
2078 if (event_status
.kind () == TARGET_WAITKIND_FORKED
2079 || event_status
.kind () == TARGET_WAITKIND_VFORKED
2080 || event_status
.kind () == TARGET_WAITKIND_THREAD_CLONED
)
2082 /* Since the vfork/fork/clone syscall instruction was executed
2083 in the scratchpad, the child's PC is also within the
2084 scratchpad. Set the child's PC to the parent's PC value,
2085 which has already been fixed up. Note: we use the parent's
2086 aspace here, although we're touching the child, because the
2087 child hasn't been added to the inferior list yet at this
2090 struct regcache
*parent_regcache
= get_thread_regcache (event_thread
);
2091 struct gdbarch
*gdbarch
= parent_regcache
->arch ();
2092 struct regcache
*child_regcache
2093 = get_thread_arch_regcache (parent_inf
, event_status
.child_ptid (),
2095 /* Read PC value of parent. */
2096 CORE_ADDR parent_pc
= regcache_read_pc (parent_regcache
);
2098 displaced_debug_printf ("write child pc from %s to %s",
2100 regcache_read_pc (child_regcache
)),
2101 paddress (gdbarch
, parent_pc
));
2103 regcache_write_pc (child_regcache
, parent_pc
);
2109 /* Data to be passed around while handling an event. This data is
2110 discarded between events. */
2111 struct execution_control_state
2113 explicit execution_control_state (thread_info
*thr
= nullptr)
2114 : ptid (thr
== nullptr ? null_ptid
: thr
->ptid
),
2119 process_stratum_target
*target
= nullptr;
2121 /* The thread that got the event, if this was a thread event; NULL
2123 struct thread_info
*event_thread
;
2125 struct target_waitstatus ws
;
2126 int stop_func_filled_in
= 0;
2127 CORE_ADDR stop_func_alt_start
= 0;
2128 CORE_ADDR stop_func_start
= 0;
2129 CORE_ADDR stop_func_end
= 0;
2130 const char *stop_func_name
= nullptr;
2131 int wait_some_more
= 0;
2133 /* True if the event thread hit the single-step breakpoint of
2134 another thread. Thus the event doesn't cause a stop, the thread
2135 needs to be single-stepped past the single-step breakpoint before
2136 we can switch back to the original stepping thread. */
2137 int hit_singlestep_breakpoint
= 0;
2140 static void keep_going_pass_signal (struct execution_control_state
*ecs
);
2141 static void prepare_to_wait (struct execution_control_state
*ecs
);
2142 static bool keep_going_stepped_thread (struct thread_info
*tp
);
2143 static step_over_what
thread_still_needs_step_over (struct thread_info
*tp
);
2145 /* Are there any pending step-over requests? If so, run all we can
2146 now and return true. Otherwise, return false. */
2149 start_step_over (void)
2151 INFRUN_SCOPED_DEBUG_ENTER_EXIT
;
2153 /* Don't start a new step-over if we already have an in-line
2154 step-over operation ongoing. */
2155 if (step_over_info_valid_p ())
2158 /* Steal the global thread step over chain. As we try to initiate displaced
2159 steps, threads will be enqueued in the global chain if no buffers are
2160 available. If we iterated on the global chain directly, we might iterate
2162 thread_step_over_list threads_to_step
2163 = std::move (global_thread_step_over_list
);
2165 infrun_debug_printf ("stealing global queue of threads to step, length = %d",
2166 thread_step_over_chain_length (threads_to_step
));
2168 bool started
= false;
2170 /* On scope exit (whatever the reason, return or exception), if there are
2171 threads left in the THREADS_TO_STEP chain, put back these threads in the
2175 if (threads_to_step
.empty ())
2176 infrun_debug_printf ("step-over queue now empty");
2179 infrun_debug_printf ("putting back %d threads to step in global queue",
2180 thread_step_over_chain_length (threads_to_step
));
2182 global_thread_step_over_chain_enqueue_chain
2183 (std::move (threads_to_step
));
2187 thread_step_over_list_safe_range range
2188 = make_thread_step_over_list_safe_range (threads_to_step
);
2190 for (thread_info
*tp
: range
)
2192 step_over_what step_what
;
2193 int must_be_in_line
;
2195 gdb_assert (!tp
->stop_requested
);
2197 if (tp
->inf
->displaced_step_state
.unavailable
)
2199 /* The arch told us to not even try preparing another displaced step
2200 for this inferior. Just leave the thread in THREADS_TO_STEP, it
2201 will get moved to the global chain on scope exit. */
2205 if (tp
->inf
->thread_waiting_for_vfork_done
!= nullptr)
2207 /* When we stop all threads, handling a vfork, any thread in the step
2208 over chain remains there. A user could also try to continue a
2209 thread stopped at a breakpoint while another thread is waiting for
2210 a vfork-done event. In any case, we don't want to start a step
2215 /* Remove thread from the THREADS_TO_STEP chain. If anything goes wrong
2216 while we try to prepare the displaced step, we don't add it back to
2217 the global step over chain. This is to avoid a thread staying in the
2218 step over chain indefinitely if something goes wrong when resuming it
2219 If the error is intermittent and it still needs a step over, it will
2220 get enqueued again when we try to resume it normally. */
2221 threads_to_step
.erase (threads_to_step
.iterator_to (*tp
));
2223 step_what
= thread_still_needs_step_over (tp
);
2224 must_be_in_line
= ((step_what
& STEP_OVER_WATCHPOINT
)
2225 || ((step_what
& STEP_OVER_BREAKPOINT
)
2226 && !use_displaced_stepping (tp
)));
2228 /* We currently stop all threads of all processes to step-over
2229 in-line. If we need to start a new in-line step-over, let
2230 any pending displaced steps finish first. */
2231 if (must_be_in_line
&& displaced_step_in_progress_any_thread ())
2233 global_thread_step_over_chain_enqueue (tp
);
2237 if (tp
->control
.trap_expected
2239 || tp
->executing ())
2241 internal_error ("[%s] has inconsistent state: "
2242 "trap_expected=%d, resumed=%d, executing=%d\n",
2243 tp
->ptid
.to_string ().c_str (),
2244 tp
->control
.trap_expected
,
2249 infrun_debug_printf ("resuming [%s] for step-over",
2250 tp
->ptid
.to_string ().c_str ());
2252 /* keep_going_pass_signal skips the step-over if the breakpoint
2253 is no longer inserted. In all-stop, we want to keep looking
2254 for a thread that needs a step-over instead of resuming TP,
2255 because we wouldn't be able to resume anything else until the
2256 target stops again. In non-stop, the resume always resumes
2257 only TP, so it's OK to let the thread resume freely. */
2258 if (!target_is_non_stop_p () && !step_what
)
2261 switch_to_thread (tp
);
2262 execution_control_state
ecs (tp
);
2263 keep_going_pass_signal (&ecs
);
2265 if (!ecs
.wait_some_more
)
2266 error (_("Command aborted."));
2268 /* If the thread's step over could not be initiated because no buffers
2269 were available, it was re-added to the global step over chain. */
2272 infrun_debug_printf ("[%s] was resumed.",
2273 tp
->ptid
.to_string ().c_str ());
2274 gdb_assert (!thread_is_in_step_over_chain (tp
));
2278 infrun_debug_printf ("[%s] was NOT resumed.",
2279 tp
->ptid
.to_string ().c_str ());
2280 gdb_assert (thread_is_in_step_over_chain (tp
));
2283 /* If we started a new in-line step-over, we're done. */
2284 if (step_over_info_valid_p ())
2286 gdb_assert (tp
->control
.trap_expected
);
2291 if (!target_is_non_stop_p ())
2293 /* On all-stop, shouldn't have resumed unless we needed a
2295 gdb_assert (tp
->control
.trap_expected
2296 || tp
->step_after_step_resume_breakpoint
);
2298 /* With remote targets (at least), in all-stop, we can't
2299 issue any further remote commands until the program stops
2305 /* Either the thread no longer needed a step-over, or a new
2306 displaced stepping sequence started. Even in the latter
2307 case, continue looking. Maybe we can also start another
2308 displaced step on a thread of other process. */
2314 /* Update global variables holding ptids to hold NEW_PTID if they were
2315 holding OLD_PTID. */
2317 infrun_thread_ptid_changed (process_stratum_target
*target
,
2318 ptid_t old_ptid
, ptid_t new_ptid
)
2320 if (inferior_ptid
== old_ptid
2321 && current_inferior ()->process_target () == target
)
2322 inferior_ptid
= new_ptid
;
/* Valid values for the "scheduler-locking" setting, and the current
   mode.  The enum array is nullptr-terminated as required by
   add_setshow_enum_cmd.  */
static const char schedlock_off[] = "off";
static const char schedlock_on[] = "on";
static const char schedlock_step[] = "step";
static const char schedlock_replay[] = "replay";
static const char *const scheduler_enums[] = {
  schedlock_off,
  schedlock_on,
  schedlock_step,
  schedlock_replay,
  nullptr
};
static const char *scheduler_mode = schedlock_replay;
2340 show_scheduler_mode (struct ui_file
*file
, int from_tty
,
2341 struct cmd_list_element
*c
, const char *value
)
2344 _("Mode for locking scheduler "
2345 "during execution is \"%s\".\n"),
2350 set_schedlock_func (const char *args
, int from_tty
, struct cmd_list_element
*c
)
2352 if (!target_can_lock_scheduler ())
2354 scheduler_mode
= schedlock_off
;
2355 error (_("Target '%s' cannot support this command."),
2356 target_shortname ());
/* True if execution commands resume all threads of all processes by
   default; otherwise, resume only threads of the current inferior
   process.  */
bool sched_multi = false;
2365 /* Try to setup for software single stepping. Return true if target_resume()
2366 should use hardware single step.
2368 GDBARCH the current gdbarch. */
2371 maybe_software_singlestep (struct gdbarch
*gdbarch
)
2373 bool hw_step
= true;
2375 if (execution_direction
== EXEC_FORWARD
2376 && gdbarch_software_single_step_p (gdbarch
))
2377 hw_step
= !insert_single_step_breakpoints (gdbarch
);
2385 user_visible_resume_ptid (int step
)
2391 /* With non-stop mode on, threads are always handled
2393 resume_ptid
= inferior_ptid
;
2395 else if ((scheduler_mode
== schedlock_on
)
2396 || (scheduler_mode
== schedlock_step
&& step
))
2398 /* User-settable 'scheduler' mode requires solo thread
2400 resume_ptid
= inferior_ptid
;
2402 else if ((scheduler_mode
== schedlock_replay
)
2403 && target_record_will_replay (minus_one_ptid
, execution_direction
))
2405 /* User-settable 'scheduler' mode requires solo thread resume in replay
2407 resume_ptid
= inferior_ptid
;
2409 else if (inferior_ptid
!= null_ptid
2410 && inferior_thread ()->control
.in_cond_eval
)
2412 /* The inferior thread is evaluating a BP condition. Other threads
2413 might be stopped or running and we do not want to change their
2414 state, thus, resume only the current thread. */
2415 resume_ptid
= inferior_ptid
;
2417 else if (!sched_multi
&& target_supports_multi_process ())
2419 /* Resume all threads of the current process (and none of other
2421 resume_ptid
= ptid_t (inferior_ptid
.pid ());
2425 /* Resume all threads of all processes. */
2426 resume_ptid
= RESUME_ALL
;
2434 process_stratum_target
*
2435 user_visible_resume_target (ptid_t resume_ptid
)
2437 return (resume_ptid
== minus_one_ptid
&& sched_multi
2439 : current_inferior ()->process_target ());
2442 /* Find a thread from the inferiors that we'll resume that is waiting
2443 for a vfork-done event. */
2445 static thread_info
*
2446 find_thread_waiting_for_vfork_done ()
2448 gdb_assert (!target_is_non_stop_p ());
2452 for (inferior
*inf
: all_non_exited_inferiors ())
2453 if (inf
->thread_waiting_for_vfork_done
!= nullptr)
2454 return inf
->thread_waiting_for_vfork_done
;
2458 inferior
*cur_inf
= current_inferior ();
2459 if (cur_inf
->thread_waiting_for_vfork_done
!= nullptr)
2460 return cur_inf
->thread_waiting_for_vfork_done
;
2465 /* Return a ptid representing the set of threads that we will resume,
2466 in the perspective of the target, assuming run control handling
2467 does not require leaving some threads stopped (e.g., stepping past
2468 breakpoint). USER_STEP indicates whether we're about to start the
2469 target for a stepping command. */
2472 internal_resume_ptid (int user_step
)
2474 /* In non-stop, we always control threads individually. Note that
2475 the target may always work in non-stop mode even with "set
2476 non-stop off", in which case user_visible_resume_ptid could
2477 return a wildcard ptid. */
2478 if (target_is_non_stop_p ())
2479 return inferior_ptid
;
2481 /* The rest of the function assumes non-stop==off and
2482 target-non-stop==off.
2484 If a thread is waiting for a vfork-done event, it means breakpoints are out
2485 for this inferior (well, program space in fact). We don't want to resume
2486 any thread other than the one waiting for vfork done, otherwise these other
2487 threads could miss breakpoints. So if a thread in the resumption set is
2488 waiting for a vfork-done event, resume only that thread.
2490 The resumption set width depends on whether schedule-multiple is on or off.
2492 Note that if the target_resume interface was more flexible, we could be
2493 smarter here when schedule-multiple is on. For example, imagine 3
2494 inferiors with 2 threads each (1.1, 1.2, 2.1, 2.2, 3.1 and 3.2). Threads
2495 2.1 and 3.2 are both waiting for a vfork-done event. Then we could ask the
2496 target(s) to resume:
2498 - All threads of inferior 1
2502 Since we don't have that flexibility (we can only pass one ptid), just
2503 resume the first thread waiting for a vfork-done event we find (e.g. thread
2505 thread_info
*thr
= find_thread_waiting_for_vfork_done ();
2508 /* If we have a thread that is waiting for a vfork-done event,
2509 then we should have switched to it earlier. Calling
2510 target_resume with thread scope is only possible when the
2511 current thread matches the thread scope. */
2512 gdb_assert (thr
->ptid
== inferior_ptid
);
2513 gdb_assert (thr
->inf
->process_target ()
2514 == inferior_thread ()->inf
->process_target ());
2518 return user_visible_resume_ptid (user_step
);
2521 /* Wrapper for target_resume, that handles infrun-specific
2525 do_target_resume (ptid_t resume_ptid
, bool step
, enum gdb_signal sig
)
2527 struct thread_info
*tp
= inferior_thread ();
2529 gdb_assert (!tp
->stop_requested
);
2531 /* Install inferior's terminal modes. */
2532 target_terminal::inferior ();
2534 /* Avoid confusing the next resume, if the next stop/resume
2535 happens to apply to another thread. */
2536 tp
->set_stop_signal (GDB_SIGNAL_0
);
2538 /* Advise target which signals may be handled silently.
2540 If we have removed breakpoints because we are stepping over one
2541 in-line (in any thread), we need to receive all signals to avoid
2542 accidentally skipping a breakpoint during execution of a signal
2545 Likewise if we're displaced stepping, otherwise a trap for a
2546 breakpoint in a signal handler might be confused with the
2547 displaced step finishing. We don't make the displaced_step_finish
2548 step distinguish the cases instead, because:
2550 - a backtrace while stopped in the signal handler would show the
2551 scratch pad as frame older than the signal handler, instead of
2552 the real mainline code.
2554 - when the thread is later resumed, the signal handler would
2555 return to the scratch pad area, which would no longer be
2557 if (step_over_info_valid_p ()
2558 || displaced_step_in_progress (tp
->inf
))
2559 target_pass_signals ({});
2561 target_pass_signals (signal_pass
);
2563 /* Request that the target report thread-{created,cloned,exited}
2564 events in the following situations:
2566 - If we are performing an in-line step-over-breakpoint, then we
2567 will remove a breakpoint from the target and only run the
2568 current thread. We don't want any new thread (spawned by the
2569 step) to start running, as it might miss the breakpoint. We
2570 need to clear the step-over state if the stepped thread exits,
2571 so we also enable thread-exit events.
2573 - If we are stepping over a breakpoint out of line (displaced
2574 stepping) then we won't remove a breakpoint from the target,
2575 but, if the step spawns a new clone thread, then we will need
2576 to fixup the $pc address in the clone child too, so we need it
2577 to start stopped. We need to release the displaced stepping
2578 buffer if the stepped thread exits, so we also enable
2581 - If scheduler-locking applies, threads that the current thread
2582 spawns should remain halted. It's not strictly necessary to
2583 enable thread-exit events in this case, but it doesn't hurt.
2585 if (step_over_info_valid_p ()
2586 || displaced_step_in_progress_thread (tp
)
2587 || schedlock_applies (tp
))
2589 gdb_thread_options options
2590 = GDB_THREAD_OPTION_CLONE
| GDB_THREAD_OPTION_EXIT
;
2591 if (target_supports_set_thread_options (options
))
2592 tp
->set_thread_options (options
);
2594 target_thread_events (true);
2596 else if (tp
->thread_fsm () != nullptr)
2598 gdb_thread_options options
= GDB_THREAD_OPTION_EXIT
;
2599 if (target_supports_set_thread_options (options
))
2600 tp
->set_thread_options (options
);
2602 target_thread_events (true);
2606 if (target_supports_set_thread_options (0))
2607 tp
->set_thread_options (0);
2610 process_stratum_target
*resume_target
= tp
->inf
->process_target ();
2611 if (!any_thread_needs_target_thread_events (resume_target
,
2613 target_thread_events (false);
2617 /* If we're resuming more than one thread simultaneously, then any
2618 thread other than the leader is being set to run free. Clear any
2619 previous thread option for those threads. */
2620 if (resume_ptid
!= inferior_ptid
&& target_supports_set_thread_options (0))
2622 process_stratum_target
*resume_target
= tp
->inf
->process_target ();
2623 for (thread_info
*thr_iter
: all_non_exited_threads (resume_target
,
2626 thr_iter
->set_thread_options (0);
2629 infrun_debug_printf ("resume_ptid=%s, step=%d, sig=%s",
2630 resume_ptid
.to_string ().c_str (),
2631 step
, gdb_signal_to_symbol_string (sig
));
2633 target_resume (resume_ptid
, step
, sig
);
2636 /* Resume the inferior. SIG is the signal to give the inferior
2637 (GDB_SIGNAL_0 for none). Note: don't call this directly; instead
2638 call 'resume', which handles exceptions. */
2641 resume_1 (enum gdb_signal sig
)
2643 struct thread_info
*tp
= inferior_thread ();
2644 regcache
*regcache
= get_thread_regcache (tp
);
2645 struct gdbarch
*gdbarch
= regcache
->arch ();
2647 /* This represents the user's step vs continue request. When
2648 deciding whether "set scheduler-locking step" applies, it's the
2649 user's intention that counts. */
2650 const int user_step
= tp
->control
.stepping_command
;
2651 /* This represents what we'll actually request the target to do.
2652 This can decay from a step to a continue, if e.g., we need to
2653 implement single-stepping with breakpoints (software
2657 gdb_assert (!tp
->stop_requested
);
2658 gdb_assert (!thread_is_in_step_over_chain (tp
));
2660 if (tp
->has_pending_waitstatus ())
2663 ("thread %s has pending wait "
2664 "status %s (currently_stepping=%d).",
2665 tp
->ptid
.to_string ().c_str (),
2666 tp
->pending_waitstatus ().to_string ().c_str (),
2667 currently_stepping (tp
));
2669 tp
->inf
->process_target ()->threads_executing
= true;
2670 tp
->set_resumed (true);
2672 /* FIXME: What should we do if we are supposed to resume this
2673 thread with a signal? Maybe we should maintain a queue of
2674 pending signals to deliver. */
2675 if (sig
!= GDB_SIGNAL_0
)
2677 warning (_("Couldn't deliver signal %s to %s."),
2678 gdb_signal_to_name (sig
),
2679 tp
->ptid
.to_string ().c_str ());
2682 tp
->set_stop_signal (GDB_SIGNAL_0
);
2684 if (target_can_async_p ())
2686 target_async (true);
2687 /* Tell the event loop we have an event to process. */
2688 mark_async_event_handler (infrun_async_inferior_event_token
);
2693 tp
->stepped_breakpoint
= 0;
2695 /* Depends on stepped_breakpoint. */
2696 step
= currently_stepping (tp
);
2698 if (current_inferior ()->thread_waiting_for_vfork_done
!= nullptr)
2700 /* Don't try to single-step a vfork parent that is waiting for
2701 the child to get out of the shared memory region (by exec'ing
2702 or exiting). This is particularly important on software
2703 single-step archs, as the child process would trip on the
2704 software single step breakpoint inserted for the parent
2705 process. Since the parent will not actually execute any
2706 instruction until the child is out of the shared region (such
2707 are vfork's semantics), it is safe to simply continue it.
2708 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
2709 the parent, and tell it to `keep_going', which automatically
2710 re-sets it stepping. */
2711 infrun_debug_printf ("resume : clear step");
2715 CORE_ADDR pc
= regcache_read_pc (regcache
);
2717 infrun_debug_printf ("step=%d, signal=%s, trap_expected=%d, "
2718 "current thread [%s] at %s",
2719 step
, gdb_signal_to_symbol_string (sig
),
2720 tp
->control
.trap_expected
,
2721 inferior_ptid
.to_string ().c_str (),
2722 paddress (gdbarch
, pc
));
2724 const address_space
*aspace
= tp
->inf
->aspace
.get ();
2726 /* Normally, by the time we reach `resume', the breakpoints are either
2727 removed or inserted, as appropriate. The exception is if we're sitting
2728 at a permanent breakpoint; we need to step over it, but permanent
2729 breakpoints can't be removed. So we have to test for it here. */
2730 if (breakpoint_here_p (aspace
, pc
) == permanent_breakpoint_here
)
2732 if (sig
!= GDB_SIGNAL_0
)
2734 /* We have a signal to pass to the inferior. The resume
2735 may, or may not take us to the signal handler. If this
2736 is a step, we'll need to stop in the signal handler, if
2737 there's one, (if the target supports stepping into
2738 handlers), or in the next mainline instruction, if
2739 there's no handler. If this is a continue, we need to be
2740 sure to run the handler with all breakpoints inserted.
2741 In all cases, set a breakpoint at the current address
2742 (where the handler returns to), and once that breakpoint
2743 is hit, resume skipping the permanent breakpoint. If
2744 that breakpoint isn't hit, then we've stepped into the
2745 signal handler (or hit some other event). We'll delete
2746 the step-resume breakpoint then. */
2748 infrun_debug_printf ("resume: skipping permanent breakpoint, "
2749 "deliver signal first");
2751 clear_step_over_info ();
2752 tp
->control
.trap_expected
= 0;
2754 if (tp
->control
.step_resume_breakpoint
== nullptr)
2756 /* Set a "high-priority" step-resume, as we don't want
2757 user breakpoints at PC to trigger (again) when this
2759 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2760 gdb_assert (tp
->control
.step_resume_breakpoint
->first_loc ()
2763 tp
->step_after_step_resume_breakpoint
= step
;
2766 insert_breakpoints ();
2770 /* There's no signal to pass, we can go ahead and skip the
2771 permanent breakpoint manually. */
2772 infrun_debug_printf ("skipping permanent breakpoint");
2773 gdbarch_skip_permanent_breakpoint (gdbarch
, regcache
);
2774 /* Update pc to reflect the new address from which we will
2775 execute instructions. */
2776 pc
= regcache_read_pc (regcache
);
2780 /* We've already advanced the PC, so the stepping part
2781 is done. Now we need to arrange for a trap to be
2782 reported to handle_inferior_event. Set a breakpoint
2783 at the current PC, and run to it. Don't update
2784 prev_pc, because if we end in
2785 switch_back_to_stepped_thread, we want the "expected
2786 thread advanced also" branch to be taken. IOW, we
2787 don't want this thread to step further from PC
2789 gdb_assert (!step_over_info_valid_p ());
2790 insert_single_step_breakpoint (gdbarch
, aspace
, pc
);
2791 insert_breakpoints ();
2793 resume_ptid
= internal_resume_ptid (user_step
);
2794 do_target_resume (resume_ptid
, false, GDB_SIGNAL_0
);
2795 tp
->set_resumed (true);
2801 /* If we have a breakpoint to step over, make sure to do a single
2802 step only. Same if we have software watchpoints. */
2803 if (tp
->control
.trap_expected
|| bpstat_should_step ())
2804 tp
->control
.may_range_step
= 0;
2806 /* If displaced stepping is enabled, step over breakpoints by executing a
2807 copy of the instruction at a different address.
2809 We can't use displaced stepping when we have a signal to deliver;
2810 the comments for displaced_step_prepare explain why. The
2811 comments in the handle_inferior event for dealing with 'random
2812 signals' explain what we do instead.
2814 We can't use displaced stepping when we are waiting for vfork_done
2815 event, displaced stepping breaks the vfork child similarly as single
2816 step software breakpoint. */
2817 if (tp
->control
.trap_expected
2818 && use_displaced_stepping (tp
)
2819 && !step_over_info_valid_p ()
2820 && sig
== GDB_SIGNAL_0
2821 && current_inferior ()->thread_waiting_for_vfork_done
== nullptr)
2823 displaced_step_prepare_status prepare_status
2824 = displaced_step_prepare (tp
);
2826 if (prepare_status
== DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
)
2828 infrun_debug_printf ("Got placed in step-over queue");
2830 tp
->control
.trap_expected
= 0;
2833 else if (prepare_status
== DISPLACED_STEP_PREPARE_STATUS_CANT
)
2835 /* Fallback to stepping over the breakpoint in-line. */
2837 if (target_is_non_stop_p ())
2838 stop_all_threads ("displaced stepping falling back on inline stepping");
2840 set_step_over_info (aspace
, regcache_read_pc (regcache
), 0,
2843 step
= maybe_software_singlestep (gdbarch
);
2845 insert_breakpoints ();
2847 else if (prepare_status
== DISPLACED_STEP_PREPARE_STATUS_OK
)
2849 /* Update pc to reflect the new address from which we will
2850 execute instructions due to displaced stepping. */
2851 pc
= regcache_read_pc (get_thread_regcache (tp
));
2853 step
= gdbarch_displaced_step_hw_singlestep (gdbarch
);
2856 gdb_assert_not_reached ("Invalid displaced_step_prepare_status "
2860 /* Do we need to do it the hard way, w/temp breakpoints? */
2862 step
= maybe_software_singlestep (gdbarch
);
2864 /* Currently, our software single-step implementation leads to different
2865 results than hardware single-stepping in one situation: when stepping
2866 into delivering a signal which has an associated signal handler,
2867 hardware single-step will stop at the first instruction of the handler,
2868 while software single-step will simply skip execution of the handler.
2870 For now, this difference in behavior is accepted since there is no
2871 easy way to actually implement single-stepping into a signal handler
2872 without kernel support.
2874 However, there is one scenario where this difference leads to follow-on
2875 problems: if we're stepping off a breakpoint by removing all breakpoints
2876 and then single-stepping. In this case, the software single-step
2877 behavior means that even if there is a *breakpoint* in the signal
2878 handler, GDB still would not stop.
2880 Fortunately, we can at least fix this particular issue. We detect
2881 here the case where we are about to deliver a signal while software
2882 single-stepping with breakpoints removed. In this situation, we
2883 revert the decisions to remove all breakpoints and insert single-
2884 step breakpoints, and instead we install a step-resume breakpoint
2885 at the current address, deliver the signal without stepping, and
2886 once we arrive back at the step-resume breakpoint, actually step
2887 over the breakpoint we originally wanted to step over. */
2888 if (thread_has_single_step_breakpoints_set (tp
)
2889 && sig
!= GDB_SIGNAL_0
2890 && step_over_info_valid_p ())
2892 /* If we have nested signals or a pending signal is delivered
2893 immediately after a handler returns, might already have
2894 a step-resume breakpoint set on the earlier handler. We cannot
2895 set another step-resume breakpoint; just continue on until the
2896 original breakpoint is hit. */
2897 if (tp
->control
.step_resume_breakpoint
== nullptr)
2899 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2900 tp
->step_after_step_resume_breakpoint
= 1;
2903 delete_single_step_breakpoints (tp
);
2905 clear_step_over_info ();
2906 tp
->control
.trap_expected
= 0;
2908 insert_breakpoints ();
2911 /* If STEP is set, it's a request to use hardware stepping
2912 facilities. But in that case, we should never
2913 use singlestep breakpoint. */
2914 gdb_assert (!(thread_has_single_step_breakpoints_set (tp
) && step
));
2916 /* Decide the set of threads to ask the target to resume. */
2917 if (tp
->control
.trap_expected
)
2919 /* We're allowing a thread to run past a breakpoint it has
2920 hit, either by single-stepping the thread with the breakpoint
2921 removed, or by displaced stepping, with the breakpoint inserted.
2922 In the former case, we need to single-step only this thread,
2923 and keep others stopped, as they can miss this breakpoint if
2924 allowed to run. That's not really a problem for displaced
2925 stepping, but, we still keep other threads stopped, in case
2926 another thread is also stopped for a breakpoint waiting for
2927 its turn in the displaced stepping queue. */
2928 resume_ptid
= inferior_ptid
;
2931 resume_ptid
= internal_resume_ptid (user_step
);
2933 if (execution_direction
!= EXEC_REVERSE
2934 && step
&& breakpoint_inserted_here_p (aspace
, pc
))
2936 /* There are two cases where we currently need to step a
2937 breakpoint instruction when we have a signal to deliver:
2939 - See handle_signal_stop where we handle random signals that
2940 could take out us out of the stepping range. Normally, in
2941 that case we end up continuing (instead of stepping) over the
2942 signal handler with a breakpoint at PC, but there are cases
2943 where we should _always_ single-step, even if we have a
2944 step-resume breakpoint, like when a software watchpoint is
2945 set. Assuming single-stepping and delivering a signal at the
2946 same time would takes us to the signal handler, then we could
2947 have removed the breakpoint at PC to step over it. However,
2948 some hardware step targets (like e.g., Mac OS) can't step
2949 into signal handlers, and for those, we need to leave the
2950 breakpoint at PC inserted, as otherwise if the handler
2951 recurses and executes PC again, it'll miss the breakpoint.
2952 So we leave the breakpoint inserted anyway, but we need to
2953 record that we tried to step a breakpoint instruction, so
2954 that adjust_pc_after_break doesn't end up confused.
2956 - In non-stop if we insert a breakpoint (e.g., a step-resume)
2957 in one thread after another thread that was stepping had been
2958 momentarily paused for a step-over. When we re-resume the
2959 stepping thread, it may be resumed from that address with a
2960 breakpoint that hasn't trapped yet. Seen with
2961 gdb.threads/non-stop-fair-events.exp, on targets that don't
2962 do displaced stepping. */
2964 infrun_debug_printf ("resume: [%s] stepped breakpoint",
2965 tp
->ptid
.to_string ().c_str ());
2967 tp
->stepped_breakpoint
= 1;
2969 /* Most targets can step a breakpoint instruction, thus
2970 executing it normally. But if this one cannot, just
2971 continue and we will hit it anyway. */
2972 if (gdbarch_cannot_step_breakpoint (gdbarch
))
2976 if (tp
->control
.may_range_step
)
2978 /* If we're resuming a thread with the PC out of the step
2979 range, then we're doing some nested/finer run control
2980 operation, like stepping the thread out of the dynamic
2981 linker or the displaced stepping scratch pad. We
2982 shouldn't have allowed a range step then. */
2983 gdb_assert (pc_in_thread_step_range (pc
, tp
));
2986 do_target_resume (resume_ptid
, step
, sig
);
2987 tp
->set_resumed (true);
2990 /* Resume the inferior. SIG is the signal to give the inferior
2991 (GDB_SIGNAL_0 for none). This is a wrapper around 'resume_1' that
2992 rolls back state on error. */
2995 resume (gdb_signal sig
)
3001 catch (const gdb_exception
&ex
)
3003 /* If resuming is being aborted for any reason, delete any
3004 single-step breakpoint resume_1 may have created, to avoid
3005 confusing the following resumption, and to avoid leaving
3006 single-step breakpoints perturbing other threads, in case
3007 we're running in non-stop mode. */
3008 if (inferior_ptid
!= null_ptid
)
3009 delete_single_step_breakpoints (inferior_thread ());
3019 /* Counter that tracks number of user visible stops. This can be used
3020 to tell whether a command has proceeded the inferior past the
3021 current location. This allows e.g., inferior function calls in
3022 breakpoint commands to not interrupt the command list. When the
3023 call finishes successfully, the inferior is standing at the same
3024 breakpoint as if nothing happened (and so we don't call
3026 static ULONGEST current_stop_id
;
3033 return current_stop_id
;
3036 /* Called when we report a user visible stop. */
3044 /* Clear out all variables saying what to do when inferior is continued.
3045 First do this, then set the ones you want, then call `proceed'. */
3048 clear_proceed_status_thread (struct thread_info
*tp
)
3050 infrun_debug_printf ("%s", tp
->ptid
.to_string ().c_str ());
3052 /* If we're starting a new sequence, then the previous finished
3053 single-step is no longer relevant. */
3054 if (tp
->has_pending_waitstatus ())
3056 if (tp
->stop_reason () == TARGET_STOPPED_BY_SINGLE_STEP
)
3058 infrun_debug_printf ("pending event of %s was a finished step. "
3060 tp
->ptid
.to_string ().c_str ());
3062 tp
->clear_pending_waitstatus ();
3063 tp
->set_stop_reason (TARGET_STOPPED_BY_NO_REASON
);
3068 ("thread %s has pending wait status %s (currently_stepping=%d).",
3069 tp
->ptid
.to_string ().c_str (),
3070 tp
->pending_waitstatus ().to_string ().c_str (),
3071 currently_stepping (tp
));
3075 /* If this signal should not be seen by program, give it zero.
3076 Used for debugging signals. */
3077 if (!signal_pass_state (tp
->stop_signal ()))
3078 tp
->set_stop_signal (GDB_SIGNAL_0
);
3080 tp
->release_thread_fsm ();
3082 tp
->control
.trap_expected
= 0;
3083 tp
->control
.step_range_start
= 0;
3084 tp
->control
.step_range_end
= 0;
3085 tp
->control
.may_range_step
= 0;
3086 tp
->control
.step_frame_id
= null_frame_id
;
3087 tp
->control
.step_stack_frame_id
= null_frame_id
;
3088 tp
->control
.step_over_calls
= STEP_OVER_UNDEBUGGABLE
;
3089 tp
->control
.step_start_function
= nullptr;
3090 tp
->stop_requested
= 0;
3092 tp
->control
.stop_step
= 0;
3094 tp
->control
.proceed_to_finish
= 0;
3096 tp
->control
.stepping_command
= 0;
3098 /* Discard any remaining commands or status from previous stop. */
3099 bpstat_clear (&tp
->control
.stop_bpstat
);
3102 /* Notify the current interpreter and observers that the target is about to
3106 notify_about_to_proceed ()
3108 top_level_interpreter ()->on_about_to_proceed ();
3109 gdb::observers::about_to_proceed
.notify ();
3113 clear_proceed_status (int step
)
3115 /* With scheduler-locking replay, stop replaying other threads if we're
3116 not replaying the user-visible resume ptid.
3118 This is a convenience feature to not require the user to explicitly
3119 stop replaying the other threads. We're assuming that the user's
3120 intent is to resume tracing the recorded process. */
3121 if (!non_stop
&& scheduler_mode
== schedlock_replay
3122 && target_record_is_replaying (minus_one_ptid
)
3123 && !target_record_will_replay (user_visible_resume_ptid (step
),
3124 execution_direction
))
3125 target_record_stop_replaying ();
3127 if (!non_stop
&& inferior_ptid
!= null_ptid
)
3129 ptid_t resume_ptid
= user_visible_resume_ptid (step
);
3130 process_stratum_target
*resume_target
3131 = user_visible_resume_target (resume_ptid
);
3133 /* In all-stop mode, delete the per-thread status of all threads
3134 we're about to resume, implicitly and explicitly. */
3135 for (thread_info
*tp
: all_non_exited_threads (resume_target
, resume_ptid
))
3136 clear_proceed_status_thread (tp
);
3139 if (inferior_ptid
!= null_ptid
)
3141 struct inferior
*inferior
;
3145 /* If in non-stop mode, only delete the per-thread status of
3146 the current thread. */
3147 clear_proceed_status_thread (inferior_thread ());
3150 inferior
= current_inferior ();
3151 inferior
->control
.stop_soon
= NO_STOP_QUIETLY
;
3154 notify_about_to_proceed ();
3157 /* Returns true if TP is still stopped at a breakpoint that needs
3158 stepping-over in order to make progress. If the breakpoint is gone
3159 meanwhile, we can skip the whole step-over dance. */
3162 thread_still_needs_step_over_bp (struct thread_info
*tp
)
3164 if (tp
->stepping_over_breakpoint
)
3166 struct regcache
*regcache
= get_thread_regcache (tp
);
3168 if (breakpoint_here_p (tp
->inf
->aspace
.get (),
3169 regcache_read_pc (regcache
))
3170 == ordinary_breakpoint_here
)
3173 tp
->stepping_over_breakpoint
= 0;
3179 /* Check whether thread TP still needs to start a step-over in order
3180 to make progress when resumed. Returns an bitwise or of enum
3181 step_over_what bits, indicating what needs to be stepped over. */
3183 static step_over_what
3184 thread_still_needs_step_over (struct thread_info
*tp
)
3186 step_over_what what
= 0;
3188 if (thread_still_needs_step_over_bp (tp
))
3189 what
|= STEP_OVER_BREAKPOINT
;
3191 if (tp
->stepping_over_watchpoint
3192 && !target_have_steppable_watchpoint ())
3193 what
|= STEP_OVER_WATCHPOINT
;
3198 /* Returns true if scheduler locking applies. STEP indicates whether
3199 we're about to do a step/next-like command to a thread. */
3202 schedlock_applies (struct thread_info
*tp
)
3204 return (scheduler_mode
== schedlock_on
3205 || (scheduler_mode
== schedlock_step
3206 && tp
->control
.stepping_command
)
3207 || (scheduler_mode
== schedlock_replay
3208 && target_record_will_replay (minus_one_ptid
,
3209 execution_direction
)));
3212 /* When FORCE_P is false, set process_stratum_target::COMMIT_RESUMED_STATE
3213 in all target stacks that have threads executing and don't have threads
3214 with pending events.
3216 When FORCE_P is true, set process_stratum_target::COMMIT_RESUMED_STATE
3217 in all target stacks that have threads executing regardless of whether
3218 there are pending events or not.
3220 Passing FORCE_P as false makes sense when GDB is going to wait for
3221 events from all threads and will therefore spot the pending events.
3222 However, if GDB is only going to wait for events from select threads
3223 (i.e. when performing an inferior call) then a pending event on some
3224 other thread will not be spotted, and if we fail to commit the resume
3225 state for the thread performing the inferior call, then the inferior
3226 call will never complete (or even start). */
3229 maybe_set_commit_resumed_all_targets (bool force_p
)
3231 scoped_restore_current_thread restore_thread
;
3233 for (inferior
*inf
: all_non_exited_inferiors ())
3235 process_stratum_target
*proc_target
= inf
->process_target ();
3237 if (proc_target
->commit_resumed_state
)
3239 /* We already set this in a previous iteration, via another
3240 inferior sharing the process_stratum target. */
3244 /* If the target has no resumed threads, it would be useless to
3245 ask it to commit the resumed threads. */
3246 if (!proc_target
->threads_executing
)
3248 infrun_debug_printf ("not requesting commit-resumed for target "
3249 "%s, no resumed threads",
3250 proc_target
->shortname ());
3254 /* As an optimization, if a thread from this target has some
3255 status to report, handle it before requiring the target to
3256 commit its resumed threads: handling the status might lead to
3257 resuming more threads. */
3258 if (!force_p
&& proc_target
->has_resumed_with_pending_wait_status ())
3260 infrun_debug_printf ("not requesting commit-resumed for target %s, a"
3261 " thread has a pending waitstatus",
3262 proc_target
->shortname ());
3266 switch_to_inferior_no_thread (inf
);
3268 if (!force_p
&& target_has_pending_events ())
3270 infrun_debug_printf ("not requesting commit-resumed for target %s, "
3271 "target has pending events",
3272 proc_target
->shortname ());
3276 infrun_debug_printf ("enabling commit-resumed for target %s",
3277 proc_target
->shortname ());
3279 proc_target
->commit_resumed_state
= true;
3286 maybe_call_commit_resumed_all_targets ()
3288 scoped_restore_current_thread restore_thread
;
3290 for (inferior
*inf
: all_non_exited_inferiors ())
3292 process_stratum_target
*proc_target
= inf
->process_target ();
3294 if (!proc_target
->commit_resumed_state
)
3297 switch_to_inferior_no_thread (inf
);
3299 infrun_debug_printf ("calling commit_resumed for target %s",
3300 proc_target
->shortname());
3302 target_commit_resumed ();
3306 /* To track nesting of scoped_disable_commit_resumed objects, ensuring
3307 that only the outermost one attempts to re-enable
3309 static bool enable_commit_resumed
= true;
3313 scoped_disable_commit_resumed::scoped_disable_commit_resumed
3314 (const char *reason
)
3315 : m_reason (reason
),
3316 m_prev_enable_commit_resumed (enable_commit_resumed
)
3318 infrun_debug_printf ("reason=%s", m_reason
);
3320 enable_commit_resumed
= false;
3322 for (inferior
*inf
: all_non_exited_inferiors ())
3324 process_stratum_target
*proc_target
= inf
->process_target ();
3326 if (m_prev_enable_commit_resumed
)
3328 /* This is the outermost instance: force all
3329 COMMIT_RESUMED_STATE to false. */
3330 proc_target
->commit_resumed_state
= false;
3334 /* This is not the outermost instance, we expect
3335 COMMIT_RESUMED_STATE to have been cleared by the
3336 outermost instance. */
3337 gdb_assert (!proc_target
->commit_resumed_state
);
3345 scoped_disable_commit_resumed::reset ()
3351 infrun_debug_printf ("reason=%s", m_reason
);
3353 gdb_assert (!enable_commit_resumed
);
3355 enable_commit_resumed
= m_prev_enable_commit_resumed
;
3357 if (m_prev_enable_commit_resumed
)
3359 /* This is the outermost instance, re-enable
3360 COMMIT_RESUMED_STATE on the targets where it's possible. */
3361 maybe_set_commit_resumed_all_targets (false);
3365 /* This is not the outermost instance, we expect
3366 COMMIT_RESUMED_STATE to still be false. */
3367 for (inferior
*inf
: all_non_exited_inferiors ())
3369 process_stratum_target
*proc_target
= inf
->process_target ();
3370 gdb_assert (!proc_target
->commit_resumed_state
);
3377 scoped_disable_commit_resumed::~scoped_disable_commit_resumed ()
3385 scoped_disable_commit_resumed::reset_and_commit ()
3388 maybe_call_commit_resumed_all_targets ();
3393 scoped_enable_commit_resumed::scoped_enable_commit_resumed
3394 (const char *reason
, bool force_p
)
3395 : m_reason (reason
),
3396 m_prev_enable_commit_resumed (enable_commit_resumed
)
3398 infrun_debug_printf ("reason=%s", m_reason
);
3400 if (!enable_commit_resumed
)
3402 enable_commit_resumed
= true;
3404 /* Re-enable COMMIT_RESUMED_STATE on the targets where it's
3406 maybe_set_commit_resumed_all_targets (force_p
);
3408 maybe_call_commit_resumed_all_targets ();
3414 scoped_enable_commit_resumed::~scoped_enable_commit_resumed ()
3416 infrun_debug_printf ("reason=%s", m_reason
);
3418 gdb_assert (enable_commit_resumed
);
3420 enable_commit_resumed
= m_prev_enable_commit_resumed
;
3422 if (!enable_commit_resumed
)
3424 /* Force all COMMIT_RESUMED_STATE back to false. */
3425 for (inferior
*inf
: all_non_exited_inferiors ())
3427 process_stratum_target
*proc_target
= inf
->process_target ();
3428 proc_target
->commit_resumed_state
= false;
3433 /* Check that all the targets we're about to resume are in non-stop
3434 mode. Ideally, we'd only care whether all targets support
3435 target-async, but we're not there yet. E.g., stop_all_threads
3436 doesn't know how to handle all-stop targets. Also, the remote
3437 protocol in all-stop mode is synchronous, irrespective of
3438 target-async, which means that things like a breakpoint re-set
3439 triggered by one target would try to read memory from all targets
3443 check_multi_target_resumption (process_stratum_target
*resume_target
)
3445 if (!non_stop
&& resume_target
== nullptr)
3447 scoped_restore_current_thread restore_thread
;
3449 /* This is used to track whether we're resuming more than one
3451 process_stratum_target
*first_connection
= nullptr;
3453 /* The first inferior we see with a target that does not work in
3454 always-non-stop mode. */
3455 inferior
*first_not_non_stop
= nullptr;
3457 for (inferior
*inf
: all_non_exited_inferiors ())
3459 switch_to_inferior_no_thread (inf
);
3461 if (!target_has_execution ())
3464 process_stratum_target
*proc_target
3465 = current_inferior ()->process_target();
3467 if (!target_is_non_stop_p ())
3468 first_not_non_stop
= inf
;
3470 if (first_connection
== nullptr)
3471 first_connection
= proc_target
;
3472 else if (first_connection
!= proc_target
3473 && first_not_non_stop
!= nullptr)
3475 switch_to_inferior_no_thread (first_not_non_stop
);
3477 proc_target
= current_inferior ()->process_target();
3479 error (_("Connection %d (%s) does not support "
3480 "multi-target resumption."),
3481 proc_target
->connection_number
,
3482 make_target_connection_string (proc_target
).c_str ());
3488 /* Helper function for `proceed`. Check if thread TP is suitable for
3489 resuming, and, if it is, switch to the thread and call
3490 `keep_going_pass_signal`. If TP is not suitable for resuming then this
3491 function will just return without switching threads. */
3494 proceed_resume_thread_checked (thread_info
*tp
)
3496 if (!tp
->inf
->has_execution ())
3498 infrun_debug_printf ("[%s] target has no execution",
3499 tp
->ptid
.to_string ().c_str ());
3505 infrun_debug_printf ("[%s] resumed",
3506 tp
->ptid
.to_string ().c_str ());
3507 gdb_assert (tp
->executing () || tp
->has_pending_waitstatus ());
3511 if (thread_is_in_step_over_chain (tp
))
3513 infrun_debug_printf ("[%s] needs step-over",
3514 tp
->ptid
.to_string ().c_str ());
3518 /* When handling a vfork GDB removes all breakpoints from the program
3519 space in which the vfork is being handled. If we are following the
3520 parent then GDB will set the thread_waiting_for_vfork_done member of
3521 the parent inferior. In this case we should take care to only resume
3522 the vfork parent thread, the kernel will hold this thread suspended
3523 until the vfork child has exited or execd, at which point the parent
3524 will be resumed and a VFORK_DONE event sent to GDB. */
3525 if (tp
->inf
->thread_waiting_for_vfork_done
!= nullptr)
3527 if (target_is_non_stop_p ())
3529 /* For non-stop targets, regardless of whether GDB is using
3530 all-stop or non-stop mode, threads are controlled
3533 When a thread is handling a vfork, breakpoints are removed
3534 from the inferior (well, program space in fact), so it is
3535 critical that we don't try to resume any thread other than the
3537 if (tp
!= tp
->inf
->thread_waiting_for_vfork_done
)
3539 infrun_debug_printf ("[%s] thread %s of this inferior is "
3540 "waiting for vfork-done",
3541 tp
->ptid
.to_string ().c_str (),
3542 tp
->inf
->thread_waiting_for_vfork_done
3543 ->ptid
.to_string ().c_str ());
3549 /* For all-stop targets, when we attempt to resume the inferior,
3550 we will only resume the vfork parent thread, this is handled
3551 in internal_resume_ptid.
3553 Additionally, we will always be called with the vfork parent
3554 thread as the current thread (TP) thanks to follow_fork, as
3555 such the following assertion should hold.
3557 Beyond this there is nothing more that needs to be done
3559 gdb_assert (tp
== tp
->inf
->thread_waiting_for_vfork_done
);
3563 /* When handling a vfork GDB removes all breakpoints from the program
3564 space in which the vfork is being handled. If we are following the
3565 child then GDB will set vfork_child member of the vfork parent
3566 inferior. Once the child has either exited or execd then GDB will
3567 detach from the parent process. Until that point GDB should not
3568 resume any thread in the parent process. */
3569 if (tp
->inf
->vfork_child
!= nullptr)
3571 infrun_debug_printf ("[%s] thread is part of a vfork parent, child is %d",
3572 tp
->ptid
.to_string ().c_str (),
3573 tp
->inf
->vfork_child
->pid
);
3577 infrun_debug_printf ("resuming %s",
3578 tp
->ptid
.to_string ().c_str ());
3580 execution_control_state
ecs (tp
);
3581 switch_to_thread (tp
);
3582 keep_going_pass_signal (&ecs
);
3583 if (!ecs
.wait_some_more
)
3584 error (_("Command aborted."));
3587 /* Basic routine for continuing the program in various fashions.
3589 ADDR is the address to resume at, or -1 for resume where stopped.
3590 SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none,
3591 or GDB_SIGNAL_DEFAULT for act according to how it stopped.
3593 You should call clear_proceed_status before calling proceed. */
3596 proceed (CORE_ADDR addr
, enum gdb_signal siggnal
)
3598 INFRUN_SCOPED_DEBUG_ENTER_EXIT
;
3600 struct gdbarch
*gdbarch
;
3603 /* If we're stopped at a fork/vfork, switch to either the parent or child
3604 thread as defined by the "set follow-fork-mode" command, or, if both
3605 the parent and child are controlled by GDB, and schedule-multiple is
3606 on, follow the child. If none of the above apply then we just proceed
3607 resuming the current thread. */
3608 if (!follow_fork ())
3610 /* The target for some reason decided not to resume. */
3612 if (target_can_async_p ())
3613 inferior_event_handler (INF_EXEC_COMPLETE
);
3617 /* We'll update this if & when we switch to a new thread. */
3618 update_previous_thread ();
3620 thread_info
*cur_thr
= inferior_thread ();
3621 infrun_debug_printf ("cur_thr = %s", cur_thr
->ptid
.to_string ().c_str ());
3623 regcache
*regcache
= get_thread_regcache (cur_thr
);
3624 gdbarch
= regcache
->arch ();
3625 pc
= regcache_read_pc_protected (regcache
);
3627 /* Fill in with reasonable starting values. */
3628 init_thread_stepping_state (cur_thr
);
3630 gdb_assert (!thread_is_in_step_over_chain (cur_thr
));
3633 = user_visible_resume_ptid (cur_thr
->control
.stepping_command
);
3634 process_stratum_target
*resume_target
3635 = user_visible_resume_target (resume_ptid
);
3637 check_multi_target_resumption (resume_target
);
3639 if (addr
== (CORE_ADDR
) -1)
3641 const address_space
*aspace
= cur_thr
->inf
->aspace
.get ();
3643 if (cur_thr
->stop_pc_p ()
3644 && pc
== cur_thr
->stop_pc ()
3645 && breakpoint_here_p (aspace
, pc
) == ordinary_breakpoint_here
3646 && execution_direction
!= EXEC_REVERSE
)
3647 /* There is a breakpoint at the address we will resume at,
3648 step one instruction before inserting breakpoints so that
3649 we do not stop right away (and report a second hit at this
3652 Note, we don't do this in reverse, because we won't
3653 actually be executing the breakpoint insn anyway.
3654 We'll be (un-)executing the previous instruction. */
3655 cur_thr
->stepping_over_breakpoint
= 1;
3656 else if (gdbarch_single_step_through_delay_p (gdbarch
)
3657 && gdbarch_single_step_through_delay (gdbarch
,
3658 get_current_frame ()))
3659 /* We stepped onto an instruction that needs to be stepped
3660 again before re-inserting the breakpoint, do so. */
3661 cur_thr
->stepping_over_breakpoint
= 1;
3665 regcache_write_pc (regcache
, addr
);
3668 if (siggnal
!= GDB_SIGNAL_DEFAULT
)
3669 cur_thr
->set_stop_signal (siggnal
);
3671 /* If an exception is thrown from this point on, make sure to
3672 propagate GDB's knowledge of the executing state to the
3673 frontend/user running state. */
3674 scoped_finish_thread_state
finish_state (resume_target
, resume_ptid
);
3676 /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
3677 threads (e.g., we might need to set threads stepping over
3678 breakpoints first), from the user/frontend's point of view, all
3679 threads in RESUME_PTID are now running. Unless we're calling an
3680 inferior function, as in that case we pretend the inferior
3681 doesn't run at all. */
3682 if (!cur_thr
->control
.in_infcall
)
3683 set_running (resume_target
, resume_ptid
, true);
3685 infrun_debug_printf ("addr=%s, signal=%s, resume_ptid=%s",
3686 paddress (gdbarch
, addr
),
3687 gdb_signal_to_symbol_string (siggnal
),
3688 resume_ptid
.to_string ().c_str ());
3690 annotate_starting ();
3692 /* Make sure that output from GDB appears before output from the
3694 gdb_flush (gdb_stdout
);
3696 /* Since we've marked the inferior running, give it the terminal. A
3697 QUIT/Ctrl-C from here on is forwarded to the target (which can
3698 still detect attempts to unblock a stuck connection with repeated
3699 Ctrl-C from within target_pass_ctrlc). */
3700 target_terminal::inferior ();
3702 /* In a multi-threaded task we may select another thread and
3703 then continue or step.
3705 But if a thread that we're resuming had stopped at a breakpoint,
3706 it will immediately cause another breakpoint stop without any
3707 execution (i.e. it will report a breakpoint hit incorrectly). So
3708 we must step over it first.
3710 Look for threads other than the current (TP) that reported a
3711 breakpoint hit and haven't been resumed yet since. */
3713 /* If scheduler locking applies, we can avoid iterating over all
3715 if (!non_stop
&& !schedlock_applies (cur_thr
))
3717 for (thread_info
*tp
: all_non_exited_threads (resume_target
,
3720 switch_to_thread_no_regs (tp
);
3722 /* Ignore the current thread here. It's handled
3727 if (!thread_still_needs_step_over (tp
))
3730 gdb_assert (!thread_is_in_step_over_chain (tp
));
3732 infrun_debug_printf ("need to step-over [%s] first",
3733 tp
->ptid
.to_string ().c_str ());
3735 global_thread_step_over_chain_enqueue (tp
);
3738 switch_to_thread (cur_thr
);
3741 /* Enqueue the current thread last, so that we move all other
3742 threads over their breakpoints first. */
3743 if (cur_thr
->stepping_over_breakpoint
)
3744 global_thread_step_over_chain_enqueue (cur_thr
);
3746 /* If the thread isn't started, we'll still need to set its prev_pc,
3747 so that switch_back_to_stepped_thread knows the thread hasn't
3748 advanced. Must do this before resuming any thread, as in
3749 all-stop/remote, once we resume we can't send any other packet
3750 until the target stops again. */
3751 cur_thr
->prev_pc
= regcache_read_pc_protected (regcache
);
3754 scoped_disable_commit_resumed
disable_commit_resumed ("proceeding");
3755 bool step_over_started
= start_step_over ();
3757 if (step_over_info_valid_p ())
3759 /* Either this thread started a new in-line step over, or some
3760 other thread was already doing one. In either case, don't
3761 resume anything else until the step-over is finished. */
3763 else if (step_over_started
&& !target_is_non_stop_p ())
3765 /* A new displaced stepping sequence was started. In all-stop,
3766 we can't talk to the target anymore until it next stops. */
3768 else if (!non_stop
&& target_is_non_stop_p ())
3770 INFRUN_SCOPED_DEBUG_START_END
3771 ("resuming threads, all-stop-on-top-of-non-stop");
3773 /* In all-stop, but the target is always in non-stop mode.
3774 Start all other threads that are implicitly resumed too. */
3775 for (thread_info
*tp
: all_non_exited_threads (resume_target
,
3778 switch_to_thread_no_regs (tp
);
3779 proceed_resume_thread_checked (tp
);
3783 proceed_resume_thread_checked (cur_thr
);
3785 disable_commit_resumed
.reset_and_commit ();
3788 finish_state
.release ();
3790 /* If we've switched threads above, switch back to the previously
3791 current thread. We don't want the user to see a different
3793 switch_to_thread (cur_thr
);
3795 /* Tell the event loop to wait for it to stop. If the target
3796 supports asynchronous execution, it'll do this from within
3798 if (!target_can_async_p ())
3799 mark_async_event_handler (infrun_async_inferior_event_token
);
3803 /* Start remote-debugging of a machine over a serial link. */
3806 start_remote (int from_tty
)
3808 inferior
*inf
= current_inferior ();
3809 inf
->control
.stop_soon
= STOP_QUIETLY_REMOTE
;
3811 /* Always go on waiting for the target, regardless of the mode. */
3812 /* FIXME: cagney/1999-09-23: At present it isn't possible to
3813 indicate to wait_for_inferior that a target should timeout if
3814 nothing is returned (instead of just blocking). Because of this,
3815 targets expecting an immediate response need to, internally, set
3816 things up so that the target_wait() is forced to eventually
3818 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
3819 differentiate to its caller what the state of the target is after
3820 the initial open has been performed. Here we're assuming that
3821 the target has stopped. It should be possible to eventually have
3822 target_open() return to the caller an indication that the target
3823 is currently running and GDB state should be set to the same as
3824 for an async run. */
3825 wait_for_inferior (inf
);
3827 /* Now that the inferior has stopped, do any bookkeeping like
3828 loading shared libraries. We want to do this before normal_stop,
3829 so that the displayed frame is up to date. */
3830 post_create_inferior (from_tty
);
3835 /* Initialize static vars when a new inferior begins. */
3838 init_wait_for_inferior (void)
3840 /* These are meaningless until the first time through wait_for_inferior. */
3842 breakpoint_init_inferior (current_inferior (), inf_starting
);
3844 clear_proceed_status (0);
3846 nullify_last_target_wait_ptid ();
3848 update_previous_thread ();
3853 static void handle_inferior_event (struct execution_control_state
*ecs
);
3855 static void handle_step_into_function (struct gdbarch
*gdbarch
,
3856 struct execution_control_state
*ecs
);
3857 static void handle_step_into_function_backward (struct gdbarch
*gdbarch
,
3858 struct execution_control_state
*ecs
);
3859 static void handle_signal_stop (struct execution_control_state
*ecs
);
3860 static void check_exception_resume (struct execution_control_state
*,
3861 const frame_info_ptr
&);
3863 static void end_stepping_range (struct execution_control_state
*ecs
);
3864 static void stop_waiting (struct execution_control_state
*ecs
);
3865 static void keep_going (struct execution_control_state
*ecs
);
3866 static void process_event_stop_test (struct execution_control_state
*ecs
);
3867 static bool switch_back_to_stepped_thread (struct execution_control_state
*ecs
);
3869 /* This function is attached as a "thread_stop_requested" observer.
3870 Cleanup local state that assumed the PTID was to be resumed, and
3871 report the stop to the frontend. */
3874 infrun_thread_stop_requested (ptid_t ptid
)
3876 process_stratum_target
*curr_target
= current_inferior ()->process_target ();
3878 /* PTID was requested to stop. If the thread was already stopped,
3879 but the user/frontend doesn't know about that yet (e.g., the
3880 thread had been temporarily paused for some step-over), set up
3881 for reporting the stop now. */
3882 for (thread_info
*tp
: all_threads (curr_target
, ptid
))
3884 if (tp
->state
!= THREAD_RUNNING
)
3886 if (tp
->executing ())
3889 /* Remove matching threads from the step-over queue, so
3890 start_step_over doesn't try to resume them
3892 if (thread_is_in_step_over_chain (tp
))
3893 global_thread_step_over_chain_remove (tp
);
3895 /* If the thread is stopped, but the user/frontend doesn't
3896 know about that yet, queue a pending event, as if the
3897 thread had just stopped now. Unless the thread already had
3899 if (!tp
->has_pending_waitstatus ())
3901 target_waitstatus ws
;
3902 ws
.set_stopped (GDB_SIGNAL_0
);
3903 tp
->set_pending_waitstatus (ws
);
3906 /* Clear the inline-frame state, since we're re-processing the
3908 clear_inline_frame_state (tp
);
3910 /* If this thread was paused because some other thread was
3911 doing an inline-step over, let that finish first. Once
3912 that happens, we'll restart all threads and consume pending
3913 stop events then. */
3914 if (step_over_info_valid_p ())
3917 /* Otherwise we can process the (new) pending event now. Set
3918 it so this pending event is considered by
3920 tp
->set_resumed (true);
3924 /* Delete the step resume, single-step and longjmp/exception resume
3925 breakpoints of TP. */
3928 delete_thread_infrun_breakpoints (struct thread_info
*tp
)
3930 delete_step_resume_breakpoint (tp
);
3931 delete_exception_resume_breakpoint (tp
);
3932 delete_single_step_breakpoints (tp
);
3935 /* If the target still has execution, call FUNC for each thread that
3936 just stopped. In all-stop, that's all the non-exited threads; in
3937 non-stop, that's the current thread, only. */
3939 typedef void (*for_each_just_stopped_thread_callback_func
)
3940 (struct thread_info
*tp
);
3943 for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func
)
3945 if (!target_has_execution () || inferior_ptid
== null_ptid
)
3948 if (target_is_non_stop_p ())
3950 /* If in non-stop mode, only the current thread stopped. */
3951 func (inferior_thread ());
3955 /* In all-stop mode, all threads have stopped. */
3956 for (thread_info
*tp
: all_non_exited_threads ())
3961 /* Delete the step resume and longjmp/exception resume breakpoints of
3962 the threads that just stopped. */
3965 delete_just_stopped_threads_infrun_breakpoints (void)
3967 for_each_just_stopped_thread (delete_thread_infrun_breakpoints
);
3970 /* Delete the single-step breakpoints of the threads that just
3974 delete_just_stopped_threads_single_step_breakpoints (void)
3976 for_each_just_stopped_thread (delete_single_step_breakpoints
);
3982 print_target_wait_results (ptid_t waiton_ptid
, ptid_t result_ptid
,
3983 const struct target_waitstatus
&ws
)
3985 infrun_debug_printf ("target_wait (%s [%s], status) =",
3986 waiton_ptid
.to_string ().c_str (),
3987 target_pid_to_str (waiton_ptid
).c_str ());
3988 infrun_debug_printf (" %s [%s],",
3989 result_ptid
.to_string ().c_str (),
3990 target_pid_to_str (result_ptid
).c_str ());
3991 infrun_debug_printf (" %s", ws
.to_string ().c_str ());
3994 /* Select a thread at random, out of those which are resumed and have
3997 static struct thread_info
*
3998 random_pending_event_thread (inferior
*inf
, ptid_t waiton_ptid
)
4000 process_stratum_target
*proc_target
= inf
->process_target ();
4002 = proc_target
->random_resumed_with_pending_wait_status (inf
, waiton_ptid
);
4004 if (thread
== nullptr)
4006 infrun_debug_printf ("None found.");
4010 infrun_debug_printf ("Found %s.", thread
->ptid
.to_string ().c_str ());
4011 gdb_assert (thread
->resumed ());
4012 gdb_assert (thread
->has_pending_waitstatus ());
4017 /* Wrapper for target_wait that first checks whether threads have
4018 pending statuses to report before actually asking the target for
4019 more events. INF is the inferior we're using to call target_wait
4023 do_target_wait_1 (inferior
*inf
, ptid_t ptid
,
4024 target_waitstatus
*status
, target_wait_flags options
)
4026 struct thread_info
*tp
;
4028 /* We know that we are looking for an event in the target of inferior
4029 INF, but we don't know which thread the event might come from. As
4030 such we want to make sure that INFERIOR_PTID is reset so that none of
4031 the wait code relies on it - doing so is always a mistake. */
4032 switch_to_inferior_no_thread (inf
);
4034 /* First check if there is a resumed thread with a wait status
4036 if (ptid
== minus_one_ptid
|| ptid
.is_pid ())
4038 tp
= random_pending_event_thread (inf
, ptid
);
4042 infrun_debug_printf ("Waiting for specific thread %s.",
4043 ptid
.to_string ().c_str ());
4045 /* We have a specific thread to check. */
4046 tp
= inf
->find_thread (ptid
);
4047 gdb_assert (tp
!= nullptr);
4048 if (!tp
->has_pending_waitstatus ())
4053 && (tp
->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
4054 || tp
->stop_reason () == TARGET_STOPPED_BY_HW_BREAKPOINT
))
4056 struct regcache
*regcache
= get_thread_regcache (tp
);
4057 struct gdbarch
*gdbarch
= regcache
->arch ();
4061 pc
= regcache_read_pc (regcache
);
4063 if (pc
!= tp
->stop_pc ())
4065 infrun_debug_printf ("PC of %s changed. was=%s, now=%s",
4066 tp
->ptid
.to_string ().c_str (),
4067 paddress (gdbarch
, tp
->stop_pc ()),
4068 paddress (gdbarch
, pc
));
4071 else if (!breakpoint_inserted_here_p (tp
->inf
->aspace
.get (), pc
))
4073 infrun_debug_printf ("previous breakpoint of %s, at %s gone",
4074 tp
->ptid
.to_string ().c_str (),
4075 paddress (gdbarch
, pc
));
4082 infrun_debug_printf ("pending event of %s cancelled.",
4083 tp
->ptid
.to_string ().c_str ());
4085 tp
->clear_pending_waitstatus ();
4086 target_waitstatus ws
;
4088 tp
->set_pending_waitstatus (ws
);
4089 tp
->set_stop_reason (TARGET_STOPPED_BY_NO_REASON
);
4095 infrun_debug_printf ("Using pending wait status %s for %s.",
4096 tp
->pending_waitstatus ().to_string ().c_str (),
4097 tp
->ptid
.to_string ().c_str ());
4099 /* Now that we've selected our final event LWP, un-adjust its PC
4100 if it was a software breakpoint (and the target doesn't
4101 always adjust the PC itself). */
4102 if (tp
->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
4103 && !target_supports_stopped_by_sw_breakpoint ())
4105 struct regcache
*regcache
;
4106 struct gdbarch
*gdbarch
;
4109 regcache
= get_thread_regcache (tp
);
4110 gdbarch
= regcache
->arch ();
4112 decr_pc
= gdbarch_decr_pc_after_break (gdbarch
);
4117 pc
= regcache_read_pc (regcache
);
4118 regcache_write_pc (regcache
, pc
+ decr_pc
);
4122 tp
->set_stop_reason (TARGET_STOPPED_BY_NO_REASON
);
4123 *status
= tp
->pending_waitstatus ();
4124 tp
->clear_pending_waitstatus ();
4126 /* Wake up the event loop again, until all pending events are
4128 if (target_is_async_p ())
4129 mark_async_event_handler (infrun_async_inferior_event_token
);
4133 /* But if we don't find one, we'll have to wait. */
4135 /* We can't ask a non-async target to do a non-blocking wait, so this will be
4137 if (!target_can_async_p ())
4138 options
&= ~TARGET_WNOHANG
;
4140 return target_wait (ptid
, status
, options
);
4143 /* Wrapper for target_wait that first checks whether threads have
4144 pending statuses to report before actually asking the target for
4145 more events. Polls for events from all inferiors/targets. */
4148 do_target_wait (ptid_t wait_ptid
, execution_control_state
*ecs
,
4149 target_wait_flags options
)
4151 int num_inferiors
= 0;
4152 int random_selector
;
4154 /* For fairness, we pick the first inferior/target to poll at random
4155 out of all inferiors that may report events, and then continue
4156 polling the rest of the inferior list starting from that one in a
4157 circular fashion until the whole list is polled once. */
4159 ptid_t wait_ptid_pid
{wait_ptid
.pid ()};
4160 auto inferior_matches
= [&wait_ptid_pid
] (inferior
*inf
)
4162 return (inf
->process_target () != nullptr
4163 && ptid_t (inf
->pid
).matches (wait_ptid_pid
));
4166 /* First see how many matching inferiors we have. */
4167 for (inferior
*inf
: all_inferiors ())
4168 if (inferior_matches (inf
))
4171 if (num_inferiors
== 0)
4173 ecs
->ws
.set_ignore ();
4177 /* Now randomly pick an inferior out of those that matched. */
4178 random_selector
= (int)
4179 ((num_inferiors
* (double) rand ()) / (RAND_MAX
+ 1.0));
4181 if (num_inferiors
> 1)
4182 infrun_debug_printf ("Found %d inferiors, starting at #%d",
4183 num_inferiors
, random_selector
);
4185 /* Select the Nth inferior that matched. */
4187 inferior
*selected
= nullptr;
4189 for (inferior
*inf
: all_inferiors ())
4190 if (inferior_matches (inf
))
4191 if (random_selector
-- == 0)
4197 /* Now poll for events out of each of the matching inferior's
4198 targets, starting from the selected one. */
4200 auto do_wait
= [&] (inferior
*inf
)
4202 ecs
->ptid
= do_target_wait_1 (inf
, wait_ptid
, &ecs
->ws
, options
);
4203 ecs
->target
= inf
->process_target ();
4204 return (ecs
->ws
.kind () != TARGET_WAITKIND_IGNORE
);
4207 /* Needed in 'all-stop + target-non-stop' mode, because we end up
4208 here spuriously after the target is all stopped and we've already
4209 reported the stop to the user, polling for events. */
4210 scoped_restore_current_thread restore_thread
;
4212 intrusive_list_iterator
<inferior
> start
4213 = inferior_list
.iterator_to (*selected
);
4215 for (intrusive_list_iterator
<inferior
> it
= start
;
4216 it
!= inferior_list
.end ();
4219 inferior
*inf
= &*it
;
4221 if (inferior_matches (inf
) && do_wait (inf
))
4225 for (intrusive_list_iterator
<inferior
> it
= inferior_list
.begin ();
4229 inferior
*inf
= &*it
;
4231 if (inferior_matches (inf
) && do_wait (inf
))
4235 ecs
->ws
.set_ignore ();
4239 /* An event reported by wait_one. */
4241 struct wait_one_event
4243 /* The target the event came out of. */
4244 process_stratum_target
*target
;
4246 /* The PTID the event was for. */
4249 /* The waitstatus. */
4250 target_waitstatus ws
;
4253 static bool handle_one (const wait_one_event
&event
);
4254 static int finish_step_over (struct execution_control_state
*ecs
);
4256 /* Prepare and stabilize the inferior for detaching it. E.g.,
4257 detaching while a thread is displaced stepping is a recipe for
4258 crashing it, as nothing would readjust the PC out of the scratch
4262 prepare_for_detach (void)
4264 struct inferior
*inf
= current_inferior ();
4265 ptid_t pid_ptid
= ptid_t (inf
->pid
);
4266 scoped_restore_current_thread restore_thread
;
4268 scoped_restore restore_detaching
= make_scoped_restore (&inf
->detaching
, true);
4270 /* Remove all threads of INF from the global step-over chain. We
4271 want to stop any ongoing step-over, not start any new one. */
4272 thread_step_over_list_safe_range range
4273 = make_thread_step_over_list_safe_range (global_thread_step_over_list
);
4275 for (thread_info
*tp
: range
)
4278 infrun_debug_printf ("removing thread %s from global step over chain",
4279 tp
->ptid
.to_string ().c_str ());
4280 global_thread_step_over_chain_remove (tp
);
4283 /* If we were already in the middle of an inline step-over, and the
4284 thread stepping belongs to the inferior we're detaching, we need
4285 to restart the threads of other inferiors. */
4286 if (step_over_info
.thread
!= -1)
4288 infrun_debug_printf ("inline step-over in-process while detaching");
4290 thread_info
*thr
= find_thread_global_id (step_over_info
.thread
);
4291 if (thr
->inf
== inf
)
4293 /* Since we removed threads of INF from the step-over chain,
4294 we know this won't start a step-over for INF. */
4295 clear_step_over_info ();
4297 if (target_is_non_stop_p ())
4299 /* Start a new step-over in another thread if there's
4300 one that needs it. */
4303 /* Restart all other threads (except the
4304 previously-stepping thread, since that one is still
4306 if (!step_over_info_valid_p ())
4307 restart_threads (thr
);
4312 if (displaced_step_in_progress (inf
))
4314 infrun_debug_printf ("displaced-stepping in-process while detaching");
4316 /* Stop threads currently displaced stepping, aborting it. */
4318 for (thread_info
*thr
: inf
->non_exited_threads ())
4320 if (thr
->displaced_step_state
.in_progress ())
4322 if (thr
->executing ())
4324 if (!thr
->stop_requested
)
4326 target_stop (thr
->ptid
);
4327 thr
->stop_requested
= true;
4331 thr
->set_resumed (false);
4335 while (displaced_step_in_progress (inf
))
4337 wait_one_event event
;
4339 event
.target
= inf
->process_target ();
4340 event
.ptid
= do_target_wait_1 (inf
, pid_ptid
, &event
.ws
, 0);
4343 print_target_wait_results (pid_ptid
, event
.ptid
, event
.ws
);
4348 /* It's OK to leave some of the threads of INF stopped, since
4349 they'll be detached shortly. */
4353 /* If all-stop, but there exists a non-stop target, stop all threads
4354 now that we're presenting the stop to the user. */
4357 stop_all_threads_if_all_stop_mode ()
4359 if (!non_stop
&& exists_non_stop_target ())
4360 stop_all_threads ("presenting stop to user in all-stop");
4363 /* Wait for control to return from inferior to debugger.
4365 If inferior gets a signal, we may decide to start it up again
4366 instead of returning. That is why there is a loop in this function.
4367 When this function actually returns it means the inferior
4368 should be left stopped and GDB should read more commands. */
4371 wait_for_inferior (inferior
*inf
)
4373 infrun_debug_printf ("wait_for_inferior ()");
4375 SCOPE_EXIT
{ delete_just_stopped_threads_infrun_breakpoints (); };
4377 /* If an error happens while handling the event, propagate GDB's
4378 knowledge of the executing state to the frontend/user running
4380 scoped_finish_thread_state finish_state
4381 (inf
->process_target (), minus_one_ptid
);
4385 execution_control_state ecs
;
4387 overlay_cache_invalid
= 1;
4389 /* Flush target cache before starting to handle each event.
4390 Target was running and cache could be stale. This is just a
4391 heuristic. Running threads may modify target memory, but we
4392 don't get any event. */
4393 target_dcache_invalidate (current_program_space
->aspace
);
4395 ecs
.ptid
= do_target_wait_1 (inf
, minus_one_ptid
, &ecs
.ws
, 0);
4396 ecs
.target
= inf
->process_target ();
4399 print_target_wait_results (minus_one_ptid
, ecs
.ptid
, ecs
.ws
);
4401 /* Now figure out what to do with the result of the result. */
4402 handle_inferior_event (&ecs
);
4404 if (!ecs
.wait_some_more
)
4408 stop_all_threads_if_all_stop_mode ();
4410 /* No error, don't finish the state yet. */
4411 finish_state
.release ();
4414 /* Cleanup that reinstalls the readline callback handler, if the
4415 target is running in the background. If while handling the target
4416 event something triggered a secondary prompt, like e.g., a
4417 pagination prompt, we'll have removed the callback handler (see
4418 gdb_readline_wrapper_line). Need to do this as we go back to the
4419 event loop, ready to process further input. Note this has no
4420 effect if the handler hasn't actually been removed, because calling
4421 rl_callback_handler_install resets the line buffer, thus losing
4425 reinstall_readline_callback_handler_cleanup ()
4427 struct ui
*ui
= current_ui
;
4431 /* We're not going back to the top level event loop yet. Don't
4432 install the readline callback, as it'd prep the terminal,
4433 readline-style (raw, noecho) (e.g., --batch). We'll install
4434 it the next time the prompt is displayed, when we're ready
4439 if (ui
->command_editing
&& ui
->prompt_state
!= PROMPT_BLOCKED
)
4440 gdb_rl_callback_handler_reinstall ();
4443 /* Clean up the FSMs of threads that are now stopped. In non-stop,
4444 that's just the event thread. In all-stop, that's all threads. In
4445 all-stop, threads that had a pending exit no longer have a reason
4446 to be around, as their FSMs/commands are canceled, so we delete
4447 them. This avoids "info threads" listing such threads as if they
4448 were alive (and failing to read their registers), the user being
4449 able to select and resume them (and that failing), etc. */
4452 clean_up_just_stopped_threads_fsms (struct execution_control_state
*ecs
)
4454 /* The first clean_up call below assumes the event thread is the current
4456 if (ecs
->event_thread
!= nullptr)
4457 gdb_assert (ecs
->event_thread
== inferior_thread ());
4459 if (ecs
->event_thread
!= nullptr
4460 && ecs
->event_thread
->thread_fsm () != nullptr)
4461 ecs
->event_thread
->thread_fsm ()->clean_up (ecs
->event_thread
);
4465 scoped_restore_current_thread restore_thread
;
4467 for (thread_info
*thr
: all_threads_safe ())
4469 if (thr
->state
== THREAD_EXITED
)
4472 if (thr
== ecs
->event_thread
)
4475 if (thr
->thread_fsm () != nullptr)
4477 switch_to_thread (thr
);
4478 thr
->thread_fsm ()->clean_up (thr
);
4481 /* As we are cancelling the command/FSM of this thread,
4482 whatever was the reason we needed to report a thread
4483 exited event to the user, that reason is gone. Delete
4484 the thread, so that the user doesn't see it in the thread
4485 list, the next proceed doesn't try to resume it, etc. */
4486 if (thr
->has_pending_waitstatus ()
4487 && (thr
->pending_waitstatus ().kind ()
4488 == TARGET_WAITKIND_THREAD_EXITED
))
4489 delete_thread (thr
);
4494 /* Helper for all_uis_check_sync_execution_done that works on the
4498 check_curr_ui_sync_execution_done (void)
4500 struct ui
*ui
= current_ui
;
4502 if (ui
->prompt_state
== PROMPT_NEEDED
4504 && !gdb_in_secondary_prompt_p (ui
))
4506 target_terminal::ours ();
4507 top_level_interpreter ()->on_sync_execution_done ();
4508 ui
->register_file_handler ();
4515 all_uis_check_sync_execution_done (void)
4517 SWITCH_THRU_ALL_UIS ()
4519 check_curr_ui_sync_execution_done ();
4526 all_uis_on_sync_execution_starting (void)
4528 SWITCH_THRU_ALL_UIS ()
4530 if (current_ui
->prompt_state
== PROMPT_NEEDED
)
4531 async_disable_stdin ();
4535 /* A quit_handler callback installed while we're handling inferior
4539 infrun_quit_handler ()
4541 if (target_terminal::is_ours ())
4545 default_quit_handler would throw a quit in this case, but if
4546 we're handling an event while we have the terminal, it means
4547 the target is running a background execution command, and
4548 thus when users press Ctrl-C, they're wanting to interrupt
4549 whatever command they were executing in the command line.
4553 (gdb) foo bar whatever<ctrl-c>
4555 That Ctrl-C should clear the input line, not interrupt event
4556 handling if it happens that the user types Ctrl-C at just the
4559 It's as-if background event handling was handled by a
4560 separate background thread.
4562 To be clear, the Ctrl-C is not lost -- it will be processed
4563 by the next QUIT call once we're out of fetch_inferior_event
4568 if (check_quit_flag ())
4569 target_pass_ctrlc ();
4573 /* Asynchronous version of wait_for_inferior. It is called by the
4574 event loop whenever a change of state is detected on the file
4575 descriptor corresponding to the target. It can be called more than
4576 once to complete a single execution command. In such cases we need
4577 to keep the state in a global variable ECSS. If it is the last time
4578 that this function is called for a single execution command, then
4579 report to the user that the inferior has stopped, and do the
4580 necessary cleanups. */
4583 fetch_inferior_event ()
4585 INFRUN_SCOPED_DEBUG_ENTER_EXIT
;
4587 execution_control_state ecs
;
4590 /* Events are always processed with the main UI as current UI. This
4591 way, warnings, debug output, etc. are always consistently sent to
4592 the main console. */
4593 scoped_restore save_ui
= make_scoped_restore (¤t_ui
, main_ui
);
4595 /* Temporarily disable pagination. Otherwise, the user would be
4596 given an option to press 'q' to quit, which would cause an early
4597 exit and could leave GDB in a half-baked state. */
4598 scoped_restore save_pagination
4599 = make_scoped_restore (&pagination_enabled
, false);
4601 /* Install a quit handler that does nothing if we have the terminal
4602 (meaning the target is running a background execution command),
4603 so that Ctrl-C never interrupts GDB before the event is fully
4605 scoped_restore restore_quit_handler
4606 = make_scoped_restore (&quit_handler
, infrun_quit_handler
);
4608 /* Make sure a SIGINT does not interrupt an extension language while
4609 we're handling an event. That could interrupt a Python unwinder
4610 or a Python observer or some such. A Ctrl-C should either be
4611 forwarded to the inferior if the inferior has the terminal, or,
4612 if GDB has the terminal, should interrupt the command the user is
4613 typing in the CLI. */
4614 scoped_disable_cooperative_sigint_handling restore_coop_sigint
;
4616 /* End up with readline processing input, if necessary. */
4618 SCOPE_EXIT
{ reinstall_readline_callback_handler_cleanup (); };
4620 /* We're handling a live event, so make sure we're doing live
4621 debugging. If we're looking at traceframes while the target is
4622 running, we're going to need to get back to that mode after
4623 handling the event. */
4624 std::optional
<scoped_restore_current_traceframe
> maybe_restore_traceframe
;
4627 maybe_restore_traceframe
.emplace ();
4628 set_current_traceframe (-1);
4631 /* The user/frontend should not notice a thread switch due to
4632 internal events. Make sure we revert to the user selected
4633 thread and frame after handling the event and running any
4634 breakpoint commands. */
4635 scoped_restore_current_thread restore_thread
;
4637 overlay_cache_invalid
= 1;
4638 /* Flush target cache before starting to handle each event. Target
4639 was running and cache could be stale. This is just a heuristic.
4640 Running threads may modify target memory, but we don't get any
4642 target_dcache_invalidate (current_program_space
->aspace
);
4644 scoped_restore save_exec_dir
4645 = make_scoped_restore (&execution_direction
,
4646 target_execution_direction ());
4648 /* Allow targets to pause their resumed threads while we handle
4650 scoped_disable_commit_resumed
disable_commit_resumed ("handling event");
4652 /* Is the current thread performing an inferior function call as part
4653 of a breakpoint condition evaluation? */
4654 bool in_cond_eval
= (inferior_ptid
!= null_ptid
4655 && inferior_thread ()->control
.in_cond_eval
);
4657 /* If the thread is in the middle of the condition evaluation, wait for
4658 an event from the current thread. Otherwise, wait for an event from
4660 ptid_t waiton_ptid
= in_cond_eval
? inferior_ptid
: minus_one_ptid
;
4662 if (!do_target_wait (waiton_ptid
, &ecs
, TARGET_WNOHANG
))
4664 infrun_debug_printf ("do_target_wait returned no event");
4665 disable_commit_resumed
.reset_and_commit ();
4669 gdb_assert (ecs
.ws
.kind () != TARGET_WAITKIND_IGNORE
);
4671 /* Switch to the inferior that generated the event, so we can do
4672 target calls. If the event was not associated to a ptid, */
4673 if (ecs
.ptid
!= null_ptid
4674 && ecs
.ptid
!= minus_one_ptid
)
4675 switch_to_inferior_no_thread (find_inferior_ptid (ecs
.target
, ecs
.ptid
));
4677 switch_to_target_no_thread (ecs
.target
);
4680 print_target_wait_results (minus_one_ptid
, ecs
.ptid
, ecs
.ws
);
4682 /* If an error happens while handling the event, propagate GDB's
4683 knowledge of the executing state to the frontend/user running
4685 ptid_t finish_ptid
= !target_is_non_stop_p () ? minus_one_ptid
: ecs
.ptid
;
4686 scoped_finish_thread_state
finish_state (ecs
.target
, finish_ptid
);
4688 /* Get executed before scoped_restore_current_thread above to apply
4689 still for the thread which has thrown the exception. */
4690 auto defer_bpstat_clear
4691 = make_scope_exit (bpstat_clear_actions
);
4692 auto defer_delete_threads
4693 = make_scope_exit (delete_just_stopped_threads_infrun_breakpoints
);
4695 int stop_id
= get_stop_id ();
4697 /* Now figure out what to do with the result of the result. */
4698 handle_inferior_event (&ecs
);
4700 if (!ecs
.wait_some_more
)
4702 struct inferior
*inf
= find_inferior_ptid (ecs
.target
, ecs
.ptid
);
4703 bool should_stop
= true;
4704 struct thread_info
*thr
= ecs
.event_thread
;
4706 delete_just_stopped_threads_infrun_breakpoints ();
4708 if (thr
!= nullptr && thr
->thread_fsm () != nullptr)
4709 should_stop
= thr
->thread_fsm ()->should_stop (thr
);
4717 bool should_notify_stop
= true;
4718 bool proceeded
= false;
4720 /* If the thread that stopped just completed an inferior
4721 function call as part of a condition evaluation, then we
4722 don't want to stop all the other threads. */
4723 if (ecs
.event_thread
== nullptr
4724 || !ecs
.event_thread
->control
.in_cond_eval
)
4725 stop_all_threads_if_all_stop_mode ();
4727 clean_up_just_stopped_threads_fsms (&ecs
);
4729 if (stop_id
!= get_stop_id ())
4731 /* If the stop-id has changed then a stop has already been
4732 presented to the user in handle_inferior_event, this is
4733 likely a failed inferior call. As the stop has already
4734 been announced then we should not notify again.
4736 Also, if the prompt state is not PROMPT_NEEDED then GDB
4737 will not be ready for user input after this function. */
4738 should_notify_stop
= false;
4739 gdb_assert (current_ui
->prompt_state
== PROMPT_NEEDED
);
4741 else if (thr
!= nullptr && thr
->thread_fsm () != nullptr)
4743 = thr
->thread_fsm ()->should_notify_stop ();
4745 if (should_notify_stop
)
4747 /* We may not find an inferior if this was a process exit. */
4748 if (inf
== nullptr || inf
->control
.stop_soon
== NO_STOP_QUIETLY
)
4749 proceeded
= normal_stop ();
4752 if (!proceeded
&& !in_cond_eval
)
4754 inferior_event_handler (INF_EXEC_COMPLETE
);
4758 /* If we got a TARGET_WAITKIND_NO_RESUMED event, then the
4759 previously selected thread is gone. We have two
4760 choices - switch to no thread selected, or restore the
4761 previously selected thread (now exited). We chose the
4762 later, just because that's what GDB used to do. After
4763 this, "info threads" says "The current thread <Thread
4764 ID 2> has terminated." instead of "No thread
4768 && ecs
.ws
.kind () != TARGET_WAITKIND_NO_RESUMED
)
4769 restore_thread
.dont_restore ();
4773 defer_delete_threads
.release ();
4774 defer_bpstat_clear
.release ();
4776 /* No error, don't finish the thread states yet. */
4777 finish_state
.release ();
4779 disable_commit_resumed
.reset_and_commit ();
4781 /* This scope is used to ensure that readline callbacks are
4782 reinstalled here. */
4785 /* Handling this event might have caused some inferiors to become prunable.
4786 For example, the exit of an inferior that was automatically added. Try
4787 to get rid of them. Keeping those around slows down things linearly.
4789 Note that this never removes the current inferior. Therefore, call this
4790 after RESTORE_THREAD went out of scope, in case the event inferior (which was
4791 temporarily made the current inferior) is meant to be deleted.
4793 Call this before all_uis_check_sync_execution_done, so that notifications about
4794 removed inferiors appear before the prompt. */
4797 /* If a UI was in sync execution mode, and now isn't, restore its
4798 prompt (a synchronous execution command has finished, and we're
4799 ready for input). */
4800 all_uis_check_sync_execution_done ();
4803 && exec_done_display_p
4804 && (inferior_ptid
== null_ptid
4805 || inferior_thread ()->state
!= THREAD_RUNNING
))
4806 gdb_printf (_("completed.\n"));
4812 set_step_info (thread_info
*tp
, const frame_info_ptr
&frame
,
4813 struct symtab_and_line sal
)
4815 /* This can be removed once this function no longer implicitly relies on the
4816 inferior_ptid value. */
4817 gdb_assert (inferior_ptid
== tp
->ptid
);
4819 tp
->control
.step_frame_id
= get_frame_id (frame
);
4820 tp
->control
.step_stack_frame_id
= get_stack_frame_id (frame
);
4822 tp
->current_symtab
= sal
.symtab
;
4823 tp
->current_line
= sal
.line
;
4826 ("symtab = %s, line = %d, step_frame_id = %s, step_stack_frame_id = %s",
4827 tp
->current_symtab
!= nullptr ? tp
->current_symtab
->filename
: "<null>",
4829 tp
->control
.step_frame_id
.to_string ().c_str (),
4830 tp
->control
.step_stack_frame_id
.to_string ().c_str ());
4833 /* Clear context switchable stepping state. */
4836 init_thread_stepping_state (struct thread_info
*tss
)
4838 tss
->stepped_breakpoint
= 0;
4839 tss
->stepping_over_breakpoint
= 0;
4840 tss
->stepping_over_watchpoint
= 0;
4841 tss
->step_after_step_resume_breakpoint
= 0;
4847 set_last_target_status (process_stratum_target
*target
, ptid_t ptid
,
4848 const target_waitstatus
&status
)
4850 target_last_proc_target
= target
;
4851 target_last_wait_ptid
= ptid
;
4852 target_last_waitstatus
= status
;
4858 get_last_target_status (process_stratum_target
**target
, ptid_t
*ptid
,
4859 target_waitstatus
*status
)
4861 if (target
!= nullptr)
4862 *target
= target_last_proc_target
;
4863 if (ptid
!= nullptr)
4864 *ptid
= target_last_wait_ptid
;
4865 if (status
!= nullptr)
4866 *status
= target_last_waitstatus
;
4872 nullify_last_target_wait_ptid (void)
4874 target_last_proc_target
= nullptr;
4875 target_last_wait_ptid
= minus_one_ptid
;
4876 target_last_waitstatus
= {};
4879 /* Switch thread contexts. */
4882 context_switch (execution_control_state
*ecs
)
4884 if (ecs
->ptid
!= inferior_ptid
4885 && (inferior_ptid
== null_ptid
4886 || ecs
->event_thread
!= inferior_thread ()))
4888 infrun_debug_printf ("Switching context from %s to %s",
4889 inferior_ptid
.to_string ().c_str (),
4890 ecs
->ptid
.to_string ().c_str ());
4893 switch_to_thread (ecs
->event_thread
);
4896 /* If the target can't tell whether we've hit breakpoints
4897 (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
4898 check whether that could have been caused by a breakpoint. If so,
4899 adjust the PC, per gdbarch_decr_pc_after_break. */
4902 adjust_pc_after_break (struct thread_info
*thread
,
4903 const target_waitstatus
&ws
)
4905 struct regcache
*regcache
;
4906 struct gdbarch
*gdbarch
;
4907 CORE_ADDR breakpoint_pc
, decr_pc
;
4909 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
4910 we aren't, just return.
4912 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
4913 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
4914 implemented by software breakpoints should be handled through the normal
4917 NOTE drow/2004-01-31: On some targets, breakpoints may generate
4918 different signals (SIGILL or SIGEMT for instance), but it is less
4919 clear where the PC is pointing afterwards. It may not match
4920 gdbarch_decr_pc_after_break. I don't know any specific target that
4921 generates these signals at breakpoints (the code has been in GDB since at
4922 least 1992) so I can not guess how to handle them here.
4924 In earlier versions of GDB, a target with
4925 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
4926 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
4927 target with both of these set in GDB history, and it seems unlikely to be
4928 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
4930 if (ws
.kind () != TARGET_WAITKIND_STOPPED
)
4933 if (ws
.sig () != GDB_SIGNAL_TRAP
)
4936 /* In reverse execution, when a breakpoint is hit, the instruction
4937 under it has already been de-executed. The reported PC always
4938 points at the breakpoint address, so adjusting it further would
4939 be wrong. E.g., consider this case on a decr_pc_after_break == 1
4942 B1 0x08000000 : INSN1
4943 B2 0x08000001 : INSN2
4945 PC -> 0x08000003 : INSN4
4947 Say you're stopped at 0x08000003 as above. Reverse continuing
4948 from that point should hit B2 as below. Reading the PC when the
4949 SIGTRAP is reported should read 0x08000001 and INSN2 should have
4950 been de-executed already.
4952 B1 0x08000000 : INSN1
4953 B2 PC -> 0x08000001 : INSN2
4957 We can't apply the same logic as for forward execution, because
4958 we would wrongly adjust the PC to 0x08000000, since there's a
4959 breakpoint at PC - 1. We'd then report a hit on B1, although
4960 INSN1 hadn't been de-executed yet. Doing nothing is the correct
4962 if (execution_direction
== EXEC_REVERSE
)
4965 /* If the target can tell whether the thread hit a SW breakpoint,
4966 trust it. Targets that can tell also adjust the PC
4968 if (target_supports_stopped_by_sw_breakpoint ())
4971 /* Note that relying on whether a breakpoint is planted in memory to
4972 determine this can fail. E.g,. the breakpoint could have been
4973 removed since. Or the thread could have been told to step an
4974 instruction the size of a breakpoint instruction, and only
4975 _after_ was a breakpoint inserted at its address. */
4977 /* If this target does not decrement the PC after breakpoints, then
4978 we have nothing to do. */
4979 regcache
= get_thread_regcache (thread
);
4980 gdbarch
= regcache
->arch ();
4982 decr_pc
= gdbarch_decr_pc_after_break (gdbarch
);
4986 const address_space
*aspace
= thread
->inf
->aspace
.get ();
4988 /* Find the location where (if we've hit a breakpoint) the
4989 breakpoint would be. */
4990 breakpoint_pc
= regcache_read_pc (regcache
) - decr_pc
;
4992 /* If the target can't tell whether a software breakpoint triggered,
4993 fallback to figuring it out based on breakpoints we think were
4994 inserted in the target, and on whether the thread was stepped or
4997 /* Check whether there actually is a software breakpoint inserted at
5000 If in non-stop mode, a race condition is possible where we've
5001 removed a breakpoint, but stop events for that breakpoint were
5002 already queued and arrive later. To suppress those spurious
5003 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
5004 and retire them after a number of stop events are reported. Note
5005 this is an heuristic and can thus get confused. The real fix is
5006 to get the "stopped by SW BP and needs adjustment" info out of
5007 the target/kernel (and thus never reach here; see above). */
5008 if (software_breakpoint_inserted_here_p (aspace
, breakpoint_pc
)
5009 || (target_is_non_stop_p ()
5010 && moribund_breakpoint_here_p (aspace
, breakpoint_pc
)))
5012 std::optional
<scoped_restore_tmpl
<int>> restore_operation_disable
;
5014 if (record_full_is_used ())
5015 restore_operation_disable
.emplace
5016 (record_full_gdb_operation_disable_set ());
5018 /* When using hardware single-step, a SIGTRAP is reported for both
5019 a completed single-step and a software breakpoint. Need to
5020 differentiate between the two, as the latter needs adjusting
5021 but the former does not.
5023 The SIGTRAP can be due to a completed hardware single-step only if
5024 - we didn't insert software single-step breakpoints
5025 - this thread is currently being stepped
5027 If any of these events did not occur, we must have stopped due
5028 to hitting a software breakpoint, and have to back up to the
5031 As a special case, we could have hardware single-stepped a
5032 software breakpoint. In this case (prev_pc == breakpoint_pc),
5033 we also need to back up to the breakpoint address. */
5035 if (thread_has_single_step_breakpoints_set (thread
)
5036 || !currently_stepping (thread
)
5037 || (thread
->stepped_breakpoint
5038 && thread
->prev_pc
== breakpoint_pc
))
5039 regcache_write_pc (regcache
, breakpoint_pc
);
5044 stepped_in_from (const frame_info_ptr
&initial_frame
, frame_id step_frame_id
)
5046 frame_info_ptr frame
= initial_frame
;
5048 for (frame
= get_prev_frame (frame
);
5050 frame
= get_prev_frame (frame
))
5052 if (get_frame_id (frame
) == step_frame_id
)
5055 if (get_frame_type (frame
) != INLINE_FRAME
)
/* Look for an inline frame that is marked for skip.
   If PREV_FRAME is TRUE start at the previous frame,
   otherwise start at the current frame.  Stop at the
   first non-inline frame, or at the frame where the
   step started.  Return true if such a skip-marked
   inline frame was found.  */

static bool
inline_frame_is_marked_for_skip (bool prev_frame, struct thread_info *tp)
{
  frame_info_ptr frame = get_current_frame ();

  if (prev_frame)
    frame = get_prev_frame (frame);

  for (; frame != nullptr; frame = get_prev_frame (frame))
    {
      const char *fn = nullptr;
      symtab_and_line sal;
      struct symbol *sym;

      /* Reached the frame the step started in -- nothing further out
	 belongs to the inline chain we care about.  */
      if (get_frame_id (frame) == tp->control.step_frame_id)
	break;
      /* Likewise stop at the first real (non-inlined) frame.  */
      if (get_frame_type (frame) != INLINE_FRAME)
	break;

      sal = find_frame_sal (frame);
      sym = get_frame_function (frame);

      if (sym != nullptr)
	fn = sym->print_name ();

      /* Only consult the skip list for frames with line info.  */
      if (sal.line != 0
	  && function_name_is_marked_for_skip (fn, sal))
	return true;
    }

  return false;
}
5101 /* If the event thread has the stop requested flag set, pretend it
5102 stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
5106 handle_stop_requested (struct execution_control_state
*ecs
)
5108 if (ecs
->event_thread
->stop_requested
)
5110 ecs
->ws
.set_stopped (GDB_SIGNAL_0
);
5111 handle_signal_stop (ecs
);
/* Auxiliary function that handles syscall entry/return events.
   It returns true if the inferior should keep going (and GDB
   should ignore the event), or false if the event deserves to be
   reported to the user.  */

static bool
handle_syscall_event (struct execution_control_state *ecs)
{
  struct regcache *regcache;
  int syscall_number;

  context_switch (ecs);

  regcache = get_thread_regcache (ecs->event_thread);
  syscall_number = ecs->ws.syscall_number ();
  ecs->event_thread->set_stop_pc (regcache_read_pc (regcache));

  /* Only build a stop bpstat if the user actually has a catchpoint
     for this particular syscall number.  */
  if (catch_syscall_enabled ()
      && catching_syscall_number (syscall_number))
    {
      infrun_debug_printf ("syscall number=%d", syscall_number);

      ecs->event_thread->control.stop_bpstat
	= bpstat_stop_status_nowatch (ecs->event_thread->inf->aspace.get (),
				      ecs->event_thread->stop_pc (),
				      ecs->event_thread, ecs->ws);

      /* An explicit stop request takes precedence; report it.  */
      if (handle_stop_requested (ecs))
	return false;

      if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
	{
	  /* Catchpoint hit.  */
	  return false;
	}
    }

  if (handle_stop_requested (ecs))
    return false;

  /* If no catchpoint triggered for this, then keep going.  */
  keep_going (ecs);

  return true;
}
/* Lazily fill in the execution_control_state's stop_func_* fields:
   the name, start/end addresses and alternate start of the function
   the event thread stopped in.  A no-op if already filled in.  */

static void
fill_in_stop_func (struct gdbarch *gdbarch,
		   struct execution_control_state *ecs)
{
  if (!ecs->stop_func_filled_in)
    {
      const block *block;
      const general_symbol_info *gsi;

      /* Don't care about return value; stop_func_start and stop_func_name
	 will both be 0 if it doesn't work.  */
      find_pc_partial_function_sym (ecs->event_thread->stop_pc (),
				    &gsi,
				    &ecs->stop_func_start,
				    &ecs->stop_func_end,
				    &block);
      ecs->stop_func_name = gsi == nullptr ? nullptr : gsi->print_name ();

      /* The call to find_pc_partial_function, above, will set
	 stop_func_start and stop_func_end to the start and end
	 of the range containing the stop pc.  If this range
	 contains the entry pc for the block (which is always the
	 case for contiguous blocks), advance stop_func_start past
	 the function's start offset and entrypoint.  Note that
	 stop_func_start is NOT advanced when in a range of a
	 non-contiguous block that does not contain the entry pc.  */
      if (block != nullptr
	  && ecs->stop_func_start <= block->entry_pc ()
	  && block->entry_pc () < ecs->stop_func_end)
	{
	  ecs->stop_func_start
	    += gdbarch_deprecated_function_start_offset (gdbarch);

	  /* PowerPC functions have a Local Entry Point (LEP) and a Global
	     Entry Point (GEP).  There is only one Entry Point (GEP = LEP) for
	     other architectures.  */
	  ecs->stop_func_alt_start = ecs->stop_func_start;

	  if (gdbarch_skip_entrypoint_p (gdbarch))
	    ecs->stop_func_start
	      = gdbarch_skip_entrypoint (gdbarch, ecs->stop_func_start);
	}

      ecs->stop_func_filled_in = 1;
    }
}
5213 /* Return the STOP_SOON field of the inferior pointed at by ECS. */
5215 static enum stop_kind
5216 get_inferior_stop_soon (execution_control_state
*ecs
)
5218 struct inferior
*inf
= find_inferior_ptid (ecs
->target
, ecs
->ptid
);
5220 gdb_assert (inf
!= nullptr);
5221 return inf
->control
.stop_soon
;
/* Poll for one event out of the current target.  Store the resulting
   waitstatus in WS, and return the event ptid.  Does not block.  */

static ptid_t
poll_one_curr_target (struct target_waitstatus *ws)
{
  ptid_t event_ptid;

  overlay_cache_invalid = 1;

  /* Flush target cache before starting to handle each event.
     Target was running and cache could be stale.  This is just a
     heuristic.  Running threads may modify target memory, but we
     don't get any event.  */
  target_dcache_invalidate (current_program_space->aspace);

  /* TARGET_WNOHANG makes this a non-blocking poll.  */
  event_ptid = target_wait (minus_one_ptid, ws, TARGET_WNOHANG);

  if (debug_infrun)
    print_target_wait_results (minus_one_ptid, event_ptid, *ws);

  return event_ptid;
}
/* Wait for one event out of any target.  First polls every async
   target that has executing threads; if no event is pending, blocks
   in select on the targets' event file descriptors and polls again
   once one becomes readable.  */

static wait_one_event
wait_one ()
{
  while (1)
    {
      /* Non-blocking poll pass over all waitable targets.  */
      for (inferior *inf : all_inferiors ())
	{
	  process_stratum_target *target = inf->process_target ();
	  if (target == nullptr
	      || !target->is_async_p ()
	      || !target->threads_executing)
	    continue;

	  switch_to_inferior_no_thread (inf);

	  wait_one_event event;
	  event.target = target;
	  event.ptid = poll_one_curr_target (&event.ws);

	  if (event.ws.kind () == TARGET_WAITKIND_NO_RESUMED)
	    {
	      /* If nothing is resumed, remove the target from the
		 event loop.  */
	      target_async (false);
	    }
	  else if (event.ws.kind () != TARGET_WAITKIND_IGNORE)
	    return event;
	}

      /* Block waiting for some event.  */

      fd_set readfds;
      int nfds = 0;

      FD_ZERO (&readfds);

      /* Collect the wait fds of all still-waitable targets.  */
      for (inferior *inf : all_inferiors ())
	{
	  process_stratum_target *target = inf->process_target ();
	  if (target == nullptr
	      || !target->is_async_p ()
	      || !target->threads_executing)
	    continue;

	  int fd = target->async_wait_fd ();
	  FD_SET (fd, &readfds);
	  if (nfds <= fd)
	    nfds = fd + 1;
	}

      if (nfds == 0)
	{
	  /* No waitable targets left.  All must be stopped.  */
	  infrun_debug_printf ("no waitable targets left");

	  target_waitstatus ws;
	  ws.set_no_resumed ();
	  return {nullptr, minus_one_ptid, std::move (ws)};
	}

      QUIT;

      int numfds = interruptible_select (nfds, &readfds, 0, nullptr, 0);
      if (numfds < 0)
	{
	  /* EINTR just means retry the poll/select cycle.  */
	  if (errno == EINTR)
	    continue;
	  else
	    perror_with_name ("interruptible_select");
	}
    }
}
/* Save the thread's event and stop reason to process it later.
   Stores WS as TP's pending waitstatus; for SIGTRAP stops, also
   queries the target (or falls back to inspecting breakpoints) to
   record why the thread stopped.  */

static void
save_waitstatus (struct thread_info *tp, const target_waitstatus &ws)
{
  infrun_debug_printf ("saving status %s for %s",
		       ws.to_string ().c_str (),
		       tp->ptid.to_string ().c_str ());

  /* Record for later.  */
  tp->set_pending_waitstatus (ws);

  if (ws.kind () == TARGET_WAITKIND_STOPPED
      && ws.sig () == GDB_SIGNAL_TRAP)
    {
      struct regcache *regcache = get_thread_regcache (tp);
      const address_space *aspace = tp->inf->aspace.get ();
      CORE_ADDR pc = regcache_read_pc (regcache);

      /* May rewind the PC if the trap came from a software
	 breakpoint on a decr_pc_after_break target.  */
      adjust_pc_after_break (tp, tp->pending_waitstatus ());

      scoped_restore_current_thread restore_thread;
      switch_to_thread (tp);

      /* Ask the target first; only fall back to checking inserted
	 breakpoints when the target can't report the stop reason.
	 The order of these tests matters.  */
      if (target_stopped_by_watchpoint ())
	tp->set_stop_reason (TARGET_STOPPED_BY_WATCHPOINT);
      else if (target_supports_stopped_by_sw_breakpoint ()
	       && target_stopped_by_sw_breakpoint ())
	tp->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT);
      else if (target_supports_stopped_by_hw_breakpoint ()
	       && target_stopped_by_hw_breakpoint ())
	tp->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT);
      else if (!target_supports_stopped_by_hw_breakpoint ()
	       && hardware_breakpoint_inserted_here_p (aspace, pc))
	tp->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT);
      else if (!target_supports_stopped_by_sw_breakpoint ()
	       && software_breakpoint_inserted_here_p (aspace, pc))
	tp->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT);
      else if (!thread_has_single_step_breakpoints_set (tp)
	       && currently_stepping (tp))
	tp->set_stop_reason (TARGET_STOPPED_BY_SINGLE_STEP);
    }
}
/* Mark the non-executing threads accordingly.  In all-stop, all
   threads of all processes are stopped when we get any event
   reported.  In non-stop mode, only the event thread stops.  */

static void
mark_non_executing_threads (process_stratum_target *target,
			    ptid_t event_ptid,
			    const target_waitstatus &ws)
{
  ptid_t mark_ptid;

  if (!target_is_non_stop_p ())
    mark_ptid = minus_one_ptid;
  else if (ws.kind () == TARGET_WAITKIND_SIGNALLED
	   || ws.kind () == TARGET_WAITKIND_EXITED)
    {
      /* If we're handling a process exit in non-stop mode, even
	 though threads haven't been deleted yet, one would think
	 that there is nothing to do, as threads of the dead process
	 will be soon deleted, and threads of any other process were
	 left running.  However, on some targets, threads survive a
	 process exit event.  E.g., for the "checkpoint" command,
	 when the current checkpoint/fork exits, linux-fork.c
	 automatically switches to another fork from within
	 target_mourn_inferior, by associating the same
	 inferior/thread to another fork.  We haven't mourned yet at
	 this point, but we must mark any threads left in the
	 process as not-executing so that finish_thread_state marks
	 them stopped (in the user's perspective) if/when we present
	 the stop to the user.  */
      mark_ptid = ptid_t (event_ptid.pid ());
    }
  else
    mark_ptid = event_ptid;

  set_executing (target, mark_ptid, false);

  /* Likewise the resumed flag.  */
  set_resumed (target, mark_ptid, false);
}
/* Handle one event after stopping threads.  If the eventing thread
   reports back any interesting event, we leave it pending.  If the
   eventing thread was in the middle of a displaced step, we
   cancel/finish it, and unless the thread's inferior is being
   detached, put the thread back in the step-over chain.  Returns true
   if there are no resumed threads left in the target (thus there's no
   point in waiting further), false otherwise.  */

static bool
handle_one (const wait_one_event &event)
{
  infrun_debug_printf
    ("%s %s", event.ws.to_string ().c_str (),
     event.ptid.to_string ().c_str ());

  if (event.ws.kind () == TARGET_WAITKIND_NO_RESUMED)
    {
      /* All resumed threads exited.  */
      return true;
    }
  else if (event.ws.kind () == TARGET_WAITKIND_THREAD_EXITED
	   || event.ws.kind () == TARGET_WAITKIND_EXITED
	   || event.ws.kind () == TARGET_WAITKIND_SIGNALLED)
    {
      /* One thread/process exited/signalled.  */

      thread_info *t = nullptr;

      /* The target may have reported just a pid.  If so, try
	 the first non-exited thread.  */
      if (event.ptid.is_pid ())
	{
	  int pid = event.ptid.pid ();
	  inferior *inf = find_inferior_pid (event.target, pid);
	  for (thread_info *tp : inf->non_exited_threads ())
	    {
	      t = tp;
	      break;
	    }

	  /* If there is no available thread, the event would
	     have to be appended to a per-inferior event list,
	     which does not exist (and if it did, we'd have
	     to adjust run control command to be able to
	     resume such an inferior).  We assert here instead
	     of going into an infinite loop.  */
	  gdb_assert (t != nullptr);

	  infrun_debug_printf
	    ("using %s", t->ptid.to_string ().c_str ());
	}
      else
	{
	  t = event.target->find_thread (event.ptid);
	  /* Check if this is the first time we see this thread.
	     Don't bother adding if it individually exited.  */
	  if (t == nullptr
	      && event.ws.kind () != TARGET_WAITKIND_THREAD_EXITED)
	    t = add_thread (event.target, event.ptid);
	}

      if (t != nullptr)
	{
	  /* Set the threads as non-executing to avoid
	     another stop attempt on them.  */
	  switch_to_thread_no_regs (t);
	  mark_non_executing_threads (event.target, event.ptid,
				      event.ws);
	  save_waitstatus (t, event.ws);
	  t->stop_requested = false;

	  if (event.ws.kind () == TARGET_WAITKIND_THREAD_EXITED)
	    {
	      if (displaced_step_finish (t, event.ws)
		  != DISPLACED_STEP_FINISH_STATUS_OK)
		{
		  gdb_assert_not_reached ("displaced_step_finish on "
					  "exited thread failed");
		}
	    }
	}
    }
  else
    {
      /* A stop (or other) event for a single thread.  */
      thread_info *t = event.target->find_thread (event.ptid);
      if (t == nullptr)
	t = add_thread (event.target, event.ptid);

      t->stop_requested = 0;
      t->set_executing (false);
      t->set_resumed (false);
      t->control.may_range_step = 0;

      /* This may be the first time we see the inferior report
	 a stop.  */
      if (t->inf->needs_setup)
	{
	  switch_to_thread_no_regs (t);
	  setup_inferior (0);
	}

      if (event.ws.kind () == TARGET_WAITKIND_STOPPED
	  && event.ws.sig () == GDB_SIGNAL_0)
	{
	  /* We caught the event that we intended to catch, so
	     there's no event to save as pending.  */

	  if (displaced_step_finish (t, event.ws)
	      == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED)
	    {
	      /* Add it back to the step-over queue.  */
	      infrun_debug_printf
		("displaced-step of %s canceled",
		 t->ptid.to_string ().c_str ());

	      t->control.trap_expected = 0;
	      if (!t->inf->detaching)
		global_thread_step_over_chain_enqueue (t);
	    }
	}
      else
	{
	  struct regcache *regcache;

	  infrun_debug_printf
	    ("target_wait %s, saving status for %s",
	     event.ws.to_string ().c_str (),
	     t->ptid.to_string ().c_str ());

	  /* Record for later.  */
	  save_waitstatus (t, event.ws);

	  if (displaced_step_finish (t, event.ws)
	      == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED)
	    {
	      /* Add it back to the step-over queue.  */
	      t->control.trap_expected = 0;
	      if (!t->inf->detaching)
		global_thread_step_over_chain_enqueue (t);
	    }

	  regcache = get_thread_regcache (t);
	  t->set_stop_pc (regcache_read_pc (regcache));

	  infrun_debug_printf ("saved stop_pc=%s for %s "
			       "(currently_stepping=%d)",
			       paddress (current_inferior ()->arch (),
					 t->stop_pc ()),
			       t->ptid.to_string ().c_str (),
			       currently_stepping (t));
	}
    }

  return false;
}
5564 /* Helper for stop_all_threads. wait_one waits for events until it
5565 sees a TARGET_WAITKIND_NO_RESUMED event. When it sees one, it
5566 disables target_async for the target to stop waiting for events
5567 from it. TARGET_WAITKIND_NO_RESUMED can be delayed though,
5568 consider, debugging against gdbserver:
5570 #1 - Threads 1-5 are running, and thread 1 hits a breakpoint.
5572 #2 - gdb processes the breakpoint hit for thread 1, stops all
5573 threads, and steps thread 1 over the breakpoint. while
5574 stopping threads, some other threads reported interesting
5575 events, which were left pending in the thread's objects
5578 #2 - Thread 1 exits (it stepped an exit syscall), and gdbserver
5579 reports the thread exit for thread 1. The event ends up in
5580 remote's stop reply queue.
5582 #3 - That was the last resumed thread, so gdbserver reports
5583 no-resumed, and that event also ends up in remote's stop
5584 reply queue, queued after the thread exit from #2.
5586 #4 - gdb processes the thread exit event, which finishes the
5587 step-over, and so gdb restarts all threads (threads with
5588 pending events are left marked resumed, but aren't set
5589 executing). The no-resumed event is still left pending in
5590 the remote stop reply queue.
5592 #5 - Since there are now resumed threads with pending breakpoint
5593 hits, gdb picks one at random to process next.
5595 #5 - gdb picks the breakpoint hit for thread 2 this time, and that
5596 breakpoint also needs to be stepped over, so gdb stops all
5599 #6 - stop_all_threads counts number of expected stops and calls
5600 wait_one once for each.
5602 #7 - The first wait_one call collects the no-resumed event from #3
5605 #9 - Seeing the no-resumed event, wait_one disables target async
5606 for the remote target, to stop waiting for events from it.
5607 wait_one from here on always return no-resumed directly
5608 without reaching the target.
5610 #10 - stop_all_threads still hasn't seen all the stops it expects,
5611 so it does another pass.
5613 #11 - Since the remote target is not async (disabled in #9),
5614 wait_one doesn't wait on it, so it won't see the expected
5615 stops, and instead returns no-resumed directly.
5617 #12 - stop_all_threads still haven't seen all the stops, so it
5618 does another pass. goto #11, looping forever.
5620 To handle this, we explicitly (re-)enable target async on all
5621 targets that can async every time stop_all_threads goes wait for
5622 the expected stops. */
5625 reenable_target_async ()
5627 for (inferior
*inf
: all_inferiors ())
5629 process_stratum_target
*target
= inf
->process_target ();
5630 if (target
!= nullptr
5631 && target
->threads_executing
5632 && target
->can_async_p ()
5633 && !target
->is_async_p ())
5635 switch_to_inferior_no_thread (inf
);
/* See infrun.h.  Stop all threads (of all relevant targets, or only
   of INF when non-null), requesting stops and waiting for the
   resulting stop events, iterating until two consecutive passes find
   nothing left to stop.  REASON is for debug logging only.  */

void
stop_all_threads (const char *reason, inferior *inf)
{
  /* We may need multiple passes to discover all threads.  */
  int pass;
  int iterations = 0;

  gdb_assert (exists_non_stop_target ());

  INFRUN_SCOPED_DEBUG_START_END ("reason=%s, inf=%d", reason,
				 inf != nullptr ? inf->num : -1);

  infrun_debug_show_threads ("non-exited threads",
			     all_non_exited_threads ());

  scoped_restore_current_thread restore_thread;

  /* Enable thread events on relevant targets.  */
  for (auto *target : all_non_exited_process_targets ())
    {
      if (inf != nullptr && inf->process_target () != target)
	continue;

      switch_to_target_no_thread (target);
      target_thread_events (true);
    }

  SCOPE_EXIT
    {
      /* Disable thread events on relevant targets.  */
      for (auto *target : all_non_exited_process_targets ())
	{
	  if (inf != nullptr && inf->process_target () != target)
	    continue;

	  switch_to_target_no_thread (target);
	  target_thread_events (false);
	}

      /* Use debug_prefixed_printf directly to get a meaningful function
	 name.  */
      if (debug_infrun)
	debug_prefixed_printf ("infrun", "stop_all_threads", "done");
    };

  /* Request threads to stop, and then wait for the stops.  Because
     threads we already know about can spawn more threads while we're
     trying to stop them, and we only learn about new threads when we
     update the thread list, do this in a loop, and keep iterating
     until two passes find no threads that need to be stopped.  */
  for (pass = 0; pass < 2; pass++, iterations++)
    {
      infrun_debug_printf ("pass=%d, iterations=%d", pass, iterations);
      while (1)
	{
	  int waits_needed = 0;

	  for (auto *target : all_non_exited_process_targets ())
	    {
	      if (inf != nullptr && inf->process_target () != target)
		continue;

	      switch_to_target_no_thread (target);
	      update_thread_list ();
	    }

	  /* Go through all threads looking for threads that we need
	     to tell the target to stop.  */
	  for (thread_info *t : all_non_exited_threads ())
	    {
	      if (inf != nullptr && t->inf != inf)
		continue;

	      /* For a single-target setting with an all-stop target,
		 we would not even arrive here.  For a multi-target
		 setting, until GDB is able to handle a mixture of
		 all-stop and non-stop targets, simply skip all-stop
		 targets' threads.  This should be fine due to the
		 protection of 'check_multi_target_resumption'.  */
	      switch_to_thread_no_regs (t);
	      if (!target_is_non_stop_p ())
		continue;

	      if (t->executing ())
		{
		  /* If already stopping, don't request a stop again.
		     We just haven't seen the notification yet.  */
		  if (!t->stop_requested)
		    {
		      infrun_debug_printf ("  %s executing, need stop",
					   t->ptid.to_string ().c_str ());
		      target_stop (t->ptid);
		      t->stop_requested = 1;
		    }
		  else
		    {
		      infrun_debug_printf ("  %s executing, already stopping",
					   t->ptid.to_string ().c_str ());
		    }

		  if (t->stop_requested)
		    waits_needed++;
		}
	      else
		{
		  infrun_debug_printf ("  %s not executing",
				       t->ptid.to_string ().c_str ());

		  /* The thread may be not executing, but still be
		     resumed with a pending status to process.  */
		  t->set_resumed (false);
		}
	    }

	  if (waits_needed == 0)
	    break;

	  /* If we find new threads on the second iteration, restart
	     over.  We want to see two iterations in a row with all
	     threads stopped.  */
	  if (pass > 0)
	    pass = -1;

	  reenable_target_async ();

	  for (int i = 0; i < waits_needed; i++)
	    {
	      wait_one_event event = wait_one ();
	      if (handle_one (event))
		break;
	    }
	}
    }
}
/* Handle a TARGET_WAITKIND_NO_RESUMED event.  Return true if we
   handled the event and should continue waiting.  Return false if we
   should stop and report the event to the user.  */

static bool
handle_no_resumed (struct execution_control_state *ecs)
{
  if (target_can_async_p ())
    {
      /* Is any UI synchronously waiting for the prompt?  */
      bool any_sync = false;

      for (ui *ui : all_uis ())
	if (ui->prompt_state == PROMPT_BLOCKED)
	  {
	    any_sync = true;
	    break;
	  }
      if (!any_sync)
	{
	  /* There were no unwaited-for children left in the target, but,
	     we're not synchronously waiting for events either.  Just
	     ignore.  */

	  infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED (ignoring: bg)");
	  prepare_to_wait (ecs);
	  return true;
	}
    }

  /* Otherwise, if we were running a synchronous execution command, we
     may need to cancel it and give the user back the terminal.

     In non-stop mode, the target can't tell whether we've already
     consumed previous stop events, so it can end up sending us a
     no-resumed event like so:

       #0 - thread 1 is left stopped

       #1 - thread 2 is resumed and hits breakpoint
	      -> TARGET_WAITKIND_STOPPED

       #2 - thread 3 is resumed and exits
	    this is the last resumed thread, so
	      -> TARGET_WAITKIND_NO_RESUMED

       #3 - gdb processes stop for thread 2 and decides to re-resume
	    it.

       #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
	    thread 2 is now resumed, so the event should be ignored.

     IOW, if the stop for thread 2 doesn't end a foreground command,
     then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
     event.  But it could be that the event meant that thread 2 itself
     (or whatever other thread was the last resumed thread) exited.

     To address this we refresh the thread list and check whether we
     have resumed threads _now_.  In the example above, this removes
     thread 3 from the thread list.  If thread 2 was re-resumed, we
     ignore this event.  If we find no thread resumed, then we cancel
     the synchronous command and show "no unwaited-for " to the
     user.  */

  inferior *curr_inf = current_inferior ();

  scoped_restore_current_thread restore_thread;
  update_thread_list ();

  /* If:

       - the current target has no thread executing, and
       - the current inferior is native, and
       - the current inferior is the one which has the terminal, and
       - we did nothing,

     then a Ctrl-C from this point on would remain stuck in the
     kernel, until a thread resumes and dequeues it.  That would
     result in the GDB CLI not reacting to Ctrl-C, not able to
     interrupt the program.  To address this, if the current inferior
     no longer has any thread executing, we give the terminal to some
     other inferior that has at least one thread executing.  */
  bool swap_terminal = true;

  /* Whether to ignore this TARGET_WAITKIND_NO_RESUMED event, or
     whether to report it to the user.  */
  bool ignore_event = false;

  for (thread_info *thread : all_non_exited_threads ())
    {
      if (swap_terminal && thread->executing ())
	{
	  if (thread->inf != curr_inf)
	    {
	      target_terminal::ours ();

	      switch_to_thread (thread);
	      target_terminal::inferior ();
	    }
	  swap_terminal = false;
	}

      if (!ignore_event && thread->resumed ())
	{
	  /* Either there were no unwaited-for children left in the
	     target at some point, but there are now, or some target
	     other than the eventing one has unwaited-for children
	     left.  Just ignore.  */
	  infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED "
			       "(ignoring: found resumed)");

	  ignore_event = true;
	}

      /* Both decisions made -- no need to keep scanning.  */
      if (ignore_event && !swap_terminal)
	break;
    }

  if (ignore_event)
    {
      switch_to_inferior_no_thread (curr_inf);
      prepare_to_wait (ecs);
      return true;
    }

  /* Go ahead and report the event.  */
  return false;
}
/* Handle a TARGET_WAITKIND_THREAD_EXITED event.  Return true if we
   handled the event and should continue waiting.  Return false if we
   should stop and report the event to the user.  */

static bool
handle_thread_exited (execution_control_state *ecs)
{
  context_switch (ecs);

  /* Clear these so we don't re-start the thread stepping over a
     breakpoint/watchpoint.  */
  ecs->event_thread->stepping_over_breakpoint = 0;
  ecs->event_thread->stepping_over_watchpoint = 0;

  /* If the thread had an FSM, then abort the command.  But only after
     finishing the step over, as in non-stop mode, aborting this
     thread's command should not interfere with other threads.  We
     must check this before finish_step_over, however, which may
     update the thread list and delete the event thread.  */
  bool abort_cmd = (ecs->event_thread->thread_fsm () != nullptr);

  /* Mark the thread exited right now, because finish_step_over may
     update the thread list and that may delete the thread silently
     (depending on target), while we always want to emit the "[Thread
     ... exited]" notification.  Don't actually delete the thread yet,
     because we need to pass its pointer down to finish_step_over.  */
  set_thread_exited (ecs->event_thread);

  /* Maybe the thread was doing a step-over, if so release
     resources and start any further pending step-overs.

     If we are on a non-stop target and the thread was doing an
     in-line step, this also restarts the other threads.  */
  int ret = finish_step_over (ecs);

  /* finish_step_over returns true if it moves ecs' wait status
     back into the thread, so that we go handle another pending
     event before this one.  But we know it never does that if
     the event thread has exited.  */
  gdb_assert (ret == 0);

  if (abort_cmd)
    {
      /* We're stopping for the thread exit event.  Switch to the
	 event thread again, as finish_step_over may have switched
	 threads.  */
      switch_to_thread (ecs->event_thread);
      ecs->event_thread = nullptr;
      return false;
    }

  /* If finish_step_over started a new in-line step-over, don't
     try to restart anything else.  */
  if (step_over_info_valid_p ())
    {
      delete_thread (ecs->event_thread);
      prepare_to_wait (ecs);
      return true;
    }

  /* Maybe we are on an all-stop target and we got this event
     while doing a step-like command on another thread.  If so,
     go back to doing that.  If this thread was stepping,
     switch_back_to_stepped_thread will consider that the thread
     was interrupted mid-step and will try keep stepping it.  We
     don't want that, the thread is gone.  So clear the proceed
     status so it doesn't do that.  */
  clear_proceed_status_thread (ecs->event_thread);
  if (switch_back_to_stepped_thread (ecs))
    {
      delete_thread (ecs->event_thread);
      return true;
    }

  inferior *inf = ecs->event_thread->inf;
  bool slock_applies = schedlock_applies (ecs->event_thread);

  delete_thread (ecs->event_thread);
  ecs->event_thread = nullptr;

  /* Continue handling the event as if we had gotten a
     TARGET_WAITKIND_NO_RESUMED.  */
  auto handle_as_no_resumed = [ecs] ()
  {
    /* handle_no_resumed doesn't really look at the event kind, but
       normal_stop does.  */
    ecs->ws.set_no_resumed ();
    ecs->event_thread = nullptr;
    ecs->ptid = minus_one_ptid;

    /* Re-record the last target status.  */
    set_last_target_status (ecs->target, ecs->ptid, ecs->ws);

    return handle_no_resumed (ecs);
  };

  /* If we are on an all-stop target, the target has stopped all
     threads to report the event.  We don't actually want to
     stop, so restart the threads.  */
  if (!target_is_non_stop_p ())
    {
      if (slock_applies)
	{
	  /* Since the target is !non-stop, then everything is stopped
	     at this point, and we can't assume we'll get further
	     events until we resume the target again.  Handle this
	     event like if it were a TARGET_WAITKIND_NO_RESUMED.  Note
	     this refreshes the thread list and checks whether there
	     are other resumed threads before deciding whether to
	     print "no-unwaited-for left".  This is important because
	     the user could have done:

	       (gdb) set scheduler-locking on

	     ... and only one of the threads exited.  */
	  return handle_as_no_resumed ();
	}
      else
	{
	  /* Switch to the first non-exited thread we can find, and
	     resume.  */
	  auto range = inf->non_exited_threads ();
	  if (range.begin () == range.end ())
	    {
	      /* Looks like the target reported a
		 TARGET_WAITKIND_THREAD_EXITED for its last known
		 thread.  */
	      return handle_as_no_resumed ();
	    }
	  thread_info *non_exited_thread = *range.begin ();
	  switch_to_thread (non_exited_thread);
	  insert_breakpoints ();
	  resume (GDB_SIGNAL_0);
	}
    }

  prepare_to_wait (ecs);
  return true;
}
6052 /* Given an execution control state that has been freshly filled in by
6053 an event from the inferior, figure out what it means and take
6056 The alternatives are:
6058 1) stop_waiting and return; to really stop and return to the
6061 2) keep_going and return; to wait for the next event (set
6062 ecs->event_thread->stepping_over_breakpoint to 1 to single step
6066 handle_inferior_event (struct execution_control_state
*ecs
)
6068 /* Make sure that all temporary struct value objects that were
6069 created during the handling of the event get deleted at the
6071 scoped_value_mark free_values
;
6073 infrun_debug_printf ("%s", ecs
->ws
.to_string ().c_str ());
6075 if (ecs
->ws
.kind () == TARGET_WAITKIND_IGNORE
)
6077 /* We had an event in the inferior, but we are not interested in
6078 handling it at this level. The lower layers have already
6079 done what needs to be done, if anything.
6081 One of the possible circumstances for this is when the
6082 inferior produces output for the console. The inferior has
6083 not stopped, and we are ignoring the event. Another possible
6084 circumstance is any event which the lower level knows will be
6085 reported multiple times without an intervening resume. */
6086 prepare_to_wait (ecs
);
6090 if (ecs
->ws
.kind () == TARGET_WAITKIND_NO_RESUMED
6091 && handle_no_resumed (ecs
))
6094 /* Cache the last target/ptid/waitstatus. */
6095 set_last_target_status (ecs
->target
, ecs
->ptid
, ecs
->ws
);
6097 /* Always clear state belonging to the previous time we stopped. */
6098 stop_stack_dummy
= STOP_NONE
;
6100 if (ecs
->ws
.kind () == TARGET_WAITKIND_NO_RESUMED
)
6102 /* No unwaited-for children left. IOW, all resumed children
6108 if (ecs
->ws
.kind () != TARGET_WAITKIND_EXITED
6109 && ecs
->ws
.kind () != TARGET_WAITKIND_SIGNALLED
)
6111 ecs
->event_thread
= ecs
->target
->find_thread (ecs
->ptid
);
6112 /* If it's a new thread, add it to the thread database. */
6113 if (ecs
->event_thread
== nullptr)
6114 ecs
->event_thread
= add_thread (ecs
->target
, ecs
->ptid
);
6116 /* Disable range stepping. If the next step request could use a
6117 range, this will be end up re-enabled then. */
6118 ecs
->event_thread
->control
.may_range_step
= 0;
6121 /* Dependent on valid ECS->EVENT_THREAD. */
6122 adjust_pc_after_break (ecs
->event_thread
, ecs
->ws
);
6124 /* Dependent on the current PC value modified by adjust_pc_after_break. */
6125 reinit_frame_cache ();
6127 breakpoint_retire_moribund ();
6129 /* First, distinguish signals caused by the debugger from signals
6130 that have to do with the program's own actions. Note that
6131 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
6132 on the operating system version. Here we detect when a SIGILL or
6133 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
6134 something similar for SIGSEGV, since a SIGSEGV will be generated
6135 when we're trying to execute a breakpoint instruction on a
6136 non-executable stack. This happens for call dummy breakpoints
6137 for architectures like SPARC that place call dummies on the
6139 if (ecs
->ws
.kind () == TARGET_WAITKIND_STOPPED
6140 && (ecs
->ws
.sig () == GDB_SIGNAL_ILL
6141 || ecs
->ws
.sig () == GDB_SIGNAL_SEGV
6142 || ecs
->ws
.sig () == GDB_SIGNAL_EMT
))
6144 struct regcache
*regcache
= get_thread_regcache (ecs
->event_thread
);
6146 if (breakpoint_inserted_here_p (ecs
->event_thread
->inf
->aspace
.get (),
6147 regcache_read_pc (regcache
)))
6149 infrun_debug_printf ("Treating signal as SIGTRAP");
6150 ecs
->ws
.set_stopped (GDB_SIGNAL_TRAP
);
6154 mark_non_executing_threads (ecs
->target
, ecs
->ptid
, ecs
->ws
);
6156 switch (ecs
->ws
.kind ())
6158 case TARGET_WAITKIND_LOADED
:
6160 context_switch (ecs
);
6161 /* Ignore gracefully during startup of the inferior, as it might
6162 be the shell which has just loaded some objects, otherwise
6163 add the symbols for the newly loaded objects. Also ignore at
6164 the beginning of an attach or remote session; we will query
6165 the full list of libraries once the connection is
6168 stop_kind stop_soon
= get_inferior_stop_soon (ecs
);
6169 if (stop_soon
== NO_STOP_QUIETLY
)
6171 struct regcache
*regcache
;
6173 regcache
= get_thread_regcache (ecs
->event_thread
);
6175 handle_solib_event ();
6177 ecs
->event_thread
->set_stop_pc (regcache_read_pc (regcache
));
6178 address_space
*aspace
= ecs
->event_thread
->inf
->aspace
.get ();
6179 ecs
->event_thread
->control
.stop_bpstat
6180 = bpstat_stop_status_nowatch (aspace
,
6181 ecs
->event_thread
->stop_pc (),
6182 ecs
->event_thread
, ecs
->ws
);
6184 if (handle_stop_requested (ecs
))
6187 if (bpstat_causes_stop (ecs
->event_thread
->control
.stop_bpstat
))
6189 /* A catchpoint triggered. */
6190 process_event_stop_test (ecs
);
6194 /* If requested, stop when the dynamic linker notifies
6195 gdb of events. This allows the user to get control
6196 and place breakpoints in initializer routines for
6197 dynamically loaded objects (among other things). */
6198 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
6199 if (stop_on_solib_events
)
6201 /* Make sure we print "Stopped due to solib-event" in
6203 stop_print_frame
= true;
6210 /* If we are skipping through a shell, or through shared library
6211 loading that we aren't interested in, resume the program. If
6212 we're running the program normally, also resume. */
6213 if (stop_soon
== STOP_QUIETLY
|| stop_soon
== NO_STOP_QUIETLY
)
6215 /* Loading of shared libraries might have changed breakpoint
6216 addresses. Make sure new breakpoints are inserted. */
6217 if (stop_soon
== NO_STOP_QUIETLY
)
6218 insert_breakpoints ();
6219 resume (GDB_SIGNAL_0
);
6220 prepare_to_wait (ecs
);
6224 /* But stop if we're attaching or setting up a remote
6226 if (stop_soon
== STOP_QUIETLY_NO_SIGSTOP
6227 || stop_soon
== STOP_QUIETLY_REMOTE
)
6229 infrun_debug_printf ("quietly stopped");
6234 internal_error (_("unhandled stop_soon: %d"), (int) stop_soon
);
6237 case TARGET_WAITKIND_SPURIOUS
:
6238 if (handle_stop_requested (ecs
))
6240 context_switch (ecs
);
6241 resume (GDB_SIGNAL_0
);
6242 prepare_to_wait (ecs
);
6245 case TARGET_WAITKIND_THREAD_CREATED
:
6246 if (handle_stop_requested (ecs
))
6248 context_switch (ecs
);
6249 if (!switch_back_to_stepped_thread (ecs
))
6253 case TARGET_WAITKIND_THREAD_EXITED
:
6254 if (handle_thread_exited (ecs
))
6259 case TARGET_WAITKIND_EXITED
:
6260 case TARGET_WAITKIND_SIGNALLED
:
6262 /* Depending on the system, ecs->ptid may point to a thread or
6263 to a process. On some targets, target_mourn_inferior may
6264 need to have access to the just-exited thread. That is the
6265 case of GNU/Linux's "checkpoint" support, for example.
6266 Call the switch_to_xxx routine as appropriate. */
6267 thread_info
*thr
= ecs
->target
->find_thread (ecs
->ptid
);
6269 switch_to_thread (thr
);
6272 inferior
*inf
= find_inferior_ptid (ecs
->target
, ecs
->ptid
);
6273 switch_to_inferior_no_thread (inf
);
6276 handle_vfork_child_exec_or_exit (0);
6277 target_terminal::ours (); /* Must do this before mourn anyway. */
6279 /* Clearing any previous state of convenience variables. */
6280 clear_exit_convenience_vars ();
6282 if (ecs
->ws
.kind () == TARGET_WAITKIND_EXITED
)
6284 /* Record the exit code in the convenience variable $_exitcode, so
6285 that the user can inspect this again later. */
6286 set_internalvar_integer (lookup_internalvar ("_exitcode"),
6287 (LONGEST
) ecs
->ws
.exit_status ());
6289 /* Also record this in the inferior itself. */
6290 current_inferior ()->has_exit_code
= true;
6291 current_inferior ()->exit_code
= (LONGEST
) ecs
->ws
.exit_status ();
6293 /* Support the --return-child-result option. */
6294 return_child_result_value
= ecs
->ws
.exit_status ();
6296 interps_notify_exited (ecs
->ws
.exit_status ());
6300 struct gdbarch
*gdbarch
= current_inferior ()->arch ();
6302 if (gdbarch_gdb_signal_to_target_p (gdbarch
))
6304 /* Set the value of the internal variable $_exitsignal,
6305 which holds the signal uncaught by the inferior. */
6306 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
6307 gdbarch_gdb_signal_to_target (gdbarch
,
6312 /* We don't have access to the target's method used for
6313 converting between signal numbers (GDB's internal
6314 representation <-> target's representation).
6315 Therefore, we cannot do a good job at displaying this
6316 information to the user. It's better to just warn
6317 her about it (if infrun debugging is enabled), and
6319 infrun_debug_printf ("Cannot fill $_exitsignal with the correct "
6323 interps_notify_signal_exited (ecs
->ws
.sig ());
6326 gdb_flush (gdb_stdout
);
6327 target_mourn_inferior (inferior_ptid
);
6328 stop_print_frame
= false;
6332 case TARGET_WAITKIND_FORKED
:
6333 case TARGET_WAITKIND_VFORKED
:
6334 case TARGET_WAITKIND_THREAD_CLONED
:
6336 displaced_step_finish (ecs
->event_thread
, ecs
->ws
);
6338 /* Start a new step-over in another thread if there's one that
6342 context_switch (ecs
);
6344 /* Immediately detach breakpoints from the child before there's
6345 any chance of letting the user delete breakpoints from the
6346 breakpoint lists. If we don't do this early, it's easy to
6347 leave left over traps in the child, vis: "break foo; catch
6348 fork; c; <fork>; del; c; <child calls foo>". We only follow
6349 the fork on the last `continue', and by that time the
6350 breakpoint at "foo" is long gone from the breakpoint table.
6351 If we vforked, then we don't need to unpatch here, since both
6352 parent and child are sharing the same memory pages; we'll
6353 need to unpatch at follow/detach time instead to be certain
6354 that new breakpoints added between catchpoint hit time and
6355 vfork follow are detached. */
6356 if (ecs
->ws
.kind () == TARGET_WAITKIND_FORKED
)
6358 /* This won't actually modify the breakpoint list, but will
6359 physically remove the breakpoints from the child. */
6360 detach_breakpoints (ecs
->ws
.child_ptid ());
6363 delete_just_stopped_threads_single_step_breakpoints ();
6365 /* In case the event is caught by a catchpoint, remember that
6366 the event is to be followed at the next resume of the thread,
6367 and not immediately. */
6368 ecs
->event_thread
->pending_follow
= ecs
->ws
;
6370 ecs
->event_thread
->set_stop_pc
6371 (regcache_read_pc (get_thread_regcache (ecs
->event_thread
)));
6373 ecs
->event_thread
->control
.stop_bpstat
6374 = bpstat_stop_status_nowatch (ecs
->event_thread
->inf
->aspace
.get (),
6375 ecs
->event_thread
->stop_pc (),
6376 ecs
->event_thread
, ecs
->ws
);
6378 if (handle_stop_requested (ecs
))
6381 /* If no catchpoint triggered for this, then keep going. Note
6382 that we're interested in knowing the bpstat actually causes a
6383 stop, not just if it may explain the signal. Software
6384 watchpoints, for example, always appear in the bpstat. */
6385 if (!bpstat_causes_stop (ecs
->event_thread
->control
.stop_bpstat
))
6388 = (ecs
->ws
.kind () != TARGET_WAITKIND_THREAD_CLONED
6389 && follow_fork_mode_string
== follow_fork_mode_child
);
6391 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
6393 process_stratum_target
*targ
6394 = ecs
->event_thread
->inf
->process_target ();
6397 if (ecs
->ws
.kind () != TARGET_WAITKIND_THREAD_CLONED
)
6398 should_resume
= follow_fork ();
6401 should_resume
= true;
6402 inferior
*inf
= ecs
->event_thread
->inf
;
6403 inf
->top_target ()->follow_clone (ecs
->ws
.child_ptid ());
6404 ecs
->event_thread
->pending_follow
.set_spurious ();
6407 /* Note that one of these may be an invalid pointer,
6408 depending on detach_fork. */
6409 thread_info
*parent
= ecs
->event_thread
;
6410 thread_info
*child
= targ
->find_thread (ecs
->ws
.child_ptid ());
6412 /* At this point, the parent is marked running, and the
6413 child is marked stopped. */
6415 /* If not resuming the parent, mark it stopped. */
6416 if (ecs
->ws
.kind () != TARGET_WAITKIND_THREAD_CLONED
6417 && follow_child
&& !detach_fork
&& !non_stop
&& !sched_multi
)
6418 parent
->set_running (false);
6420 /* If resuming the child, mark it running. */
6421 if ((ecs
->ws
.kind () == TARGET_WAITKIND_THREAD_CLONED
6422 && !schedlock_applies (ecs
->event_thread
))
6423 || (ecs
->ws
.kind () != TARGET_WAITKIND_THREAD_CLONED
6425 || (!detach_fork
&& (non_stop
|| sched_multi
)))))
6426 child
->set_running (true);
6428 /* In non-stop mode, also resume the other branch. */
6429 if ((ecs
->ws
.kind () == TARGET_WAITKIND_THREAD_CLONED
6430 && target_is_non_stop_p ()
6431 && !schedlock_applies (ecs
->event_thread
))
6432 || (ecs
->ws
.kind () != TARGET_WAITKIND_THREAD_CLONED
6433 && (!detach_fork
&& (non_stop
6435 && target_is_non_stop_p ())))))
6438 switch_to_thread (parent
);
6440 switch_to_thread (child
);
6442 ecs
->event_thread
= inferior_thread ();
6443 ecs
->ptid
= inferior_ptid
;
6448 switch_to_thread (child
);
6450 switch_to_thread (parent
);
6452 ecs
->event_thread
= inferior_thread ();
6453 ecs
->ptid
= inferior_ptid
;
6457 /* Never call switch_back_to_stepped_thread if we are waiting for
6458 vfork-done (waiting for an external vfork child to exec or
6459 exit). We will resume only the vforking thread for the purpose
6460 of collecting the vfork-done event, and we will restart any
6461 step once the critical shared address space window is done. */
6464 && parent
->inf
->thread_waiting_for_vfork_done
!= nullptr)
6465 || !switch_back_to_stepped_thread (ecs
))
6472 process_event_stop_test (ecs
);
6475 case TARGET_WAITKIND_VFORK_DONE
:
6476 /* Done with the shared memory region. Re-insert breakpoints in
6477 the parent, and keep going. */
6479 context_switch (ecs
);
6481 handle_vfork_done (ecs
->event_thread
);
6482 gdb_assert (inferior_thread () == ecs
->event_thread
);
6484 if (handle_stop_requested (ecs
))
6487 if (!switch_back_to_stepped_thread (ecs
))
6489 gdb_assert (inferior_thread () == ecs
->event_thread
);
6490 /* This also takes care of reinserting breakpoints in the
6491 previously locked inferior. */
6496 case TARGET_WAITKIND_EXECD
:
6498 /* Note we can't read registers yet (the stop_pc), because we
6499 don't yet know the inferior's post-exec architecture.
6500 'stop_pc' is explicitly read below instead. */
6501 switch_to_thread_no_regs (ecs
->event_thread
);
6503 /* Do whatever is necessary to the parent branch of the vfork. */
6504 handle_vfork_child_exec_or_exit (1);
6506 /* This causes the eventpoints and symbol table to be reset.
6507 Must do this now, before trying to determine whether to
6509 follow_exec (inferior_ptid
, ecs
->ws
.execd_pathname ());
6511 /* In follow_exec we may have deleted the original thread and
6512 created a new one. Make sure that the event thread is the
6513 execd thread for that case (this is a nop otherwise). */
6514 ecs
->event_thread
= inferior_thread ();
6516 ecs
->event_thread
->set_stop_pc
6517 (regcache_read_pc (get_thread_regcache (ecs
->event_thread
)));
6519 ecs
->event_thread
->control
.stop_bpstat
6520 = bpstat_stop_status_nowatch (ecs
->event_thread
->inf
->aspace
.get (),
6521 ecs
->event_thread
->stop_pc (),
6522 ecs
->event_thread
, ecs
->ws
);
6524 if (handle_stop_requested (ecs
))
6527 /* If no catchpoint triggered for this, then keep going. */
6528 if (!bpstat_causes_stop (ecs
->event_thread
->control
.stop_bpstat
))
6530 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
6534 process_event_stop_test (ecs
);
6537 /* Be careful not to try to gather much state about a thread
6538 that's in a syscall. It's frequently a losing proposition. */
6539 case TARGET_WAITKIND_SYSCALL_ENTRY
:
6540 /* Getting the current syscall number. */
6541 if (handle_syscall_event (ecs
) == 0)
6542 process_event_stop_test (ecs
);
6545 /* Before examining the threads further, step this thread to
6546 get it entirely out of the syscall. (We get notice of the
6547 event when the thread is just on the verge of exiting a
6548 syscall. Stepping one instruction seems to get it back
6550 case TARGET_WAITKIND_SYSCALL_RETURN
:
6551 if (handle_syscall_event (ecs
) == 0)
6552 process_event_stop_test (ecs
);
6555 case TARGET_WAITKIND_STOPPED
:
6556 handle_signal_stop (ecs
);
6559 case TARGET_WAITKIND_NO_HISTORY
:
6560 /* Reverse execution: target ran out of history info. */
6562 /* Switch to the stopped thread. */
6563 context_switch (ecs
);
6564 infrun_debug_printf ("stopped");
6566 delete_just_stopped_threads_single_step_breakpoints ();
6567 ecs
->event_thread
->set_stop_pc
6568 (regcache_read_pc (get_thread_regcache (inferior_thread ())));
6570 if (handle_stop_requested (ecs
))
6573 interps_notify_no_history ();
/* Restart threads back to what they were trying to do back when we
   paused them (because of an in-line step-over or vfork, for example).
   The EVENT_THREAD thread is ignored (not restarted).

   If INF is non-nullptr, only resume threads from INF.  */

static void
restart_threads (struct thread_info *event_thread, inferior *inf)
{
  INFRUN_SCOPED_DEBUG_START_END ("event_thread=%s, inf=%d",
				 event_thread->ptid.to_string ().c_str (),
				 inf != nullptr ? inf->num : -1);

  /* Must not be called while an in-line step-over is in progress --
     the stopped threads would be restarted prematurely.  */
  gdb_assert (!step_over_info_valid_p ());

  /* In case the instruction just stepped spawned a new thread.  */
  update_thread_list ();

  for (thread_info *tp : all_non_exited_threads ())
    {
      /* Honor the INF filter: skip threads of other inferiors.  */
      if (inf != nullptr && tp->inf != inf)
	continue;

      /* Don't touch threads of an inferior that is being detached.  */
      if (tp->inf->detaching)
	{
	  infrun_debug_printf ("restart threads: [%s] inferior detaching",
			       tp->ptid.to_string ().c_str ());
	  continue;
	}

      switch_to_thread_no_regs (tp);

      /* The event thread itself is deliberately left alone.  */
      if (tp == event_thread)
	{
	  infrun_debug_printf ("restart threads: [%s] is event thread",
			       tp->ptid.to_string ().c_str ());
	  continue;
	}

      /* Only restart threads that the user meant to be running.  */
      if (!(tp->state == THREAD_RUNNING || tp->control.in_infcall))
	{
	  infrun_debug_printf ("restart threads: [%s] not meant to be running",
			       tp->ptid.to_string ().c_str ());
	  continue;
	}

      /* Already resumed: nothing to do, but sanity-check that it is
	 either really executing or has an event already collected.  */
      if (tp->resumed ())
	{
	  infrun_debug_printf ("restart threads: [%s] resumed",
			       tp->ptid.to_string ().c_str ());
	  gdb_assert (tp->executing () || tp->has_pending_waitstatus ());
	  continue;
	}

      /* Threads queued for a step-over are started by the step-over
	 machinery, not here.  */
      if (thread_is_in_step_over_chain (tp))
	{
	  infrun_debug_printf ("restart threads: [%s] needs step-over",
			       tp->ptid.to_string ().c_str ());
	  gdb_assert (!tp->resumed ());
	  continue;
	}

      /* A thread with an event already collected only needs to be
	 marked resumed so its pending event is (re)considered later.  */
      if (tp->has_pending_waitstatus ())
	{
	  infrun_debug_printf ("restart threads: [%s] has pending status",
			       tp->ptid.to_string ().c_str ());
	  tp->set_resumed (true);
	  continue;
	}

      gdb_assert (!tp->stop_requested);

      /* If some thread needs to start a step-over at this point, it
	 should still be in the step-over queue, and thus skipped
	 above.  */
      if (thread_still_needs_step_over (tp))
	{
	  internal_error ("thread [%s] needs a step-over, but not in "
			  "step-over queue\n",
			  tp->ptid.to_string ().c_str ());
	}

      if (currently_stepping (tp))
	{
	  /* Resume the thread preserving its step request.  */
	  infrun_debug_printf ("restart threads: [%s] was stepping",
			       tp->ptid.to_string ().c_str ());
	  keep_going_stepped_thread (tp);
	}
      else
	{
	  /* Plain continue, delivering no signal.  */
	  infrun_debug_printf ("restart threads: [%s] continuing",
			       tp->ptid.to_string ().c_str ());
	  execution_control_state ecs (tp);
	  switch_to_thread (tp);
	  keep_going_pass_signal (&ecs);
	}
    }
}
/* Callback for iterate_over_threads.  Find a resumed thread that has
   a pending waitstatus.  */

static int
resumed_thread_with_pending_status (struct thread_info *tp,
				    void *arg)  /* arg unused -- TODO confirm against caller */
{
  /* Non-zero stops the iteration and selects TP.  */
  return tp->resumed () && tp->has_pending_waitstatus ();
}
/* Called when we get an event that may finish an in-line or
   out-of-line (displaced stepping) step-over started previously.
   Return true if the event is processed and we should go back to the
   event loop; false if the caller should continue processing the
   event.  */

static bool
finish_step_over (struct execution_control_state *ecs)
{
  /* Finish any displaced step pending on the event thread (this may
     adjust the PC, so it must happen before stop_pc is read).  */
  displaced_step_finish (ecs->event_thread, ecs->ws);

  bool had_step_over_info = step_over_info_valid_p ();

  if (had_step_over_info)
    {
      /* If we're stepping over a breakpoint with all threads locked,
	 then only the thread that was stepped should be reporting
	 back an event.  */
      gdb_assert (ecs->event_thread->control.trap_expected);

      update_thread_events_after_step_over (ecs->event_thread, ecs->ws);

      clear_step_over_info ();
    }

  /* The restart logic below only applies to non-stop targets; in
     all-stop everything was stopped anyway.  */
  if (!target_is_non_stop_p ())
    return false;

  /* Start a new step-over in another thread if there's one that
     needs it.  */
  start_step_over ();

  /* If we were stepping over a breakpoint before, and haven't started
     a new in-line step-over sequence, then restart all other threads
     (except the event thread).  We can't do this in all-stop, as then
     e.g., we wouldn't be able to issue any other remote packet until
     these other threads stop.  */
  if (had_step_over_info && !step_over_info_valid_p ())
    {
      struct thread_info *pending;

      /* If we only have threads with pending statuses, the restart
	 below won't restart any thread and so nothing re-inserts the
	 breakpoint we just stepped over.  But we need it inserted
	 when we later process the pending events, otherwise if
	 another thread has a pending event for this breakpoint too,
	 we'd discard its event (because the breakpoint that
	 originally caused the event was no longer inserted).  */
      context_switch (ecs);
      insert_breakpoints ();

      restart_threads (ecs->event_thread);

      /* If we have events pending, go through handle_inferior_event
	 again, picking up a pending event at random.  This avoids
	 thread starvation.  */

      /* But not if we just stepped over a watchpoint in order to let
	 the instruction execute so we can evaluate its expression.
	 The set of watchpoints that triggered is recorded in the
	 breakpoint objects themselves (see bp->watchpoint_triggered).
	 If we processed another event first, that other event could
	 clobber this info.  */
      if (ecs->event_thread->stepping_over_watchpoint)
	return false;

      /* The code below is meant to avoid one thread hogging the event
	 loop by doing constant in-line step overs.  If the stepping
	 thread exited, there's no risk for this to happen, so we can
	 safely let our caller process the event immediately.  */
      if (ecs->ws.kind () == TARGET_WAITKIND_THREAD_EXITED)
	return false;

      pending = iterate_over_threads (resumed_thread_with_pending_status,
				      nullptr);
      if (pending != nullptr)
	{
	  struct thread_info *tp = ecs->event_thread;
	  struct regcache *regcache;

	  infrun_debug_printf ("found resumed threads with "
			       "pending events, saving status");

	  gdb_assert (pending != tp);

	  /* Record the event thread's event for later.  */
	  save_waitstatus (tp, ecs->ws);
	  /* This was cleared early, by handle_inferior_event.  Set it
	     so this pending event is considered by the next wait.  */
	  tp->set_resumed (true);

	  gdb_assert (!tp->executing ());

	  regcache = get_thread_regcache (tp);
	  tp->set_stop_pc (regcache_read_pc (regcache));

	  infrun_debug_printf ("saved stop_pc=%s for %s "
			       "(currently_stepping=%d)",
			       paddress (current_inferior ()->arch (),
					 tp->stop_pc ()),
			       tp->ptid.to_string ().c_str (),
			       currently_stepping (tp));

	  /* This in-line step-over finished; clear this so we won't
	     start a new one.  This is what handle_signal_stop would
	     do, if we returned false.  */
	  tp->stepping_over_breakpoint = 0;

	  /* Wake up the event loop again.  */
	  mark_async_event_handler (infrun_async_inferior_event_token);

	  prepare_to_wait (ecs);
	  return true;
	}
    }

  return false;
}
/* Notify interpreters and observers that the inferior received
   signal SIG.  Interpreters are notified first, then the
   gdb::observers chain.  */

void
notify_signal_received (gdb_signal sig)
{
  interps_notify_signal_received (sig);
  gdb::observers::signal_received.notify (sig);
}
/* Notify interpreters and observers of a normal stop.  BS is the
   bpstat describing what caused the stop; PRINT_FRAME is non-zero if
   the stop location should be displayed.  Interpreters are notified
   first, then the gdb::observers chain.  */

void
notify_normal_stop (bpstat *bs, int print_frame)
{
  interps_notify_normal_stop (bs, print_frame);
  gdb::observers::normal_stop.notify (bs, print_frame);
}
/* Notify interpreters and observers that the user changed the
   selected context (thread/frame/inferior, per SELECTION).
   Interpreters are notified first, then the gdb::observers chain.  */

void notify_user_selected_context_changed (user_selected_what selection)
{
  interps_notify_user_selected_context_changed (selection);
  gdb::observers::user_selected_context_changed.notify (selection);
}
6835 /* Come here when the program has stopped with a signal. */
6838 handle_signal_stop (struct execution_control_state
*ecs
)
6840 frame_info_ptr frame
;
6841 struct gdbarch
*gdbarch
;
6842 int stopped_by_watchpoint
;
6843 enum stop_kind stop_soon
;
6846 gdb_assert (ecs
->ws
.kind () == TARGET_WAITKIND_STOPPED
);
6848 ecs
->event_thread
->set_stop_signal (ecs
->ws
.sig ());
6850 /* Do we need to clean up the state of a thread that has
6851 completed a displaced single-step? (Doing so usually affects
6852 the PC, so do it here, before we set stop_pc.) */
6853 if (finish_step_over (ecs
))
6856 /* If we either finished a single-step or hit a breakpoint, but
6857 the user wanted this thread to be stopped, pretend we got a
6858 SIG0 (generic unsignaled stop). */
6859 if (ecs
->event_thread
->stop_requested
6860 && ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
)
6861 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
6863 ecs
->event_thread
->set_stop_pc
6864 (regcache_read_pc (get_thread_regcache (ecs
->event_thread
)));
6866 context_switch (ecs
);
6868 if (deprecated_context_hook
)
6869 deprecated_context_hook (ecs
->event_thread
->global_num
);
6873 struct regcache
*regcache
= get_thread_regcache (ecs
->event_thread
);
6874 struct gdbarch
*reg_gdbarch
= regcache
->arch ();
6877 ("stop_pc=%s", paddress (reg_gdbarch
, ecs
->event_thread
->stop_pc ()));
6878 if (target_stopped_by_watchpoint ())
6882 infrun_debug_printf ("stopped by watchpoint");
6884 if (target_stopped_data_address (current_inferior ()->top_target (),
6886 infrun_debug_printf ("stopped data address=%s",
6887 paddress (reg_gdbarch
, addr
));
6889 infrun_debug_printf ("(no data address available)");
6893 /* This is originated from start_remote(), start_inferior() and
6894 shared libraries hook functions. */
6895 stop_soon
= get_inferior_stop_soon (ecs
);
6896 if (stop_soon
== STOP_QUIETLY
|| stop_soon
== STOP_QUIETLY_REMOTE
)
6898 infrun_debug_printf ("quietly stopped");
6899 stop_print_frame
= true;
6904 /* This originates from attach_command(). We need to overwrite
6905 the stop_signal here, because some kernels don't ignore a
6906 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
6907 See more comments in inferior.h. On the other hand, if we
6908 get a non-SIGSTOP, report it to the user - assume the backend
6909 will handle the SIGSTOP if it should show up later.
6911 Also consider that the attach is complete when we see a
6912 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
6913 target extended-remote report it instead of a SIGSTOP
6914 (e.g. gdbserver). We already rely on SIGTRAP being our
6915 signal, so this is no exception.
6917 Also consider that the attach is complete when we see a
6918 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
6919 the target to stop all threads of the inferior, in case the
6920 low level attach operation doesn't stop them implicitly. If
6921 they weren't stopped implicitly, then the stub will report a
6922 GDB_SIGNAL_0, meaning: stopped for no particular reason
6923 other than GDB's request. */
6924 if (stop_soon
== STOP_QUIETLY_NO_SIGSTOP
6925 && (ecs
->event_thread
->stop_signal () == GDB_SIGNAL_STOP
6926 || ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
6927 || ecs
->event_thread
->stop_signal () == GDB_SIGNAL_0
))
6929 stop_print_frame
= true;
6931 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
6935 /* At this point, get hold of the now-current thread's frame. */
6936 frame
= get_current_frame ();
6937 gdbarch
= get_frame_arch (frame
);
6939 /* Pull the single step breakpoints out of the target. */
6940 if (ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
)
6942 struct regcache
*regcache
;
6945 regcache
= get_thread_regcache (ecs
->event_thread
);
6946 const address_space
*aspace
= ecs
->event_thread
->inf
->aspace
.get ();
6948 pc
= regcache_read_pc (regcache
);
6950 /* However, before doing so, if this single-step breakpoint was
6951 actually for another thread, set this thread up for moving
6953 if (!thread_has_single_step_breakpoint_here (ecs
->event_thread
,
6956 if (single_step_breakpoint_inserted_here_p (aspace
, pc
))
6958 infrun_debug_printf ("[%s] hit another thread's single-step "
6960 ecs
->ptid
.to_string ().c_str ());
6961 ecs
->hit_singlestep_breakpoint
= 1;
6966 infrun_debug_printf ("[%s] hit its single-step breakpoint",
6967 ecs
->ptid
.to_string ().c_str ());
6970 delete_just_stopped_threads_single_step_breakpoints ();
6972 if (ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
6973 && ecs
->event_thread
->control
.trap_expected
6974 && ecs
->event_thread
->stepping_over_watchpoint
)
6975 stopped_by_watchpoint
= 0;
6977 stopped_by_watchpoint
= watchpoints_triggered (ecs
->ws
);
6979 /* If necessary, step over this watchpoint. We'll be back to display
6981 if (stopped_by_watchpoint
6982 && (target_have_steppable_watchpoint ()
6983 || gdbarch_have_nonsteppable_watchpoint (gdbarch
)))
6985 /* At this point, we are stopped at an instruction which has
6986 attempted to write to a piece of memory under control of
6987 a watchpoint. The instruction hasn't actually executed
6988 yet. If we were to evaluate the watchpoint expression
6989 now, we would get the old value, and therefore no change
6990 would seem to have occurred.
6992 In order to make watchpoints work `right', we really need
6993 to complete the memory write, and then evaluate the
6994 watchpoint expression. We do this by single-stepping the
6997 It may not be necessary to disable the watchpoint to step over
6998 it. For example, the PA can (with some kernel cooperation)
6999 single step over a watchpoint without disabling the watchpoint.
7001 It is far more common to need to disable a watchpoint to step
7002 the inferior over it. If we have non-steppable watchpoints,
7003 we must disable the current watchpoint; it's simplest to
7004 disable all watchpoints.
7006 Any breakpoint at PC must also be stepped over -- if there's
7007 one, it will have already triggered before the watchpoint
7008 triggered, and we either already reported it to the user, or
7009 it didn't cause a stop and we called keep_going. In either
7010 case, if there was a breakpoint at PC, we must be trying to
7012 ecs
->event_thread
->stepping_over_watchpoint
= 1;
7017 ecs
->event_thread
->stepping_over_breakpoint
= 0;
7018 ecs
->event_thread
->stepping_over_watchpoint
= 0;
7019 bpstat_clear (&ecs
->event_thread
->control
.stop_bpstat
);
7020 ecs
->event_thread
->control
.stop_step
= 0;
7021 stop_print_frame
= true;
7022 stopped_by_random_signal
= 0;
7023 bpstat
*stop_chain
= nullptr;
7025 /* Hide inlined functions starting here, unless we just performed stepi or
7026 nexti. After stepi and nexti, always show the innermost frame (not any
7027 inline function call sites). */
7028 if (ecs
->event_thread
->control
.step_range_end
!= 1)
7030 const address_space
*aspace
= ecs
->event_thread
->inf
->aspace
.get ();
7032 /* skip_inline_frames is expensive, so we avoid it if we can
7033 determine that the address is one where functions cannot have
7034 been inlined. This improves performance with inferiors that
7035 load a lot of shared libraries, because the solib event
7036 breakpoint is defined as the address of a function (i.e. not
7037 inline). Note that we have to check the previous PC as well
7038 as the current one to catch cases when we have just
7039 single-stepped off a breakpoint prior to reinstating it.
7040 Note that we're assuming that the code we single-step to is
7041 not inline, but that's not definitive: there's nothing
7042 preventing the event breakpoint function from containing
7043 inlined code, and the single-step ending up there. If the
7044 user had set a breakpoint on that inlined code, the missing
7045 skip_inline_frames call would break things. Fortunately
7046 that's an extremely unlikely scenario. */
7047 if (!pc_at_non_inline_function (aspace
,
7048 ecs
->event_thread
->stop_pc (),
7050 && !(ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
7051 && ecs
->event_thread
->control
.trap_expected
7052 && pc_at_non_inline_function (aspace
,
7053 ecs
->event_thread
->prev_pc
,
7056 stop_chain
= build_bpstat_chain (aspace
,
7057 ecs
->event_thread
->stop_pc (),
7059 skip_inline_frames (ecs
->event_thread
, stop_chain
);
7063 if (ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
7064 && ecs
->event_thread
->control
.trap_expected
7065 && gdbarch_single_step_through_delay_p (gdbarch
)
7066 && currently_stepping (ecs
->event_thread
))
7068 /* We're trying to step off a breakpoint. Turns out that we're
7069 also on an instruction that needs to be stepped multiple
7070 times before it's been fully executing. E.g., architectures
7071 with a delay slot. It needs to be stepped twice, once for
7072 the instruction and once for the delay slot. */
7073 int step_through_delay
7074 = gdbarch_single_step_through_delay (gdbarch
, frame
);
7076 if (step_through_delay
)
7077 infrun_debug_printf ("step through delay");
7079 if (ecs
->event_thread
->control
.step_range_end
== 0
7080 && step_through_delay
)
7082 /* The user issued a continue when stopped at a breakpoint.
7083 Set up for another trap and get out of here. */
7084 ecs
->event_thread
->stepping_over_breakpoint
= 1;
7088 else if (step_through_delay
)
7090 /* The user issued a step when stopped at a breakpoint.
7091 Maybe we should stop, maybe we should not - the delay
7092 slot *might* correspond to a line of source. In any
7093 case, don't decide that here, just set
7094 ecs->stepping_over_breakpoint, making sure we
7095 single-step again before breakpoints are re-inserted. */
7096 ecs
->event_thread
->stepping_over_breakpoint
= 1;
7100 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
7101 handles this event. */
7102 ecs
->event_thread
->control
.stop_bpstat
7103 = bpstat_stop_status (ecs
->event_thread
->inf
->aspace
.get (),
7104 ecs
->event_thread
->stop_pc (),
7105 ecs
->event_thread
, ecs
->ws
, stop_chain
);
7107 /* Following in case break condition called a
7109 stop_print_frame
= true;
7111 /* This is where we handle "moribund" watchpoints. Unlike
7112 software breakpoints traps, hardware watchpoint traps are
7113 always distinguishable from random traps. If no high-level
7114 watchpoint is associated with the reported stop data address
7115 anymore, then the bpstat does not explain the signal ---
7116 simply make sure to ignore it if `stopped_by_watchpoint' is
7119 if (ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
7120 && !bpstat_explains_signal (ecs
->event_thread
->control
.stop_bpstat
,
7122 && stopped_by_watchpoint
)
7124 infrun_debug_printf ("no user watchpoint explains watchpoint SIGTRAP, "
7128 /* NOTE: cagney/2003-03-29: These checks for a random signal
7129 at one stage in the past included checks for an inferior
7130 function call's call dummy's return breakpoint. The original
7131 comment, that went with the test, read:
7133 ``End of a stack dummy. Some systems (e.g. Sony news) give
7134 another signal besides SIGTRAP, so check here as well as
7137 If someone ever tries to get call dummys on a
7138 non-executable stack to work (where the target would stop
7139 with something like a SIGSEGV), then those tests might need
7140 to be re-instated. Given, however, that the tests were only
7141 enabled when momentary breakpoints were not being used, I
7142 suspect that it won't be the case.
7144 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
7145 be necessary for call dummies on a non-executable stack on
7148 /* See if the breakpoints module can explain the signal. */
7150 = !bpstat_explains_signal (ecs
->event_thread
->control
.stop_bpstat
,
7151 ecs
->event_thread
->stop_signal ());
7153 /* Maybe this was a trap for a software breakpoint that has since
7155 if (random_signal
&& target_stopped_by_sw_breakpoint ())
7157 if (gdbarch_program_breakpoint_here_p (gdbarch
,
7158 ecs
->event_thread
->stop_pc ()))
7160 struct regcache
*regcache
;
7163 /* Re-adjust PC to what the program would see if GDB was not
7165 regcache
= get_thread_regcache (ecs
->event_thread
);
7166 decr_pc
= gdbarch_decr_pc_after_break (gdbarch
);
7169 std::optional
<scoped_restore_tmpl
<int>>
7170 restore_operation_disable
;
7172 if (record_full_is_used ())
7173 restore_operation_disable
.emplace
7174 (record_full_gdb_operation_disable_set ());
7176 regcache_write_pc (regcache
,
7177 ecs
->event_thread
->stop_pc () + decr_pc
);
7182 /* A delayed software breakpoint event. Ignore the trap. */
7183 infrun_debug_printf ("delayed software breakpoint trap, ignoring");
7188 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
7189 has since been removed. */
7190 if (random_signal
&& target_stopped_by_hw_breakpoint ())
7192 /* A delayed hardware breakpoint event. Ignore the trap. */
7193 infrun_debug_printf ("delayed hardware breakpoint/watchpoint "
7198 /* If not, perhaps stepping/nexting can. */
7200 random_signal
= !(ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
7201 && currently_stepping (ecs
->event_thread
));
7203 /* Perhaps the thread hit a single-step breakpoint of _another_
7204 thread. Single-step breakpoints are transparent to the
7205 breakpoints module. */
7207 random_signal
= !ecs
->hit_singlestep_breakpoint
;
7209 /* No? Perhaps we got a moribund watchpoint. */
7211 random_signal
= !stopped_by_watchpoint
;
7213 /* Always stop if the user explicitly requested this thread to
7215 if (ecs
->event_thread
->stop_requested
)
7218 infrun_debug_printf ("user-requested stop");
7221 /* For the program's own signals, act according to
7222 the signal handling tables. */
7226 /* Signal not for debugging purposes. */
7227 enum gdb_signal stop_signal
= ecs
->event_thread
->stop_signal ();
7229 infrun_debug_printf ("random signal (%s)",
7230 gdb_signal_to_symbol_string (stop_signal
));
7232 stopped_by_random_signal
= 1;
7234 /* Always stop on signals if we're either just gaining control
7235 of the program, or the user explicitly requested this thread
7236 to remain stopped. */
7237 if (stop_soon
!= NO_STOP_QUIETLY
7238 || ecs
->event_thread
->stop_requested
7239 || signal_stop_state (ecs
->event_thread
->stop_signal ()))
7245 /* Notify observers the signal has "handle print" set. Note we
7246 returned early above if stopping; normal_stop handles the
7247 printing in that case. */
7248 if (signal_print
[ecs
->event_thread
->stop_signal ()])
7250 /* The signal table tells us to print about this signal. */
7251 target_terminal::ours_for_output ();
7252 notify_signal_received (ecs
->event_thread
->stop_signal ());
7253 target_terminal::inferior ();
7256 /* Clear the signal if it should not be passed. */
7257 if (signal_program
[ecs
->event_thread
->stop_signal ()] == 0)
7258 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
7260 if (ecs
->event_thread
->prev_pc
== ecs
->event_thread
->stop_pc ()
7261 && ecs
->event_thread
->control
.trap_expected
7262 && ecs
->event_thread
->control
.step_resume_breakpoint
== nullptr)
7264 /* We were just starting a new sequence, attempting to
7265 single-step off of a breakpoint and expecting a SIGTRAP.
7266 Instead this signal arrives. This signal will take us out
7267 of the stepping range so GDB needs to remember to, when
7268 the signal handler returns, resume stepping off that
7270 /* To simplify things, "continue" is forced to use the same
7271 code paths as single-step - set a breakpoint at the
7272 signal return address and then, once hit, step off that
7274 infrun_debug_printf ("signal arrived while stepping over breakpoint");
7276 insert_hp_step_resume_breakpoint_at_frame (frame
);
7277 ecs
->event_thread
->step_after_step_resume_breakpoint
= 1;
7278 /* Reset trap_expected to ensure breakpoints are re-inserted. */
7279 ecs
->event_thread
->control
.trap_expected
= 0;
7281 /* If we were nexting/stepping some other thread, switch to
7282 it, so that we don't continue it, losing control. */
7283 if (!switch_back_to_stepped_thread (ecs
))
7288 if (ecs
->event_thread
->stop_signal () != GDB_SIGNAL_0
7289 && (pc_in_thread_step_range (ecs
->event_thread
->stop_pc (),
7291 || ecs
->event_thread
->control
.step_range_end
== 1)
7292 && (get_stack_frame_id (frame
)
7293 == ecs
->event_thread
->control
.step_stack_frame_id
)
7294 && ecs
->event_thread
->control
.step_resume_breakpoint
== nullptr)
7296 /* The inferior is about to take a signal that will take it
7297 out of the single step range. Set a breakpoint at the
7298 current PC (which is presumably where the signal handler
7299 will eventually return) and then allow the inferior to
7302 Note that this is only needed for a signal delivered
7303 while in the single-step range. Nested signals aren't a
7304 problem as they eventually all return. */
7305 infrun_debug_printf ("signal may take us out of single-step range");
7307 clear_step_over_info ();
7308 insert_hp_step_resume_breakpoint_at_frame (frame
);
7309 ecs
->event_thread
->step_after_step_resume_breakpoint
= 1;
7310 /* Reset trap_expected to ensure breakpoints are re-inserted. */
7311 ecs
->event_thread
->control
.trap_expected
= 0;
7316 /* Note: step_resume_breakpoint may be non-NULL. This occurs
7317 when either there's a nested signal, or when there's a
7318 pending signal enabled just as the signal handler returns
7319 (leaving the inferior at the step-resume-breakpoint without
7320 actually executing it). Either way continue until the
7321 breakpoint is really hit. */
7323 if (!switch_back_to_stepped_thread (ecs
))
7325 infrun_debug_printf ("random signal, keep going");
7332 process_event_stop_test (ecs
);
7335 /* Return the address for the beginning of the line. */
7338 update_line_range_start (CORE_ADDR pc
, struct execution_control_state
*ecs
)
7340 /* The line table may have multiple entries for the same source code line.
7341 Given the PC, check the line table and return the PC that corresponds
7342 to the line table entry for the source line that PC is in. */
7343 CORE_ADDR start_line_pc
= ecs
->event_thread
->control
.step_range_start
;
7344 std::optional
<CORE_ADDR
> real_range_start
;
7346 /* Call find_line_range_start to get the smallest address in the
7347 linetable for multiple Line X entries in the line table. */
7348 real_range_start
= find_line_range_start (pc
);
7350 if (real_range_start
.has_value ())
7351 start_line_pc
= *real_range_start
;
7353 return start_line_pc
;
/* Helper class for process_event_stop_test implementing lazy evaluation.
   The wrapped value is not computed until the first dereference, and is
   computed at most once.  */
template<typename T>
class lazy_loader
{
public:
  /* Type of the callback used to produce the value on first access.  */
  using fetcher_t = std::function<T ()>;

  explicit lazy_loader (fetcher_t &&f) : m_loader (std::move (f))
  { }

  /* Return a reference to the value, invoking the fetcher to compute
     it if this is the first access.  */
  T &operator* ()
  {
    if (!m_value.has_value ())
      m_value.emplace (m_loader ());
    return m_value.value ();
  }

private:
  /* The cached value; empty until the first dereference.  */
  std::optional<T> m_value;

  /* Callback that computes the value when first needed.  */
  fetcher_t m_loader;
};
7387 /* Come here when we've got some debug event / signal we can explain
7388 (IOW, not a random signal), and test whether it should cause a
7389 stop, or whether we should resume the inferior (transparently).
7390 E.g., could be a breakpoint whose condition evaluates false; we
7391 could be still stepping within the line; etc. */
7394 process_event_stop_test (struct execution_control_state
*ecs
)
7396 struct symtab_and_line stop_pc_sal
;
7397 frame_info_ptr frame
;
7398 struct gdbarch
*gdbarch
;
7399 CORE_ADDR jmp_buf_pc
;
7400 struct bpstat_what what
;
7402 /* Handle cases caused by hitting a breakpoint. */
7404 frame
= get_current_frame ();
7405 gdbarch
= get_frame_arch (frame
);
7407 what
= bpstat_what (ecs
->event_thread
->control
.stop_bpstat
);
7409 if (what
.call_dummy
)
7411 stop_stack_dummy
= what
.call_dummy
;
7414 /* A few breakpoint types have callbacks associated (e.g.,
7415 bp_jit_event). Run them now. */
7416 bpstat_run_callbacks (ecs
->event_thread
->control
.stop_bpstat
);
7418 /* Shorthand to make if statements smaller. */
7419 struct frame_id original_frame_id
7420 = ecs
->event_thread
->control
.step_frame_id
;
7421 lazy_loader
<frame_id
> curr_frame_id
7422 ([] () { return get_frame_id (get_current_frame ()); });
7424 switch (what
.main_action
)
7426 case BPSTAT_WHAT_SET_LONGJMP_RESUME
:
7427 /* If we hit the breakpoint at longjmp while stepping, we
7428 install a momentary breakpoint at the target of the
7431 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME");
7433 ecs
->event_thread
->stepping_over_breakpoint
= 1;
7435 if (what
.is_longjmp
)
7437 struct value
*arg_value
;
7439 /* If we set the longjmp breakpoint via a SystemTap probe,
7440 then use it to extract the arguments. The destination PC
7441 is the third argument to the probe. */
7442 arg_value
= probe_safe_evaluate_at_pc (frame
, 2);
7445 jmp_buf_pc
= value_as_address (arg_value
);
7446 jmp_buf_pc
= gdbarch_addr_bits_remove (gdbarch
, jmp_buf_pc
);
7448 else if (!gdbarch_get_longjmp_target_p (gdbarch
)
7449 || !gdbarch_get_longjmp_target (gdbarch
,
7450 frame
, &jmp_buf_pc
))
7452 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME "
7453 "(!gdbarch_get_longjmp_target)");
7458 /* Insert a breakpoint at resume address. */
7459 insert_longjmp_resume_breakpoint (gdbarch
, jmp_buf_pc
);
7462 check_exception_resume (ecs
, frame
);
7466 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME
:
7468 frame_info_ptr init_frame
;
7470 /* There are several cases to consider.
7472 1. The initiating frame no longer exists. In this case we
7473 must stop, because the exception or longjmp has gone too
7476 2. The initiating frame exists, and is the same as the
7477 current frame. We stop, because the exception or longjmp
7480 3. The initiating frame exists and is different from the
7481 current frame. This means the exception or longjmp has
7482 been caught beneath the initiating frame, so keep going.
7484 4. longjmp breakpoint has been placed just to protect
7485 against stale dummy frames and user is not interested in
7486 stopping around longjmps. */
7488 infrun_debug_printf ("BPSTAT_WHAT_CLEAR_LONGJMP_RESUME");
7490 gdb_assert (ecs
->event_thread
->control
.exception_resume_breakpoint
7492 delete_exception_resume_breakpoint (ecs
->event_thread
);
7494 if (what
.is_longjmp
)
7496 check_longjmp_breakpoint_for_call_dummy (ecs
->event_thread
);
7498 if (!frame_id_p (ecs
->event_thread
->initiating_frame
))
7506 init_frame
= frame_find_by_id (ecs
->event_thread
->initiating_frame
);
7510 if (*curr_frame_id
== ecs
->event_thread
->initiating_frame
)
7512 /* Case 2. Fall through. */
7522 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
7524 delete_step_resume_breakpoint (ecs
->event_thread
);
7526 end_stepping_range (ecs
);
7530 case BPSTAT_WHAT_SINGLE
:
7531 infrun_debug_printf ("BPSTAT_WHAT_SINGLE");
7532 ecs
->event_thread
->stepping_over_breakpoint
= 1;
7533 /* Still need to check other stuff, at least the case where we
7534 are stepping and step out of the right range. */
7537 case BPSTAT_WHAT_STEP_RESUME
:
7538 infrun_debug_printf ("BPSTAT_WHAT_STEP_RESUME");
7540 delete_step_resume_breakpoint (ecs
->event_thread
);
7541 if (ecs
->event_thread
->control
.proceed_to_finish
7542 && execution_direction
== EXEC_REVERSE
)
7544 struct thread_info
*tp
= ecs
->event_thread
;
7546 /* We are finishing a function in reverse, and just hit the
7547 step-resume breakpoint at the start address of the
7548 function, and we're almost there -- just need to back up
7549 by one more single-step, which should take us back to the
7551 tp
->control
.step_range_start
= tp
->control
.step_range_end
= 1;
7555 fill_in_stop_func (gdbarch
, ecs
);
7556 if (ecs
->event_thread
->stop_pc () == ecs
->stop_func_start
7557 && execution_direction
== EXEC_REVERSE
)
7559 /* We are stepping over a function call in reverse, and just
7560 hit the step-resume breakpoint at the start address of
7561 the function. Go back to single-stepping, which should
7562 take us back to the function call. */
7563 ecs
->event_thread
->stepping_over_breakpoint
= 1;
7569 case BPSTAT_WHAT_STOP_NOISY
:
7570 infrun_debug_printf ("BPSTAT_WHAT_STOP_NOISY");
7571 stop_print_frame
= true;
7573 /* Assume the thread stopped for a breakpoint. We'll still check
7574 whether a/the breakpoint is there when the thread is next
7576 ecs
->event_thread
->stepping_over_breakpoint
= 1;
7581 case BPSTAT_WHAT_STOP_SILENT
:
7582 infrun_debug_printf ("BPSTAT_WHAT_STOP_SILENT");
7583 stop_print_frame
= false;
7585 /* Assume the thread stopped for a breakpoint. We'll still check
7586 whether a/the breakpoint is there when the thread is next
7588 ecs
->event_thread
->stepping_over_breakpoint
= 1;
7592 case BPSTAT_WHAT_HP_STEP_RESUME
:
7593 infrun_debug_printf ("BPSTAT_WHAT_HP_STEP_RESUME");
7595 delete_step_resume_breakpoint (ecs
->event_thread
);
7596 if (ecs
->event_thread
->step_after_step_resume_breakpoint
)
7598 /* Back when the step-resume breakpoint was inserted, we
7599 were trying to single-step off a breakpoint. Go back to
7601 ecs
->event_thread
->step_after_step_resume_breakpoint
= 0;
7602 ecs
->event_thread
->stepping_over_breakpoint
= 1;
7608 case BPSTAT_WHAT_KEEP_CHECKING
:
7612 /* If we stepped a permanent breakpoint and we had a high priority
7613 step-resume breakpoint for the address we stepped, but we didn't
7614 hit it, then we must have stepped into the signal handler. The
7615 step-resume was only necessary to catch the case of _not_
7616 stepping into the handler, so delete it, and fall through to
7617 checking whether the step finished. */
7618 if (ecs
->event_thread
->stepped_breakpoint
)
7620 struct breakpoint
*sr_bp
7621 = ecs
->event_thread
->control
.step_resume_breakpoint
;
7623 if (sr_bp
!= nullptr
7624 && sr_bp
->first_loc ().permanent
7625 && sr_bp
->type
== bp_hp_step_resume
7626 && sr_bp
->first_loc ().address
== ecs
->event_thread
->prev_pc
)
7628 infrun_debug_printf ("stepped permanent breakpoint, stopped in handler");
7629 delete_step_resume_breakpoint (ecs
->event_thread
);
7630 ecs
->event_thread
->step_after_step_resume_breakpoint
= 0;
7634 /* We come here if we hit a breakpoint but should not stop for it.
7635 Possibly we also were stepping and should stop for that. So fall
7636 through and test for stepping. But, if not stepping, do not
7639 /* In all-stop mode, if we're currently stepping but have stopped in
7640 some other thread, we need to switch back to the stepped thread. */
7641 if (switch_back_to_stepped_thread (ecs
))
7644 if (ecs
->event_thread
->control
.step_resume_breakpoint
)
7646 infrun_debug_printf ("step-resume breakpoint is inserted");
7648 /* Having a step-resume breakpoint overrides anything
7649 else having to do with stepping commands until
7650 that breakpoint is reached. */
7655 if (ecs
->event_thread
->control
.step_range_end
== 0)
7657 infrun_debug_printf ("no stepping, continue");
7658 /* Likewise if we aren't even stepping. */
7663 fill_in_stop_func (gdbarch
, ecs
);
7665 /* If stepping through a line, keep going if still within it.
7667 Note that step_range_end is the address of the first instruction
7668 beyond the step range, and NOT the address of the last instruction
7671 Note also that during reverse execution, we may be stepping
7672 through a function epilogue and therefore must detect when
7673 the current-frame changes in the middle of a line. */
7675 if (pc_in_thread_step_range (ecs
->event_thread
->stop_pc (),
7677 && (execution_direction
!= EXEC_REVERSE
7678 || *curr_frame_id
== original_frame_id
))
7681 ("stepping inside range [%s-%s]",
7682 paddress (gdbarch
, ecs
->event_thread
->control
.step_range_start
),
7683 paddress (gdbarch
, ecs
->event_thread
->control
.step_range_end
));
7685 /* Tentatively re-enable range stepping; `resume' disables it if
7686 necessary (e.g., if we're stepping over a breakpoint or we
7687 have software watchpoints). */
7688 ecs
->event_thread
->control
.may_range_step
= 1;
7690 /* When stepping backward, stop at beginning of line range
7691 (unless it's the function entry point, in which case
7692 keep going back to the call point). */
7693 CORE_ADDR stop_pc
= ecs
->event_thread
->stop_pc ();
7694 if (stop_pc
== ecs
->event_thread
->control
.step_range_start
7695 && stop_pc
!= ecs
->stop_func_start
7696 && execution_direction
== EXEC_REVERSE
)
7697 end_stepping_range (ecs
);
7704 /* We stepped out of the stepping range. */
7706 /* If we are stepping at the source level and entered the runtime
7707 loader dynamic symbol resolution code...
7709 EXEC_FORWARD: we keep on single stepping until we exit the run
7710 time loader code and reach the callee's address.
7712 EXEC_REVERSE: we've already executed the callee (backward), and
7713 the runtime loader code is handled just like any other
7714 undebuggable function call. Now we need only keep stepping
7715 backward through the trampoline code, and that's handled further
7716 down, so there is nothing for us to do here. */
7718 if (execution_direction
!= EXEC_REVERSE
7719 && ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_UNDEBUGGABLE
7720 && in_solib_dynsym_resolve_code (ecs
->event_thread
->stop_pc ())
7721 && (ecs
->event_thread
->control
.step_start_function
== nullptr
7722 || !in_solib_dynsym_resolve_code (
7723 ecs
->event_thread
->control
.step_start_function
->value_block ()
7726 CORE_ADDR pc_after_resolver
=
7727 gdbarch_skip_solib_resolver (gdbarch
, ecs
->event_thread
->stop_pc ());
7729 infrun_debug_printf ("stepped into dynsym resolve code");
7731 if (pc_after_resolver
)
7733 /* Set up a step-resume breakpoint at the address
7734 indicated by SKIP_SOLIB_RESOLVER. */
7735 symtab_and_line sr_sal
;
7736 sr_sal
.pc
= pc_after_resolver
;
7737 sr_sal
.pspace
= get_frame_program_space (frame
);
7739 insert_step_resume_breakpoint_at_sal (gdbarch
,
7740 sr_sal
, null_frame_id
);
7747 /* Step through an indirect branch thunk. */
7748 if (ecs
->event_thread
->control
.step_over_calls
!= STEP_OVER_NONE
7749 && gdbarch_in_indirect_branch_thunk (gdbarch
,
7750 ecs
->event_thread
->stop_pc ()))
7752 infrun_debug_printf ("stepped into indirect branch thunk");
7757 if (ecs
->event_thread
->control
.step_range_end
!= 1
7758 && (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_UNDEBUGGABLE
7759 || ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_ALL
)
7760 && get_frame_type (frame
) == SIGTRAMP_FRAME
)
7762 infrun_debug_printf ("stepped into signal trampoline");
7763 /* The inferior, while doing a "step" or "next", has ended up in
7764 a signal trampoline (either by a signal being delivered or by
7765 the signal handler returning). Just single-step until the
7766 inferior leaves the trampoline (either by calling the handler
7772 /* If we're in the return path from a shared library trampoline,
7773 we want to proceed through the trampoline when stepping. */
7774 /* macro/2012-04-25: This needs to come before the subroutine
7775 call check below as on some targets return trampolines look
7776 like subroutine calls (MIPS16 return thunks). */
7777 if (gdbarch_in_solib_return_trampoline (gdbarch
,
7778 ecs
->event_thread
->stop_pc (),
7779 ecs
->stop_func_name
)
7780 && ecs
->event_thread
->control
.step_over_calls
!= STEP_OVER_NONE
)
7782 /* Determine where this trampoline returns. */
7783 CORE_ADDR stop_pc
= ecs
->event_thread
->stop_pc ();
7784 CORE_ADDR real_stop_pc
7785 = gdbarch_skip_trampoline_code (gdbarch
, frame
, stop_pc
);
7787 infrun_debug_printf ("stepped into solib return tramp");
7789 /* Only proceed through if we know where it's going. */
7792 /* And put the step-breakpoint there and go until there. */
7793 symtab_and_line sr_sal
;
7794 sr_sal
.pc
= real_stop_pc
;
7795 sr_sal
.section
= find_pc_overlay (sr_sal
.pc
);
7796 sr_sal
.pspace
= get_frame_program_space (frame
);
7798 /* Do not specify what the fp should be when we stop since
7799 on some machines the prologue is where the new fp value
7801 insert_step_resume_breakpoint_at_sal (gdbarch
,
7802 sr_sal
, null_frame_id
);
7804 /* Restart without fiddling with the step ranges or
7811 /* Check for subroutine calls. The check for the current frame
7812 equalling the step ID is not necessary - the check of the
7813 previous frame's ID is sufficient - but it is a common case and
7814 cheaper than checking the previous frame's ID.
7816 NOTE: frame_id::operator== will never report two invalid frame IDs as
7817 being equal, so to get into this block, both the current and
7818 previous frame must have valid frame IDs. */
7819 /* The outer_frame_id check is a heuristic to detect stepping
7820 through startup code. If we step over an instruction which
7821 sets the stack pointer from an invalid value to a valid value,
7822 we may detect that as a subroutine call from the mythical
7823 "outermost" function. This could be fixed by marking
7824 outermost frames as !stack_p,code_p,special_p. Then the
7825 initial outermost frame, before sp was valid, would
7826 have code_addr == &_start. See the comment in frame_id::operator==
7829 /* We want "nexti" to step into, not over, signal handlers invoked
7830 by the kernel, therefore this subroutine check should not trigger
7831 for a signal handler invocation. On most platforms, this is already
7832 not the case, as the kernel puts a signal trampoline frame onto the
7833 stack to handle proper return after the handler, and therefore at this
7834 point, the current frame is a grandchild of the step frame, not a
7835 child. However, on some platforms, the kernel actually uses a
7836 trampoline to handle *invocation* of the handler. In that case,
7837 when executing the first instruction of the trampoline, this check
7838 would erroneously detect the trampoline invocation as a subroutine
7839 call. Fix this by checking for SIGTRAMP_FRAME. */
7840 if ((get_stack_frame_id (frame
)
7841 != ecs
->event_thread
->control
.step_stack_frame_id
)
7842 && get_frame_type (frame
) != SIGTRAMP_FRAME
7843 && ((frame_unwind_caller_id (frame
)
7844 == ecs
->event_thread
->control
.step_stack_frame_id
)
7845 && ((ecs
->event_thread
->control
.step_stack_frame_id
7847 || (ecs
->event_thread
->control
.step_start_function
7848 != find_pc_function (ecs
->event_thread
->stop_pc ())))))
7850 CORE_ADDR stop_pc
= ecs
->event_thread
->stop_pc ();
7851 CORE_ADDR real_stop_pc
;
7853 infrun_debug_printf ("stepped into subroutine");
7855 if (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_NONE
)
7857 /* I presume that step_over_calls is only 0 when we're
7858 supposed to be stepping at the assembly language level
7859 ("stepi"). Just stop. */
7860 /* And this works the same backward as frontward. MVS */
7861 end_stepping_range (ecs
);
7865 /* Reverse stepping through solib trampolines. */
7867 if (execution_direction
== EXEC_REVERSE
7868 && ecs
->event_thread
->control
.step_over_calls
!= STEP_OVER_NONE
7869 && (gdbarch_skip_trampoline_code (gdbarch
, frame
, stop_pc
)
7870 || (ecs
->stop_func_start
== 0
7871 && in_solib_dynsym_resolve_code (stop_pc
))))
7873 /* Any solib trampoline code can be handled in reverse
7874 by simply continuing to single-step. We have already
7875 executed the solib function (backwards), and a few
7876 steps will take us back through the trampoline to the
7882 if (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_ALL
)
7884 /* We're doing a "next".
7886 Normal (forward) execution: set a breakpoint at the
7887 callee's return address (the address at which the caller
7890 Reverse (backward) execution. set the step-resume
7891 breakpoint at the start of the function that we just
7892 stepped into (backwards), and continue to there. When we
7893 get there, we'll need to single-step back to the caller. */
7895 if (execution_direction
== EXEC_REVERSE
)
7897 /* If we're already at the start of the function, we've either
7898 just stepped backward into a single instruction function,
7899 or stepped back out of a signal handler to the first instruction
7900 of the function. Just keep going, which will single-step back
7902 if (ecs
->stop_func_start
!= stop_pc
&& ecs
->stop_func_start
!= 0)
7904 /* Normal function call return (static or dynamic). */
7905 symtab_and_line sr_sal
;
7906 sr_sal
.pc
= ecs
->stop_func_start
;
7907 sr_sal
.pspace
= get_frame_program_space (frame
);
7908 insert_step_resume_breakpoint_at_sal (gdbarch
,
7909 sr_sal
, get_stack_frame_id (frame
));
7913 insert_step_resume_breakpoint_at_caller (frame
);
7919 /* If we are in a function call trampoline (a stub between the
7920 calling routine and the real function), locate the real
7921 function. That's what tells us (a) whether we want to step
7922 into it at all, and (b) what prologue we want to run to the
7923 end of, if we do step into it. */
7924 real_stop_pc
= skip_language_trampoline (frame
, stop_pc
);
7925 if (real_stop_pc
== 0)
7926 real_stop_pc
= gdbarch_skip_trampoline_code (gdbarch
, frame
, stop_pc
);
7927 if (real_stop_pc
!= 0)
7928 ecs
->stop_func_start
= real_stop_pc
;
7930 if (real_stop_pc
!= 0 && in_solib_dynsym_resolve_code (real_stop_pc
))
7932 symtab_and_line sr_sal
;
7933 sr_sal
.pc
= ecs
->stop_func_start
;
7934 sr_sal
.pspace
= get_frame_program_space (frame
);
7936 insert_step_resume_breakpoint_at_sal (gdbarch
,
7937 sr_sal
, null_frame_id
);
7942 /* If we have line number information for the function we are
7943 thinking of stepping into and the function isn't on the skip
7946 If there are several symtabs at that PC (e.g. with include
7947 files), just want to know whether *any* of them have line
7948 numbers. find_pc_line handles this. */
7950 struct symtab_and_line tmp_sal
;
7952 tmp_sal
= find_pc_line (ecs
->stop_func_start
, 0);
7953 if (tmp_sal
.line
!= 0
7954 && !function_name_is_marked_for_skip (ecs
->stop_func_name
,
7956 && !inline_frame_is_marked_for_skip (true, ecs
->event_thread
))
7958 if (execution_direction
== EXEC_REVERSE
)
7959 handle_step_into_function_backward (gdbarch
, ecs
);
7961 handle_step_into_function (gdbarch
, ecs
);
7966 /* If we have no line number and the step-stop-if-no-debug is
7967 set, we stop the step so that the user has a chance to switch
7968 in assembly mode. */
7969 if (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_UNDEBUGGABLE
7970 && step_stop_if_no_debug
)
7972 end_stepping_range (ecs
);
7976 if (execution_direction
== EXEC_REVERSE
)
7978 /* If we're already at the start of the function, we've either just
7979 stepped backward into a single instruction function without line
7980 number info, or stepped back out of a signal handler to the first
7981 instruction of the function without line number info. Just keep
7982 going, which will single-step back to the caller. */
7983 if (ecs
->stop_func_start
!= stop_pc
)
7985 /* Set a breakpoint at callee's start address.
7986 From there we can step once and be back in the caller. */
7987 symtab_and_line sr_sal
;
7988 sr_sal
.pc
= ecs
->stop_func_start
;
7989 sr_sal
.pspace
= get_frame_program_space (frame
);
7990 insert_step_resume_breakpoint_at_sal (gdbarch
,
7991 sr_sal
, null_frame_id
);
7995 /* Set a breakpoint at callee's return address (the address
7996 at which the caller will resume). */
7997 insert_step_resume_breakpoint_at_caller (frame
);
8003 /* Reverse stepping through solib trampolines. */
8005 if (execution_direction
== EXEC_REVERSE
8006 && ecs
->event_thread
->control
.step_over_calls
!= STEP_OVER_NONE
)
8008 CORE_ADDR stop_pc
= ecs
->event_thread
->stop_pc ();
8010 if (gdbarch_skip_trampoline_code (gdbarch
, frame
, stop_pc
)
8011 || (ecs
->stop_func_start
== 0
8012 && in_solib_dynsym_resolve_code (stop_pc
)))
8014 /* Any solib trampoline code can be handled in reverse
8015 by simply continuing to single-step. We have already
8016 executed the solib function (backwards), and a few
8017 steps will take us back through the trampoline to the
8022 else if (in_solib_dynsym_resolve_code (stop_pc
))
8024 /* Stepped backward into the solib dynsym resolver.
8025 Set a breakpoint at its start and continue, then
8026 one more step will take us out. */
8027 symtab_and_line sr_sal
;
8028 sr_sal
.pc
= ecs
->stop_func_start
;
8029 sr_sal
.pspace
= get_frame_program_space (frame
);
8030 insert_step_resume_breakpoint_at_sal (gdbarch
,
8031 sr_sal
, null_frame_id
);
8037 /* This always returns the sal for the inner-most frame when we are in a
8038 stack of inlined frames, even if GDB actually believes that it is in a
8039 more outer frame. This is checked for below by calls to
8040 inline_skipped_frames. */
8041 stop_pc_sal
= find_pc_line (ecs
->event_thread
->stop_pc (), 0);
8043 /* NOTE: tausq/2004-05-24: This if block used to be done before all
8044 the trampoline processing logic, however, there are some trampolines
8045 that have no names, so we should do trampoline handling first. */
8046 if (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_UNDEBUGGABLE
8047 && ecs
->stop_func_name
== nullptr
8048 && stop_pc_sal
.line
== 0)
8050 infrun_debug_printf ("stepped into undebuggable function");
8052 /* The inferior just stepped into, or returned to, an
8053 undebuggable function (where there is no debugging information
8054 and no line number corresponding to the address where the
8055 inferior stopped). Since we want to skip this kind of code,
8056 we keep going until the inferior returns from this
8057 function - unless the user has asked us not to (via
8058 set step-mode) or we no longer know how to get back
8059 to the call site. */
8060 if (step_stop_if_no_debug
8061 || !frame_id_p (frame_unwind_caller_id (frame
)))
8063 /* If we have no line number and the step-stop-if-no-debug
8064 is set, we stop the step so that the user has a chance to
8065 switch in assembly mode. */
8066 end_stepping_range (ecs
);
8071 /* Set a breakpoint at callee's return address (the address
8072 at which the caller will resume). */
8073 insert_step_resume_breakpoint_at_caller (frame
);
8079 if (execution_direction
== EXEC_REVERSE
8080 && ecs
->event_thread
->control
.proceed_to_finish
8081 && ecs
->event_thread
->stop_pc () >= ecs
->stop_func_alt_start
8082 && ecs
->event_thread
->stop_pc () < ecs
->stop_func_start
)
8084 /* We are executing the reverse-finish command.
8085 If the system supports multiple entry points and we are finishing a
8086 function in reverse. If we are between the entry points single-step
8087 back to the alternate entry point. If we are at the alternate entry
8088 point -- just need to back up by one more single-step, which
8089 should take us back to the function call. */
8090 ecs
->event_thread
->control
.step_range_start
8091 = ecs
->event_thread
->control
.step_range_end
= 1;
8097 if (ecs
->event_thread
->control
.step_range_end
== 1)
8099 /* It is stepi or nexti. We always want to stop stepping after
8101 infrun_debug_printf ("stepi/nexti");
8102 end_stepping_range (ecs
);
8106 if (stop_pc_sal
.line
== 0)
8108 /* We have no line number information. That means to stop
8109 stepping (does this always happen right after one instruction,
8110 when we do "s" in a function with no line numbers,
8111 or can this happen as a result of a return or longjmp?). */
8112 infrun_debug_printf ("line number info");
8113 end_stepping_range (ecs
);
8117 /* Look for "calls" to inlined functions, part one. If the inline
8118 frame machinery detected some skipped call sites, we have entered
8119 a new inline function. */
8121 if ((*curr_frame_id
== original_frame_id
)
8122 && inline_skipped_frames (ecs
->event_thread
))
8124 infrun_debug_printf ("stepped into inlined function");
8126 symtab_and_line call_sal
= find_frame_sal (frame
);
8128 if (ecs
->event_thread
->control
.step_over_calls
!= STEP_OVER_ALL
)
8130 /* For "step", we're going to stop. But if the call site
8131 for this inlined function is on the same source line as
8132 we were previously stepping, go down into the function
8133 first. Otherwise stop at the call site. */
8135 if (call_sal
.line
== ecs
->event_thread
->current_line
8136 && call_sal
.symtab
== ecs
->event_thread
->current_symtab
)
8138 step_into_inline_frame (ecs
->event_thread
);
8139 if (inline_frame_is_marked_for_skip (false, ecs
->event_thread
))
8146 end_stepping_range (ecs
);
8151 /* For "next", we should stop at the call site if it is on a
8152 different source line. Otherwise continue through the
8153 inlined function. */
8154 if (call_sal
.line
== ecs
->event_thread
->current_line
8155 && call_sal
.symtab
== ecs
->event_thread
->current_symtab
)
8158 end_stepping_range (ecs
);
8163 /* Look for "calls" to inlined functions, part two. If we are still
8164 in the same real function we were stepping through, but we have
8165 to go further up to find the exact frame ID, we are stepping
8166 through a more inlined call beyond its call site. */
8168 if (get_frame_type (frame
) == INLINE_FRAME
8169 && (*curr_frame_id
!= original_frame_id
)
8170 && stepped_in_from (frame
, original_frame_id
))
8172 infrun_debug_printf ("stepping through inlined function");
8174 if (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_ALL
8175 || inline_frame_is_marked_for_skip (false, ecs
->event_thread
))
8178 end_stepping_range (ecs
);
8182 bool refresh_step_info
= true;
8183 if ((ecs
->event_thread
->stop_pc () == stop_pc_sal
.pc
)
8184 && (ecs
->event_thread
->current_line
!= stop_pc_sal
.line
8185 || ecs
->event_thread
->current_symtab
!= stop_pc_sal
.symtab
))
8187 /* We are at a different line. */
8189 if (stop_pc_sal
.is_stmt
)
8191 if (execution_direction
== EXEC_REVERSE
)
8193 /* We are stepping backwards make sure we have reached the
8194 beginning of the line. */
8195 CORE_ADDR stop_pc
= ecs
->event_thread
->stop_pc ();
8196 CORE_ADDR start_line_pc
8197 = update_line_range_start (stop_pc
, ecs
);
8199 if (stop_pc
!= start_line_pc
)
8201 /* Have not reached the beginning of the source code line.
8202 Set a step range. Execution should stop in any function
8203 calls we execute back into before reaching the beginning
8205 ecs
->event_thread
->control
.step_range_start
8207 ecs
->event_thread
->control
.step_range_end
= stop_pc
;
8208 set_step_info (ecs
->event_thread
, frame
, stop_pc_sal
);
8214 /* We are at the start of a statement.
8216 So stop. Note that we don't stop if we step into the middle of a
8217 statement. That is said to make things like for (;;) statements
8219 infrun_debug_printf ("stepped to a different line");
8220 end_stepping_range (ecs
);
8223 else if (*curr_frame_id
== original_frame_id
)
8225 /* We are not at the start of a statement, and we have not changed
8228 We ignore this line table entry, and continue stepping forward,
8229 looking for a better place to stop. */
8230 refresh_step_info
= false;
8231 infrun_debug_printf ("stepped to a different line, but "
8232 "it's not the start of a statement");
8236 /* We are not the start of a statement, and we have changed frame.
8238 We ignore this line table entry, and continue stepping forward,
8239 looking for a better place to stop. Keep refresh_step_info at
8240 true to note that the frame has changed, but ignore the line
8241 number to make sure we don't ignore a subsequent entry with the
8242 same line number. */
8243 stop_pc_sal
.line
= 0;
8244 infrun_debug_printf ("stepped to a different frame, but "
8245 "it's not the start of a statement");
8248 else if (execution_direction
== EXEC_REVERSE
8249 && *curr_frame_id
!= original_frame_id
8250 && original_frame_id
.code_addr_p
&& curr_frame_id
->code_addr_p
8251 && original_frame_id
.code_addr
== curr_frame_id
->code_addr
)
8253 /* If we enter here, we're leaving a recursive function call. In this
8254 situation, we shouldn't refresh the step information, because if we
8255 do, we'll lose the frame_id of when we started stepping, and this
8256 will make GDB not know we need to print frame information. */
8257 refresh_step_info
= false;
8258 infrun_debug_printf ("reverse stepping, left a recursive call, don't "
8259 "update step info so we remember we left a frame");
8262 /* We aren't done stepping.
8264 Optimize by setting the stepping range to the line.
8265 (We might not be in the original line, but if we entered a
8266 new line in mid-statement, we continue stepping. This makes
8267 things like for(;;) statements work better.)
8269 If we entered a SAL that indicates a non-statement line table entry,
8270 then we update the stepping range, but we don't update the step info,
8271 which includes things like the line number we are stepping away from.
8272 This means we will stop when we find a line table entry that is marked
8273 as is-statement, even if it matches the non-statement one we just
8276 ecs
->event_thread
->control
.step_range_start
= stop_pc_sal
.pc
;
8277 ecs
->event_thread
->control
.step_range_end
= stop_pc_sal
.end
;
8278 ecs
->event_thread
->control
.may_range_step
= 1;
8280 ("updated step range, start = %s, end = %s, may_range_step = %d",
8281 paddress (gdbarch
, ecs
->event_thread
->control
.step_range_start
),
8282 paddress (gdbarch
, ecs
->event_thread
->control
.step_range_end
),
8283 ecs
->event_thread
->control
.may_range_step
);
8284 if (refresh_step_info
)
8285 set_step_info (ecs
->event_thread
, frame
, stop_pc_sal
);
8287 infrun_debug_printf ("keep going");
8289 if (execution_direction
== EXEC_REVERSE
)
8291 CORE_ADDR stop_pc
= ecs
->event_thread
->stop_pc ();
8293 /* Make sure the stop_pc is set to the beginning of the line. */
8294 if (stop_pc
!= ecs
->event_thread
->control
.step_range_start
)
8295 ecs
->event_thread
->control
.step_range_start
8296 = update_line_range_start (stop_pc
, ecs
);
8302 static bool restart_stepped_thread (process_stratum_target
*resume_target
,
8303 ptid_t resume_ptid
);
8305 /* In all-stop mode, if we're currently stepping but have stopped in
8306 some other thread, we may need to switch back to the stepped
8307 thread. Returns true we set the inferior running, false if we left
8308 it stopped (and the event needs further processing). */
8311 switch_back_to_stepped_thread (struct execution_control_state
*ecs
)
8313 if (!target_is_non_stop_p ())
8315 /* If any thread is blocked on some internal breakpoint, and we
8316 simply need to step over that breakpoint to get it going
8317 again, do that first. */
8319 /* However, if we see an event for the stepping thread, then we
8320 know all other threads have been moved past their breakpoints
8321 already. Let the caller check whether the step is finished,
8322 etc., before deciding to move it past a breakpoint. */
8323 if (ecs
->event_thread
->control
.step_range_end
!= 0)
8326 /* Check if the current thread is blocked on an incomplete
8327 step-over, interrupted by a random signal. */
8328 if (ecs
->event_thread
->control
.trap_expected
8329 && ecs
->event_thread
->stop_signal () != GDB_SIGNAL_TRAP
)
8332 ("need to finish step-over of [%s]",
8333 ecs
->event_thread
->ptid
.to_string ().c_str ());
8338 /* Check if the current thread is blocked by a single-step
8339 breakpoint of another thread. */
8340 if (ecs
->hit_singlestep_breakpoint
)
8342 infrun_debug_printf ("need to step [%s] over single-step breakpoint",
8343 ecs
->ptid
.to_string ().c_str ());
8348 /* If this thread needs yet another step-over (e.g., stepping
8349 through a delay slot), do it first before moving on to
8351 if (thread_still_needs_step_over (ecs
->event_thread
))
8354 ("thread [%s] still needs step-over",
8355 ecs
->event_thread
->ptid
.to_string ().c_str ());
8360 /* If scheduler locking applies even if not stepping, there's no
8361 need to walk over threads. Above we've checked whether the
8362 current thread is stepping. If some other thread not the
8363 event thread is stepping, then it must be that scheduler
8364 locking is not in effect. */
8365 if (schedlock_applies (ecs
->event_thread
))
8368 /* Otherwise, we no longer expect a trap in the current thread.
8369 Clear the trap_expected flag before switching back -- this is
8370 what keep_going does as well, if we call it. */
8371 ecs
->event_thread
->control
.trap_expected
= 0;
8373 /* Likewise, clear the signal if it should not be passed. */
8374 if (!signal_program
[ecs
->event_thread
->stop_signal ()])
8375 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
8377 if (restart_stepped_thread (ecs
->target
, ecs
->ptid
))
8379 prepare_to_wait (ecs
);
8383 switch_to_thread (ecs
->event_thread
);
8389 /* Look for the thread that was stepping, and resume it.
8390 RESUME_TARGET / RESUME_PTID indicate the set of threads the caller
8391 is resuming. Return true if a thread was started, false
8395 restart_stepped_thread (process_stratum_target
*resume_target
,
8398 /* Do all pending step-overs before actually proceeding with
8400 if (start_step_over ())
8403 for (thread_info
*tp
: all_threads_safe ())
8405 if (tp
->state
== THREAD_EXITED
)
8408 if (tp
->has_pending_waitstatus ())
8411 /* Ignore threads of processes the caller is not
8414 && (tp
->inf
->process_target () != resume_target
8415 || tp
->inf
->pid
!= resume_ptid
.pid ()))
8418 if (tp
->control
.trap_expected
)
8420 infrun_debug_printf ("switching back to stepped thread (step-over)");
8422 if (keep_going_stepped_thread (tp
))
8427 for (thread_info
*tp
: all_threads_safe ())
8429 if (tp
->state
== THREAD_EXITED
)
8432 if (tp
->has_pending_waitstatus ())
8435 /* Ignore threads of processes the caller is not
8438 && (tp
->inf
->process_target () != resume_target
8439 || tp
->inf
->pid
!= resume_ptid
.pid ()))
8442 /* Did we find the stepping thread? */
8443 if (tp
->control
.step_range_end
)
8445 infrun_debug_printf ("switching back to stepped thread (stepping)");
8447 if (keep_going_stepped_thread (tp
))
8458 restart_after_all_stop_detach (process_stratum_target
*proc_target
)
8460 /* Note we don't check target_is_non_stop_p() here, because the
8461 current inferior may no longer have a process_stratum target
8462 pushed, as we just detached. */
8464 /* See if we have a THREAD_RUNNING thread that need to be
8465 re-resumed. If we have any thread that is already executing,
8466 then we don't need to resume the target -- it is already been
8467 resumed. With the remote target (in all-stop), it's even
8468 impossible to issue another resumption if the target is already
8469 resumed, until the target reports a stop. */
8470 for (thread_info
*thr
: all_threads (proc_target
))
8472 if (thr
->state
!= THREAD_RUNNING
)
8475 /* If we have any thread that is already executing, then we
8476 don't need to resume the target -- it is already been
8478 if (thr
->executing ())
8481 /* If we have a pending event to process, skip resuming the
8482 target and go straight to processing it. */
8483 if (thr
->resumed () && thr
->has_pending_waitstatus ())
8487 /* Alright, we need to re-resume the target. If a thread was
8488 stepping, we need to restart it stepping. */
8489 if (restart_stepped_thread (proc_target
, minus_one_ptid
))
8492 /* Otherwise, find the first THREAD_RUNNING thread and resume
8494 for (thread_info
*thr
: all_threads (proc_target
))
8496 if (thr
->state
!= THREAD_RUNNING
)
8499 execution_control_state
ecs (thr
);
8500 switch_to_thread (thr
);
8506 /* Set a previously stepped thread back to stepping. Returns true on
8507 success, false if the resume is not possible (e.g., the thread
8511 keep_going_stepped_thread (struct thread_info
*tp
)
8513 frame_info_ptr frame
;
8515 /* If the stepping thread exited, then don't try to switch back and
8516 resume it, which could fail in several different ways depending
8517 on the target. Instead, just keep going.
8519 We can find a stepping dead thread in the thread list in two
8522 - The target supports thread exit events, and when the target
8523 tries to delete the thread from the thread list, inferior_ptid
8524 pointed at the exiting thread. In such case, calling
8525 delete_thread does not really remove the thread from the list;
8526 instead, the thread is left listed, with 'exited' state.
8528 - The target's debug interface does not support thread exit
8529 events, and so we have no idea whatsoever if the previously
8530 stepping thread is still alive. For that reason, we need to
8531 synchronously query the target now. */
8533 if (tp
->state
== THREAD_EXITED
|| !target_thread_alive (tp
->ptid
))
8535 infrun_debug_printf ("not resuming previously stepped thread, it has "
8542 infrun_debug_printf ("resuming previously stepped thread");
8544 execution_control_state
ecs (tp
);
8545 switch_to_thread (tp
);
8547 tp
->set_stop_pc (regcache_read_pc (get_thread_regcache (tp
)));
8548 frame
= get_current_frame ();
8550 /* If the PC of the thread we were trying to single-step has
8551 changed, then that thread has trapped or been signaled, but the
8552 event has not been reported to GDB yet. Re-poll the target
8553 looking for this particular thread's event (i.e. temporarily
8554 enable schedlock) by:
8556 - setting a break at the current PC
8557 - resuming that particular thread, only (by setting trap
8560 This prevents us continuously moving the single-step breakpoint
8561 forward, one instruction at a time, overstepping. */
8563 if (tp
->stop_pc () != tp
->prev_pc
)
8567 infrun_debug_printf ("expected thread advanced also (%s -> %s)",
8568 paddress (current_inferior ()->arch (), tp
->prev_pc
),
8569 paddress (current_inferior ()->arch (),
8572 /* Clear the info of the previous step-over, as it's no longer
8573 valid (if the thread was trying to step over a breakpoint, it
8574 has already succeeded). It's what keep_going would do too,
8575 if we called it. Do this before trying to insert the sss
8576 breakpoint, otherwise if we were previously trying to step
8577 over this exact address in another thread, the breakpoint is
8579 clear_step_over_info ();
8580 tp
->control
.trap_expected
= 0;
8582 insert_single_step_breakpoint (get_frame_arch (frame
),
8583 get_frame_address_space (frame
),
8586 tp
->set_resumed (true);
8587 resume_ptid
= internal_resume_ptid (tp
->control
.stepping_command
);
8588 do_target_resume (resume_ptid
, false, GDB_SIGNAL_0
);
8592 infrun_debug_printf ("expected thread still hasn't advanced");
8594 keep_going_pass_signal (&ecs
);
8600 /* Is thread TP in the middle of (software or hardware)
8601 single-stepping? (Note the result of this function must never be
8602 passed directly as target_resume's STEP parameter.) */
8605 currently_stepping (struct thread_info
*tp
)
8607 return ((tp
->control
.step_range_end
8608 && tp
->control
.step_resume_breakpoint
== nullptr)
8609 || tp
->control
.trap_expected
8610 || tp
->stepped_breakpoint
8611 || bpstat_should_step ());
8614 /* Inferior has stepped into a subroutine call with source code that
8615 we should not step over. Do step to the first line of code in
8619 handle_step_into_function (struct gdbarch
*gdbarch
,
8620 struct execution_control_state
*ecs
)
8622 fill_in_stop_func (gdbarch
, ecs
);
8624 compunit_symtab
*cust
8625 = find_pc_compunit_symtab (ecs
->event_thread
->stop_pc ());
8626 if (cust
!= nullptr && cust
->language () != language_asm
)
8627 ecs
->stop_func_start
8628 = gdbarch_skip_prologue_noexcept (gdbarch
, ecs
->stop_func_start
);
8630 symtab_and_line stop_func_sal
= find_pc_line (ecs
->stop_func_start
, 0);
8631 /* Use the step_resume_break to step until the end of the prologue,
8632 even if that involves jumps (as it seems to on the vax under
8634 /* If the prologue ends in the middle of a source line, continue to
8635 the end of that source line (if it is still within the function).
8636 Otherwise, just go to end of prologue. */
8637 if (stop_func_sal
.end
8638 && stop_func_sal
.pc
!= ecs
->stop_func_start
8639 && stop_func_sal
.end
< ecs
->stop_func_end
)
8640 ecs
->stop_func_start
= stop_func_sal
.end
;
8642 /* Architectures which require breakpoint adjustment might not be able
8643 to place a breakpoint at the computed address. If so, the test
8644 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
8645 ecs->stop_func_start to an address at which a breakpoint may be
8646 legitimately placed.
8648 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
8649 made, GDB will enter an infinite loop when stepping through
8650 optimized code consisting of VLIW instructions which contain
8651 subinstructions corresponding to different source lines. On
8652 FR-V, it's not permitted to place a breakpoint on any but the
8653 first subinstruction of a VLIW instruction. When a breakpoint is
8654 set, GDB will adjust the breakpoint address to the beginning of
8655 the VLIW instruction. Thus, we need to make the corresponding
8656 adjustment here when computing the stop address. */
8658 if (gdbarch_adjust_breakpoint_address_p (gdbarch
))
8660 ecs
->stop_func_start
8661 = gdbarch_adjust_breakpoint_address (gdbarch
,
8662 ecs
->stop_func_start
);
8665 if (ecs
->stop_func_start
== ecs
->event_thread
->stop_pc ())
8667 /* We are already there: stop now. */
8668 end_stepping_range (ecs
);
8673 /* Put the step-breakpoint there and go until there. */
8674 symtab_and_line sr_sal
;
8675 sr_sal
.pc
= ecs
->stop_func_start
;
8676 sr_sal
.section
= find_pc_overlay (ecs
->stop_func_start
);
8677 sr_sal
.pspace
= get_frame_program_space (get_current_frame ());
8679 /* Do not specify what the fp should be when we stop since on
8680 some machines the prologue is where the new fp value is
8682 insert_step_resume_breakpoint_at_sal (gdbarch
, sr_sal
, null_frame_id
);
8684 /* And make sure stepping stops right away then. */
8685 ecs
->event_thread
->control
.step_range_end
8686 = ecs
->event_thread
->control
.step_range_start
;
8691 /* Inferior has stepped backward into a subroutine call with source
8692 code that we should not step over. Do step to the beginning of the
8693 last line of code in it. */
8696 handle_step_into_function_backward (struct gdbarch
*gdbarch
,
8697 struct execution_control_state
*ecs
)
8699 struct compunit_symtab
*cust
;
8700 struct symtab_and_line stop_func_sal
;
8702 fill_in_stop_func (gdbarch
, ecs
);
8704 cust
= find_pc_compunit_symtab (ecs
->event_thread
->stop_pc ());
8705 if (cust
!= nullptr && cust
->language () != language_asm
)
8706 ecs
->stop_func_start
8707 = gdbarch_skip_prologue_noexcept (gdbarch
, ecs
->stop_func_start
);
8709 stop_func_sal
= find_pc_line (ecs
->event_thread
->stop_pc (), 0);
8711 /* OK, we're just going to keep stepping here. */
8712 if (stop_func_sal
.pc
== ecs
->event_thread
->stop_pc ())
8714 /* We're there already. Just stop stepping now. */
8715 end_stepping_range (ecs
);
8719 /* Else just reset the step range and keep going.
8720 No step-resume breakpoint, they don't work for
8721 epilogues, which can have multiple entry paths. */
8722 ecs
->event_thread
->control
.step_range_start
= stop_func_sal
.pc
;
8723 ecs
->event_thread
->control
.step_range_end
= stop_func_sal
.end
;
8729 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
8730 This is used to both functions and to skip over code. */
8733 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch
*gdbarch
,
8734 struct symtab_and_line sr_sal
,
8735 struct frame_id sr_id
,
8736 enum bptype sr_type
)
8738 /* There should never be more than one step-resume or longjmp-resume
8739 breakpoint per thread, so we should never be setting a new
8740 step_resume_breakpoint when one is already active. */
8741 gdb_assert (inferior_thread ()->control
.step_resume_breakpoint
== nullptr);
8742 gdb_assert (sr_type
== bp_step_resume
|| sr_type
== bp_hp_step_resume
);
8744 infrun_debug_printf ("inserting step-resume breakpoint at %s",
8745 paddress (gdbarch
, sr_sal
.pc
));
8747 inferior_thread ()->control
.step_resume_breakpoint
8748 = set_momentary_breakpoint (gdbarch
, sr_sal
, sr_id
, sr_type
).release ();
8752 insert_step_resume_breakpoint_at_sal (struct gdbarch
*gdbarch
,
8753 struct symtab_and_line sr_sal
,
8754 struct frame_id sr_id
)
8756 insert_step_resume_breakpoint_at_sal_1 (gdbarch
,
8761 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
8762 This is used to skip a potential signal handler.
8764 This is called with the interrupted function's frame. The signal
8765 handler, when it returns, will resume the interrupted function at
8769 insert_hp_step_resume_breakpoint_at_frame (const frame_info_ptr
&return_frame
)
8771 gdb_assert (return_frame
!= nullptr);
8773 struct gdbarch
*gdbarch
= get_frame_arch (return_frame
);
8775 symtab_and_line sr_sal
;
8776 sr_sal
.pc
= gdbarch_addr_bits_remove (gdbarch
, get_frame_pc (return_frame
));
8777 sr_sal
.section
= find_pc_overlay (sr_sal
.pc
);
8778 sr_sal
.pspace
= get_frame_program_space (return_frame
);
8780 insert_step_resume_breakpoint_at_sal_1 (gdbarch
, sr_sal
,
8781 get_stack_frame_id (return_frame
),
8785 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
8786 is used to skip a function after stepping into it (for "next" or if
8787 the called function has no debugging information).
8789 The current function has almost always been reached by single
8790 stepping a call or return instruction. NEXT_FRAME belongs to the
8791 current function, and the breakpoint will be set at the caller's
8794 This is a separate function rather than reusing
8795 insert_hp_step_resume_breakpoint_at_frame in order to avoid
8796 get_prev_frame, which may stop prematurely (see the implementation
8797 of frame_unwind_caller_id for an example). */
8800 insert_step_resume_breakpoint_at_caller (const frame_info_ptr
&next_frame
)
8802 /* We shouldn't have gotten here if we don't know where the call site
8804 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame
)));
8806 struct gdbarch
*gdbarch
= frame_unwind_caller_arch (next_frame
);
8808 symtab_and_line sr_sal
;
8809 sr_sal
.pc
= gdbarch_addr_bits_remove (gdbarch
,
8810 frame_unwind_caller_pc (next_frame
));
8811 sr_sal
.section
= find_pc_overlay (sr_sal
.pc
);
8812 sr_sal
.pspace
= frame_unwind_program_space (next_frame
);
8814 insert_step_resume_breakpoint_at_sal (gdbarch
, sr_sal
,
8815 frame_unwind_caller_id (next_frame
));
8818 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
8819 new breakpoint at the target of a jmp_buf. The handling of
8820 longjmp-resume uses the same mechanisms used for handling
8821 "step-resume" breakpoints. */
8824 insert_longjmp_resume_breakpoint (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
8826 /* There should never be more than one longjmp-resume breakpoint per
8827 thread, so we should never be setting a new
8828 longjmp_resume_breakpoint when one is already active. */
8829 gdb_assert (inferior_thread ()->control
.exception_resume_breakpoint
== nullptr);
8831 infrun_debug_printf ("inserting longjmp-resume breakpoint at %s",
8832 paddress (gdbarch
, pc
));
8834 inferior_thread ()->control
.exception_resume_breakpoint
=
8835 set_momentary_breakpoint_at_pc (gdbarch
, pc
, bp_longjmp_resume
).release ();
8838 /* Insert an exception resume breakpoint. TP is the thread throwing
8839 the exception. The block B is the block of the unwinder debug hook
8840 function. FRAME is the frame corresponding to the call to this
8841 function. SYM is the symbol of the function argument holding the
8842 target PC of the exception. */
8845 insert_exception_resume_breakpoint (struct thread_info
*tp
,
8846 const struct block
*b
,
8847 const frame_info_ptr
&frame
,
8852 struct block_symbol vsym
;
8853 struct value
*value
;
8855 struct breakpoint
*bp
;
8857 vsym
= lookup_symbol_search_name (sym
->search_name (),
8858 b
, SEARCH_VAR_DOMAIN
);
8859 value
= read_var_value (vsym
.symbol
, vsym
.block
, frame
);
8860 /* If the value was optimized out, revert to the old behavior. */
8861 if (! value
->optimized_out ())
8863 handler
= value_as_address (value
);
8865 infrun_debug_printf ("exception resume at %lx",
8866 (unsigned long) handler
);
8868 /* set_momentary_breakpoint_at_pc creates a thread-specific
8869 breakpoint for the current inferior thread. */
8870 gdb_assert (tp
== inferior_thread ());
8871 bp
= set_momentary_breakpoint_at_pc (get_frame_arch (frame
),
8873 bp_exception_resume
).release ();
8875 tp
->control
.exception_resume_breakpoint
= bp
;
8878 catch (const gdb_exception_error
&e
)
8880 /* We want to ignore errors here. */
8884 /* A helper for check_exception_resume that sets an
8885 exception-breakpoint based on a SystemTap probe. */
8888 insert_exception_resume_from_probe (struct thread_info
*tp
,
8889 const struct bound_probe
*probe
,
8890 const frame_info_ptr
&frame
)
8892 struct value
*arg_value
;
8894 struct breakpoint
*bp
;
8896 arg_value
= probe_safe_evaluate_at_pc (frame
, 1);
8900 handler
= value_as_address (arg_value
);
8902 infrun_debug_printf ("exception resume at %s",
8903 paddress (probe
->objfile
->arch (), handler
));
8905 /* set_momentary_breakpoint_at_pc creates a thread-specific breakpoint
8906 for the current inferior thread. */
8907 gdb_assert (tp
== inferior_thread ());
8908 bp
= set_momentary_breakpoint_at_pc (get_frame_arch (frame
),
8909 handler
, bp_exception_resume
).release ();
8910 tp
->control
.exception_resume_breakpoint
= bp
;
8913 /* This is called when an exception has been intercepted. Check to
8914 see whether the exception's destination is of interest, and if so,
8915 set an exception resume breakpoint there. */
8918 check_exception_resume (struct execution_control_state
*ecs
,
8919 const frame_info_ptr
&frame
)
8921 struct bound_probe probe
;
8922 struct symbol
*func
;
8924 /* First see if this exception unwinding breakpoint was set via a
8925 SystemTap probe point. If so, the probe has two arguments: the
8926 CFA and the HANDLER. We ignore the CFA, extract the handler, and
8927 set a breakpoint there. */
8928 probe
= find_probe_by_pc (get_frame_pc (frame
));
8931 insert_exception_resume_from_probe (ecs
->event_thread
, &probe
, frame
);
8935 func
= get_frame_function (frame
);
8941 const struct block
*b
;
8944 /* The exception breakpoint is a thread-specific breakpoint on
8945 the unwinder's debug hook, declared as:
8947 void _Unwind_DebugHook (void *cfa, void *handler);
8949 The CFA argument indicates the frame to which control is
8950 about to be transferred. HANDLER is the destination PC.
8952 We ignore the CFA and set a temporary breakpoint at HANDLER.
8953 This is not extremely efficient but it avoids issues in gdb
8954 with computing the DWARF CFA, and it also works even in weird
8955 cases such as throwing an exception from inside a signal
8958 b
= func
->value_block ();
8959 for (struct symbol
*sym
: block_iterator_range (b
))
8961 if (!sym
->is_argument ())
8968 insert_exception_resume_breakpoint (ecs
->event_thread
,
8974 catch (const gdb_exception_error
&e
)
8980 stop_waiting (struct execution_control_state
*ecs
)
8982 infrun_debug_printf ("stop_waiting");
8984 /* Let callers know we don't want to wait for the inferior anymore. */
8985 ecs
->wait_some_more
= 0;
8988 /* Like keep_going, but passes the signal to the inferior, even if the
8989 signal is set to nopass. */
8992 keep_going_pass_signal (struct execution_control_state
*ecs
)
8994 gdb_assert (ecs
->event_thread
->ptid
== inferior_ptid
);
8995 gdb_assert (!ecs
->event_thread
->resumed ());
8997 /* Save the pc before execution, to compare with pc after stop. */
8998 ecs
->event_thread
->prev_pc
8999 = regcache_read_pc_protected (get_thread_regcache (ecs
->event_thread
));
9001 if (ecs
->event_thread
->control
.trap_expected
)
9003 struct thread_info
*tp
= ecs
->event_thread
;
9005 infrun_debug_printf ("%s has trap_expected set, "
9006 "resuming to collect trap",
9007 tp
->ptid
.to_string ().c_str ());
9009 /* We haven't yet gotten our trap, and either: intercepted a
9010 non-signal event (e.g., a fork); or took a signal which we
9011 are supposed to pass through to the inferior. Simply
9013 resume (ecs
->event_thread
->stop_signal ());
9015 else if (step_over_info_valid_p ())
9017 /* Another thread is stepping over a breakpoint in-line. If
9018 this thread needs a step-over too, queue the request. In
9019 either case, this resume must be deferred for later. */
9020 struct thread_info
*tp
= ecs
->event_thread
;
9022 if (ecs
->hit_singlestep_breakpoint
9023 || thread_still_needs_step_over (tp
))
9025 infrun_debug_printf ("step-over already in progress: "
9026 "step-over for %s deferred",
9027 tp
->ptid
.to_string ().c_str ());
9028 global_thread_step_over_chain_enqueue (tp
);
9031 infrun_debug_printf ("step-over in progress: resume of %s deferred",
9032 tp
->ptid
.to_string ().c_str ());
9036 regcache
*regcache
= get_thread_regcache (ecs
->event_thread
);
9039 step_over_what step_what
;
9041 /* Either the trap was not expected, but we are continuing
9042 anyway (if we got a signal, the user asked it be passed to
9045 We got our expected trap, but decided we should resume from
9048 We're going to run this baby now!
9050 Note that insert_breakpoints won't try to re-insert
9051 already inserted breakpoints. Therefore, we don't
9052 care if breakpoints were already inserted, or not. */
9054 /* If we need to step over a breakpoint, and we're not using
9055 displaced stepping to do so, insert all breakpoints
9056 (watchpoints, etc.) but the one we're stepping over, step one
9057 instruction, and then re-insert the breakpoint when that step
9060 step_what
= thread_still_needs_step_over (ecs
->event_thread
);
9062 remove_bp
= (ecs
->hit_singlestep_breakpoint
9063 || (step_what
& STEP_OVER_BREAKPOINT
));
9064 remove_wps
= (step_what
& STEP_OVER_WATCHPOINT
);
9066 /* We can't use displaced stepping if we need to step past a
9067 watchpoint. The instruction copied to the scratch pad would
9068 still trigger the watchpoint. */
9070 && (remove_wps
|| !use_displaced_stepping (ecs
->event_thread
)))
9072 set_step_over_info (ecs
->event_thread
->inf
->aspace
.get (),
9073 regcache_read_pc (regcache
), remove_wps
,
9074 ecs
->event_thread
->global_num
);
9076 else if (remove_wps
)
9077 set_step_over_info (nullptr, 0, remove_wps
, -1);
9079 /* If we now need to do an in-line step-over, we need to stop
9080 all other threads. Note this must be done before
9081 insert_breakpoints below, because that removes the breakpoint
9082 we're about to step over, otherwise other threads could miss
9084 if (step_over_info_valid_p () && target_is_non_stop_p ())
9085 stop_all_threads ("starting in-line step-over");
9087 /* Stop stepping if inserting breakpoints fails. */
9090 insert_breakpoints ();
9092 catch (const gdb_exception_error
&e
)
9094 exception_print (gdb_stderr
, e
);
9096 clear_step_over_info ();
9100 ecs
->event_thread
->control
.trap_expected
= (remove_bp
|| remove_wps
);
9102 resume (ecs
->event_thread
->stop_signal ());
9105 prepare_to_wait (ecs
);
9108 /* Called when we should continue running the inferior, because the
9109 current event doesn't cause a user visible stop. This does the
9110 resuming part; waiting for the next event is done elsewhere. */
9113 keep_going (struct execution_control_state
*ecs
)
9115 if (ecs
->event_thread
->control
.trap_expected
9116 && ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
)
9117 ecs
->event_thread
->control
.trap_expected
= 0;
9119 if (!signal_program
[ecs
->event_thread
->stop_signal ()])
9120 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
9121 keep_going_pass_signal (ecs
);
9124 /* This function normally comes after a resume, before
9125 handle_inferior_event exits. It takes care of any last bits of
9126 housekeeping, and sets the all-important wait_some_more flag. */
9129 prepare_to_wait (struct execution_control_state
*ecs
)
9131 infrun_debug_printf ("prepare_to_wait");
9133 ecs
->wait_some_more
= 1;
9135 /* If the target can't async, emulate it by marking the infrun event
9136 handler such that as soon as we get back to the event-loop, we
9137 immediately end up in fetch_inferior_event again calling
9139 if (!target_can_async_p ())
9140 mark_infrun_async_event_handler ();
9143 /* We are done with the step range of a step/next/si/ni command.
9144 Called once for each n of a "step n" operation. */
9147 end_stepping_range (struct execution_control_state
*ecs
)
9149 ecs
->event_thread
->control
.stop_step
= 1;
9153 /* Several print_*_reason functions to print why the inferior has stopped.
9154 We always print something when the inferior exits, or receives a signal.
9155 The rest of the cases are dealt with later on in normal_stop and
9156 print_it_typical. Ideally there should be a call to one of these
9157 print_*_reason functions functions from handle_inferior_event each time
9158 stop_waiting is called.
9160 Note that we don't call these directly, instead we delegate that to
9161 the interpreters, through observers. Interpreters then call these
9162 with whatever uiout is right. */
9165 print_signal_exited_reason (struct ui_out
*uiout
, enum gdb_signal siggnal
)
9167 annotate_signalled ();
9168 if (uiout
->is_mi_like_p ())
9170 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED
));
9171 uiout
->text ("\nProgram terminated with signal ");
9172 annotate_signal_name ();
9173 uiout
->field_string ("signal-name",
9174 gdb_signal_to_name (siggnal
));
9175 annotate_signal_name_end ();
9177 annotate_signal_string ();
9178 uiout
->field_string ("signal-meaning",
9179 gdb_signal_to_string (siggnal
));
9180 annotate_signal_string_end ();
9181 uiout
->text (".\n");
9182 uiout
->text ("The program no longer exists.\n");
9186 print_exited_reason (struct ui_out
*uiout
, int exitstatus
)
9188 struct inferior
*inf
= current_inferior ();
9189 std::string pidstr
= target_pid_to_str (ptid_t (inf
->pid
));
9191 annotate_exited (exitstatus
);
9194 if (uiout
->is_mi_like_p ())
9195 uiout
->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED
));
9196 std::string exit_code_str
9197 = string_printf ("0%o", (unsigned int) exitstatus
);
9198 uiout
->message ("[Inferior %s (%s) exited with code %pF]\n",
9199 plongest (inf
->num
), pidstr
.c_str (),
9200 string_field ("exit-code", exit_code_str
.c_str ()));
9204 if (uiout
->is_mi_like_p ())
9206 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY
));
9207 uiout
->message ("[Inferior %s (%s) exited normally]\n",
9208 plongest (inf
->num
), pidstr
.c_str ());
9213 print_signal_received_reason (struct ui_out
*uiout
, enum gdb_signal siggnal
)
9215 struct thread_info
*thr
= inferior_thread ();
9217 infrun_debug_printf ("signal = %s", gdb_signal_to_string (siggnal
));
9221 if (uiout
->is_mi_like_p ())
9223 else if (show_thread_that_caused_stop ())
9225 uiout
->text ("\nThread ");
9226 uiout
->field_string ("thread-id", print_thread_id (thr
));
9228 const char *name
= thread_name (thr
);
9229 if (name
!= nullptr)
9231 uiout
->text (" \"");
9232 uiout
->field_string ("name", name
);
9237 uiout
->text ("\nProgram");
9239 if (siggnal
== GDB_SIGNAL_0
&& !uiout
->is_mi_like_p ())
9240 uiout
->text (" stopped");
9243 uiout
->text (" received signal ");
9244 annotate_signal_name ();
9245 if (uiout
->is_mi_like_p ())
9247 ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED
));
9248 uiout
->field_string ("signal-name", gdb_signal_to_name (siggnal
));
9249 annotate_signal_name_end ();
9251 annotate_signal_string ();
9252 uiout
->field_string ("signal-meaning", gdb_signal_to_string (siggnal
));
9254 regcache
*regcache
= get_thread_regcache (thr
);
9255 struct gdbarch
*gdbarch
= regcache
->arch ();
9256 if (gdbarch_report_signal_info_p (gdbarch
))
9257 gdbarch_report_signal_info (gdbarch
, uiout
, siggnal
);
9259 annotate_signal_string_end ();
9261 uiout
->text (".\n");
9265 print_no_history_reason (struct ui_out
*uiout
)
9267 if (uiout
->is_mi_like_p ())
9268 uiout
->field_string ("reason", async_reason_lookup (EXEC_ASYNC_NO_HISTORY
));
9270 uiout
->text ("\nNo more reverse-execution history.\n");
9273 /* Print current location without a level number, if we have changed
9274 functions or hit a breakpoint. Print source line if we have one.
9275 bpstat_print contains the logic deciding in detail what to print,
9276 based on the event(s) that just occurred. */
9279 print_stop_location (const target_waitstatus
&ws
)
9282 enum print_what source_flag
;
9283 int do_frame_printing
= 1;
9284 struct thread_info
*tp
= inferior_thread ();
9286 bpstat_ret
= bpstat_print (tp
->control
.stop_bpstat
, ws
.kind ());
9290 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
9291 should) carry around the function and does (or should) use
9292 that when doing a frame comparison. */
9293 if (tp
->control
.stop_step
9294 && (tp
->control
.step_frame_id
9295 == get_frame_id (get_current_frame ()))
9296 && (tp
->control
.step_start_function
9297 == find_pc_function (tp
->stop_pc ())))
9299 symtab_and_line sal
= find_frame_sal (get_selected_frame (nullptr));
9300 if (sal
.symtab
!= tp
->current_symtab
)
9302 /* Finished step in same frame but into different file, print
9303 location and source line. */
9304 source_flag
= SRC_AND_LOC
;
9308 /* Finished step in same frame and same file, just print source
9310 source_flag
= SRC_LINE
;
9315 /* Finished step into different frame, print location and source
9317 source_flag
= SRC_AND_LOC
;
9320 case PRINT_SRC_AND_LOC
:
9321 /* Print location and source line. */
9322 source_flag
= SRC_AND_LOC
;
9324 case PRINT_SRC_ONLY
:
9325 source_flag
= SRC_LINE
;
9328 /* Something bogus. */
9329 source_flag
= SRC_LINE
;
9330 do_frame_printing
= 0;
9333 internal_error (_("Unknown value."));
9336 /* The behavior of this routine with respect to the source
9338 SRC_LINE: Print only source line
9339 LOCATION: Print only location
9340 SRC_AND_LOC: Print location and source line. */
9341 if (do_frame_printing
)
9342 print_stack_frame (get_selected_frame (nullptr), 0, source_flag
, 1);
9345 /* See `print_stop_event` in infrun.h. */
9348 do_print_stop_event (struct ui_out
*uiout
, bool displays
)
9350 struct target_waitstatus last
;
9351 struct thread_info
*tp
;
9353 get_last_target_status (nullptr, nullptr, &last
);
9356 scoped_restore save_uiout
= make_scoped_restore (¤t_uiout
, uiout
);
9358 print_stop_location (last
);
9360 /* Display the auto-display expressions. */
9365 tp
= inferior_thread ();
9366 if (tp
->thread_fsm () != nullptr
9367 && tp
->thread_fsm ()->finished_p ())
9369 struct return_value_info
*rv
;
9371 rv
= tp
->thread_fsm ()->return_value ();
9373 print_return_value (uiout
, rv
);
9377 /* See infrun.h. This function itself sets up buffered output for the
9378 duration of do_print_stop_event, which performs the actual event
9382 print_stop_event (struct ui_out
*uiout
, bool displays
)
9384 do_with_buffered_output (do_print_stop_event
, uiout
, displays
);
9390 maybe_remove_breakpoints (void)
9392 if (!breakpoints_should_be_inserted_now () && target_has_execution ())
9394 if (remove_breakpoints ())
9396 target_terminal::ours_for_output ();
9397 gdb_printf (_("Cannot remove breakpoints because "
9398 "program is no longer writable.\nFurther "
9399 "execution is probably impossible.\n"));
9404 /* The execution context that just caused a normal stop. */
9410 DISABLE_COPY_AND_ASSIGN (stop_context
);
9412 bool changed () const;
9417 /* The event PTID. */
9421 /* If stopp for a thread event, this is the thread that caused the
9423 thread_info_ref thread
;
9425 /* The inferior that caused the stop. */
9429 /* Initializes a new stop context. If stopped for a thread event, this
9430 takes a strong reference to the thread. */
9432 stop_context::stop_context ()
9434 stop_id
= get_stop_id ();
9435 ptid
= inferior_ptid
;
9436 inf_num
= current_inferior ()->num
;
9438 if (inferior_ptid
!= null_ptid
)
9440 /* Take a strong reference so that the thread can't be deleted
9442 thread
= thread_info_ref::new_reference (inferior_thread ());
9446 /* Return true if the current context no longer matches the saved stop
9450 stop_context::changed () const
9452 if (ptid
!= inferior_ptid
)
9454 if (inf_num
!= current_inferior ()->num
)
9456 if (thread
!= nullptr && thread
->state
!= THREAD_STOPPED
)
9458 if (get_stop_id () != stop_id
)
9468 struct target_waitstatus last
;
9470 get_last_target_status (nullptr, nullptr, &last
);
9474 /* If an exception is thrown from this point on, make sure to
9475 propagate GDB's knowledge of the executing state to the
9476 frontend/user running state. A QUIT is an easy exception to see
9477 here, so do this before any filtered output. */
9479 ptid_t finish_ptid
= null_ptid
;
9482 finish_ptid
= minus_one_ptid
;
9483 else if (last
.kind () == TARGET_WAITKIND_SIGNALLED
9484 || last
.kind () == TARGET_WAITKIND_EXITED
)
9486 /* On some targets, we may still have live threads in the
9487 inferior when we get a process exit event. E.g., for
9488 "checkpoint", when the current checkpoint/fork exits,
9489 linux-fork.c automatically switches to another fork from
9490 within target_mourn_inferior. */
9491 if (inferior_ptid
!= null_ptid
)
9492 finish_ptid
= ptid_t (inferior_ptid
.pid ());
9494 else if (last
.kind () != TARGET_WAITKIND_NO_RESUMED
9495 && last
.kind () != TARGET_WAITKIND_THREAD_EXITED
)
9496 finish_ptid
= inferior_ptid
;
9498 std::optional
<scoped_finish_thread_state
> maybe_finish_thread_state
;
9499 if (finish_ptid
!= null_ptid
)
9501 maybe_finish_thread_state
.emplace
9502 (user_visible_resume_target (finish_ptid
), finish_ptid
);
9505 /* As we're presenting a stop, and potentially removing breakpoints,
9506 update the thread list so we can tell whether there are threads
9507 running on the target. With target remote, for example, we can
9508 only learn about new threads when we explicitly update the thread
9509 list. Do this before notifying the interpreters about signal
9510 stops, end of stepping ranges, etc., so that the "new thread"
9511 output is emitted before e.g., "Program received signal FOO",
9512 instead of after. */
9513 update_thread_list ();
9515 if (last
.kind () == TARGET_WAITKIND_STOPPED
&& stopped_by_random_signal
)
9516 notify_signal_received (inferior_thread ()->stop_signal ());
9518 /* As with the notification of thread events, we want to delay
9519 notifying the user that we've switched thread context until
9520 the inferior actually stops.
9522 There's no point in saying anything if the inferior has exited.
9523 Note that SIGNALLED here means "exited with a signal", not
9524 "received a signal".
9526 Also skip saying anything in non-stop mode. In that mode, as we
9527 don't want GDB to switch threads behind the user's back, to avoid
9528 races where the user is typing a command to apply to thread x,
9529 but GDB switches to thread y before the user finishes entering
9530 the command, fetch_inferior_event installs a cleanup to restore
9531 the current thread back to the thread the user had selected right
9532 after this event is handled, so we're not really switching, only
9533 informing of a stop. */
9536 if ((last
.kind () != TARGET_WAITKIND_SIGNALLED
9537 && last
.kind () != TARGET_WAITKIND_EXITED
9538 && last
.kind () != TARGET_WAITKIND_NO_RESUMED
9539 && last
.kind () != TARGET_WAITKIND_THREAD_EXITED
)
9540 && target_has_execution ()
9541 && previous_thread
!= inferior_thread ())
9543 SWITCH_THRU_ALL_UIS ()
9545 target_terminal::ours_for_output ();
9546 gdb_printf (_("[Switching to %s]\n"),
9547 target_pid_to_str (inferior_ptid
).c_str ());
9548 annotate_thread_changed ();
9552 update_previous_thread ();
9555 if (last
.kind () == TARGET_WAITKIND_NO_RESUMED
9556 || last
.kind () == TARGET_WAITKIND_THREAD_EXITED
)
9558 stop_print_frame
= false;
9560 SWITCH_THRU_ALL_UIS ()
9561 if (current_ui
->prompt_state
== PROMPT_BLOCKED
)
9563 target_terminal::ours_for_output ();
9564 if (last
.kind () == TARGET_WAITKIND_NO_RESUMED
)
9565 gdb_printf (_("No unwaited-for children left.\n"));
9566 else if (last
.kind () == TARGET_WAITKIND_THREAD_EXITED
)
9567 gdb_printf (_("Command aborted, thread exited.\n"));
9569 gdb_assert_not_reached ("unhandled");
9573 /* Note: this depends on the update_thread_list call above. */
9574 maybe_remove_breakpoints ();
9576 /* If an auto-display called a function and that got a signal,
9577 delete that auto-display to avoid an infinite recursion. */
9579 if (stopped_by_random_signal
)
9580 disable_current_display ();
9582 SWITCH_THRU_ALL_UIS ()
9584 async_enable_stdin ();
9587 /* Let the user/frontend see the threads as stopped. */
9588 maybe_finish_thread_state
.reset ();
9590 /* Select innermost stack frame - i.e., current frame is frame 0,
9591 and current location is based on that. Handle the case where the
9592 dummy call is returning after being stopped. E.g. the dummy call
9593 previously hit a breakpoint. (If the dummy call returns
9594 normally, we won't reach here.) Do this before the stop hook is
9595 run, so that it doesn't get to see the temporary dummy frame,
9596 which is not where we'll present the stop. */
9597 if (has_stack_frames ())
9599 if (stop_stack_dummy
== STOP_STACK_DUMMY
)
9601 /* Pop the empty frame that contains the stack dummy. This
9602 also restores inferior state prior to the call (struct
9603 infcall_suspend_state). */
9604 frame_info_ptr frame
= get_current_frame ();
9606 gdb_assert (get_frame_type (frame
) == DUMMY_FRAME
);
9608 /* frame_pop calls reinit_frame_cache as the last thing it
9609 does which means there's now no selected frame. */
9612 select_frame (get_current_frame ());
9614 /* Set the current source location. */
9615 set_current_sal_from_frame (get_current_frame ());
9618 /* Look up the hook_stop and run it (CLI internally handles problem
9619 of stop_command's pre-hook not existing). */
9620 stop_context saved_context
;
9624 execute_cmd_pre_hook (stop_command
);
9626 catch (const gdb_exception_error
&ex
)
9628 exception_fprintf (gdb_stderr
, ex
,
9629 "Error while running hook_stop:\n");
9632 /* If the stop hook resumes the target, then there's no point in
9633 trying to notify about the previous stop; its context is
9634 gone. Likewise if the command switches thread or inferior --
9635 the observers would print a stop for the wrong
9637 if (saved_context
.changed ())
9640 /* Notify observers about the stop. This is where the interpreters
9641 print the stop event. */
9642 notify_normal_stop ((inferior_ptid
!= null_ptid
9643 ? inferior_thread ()->control
.stop_bpstat
9646 annotate_stopped ();
9648 if (target_has_execution ())
9650 if (last
.kind () != TARGET_WAITKIND_SIGNALLED
9651 && last
.kind () != TARGET_WAITKIND_EXITED
9652 && last
.kind () != TARGET_WAITKIND_NO_RESUMED
9653 && last
.kind () != TARGET_WAITKIND_THREAD_EXITED
)
9654 /* Delete the breakpoint we stopped at, if it wants to be deleted.
9655 Delete any breakpoint that is to be deleted at the next stop. */
9656 breakpoint_auto_delete (inferior_thread ()->control
.stop_bpstat
);
9663 signal_stop_state (int signo
)
9665 return signal_stop
[signo
];
9669 signal_print_state (int signo
)
9671 return signal_print
[signo
];
9675 signal_pass_state (int signo
)
9677 return signal_program
[signo
];
9681 signal_cache_update (int signo
)
9685 for (signo
= 0; signo
< (int) GDB_SIGNAL_LAST
; signo
++)
9686 signal_cache_update (signo
);
9691 signal_pass
[signo
] = (signal_stop
[signo
] == 0
9692 && signal_print
[signo
] == 0
9693 && signal_program
[signo
] == 1
9694 && signal_catch
[signo
] == 0);
9698 signal_stop_update (int signo
, int state
)
9700 int ret
= signal_stop
[signo
];
9702 signal_stop
[signo
] = state
;
9703 signal_cache_update (signo
);
9708 signal_print_update (int signo
, int state
)
9710 int ret
= signal_print
[signo
];
9712 signal_print
[signo
] = state
;
9713 signal_cache_update (signo
);
9718 signal_pass_update (int signo
, int state
)
9720 int ret
= signal_program
[signo
];
9722 signal_program
[signo
] = state
;
9723 signal_cache_update (signo
);
9727 /* Update the global 'signal_catch' from INFO and notify the
9731 signal_catch_update (const unsigned int *info
)
9735 for (i
= 0; i
< GDB_SIGNAL_LAST
; ++i
)
9736 signal_catch
[i
] = info
[i
] > 0;
9737 signal_cache_update (-1);
9738 target_pass_signals (signal_pass
);
9742 sig_print_header (void)
9744 gdb_printf (_("Signal Stop\tPrint\tPass "
9745 "to program\tDescription\n"));
9749 sig_print_info (enum gdb_signal oursig
)
9751 const char *name
= gdb_signal_to_name (oursig
);
9752 int name_padding
= 13 - strlen (name
);
9754 if (name_padding
<= 0)
9757 gdb_printf ("%s", name
);
9758 gdb_printf ("%*.*s ", name_padding
, name_padding
, " ");
9759 gdb_printf ("%s\t", signal_stop
[oursig
] ? "Yes" : "No");
9760 gdb_printf ("%s\t", signal_print
[oursig
] ? "Yes" : "No");
9761 gdb_printf ("%s\t\t", signal_program
[oursig
] ? "Yes" : "No");
9762 gdb_printf ("%s\n", gdb_signal_to_string (oursig
));
9765 /* Specify how various signals in the inferior should be handled. */
9768 handle_command (const char *args
, int from_tty
)
9770 int digits
, wordlen
;
9771 int sigfirst
, siglast
;
9772 enum gdb_signal oursig
;
9775 if (args
== nullptr)
9777 error_no_arg (_("signal to handle"));
9780 /* Allocate and zero an array of flags for which signals to handle. */
9782 const size_t nsigs
= GDB_SIGNAL_LAST
;
9783 unsigned char sigs
[nsigs
] {};
9785 /* Break the command line up into args. */
9787 gdb_argv
built_argv (args
);
9789 /* Walk through the args, looking for signal oursigs, signal names, and
9790 actions. Signal numbers and signal names may be interspersed with
9791 actions, with the actions being performed for all signals cumulatively
9792 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
9794 for (char *arg
: built_argv
)
9796 wordlen
= strlen (arg
);
9797 for (digits
= 0; isdigit (arg
[digits
]); digits
++)
9801 sigfirst
= siglast
= -1;
9803 if (wordlen
>= 1 && !strncmp (arg
, "all", wordlen
))
9805 /* Apply action to all signals except those used by the
9806 debugger. Silently skip those. */
9809 siglast
= nsigs
- 1;
9811 else if (wordlen
>= 1 && !strncmp (arg
, "stop", wordlen
))
9813 SET_SIGS (nsigs
, sigs
, signal_stop
);
9814 SET_SIGS (nsigs
, sigs
, signal_print
);
9816 else if (wordlen
>= 1 && !strncmp (arg
, "ignore", wordlen
))
9818 UNSET_SIGS (nsigs
, sigs
, signal_program
);
9820 else if (wordlen
>= 2 && !strncmp (arg
, "print", wordlen
))
9822 SET_SIGS (nsigs
, sigs
, signal_print
);
9824 else if (wordlen
>= 2 && !strncmp (arg
, "pass", wordlen
))
9826 SET_SIGS (nsigs
, sigs
, signal_program
);
9828 else if (wordlen
>= 3 && !strncmp (arg
, "nostop", wordlen
))
9830 UNSET_SIGS (nsigs
, sigs
, signal_stop
);
9832 else if (wordlen
>= 3 && !strncmp (arg
, "noignore", wordlen
))
9834 SET_SIGS (nsigs
, sigs
, signal_program
);
9836 else if (wordlen
>= 4 && !strncmp (arg
, "noprint", wordlen
))
9838 UNSET_SIGS (nsigs
, sigs
, signal_print
);
9839 UNSET_SIGS (nsigs
, sigs
, signal_stop
);
9841 else if (wordlen
>= 4 && !strncmp (arg
, "nopass", wordlen
))
9843 UNSET_SIGS (nsigs
, sigs
, signal_program
);
9845 else if (digits
> 0)
9847 /* It is numeric. The numeric signal refers to our own
9848 internal signal numbering from target.h, not to host/target
9849 signal number. This is a feature; users really should be
9850 using symbolic names anyway, and the common ones like
9851 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
9853 sigfirst
= siglast
= (int)
9854 gdb_signal_from_command (atoi (arg
));
9855 if (arg
[digits
] == '-')
9858 gdb_signal_from_command (atoi (arg
+ digits
+ 1));
9860 if (sigfirst
> siglast
)
9862 /* Bet he didn't figure we'd think of this case... */
9863 std::swap (sigfirst
, siglast
);
9868 oursig
= gdb_signal_from_name (arg
);
9869 if (oursig
!= GDB_SIGNAL_UNKNOWN
)
9871 sigfirst
= siglast
= (int) oursig
;
9875 /* Not a number and not a recognized flag word => complain. */
9876 error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg
);
9880 /* If any signal numbers or symbol names were found, set flags for
9881 which signals to apply actions to. */
9883 for (int signum
= sigfirst
; signum
>= 0 && signum
<= siglast
; signum
++)
9885 switch ((enum gdb_signal
) signum
)
9887 case GDB_SIGNAL_TRAP
:
9888 case GDB_SIGNAL_INT
:
9889 if (!allsigs
&& !sigs
[signum
])
9891 if (query (_("%s is used by the debugger.\n\
9892 Are you sure you want to change it? "),
9893 gdb_signal_to_name ((enum gdb_signal
) signum
)))
9898 gdb_printf (_("Not confirmed, unchanged.\n"));
9902 case GDB_SIGNAL_DEFAULT
:
9903 case GDB_SIGNAL_UNKNOWN
:
9904 /* Make sure that "all" doesn't print these. */
9913 for (int signum
= 0; signum
< nsigs
; signum
++)
9916 signal_cache_update (-1);
9917 target_pass_signals (signal_pass
);
9918 target_program_signals (signal_program
);
9922 /* Show the results. */
9923 sig_print_header ();
9924 for (; signum
< nsigs
; signum
++)
9926 sig_print_info ((enum gdb_signal
) signum
);
9933 /* Complete the "handle" command. */
9936 handle_completer (struct cmd_list_element
*ignore
,
9937 completion_tracker
&tracker
,
9938 const char *text
, const char *word
)
9940 static const char * const keywords
[] =
9954 signal_completer (ignore
, tracker
, text
, word
);
9955 complete_on_enum (tracker
, keywords
, word
, word
);
9959 gdb_signal_from_command (int num
)
9961 if (num
>= 1 && num
<= 15)
9962 return (enum gdb_signal
) num
;
9963 error (_("Only signals 1-15 are valid as numeric signals.\n\
9964 Use \"info signals\" for a list of symbolic signals."));
9967 /* Print current contents of the tables set by the handle command.
9968 It is possible we should just be printing signals actually used
9969 by the current target (but for things to work right when switching
9970 targets, all signals should be in the signal tables). */
9973 info_signals_command (const char *signum_exp
, int from_tty
)
9975 enum gdb_signal oursig
;
9977 sig_print_header ();
9981 /* First see if this is a symbol name. */
9982 oursig
= gdb_signal_from_name (signum_exp
);
9983 if (oursig
== GDB_SIGNAL_UNKNOWN
)
9985 /* No, try numeric. */
9987 gdb_signal_from_command (parse_and_eval_long (signum_exp
));
9989 sig_print_info (oursig
);
9994 /* These ugly casts brought to you by the native VAX compiler. */
9995 for (oursig
= GDB_SIGNAL_FIRST
;
9996 (int) oursig
< (int) GDB_SIGNAL_LAST
;
9997 oursig
= (enum gdb_signal
) ((int) oursig
+ 1))
10001 if (oursig
!= GDB_SIGNAL_UNKNOWN
10002 && oursig
!= GDB_SIGNAL_DEFAULT
&& oursig
!= GDB_SIGNAL_0
)
10003 sig_print_info (oursig
);
10006 gdb_printf (_("\nUse the \"handle\" command "
10007 "to change these tables.\n"));
10010 /* The $_siginfo convenience variable is a bit special. We don't know
10011 for sure the type of the value until we actually have a chance to
10012 fetch the data. The type can change depending on gdbarch, so it is
10013 also dependent on which thread you have selected.
10015 1. making $_siginfo be an internalvar that creates a new value on
10018 2. making the value of $_siginfo be an lval_computed value. */
10020 /* This function implements the lval_computed support for reading a
10021 $_siginfo value. */
10024 siginfo_value_read (struct value
*v
)
10026 LONGEST transferred
;
10028 /* If we can access registers, so can we access $_siginfo. Likewise
10030 validate_registers_access ();
10033 target_read (current_inferior ()->top_target (),
10034 TARGET_OBJECT_SIGNAL_INFO
,
10036 v
->contents_all_raw ().data (),
10038 v
->type ()->length ());
10040 if (transferred
!= v
->type ()->length ())
10041 error (_("Unable to read siginfo"));
10044 /* This function implements the lval_computed support for writing a
10045 $_siginfo value. */
10048 siginfo_value_write (struct value
*v
, struct value
*fromval
)
10050 LONGEST transferred
;
10052 /* If we can access registers, so can we access $_siginfo. Likewise
10054 validate_registers_access ();
10056 transferred
= target_write (current_inferior ()->top_target (),
10057 TARGET_OBJECT_SIGNAL_INFO
,
10059 fromval
->contents_all_raw ().data (),
10061 fromval
->type ()->length ());
10063 if (transferred
!= fromval
->type ()->length ())
10064 error (_("Unable to write siginfo"));
10067 static const struct lval_funcs siginfo_value_funcs
=
10069 siginfo_value_read
,
10070 siginfo_value_write
10073 /* Return a new value with the correct type for the siginfo object of
10074 the current thread using architecture GDBARCH. Return a void value
10075 if there's no object available. */
10077 static struct value
*
10078 siginfo_make_value (struct gdbarch
*gdbarch
, struct internalvar
*var
,
10081 if (target_has_stack ()
10082 && inferior_ptid
!= null_ptid
10083 && gdbarch_get_siginfo_type_p (gdbarch
))
10085 struct type
*type
= gdbarch_get_siginfo_type (gdbarch
);
10087 return value::allocate_computed (type
, &siginfo_value_funcs
, nullptr);
10090 return value::allocate (builtin_type (gdbarch
)->builtin_void
);
10094 /* infcall_suspend_state contains state about the program itself like its
10095 registers and any signal it received when it last stopped.
10096 This state must be restored regardless of how the inferior function call
10097 ends (either successfully, or after it hits a breakpoint or signal)
10098 if the program is to properly continue where it left off. */
10100 class infcall_suspend_state
10103 /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
10104 once the inferior function call has finished. */
10105 infcall_suspend_state (struct gdbarch
*gdbarch
,
10106 const struct thread_info
*tp
,
10107 struct regcache
*regcache
)
10108 : m_registers (new readonly_detached_regcache (*regcache
))
10110 tp
->save_suspend_to (m_thread_suspend
);
10112 gdb::unique_xmalloc_ptr
<gdb_byte
> siginfo_data
;
10114 if (gdbarch_get_siginfo_type_p (gdbarch
))
10116 struct type
*type
= gdbarch_get_siginfo_type (gdbarch
);
10117 size_t len
= type
->length ();
10119 siginfo_data
.reset ((gdb_byte
*) xmalloc (len
));
10121 if (target_read (current_inferior ()->top_target (),
10122 TARGET_OBJECT_SIGNAL_INFO
, nullptr,
10123 siginfo_data
.get (), 0, len
) != len
)
10125 /* Errors ignored. */
10126 siginfo_data
.reset (nullptr);
10132 m_siginfo_gdbarch
= gdbarch
;
10133 m_siginfo_data
= std::move (siginfo_data
);
10137 /* Return a pointer to the stored register state. */
10139 readonly_detached_regcache
*registers () const
10141 return m_registers
.get ();
10144 /* Restores the stored state into GDBARCH, TP, and REGCACHE. */
10146 void restore (struct gdbarch
*gdbarch
,
10147 struct thread_info
*tp
,
10148 struct regcache
*regcache
) const
10150 tp
->restore_suspend_from (m_thread_suspend
);
10152 if (m_siginfo_gdbarch
== gdbarch
)
10154 struct type
*type
= gdbarch_get_siginfo_type (gdbarch
);
10156 /* Errors ignored. */
10157 target_write (current_inferior ()->top_target (),
10158 TARGET_OBJECT_SIGNAL_INFO
, nullptr,
10159 m_siginfo_data
.get (), 0, type
->length ());
10162 /* The inferior can be gone if the user types "print exit(0)"
10163 (and perhaps other times). */
10164 if (target_has_execution ())
10165 /* NB: The register write goes through to the target. */
10166 regcache
->restore (registers ());
10170 /* How the current thread stopped before the inferior function call was
10172 struct thread_suspend_state m_thread_suspend
;
10174 /* The registers before the inferior function call was executed. */
10175 std::unique_ptr
<readonly_detached_regcache
> m_registers
;
10177 /* Format of SIGINFO_DATA or NULL if it is not present. */
10178 struct gdbarch
*m_siginfo_gdbarch
= nullptr;
10180 /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
10181 gdbarch_get_siginfo_type ()->length (). For different gdbarch the
10182 content would be invalid. */
10183 gdb::unique_xmalloc_ptr
<gdb_byte
> m_siginfo_data
;
10186 infcall_suspend_state_up
10187 save_infcall_suspend_state ()
10189 struct thread_info
*tp
= inferior_thread ();
10190 regcache
*regcache
= get_thread_regcache (tp
);
10191 struct gdbarch
*gdbarch
= regcache
->arch ();
10193 infcall_suspend_state_up inf_state
10194 (new struct infcall_suspend_state (gdbarch
, tp
, regcache
));
10196 /* Having saved the current state, adjust the thread state, discarding
10197 any stop signal information. The stop signal is not useful when
10198 starting an inferior function call, and run_inferior_call will not use
10199 the signal due to its `proceed' call with GDB_SIGNAL_0. */
10200 tp
->set_stop_signal (GDB_SIGNAL_0
);
10205 /* Restore inferior session state to INF_STATE. */
10208 restore_infcall_suspend_state (struct infcall_suspend_state
*inf_state
)
10210 struct thread_info
*tp
= inferior_thread ();
10211 regcache
*regcache
= get_thread_regcache (inferior_thread ());
10212 struct gdbarch
*gdbarch
= regcache
->arch ();
10214 inf_state
->restore (gdbarch
, tp
, regcache
);
10215 discard_infcall_suspend_state (inf_state
);
/* Free INF_STATE without restoring it.  */

void
discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
{
  delete inf_state;
}
10224 readonly_detached_regcache
*
10225 get_infcall_suspend_state_regcache (struct infcall_suspend_state
*inf_state
)
10227 return inf_state
->registers ();
10230 /* infcall_control_state contains state regarding gdb's control of the
10231 inferior itself like stepping control. It also contains session state like
10232 the user's currently selected frame. */
10234 struct infcall_control_state
10236 struct thread_control_state thread_control
;
10237 struct inferior_control_state inferior_control
;
10239 /* Other fields: */
10240 enum stop_stack_kind stop_stack_dummy
= STOP_NONE
;
10241 int stopped_by_random_signal
= 0;
10243 /* ID and level of the selected frame when the inferior function
10245 struct frame_id selected_frame_id
{};
10246 int selected_frame_level
= -1;
10249 /* Save all of the information associated with the inferior<==>gdb
10252 infcall_control_state_up
10253 save_infcall_control_state ()
10255 infcall_control_state_up
inf_status (new struct infcall_control_state
);
10256 struct thread_info
*tp
= inferior_thread ();
10257 struct inferior
*inf
= current_inferior ();
10259 inf_status
->thread_control
= tp
->control
;
10260 inf_status
->inferior_control
= inf
->control
;
10262 tp
->control
.step_resume_breakpoint
= nullptr;
10263 tp
->control
.exception_resume_breakpoint
= nullptr;
10265 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
10266 chain. If caller's caller is walking the chain, they'll be happier if we
10267 hand them back the original chain when restore_infcall_control_state is
10269 tp
->control
.stop_bpstat
= bpstat_copy (tp
->control
.stop_bpstat
);
10271 /* Other fields: */
10272 inf_status
->stop_stack_dummy
= stop_stack_dummy
;
10273 inf_status
->stopped_by_random_signal
= stopped_by_random_signal
;
10275 save_selected_frame (&inf_status
->selected_frame_id
,
10276 &inf_status
->selected_frame_level
);
10281 /* Restore inferior session state to INF_STATUS. */
10284 restore_infcall_control_state (struct infcall_control_state
*inf_status
)
10286 struct thread_info
*tp
= inferior_thread ();
10287 struct inferior
*inf
= current_inferior ();
10289 if (tp
->control
.step_resume_breakpoint
)
10290 tp
->control
.step_resume_breakpoint
->disposition
= disp_del_at_next_stop
;
10292 if (tp
->control
.exception_resume_breakpoint
)
10293 tp
->control
.exception_resume_breakpoint
->disposition
10294 = disp_del_at_next_stop
;
10296 /* Handle the bpstat_copy of the chain. */
10297 bpstat_clear (&tp
->control
.stop_bpstat
);
10299 tp
->control
= inf_status
->thread_control
;
10300 inf
->control
= inf_status
->inferior_control
;
10302 /* Other fields: */
10303 stop_stack_dummy
= inf_status
->stop_stack_dummy
;
10304 stopped_by_random_signal
= inf_status
->stopped_by_random_signal
;
10306 if (target_has_stack ())
10308 restore_selected_frame (inf_status
->selected_frame_id
,
10309 inf_status
->selected_frame_level
);
/* Discard an infcall control state without restoring it: release the
   resources held by INF_STATUS (saved resume breakpoints and the saved
   bpstat chain) and free it.  Ownership of INF_STATUS transfers to this
   function.  */

void
discard_infcall_control_state (struct infcall_control_state *inf_status)
{
  /* The saved breakpoints will never be re-installed; mark them for
     deletion at the next stop instead of leaking them.  */
  if (inf_status->thread_control.step_resume_breakpoint)
    inf_status->thread_control.step_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  if (inf_status->thread_control.exception_resume_breakpoint)
    inf_status->thread_control.exception_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  /* See save_infcall_control_state for info on stop_bpstat.  */
  bpstat_clear (&inf_status->thread_control.stop_bpstat);

  /* NOTE(review): trailing delete reconstructed from the saved-state
     ownership contract — confirm against save_infcall_control_state.  */
  delete inf_status;
}
10332 /* See infrun.h. */
10335 clear_exit_convenience_vars (void)
10337 clear_internalvar (lookup_internalvar ("_exitsignal"));
10338 clear_internalvar (lookup_internalvar ("_exitcode"));
/* User interface for reverse debugging:
   Set exec-direction / show exec-direction commands
   (returns error unless target implements to_set_exec_direction method).  */

/* Current execution direction; consulted by infrun when resuming.  */
enum exec_direction_kind execution_direction = EXEC_FORWARD;

/* Canonical spellings accepted by "set exec-direction".  */
static const char exec_forward[] = "forward";
static const char exec_reverse[] = "reverse";

/* Backing storage for the "set exec-direction" enum command; always
   points at one of the two strings above.  */
static const char *exec_direction = exec_forward;

/* NOTE(review): the extraction dropped the initializer body; the
   conventional nullptr-terminated list below is reconstructed — confirm
   against the original source.  */
static const char *const exec_direction_names[] = {
  exec_forward,
  exec_reverse,
  nullptr
};
/* Handler for "set exec-direction": check that the target supports
   reverse execution and translate the string setting EXEC_DIRECTION into
   the enum EXECUTION_DIRECTION.  ARGS and FROM_TTY are unused; CMD is
   the command list element being set.  */

static void
set_exec_direction_func (const char *args, int from_tty,
			 struct cmd_list_element *cmd)
{
  if (target_can_execute_reverse ())
    {
      if (!strcmp (exec_direction, exec_forward))
	execution_direction = EXEC_FORWARD;
      else if (!strcmp (exec_direction, exec_reverse))
	execution_direction = EXEC_REVERSE;
    }
  else
    {
      /* Roll the user-visible setting back to "forward" before erroring
	 out, so it stays consistent with EXECUTION_DIRECTION.  */
      exec_direction = exec_forward;
      error (_("Target does not support this operation."));
    }
}
/* Handler for "show exec-direction": print the current execution
   direction to OUT.  FROM_TTY, CMD and VALUE are unused.
   NOTE(review): the extraction dropped the case labels; they are
   reconstructed from the two enum values used elsewhere in this file —
   confirm against the original source.  */

static void
show_exec_direction_func (struct ui_file *out, int from_tty,
			  struct cmd_list_element *cmd, const char *value)
{
  switch (execution_direction) {
  case EXEC_FORWARD:
    gdb_printf (out, _("Forward.\n"));
    break;
  case EXEC_REVERSE:
    gdb_printf (out, _("Reverse.\n"));
    break;
  default:
    /* EXECUTION_DIRECTION only ever holds the two values above; anything
       else indicates internal state corruption.  */
    internal_error (_("bogus execution_direction value: %d"),
		    (int) execution_direction);
  }
}
/* Handler for "show schedule-multiple": report whether execution
   commands resume the threads of all processes (VALUE) to FILE.
   FROM_TTY and C are unused.  */

static void
show_schedule_multiple (struct ui_file *file, int from_tty,
			struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Resuming the execution of threads "
		      "of all processes is %s.\n"), value);
}
/* Implementation of `siginfo' variable.  */

/* Function vector backing the lazily-created $_siginfo convenience
   variable; siginfo_make_value computes its value on demand.
   NOTE(review): the extraction dropped the rest of the initializer; the
   trailing nullptr member is reconstructed — confirm against the
   original source.  */
static const struct internalvar_funcs siginfo_funcs =
{
  siginfo_make_value,
  nullptr,
};
/* Callback for infrun's target events source.  This is marked when a
   thread has a pending status to process.  DATA is unused.  */

static void
infrun_async_inferior_event_handler (gdb_client_data data)
{
  /* Clear the mark first: handling the event below may itself re-mark
     the handler for a newly pending status.  */
  clear_async_event_handler (infrun_async_inferior_event_token);
  inferior_event_handler (INF_REG_EVENT);
}
#if GDB_SELF_TEST

namespace selftests
{

/* Verify that when two threads with the same ptid exist (from two different
   targets) and one of them changes ptid, we only update inferior_ptid if
   it is appropriate.  */

static void
infrun_thread_ptid_changed ()
{
  gdbarch *arch = current_inferior ()->arch ();

  /* The thread which inferior_ptid represents changes ptid.  */
  {
    scoped_restore_current_pspace_and_thread restore;

    scoped_mock_context<test_target_ops> target1 (arch);
    scoped_mock_context<test_target_ops> target2 (arch);

    ptid_t old_ptid (111, 222);
    ptid_t new_ptid (111, 333);

    /* Both mock targets start with a thread of the same OLD_PTID.  */
    target1.mock_inferior.pid = old_ptid.pid ();
    target1.mock_thread.ptid = old_ptid;
    target1.mock_inferior.ptid_thread_map.clear ();
    target1.mock_inferior.ptid_thread_map[old_ptid] = &target1.mock_thread;

    target2.mock_inferior.pid = old_ptid.pid ();
    target2.mock_thread.ptid = old_ptid;
    target2.mock_inferior.ptid_thread_map.clear ();
    target2.mock_inferior.ptid_thread_map[old_ptid] = &target2.mock_thread;

    auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
    set_current_inferior (&target1.mock_inferior);

    /* The ptid change happens on the thread of the CURRENT inferior's
       target...  */
    thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);

    /* ... so inferior_ptid must follow it.  */
    gdb_assert (inferior_ptid == new_ptid);
  }

  /* A thread with the same ptid as inferior_ptid, but from another target,
     changes ptid.  */
  {
    scoped_restore_current_pspace_and_thread restore;

    scoped_mock_context<test_target_ops> target1 (arch);
    scoped_mock_context<test_target_ops> target2 (arch);

    ptid_t old_ptid (111, 222);
    ptid_t new_ptid (111, 333);

    target1.mock_inferior.pid = old_ptid.pid ();
    target1.mock_thread.ptid = old_ptid;
    target1.mock_inferior.ptid_thread_map.clear ();
    target1.mock_inferior.ptid_thread_map[old_ptid] = &target1.mock_thread;

    target2.mock_inferior.pid = old_ptid.pid ();
    target2.mock_thread.ptid = old_ptid;
    target2.mock_inferior.ptid_thread_map.clear ();
    target2.mock_inferior.ptid_thread_map[old_ptid] = &target2.mock_thread;

    auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
    set_current_inferior (&target2.mock_inferior);

    /* This time the ptid change happens on target1's thread, while the
       current inferior belongs to target2...  */
    thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);

    /* ... so inferior_ptid must be left untouched.  */
    gdb_assert (inferior_ptid == old_ptid);
  }
}

} /* namespace selftests */

#endif /* GDB_SELF_TEST */
10492 void _initialize_infrun ();
10494 _initialize_infrun ()
10496 struct cmd_list_element
*c
;
10498 /* Register extra event sources in the event loop. */
10499 infrun_async_inferior_event_token
10500 = create_async_event_handler (infrun_async_inferior_event_handler
, nullptr,
10503 cmd_list_element
*info_signals_cmd
10504 = add_info ("signals", info_signals_command
, _("\
10505 What debugger does when program gets various signals.\n\
10506 Specify a signal as argument to print info on that signal only."));
10507 add_info_alias ("handle", info_signals_cmd
, 0);
10509 c
= add_com ("handle", class_run
, handle_command
, _("\
10510 Specify how to handle signals.\n\
10511 Usage: handle SIGNAL [ACTIONS]\n\
10512 Args are signals and actions to apply to those signals.\n\
10513 If no actions are specified, the current settings for the specified signals\n\
10514 will be displayed instead.\n\
10516 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
10517 from 1-15 are allowed for compatibility with old versions of GDB.\n\
10518 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
10519 The special arg \"all\" is recognized to mean all signals except those\n\
10520 used by the debugger, typically SIGTRAP and SIGINT.\n\
10522 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
10523 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
10524 Stop means reenter debugger if this signal happens (implies print).\n\
10525 Print means print a message if this signal happens.\n\
10526 Pass means let program see this signal; otherwise program doesn't know.\n\
10527 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
10528 Pass and Stop may be combined.\n\
10530 Multiple signals may be specified. Signal numbers and signal names\n\
10531 may be interspersed with actions, with the actions being performed for\n\
10532 all signals cumulatively specified."));
10533 set_cmd_completer (c
, handle_completer
);
10535 stop_command
= add_cmd ("stop", class_obscure
,
10536 not_just_help_class_command
, _("\
10537 There is no `stop' command, but you can set a hook on `stop'.\n\
10538 This allows you to set a list of commands to be run each time execution\n\
10539 of the program stops."), &cmdlist
);
10541 add_setshow_boolean_cmd
10542 ("infrun", class_maintenance
, &debug_infrun
,
10543 _("Set inferior debugging."),
10544 _("Show inferior debugging."),
10545 _("When non-zero, inferior specific debugging is enabled."),
10546 nullptr, show_debug_infrun
, &setdebuglist
, &showdebuglist
);
10548 add_setshow_boolean_cmd ("non-stop", no_class
,
10550 Set whether gdb controls the inferior in non-stop mode."), _("\
10551 Show whether gdb controls the inferior in non-stop mode."), _("\
10552 When debugging a multi-threaded program and this setting is\n\
10553 off (the default, also called all-stop mode), when one thread stops\n\
10554 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
10555 all other threads in the program while you interact with the thread of\n\
10556 interest. When you continue or step a thread, you can allow the other\n\
10557 threads to run, or have them remain stopped, but while you inspect any\n\
10558 thread's state, all threads stop.\n\
10560 In non-stop mode, when one thread stops, other threads can continue\n\
10561 to run freely. You'll be able to step each thread independently,\n\
10562 leave it stopped or free to run as needed."),
10568 for (size_t i
= 0; i
< GDB_SIGNAL_LAST
; i
++)
10570 signal_stop
[i
] = 1;
10571 signal_print
[i
] = 1;
10572 signal_program
[i
] = 1;
10573 signal_catch
[i
] = 0;
10576 /* Signals caused by debugger's own actions should not be given to
10577 the program afterwards.
10579 Do not deliver GDB_SIGNAL_TRAP by default, except when the user
10580 explicitly specifies that it should be delivered to the target
10581 program. Typically, that would occur when a user is debugging a
10582 target monitor on a simulator: the target monitor sets a
10583 breakpoint; the simulator encounters this breakpoint and halts
10584 the simulation handing control to GDB; GDB, noting that the stop
10585 address doesn't map to any known breakpoint, returns control back
10586 to the simulator; the simulator then delivers the hardware
10587 equivalent of a GDB_SIGNAL_TRAP to the program being
10589 signal_program
[GDB_SIGNAL_TRAP
] = 0;
10590 signal_program
[GDB_SIGNAL_INT
] = 0;
10592 /* Signals that are not errors should not normally enter the debugger. */
10593 signal_stop
[GDB_SIGNAL_ALRM
] = 0;
10594 signal_print
[GDB_SIGNAL_ALRM
] = 0;
10595 signal_stop
[GDB_SIGNAL_VTALRM
] = 0;
10596 signal_print
[GDB_SIGNAL_VTALRM
] = 0;
10597 signal_stop
[GDB_SIGNAL_PROF
] = 0;
10598 signal_print
[GDB_SIGNAL_PROF
] = 0;
10599 signal_stop
[GDB_SIGNAL_CHLD
] = 0;
10600 signal_print
[GDB_SIGNAL_CHLD
] = 0;
10601 signal_stop
[GDB_SIGNAL_IO
] = 0;
10602 signal_print
[GDB_SIGNAL_IO
] = 0;
10603 signal_stop
[GDB_SIGNAL_POLL
] = 0;
10604 signal_print
[GDB_SIGNAL_POLL
] = 0;
10605 signal_stop
[GDB_SIGNAL_URG
] = 0;
10606 signal_print
[GDB_SIGNAL_URG
] = 0;
10607 signal_stop
[GDB_SIGNAL_WINCH
] = 0;
10608 signal_print
[GDB_SIGNAL_WINCH
] = 0;
10609 signal_stop
[GDB_SIGNAL_PRIO
] = 0;
10610 signal_print
[GDB_SIGNAL_PRIO
] = 0;
10612 /* These signals are used internally by user-level thread
10613 implementations. (See signal(5) on Solaris.) Like the above
10614 signals, a healthy program receives and handles them as part of
10615 its normal operation. */
10616 signal_stop
[GDB_SIGNAL_LWP
] = 0;
10617 signal_print
[GDB_SIGNAL_LWP
] = 0;
10618 signal_stop
[GDB_SIGNAL_WAITING
] = 0;
10619 signal_print
[GDB_SIGNAL_WAITING
] = 0;
10620 signal_stop
[GDB_SIGNAL_CANCEL
] = 0;
10621 signal_print
[GDB_SIGNAL_CANCEL
] = 0;
10622 signal_stop
[GDB_SIGNAL_LIBRT
] = 0;
10623 signal_print
[GDB_SIGNAL_LIBRT
] = 0;
10625 /* Update cached state. */
10626 signal_cache_update (-1);
10628 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support
,
10629 &stop_on_solib_events
, _("\
10630 Set stopping for shared library events."), _("\
10631 Show stopping for shared library events."), _("\
10632 If nonzero, gdb will give control to the user when the dynamic linker\n\
10633 notifies gdb of shared library events. The most common event of interest\n\
10634 to the user would be loading/unloading of a new library."),
10635 set_stop_on_solib_events
,
10636 show_stop_on_solib_events
,
10637 &setlist
, &showlist
);
10639 add_setshow_enum_cmd ("follow-fork-mode", class_run
,
10640 follow_fork_mode_kind_names
,
10641 &follow_fork_mode_string
, _("\
10642 Set debugger response to a program call of fork or vfork."), _("\
10643 Show debugger response to a program call of fork or vfork."), _("\
10644 A fork or vfork creates a new process. follow-fork-mode can be:\n\
10645 parent - the original process is debugged after a fork\n\
10646 child - the new process is debugged after a fork\n\
10647 The unfollowed process will continue to run.\n\
10648 By default, the debugger will follow the parent process."),
10650 show_follow_fork_mode_string
,
10651 &setlist
, &showlist
);
10653 add_setshow_enum_cmd ("follow-exec-mode", class_run
,
10654 follow_exec_mode_names
,
10655 &follow_exec_mode_string
, _("\
10656 Set debugger response to a program call of exec."), _("\
10657 Show debugger response to a program call of exec."), _("\
10658 An exec call replaces the program image of a process.\n\
10660 follow-exec-mode can be:\n\
10662 new - the debugger creates a new inferior and rebinds the process\n\
10663 to this new inferior. The program the process was running before\n\
10664 the exec call can be restarted afterwards by restarting the original\n\
10667 same - the debugger keeps the process bound to the same inferior.\n\
10668 The new executable image replaces the previous executable loaded in\n\
10669 the inferior. Restarting the inferior after the exec call restarts\n\
10670 the executable the process was running after the exec call.\n\
10672 By default, the debugger will use the same inferior."),
10674 show_follow_exec_mode_string
,
10675 &setlist
, &showlist
);
10677 add_setshow_enum_cmd ("scheduler-locking", class_run
,
10678 scheduler_enums
, &scheduler_mode
, _("\
10679 Set mode for locking scheduler during execution."), _("\
10680 Show mode for locking scheduler during execution."), _("\
10681 off == no locking (threads may preempt at any time)\n\
10682 on == full locking (no thread except the current thread may run)\n\
10683 This applies to both normal execution and replay mode.\n\
10684 step == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
10685 In this mode, other threads may run during other commands.\n\
10686 This applies to both normal execution and replay mode.\n\
10687 replay == scheduler locked in replay mode and unlocked during normal execution."),
10688 set_schedlock_func
, /* traps on target vector */
10689 show_scheduler_mode
,
10690 &setlist
, &showlist
);
10692 add_setshow_boolean_cmd ("schedule-multiple", class_run
, &sched_multi
, _("\
10693 Set mode for resuming threads of all processes."), _("\
10694 Show mode for resuming threads of all processes."), _("\
10695 When on, execution commands (such as 'continue' or 'next') resume all\n\
10696 threads of all processes. When off (which is the default), execution\n\
10697 commands only resume the threads of the current process. The set of\n\
10698 threads that are resumed is further refined by the scheduler-locking\n\
10699 mode (see help set scheduler-locking)."),
10701 show_schedule_multiple
,
10702 &setlist
, &showlist
);
10704 add_setshow_boolean_cmd ("step-mode", class_run
, &step_stop_if_no_debug
, _("\
10705 Set mode of the step operation."), _("\
10706 Show mode of the step operation."), _("\
10707 When set, doing a step over a function without debug line information\n\
10708 will stop at the first instruction of that function. Otherwise, the\n\
10709 function is skipped and the step command stops at a different source line."),
10711 show_step_stop_if_no_debug
,
10712 &setlist
, &showlist
);
10714 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run
,
10715 &can_use_displaced_stepping
, _("\
10716 Set debugger's willingness to use displaced stepping."), _("\
10717 Show debugger's willingness to use displaced stepping."), _("\
10718 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
10719 supported by the target architecture. If off, gdb will not use displaced\n\
10720 stepping to step over breakpoints, even if such is supported by the target\n\
10721 architecture. If auto (which is the default), gdb will use displaced stepping\n\
10722 if the target architecture supports it and non-stop mode is active, but will not\n\
10723 use it in all-stop mode (see help set non-stop)."),
10725 show_can_use_displaced_stepping
,
10726 &setlist
, &showlist
);
10728 add_setshow_enum_cmd ("exec-direction", class_run
, exec_direction_names
,
10729 &exec_direction
, _("Set direction of execution.\n\
10730 Options are 'forward' or 'reverse'."),
10731 _("Show direction of execution (forward/reverse)."),
10732 _("Tells gdb whether to execute forward or backward."),
10733 set_exec_direction_func
, show_exec_direction_func
,
10734 &setlist
, &showlist
);
10736 /* Set/show detach-on-fork: user-settable mode. */
10738 add_setshow_boolean_cmd ("detach-on-fork", class_run
, &detach_fork
, _("\
10739 Set whether gdb will detach the child of a fork."), _("\
10740 Show whether gdb will detach the child of a fork."), _("\
10741 Tells gdb whether to detach the child of a fork."),
10742 nullptr, nullptr, &setlist
, &showlist
);
10744 /* Set/show disable address space randomization mode. */
10746 add_setshow_boolean_cmd ("disable-randomization", class_support
,
10747 &disable_randomization
, _("\
10748 Set disabling of debuggee's virtual address space randomization."), _("\
10749 Show disabling of debuggee's virtual address space randomization."), _("\
10750 When this mode is on (which is the default), randomization of the virtual\n\
10751 address space is disabled. Standalone programs run with the randomization\n\
10752 enabled by default on some platforms."),
10753 &set_disable_randomization
,
10754 &show_disable_randomization
,
10755 &setlist
, &showlist
);
10757 /* ptid initializations */
10758 inferior_ptid
= null_ptid
;
10759 target_last_wait_ptid
= minus_one_ptid
;
10761 gdb::observers::thread_ptid_changed
.attach (infrun_thread_ptid_changed
,
10763 gdb::observers::thread_stop_requested
.attach (infrun_thread_stop_requested
,
10765 gdb::observers::inferior_exit
.attach (infrun_inferior_exit
, "infrun");
10766 gdb::observers::inferior_execd
.attach (infrun_inferior_execd
, "infrun");
10768 /* Explicitly create without lookup, since that tries to create a
10769 value with a void typed value, and when we get here, gdbarch
10770 isn't initialized yet. At this point, we're quite sure there
10771 isn't another convenience variable of the same name. */
10772 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs
, nullptr);
10774 add_setshow_boolean_cmd ("observer", no_class
,
10775 &observer_mode_1
, _("\
10776 Set whether gdb controls the inferior in observer mode."), _("\
10777 Show whether gdb controls the inferior in observer mode."), _("\
10778 In observer mode, GDB can get data from the inferior, but not\n\
10779 affect its execution. Registers and memory may not be changed,\n\
10780 breakpoints may not be set, and the program cannot be interrupted\n\
10783 show_observer_mode
,
10788 selftests::register_test ("infrun_thread_ptid_changed",
10789 selftests::infrun_thread_ptid_changed
);