[binutils-gdb.git] / gdb / infrun.c
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
4 Copyright (C) 1986-2024 Free Software Foundation, Inc.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21 #include "cli/cli-cmds.h"
22 #include "displaced-stepping.h"
23 #include "infrun.h"
24 #include <ctype.h>
25 #include "exceptions.h"
26 #include "symtab.h"
27 #include "frame.h"
28 #include "inferior.h"
29 #include "breakpoint.h"
30 #include "gdbcore.h"
31 #include "target.h"
32 #include "target-connection.h"
33 #include "gdbthread.h"
34 #include "annotate.h"
35 #include "symfile.h"
36 #include "top.h"
37 #include "ui.h"
38 #include "inf-loop.h"
39 #include "regcache.h"
40 #include "value.h"
41 #include "observable.h"
42 #include "language.h"
43 #include "solib.h"
44 #include "main.h"
45 #include "block.h"
46 #include "mi/mi-common.h"
47 #include "event-top.h"
48 #include "record.h"
49 #include "record-full.h"
50 #include "inline-frame.h"
51 #include "jit.h"
52 #include "tracepoint.h"
53 #include "skip.h"
54 #include "probe.h"
55 #include "objfiles.h"
56 #include "completer.h"
57 #include "target-descriptions.h"
58 #include "target-dcache.h"
59 #include "terminal.h"
60 #include "solist.h"
61 #include "gdbsupport/event-loop.h"
62 #include "thread-fsm.h"
63 #include "gdbsupport/enum-flags.h"
64 #include "progspace-and-thread.h"
65 #include <optional>
66 #include "arch-utils.h"
67 #include "gdbsupport/scope-exit.h"
68 #include "gdbsupport/forward-scope-exit.h"
69 #include "gdbsupport/gdb_select.h"
70 #include <unordered_map>
71 #include "async-event.h"
72 #include "gdbsupport/selftest.h"
73 #include "scoped-mock-context.h"
74 #include "test-target.h"
75 #include "gdbsupport/common-debug.h"
76 #include "gdbsupport/buildargv.h"
77 #include "extension.h"
78 #include "disasm.h"
79 #include "interps.h"
81 /* Prototypes for local functions */
83 static void sig_print_info (enum gdb_signal);
85 static void sig_print_header (void);
87 static void follow_inferior_reset_breakpoints (void);
89 static bool currently_stepping (struct thread_info *tp);
91 static void insert_hp_step_resume_breakpoint_at_frame (const frame_info_ptr &);
93 static void insert_step_resume_breakpoint_at_caller (const frame_info_ptr &);
95 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
97 static bool maybe_software_singlestep (struct gdbarch *gdbarch);
99 static void resume (gdb_signal sig);
101 static void wait_for_inferior (inferior *inf);
103 static void restart_threads (struct thread_info *event_thread,
104 inferior *inf = nullptr);
106 static bool start_step_over (void);
108 static bool step_over_info_valid_p (void);
110 static bool schedlock_applies (struct thread_info *tp);
112 /* Asynchronous signal handler registered as event loop source for
113 when we have pending events ready to be passed to the core. */
114 static struct async_event_handler *infrun_async_inferior_event_token;
116 /* Stores whether infrun_async was previously enabled or disabled.
117 Starts off as -1, indicating "never enabled/disabled". */
118 static int infrun_is_async = -1;
119 static CORE_ADDR update_line_range_start (CORE_ADDR pc,
120 struct execution_control_state *ecs);
122 /* See infrun.h. */
124 void
125 infrun_async (int enable)
127 if (infrun_is_async != enable)
129 infrun_is_async = enable;
131 infrun_debug_printf ("enable=%d", enable);
133 if (enable)
134 mark_async_event_handler (infrun_async_inferior_event_token);
135 else
136 clear_async_event_handler (infrun_async_inferior_event_token);
140 /* See infrun.h. */
142 void
143 mark_infrun_async_event_handler (void)
145 mark_async_event_handler (infrun_async_inferior_event_token);
148 /* When set, stop the 'step' command if we enter a function which has
149 no line number information. The normal behavior is that we step
150 over such functions. */
151 bool step_stop_if_no_debug = false;
152 static void
153 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
154 struct cmd_list_element *c, const char *value)
156 gdb_printf (file, _("Mode of the step operation is %s.\n"), value);
159 /* proceed and normal_stop use this to notify the user when the
160 inferior stopped in a different thread than it had been running in.
161 It can also be used to find for which thread normal_stop last
162 reported a stop. */
163 static thread_info_ref previous_thread;
165 /* See infrun.h. */
167 void
168 update_previous_thread ()
170 if (inferior_ptid == null_ptid)
171 previous_thread = nullptr;
172 else
173 previous_thread = thread_info_ref::new_reference (inferior_thread ());
176 /* See infrun.h. */
178 thread_info *
179 get_previous_thread ()
181 return previous_thread.get ();
184 /* If set (default for legacy reasons), when following a fork, GDB
185 will detach from one of the fork branches, child or parent.
186 Exactly which branch is detached depends on the 'set follow-fork-mode'
187 setting. */
189 static bool detach_fork = true;
191 bool debug_infrun = false;
192 static void
193 show_debug_infrun (struct ui_file *file, int from_tty,
194 struct cmd_list_element *c, const char *value)
196 gdb_printf (file, _("Inferior debugging is %s.\n"), value);
199 /* Support for disabling address space randomization. */
201 bool disable_randomization = true;
203 static void
204 show_disable_randomization (struct ui_file *file, int from_tty,
205 struct cmd_list_element *c, const char *value)
207 if (target_supports_disable_randomization ())
208 gdb_printf (file,
209 _("Disabling randomization of debuggee's "
210 "virtual address space is %s.\n"),
211 value);
212 else
213 gdb_puts (_("Disabling randomization of debuggee's "
214 "virtual address space is unsupported on\n"
215 "this platform.\n"), file);
218 static void
219 set_disable_randomization (const char *args, int from_tty,
220 struct cmd_list_element *c)
222 if (!target_supports_disable_randomization ())
223 error (_("Disabling randomization of debuggee's "
224 "virtual address space is unsupported on\n"
225 "this platform."));
228 /* User interface for non-stop mode. */
230 bool non_stop = false;
231 static bool non_stop_1 = false;
233 static void
234 set_non_stop (const char *args, int from_tty,
235 struct cmd_list_element *c)
237 if (target_has_execution ())
239 non_stop_1 = non_stop;
240 error (_("Cannot change this setting while the inferior is running."));
243 non_stop = non_stop_1;
246 static void
247 show_non_stop (struct ui_file *file, int from_tty,
248 struct cmd_list_element *c, const char *value)
250 gdb_printf (file,
251 _("Controlling the inferior in non-stop mode is %s.\n"),
252 value);
255 /* "Observer mode" is somewhat like a more extreme version of
256 non-stop, in which all GDB operations that might affect the
257 target's execution have been disabled. */
259 static bool observer_mode = false;
260 static bool observer_mode_1 = false;
262 static void
263 set_observer_mode (const char *args, int from_tty,
264 struct cmd_list_element *c)
266 if (target_has_execution ())
268 observer_mode_1 = observer_mode;
269 error (_("Cannot change this setting while the inferior is running."));
272 observer_mode = observer_mode_1;
274 may_write_registers = !observer_mode;
275 may_write_memory = !observer_mode;
276 may_insert_breakpoints = !observer_mode;
277 may_insert_tracepoints = !observer_mode;
278 /* We can insert fast tracepoints in or out of observer mode,
279 but enable them if we're going into this mode. */
280 if (observer_mode)
281 may_insert_fast_tracepoints = true;
282 may_stop = !observer_mode;
283 update_target_permissions ();
285 /* Going *into* observer mode we must force non-stop, then
286 going out we leave it that way. */
287 if (observer_mode)
289 pagination_enabled = false;
290 non_stop = non_stop_1 = true;
293 if (from_tty)
294 gdb_printf (_("Observer mode is now %s.\n"),
295 (observer_mode ? "on" : "off"));
298 static void
299 show_observer_mode (struct ui_file *file, int from_tty,
300 struct cmd_list_element *c, const char *value)
302 gdb_printf (file, _("Observer mode is %s.\n"), value);
305 /* This updates the value of observer mode based on changes in
306 permissions. Note that we are deliberately ignoring the values of
307 may-write-registers and may-write-memory, since the user may have
308 reason to enable these during a session, for instance to turn on a
309 debugging-related global. */
311 void
312 update_observer_mode (void)
314 bool newval = (!may_insert_breakpoints
315 && !may_insert_tracepoints
316 && may_insert_fast_tracepoints
317 && !may_stop
318 && non_stop);
320 /* Let the user know if things change. */
321 if (newval != observer_mode)
322 gdb_printf (_("Observer mode is now %s.\n"),
323 (newval ? "on" : "off"));
325 observer_mode = observer_mode_1 = newval;
328 /* Tables of how to react to signals; the user sets them. */
330 static unsigned char signal_stop[GDB_SIGNAL_LAST];
331 static unsigned char signal_print[GDB_SIGNAL_LAST];
332 static unsigned char signal_program[GDB_SIGNAL_LAST];
334 /* Table of signals that are registered with "catch signal". A
335 non-zero entry indicates that the signal is caught by some "catch
336 signal" command. */
337 static unsigned char signal_catch[GDB_SIGNAL_LAST];
339 /* Table of signals that the target may silently handle.
340 This is automatically determined from the flags above,
341 and simply cached here. */
342 static unsigned char signal_pass[GDB_SIGNAL_LAST];
344 #define SET_SIGS(nsigs,sigs,flags) \
345 do { \
346 int signum = (nsigs); \
347 while (signum-- > 0) \
348 if ((sigs)[signum]) \
349 (flags)[signum] = 1; \
350 } while (0)
352 #define UNSET_SIGS(nsigs,sigs,flags) \
353 do { \
354 int signum = (nsigs); \
355 while (signum-- > 0) \
356 if ((sigs)[signum]) \
357 (flags)[signum] = 0; \
358 } while (0)
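/* Illustrative sketch (not the actual call sites, which appear later in
   this file): given a SIGS array with an entry set for each signal
   selected by a "handle" command, a caller might do, e.g.,

       SET_SIGS (GDB_SIGNAL_LAST, sigs, signal_stop);
       UNSET_SIGS (GDB_SIGNAL_LAST, sigs, signal_program);

   to mark those signals as ones that stop GDB and are not passed to the
   program.  */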
360 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
361 this function is to avoid exporting `signal_program'. */
363 void
364 update_signals_program_target (void)
366 target_program_signals (signal_program);
369 /* Value to pass to target_resume() to cause all threads to resume. */
371 #define RESUME_ALL minus_one_ptid
373 /* Command list pointer for the "stop" placeholder. */
375 static struct cmd_list_element *stop_command;
377 /* Nonzero if we want to give control to the user when we're notified
378 of shared library events by the dynamic linker. */
379 int stop_on_solib_events;
381 /* Enable or disable optional shared library event breakpoints
382 as appropriate when the above flag is changed. */
384 static void
385 set_stop_on_solib_events (const char *args,
386 int from_tty, struct cmd_list_element *c)
388 update_solib_breakpoints ();
391 static void
392 show_stop_on_solib_events (struct ui_file *file, int from_tty,
393 struct cmd_list_element *c, const char *value)
395 gdb_printf (file, _("Stopping for shared library events is %s.\n"),
396 value);
399 /* True after stop if current stack frame should be printed. */
401 static bool stop_print_frame;
403 /* This is a cached copy of the target/ptid/waitstatus of the last
404 event returned by target_wait().
405 This information is returned by get_last_target_status(). */
406 static process_stratum_target *target_last_proc_target;
407 static ptid_t target_last_wait_ptid;
408 static struct target_waitstatus target_last_waitstatus;
410 void init_thread_stepping_state (struct thread_info *tss);
412 static const char follow_fork_mode_child[] = "child";
413 static const char follow_fork_mode_parent[] = "parent";
415 static const char *const follow_fork_mode_kind_names[] = {
416 follow_fork_mode_child,
417 follow_fork_mode_parent,
418 nullptr
421 static const char *follow_fork_mode_string = follow_fork_mode_parent;
422 static void
423 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
424 struct cmd_list_element *c, const char *value)
426 gdb_printf (file,
427 _("Debugger response to a program "
428 "call of fork or vfork is \"%s\".\n"),
429 value);
433 /* Handle changes to the inferior list based on the type of fork,
434 which process is being followed, and whether the other process
435 should be detached. On entry inferior_ptid must be the ptid of
436 the fork parent. At return inferior_ptid is the ptid of the
437 followed inferior. */
439 static bool
440 follow_fork_inferior (bool follow_child, bool detach_fork)
442 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
444 infrun_debug_printf ("follow_child = %d, detach_fork = %d",
445 follow_child, detach_fork);
447 target_waitkind fork_kind = inferior_thread ()->pending_follow.kind ();
448 gdb_assert (fork_kind == TARGET_WAITKIND_FORKED
449 || fork_kind == TARGET_WAITKIND_VFORKED);
450 bool has_vforked = fork_kind == TARGET_WAITKIND_VFORKED;
451 ptid_t parent_ptid = inferior_ptid;
452 ptid_t child_ptid = inferior_thread ()->pending_follow.child_ptid ();
454 if (has_vforked
455 && !non_stop /* Non-stop always resumes both branches. */
456 && current_ui->prompt_state == PROMPT_BLOCKED
457 && !(follow_child || detach_fork || sched_multi))
459 /* The parent stays blocked inside the vfork syscall until the
460 child execs or exits. If we don't let the child run, then
461 the parent stays blocked. If we're telling the parent to run
462 in the foreground, the user will not be able to ctrl-c to get
463 back the terminal, effectively hanging the debug session. */
464 gdb_printf (gdb_stderr, _("\
465 Can not resume the parent process over vfork in the foreground while\n\
466 holding the child stopped. Try \"set detach-on-fork\" or \
467 \"set schedule-multiple\".\n"));
468 return true;
471 inferior *parent_inf = current_inferior ();
472 inferior *child_inf = nullptr;
474 gdb_assert (parent_inf->thread_waiting_for_vfork_done == nullptr);
476 if (!follow_child)
478 /* Detach new forked process? */
479 if (detach_fork)
481 /* Before detaching from the child, remove all breakpoints
482 from it. If we forked, then this has already been taken
483 care of by infrun.c. If we vforked however, any
484 breakpoint inserted in the parent is visible in the
485 child, even those added while stopped in a vfork
486 catchpoint. This will remove the breakpoints from the
487 parent also, but they'll be reinserted below. */
488 if (has_vforked)
490 /* Keep breakpoints list in sync. */
491 remove_breakpoints_inf (current_inferior ());
494 if (print_inferior_events)
496 /* Ensure that we have a process ptid. */
497 ptid_t process_ptid = ptid_t (child_ptid.pid ());
499 target_terminal::ours_for_output ();
500 gdb_printf (_("[Detaching after %s from child %s]\n"),
501 has_vforked ? "vfork" : "fork",
502 target_pid_to_str (process_ptid).c_str ());
505 else
507 /* Add process to GDB's tables. */
508 child_inf = add_inferior (child_ptid.pid ());
510 child_inf->attach_flag = parent_inf->attach_flag;
511 copy_terminal_info (child_inf, parent_inf);
512 child_inf->set_arch (parent_inf->arch ());
513 child_inf->tdesc_info = parent_inf->tdesc_info;
515 child_inf->symfile_flags = SYMFILE_NO_READ;
517 /* If this is a vfork child, then the address-space is
518 shared with the parent. */
519 if (has_vforked)
521 child_inf->pspace = parent_inf->pspace;
522 child_inf->aspace = parent_inf->aspace;
524 exec_on_vfork (child_inf);
526 /* The parent will be frozen until the child is done
527 with the shared region. Keep track of the
528 parent. */
529 child_inf->vfork_parent = parent_inf;
530 child_inf->pending_detach = false;
531 parent_inf->vfork_child = child_inf;
532 parent_inf->pending_detach = false;
534 else
536 child_inf->pspace = new program_space (new_address_space ());
537 child_inf->aspace = child_inf->pspace->aspace;
538 child_inf->removable = true;
539 clone_program_space (child_inf->pspace, parent_inf->pspace);
543 if (has_vforked)
545 /* If we detached from the child, then we have to be careful
546 to not insert breakpoints in the parent until the child
547 is done with the shared memory region. However, if we're
548 staying attached to the child, then we can and should
549 insert breakpoints, so that we can debug it. A
550 subsequent child exec or exit is enough to know when the
551 child stops using the parent's address space. */
552 parent_inf->thread_waiting_for_vfork_done
553 = detach_fork ? inferior_thread () : nullptr;
554 parent_inf->pspace->breakpoints_not_allowed = detach_fork;
556 infrun_debug_printf
557 ("parent_inf->thread_waiting_for_vfork_done == %s",
558 (parent_inf->thread_waiting_for_vfork_done == nullptr
559 ? "nullptr"
560 : (parent_inf->thread_waiting_for_vfork_done
561 ->ptid.to_string ().c_str ())));
564 else
566 /* Follow the child. */
568 if (print_inferior_events)
570 std::string parent_pid = target_pid_to_str (parent_ptid);
571 std::string child_pid = target_pid_to_str (child_ptid);
573 target_terminal::ours_for_output ();
574 gdb_printf (_("[Attaching after %s %s to child %s]\n"),
575 parent_pid.c_str (),
576 has_vforked ? "vfork" : "fork",
577 child_pid.c_str ());
580 /* Add the new inferior first, so that the target_detach below
581 doesn't unpush the target. */
583 child_inf = add_inferior (child_ptid.pid ());
585 child_inf->attach_flag = parent_inf->attach_flag;
586 copy_terminal_info (child_inf, parent_inf);
587 child_inf->set_arch (parent_inf->arch ());
588 child_inf->tdesc_info = parent_inf->tdesc_info;
590 if (has_vforked)
592 /* If this is a vfork child, then the address-space is shared
593 with the parent. */
594 child_inf->aspace = parent_inf->aspace;
595 child_inf->pspace = parent_inf->pspace;
597 exec_on_vfork (child_inf);
599 else if (detach_fork)
601 /* We follow the child and detach from the parent: move the parent's
602 program space to the child. This simplifies some things, like
603 doing "next" over fork() and landing on the expected line in the
604 child (note, that is broken with "set detach-on-fork off").
606 Before assigning brand new spaces for the parent, remove
607 breakpoints from it: because the new pspace won't match
608 currently inserted locations, the normal detach procedure
609 wouldn't remove them, and we would leave them inserted when
610 detaching. */
611 remove_breakpoints_inf (parent_inf);
613 child_inf->aspace = parent_inf->aspace;
614 child_inf->pspace = parent_inf->pspace;
615 parent_inf->pspace = new program_space (new_address_space ());
616 parent_inf->aspace = parent_inf->pspace->aspace;
617 clone_program_space (parent_inf->pspace, child_inf->pspace);
619 /* The parent inferior is still the current one, so keep things
620 in sync. */
621 set_current_program_space (parent_inf->pspace);
623 else
625 child_inf->pspace = new program_space (new_address_space ());
626 child_inf->aspace = child_inf->pspace->aspace;
627 child_inf->removable = true;
628 child_inf->symfile_flags = SYMFILE_NO_READ;
629 clone_program_space (child_inf->pspace, parent_inf->pspace);
633 gdb_assert (current_inferior () == parent_inf);
635 /* If we are setting up an inferior for the child, target_follow_fork is
636 responsible for pushing the appropriate targets on the new inferior's
637 target stack and adding the initial thread (with ptid CHILD_PTID).
639 If we are not setting up an inferior for the child (because following
640 the parent and detach_fork is true), it is responsible for detaching
641 from CHILD_PTID. */
642 target_follow_fork (child_inf, child_ptid, fork_kind, follow_child,
643 detach_fork);
645 gdb::observers::inferior_forked.notify (parent_inf, child_inf, fork_kind);
647 /* target_follow_fork must leave the parent as the current inferior. If we
648 want to follow the child, we make it the current one below. */
649 gdb_assert (current_inferior () == parent_inf);
651 /* If there is a child inferior, target_follow_fork must have created a thread
652 for it. */
653 if (child_inf != nullptr)
654 gdb_assert (!child_inf->thread_list.empty ());
656 /* Clear the parent thread's pending follow field. Do this before calling
657 target_detach, so that the target can differentiate the two following
658 cases:
660 - We continue past a fork with "follow-fork-mode == child" &&
661 "detach-on-fork on", and therefore detach the parent. In that
662 case the target should not detach the fork child.
663 - We run to a fork catchpoint and the user types "detach". In that
664 case, the target should detach the fork child in addition to the
665 parent.
667 The former case will have pending_follow cleared, the latter will have
668 pending_follow set. */
669 thread_info *parent_thread = parent_inf->find_thread (parent_ptid);
670 gdb_assert (parent_thread != nullptr);
671 parent_thread->pending_follow.set_spurious ();
673 /* Detach the parent if needed. */
674 if (follow_child)
676 /* If we're vforking, we want to hold on to the parent until
677 the child exits or execs. At child exec or exit time we
678 can remove the old breakpoints from the parent and detach
679 or resume debugging it. Otherwise, detach the parent now;
680 we'll want to reuse its program/address spaces, but we
681 can't set them to the child before removing breakpoints
682 from the parent, otherwise, the breakpoints module could
683 decide to remove breakpoints from the wrong process (since
684 they'd be assigned to the same address space). */
686 if (has_vforked)
688 gdb_assert (child_inf->vfork_parent == nullptr);
689 gdb_assert (parent_inf->vfork_child == nullptr);
690 child_inf->vfork_parent = parent_inf;
691 child_inf->pending_detach = false;
692 parent_inf->vfork_child = child_inf;
693 parent_inf->pending_detach = detach_fork;
695 else if (detach_fork)
697 if (print_inferior_events)
699 /* Ensure that we have a process ptid. */
700 ptid_t process_ptid = ptid_t (parent_ptid.pid ());
702 target_terminal::ours_for_output ();
703 gdb_printf (_("[Detaching after fork from "
704 "parent %s]\n"),
705 target_pid_to_str (process_ptid).c_str ());
708 target_detach (parent_inf, 0);
712 /* If we ended up creating a new inferior, call post_create_inferior to inform
713 the various subcomponents. */
714 if (child_inf != nullptr)
716 /* If FOLLOW_CHILD, we leave CHILD_INF as the current inferior
717 (do not restore the parent as the current inferior). */
718 std::optional<scoped_restore_current_thread> maybe_restore;
720 if (!follow_child && !sched_multi)
721 maybe_restore.emplace ();
723 switch_to_thread (*child_inf->threads ().begin ());
724 post_create_inferior (0);
727 return false;
730 /* Set the last target status as TP having stopped. */
732 static void
733 set_last_target_status_stopped (thread_info *tp)
735 set_last_target_status (tp->inf->process_target (), tp->ptid,
736 target_waitstatus {}.set_stopped (GDB_SIGNAL_0));
739 /* Tell the target to follow the fork we're stopped at. Returns true
740 if the inferior should be resumed; false, if the target for some
741 reason decided it's best not to resume. */
743 static bool
744 follow_fork ()
746 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
748 bool follow_child = (follow_fork_mode_string == follow_fork_mode_child);
749 bool should_resume = true;
751 /* Copy user stepping state to the new inferior thread. FIXME: the
752 followed fork child thread should have a copy of most of the
753 parent thread structure's run control related fields, not just these.
754 Initialized to avoid "may be used uninitialized" warnings from gcc. */
755 struct breakpoint *step_resume_breakpoint = nullptr;
756 struct breakpoint *exception_resume_breakpoint = nullptr;
757 CORE_ADDR step_range_start = 0;
758 CORE_ADDR step_range_end = 0;
759 int current_line = 0;
760 symtab *current_symtab = nullptr;
761 struct frame_id step_frame_id = { 0 };
763 if (!non_stop)
765 thread_info *cur_thr = inferior_thread ();
767 ptid_t resume_ptid
768 = user_visible_resume_ptid (cur_thr->control.stepping_command);
769 process_stratum_target *resume_target
770 = user_visible_resume_target (resume_ptid);
772 /* Check if there's a thread that we're about to resume, other
773 than the current, with an unfollowed fork/vfork. If so,
774 switch back to it, to tell the target to follow it (in either
775 direction). We'll afterwards refuse to resume, and inform
776 the user what happened. */
777 for (thread_info *tp : all_non_exited_threads (resume_target,
778 resume_ptid))
780 if (tp == cur_thr)
781 continue;
783 /* follow_fork_inferior clears tp->pending_follow, and below
784 we'll need the value after the follow_fork_inferior
785 call. */
786 target_waitkind kind = tp->pending_follow.kind ();
788 if (kind != TARGET_WAITKIND_SPURIOUS)
790 infrun_debug_printf ("need to follow-fork [%s] first",
791 tp->ptid.to_string ().c_str ());
793 switch_to_thread (tp);
795 /* Set up inferior(s) as specified by the caller, and
796 tell the target to do whatever is necessary to follow
797 either parent or child. */
798 if (follow_child)
800 /* The thread that started the execution command
801 won't exist in the child. Abort the command and
802 immediately stop in this thread, in the child,
803 inside fork. */
804 should_resume = false;
806 else
808 /* Following the parent, so let the thread fork its
809 child freely, it won't influence the current
810 execution command. */
811 if (follow_fork_inferior (follow_child, detach_fork))
813 /* Target refused to follow, or there's some
814 other reason we shouldn't resume. */
815 switch_to_thread (cur_thr);
816 set_last_target_status_stopped (cur_thr);
817 return false;
820 /* If we're following a vfork, we need to leave
821 the just-forked thread as selected, as we need to
822 solo-resume it to collect the VFORK_DONE event.
823 If we're following a fork, however, switch back
824 to the original thread so that we continue stepping
825 it, etc. */
826 if (kind != TARGET_WAITKIND_VFORKED)
828 gdb_assert (kind == TARGET_WAITKIND_FORKED);
829 switch_to_thread (cur_thr);
833 break;
838 thread_info *tp = inferior_thread ();
840 /* If there were any forks/vforks that were caught and are now to be
841 followed, then do so now. */
842 switch (tp->pending_follow.kind ())
844 case TARGET_WAITKIND_FORKED:
845 case TARGET_WAITKIND_VFORKED:
847 ptid_t parent, child;
848 std::unique_ptr<struct thread_fsm> thread_fsm;
850 /* If the user did a next/step, etc, over a fork call,
851 preserve the stepping state in the fork child. */
852 if (follow_child && should_resume)
854 step_resume_breakpoint = clone_momentary_breakpoint
855 (tp->control.step_resume_breakpoint);
856 step_range_start = tp->control.step_range_start;
857 step_range_end = tp->control.step_range_end;
858 current_line = tp->current_line;
859 current_symtab = tp->current_symtab;
860 step_frame_id = tp->control.step_frame_id;
861 exception_resume_breakpoint
862 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
863 thread_fsm = tp->release_thread_fsm ();
865 /* For now, delete the parent's sr breakpoint, otherwise,
866 parent/child sr breakpoints are considered duplicates,
867 and the child version will not be installed. Remove
868 this when the breakpoints module becomes aware of
869 inferiors and address spaces. */
870 delete_step_resume_breakpoint (tp);
871 tp->control.step_range_start = 0;
872 tp->control.step_range_end = 0;
873 tp->control.step_frame_id = null_frame_id;
874 delete_exception_resume_breakpoint (tp);
877 parent = inferior_ptid;
878 child = tp->pending_follow.child_ptid ();
880 /* If handling a vfork, stop all the inferior's threads, they will be
881 restarted when the vfork shared region is complete. */
882 if (tp->pending_follow.kind () == TARGET_WAITKIND_VFORKED
883 && target_is_non_stop_p ())
884 stop_all_threads ("handling vfork", tp->inf);
886 process_stratum_target *parent_targ = tp->inf->process_target ();
887 /* Set up inferior(s) as specified by the caller, and tell the
888 target to do whatever is necessary to follow either parent
889 or child. */
890 if (follow_fork_inferior (follow_child, detach_fork))
892 /* Target refused to follow, or there's some other reason
893 we shouldn't resume. */
894 should_resume = 0;
896 else
898 /* If we followed the child, switch to it... */
899 if (follow_child)
901 tp = parent_targ->find_thread (child);
902 switch_to_thread (tp);
904 /* ... and preserve the stepping state, in case the
905 user was stepping over the fork call. */
906 if (should_resume)
908 tp->control.step_resume_breakpoint
909 = step_resume_breakpoint;
910 tp->control.step_range_start = step_range_start;
911 tp->control.step_range_end = step_range_end;
912 tp->current_line = current_line;
913 tp->current_symtab = current_symtab;
914 tp->control.step_frame_id = step_frame_id;
915 tp->control.exception_resume_breakpoint
916 = exception_resume_breakpoint;
917 tp->set_thread_fsm (std::move (thread_fsm));
919 else
921 /* If we get here, it was because we're trying to
922 resume from a fork catchpoint, but, the user
923 has switched threads away from the thread that
924 forked. In that case, the resume command
925 issued is most likely not applicable to the
926 child, so just warn, and refuse to resume. */
927 warning (_("Not resuming: switched threads "
928 "before following fork child."));
931 /* Reset breakpoints in the child as appropriate. */
932 follow_inferior_reset_breakpoints ();
936 break;
937 case TARGET_WAITKIND_SPURIOUS:
938 /* Nothing to follow. */
939 break;
940 default:
941 internal_error ("Unexpected pending_follow.kind %d\n",
942 tp->pending_follow.kind ());
943 break;
946 if (!should_resume)
947 set_last_target_status_stopped (tp);
948 return should_resume;
951 static void
952 follow_inferior_reset_breakpoints (void)
954 struct thread_info *tp = inferior_thread ();
956 /* Was there a step_resume breakpoint? (There was if the user
957 did a "next" at the fork() call.) If so, explicitly reset its
958 thread number. Cloned step_resume breakpoints are disabled on
959 creation, so enable it here now that it is associated with the
960 correct thread.
962 step_resumes are a form of bp that are made to be per-thread.
963 Since we created the step_resume bp when the parent process
964 was being debugged, and now are switching to the child process,
965 from the breakpoint package's viewpoint, that's a switch of
966 "threads". We must update the bp's notion of which thread
967 it is for, or it'll be ignored when it triggers. */
969 if (tp->control.step_resume_breakpoint)
971 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
972 tp->control.step_resume_breakpoint->first_loc ().enabled = 1;
975 /* Treat exception_resume breakpoints like step_resume breakpoints. */
976 if (tp->control.exception_resume_breakpoint)
978 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
979 tp->control.exception_resume_breakpoint->first_loc ().enabled = 1;
982 /* Reinsert all breakpoints in the child. The user may have set
983 breakpoints after catching the fork, in which case those
984 were never set in the child, but only in the parent. This makes
985 sure the inserted breakpoints match the breakpoint list. */
987 breakpoint_re_set ();
988 insert_breakpoints ();
991 /* The child has exited or execed: resume THREAD, a thread of the parent,
992 if it was meant to be executing. */
994 static void
995 proceed_after_vfork_done (thread_info *thread)
997 if (thread->state == THREAD_RUNNING
998 && !thread->executing ()
999 && !thread->stop_requested
1000 && thread->stop_signal () == GDB_SIGNAL_0)
1002 infrun_debug_printf ("resuming vfork parent thread %s",
1003 thread->ptid.to_string ().c_str ());
1005 switch_to_thread (thread);
1006 clear_proceed_status (0);
1007 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
1011 /* Called whenever we notice an exec or exit event, to handle
1012 detaching or resuming a vfork parent. */
1014 static void
1015 handle_vfork_child_exec_or_exit (int exec)
1017 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
1019 struct inferior *inf = current_inferior ();
1021 if (inf->vfork_parent)
1023 inferior *resume_parent = nullptr;
1025 /* This exec or exit marks the end of the shared memory region
1026 between the parent and the child. Break the bonds. */
1027 inferior *vfork_parent = inf->vfork_parent;
1028 inf->vfork_parent->vfork_child = nullptr;
1029 inf->vfork_parent = nullptr;
1031 /* If the user wanted to detach from the parent, now is the
1032 time. */
1033 if (vfork_parent->pending_detach)
1035 struct program_space *pspace;
1037 /* follow-fork child, detach-on-fork on. */
1039 vfork_parent->pending_detach = false;
1041 scoped_restore_current_pspace_and_thread restore_thread;
1043 /* We're letting loose of the parent. */
1044 thread_info *tp = any_live_thread_of_inferior (vfork_parent);
1045 switch_to_thread (tp);
1047 /* We're about to detach from the parent, which implicitly
1048 removes breakpoints from its address space. There's a
1049 catch here: we want to reuse the spaces for the child,
1050 but, parent/child are still sharing the pspace at this
1051 point, although the exec in reality makes the kernel give
1052 the child a fresh set of new pages. The problem here is
1053 that the breakpoints module, being unaware of this, would
1054 likely choose the child process to write to the parent
1055 address space. Swapping the child temporarily away from
1056 the spaces has the desired effect. Yes, this is "sort
1057 of" a hack. */
1059 pspace = inf->pspace;
1060 inf->pspace = nullptr;
1061 address_space_ref_ptr aspace = std::move (inf->aspace);
1063 if (print_inferior_events)
1065 std::string pidstr
1066 = target_pid_to_str (ptid_t (vfork_parent->pid));
1068 target_terminal::ours_for_output ();
1070 if (exec)
1072 gdb_printf (_("[Detaching vfork parent %s "
1073 "after child exec]\n"), pidstr.c_str ());
1075 else
1077 gdb_printf (_("[Detaching vfork parent %s "
1078 "after child exit]\n"), pidstr.c_str ());
1082 target_detach (vfork_parent, 0);
1084 /* Put it back. */
1085 inf->pspace = pspace;
1086 inf->aspace = aspace;
1088 else if (exec)
1090 /* We're staying attached to the parent, so, really give the
1091 child a new address space. */
1092 inf->pspace = new program_space (maybe_new_address_space ());
1093 inf->aspace = inf->pspace->aspace;
1094 inf->removable = true;
1095 set_current_program_space (inf->pspace);
1097 resume_parent = vfork_parent;
1099 else
1101 /* If this is a vfork child exiting, then the pspace and
1102 aspaces were shared with the parent. Since we're
1103 reporting the process exit, we'll be mourning all that is
1104 found in the address space, and switching to null_ptid,
1105 preparing to start a new inferior. But, since we don't
1106 want to clobber the parent's address/program spaces, we
1107 go ahead and create a new one for this exiting
1108 inferior. */
1110 scoped_restore_current_thread restore_thread;
1112 /* Temporarily switch to the vfork parent, to facilitate ptrace
1113 calls done during maybe_new_address_space. */
1114 switch_to_thread (any_live_thread_of_inferior (vfork_parent));
1115 address_space_ref_ptr aspace = maybe_new_address_space ();
1117 /* Switch back to the vfork child inferior. Switch to no-thread
1118 while running clone_program_space, so that clone_program_space
1119 doesn't try to read the selected frame of a dead process.
1120 switch_to_inferior_no_thread (inf);
1122 inf->pspace = new program_space (std::move (aspace));
1123 inf->aspace = inf->pspace->aspace;
1124 set_current_program_space (inf->pspace);
1125 inf->removable = true;
1126 inf->symfile_flags = SYMFILE_NO_READ;
1127 clone_program_space (inf->pspace, vfork_parent->pspace);
1129 resume_parent = vfork_parent;
1132 gdb_assert (current_program_space == inf->pspace);
1134 if (non_stop && resume_parent != nullptr)
1136 /* If the user wanted the parent to be running, let it go
1137 free now. */
1138 scoped_restore_current_thread restore_thread;
1140 infrun_debug_printf ("resuming vfork parent process %d",
1141 resume_parent->pid);
1143 for (thread_info *thread : resume_parent->threads ())
1144 proceed_after_vfork_done (thread);
1149 /* Handle TARGET_WAITKIND_VFORK_DONE. */
1151 static void
1152 handle_vfork_done (thread_info *event_thread)
1154 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
1156 /* We only care about this event if inferior::thread_waiting_for_vfork_done is
1157 set, that is if we are waiting for a vfork child not under our control
1158 (because we detached it) to exec or exit.
1160 If an inferior has vforked and we are debugging the child, we don't use
1161 the vfork-done event to get notified about the end of the shared address
1162 space window. We rely instead on the child's exec or exit event, and the
1163 inferior::vfork_{parent,child} fields are used instead. See
1164 handle_vfork_child_exec_or_exit for that. */
1165 if (event_thread->inf->thread_waiting_for_vfork_done == nullptr)
1167 infrun_debug_printf ("not waiting for a vfork-done event");
1168 return;
1171 /* We stopped all threads (other than the vforking thread) of the inferior in
1172 follow_fork and kept them stopped until now. It should therefore not be
1173 possible for another thread to have reported a vfork during that window.
1174 If THREAD_WAITING_FOR_VFORK_DONE is set, it has to be the same thread whose
1175 vfork-done we are handling right now. */
1176 gdb_assert (event_thread->inf->thread_waiting_for_vfork_done == event_thread);
1178 event_thread->inf->thread_waiting_for_vfork_done = nullptr;
1179 event_thread->inf->pspace->breakpoints_not_allowed = 0;
1181 /* On non-stop targets, we stopped all the inferior's threads in follow_fork;
1182 resume them now. On all-stop targets, everything that needs to be resumed
1183 will be when we resume the event thread. */
1184 if (target_is_non_stop_p ())
1186 /* restart_threads and start_step_over may change the current thread, make
1187 sure we leave the event thread as the current thread. */
1188 scoped_restore_current_thread restore_thread;
1190 insert_breakpoints ();
1191 start_step_over ();
1193 if (!step_over_info_valid_p ())
1194 restart_threads (event_thread, event_thread->inf);
1198 /* Enum strings for "set|show follow-exec-mode". */
1200 static const char follow_exec_mode_new[] = "new";
1201 static const char follow_exec_mode_same[] = "same";
1202 static const char *const follow_exec_mode_names[] =
1204 follow_exec_mode_new,
1205 follow_exec_mode_same,
1206 nullptr,
1209 static const char *follow_exec_mode_string = follow_exec_mode_same;
1210 static void
1211 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
1212 struct cmd_list_element *c, const char *value)
1214 gdb_printf (file, _("Follow exec mode is \"%s\".\n"), value);
1217 /* EXEC_FILE_TARGET is assumed to be non-NULL. */
1219 static void
1220 follow_exec (ptid_t ptid, const char *exec_file_target)
1222 int pid = ptid.pid ();
1223 ptid_t process_ptid;
1225 /* Switch terminal for any messages produced e.g. by
1226 breakpoint_re_set. */
1227 target_terminal::ours_for_output ();
1229 /* This is an exec event that we actually wish to pay attention to.
1230 Refresh our symbol table to the newly exec'd program, remove any
1231 momentary bp's, etc.
1233 If there are breakpoints, they aren't really inserted now,
1234 since the exec() transformed our inferior into a fresh set
1235 of instructions.
1237 We want to preserve symbolic breakpoints on the list, since
1238 we have hopes that they can be reset after the new a.out's
1239 symbol table is read.
1241 However, any "raw" breakpoints must be removed from the list
1242 (e.g., the solib bp's), since their address is probably invalid
1243 now.
1245 And, we DON'T want to call delete_breakpoints() here, since
1246 that may write the bp's "shadow contents" (the instruction
1247 value that was overwritten with a TRAP instruction). Since
1248 we now have a new a.out, those shadow contents aren't valid. */
1250 mark_breakpoints_out (current_program_space);
1252 /* The target reports the exec event to the main thread, even if
1253 some other thread does the exec, and even if the main thread was
1254 stopped or already gone. We may still have non-leader threads of
1255 the process on our list. E.g., on targets that don't have thread
1256 exit events (like remote) and nothing forces an update of the
1257 thread list up to here. When debugging remotely, it's best to
1258 avoid extra traffic, when possible, so avoid syncing the thread
1259 list with the target, and instead go ahead and delete all threads
1260 of the process but the one that reported the event. Note this must
1261 be done before calling update_breakpoints_after_exec, as
1262 otherwise clearing the threads' resources would reference stale
1263 thread breakpoints -- it may have been one of these threads that
1264 stepped across the exec. We could just clear their stepping
1265 states, but as long as we're iterating, might as well delete
1266 them. Deleting them now rather than at the next user-visible
1267 stop provides a nicer sequence of events for user and MI
1268 notifications. */
1269 for (thread_info *th : all_threads_safe ())
1270 if (th->ptid.pid () == pid && th->ptid != ptid)
1271 delete_thread (th);
1273 /* We also need to clear any left over stale state for the
1274 leader/event thread. E.g., if there was any step-resume
1275 breakpoint or similar, it's gone now. We cannot truly
1276 step-to-next statement through an exec(). */
1277 thread_info *th = inferior_thread ();
1278 th->control.step_resume_breakpoint = nullptr;
1279 th->control.exception_resume_breakpoint = nullptr;
1280 th->control.single_step_breakpoints = nullptr;
1281 th->control.step_range_start = 0;
1282 th->control.step_range_end = 0;
1284 /* The user may have had the main thread held stopped in the
1285 previous image (e.g., schedlock on, or non-stop). Release
1286 it now. */
1287 th->stop_requested = 0;
1289 update_breakpoints_after_exec ();
1291 /* What is this a.out's name? */
1292 process_ptid = ptid_t (pid);
1293 gdb_printf (_("%s is executing new program: %s\n"),
1294 target_pid_to_str (process_ptid).c_str (),
1295 exec_file_target);
1297 /* We've followed the inferior through an exec. Therefore, the
1298 inferior has essentially been killed & reborn. */
1300 breakpoint_init_inferior (current_inferior (), inf_execd);
1302 gdb::unique_xmalloc_ptr<char> exec_file_host
1303 = exec_file_find (exec_file_target, nullptr);
1305 /* If we were unable to map the executable target pathname onto a host
1306 pathname, tell the user that. Otherwise GDB's subsequent behavior
1307 is confusing. Maybe it would even be better to stop at this point
1308 so that the user can specify a file manually before continuing. */
1309 if (exec_file_host == nullptr)
1310 warning (_("Could not load symbols for executable %s.\n"
1311 "Do you need \"set sysroot\"?"),
1312 exec_file_target);
1314 /* Reset the shared library package. This ensures that we get a
1315 shlib event when the child reaches "_start", at which point the
1316 dld will have had a chance to initialize the child. */
1317 /* Also, loading a symbol file below may trigger symbol lookups, and
1318 we don't want those to be satisfied by the libraries of the
1319 previous incarnation of this process. */
1320 no_shared_libraries (current_program_space);
1322 inferior *execing_inferior = current_inferior ();
1323 inferior *following_inferior;
1325 if (follow_exec_mode_string == follow_exec_mode_new)
1327 /* The user wants to keep the old inferior and program spaces
1328 around. Create a new fresh one, and switch to it. */
1330 /* Do exit processing for the original inferior before setting the new
1331 inferior's pid. Having two inferiors with the same pid would confuse
1332 find_inferior_p(t)id. Transfer the terminal state and info from the
1333 old to the new inferior. */
1334 following_inferior = add_inferior_with_spaces ();
1336 swap_terminal_info (following_inferior, execing_inferior);
1337 exit_inferior (execing_inferior);
1339 following_inferior->pid = pid;
1341 else
1343 /* follow-exec-mode is "same", we continue execution in the execing
1344 inferior. */
1345 following_inferior = execing_inferior;
1347 /* The old description may no longer be fit for the new image.
1348 E.g., a 64-bit process exec'ed a 32-bit process. Clear the
1349 old description; we'll read a new one below. No need to do
1350 this on "follow-exec-mode new", as the old inferior stays
1351 around (its description is later cleared/refetched on
1352 restart). */
1353 target_clear_description ();
1356 target_follow_exec (following_inferior, ptid, exec_file_target);
1358 gdb_assert (current_inferior () == following_inferior);
1359 gdb_assert (current_program_space == following_inferior->pspace);
1361 /* Attempt to open the exec file. SYMFILE_DEFER_BP_RESET is used
1362 because the proper displacement for a PIE (Position Independent
1363 Executable) main symbol file will only be computed by
1364 solib_create_inferior_hook below. breakpoint_re_set would fail
1365 to insert the breakpoints with the zero displacement. */
1366 try_open_exec_file (exec_file_host.get (), following_inferior,
1367 SYMFILE_DEFER_BP_RESET);
1369 /* If the target can specify a description, read it. Must do this
1370 after flipping to the new executable (because the target supplied
1371 description must be compatible with the executable's
1372 architecture, and the old executable may e.g., be 32-bit, while
1373 the new one 64-bit), and before anything involving memory or
1374 registers. */
1375 target_find_description ();
1377 gdb::observers::inferior_execd.notify (execing_inferior, following_inferior);
1379 breakpoint_re_set ();
1381 /* Reinsert all breakpoints. (Those which were symbolic have
1382 been reset to the proper address in the new a.out, thanks
1383 to symbol_file_command...). */
1384 insert_breakpoints ();
1386 /* The next resume of this inferior should bring it to the shlib
1387 startup breakpoints. (If the user had also set bp's on
1388 "main" from the old (parent) process, then they'll auto-
1389 matically get reset there in the new process.). */
1392 /* The chain of threads that need to do a step-over operation to get
1393 past e.g., a breakpoint. What technique is used to step over the
1394 breakpoint/watchpoint does not matter -- all threads end up in the
1395 same queue, to maintain rough temporal order of execution, in order
1396 to avoid starvation, otherwise, we could e.g., find ourselves
1397 constantly stepping the same couple threads past their breakpoints
1398 over and over, if the single-step finishes fast enough. */
1399 thread_step_over_list global_thread_step_over_list;
1401 /* Bit flags indicating what the thread needs to step over. */
1403 enum step_over_what_flag
1405 /* Step over a breakpoint. */
1406 STEP_OVER_BREAKPOINT = 1,
1408 /* Step past a non-continuable watchpoint, in order to let the
1409 instruction execute so we can evaluate the watchpoint
1410 expression. */
1411 STEP_OVER_WATCHPOINT = 2
1413 DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag, step_over_what);
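/* Illustrative sketch (not part of the surrounding code): DEF_ENUM_FLAGS_TYPE
   makes STEP_OVER_WHAT a type-safe flag set, so the flags above can be
   combined and tested, e.g.:

       step_over_what what = STEP_OVER_BREAKPOINT | STEP_OVER_WATCHPOINT;
       if ((what & STEP_OVER_WATCHPOINT) != 0)
	 ...

   The real uses are in the step-over machinery further down.  */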
1415 /* Info about an instruction that is being stepped over. */
1417 struct step_over_info
1419 /* If we're stepping past a breakpoint, this is the address space
1420 and address of the instruction the breakpoint is set at. We'll
1421 skip inserting all breakpoints here. Valid iff ASPACE is
1422 non-NULL. */
1423 const address_space *aspace = nullptr;
1424 CORE_ADDR address = 0;
1426 /* The instruction being stepped over triggers a nonsteppable
1427 watchpoint. If true, we'll skip inserting watchpoints. */
1428 int nonsteppable_watchpoint_p = 0;
1430 /* The thread's global number. */
1431 int thread = -1;
1434 /* The step-over info of the location that is being stepped over.
1436 Note that with async/breakpoint always-inserted mode, a user might
1437 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
1438 being stepped over. As setting a new breakpoint inserts all
1439 breakpoints, we need to make sure the breakpoint being stepped over
1440 isn't inserted then. We do that by only clearing the step-over
1441 info when the step-over is actually finished (or aborted).
1443 Presently GDB can only step over one breakpoint at any given time.
1444 Given that threads that can't run code in the same address space as the
1445 breakpoint's can't really miss the breakpoint, GDB could be taught
1446 to step-over at most one breakpoint per address space (so this info
1447 could move to the address space object if/when GDB is extended).
1448 The set of breakpoints being stepped over will normally be much
1449 smaller than the set of all breakpoints, so a flag in the
1450 breakpoint location structure would be wasteful. A separate list
1451 also saves complexity and run-time, as otherwise we'd have to go
1452 through all breakpoint locations clearing their flag whenever we
1453 start a new sequence. Similar considerations weigh against storing
1454 this info in the thread object. Plus, not all step overs actually
1455 have breakpoint locations -- e.g., stepping past a single-step
1456 breakpoint, or stepping to complete a non-continuable
1457 watchpoint. */
1458 static struct step_over_info step_over_info;
1460 /* Record the address of the breakpoint/instruction we're currently
1461 stepping over.
1462 N.B. We record the aspace and address now, instead of say just the thread,
1463 because when we need the info later the thread may be running. */
1465 static void
1466 set_step_over_info (const address_space *aspace, CORE_ADDR address,
1467 int nonsteppable_watchpoint_p,
1468 int thread)
1470 step_over_info.aspace = aspace;
1471 step_over_info.address = address;
1472 step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
1473 step_over_info.thread = thread;
1476 /* Called when we're no longer stepping over a breakpoint / an
1477 instruction, so all breakpoints are free to be (re)inserted. */
1479 static void
1480 clear_step_over_info (void)
1482 infrun_debug_printf ("clearing step over info");
1483 step_over_info.aspace = nullptr;
1484 step_over_info.address = 0;
1485 step_over_info.nonsteppable_watchpoint_p = 0;
1486 step_over_info.thread = -1;
1489 /* See infrun.h. */
1492 stepping_past_instruction_at (struct address_space *aspace,
1493 CORE_ADDR address)
1495 return (step_over_info.aspace != nullptr
1496 && breakpoint_address_match (aspace, address,
1497 step_over_info.aspace,
1498 step_over_info.address));
1501 /* See infrun.h. */
1504 thread_is_stepping_over_breakpoint (int thread)
1506 return (step_over_info.thread != -1
1507 && thread == step_over_info.thread);
1510 /* See infrun.h. */
1513 stepping_past_nonsteppable_watchpoint (void)
1515 return step_over_info.nonsteppable_watchpoint_p;
1518 /* Returns true if step-over info is valid. */
1520 static bool
1521 step_over_info_valid_p (void)
1523 return (step_over_info.aspace != nullptr
1524 || stepping_past_nonsteppable_watchpoint ());
1528 /* Displaced stepping. */
1530 /* In non-stop debugging mode, we must take special care to manage
1531 breakpoints properly; in particular, the traditional strategy for
1532 stepping a thread past a breakpoint it has hit is unsuitable.
1533 'Displaced stepping' is a tactic for stepping one thread past a
1534 breakpoint it has hit while ensuring that other threads running
1535 concurrently will hit the breakpoint as they should.
1537 The traditional way to step a thread T off a breakpoint in a
1538 multi-threaded program in all-stop mode is as follows:
1540 a0) Initially, all threads are stopped, and breakpoints are not
1541 inserted.
1542 a1) We single-step T, leaving breakpoints uninserted.
1543 a2) We insert breakpoints, and resume all threads.
1545 In non-stop debugging, however, this strategy is unsuitable: we
1546 don't want to have to stop all threads in the system in order to
1547 continue or step T past a breakpoint. Instead, we use displaced
1548 stepping:
1550 n0) Initially, T is stopped, other threads are running, and
1551 breakpoints are inserted.
1552 n1) We copy the instruction "under" the breakpoint to a separate
1553 location, outside the main code stream, making any adjustments
1554 to the instruction, register, and memory state as directed by
1555 T's architecture.
1556 n2) We single-step T over the instruction at its new location.
1557 n3) We adjust the resulting register and memory state as directed
1558 by T's architecture. This includes resetting T's PC to point
1559 back into the main instruction stream.
1560 n4) We resume T.
1562 This approach depends on the following gdbarch methods:
1564 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1565 indicate where to copy the instruction, and how much space must
1566 be reserved there. We use these in step n1.
1568 - gdbarch_displaced_step_copy_insn copies an instruction to a new
1569 address, and makes any necessary adjustments to the instruction,
1570 register contents, and memory. We use this in step n1.
1572 - gdbarch_displaced_step_fixup adjusts registers and memory after
1573 we have successfully single-stepped the instruction, to yield the
1574 same effect the instruction would have had if we had executed it
1575 at its original address. We use this in step n3.
1577 The gdbarch_displaced_step_copy_insn and
1578 gdbarch_displaced_step_fixup functions must be written so that
1579 copying an instruction with gdbarch_displaced_step_copy_insn,
1580 single-stepping across the copied instruction, and then applying
1581 gdbarch_displaced_step_fixup should have the same effects on the
1582 thread's memory and registers as stepping the instruction in place
1583 would have. Exactly which responsibilities fall to the copy and
1584 which fall to the fixup is up to the author of those functions.
1586 See the comments in gdbarch.sh for details.
1588 Note that displaced stepping and software single-step cannot
1589 currently be used in combination, although with some care I think
1590 they could be made to. Software single-step works by placing
1591 breakpoints on all possible subsequent instructions; if the
1592 displaced instruction is a PC-relative jump, those breakpoints
1593 could fall in very strange places --- on pages that aren't
1594 executable, or at addresses that are not proper instruction
1595 boundaries. (We do generally let other threads run while we wait
1596 to hit the software single-step breakpoint, and they might
1597 encounter such a corrupted instruction.) One way to work around
1598 this would be to have gdbarch_displaced_step_copy_insn fully
1599 simulate the effect of PC-relative instructions (and return NULL)
1600 on architectures that use software single-stepping.
1602 In non-stop mode, we can have independent and simultaneous step
1603 requests, so more than one thread may need to simultaneously step
1604 over a breakpoint. The current implementation assumes there is
1605 only one scratch space per process. In this case, we have to
1606 serialize access to the scratch space. If thread A wants to step
1607 over a breakpoint, but we are currently waiting for some other
1608 thread to complete a displaced step, we leave thread A stopped and
1609 place it in the displaced_step_request_queue. Whenever a displaced
1610 step finishes, we pick the next thread in the queue and start a new
1611 displaced step operation on it. See displaced_step_prepare and
1612 displaced_step_finish for details. */
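/* Rough sketch of steps n1-n4 above, for illustration only; the real
   sequence, with its error handling and the exact gdbarch method
   signatures, lives in displaced_step_prepare / displaced_step_finish
   and in displaced-stepping.c:

       copy the insn at PC to a scratch buffer   (gdbarch_displaced_step_copy_insn)
       single-step the thread at the buffer
       fix up registers/memory, restore the PC   (gdbarch_displaced_step_fixup)
       resume the thread normally  */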
1614 /* Return true if THREAD is doing a displaced step. */
1616 static bool
1617 displaced_step_in_progress_thread (thread_info *thread)
1619 gdb_assert (thread != nullptr);
1621 return thread->displaced_step_state.in_progress ();
1624 /* Return true if INF has a thread doing a displaced step. */
1626 static bool
1627 displaced_step_in_progress (inferior *inf)
1629 return inf->displaced_step_state.in_progress_count > 0;
1632 /* Return true if any thread is doing a displaced step. */
1634 static bool
1635 displaced_step_in_progress_any_thread ()
1637 for (inferior *inf : all_non_exited_inferiors ())
1639 if (displaced_step_in_progress (inf))
1640 return true;
1643 return false;
1646 static void
1647 infrun_inferior_exit (struct inferior *inf)
1649 inf->displaced_step_state.reset ();
1650 inf->thread_waiting_for_vfork_done = nullptr;
1653 static void
1654 infrun_inferior_execd (inferior *exec_inf, inferior *follow_inf)
1656 /* If some threads were doing a displaced step in this inferior at the
1657 moment of the exec, they no longer exist. Even if the exec'ing thread
1658 was doing a displaced step, we don't want to do any fixup nor restore
1659 displaced stepping buffer bytes. */
1660 follow_inf->displaced_step_state.reset ();
1662 for (thread_info *thread : follow_inf->threads ())
1663 thread->displaced_step_state.reset ();
1665 /* Since an in-line step is done with everything else stopped, if there was
1666 one in progress at the time of the exec, it must have been the exec'ing
1667 thread. */
1668 clear_step_over_info ();
1670 follow_inf->thread_waiting_for_vfork_done = nullptr;
1673 /* If ON, and the architecture supports it, GDB will use displaced
1674 stepping to step over breakpoints. If OFF, or if the architecture
1675 doesn't support it, GDB will instead use the traditional
1676 hold-and-step approach. If AUTO (which is the default), GDB will
1677 decide which technique to use to step over breakpoints depending on
1678 whether the target works in a non-stop way (see use_displaced_stepping). */
1680 static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
1682 static void
1683 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1684 struct cmd_list_element *c,
1685 const char *value)
1687 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
1688 gdb_printf (file,
1689 _("Debugger's willingness to use displaced stepping "
1690 "to step over breakpoints is %s (currently %s).\n"),
1691 value, target_is_non_stop_p () ? "on" : "off");
1692 else
1693 gdb_printf (file,
1694 _("Debugger's willingness to use displaced stepping "
1695 "to step over breakpoints is %s.\n"), value);
1698 /* Return true if the gdbarch implements the required methods to use
1699 displaced stepping. */
1701 static bool
1702 gdbarch_supports_displaced_stepping (gdbarch *arch)
1704 /* Only check for the presence of `prepare`. The gdbarch verification ensures
1705 that if `prepare` is provided, so is `finish`. */
1706 return gdbarch_displaced_step_prepare_p (arch);
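/* For reference, an architecture opts in by registering both hooks from
   its gdbarch initialization routine, roughly like the following sketch
   (the foo_* handler names are hypothetical):

     set_gdbarch_displaced_step_prepare (gdbarch, foo_displaced_step_prepare);
     set_gdbarch_displaced_step_finish (gdbarch, foo_displaced_step_finish);

   Registering `prepare' without `finish' is rejected by the gdbarch
   verification mentioned above.  */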
1709 /* Return true if displaced stepping can/should be used to step
1710 over breakpoints of thread TP. */
1712 static bool
1713 use_displaced_stepping (thread_info *tp)
1715 /* If the user disabled it explicitly, don't use displaced stepping. */
1716 if (can_use_displaced_stepping == AUTO_BOOLEAN_FALSE)
1717 return false;
1719 /* If "auto", only use displaced stepping if the target operates in a non-stop
1720 way. */
1721 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO
1722 && !target_is_non_stop_p ())
1723 return false;
1725 gdbarch *gdbarch = get_thread_regcache (tp)->arch ();
1727 /* If the architecture doesn't implement displaced stepping, don't use
1728 it. */
1729 if (!gdbarch_supports_displaced_stepping (gdbarch))
1730 return false;
1732 /* If recording, don't use displaced stepping. */
1733 if (find_record_target () != nullptr)
1734 return false;
1736 /* If displaced stepping failed before for this inferior, don't bother trying
1737 again. */
1738 if (tp->inf->displaced_step_state.failed_before)
1739 return false;
1741 return true;
1744 /* Simple function wrapper around displaced_step_thread_state::reset. */
1746 static void
1747 displaced_step_reset (displaced_step_thread_state *displaced)
1749 displaced->reset ();
1752 /* A cleanup that wraps displaced_step_reset. We use this instead of, say,
1753 SCOPE_EXIT, because it needs to be discardable with "cleanup.release ()". */
1755 using displaced_step_reset_cleanup = FORWARD_SCOPE_EXIT (displaced_step_reset);
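/* Usage sketch (illustrative only): the cleanup resets the thread's
   displaced-stepping state on scope exit unless explicitly discarded:

     displaced_step_reset_cleanup cleanup (&tp->displaced_step_state);
     ... work that may throw ...
     cleanup.release ();   <- keep the state if everything succeeded  */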
1757 /* Prepare to single-step, using displaced stepping.
1759 Note that we cannot use displaced stepping when we have a signal to
1760 deliver. If we have a signal to deliver and an instruction to step
1761 over, then after the step, there will be no indication from the
1762 target whether the thread entered a signal handler or ignored the
1763 signal and stepped over the instruction successfully --- both cases
1764 result in a simple SIGTRAP. In the first case we mustn't do a
1765 fixup, and in the second case we must --- but we can't tell which.
1766 Comments in the code for 'random signals' in handle_inferior_event
1767 explain how we handle this case instead.
1769 Returns DISPLACED_STEP_PREPARE_STATUS_OK if preparing was successful -- this
1770 thread is going to be stepped now; DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
1771 if displaced stepping this thread got queued; or
1772 DISPLACED_STEP_PREPARE_STATUS_CANT if this instruction can't be displaced
1773 stepped. */
1775 static displaced_step_prepare_status
1776 displaced_step_prepare_throw (thread_info *tp)
1778 regcache *regcache = get_thread_regcache (tp);
1779 struct gdbarch *gdbarch = regcache->arch ();
1780 displaced_step_thread_state &disp_step_thread_state
1781 = tp->displaced_step_state;
1783 /* We should never reach this function if the architecture does not
1784 support displaced stepping. */
1785 gdb_assert (gdbarch_supports_displaced_stepping (gdbarch));
1787 /* Nor if the thread isn't meant to step over a breakpoint. */
1788 gdb_assert (tp->control.trap_expected);
1790 /* Disable range stepping while executing in the scratch pad. We
1791 want a single-step even if executing the displaced instruction in
1792 the scratch buffer lands within the stepping range (e.g., a
1793 jump/branch). */
1794 tp->control.may_range_step = 0;
1796 /* We are about to start a displaced step for this thread. If one is already
1797 in progress, something's wrong. */
1798 gdb_assert (!disp_step_thread_state.in_progress ());
1800 if (tp->inf->displaced_step_state.unavailable)
1802 /* The gdbarch tells us it's not worth asking to try a prepare because
1803 it is likely that it will return unavailable, so don't bother asking. */
1805 displaced_debug_printf ("deferring step of %s",
1806 tp->ptid.to_string ().c_str ());
1808 global_thread_step_over_chain_enqueue (tp);
1809 return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
1812 displaced_debug_printf ("displaced-stepping %s now",
1813 tp->ptid.to_string ().c_str ());
1815 scoped_restore_current_thread restore_thread;
1817 switch_to_thread (tp);
1819 CORE_ADDR original_pc = regcache_read_pc (regcache);
1820 CORE_ADDR displaced_pc;
1822 /* Display the instruction we are going to displaced step. */
1823 if (debug_displaced)
1825 string_file tmp_stream;
1826 int dislen = gdb_print_insn (gdbarch, original_pc, &tmp_stream,
1827 nullptr);
1829 if (dislen > 0)
1831 gdb::byte_vector insn_buf (dislen);
1832 read_memory (original_pc, insn_buf.data (), insn_buf.size ());
1834 std::string insn_bytes = bytes_to_string (insn_buf);
1836 displaced_debug_printf ("original insn %s: %s \t %s",
1837 paddress (gdbarch, original_pc),
1838 insn_bytes.c_str (),
1839 tmp_stream.string ().c_str ());
1841 else
1842 displaced_debug_printf ("original insn %s: invalid length: %d",
1843 paddress (gdbarch, original_pc), dislen);
1846 displaced_step_prepare_status status
1847 = gdbarch_displaced_step_prepare (gdbarch, tp, displaced_pc);
1849 if (status == DISPLACED_STEP_PREPARE_STATUS_CANT)
1851 displaced_debug_printf ("failed to prepare (%s)",
1852 tp->ptid.to_string ().c_str ());
1854 return DISPLACED_STEP_PREPARE_STATUS_CANT;
1856 else if (status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
1858 /* Not enough displaced stepping resources available, defer this
1859 request by placing it in the queue. */
1861 displaced_debug_printf ("not enough resources available, "
1862 "deferring step of %s",
1863 tp->ptid.to_string ().c_str ());
1865 global_thread_step_over_chain_enqueue (tp);
1867 return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
1870 gdb_assert (status == DISPLACED_STEP_PREPARE_STATUS_OK);
1872 /* Save the information we need to fix things up if the step
1873 succeeds. */
1874 disp_step_thread_state.set (gdbarch);
1876 tp->inf->displaced_step_state.in_progress_count++;
1878 displaced_debug_printf ("prepared successfully thread=%s, "
1879 "original_pc=%s, displaced_pc=%s",
1880 tp->ptid.to_string ().c_str (),
1881 paddress (gdbarch, original_pc),
1882 paddress (gdbarch, displaced_pc));
1884 /* Display the new displaced instruction(s). */
1885 if (debug_displaced)
1887 string_file tmp_stream;
1888 CORE_ADDR addr = displaced_pc;
1890 /* If displaced stepping is going to use h/w single step then we know
1891 that the replacement instruction can only be a single instruction,
1892 in that case set the end address at the next byte.
1894 Otherwise the displaced stepping copy instruction routine could
1895 have generated multiple instructions, and all we know is that they
1896 must fit within the LEN bytes of the buffer. */
1897 CORE_ADDR end
1898 = addr + (gdbarch_displaced_step_hw_singlestep (gdbarch)
1899 ? 1 : gdbarch_displaced_step_buffer_length (gdbarch));
1901 while (addr < end)
1903 int dislen = gdb_print_insn (gdbarch, addr, &tmp_stream, nullptr);
1904 if (dislen <= 0)
1906 displaced_debug_printf
1907 ("replacement insn %s: invalid length: %d",
1908 paddress (gdbarch, addr), dislen);
1909 break;
1912 gdb::byte_vector insn_buf (dislen);
1913 read_memory (addr, insn_buf.data (), insn_buf.size ());
1915 std::string insn_bytes = bytes_to_string (insn_buf);
1916 std::string insn_str = tmp_stream.release ();
1917 displaced_debug_printf ("replacement insn %s: %s \t %s",
1918 paddress (gdbarch, addr),
1919 insn_bytes.c_str (),
1920 insn_str.c_str ());
1921 addr += dislen;
1925 return DISPLACED_STEP_PREPARE_STATUS_OK;
1928 /* Wrapper for displaced_step_prepare_throw that disables further
1929 attempts at displaced stepping if we get a memory error. */
1931 static displaced_step_prepare_status
1932 displaced_step_prepare (thread_info *thread)
1934 displaced_step_prepare_status status
1935 = DISPLACED_STEP_PREPARE_STATUS_CANT;
1939 status = displaced_step_prepare_throw (thread);
1941 catch (const gdb_exception_error &ex)
1943 if (ex.error != MEMORY_ERROR
1944 && ex.error != NOT_SUPPORTED_ERROR)
1945 throw;
1947 infrun_debug_printf ("caught exception, disabling displaced stepping: %s",
1948 ex.what ());
1950 /* Be verbose if "set displaced-stepping" is "on", silent if
1951 "auto". */
1952 if (can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1954 warning (_("disabling displaced stepping: %s"),
1955 ex.what ());
1958 /* Disable further displaced stepping attempts. */
1959 thread->inf->displaced_step_state.failed_before = 1;
1962 return status;
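/* Usage sketch (illustrative only) of how callers react to the result;
   resume_1, further below, does essentially this:

     switch (displaced_step_prepare (tp))
       {
       case DISPLACED_STEP_PREPARE_STATUS_OK:
         single-step the copied instruction in the scratch buffer;
         break;
       case DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE:
         the thread was queued; it is stepped when a buffer frees up;
         break;
       case DISPLACED_STEP_PREPARE_STATUS_CANT:
         fall back to an in-line (hold-and-step) step-over;
         break;
       }  */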
1965 /* True if any thread of TARGET that matches RESUME_PTID requires
1966 target_thread_events enabled. This assumes TARGET does not support
1967 target thread options. */
1969 static bool
1970 any_thread_needs_target_thread_events (process_stratum_target *target,
1971 ptid_t resume_ptid)
1973 for (thread_info *tp : all_non_exited_threads (target, resume_ptid))
1974 if (displaced_step_in_progress_thread (tp)
1975 || schedlock_applies (tp)
1976 || tp->thread_fsm () != nullptr)
1977 return true;
1978 return false;
1981 /* Maybe disable thread-{cloned,created,exited} event reporting after
1982 a step-over (either in-line or displaced) finishes. */
1984 static void
1985 update_thread_events_after_step_over (thread_info *event_thread,
1986 const target_waitstatus &event_status)
1988 if (schedlock_applies (event_thread))
1990 /* If scheduler-locking applies, continue reporting
1991 thread-created/thread-cloned events. */
1992 return;
1994 else if (target_supports_set_thread_options (0))
1996 /* We can control per-thread options. Disable events for the
1997 event thread, unless the thread is gone. */
1998 if (event_status.kind () != TARGET_WAITKIND_THREAD_EXITED)
1999 event_thread->set_thread_options (0);
2001 else
2003 /* We can only control the target-wide target_thread_events
2004 setting. Disable it, but only if other threads in the target
2005 don't need it enabled. */
2006 process_stratum_target *target = event_thread->inf->process_target ();
2007 if (!any_thread_needs_target_thread_events (target, minus_one_ptid))
2008 target_thread_events (false);
2012 /* If we displaced stepped an instruction successfully, adjust registers and
2013 memory to yield the same effect the instruction would have had if we had
2014 executed it at its original address, and return
2015 DISPLACED_STEP_FINISH_STATUS_OK. If the instruction didn't complete,
2016 relocate the PC and return DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED.
2018 If the thread wasn't displaced stepping, return
2019 DISPLACED_STEP_FINISH_STATUS_OK as well. */
2021 static displaced_step_finish_status
2022 displaced_step_finish (thread_info *event_thread,
2023 const target_waitstatus &event_status)
2025 /* Check whether the parent is displaced stepping. */
2026 inferior *parent_inf = event_thread->inf;
2028 /* If this was a fork/vfork/clone, this event indicates that the
2029 displaced stepping of the syscall instruction has been done, so
2030 we perform cleanup for parent here. Also note that this
2031 operation also cleans up the child for vfork, because their pages
2032 are shared. */
2034 /* If this is a fork (child gets its own address space copy) and
2035 some displaced step buffers were in use at the time of the fork,
2036 restore the displaced step buffer bytes in the child process.
2038 Architectures which support displaced stepping and fork events
2039 must supply an implementation of
2040 gdbarch_displaced_step_restore_all_in_ptid. This is not enforced
2041 during gdbarch validation to support architectures which support
2042 displaced stepping but not forks. */
2043 if (event_status.kind () == TARGET_WAITKIND_FORKED)
2045 struct regcache *parent_regcache = get_thread_regcache (event_thread);
2046 struct gdbarch *gdbarch = parent_regcache->arch ();
2048 if (gdbarch_supports_displaced_stepping (gdbarch))
2049 gdbarch_displaced_step_restore_all_in_ptid
2050 (gdbarch, parent_inf, event_status.child_ptid ());
2053 displaced_step_thread_state *displaced = &event_thread->displaced_step_state;
2055 /* Was this thread performing a displaced step? */
2056 if (!displaced->in_progress ())
2057 return DISPLACED_STEP_FINISH_STATUS_OK;
2059 update_thread_events_after_step_over (event_thread, event_status);
2061 gdb_assert (event_thread->inf->displaced_step_state.in_progress_count > 0);
2062 event_thread->inf->displaced_step_state.in_progress_count--;
2064 /* Fixup may need to read memory/registers. Switch to the thread
2065 that we're fixing up. Also, target_stopped_by_watchpoint checks
2066 the current thread, and displaced_step_restore performs ptid-dependent
2067 memory accesses using current_inferior(). */
2068 switch_to_thread (event_thread);
2070 displaced_step_reset_cleanup cleanup (displaced);
2072 /* Do the fixup, and release the resources acquired to do the displaced
2073 step. */
2074 displaced_step_finish_status status
2075 = gdbarch_displaced_step_finish (displaced->get_original_gdbarch (),
2076 event_thread, event_status);
2078 if (event_status.kind () == TARGET_WAITKIND_FORKED
2079 || event_status.kind () == TARGET_WAITKIND_VFORKED
2080 || event_status.kind () == TARGET_WAITKIND_THREAD_CLONED)
2082 /* Since the vfork/fork/clone syscall instruction was executed
2083 in the scratchpad, the child's PC is also within the
2084 scratchpad. Set the child's PC to the parent's PC value,
2085 which has already been fixed up. Note: we use the parent's
2086 aspace here, although we're touching the child, because the
2087 child hasn't been added to the inferior list yet at this
2088 point. */
2090 struct regcache *parent_regcache = get_thread_regcache (event_thread);
2091 struct gdbarch *gdbarch = parent_regcache->arch ();
2092 struct regcache *child_regcache
2093 = get_thread_arch_regcache (parent_inf, event_status.child_ptid (),
2094 gdbarch);
2095 /* Read PC value of parent. */
2096 CORE_ADDR parent_pc = regcache_read_pc (parent_regcache);
2098 displaced_debug_printf ("write child pc from %s to %s",
2099 paddress (gdbarch,
2100 regcache_read_pc (child_regcache)),
2101 paddress (gdbarch, parent_pc));
2103 regcache_write_pc (child_regcache, parent_pc);
2106 return status;
2109 /* Data to be passed around while handling an event. This data is
2110 discarded between events. */
2111 struct execution_control_state
2113 explicit execution_control_state (thread_info *thr = nullptr)
2114 : ptid (thr == nullptr ? null_ptid : thr->ptid),
2115 event_thread (thr)
2119 process_stratum_target *target = nullptr;
2120 ptid_t ptid;
2121 /* The thread that got the event, if this was a thread event; NULL
2122 otherwise. */
2123 struct thread_info *event_thread;
2125 struct target_waitstatus ws;
2126 int stop_func_filled_in = 0;
2127 CORE_ADDR stop_func_alt_start = 0;
2128 CORE_ADDR stop_func_start = 0;
2129 CORE_ADDR stop_func_end = 0;
2130 const char *stop_func_name = nullptr;
2131 int wait_some_more = 0;
2133 /* True if the event thread hit the single-step breakpoint of
2134 another thread. Thus the event doesn't cause a stop, the thread
2135 needs to be single-stepped past the single-step breakpoint before
2136 we can switch back to the original stepping thread. */
2137 int hit_singlestep_breakpoint = 0;
2140 static void keep_going_pass_signal (struct execution_control_state *ecs);
2141 static void prepare_to_wait (struct execution_control_state *ecs);
2142 static bool keep_going_stepped_thread (struct thread_info *tp);
2143 static step_over_what thread_still_needs_step_over (struct thread_info *tp);
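/* Usage sketch (illustrative only): an execution_control_state is built
   fresh for each event or resumption attempt and discarded afterwards,
   as in start_step_over below:

     execution_control_state ecs (tp);
     keep_going_pass_signal (&ecs);
     if (!ecs.wait_some_more)
       error (_("Command aborted."));  */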
2145 /* Are there any pending step-over requests? If so, run all we can
2146 now and return true. Otherwise, return false. */
2148 static bool
2149 start_step_over (void)
2151 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
2153 /* Don't start a new step-over if we already have an in-line
2154 step-over operation ongoing. */
2155 if (step_over_info_valid_p ())
2156 return false;
2158 /* Steal the global thread step over chain. As we try to initiate displaced
2159 steps, threads will be enqueued in the global chain if no buffers are
2160 available. If we iterated on the global chain directly, we might iterate
2161 indefinitely. */
2162 thread_step_over_list threads_to_step
2163 = std::move (global_thread_step_over_list);
2165 infrun_debug_printf ("stealing global queue of threads to step, length = %d",
2166 thread_step_over_chain_length (threads_to_step));
2168 bool started = false;
2170 /* On scope exit (whatever the reason, return or exception), if there are
2171 threads left in the THREADS_TO_STEP chain, put back these threads in the
2172 global list. */
2173 SCOPE_EXIT
2175 if (threads_to_step.empty ())
2176 infrun_debug_printf ("step-over queue now empty");
2177 else
2179 infrun_debug_printf ("putting back %d threads to step in global queue",
2180 thread_step_over_chain_length (threads_to_step));
2182 global_thread_step_over_chain_enqueue_chain
2183 (std::move (threads_to_step));
2187 thread_step_over_list_safe_range range
2188 = make_thread_step_over_list_safe_range (threads_to_step);
2190 for (thread_info *tp : range)
2192 step_over_what step_what;
2193 int must_be_in_line;
2195 gdb_assert (!tp->stop_requested);
2197 if (tp->inf->displaced_step_state.unavailable)
2199 /* The arch told us to not even try preparing another displaced step
2200 for this inferior. Just leave the thread in THREADS_TO_STEP, it
2201 will get moved to the global chain on scope exit. */
2202 continue;
2205 if (tp->inf->thread_waiting_for_vfork_done != nullptr)
2207 /* When we stop all threads, handling a vfork, any thread in the step
2208 over chain remains there. A user could also try to continue a
2209 thread stopped at a breakpoint while another thread is waiting for
2210 a vfork-done event. In any case, we don't want to start a step
2211 over right now. */
2212 continue;
2215 /* Remove thread from the THREADS_TO_STEP chain. If anything goes wrong
2216 while we try to prepare the displaced step, we don't add it back to
2217 the global step over chain. This is to avoid a thread staying in the
2218 step over chain indefinitely if something goes wrong when resuming it.
2219 If the error is intermittent and it still needs a step over, it will
2220 get enqueued again when we try to resume it normally. */
2221 threads_to_step.erase (threads_to_step.iterator_to (*tp));
2223 step_what = thread_still_needs_step_over (tp);
2224 must_be_in_line = ((step_what & STEP_OVER_WATCHPOINT)
2225 || ((step_what & STEP_OVER_BREAKPOINT)
2226 && !use_displaced_stepping (tp)));
2228 /* We currently stop all threads of all processes to step-over
2229 in-line. If we need to start a new in-line step-over, let
2230 any pending displaced steps finish first. */
2231 if (must_be_in_line && displaced_step_in_progress_any_thread ())
2233 global_thread_step_over_chain_enqueue (tp);
2234 continue;
2237 if (tp->control.trap_expected
2238 || tp->resumed ()
2239 || tp->executing ())
2241 internal_error ("[%s] has inconsistent state: "
2242 "trap_expected=%d, resumed=%d, executing=%d\n",
2243 tp->ptid.to_string ().c_str (),
2244 tp->control.trap_expected,
2245 tp->resumed (),
2246 tp->executing ());
2249 infrun_debug_printf ("resuming [%s] for step-over",
2250 tp->ptid.to_string ().c_str ());
2252 /* keep_going_pass_signal skips the step-over if the breakpoint
2253 is no longer inserted. In all-stop, we want to keep looking
2254 for a thread that needs a step-over instead of resuming TP,
2255 because we wouldn't be able to resume anything else until the
2256 target stops again. In non-stop, the resume always resumes
2257 only TP, so it's OK to let the thread resume freely. */
2258 if (!target_is_non_stop_p () && !step_what)
2259 continue;
2261 switch_to_thread (tp);
2262 execution_control_state ecs (tp);
2263 keep_going_pass_signal (&ecs);
2265 if (!ecs.wait_some_more)
2266 error (_("Command aborted."));
2268 /* If the thread's step over could not be initiated because no buffers
2269 were available, it was re-added to the global step over chain. */
2270 if (tp->resumed ())
2272 infrun_debug_printf ("[%s] was resumed.",
2273 tp->ptid.to_string ().c_str ());
2274 gdb_assert (!thread_is_in_step_over_chain (tp));
2276 else
2278 infrun_debug_printf ("[%s] was NOT resumed.",
2279 tp->ptid.to_string ().c_str ());
2280 gdb_assert (thread_is_in_step_over_chain (tp));
2283 /* If we started a new in-line step-over, we're done. */
2284 if (step_over_info_valid_p ())
2286 gdb_assert (tp->control.trap_expected);
2287 started = true;
2288 break;
2291 if (!target_is_non_stop_p ())
2293 /* On all-stop, shouldn't have resumed unless we needed a
2294 step over. */
2295 gdb_assert (tp->control.trap_expected
2296 || tp->step_after_step_resume_breakpoint);
2298 /* With remote targets (at least), in all-stop, we can't
2299 issue any further remote commands until the program stops
2300 again. */
2301 started = true;
2302 break;
2305 /* Either the thread no longer needed a step-over, or a new
2306 displaced stepping sequence started. Even in the latter
2307 case, continue looking. Maybe we can also start another
2308 displaced step on a thread of other process. */
2311 return started;
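/* The queue-stealing idiom used above, in isolation (illustrative
   sketch only; error handling omitted):

     thread_step_over_list stolen = std::move (global_thread_step_over_list);
     SCOPE_EXIT { global_thread_step_over_chain_enqueue_chain (std::move (stolen)); };
     for (thread_info *tp : make_thread_step_over_list_safe_range (stolen))
       {
         stolen.erase (stolen.iterator_to (*tp));
         try to start a step-over for TP; if no buffer is available, TP
         is re-enqueued on the (now empty) global chain;
       }

   Iterating over the stolen list rather than the global chain is what
   prevents the endless re-enqueue loop mentioned above.  */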
2314 /* Update global variables holding ptids to hold NEW_PTID if they were
2315 holding OLD_PTID. */
2316 static void
2317 infrun_thread_ptid_changed (process_stratum_target *target,
2318 ptid_t old_ptid, ptid_t new_ptid)
2320 if (inferior_ptid == old_ptid
2321 && current_inferior ()->process_target () == target)
2322 inferior_ptid = new_ptid;
2327 static const char schedlock_off[] = "off";
2328 static const char schedlock_on[] = "on";
2329 static const char schedlock_step[] = "step";
2330 static const char schedlock_replay[] = "replay";
2331 static const char *const scheduler_enums[] = {
2332 schedlock_off,
2333 schedlock_on,
2334 schedlock_step,
2335 schedlock_replay,
2336 nullptr
2338 static const char *scheduler_mode = schedlock_replay;
2339 static void
2340 show_scheduler_mode (struct ui_file *file, int from_tty,
2341 struct cmd_list_element *c, const char *value)
2343 gdb_printf (file,
2344 _("Mode for locking scheduler "
2345 "during execution is \"%s\".\n"),
2346 value);
2349 static void
2350 set_schedlock_func (const char *args, int from_tty, struct cmd_list_element *c)
2352 if (!target_can_lock_scheduler ())
2354 scheduler_mode = schedlock_off;
2355 error (_("Target '%s' cannot support this command."),
2356 target_shortname ());
2360 /* True if execution commands resume all threads of all processes by
2361 default; otherwise, resume only threads of the current inferior
2362 process. */
2363 bool sched_multi = false;
2365 /* Try to set up for software single stepping. Return true if target_resume()
2366 should use hardware single step.
2368 GDBARCH is the current gdbarch. */
2370 static bool
2371 maybe_software_singlestep (struct gdbarch *gdbarch)
2373 bool hw_step = true;
2375 if (execution_direction == EXEC_FORWARD
2376 && gdbarch_software_single_step_p (gdbarch))
2377 hw_step = !insert_single_step_breakpoints (gdbarch);
2379 return hw_step;
2382 /* See infrun.h. */
2384 ptid_t
2385 user_visible_resume_ptid (int step)
2387 ptid_t resume_ptid;
2389 if (non_stop)
2391 /* With non-stop mode on, threads are always handled
2392 individually. */
2393 resume_ptid = inferior_ptid;
2395 else if ((scheduler_mode == schedlock_on)
2396 || (scheduler_mode == schedlock_step && step))
2398 /* User-settable 'scheduler' mode requires solo thread
2399 resume. */
2400 resume_ptid = inferior_ptid;
2402 else if ((scheduler_mode == schedlock_replay)
2403 && target_record_will_replay (minus_one_ptid, execution_direction))
2405 /* User-settable 'scheduler' mode requires solo thread resume in replay
2406 mode. */
2407 resume_ptid = inferior_ptid;
2409 else if (inferior_ptid != null_ptid
2410 && inferior_thread ()->control.in_cond_eval)
2412 /* The inferior thread is evaluating a BP condition. Other threads
2413 might be stopped or running and we do not want to change their
2414 state, thus, resume only the current thread. */
2415 resume_ptid = inferior_ptid;
2417 else if (!sched_multi && target_supports_multi_process ())
2419 /* Resume all threads of the current process (and none of other
2420 processes). */
2421 resume_ptid = ptid_t (inferior_ptid.pid ());
2423 else
2425 /* Resume all threads of all processes. */
2426 resume_ptid = RESUME_ALL;
2429 return resume_ptid;
2432 /* See infrun.h. */
2434 process_stratum_target *
2435 user_visible_resume_target (ptid_t resume_ptid)
2437 return (resume_ptid == minus_one_ptid && sched_multi
2438 ? nullptr
2439 : current_inferior ()->process_target ());
2442 /* Find a thread from the inferiors that we'll resume that is waiting
2443 for a vfork-done event. */
2445 static thread_info *
2446 find_thread_waiting_for_vfork_done ()
2448 gdb_assert (!target_is_non_stop_p ());
2450 if (sched_multi)
2452 for (inferior *inf : all_non_exited_inferiors ())
2453 if (inf->thread_waiting_for_vfork_done != nullptr)
2454 return inf->thread_waiting_for_vfork_done;
2456 else
2458 inferior *cur_inf = current_inferior ();
2459 if (cur_inf->thread_waiting_for_vfork_done != nullptr)
2460 return cur_inf->thread_waiting_for_vfork_done;
2462 return nullptr;
2465 /* Return a ptid representing the set of threads that we will resume,
2466 in the perspective of the target, assuming run control handling
2467 does not require leaving some threads stopped (e.g., stepping past
2468 breakpoint). USER_STEP indicates whether we're about to start the
2469 target for a stepping command. */
2471 static ptid_t
2472 internal_resume_ptid (int user_step)
2474 /* In non-stop, we always control threads individually. Note that
2475 the target may always work in non-stop mode even with "set
2476 non-stop off", in which case user_visible_resume_ptid could
2477 return a wildcard ptid. */
2478 if (target_is_non_stop_p ())
2479 return inferior_ptid;
2481 /* The rest of the function assumes non-stop==off and
2482 target-non-stop==off.
2484 If a thread is waiting for a vfork-done event, it means breakpoints are out
2485 for this inferior (well, program space in fact). We don't want to resume
2486 any thread other than the one waiting for vfork done, otherwise these other
2487 threads could miss breakpoints. So if a thread in the resumption set is
2488 waiting for a vfork-done event, resume only that thread.
2490 The resumption set width depends on whether schedule-multiple is on or off.
2492 Note that if the target_resume interface was more flexible, we could be
2493 smarter here when schedule-multiple is on. For example, imagine 3
2494 inferiors with 2 threads each (1.1, 1.2, 2.1, 2.2, 3.1 and 3.2). Threads
2495 2.1 and 3.2 are both waiting for a vfork-done event. Then we could ask the
2496 target(s) to resume:
2498 - All threads of inferior 1
2499 - Thread 2.1
2500 - Thread 3.2
2502 Since we don't have that flexibility (we can only pass one ptid), just
2503 resume the first thread waiting for a vfork-done event we find (e.g. thread
2504 2.1). */
2505 thread_info *thr = find_thread_waiting_for_vfork_done ();
2506 if (thr != nullptr)
2508 /* If we have a thread that is waiting for a vfork-done event,
2509 then we should have switched to it earlier. Calling
2510 target_resume with thread scope is only possible when the
2511 current thread matches the thread scope. */
2512 gdb_assert (thr->ptid == inferior_ptid);
2513 gdb_assert (thr->inf->process_target ()
2514 == inferior_thread ()->inf->process_target ());
2515 return thr->ptid;
2518 return user_visible_resume_ptid (user_step);
2521 /* Wrapper for target_resume, that handles infrun-specific
2522 bookkeeping. */
2524 static void
2525 do_target_resume (ptid_t resume_ptid, bool step, enum gdb_signal sig)
2527 struct thread_info *tp = inferior_thread ();
2529 gdb_assert (!tp->stop_requested);
2531 /* Install inferior's terminal modes. */
2532 target_terminal::inferior ();
2534 /* Avoid confusing the next resume, if the next stop/resume
2535 happens to apply to another thread. */
2536 tp->set_stop_signal (GDB_SIGNAL_0);
2538 /* Advise target which signals may be handled silently.
2540 If we have removed breakpoints because we are stepping over one
2541 in-line (in any thread), we need to receive all signals to avoid
2542 accidentally skipping a breakpoint during execution of a signal
2543 handler.
2545 Likewise if we're displaced stepping, otherwise a trap for a
2546 breakpoint in a signal handler might be confused with the
2547 displaced step finishing. We don't make the displaced_step_finish
2548 step distinguish the cases instead, because:
2550 - a backtrace while stopped in the signal handler would show the
2551 scratch pad as frame older than the signal handler, instead of
2552 the real mainline code.
2554 - when the thread is later resumed, the signal handler would
2555 return to the scratch pad area, which would no longer be
2556 valid. */
2557 if (step_over_info_valid_p ()
2558 || displaced_step_in_progress (tp->inf))
2559 target_pass_signals ({});
2560 else
2561 target_pass_signals (signal_pass);
2563 /* Request that the target report thread-{created,cloned,exited}
2564 events in the following situations:
2566 - If we are performing an in-line step-over-breakpoint, then we
2567 will remove a breakpoint from the target and only run the
2568 current thread. We don't want any new thread (spawned by the
2569 step) to start running, as it might miss the breakpoint. We
2570 need to clear the step-over state if the stepped thread exits,
2571 so we also enable thread-exit events.
2573 - If we are stepping over a breakpoint out of line (displaced
2574 stepping) then we won't remove a breakpoint from the target,
2575 but, if the step spawns a new clone thread, then we will need
2576 to fixup the $pc address in the clone child too, so we need it
2577 to start stopped. We need to release the displaced stepping
2578 buffer if the stepped thread exits, so we also enable
2579 thread-exit events.
2581 - If scheduler-locking applies, threads that the current thread
2582 spawns should remain halted. It's not strictly necessary to
2583 enable thread-exit events in this case, but it doesn't hurt.
2585 if (step_over_info_valid_p ()
2586 || displaced_step_in_progress_thread (tp)
2587 || schedlock_applies (tp))
2589 gdb_thread_options options
2590 = GDB_THREAD_OPTION_CLONE | GDB_THREAD_OPTION_EXIT;
2591 if (target_supports_set_thread_options (options))
2592 tp->set_thread_options (options);
2593 else
2594 target_thread_events (true);
2596 else if (tp->thread_fsm () != nullptr)
2598 gdb_thread_options options = GDB_THREAD_OPTION_EXIT;
2599 if (target_supports_set_thread_options (options))
2600 tp->set_thread_options (options);
2601 else
2602 target_thread_events (true);
2604 else
2606 if (target_supports_set_thread_options (0))
2607 tp->set_thread_options (0);
2608 else
2610 process_stratum_target *resume_target = tp->inf->process_target ();
2611 if (!any_thread_needs_target_thread_events (resume_target,
2612 resume_ptid))
2613 target_thread_events (false);
2617 /* If we're resuming more than one thread simultaneously, then any
2618 thread other than the leader is being set to run free. Clear any
2619 previous thread option for those threads. */
2620 if (resume_ptid != inferior_ptid && target_supports_set_thread_options (0))
2622 process_stratum_target *resume_target = tp->inf->process_target ();
2623 for (thread_info *thr_iter : all_non_exited_threads (resume_target,
2624 resume_ptid))
2625 if (thr_iter != tp)
2626 thr_iter->set_thread_options (0);
2629 infrun_debug_printf ("resume_ptid=%s, step=%d, sig=%s",
2630 resume_ptid.to_string ().c_str (),
2631 step, gdb_signal_to_symbol_string (sig));
2633 target_resume (resume_ptid, step, sig);
2636 /* Resume the inferior. SIG is the signal to give the inferior
2637 (GDB_SIGNAL_0 for none). Note: don't call this directly; instead
2638 call 'resume', which handles exceptions. */
2640 static void
2641 resume_1 (enum gdb_signal sig)
2643 struct thread_info *tp = inferior_thread ();
2644 regcache *regcache = get_thread_regcache (tp);
2645 struct gdbarch *gdbarch = regcache->arch ();
2646 ptid_t resume_ptid;
2647 /* This represents the user's step vs continue request. When
2648 deciding whether "set scheduler-locking step" applies, it's the
2649 user's intention that counts. */
2650 const int user_step = tp->control.stepping_command;
2651 /* This represents what we'll actually request the target to do.
2652 This can decay from a step to a continue, if e.g., we need to
2653 implement single-stepping with breakpoints (software
2654 single-step). */
2655 bool step;
2657 gdb_assert (!tp->stop_requested);
2658 gdb_assert (!thread_is_in_step_over_chain (tp));
2660 if (tp->has_pending_waitstatus ())
2662 infrun_debug_printf
2663 ("thread %s has pending wait "
2664 "status %s (currently_stepping=%d).",
2665 tp->ptid.to_string ().c_str (),
2666 tp->pending_waitstatus ().to_string ().c_str (),
2667 currently_stepping (tp));
2669 tp->inf->process_target ()->threads_executing = true;
2670 tp->set_resumed (true);
2672 /* FIXME: What should we do if we are supposed to resume this
2673 thread with a signal? Maybe we should maintain a queue of
2674 pending signals to deliver. */
2675 if (sig != GDB_SIGNAL_0)
2677 warning (_("Couldn't deliver signal %s to %s."),
2678 gdb_signal_to_name (sig),
2679 tp->ptid.to_string ().c_str ());
2682 tp->set_stop_signal (GDB_SIGNAL_0);
2684 if (target_can_async_p ())
2686 target_async (true);
2687 /* Tell the event loop we have an event to process. */
2688 mark_async_event_handler (infrun_async_inferior_event_token);
2690 return;
2693 tp->stepped_breakpoint = 0;
2695 /* Depends on stepped_breakpoint. */
2696 step = currently_stepping (tp);
2698 if (current_inferior ()->thread_waiting_for_vfork_done != nullptr)
2700 /* Don't try to single-step a vfork parent that is waiting for
2701 the child to get out of the shared memory region (by exec'ing
2702 or exiting). This is particularly important on software
2703 single-step archs, as the child process would trip on the
2704 software single step breakpoint inserted for the parent
2705 process. Since the parent will not actually execute any
2706 instruction until the child is out of the shared region (such
2707 are vfork's semantics), it is safe to simply continue it.
2708 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
2709 the parent, and tell it to `keep_going', which automatically
2710 re-sets it stepping. */
2711 infrun_debug_printf ("resume : clear step");
2712 step = false;
2715 CORE_ADDR pc = regcache_read_pc (regcache);
2717 infrun_debug_printf ("step=%d, signal=%s, trap_expected=%d, "
2718 "current thread [%s] at %s",
2719 step, gdb_signal_to_symbol_string (sig),
2720 tp->control.trap_expected,
2721 inferior_ptid.to_string ().c_str (),
2722 paddress (gdbarch, pc));
2724 const address_space *aspace = tp->inf->aspace.get ();
2726 /* Normally, by the time we reach `resume', the breakpoints are either
2727 removed or inserted, as appropriate. The exception is if we're sitting
2728 at a permanent breakpoint; we need to step over it, but permanent
2729 breakpoints can't be removed. So we have to test for it here. */
2730 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
2732 if (sig != GDB_SIGNAL_0)
2734 /* We have a signal to pass to the inferior. The resume
2735 may, or may not take us to the signal handler. If this
2736 is a step, we'll need to stop in the signal handler, if
2737 there's one, (if the target supports stepping into
2738 handlers), or in the next mainline instruction, if
2739 there's no handler. If this is a continue, we need to be
2740 sure to run the handler with all breakpoints inserted.
2741 In all cases, set a breakpoint at the current address
2742 (where the handler returns to), and once that breakpoint
2743 is hit, resume skipping the permanent breakpoint. If
2744 that breakpoint isn't hit, then we've stepped into the
2745 signal handler (or hit some other event). We'll delete
2746 the step-resume breakpoint then. */
2748 infrun_debug_printf ("resume: skipping permanent breakpoint, "
2749 "deliver signal first");
2751 clear_step_over_info ();
2752 tp->control.trap_expected = 0;
2754 if (tp->control.step_resume_breakpoint == nullptr)
2756 /* Set a "high-priority" step-resume, as we don't want
2757 user breakpoints at PC to trigger (again) when this
2758 hits. */
2759 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2760 gdb_assert (tp->control.step_resume_breakpoint->first_loc ()
2761 .permanent);
2763 tp->step_after_step_resume_breakpoint = step;
2766 insert_breakpoints ();
2768 else
2770 /* There's no signal to pass, we can go ahead and skip the
2771 permanent breakpoint manually. */
2772 infrun_debug_printf ("skipping permanent breakpoint");
2773 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
2774 /* Update pc to reflect the new address from which we will
2775 execute instructions. */
2776 pc = regcache_read_pc (regcache);
2778 if (step)
2780 /* We've already advanced the PC, so the stepping part
2781 is done. Now we need to arrange for a trap to be
2782 reported to handle_inferior_event. Set a breakpoint
2783 at the current PC, and run to it. Don't update
2784 prev_pc, because if we end in
2785 switch_back_to_stepped_thread, we want the "expected
2786 thread advanced also" branch to be taken. IOW, we
2787 don't want this thread to step further from PC
2788 (overstep). */
2789 gdb_assert (!step_over_info_valid_p ());
2790 insert_single_step_breakpoint (gdbarch, aspace, pc);
2791 insert_breakpoints ();
2793 resume_ptid = internal_resume_ptid (user_step);
2794 do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
2795 tp->set_resumed (true);
2796 return;
2801 /* If we have a breakpoint to step over, make sure to do a single
2802 step only. Same if we have software watchpoints. */
2803 if (tp->control.trap_expected || bpstat_should_step ())
2804 tp->control.may_range_step = 0;
2806 /* If displaced stepping is enabled, step over breakpoints by executing a
2807 copy of the instruction at a different address.
2809 We can't use displaced stepping when we have a signal to deliver;
2810 the comments for displaced_step_prepare explain why. The
2811 comments in the handle_inferior event for dealing with 'random
2812 signals' explain what we do instead.
2814 We can't use displaced stepping when we are waiting for a vfork_done
2815 event; displaced stepping would break the vfork child similarly to a
2816 software single-step breakpoint. */
2817 if (tp->control.trap_expected
2818 && use_displaced_stepping (tp)
2819 && !step_over_info_valid_p ()
2820 && sig == GDB_SIGNAL_0
2821 && current_inferior ()->thread_waiting_for_vfork_done == nullptr)
2823 displaced_step_prepare_status prepare_status
2824 = displaced_step_prepare (tp);
2826 if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
2828 infrun_debug_printf ("Got placed in step-over queue");
2830 tp->control.trap_expected = 0;
2831 return;
2833 else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_CANT)
2835 /* Fallback to stepping over the breakpoint in-line. */
2837 if (target_is_non_stop_p ())
2838 stop_all_threads ("displaced stepping falling back on inline stepping");
2840 set_step_over_info (aspace, regcache_read_pc (regcache), 0,
2841 tp->global_num);
2843 step = maybe_software_singlestep (gdbarch);
2845 insert_breakpoints ();
2847 else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_OK)
2849 /* Update pc to reflect the new address from which we will
2850 execute instructions due to displaced stepping. */
2851 pc = regcache_read_pc (get_thread_regcache (tp));
2853 step = gdbarch_displaced_step_hw_singlestep (gdbarch);
2855 else
2856 gdb_assert_not_reached ("Invalid displaced_step_prepare_status "
2857 "value.");
2860 /* Do we need to do it the hard way, w/temp breakpoints? */
2861 else if (step)
2862 step = maybe_software_singlestep (gdbarch);
2864 /* Currently, our software single-step implementation leads to different
2865 results than hardware single-stepping in one situation: when stepping
2866 into delivering a signal which has an associated signal handler,
2867 hardware single-step will stop at the first instruction of the handler,
2868 while software single-step will simply skip execution of the handler.
2870 For now, this difference in behavior is accepted since there is no
2871 easy way to actually implement single-stepping into a signal handler
2872 without kernel support.
2874 However, there is one scenario where this difference leads to follow-on
2875 problems: if we're stepping off a breakpoint by removing all breakpoints
2876 and then single-stepping. In this case, the software single-step
2877 behavior means that even if there is a *breakpoint* in the signal
2878 handler, GDB still would not stop.
2880 Fortunately, we can at least fix this particular issue. We detect
2881 here the case where we are about to deliver a signal while software
2882 single-stepping with breakpoints removed. In this situation, we
2883 revert the decisions to remove all breakpoints and insert single-
2884 step breakpoints, and instead we install a step-resume breakpoint
2885 at the current address, deliver the signal without stepping, and
2886 once we arrive back at the step-resume breakpoint, actually step
2887 over the breakpoint we originally wanted to step over. */
2888 if (thread_has_single_step_breakpoints_set (tp)
2889 && sig != GDB_SIGNAL_0
2890 && step_over_info_valid_p ())
2892 /* If we have nested signals or a pending signal is delivered
2893 immediately after a handler returns, we might already have
2894 a step-resume breakpoint set on the earlier handler. We cannot
2895 set another step-resume breakpoint; just continue on until the
2896 original breakpoint is hit. */
2897 if (tp->control.step_resume_breakpoint == nullptr)
2899 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2900 tp->step_after_step_resume_breakpoint = 1;
2903 delete_single_step_breakpoints (tp);
2905 clear_step_over_info ();
2906 tp->control.trap_expected = 0;
2908 insert_breakpoints ();
2911 /* If STEP is set, it's a request to use hardware stepping
2912 facilities. But in that case, we should never
2913 use singlestep breakpoint. */
2914 gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));
2916 /* Decide the set of threads to ask the target to resume. */
2917 if (tp->control.trap_expected)
2919 /* We're allowing a thread to run past a breakpoint it has
2920 hit, either by single-stepping the thread with the breakpoint
2921 removed, or by displaced stepping, with the breakpoint inserted.
2922 In the former case, we need to single-step only this thread,
2923 and keep others stopped, as they can miss this breakpoint if
2924 allowed to run. That's not really a problem for displaced
2925 stepping, but, we still keep other threads stopped, in case
2926 another thread is also stopped for a breakpoint waiting for
2927 its turn in the displaced stepping queue. */
2928 resume_ptid = inferior_ptid;
2930 else
2931 resume_ptid = internal_resume_ptid (user_step);
2933 if (execution_direction != EXEC_REVERSE
2934 && step && breakpoint_inserted_here_p (aspace, pc))
2936 /* There are two cases where we currently need to step a
2937 breakpoint instruction when we have a signal to deliver:
2939 - See handle_signal_stop where we handle random signals that
2940 could take us out of the stepping range. Normally, in
2941 that case we end up continuing (instead of stepping) over the
2942 signal handler with a breakpoint at PC, but there are cases
2943 where we should _always_ single-step, even if we have a
2944 step-resume breakpoint, like when a software watchpoint is
2945 set. Assuming single-stepping and delivering a signal at the
2946 same time would take us to the signal handler, then we could
2947 have removed the breakpoint at PC to step over it. However,
2948 some hardware step targets (like e.g., Mac OS) can't step
2949 into signal handlers, and for those, we need to leave the
2950 breakpoint at PC inserted, as otherwise if the handler
2951 recurses and executes PC again, it'll miss the breakpoint.
2952 So we leave the breakpoint inserted anyway, but we need to
2953 record that we tried to step a breakpoint instruction, so
2954 that adjust_pc_after_break doesn't end up confused.
2956 - In non-stop if we insert a breakpoint (e.g., a step-resume)
2957 in one thread after another thread that was stepping had been
2958 momentarily paused for a step-over. When we re-resume the
2959 stepping thread, it may be resumed from that address with a
2960 breakpoint that hasn't trapped yet. Seen with
2961 gdb.threads/non-stop-fair-events.exp, on targets that don't
2962 do displaced stepping. */
2964 infrun_debug_printf ("resume: [%s] stepped breakpoint",
2965 tp->ptid.to_string ().c_str ());
2967 tp->stepped_breakpoint = 1;
2969 /* Most targets can step a breakpoint instruction, thus
2970 executing it normally. But if this one cannot, just
2971 continue and we will hit it anyway. */
2972 if (gdbarch_cannot_step_breakpoint (gdbarch))
2973 step = false;
2976 if (tp->control.may_range_step)
2978 /* If we're resuming a thread with the PC out of the step
2979 range, then we're doing some nested/finer run control
2980 operation, like stepping the thread out of the dynamic
2981 linker or the displaced stepping scratch pad. We
2982 shouldn't have allowed a range step then. */
2983 gdb_assert (pc_in_thread_step_range (pc, tp));
2986 do_target_resume (resume_ptid, step, sig);
2987 tp->set_resumed (true);
2990 /* Resume the inferior. SIG is the signal to give the inferior
2991 (GDB_SIGNAL_0 for none). This is a wrapper around 'resume_1' that
2992 rolls back state on error. */
2994 static void
2995 resume (gdb_signal sig)
2999 resume_1 (sig);
3001 catch (const gdb_exception &ex)
3003 /* If resuming is being aborted for any reason, delete any
3004 single-step breakpoint resume_1 may have created, to avoid
3005 confusing the following resumption, and to avoid leaving
3006 single-step breakpoints perturbing other threads, in case
3007 we're running in non-stop mode. */
3008 if (inferior_ptid != null_ptid)
3009 delete_single_step_breakpoints (inferior_thread ());
3010 throw;
3015 /* Proceeding. */
3017 /* See infrun.h. */
3019 /* Counter that tracks number of user visible stops. This can be used
3020 to tell whether a command has proceeded the inferior past the
3021 current location. This allows e.g., inferior function calls in
3022 breakpoint commands to not interrupt the command list. When the
3023 call finishes successfully, the inferior is standing at the same
3024 breakpoint as if nothing happened (and so we don't call
3025 normal_stop). */
3026 static ULONGEST current_stop_id;
3028 /* See infrun.h. */
3030 ULONGEST
3031 get_stop_id (void)
3033 return current_stop_id;
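/* Usage sketch (illustrative only): a caller can detect whether one of
   its actions proceeded the inferior past the current stop by comparing
   stop ids:

     ULONGEST saved_id = get_stop_id ();
     ... run breakpoint commands, an inferior function call, etc. ...
     if (get_stop_id () != saved_id)
       the inferior reported a new user-visible stop in the meantime;  */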
3036 /* Called when we report a user visible stop. */
3038 static void
3039 new_stop_id (void)
3041 current_stop_id++;
3044 /* Clear out all variables saying what to do when inferior is continued.
3045 First do this, then set the ones you want, then call `proceed'. */
3047 static void
3048 clear_proceed_status_thread (struct thread_info *tp)
3050 infrun_debug_printf ("%s", tp->ptid.to_string ().c_str ());
3052 /* If we're starting a new sequence, then the previous finished
3053 single-step is no longer relevant. */
3054 if (tp->has_pending_waitstatus ())
3056 if (tp->stop_reason () == TARGET_STOPPED_BY_SINGLE_STEP)
3058 infrun_debug_printf ("pending event of %s was a finished step. "
3059 "Discarding.",
3060 tp->ptid.to_string ().c_str ());
3062 tp->clear_pending_waitstatus ();
3063 tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
3065 else
3067 infrun_debug_printf
3068 ("thread %s has pending wait status %s (currently_stepping=%d).",
3069 tp->ptid.to_string ().c_str (),
3070 tp->pending_waitstatus ().to_string ().c_str (),
3071 currently_stepping (tp));
3075 /* If this signal should not be seen by program, give it zero.
3076 Used for debugging signals. */
3077 if (!signal_pass_state (tp->stop_signal ()))
3078 tp->set_stop_signal (GDB_SIGNAL_0);
3080 tp->release_thread_fsm ();
3082 tp->control.trap_expected = 0;
3083 tp->control.step_range_start = 0;
3084 tp->control.step_range_end = 0;
3085 tp->control.may_range_step = 0;
3086 tp->control.step_frame_id = null_frame_id;
3087 tp->control.step_stack_frame_id = null_frame_id;
3088 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
3089 tp->control.step_start_function = nullptr;
3090 tp->stop_requested = 0;
3092 tp->control.stop_step = 0;
3094 tp->control.proceed_to_finish = 0;
3096 tp->control.stepping_command = 0;
3098 /* Discard any remaining commands or status from previous stop. */
3099 bpstat_clear (&tp->control.stop_bpstat);
3102 /* Notify the current interpreter and observers that the target is about to
3103 proceed. */
3105 static void
3106 notify_about_to_proceed ()
3108 top_level_interpreter ()->on_about_to_proceed ();
3109 gdb::observers::about_to_proceed.notify ();
3112 void
3113 clear_proceed_status (int step)
3115 /* With scheduler-locking replay, stop replaying other threads if we're
3116 not replaying the user-visible resume ptid.
3118 This is a convenience feature to not require the user to explicitly
3119 stop replaying the other threads. We're assuming that the user's
3120 intent is to resume tracing the recorded process. */
3121 if (!non_stop && scheduler_mode == schedlock_replay
3122 && target_record_is_replaying (minus_one_ptid)
3123 && !target_record_will_replay (user_visible_resume_ptid (step),
3124 execution_direction))
3125 target_record_stop_replaying ();
3127 if (!non_stop && inferior_ptid != null_ptid)
3129 ptid_t resume_ptid = user_visible_resume_ptid (step);
3130 process_stratum_target *resume_target
3131 = user_visible_resume_target (resume_ptid);
3133 /* In all-stop mode, delete the per-thread status of all threads
3134 we're about to resume, implicitly and explicitly. */
3135 for (thread_info *tp : all_non_exited_threads (resume_target, resume_ptid))
3136 clear_proceed_status_thread (tp);
3139 if (inferior_ptid != null_ptid)
3141 struct inferior *inferior;
3143 if (non_stop)
3145 /* If in non-stop mode, only delete the per-thread status of
3146 the current thread. */
3147 clear_proceed_status_thread (inferior_thread ());
3150 inferior = current_inferior ();
3151 inferior->control.stop_soon = NO_STOP_QUIETLY;
3154 notify_about_to_proceed ();
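/* Usage sketch (illustrative only) of the clear-then-set-then-proceed
   protocol described before clear_proceed_status_thread above.  The
   START/END bounds are hypothetical:

     clear_proceed_status (0);
     thread_info *tp = inferior_thread ();
     tp->control.step_range_start = START;
     tp->control.step_range_end = END;
     proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);  */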
3157 /* Returns true if TP is still stopped at a breakpoint that needs
3158 stepping-over in order to make progress. If the breakpoint is gone
3159 meanwhile, we can skip the whole step-over dance. */
3161 static bool
3162 thread_still_needs_step_over_bp (struct thread_info *tp)
3164 if (tp->stepping_over_breakpoint)
3166 struct regcache *regcache = get_thread_regcache (tp);
3168 if (breakpoint_here_p (tp->inf->aspace.get (),
3169 regcache_read_pc (regcache))
3170 == ordinary_breakpoint_here)
3171 return true;
3173 tp->stepping_over_breakpoint = 0;
3176 return false;
3179 /* Check whether thread TP still needs to start a step-over in order
3180 to make progress when resumed. Returns a bitwise OR of enum
3181 step_over_what bits, indicating what needs to be stepped over. */
3183 static step_over_what
3184 thread_still_needs_step_over (struct thread_info *tp)
3186 step_over_what what = 0;
3188 if (thread_still_needs_step_over_bp (tp))
3189 what |= STEP_OVER_BREAKPOINT;
3191 if (tp->stepping_over_watchpoint
3192 && !target_have_steppable_watchpoint ())
3193 what |= STEP_OVER_WATCHPOINT;
3195 return what;
3198 /* Returns true if scheduler locking applies to thread TP, taking into
3199 account whether TP is running a stepping command. */
3201 static bool
3202 schedlock_applies (struct thread_info *tp)
3204 return (scheduler_mode == schedlock_on
3205 || (scheduler_mode == schedlock_step
3206 && tp->control.stepping_command)
3207 || (scheduler_mode == schedlock_replay
3208 && target_record_will_replay (minus_one_ptid,
3209 execution_direction)));
3212 /* When FORCE_P is false, set process_stratum_target::COMMIT_RESUMED_STATE
3213 in all target stacks that have threads executing and don't have threads
3214 with pending events.
3216 When FORCE_P is true, set process_stratum_target::COMMIT_RESUMED_STATE
3217 in all target stacks that have threads executing regardless of whether
3218 there are pending events or not.
3220 Passing FORCE_P as false makes sense when GDB is going to wait for
3221 events from all threads and will therefore spot the pending events.
3222 However, if GDB is only going to wait for events from select threads
3223 (i.e. when performing an inferior call) then a pending event on some
3224 other thread will not be spotted, and if we fail to commit the resume
3225 state for the thread performing the inferior call, then the inferior
3226 call will never complete (or even start). */
3228 static void
3229 maybe_set_commit_resumed_all_targets (bool force_p)
3231 scoped_restore_current_thread restore_thread;
3233 for (inferior *inf : all_non_exited_inferiors ())
3235 process_stratum_target *proc_target = inf->process_target ();
3237 if (proc_target->commit_resumed_state)
3239 /* We already set this in a previous iteration, via another
3240 inferior sharing the process_stratum target. */
3241 continue;
3244 /* If the target has no resumed threads, it would be useless to
3245 ask it to commit the resumed threads. */
3246 if (!proc_target->threads_executing)
3248 infrun_debug_printf ("not requesting commit-resumed for target "
3249 "%s, no resumed threads",
3250 proc_target->shortname ());
3251 continue;
3254 /* As an optimization, if a thread from this target has some
3255 status to report, handle it before requiring the target to
3256 commit its resumed threads: handling the status might lead to
3257 resuming more threads. */
3258 if (!force_p && proc_target->has_resumed_with_pending_wait_status ())
3260 infrun_debug_printf ("not requesting commit-resumed for target %s, a"
3261 " thread has a pending waitstatus",
3262 proc_target->shortname ());
3263 continue;
3266 switch_to_inferior_no_thread (inf);
3268 if (!force_p && target_has_pending_events ())
3270 infrun_debug_printf ("not requesting commit-resumed for target %s, "
3271 "target has pending events",
3272 proc_target->shortname ());
3273 continue;
3276 infrun_debug_printf ("enabling commit-resumed for target %s",
3277 proc_target->shortname ());
3279 proc_target->commit_resumed_state = true;
3283 /* See infrun.h. */
3285 void
3286 maybe_call_commit_resumed_all_targets ()
3288 scoped_restore_current_thread restore_thread;
3290 for (inferior *inf : all_non_exited_inferiors ())
3292 process_stratum_target *proc_target = inf->process_target ();
3294 if (!proc_target->commit_resumed_state)
3295 continue;
3297 switch_to_inferior_no_thread (inf);
3299 infrun_debug_printf ("calling commit_resumed for target %s",
3300 proc_target->shortname());
3302 target_commit_resumed ();
3306 /* To track nesting of scoped_disable_commit_resumed objects, ensuring
3307 that only the outermost one attempts to re-enable
3308 commit-resumed. */
3309 static bool enable_commit_resumed = true;
3311 /* See infrun.h. */
3313 scoped_disable_commit_resumed::scoped_disable_commit_resumed
3314 (const char *reason)
3315 : m_reason (reason),
3316 m_prev_enable_commit_resumed (enable_commit_resumed)
3318 infrun_debug_printf ("reason=%s", m_reason);
3320 enable_commit_resumed = false;
3322 for (inferior *inf : all_non_exited_inferiors ())
3324 process_stratum_target *proc_target = inf->process_target ();
3326 if (m_prev_enable_commit_resumed)
3328 /* This is the outermost instance: force all
3329 COMMIT_RESUMED_STATE to false. */
3330 proc_target->commit_resumed_state = false;
3332 else
3334 /* This is not the outermost instance, we expect
3335 COMMIT_RESUMED_STATE to have been cleared by the
3336 outermost instance. */
3337 gdb_assert (!proc_target->commit_resumed_state);
3342 /* See infrun.h. */
3344 void
3345 scoped_disable_commit_resumed::reset ()
3347 if (m_reset)
3348 return;
3349 m_reset = true;
3351 infrun_debug_printf ("reason=%s", m_reason);
3353 gdb_assert (!enable_commit_resumed);
3355 enable_commit_resumed = m_prev_enable_commit_resumed;
3357 if (m_prev_enable_commit_resumed)
3359 /* This is the outermost instance, re-enable
3360 COMMIT_RESUMED_STATE on the targets where it's possible. */
3361 maybe_set_commit_resumed_all_targets (false);
3363 else
3365 /* This is not the outermost instance, we expect
3366 COMMIT_RESUMED_STATE to still be false. */
3367 for (inferior *inf : all_non_exited_inferiors ())
3369 process_stratum_target *proc_target = inf->process_target ();
3370 gdb_assert (!proc_target->commit_resumed_state);
3375 /* See infrun.h. */
3377 scoped_disable_commit_resumed::~scoped_disable_commit_resumed ()
3379 reset ();
3382 /* See infrun.h. */
3384 void
3385 scoped_disable_commit_resumed::reset_and_commit ()
3387 reset ();
3388 maybe_call_commit_resumed_all_targets ();
3391 /* See infrun.h. */
3393 scoped_enable_commit_resumed::scoped_enable_commit_resumed
3394 (const char *reason, bool force_p)
3395 : m_reason (reason),
3396 m_prev_enable_commit_resumed (enable_commit_resumed)
3398 infrun_debug_printf ("reason=%s", m_reason);
3400 if (!enable_commit_resumed)
3402 enable_commit_resumed = true;
3404 /* Re-enable COMMIT_RESUMED_STATE on the targets where it's
3405 possible. */
3406 maybe_set_commit_resumed_all_targets (force_p);
3408 maybe_call_commit_resumed_all_targets ();
3412 /* See infrun.h. */
3414 scoped_enable_commit_resumed::~scoped_enable_commit_resumed ()
3416 infrun_debug_printf ("reason=%s", m_reason);
3418 gdb_assert (enable_commit_resumed);
3420 enable_commit_resumed = m_prev_enable_commit_resumed;
3422 if (!enable_commit_resumed)
3424 /* Force all COMMIT_RESUMED_STATE back to false. */
3425 for (inferior *inf : all_non_exited_inferiors ())
3427 process_stratum_target *proc_target = inf->process_target ();
3428 proc_target->commit_resumed_state = false;
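/* Typical usage sketch for the RAII types above, mirroring what proceed
   does further down: hold back commit-resumed while deciding what to
   resume, then commit everything in one go.

     {
       scoped_disable_commit_resumed disable_commit_resumed ("reason");

       ... resume threads / queue step-overs ...

       disable_commit_resumed.reset_and_commit ();
     }

   ("reason" here is just an illustrative debug string.)  If the scope
   is instead left early, e.g. by an exception, the destructor calls
   reset (), which re-enables commit-resumed on the targets but does
   not itself commit.  */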
3433 /* Check that all the targets we're about to resume are in non-stop
3434 mode. Ideally, we'd only care whether all targets support
3435 target-async, but we're not there yet. E.g., stop_all_threads
3436 doesn't know how to handle all-stop targets. Also, the remote
3437 protocol in all-stop mode is synchronous, irrespective of
3438 target-async, which means that things like a breakpoint re-set
3439 triggered by one target would try to read memory from all targets
3440 and fail. */
3442 static void
3443 check_multi_target_resumption (process_stratum_target *resume_target)
3445 if (!non_stop && resume_target == nullptr)
3447 scoped_restore_current_thread restore_thread;
3449 /* This is used to track whether we're resuming more than one
3450 target. */
3451 process_stratum_target *first_connection = nullptr;
3453 /* The first inferior we see with a target that does not work in
3454 always-non-stop mode. */
3455 inferior *first_not_non_stop = nullptr;
3457 for (inferior *inf : all_non_exited_inferiors ())
3459 switch_to_inferior_no_thread (inf);
3461 if (!target_has_execution ())
3462 continue;
3464 process_stratum_target *proc_target
3465 = current_inferior ()->process_target();
3467 if (!target_is_non_stop_p ())
3468 first_not_non_stop = inf;
3470 if (first_connection == nullptr)
3471 first_connection = proc_target;
3472 else if (first_connection != proc_target
3473 && first_not_non_stop != nullptr)
3475 switch_to_inferior_no_thread (first_not_non_stop);
3477 proc_target = current_inferior ()->process_target();
3479 error (_("Connection %d (%s) does not support "
3480 "multi-target resumption."),
3481 proc_target->connection_number,
3482 make_target_connection_string (proc_target).c_str ());
3488 /* Helper function for `proceed`. Check if thread TP is suitable for
3489 resuming, and, if it is, switch to the thread and call
3490 `keep_going_pass_signal`. If TP is not suitable for resuming then this
3491 function will just return without switching threads. */
3493 static void
3494 proceed_resume_thread_checked (thread_info *tp)
3496 if (!tp->inf->has_execution ())
3498 infrun_debug_printf ("[%s] target has no execution",
3499 tp->ptid.to_string ().c_str ());
3500 return;
3503 if (tp->resumed ())
3505 infrun_debug_printf ("[%s] resumed",
3506 tp->ptid.to_string ().c_str ());
3507 gdb_assert (tp->executing () || tp->has_pending_waitstatus ());
3508 return;
3511 if (thread_is_in_step_over_chain (tp))
3513 infrun_debug_printf ("[%s] needs step-over",
3514 tp->ptid.to_string ().c_str ());
3515 return;
3518 /* When handling a vfork GDB removes all breakpoints from the program
3519 space in which the vfork is being handled. If we are following the
3520 parent then GDB will set the thread_waiting_for_vfork_done member of
3521 the parent inferior. In this case we should take care to only resume
3522 the vfork parent thread; the kernel will hold this thread suspended
3523 until the vfork child has exited or execd, at which point the parent
3524 will be resumed and a VFORK_DONE event sent to GDB. */
3525 if (tp->inf->thread_waiting_for_vfork_done != nullptr)
3527 if (target_is_non_stop_p ())
3529 /* For non-stop targets, regardless of whether GDB is using
3530 all-stop or non-stop mode, threads are controlled
3531 individually.
3533 When a thread is handling a vfork, breakpoints are removed
3534 from the inferior (well, program space in fact), so it is
3535 critical that we don't try to resume any thread other than the
3536 vfork parent. */
3537 if (tp != tp->inf->thread_waiting_for_vfork_done)
3539 infrun_debug_printf ("[%s] thread %s of this inferior is "
3540 "waiting for vfork-done",
3541 tp->ptid.to_string ().c_str (),
3542 tp->inf->thread_waiting_for_vfork_done
3543 ->ptid.to_string ().c_str ());
3544 return;
3547 else
3549 /* For all-stop targets, when we attempt to resume the inferior,
3550 we will only resume the vfork parent thread, this is handled
3551 in internal_resume_ptid.
3553 Additionally, we will always be called with the vfork parent
3554 thread as the current thread (TP) thanks to follow_fork, as
3555 such the following assertion should hold.
3557 Beyond this there is nothing more that needs to be done
3558 here. */
3559 gdb_assert (tp == tp->inf->thread_waiting_for_vfork_done);
3563 /* When handling a vfork GDB removes all breakpoints from the program
3564 space in which the vfork is being handled. If we are following the
3565 child then GDB will set vfork_child member of the vfork parent
3566 inferior. Once the child has either exited or execd then GDB will
3567 detach from the parent process. Until that point GDB should not
3568 resume any thread in the parent process. */
3569 if (tp->inf->vfork_child != nullptr)
3571 infrun_debug_printf ("[%s] thread is part of a vfork parent, child is %d",
3572 tp->ptid.to_string ().c_str (),
3573 tp->inf->vfork_child->pid);
3574 return;
3577 infrun_debug_printf ("resuming %s",
3578 tp->ptid.to_string ().c_str ());
3580 execution_control_state ecs (tp);
3581 switch_to_thread (tp);
3582 keep_going_pass_signal (&ecs);
3583 if (!ecs.wait_some_more)
3584 error (_("Command aborted."));
3587 /* Basic routine for continuing the program in various fashions.
3589 ADDR is the address to resume at, or -1 for resume where stopped.
3590 SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none,
3591 or GDB_SIGNAL_DEFAULT for act according to how it stopped.
3593 You should call clear_proceed_status before calling proceed. */
3595 void
3596 proceed (CORE_ADDR addr, enum gdb_signal siggnal)
3598 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
3600 struct gdbarch *gdbarch;
3601 CORE_ADDR pc;
3603 /* If we're stopped at a fork/vfork, switch to either the parent or child
3604 thread as defined by the "set follow-fork-mode" command, or, if both
3605 the parent and child are controlled by GDB, and schedule-multiple is
3606 on, follow the child. If none of the above apply then we just proceed
3607 resuming the current thread. */
3608 if (!follow_fork ())
3610 /* The target for some reason decided not to resume. */
3611 normal_stop ();
3612 if (target_can_async_p ())
3613 inferior_event_handler (INF_EXEC_COMPLETE);
3614 return;
3617 /* We'll update this if & when we switch to a new thread. */
3618 update_previous_thread ();
3620 thread_info *cur_thr = inferior_thread ();
3621 infrun_debug_printf ("cur_thr = %s", cur_thr->ptid.to_string ().c_str ());
3623 regcache *regcache = get_thread_regcache (cur_thr);
3624 gdbarch = regcache->arch ();
3625 pc = regcache_read_pc_protected (regcache);
3627 /* Fill in with reasonable starting values. */
3628 init_thread_stepping_state (cur_thr);
3630 gdb_assert (!thread_is_in_step_over_chain (cur_thr));
3632 ptid_t resume_ptid
3633 = user_visible_resume_ptid (cur_thr->control.stepping_command);
3634 process_stratum_target *resume_target
3635 = user_visible_resume_target (resume_ptid);
3637 check_multi_target_resumption (resume_target);
3639 if (addr == (CORE_ADDR) -1)
3641 const address_space *aspace = cur_thr->inf->aspace.get ();
3643 if (cur_thr->stop_pc_p ()
3644 && pc == cur_thr->stop_pc ()
3645 && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
3646 && execution_direction != EXEC_REVERSE)
3647 /* There is a breakpoint at the address we will resume at,
3648 step one instruction before inserting breakpoints so that
3649 we do not stop right away (and report a second hit at this
3650 breakpoint).
3652 Note, we don't do this in reverse, because we won't
3653 actually be executing the breakpoint insn anyway.
3654 We'll be (un-)executing the previous instruction. */
3655 cur_thr->stepping_over_breakpoint = 1;
3656 else if (gdbarch_single_step_through_delay_p (gdbarch)
3657 && gdbarch_single_step_through_delay (gdbarch,
3658 get_current_frame ()))
3659 /* We stepped onto an instruction that needs to be stepped
3660 again before re-inserting the breakpoint, do so. */
3661 cur_thr->stepping_over_breakpoint = 1;
3663 else
3665 regcache_write_pc (regcache, addr);
3668 if (siggnal != GDB_SIGNAL_DEFAULT)
3669 cur_thr->set_stop_signal (siggnal);
3671 /* If an exception is thrown from this point on, make sure to
3672 propagate GDB's knowledge of the executing state to the
3673 frontend/user running state. */
3674 scoped_finish_thread_state finish_state (resume_target, resume_ptid);
3676 /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
3677 threads (e.g., we might need to set threads stepping over
3678 breakpoints first), from the user/frontend's point of view, all
3679 threads in RESUME_PTID are now running. Unless we're calling an
3680 inferior function, as in that case we pretend the inferior
3681 doesn't run at all. */
3682 if (!cur_thr->control.in_infcall)
3683 set_running (resume_target, resume_ptid, true);
3685 infrun_debug_printf ("addr=%s, signal=%s, resume_ptid=%s",
3686 paddress (gdbarch, addr),
3687 gdb_signal_to_symbol_string (siggnal),
3688 resume_ptid.to_string ().c_str ());
3690 annotate_starting ();
3692 /* Make sure that output from GDB appears before output from the
3693 inferior. */
3694 gdb_flush (gdb_stdout);
3696 /* Since we've marked the inferior running, give it the terminal. A
3697 QUIT/Ctrl-C from here on is forwarded to the target (which can
3698 still detect attempts to unblock a stuck connection with repeated
3699 Ctrl-C from within target_pass_ctrlc). */
3700 target_terminal::inferior ();
3702 /* In a multi-threaded task we may select another thread and
3703 then continue or step.
3705 But if a thread that we're resuming had stopped at a breakpoint,
3706 it will immediately cause another breakpoint stop without any
3707 execution (i.e. it will report a breakpoint hit incorrectly). So
3708 we must step over it first.
3710 Look for threads other than the current (TP) that reported a
3711 breakpoint hit and haven't been resumed since then. */
3713 /* If scheduler locking applies, we can avoid iterating over all
3714 threads. */
3715 if (!non_stop && !schedlock_applies (cur_thr))
3717 for (thread_info *tp : all_non_exited_threads (resume_target,
3718 resume_ptid))
3720 switch_to_thread_no_regs (tp);
3722 /* Ignore the current thread here. It's handled
3723 afterwards. */
3724 if (tp == cur_thr)
3725 continue;
3727 if (!thread_still_needs_step_over (tp))
3728 continue;
3730 gdb_assert (!thread_is_in_step_over_chain (tp));
3732 infrun_debug_printf ("need to step-over [%s] first",
3733 tp->ptid.to_string ().c_str ());
3735 global_thread_step_over_chain_enqueue (tp);
3738 switch_to_thread (cur_thr);
3741 /* Enqueue the current thread last, so that we move all other
3742 threads over their breakpoints first. */
3743 if (cur_thr->stepping_over_breakpoint)
3744 global_thread_step_over_chain_enqueue (cur_thr);
3746 /* If the thread isn't started, we'll still need to set its prev_pc,
3747 so that switch_back_to_stepped_thread knows the thread hasn't
3748 advanced. Must do this before resuming any thread, as in
3749 all-stop/remote, once we resume we can't send any other packet
3750 until the target stops again. */
3751 cur_thr->prev_pc = regcache_read_pc_protected (regcache);
3754 scoped_disable_commit_resumed disable_commit_resumed ("proceeding");
3755 bool step_over_started = start_step_over ();
3757 if (step_over_info_valid_p ())
3759 /* Either this thread started a new in-line step over, or some
3760 other thread was already doing one. In either case, don't
3761 resume anything else until the step-over is finished. */
3763 else if (step_over_started && !target_is_non_stop_p ())
3765 /* A new displaced stepping sequence was started. In all-stop,
3766 we can't talk to the target anymore until it next stops. */
3768 else if (!non_stop && target_is_non_stop_p ())
3770 INFRUN_SCOPED_DEBUG_START_END
3771 ("resuming threads, all-stop-on-top-of-non-stop");
3773 /* In all-stop, but the target is always in non-stop mode.
3774 Start all other threads that are implicitly resumed too. */
3775 for (thread_info *tp : all_non_exited_threads (resume_target,
3776 resume_ptid))
3778 switch_to_thread_no_regs (tp);
3779 proceed_resume_thread_checked (tp);
3782 else
3783 proceed_resume_thread_checked (cur_thr);
3785 disable_commit_resumed.reset_and_commit ();
3788 finish_state.release ();
3790 /* If we've switched threads above, switch back to the previously
3791 current thread. We don't want the user to see a different
3792 selected thread. */
3793 switch_to_thread (cur_thr);
3795 /* Tell the event loop to wait for it to stop. If the target
3796 supports asynchronous execution, it'll do this from within
3797 target_resume. */
3798 if (!target_can_async_p ())
3799 mark_async_event_handler (infrun_async_inferior_event_token);
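/* A minimal sketch of a proceed call, assuming the caller has already
   switched to the thread it wants to resume:

     clear_proceed_status (0);
     proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);

   i.e., resume at the address where the thread stopped, passing
   whatever signal it stopped with, per the comment above proceed.  */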
3803 /* Start remote-debugging of a machine over a serial link. */
3805 void
3806 start_remote (int from_tty)
3808 inferior *inf = current_inferior ();
3809 inf->control.stop_soon = STOP_QUIETLY_REMOTE;
3811 /* Always go on waiting for the target, regardless of the mode. */
3812 /* FIXME: cagney/1999-09-23: At present it isn't possible to
3813 indicate to wait_for_inferior that a target should timeout if
3814 nothing is returned (instead of just blocking). Because of this,
3815 targets expecting an immediate response need to, internally, set
3816 things up so that the target_wait() is forced to eventually
3817 timeout. */
3818 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
3819 differentiate to its caller what the state of the target is after
3820 the initial open has been performed. Here we're assuming that
3821 the target has stopped. It should be possible to eventually have
3822 target_open() return to the caller an indication that the target
3823 is currently running and GDB state should be set to the same as
3824 for an async run. */
3825 wait_for_inferior (inf);
3827 /* Now that the inferior has stopped, do any bookkeeping like
3828 loading shared libraries. We want to do this before normal_stop,
3829 so that the displayed frame is up to date. */
3830 post_create_inferior (from_tty);
3832 normal_stop ();
3835 /* Initialize static vars when a new inferior begins. */
3837 void
3838 init_wait_for_inferior (void)
3840 /* These are meaningless until the first time through wait_for_inferior. */
3842 breakpoint_init_inferior (current_inferior (), inf_starting);
3844 clear_proceed_status (0);
3846 nullify_last_target_wait_ptid ();
3848 update_previous_thread ();
3853 static void handle_inferior_event (struct execution_control_state *ecs);
3855 static void handle_step_into_function (struct gdbarch *gdbarch,
3856 struct execution_control_state *ecs);
3857 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
3858 struct execution_control_state *ecs);
3859 static void handle_signal_stop (struct execution_control_state *ecs);
3860 static void check_exception_resume (struct execution_control_state *,
3861 const frame_info_ptr &);
3863 static void end_stepping_range (struct execution_control_state *ecs);
3864 static void stop_waiting (struct execution_control_state *ecs);
3865 static void keep_going (struct execution_control_state *ecs);
3866 static void process_event_stop_test (struct execution_control_state *ecs);
3867 static bool switch_back_to_stepped_thread (struct execution_control_state *ecs);
3869 /* This function is attached as a "thread_stop_requested" observer.
3870 Cleanup local state that assumed the PTID was to be resumed, and
3871 report the stop to the frontend. */
3873 static void
3874 infrun_thread_stop_requested (ptid_t ptid)
3876 process_stratum_target *curr_target = current_inferior ()->process_target ();
3878 /* PTID was requested to stop. If the thread was already stopped,
3879 but the user/frontend doesn't know about that yet (e.g., the
3880 thread had been temporarily paused for some step-over), set up
3881 for reporting the stop now. */
3882 for (thread_info *tp : all_threads (curr_target, ptid))
3884 if (tp->state != THREAD_RUNNING)
3885 continue;
3886 if (tp->executing ())
3887 continue;
3889 /* Remove matching threads from the step-over queue, so
3890 start_step_over doesn't try to resume them
3891 automatically. */
3892 if (thread_is_in_step_over_chain (tp))
3893 global_thread_step_over_chain_remove (tp);
3895 /* If the thread is stopped, but the user/frontend doesn't
3896 know about that yet, queue a pending event, as if the
3897 thread had just stopped now. Unless the thread already had
3898 a pending event. */
3899 if (!tp->has_pending_waitstatus ())
3901 target_waitstatus ws;
3902 ws.set_stopped (GDB_SIGNAL_0);
3903 tp->set_pending_waitstatus (ws);
3906 /* Clear the inline-frame state, since we're re-processing the
3907 stop. */
3908 clear_inline_frame_state (tp);
3910 /* If this thread was paused because some other thread was
3911 doing an inline-step over, let that finish first. Once
3912 that happens, we'll restart all threads and consume pending
3913 stop events then. */
3914 if (step_over_info_valid_p ())
3915 continue;
3917 /* Otherwise we can process the (new) pending event now. Set
3918 it so this pending event is considered by
3919 do_target_wait. */
3920 tp->set_resumed (true);
3924 /* Delete the step resume, single-step and longjmp/exception resume
3925 breakpoints of TP. */
3927 static void
3928 delete_thread_infrun_breakpoints (struct thread_info *tp)
3930 delete_step_resume_breakpoint (tp);
3931 delete_exception_resume_breakpoint (tp);
3932 delete_single_step_breakpoints (tp);
3935 /* If the target still has execution, call FUNC for each thread that
3936 just stopped. In all-stop, that's all the non-exited threads; in
3937 non-stop, that's the current thread, only. */
3939 typedef void (*for_each_just_stopped_thread_callback_func)
3940 (struct thread_info *tp);
3942 static void
3943 for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
3945 if (!target_has_execution () || inferior_ptid == null_ptid)
3946 return;
3948 if (target_is_non_stop_p ())
3950 /* If in non-stop mode, only the current thread stopped. */
3951 func (inferior_thread ());
3953 else
3955 /* In all-stop mode, all threads have stopped. */
3956 for (thread_info *tp : all_non_exited_threads ())
3957 func (tp);
3961 /* Delete the step resume and longjmp/exception resume breakpoints of
3962 the threads that just stopped. */
3964 static void
3965 delete_just_stopped_threads_infrun_breakpoints (void)
3967 for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
3970 /* Delete the single-step breakpoints of the threads that just
3971 stopped. */
3973 static void
3974 delete_just_stopped_threads_single_step_breakpoints (void)
3976 for_each_just_stopped_thread (delete_single_step_breakpoints);
3979 /* See infrun.h. */
3981 void
3982 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
3983 const struct target_waitstatus &ws)
3985 infrun_debug_printf ("target_wait (%s [%s], status) =",
3986 waiton_ptid.to_string ().c_str (),
3987 target_pid_to_str (waiton_ptid).c_str ());
3988 infrun_debug_printf (" %s [%s],",
3989 result_ptid.to_string ().c_str (),
3990 target_pid_to_str (result_ptid).c_str ());
3991 infrun_debug_printf (" %s", ws.to_string ().c_str ());
3994 /* Select a thread at random, out of those which are resumed and have
3995 had events. */
3997 static struct thread_info *
3998 random_pending_event_thread (inferior *inf, ptid_t waiton_ptid)
4000 process_stratum_target *proc_target = inf->process_target ();
4001 thread_info *thread
4002 = proc_target->random_resumed_with_pending_wait_status (inf, waiton_ptid);
4004 if (thread == nullptr)
4006 infrun_debug_printf ("None found.");
4007 return nullptr;
4010 infrun_debug_printf ("Found %s.", thread->ptid.to_string ().c_str ());
4011 gdb_assert (thread->resumed ());
4012 gdb_assert (thread->has_pending_waitstatus ());
4014 return thread;
4017 /* Wrapper for target_wait that first checks whether threads have
4018 pending statuses to report before actually asking the target for
4019 more events. INF is the inferior we're using to call target_wait
4020 on. */
4022 static ptid_t
4023 do_target_wait_1 (inferior *inf, ptid_t ptid,
4024 target_waitstatus *status, target_wait_flags options)
4026 struct thread_info *tp;
4028 /* We know that we are looking for an event in the target of inferior
4029 INF, but we don't know which thread the event might come from. As
4030 such we want to make sure that INFERIOR_PTID is reset so that none of
4031 the wait code relies on it - doing so is always a mistake. */
4032 switch_to_inferior_no_thread (inf);
4034 /* First check if there is a resumed thread with a wait status
4035 pending. */
4036 if (ptid == minus_one_ptid || ptid.is_pid ())
4038 tp = random_pending_event_thread (inf, ptid);
4040 else
4042 infrun_debug_printf ("Waiting for specific thread %s.",
4043 ptid.to_string ().c_str ());
4045 /* We have a specific thread to check. */
4046 tp = inf->find_thread (ptid);
4047 gdb_assert (tp != nullptr);
4048 if (!tp->has_pending_waitstatus ())
4049 tp = nullptr;
4052 if (tp != nullptr
4053 && (tp->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
4054 || tp->stop_reason () == TARGET_STOPPED_BY_HW_BREAKPOINT))
4056 struct regcache *regcache = get_thread_regcache (tp);
4057 struct gdbarch *gdbarch = regcache->arch ();
4058 CORE_ADDR pc;
4059 int discard = 0;
4061 pc = regcache_read_pc (regcache);
4063 if (pc != tp->stop_pc ())
4065 infrun_debug_printf ("PC of %s changed. was=%s, now=%s",
4066 tp->ptid.to_string ().c_str (),
4067 paddress (gdbarch, tp->stop_pc ()),
4068 paddress (gdbarch, pc));
4069 discard = 1;
4071 else if (!breakpoint_inserted_here_p (tp->inf->aspace.get (), pc))
4073 infrun_debug_printf ("previous breakpoint of %s, at %s gone",
4074 tp->ptid.to_string ().c_str (),
4075 paddress (gdbarch, pc));
4077 discard = 1;
4080 if (discard)
4082 infrun_debug_printf ("pending event of %s cancelled.",
4083 tp->ptid.to_string ().c_str ());
4085 tp->clear_pending_waitstatus ();
4086 target_waitstatus ws;
4087 ws.set_spurious ();
4088 tp->set_pending_waitstatus (ws);
4089 tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
4093 if (tp != nullptr)
4095 infrun_debug_printf ("Using pending wait status %s for %s.",
4096 tp->pending_waitstatus ().to_string ().c_str (),
4097 tp->ptid.to_string ().c_str ());
4099 /* Now that we've selected our final event LWP, un-adjust its PC
4100 if it was a software breakpoint (and the target doesn't
4101 always adjust the PC itself). */
4102 if (tp->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
4103 && !target_supports_stopped_by_sw_breakpoint ())
4105 struct regcache *regcache;
4106 struct gdbarch *gdbarch;
4107 int decr_pc;
4109 regcache = get_thread_regcache (tp);
4110 gdbarch = regcache->arch ();
4112 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
4113 if (decr_pc != 0)
4115 CORE_ADDR pc;
4117 pc = regcache_read_pc (regcache);
4118 regcache_write_pc (regcache, pc + decr_pc);
4122 tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
4123 *status = tp->pending_waitstatus ();
4124 tp->clear_pending_waitstatus ();
4126 /* Wake up the event loop again, until all pending events are
4127 processed. */
4128 if (target_is_async_p ())
4129 mark_async_event_handler (infrun_async_inferior_event_token);
4130 return tp->ptid;
4133 /* But if we don't find one, we'll have to wait. */
4135 /* We can't ask a non-async target to do a non-blocking wait, so this will be
4136 a blocking wait. */
4137 if (!target_can_async_p ())
4138 options &= ~TARGET_WNOHANG;
4140 return target_wait (ptid, status, options);
4143 /* Wrapper for target_wait that first checks whether threads have
4144 pending statuses to report before actually asking the target for
4145 more events. Polls for events from all inferiors/targets. */
4147 static bool
4148 do_target_wait (ptid_t wait_ptid, execution_control_state *ecs,
4149 target_wait_flags options)
4151 int num_inferiors = 0;
4152 int random_selector;
4154 /* For fairness, we pick the first inferior/target to poll at random
4155 out of all inferiors that may report events, and then continue
4156 polling the rest of the inferior list starting from that one in a
4157 circular fashion until the whole list is polled once. */
4159 ptid_t wait_ptid_pid {wait_ptid.pid ()};
4160 auto inferior_matches = [&wait_ptid_pid] (inferior *inf)
4162 return (inf->process_target () != nullptr
4163 && ptid_t (inf->pid).matches (wait_ptid_pid));
4166 /* First see how many matching inferiors we have. */
4167 for (inferior *inf : all_inferiors ())
4168 if (inferior_matches (inf))
4169 num_inferiors++;
4171 if (num_inferiors == 0)
4173 ecs->ws.set_ignore ();
4174 return false;
4177 /* Now randomly pick an inferior out of those that matched. */
4178 random_selector = (int)
4179 ((num_inferiors * (double) rand ()) / (RAND_MAX + 1.0));
4181 if (num_inferiors > 1)
4182 infrun_debug_printf ("Found %d inferiors, starting at #%d",
4183 num_inferiors, random_selector);
4185 /* Select the Nth inferior that matched. */
4187 inferior *selected = nullptr;
4189 for (inferior *inf : all_inferiors ())
4190 if (inferior_matches (inf))
4191 if (random_selector-- == 0)
4193 selected = inf;
4194 break;
4197 /* Now poll for events out of each of the matching inferior's
4198 targets, starting from the selected one. */
4200 auto do_wait = [&] (inferior *inf)
4202 ecs->ptid = do_target_wait_1 (inf, wait_ptid, &ecs->ws, options);
4203 ecs->target = inf->process_target ();
4204 return (ecs->ws.kind () != TARGET_WAITKIND_IGNORE);
4207 /* Needed in 'all-stop + target-non-stop' mode, because we end up
4208 here spuriously after the target is all stopped and we've already
4209 reported the stop to the user, polling for events. */
4210 scoped_restore_current_thread restore_thread;
4212 intrusive_list_iterator<inferior> start
4213 = inferior_list.iterator_to (*selected);
4215 for (intrusive_list_iterator<inferior> it = start;
4216 it != inferior_list.end ();
4217 ++it)
4219 inferior *inf = &*it;
4221 if (inferior_matches (inf) && do_wait (inf))
4222 return true;
4225 for (intrusive_list_iterator<inferior> it = inferior_list.begin ();
4226 it != start;
4227 ++it)
4229 inferior *inf = &*it;
4231 if (inferior_matches (inf) && do_wait (inf))
4232 return true;
4235 ecs->ws.set_ignore ();
4236 return false;
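/* Worked example of the selection above: with three matching inferiors,
   random_selector is (int) ((3 * (double) rand ()) / (RAND_MAX + 1.0)),
   i.e. 0, 1 or 2.  If it comes out as 1, the second matching inferior
   is polled first, then the remaining matching inferiors in list order,
   wrapping around to the first; each matching inferior is polled at
   most once, and we stop as soon as one of them reports an event.  */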
4239 /* An event reported by wait_one. */
4241 struct wait_one_event
4243 /* The target the event came out of. */
4244 process_stratum_target *target;
4246 /* The PTID the event was for. */
4247 ptid_t ptid;
4249 /* The waitstatus. */
4250 target_waitstatus ws;
4253 static bool handle_one (const wait_one_event &event);
4254 static int finish_step_over (struct execution_control_state *ecs);
4256 /* Prepare and stabilize the inferior for detaching it. E.g.,
4257 detaching while a thread is displaced stepping is a recipe for
4258 crashing it, as nothing would readjust the PC out of the scratch
4259 pad. */
4261 void
4262 prepare_for_detach (void)
4264 struct inferior *inf = current_inferior ();
4265 ptid_t pid_ptid = ptid_t (inf->pid);
4266 scoped_restore_current_thread restore_thread;
4268 scoped_restore restore_detaching = make_scoped_restore (&inf->detaching, true);
4270 /* Remove all threads of INF from the global step-over chain. We
4271 want to stop any ongoing step-over, not start any new one. */
4272 thread_step_over_list_safe_range range
4273 = make_thread_step_over_list_safe_range (global_thread_step_over_list);
4275 for (thread_info *tp : range)
4276 if (tp->inf == inf)
4278 infrun_debug_printf ("removing thread %s from global step over chain",
4279 tp->ptid.to_string ().c_str ());
4280 global_thread_step_over_chain_remove (tp);
4283 /* If we were already in the middle of an inline step-over, and the
4284 thread stepping belongs to the inferior we're detaching, we need
4285 to restart the threads of other inferiors. */
4286 if (step_over_info.thread != -1)
4288 infrun_debug_printf ("inline step-over in-progress while detaching");
4290 thread_info *thr = find_thread_global_id (step_over_info.thread);
4291 if (thr->inf == inf)
4293 /* Since we removed threads of INF from the step-over chain,
4294 we know this won't start a step-over for INF. */
4295 clear_step_over_info ();
4297 if (target_is_non_stop_p ())
4299 /* Start a new step-over in another thread if there's
4300 one that needs it. */
4301 start_step_over ();
4303 /* Restart all other threads (except the
4304 previously-stepping thread, since that one is still
4305 running). */
4306 if (!step_over_info_valid_p ())
4307 restart_threads (thr);
4312 if (displaced_step_in_progress (inf))
4314 infrun_debug_printf ("displaced-stepping in-progress while detaching");
4316 /* Stop threads currently displaced stepping, aborting it. */
4318 for (thread_info *thr : inf->non_exited_threads ())
4320 if (thr->displaced_step_state.in_progress ())
4322 if (thr->executing ())
4324 if (!thr->stop_requested)
4326 target_stop (thr->ptid);
4327 thr->stop_requested = true;
4330 else
4331 thr->set_resumed (false);
4335 while (displaced_step_in_progress (inf))
4337 wait_one_event event;
4339 event.target = inf->process_target ();
4340 event.ptid = do_target_wait_1 (inf, pid_ptid, &event.ws, 0);
4342 if (debug_infrun)
4343 print_target_wait_results (pid_ptid, event.ptid, event.ws);
4345 handle_one (event);
4348 /* It's OK to leave some of the threads of INF stopped, since
4349 they'll be detached shortly. */
4353 /* If all-stop, but there exists a non-stop target, stop all threads
4354 now that we're presenting the stop to the user. */
4356 static void
4357 stop_all_threads_if_all_stop_mode ()
4359 if (!non_stop && exists_non_stop_target ())
4360 stop_all_threads ("presenting stop to user in all-stop");
4363 /* Wait for control to return from inferior to debugger.
4365 If inferior gets a signal, we may decide to start it up again
4366 instead of returning. That is why there is a loop in this function.
4367 When this function actually returns it means the inferior
4368 should be left stopped and GDB should read more commands. */
4370 static void
4371 wait_for_inferior (inferior *inf)
4373 infrun_debug_printf ("wait_for_inferior ()");
4375 SCOPE_EXIT { delete_just_stopped_threads_infrun_breakpoints (); };
4377 /* If an error happens while handling the event, propagate GDB's
4378 knowledge of the executing state to the frontend/user running
4379 state. */
4380 scoped_finish_thread_state finish_state
4381 (inf->process_target (), minus_one_ptid);
4383 while (1)
4385 execution_control_state ecs;
4387 overlay_cache_invalid = 1;
4389 /* Flush target cache before starting to handle each event.
4390 Target was running and cache could be stale. This is just a
4391 heuristic. Running threads may modify target memory, but we
4392 don't get any event. */
4393 target_dcache_invalidate (current_program_space->aspace);
4395 ecs.ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs.ws, 0);
4396 ecs.target = inf->process_target ();
4398 if (debug_infrun)
4399 print_target_wait_results (minus_one_ptid, ecs.ptid, ecs.ws);
4401 /* Now figure out what to do with the result of the wait. */
4402 handle_inferior_event (&ecs);
4404 if (!ecs.wait_some_more)
4405 break;
4408 stop_all_threads_if_all_stop_mode ();
4410 /* No error, don't finish the state yet. */
4411 finish_state.release ();
4414 /* Cleanup that reinstalls the readline callback handler, if the
4415 target is running in the background. If while handling the target
4416 event something triggered a secondary prompt, like e.g., a
4417 pagination prompt, we'll have removed the callback handler (see
4418 gdb_readline_wrapper_line). Need to do this as we go back to the
4419 event loop, ready to process further input. Note the reinstall is
4420 skipped if the handler hasn't actually been removed, because calling
4421 rl_callback_handler_install unconditionally would reset the line
4422 buffer, thus losing input. */
4424 static void
4425 reinstall_readline_callback_handler_cleanup ()
4427 struct ui *ui = current_ui;
4429 if (!ui->async)
4431 /* We're not going back to the top level event loop yet. Don't
4432 install the readline callback, as it'd prep the terminal,
4433 readline-style (raw, noecho) (e.g., --batch). We'll install
4434 it the next time the prompt is displayed, when we're ready
4435 for input. */
4436 return;
4439 if (ui->command_editing && ui->prompt_state != PROMPT_BLOCKED)
4440 gdb_rl_callback_handler_reinstall ();
4443 /* Clean up the FSMs of threads that are now stopped. In non-stop,
4444 that's just the event thread. In all-stop, that's all threads. In
4445 all-stop, threads that had a pending exit no longer have a reason
4446 to be around, as their FSMs/commands are canceled, so we delete
4447 them. This avoids "info threads" listing such threads as if they
4448 were alive (and failing to read their registers), the user being
4449 able to select and resume them (and that failing), etc. */
4451 static void
4452 clean_up_just_stopped_threads_fsms (struct execution_control_state *ecs)
4454 /* The first clean_up call below assumes the event thread is the current
4455 one. */
4456 if (ecs->event_thread != nullptr)
4457 gdb_assert (ecs->event_thread == inferior_thread ());
4459 if (ecs->event_thread != nullptr
4460 && ecs->event_thread->thread_fsm () != nullptr)
4461 ecs->event_thread->thread_fsm ()->clean_up (ecs->event_thread);
4463 if (!non_stop)
4465 scoped_restore_current_thread restore_thread;
4467 for (thread_info *thr : all_threads_safe ())
4469 if (thr->state == THREAD_EXITED)
4470 continue;
4472 if (thr == ecs->event_thread)
4473 continue;
4475 if (thr->thread_fsm () != nullptr)
4477 switch_to_thread (thr);
4478 thr->thread_fsm ()->clean_up (thr);
4481 /* As we are cancelling the command/FSM of this thread,
4482 whatever was the reason we needed to report a thread
4483 exited event to the user, that reason is gone. Delete
4484 the thread, so that the user doesn't see it in the thread
4485 list, the next proceed doesn't try to resume it, etc. */
4486 if (thr->has_pending_waitstatus ()
4487 && (thr->pending_waitstatus ().kind ()
4488 == TARGET_WAITKIND_THREAD_EXITED))
4489 delete_thread (thr);
4494 /* Helper for all_uis_check_sync_execution_done that works on the
4495 current UI. */
4497 static void
4498 check_curr_ui_sync_execution_done (void)
4500 struct ui *ui = current_ui;
4502 if (ui->prompt_state == PROMPT_NEEDED
4503 && ui->async
4504 && !gdb_in_secondary_prompt_p (ui))
4506 target_terminal::ours ();
4507 top_level_interpreter ()->on_sync_execution_done ();
4508 ui->register_file_handler ();
4512 /* See infrun.h. */
4514 void
4515 all_uis_check_sync_execution_done (void)
4517 SWITCH_THRU_ALL_UIS ()
4519 check_curr_ui_sync_execution_done ();
4523 /* See infrun.h. */
4525 void
4526 all_uis_on_sync_execution_starting (void)
4528 SWITCH_THRU_ALL_UIS ()
4530 if (current_ui->prompt_state == PROMPT_NEEDED)
4531 async_disable_stdin ();
4535 /* A quit_handler callback installed while we're handling inferior
4536 events. */
4538 static void
4539 infrun_quit_handler ()
4541 if (target_terminal::is_ours ())
4543 /* Do nothing.
4545 default_quit_handler would throw a quit in this case, but if
4546 we're handling an event while we have the terminal, it means
4547 the target is running a background execution command, and
4548 thus when users press Ctrl-C, they're wanting to interrupt
4549 whatever command they were executing in the command line.
4550 E.g.:
4552 (gdb) c&
4553 (gdb) foo bar whatever<ctrl-c>
4555 That Ctrl-C should clear the input line, not interrupt event
4556 handling if it happens that the user types Ctrl-C at just the
4557 "wrong" time!
4559 It's as if background event handling were handled by a
4560 separate background thread.
4562 To be clear, the Ctrl-C is not lost -- it will be processed
4563 by the next QUIT call once we're out of fetch_inferior_event
4564 again. */
4566 else
4568 if (check_quit_flag ())
4569 target_pass_ctrlc ();
4573 /* Asynchronous version of wait_for_inferior. It is called by the
4574 event loop whenever a change of state is detected on the file
4575 descriptor corresponding to the target. It can be called more than
4576 once to complete a single execution command. In such cases the
4577 state needed to finish the command is kept across calls. If it is
4578 the last time that this function is called for a single execution
4579 command, then report to the user that the inferior has stopped, and
4580 do the necessary cleanups. */
4582 void
4583 fetch_inferior_event ()
4585 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
4587 execution_control_state ecs;
4588 int cmd_done = 0;
4590 /* Events are always processed with the main UI as current UI. This
4591 way, warnings, debug output, etc. are always consistently sent to
4592 the main console. */
4593 scoped_restore save_ui = make_scoped_restore (&current_ui, main_ui);
4595 /* Temporarily disable pagination. Otherwise, the user would be
4596 given an option to press 'q' to quit, which would cause an early
4597 exit and could leave GDB in a half-baked state. */
4598 scoped_restore save_pagination
4599 = make_scoped_restore (&pagination_enabled, false);
4601 /* Install a quit handler that does nothing if we have the terminal
4602 (meaning the target is running a background execution command),
4603 so that Ctrl-C never interrupts GDB before the event is fully
4604 handled. */
4605 scoped_restore restore_quit_handler
4606 = make_scoped_restore (&quit_handler, infrun_quit_handler);
4608 /* Make sure a SIGINT does not interrupt an extension language while
4609 we're handling an event. That could interrupt a Python unwinder
4610 or a Python observer or some such. A Ctrl-C should either be
4611 forwarded to the inferior if the inferior has the terminal, or,
4612 if GDB has the terminal, should interrupt the command the user is
4613 typing in the CLI. */
4614 scoped_disable_cooperative_sigint_handling restore_coop_sigint;
4616 /* End up with readline processing input, if necessary. */
4618 SCOPE_EXIT { reinstall_readline_callback_handler_cleanup (); };
4620 /* We're handling a live event, so make sure we're doing live
4621 debugging. If we're looking at traceframes while the target is
4622 running, we're going to need to get back to that mode after
4623 handling the event. */
4624 std::optional<scoped_restore_current_traceframe> maybe_restore_traceframe;
4625 if (non_stop)
4627 maybe_restore_traceframe.emplace ();
4628 set_current_traceframe (-1);
4631 /* The user/frontend should not notice a thread switch due to
4632 internal events. Make sure we revert to the user selected
4633 thread and frame after handling the event and running any
4634 breakpoint commands. */
4635 scoped_restore_current_thread restore_thread;
4637 overlay_cache_invalid = 1;
4638 /* Flush target cache before starting to handle each event. Target
4639 was running and cache could be stale. This is just a heuristic.
4640 Running threads may modify target memory, but we don't get any
4641 event. */
4642 target_dcache_invalidate (current_program_space->aspace);
4644 scoped_restore save_exec_dir
4645 = make_scoped_restore (&execution_direction,
4646 target_execution_direction ());
4648 /* Allow targets to pause their resumed threads while we handle
4649 the event. */
4650 scoped_disable_commit_resumed disable_commit_resumed ("handling event");
4652 /* Is the current thread performing an inferior function call as part
4653 of a breakpoint condition evaluation? */
4654 bool in_cond_eval = (inferior_ptid != null_ptid
4655 && inferior_thread ()->control.in_cond_eval);
4657 /* If the thread is in the middle of the condition evaluation, wait for
4658 an event from the current thread. Otherwise, wait for an event from
4659 any thread. */
4660 ptid_t waiton_ptid = in_cond_eval ? inferior_ptid : minus_one_ptid;
4662 if (!do_target_wait (waiton_ptid, &ecs, TARGET_WNOHANG))
4664 infrun_debug_printf ("do_target_wait returned no event");
4665 disable_commit_resumed.reset_and_commit ();
4666 return;
4669 gdb_assert (ecs.ws.kind () != TARGET_WAITKIND_IGNORE);
4671 /* Switch to the inferior that generated the event, so we can do target calls.
4672 If the event was not associated with a ptid, switch to its target without selecting a thread. */
4673 if (ecs.ptid != null_ptid
4674 && ecs.ptid != minus_one_ptid)
4675 switch_to_inferior_no_thread (find_inferior_ptid (ecs.target, ecs.ptid));
4676 else
4677 switch_to_target_no_thread (ecs.target);
4679 if (debug_infrun)
4680 print_target_wait_results (minus_one_ptid, ecs.ptid, ecs.ws);
4682 /* If an error happens while handling the event, propagate GDB's
4683 knowledge of the executing state to the frontend/user running
4684 state. */
4685 ptid_t finish_ptid = !target_is_non_stop_p () ? minus_one_ptid : ecs.ptid;
4686 scoped_finish_thread_state finish_state (ecs.target, finish_ptid);
4688 /* These run before the scoped_restore_current_thread above, so that
4689 they still apply to the thread which has thrown the exception. */
4690 auto defer_bpstat_clear
4691 = make_scope_exit (bpstat_clear_actions);
4692 auto defer_delete_threads
4693 = make_scope_exit (delete_just_stopped_threads_infrun_breakpoints);
4695 int stop_id = get_stop_id ();
4697 /* Now figure out what to do with the result of the wait. */
4698 handle_inferior_event (&ecs);
4700 if (!ecs.wait_some_more)
4702 struct inferior *inf = find_inferior_ptid (ecs.target, ecs.ptid);
4703 bool should_stop = true;
4704 struct thread_info *thr = ecs.event_thread;
4706 delete_just_stopped_threads_infrun_breakpoints ();
4708 if (thr != nullptr && thr->thread_fsm () != nullptr)
4709 should_stop = thr->thread_fsm ()->should_stop (thr);
4711 if (!should_stop)
4713 keep_going (&ecs);
4715 else
4717 bool should_notify_stop = true;
4718 bool proceeded = false;
4720 /* If the thread that stopped just completed an inferior
4721 function call as part of a condition evaluation, then we
4722 don't want to stop all the other threads. */
4723 if (ecs.event_thread == nullptr
4724 || !ecs.event_thread->control.in_cond_eval)
4725 stop_all_threads_if_all_stop_mode ();
4727 clean_up_just_stopped_threads_fsms (&ecs);
4729 if (stop_id != get_stop_id ())
4731 /* If the stop-id has changed then a stop has already been
4732 presented to the user in handle_inferior_event, this is
4733 likely a failed inferior call. As the stop has already
4734 been announced then we should not notify again.
4736 Also, if the prompt state is not PROMPT_NEEDED then GDB
4737 will not be ready for user input after this function. */
4738 should_notify_stop = false;
4739 gdb_assert (current_ui->prompt_state == PROMPT_NEEDED);
4741 else if (thr != nullptr && thr->thread_fsm () != nullptr)
4742 should_notify_stop
4743 = thr->thread_fsm ()->should_notify_stop ();
4745 if (should_notify_stop)
4747 /* We may not find an inferior if this was a process exit. */
4748 if (inf == nullptr || inf->control.stop_soon == NO_STOP_QUIETLY)
4749 proceeded = normal_stop ();
4752 if (!proceeded && !in_cond_eval)
4754 inferior_event_handler (INF_EXEC_COMPLETE);
4755 cmd_done = 1;
4758 /* If we got a TARGET_WAITKIND_NO_RESUMED event, then the
4759 previously selected thread is gone. We have two
4760 choices - switch to no thread selected, or restore the
4761 previously selected thread (now exited). We chose the
4762 latter, just because that's what GDB used to do. After
4763 this, "info threads" says "The current thread <Thread
4764 ID 2> has terminated." instead of "No thread
4765 selected.". */
4766 if (!non_stop
4767 && cmd_done
4768 && ecs.ws.kind () != TARGET_WAITKIND_NO_RESUMED)
4769 restore_thread.dont_restore ();
4773 defer_delete_threads.release ();
4774 defer_bpstat_clear.release ();
4776 /* No error, don't finish the thread states yet. */
4777 finish_state.release ();
4779 disable_commit_resumed.reset_and_commit ();
4781 /* This scope is used to ensure that readline callbacks are
4782 reinstalled here. */
4785 /* Handling this event might have caused some inferiors to become prunable.
4786 For example, the exit of an inferior that was automatically added. Try
4787 to get rid of them. Keeping them around slows things down linearly.
4789 Note that this never removes the current inferior. Therefore, call this
4790 after RESTORE_THREAD went out of scope, in case the event inferior (which was
4791 temporarily made the current inferior) is meant to be deleted.
4793 Call this before all_uis_check_sync_execution_done, so that notifications about
4794 removed inferiors appear before the prompt. */
4795 prune_inferiors ();
4797 /* If a UI was in sync execution mode, and now isn't, restore its
4798 prompt (a synchronous execution command has finished, and we're
4799 ready for input). */
4800 all_uis_check_sync_execution_done ();
4802 if (cmd_done
4803 && exec_done_display_p
4804 && (inferior_ptid == null_ptid
4805 || inferior_thread ()->state != THREAD_RUNNING))
4806 gdb_printf (_("completed.\n"));
4809 /* See infrun.h. */
4811 void
4812 set_step_info (thread_info *tp, const frame_info_ptr &frame,
4813 struct symtab_and_line sal)
4815 /* This can be removed once this function no longer implicitly relies on the
4816 inferior_ptid value. */
4817 gdb_assert (inferior_ptid == tp->ptid);
4819 tp->control.step_frame_id = get_frame_id (frame);
4820 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
4822 tp->current_symtab = sal.symtab;
4823 tp->current_line = sal.line;
4825 infrun_debug_printf
4826 ("symtab = %s, line = %d, step_frame_id = %s, step_stack_frame_id = %s",
4827 tp->current_symtab != nullptr ? tp->current_symtab->filename : "<null>",
4828 tp->current_line,
4829 tp->control.step_frame_id.to_string ().c_str (),
4830 tp->control.step_stack_frame_id.to_string ().c_str ());
4833 /* Clear context switchable stepping state. */
4835 void
4836 init_thread_stepping_state (struct thread_info *tss)
4838 tss->stepped_breakpoint = 0;
4839 tss->stepping_over_breakpoint = 0;
4840 tss->stepping_over_watchpoint = 0;
4841 tss->step_after_step_resume_breakpoint = 0;
4844 /* See infrun.h. */
4846 void
4847 set_last_target_status (process_stratum_target *target, ptid_t ptid,
4848 const target_waitstatus &status)
4850 target_last_proc_target = target;
4851 target_last_wait_ptid = ptid;
4852 target_last_waitstatus = status;
4855 /* See infrun.h. */
4857 void
4858 get_last_target_status (process_stratum_target **target, ptid_t *ptid,
4859 target_waitstatus *status)
4861 if (target != nullptr)
4862 *target = target_last_proc_target;
4863 if (ptid != nullptr)
4864 *ptid = target_last_wait_ptid;
4865 if (status != nullptr)
4866 *status = target_last_waitstatus;
4869 /* See infrun.h. */
4871 void
4872 nullify_last_target_wait_ptid (void)
4874 target_last_proc_target = nullptr;
4875 target_last_wait_ptid = minus_one_ptid;
4876 target_last_waitstatus = {};
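/* Sketch of how callers typically read back the values recorded above;
   any of the out parameters may be nullptr if the caller does not care
   about it:

     process_stratum_target *last_target;
     ptid_t last_ptid;
     target_waitstatus last_status;

     get_last_target_status (&last_target, &last_ptid, &last_status);  */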
4879 /* Switch thread contexts. */
4881 static void
4882 context_switch (execution_control_state *ecs)
4884 if (ecs->ptid != inferior_ptid
4885 && (inferior_ptid == null_ptid
4886 || ecs->event_thread != inferior_thread ()))
4888 infrun_debug_printf ("Switching context from %s to %s",
4889 inferior_ptid.to_string ().c_str (),
4890 ecs->ptid.to_string ().c_str ());
4893 switch_to_thread (ecs->event_thread);
4896 /* If the target can't tell whether we've hit breakpoints
4897 (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
4898 check whether that could have been caused by a breakpoint. If so,
4899 adjust the PC, per gdbarch_decr_pc_after_break. */
4901 static void
4902 adjust_pc_after_break (struct thread_info *thread,
4903 const target_waitstatus &ws)
4905 struct regcache *regcache;
4906 struct gdbarch *gdbarch;
4907 CORE_ADDR breakpoint_pc, decr_pc;
4909 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
4910 we aren't, just return.
4912 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
4913 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
4914 implemented by software breakpoints should be handled through the normal
4915 breakpoint layer.
4917 NOTE drow/2004-01-31: On some targets, breakpoints may generate
4918 different signals (SIGILL or SIGEMT for instance), but it is less
4919 clear where the PC is pointing afterwards. It may not match
4920 gdbarch_decr_pc_after_break. I don't know any specific target that
4921 generates these signals at breakpoints (the code has been in GDB since at
4922 least 1992) so I cannot guess how to handle them here.
4924 In earlier versions of GDB, a target with
4925 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
4926 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
4927 target with both of these set in GDB history, and it seems unlikely to be
4928 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
4930 if (ws.kind () != TARGET_WAITKIND_STOPPED)
4931 return;
4933 if (ws.sig () != GDB_SIGNAL_TRAP)
4934 return;
4936 /* In reverse execution, when a breakpoint is hit, the instruction
4937 under it has already been de-executed. The reported PC always
4938 points at the breakpoint address, so adjusting it further would
4939 be wrong. E.g., consider this case on a decr_pc_after_break == 1
4940 architecture:
4942 B1 0x08000000 : INSN1
4943 B2 0x08000001 : INSN2
4944 0x08000002 : INSN3
4945 PC -> 0x08000003 : INSN4
4947 Say you're stopped at 0x08000003 as above. Reverse continuing
4948 from that point should hit B2 as below. Reading the PC when the
4949 SIGTRAP is reported should read 0x08000001 and INSN2 should have
4950 been de-executed already.
4952 B1 0x08000000 : INSN1
4953 B2 PC -> 0x08000001 : INSN2
4954 0x08000002 : INSN3
4955 0x08000003 : INSN4
4957 We can't apply the same logic as for forward execution, because
4958 we would wrongly adjust the PC to 0x08000000, since there's a
4959 breakpoint at PC - 1. We'd then report a hit on B1, although
4960 INSN1 hadn't been de-executed yet. Doing nothing is the correct
4961 behaviour. */
4962 if (execution_direction == EXEC_REVERSE)
4963 return;
4965 /* If the target can tell whether the thread hit a SW breakpoint,
4966 trust it. Targets that can tell also adjust the PC
4967 themselves. */
4968 if (target_supports_stopped_by_sw_breakpoint ())
4969 return;
4971 /* Note that relying on whether a breakpoint is planted in memory to
4972 determine this can fail. E.g., the breakpoint could have been
4973 removed since. Or the thread could have been told to step an
4974 instruction the size of a breakpoint instruction, and only
4975 _after_ was a breakpoint inserted at its address. */
4977 /* If this target does not decrement the PC after breakpoints, then
4978 we have nothing to do. */
4979 regcache = get_thread_regcache (thread);
4980 gdbarch = regcache->arch ();
4982 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
4983 if (decr_pc == 0)
4984 return;
4986 const address_space *aspace = thread->inf->aspace.get ();
4988 /* Find the location where (if we've hit a breakpoint) the
4989 breakpoint would be. */
4990 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
4992 /* If the target can't tell whether a software breakpoint triggered,
4993 fallback to figuring it out based on breakpoints we think were
4994 inserted in the target, and on whether the thread was stepped or
4995 continued. */
4997 /* Check whether there actually is a software breakpoint inserted at
4998 that location.
5000 If in non-stop mode, a race condition is possible where we've
5001 removed a breakpoint, but stop events for that breakpoint were
5002 already queued and arrive later. To suppress those spurious
5003 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
5004 and retire them after a number of stop events are reported. Note
5005 this is a heuristic and can thus get confused. The real fix is
5006 to get the "stopped by SW BP and needs adjustment" info out of
5007 the target/kernel (and thus never reach here; see above). */
5008 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
5009 || (target_is_non_stop_p ()
5010 && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
5012 std::optional<scoped_restore_tmpl<int>> restore_operation_disable;
5014 if (record_full_is_used ())
5015 restore_operation_disable.emplace
5016 (record_full_gdb_operation_disable_set ());
5018 /* When using hardware single-step, a SIGTRAP is reported for both
5019 a completed single-step and a software breakpoint. Need to
5020 differentiate between the two, as the latter needs adjusting
5021 but the former does not.
5023 The SIGTRAP can be due to a completed hardware single-step only if
5024 - we didn't insert software single-step breakpoints
5025 - this thread is currently being stepped
5027 If any of these events did not occur, we must have stopped due
5028 to hitting a software breakpoint, and have to back up to the
5029 breakpoint address.
5031 As a special case, we could have hardware single-stepped a
5032 software breakpoint. In this case (prev_pc == breakpoint_pc),
5033 we also need to back up to the breakpoint address. */
5035 if (thread_has_single_step_breakpoints_set (thread)
5036 || !currently_stepping (thread)
5037 || (thread->stepped_breakpoint
5038 && thread->prev_pc == breakpoint_pc))
5039 regcache_write_pc (regcache, breakpoint_pc);
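/* Worked example of the adjustment above, for forward execution on a
   target that does not adjust the PC itself: assume
   gdbarch_decr_pc_after_break is 1, a software breakpoint is inserted
   at 0x08000000, and the thread was continued (not stepped).  The
   SIGTRAP is then reported with PC == 0x08000001, breakpoint_pc is
   computed as 0x08000000, the breakpoint is found inserted there, and
   the PC is written back to 0x08000000 so that the stop is attributed
   to that breakpoint.  (Illustrative addresses only.)  */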
5043 static bool
5044 stepped_in_from (const frame_info_ptr &initial_frame, frame_id step_frame_id)
5046 frame_info_ptr frame = initial_frame;
5048 for (frame = get_prev_frame (frame);
5049 frame != nullptr;
5050 frame = get_prev_frame (frame))
5052 if (get_frame_id (frame) == step_frame_id)
5053 return true;
5055 if (get_frame_type (frame) != INLINE_FRAME)
5056 break;
5059 return false;
5062 /* Look for an inline frame that is marked for skip.
5063 If PREV_FRAME is TRUE start at the previous frame,
5064 otherwise start at the current frame. Stop at the
5065 first non-inline frame, or at the frame where the
5066 step started. */
5068 static bool
5069 inline_frame_is_marked_for_skip (bool prev_frame, struct thread_info *tp)
5071 frame_info_ptr frame = get_current_frame ();
5073 if (prev_frame)
5074 frame = get_prev_frame (frame);
5076 for (; frame != nullptr; frame = get_prev_frame (frame))
5078 const char *fn = nullptr;
5079 symtab_and_line sal;
5080 struct symbol *sym;
5082 if (get_frame_id (frame) == tp->control.step_frame_id)
5083 break;
5084 if (get_frame_type (frame) != INLINE_FRAME)
5085 break;
5087 sal = find_frame_sal (frame);
5088 sym = get_frame_function (frame);
5090 if (sym != nullptr)
5091 fn = sym->print_name ();
5093 if (sal.line != 0
5094 && function_name_is_marked_for_skip (fn, sal))
5095 return true;
5098 return false;
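/* A short usage sketch of the skip machinery this function serves
   (the function names are just examples): if foo is inlined into bar
   and the user has done

     (gdb) skip function foo

   then, while stepping through bar, the synthesized inline frames for
   foo are reported as marked for skip, and stepping does not appear
   to enter foo.  The walk above stops either at the frame where the
   step started (tp->control.step_frame_id) or at the first frame that
   is not an inline frame.  */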
5101 /* If the event thread has the stop requested flag set, pretend it
5102 stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
5103 target_stop). */
5105 static bool
5106 handle_stop_requested (struct execution_control_state *ecs)
5108 if (ecs->event_thread->stop_requested)
5110 ecs->ws.set_stopped (GDB_SIGNAL_0);
5111 handle_signal_stop (ecs);
5112 return true;
5114 return false;
5117 /* Auxiliary function that handles syscall entry/return events.
5118 It returns true if the inferior should keep going (and GDB
5119 should ignore the event), or false if the event deserves to be
5120 processed. */
5122 static bool
5123 handle_syscall_event (struct execution_control_state *ecs)
5125 struct regcache *regcache;
5126 int syscall_number;
5128 context_switch (ecs);
5130 regcache = get_thread_regcache (ecs->event_thread);
5131 syscall_number = ecs->ws.syscall_number ();
5132 ecs->event_thread->set_stop_pc (regcache_read_pc (regcache));
5134 if (catch_syscall_enabled ()
5135 && catching_syscall_number (syscall_number))
5137 infrun_debug_printf ("syscall number=%d", syscall_number);
5139 ecs->event_thread->control.stop_bpstat
5140 = bpstat_stop_status_nowatch (ecs->event_thread->inf->aspace.get (),
5141 ecs->event_thread->stop_pc (),
5142 ecs->event_thread, ecs->ws);
5144 if (handle_stop_requested (ecs))
5145 return false;
5147 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
5149 /* Catchpoint hit. */
5150 return false;
5154 if (handle_stop_requested (ecs))
5155 return false;
5157 /* If no catchpoint triggered for this, then keep going. */
5158 keep_going (ecs);
5160 return true;
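/* A minimal session sketch of when this path is taken (the syscall
   name is just an example, and depends on the target supporting
   syscall catchpoints):

     (gdb) catch syscall write
     (gdb) continue

   Each matching syscall then produces TARGET_WAITKIND_SYSCALL_ENTRY
   and TARGET_WAITKIND_SYSCALL_RETURN events; catching_syscall_number
   decides whether the bpstat set up above causes a stop, and
   otherwise the thread just keeps going.  */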
5163 /* Lazily fill in the execution_control_state's stop_func_* fields. */
5165 static void
5166 fill_in_stop_func (struct gdbarch *gdbarch,
5167 struct execution_control_state *ecs)
5169 if (!ecs->stop_func_filled_in)
5171 const block *block;
5172 const general_symbol_info *gsi;
5174 /* Don't care about return value; stop_func_start and stop_func_name
5175 will both be 0 if it doesn't work. */
5176 find_pc_partial_function_sym (ecs->event_thread->stop_pc (),
5177 &gsi,
5178 &ecs->stop_func_start,
5179 &ecs->stop_func_end,
5180 &block);
5181 ecs->stop_func_name = gsi == nullptr ? nullptr : gsi->print_name ();
5183 /* The call to find_pc_partial_function_sym, above, will set
5184 stop_func_start and stop_func_end to the start and end
5185 of the range containing the stop pc. If this range
5186 contains the entry pc for the block (which is always the
5187 case for contiguous blocks), advance stop_func_start past
5188 the function's start offset and entrypoint. Note that
5189 stop_func_start is NOT advanced when in a range of a
5190 non-contiguous block that does not contain the entry pc. */
5191 if (block != nullptr
5192 && ecs->stop_func_start <= block->entry_pc ()
5193 && block->entry_pc () < ecs->stop_func_end)
5195 ecs->stop_func_start
5196 += gdbarch_deprecated_function_start_offset (gdbarch);
5198 /* PowerPC functions have a Local Entry Point (LEP) and a Global
5199 Entry Point (GEP). There is only one Entry Point (GEP = LEP) for
5200 other architectures. */
5201 ecs->stop_func_alt_start = ecs->stop_func_start;
5203 if (gdbarch_skip_entrypoint_p (gdbarch))
5204 ecs->stop_func_start
5205 = gdbarch_skip_entrypoint (gdbarch, ecs->stop_func_start);
5208 ecs->stop_func_filled_in = 1;
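/* For example, assuming a powerpc64le ELFv2 target where a function's
   global entry point precedes its local entry point by the usual two
   TOC-setup instructions (8 bytes): for a function whose range starts
   at 0x10000000, the code above leaves

     stop_func_alt_start == 0x10000000   (the GEP)
     stop_func_start     == 0x10000008   (the LEP)

   whereas on architectures without a separate local entry point both
   fields hold the same address.  */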
5213 /* Return the STOP_SOON field of the inferior pointed at by ECS. */
5215 static enum stop_kind
5216 get_inferior_stop_soon (execution_control_state *ecs)
5218 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
5220 gdb_assert (inf != nullptr);
5221 return inf->control.stop_soon;
5224 /* Poll for one event out of the current target. Store the resulting
5225 waitstatus in WS, and return the event ptid. Does not block. */
5227 static ptid_t
5228 poll_one_curr_target (struct target_waitstatus *ws)
5230 ptid_t event_ptid;
5232 overlay_cache_invalid = 1;
5234 /* Flush target cache before starting to handle each event.
5235 Target was running and cache could be stale. This is just a
5236 heuristic. Running threads may modify target memory, but we
5237 don't get any event. */
5238 target_dcache_invalidate (current_program_space->aspace);
5240 event_ptid = target_wait (minus_one_ptid, ws, TARGET_WNOHANG);
5242 if (debug_infrun)
5243 print_target_wait_results (minus_one_ptid, event_ptid, *ws);
5245 return event_ptid;
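/* Note that passing TARGET_WNOHANG above makes target_wait a
   non-blocking poll: a target with nothing to report answers
   TARGET_WAITKIND_IGNORE, which is why wait_one below treats IGNORE
   as "nothing yet" and only then blocks in interruptible_select.  */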
5248 /* Wait for one event out of any target. */
5250 static wait_one_event
5251 wait_one ()
5253 while (1)
5255 for (inferior *inf : all_inferiors ())
5257 process_stratum_target *target = inf->process_target ();
5258 if (target == nullptr
5259 || !target->is_async_p ()
5260 || !target->threads_executing)
5261 continue;
5263 switch_to_inferior_no_thread (inf);
5265 wait_one_event event;
5266 event.target = target;
5267 event.ptid = poll_one_curr_target (&event.ws);
5269 if (event.ws.kind () == TARGET_WAITKIND_NO_RESUMED)
5271 /* If nothing is resumed, remove the target from the
5272 event loop. */
5273 target_async (false);
5275 else if (event.ws.kind () != TARGET_WAITKIND_IGNORE)
5276 return event;
5279 /* Block waiting for some event. */
5281 fd_set readfds;
5282 int nfds = 0;
5284 FD_ZERO (&readfds);
5286 for (inferior *inf : all_inferiors ())
5288 process_stratum_target *target = inf->process_target ();
5289 if (target == nullptr
5290 || !target->is_async_p ()
5291 || !target->threads_executing)
5292 continue;
5294 int fd = target->async_wait_fd ();
5295 FD_SET (fd, &readfds);
5296 if (nfds <= fd)
5297 nfds = fd + 1;
5300 if (nfds == 0)
5302 /* No waitable targets left. All must be stopped. */
5303 infrun_debug_printf ("no waitable targets left");
5305 target_waitstatus ws;
5306 ws.set_no_resumed ();
5307 return {nullptr, minus_one_ptid, std::move (ws)};
5310 QUIT;
5312 int numfds = interruptible_select (nfds, &readfds, 0, nullptr, 0);
5313 if (numfds < 0)
5315 if (errno == EINTR)
5316 continue;
5317 else
5318 perror_with_name ("interruptible_select");
5323 /* Save the thread's event and stop reason to process it later. */
5325 static void
5326 save_waitstatus (struct thread_info *tp, const target_waitstatus &ws)
5328 infrun_debug_printf ("saving status %s for %s",
5329 ws.to_string ().c_str (),
5330 tp->ptid.to_string ().c_str ());
5332 /* Record for later. */
5333 tp->set_pending_waitstatus (ws);
5335 if (ws.kind () == TARGET_WAITKIND_STOPPED
5336 && ws.sig () == GDB_SIGNAL_TRAP)
5338 struct regcache *regcache = get_thread_regcache (tp);
5339 const address_space *aspace = tp->inf->aspace.get ();
5340 CORE_ADDR pc = regcache_read_pc (regcache);
5342 adjust_pc_after_break (tp, tp->pending_waitstatus ());
5344 scoped_restore_current_thread restore_thread;
5345 switch_to_thread (tp);
5347 if (target_stopped_by_watchpoint ())
5348 tp->set_stop_reason (TARGET_STOPPED_BY_WATCHPOINT);
5349 else if (target_supports_stopped_by_sw_breakpoint ()
5350 && target_stopped_by_sw_breakpoint ())
5351 tp->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT);
5352 else if (target_supports_stopped_by_hw_breakpoint ()
5353 && target_stopped_by_hw_breakpoint ())
5354 tp->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT);
5355 else if (!target_supports_stopped_by_hw_breakpoint ()
5356 && hardware_breakpoint_inserted_here_p (aspace, pc))
5357 tp->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT);
5358 else if (!target_supports_stopped_by_sw_breakpoint ()
5359 && software_breakpoint_inserted_here_p (aspace, pc))
5360 tp->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT);
5361 else if (!thread_has_single_step_breakpoints_set (tp)
5362 && currently_stepping (tp))
5363 tp->set_stop_reason (TARGET_STOPPED_BY_SINGLE_STEP);
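/* The ordering of the checks above matters: stop reasons explicitly
   reported by the target are trusted first; only when the target
   cannot report "stopped by breakpoint" do we fall back to consulting
   our own breakpoint tables at PC, and finally to assuming a
   completed single-step for a thread we were stepping.  */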
5367 /* Mark the non-executing threads accordingly. In all-stop, all
5368 threads of all processes are stopped when we get any event
5369 reported. In non-stop mode, only the event thread stops. */
5371 static void
5372 mark_non_executing_threads (process_stratum_target *target,
5373 ptid_t event_ptid,
5374 const target_waitstatus &ws)
5376 ptid_t mark_ptid;
5378 if (!target_is_non_stop_p ())
5379 mark_ptid = minus_one_ptid;
5380 else if (ws.kind () == TARGET_WAITKIND_SIGNALLED
5381 || ws.kind () == TARGET_WAITKIND_EXITED)
5383 /* If we're handling a process exit in non-stop mode, even
5384 though threads haven't been deleted yet, one would think
5385 that there is nothing to do, as threads of the dead process
5386 will be soon deleted, and threads of any other process were
5387 left running. However, on some targets, threads survive a
5388 process exit event. E.g., for the "checkpoint" command,
5389 when the current checkpoint/fork exits, linux-fork.c
5390 automatically switches to another fork from within
5391 target_mourn_inferior, by associating the same
5392 inferior/thread to another fork. We haven't mourned yet at
5393 this point, but we must mark any threads left in the
5394 process as not-executing so that finish_thread_state marks
5395 them stopped (in the user's perspective) if/when we present
5396 the stop to the user. */
5397 mark_ptid = ptid_t (event_ptid.pid ());
5399 else
5400 mark_ptid = event_ptid;
5402 set_executing (target, mark_ptid, false);
5404 /* Likewise the resumed flag. */
5405 set_resumed (target, mark_ptid, false);
5408 /* Handle one event after stopping threads. If the eventing thread
5409 reports back any interesting event, we leave it pending. If the
5410 eventing thread was in the middle of a displaced step, we
5411 cancel/finish it, and unless the thread's inferior is being
5412 detached, put the thread back in the step-over chain. Returns true
5413 if there are no resumed threads left in the target (thus there's no
5414 point in waiting further), false otherwise. */
5416 static bool
5417 handle_one (const wait_one_event &event)
5419 infrun_debug_printf
5420 ("%s %s", event.ws.to_string ().c_str (),
5421 event.ptid.to_string ().c_str ());
5423 if (event.ws.kind () == TARGET_WAITKIND_NO_RESUMED)
5425 /* All resumed threads exited. */
5426 return true;
5428 else if (event.ws.kind () == TARGET_WAITKIND_THREAD_EXITED
5429 || event.ws.kind () == TARGET_WAITKIND_EXITED
5430 || event.ws.kind () == TARGET_WAITKIND_SIGNALLED)
5432 /* One thread/process exited/signalled. */
5434 thread_info *t = nullptr;
5436 /* The target may have reported just a pid. If so, try
5437 the first non-exited thread. */
5438 if (event.ptid.is_pid ())
5440 int pid = event.ptid.pid ();
5441 inferior *inf = find_inferior_pid (event.target, pid);
5442 for (thread_info *tp : inf->non_exited_threads ())
5444 t = tp;
5445 break;
5448 /* If there is no available thread, the event would
5449 have to be appended to a per-inferior event list,
5450 which does not exist (and if it did, we'd have
5451 to adjust the run control commands to be able to
5452 resume such an inferior). We assert here instead
5453 of going into an infinite loop. */
5454 gdb_assert (t != nullptr);
5456 infrun_debug_printf
5457 ("using %s", t->ptid.to_string ().c_str ());
5459 else
5461 t = event.target->find_thread (event.ptid);
5462 /* Check if this is the first time we see this thread.
5463 Don't bother adding if it individually exited. */
5464 if (t == nullptr
5465 && event.ws.kind () != TARGET_WAITKIND_THREAD_EXITED)
5466 t = add_thread (event.target, event.ptid);
5469 if (t != nullptr)
5471 /* Set the threads as non-executing to avoid
5472 another stop attempt on them. */
5473 switch_to_thread_no_regs (t);
5474 mark_non_executing_threads (event.target, event.ptid,
5475 event.ws);
5476 save_waitstatus (t, event.ws);
5477 t->stop_requested = false;
5479 if (event.ws.kind () == TARGET_WAITKIND_THREAD_EXITED)
5481 if (displaced_step_finish (t, event.ws)
5482 != DISPLACED_STEP_FINISH_STATUS_OK)
5484 gdb_assert_not_reached ("displaced_step_finish on "
5485 "exited thread failed");
5490 else
5492 thread_info *t = event.target->find_thread (event.ptid);
5493 if (t == nullptr)
5494 t = add_thread (event.target, event.ptid);
5496 t->stop_requested = 0;
5497 t->set_executing (false);
5498 t->set_resumed (false);
5499 t->control.may_range_step = 0;
5501 /* This may be the first time we see the inferior report
5502 a stop. */
5503 if (t->inf->needs_setup)
5505 switch_to_thread_no_regs (t);
5506 setup_inferior (0);
5509 if (event.ws.kind () == TARGET_WAITKIND_STOPPED
5510 && event.ws.sig () == GDB_SIGNAL_0)
5512 /* We caught the event that we intended to catch, so
5513 there's no event to save as pending. */
5515 if (displaced_step_finish (t, event.ws)
5516 == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED)
5518 /* Add it back to the step-over queue. */
5519 infrun_debug_printf
5520 ("displaced-step of %s canceled",
5521 t->ptid.to_string ().c_str ());
5523 t->control.trap_expected = 0;
5524 if (!t->inf->detaching)
5525 global_thread_step_over_chain_enqueue (t);
5528 else
5530 struct regcache *regcache;
5532 infrun_debug_printf
5533 ("target_wait %s, saving status for %s",
5534 event.ws.to_string ().c_str (),
5535 t->ptid.to_string ().c_str ());
5537 /* Record for later. */
5538 save_waitstatus (t, event.ws);
5540 if (displaced_step_finish (t, event.ws)
5541 == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED)
5543 /* Add it back to the step-over queue. */
5544 t->control.trap_expected = 0;
5545 if (!t->inf->detaching)
5546 global_thread_step_over_chain_enqueue (t);
5549 regcache = get_thread_regcache (t);
5550 t->set_stop_pc (regcache_read_pc (regcache));
5552 infrun_debug_printf ("saved stop_pc=%s for %s "
5553 "(currently_stepping=%d)",
5554 paddress (current_inferior ()->arch (),
5555 t->stop_pc ()),
5556 t->ptid.to_string ().c_str (),
5557 currently_stepping (t));
5561 return false;
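/* For instance, if a thread was in the middle of a displaced step
   over a breakpoint when our explicit stop request interrupted it,
   displaced_step_finish above reports
   DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED and the thread is pushed
   back on the global step-over chain, so that start_step_over can
   retry the step once threads are restarted.  */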
5564 /* Helper for stop_all_threads. wait_one waits for events until it
5565 sees a TARGET_WAITKIND_NO_RESUMED event. When it sees one, it
5566 disables target_async for the target to stop waiting for events
5567 from it.  TARGET_WAITKIND_NO_RESUMED can be delayed, though.
5568 Consider debugging against gdbserver:
5570 #1 - Threads 1-5 are running, and thread 1 hits a breakpoint.
5572 #2 - gdb processes the breakpoint hit for thread 1, stops all
5573 threads, and steps thread 1 over the breakpoint.  While
5574 stopping threads, some other threads reported interesting
5575 events, which were left pending in the thread's objects
5576 (infrun's queue).
5578 #3 - Thread 1 exits (it stepped an exit syscall), and gdbserver
5579 reports the thread exit for thread 1.  The event ends up in
5580 remote's stop reply queue.
5582 #4 - That was the last resumed thread, so gdbserver reports
5583 no-resumed, and that event also ends up in remote's stop
5584 reply queue, queued after the thread exit from #3.
5586 #5 - gdb processes the thread exit event, which finishes the
5587 step-over, and so gdb restarts all threads (threads with
5588 pending events are left marked resumed, but aren't set
5589 executing).  The no-resumed event is still left pending in
5590 the remote stop reply queue.
5592 #6 - Since there are now resumed threads with pending breakpoint
5593 hits, gdb picks one at random to process next.
5595 #7 - gdb picks the breakpoint hit for thread 2 this time, and that
5596 breakpoint also needs to be stepped over, so gdb stops all
5597 threads again.
5599 #8 - stop_all_threads counts the number of expected stops and calls
5600 wait_one once for each.
5602 #9 - The first wait_one call collects the no-resumed event from #4
5603 above.
5605 #10 - Seeing the no-resumed event, wait_one disables target async
5606 for the remote target, to stop waiting for events from it.
5607 wait_one from here on always returns no-resumed directly
5608 without reaching the target.
5610 #11 - stop_all_threads still hasn't seen all the stops it expects,
5611 so it does another pass.
5613 #12 - Since the remote target is not async (disabled in #10),
5614 wait_one doesn't wait on it, so it won't see the expected
5615 stops, and instead returns no-resumed directly.
5617 #13 - stop_all_threads still hasn't seen all the stops, so it
5618 does another pass.  goto #12, looping forever.
5620 To handle this, we explicitly (re-)enable target async on all
5621 targets that can async every time stop_all_threads goes to wait
5622 for the expected stops. */
5624 static void
5625 reenable_target_async ()
5627 for (inferior *inf : all_inferiors ())
5629 process_stratum_target *target = inf->process_target ();
5630 if (target != nullptr
5631 && target->threads_executing
5632 && target->can_async_p ()
5633 && !target->is_async_p ())
5635 switch_to_inferior_no_thread (inf);
5636 target_async (1);
5641 /* See infrun.h. */
5643 void
5644 stop_all_threads (const char *reason, inferior *inf)
5646 /* We may need multiple passes to discover all threads. */
5647 int pass;
5648 int iterations = 0;
5650 gdb_assert (exists_non_stop_target ());
5652 INFRUN_SCOPED_DEBUG_START_END ("reason=%s, inf=%d", reason,
5653 inf != nullptr ? inf->num : -1);
5655 infrun_debug_show_threads ("non-exited threads",
5656 all_non_exited_threads ());
5658 scoped_restore_current_thread restore_thread;
5660 /* Enable thread events on relevant targets. */
5661 for (auto *target : all_non_exited_process_targets ())
5663 if (inf != nullptr && inf->process_target () != target)
5664 continue;
5666 switch_to_target_no_thread (target);
5667 target_thread_events (true);
5670 SCOPE_EXIT
5672 /* Disable thread events on relevant targets. */
5673 for (auto *target : all_non_exited_process_targets ())
5675 if (inf != nullptr && inf->process_target () != target)
5676 continue;
5678 switch_to_target_no_thread (target);
5679 target_thread_events (false);
5682 /* Use debug_prefixed_printf directly to get a meaningful function
5683 name. */
5684 if (debug_infrun)
5685 debug_prefixed_printf ("infrun", "stop_all_threads", "done");
5688 /* Request threads to stop, and then wait for the stops. Because
5689 threads we already know about can spawn more threads while we're
5690 trying to stop them, and we only learn about new threads when we
5691 update the thread list, do this in a loop, and keep iterating
5692 until two passes find no threads that need to be stopped. */
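/* For instance: suppose threads T1 and T2 are executing and T2 is
   about to clone a new thread T3.  The first iteration requests stops
   for T1 and T2 and waits for them; the next iteration's
   update_thread_list then discovers T3, which still needs to be
   stopped, so more waits are needed and, if this happens on the
   second pass, PASS is reset so that we again require two consecutive
   passes that find nothing left to stop.  */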
5693 for (pass = 0; pass < 2; pass++, iterations++)
5695 infrun_debug_printf ("pass=%d, iterations=%d", pass, iterations);
5696 while (1)
5698 int waits_needed = 0;
5700 for (auto *target : all_non_exited_process_targets ())
5702 if (inf != nullptr && inf->process_target () != target)
5703 continue;
5705 switch_to_target_no_thread (target);
5706 update_thread_list ();
5709 /* Go through all threads looking for threads that we need
5710 to tell the target to stop. */
5711 for (thread_info *t : all_non_exited_threads ())
5713 if (inf != nullptr && t->inf != inf)
5714 continue;
5716 /* For a single-target setting with an all-stop target,
5717 we would not even arrive here. For a multi-target
5718 setting, until GDB is able to handle a mixture of
5719 all-stop and non-stop targets, simply skip all-stop
5720 targets' threads. This should be fine due to the
5721 protection of 'check_multi_target_resumption'. */
5723 switch_to_thread_no_regs (t);
5724 if (!target_is_non_stop_p ())
5725 continue;
5727 if (t->executing ())
5729 /* If already stopping, don't request a stop again.
5730 We just haven't seen the notification yet. */
5731 if (!t->stop_requested)
5733 infrun_debug_printf (" %s executing, need stop",
5734 t->ptid.to_string ().c_str ());
5735 target_stop (t->ptid);
5736 t->stop_requested = 1;
5738 else
5740 infrun_debug_printf (" %s executing, already stopping",
5741 t->ptid.to_string ().c_str ());
5744 if (t->stop_requested)
5745 waits_needed++;
5747 else
5749 infrun_debug_printf (" %s not executing",
5750 t->ptid.to_string ().c_str ());
5752 /* The thread may not be executing, but still be
5753 resumed with a pending status to process. */
5754 t->set_resumed (false);
5758 if (waits_needed == 0)
5759 break;
5761 /* If we find new threads on the second iteration, restart
5762 over. We want to see two iterations in a row with all
5763 threads stopped. */
5764 if (pass > 0)
5765 pass = -1;
5767 reenable_target_async ();
5769 for (int i = 0; i < waits_needed; i++)
5771 wait_one_event event = wait_one ();
5772 if (handle_one (event))
5773 break;
5779 /* Handle a TARGET_WAITKIND_NO_RESUMED event. Return true if we
5780 handled the event and should continue waiting. Return false if we
5781 should stop and report the event to the user. */
5783 static bool
5784 handle_no_resumed (struct execution_control_state *ecs)
5786 if (target_can_async_p ())
5788 bool any_sync = false;
5790 for (ui *ui : all_uis ())
5792 if (ui->prompt_state == PROMPT_BLOCKED)
5794 any_sync = true;
5795 break;
5798 if (!any_sync)
5800 /* There were no unwaited-for children left in the target, but
5801 we're not synchronously waiting for events either.  Just
5802 ignore. */
5804 infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED (ignoring: bg)");
5805 prepare_to_wait (ecs);
5806 return true;
5810 /* Otherwise, if we were running a synchronous execution command, we
5811 may need to cancel it and give the user back the terminal.
5813 In non-stop mode, the target can't tell whether we've already
5814 consumed previous stop events, so it can end up sending us a
5815 no-resumed event like so:
5817 #0 - thread 1 is left stopped
5819 #1 - thread 2 is resumed and hits breakpoint
5820 -> TARGET_WAITKIND_STOPPED
5822 #2 - thread 3 is resumed and exits
5823 this is the last resumed thread, so
5824 -> TARGET_WAITKIND_NO_RESUMED
5826 #3 - gdb processes stop for thread 2 and decides to re-resume
5829 #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
5830 thread 2 is now resumed, so the event should be ignored.
5832 IOW, if the stop for thread 2 doesn't end a foreground command,
5833 then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
5834 event. But it could be that the event meant that thread 2 itself
5835 (or whatever other thread was the last resumed thread) exited.
5837 To address this we refresh the thread list and check whether we
5838 have resumed threads _now_. In the example above, this removes
5839 thread 3 from the thread list. If thread 2 was re-resumed, we
5840 ignore this event. If we find no thread resumed, then we cancel
5841 the synchronous command and show "no unwaited-for " to the
5842 user. */
5844 inferior *curr_inf = current_inferior ();
5846 scoped_restore_current_thread restore_thread;
5847 update_thread_list ();
5849 /* If:
5851 - the current target has no thread executing, and
5852 - the current inferior is native, and
5853 - the current inferior is the one which has the terminal, and
5854 - we did nothing,
5856 then a Ctrl-C from this point on would remain stuck in the
5857 kernel, until a thread resumes and dequeues it. That would
5858 result in the GDB CLI not reacting to Ctrl-C, not able to
5859 interrupt the program. To address this, if the current inferior
5860 no longer has any thread executing, we give the terminal to some
5861 other inferior that has at least one thread executing. */
5862 bool swap_terminal = true;
5864 /* Whether to ignore this TARGET_WAITKIND_NO_RESUMED event, or
5865 whether to report it to the user. */
5866 bool ignore_event = false;
5868 for (thread_info *thread : all_non_exited_threads ())
5870 if (swap_terminal && thread->executing ())
5872 if (thread->inf != curr_inf)
5874 target_terminal::ours ();
5876 switch_to_thread (thread);
5877 target_terminal::inferior ();
5879 swap_terminal = false;
5882 if (!ignore_event && thread->resumed ())
5884 /* Either there were no unwaited-for children left in the
5885 target at some point, but there are now, or some target
5886 other than the eventing one has unwaited-for children
5887 left. Just ignore. */
5888 infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED "
5889 "(ignoring: found resumed)");
5891 ignore_event = true;
5894 if (ignore_event && !swap_terminal)
5895 break;
5898 if (ignore_event)
5900 switch_to_inferior_no_thread (curr_inf);
5901 prepare_to_wait (ecs);
5902 return true;
5905 /* Go ahead and report the event. */
5906 return false;
5909 /* Handle a TARGET_WAITKIND_THREAD_EXITED event. Return true if we
5910 handled the event and should continue waiting. Return false if we
5911 should stop and report the event to the user. */
5913 static bool
5914 handle_thread_exited (execution_control_state *ecs)
5916 context_switch (ecs);
5918 /* Clear these so we don't re-start the thread stepping over a
5919 breakpoint/watchpoint. */
5920 ecs->event_thread->stepping_over_breakpoint = 0;
5921 ecs->event_thread->stepping_over_watchpoint = 0;
5923 /* If the thread had an FSM, then abort the command. But only after
5924 finishing the step over, as in non-stop mode, aborting this
5925 thread's command should not interfere with other threads. We
5926 must check this before finish_step_over, however, which may
5927 update the thread list and delete the event thread. */
5928 bool abort_cmd = (ecs->event_thread->thread_fsm () != nullptr);
5930 /* Mark the thread exited right now, because finish_step_over may
5931 update the thread list and that may delete the thread silently
5932 (depending on target), while we always want to emit the "[Thread
5933 ... exited]" notification. Don't actually delete the thread yet,
5934 because we need to pass its pointer down to finish_step_over. */
5935 set_thread_exited (ecs->event_thread);
5937 /* Maybe the thread was doing a step-over, if so release
5938 resources and start any further pending step-overs.
5940 If we are on a non-stop target and the thread was doing an
5941 in-line step, this also restarts the other threads. */
5942 int ret = finish_step_over (ecs);
5944 /* finish_step_over returns true if it moves ecs' wait status
5945 back into the thread, so that we go handle another pending
5946 event before this one. But we know it never does that if
5947 the event thread has exited. */
5948 gdb_assert (ret == 0);
5950 if (abort_cmd)
5952 /* We're stopping for the thread exit event. Switch to the
5953 event thread again, as finish_step_over may have switched
5954 threads. */
5955 switch_to_thread (ecs->event_thread);
5956 ecs->event_thread = nullptr;
5957 return false;
5960 /* If finish_step_over started a new in-line step-over, don't
5961 try to restart anything else. */
5962 if (step_over_info_valid_p ())
5964 delete_thread (ecs->event_thread);
5965 return true;
5968 /* Maybe we are on an all-stop target and we got this event
5969 while doing a step-like command on another thread. If so,
5970 go back to doing that. If this thread was stepping,
5971 switch_back_to_stepped_thread will consider that the thread
5972 was interrupted mid-step and will try to keep stepping it.  We
5973 don't want that; the thread is gone.  So clear the proceed
5974 status so it doesn't do that. */
5975 clear_proceed_status_thread (ecs->event_thread);
5976 if (switch_back_to_stepped_thread (ecs))
5978 delete_thread (ecs->event_thread);
5979 return true;
5982 inferior *inf = ecs->event_thread->inf;
5983 bool slock_applies = schedlock_applies (ecs->event_thread);
5985 delete_thread (ecs->event_thread);
5986 ecs->event_thread = nullptr;
5988 /* Continue handling the event as if we had gotten a
5989 TARGET_WAITKIND_NO_RESUMED. */
5990 auto handle_as_no_resumed = [ecs] ()
5992 /* handle_no_resumed doesn't really look at the event kind, but
5993 normal_stop does. */
5994 ecs->ws.set_no_resumed ();
5995 ecs->event_thread = nullptr;
5996 ecs->ptid = minus_one_ptid;
5998 /* Re-record the last target status. */
5999 set_last_target_status (ecs->target, ecs->ptid, ecs->ws);
6001 return handle_no_resumed (ecs);
6004 /* If we are on an all-stop target, the target has stopped all
6005 threads to report the event. We don't actually want to
6006 stop, so restart the threads. */
6007 if (!target_is_non_stop_p ())
6009 if (slock_applies)
6011 /* Since the target is !non-stop, everything is stopped
6012 at this point, and we can't assume we'll get further
6013 events until we resume the target again.  Handle this
6014 event as if it were a TARGET_WAITKIND_NO_RESUMED.  Note
6015 this refreshes the thread list and checks whether there
6016 are other resumed threads before deciding whether to
6017 print "no-unwaited-for left". This is important because
6018 the user could have done:
6020 (gdb) set scheduler-locking on
6021 (gdb) thread 1
6022 (gdb) c&
6023 (gdb) thread 2
6024 (gdb) c
6026 ... and only one of the threads exited. */
6027 return handle_as_no_resumed ();
6029 else
6031 /* Switch to the first non-exited thread we can find, and
6032 resume. */
6033 auto range = inf->non_exited_threads ();
6034 if (range.begin () == range.end ())
6036 /* Looks like the target reported a
6037 TARGET_WAITKIND_THREAD_EXITED for its last known
6038 thread. */
6039 return handle_as_no_resumed ();
6041 thread_info *non_exited_thread = *range.begin ();
6042 switch_to_thread (non_exited_thread);
6043 insert_breakpoints ();
6044 resume (GDB_SIGNAL_0);
6048 prepare_to_wait (ecs);
6049 return true;
6052 /* Given an execution control state that has been freshly filled in by
6053 an event from the inferior, figure out what it means and take
6054 appropriate action.
6056 The alternatives are:
6058 1) stop_waiting and return; to really stop and return to the
6059 debugger.
6061 2) keep_going and return; to wait for the next event (set
6062 ecs->event_thread->stepping_over_breakpoint to 1 to single step
6063 once). */
6065 static void
6066 handle_inferior_event (struct execution_control_state *ecs)
6068 /* Make sure that all temporary struct value objects that were
6069 created during the handling of the event get deleted at the
6070 end. */
6071 scoped_value_mark free_values;
6073 infrun_debug_printf ("%s", ecs->ws.to_string ().c_str ());
6075 if (ecs->ws.kind () == TARGET_WAITKIND_IGNORE)
6077 /* We had an event in the inferior, but we are not interested in
6078 handling it at this level. The lower layers have already
6079 done what needs to be done, if anything.
6081 One of the possible circumstances for this is when the
6082 inferior produces output for the console. The inferior has
6083 not stopped, and we are ignoring the event. Another possible
6084 circumstance is any event which the lower level knows will be
6085 reported multiple times without an intervening resume. */
6086 prepare_to_wait (ecs);
6087 return;
6090 if (ecs->ws.kind () == TARGET_WAITKIND_NO_RESUMED
6091 && handle_no_resumed (ecs))
6092 return;
6094 /* Cache the last target/ptid/waitstatus. */
6095 set_last_target_status (ecs->target, ecs->ptid, ecs->ws);
6097 /* Always clear state belonging to the previous time we stopped. */
6098 stop_stack_dummy = STOP_NONE;
6100 if (ecs->ws.kind () == TARGET_WAITKIND_NO_RESUMED)
6102 /* No unwaited-for children left. IOW, all resumed children
6103 have exited. */
6104 stop_waiting (ecs);
6105 return;
6108 if (ecs->ws.kind () != TARGET_WAITKIND_EXITED
6109 && ecs->ws.kind () != TARGET_WAITKIND_SIGNALLED)
6111 ecs->event_thread = ecs->target->find_thread (ecs->ptid);
6112 /* If it's a new thread, add it to the thread database. */
6113 if (ecs->event_thread == nullptr)
6114 ecs->event_thread = add_thread (ecs->target, ecs->ptid);
6116 /* Disable range stepping. If the next step request could use a
6117 range, this will end up re-enabled then. */
6118 ecs->event_thread->control.may_range_step = 0;
6121 /* Dependent on valid ECS->EVENT_THREAD. */
6122 adjust_pc_after_break (ecs->event_thread, ecs->ws);
6124 /* Dependent on the current PC value modified by adjust_pc_after_break. */
6125 reinit_frame_cache ();
6127 breakpoint_retire_moribund ();
6129 /* First, distinguish signals caused by the debugger from signals
6130 that have to do with the program's own actions. Note that
6131 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
6132 on the operating system version. Here we detect when a SIGILL or
6133 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
6134 something similar for SIGSEGV, since a SIGSEGV will be generated
6135 when we're trying to execute a breakpoint instruction on a
6136 non-executable stack. This happens for call dummy breakpoints
6137 for architectures like SPARC that place call dummies on the
6138 stack. */
6139 if (ecs->ws.kind () == TARGET_WAITKIND_STOPPED
6140 && (ecs->ws.sig () == GDB_SIGNAL_ILL
6141 || ecs->ws.sig () == GDB_SIGNAL_SEGV
6142 || ecs->ws.sig () == GDB_SIGNAL_EMT))
6144 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
6146 if (breakpoint_inserted_here_p (ecs->event_thread->inf->aspace.get (),
6147 regcache_read_pc (regcache)))
6149 infrun_debug_printf ("Treating signal as SIGTRAP");
6150 ecs->ws.set_stopped (GDB_SIGNAL_TRAP);
6154 mark_non_executing_threads (ecs->target, ecs->ptid, ecs->ws);
6156 switch (ecs->ws.kind ())
6158 case TARGET_WAITKIND_LOADED:
6160 context_switch (ecs);
6161 /* Ignore gracefully during startup of the inferior, as it might
6162 be the shell which has just loaded some objects; otherwise,
6163 add the symbols for the newly loaded objects.  Also ignore at
6164 the beginning of an attach or remote session; we will query
6165 the full list of libraries once the connection is
6166 established. */
6168 stop_kind stop_soon = get_inferior_stop_soon (ecs);
6169 if (stop_soon == NO_STOP_QUIETLY)
6171 struct regcache *regcache;
6173 regcache = get_thread_regcache (ecs->event_thread);
6175 handle_solib_event ();
6177 ecs->event_thread->set_stop_pc (regcache_read_pc (regcache));
6178 address_space *aspace = ecs->event_thread->inf->aspace.get ();
6179 ecs->event_thread->control.stop_bpstat
6180 = bpstat_stop_status_nowatch (aspace,
6181 ecs->event_thread->stop_pc (),
6182 ecs->event_thread, ecs->ws);
6184 if (handle_stop_requested (ecs))
6185 return;
6187 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
6189 /* A catchpoint triggered. */
6190 process_event_stop_test (ecs);
6191 return;
6194 /* If requested, stop when the dynamic linker notifies
6195 gdb of events. This allows the user to get control
6196 and place breakpoints in initializer routines for
6197 dynamically loaded objects (among other things). */
6198 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6199 if (stop_on_solib_events)
6201 /* Make sure we print "Stopped due to solib-event" in
6202 normal_stop. */
6203 stop_print_frame = true;
6205 stop_waiting (ecs);
6206 return;
6210 /* If we are skipping through a shell, or through shared library
6211 loading that we aren't interested in, resume the program. If
6212 we're running the program normally, also resume. */
6213 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
6215 /* Loading of shared libraries might have changed breakpoint
6216 addresses. Make sure new breakpoints are inserted. */
6217 if (stop_soon == NO_STOP_QUIETLY)
6218 insert_breakpoints ();
6219 resume (GDB_SIGNAL_0);
6220 prepare_to_wait (ecs);
6221 return;
6224 /* But stop if we're attaching or setting up a remote
6225 connection. */
6226 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
6227 || stop_soon == STOP_QUIETLY_REMOTE)
6229 infrun_debug_printf ("quietly stopped");
6230 stop_waiting (ecs);
6231 return;
6234 internal_error (_("unhandled stop_soon: %d"), (int) stop_soon);
6237 case TARGET_WAITKIND_SPURIOUS:
6238 if (handle_stop_requested (ecs))
6239 return;
6240 context_switch (ecs);
6241 resume (GDB_SIGNAL_0);
6242 prepare_to_wait (ecs);
6243 return;
6245 case TARGET_WAITKIND_THREAD_CREATED:
6246 if (handle_stop_requested (ecs))
6247 return;
6248 context_switch (ecs);
6249 if (!switch_back_to_stepped_thread (ecs))
6250 keep_going (ecs);
6251 return;
6253 case TARGET_WAITKIND_THREAD_EXITED:
6254 if (handle_thread_exited (ecs))
6255 return;
6256 stop_waiting (ecs);
6257 break;
6259 case TARGET_WAITKIND_EXITED:
6260 case TARGET_WAITKIND_SIGNALLED:
6262 /* Depending on the system, ecs->ptid may point to a thread or
6263 to a process. On some targets, target_mourn_inferior may
6264 need to have access to the just-exited thread. That is the
6265 case of GNU/Linux's "checkpoint" support, for example.
6266 Call the switch_to_xxx routine as appropriate. */
6267 thread_info *thr = ecs->target->find_thread (ecs->ptid);
6268 if (thr != nullptr)
6269 switch_to_thread (thr);
6270 else
6272 inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
6273 switch_to_inferior_no_thread (inf);
6276 handle_vfork_child_exec_or_exit (0);
6277 target_terminal::ours (); /* Must do this before mourn anyway. */
6279 /* Clear any previous state of convenience variables. */
6280 clear_exit_convenience_vars ();
6282 if (ecs->ws.kind () == TARGET_WAITKIND_EXITED)
6284 /* Record the exit code in the convenience variable $_exitcode, so
6285 that the user can inspect this again later. */
6286 set_internalvar_integer (lookup_internalvar ("_exitcode"),
6287 (LONGEST) ecs->ws.exit_status ());
6289 /* Also record this in the inferior itself. */
6290 current_inferior ()->has_exit_code = true;
6291 current_inferior ()->exit_code = (LONGEST) ecs->ws.exit_status ();
6293 /* Support the --return-child-result option. */
6294 return_child_result_value = ecs->ws.exit_status ();
6296 interps_notify_exited (ecs->ws.exit_status ());
6298 else
6300 struct gdbarch *gdbarch = current_inferior ()->arch ();
6302 if (gdbarch_gdb_signal_to_target_p (gdbarch))
6304 /* Set the value of the internal variable $_exitsignal,
6305 which holds the signal uncaught by the inferior. */
6306 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
6307 gdbarch_gdb_signal_to_target (gdbarch,
6308 ecs->ws.sig ()));
6310 else
6312 /* We don't have access to the target's method used for
6313 converting between signal numbers (GDB's internal
6314 representation <-> target's representation).
6315 Therefore, we cannot do a good job at displaying this
6316 information to the user. It's better to just warn
6317 her about it (if infrun debugging is enabled), and
6318 give up. */
6319 infrun_debug_printf ("Cannot fill $_exitsignal with the correct "
6320 "signal number.");
6323 interps_notify_signal_exited (ecs->ws.sig ());
6326 gdb_flush (gdb_stdout);
6327 target_mourn_inferior (inferior_ptid);
6328 stop_print_frame = false;
6329 stop_waiting (ecs);
6330 return;
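/* The convenience variables set above outlive the inferior, so after
   the program is gone the user can still inspect them, for example:

     (gdb) print $_exitcode
     (gdb) print $_exitsignal

   Only one of the two is meaningful for a given run: $_exitcode after
   a normal exit, $_exitsignal after the inferior was killed by a
   signal.  */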
6332 case TARGET_WAITKIND_FORKED:
6333 case TARGET_WAITKIND_VFORKED:
6334 case TARGET_WAITKIND_THREAD_CLONED:
6336 displaced_step_finish (ecs->event_thread, ecs->ws);
6338 /* Start a new step-over in another thread if there's one that
6339 needs it. */
6340 start_step_over ();
6342 context_switch (ecs);
6344 /* Immediately detach breakpoints from the child before there's
6345 any chance of letting the user delete breakpoints from the
6346 breakpoint lists. If we don't do this early, it's easy to
6347 leave left-over traps in the child, viz.: "break foo; catch
6348 fork; c; <fork>; del; c; <child calls foo>". We only follow
6349 the fork on the last `continue', and by that time the
6350 breakpoint at "foo" is long gone from the breakpoint table.
6351 If we vforked, then we don't need to unpatch here, since both
6352 parent and child are sharing the same memory pages; we'll
6353 need to unpatch at follow/detach time instead to be certain
6354 that new breakpoints added between catchpoint hit time and
6355 vfork follow are detached. */
6356 if (ecs->ws.kind () == TARGET_WAITKIND_FORKED)
6358 /* This won't actually modify the breakpoint list, but will
6359 physically remove the breakpoints from the child. */
6360 detach_breakpoints (ecs->ws.child_ptid ());
6363 delete_just_stopped_threads_single_step_breakpoints ();
6365 /* In case the event is caught by a catchpoint, remember that
6366 the event is to be followed at the next resume of the thread,
6367 and not immediately. */
6368 ecs->event_thread->pending_follow = ecs->ws;
6370 ecs->event_thread->set_stop_pc
6371 (regcache_read_pc (get_thread_regcache (ecs->event_thread)));
6373 ecs->event_thread->control.stop_bpstat
6374 = bpstat_stop_status_nowatch (ecs->event_thread->inf->aspace.get (),
6375 ecs->event_thread->stop_pc (),
6376 ecs->event_thread, ecs->ws);
6378 if (handle_stop_requested (ecs))
6379 return;
6381 /* If no catchpoint triggered for this, then keep going. Note
6382 that we're interested in knowing the bpstat actually causes a
6383 stop, not just if it may explain the signal. Software
6384 watchpoints, for example, always appear in the bpstat. */
6385 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
6387 bool follow_child
6388 = (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED
6389 && follow_fork_mode_string == follow_fork_mode_child);
6391 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6393 process_stratum_target *targ
6394 = ecs->event_thread->inf->process_target ();
6396 bool should_resume;
6397 if (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED)
6398 should_resume = follow_fork ();
6399 else
6401 should_resume = true;
6402 inferior *inf = ecs->event_thread->inf;
6403 inf->top_target ()->follow_clone (ecs->ws.child_ptid ());
6404 ecs->event_thread->pending_follow.set_spurious ();
6407 /* Note that one of these may be an invalid pointer,
6408 depending on detach_fork. */
6409 thread_info *parent = ecs->event_thread;
6410 thread_info *child = targ->find_thread (ecs->ws.child_ptid ());
6412 /* At this point, the parent is marked running, and the
6413 child is marked stopped. */
6415 /* If not resuming the parent, mark it stopped. */
6416 if (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED
6417 && follow_child && !detach_fork && !non_stop && !sched_multi)
6418 parent->set_running (false);
6420 /* If resuming the child, mark it running. */
6421 if ((ecs->ws.kind () == TARGET_WAITKIND_THREAD_CLONED
6422 && !schedlock_applies (ecs->event_thread))
6423 || (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED
6424 && (follow_child
6425 || (!detach_fork && (non_stop || sched_multi)))))
6426 child->set_running (true);
6428 /* In non-stop mode, also resume the other branch. */
6429 if ((ecs->ws.kind () == TARGET_WAITKIND_THREAD_CLONED
6430 && target_is_non_stop_p ()
6431 && !schedlock_applies (ecs->event_thread))
6432 || (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED
6433 && (!detach_fork && (non_stop
6434 || (sched_multi
6435 && target_is_non_stop_p ())))))
6437 if (follow_child)
6438 switch_to_thread (parent);
6439 else
6440 switch_to_thread (child);
6442 ecs->event_thread = inferior_thread ();
6443 ecs->ptid = inferior_ptid;
6444 keep_going (ecs);
6447 if (follow_child)
6448 switch_to_thread (child);
6449 else
6450 switch_to_thread (parent);
6452 ecs->event_thread = inferior_thread ();
6453 ecs->ptid = inferior_ptid;
6455 if (should_resume)
6457 /* Never call switch_back_to_stepped_thread if we are waiting for
6458 vfork-done (waiting for an external vfork child to exec or
6459 exit). We will resume only the vforking thread for the purpose
6460 of collecting the vfork-done event, and we will restart any
6461 step once the critical shared address space window is done. */
6462 if ((!follow_child
6463 && detach_fork
6464 && parent->inf->thread_waiting_for_vfork_done != nullptr)
6465 || !switch_back_to_stepped_thread (ecs))
6466 keep_going (ecs);
6468 else
6469 stop_waiting (ecs);
6470 return;
6472 process_event_stop_test (ecs);
6473 return;
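/* The follow decisions above are driven by user settings; a typical
   way to exercise this path is, for example:

     (gdb) set follow-fork-mode child
     (gdb) set detach-on-fork off
     (gdb) catch fork
     (gdb) run

   With "catch fork" the bpstat check above causes a stop at the
   catchpoint; whether the parent, the child, or both remain under
   GDB's control then depends on follow-fork-mode and
   detach-on-fork.  */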
6475 case TARGET_WAITKIND_VFORK_DONE:
6476 /* Done with the shared memory region. Re-insert breakpoints in
6477 the parent, and keep going. */
6479 context_switch (ecs);
6481 handle_vfork_done (ecs->event_thread);
6482 gdb_assert (inferior_thread () == ecs->event_thread);
6484 if (handle_stop_requested (ecs))
6485 return;
6487 if (!switch_back_to_stepped_thread (ecs))
6489 gdb_assert (inferior_thread () == ecs->event_thread);
6490 /* This also takes care of reinserting breakpoints in the
6491 previously locked inferior. */
6492 keep_going (ecs);
6494 return;
6496 case TARGET_WAITKIND_EXECD:
6498 /* Note we can't read registers yet (the stop_pc), because we
6499 don't yet know the inferior's post-exec architecture.
6500 'stop_pc' is explicitly read below instead. */
6501 switch_to_thread_no_regs (ecs->event_thread);
6503 /* Do whatever is necessary to the parent branch of the vfork. */
6504 handle_vfork_child_exec_or_exit (1);
6506 /* This causes the eventpoints and symbol table to be reset.
6507 Must do this now, before trying to determine whether to
6508 stop. */
6509 follow_exec (inferior_ptid, ecs->ws.execd_pathname ());
6511 /* In follow_exec we may have deleted the original thread and
6512 created a new one. Make sure that the event thread is the
6513 execd thread for that case (this is a nop otherwise). */
6514 ecs->event_thread = inferior_thread ();
6516 ecs->event_thread->set_stop_pc
6517 (regcache_read_pc (get_thread_regcache (ecs->event_thread)));
6519 ecs->event_thread->control.stop_bpstat
6520 = bpstat_stop_status_nowatch (ecs->event_thread->inf->aspace.get (),
6521 ecs->event_thread->stop_pc (),
6522 ecs->event_thread, ecs->ws);
6524 if (handle_stop_requested (ecs))
6525 return;
6527 /* If no catchpoint triggered for this, then keep going. */
6528 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
6530 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6531 keep_going (ecs);
6532 return;
6534 process_event_stop_test (ecs);
6535 return;
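/* Whether an exec stops the program is likewise up to the bpstat
   check above; for example

     (gdb) catch exec

   makes GDB stop when the new executable image reported by
   follow_exec takes over, while without any exec catchpoint the
   inferior simply keeps going with the new symbol table loaded.  */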
6537 /* Be careful not to try to gather much state about a thread
6538 that's in a syscall. It's frequently a losing proposition. */
6539 case TARGET_WAITKIND_SYSCALL_ENTRY:
6540 /* Get the current syscall number. */
6541 if (handle_syscall_event (ecs) == 0)
6542 process_event_stop_test (ecs);
6543 return;
6545 /* Before examining the threads further, step this thread to
6546 get it entirely out of the syscall. (We get notice of the
6547 event when the thread is just on the verge of exiting a
6548 syscall. Stepping one instruction seems to get it back
6549 into user code.) */
6550 case TARGET_WAITKIND_SYSCALL_RETURN:
6551 if (handle_syscall_event (ecs) == 0)
6552 process_event_stop_test (ecs);
6553 return;
6555 case TARGET_WAITKIND_STOPPED:
6556 handle_signal_stop (ecs);
6557 return;
6559 case TARGET_WAITKIND_NO_HISTORY:
6560 /* Reverse execution: target ran out of history info. */
6562 /* Switch to the stopped thread. */
6563 context_switch (ecs);
6564 infrun_debug_printf ("stopped");
6566 delete_just_stopped_threads_single_step_breakpoints ();
6567 ecs->event_thread->set_stop_pc
6568 (regcache_read_pc (get_thread_regcache (inferior_thread ())));
6570 if (handle_stop_requested (ecs))
6571 return;
6573 interps_notify_no_history ();
6574 stop_waiting (ecs);
6575 return;
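/* This case is specific to reverse execution with recording, e.g.:

     (gdb) record
     (gdb) reverse-continue

   Once execution has been reversed back past the beginning of the
   recorded log, the target reports TARGET_WAITKIND_NO_HISTORY and we
   end up here, presenting the stop to the user instead of silently
   resuming.  */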
6579 /* Restart threads back to what they were trying to do back when we
6580 paused them (because of an in-line step-over or vfork, for example).
6581 The EVENT_THREAD thread is ignored (not restarted).
6583 If INF is non-nullptr, only resume threads from INF. */
6585 static void
6586 restart_threads (struct thread_info *event_thread, inferior *inf)
6588 INFRUN_SCOPED_DEBUG_START_END ("event_thread=%s, inf=%d",
6589 event_thread->ptid.to_string ().c_str (),
6590 inf != nullptr ? inf->num : -1);
6592 gdb_assert (!step_over_info_valid_p ());
6594 /* In case the instruction just stepped spawned a new thread. */
6595 update_thread_list ();
6597 for (thread_info *tp : all_non_exited_threads ())
6599 if (inf != nullptr && tp->inf != inf)
6600 continue;
6602 if (tp->inf->detaching)
6604 infrun_debug_printf ("restart threads: [%s] inferior detaching",
6605 tp->ptid.to_string ().c_str ());
6606 continue;
6609 switch_to_thread_no_regs (tp);
6611 if (tp == event_thread)
6613 infrun_debug_printf ("restart threads: [%s] is event thread",
6614 tp->ptid.to_string ().c_str ());
6615 continue;
6618 if (!(tp->state == THREAD_RUNNING || tp->control.in_infcall))
6620 infrun_debug_printf ("restart threads: [%s] not meant to be running",
6621 tp->ptid.to_string ().c_str ());
6622 continue;
6625 if (tp->resumed ())
6627 infrun_debug_printf ("restart threads: [%s] resumed",
6628 tp->ptid.to_string ().c_str ());
6629 gdb_assert (tp->executing () || tp->has_pending_waitstatus ());
6630 continue;
6633 if (thread_is_in_step_over_chain (tp))
6635 infrun_debug_printf ("restart threads: [%s] needs step-over",
6636 tp->ptid.to_string ().c_str ());
6637 gdb_assert (!tp->resumed ());
6638 continue;
6642 if (tp->has_pending_waitstatus ())
6644 infrun_debug_printf ("restart threads: [%s] has pending status",
6645 tp->ptid.to_string ().c_str ());
6646 tp->set_resumed (true);
6647 continue;
6650 gdb_assert (!tp->stop_requested);
6652 /* If some thread needs to start a step-over at this point, it
6653 should still be in the step-over queue, and thus skipped
6654 above. */
6655 if (thread_still_needs_step_over (tp))
6657 internal_error ("thread [%s] needs a step-over, but not in "
6658 "step-over queue\n",
6659 tp->ptid.to_string ().c_str ());
6662 if (currently_stepping (tp))
6664 infrun_debug_printf ("restart threads: [%s] was stepping",
6665 tp->ptid.to_string ().c_str ());
6666 keep_going_stepped_thread (tp);
6668 else
6670 infrun_debug_printf ("restart threads: [%s] continuing",
6671 tp->ptid.to_string ().c_str ());
6672 execution_control_state ecs (tp);
6673 switch_to_thread (tp);
6674 keep_going_pass_signal (&ecs);
6679 /* Callback for iterate_over_threads. Find a resumed thread that has
6680 a pending waitstatus. */
6682 static int
6683 resumed_thread_with_pending_status (struct thread_info *tp,
6684 void *arg)
6686 return tp->resumed () && tp->has_pending_waitstatus ();
6689 /* Called when we get an event that may finish an in-line or
6690 out-of-line (displaced stepping) step-over started previously.
6691 Return true if the event is processed and we should go back to the
6692 event loop; false if the caller should continue processing the
6693 event. */
6695 static int
6696 finish_step_over (struct execution_control_state *ecs)
6698 displaced_step_finish (ecs->event_thread, ecs->ws);
6700 bool had_step_over_info = step_over_info_valid_p ();
6702 if (had_step_over_info)
6704 /* If we're stepping over a breakpoint with all threads locked,
6705 then only the thread that was stepped should be reporting
6706 back an event. */
6707 gdb_assert (ecs->event_thread->control.trap_expected);
6709 update_thread_events_after_step_over (ecs->event_thread, ecs->ws);
6711 clear_step_over_info ();
6714 if (!target_is_non_stop_p ())
6715 return 0;
6717 /* Start a new step-over in another thread if there's one that
6718 needs it. */
6719 start_step_over ();
6721 /* If we were stepping over a breakpoint before, and haven't started
6722 a new in-line step-over sequence, then restart all other threads
6723 (except the event thread). We can't do this in all-stop, as then
6724 e.g., we wouldn't be able to issue any other remote packet until
6725 these other threads stop. */
6726 if (had_step_over_info && !step_over_info_valid_p ())
6728 struct thread_info *pending;
6730 /* If we only have threads with pending statuses, the restart
6731 below won't restart any thread and so nothing re-inserts the
6732 breakpoint we just stepped over. But we need it inserted
6733 when we later process the pending events; otherwise, if
6734 another thread has a pending event for this breakpoint too,
6735 we'd discard its event (because the breakpoint that
6736 originally caused the event was no longer inserted). */
6737 context_switch (ecs);
6738 insert_breakpoints ();
6740 restart_threads (ecs->event_thread);
6742 /* If we have events pending, go through handle_inferior_event
6743 again, picking up a pending event at random. This avoids
6744 thread starvation. */
6746 /* But not if we just stepped over a watchpoint in order to let
6747 the instruction execute so we can evaluate its expression.
6748 The set of watchpoints that triggered is recorded in the
6749 breakpoint objects themselves (see bp->watchpoint_triggered).
6750 If we processed another event first, that other event could
6751 clobber this info. */
6752 if (ecs->event_thread->stepping_over_watchpoint)
6753 return 0;
6755 /* The code below is meant to avoid one thread hogging the event
6756 loop by doing constant in-line step overs. If the stepping
6757 thread exited, there's no risk for this to happen, so we can
6758 safely let our caller process the event immediately. */
6759 if (ecs->ws.kind () == TARGET_WAITKIND_THREAD_EXITED)
6760 return 0;
6762 pending = iterate_over_threads (resumed_thread_with_pending_status,
6763 nullptr);
6764 if (pending != nullptr)
6766 struct thread_info *tp = ecs->event_thread;
6767 struct regcache *regcache;
6769 infrun_debug_printf ("found resumed threads with "
6770 "pending events, saving status");
6772 gdb_assert (pending != tp);
6774 /* Record the event thread's event for later. */
6775 save_waitstatus (tp, ecs->ws);
6776 /* This was cleared early, by handle_inferior_event. Set it
6777 so this pending event is considered by
6778 do_target_wait. */
6779 tp->set_resumed (true);
6781 gdb_assert (!tp->executing ());
6783 regcache = get_thread_regcache (tp);
6784 tp->set_stop_pc (regcache_read_pc (regcache));
6786 infrun_debug_printf ("saved stop_pc=%s for %s "
6787 "(currently_stepping=%d)",
6788 paddress (current_inferior ()->arch (),
6789 tp->stop_pc ()),
6790 tp->ptid.to_string ().c_str (),
6791 currently_stepping (tp));
6793 /* This in-line step-over finished; clear this so we won't
6794 start a new one. This is what handle_signal_stop would
6795 do, if we returned false. */
6796 tp->stepping_over_breakpoint = 0;
6798 /* Wake up the event loop again. */
6799 mark_async_event_handler (infrun_async_inferior_event_token);
6801 prepare_to_wait (ecs);
6802 return 1;
6806 return 0;
6809 /* See infrun.h. */
6811 void
6812 notify_signal_received (gdb_signal sig)
6814 interps_notify_signal_received (sig);
6815 gdb::observers::signal_received.notify (sig);
6818 /* See infrun.h. */
6820 void
6821 notify_normal_stop (bpstat *bs, int print_frame)
6823 interps_notify_normal_stop (bs, print_frame);
6824 gdb::observers::normal_stop.notify (bs, print_frame);
6827 /* See infrun.h. */
6829 void
notify_user_selected_context_changed (user_selected_what selection)
6831 interps_notify_user_selected_context_changed (selection);
6832 gdb::observers::user_selected_context_changed.notify (selection);
6835 /* Come here when the program has stopped with a signal. */
6837 static void
6838 handle_signal_stop (struct execution_control_state *ecs)
6840 frame_info_ptr frame;
6841 struct gdbarch *gdbarch;
6842 int stopped_by_watchpoint;
6843 enum stop_kind stop_soon;
6844 int random_signal;
6846 gdb_assert (ecs->ws.kind () == TARGET_WAITKIND_STOPPED);
6848 ecs->event_thread->set_stop_signal (ecs->ws.sig ());
6850 /* Do we need to clean up the state of a thread that has
6851 completed a displaced single-step? (Doing so usually affects
6852 the PC, so do it here, before we set stop_pc.) */
6853 if (finish_step_over (ecs))
6854 return;
6856 /* If we either finished a single-step or hit a breakpoint, but
6857 the user wanted this thread to be stopped, pretend we got a
6858 SIG0 (generic unsignaled stop). */
6859 if (ecs->event_thread->stop_requested
6860 && ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
6861 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6863 ecs->event_thread->set_stop_pc
6864 (regcache_read_pc (get_thread_regcache (ecs->event_thread)));
6866 context_switch (ecs);
6868 if (deprecated_context_hook)
6869 deprecated_context_hook (ecs->event_thread->global_num);
6871 if (debug_infrun)
6873 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
6874 struct gdbarch *reg_gdbarch = regcache->arch ();
6876 infrun_debug_printf
6877 ("stop_pc=%s", paddress (reg_gdbarch, ecs->event_thread->stop_pc ()));
6878 if (target_stopped_by_watchpoint ())
6880 CORE_ADDR addr;
6882 infrun_debug_printf ("stopped by watchpoint");
6884 if (target_stopped_data_address (current_inferior ()->top_target (),
6885 &addr))
6886 infrun_debug_printf ("stopped data address=%s",
6887 paddress (reg_gdbarch, addr));
6888 else
6889 infrun_debug_printf ("(no data address available)");
6893 /* This originates from start_remote(), start_inferior() and
6894 shared library hook functions. */
6895 stop_soon = get_inferior_stop_soon (ecs);
6896 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
6898 infrun_debug_printf ("quietly stopped");
6899 stop_print_frame = true;
6900 stop_waiting (ecs);
6901 return;
6904 /* This originates from attach_command(). We need to overwrite
6905 the stop_signal here, because some kernels don't ignore a
6906 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
6907 See more comments in inferior.h. On the other hand, if we
6908 get a non-SIGSTOP, report it to the user - assume the backend
6909 will handle the SIGSTOP if it should show up later.
6911 Also consider that the attach is complete when we see a
6912 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
6913 target extended-remote report it instead of a SIGSTOP
6914 (e.g. gdbserver). We already rely on SIGTRAP being our
6915 signal, so this is no exception.
6917 Also consider that the attach is complete when we see a
6918 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
6919 the target to stop all threads of the inferior, in case the
6920 low level attach operation doesn't stop them implicitly. If
6921 they weren't stopped implicitly, then the stub will report a
6922 GDB_SIGNAL_0, meaning: stopped for no particular reason
6923 other than GDB's request. */
6924 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
6925 && (ecs->event_thread->stop_signal () == GDB_SIGNAL_STOP
6926 || ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
6927 || ecs->event_thread->stop_signal () == GDB_SIGNAL_0))
6929 stop_print_frame = true;
6930 stop_waiting (ecs);
6931 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6932 return;
6935 /* At this point, get hold of the now-current thread's frame. */
6936 frame = get_current_frame ();
6937 gdbarch = get_frame_arch (frame);
6939 /* Pull the single step breakpoints out of the target. */
6940 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
6942 struct regcache *regcache;
6943 CORE_ADDR pc;
6945 regcache = get_thread_regcache (ecs->event_thread);
6946 const address_space *aspace = ecs->event_thread->inf->aspace.get ();
6948 pc = regcache_read_pc (regcache);
6950 /* However, before doing so, if this single-step breakpoint was
6951 actually for another thread, set this thread up for moving
6952 past it. */
6953 if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
6954 aspace, pc))
6956 if (single_step_breakpoint_inserted_here_p (aspace, pc))
6958 infrun_debug_printf ("[%s] hit another thread's single-step "
6959 "breakpoint",
6960 ecs->ptid.to_string ().c_str ());
6961 ecs->hit_singlestep_breakpoint = 1;
6964 else
6966 infrun_debug_printf ("[%s] hit its single-step breakpoint",
6967 ecs->ptid.to_string ().c_str ());
6970 delete_just_stopped_threads_single_step_breakpoints ();
6972 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
6973 && ecs->event_thread->control.trap_expected
6974 && ecs->event_thread->stepping_over_watchpoint)
6975 stopped_by_watchpoint = 0;
6976 else
6977 stopped_by_watchpoint = watchpoints_triggered (ecs->ws);
6979 /* If necessary, step over this watchpoint. We'll be back to display
6980 it in a moment. */
6981 if (stopped_by_watchpoint
6982 && (target_have_steppable_watchpoint ()
6983 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
6985 /* At this point, we are stopped at an instruction which has
6986 attempted to write to a piece of memory under control of
6987 a watchpoint. The instruction hasn't actually executed
6988 yet. If we were to evaluate the watchpoint expression
6989 now, we would get the old value, and therefore no change
6990 would seem to have occurred.
6992 In order to make watchpoints work `right', we really need
6993 to complete the memory write, and then evaluate the
6994 watchpoint expression. We do this by single-stepping the
6995 target.
6997 It may not be necessary to disable the watchpoint to step over
6998 it. For example, the PA can (with some kernel cooperation)
6999 single step over a watchpoint without disabling the watchpoint.
7001 It is far more common to need to disable a watchpoint to step
7002 the inferior over it. If we have non-steppable watchpoints,
7003 we must disable the current watchpoint; it's simplest to
7004 disable all watchpoints.
7006 Any breakpoint at PC must also be stepped over -- if there's
7007 one, it will have already triggered before the watchpoint
7008 triggered, and we either already reported it to the user, or
7009 it didn't cause a stop and we called keep_going. In either
7010 case, if there was a breakpoint at PC, we must be trying to
7011 step past it. */
7012 ecs->event_thread->stepping_over_watchpoint = 1;
7013 keep_going (ecs);
7014 return;
7017 ecs->event_thread->stepping_over_breakpoint = 0;
7018 ecs->event_thread->stepping_over_watchpoint = 0;
7019 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
7020 ecs->event_thread->control.stop_step = 0;
7021 stop_print_frame = true;
7022 stopped_by_random_signal = 0;
7023 bpstat *stop_chain = nullptr;
7025 /* Hide inlined functions starting here, unless we just performed stepi or
7026 nexti. After stepi and nexti, always show the innermost frame (not any
7027 inline function call sites). */
7028 if (ecs->event_thread->control.step_range_end != 1)
7030 const address_space *aspace = ecs->event_thread->inf->aspace.get ();
7032 /* skip_inline_frames is expensive, so we avoid it if we can
7033 determine that the address is one where functions cannot have
7034 been inlined. This improves performance with inferiors that
7035 load a lot of shared libraries, because the solib event
7036 breakpoint is defined as the address of a function (i.e. not
7037 inline). Note that we have to check the previous PC as well
7038 as the current one to catch cases when we have just
7039 single-stepped off a breakpoint prior to reinstating it.
7040 Note that we're assuming that the code we single-step to is
7041 not inline, but that's not definitive: there's nothing
7042 preventing the event breakpoint function from containing
7043 inlined code, and the single-step ending up there. If the
7044 user had set a breakpoint on that inlined code, the missing
7045 skip_inline_frames call would break things. Fortunately
7046 that's an extremely unlikely scenario. */
7047 if (!pc_at_non_inline_function (aspace,
7048 ecs->event_thread->stop_pc (),
7049 ecs->ws)
7050 && !(ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
7051 && ecs->event_thread->control.trap_expected
7052 && pc_at_non_inline_function (aspace,
7053 ecs->event_thread->prev_pc,
7054 ecs->ws)))
7056 stop_chain = build_bpstat_chain (aspace,
7057 ecs->event_thread->stop_pc (),
7058 ecs->ws);
7059 skip_inline_frames (ecs->event_thread, stop_chain);
7063 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
7064 && ecs->event_thread->control.trap_expected
7065 && gdbarch_single_step_through_delay_p (gdbarch)
7066 && currently_stepping (ecs->event_thread))
7068 /* We're trying to step off a breakpoint. Turns out that we're
7069 also on an instruction that needs to be stepped multiple
7070 times before it has fully executed. E.g., architectures
7071 with a delay slot. It needs to be stepped twice, once for
7072 the instruction and once for the delay slot. */
7073 int step_through_delay
7074 = gdbarch_single_step_through_delay (gdbarch, frame);
7076 if (step_through_delay)
7077 infrun_debug_printf ("step through delay");
7079 if (ecs->event_thread->control.step_range_end == 0
7080 && step_through_delay)
7082 /* The user issued a continue when stopped at a breakpoint.
7083 Set up for another trap and get out of here. */
7084 ecs->event_thread->stepping_over_breakpoint = 1;
7085 keep_going (ecs);
7086 return;
7088 else if (step_through_delay)
7090 /* The user issued a step when stopped at a breakpoint.
7091 Maybe we should stop, maybe we should not - the delay
7092 slot *might* correspond to a line of source. In any
7093 case, don't decide that here, just set
7094 ecs->stepping_over_breakpoint, making sure we
7095 single-step again before breakpoints are re-inserted. */
7096 ecs->event_thread->stepping_over_breakpoint = 1;
7100 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
7101 handles this event. */
7102 ecs->event_thread->control.stop_bpstat
7103 = bpstat_stop_status (ecs->event_thread->inf->aspace.get (),
7104 ecs->event_thread->stop_pc (),
7105 ecs->event_thread, ecs->ws, stop_chain);
7107 /* Following in case break condition called a
7108 function. */
7109 stop_print_frame = true;
7111 /* This is where we handle "moribund" watchpoints. Unlike
7112 software breakpoint traps, hardware watchpoint traps are
7113 always distinguishable from random traps. If no high-level
7114 watchpoint is associated with the reported stop data address
7115 anymore, then the bpstat does not explain the signal ---
7116 simply make sure to ignore it if `stopped_by_watchpoint' is
7117 set. */
7119 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
7120 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
7121 GDB_SIGNAL_TRAP)
7122 && stopped_by_watchpoint)
7124 infrun_debug_printf ("no user watchpoint explains watchpoint SIGTRAP, "
7125 "ignoring");
7128 /* NOTE: cagney/2003-03-29: These checks for a random signal
7129 at one stage in the past included checks for an inferior
7130 function call's call dummy's return breakpoint. The original
7131 comment, that went with the test, read:
7133 ``End of a stack dummy. Some systems (e.g. Sony news) give
7134 another signal besides SIGTRAP, so check here as well as
7135 above.''
7137 If someone ever tries to get call dummies on a
7138 non-executable stack to work (where the target would stop
7139 with something like a SIGSEGV), then those tests might need
7140 to be re-instated. Given, however, that the tests were only
7141 enabled when momentary breakpoints were not being used, I
7142 suspect that it won't be the case.
7144 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
7145 be necessary for call dummies on a non-executable stack on
7146 SPARC. */
7148 /* See if the breakpoints module can explain the signal. */
7149 random_signal
7150 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
7151 ecs->event_thread->stop_signal ());
7153 /* Maybe this was a trap for a software breakpoint that has since
7154 been removed. */
7155 if (random_signal && target_stopped_by_sw_breakpoint ())
7157 if (gdbarch_program_breakpoint_here_p (gdbarch,
7158 ecs->event_thread->stop_pc ()))
7160 struct regcache *regcache;
7161 int decr_pc;
7163 /* Re-adjust PC to what the program would see if GDB was not
7164 debugging it. */
7165 regcache = get_thread_regcache (ecs->event_thread);
7166 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
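/* Note: gdbarch_decr_pc_after_break is the amount the reported PC
   must be rewound after a software breakpoint trap; on x86, for
   example, it is 1 (the PC points one byte past the int3), while on
   many other architectures it is 0 and no adjustment is needed.  */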
7167 if (decr_pc != 0)
7169 std::optional<scoped_restore_tmpl<int>>
7170 restore_operation_disable;
7172 if (record_full_is_used ())
7173 restore_operation_disable.emplace
7174 (record_full_gdb_operation_disable_set ());
7176 regcache_write_pc (regcache,
7177 ecs->event_thread->stop_pc () + decr_pc);
7180 else
7182 /* A delayed software breakpoint event. Ignore the trap. */
7183 infrun_debug_printf ("delayed software breakpoint trap, ignoring");
7184 random_signal = 0;
7188 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
7189 has since been removed. */
7190 if (random_signal && target_stopped_by_hw_breakpoint ())
7192 /* A delayed hardware breakpoint event. Ignore the trap. */
7193 infrun_debug_printf ("delayed hardware breakpoint/watchpoint "
7194 "trap, ignoring");
7195 random_signal = 0;
7198 /* If not, perhaps stepping/nexting can. */
7199 if (random_signal)
7200 random_signal = !(ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
7201 && currently_stepping (ecs->event_thread));
7203 /* Perhaps the thread hit a single-step breakpoint of _another_
7204 thread. Single-step breakpoints are transparent to the
7205 breakpoints module. */
7206 if (random_signal)
7207 random_signal = !ecs->hit_singlestep_breakpoint;
7209 /* No? Perhaps we got a moribund watchpoint. */
7210 if (random_signal)
7211 random_signal = !stopped_by_watchpoint;
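/* If RANDOM_SIGNAL is still set here, none of the checks above
   explained the trap: no breakpoint in the bpstat, no in-progress
   single-step, no other thread's single-step breakpoint, and no
   (possibly moribund) watchpoint.  */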
7213 /* Always stop if the user explicitly requested this thread to
7214 remain stopped. */
7215 if (ecs->event_thread->stop_requested)
7217 random_signal = 1;
7218 infrun_debug_printf ("user-requested stop");
7221 /* For the program's own signals, act according to
7222 the signal handling tables. */
7224 if (random_signal)
7226 /* Signal not for debugging purposes. */
7227 enum gdb_signal stop_signal = ecs->event_thread->stop_signal ();
7229 infrun_debug_printf ("random signal (%s)",
7230 gdb_signal_to_symbol_string (stop_signal));
7232 stopped_by_random_signal = 1;
7234 /* Always stop on signals if we're either just gaining control
7235 of the program, or the user explicitly requested this thread
7236 to remain stopped. */
7237 if (stop_soon != NO_STOP_QUIETLY
7238 || ecs->event_thread->stop_requested
7239 || signal_stop_state (ecs->event_thread->stop_signal ()))
7241 stop_waiting (ecs);
7242 return;
7245 /* Notify observers the signal has "handle print" set. Note we
7246 returned early above if stopping; normal_stop handles the
7247 printing in that case. */
7248 if (signal_print[ecs->event_thread->stop_signal ()])
7250 /* The signal table tells us to print about this signal. */
7251 target_terminal::ours_for_output ();
7252 notify_signal_received (ecs->event_thread->stop_signal ());
7253 target_terminal::inferior ();
7256 /* Clear the signal if it should not be passed. */
7257 if (signal_program[ecs->event_thread->stop_signal ()] == 0)
7258 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
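/* signal_program reflects the "pass"/"nopass" setting of the "handle"
   command; once the stop signal is cleared to GDB_SIGNAL_0, the signal
   will not be delivered to the inferior when it is next resumed.  */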
7260 if (ecs->event_thread->prev_pc == ecs->event_thread->stop_pc ()
7261 && ecs->event_thread->control.trap_expected
7262 && ecs->event_thread->control.step_resume_breakpoint == nullptr)
7264 /* We were just starting a new sequence, attempting to
7265 single-step off of a breakpoint and expecting a SIGTRAP.
7266 Instead this signal arrives. This signal will take us out
7267 of the stepping range so GDB needs to remember to, when
7268 the signal handler returns, resume stepping off that
7269 breakpoint. */
7270 /* To simplify things, "continue" is forced to use the same
7271 code paths as single-step - set a breakpoint at the
7272 signal return address and then, once hit, step off that
7273 breakpoint. */
7274 infrun_debug_printf ("signal arrived while stepping over breakpoint");
7276 insert_hp_step_resume_breakpoint_at_frame (frame);
7277 ecs->event_thread->step_after_step_resume_breakpoint = 1;
7278 /* Reset trap_expected to ensure breakpoints are re-inserted. */
7279 ecs->event_thread->control.trap_expected = 0;
7281 /* If we were nexting/stepping some other thread, switch to
7282 it, so that we don't continue it, losing control. */
7283 if (!switch_back_to_stepped_thread (ecs))
7284 keep_going (ecs);
7285 return;
7288 if (ecs->event_thread->stop_signal () != GDB_SIGNAL_0
7289 && (pc_in_thread_step_range (ecs->event_thread->stop_pc (),
7290 ecs->event_thread)
7291 || ecs->event_thread->control.step_range_end == 1)
7292 && (get_stack_frame_id (frame)
7293 == ecs->event_thread->control.step_stack_frame_id)
7294 && ecs->event_thread->control.step_resume_breakpoint == nullptr)
7296 /* The inferior is about to take a signal that will take it
7297 out of the single step range. Set a breakpoint at the
7298 current PC (which is presumably where the signal handler
7299 will eventually return) and then allow the inferior to
7300 run free.
7302 Note that this is only needed for a signal delivered
7303 while in the single-step range. Nested signals aren't a
7304 problem as they eventually all return. */
7305 infrun_debug_printf ("signal may take us out of single-step range");
7307 clear_step_over_info ();
7308 insert_hp_step_resume_breakpoint_at_frame (frame);
7309 ecs->event_thread->step_after_step_resume_breakpoint = 1;
7310 /* Reset trap_expected to ensure breakpoints are re-inserted. */
7311 ecs->event_thread->control.trap_expected = 0;
7312 keep_going (ecs);
7313 return;
7316 /* Note: step_resume_breakpoint may be non-NULL. This occurs
7317 when either there's a nested signal, or when there's a
7318 pending signal enabled just as the signal handler returns
7319 (leaving the inferior at the step-resume-breakpoint without
7320 actually executing it). Either way continue until the
7321 breakpoint is really hit. */
7323 if (!switch_back_to_stepped_thread (ecs))
7325 infrun_debug_printf ("random signal, keep going");
7327 keep_going (ecs);
7329 return;
7332 process_event_stop_test (ecs);
7335 /* Return the address for the beginning of the line. */
7337 CORE_ADDR
7338 update_line_range_start (CORE_ADDR pc, struct execution_control_state *ecs)
7340 /* The line table may have multiple entries for the same source code line.
7341 Given the PC, check the line table and return the PC that corresponds
7342 to the line table entry for the source line that PC is in. */
7343 CORE_ADDR start_line_pc = ecs->event_thread->control.step_range_start;
7344 std::optional<CORE_ADDR> real_range_start;
7346 /* Call find_line_range_start to get the smallest address in the
7347 linetable for multiple Line X entries in the line table. */
7348 real_range_start = find_line_range_start (pc);
7350 if (real_range_start.has_value ())
7351 start_line_pc = *real_range_start;
7353 return start_line_pc;
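/* Illustrative example (addresses made up): if the line table records
   line 42 at both 0x1000 and 0x1010, then for PC == 0x1010 this
   returns 0x1000, the smallest address belonging to that line, so
   that stepping treats 0x1000 as the true start of the line.  If
   find_line_range_start finds nothing, the thread's existing
   step_range_start is returned unchanged.  */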
7356 namespace {
7358 /* Helper class for process_event_stop_test implementing lazy evaluation. */
7359 template<typename T>
7360 class lazy_loader
7362 using fetcher_t = std::function<T ()>;
7364 public:
7365 explicit lazy_loader (fetcher_t &&f) : m_loader (std::move (f))
7368 T &operator* ()
7370 if (!m_value.has_value ())
7371 m_value.emplace (m_loader ());
7372 return m_value.value ();
7375 T *operator-> ()
7377 return &**this;
7380 private:
7381 std::optional<T> m_value;
7382 fetcher_t m_loader;
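/* Usage sketch (illustrative; mirrors the call site in
   process_event_stop_test below):

     lazy_loader<frame_id> curr_frame_id
       ([] () { return get_frame_id (get_current_frame ()); });

   The fetcher runs only the first time *curr_frame_id or
   curr_frame_id-> is evaluated; the computed value is cached in
   m_value and reused on later accesses.  */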
7387 /* Come here when we've got some debug event / signal we can explain
7388 (IOW, not a random signal), and test whether it should cause a
7389 stop, or whether we should resume the inferior (transparently).
7390 E.g., could be a breakpoint whose condition evaluates false; we
7391 could be still stepping within the line; etc. */
7393 static void
7394 process_event_stop_test (struct execution_control_state *ecs)
7396 struct symtab_and_line stop_pc_sal;
7397 frame_info_ptr frame;
7398 struct gdbarch *gdbarch;
7399 CORE_ADDR jmp_buf_pc;
7400 struct bpstat_what what;
7402 /* Handle cases caused by hitting a breakpoint. */
7404 frame = get_current_frame ();
7405 gdbarch = get_frame_arch (frame);
7407 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
7409 if (what.call_dummy)
7411 stop_stack_dummy = what.call_dummy;
7414 /* A few breakpoint types have callbacks associated (e.g.,
7415 bp_jit_event). Run them now. */
7416 bpstat_run_callbacks (ecs->event_thread->control.stop_bpstat);
7418 /* Shorthand to make if statements smaller. */
7419 struct frame_id original_frame_id
7420 = ecs->event_thread->control.step_frame_id;
7421 lazy_loader<frame_id> curr_frame_id
7422 ([] () { return get_frame_id (get_current_frame ()); });
7424 switch (what.main_action)
7426 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
7427 /* If we hit the breakpoint at longjmp while stepping, we
7428 install a momentary breakpoint at the target of the
7429 jmp_buf. */
7431 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME");
7433 ecs->event_thread->stepping_over_breakpoint = 1;
7435 if (what.is_longjmp)
7437 struct value *arg_value;
7439 /* If we set the longjmp breakpoint via a SystemTap probe,
7440 then use it to extract the arguments. The destination PC
7441 is the third argument to the probe. */
7442 arg_value = probe_safe_evaluate_at_pc (frame, 2);
7443 if (arg_value)
7445 jmp_buf_pc = value_as_address (arg_value);
7446 jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
7448 else if (!gdbarch_get_longjmp_target_p (gdbarch)
7449 || !gdbarch_get_longjmp_target (gdbarch,
7450 frame, &jmp_buf_pc))
7452 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME "
7453 "(!gdbarch_get_longjmp_target)");
7454 keep_going (ecs);
7455 return;
7458 /* Insert a breakpoint at resume address. */
7459 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
7461 else
7462 check_exception_resume (ecs, frame);
7463 keep_going (ecs);
7464 return;
7466 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
7468 frame_info_ptr init_frame;
7470 /* There are several cases to consider.
7472 1. The initiating frame no longer exists. In this case we
7473 must stop, because the exception or longjmp has gone too
7474 far.
7476 2. The initiating frame exists, and is the same as the
7477 current frame. We stop, because the exception or longjmp
7478 has been caught.
7480 3. The initiating frame exists and is different from the
7481 current frame. This means the exception or longjmp has
7482 been caught beneath the initiating frame, so keep going.
7484 4. longjmp breakpoint has been placed just to protect
7485 against stale dummy frames and user is not interested in
7486 stopping around longjmps. */
7488 infrun_debug_printf ("BPSTAT_WHAT_CLEAR_LONGJMP_RESUME");
7490 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
7491 != nullptr);
7492 delete_exception_resume_breakpoint (ecs->event_thread);
7494 if (what.is_longjmp)
7496 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
7498 if (!frame_id_p (ecs->event_thread->initiating_frame))
7500 /* Case 4. */
7501 keep_going (ecs);
7502 return;
7506 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
7508 if (init_frame)
7510 if (*curr_frame_id == ecs->event_thread->initiating_frame)
7512 /* Case 2. Fall through. */
7514 else
7516 /* Case 3. */
7517 keep_going (ecs);
7518 return;
7522 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
7523 exists. */
7524 delete_step_resume_breakpoint (ecs->event_thread);
7526 end_stepping_range (ecs);
7528 return;
7530 case BPSTAT_WHAT_SINGLE:
7531 infrun_debug_printf ("BPSTAT_WHAT_SINGLE");
7532 ecs->event_thread->stepping_over_breakpoint = 1;
7533 /* Still need to check other stuff, at least the case where we
7534 are stepping and step out of the right range. */
7535 break;
7537 case BPSTAT_WHAT_STEP_RESUME:
7538 infrun_debug_printf ("BPSTAT_WHAT_STEP_RESUME");
7540 delete_step_resume_breakpoint (ecs->event_thread);
7541 if (ecs->event_thread->control.proceed_to_finish
7542 && execution_direction == EXEC_REVERSE)
7544 struct thread_info *tp = ecs->event_thread;
7546 /* We are finishing a function in reverse, and just hit the
7547 step-resume breakpoint at the start address of the
7548 function, and we're almost there -- just need to back up
7549 by one more single-step, which should take us back to the
7550 function call. */
7551 tp->control.step_range_start = tp->control.step_range_end = 1;
7552 keep_going (ecs);
7553 return;
7555 fill_in_stop_func (gdbarch, ecs);
7556 if (ecs->event_thread->stop_pc () == ecs->stop_func_start
7557 && execution_direction == EXEC_REVERSE)
7559 /* We are stepping over a function call in reverse, and just
7560 hit the step-resume breakpoint at the start address of
7561 the function. Go back to single-stepping, which should
7562 take us back to the function call. */
7563 ecs->event_thread->stepping_over_breakpoint = 1;
7564 keep_going (ecs);
7565 return;
7567 break;
7569 case BPSTAT_WHAT_STOP_NOISY:
7570 infrun_debug_printf ("BPSTAT_WHAT_STOP_NOISY");
7571 stop_print_frame = true;
7573 /* Assume the thread stopped for a breakpoint. We'll still check
7574 whether a/the breakpoint is there when the thread is next
7575 resumed. */
7576 ecs->event_thread->stepping_over_breakpoint = 1;
7578 stop_waiting (ecs);
7579 return;
7581 case BPSTAT_WHAT_STOP_SILENT:
7582 infrun_debug_printf ("BPSTAT_WHAT_STOP_SILENT");
7583 stop_print_frame = false;
7585 /* Assume the thread stopped for a breakpoint. We'll still check
7586 whether a/the breakpoint is there when the thread is next
7587 resumed. */
7588 ecs->event_thread->stepping_over_breakpoint = 1;
7589 stop_waiting (ecs);
7590 return;
7592 case BPSTAT_WHAT_HP_STEP_RESUME:
7593 infrun_debug_printf ("BPSTAT_WHAT_HP_STEP_RESUME");
7595 delete_step_resume_breakpoint (ecs->event_thread);
7596 if (ecs->event_thread->step_after_step_resume_breakpoint)
7598 /* Back when the step-resume breakpoint was inserted, we
7599 were trying to single-step off a breakpoint. Go back to
7600 doing that. */
7601 ecs->event_thread->step_after_step_resume_breakpoint = 0;
7602 ecs->event_thread->stepping_over_breakpoint = 1;
7603 keep_going (ecs);
7604 return;
7606 break;
7608 case BPSTAT_WHAT_KEEP_CHECKING:
7609 break;
7612 /* If we stepped a permanent breakpoint and we had a high priority
7613 step-resume breakpoint for the address we stepped, but we didn't
7614 hit it, then we must have stepped into the signal handler. The
7615 step-resume was only necessary to catch the case of _not_
7616 stepping into the handler, so delete it, and fall through to
7617 checking whether the step finished. */
7618 if (ecs->event_thread->stepped_breakpoint)
7620 struct breakpoint *sr_bp
7621 = ecs->event_thread->control.step_resume_breakpoint;
7623 if (sr_bp != nullptr
7624 && sr_bp->first_loc ().permanent
7625 && sr_bp->type == bp_hp_step_resume
7626 && sr_bp->first_loc ().address == ecs->event_thread->prev_pc)
7628 infrun_debug_printf ("stepped permanent breakpoint, stopped in handler");
7629 delete_step_resume_breakpoint (ecs->event_thread);
7630 ecs->event_thread->step_after_step_resume_breakpoint = 0;
7634 /* We come here if we hit a breakpoint but should not stop for it.
7635 Possibly we also were stepping and should stop for that. So fall
7636 through and test for stepping. But, if not stepping, do not
7637 stop. */
7639 /* In all-stop mode, if we're currently stepping but have stopped in
7640 some other thread, we need to switch back to the stepped thread. */
7641 if (switch_back_to_stepped_thread (ecs))
7642 return;
7644 if (ecs->event_thread->control.step_resume_breakpoint)
7646 infrun_debug_printf ("step-resume breakpoint is inserted");
7648 /* Having a step-resume breakpoint overrides anything
7649 else having to do with stepping commands until
7650 that breakpoint is reached. */
7651 keep_going (ecs);
7652 return;
7655 if (ecs->event_thread->control.step_range_end == 0)
7657 infrun_debug_printf ("no stepping, continue");
7658 /* Likewise if we aren't even stepping. */
7659 keep_going (ecs);
7660 return;
7663 fill_in_stop_func (gdbarch, ecs);
7665 /* If stepping through a line, keep going if still within it.
7667 Note that step_range_end is the address of the first instruction
7668 beyond the step range, and NOT the address of the last instruction
7669 within it!
7671 Note also that during reverse execution, we may be stepping
7672 through a function epilogue and therefore must detect when
7673 the current-frame changes in the middle of a line. */
7675 if (pc_in_thread_step_range (ecs->event_thread->stop_pc (),
7676 ecs->event_thread)
7677 && (execution_direction != EXEC_REVERSE
7678 || *curr_frame_id == original_frame_id))
7680 infrun_debug_printf
7681 ("stepping inside range [%s-%s]",
7682 paddress (gdbarch, ecs->event_thread->control.step_range_start),
7683 paddress (gdbarch, ecs->event_thread->control.step_range_end));
7685 /* Tentatively re-enable range stepping; `resume' disables it if
7686 necessary (e.g., if we're stepping over a breakpoint or we
7687 have software watchpoints). */
7688 ecs->event_thread->control.may_range_step = 1;
7690 /* When stepping backward, stop at beginning of line range
7691 (unless it's the function entry point, in which case
7692 keep going back to the call point). */
7693 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
7694 if (stop_pc == ecs->event_thread->control.step_range_start
7695 && stop_pc != ecs->stop_func_start
7696 && execution_direction == EXEC_REVERSE)
7697 end_stepping_range (ecs);
7698 else
7699 keep_going (ecs);
7701 return;
7704 /* We stepped out of the stepping range. */
7706 /* If we are stepping at the source level and entered the runtime
7707 loader dynamic symbol resolution code...
7709 EXEC_FORWARD: we keep on single stepping until we exit the run
7710 time loader code and reach the callee's address.
7712 EXEC_REVERSE: we've already executed the callee (backward), and
7713 the runtime loader code is handled just like any other
7714 undebuggable function call. Now we need only keep stepping
7715 backward through the trampoline code, and that's handled further
7716 down, so there is nothing for us to do here. */
7718 if (execution_direction != EXEC_REVERSE
7719 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7720 && in_solib_dynsym_resolve_code (ecs->event_thread->stop_pc ())
7721 && (ecs->event_thread->control.step_start_function == nullptr
7722 || !in_solib_dynsym_resolve_code (
7723 ecs->event_thread->control.step_start_function->value_block ()
7724 ->entry_pc ())))
7726 CORE_ADDR pc_after_resolver =
7727 gdbarch_skip_solib_resolver (gdbarch, ecs->event_thread->stop_pc ());
7729 infrun_debug_printf ("stepped into dynsym resolve code");
7731 if (pc_after_resolver)
7733 /* Set up a step-resume breakpoint at the address
7734 indicated by SKIP_SOLIB_RESOLVER. */
7735 symtab_and_line sr_sal;
7736 sr_sal.pc = pc_after_resolver;
7737 sr_sal.pspace = get_frame_program_space (frame);
7739 insert_step_resume_breakpoint_at_sal (gdbarch,
7740 sr_sal, null_frame_id);
7743 keep_going (ecs);
7744 return;
7747 /* Step through an indirect branch thunk. */
7748 if (ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
7749 && gdbarch_in_indirect_branch_thunk (gdbarch,
7750 ecs->event_thread->stop_pc ()))
7752 infrun_debug_printf ("stepped into indirect branch thunk");
7753 keep_going (ecs);
7754 return;
7757 if (ecs->event_thread->control.step_range_end != 1
7758 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7759 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
7760 && get_frame_type (frame) == SIGTRAMP_FRAME)
7762 infrun_debug_printf ("stepped into signal trampoline");
7763 /* The inferior, while doing a "step" or "next", has ended up in
7764 a signal trampoline (either by a signal being delivered or by
7765 the signal handler returning). Just single-step until the
7766 inferior leaves the trampoline (either by calling the handler
7767 or returning). */
7768 keep_going (ecs);
7769 return;
7772 /* If we're in the return path from a shared library trampoline,
7773 we want to proceed through the trampoline when stepping. */
7774 /* macro/2012-04-25: This needs to come before the subroutine
7775 call check below as on some targets return trampolines look
7776 like subroutine calls (MIPS16 return thunks). */
7777 if (gdbarch_in_solib_return_trampoline (gdbarch,
7778 ecs->event_thread->stop_pc (),
7779 ecs->stop_func_name)
7780 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
7782 /* Determine where this trampoline returns. */
7783 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
7784 CORE_ADDR real_stop_pc
7785 = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
7787 infrun_debug_printf ("stepped into solib return tramp");
7789 /* Only proceed through if we know where it's going. */
7790 if (real_stop_pc)
7792 /* And put the step-breakpoint there and go until there. */
7793 symtab_and_line sr_sal;
7794 sr_sal.pc = real_stop_pc;
7795 sr_sal.section = find_pc_overlay (sr_sal.pc);
7796 sr_sal.pspace = get_frame_program_space (frame);
7798 /* Do not specify what the fp should be when we stop since
7799 on some machines the prologue is where the new fp value
7800 is established. */
7801 insert_step_resume_breakpoint_at_sal (gdbarch,
7802 sr_sal, null_frame_id);
7804 /* Restart without fiddling with the step ranges or
7805 other state. */
7806 keep_going (ecs);
7807 return;
7811 /* Check for subroutine calls. The check for the current frame
7812 equalling the step ID is not necessary - the check of the
7813 previous frame's ID is sufficient - but it is a common case and
7814 cheaper than checking the previous frame's ID.
7816 NOTE: frame_id::operator== will never report two invalid frame IDs as
7817 being equal, so to get into this block, both the current and
7818 previous frame must have valid frame IDs. */
7819 /* The outer_frame_id check is a heuristic to detect stepping
7820 through startup code. If we step over an instruction which
7821 sets the stack pointer from an invalid value to a valid value,
7822 we may detect that as a subroutine call from the mythical
7823 "outermost" function. This could be fixed by marking
7824 outermost frames as !stack_p,code_p,special_p. Then the
7825 initial outermost frame, before sp was valid, would
7826 have code_addr == &_start. See the comment in frame_id::operator==
7827 for more. */
7829 /* We want "nexti" to step into, not over, signal handlers invoked
7830 by the kernel, therefore this subroutine check should not trigger
7831 for a signal handler invocation. On most platforms, this is already
7832 not the case, as the kernel puts a signal trampoline frame onto the
7833 stack to handle proper return after the handler, and therefore at this
7834 point, the current frame is a grandchild of the step frame, not a
7835 child. However, on some platforms, the kernel actually uses a
7836 trampoline to handle *invocation* of the handler. In that case,
7837 when executing the first instruction of the trampoline, this check
7838 would erroneously detect the trampoline invocation as a subroutine
7839 call. Fix this by checking for SIGTRAMP_FRAME. */
7840 if ((get_stack_frame_id (frame)
7841 != ecs->event_thread->control.step_stack_frame_id)
7842 && get_frame_type (frame) != SIGTRAMP_FRAME
7843 && ((frame_unwind_caller_id (frame)
7844 == ecs->event_thread->control.step_stack_frame_id)
7845 && ((ecs->event_thread->control.step_stack_frame_id
7846 != outer_frame_id)
7847 || (ecs->event_thread->control.step_start_function
7848 != find_pc_function (ecs->event_thread->stop_pc ())))))
7850 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
7851 CORE_ADDR real_stop_pc;
7853 infrun_debug_printf ("stepped into subroutine");
7855 if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
7857 /* I presume that step_over_calls is only 0 when we're
7858 supposed to be stepping at the assembly language level
7859 ("stepi"). Just stop. */
7860 /* And this works the same backward as frontward. MVS */
7861 end_stepping_range (ecs);
7862 return;
7865 /* Reverse stepping through solib trampolines. */
7867 if (execution_direction == EXEC_REVERSE
7868 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
7869 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
7870 || (ecs->stop_func_start == 0
7871 && in_solib_dynsym_resolve_code (stop_pc))))
7873 /* Any solib trampoline code can be handled in reverse
7874 by simply continuing to single-step. We have already
7875 executed the solib function (backwards), and a few
7876 steps will take us back through the trampoline to the
7877 caller. */
7878 keep_going (ecs);
7879 return;
7882 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
7884 /* We're doing a "next".
7886 Normal (forward) execution: set a breakpoint at the
7887 callee's return address (the address at which the caller
7888 will resume).
7890 Reverse (backward) execution. set the step-resume
7891 breakpoint at the start of the function that we just
7892 stepped into (backwards), and continue to there. When we
7893 get there, we'll need to single-step back to the caller. */
7895 if (execution_direction == EXEC_REVERSE)
7897 /* If we're already at the start of the function, we've either
7898 just stepped backward into a single instruction function,
7899 or stepped back out of a signal handler to the first instruction
7900 of the function. Just keep going, which will single-step back
7901 to the caller. */
7902 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
7904 /* Normal function call return (static or dynamic). */
7905 symtab_and_line sr_sal;
7906 sr_sal.pc = ecs->stop_func_start;
7907 sr_sal.pspace = get_frame_program_space (frame);
7908 insert_step_resume_breakpoint_at_sal (gdbarch,
7909 sr_sal, get_stack_frame_id (frame));
7912 else
7913 insert_step_resume_breakpoint_at_caller (frame);
7915 keep_going (ecs);
7916 return;
7919 /* If we are in a function call trampoline (a stub between the
7920 calling routine and the real function), locate the real
7921 function. That's what tells us (a) whether we want to step
7922 into it at all, and (b) what prologue we want to run to the
7923 end of, if we do step into it. */
7924 real_stop_pc = skip_language_trampoline (frame, stop_pc);
7925 if (real_stop_pc == 0)
7926 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
7927 if (real_stop_pc != 0)
7928 ecs->stop_func_start = real_stop_pc;
7930 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
7932 symtab_and_line sr_sal;
7933 sr_sal.pc = ecs->stop_func_start;
7934 sr_sal.pspace = get_frame_program_space (frame);
7936 insert_step_resume_breakpoint_at_sal (gdbarch,
7937 sr_sal, null_frame_id);
7938 keep_going (ecs);
7939 return;
7942 /* If we have line number information for the function we are
7943 thinking of stepping into and the function isn't on the skip
7944 list, step into it.
7946 If there are several symtabs at that PC (e.g. with include
7947 files), just want to know whether *any* of them have line
7948 numbers. find_pc_line handles this. */
7950 struct symtab_and_line tmp_sal;
7952 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
7953 if (tmp_sal.line != 0
7954 && !function_name_is_marked_for_skip (ecs->stop_func_name,
7955 tmp_sal)
7956 && !inline_frame_is_marked_for_skip (true, ecs->event_thread))
7958 if (execution_direction == EXEC_REVERSE)
7959 handle_step_into_function_backward (gdbarch, ecs);
7960 else
7961 handle_step_into_function (gdbarch, ecs);
7962 return;
7966 /* If we have no line number and the step-stop-if-no-debug is
7967 set, we stop the step so that the user has a chance to switch
7968 in assembly mode. */
7969 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7970 && step_stop_if_no_debug)
7972 end_stepping_range (ecs);
7973 return;
7976 if (execution_direction == EXEC_REVERSE)
7978 /* If we're already at the start of the function, we've either just
7979 stepped backward into a single instruction function without line
7980 number info, or stepped back out of a signal handler to the first
7981 instruction of the function without line number info. Just keep
7982 going, which will single-step back to the caller. */
7983 if (ecs->stop_func_start != stop_pc)
7985 /* Set a breakpoint at callee's start address.
7986 From there we can step once and be back in the caller. */
7987 symtab_and_line sr_sal;
7988 sr_sal.pc = ecs->stop_func_start;
7989 sr_sal.pspace = get_frame_program_space (frame);
7990 insert_step_resume_breakpoint_at_sal (gdbarch,
7991 sr_sal, null_frame_id);
7994 else
7995 /* Set a breakpoint at callee's return address (the address
7996 at which the caller will resume). */
7997 insert_step_resume_breakpoint_at_caller (frame);
7999 keep_going (ecs);
8000 return;
8003 /* Reverse stepping through solib trampolines. */
8005 if (execution_direction == EXEC_REVERSE
8006 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
8008 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
8010 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
8011 || (ecs->stop_func_start == 0
8012 && in_solib_dynsym_resolve_code (stop_pc)))
8014 /* Any solib trampoline code can be handled in reverse
8015 by simply continuing to single-step. We have already
8016 executed the solib function (backwards), and a few
8017 steps will take us back through the trampoline to the
8018 caller. */
8019 keep_going (ecs);
8020 return;
8022 else if (in_solib_dynsym_resolve_code (stop_pc))
8024 /* Stepped backward into the solib dynsym resolver.
8025 Set a breakpoint at its start and continue, then
8026 one more step will take us out. */
8027 symtab_and_line sr_sal;
8028 sr_sal.pc = ecs->stop_func_start;
8029 sr_sal.pspace = get_frame_program_space (frame);
8030 insert_step_resume_breakpoint_at_sal (gdbarch,
8031 sr_sal, null_frame_id);
8032 keep_going (ecs);
8033 return;
8037 /* This always returns the sal for the inner-most frame when we are in a
8038 stack of inlined frames, even if GDB actually believes that it is in a
8039 more outer frame. This is checked for below by calls to
8040 inline_skipped_frames. */
8041 stop_pc_sal = find_pc_line (ecs->event_thread->stop_pc (), 0);
8043 /* NOTE: tausq/2004-05-24: This if block used to be done before all
8044 the trampoline processing logic, however, there are some trampolines
8045 that have no names, so we should do trampoline handling first. */
8046 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
8047 && ecs->stop_func_name == nullptr
8048 && stop_pc_sal.line == 0)
8050 infrun_debug_printf ("stepped into undebuggable function");
8052 /* The inferior just stepped into, or returned to, an
8053 undebuggable function (where there is no debugging information
8054 and no line number corresponding to the address where the
8055 inferior stopped). Since we want to skip this kind of code,
8056 we keep going until the inferior returns from this
8057 function - unless the user has asked us not to (via
8058 set step-mode) or we no longer know how to get back
8059 to the call site. */
8060 if (step_stop_if_no_debug
8061 || !frame_id_p (frame_unwind_caller_id (frame)))
8063 /* If we have no line number and the step-stop-if-no-debug
8064 is set, we stop the step so that the user has a chance to
8065 switch in assembly mode. */
8066 end_stepping_range (ecs);
8067 return;
8069 else
8071 /* Set a breakpoint at callee's return address (the address
8072 at which the caller will resume). */
8073 insert_step_resume_breakpoint_at_caller (frame);
8074 keep_going (ecs);
8075 return;
8079 if (execution_direction == EXEC_REVERSE
8080 && ecs->event_thread->control.proceed_to_finish
8081 && ecs->event_thread->stop_pc () >= ecs->stop_func_alt_start
8082 && ecs->event_thread->stop_pc () < ecs->stop_func_start)
8084 /* We are executing the reverse-finish command, the system
8085 supports multiple entry points, and we are finishing a
8086 function in reverse. If we are between the entry points,
8087 single-step back to the alternate entry point. If we are at
8088 the alternate entry point, we just need to back up by one more
8089 single-step, which should take us back to the function call. */
8090 ecs->event_thread->control.step_range_start
8091 = ecs->event_thread->control.step_range_end = 1;
8092 keep_going (ecs);
8093 return;
8097 if (ecs->event_thread->control.step_range_end == 1)
8099 /* It is stepi or nexti. We always want to stop stepping after
8100 one instruction. */
8101 infrun_debug_printf ("stepi/nexti");
8102 end_stepping_range (ecs);
8103 return;
8106 if (stop_pc_sal.line == 0)
8108 /* We have no line number information. That means to stop
8109 stepping (does this always happen right after one instruction,
8110 when we do "s" in a function with no line numbers,
8111 or can this happen as a result of a return or longjmp?). */
8112 infrun_debug_printf ("line number info");
8113 end_stepping_range (ecs);
8114 return;
8117 /* Look for "calls" to inlined functions, part one. If the inline
8118 frame machinery detected some skipped call sites, we have entered
8119 a new inline function. */
8121 if ((*curr_frame_id == original_frame_id)
8122 && inline_skipped_frames (ecs->event_thread))
8124 infrun_debug_printf ("stepped into inlined function");
8126 symtab_and_line call_sal = find_frame_sal (frame);
8128 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
8130 /* For "step", we're going to stop. But if the call site
8131 for this inlined function is on the same source line as
8132 we were previously stepping, go down into the function
8133 first. Otherwise stop at the call site. */
8135 if (call_sal.line == ecs->event_thread->current_line
8136 && call_sal.symtab == ecs->event_thread->current_symtab)
8138 step_into_inline_frame (ecs->event_thread);
8139 if (inline_frame_is_marked_for_skip (false, ecs->event_thread))
8141 keep_going (ecs);
8142 return;
8146 end_stepping_range (ecs);
8147 return;
8149 else
8151 /* For "next", we should stop at the call site if it is on a
8152 different source line. Otherwise continue through the
8153 inlined function. */
8154 if (call_sal.line == ecs->event_thread->current_line
8155 && call_sal.symtab == ecs->event_thread->current_symtab)
8156 keep_going (ecs);
8157 else
8158 end_stepping_range (ecs);
8159 return;
8163 /* Look for "calls" to inlined functions, part two. If we are still
8164 in the same real function we were stepping through, but we have
8165 to go further up to find the exact frame ID, we are stepping
8166 through a more inlined call beyond its call site. */
8168 if (get_frame_type (frame) == INLINE_FRAME
8169 && (*curr_frame_id != original_frame_id)
8170 && stepped_in_from (frame, original_frame_id))
8172 infrun_debug_printf ("stepping through inlined function");
8174 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL
8175 || inline_frame_is_marked_for_skip (false, ecs->event_thread))
8176 keep_going (ecs);
8177 else
8178 end_stepping_range (ecs);
8179 return;
8182 bool refresh_step_info = true;
8183 if ((ecs->event_thread->stop_pc () == stop_pc_sal.pc)
8184 && (ecs->event_thread->current_line != stop_pc_sal.line
8185 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
8187 /* We are at a different line. */
8189 if (stop_pc_sal.is_stmt)
8191 if (execution_direction == EXEC_REVERSE)
8193 /* We are stepping backwards; make sure we have reached the
8194 beginning of the line. */
8195 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
8196 CORE_ADDR start_line_pc
8197 = update_line_range_start (stop_pc, ecs);
8199 if (stop_pc != start_line_pc)
8201 /* Have not reached the beginning of the source code line.
8202 Set a step range. Execution should stop in any function
8203 calls we execute back into before reaching the beginning
8204 of the line. */
8205 ecs->event_thread->control.step_range_start
8206 = start_line_pc;
8207 ecs->event_thread->control.step_range_end = stop_pc;
8208 set_step_info (ecs->event_thread, frame, stop_pc_sal);
8209 keep_going (ecs);
8210 return;
8214 /* We are at the start of a statement.
8216 So stop. Note that we don't stop if we step into the middle of a
8217 statement. That is said to make things like for (;;) statements
8218 work better. */
8219 infrun_debug_printf ("stepped to a different line");
8220 end_stepping_range (ecs);
8221 return;
8223 else if (*curr_frame_id == original_frame_id)
8225 /* We are not at the start of a statement, and we have not changed
8226 frame.
8228 We ignore this line table entry, and continue stepping forward,
8229 looking for a better place to stop. */
8230 refresh_step_info = false;
8231 infrun_debug_printf ("stepped to a different line, but "
8232 "it's not the start of a statement");
8234 else
8236 /* We are not at the start of a statement, and we have changed frame.
8238 We ignore this line table entry, and continue stepping forward,
8239 looking for a better place to stop. Keep refresh_step_info at
8240 true to note that the frame has changed, but ignore the line
8241 number to make sure we don't ignore a subsequent entry with the
8242 same line number. */
8243 stop_pc_sal.line = 0;
8244 infrun_debug_printf ("stepped to a different frame, but "
8245 "it's not the start of a statement");
8248 else if (execution_direction == EXEC_REVERSE
8249 && *curr_frame_id != original_frame_id
8250 && original_frame_id.code_addr_p && curr_frame_id->code_addr_p
8251 && original_frame_id.code_addr == curr_frame_id->code_addr)
8253 /* If we enter here, we're leaving a recursive function call. In this
8254 situation, we shouldn't refresh the step information, because if we
8255 do, we'll lose the frame_id of when we started stepping, and this
8256 will make GDB not know we need to print frame information. */
8257 refresh_step_info = false;
8258 infrun_debug_printf ("reverse stepping, left a recursive call, don't "
8259 "update step info so we remember we left a frame");
8262 /* We aren't done stepping.
8264 Optimize by setting the stepping range to the line.
8265 (We might not be in the original line, but if we entered a
8266 new line in mid-statement, we continue stepping. This makes
8267 things like for(;;) statements work better.)
8269 If we entered a SAL that indicates a non-statement line table entry,
8270 then we update the stepping range, but we don't update the step info,
8271 which includes things like the line number we are stepping away from.
8272 This means we will stop when we find a line table entry that is marked
8273 as is-statement, even if it matches the non-statement one we just
8274 stepped into. */
8276 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
8277 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
8278 ecs->event_thread->control.may_range_step = 1;
8279 infrun_debug_printf
8280 ("updated step range, start = %s, end = %s, may_range_step = %d",
8281 paddress (gdbarch, ecs->event_thread->control.step_range_start),
8282 paddress (gdbarch, ecs->event_thread->control.step_range_end),
8283 ecs->event_thread->control.may_range_step);
8284 if (refresh_step_info)
8285 set_step_info (ecs->event_thread, frame, stop_pc_sal);
8287 infrun_debug_printf ("keep going");
8289 if (execution_direction == EXEC_REVERSE)
8291 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
8293 /* Make sure the stop_pc is set to the beginning of the line. */
8294 if (stop_pc != ecs->event_thread->control.step_range_start)
8295 ecs->event_thread->control.step_range_start
8296 = update_line_range_start (stop_pc, ecs);
8299 keep_going (ecs);
8302 static bool restart_stepped_thread (process_stratum_target *resume_target,
8303 ptid_t resume_ptid);
8305 /* In all-stop mode, if we're currently stepping but have stopped in
8306 some other thread, we may need to switch back to the stepped
8307 thread. Returns true if we set the inferior running, false if we left
8308 it stopped (and the event needs further processing). */
8310 static bool
8311 switch_back_to_stepped_thread (struct execution_control_state *ecs)
8313 if (!target_is_non_stop_p ())
8315 /* If any thread is blocked on some internal breakpoint, and we
8316 simply need to step over that breakpoint to get it going
8317 again, do that first. */
8319 /* However, if we see an event for the stepping thread, then we
8320 know all other threads have been moved past their breakpoints
8321 already. Let the caller check whether the step is finished,
8322 etc., before deciding to move it past a breakpoint. */
8323 if (ecs->event_thread->control.step_range_end != 0)
8324 return false;
8326 /* Check if the current thread is blocked on an incomplete
8327 step-over, interrupted by a random signal. */
8328 if (ecs->event_thread->control.trap_expected
8329 && ecs->event_thread->stop_signal () != GDB_SIGNAL_TRAP)
8331 infrun_debug_printf
8332 ("need to finish step-over of [%s]",
8333 ecs->event_thread->ptid.to_string ().c_str ());
8334 keep_going (ecs);
8335 return true;
8338 /* Check if the current thread is blocked by a single-step
8339 breakpoint of another thread. */
8340 if (ecs->hit_singlestep_breakpoint)
8342 infrun_debug_printf ("need to step [%s] over single-step breakpoint",
8343 ecs->ptid.to_string ().c_str ());
8344 keep_going (ecs);
8345 return true;
8348 /* If this thread needs yet another step-over (e.g., stepping
8349 through a delay slot), do it first before moving on to
8350 another thread. */
8351 if (thread_still_needs_step_over (ecs->event_thread))
8353 infrun_debug_printf
8354 ("thread [%s] still needs step-over",
8355 ecs->event_thread->ptid.to_string ().c_str ());
8356 keep_going (ecs);
8357 return true;
8360 /* If scheduler locking applies even if not stepping, there's no
8361 need to walk over threads. Above we've checked whether the
8362 current thread is stepping. If some other thread not the
8363 event thread is stepping, then it must be that scheduler
8364 locking is not in effect. */
8365 if (schedlock_applies (ecs->event_thread))
8366 return false;
8368 /* Otherwise, we no longer expect a trap in the current thread.
8369 Clear the trap_expected flag before switching back -- this is
8370 what keep_going does as well, if we call it. */
8371 ecs->event_thread->control.trap_expected = 0;
8373 /* Likewise, clear the signal if it should not be passed. */
8374 if (!signal_program[ecs->event_thread->stop_signal ()])
8375 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
8377 if (restart_stepped_thread (ecs->target, ecs->ptid))
8379 prepare_to_wait (ecs);
8380 return true;
8383 switch_to_thread (ecs->event_thread);
8386 return false;
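/* Note that with a non-stop target, switch_back_to_stepped_thread
   above returns false without doing anything: each thread's events
   are handled independently, so there is no "stepped thread" to
   switch back to.  */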
8389 /* Look for the thread that was stepping, and resume it.
8390 RESUME_TARGET / RESUME_PTID indicate the set of threads the caller
8391 is resuming. Return true if a thread was started, false
8392 otherwise. */
8394 static bool
8395 restart_stepped_thread (process_stratum_target *resume_target,
8396 ptid_t resume_ptid)
8398 /* Do all pending step-overs before actually proceeding with
8399 step/next/etc. */
8400 if (start_step_over ())
8401 return true;
8403 for (thread_info *tp : all_threads_safe ())
8405 if (tp->state == THREAD_EXITED)
8406 continue;
8408 if (tp->has_pending_waitstatus ())
8409 continue;
8411 /* Ignore threads of processes the caller is not
8412 resuming. */
8413 if (!sched_multi
8414 && (tp->inf->process_target () != resume_target
8415 || tp->inf->pid != resume_ptid.pid ()))
8416 continue;
8418 if (tp->control.trap_expected)
8420 infrun_debug_printf ("switching back to stepped thread (step-over)");
8422 if (keep_going_stepped_thread (tp))
8423 return true;
8427 for (thread_info *tp : all_threads_safe ())
8429 if (tp->state == THREAD_EXITED)
8430 continue;
8432 if (tp->has_pending_waitstatus ())
8433 continue;
8435 /* Ignore threads of processes the caller is not
8436 resuming. */
8437 if (!sched_multi
8438 && (tp->inf->process_target () != resume_target
8439 || tp->inf->pid != resume_ptid.pid ()))
8440 continue;
8442 /* Did we find the stepping thread? */
8443 if (tp->control.step_range_end)
8445 infrun_debug_printf ("switching back to stepped thread (stepping)");
8447 if (keep_going_stepped_thread (tp))
8448 return true;
8452 return false;
8455 /* See infrun.h. */
8457 void
8458 restart_after_all_stop_detach (process_stratum_target *proc_target)
8460 /* Note we don't check target_is_non_stop_p() here, because the
8461 current inferior may no longer have a process_stratum target
8462 pushed, as we just detached. */
8464 /* See if we have a THREAD_RUNNING thread that needs to be
8465 re-resumed. If we have any thread that is already executing,
8466 then we don't need to resume the target -- it has already been
8467 resumed. With the remote target (in all-stop), it's even
8468 impossible to issue another resumption if the target is already
8469 resumed, until the target reports a stop. */
8470 for (thread_info *thr : all_threads (proc_target))
8472 if (thr->state != THREAD_RUNNING)
8473 continue;
8475 /* If we have any thread that is already executing, then we
8476 don't need to resume the target -- it has already been
8477 resumed. */
8478 if (thr->executing ())
8479 return;
8481 /* If we have a pending event to process, skip resuming the
8482 target and go straight to processing it. */
8483 if (thr->resumed () && thr->has_pending_waitstatus ())
8484 return;
8487 /* Alright, we need to re-resume the target. If a thread was
8488 stepping, we need to restart it stepping. */
8489 if (restart_stepped_thread (proc_target, minus_one_ptid))
8490 return;
8492 /* Otherwise, find the first THREAD_RUNNING thread and resume
8493 it. */
8494 for (thread_info *thr : all_threads (proc_target))
8496 if (thr->state != THREAD_RUNNING)
8497 continue;
8499 execution_control_state ecs (thr);
8500 switch_to_thread (thr);
8501 keep_going (&ecs);
8502 return;
8506 /* Set a previously stepped thread back to stepping. Returns true on
8507 success, false if the resume is not possible (e.g., the thread
8508 vanished). */
8510 static bool
8511 keep_going_stepped_thread (struct thread_info *tp)
8513 frame_info_ptr frame;
8515 /* If the stepping thread exited, then don't try to switch back and
8516 resume it, which could fail in several different ways depending
8517 on the target. Instead, just keep going.
8519 We can find a stepping dead thread in the thread list in two
8520 cases:
8522 - The target supports thread exit events, and when the target
8523 tries to delete the thread from the thread list, inferior_ptid
8524 pointed at the exiting thread. In such case, calling
8525 delete_thread does not really remove the thread from the list;
8526 instead, the thread is left listed, with 'exited' state.
8528 - The target's debug interface does not support thread exit
8529 events, and so we have no idea whatsoever if the previously
8530 stepping thread is still alive. For that reason, we need to
8531 synchronously query the target now. */
8533 if (tp->state == THREAD_EXITED || !target_thread_alive (tp->ptid))
8535 infrun_debug_printf ("not resuming previously stepped thread, it has "
8536 "vanished");
8538 delete_thread (tp);
8539 return false;
8542 infrun_debug_printf ("resuming previously stepped thread");
8544 execution_control_state ecs (tp);
8545 switch_to_thread (tp);
8547 tp->set_stop_pc (regcache_read_pc (get_thread_regcache (tp)));
8548 frame = get_current_frame ();
8550 /* If the PC of the thread we were trying to single-step has
8551 changed, then that thread has trapped or been signaled, but the
8552 event has not been reported to GDB yet. Re-poll the target
8553 looking for this particular thread's event (i.e. temporarily
8554 enable schedlock) by:
8556 - setting a break at the current PC
8557 - resuming that particular thread, only (by setting trap
8558 expected)
8560 This prevents us from continuously moving the single-step
8561 breakpoint forward, one instruction at a time, and overstepping. */
8563 if (tp->stop_pc () != tp->prev_pc)
8565 ptid_t resume_ptid;
8567 infrun_debug_printf ("expected thread advanced also (%s -> %s)",
8568 paddress (current_inferior ()->arch (), tp->prev_pc),
8569 paddress (current_inferior ()->arch (),
8570 tp->stop_pc ()));
8572 /* Clear the info of the previous step-over, as it's no longer
8573 valid (if the thread was trying to step over a breakpoint, it
8574 has already succeeded). It's what keep_going would do too,
8575 if we called it. Do this before trying to insert the sss
8576 breakpoint, otherwise if we were previously trying to step
8577 over this exact address in another thread, the breakpoint is
8578 skipped. */
8579 clear_step_over_info ();
8580 tp->control.trap_expected = 0;
8582 insert_single_step_breakpoint (get_frame_arch (frame),
8583 get_frame_address_space (frame),
8584 tp->stop_pc ());
8586 tp->set_resumed (true);
8587 resume_ptid = internal_resume_ptid (tp->control.stepping_command);
8588 do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
8590 else
8592 infrun_debug_printf ("expected thread still hasn't advanced");
8594 keep_going_pass_signal (&ecs);
8597 return true;
8600 /* Is thread TP in the middle of (software or hardware)
8601 single-stepping? (Note the result of this function must never be
8602 passed directly as target_resume's STEP parameter.) */
8604 static bool
8605 currently_stepping (struct thread_info *tp)
8607 return ((tp->control.step_range_end
8608 && tp->control.step_resume_breakpoint == nullptr)
8609 || tp->control.trap_expected
8610 || tp->stepped_breakpoint
8611 || bpstat_should_step ());
8614 /* Inferior has stepped into a subroutine call with source code that
8615 we should not step over. Step to the first line of code in
8616 it. */
8618 static void
8619 handle_step_into_function (struct gdbarch *gdbarch,
8620 struct execution_control_state *ecs)
8622 fill_in_stop_func (gdbarch, ecs);
8624 compunit_symtab *cust
8625 = find_pc_compunit_symtab (ecs->event_thread->stop_pc ());
8626 if (cust != nullptr && cust->language () != language_asm)
8627 ecs->stop_func_start
8628 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
8630 symtab_and_line stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
8631 /* Use the step-resume breakpoint to step until the end of the prologue,
8632 even if that involves jumps (as it seems to on the vax under
8633 4.2). */
8634 /* If the prologue ends in the middle of a source line, continue to
8635 the end of that source line (if it is still within the function).
8636 Otherwise, just go to end of prologue. */
8637 if (stop_func_sal.end
8638 && stop_func_sal.pc != ecs->stop_func_start
8639 && stop_func_sal.end < ecs->stop_func_end)
8640 ecs->stop_func_start = stop_func_sal.end;
8642 /* Architectures which require breakpoint adjustment might not be able
8643 to place a breakpoint at the computed address. If so, the test
8644 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
8645 ecs->stop_func_start to an address at which a breakpoint may be
8646 legitimately placed.
8648 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
8649 made, GDB will enter an infinite loop when stepping through
8650 optimized code consisting of VLIW instructions which contain
8651 subinstructions corresponding to different source lines. On
8652 FR-V, it's not permitted to place a breakpoint on any but the
8653 first subinstruction of a VLIW instruction. When a breakpoint is
8654 set, GDB will adjust the breakpoint address to the beginning of
8655 the VLIW instruction. Thus, we need to make the corresponding
8656 adjustment here when computing the stop address. */
8658 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
8660 ecs->stop_func_start
8661 = gdbarch_adjust_breakpoint_address (gdbarch,
8662 ecs->stop_func_start);
8665 if (ecs->stop_func_start == ecs->event_thread->stop_pc ())
8667 /* We are already there: stop now. */
8668 end_stepping_range (ecs);
8669 return;
8671 else
8673 /* Put the step-breakpoint there and go until there. */
8674 symtab_and_line sr_sal;
8675 sr_sal.pc = ecs->stop_func_start;
8676 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
8677 sr_sal.pspace = get_frame_program_space (get_current_frame ());
8679 /* Do not specify what the fp should be when we stop since on
8680 some machines the prologue is where the new fp value is
8681 established. */
8682 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
8684 /* And make sure stepping stops right away then. */
8685 ecs->event_thread->control.step_range_end
8686 = ecs->event_thread->control.step_range_start;
8688 keep_going (ecs);
8691 /* Inferior has stepped backward into a subroutine call with source
8692 code that we should not step over. Step to the beginning of the
8693 last line of code in it. */
8695 static void
8696 handle_step_into_function_backward (struct gdbarch *gdbarch,
8697 struct execution_control_state *ecs)
8699 struct compunit_symtab *cust;
8700 struct symtab_and_line stop_func_sal;
8702 fill_in_stop_func (gdbarch, ecs);
8704 cust = find_pc_compunit_symtab (ecs->event_thread->stop_pc ());
8705 if (cust != nullptr && cust->language () != language_asm)
8706 ecs->stop_func_start
8707 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
8709 stop_func_sal = find_pc_line (ecs->event_thread->stop_pc (), 0);
8711 /* OK, we're just going to keep stepping here. */
8712 if (stop_func_sal.pc == ecs->event_thread->stop_pc ())
8714 /* We're there already. Just stop stepping now. */
8715 end_stepping_range (ecs);
8717 else
8719 /* Else just reset the step range and keep going.
8720 No step-resume breakpoint, they don't work for
8721 epilogues, which can have multiple entry paths. */
8722 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
8723 ecs->event_thread->control.step_range_end = stop_func_sal.end;
8724 keep_going (ecs);
8726 return;
8729 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
8730 This is used both for functions and for skipping over code. */
8732 static void
8733 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
8734 struct symtab_and_line sr_sal,
8735 struct frame_id sr_id,
8736 enum bptype sr_type)
8738 /* There should never be more than one step-resume or longjmp-resume
8739 breakpoint per thread, so we should never be setting a new
8740 step_resume_breakpoint when one is already active. */
8741 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == nullptr);
8742 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
8744 infrun_debug_printf ("inserting step-resume breakpoint at %s",
8745 paddress (gdbarch, sr_sal.pc));
8747 inferior_thread ()->control.step_resume_breakpoint
8748 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type).release ();
8751 void
8752 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
8753 struct symtab_and_line sr_sal,
8754 struct frame_id sr_id)
8756 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
8757 sr_sal, sr_id,
8758 bp_step_resume);
8761 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
8762 This is used to skip a potential signal handler.
8764 This is called with the interrupted function's frame. The signal
8765 handler, when it returns, will resume the interrupted function at
8766 RETURN_FRAME.pc. */
8768 static void
8769 insert_hp_step_resume_breakpoint_at_frame (const frame_info_ptr &return_frame)
8771 gdb_assert (return_frame != nullptr);
8773 struct gdbarch *gdbarch = get_frame_arch (return_frame);
8775 symtab_and_line sr_sal;
8776 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
8777 sr_sal.section = find_pc_overlay (sr_sal.pc);
8778 sr_sal.pspace = get_frame_program_space (return_frame);
8780 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
8781 get_stack_frame_id (return_frame),
8782 bp_hp_step_resume);
8785 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
8786 is used to skip a function after stepping into it (for "next" or if
8787 the called function has no debugging information).
8789 The current function has almost always been reached by single
8790 stepping a call or return instruction. NEXT_FRAME belongs to the
8791 current function, and the breakpoint will be set at the caller's
8792 resume address.
8794 This is a separate function rather than reusing
8795 insert_hp_step_resume_breakpoint_at_frame in order to avoid
8796 get_prev_frame, which may stop prematurely (see the implementation
8797 of frame_unwind_caller_id for an example). */
8799 static void
8800 insert_step_resume_breakpoint_at_caller (const frame_info_ptr &next_frame)
8802 /* We shouldn't have gotten here if we don't know where the call site
8803 is. */
8804 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
8806 struct gdbarch *gdbarch = frame_unwind_caller_arch (next_frame);
8808 symtab_and_line sr_sal;
8809 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
8810 frame_unwind_caller_pc (next_frame));
8811 sr_sal.section = find_pc_overlay (sr_sal.pc);
8812 sr_sal.pspace = frame_unwind_program_space (next_frame);
8814 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
8815 frame_unwind_caller_id (next_frame));
8818 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
8819 new breakpoint at the target of a jmp_buf. The handling of
8820 longjmp-resume uses the same mechanisms used for handling
8821 "step-resume" breakpoints. */
8823 static void
8824 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
8826 /* There should never be more than one longjmp-resume breakpoint per
8827 thread, so we should never be setting a new
8828 longjmp_resume_breakpoint when one is already active. */
8829 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == nullptr);
8831 infrun_debug_printf ("inserting longjmp-resume breakpoint at %s",
8832 paddress (gdbarch, pc));
8834 inferior_thread ()->control.exception_resume_breakpoint =
8835 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume).release ();
8838 /* Insert an exception resume breakpoint. TP is the thread throwing
8839 the exception. The block B is the block of the unwinder debug hook
8840 function. FRAME is the frame corresponding to the call to this
8841 function. SYM is the symbol of the function argument holding the
8842 target PC of the exception. */
8844 static void
8845 insert_exception_resume_breakpoint (struct thread_info *tp,
8846 const struct block *b,
8847 const frame_info_ptr &frame,
8848 struct symbol *sym)
8852 struct block_symbol vsym;
8853 struct value *value;
8854 CORE_ADDR handler;
8855 struct breakpoint *bp;
8857 vsym = lookup_symbol_search_name (sym->search_name (),
8858 b, SEARCH_VAR_DOMAIN);
8859 value = read_var_value (vsym.symbol, vsym.block, frame);
8860 /* If the value was optimized out, revert to the old behavior. */
8861 if (! value->optimized_out ())
8863 handler = value_as_address (value);
8865 infrun_debug_printf ("exception resume at %lx",
8866 (unsigned long) handler);
8868 /* set_momentary_breakpoint_at_pc creates a thread-specific
8869 breakpoint for the current inferior thread. */
8870 gdb_assert (tp == inferior_thread ());
8871 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
8872 handler,
8873 bp_exception_resume).release ();
8875 tp->control.exception_resume_breakpoint = bp;
8878 catch (const gdb_exception_error &e)
8880 /* We want to ignore errors here. */
8884 /* A helper for check_exception_resume that sets an
8885 exception-breakpoint based on a SystemTap probe. */
8887 static void
8888 insert_exception_resume_from_probe (struct thread_info *tp,
8889 const struct bound_probe *probe,
8890 const frame_info_ptr &frame)
8892 struct value *arg_value;
8893 CORE_ADDR handler;
8894 struct breakpoint *bp;
8896 arg_value = probe_safe_evaluate_at_pc (frame, 1);
8897 if (!arg_value)
8898 return;
8900 handler = value_as_address (arg_value);
8902 infrun_debug_printf ("exception resume at %s",
8903 paddress (probe->objfile->arch (), handler));
8905 /* set_momentary_breakpoint_at_pc creates a thread-specific breakpoint
8906 for the current inferior thread. */
8907 gdb_assert (tp == inferior_thread ());
8908 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
8909 handler, bp_exception_resume).release ();
8910 tp->control.exception_resume_breakpoint = bp;
8913 /* This is called when an exception has been intercepted. Check to
8914 see whether the exception's destination is of interest, and if so,
8915 set an exception resume breakpoint there. */
8917 static void
8918 check_exception_resume (struct execution_control_state *ecs,
8919 const frame_info_ptr &frame)
8921 struct bound_probe probe;
8922 struct symbol *func;
8924 /* First see if this exception unwinding breakpoint was set via a
8925 SystemTap probe point. If so, the probe has two arguments: the
8926 CFA and the HANDLER. We ignore the CFA, extract the handler, and
8927 set a breakpoint there. */
8928 probe = find_probe_by_pc (get_frame_pc (frame));
8929 if (probe.prob)
8931 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
8932 return;
8935 func = get_frame_function (frame);
8936 if (!func)
8937 return;
8941 const struct block *b;
8942 int argno = 0;
8944 /* The exception breakpoint is a thread-specific breakpoint on
8945 the unwinder's debug hook, declared as:
8947 void _Unwind_DebugHook (void *cfa, void *handler);
8949 The CFA argument indicates the frame to which control is
8950 about to be transferred. HANDLER is the destination PC.
8952 We ignore the CFA and set a temporary breakpoint at HANDLER.
8953 This is not extremely efficient but it avoids issues in gdb
8954 with computing the DWARF CFA, and it also works even in weird
8955 cases such as throwing an exception from inside a signal
8956 handler. */
8958 b = func->value_block ();
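/* Scan the hook's arguments below: skip the first one (the CFA) and
set the exception-resume breakpoint on the second one (the handler
PC). */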
8959 for (struct symbol *sym : block_iterator_range (b))
8961 if (!sym->is_argument ())
8962 continue;
8964 if (argno == 0)
8965 ++argno;
8966 else
8968 insert_exception_resume_breakpoint (ecs->event_thread,
8969 b, frame, sym);
8970 break;
8974 catch (const gdb_exception_error &e)
8979 static void
8980 stop_waiting (struct execution_control_state *ecs)
8982 infrun_debug_printf ("stop_waiting");
8984 /* Let callers know we don't want to wait for the inferior anymore. */
8985 ecs->wait_some_more = 0;
8988 /* Like keep_going, but passes the signal to the inferior, even if the
8989 signal is set to nopass. */
8991 static void
8992 keep_going_pass_signal (struct execution_control_state *ecs)
8994 gdb_assert (ecs->event_thread->ptid == inferior_ptid);
8995 gdb_assert (!ecs->event_thread->resumed ());
8997 /* Save the pc before execution, to compare with pc after stop. */
8998 ecs->event_thread->prev_pc
8999 = regcache_read_pc_protected (get_thread_regcache (ecs->event_thread));
9001 if (ecs->event_thread->control.trap_expected)
9003 struct thread_info *tp = ecs->event_thread;
9005 infrun_debug_printf ("%s has trap_expected set, "
9006 "resuming to collect trap",
9007 tp->ptid.to_string ().c_str ());
9009 /* We haven't yet gotten our trap, and either: intercepted a
9010 non-signal event (e.g., a fork); or took a signal which we
9011 are supposed to pass through to the inferior. Simply
9012 continue. */
9013 resume (ecs->event_thread->stop_signal ());
9015 else if (step_over_info_valid_p ())
9017 /* Another thread is stepping over a breakpoint in-line. If
9018 this thread needs a step-over too, queue the request. In
9019 either case, this resume must be deferred for later. */
9020 struct thread_info *tp = ecs->event_thread;
9022 if (ecs->hit_singlestep_breakpoint
9023 || thread_still_needs_step_over (tp))
9025 infrun_debug_printf ("step-over already in progress: "
9026 "step-over for %s deferred",
9027 tp->ptid.to_string ().c_str ());
9028 global_thread_step_over_chain_enqueue (tp);
9030 else
9031 infrun_debug_printf ("step-over in progress: resume of %s deferred",
9032 tp->ptid.to_string ().c_str ());
9034 else
9036 regcache *regcache = get_thread_regcache (ecs->event_thread);
9037 int remove_bp;
9038 int remove_wps;
9039 step_over_what step_what;
9041 /* Either the trap was not expected, but we are continuing
9042 anyway (if we got a signal, the user asked it be passed to
9043 the child)
9044 -- or --
9045 We got our expected trap, but decided we should resume from it.
9048 We're going to run this baby now!
9050 Note that insert_breakpoints won't try to re-insert
9051 already inserted breakpoints. Therefore, we don't
9052 care if breakpoints were already inserted, or not. */
9054 /* If we need to step over a breakpoint, and we're not using
9055 displaced stepping to do so, insert all breakpoints
9056 (watchpoints, etc.) but the one we're stepping over, step one
9057 instruction, and then re-insert the breakpoint when that step
9058 is finished. */
9060 step_what = thread_still_needs_step_over (ecs->event_thread);
9062 remove_bp = (ecs->hit_singlestep_breakpoint
9063 || (step_what & STEP_OVER_BREAKPOINT));
9064 remove_wps = (step_what & STEP_OVER_WATCHPOINT);
9066 /* We can't use displaced stepping if we need to step past a
9067 watchpoint. The instruction copied to the scratch pad would
9068 still trigger the watchpoint. */
9069 if (remove_bp
9070 && (remove_wps || !use_displaced_stepping (ecs->event_thread)))
9072 set_step_over_info (ecs->event_thread->inf->aspace.get (),
9073 regcache_read_pc (regcache), remove_wps,
9074 ecs->event_thread->global_num);
9076 else if (remove_wps)
9077 set_step_over_info (nullptr, 0, remove_wps, -1);
9079 /* If we now need to do an in-line step-over, we need to stop
9080 all other threads. Note this must be done before
9081 insert_breakpoints below, because that removes the breakpoint
9082 we're about to step over, otherwise other threads could miss
9083 it. */
9084 if (step_over_info_valid_p () && target_is_non_stop_p ())
9085 stop_all_threads ("starting in-line step-over");
9087 /* Stop stepping if inserting breakpoints fails. */
9090 insert_breakpoints ();
9092 catch (const gdb_exception_error &e)
9094 exception_print (gdb_stderr, e);
9095 stop_waiting (ecs);
9096 clear_step_over_info ();
9097 return;
9100 ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);
9102 resume (ecs->event_thread->stop_signal ());
9105 prepare_to_wait (ecs);
9108 /* Called when we should continue running the inferior, because the
9109 current event doesn't cause a user visible stop. This does the
9110 resuming part; waiting for the next event is done elsewhere. */
9112 static void
9113 keep_going (struct execution_control_state *ecs)
9115 if (ecs->event_thread->control.trap_expected
9116 && ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
9117 ecs->event_thread->control.trap_expected = 0;
9119 if (!signal_program[ecs->event_thread->stop_signal ()])
9120 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
9121 keep_going_pass_signal (ecs);
9124 /* This function normally comes after a resume, before
9125 handle_inferior_event exits. It takes care of any last bits of
9126 housekeeping, and sets the all-important wait_some_more flag. */
9128 static void
9129 prepare_to_wait (struct execution_control_state *ecs)
9131 infrun_debug_printf ("prepare_to_wait");
9133 ecs->wait_some_more = 1;
9135 /* If the target can't async, emulate it by marking the infrun event
9136 handler such that as soon as we get back to the event-loop, we
9137 immediately end up in fetch_inferior_event again calling
9138 target_wait. */
9139 if (!target_can_async_p ())
9140 mark_infrun_async_event_handler ();
9143 /* We are done with the step range of a step/next/si/ni command.
9144 Called once for each n of a "step n" operation. */
9146 static void
9147 end_stepping_range (struct execution_control_state *ecs)
9149 ecs->event_thread->control.stop_step = 1;
9150 stop_waiting (ecs);
9153 /* Several print_*_reason functions to print why the inferior has stopped.
9154 We always print something when the inferior exits, or receives a signal.
9155 The rest of the cases are dealt with later on in normal_stop and
9156 print_it_typical. Ideally there should be a call to one of these
9157 print_*_reason functions from handle_inferior_event each time
9158 stop_waiting is called.
9160 Note that we don't call these directly; instead we delegate that to
9161 the interpreters, through observers. Interpreters then call these
9162 with whatever uiout is right. */
9164 void
9165 print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
9167 annotate_signalled ();
9168 if (uiout->is_mi_like_p ())
9169 uiout->field_string
9170 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
9171 uiout->text ("\nProgram terminated with signal ");
9172 annotate_signal_name ();
9173 uiout->field_string ("signal-name",
9174 gdb_signal_to_name (siggnal));
9175 annotate_signal_name_end ();
9176 uiout->text (", ");
9177 annotate_signal_string ();
9178 uiout->field_string ("signal-meaning",
9179 gdb_signal_to_string (siggnal));
9180 annotate_signal_string_end ();
9181 uiout->text (".\n");
9182 uiout->text ("The program no longer exists.\n");
9185 void
9186 print_exited_reason (struct ui_out *uiout, int exitstatus)
9188 struct inferior *inf = current_inferior ();
9189 std::string pidstr = target_pid_to_str (ptid_t (inf->pid));
9191 annotate_exited (exitstatus);
9192 if (exitstatus)
9194 if (uiout->is_mi_like_p ())
9195 uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED));
9196 std::string exit_code_str
9197 = string_printf ("0%o", (unsigned int) exitstatus);
9198 uiout->message ("[Inferior %s (%s) exited with code %pF]\n",
9199 plongest (inf->num), pidstr.c_str (),
9200 string_field ("exit-code", exit_code_str.c_str ()));
9202 else
9204 if (uiout->is_mi_like_p ())
9205 uiout->field_string
9206 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
9207 uiout->message ("[Inferior %s (%s) exited normally]\n",
9208 plongest (inf->num), pidstr.c_str ());
9212 void
9213 print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
9215 struct thread_info *thr = inferior_thread ();
9217 infrun_debug_printf ("signal = %s", gdb_signal_to_string (siggnal));
9219 annotate_signal ();
9221 if (uiout->is_mi_like_p ())
9223 else if (show_thread_that_caused_stop ())
9225 uiout->text ("\nThread ");
9226 uiout->field_string ("thread-id", print_thread_id (thr));
9228 const char *name = thread_name (thr);
9229 if (name != nullptr)
9231 uiout->text (" \"");
9232 uiout->field_string ("name", name);
9233 uiout->text ("\"");
9236 else
9237 uiout->text ("\nProgram");
9239 if (siggnal == GDB_SIGNAL_0 && !uiout->is_mi_like_p ())
9240 uiout->text (" stopped");
9241 else
9243 uiout->text (" received signal ");
9244 annotate_signal_name ();
9245 if (uiout->is_mi_like_p ())
9246 uiout->field_string
9247 ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
9248 uiout->field_string ("signal-name", gdb_signal_to_name (siggnal));
9249 annotate_signal_name_end ();
9250 uiout->text (", ");
9251 annotate_signal_string ();
9252 uiout->field_string ("signal-meaning", gdb_signal_to_string (siggnal));
9254 regcache *regcache = get_thread_regcache (thr);
9255 struct gdbarch *gdbarch = regcache->arch ();
9256 if (gdbarch_report_signal_info_p (gdbarch))
9257 gdbarch_report_signal_info (gdbarch, uiout, siggnal);
9259 annotate_signal_string_end ();
9261 uiout->text (".\n");
9264 void
9265 print_no_history_reason (struct ui_out *uiout)
9267 if (uiout->is_mi_like_p ())
9268 uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_NO_HISTORY));
9269 else
9270 uiout->text ("\nNo more reverse-execution history.\n");
9273 /* Print current location without a level number, if we have changed
9274 functions or hit a breakpoint. Print source line if we have one.
9275 bpstat_print contains the logic deciding in detail what to print,
9276 based on the event(s) that just occurred. */
9278 static void
9279 print_stop_location (const target_waitstatus &ws)
9281 int bpstat_ret;
9282 enum print_what source_flag;
9283 int do_frame_printing = 1;
9284 struct thread_info *tp = inferior_thread ();
9286 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws.kind ());
9287 switch (bpstat_ret)
9289 case PRINT_UNKNOWN:
9290 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
9291 should) carry around the function and does (or should) use
9292 that when doing a frame comparison. */
9293 if (tp->control.stop_step
9294 && (tp->control.step_frame_id
9295 == get_frame_id (get_current_frame ()))
9296 && (tp->control.step_start_function
9297 == find_pc_function (tp->stop_pc ())))
9299 /* Finished step, just print source line. */
9300 source_flag = SRC_LINE;
9302 else
9304 /* Print location and source line. */
9305 source_flag = SRC_AND_LOC;
9307 break;
9308 case PRINT_SRC_AND_LOC:
9309 /* Print location and source line. */
9310 source_flag = SRC_AND_LOC;
9311 break;
9312 case PRINT_SRC_ONLY:
9313 source_flag = SRC_LINE;
9314 break;
9315 case PRINT_NOTHING:
9316 /* Something bogus. */
9317 source_flag = SRC_LINE;
9318 do_frame_printing = 0;
9319 break;
9320 default:
9321 internal_error (_("Unknown value."));
9324 /* The behavior of this routine with respect to the source
9325 flag is:
9326 SRC_LINE: Print only source line
9327 LOCATION: Print only location
9328 SRC_AND_LOC: Print location and source line. */
9329 if (do_frame_printing)
9330 print_stack_frame (get_selected_frame (nullptr), 0, source_flag, 1);
9333 /* See `print_stop_event` in infrun.h. */
9335 static void
9336 do_print_stop_event (struct ui_out *uiout, bool displays)
9338 struct target_waitstatus last;
9339 struct thread_info *tp;
9341 get_last_target_status (nullptr, nullptr, &last);
9344 scoped_restore save_uiout = make_scoped_restore (&current_uiout, uiout);
9346 print_stop_location (last);
9348 /* Display the auto-display expressions. */
9349 if (displays)
9350 do_displays ();
9353 tp = inferior_thread ();
9354 if (tp->thread_fsm () != nullptr
9355 && tp->thread_fsm ()->finished_p ())
9357 struct return_value_info *rv;
9359 rv = tp->thread_fsm ()->return_value ();
9360 if (rv != nullptr)
9361 print_return_value (uiout, rv);
9365 /* See infrun.h. This function itself sets up buffered output for the
9366 duration of do_print_stop_event, which performs the actual event
9367 printing. */
9369 void
9370 print_stop_event (struct ui_out *uiout, bool displays)
9372 do_with_buffered_output (do_print_stop_event, uiout, displays);
9375 /* See infrun.h. */
9377 void
9378 maybe_remove_breakpoints (void)
9380 if (!breakpoints_should_be_inserted_now () && target_has_execution ())
9382 if (remove_breakpoints ())
9384 target_terminal::ours_for_output ();
9385 gdb_printf (_("Cannot remove breakpoints because "
9386 "program is no longer writable.\nFurther "
9387 "execution is probably impossible.\n"));
9392 /* The execution context that just caused a normal stop. */
9394 struct stop_context
9396 stop_context ();
9398 DISABLE_COPY_AND_ASSIGN (stop_context);
9400 bool changed () const;
9402 /* The stop ID. */
9403 ULONGEST stop_id;
9405 /* The event PTID. */
9407 ptid_t ptid;
9409 /* If stopped for a thread event, this is the thread that caused the
9410 stop. */
9411 thread_info_ref thread;
9413 /* The inferior that caused the stop. */
9414 int inf_num;
9417 /* Initializes a new stop context. If stopped for a thread event, this
9418 takes a strong reference to the thread. */
9420 stop_context::stop_context ()
9422 stop_id = get_stop_id ();
9423 ptid = inferior_ptid;
9424 inf_num = current_inferior ()->num;
9426 if (inferior_ptid != null_ptid)
9428 /* Take a strong reference so that the thread can't be deleted
9429 yet. */
9430 thread = thread_info_ref::new_reference (inferior_thread ());
9434 /* Return true if the current context no longer matches the saved stop
9435 context. */
9437 bool
9438 stop_context::changed () const
9440 if (ptid != inferior_ptid)
9441 return true;
9442 if (inf_num != current_inferior ()->num)
9443 return true;
9444 if (thread != nullptr && thread->state != THREAD_STOPPED)
9445 return true;
9446 if (get_stop_id () != stop_id)
9447 return true;
9448 return false;
9451 /* See infrun.h. */
9453 bool
9454 normal_stop ()
9456 struct target_waitstatus last;
9458 get_last_target_status (nullptr, nullptr, &last);
9460 new_stop_id ();
9462 /* If an exception is thrown from this point on, make sure to
9463 propagate GDB's knowledge of the executing state to the
9464 frontend/user running state. A QUIT is an easy exception to see
9465 here, so do this before any filtered output. */
9467 ptid_t finish_ptid = null_ptid;
9469 if (!non_stop)
9470 finish_ptid = minus_one_ptid;
9471 else if (last.kind () == TARGET_WAITKIND_SIGNALLED
9472 || last.kind () == TARGET_WAITKIND_EXITED)
9474 /* On some targets, we may still have live threads in the
9475 inferior when we get a process exit event. E.g., for
9476 "checkpoint", when the current checkpoint/fork exits,
9477 linux-fork.c automatically switches to another fork from
9478 within target_mourn_inferior. */
9479 if (inferior_ptid != null_ptid)
9480 finish_ptid = ptid_t (inferior_ptid.pid ());
9482 else if (last.kind () != TARGET_WAITKIND_NO_RESUMED
9483 && last.kind () != TARGET_WAITKIND_THREAD_EXITED)
9484 finish_ptid = inferior_ptid;
9486 std::optional<scoped_finish_thread_state> maybe_finish_thread_state;
9487 if (finish_ptid != null_ptid)
9489 maybe_finish_thread_state.emplace
9490 (user_visible_resume_target (finish_ptid), finish_ptid);
9493 /* As we're presenting a stop, and potentially removing breakpoints,
9494 update the thread list so we can tell whether there are threads
9495 running on the target. With target remote, for example, we can
9496 only learn about new threads when we explicitly update the thread
9497 list. Do this before notifying the interpreters about signal
9498 stops, end of stepping ranges, etc., so that the "new thread"
9499 output is emitted before e.g., "Program received signal FOO",
9500 instead of after. */
9501 update_thread_list ();
9503 if (last.kind () == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
9504 notify_signal_received (inferior_thread ()->stop_signal ());
9506 /* As with the notification of thread events, we want to delay
9507 notifying the user that we've switched thread context until
9508 the inferior actually stops.
9510 There's no point in saying anything if the inferior has exited.
9511 Note that SIGNALLED here means "exited with a signal", not
9512 "received a signal".
9514 Also skip saying anything in non-stop mode. In that mode, we
9515 don't want GDB to switch threads behind the user's back, to avoid
9516 races where the user is typing a command to apply to thread x
9517 but GDB switches to thread y before the user finishes entering
9518 the command. For that reason, fetch_inferior_event installs a
9519 cleanup that restores the current thread to the thread the user
9520 had selected right after this event is handled, so we're not
9521 really switching, only informing of a stop. */
9522 if (!non_stop)
9524 if ((last.kind () != TARGET_WAITKIND_SIGNALLED
9525 && last.kind () != TARGET_WAITKIND_EXITED
9526 && last.kind () != TARGET_WAITKIND_NO_RESUMED
9527 && last.kind () != TARGET_WAITKIND_THREAD_EXITED)
9528 && target_has_execution ()
9529 && previous_thread != inferior_thread ())
9531 SWITCH_THRU_ALL_UIS ()
9533 target_terminal::ours_for_output ();
9534 gdb_printf (_("[Switching to %s]\n"),
9535 target_pid_to_str (inferior_ptid).c_str ());
9536 annotate_thread_changed ();
9540 update_previous_thread ();
9543 if (last.kind () == TARGET_WAITKIND_NO_RESUMED
9544 || last.kind () == TARGET_WAITKIND_THREAD_EXITED)
9546 stop_print_frame = false;
9548 SWITCH_THRU_ALL_UIS ()
9549 if (current_ui->prompt_state == PROMPT_BLOCKED)
9551 target_terminal::ours_for_output ();
9552 if (last.kind () == TARGET_WAITKIND_NO_RESUMED)
9553 gdb_printf (_("No unwaited-for children left.\n"));
9554 else if (last.kind () == TARGET_WAITKIND_THREAD_EXITED)
9555 gdb_printf (_("Command aborted, thread exited.\n"));
9556 else
9557 gdb_assert_not_reached ("unhandled");
9561 /* Note: this depends on the update_thread_list call above. */
9562 maybe_remove_breakpoints ();
9564 /* If an auto-display called a function and that got a signal,
9565 delete that auto-display to avoid an infinite recursion. */
9567 if (stopped_by_random_signal)
9568 disable_current_display ();
9570 SWITCH_THRU_ALL_UIS ()
9572 async_enable_stdin ();
9575 /* Let the user/frontend see the threads as stopped. */
9576 maybe_finish_thread_state.reset ();
9578 /* Select innermost stack frame - i.e., current frame is frame 0,
9579 and current location is based on that. Handle the case where the
9580 dummy call is returning after being stopped. E.g. the dummy call
9581 previously hit a breakpoint. (If the dummy call returns
9582 normally, we won't reach here.) Do this before the stop hook is
9583 run, so that it doesn't get to see the temporary dummy frame,
9584 which is not where we'll present the stop. */
9585 if (has_stack_frames ())
9587 if (stop_stack_dummy == STOP_STACK_DUMMY)
9589 /* Pop the empty frame that contains the stack dummy. This
9590 also restores inferior state prior to the call (struct
9591 infcall_suspend_state). */
9592 frame_info_ptr frame = get_current_frame ();
9594 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
9595 frame_pop (frame);
9596 /* frame_pop calls reinit_frame_cache as the last thing it
9597 does which means there's now no selected frame. */
9600 select_frame (get_current_frame ());
9602 /* Set the current source location. */
9603 set_current_sal_from_frame (get_current_frame ());
9606 /* Look up the hook_stop and run it (the CLI internally handles the problem
9607 of stop_command's pre-hook not existing). */
9608 stop_context saved_context;
9612 execute_cmd_pre_hook (stop_command);
9614 catch (const gdb_exception_error &ex)
9616 exception_fprintf (gdb_stderr, ex,
9617 "Error while running hook_stop:\n");
9620 /* If the stop hook resumes the target, then there's no point in
9621 trying to notify about the previous stop; its context is
9622 gone. Likewise if the command switches thread or inferior --
9623 the observers would print a stop for the wrong
9624 thread/inferior. */
9625 if (saved_context.changed ())
9626 return true;
9628 /* Notify observers about the stop. This is where the interpreters
9629 print the stop event. */
9630 notify_normal_stop ((inferior_ptid != null_ptid
9631 ? inferior_thread ()->control.stop_bpstat
9632 : nullptr),
9633 stop_print_frame);
9634 annotate_stopped ();
9636 if (target_has_execution ())
9638 if (last.kind () != TARGET_WAITKIND_SIGNALLED
9639 && last.kind () != TARGET_WAITKIND_EXITED
9640 && last.kind () != TARGET_WAITKIND_NO_RESUMED
9641 && last.kind () != TARGET_WAITKIND_THREAD_EXITED)
9642 /* Delete the breakpoint we stopped at, if it wants to be deleted.
9643 Delete any breakpoint that is to be deleted at the next stop. */
9644 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
9647 return false;
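/* Accessors for the per-signal handling tables maintained by the
"handle" command. */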
9651 signal_stop_state (int signo)
9653 return signal_stop[signo];
9657 signal_print_state (int signo)
9659 return signal_print[signo];
9663 signal_pass_state (int signo)
9665 return signal_program[signo];
9668 static void
9669 signal_cache_update (int signo)
9671 if (signo == -1)
9673 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
9674 signal_cache_update (signo);
9676 return;
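/* A signal may be passed straight through to the inferior only when
GDB has no reason to see it first: we neither stop nor print for it,
the program is set to receive it, and no catchpoint watches it. */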
9679 signal_pass[signo] = (signal_stop[signo] == 0
9680 && signal_print[signo] == 0
9681 && signal_program[signo] == 1
9682 && signal_catch[signo] == 0);
9686 signal_stop_update (int signo, int state)
9688 int ret = signal_stop[signo];
9690 signal_stop[signo] = state;
9691 signal_cache_update (signo);
9692 return ret;
9696 signal_print_update (int signo, int state)
9698 int ret = signal_print[signo];
9700 signal_print[signo] = state;
9701 signal_cache_update (signo);
9702 return ret;
9706 signal_pass_update (int signo, int state)
9708 int ret = signal_program[signo];
9710 signal_program[signo] = state;
9711 signal_cache_update (signo);
9712 return ret;
9715 /* Update the global 'signal_catch' from INFO and notify the
9716 target. */
9718 void
9719 signal_catch_update (const unsigned int *info)
9721 int i;
9723 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
9724 signal_catch[i] = info[i] > 0;
9725 signal_cache_update (-1);
9726 target_pass_signals (signal_pass);
9729 static void
9730 sig_print_header (void)
9732 gdb_printf (_("Signal Stop\tPrint\tPass "
9733 "to program\tDescription\n"));
9736 static void
9737 sig_print_info (enum gdb_signal oursig)
9739 const char *name = gdb_signal_to_name (oursig);
9740 int name_padding = 13 - strlen (name);
9742 if (name_padding <= 0)
9743 name_padding = 0;
9745 gdb_printf ("%s", name);
9746 gdb_printf ("%*.*s ", name_padding, name_padding, " ");
9747 gdb_printf ("%s\t", signal_stop[oursig] ? "Yes" : "No");
9748 gdb_printf ("%s\t", signal_print[oursig] ? "Yes" : "No");
9749 gdb_printf ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
9750 gdb_printf ("%s\n", gdb_signal_to_string (oursig));
9753 /* Specify how various signals in the inferior should be handled. */
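/* For example, "handle SIGUSR1 nostop noprint pass" arranges for
SIGUSR1 to be delivered to the program without stopping or announcing
it. */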
9755 static void
9756 handle_command (const char *args, int from_tty)
9758 int digits, wordlen;
9759 int sigfirst, siglast;
9760 enum gdb_signal oursig;
9761 int allsigs;
9763 if (args == nullptr)
9765 error_no_arg (_("signal to handle"));
9768 /* Allocate and zero an array of flags for which signals to handle. */
9770 const size_t nsigs = GDB_SIGNAL_LAST;
9771 unsigned char sigs[nsigs] {};
9773 /* Break the command line up into args. */
9775 gdb_argv built_argv (args);
9777 /* Walk through the args, looking for signal numbers, signal names, and
9778 actions. Signal numbers and signal names may be interspersed with
9779 actions, with the actions being performed for all signals cumulatively
9780 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
9782 for (char *arg : built_argv)
9784 wordlen = strlen (arg);
9785 for (digits = 0; isdigit (arg[digits]); digits++)
9788 allsigs = 0;
9789 sigfirst = siglast = -1;
9791 if (wordlen >= 1 && !strncmp (arg, "all", wordlen))
9793 /* Apply action to all signals except those used by the
9794 debugger. Silently skip those. */
9795 allsigs = 1;
9796 sigfirst = 0;
9797 siglast = nsigs - 1;
9799 else if (wordlen >= 1 && !strncmp (arg, "stop", wordlen))
9801 SET_SIGS (nsigs, sigs, signal_stop);
9802 SET_SIGS (nsigs, sigs, signal_print);
9804 else if (wordlen >= 1 && !strncmp (arg, "ignore", wordlen))
9806 UNSET_SIGS (nsigs, sigs, signal_program);
9808 else if (wordlen >= 2 && !strncmp (arg, "print", wordlen))
9810 SET_SIGS (nsigs, sigs, signal_print);
9812 else if (wordlen >= 2 && !strncmp (arg, "pass", wordlen))
9814 SET_SIGS (nsigs, sigs, signal_program);
9816 else if (wordlen >= 3 && !strncmp (arg, "nostop", wordlen))
9818 UNSET_SIGS (nsigs, sigs, signal_stop);
9820 else if (wordlen >= 3 && !strncmp (arg, "noignore", wordlen))
9822 SET_SIGS (nsigs, sigs, signal_program);
9824 else if (wordlen >= 4 && !strncmp (arg, "noprint", wordlen))
9826 UNSET_SIGS (nsigs, sigs, signal_print);
9827 UNSET_SIGS (nsigs, sigs, signal_stop);
9829 else if (wordlen >= 4 && !strncmp (arg, "nopass", wordlen))
9831 UNSET_SIGS (nsigs, sigs, signal_program);
9833 else if (digits > 0)
9835 /* It is numeric. The numeric signal refers to our own
9836 internal signal numbering from target.h, not to host/target
9837 signal number. This is a feature; users really should be
9838 using symbolic names anyway, and the common ones like
9839 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
9841 sigfirst = siglast = (int)
9842 gdb_signal_from_command (atoi (arg));
9843 if (arg[digits] == '-')
9845 siglast = (int)
9846 gdb_signal_from_command (atoi (arg + digits + 1));
9848 if (sigfirst > siglast)
9850 /* Bet he didn't figure we'd think of this case... */
9851 std::swap (sigfirst, siglast);
9854 else
9856 oursig = gdb_signal_from_name (arg);
9857 if (oursig != GDB_SIGNAL_UNKNOWN)
9859 sigfirst = siglast = (int) oursig;
9861 else
9863 /* Not a number and not a recognized flag word => complain. */
9864 error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg);
9868 /* If any signal numbers or symbol names were found, set flags for
9869 which signals to apply actions to. */
9871 for (int signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
9873 switch ((enum gdb_signal) signum)
9875 case GDB_SIGNAL_TRAP:
9876 case GDB_SIGNAL_INT:
9877 if (!allsigs && !sigs[signum])
9879 if (query (_("%s is used by the debugger.\n\
9880 Are you sure you want to change it? "),
9881 gdb_signal_to_name ((enum gdb_signal) signum)))
9883 sigs[signum] = 1;
9885 else
9886 gdb_printf (_("Not confirmed, unchanged.\n"));
9888 break;
9889 case GDB_SIGNAL_0:
9890 case GDB_SIGNAL_DEFAULT:
9891 case GDB_SIGNAL_UNKNOWN:
9892 /* Make sure that "all" doesn't print these. */
9893 break;
9894 default:
9895 sigs[signum] = 1;
9896 break;
9901 for (int signum = 0; signum < nsigs; signum++)
9902 if (sigs[signum])
9904 signal_cache_update (-1);
9905 target_pass_signals (signal_pass);
9906 target_program_signals (signal_program);
9908 if (from_tty)
9910 /* Show the results. */
9911 sig_print_header ();
9912 for (; signum < nsigs; signum++)
9913 if (sigs[signum])
9914 sig_print_info ((enum gdb_signal) signum);
9917 break;
9921 /* Complete the "handle" command. */
9923 static void
9924 handle_completer (struct cmd_list_element *ignore,
9925 completion_tracker &tracker,
9926 const char *text, const char *word)
9928 static const char * const keywords[] =
9930 "all",
9931 "stop",
9932 "ignore",
9933 "print",
9934 "pass",
9935 "nostop",
9936 "noignore",
9937 "noprint",
9938 "nopass",
9939 nullptr,
9942 signal_completer (ignore, tracker, text, word);
9943 complete_on_enum (tracker, keywords, word, word);
9946 enum gdb_signal
9947 gdb_signal_from_command (int num)
9949 if (num >= 1 && num <= 15)
9950 return (enum gdb_signal) num;
9951 error (_("Only signals 1-15 are valid as numeric signals.\n\
9952 Use \"info signals\" for a list of symbolic signals."));
9955 /* Print current contents of the tables set by the handle command.
9956 It is possible we should just be printing signals actually used
9957 by the current target (but for things to work right when switching
9958 targets, all signals should be in the signal tables). */
9960 static void
9961 info_signals_command (const char *signum_exp, int from_tty)
9963 enum gdb_signal oursig;
9965 sig_print_header ();
9967 if (signum_exp)
9969 /* First see if this is a symbol name. */
9970 oursig = gdb_signal_from_name (signum_exp);
9971 if (oursig == GDB_SIGNAL_UNKNOWN)
9973 /* No, try numeric. */
9974 oursig =
9975 gdb_signal_from_command (parse_and_eval_long (signum_exp));
9977 sig_print_info (oursig);
9978 return;
9981 gdb_printf ("\n");
9982 /* These ugly casts brought to you by the native VAX compiler. */
9983 for (oursig = GDB_SIGNAL_FIRST;
9984 (int) oursig < (int) GDB_SIGNAL_LAST;
9985 oursig = (enum gdb_signal) ((int) oursig + 1))
9987 QUIT;
9989 if (oursig != GDB_SIGNAL_UNKNOWN
9990 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
9991 sig_print_info (oursig);
9994 gdb_printf (_("\nUse the \"handle\" command "
9995 "to change these tables.\n"));
9998 /* The $_siginfo convenience variable is a bit special. We don't know
9999 for sure the type of the value until we actually have a chance to
10000 fetch the data. The type can change depending on gdbarch, so it is
10001 also dependent on which thread you have selected. We deal with this by:
10003 1. making $_siginfo be an internalvar that creates a new value on
10004 access.
10006 2. making the value of $_siginfo be an lval_computed value. */
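/* For example, on GNU/Linux the signal that stopped the current
thread can be inspected with "print $_siginfo.si_signo"; the value is
constructed lazily by siginfo_make_value below. */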
10008 /* This function implements the lval_computed support for reading a
10009 $_siginfo value. */
10011 static void
10012 siginfo_value_read (struct value *v)
10014 LONGEST transferred;
10016 /* If we can access registers, then we can access $_siginfo, and
10017 vice versa. */
10018 validate_registers_access ();
10020 transferred =
10021 target_read (current_inferior ()->top_target (),
10022 TARGET_OBJECT_SIGNAL_INFO,
10023 nullptr,
10024 v->contents_all_raw ().data (),
10025 v->offset (),
10026 v->type ()->length ());
10028 if (transferred != v->type ()->length ())
10029 error (_("Unable to read siginfo"));
10032 /* This function implements the lval_computed support for writing a
10033 $_siginfo value. */
10035 static void
10036 siginfo_value_write (struct value *v, struct value *fromval)
10038 LONGEST transferred;
10040 /* If we can access registers, then we can access $_siginfo, and
10041 vice versa. */
10042 validate_registers_access ();
10044 transferred = target_write (current_inferior ()->top_target (),
10045 TARGET_OBJECT_SIGNAL_INFO,
10046 nullptr,
10047 fromval->contents_all_raw ().data (),
10048 v->offset (),
10049 fromval->type ()->length ());
10051 if (transferred != fromval->type ()->length ())
10052 error (_("Unable to write siginfo"));
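/* Read/write callbacks for the lval_computed value that implements
$_siginfo. */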
10055 static const struct lval_funcs siginfo_value_funcs =
10057 siginfo_value_read,
10058 siginfo_value_write
10061 /* Return a new value with the correct type for the siginfo object of
10062 the current thread using architecture GDBARCH. Return a void value
10063 if there's no object available. */
10065 static struct value *
10066 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
10067 void *ignore)
10069 if (target_has_stack ()
10070 && inferior_ptid != null_ptid
10071 && gdbarch_get_siginfo_type_p (gdbarch))
10073 struct type *type = gdbarch_get_siginfo_type (gdbarch);
10075 return value::allocate_computed (type, &siginfo_value_funcs, nullptr);
10078 return value::allocate (builtin_type (gdbarch)->builtin_void);
10082 /* infcall_suspend_state contains state about the program itself like its
10083 registers and any signal it received when it last stopped.
10084 This state must be restored regardless of how the inferior function call
10085 ends (either successfully, or after it hits a breakpoint or signal)
10086 if the program is to properly continue where it left off. */
10088 class infcall_suspend_state
10090 public:
10091 /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
10092 once the inferior function call has finished. */
10093 infcall_suspend_state (struct gdbarch *gdbarch,
10094 const struct thread_info *tp,
10095 struct regcache *regcache)
10096 : m_registers (new readonly_detached_regcache (*regcache))
10098 tp->save_suspend_to (m_thread_suspend);
10100 gdb::unique_xmalloc_ptr<gdb_byte> siginfo_data;
10102 if (gdbarch_get_siginfo_type_p (gdbarch))
10104 struct type *type = gdbarch_get_siginfo_type (gdbarch);
10105 size_t len = type->length ();
10107 siginfo_data.reset ((gdb_byte *) xmalloc (len));
10109 if (target_read (current_inferior ()->top_target (),
10110 TARGET_OBJECT_SIGNAL_INFO, nullptr,
10111 siginfo_data.get (), 0, len) != len)
10113 /* Errors ignored. */
10114 siginfo_data.reset (nullptr);
10118 if (siginfo_data)
10120 m_siginfo_gdbarch = gdbarch;
10121 m_siginfo_data = std::move (siginfo_data);
10125 /* Return a pointer to the stored register state. */
10127 readonly_detached_regcache *registers () const
10129 return m_registers.get ();
10132 /* Restores the stored state into GDBARCH, TP, and REGCACHE. */
10134 void restore (struct gdbarch *gdbarch,
10135 struct thread_info *tp,
10136 struct regcache *regcache) const
10138 tp->restore_suspend_from (m_thread_suspend);
10140 if (m_siginfo_gdbarch == gdbarch)
10142 struct type *type = gdbarch_get_siginfo_type (gdbarch);
10144 /* Errors ignored. */
10145 target_write (current_inferior ()->top_target (),
10146 TARGET_OBJECT_SIGNAL_INFO, nullptr,
10147 m_siginfo_data.get (), 0, type->length ());
10150 /* The inferior can be gone if the user types "print exit(0)"
10151 (and perhaps other times). */
10152 if (target_has_execution ())
10153 /* NB: The register write goes through to the target. */
10154 regcache->restore (registers ());
10157 private:
10158 /* How the current thread stopped before the inferior function call was
10159 executed. */
10160 struct thread_suspend_state m_thread_suspend;
10162 /* The registers before the inferior function call was executed. */
10163 std::unique_ptr<readonly_detached_regcache> m_registers;
10165 /* Format of SIGINFO_DATA or NULL if it is not present. */
10166 struct gdbarch *m_siginfo_gdbarch = nullptr;
10168 /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
10169 gdbarch_get_siginfo_type ()->length (). For a different gdbarch the
10170 content would be invalid. */
10171 gdb::unique_xmalloc_ptr<gdb_byte> m_siginfo_data;
10174 infcall_suspend_state_up
10175 save_infcall_suspend_state ()
10177 struct thread_info *tp = inferior_thread ();
10178 regcache *regcache = get_thread_regcache (tp);
10179 struct gdbarch *gdbarch = regcache->arch ();
10181 infcall_suspend_state_up inf_state
10182 (new struct infcall_suspend_state (gdbarch, tp, regcache));
10184 /* Having saved the current state, adjust the thread state, discarding
10185 any stop signal information. The stop signal is not useful when
10186 starting an inferior function call, and run_inferior_call will not use
10187 the signal due to its `proceed' call with GDB_SIGNAL_0. */
10188 tp->set_stop_signal (GDB_SIGNAL_0);
10190 return inf_state;
10193 /* Restore inferior session state to INF_STATE. */
10195 void
10196 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
10198 struct thread_info *tp = inferior_thread ();
10199 regcache *regcache = get_thread_regcache (inferior_thread ());
10200 struct gdbarch *gdbarch = regcache->arch ();
10202 inf_state->restore (gdbarch, tp, regcache);
10203 discard_infcall_suspend_state (inf_state);
10206 void
10207 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
10209 delete inf_state;
10212 readonly_detached_regcache *
10213 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
10215 return inf_state->registers ();
10218 /* infcall_control_state contains state regarding gdb's control of the
10219 inferior itself like stepping control. It also contains session state like
10220 the user's currently selected frame. */
10222 struct infcall_control_state
10224 struct thread_control_state thread_control;
10225 struct inferior_control_state inferior_control;
10227 /* Other fields: */
10228 enum stop_stack_kind stop_stack_dummy = STOP_NONE;
10229 int stopped_by_random_signal = 0;
10231 /* ID and level of the selected frame when the inferior function
10232 call was made. */
10233 struct frame_id selected_frame_id {};
10234 int selected_frame_level = -1;
10237 /* Save all of the information associated with the inferior<==>gdb
10238 connection. */
10240 infcall_control_state_up
10241 save_infcall_control_state ()
10243 infcall_control_state_up inf_status (new struct infcall_control_state);
10244 struct thread_info *tp = inferior_thread ();
10245 struct inferior *inf = current_inferior ();
10247 inf_status->thread_control = tp->control;
10248 inf_status->inferior_control = inf->control;
10250 tp->control.step_resume_breakpoint = nullptr;
10251 tp->control.exception_resume_breakpoint = nullptr;
10253 /* Save the original bpstat chain to INF_STATUS; replace it in TP with a
10254 copy of the chain. If the caller's caller is walking the chain, they'll be
10255 happier if we hand them back the original chain when
10256 restore_infcall_control_state is called. */
10257 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
10259 /* Other fields: */
10260 inf_status->stop_stack_dummy = stop_stack_dummy;
10261 inf_status->stopped_by_random_signal = stopped_by_random_signal;
10263 save_selected_frame (&inf_status->selected_frame_id,
10264 &inf_status->selected_frame_level);
10266 return inf_status;
10269 /* Restore inferior session state to INF_STATUS. */
10271 void
10272 restore_infcall_control_state (struct infcall_control_state *inf_status)
10274 struct thread_info *tp = inferior_thread ();
10275 struct inferior *inf = current_inferior ();
10277 if (tp->control.step_resume_breakpoint)
10278 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
10280 if (tp->control.exception_resume_breakpoint)
10281 tp->control.exception_resume_breakpoint->disposition
10282 = disp_del_at_next_stop;
10284 /* Handle the bpstat_copy of the chain. */
10285 bpstat_clear (&tp->control.stop_bpstat);
10287 tp->control = inf_status->thread_control;
10288 inf->control = inf_status->inferior_control;
10290 /* Other fields: */
10291 stop_stack_dummy = inf_status->stop_stack_dummy;
10292 stopped_by_random_signal = inf_status->stopped_by_random_signal;
10294 if (target_has_stack ())
10296 restore_selected_frame (inf_status->selected_frame_id,
10297 inf_status->selected_frame_level);
10300 delete inf_status;
10303 void
10304 discard_infcall_control_state (struct infcall_control_state *inf_status)
10306 if (inf_status->thread_control.step_resume_breakpoint)
10307 inf_status->thread_control.step_resume_breakpoint->disposition
10308 = disp_del_at_next_stop;
10310 if (inf_status->thread_control.exception_resume_breakpoint)
10311 inf_status->thread_control.exception_resume_breakpoint->disposition
10312 = disp_del_at_next_stop;
10314 /* See save_infcall_control_state for info on stop_bpstat. */
10315 bpstat_clear (&inf_status->thread_control.stop_bpstat);
10317 delete inf_status;
10320 /* See infrun.h. */
10322 void
10323 clear_exit_convenience_vars (void)
10325 clear_internalvar (lookup_internalvar ("_exitsignal"));
10326 clear_internalvar (lookup_internalvar ("_exitcode"));
10330 /* User interface for reverse debugging:
10331 Set exec-direction / show exec-direction commands
10332 (an error is raised unless the target can execute in reverse). */
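/* For example, after recording execution with the "record" command,
"set exec-direction reverse" makes commands such as "continue" and
"step" run the program backwards. */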
10334 enum exec_direction_kind execution_direction = EXEC_FORWARD;
10335 static const char exec_forward[] = "forward";
10336 static const char exec_reverse[] = "reverse";
10337 static const char *exec_direction = exec_forward;
10338 static const char *const exec_direction_names[] = {
10339 exec_forward,
10340 exec_reverse,
10341 nullptr
10344 static void
10345 set_exec_direction_func (const char *args, int from_tty,
10346 struct cmd_list_element *cmd)
10348 if (target_can_execute_reverse ())
10350 if (!strcmp (exec_direction, exec_forward))
10351 execution_direction = EXEC_FORWARD;
10352 else if (!strcmp (exec_direction, exec_reverse))
10353 execution_direction = EXEC_REVERSE;
10355 else
10357 exec_direction = exec_forward;
10358 error (_("Target does not support this operation."));
10362 static void
10363 show_exec_direction_func (struct ui_file *out, int from_tty,
10364 struct cmd_list_element *cmd, const char *value)
10366 switch (execution_direction) {
10367 case EXEC_FORWARD:
10368 gdb_printf (out, _("Forward.\n"));
10369 break;
10370 case EXEC_REVERSE:
10371 gdb_printf (out, _("Reverse.\n"));
10372 break;
10373 default:
10374 internal_error (_("bogus execution_direction value: %d"),
10375 (int) execution_direction);
10379 static void
10380 show_schedule_multiple (struct ui_file *file, int from_tty,
10381 struct cmd_list_element *c, const char *value)
10383 gdb_printf (file, _("Resuming the execution of threads "
10384 "of all processes is %s.\n"), value);
10387 /* Implementation of `siginfo' variable. */
10389 static const struct internalvar_funcs siginfo_funcs =
10391 siginfo_make_value,
10392 nullptr,
10395 /* Callback for infrun's target events source. This is marked when a
10396 thread has a pending status to process. */
10398 static void
10399 infrun_async_inferior_event_handler (gdb_client_data data)
10401 clear_async_event_handler (infrun_async_inferior_event_token);
10402 inferior_event_handler (INF_REG_EVENT);
10405 #if GDB_SELF_TEST
10406 namespace selftests
10409 /* Verify that when two threads with the same ptid exist (from two different
10410 targets) and one of them changes ptid, we only update inferior_ptid if
10411 it is appropriate. */
10413 static void
10414 infrun_thread_ptid_changed ()
10416 gdbarch *arch = current_inferior ()->arch ();
10418 /* The thread which inferior_ptid represents changes ptid. */
10420 scoped_restore_current_pspace_and_thread restore;
10422 scoped_mock_context<test_target_ops> target1 (arch);
10423 scoped_mock_context<test_target_ops> target2 (arch);
10425 ptid_t old_ptid (111, 222);
10426 ptid_t new_ptid (111, 333);
10428 target1.mock_inferior.pid = old_ptid.pid ();
10429 target1.mock_thread.ptid = old_ptid;
10430 target1.mock_inferior.ptid_thread_map.clear ();
10431 target1.mock_inferior.ptid_thread_map[old_ptid] = &target1.mock_thread;
10433 target2.mock_inferior.pid = old_ptid.pid ();
10434 target2.mock_thread.ptid = old_ptid;
10435 target2.mock_inferior.ptid_thread_map.clear ();
10436 target2.mock_inferior.ptid_thread_map[old_ptid] = &target2.mock_thread;
10438 auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
10439 set_current_inferior (&target1.mock_inferior);
10441 thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);
10443 gdb_assert (inferior_ptid == new_ptid);
10444 }
10446 /* A thread with the same ptid as inferior_ptid, but from another target,
10447 changes ptid. */
10448 {
10449 scoped_restore_current_pspace_and_thread restore;
10451 scoped_mock_context<test_target_ops> target1 (arch);
10452 scoped_mock_context<test_target_ops> target2 (arch);
10454 ptid_t old_ptid (111, 222);
10455 ptid_t new_ptid (111, 333);
10457 target1.mock_inferior.pid = old_ptid.pid ();
10458 target1.mock_thread.ptid = old_ptid;
10459 target1.mock_inferior.ptid_thread_map.clear ();
10460 target1.mock_inferior.ptid_thread_map[old_ptid] = &target1.mock_thread;
10462 target2.mock_inferior.pid = old_ptid.pid ();
10463 target2.mock_thread.ptid = old_ptid;
10464 target2.mock_inferior.ptid_thread_map.clear ();
10465 target2.mock_inferior.ptid_thread_map[old_ptid] = &target2.mock_thread;
10467 auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
10468 set_current_inferior (&target2.mock_inferior);
10470 thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);
10472 gdb_assert (inferior_ptid == old_ptid);
10473 }
10474 }
10476 } /* namespace selftests */
10478 #endif /* GDB_SELF_TEST */
10480 void _initialize_infrun ();
10481 void
10482 _initialize_infrun ()
10483 {
10484 struct cmd_list_element *c;
10486 /* Register extra event sources in the event loop. */
10487 infrun_async_inferior_event_token
10488 = create_async_event_handler (infrun_async_inferior_event_handler, nullptr,
10489 "infrun");
10491 cmd_list_element *info_signals_cmd
10492 = add_info ("signals", info_signals_command, _("\
10493 What debugger does when program gets various signals.\n\
10494 Specify a signal as argument to print info on that signal only."));
10495 add_info_alias ("handle", info_signals_cmd, 0);
10497 c = add_com ("handle", class_run, handle_command, _("\
10498 Specify how to handle signals.\n\
10499 Usage: handle SIGNAL [ACTIONS]\n\
10500 Args are signals and actions to apply to those signals.\n\
10501 If no actions are specified, the current settings for the specified signals\n\
10502 will be displayed instead.\n\
10504 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
10505 from 1-15 are allowed for compatibility with old versions of GDB.\n\
10506 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
10507 The special arg \"all\" is recognized to mean all signals except those\n\
10508 used by the debugger, typically SIGTRAP and SIGINT.\n\
10510 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
10511 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
10512 Stop means reenter debugger if this signal happens (implies print).\n\
10513 Print means print a message if this signal happens.\n\
10514 Pass means let program see this signal; otherwise program doesn't know.\n\
10515 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
10516 Pass and Stop may be combined.\n\
10518 Multiple signals may be specified. Signal numbers and signal names\n\
10519 may be interspersed with actions, with the actions being performed for\n\
10520 all signals cumulatively specified."));
10521 set_cmd_completer (c, handle_completer);
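/* [Editorial example, illustration only.]  Typical invocations of the
   command registered above, matching its help text:

     (gdb) handle SIGUSR1 SIGUSR2 nostop noprint pass
     (gdb) handle SIGSEGV stop print nopass

   Several signals and several actions may appear on one line; the
   actions apply cumulatively to all of the listed signals.  */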
10523 stop_command = add_cmd ("stop", class_obscure,
10524 not_just_help_class_command, _("\
10525 There is no `stop' command, but you can set a hook on `stop'.\n\
10526 This allows you to set a list of commands to be run each time execution\n\
10527 of the program stops."), &cmdlist);
10529 add_setshow_boolean_cmd
10530 ("infrun", class_maintenance, &debug_infrun,
10531 _("Set inferior debugging."),
10532 _("Show inferior debugging."),
10533 _("When non-zero, inferior specific debugging is enabled."),
10534 nullptr, show_debug_infrun, &setdebuglist, &showdebuglist);
10536 add_setshow_boolean_cmd ("non-stop", no_class,
10537 &non_stop_1, _("\
10538 Set whether gdb controls the inferior in non-stop mode."), _("\
10539 Show whether gdb controls the inferior in non-stop mode."), _("\
10540 When debugging a multi-threaded program and this setting is\n\
10541 off (the default, also called all-stop mode), when one thread stops\n\
10542 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
10543 all other threads in the program while you interact with the thread of\n\
10544 interest. When you continue or step a thread, you can allow the other\n\
10545 threads to run, or have them remain stopped, but while you inspect any\n\
10546 thread's state, all threads stop.\n\
10548 In non-stop mode, when one thread stops, other threads can continue\n\
10549 to run freely. You'll be able to step each thread independently,\n\
10550 leave it stopped or free to run as needed."),
10551 set_non_stop,
10552 show_non_stop,
10553 &setlist,
10554 &showlist);
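/* [Editorial note, illustration only.]  Non-stop has to be selected
   before the inferior starts executing, and is normally combined with
   background execution commands, e.g.:

     (gdb) set non-stop on
     (gdb) run &
     (gdb) continue -a &

   "continue -a &" resumes all threads in the background while the
   prompt remains available for inspecting individual threads.  */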
10556 for (size_t i = 0; i < GDB_SIGNAL_LAST; i++)
10557 {
10558 signal_stop[i] = 1;
10559 signal_print[i] = 1;
10560 signal_program[i] = 1;
10561 signal_catch[i] = 0;
10562 }
10564 /* Signals caused by debugger's own actions should not be given to
10565 the program afterwards.
10567 Do not deliver GDB_SIGNAL_TRAP by default, except when the user
10568 explicitly specifies that it should be delivered to the target
10569 program. Typically, that would occur when a user is debugging a
10570 target monitor on a simulator: the target monitor sets a
10571 breakpoint; the simulator encounters this breakpoint and halts
10572 the simulation handing control to GDB; GDB, noting that the stop
10573 address doesn't map to any known breakpoint, returns control back
10574 to the simulator; the simulator then delivers the hardware
10575 equivalent of a GDB_SIGNAL_TRAP to the program being
10576 debugged. */
10577 signal_program[GDB_SIGNAL_TRAP] = 0;
10578 signal_program[GDB_SIGNAL_INT] = 0;
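/* [Editorial note.]  Either default can be overridden explicitly with
   the "handle" command registered above, e.g. "handle SIGTRAP pass"
   for the target-monitor-on-a-simulator scenario described in the
   comment.  */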
10580 /* Signals that are not errors should not normally enter the debugger. */
10581 signal_stop[GDB_SIGNAL_ALRM] = 0;
10582 signal_print[GDB_SIGNAL_ALRM] = 0;
10583 signal_stop[GDB_SIGNAL_VTALRM] = 0;
10584 signal_print[GDB_SIGNAL_VTALRM] = 0;
10585 signal_stop[GDB_SIGNAL_PROF] = 0;
10586 signal_print[GDB_SIGNAL_PROF] = 0;
10587 signal_stop[GDB_SIGNAL_CHLD] = 0;
10588 signal_print[GDB_SIGNAL_CHLD] = 0;
10589 signal_stop[GDB_SIGNAL_IO] = 0;
10590 signal_print[GDB_SIGNAL_IO] = 0;
10591 signal_stop[GDB_SIGNAL_POLL] = 0;
10592 signal_print[GDB_SIGNAL_POLL] = 0;
10593 signal_stop[GDB_SIGNAL_URG] = 0;
10594 signal_print[GDB_SIGNAL_URG] = 0;
10595 signal_stop[GDB_SIGNAL_WINCH] = 0;
10596 signal_print[GDB_SIGNAL_WINCH] = 0;
10597 signal_stop[GDB_SIGNAL_PRIO] = 0;
10598 signal_print[GDB_SIGNAL_PRIO] = 0;
10600 /* These signals are used internally by user-level thread
10601 implementations. (See signal(5) on Solaris.) Like the above
10602 signals, a healthy program receives and handles them as part of
10603 its normal operation. */
10604 signal_stop[GDB_SIGNAL_LWP] = 0;
10605 signal_print[GDB_SIGNAL_LWP] = 0;
10606 signal_stop[GDB_SIGNAL_WAITING] = 0;
10607 signal_print[GDB_SIGNAL_WAITING] = 0;
10608 signal_stop[GDB_SIGNAL_CANCEL] = 0;
10609 signal_print[GDB_SIGNAL_CANCEL] = 0;
10610 signal_stop[GDB_SIGNAL_LIBRT] = 0;
10611 signal_print[GDB_SIGNAL_LIBRT] = 0;
10613 /* Update cached state. */
10614 signal_cache_update (-1);
10616 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
10617 &stop_on_solib_events, _("\
10618 Set stopping for shared library events."), _("\
10619 Show stopping for shared library events."), _("\
10620 If nonzero, gdb will give control to the user when the dynamic linker\n\
10621 notifies gdb of shared library events. The most common event of interest\n\
10622 to the user would be loading/unloading of a new library."),
10623 set_stop_on_solib_events,
10624 show_stop_on_solib_events,
10625 &setlist, &showlist);
10627 add_setshow_enum_cmd ("follow-fork-mode", class_run,
10628 follow_fork_mode_kind_names,
10629 &follow_fork_mode_string, _("\
10630 Set debugger response to a program call of fork or vfork."), _("\
10631 Show debugger response to a program call of fork or vfork."), _("\
10632 A fork or vfork creates a new process. follow-fork-mode can be:\n\
10633 parent - the original process is debugged after a fork\n\
10634 child - the new process is debugged after a fork\n\
10635 The unfollowed process will continue to run.\n\
10636 By default, the debugger will follow the parent process."),
10637 nullptr,
10638 show_follow_fork_mode_string,
10639 &setlist, &showlist);
10641 add_setshow_enum_cmd ("follow-exec-mode", class_run,
10642 follow_exec_mode_names,
10643 &follow_exec_mode_string, _("\
10644 Set debugger response to a program call of exec."), _("\
10645 Show debugger response to a program call of exec."), _("\
10646 An exec call replaces the program image of a process.\n\
10648 follow-exec-mode can be:\n\
10650 new - the debugger creates a new inferior and rebinds the process\n\
10651 to this new inferior. The program the process was running before\n\
10652 the exec call can be restarted afterwards by restarting the original\n\
10653 inferior.\n\
10655 same - the debugger keeps the process bound to the same inferior.\n\
10656 The new executable image replaces the previous executable loaded in\n\
10657 the inferior. Restarting the inferior after the exec call restarts\n\
10658 the executable the process was running after the exec call.\n\
10660 By default, the debugger will use the same inferior."),
10661 nullptr,
10662 show_follow_exec_mode_string,
10663 &setlist, &showlist);
10665 add_setshow_enum_cmd ("scheduler-locking", class_run,
10666 scheduler_enums, &scheduler_mode, _("\
10667 Set mode for locking scheduler during execution."), _("\
10668 Show mode for locking scheduler during execution."), _("\
10669 off == no locking (threads may preempt at any time)\n\
10670 on == full locking (no thread except the current thread may run)\n\
10671 This applies to both normal execution and replay mode.\n\
10672 step == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
10673 In this mode, other threads may run during other commands.\n\
10674 This applies to both normal execution and replay mode.\n\
10675 replay == scheduler locked in replay mode and unlocked during normal execution."),
10676 set_schedlock_func, /* traps on target vector */
10677 show_scheduler_mode,
10678 &setlist, &showlist);
10680 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
10681 Set mode for resuming threads of all processes."), _("\
10682 Show mode for resuming threads of all processes."), _("\
10683 When on, execution commands (such as 'continue' or 'next') resume all\n\
10684 threads of all processes. When off (which is the default), execution\n\
10685 commands only resume the threads of the current process. The set of\n\
10686 threads that are resumed is further refined by the scheduler-locking\n\
10687 mode (see help set scheduler-locking)."),
10688 nullptr,
10689 show_schedule_multiple,
10690 &setlist, &showlist);
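/* [Editorial example, illustration only.]  The two settings compose:

     (gdb) set schedule-multiple off
     (gdb) set scheduler-locking on
     (gdb) step          # only the current thread runs

     (gdb) set schedule-multiple on
     (gdb) set scheduler-locking off
     (gdb) continue      # resumes threads of every inferior
   */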
10692 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
10693 Set mode of the step operation."), _("\
10694 Show mode of the step operation."), _("\
10695 When set, doing a step over a function without debug line information\n\
10696 will stop at the first instruction of that function. Otherwise, the\n\
10697 function is skipped and the step command stops at a different source line."),
10698 nullptr,
10699 show_step_stop_if_no_debug,
10700 &setlist, &showlist);
10702 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
10703 &can_use_displaced_stepping, _("\
10704 Set debugger's willingness to use displaced stepping."), _("\
10705 Show debugger's willingness to use displaced stepping."), _("\
10706 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
10707 supported by the target architecture. If off, gdb will not use displaced\n\
10708 stepping to step over breakpoints, even if such is supported by the target\n\
10709 architecture. If auto (which is the default), gdb will use displaced stepping\n\
10710 if the target architecture supports it and non-stop mode is active, but will not\n\
10711 use it in all-stop mode (see help set non-stop)."),
10712 nullptr,
10713 show_can_use_displaced_stepping,
10714 &setlist, &showlist);
10716 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
10717 &exec_direction, _("Set direction of execution.\n\
10718 Options are 'forward' or 'reverse'."),
10719 _("Show direction of execution (forward/reverse)."),
10720 _("Tells gdb whether to execute forward or backward."),
10721 set_exec_direction_func, show_exec_direction_func,
10722 &setlist, &showlist);
10724 /* Set/show detach-on-fork: user-settable mode. */
10726 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
10727 Set whether gdb will detach the child of a fork."), _("\
10728 Show whether gdb will detach the child of a fork."), _("\
10729 Tells gdb whether to detach the child of a fork."),
10730 nullptr, nullptr, &setlist, &showlist);
10732 /* Set/show disable address space randomization mode. */
10734 add_setshow_boolean_cmd ("disable-randomization", class_support,
10735 &disable_randomization, _("\
10736 Set disabling of debuggee's virtual address space randomization."), _("\
10737 Show disabling of debuggee's virtual address space randomization."), _("\
10738 When this mode is on (which is the default), randomization of the virtual\n\
10739 address space is disabled. Standalone programs run with the randomization\n\
10740 enabled by default on some platforms."),
10741 &set_disable_randomization,
10742 &show_disable_randomization,
10743 &setlist, &showlist);
10745 /* ptid initializations */
10746 inferior_ptid = null_ptid;
10747 target_last_wait_ptid = minus_one_ptid;
10749 gdb::observers::thread_ptid_changed.attach (infrun_thread_ptid_changed,
10750 "infrun");
10751 gdb::observers::thread_stop_requested.attach (infrun_thread_stop_requested,
10752 "infrun");
10753 gdb::observers::inferior_exit.attach (infrun_inferior_exit, "infrun");
10754 gdb::observers::inferior_execd.attach (infrun_inferior_execd, "infrun");
10756 /* Explicitly create without lookup, since that tries to create a
10757 value with a void typed value, and when we get here, gdbarch
10758 isn't initialized yet. At this point, we're quite sure there
10759 isn't another convenience variable of the same name. */
10760 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, nullptr);
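/* [Editorial sketch -- "_myvar" and my_make_value are hypothetical,
   not part of GDB.]  Other lazily-computed convenience variables would
   follow the same pattern as $_siginfo above: supply an
   internalvar_funcs with a make_value callback and register it before
   gdbarch is available, e.g.

     static const struct internalvar_funcs my_funcs =
     {
       my_make_value,	/* hypothetical (gdbarch, var, data) callback */
       nullptr,
     };
     create_internalvar_type_lazy ("_myvar", &my_funcs, nullptr);
   */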
10762 add_setshow_boolean_cmd ("observer", no_class,
10763 &observer_mode_1, _("\
10764 Set whether gdb controls the inferior in observer mode."), _("\
10765 Show whether gdb controls the inferior in observer mode."), _("\
10766 In observer mode, GDB can get data from the inferior, but not\n\
10767 affect its execution. Registers and memory may not be changed,\n\
10768 breakpoints may not be set, and the program cannot be interrupted\n\
10769 or signalled."),
10770 set_observer_mode,
10771 show_observer_mode,
10772 &setlist,
10773 &showlist);
10775 #if GDB_SELF_TEST
10776 selftests::register_test ("infrun_thread_ptid_changed",
10777 selftests::infrun_thread_ptid_changed);
10778 #endif
10779 }