gdb/infrun.c (binutils-gdb.git)
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
4 Copyright (C) 1986-2024 Free Software Foundation, Inc.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21 #include "cli/cli-cmds.h"
22 #include "cli/cli-style.h"
23 #include "displaced-stepping.h"
24 #include "infrun.h"
25 #include <ctype.h>
26 #include "exceptions.h"
27 #include "symtab.h"
28 #include "frame.h"
29 #include "inferior.h"
30 #include "breakpoint.h"
31 #include "gdbcore.h"
32 #include "target.h"
33 #include "target-connection.h"
34 #include "gdbthread.h"
35 #include "annotate.h"
36 #include "symfile.h"
37 #include "top.h"
38 #include "ui.h"
39 #include "inf-loop.h"
40 #include "regcache.h"
41 #include "value.h"
42 #include "observable.h"
43 #include "language.h"
44 #include "solib.h"
45 #include "main.h"
46 #include "block.h"
47 #include "mi/mi-common.h"
48 #include "event-top.h"
49 #include "record.h"
50 #include "record-full.h"
51 #include "inline-frame.h"
52 #include "jit.h"
53 #include "tracepoint.h"
54 #include "skip.h"
55 #include "probe.h"
56 #include "objfiles.h"
57 #include "completer.h"
58 #include "target-descriptions.h"
59 #include "target-dcache.h"
60 #include "terminal.h"
61 #include "solist.h"
62 #include "gdbsupport/event-loop.h"
63 #include "thread-fsm.h"
64 #include "gdbsupport/enum-flags.h"
65 #include "progspace-and-thread.h"
66 #include <optional>
67 #include "arch-utils.h"
68 #include "gdbsupport/scope-exit.h"
69 #include "gdbsupport/forward-scope-exit.h"
70 #include "gdbsupport/gdb_select.h"
71 #include <unordered_map>
72 #include "async-event.h"
73 #include "gdbsupport/selftest.h"
74 #include "scoped-mock-context.h"
75 #include "test-target.h"
76 #include "gdbsupport/common-debug.h"
77 #include "gdbsupport/buildargv.h"
78 #include "extension.h"
79 #include "disasm.h"
80 #include "interps.h"
82 /* Prototypes for local functions */
84 static void sig_print_info (enum gdb_signal);
86 static void sig_print_header (void);
88 static void follow_inferior_reset_breakpoints (void);
90 static bool currently_stepping (struct thread_info *tp);
92 static void insert_hp_step_resume_breakpoint_at_frame (const frame_info_ptr &);
94 static void insert_step_resume_breakpoint_at_caller (const frame_info_ptr &);
96 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
98 static bool maybe_software_singlestep (struct gdbarch *gdbarch);
100 static void resume (gdb_signal sig);
102 static void wait_for_inferior (inferior *inf);
104 static void restart_threads (struct thread_info *event_thread,
105 inferior *inf = nullptr);
107 static bool start_step_over (void);
109 static bool step_over_info_valid_p (void);
111 static bool schedlock_applies (struct thread_info *tp);
113 /* Asynchronous signal handler registered as event loop source for
114 when we have pending events ready to be passed to the core. */
115 static struct async_event_handler *infrun_async_inferior_event_token;
117 /* Stores whether infrun_async was previously enabled or disabled.
118 Starts off as -1, indicating "never enabled/disabled". */
119 static int infrun_is_async = -1;
120 static CORE_ADDR update_line_range_start (CORE_ADDR pc,
121 struct execution_control_state *ecs);
123 /* See infrun.h. */
125 void
126 infrun_async (int enable)
128 if (infrun_is_async != enable)
130 infrun_is_async = enable;
132 infrun_debug_printf ("enable=%d", enable);
134 if (enable)
135 mark_async_event_handler (infrun_async_inferior_event_token);
136 else
137 clear_async_event_handler (infrun_async_inferior_event_token);
141 /* See infrun.h. */
143 void
144 mark_infrun_async_event_handler (void)
146 mark_async_event_handler (infrun_async_inferior_event_token);
149 /* When set, stop the 'step' command if we enter a function which has
150 no line number information. The normal behavior is that we step
151 over such a function. */
152 bool step_stop_if_no_debug = false;
153 static void
154 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
155 struct cmd_list_element *c, const char *value)
157 gdb_printf (file, _("Mode of the step operation is %s.\n"), value);
160 /* proceed and normal_stop use this to notify the user when the
161 inferior stopped in a different thread than it had been running in.
162 It can also be used to find for which thread normal_stop last
163 reported a stop. */
164 static thread_info_ref previous_thread;
166 /* See infrun.h. */
168 void
169 update_previous_thread ()
171 if (inferior_ptid == null_ptid)
172 previous_thread = nullptr;
173 else
174 previous_thread = thread_info_ref::new_reference (inferior_thread ());
177 /* See infrun.h. */
179 thread_info *
180 get_previous_thread ()
182 return previous_thread.get ();
185 /* If set (default for legacy reasons), when following a fork, GDB
186 will detach from one of the fork branches, child or parent.
187 Exactly which branch is detached depends on 'set follow-fork-mode'
188 setting. */
190 static bool detach_fork = true;
192 bool debug_infrun = false;
193 static void
194 show_debug_infrun (struct ui_file *file, int from_tty,
195 struct cmd_list_element *c, const char *value)
197 gdb_printf (file, _("Inferior debugging is %s.\n"), value);
200 /* Support for disabling address space randomization. */
202 bool disable_randomization = true;
204 static void
205 show_disable_randomization (struct ui_file *file, int from_tty,
206 struct cmd_list_element *c, const char *value)
208 if (target_supports_disable_randomization ())
209 gdb_printf (file,
210 _("Disabling randomization of debuggee's "
211 "virtual address space is %s.\n"),
212 value);
213 else
214 gdb_puts (_("Disabling randomization of debuggee's "
215 "virtual address space is unsupported on\n"
216 "this platform.\n"), file);
219 static void
220 set_disable_randomization (const char *args, int from_tty,
221 struct cmd_list_element *c)
223 if (!target_supports_disable_randomization ())
224 error (_("Disabling randomization of debuggee's "
225 "virtual address space is unsupported on\n"
226 "this platform."));
229 /* User interface for non-stop mode. */
231 bool non_stop = false;
232 static bool non_stop_1 = false;
234 static void
235 set_non_stop (const char *args, int from_tty,
236 struct cmd_list_element *c)
238 if (target_has_execution ())
240 non_stop_1 = non_stop;
241 error (_("Cannot change this setting while the inferior is running."));
244 non_stop = non_stop_1;
247 static void
248 show_non_stop (struct ui_file *file, int from_tty,
249 struct cmd_list_element *c, const char *value)
251 gdb_printf (file,
252 _("Controlling the inferior in non-stop mode is %s.\n"),
253 value);
256 /* "Observer mode" is somewhat like a more extreme version of
257 non-stop, in which all GDB operations that might affect the
258 target's execution have been disabled. */
260 static bool observer_mode = false;
261 static bool observer_mode_1 = false;
263 static void
264 set_observer_mode (const char *args, int from_tty,
265 struct cmd_list_element *c)
267 if (target_has_execution ())
269 observer_mode_1 = observer_mode;
270 error (_("Cannot change this setting while the inferior is running."));
273 observer_mode = observer_mode_1;
275 may_write_registers = !observer_mode;
276 may_write_memory = !observer_mode;
277 may_insert_breakpoints = !observer_mode;
278 may_insert_tracepoints = !observer_mode;
279 /* We can insert fast tracepoints in or out of observer mode,
280 but enable them if we're going into this mode. */
281 if (observer_mode)
282 may_insert_fast_tracepoints = true;
283 may_stop = !observer_mode;
284 update_target_permissions ();
286 /* Going *into* observer mode we must force non-stop, then
287 going out we leave it that way. */
288 if (observer_mode)
290 pagination_enabled = false;
291 non_stop = non_stop_1 = true;
294 if (from_tty)
295 gdb_printf (_("Observer mode is now %s.\n"),
296 (observer_mode ? "on" : "off"));
299 static void
300 show_observer_mode (struct ui_file *file, int from_tty,
301 struct cmd_list_element *c, const char *value)
303 gdb_printf (file, _("Observer mode is %s.\n"), value);
306 /* This updates the value of observer mode based on changes in
307 permissions. Note that we are deliberately ignoring the values of
308 may-write-registers and may-write-memory, since the user may have
309 reason to enable these during a session, for instance to turn on a
310 debugging-related global. */
312 void
313 update_observer_mode (void)
315 bool newval = (!may_insert_breakpoints
316 && !may_insert_tracepoints
317 && may_insert_fast_tracepoints
318 && !may_stop
319 && non_stop);
321 /* Let the user know if things change. */
322 if (newval != observer_mode)
323 gdb_printf (_("Observer mode is now %s.\n"),
324 (newval ? "on" : "off"));
326 observer_mode = observer_mode_1 = newval;
329 /* Tables of how to react to signals; the user sets them. */
331 static unsigned char signal_stop[GDB_SIGNAL_LAST];
332 static unsigned char signal_print[GDB_SIGNAL_LAST];
333 static unsigned char signal_program[GDB_SIGNAL_LAST];
335 /* Table of signals that are registered with "catch signal". A
336 non-zero entry indicates that the signal is caught by some "catch
337 signal" command. */
338 static unsigned char signal_catch[GDB_SIGNAL_LAST];
340 /* Table of signals that the target may silently handle.
341 This is automatically determined from the flags above,
342 and simply cached here. */
343 static unsigned char signal_pass[GDB_SIGNAL_LAST];
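/* A minimal sketch of how that cache relates to the tables above
   (illustrative only; the real update code lives elsewhere in this
   file): a signal is passed to the program silently iff we neither
   stop for it nor print it.  */
#if 0
  signal_pass[signo] = (signal_stop[signo] == 0
                        && signal_print[signo] == 0);
#endif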
345 #define SET_SIGS(nsigs,sigs,flags) \
346 do { \
347 int signum = (nsigs); \
348 while (signum-- > 0) \
349 if ((sigs)[signum]) \
350 (flags)[signum] = 1; \
351 } while (0)
353 #define UNSET_SIGS(nsigs,sigs,flags) \
354 do { \
355 int signum = (nsigs); \
356 while (signum-- > 0) \
357 if ((sigs)[signum]) \
358 (flags)[signum] = 0; \
359 } while (0)
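/* A sketch of typical use of the two helpers above (illustrative
   only; the actual callers are in the "handle" command code further
   down).  NSIGS and SIGS describe which signals the user named.  */
#if 0
  /* Mark the named signals as stopping and printing...  */
  SET_SIGS (nsigs, sigs, signal_stop);
  SET_SIGS (nsigs, sigs, signal_print);
  /* ...or clear those flags again.  */
  UNSET_SIGS (nsigs, sigs, signal_stop);
  UNSET_SIGS (nsigs, sigs, signal_print);
#endif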
361 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
362 this function is to avoid exporting `signal_program'. */
364 void
365 update_signals_program_target (void)
367 target_program_signals (signal_program);
370 /* Value to pass to target_resume() to cause all threads to resume. */
372 #define RESUME_ALL minus_one_ptid
374 /* Command list pointer for the "stop" placeholder. */
376 static struct cmd_list_element *stop_command;
378 /* Nonzero if we want to give control to the user when we're notified
379 of shared library events by the dynamic linker. */
380 int stop_on_solib_events;
382 /* Enable or disable optional shared library event breakpoints
383 as appropriate when the above flag is changed. */
385 static void
386 set_stop_on_solib_events (const char *args,
387 int from_tty, struct cmd_list_element *c)
389 update_solib_breakpoints ();
392 static void
393 show_stop_on_solib_events (struct ui_file *file, int from_tty,
394 struct cmd_list_element *c, const char *value)
396 gdb_printf (file, _("Stopping for shared library events is %s.\n"),
397 value);
400 /* True after stop if current stack frame should be printed. */
402 static bool stop_print_frame;
404 /* This is a cached copy of the target/ptid/waitstatus of the last
405 event returned by target_wait().
406 This information is returned by get_last_target_status(). */
407 static process_stratum_target *target_last_proc_target;
408 static ptid_t target_last_wait_ptid;
409 static struct target_waitstatus target_last_waitstatus;
411 void init_thread_stepping_state (struct thread_info *tss);
413 static const char follow_fork_mode_child[] = "child";
414 static const char follow_fork_mode_parent[] = "parent";
416 static const char *const follow_fork_mode_kind_names[] = {
417 follow_fork_mode_child,
418 follow_fork_mode_parent,
419 nullptr
422 static const char *follow_fork_mode_string = follow_fork_mode_parent;
423 static void
424 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
425 struct cmd_list_element *c, const char *value)
427 gdb_printf (file,
428 _("Debugger response to a program "
429 "call of fork or vfork is \"%s\".\n"),
430 value);
434 /* Handle changes to the inferior list based on the type of fork,
435 which process is being followed, and whether the other process
436 should be detached. On entry inferior_ptid must be the ptid of
437 the fork parent. At return inferior_ptid is the ptid of the
438 followed inferior. */
440 static bool
441 follow_fork_inferior (bool follow_child, bool detach_fork)
443 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
445 infrun_debug_printf ("follow_child = %d, detach_fork = %d",
446 follow_child, detach_fork);
448 target_waitkind fork_kind = inferior_thread ()->pending_follow.kind ();
449 gdb_assert (fork_kind == TARGET_WAITKIND_FORKED
450 || fork_kind == TARGET_WAITKIND_VFORKED);
451 bool has_vforked = fork_kind == TARGET_WAITKIND_VFORKED;
452 ptid_t parent_ptid = inferior_ptid;
453 ptid_t child_ptid = inferior_thread ()->pending_follow.child_ptid ();
455 if (has_vforked
456 && !non_stop /* Non-stop always resumes both branches. */
457 && current_ui->prompt_state == PROMPT_BLOCKED
458 && !(follow_child || detach_fork || sched_multi))
460 /* The parent stays blocked inside the vfork syscall until the
461 child execs or exits. If we don't let the child run, then
462 the parent stays blocked. If we're telling the parent to run
463 in the foreground, the user will not be able to ctrl-c to get
464 back the terminal, effectively hanging the debug session. */
465 gdb_printf (gdb_stderr, _("\
466 Can not resume the parent process over vfork in the foreground while\n\
467 holding the child stopped. Try \"set %ps\" or \"%ps\".\n"),
468 styled_string (command_style.style (), "set detach-on-fork"),
469 styled_string (command_style.style (),
470 "set schedule-multiple"));
471 return true;
474 inferior *parent_inf = current_inferior ();
475 inferior *child_inf = nullptr;
477 gdb_assert (parent_inf->thread_waiting_for_vfork_done == nullptr);
479 if (!follow_child)
481 /* Detach new forked process? */
482 if (detach_fork)
484 /* Before detaching from the child, remove all breakpoints
485 from it. If we forked, then this has already been taken
486 care of by infrun.c. If we vforked however, any
487 breakpoint inserted in the parent is visible in the
488 child, even those added while stopped in a vfork
489 catchpoint. This will remove the breakpoints from the
490 parent also, but they'll be reinserted below. */
491 if (has_vforked)
493 /* Keep breakpoints list in sync. */
494 remove_breakpoints_inf (current_inferior ());
497 if (print_inferior_events)
499 /* Ensure that we have a process ptid. */
500 ptid_t process_ptid = ptid_t (child_ptid.pid ());
502 target_terminal::ours_for_output ();
503 gdb_printf (_("[Detaching after %s from child %s]\n"),
504 has_vforked ? "vfork" : "fork",
505 target_pid_to_str (process_ptid).c_str ());
508 else
510 /* Add process to GDB's tables. */
511 child_inf = add_inferior (child_ptid.pid ());
513 child_inf->attach_flag = parent_inf->attach_flag;
514 copy_terminal_info (child_inf, parent_inf);
515 child_inf->set_arch (parent_inf->arch ());
516 child_inf->tdesc_info = parent_inf->tdesc_info;
518 child_inf->symfile_flags = SYMFILE_NO_READ;
520 /* If this is a vfork child, then the address-space is
521 shared with the parent. */
522 if (has_vforked)
524 child_inf->pspace = parent_inf->pspace;
525 child_inf->aspace = parent_inf->aspace;
527 exec_on_vfork (child_inf);
529 /* The parent will be frozen until the child is done
530 with the shared region. Keep track of the
531 parent. */
532 child_inf->vfork_parent = parent_inf;
533 child_inf->pending_detach = false;
534 parent_inf->vfork_child = child_inf;
535 parent_inf->pending_detach = false;
537 else
539 child_inf->pspace = new program_space (new_address_space ());
540 child_inf->aspace = child_inf->pspace->aspace;
541 child_inf->removable = true;
542 clone_program_space (child_inf->pspace, parent_inf->pspace);
546 if (has_vforked)
548 /* If we detached from the child, then we have to be careful
549 to not insert breakpoints in the parent until the child
550 is done with the shared memory region. However, if we're
551 staying attached to the child, then we can and should
552 insert breakpoints, so that we can debug it. A
553 subsequent child exec or exit is enough to know when
554 the child stops using the parent's address space. */
555 parent_inf->thread_waiting_for_vfork_done
556 = detach_fork ? inferior_thread () : nullptr;
557 parent_inf->pspace->breakpoints_not_allowed = detach_fork;
559 infrun_debug_printf
560 ("parent_inf->thread_waiting_for_vfork_done == %s",
561 (parent_inf->thread_waiting_for_vfork_done == nullptr
562 ? "nullptr"
563 : (parent_inf->thread_waiting_for_vfork_done
564 ->ptid.to_string ().c_str ())));
567 else
569 /* Follow the child. */
571 if (print_inferior_events)
573 std::string parent_pid = target_pid_to_str (parent_ptid);
574 std::string child_pid = target_pid_to_str (child_ptid);
576 target_terminal::ours_for_output ();
577 gdb_printf (_("[Attaching after %s %s to child %s]\n"),
578 parent_pid.c_str (),
579 has_vforked ? "vfork" : "fork",
580 child_pid.c_str ());
583 /* Add the new inferior first, so that the target_detach below
584 doesn't unpush the target. */
586 child_inf = add_inferior (child_ptid.pid ());
588 child_inf->attach_flag = parent_inf->attach_flag;
589 copy_terminal_info (child_inf, parent_inf);
590 child_inf->set_arch (parent_inf->arch ());
591 child_inf->tdesc_info = parent_inf->tdesc_info;
593 if (has_vforked)
595 /* If this is a vfork child, then the address-space is shared
596 with the parent. */
597 child_inf->aspace = parent_inf->aspace;
598 child_inf->pspace = parent_inf->pspace;
600 exec_on_vfork (child_inf);
602 else if (detach_fork)
604 /* We follow the child and detach from the parent: move the parent's
605 program space to the child. This simplifies some things, like
606 doing "next" over fork() and landing on the expected line in the
607 child (note, that is broken with "set detach-on-fork off").
609 Before assigning brand new spaces for the parent, remove
610 breakpoints from it: because the new pspace won't match
611 currently inserted locations, the normal detach procedure
612 wouldn't remove them, and we would leave them inserted when
613 detaching. */
614 remove_breakpoints_inf (parent_inf);
616 child_inf->aspace = parent_inf->aspace;
617 child_inf->pspace = parent_inf->pspace;
618 parent_inf->pspace = new program_space (new_address_space ());
619 parent_inf->aspace = parent_inf->pspace->aspace;
620 clone_program_space (parent_inf->pspace, child_inf->pspace);
622 /* The parent inferior is still the current one, so keep things
623 in sync. */
624 set_current_program_space (parent_inf->pspace);
626 else
628 child_inf->pspace = new program_space (new_address_space ());
629 child_inf->aspace = child_inf->pspace->aspace;
630 child_inf->removable = true;
631 child_inf->symfile_flags = SYMFILE_NO_READ;
632 clone_program_space (child_inf->pspace, parent_inf->pspace);
636 gdb_assert (current_inferior () == parent_inf);
638 /* If we are setting up an inferior for the child, target_follow_fork is
639 responsible for pushing the appropriate targets on the new inferior's
640 target stack and adding the initial thread (with ptid CHILD_PTID).
642 If we are not setting up an inferior for the child (because following
643 the parent and detach_fork is true), it is responsible for detaching
644 from CHILD_PTID. */
645 target_follow_fork (child_inf, child_ptid, fork_kind, follow_child,
646 detach_fork);
648 gdb::observers::inferior_forked.notify (parent_inf, child_inf, fork_kind);
650 /* target_follow_fork must leave the parent as the current inferior. If we
651 want to follow the child, we make it the current one below. */
652 gdb_assert (current_inferior () == parent_inf);
654 /* If there is a child inferior, target_follow_fork must have created a thread
655 for it. */
656 if (child_inf != nullptr)
657 gdb_assert (!child_inf->thread_list.empty ());
659 /* Clear the parent thread's pending follow field. Do this before calling
660 target_detach, so that the target can differentiate the two following
661 cases:
663 - We continue past a fork with "follow-fork-mode == child" &&
664 "detach-on-fork on", and therefore detach the parent. In that
665 case the target should not detach the fork child.
666 - We run to a fork catchpoint and the user types "detach". In that
667 case, the target should detach the fork child in addition to the
668 parent.
670 The former case will have pending_follow cleared, the latter will have
671 pending_follow set. */
672 thread_info *parent_thread = parent_inf->find_thread (parent_ptid);
673 gdb_assert (parent_thread != nullptr);
674 parent_thread->pending_follow.set_spurious ();
676 /* Detach the parent if needed. */
677 if (follow_child)
679 /* If we're vforking, we want to hold on to the parent until
680 the child exits or execs. At child exec or exit time we
681 can remove the old breakpoints from the parent and detach
682 or resume debugging it. Otherwise, detach the parent now;
683 we'll want to reuse its program/address spaces, but we
684 can't set them to the child before removing breakpoints
685 from the parent, otherwise, the breakpoints module could
686 decide to remove breakpoints from the wrong process (since
687 they'd be assigned to the same address space). */
689 if (has_vforked)
691 gdb_assert (child_inf->vfork_parent == nullptr);
692 gdb_assert (parent_inf->vfork_child == nullptr);
693 child_inf->vfork_parent = parent_inf;
694 child_inf->pending_detach = false;
695 parent_inf->vfork_child = child_inf;
696 parent_inf->pending_detach = detach_fork;
698 else if (detach_fork)
700 if (print_inferior_events)
702 /* Ensure that we have a process ptid. */
703 ptid_t process_ptid = ptid_t (parent_ptid.pid ());
705 target_terminal::ours_for_output ();
706 gdb_printf (_("[Detaching after fork from "
707 "parent %s]\n"),
708 target_pid_to_str (process_ptid).c_str ());
711 target_detach (parent_inf, 0);
715 /* If we ended up creating a new inferior, call post_create_inferior to inform
716 the various subcomponents. */
717 if (child_inf != nullptr)
719 /* If FOLLOW_CHILD, we leave CHILD_INF as the current inferior
720 (do not restore the parent as the current inferior). */
721 std::optional<scoped_restore_current_thread> maybe_restore;
723 if (!follow_child && !sched_multi)
724 maybe_restore.emplace ();
726 switch_to_thread (*child_inf->threads ().begin ());
727 post_create_inferior (0);
730 return false;
733 /* Set the last target status as TP having stopped. */
735 static void
736 set_last_target_status_stopped (thread_info *tp)
738 set_last_target_status (tp->inf->process_target (), tp->ptid,
739 target_waitstatus {}.set_stopped (GDB_SIGNAL_0));
742 /* Tell the target to follow the fork we're stopped at. Returns true
743 if the inferior should be resumed; false, if the target for some
744 reason decided it's best not to resume. */
746 static bool
747 follow_fork ()
749 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
751 bool follow_child = (follow_fork_mode_string == follow_fork_mode_child);
752 bool should_resume = true;
754 /* Copy user stepping state to the new inferior thread. FIXME: the
755 followed fork child thread should have a copy of most of the
756 parent thread structure's run control related fields, not just these.
757 Initialized to avoid "may be used uninitialized" warnings from gcc. */
758 struct breakpoint *step_resume_breakpoint = nullptr;
759 struct breakpoint *exception_resume_breakpoint = nullptr;
760 CORE_ADDR step_range_start = 0;
761 CORE_ADDR step_range_end = 0;
762 int current_line = 0;
763 symtab *current_symtab = nullptr;
764 struct frame_id step_frame_id = { 0 };
766 if (!non_stop)
768 thread_info *cur_thr = inferior_thread ();
770 ptid_t resume_ptid
771 = user_visible_resume_ptid (cur_thr->control.stepping_command);
772 process_stratum_target *resume_target
773 = user_visible_resume_target (resume_ptid);
775 /* Check if there's a thread that we're about to resume, other
776 than the current, with an unfollowed fork/vfork. If so,
777 switch back to it, to tell the target to follow it (in either
778 direction). We'll afterwards refuse to resume, and inform
779 the user what happened. */
780 for (thread_info *tp : all_non_exited_threads (resume_target,
781 resume_ptid))
783 if (tp == cur_thr)
784 continue;
786 /* follow_fork_inferior clears tp->pending_follow, and below
787 we'll need the value after the follow_fork_inferior
788 call. */
789 target_waitkind kind = tp->pending_follow.kind ();
791 if (kind != TARGET_WAITKIND_SPURIOUS)
793 infrun_debug_printf ("need to follow-fork [%s] first",
794 tp->ptid.to_string ().c_str ());
796 switch_to_thread (tp);
798 /* Set up inferior(s) as specified by the caller, and
799 tell the target to do whatever is necessary to follow
800 either parent or child. */
801 if (follow_child)
803 /* The thread that started the execution command
804 won't exist in the child. Abort the command and
805 immediately stop in this thread, in the child,
806 inside fork. */
807 should_resume = false;
809 else
811 /* Following the parent, so let the thread fork its
812 child freely, it won't influence the current
813 execution command. */
814 if (follow_fork_inferior (follow_child, detach_fork))
816 /* Target refused to follow, or there's some
817 other reason we shouldn't resume. */
818 switch_to_thread (cur_thr);
819 set_last_target_status_stopped (cur_thr);
820 return false;
823 /* If we're following a vfork, we need to leave
824 the just-forked thread as selected, as we need to
825 solo-resume it to collect the VFORK_DONE event.
826 If we're following a fork, however, switch back
827 to the original thread so that we continue stepping
828 it, etc. */
829 if (kind != TARGET_WAITKIND_VFORKED)
831 gdb_assert (kind == TARGET_WAITKIND_FORKED);
832 switch_to_thread (cur_thr);
836 break;
841 thread_info *tp = inferior_thread ();
843 /* If there were any forks/vforks that were caught and are now to be
844 followed, then do so now. */
845 switch (tp->pending_follow.kind ())
847 case TARGET_WAITKIND_FORKED:
848 case TARGET_WAITKIND_VFORKED:
850 ptid_t parent, child;
851 std::unique_ptr<struct thread_fsm> thread_fsm;
853 /* If the user did a next/step, etc, over a fork call,
854 preserve the stepping state in the fork child. */
855 if (follow_child && should_resume)
857 step_resume_breakpoint = clone_momentary_breakpoint
858 (tp->control.step_resume_breakpoint);
859 step_range_start = tp->control.step_range_start;
860 step_range_end = tp->control.step_range_end;
861 current_line = tp->current_line;
862 current_symtab = tp->current_symtab;
863 step_frame_id = tp->control.step_frame_id;
864 exception_resume_breakpoint
865 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
866 thread_fsm = tp->release_thread_fsm ();
868 /* For now, delete the parent's sr breakpoint, otherwise,
869 parent/child sr breakpoints are considered duplicates,
870 and the child version will not be installed. Remove
871 this when the breakpoints module becomes aware of
872 inferiors and address spaces. */
873 delete_step_resume_breakpoint (tp);
874 tp->control.step_range_start = 0;
875 tp->control.step_range_end = 0;
876 tp->control.step_frame_id = null_frame_id;
877 delete_exception_resume_breakpoint (tp);
880 parent = inferior_ptid;
881 child = tp->pending_follow.child_ptid ();
883 /* If handling a vfork, stop all the inferior's threads, they will be
884 restarted when the vfork shared region is complete. */
885 if (tp->pending_follow.kind () == TARGET_WAITKIND_VFORKED
886 && target_is_non_stop_p ())
887 stop_all_threads ("handling vfork", tp->inf);
889 process_stratum_target *parent_targ = tp->inf->process_target ();
890 /* Set up inferior(s) as specified by the caller, and tell the
891 target to do whatever is necessary to follow either parent
892 or child. */
893 if (follow_fork_inferior (follow_child, detach_fork))
895 /* Target refused to follow, or there's some other reason
896 we shouldn't resume. */
897 should_resume = 0;
899 else
901 /* If we followed the child, switch to it... */
902 if (follow_child)
904 tp = parent_targ->find_thread (child);
905 switch_to_thread (tp);
907 /* ... and preserve the stepping state, in case the
908 user was stepping over the fork call. */
909 if (should_resume)
911 tp->control.step_resume_breakpoint
912 = step_resume_breakpoint;
913 tp->control.step_range_start = step_range_start;
914 tp->control.step_range_end = step_range_end;
915 tp->current_line = current_line;
916 tp->current_symtab = current_symtab;
917 tp->control.step_frame_id = step_frame_id;
918 tp->control.exception_resume_breakpoint
919 = exception_resume_breakpoint;
920 tp->set_thread_fsm (std::move (thread_fsm));
922 else
924 /* If we get here, it was because we're trying to
925 resume from a fork catchpoint, but, the user
926 has switched threads away from the thread that
927 forked. In that case, the resume command
928 issued is most likely not applicable to the
929 child, so just warn, and refuse to resume. */
930 warning (_("Not resuming: switched threads "
931 "before following fork child."));
934 /* Reset breakpoints in the child as appropriate. */
935 follow_inferior_reset_breakpoints ();
939 break;
940 case TARGET_WAITKIND_SPURIOUS:
941 /* Nothing to follow. */
942 break;
943 default:
944 internal_error ("Unexpected pending_follow.kind %d\n",
945 tp->pending_follow.kind ());
946 break;
949 if (!should_resume)
950 set_last_target_status_stopped (tp);
951 return should_resume;
954 static void
955 follow_inferior_reset_breakpoints (void)
957 struct thread_info *tp = inferior_thread ();
959 /* Was there a step_resume breakpoint? (There was if the user
960 did a "next" at the fork() call.) If so, explicitly reset its
961 thread number. Cloned step_resume breakpoints are disabled on
962 creation, so enable it here now that it is associated with the
963 correct thread.
965 step_resumes are a form of bp that are made to be per-thread.
966 Since we created the step_resume bp when the parent process
967 was being debugged, and now are switching to the child process,
968 from the breakpoint package's viewpoint, that's a switch of
969 "threads". We must update the bp's notion of which thread
970 it is for, or it'll be ignored when it triggers. */
972 if (tp->control.step_resume_breakpoint)
974 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
975 tp->control.step_resume_breakpoint->first_loc ().enabled = 1;
978 /* Treat exception_resume breakpoints like step_resume breakpoints. */
979 if (tp->control.exception_resume_breakpoint)
981 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
982 tp->control.exception_resume_breakpoint->first_loc ().enabled = 1;
985 /* Reinsert all breakpoints in the child. The user may have set
986 breakpoints after catching the fork, in which case those
987 were never set in the child, but only in the parent. This makes
988 sure the inserted breakpoints match the breakpoint list. */
990 breakpoint_re_set ();
991 insert_breakpoints ();
994 /* The child has exited or execed: resume THREAD, a thread of the parent,
995 if it was meant to be executing. */
997 static void
998 proceed_after_vfork_done (thread_info *thread)
1000 if (thread->state == THREAD_RUNNING
1001 && !thread->executing ()
1002 && !thread->stop_requested
1003 && thread->stop_signal () == GDB_SIGNAL_0)
1005 infrun_debug_printf ("resuming vfork parent thread %s",
1006 thread->ptid.to_string ().c_str ());
1008 switch_to_thread (thread);
1009 clear_proceed_status (0);
1010 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
1014 /* Called whenever we notice an exec or exit event, to handle
1015 detaching or resuming a vfork parent. */
1017 static void
1018 handle_vfork_child_exec_or_exit (int exec)
1020 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
1022 struct inferior *inf = current_inferior ();
1024 if (inf->vfork_parent)
1026 inferior *resume_parent = nullptr;
1028 /* This exec or exit marks the end of the shared memory region
1029 between the parent and the child. Break the bonds. */
1030 inferior *vfork_parent = inf->vfork_parent;
1031 inf->vfork_parent->vfork_child = nullptr;
1032 inf->vfork_parent = nullptr;
1034 /* If the user wanted to detach from the parent, now is the
1035 time. */
1036 if (vfork_parent->pending_detach)
1038 struct program_space *pspace;
1040 /* follow-fork child, detach-on-fork on. */
1042 vfork_parent->pending_detach = false;
1044 scoped_restore_current_pspace_and_thread restore_thread;
1046 /* We're letting loose of the parent. */
1047 thread_info *tp = any_live_thread_of_inferior (vfork_parent);
1048 switch_to_thread (tp);
1050 /* We're about to detach from the parent, which implicitly
1051 removes breakpoints from its address space. There's a
1052 catch here: we want to reuse the spaces for the child,
1053 but, parent/child are still sharing the pspace at this
1054 point, although the exec in reality makes the kernel give
1055 the child a fresh set of new pages. The problem here is
1056 that the breakpoints module, being unaware of this, would
1057 likely choose the child process to write to the parent
1058 address space. Swapping the child temporarily away from
1059 the spaces has the desired effect. Yes, this is "sort
1060 of" a hack. */
1062 pspace = inf->pspace;
1063 inf->pspace = nullptr;
1064 address_space_ref_ptr aspace = std::move (inf->aspace);
1066 if (print_inferior_events)
1068 std::string pidstr
1069 = target_pid_to_str (ptid_t (vfork_parent->pid));
1071 target_terminal::ours_for_output ();
1073 if (exec)
1075 gdb_printf (_("[Detaching vfork parent %s "
1076 "after child exec]\n"), pidstr.c_str ());
1078 else
1080 gdb_printf (_("[Detaching vfork parent %s "
1081 "after child exit]\n"), pidstr.c_str ());
1085 target_detach (vfork_parent, 0);
1087 /* Put it back. */
1088 inf->pspace = pspace;
1089 inf->aspace = aspace;
1091 else if (exec)
1093 /* We're staying attached to the parent, so, really give the
1094 child a new address space. */
1095 inf->pspace = new program_space (maybe_new_address_space ());
1096 inf->aspace = inf->pspace->aspace;
1097 inf->removable = true;
1098 set_current_program_space (inf->pspace);
1100 resume_parent = vfork_parent;
1102 else
1104 /* If this is a vfork child exiting, then the pspace and
1105 aspaces were shared with the parent. Since we're
1106 reporting the process exit, we'll be mourning all that is
1107 found in the address space, and switching to null_ptid,
1108 preparing to start a new inferior. But, since we don't
1109 want to clobber the parent's address/program spaces, we
1110 go ahead and create a new one for this exiting
1111 inferior. */
1113 scoped_restore_current_thread restore_thread;
1115 /* Temporarily switch to the vfork parent, to facilitate ptrace
1116 calls done during maybe_new_address_space. */
1117 switch_to_thread (any_live_thread_of_inferior (vfork_parent));
1118 address_space_ref_ptr aspace = maybe_new_address_space ();
1120 /* Switch back to the vfork child inferior. Switch to no-thread
1121 while running clone_program_space, so that clone_program_space
1122 doesn't try to read the selected frame of a dead process.
1123 switch_to_inferior_no_thread (inf);
1125 inf->pspace = new program_space (std::move (aspace));
1126 inf->aspace = inf->pspace->aspace;
1127 set_current_program_space (inf->pspace);
1128 inf->removable = true;
1129 inf->symfile_flags = SYMFILE_NO_READ;
1130 clone_program_space (inf->pspace, vfork_parent->pspace);
1132 resume_parent = vfork_parent;
1135 gdb_assert (current_program_space == inf->pspace);
1137 if (non_stop && resume_parent != nullptr)
1139 /* If the user wanted the parent to be running, let it go
1140 free now. */
1141 scoped_restore_current_thread restore_thread;
1143 infrun_debug_printf ("resuming vfork parent process %d",
1144 resume_parent->pid);
1146 for (thread_info *thread : resume_parent->threads ())
1147 proceed_after_vfork_done (thread);
1152 /* Handle TARGET_WAITKIND_VFORK_DONE. */
1154 static void
1155 handle_vfork_done (thread_info *event_thread)
1157 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
1159 /* We only care about this event if inferior::thread_waiting_for_vfork_done is
1160 set, that is if we are waiting for a vfork child not under our control
1161 (because we detached it) to exec or exit.
1163 If an inferior has vforked and we are debugging the child, we don't use
1164 the vfork-done event to get notified about the end of the shared address
1165 space window. We rely instead on the child's exec or exit event, and the
1166 inferior::vfork_{parent,child} fields are used instead. See
1167 handle_vfork_child_exec_or_exit for that. */
1168 if (event_thread->inf->thread_waiting_for_vfork_done == nullptr)
1170 infrun_debug_printf ("not waiting for a vfork-done event");
1171 return;
1174 /* We stopped all threads (other than the vforking thread) of the inferior in
1175 follow_fork and kept them stopped until now. It should therefore not be
1176 possible for another thread to have reported a vfork during that window.
1177 If THREAD_WAITING_FOR_VFORK_DONE is set, it has to be the same thread whose
1178 vfork-done we are handling right now. */
1179 gdb_assert (event_thread->inf->thread_waiting_for_vfork_done == event_thread);
1181 event_thread->inf->thread_waiting_for_vfork_done = nullptr;
1182 event_thread->inf->pspace->breakpoints_not_allowed = 0;
1184 /* On non-stop targets, we stopped all the inferior's threads in follow_fork,
1185 resume them now. On all-stop targets, everything that needs to be resumed
1186 will be when we resume the event thread. */
1187 if (target_is_non_stop_p ())
1189 /* restart_threads and start_step_over may change the current thread, make
1190 sure we leave the event thread as the current thread. */
1191 scoped_restore_current_thread restore_thread;
1193 insert_breakpoints ();
1194 start_step_over ();
1196 if (!step_over_info_valid_p ())
1197 restart_threads (event_thread, event_thread->inf);
1201 /* Enum strings for "set|show follow-exec-mode". */
1203 static const char follow_exec_mode_new[] = "new";
1204 static const char follow_exec_mode_same[] = "same";
1205 static const char *const follow_exec_mode_names[] =
1207 follow_exec_mode_new,
1208 follow_exec_mode_same,
1209 nullptr,
1212 static const char *follow_exec_mode_string = follow_exec_mode_same;
1213 static void
1214 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
1215 struct cmd_list_element *c, const char *value)
1217 gdb_printf (file, _("Follow exec mode is \"%s\".\n"), value);
1220 /* EXEC_FILE_TARGET is assumed to be non-NULL. */
1222 static void
1223 follow_exec (ptid_t ptid, const char *exec_file_target)
1225 int pid = ptid.pid ();
1226 ptid_t process_ptid;
1228 /* Switch terminal for any messages produced e.g. by
1229 breakpoint_re_set. */
1230 target_terminal::ours_for_output ();
1232 /* This is an exec event that we actually wish to pay attention to.
1233 Refresh our symbol table to the newly exec'd program, remove any
1234 momentary bp's, etc.
1236 If there are breakpoints, they aren't really inserted now,
1237 since the exec() transformed our inferior into a fresh set
1238 of instructions.
1240 We want to preserve symbolic breakpoints on the list, since
1241 we have hopes that they can be reset after the new a.out's
1242 symbol table is read.
1244 However, any "raw" breakpoints must be removed from the list
1245 (e.g., the solib bp's), since their address is probably invalid
1246 now.
1248 And, we DON'T want to call delete_breakpoints() here, since
1249 that may write the bp's "shadow contents" (the instruction
1250 value that was overwritten with a TRAP instruction). Since
1251 we now have a new a.out, those shadow contents aren't valid. */
1253 mark_breakpoints_out (current_program_space);
1255 /* The target reports the exec event to the main thread, even if
1256 some other thread does the exec, and even if the main thread was
1257 stopped or already gone. We may still have non-leader threads of
1258 the process on our list. E.g., on targets that don't have thread
1259 exit events (like remote) and nothing forces an update of the
1260 thread list up to here. When debugging remotely, it's best to
1261 avoid extra traffic, when possible, so avoid syncing the thread
1262 list with the target, and instead go ahead and delete all threads
1263 of the process but the one that reported the event. Note this must
1264 be done before calling update_breakpoints_after_exec, as
1265 otherwise clearing the threads' resources would reference stale
1266 thread breakpoints -- it may have been one of these threads that
1267 stepped across the exec. We could just clear their stepping
1268 states, but as long as we're iterating, might as well delete
1269 them. Deleting them now rather than at the next user-visible
1270 stop provides a nicer sequence of events for user and MI
1271 notifications. */
1272 for (thread_info *th : all_threads_safe ())
1273 if (th->ptid.pid () == pid && th->ptid != ptid)
1274 delete_thread (th);
1276 /* We also need to clear any left over stale state for the
1277 leader/event thread. E.g., if there was any step-resume
1278 breakpoint or similar, it's gone now. We cannot truly
1279 step-to-next statement through an exec(). */
1280 thread_info *th = inferior_thread ();
1281 th->control.step_resume_breakpoint = nullptr;
1282 th->control.exception_resume_breakpoint = nullptr;
1283 th->control.single_step_breakpoints = nullptr;
1284 th->control.step_range_start = 0;
1285 th->control.step_range_end = 0;
1287 /* The user may have had the main thread held stopped in the
1288 previous image (e.g., schedlock on, or non-stop). Release
1289 it now. */
1290 th->stop_requested = false;
1292 update_breakpoints_after_exec ();
1294 /* What is this a.out's name? */
1295 process_ptid = ptid_t (pid);
1296 gdb_printf (_("%s is executing new program: %s\n"),
1297 target_pid_to_str (process_ptid).c_str (),
1298 exec_file_target);
1300 /* We've followed the inferior through an exec. Therefore, the
1301 inferior has essentially been killed & reborn. */
1303 breakpoint_init_inferior (current_inferior (), inf_execd);
1305 gdb::unique_xmalloc_ptr<char> exec_file_host
1306 = exec_file_find (exec_file_target, nullptr);
1308 /* If we were unable to map the executable target pathname onto a host
1309 pathname, tell the user that. Otherwise GDB's subsequent behavior
1310 is confusing. Maybe it would even be better to stop at this point
1311 so that the user can specify a file manually before continuing. */
1312 if (exec_file_host == nullptr)
1313 warning (_("Could not load symbols for executable %s.\n"
1314 "Do you need \"%ps\"?"),
1315 exec_file_target,
1316 styled_string (command_style.style (), "set sysroot"));
1318 /* Reset the shared library package. This ensures that we get a
1319 shlib event when the child reaches "_start", at which point the
1320 dld will have had a chance to initialize the child. */
1321 /* Also, loading a symbol file below may trigger symbol lookups, and
1322 we don't want those to be satisfied by the libraries of the
1323 previous incarnation of this process. */
1324 no_shared_libraries (current_program_space);
1326 inferior *execing_inferior = current_inferior ();
1327 inferior *following_inferior;
1329 if (follow_exec_mode_string == follow_exec_mode_new)
1331 /* The user wants to keep the old inferior and program spaces
1332 around. Create a new fresh one, and switch to it. */
1334 /* Do exit processing for the original inferior before setting the new
1335 inferior's pid. Having two inferiors with the same pid would confuse
1336 find_inferior_p(t)id. Transfer the terminal state and info from the
1337 old to the new inferior. */
1338 following_inferior = add_inferior_with_spaces ();
1340 swap_terminal_info (following_inferior, execing_inferior);
1341 exit_inferior (execing_inferior);
1343 following_inferior->pid = pid;
1345 else
1347 /* follow-exec-mode is "same", we continue execution in the execing
1348 inferior. */
1349 following_inferior = execing_inferior;
1351 /* The old description may no longer be fit for the new image.
1352 E.g, a 64-bit process exec'ed a 32-bit process. Clear the
1353 old description; we'll read a new one below. No need to do
1354 this on "follow-exec-mode new", as the old inferior stays
1355 around (its description is later cleared/refetched on
1356 restart). */
1357 target_clear_description ();
1360 target_follow_exec (following_inferior, ptid, exec_file_target);
1362 gdb_assert (current_inferior () == following_inferior);
1363 gdb_assert (current_program_space == following_inferior->pspace);
1365 /* Attempt to open the exec file. SYMFILE_DEFER_BP_RESET is used
1366 because the proper displacement for a PIE (Position Independent
1367 Executable) main symbol file will only be computed by
1368 solib_create_inferior_hook below. breakpoint_re_set would fail
1369 to insert the breakpoints with the zero displacement. */
1370 try_open_exec_file (exec_file_host.get (), following_inferior,
1371 SYMFILE_DEFER_BP_RESET);
1373 /* If the target can specify a description, read it. Must do this
1374 after flipping to the new executable (because the target supplied
1375 description must be compatible with the executable's
1376 architecture, and the old executable may e.g., be 32-bit, while
1377 the new one 64-bit), and before anything involving memory or
1378 registers. */
1379 target_find_description ();
1381 gdb::observers::inferior_execd.notify (execing_inferior, following_inferior);
1383 breakpoint_re_set ();
1385 /* Reinsert all breakpoints. (Those which were symbolic have
1386 been reset to the proper address in the new a.out, thanks
1387 to symbol_file_command...). */
1388 insert_breakpoints ();
1390 /* The next resume of this inferior should bring it to the shlib
1391 startup breakpoints. (If the user had also set bp's on
1392 "main" from the old (parent) process, then they'll auto-
1393 matically get reset there in the new process.). */
1396 /* The chain of threads that need to do a step-over operation to get
1397 past e.g., a breakpoint. What technique is used to step over the
1398 breakpoint/watchpoint does not matter -- all threads end up in the
1399 same queue, to maintain rough temporal order of execution, in order
1400 to avoid starvation; otherwise, we could e.g. find ourselves
1401 constantly stepping the same couple of threads past their breakpoints
1402 over and over, if the single-step finishes fast enough. */
1403 thread_step_over_list global_thread_step_over_list;
1405 /* Bit flags indicating what the thread needs to step over. */
1407 enum step_over_what_flag
1409 /* Step over a breakpoint. */
1410 STEP_OVER_BREAKPOINT = 1,
1412 /* Step past a non-continuable watchpoint, in order to let the
1413 instruction execute so we can evaluate the watchpoint
1414 expression. */
1415 STEP_OVER_WATCHPOINT = 2
1417 DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag, step_over_what);
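/* STEP_OVER_WHAT is an enum-flags type, so the reasons above combine
   as a bit mask.  A minimal illustrative sketch (the flag variable
   and condition are hypothetical):  */
#if 0
  step_over_what what = STEP_OVER_BREAKPOINT;
  if (hit_nonsteppable_watchpoint)
    what |= STEP_OVER_WATCHPOINT;
#endif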
1419 /* Info about an instruction that is being stepped over. */
1421 struct step_over_info
1423 /* If we're stepping past a breakpoint, this is the address space
1424 and address of the instruction the breakpoint is set at. We'll
1425 skip inserting all breakpoints here. Valid iff ASPACE is
1426 non-NULL. */
1427 const address_space *aspace = nullptr;
1428 CORE_ADDR address = 0;
1430 /* The instruction being stepped over triggers a nonsteppable
1431 watchpoint. If true, we'll skip inserting watchpoints. */
1432 int nonsteppable_watchpoint_p = 0;
1434 /* The thread's global number. */
1435 int thread = -1;
1438 /* The step-over info of the location that is being stepped over.
1440 Note that with async/breakpoint always-inserted mode, a user might
1441 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
1442 being stepped over. As setting a new breakpoint inserts all
1443 breakpoints, we need to make sure the breakpoint being stepped over
1444 isn't inserted then. We do that by only clearing the step-over
1445 info when the step-over is actually finished (or aborted).
1447 Presently GDB can only step over one breakpoint at any given time.
1448 Given that threads which can't run code in the same address space as
1449 the breakpoint can't really miss the breakpoint, GDB could be taught
1450 to step over at most one breakpoint per address space (so this info
1451 could move to the address space object if/when GDB is extended).
1452 The set of breakpoints being stepped over will normally be much
1453 smaller than the set of all breakpoints, so a flag in the
1454 breakpoint location structure would be wasteful. A separate list
1455 also saves complexity and run-time, as otherwise we'd have to go
1456 through all breakpoint locations clearing their flag whenever we
1457 start a new sequence. Similar considerations weigh against storing
1458 this info in the thread object. Plus, not all step overs actually
1459 have breakpoint locations -- e.g., stepping past a single-step
1460 breakpoint, or stepping to complete a non-continuable
1461 watchpoint. */
1462 static struct step_over_info step_over_info;
1464 /* Record the address of the breakpoint/instruction we're currently
1465 stepping over.
1466 N.B. We record the aspace and address now, instead of say just the thread,
1467 because when we need the info later the thread may be running. */
1469 static void
1470 set_step_over_info (const address_space *aspace, CORE_ADDR address,
1471 int nonsteppable_watchpoint_p,
1472 int thread)
1474 step_over_info.aspace = aspace;
1475 step_over_info.address = address;
1476 step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
1477 step_over_info.thread = thread;
1480 /* Called when we're no longer stepping over a breakpoint / an
1481 instruction, so all breakpoints are free to be (re)inserted. */
1483 static void
1484 clear_step_over_info (void)
1486 infrun_debug_printf ("clearing step over info");
1487 step_over_info.aspace = nullptr;
1488 step_over_info.address = 0;
1489 step_over_info.nonsteppable_watchpoint_p = 0;
1490 step_over_info.thread = -1;
1493 /* See infrun.h. */
1496 stepping_past_instruction_at (struct address_space *aspace,
1497 CORE_ADDR address)
1499 return (step_over_info.aspace != nullptr
1500 && breakpoint_address_match (aspace, address,
1501 step_over_info.aspace,
1502 step_over_info.address));
1505 /* See infrun.h. */
1508 thread_is_stepping_over_breakpoint (int thread)
1510 return (step_over_info.thread != -1
1511 && thread == step_over_info.thread);
1514 /* See infrun.h. */
1517 stepping_past_nonsteppable_watchpoint (void)
1519 return step_over_info.nonsteppable_watchpoint_p;
1522 /* Returns true if step-over info is valid. */
1524 static bool
1525 step_over_info_valid_p (void)
1527 return (step_over_info.aspace != nullptr
1528 || stepping_past_nonsteppable_watchpoint ());
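/* Putting the helpers above together, the intended lifecycle is
   roughly as follows (a sketch only; ASPACE, PC and TP stand for
   hypothetical locals of the code starting an in-line step-over):  */
#if 0
  /* Record what is being stepped over, so that (re-)inserting
     breakpoints skips that location...  */
  set_step_over_info (aspace, pc, 0, tp->global_num);
  /* ...and only once the step-over finishes (or is aborted) allow
     breakpoints there again.  */
  clear_step_over_info ();
#endif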
1532 /* Displaced stepping. */
1534 /* In non-stop debugging mode, we must take special care to manage
1535 breakpoints properly; in particular, the traditional strategy for
1536 stepping a thread past a breakpoint it has hit is unsuitable.
1537 'Displaced stepping' is a tactic for stepping one thread past a
1538 breakpoint it has hit while ensuring that other threads running
1539 concurrently will hit the breakpoint as they should.
1541 The traditional way to step a thread T off a breakpoint in a
1542 multi-threaded program in all-stop mode is as follows:
1544 a0) Initially, all threads are stopped, and breakpoints are not
1545 inserted.
1546 a1) We single-step T, leaving breakpoints uninserted.
1547 a2) We insert breakpoints, and resume all threads.
1549 In non-stop debugging, however, this strategy is unsuitable: we
1550 don't want to have to stop all threads in the system in order to
1551 continue or step T past a breakpoint. Instead, we use displaced
1552 stepping:
1554 n0) Initially, T is stopped, other threads are running, and
1555 breakpoints are inserted.
1556 n1) We copy the instruction "under" the breakpoint to a separate
1557 location, outside the main code stream, making any adjustments
1558 to the instruction, register, and memory state as directed by
1559 T's architecture.
1560 n2) We single-step T over the instruction at its new location.
1561 n3) We adjust the resulting register and memory state as directed
1562 by T's architecture. This includes resetting T's PC to point
1563 back into the main instruction stream.
1564 n4) We resume T.
1566 This approach depends on the following gdbarch methods:
1568 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1569 indicate where to copy the instruction, and how much space must
1570 be reserved there. We use these in step n1.
1572 - gdbarch_displaced_step_copy_insn copies an instruction to a new
1573 address, and makes any necessary adjustments to the instruction,
1574 register contents, and memory. We use this in step n1.
1576 - gdbarch_displaced_step_fixup adjusts registers and memory after
1577 we have successfully single-stepped the instruction, to yield the
1578 same effect the instruction would have had if we had executed it
1579 at its original address. We use this in step n3.
1581 The gdbarch_displaced_step_copy_insn and
1582 gdbarch_displaced_step_fixup functions must be written so that
1583 copying an instruction with gdbarch_displaced_step_copy_insn,
1584 single-stepping across the copied instruction, and then applying
1585 gdbarch_displaced_step_fixup should have the same effects on the
1586 thread's memory and registers as stepping the instruction in place
1587 would have. Exactly which responsibilities fall to the copy and
1588 which fall to the fixup is up to the author of those functions.
1590 See the comments in gdbarch.sh for details.
1592 Note that displaced stepping and software single-step cannot
1593 currently be used in combination, although with some care I think
1594 they could be made to. Software single-step works by placing
1595 breakpoints on all possible subsequent instructions; if the
1596 displaced instruction is a PC-relative jump, those breakpoints
1597 could fall in very strange places --- on pages that aren't
1598 executable, or at addresses that are not proper instruction
1599 boundaries. (We do generally let other threads run while we wait
1600 to hit the software single-step breakpoint, and they might
1601 encounter such a corrupted instruction.) One way to work around
1602 this would be to have gdbarch_displaced_step_copy_insn fully
1603 simulate the effect of PC-relative instructions (and return NULL)
1604 on architectures that use software single-stepping.
1606 In non-stop mode, we can have independent and simultaneous step
1607 requests, so more than one thread may need to simultaneously step
1608 over a breakpoint. The current implementation assumes there is
1609 only one scratch space per process. In this case, we have to
1610 serialize access to the scratch space. If thread A wants to step
1611 over a breakpoint, but we are currently waiting for some other
1612 thread to complete a displaced step, we leave thread A stopped and
1613 place it in the displaced_step_request_queue. Whenever a displaced
1614 step finishes, we pick the next thread in the queue and start a new
1615 displaced step operation on it. See displaced_step_prepare and
1616 displaced_step_finish for details. */
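/* The n0-n4 recipe above, sketched in terms of the gdbarch hooks this
   comment names (illustrative only: parameter lists are abbreviated,
   and the real flow lives in displaced_step_prepare and
   displaced_step_finish below).  */
#if 0
  /* n1: pick the scratch location, then copy and adjust the
     instruction that is under the breakpoint.  */
  CORE_ADDR copy = gdbarch_displaced_step_location (gdbarch);
  displaced_step_copy_insn_closure_up closure
    = gdbarch_displaced_step_copy_insn (gdbarch, pc, copy, regcache);
  /* n2: single-step the thread at COPY, leaving breakpoints inserted
     for the other threads.  */
  /* n3: make registers and memory look as if the instruction had
     executed at PC, including resetting the PC.  */
  gdbarch_displaced_step_fixup (gdbarch, closure.get (), pc, copy,
                                regcache /* ... */);
  /* n4: resume the thread normally.  */
#endif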
1618 /* Return true if THREAD is doing a displaced step. */
1620 static bool
1621 displaced_step_in_progress_thread (thread_info *thread)
1623 gdb_assert (thread != nullptr);
1625 return thread->displaced_step_state.in_progress ();
1628 /* Return true if INF has a thread doing a displaced step. */
1630 static bool
1631 displaced_step_in_progress (inferior *inf)
1633 return inf->displaced_step_state.in_progress_count > 0;
1636 /* Return true if any thread is doing a displaced step. */
1638 static bool
1639 displaced_step_in_progress_any_thread ()
1641 for (inferior *inf : all_non_exited_inferiors ())
1643 if (displaced_step_in_progress (inf))
1644 return true;
1647 return false;
1650 static void
1651 infrun_inferior_exit (struct inferior *inf)
1653 inf->displaced_step_state.reset ();
1654 inf->thread_waiting_for_vfork_done = nullptr;
1657 static void
1658 infrun_inferior_execd (inferior *exec_inf, inferior *follow_inf)
1660 /* If some threads were doing a displaced step in this inferior at the
1661 moment of the exec, they no longer exist. Even if the exec'ing thread
1662 was doing a displaced step, we don't want to do any fixup nor restore
1663 displaced stepping buffer bytes. */
1664 follow_inf->displaced_step_state.reset ();
1666 for (thread_info *thread : follow_inf->threads ())
1667 thread->displaced_step_state.reset ();
1669 /* Since an in-line step is done with everything else stopped, if there was
1670 one in progress at the time of the exec, it must have been the exec'ing
1671 thread. */
1672 clear_step_over_info ();
1674 follow_inf->thread_waiting_for_vfork_done = nullptr;
1677 /* If ON, and the architecture supports it, GDB will use displaced
1678 stepping to step over breakpoints. If OFF, or if the architecture
1679 doesn't support it, GDB will instead use the traditional
1680 hold-and-step approach. If AUTO (which is the default), GDB will
1681 decide which technique to use to step over breakpoints depending on
1682 whether the target works in a non-stop way (see use_displaced_stepping). */
1684 static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
1686 static void
1687 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1688 struct cmd_list_element *c,
1689 const char *value)
1691 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
1692 gdb_printf (file,
1693 _("Debugger's willingness to use displaced stepping "
1694 "to step over breakpoints is %s (currently %s).\n"),
1695 value, target_is_non_stop_p () ? "on" : "off");
1696 else
1697 gdb_printf (file,
1698 _("Debugger's willingness to use displaced stepping "
1699 "to step over breakpoints is %s.\n"), value);
1702 /* Return true if the gdbarch implements the required methods to use
1703 displaced stepping. */
1705 static bool
1706 gdbarch_supports_displaced_stepping (gdbarch *arch)
1708 /* Only check for the presence of `prepare`. The gdbarch verification ensures
1709 that if `prepare` is provided, so is `finish`. */
1710 return gdbarch_displaced_step_prepare_p (arch);
1713 /* Return true if displaced stepping can/should be used to step
1714 over breakpoints of thread TP. */
1716 static bool
1717 use_displaced_stepping (thread_info *tp)
1719 /* If the user disabled it explicitly, don't use displaced stepping. */
1720 if (can_use_displaced_stepping == AUTO_BOOLEAN_FALSE)
1721 return false;
1723 /* If "auto", only use displaced stepping if the target operates in a non-stop
1724 way. */
1725 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO
1726 && !target_is_non_stop_p ())
1727 return false;
1729 gdbarch *gdbarch = get_thread_regcache (tp)->arch ();
1731 /* If the architecture doesn't implement displaced stepping, don't use
1732 it. */
1733 if (!gdbarch_supports_displaced_stepping (gdbarch))
1734 return false;
1736 /* If recording, don't use displaced stepping. */
1737 if (find_record_target () != nullptr)
1738 return false;
1740 /* If displaced stepping failed before for this inferior, don't bother trying
1741 again. */
1742 if (tp->inf->displaced_step_state.failed_before)
1743 return false;
1745 return true;
1748 /* Simple function wrapper around displaced_step_thread_state::reset. */
1750 static void
1751 displaced_step_reset (displaced_step_thread_state *displaced)
1753 displaced->reset ();
1756 /* A cleanup that wraps displaced_step_reset. We use this instead of, say,
1757 SCOPE_EXIT, because it needs to be discardable with "cleanup.release ()". */
1759 using displaced_step_reset_cleanup = FORWARD_SCOPE_EXIT (displaced_step_reset);
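/* Illustrative usage sketch (the constructor call mirrors the one in
   displaced_step_finish below; the release call here is hypothetical,
   shown only to demonstrate how the reset can be discarded):

     displaced_step_reset_cleanup cleanup (displaced);
     ... code that may throw or return early ...
     cleanup.release ();   // discard: keep the displaced-step state

   Unless release is called, the thread's displaced-stepping state is
   reset on scope exit, whether we leave normally or via an exception.  */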
1761 /* Prepare to single-step, using displaced stepping.
1763 Note that we cannot use displaced stepping when we have a signal to
1764 deliver. If we have a signal to deliver and an instruction to step
1765 over, then after the step, there will be no indication from the
1766 target whether the thread entered a signal handler or ignored the
1767 signal and stepped over the instruction successfully --- both cases
1768 result in a simple SIGTRAP. In the first case we mustn't do a
1769 fixup, and in the second case we must --- but we can't tell which.
1770 Comments in the code for 'random signals' in handle_inferior_event
1771 explain how we handle this case instead.
1773 Returns DISPLACED_STEP_PREPARE_STATUS_OK if preparing was successful -- this
1774 thread is going to be stepped now; DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
1775 if displaced stepping this thread got queued; or
1776 DISPLACED_STEP_PREPARE_STATUS_CANT if this instruction can't be displaced
1777 stepped. */
1779 static displaced_step_prepare_status
1780 displaced_step_prepare_throw (thread_info *tp)
1782 regcache *regcache = get_thread_regcache (tp);
1783 struct gdbarch *gdbarch = regcache->arch ();
1784 displaced_step_thread_state &disp_step_thread_state
1785 = tp->displaced_step_state;
1787 /* We should never reach this function if the architecture does not
1788 support displaced stepping. */
1789 gdb_assert (gdbarch_supports_displaced_stepping (gdbarch));
1791 /* Nor if the thread isn't meant to step over a breakpoint. */
1792 gdb_assert (tp->control.trap_expected);
1794 /* Disable range stepping while executing in the scratch pad. We
1795 want a single-step even if executing the displaced instruction in
1796 the scratch buffer lands within the stepping range (e.g., a
1797 jump/branch). */
1798 tp->control.may_range_step = 0;
1800 /* We are about to start a displaced step for this thread. If one is already
1801 in progress, something's wrong. */
1802 gdb_assert (!disp_step_thread_state.in_progress ());
1804 if (tp->inf->displaced_step_state.unavailable)
1806 /* The gdbarch tells us it's not worth asking to try a prepare because
1807 it is likely that it will return unavailable, so don't bother asking. */
1809 displaced_debug_printf ("deferring step of %s",
1810 tp->ptid.to_string ().c_str ());
1812 global_thread_step_over_chain_enqueue (tp);
1813 return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
1816 displaced_debug_printf ("displaced-stepping %s now",
1817 tp->ptid.to_string ().c_str ());
1819 scoped_restore_current_thread restore_thread;
1821 switch_to_thread (tp);
1823 CORE_ADDR original_pc = regcache_read_pc (regcache);
1824 CORE_ADDR displaced_pc;
1826 /* Display the instruction we are going to displaced step. */
1827 if (debug_displaced)
1829 string_file tmp_stream;
1830 int dislen = gdb_print_insn (gdbarch, original_pc, &tmp_stream,
1831 nullptr);
1833 if (dislen > 0)
1835 gdb::byte_vector insn_buf (dislen);
1836 read_memory (original_pc, insn_buf.data (), insn_buf.size ());
1838 std::string insn_bytes = bytes_to_string (insn_buf);
1840 displaced_debug_printf ("original insn %s: %s \t %s",
1841 paddress (gdbarch, original_pc),
1842 insn_bytes.c_str (),
1843 tmp_stream.string ().c_str ());
1845 else
1846 displaced_debug_printf ("original insn %s: invalid length: %d",
1847 paddress (gdbarch, original_pc), dislen);
1850 displaced_step_prepare_status status
1851 = gdbarch_displaced_step_prepare (gdbarch, tp, displaced_pc);
1853 if (status == DISPLACED_STEP_PREPARE_STATUS_CANT)
1855 displaced_debug_printf ("failed to prepare (%s)",
1856 tp->ptid.to_string ().c_str ());
1858 return DISPLACED_STEP_PREPARE_STATUS_CANT;
1860 else if (status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
1862 /* Not enough displaced stepping resources available, defer this
1863 request by placing it in the queue. */
1865 displaced_debug_printf ("not enough resources available, "
1866 "deferring step of %s",
1867 tp->ptid.to_string ().c_str ());
1869 global_thread_step_over_chain_enqueue (tp);
1871 return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
1874 gdb_assert (status == DISPLACED_STEP_PREPARE_STATUS_OK);
1876 /* Save the information we need to fix things up if the step
1877 succeeds. */
1878 disp_step_thread_state.set (gdbarch);
1880 tp->inf->displaced_step_state.in_progress_count++;
1882 displaced_debug_printf ("prepared successfully thread=%s, "
1883 "original_pc=%s, displaced_pc=%s",
1884 tp->ptid.to_string ().c_str (),
1885 paddress (gdbarch, original_pc),
1886 paddress (gdbarch, displaced_pc));
1888 /* Display the new displaced instruction(s). */
1889 if (debug_displaced)
1891 string_file tmp_stream;
1892 CORE_ADDR addr = displaced_pc;
1894 /* If displaced stepping is going to use h/w single step then we know
1895 that the replacement instruction can only be a single instruction,
1896 in that case set the end address at the next byte.
1898 Otherwise the displaced stepping copy instruction routine could
1899 have generated multiple instructions, and all we know is that they
1900 must fit within the LEN bytes of the buffer. */
1901 CORE_ADDR end
1902 = addr + (gdbarch_displaced_step_hw_singlestep (gdbarch)
1903 ? 1 : gdbarch_displaced_step_buffer_length (gdbarch));
1905 while (addr < end)
1907 int dislen = gdb_print_insn (gdbarch, addr, &tmp_stream, nullptr);
1908 if (dislen <= 0)
1910 displaced_debug_printf
1911 ("replacement insn %s: invalid length: %d",
1912 paddress (gdbarch, addr), dislen);
1913 break;
1916 gdb::byte_vector insn_buf (dislen);
1917 read_memory (addr, insn_buf.data (), insn_buf.size ());
1919 std::string insn_bytes = bytes_to_string (insn_buf);
1920 std::string insn_str = tmp_stream.release ();
1921 displaced_debug_printf ("replacement insn %s: %s \t %s",
1922 paddress (gdbarch, addr),
1923 insn_bytes.c_str (),
1924 insn_str.c_str ());
1925 addr += dislen;
1929 return DISPLACED_STEP_PREPARE_STATUS_OK;
1932 /* Wrapper for displaced_step_prepare_throw that disables further
1933 attempts at displaced stepping if we get a memory error. */
1935 static displaced_step_prepare_status
1936 displaced_step_prepare (thread_info *thread)
1938 displaced_step_prepare_status status
1939 = DISPLACED_STEP_PREPARE_STATUS_CANT;
1943 status = displaced_step_prepare_throw (thread);
1945 catch (const gdb_exception_error &ex)
1947 if (ex.error != MEMORY_ERROR
1948 && ex.error != NOT_SUPPORTED_ERROR)
1949 throw;
1951 infrun_debug_printf ("caught exception, disabling displaced stepping: %s",
1952 ex.what ());
1954 /* Be verbose if "set displaced-stepping" is "on", silent if
1955 "auto". */
1956 if (can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1958 warning (_("disabling displaced stepping: %s"),
1959 ex.what ());
1962 /* Disable further displaced stepping attempts. */
1963 thread->inf->displaced_step_state.failed_before = 1;
1966 return status;
1969 /* True if any thread of TARGET that matches RESUME_PTID requires
1970 target_thread_events enabled. This assumes TARGET does not support
1971 target thread options. */
1973 static bool
1974 any_thread_needs_target_thread_events (process_stratum_target *target,
1975 ptid_t resume_ptid)
1977 for (thread_info *tp : all_non_exited_threads (target, resume_ptid))
1978 if (displaced_step_in_progress_thread (tp)
1979 || schedlock_applies (tp)
1980 || tp->thread_fsm () != nullptr)
1981 return true;
1982 return false;
1985 /* Maybe disable thread-{cloned,created,exited} event reporting after
1986 a step-over (either in-line or displaced) finishes. */
1988 static void
1989 update_thread_events_after_step_over (thread_info *event_thread,
1990 const target_waitstatus &event_status)
1992 if (schedlock_applies (event_thread))
1994 /* If scheduler-locking applies, continue reporting
1995 thread-created/thread-cloned events. */
1996 return;
1998 else if (target_supports_set_thread_options (0))
2000 /* We can control per-thread options. Disable events for the
2001 event thread, unless the thread is gone. */
2002 if (event_status.kind () != TARGET_WAITKIND_THREAD_EXITED)
2003 event_thread->set_thread_options (0);
2005 else
2007 /* We can only control the target-wide target_thread_events
2008 setting. Disable it, but only if other threads in the target
2009 don't need it enabled. */
2010 process_stratum_target *target = event_thread->inf->process_target ();
2011 if (!any_thread_needs_target_thread_events (target, minus_one_ptid))
2012 target_thread_events (false);
2016 /* If we displaced stepped an instruction successfully, adjust registers and
2017 memory to yield the same effect the instruction would have had if we had
2018 executed it at its original address, and return
2019 DISPLACED_STEP_FINISH_STATUS_OK. If the instruction didn't complete,
2020 relocate the PC and return DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED.
2022 If the thread wasn't displaced stepping, return
2023 DISPLACED_STEP_FINISH_STATUS_OK as well. */
2025 static displaced_step_finish_status
2026 displaced_step_finish (thread_info *event_thread,
2027 const target_waitstatus &event_status)
2029 /* Check whether the parent is displaced stepping. */
2030 inferior *parent_inf = event_thread->inf;
2032 /* If this was a fork/vfork/clone, this event indicates that the
2033 displaced stepping of the syscall instruction has been done, so
2034 we perform cleanup for the parent here.  Note that this
2035 operation also cleans up the child for vfork, because their pages
2036 are shared. */
2038 /* If this is a fork (child gets its own address space copy) and
2039 some displaced step buffers were in use at the time of the fork,
2040 restore the displaced step buffer bytes in the child process.
2042 Architectures which support displaced stepping and fork events
2043 must supply an implementation of
2044 gdbarch_displaced_step_restore_all_in_ptid. This is not enforced
2045 during gdbarch validation to support architectures which support
2046 displaced stepping but not forks. */
2047 if (event_status.kind () == TARGET_WAITKIND_FORKED)
2049 struct regcache *parent_regcache = get_thread_regcache (event_thread);
2050 struct gdbarch *gdbarch = parent_regcache->arch ();
2052 if (gdbarch_supports_displaced_stepping (gdbarch))
2053 gdbarch_displaced_step_restore_all_in_ptid
2054 (gdbarch, parent_inf, event_status.child_ptid ());
2057 displaced_step_thread_state *displaced = &event_thread->displaced_step_state;
2059 /* Was this thread performing a displaced step? */
2060 if (!displaced->in_progress ())
2061 return DISPLACED_STEP_FINISH_STATUS_OK;
2063 update_thread_events_after_step_over (event_thread, event_status);
2065 gdb_assert (event_thread->inf->displaced_step_state.in_progress_count > 0);
2066 event_thread->inf->displaced_step_state.in_progress_count--;
2068 /* Fixup may need to read memory/registers. Switch to the thread
2069 that we're fixing up. Also, target_stopped_by_watchpoint checks
2070 the current thread, and displaced_step_restore performs ptid-dependent
2071 memory accesses using current_inferior(). */
2072 switch_to_thread (event_thread);
2074 displaced_step_reset_cleanup cleanup (displaced);
2076 /* Do the fixup, and release the resources acquired to do the displaced
2077 step. */
2078 displaced_step_finish_status status
2079 = gdbarch_displaced_step_finish (displaced->get_original_gdbarch (),
2080 event_thread, event_status);
2082 if (event_status.kind () == TARGET_WAITKIND_FORKED
2083 || event_status.kind () == TARGET_WAITKIND_VFORKED
2084 || event_status.kind () == TARGET_WAITKIND_THREAD_CLONED)
2086 /* Since the vfork/fork/clone syscall instruction was executed
2087 in the scratchpad, the child's PC is also within the
2088 scratchpad. Set the child's PC to the parent's PC value,
2089 which has already been fixed up. Note: we use the parent's
2090 aspace here, although we're touching the child, because the
2091 child hasn't been added to the inferior list yet at this
2092 point. */
2094 struct regcache *parent_regcache = get_thread_regcache (event_thread);
2095 struct gdbarch *gdbarch = parent_regcache->arch ();
2096 struct regcache *child_regcache
2097 = get_thread_arch_regcache (parent_inf, event_status.child_ptid (),
2098 gdbarch);
2099 /* Read PC value of parent. */
2100 CORE_ADDR parent_pc = regcache_read_pc (parent_regcache);
2102 displaced_debug_printf ("write child pc from %s to %s",
2103 paddress (gdbarch,
2104 regcache_read_pc (child_regcache)),
2105 paddress (gdbarch, parent_pc));
2107 regcache_write_pc (child_regcache, parent_pc);
2110 return status;
2113 /* Data to be passed around while handling an event. This data is
2114 discarded between events. */
2115 struct execution_control_state
2117 explicit execution_control_state (thread_info *thr = nullptr)
2118 : ptid (thr == nullptr ? null_ptid : thr->ptid),
2119 event_thread (thr)
2123 process_stratum_target *target = nullptr;
2124 ptid_t ptid;
2125 /* The thread that got the event, if this was a thread event; NULL
2126 otherwise. */
2127 struct thread_info *event_thread;
2129 struct target_waitstatus ws;
2130 int stop_func_filled_in = 0;
2131 CORE_ADDR stop_func_alt_start = 0;
2132 CORE_ADDR stop_func_start = 0;
2133 CORE_ADDR stop_func_end = 0;
2134 const char *stop_func_name = nullptr;
2135 int wait_some_more = 0;
2137 /* True if the event thread hit the single-step breakpoint of
2138 another thread. Thus the event doesn't cause a stop, the thread
2139 needs to be single-stepped past the single-step breakpoint before
2140 we can switch back to the original stepping thread. */
2141 int hit_singlestep_breakpoint = 0;
2144 static void keep_going_pass_signal (struct execution_control_state *ecs);
2145 static void prepare_to_wait (struct execution_control_state *ecs);
2146 static bool keep_going_stepped_thread (struct thread_info *tp);
2147 static step_over_what thread_still_needs_step_over (struct thread_info *tp);
2149 /* Are there any pending step-over requests? If so, run all we can
2150 now and return true. Otherwise, return false. */
2152 static bool
2153 start_step_over (void)
2155 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
2157 /* Don't start a new step-over if we already have an in-line
2158 step-over operation ongoing. */
2159 if (step_over_info_valid_p ())
2160 return false;
2162 /* Steal the global thread step over chain. As we try to initiate displaced
2163 steps, threads will be enqueued in the global chain if no buffers are
2164 available. If we iterated on the global chain directly, we might iterate
2165 indefinitely. */
2166 thread_step_over_list threads_to_step
2167 = std::move (global_thread_step_over_list);
2169 infrun_debug_printf ("stealing global queue of threads to step, length = %d",
2170 thread_step_over_chain_length (threads_to_step));
2172 bool started = false;
2174 /* On scope exit (whatever the reason, return or exception), if there are
2175 threads left in the THREADS_TO_STEP chain, put back these threads in the
2176 global list. */
2177 SCOPE_EXIT
2179 if (threads_to_step.empty ())
2180 infrun_debug_printf ("step-over queue now empty");
2181 else
2183 infrun_debug_printf ("putting back %d threads to step in global queue",
2184 thread_step_over_chain_length (threads_to_step));
2186 global_thread_step_over_chain_enqueue_chain
2187 (std::move (threads_to_step));
2191 thread_step_over_list_safe_range range
2192 = make_thread_step_over_list_safe_range (threads_to_step);
2194 for (thread_info *tp : range)
2196 step_over_what step_what;
2197 int must_be_in_line;
2199 gdb_assert (!tp->stop_requested);
2201 if (tp->inf->displaced_step_state.unavailable)
2203 /* The arch told us to not even try preparing another displaced step
2204 for this inferior. Just leave the thread in THREADS_TO_STEP, it
2205 will get moved to the global chain on scope exit. */
2206 continue;
2209 if (tp->inf->thread_waiting_for_vfork_done != nullptr)
2211 /* When we stop all threads, handling a vfork, any thread in the step
2212 over chain remains there. A user could also try to continue a
2213 thread stopped at a breakpoint while another thread is waiting for
2214 a vfork-done event. In any case, we don't want to start a step
2215 over right now. */
2216 continue;
2219 /* Remove thread from the THREADS_TO_STEP chain. If anything goes wrong
2220 while we try to prepare the displaced step, we don't add it back to
2221 the global step over chain. This is to avoid a thread staying in the
2222 step over chain indefinitely if something goes wrong when resuming it.
2223 If the error is intermittent and it still needs a step over, it will
2224 get enqueued again when we try to resume it normally. */
2225 threads_to_step.erase (threads_to_step.iterator_to (*tp));
2227 step_what = thread_still_needs_step_over (tp);
2228 must_be_in_line = ((step_what & STEP_OVER_WATCHPOINT)
2229 || ((step_what & STEP_OVER_BREAKPOINT)
2230 && !use_displaced_stepping (tp)));
2232 /* We currently stop all threads of all processes to step-over
2233 in-line. If we need to start a new in-line step-over, let
2234 any pending displaced steps finish first. */
2235 if (must_be_in_line && displaced_step_in_progress_any_thread ())
2237 global_thread_step_over_chain_enqueue (tp);
2238 continue;
2241 if (tp->control.trap_expected
2242 || tp->resumed ()
2243 || tp->executing ())
2245 internal_error ("[%s] has inconsistent state: "
2246 "trap_expected=%d, resumed=%d, executing=%d\n",
2247 tp->ptid.to_string ().c_str (),
2248 tp->control.trap_expected,
2249 tp->resumed (),
2250 tp->executing ());
2253 infrun_debug_printf ("resuming [%s] for step-over",
2254 tp->ptid.to_string ().c_str ());
2256 /* keep_going_pass_signal skips the step-over if the breakpoint
2257 is no longer inserted. In all-stop, we want to keep looking
2258 for a thread that needs a step-over instead of resuming TP,
2259 because we wouldn't be able to resume anything else until the
2260 target stops again. In non-stop, the resume always resumes
2261 only TP, so it's OK to let the thread resume freely. */
2262 if (!target_is_non_stop_p () && !step_what)
2263 continue;
2265 switch_to_thread (tp);
2266 execution_control_state ecs (tp);
2267 keep_going_pass_signal (&ecs);
2269 if (!ecs.wait_some_more)
2270 error (_("Command aborted."));
2272 /* If the thread's step over could not be initiated because no buffers
2273 were available, it was re-added to the global step over chain. */
2274 if (tp->resumed ())
2276 infrun_debug_printf ("[%s] was resumed.",
2277 tp->ptid.to_string ().c_str ());
2278 gdb_assert (!thread_is_in_step_over_chain (tp));
2280 else
2282 infrun_debug_printf ("[%s] was NOT resumed.",
2283 tp->ptid.to_string ().c_str ());
2284 gdb_assert (thread_is_in_step_over_chain (tp));
2287 /* If we started a new in-line step-over, we're done. */
2288 if (step_over_info_valid_p ())
2290 gdb_assert (tp->control.trap_expected);
2291 started = true;
2292 break;
2295 if (!target_is_non_stop_p ())
2297 /* On all-stop, shouldn't have resumed unless we needed a
2298 step over. */
2299 gdb_assert (tp->control.trap_expected
2300 || tp->step_after_step_resume_breakpoint);
2302 /* With remote targets (at least), in all-stop, we can't
2303 issue any further remote commands until the program stops
2304 again. */
2305 started = true;
2306 break;
2309 /* Either the thread no longer needed a step-over, or a new
2310 displaced stepping sequence started. Even in the latter
2311 case, continue looking. Maybe we can also start another
2312 displaced step on a thread of other process. */
2315 return started;
2318 /* Update global variables holding ptids to hold NEW_PTID if they were
2319 holding OLD_PTID. */
2320 static void
2321 infrun_thread_ptid_changed (process_stratum_target *target,
2322 ptid_t old_ptid, ptid_t new_ptid)
2324 if (inferior_ptid == old_ptid
2325 && current_inferior ()->process_target () == target)
2326 inferior_ptid = new_ptid;
2331 static const char schedlock_off[] = "off";
2332 static const char schedlock_on[] = "on";
2333 static const char schedlock_step[] = "step";
2334 static const char schedlock_replay[] = "replay";
2335 static const char *const scheduler_enums[] = {
2336 schedlock_off,
2337 schedlock_on,
2338 schedlock_step,
2339 schedlock_replay,
2340 nullptr
2342 static const char *scheduler_mode = schedlock_replay;
2343 static void
2344 show_scheduler_mode (struct ui_file *file, int from_tty,
2345 struct cmd_list_element *c, const char *value)
2347 gdb_printf (file,
2348 _("Mode for locking scheduler "
2349 "during execution is \"%s\".\n"),
2350 value);
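/* The user-visible knob is "set scheduler-locking" (illustrative
   session; the "show" output mirrors the message printed above):

     (gdb) show scheduler-locking
     Mode for locking scheduler during execution is "replay".
     (gdb) set scheduler-locking step

   Roughly: "off" lets all threads run, "on" resumes only the current
   thread, "step" locks other threads out only for stepping commands,
   and "replay" behaves like "on" while replaying a recording and like
   "off" otherwise.  */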
2353 static void
2354 set_schedlock_func (const char *args, int from_tty, struct cmd_list_element *c)
2356 if (!target_can_lock_scheduler ())
2358 scheduler_mode = schedlock_off;
2359 error (_("Target '%s' cannot support this command."),
2360 target_shortname ());
2364 /* True if execution commands resume all threads of all processes by
2365 default; otherwise, resume only threads of the current inferior
2366 process. */
2367 bool sched_multi = false;
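/* SCHED_MULTI is controlled by the "set schedule-multiple" setting.
   For example (illustrative):

     (gdb) set schedule-multiple on
     (gdb) continue

   now resumes the threads of every inferior rather than only those of
   the current one (see user_visible_resume_ptid below).  */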
2369 /* Try to set up for software single stepping.  Return true if target_resume()
2370 should use hardware single step.
2372 GDBARCH is the current gdbarch. */
2374 static bool
2375 maybe_software_singlestep (struct gdbarch *gdbarch)
2377 bool hw_step = true;
2379 if (execution_direction == EXEC_FORWARD
2380 && gdbarch_software_single_step_p (gdbarch))
2381 hw_step = !insert_single_step_breakpoints (gdbarch);
2383 return hw_step;
2386 /* See infrun.h. */
2388 ptid_t
2389 user_visible_resume_ptid (int step)
2391 ptid_t resume_ptid;
2393 if (non_stop)
2395 /* With non-stop mode on, threads are always handled
2396 individually. */
2397 resume_ptid = inferior_ptid;
2399 else if ((scheduler_mode == schedlock_on)
2400 || (scheduler_mode == schedlock_step && step))
2402 /* User-settable 'scheduler' mode requires solo thread
2403 resume. */
2404 resume_ptid = inferior_ptid;
2406 else if ((scheduler_mode == schedlock_replay)
2407 && target_record_will_replay (minus_one_ptid, execution_direction))
2409 /* User-settable 'scheduler' mode requires solo thread resume in replay
2410 mode. */
2411 resume_ptid = inferior_ptid;
2413 else if (inferior_ptid != null_ptid
2414 && inferior_thread ()->control.in_cond_eval)
2416 /* The inferior thread is evaluating a BP condition. Other threads
2417 might be stopped or running and we do not want to change their
2418 state, thus, resume only the current thread. */
2419 resume_ptid = inferior_ptid;
2421 else if (!sched_multi && target_supports_multi_process ())
2423 /* Resume all threads of the current process (and none of other
2424 processes). */
2425 resume_ptid = ptid_t (inferior_ptid.pid ());
2427 else
2429 /* Resume all threads of all processes. */
2430 resume_ptid = RESUME_ALL;
2433 return resume_ptid;
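/* Worked example (assuming default settings unless noted): with
   non-stop off, scheduler-locking off and schedule-multiple off, a
   "continue" issued from thread 2.1 yields ptid_t (2's pid), meaning
   all threads of inferior 2 and none of any other inferior.  With
   "set scheduler-locking on" the same command yields inferior_ptid,
   so only thread 2.1 is resumed.  With "set schedule-multiple on" it
   yields RESUME_ALL.  */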
2436 /* See infrun.h. */
2438 process_stratum_target *
2439 user_visible_resume_target (ptid_t resume_ptid)
2441 return (resume_ptid == minus_one_ptid && sched_multi
2442 ? nullptr
2443 : current_inferior ()->process_target ());
2446 /* Find a thread from the inferiors that we'll resume that is waiting
2447 for a vfork-done event. */
2449 static thread_info *
2450 find_thread_waiting_for_vfork_done ()
2452 gdb_assert (!target_is_non_stop_p ());
2454 if (sched_multi)
2456 for (inferior *inf : all_non_exited_inferiors ())
2457 if (inf->thread_waiting_for_vfork_done != nullptr)
2458 return inf->thread_waiting_for_vfork_done;
2460 else
2462 inferior *cur_inf = current_inferior ();
2463 if (cur_inf->thread_waiting_for_vfork_done != nullptr)
2464 return cur_inf->thread_waiting_for_vfork_done;
2466 return nullptr;
2469 /* Return a ptid representing the set of threads that we will resume,
2470 in the perspective of the target, assuming run control handling
2471 does not require leaving some threads stopped (e.g., stepping past
2472 breakpoint). USER_STEP indicates whether we're about to start the
2473 target for a stepping command. */
2475 static ptid_t
2476 internal_resume_ptid (int user_step)
2478 /* In non-stop, we always control threads individually. Note that
2479 the target may always work in non-stop mode even with "set
2480 non-stop off", in which case user_visible_resume_ptid could
2481 return a wildcard ptid. */
2482 if (target_is_non_stop_p ())
2483 return inferior_ptid;
2485 /* The rest of the function assumes non-stop==off and
2486 target-non-stop==off.
2488 If a thread is waiting for a vfork-done event, it means breakpoints are out
2489 for this inferior (well, program space in fact). We don't want to resume
2490 any thread other than the one waiting for vfork done, otherwise these other
2491 threads could miss breakpoints. So if a thread in the resumption set is
2492 waiting for a vfork-done event, resume only that thread.
2494 The resumption set width depends on whether schedule-multiple is on or off.
2496 Note that if the target_resume interface was more flexible, we could be
2497 smarter here when schedule-multiple is on. For example, imagine 3
2498 inferiors with 2 threads each (1.1, 1.2, 2.1, 2.2, 3.1 and 3.2). Threads
2499 2.1 and 3.2 are both waiting for a vfork-done event. Then we could ask the
2500 target(s) to resume:
2502 - All threads of inferior 1
2503 - Thread 2.1
2504 - Thread 3.2
2506 Since we don't have that flexibility (we can only pass one ptid), just
2507 resume the first thread waiting for a vfork-done event we find (e.g. thread
2508 2.1). */
2509 thread_info *thr = find_thread_waiting_for_vfork_done ();
2510 if (thr != nullptr)
2512 /* If we have a thread that is waiting for a vfork-done event,
2513 then we should have switched to it earlier. Calling
2514 target_resume with thread scope is only possible when the
2515 current thread matches the thread scope. */
2516 gdb_assert (thr->ptid == inferior_ptid);
2517 gdb_assert (thr->inf->process_target ()
2518 == inferior_thread ()->inf->process_target ());
2519 return thr->ptid;
2522 return user_visible_resume_ptid (user_step);
2525 /* Wrapper for target_resume, that handles infrun-specific
2526 bookkeeping. */
2528 static void
2529 do_target_resume (ptid_t resume_ptid, bool step, enum gdb_signal sig)
2531 struct thread_info *tp = inferior_thread ();
2533 gdb_assert (!tp->stop_requested);
2535 /* Install inferior's terminal modes. */
2536 target_terminal::inferior ();
2538 /* Avoid confusing the next resume, if the next stop/resume
2539 happens to apply to another thread. */
2540 tp->set_stop_signal (GDB_SIGNAL_0);
2542 /* Advise target which signals may be handled silently.
2544 If we have removed breakpoints because we are stepping over one
2545 in-line (in any thread), we need to receive all signals to avoid
2546 accidentally skipping a breakpoint during execution of a signal
2547 handler.
2549 Likewise if we're displaced stepping, otherwise a trap for a
2550 breakpoint in a signal handler might be confused with the
2551 displaced step finishing.  We don't make displaced_step_finish
2552 distinguish the cases instead, because:
2554 - a backtrace while stopped in the signal handler would show the
2555 scratch pad as frame older than the signal handler, instead of
2556 the real mainline code.
2558 - when the thread is later resumed, the signal handler would
2559 return to the scratch pad area, which would no longer be
2560 valid. */
2561 if (step_over_info_valid_p ()
2562 || displaced_step_in_progress (tp->inf))
2563 target_pass_signals ({});
2564 else
2565 target_pass_signals (signal_pass);
2567 /* Request that the target report thread-{created,cloned,exited}
2568 events in the following situations:
2570 - If we are performing an in-line step-over-breakpoint, then we
2571 will remove a breakpoint from the target and only run the
2572 current thread. We don't want any new thread (spawned by the
2573 step) to start running, as it might miss the breakpoint. We
2574 need to clear the step-over state if the stepped thread exits,
2575 so we also enable thread-exit events.
2577 - If we are stepping over a breakpoint out of line (displaced
2578 stepping) then we won't remove a breakpoint from the target,
2579 but, if the step spawns a new clone thread, then we will need
2580 to fixup the $pc address in the clone child too, so we need it
2581 to start stopped. We need to release the displaced stepping
2582 buffer if the stepped thread exits, so we also enable
2583 thread-exit events.
2585 - If scheduler-locking applies, threads that the current thread
2586 spawns should remain halted. It's not strictly necessary to
2587 enable thread-exit events in this case, but it doesn't hurt.
2589 if (step_over_info_valid_p ()
2590 || displaced_step_in_progress_thread (tp)
2591 || schedlock_applies (tp))
2593 gdb_thread_options options
2594 = GDB_THREAD_OPTION_CLONE | GDB_THREAD_OPTION_EXIT;
2595 if (target_supports_set_thread_options (options))
2596 tp->set_thread_options (options);
2597 else
2598 target_thread_events (true);
2600 else if (tp->thread_fsm () != nullptr)
2602 gdb_thread_options options = GDB_THREAD_OPTION_EXIT;
2603 if (target_supports_set_thread_options (options))
2604 tp->set_thread_options (options);
2605 else
2606 target_thread_events (true);
2608 else
2610 if (target_supports_set_thread_options (0))
2611 tp->set_thread_options (0);
2612 else
2614 process_stratum_target *resume_target = tp->inf->process_target ();
2615 if (!any_thread_needs_target_thread_events (resume_target,
2616 resume_ptid))
2617 target_thread_events (false);
2621 /* If we're resuming more than one thread simultaneously, then any
2622 thread other than the leader is being set to run free. Clear any
2623 previous thread option for those threads. */
2624 if (resume_ptid != inferior_ptid && target_supports_set_thread_options (0))
2626 process_stratum_target *resume_target = tp->inf->process_target ();
2627 for (thread_info *thr_iter : all_non_exited_threads (resume_target,
2628 resume_ptid))
2629 if (thr_iter != tp)
2630 thr_iter->set_thread_options (0);
2633 infrun_debug_printf ("resume_ptid=%s, step=%d, sig=%s",
2634 resume_ptid.to_string ().c_str (),
2635 step, gdb_signal_to_symbol_string (sig));
2637 target_resume (resume_ptid, step, sig);
2640 /* Resume the inferior. SIG is the signal to give the inferior
2641 (GDB_SIGNAL_0 for none). Note: don't call this directly; instead
2642 call 'resume', which handles exceptions. */
2644 static void
2645 resume_1 (enum gdb_signal sig)
2647 struct thread_info *tp = inferior_thread ();
2648 regcache *regcache = get_thread_regcache (tp);
2649 struct gdbarch *gdbarch = regcache->arch ();
2650 ptid_t resume_ptid;
2651 /* This represents the user's step vs continue request. When
2652 deciding whether "set scheduler-locking step" applies, it's the
2653 user's intention that counts. */
2654 const int user_step = tp->control.stepping_command;
2655 /* This represents what we'll actually request the target to do.
2656 This can decay from a step to a continue, if e.g., we need to
2657 implement single-stepping with breakpoints (software
2658 single-step). */
2659 bool step;
2661 gdb_assert (!tp->stop_requested);
2662 gdb_assert (!thread_is_in_step_over_chain (tp));
2664 if (tp->has_pending_waitstatus ())
2666 infrun_debug_printf
2667 ("thread %s has pending wait "
2668 "status %s (currently_stepping=%d).",
2669 tp->ptid.to_string ().c_str (),
2670 tp->pending_waitstatus ().to_string ().c_str (),
2671 currently_stepping (tp));
2673 tp->inf->process_target ()->threads_executing = true;
2674 tp->set_resumed (true);
2676 /* FIXME: What should we do if we are supposed to resume this
2677 thread with a signal? Maybe we should maintain a queue of
2678 pending signals to deliver. */
2679 if (sig != GDB_SIGNAL_0)
2681 warning (_("Couldn't deliver signal %s to %s."),
2682 gdb_signal_to_name (sig),
2683 tp->ptid.to_string ().c_str ());
2686 tp->set_stop_signal (GDB_SIGNAL_0);
2688 if (target_can_async_p ())
2690 target_async (true);
2691 /* Tell the event loop we have an event to process. */
2692 mark_async_event_handler (infrun_async_inferior_event_token);
2694 return;
2697 tp->stepped_breakpoint = 0;
2699 /* Depends on stepped_breakpoint. */
2700 step = currently_stepping (tp);
2702 if (current_inferior ()->thread_waiting_for_vfork_done != nullptr)
2704 /* Don't try to single-step a vfork parent that is waiting for
2705 the child to get out of the shared memory region (by exec'ing
2706 or exiting). This is particularly important on software
2707 single-step archs, as the child process would trip on the
2708 software single step breakpoint inserted for the parent
2709 process. Since the parent will not actually execute any
2710 instruction until the child is out of the shared region (such
2711 are vfork's semantics), it is safe to simply continue it.
2712 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
2713 the parent, and tell it to `keep_going', which automatically
2714 re-sets it to stepping. */
2715 infrun_debug_printf ("resume : clear step");
2716 step = false;
2719 CORE_ADDR pc = regcache_read_pc (regcache);
2721 infrun_debug_printf ("step=%d, signal=%s, trap_expected=%d, "
2722 "current thread [%s] at %s",
2723 step, gdb_signal_to_symbol_string (sig),
2724 tp->control.trap_expected,
2725 inferior_ptid.to_string ().c_str (),
2726 paddress (gdbarch, pc));
2728 const address_space *aspace = tp->inf->aspace.get ();
2730 /* Normally, by the time we reach `resume', the breakpoints are either
2731 removed or inserted, as appropriate. The exception is if we're sitting
2732 at a permanent breakpoint; we need to step over it, but permanent
2733 breakpoints can't be removed. So we have to test for it here. */
2734 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
2736 if (sig != GDB_SIGNAL_0)
2738 /* We have a signal to pass to the inferior. The resume
2739 may, or may not take us to the signal handler. If this
2740 is a step, we'll need to stop in the signal handler, if
2741 there's one, (if the target supports stepping into
2742 handlers), or in the next mainline instruction, if
2743 there's no handler. If this is a continue, we need to be
2744 sure to run the handler with all breakpoints inserted.
2745 In all cases, set a breakpoint at the current address
2746 (where the handler returns to), and once that breakpoint
2747 is hit, resume skipping the permanent breakpoint. If
2748 that breakpoint isn't hit, then we've stepped into the
2749 signal handler (or hit some other event). We'll delete
2750 the step-resume breakpoint then. */
2752 infrun_debug_printf ("resume: skipping permanent breakpoint, "
2753 "deliver signal first");
2755 clear_step_over_info ();
2756 tp->control.trap_expected = 0;
2758 if (tp->control.step_resume_breakpoint == nullptr)
2760 /* Set a "high-priority" step-resume, as we don't want
2761 user breakpoints at PC to trigger (again) when this
2762 hits. */
2763 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2764 gdb_assert (tp->control.step_resume_breakpoint->first_loc ()
2765 .permanent);
2767 tp->step_after_step_resume_breakpoint = step;
2770 insert_breakpoints ();
2772 else
2774 /* There's no signal to pass, we can go ahead and skip the
2775 permanent breakpoint manually. */
2776 infrun_debug_printf ("skipping permanent breakpoint");
2777 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
2778 /* Update pc to reflect the new address from which we will
2779 execute instructions. */
2780 pc = regcache_read_pc (regcache);
2782 if (step)
2784 /* We've already advanced the PC, so the stepping part
2785 is done. Now we need to arrange for a trap to be
2786 reported to handle_inferior_event. Set a breakpoint
2787 at the current PC, and run to it. Don't update
2788 prev_pc, because if we end in
2789 switch_back_to_stepped_thread, we want the "expected
2790 thread advanced also" branch to be taken. IOW, we
2791 don't want this thread to step further from PC
2792 (overstep). */
2793 gdb_assert (!step_over_info_valid_p ());
2794 insert_single_step_breakpoint (gdbarch, aspace, pc);
2795 insert_breakpoints ();
2797 resume_ptid = internal_resume_ptid (user_step);
2798 do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
2799 tp->set_resumed (true);
2800 return;
2805 /* If we have a breakpoint to step over, make sure to do a single
2806 step only. Same if we have software watchpoints. */
2807 if (tp->control.trap_expected || bpstat_should_step ())
2808 tp->control.may_range_step = 0;
2810 /* If displaced stepping is enabled, step over breakpoints by executing a
2811 copy of the instruction at a different address.
2813 We can't use displaced stepping when we have a signal to deliver;
2814 the comments for displaced_step_prepare explain why. The
2815 comments in the handle_inferior event for dealing with 'random
2816 signals' explain what we do instead.
2818 We can't use displaced stepping when we are waiting for a vfork_done
2819 event; displaced stepping would break the vfork child in the same way
2820 a software single-step breakpoint would. */
2821 if (tp->control.trap_expected
2822 && use_displaced_stepping (tp)
2823 && !step_over_info_valid_p ()
2824 && sig == GDB_SIGNAL_0
2825 && current_inferior ()->thread_waiting_for_vfork_done == nullptr)
2827 displaced_step_prepare_status prepare_status
2828 = displaced_step_prepare (tp);
2830 if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
2832 infrun_debug_printf ("Got placed in step-over queue");
2834 tp->control.trap_expected = 0;
2835 return;
2837 else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_CANT)
2839 /* Fallback to stepping over the breakpoint in-line. */
2841 if (target_is_non_stop_p ())
2842 stop_all_threads ("displaced stepping falling back on inline stepping");
2844 set_step_over_info (aspace, regcache_read_pc (regcache), 0,
2845 tp->global_num);
2847 step = maybe_software_singlestep (gdbarch);
2849 insert_breakpoints ();
2851 else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_OK)
2853 /* Update pc to reflect the new address from which we will
2854 execute instructions due to displaced stepping. */
2855 pc = regcache_read_pc (get_thread_regcache (tp));
2857 step = gdbarch_displaced_step_hw_singlestep (gdbarch);
2859 else
2860 gdb_assert_not_reached ("Invalid displaced_step_prepare_status "
2861 "value.");
2864 /* Do we need to do it the hard way, w/temp breakpoints? */
2865 else if (step)
2866 step = maybe_software_singlestep (gdbarch);
2868 /* Currently, our software single-step implementation leads to different
2869 results than hardware single-stepping in one situation: when stepping
2870 into delivering a signal which has an associated signal handler,
2871 hardware single-step will stop at the first instruction of the handler,
2872 while software single-step will simply skip execution of the handler.
2874 For now, this difference in behavior is accepted since there is no
2875 easy way to actually implement single-stepping into a signal handler
2876 without kernel support.
2878 However, there is one scenario where this difference leads to follow-on
2879 problems: if we're stepping off a breakpoint by removing all breakpoints
2880 and then single-stepping. In this case, the software single-step
2881 behavior means that even if there is a *breakpoint* in the signal
2882 handler, GDB still would not stop.
2884 Fortunately, we can at least fix this particular issue. We detect
2885 here the case where we are about to deliver a signal while software
2886 single-stepping with breakpoints removed. In this situation, we
2887 revert the decisions to remove all breakpoints and insert single-
2888 step breakpoints, and instead we install a step-resume breakpoint
2889 at the current address, deliver the signal without stepping, and
2890 once we arrive back at the step-resume breakpoint, actually step
2891 over the breakpoint we originally wanted to step over. */
2892 if (thread_has_single_step_breakpoints_set (tp)
2893 && sig != GDB_SIGNAL_0
2894 && step_over_info_valid_p ())
2896 /* If we have nested signals or a pending signal is delivered
2897 immediately after a handler returns, we might already have
2898 a step-resume breakpoint set on the earlier handler. We cannot
2899 set another step-resume breakpoint; just continue on until the
2900 original breakpoint is hit. */
2901 if (tp->control.step_resume_breakpoint == nullptr)
2903 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2904 tp->step_after_step_resume_breakpoint = 1;
2907 delete_single_step_breakpoints (tp);
2909 clear_step_over_info ();
2910 tp->control.trap_expected = 0;
2912 insert_breakpoints ();
2915 /* If STEP is set, it's a request to use hardware stepping
2916 facilities. But in that case, we should never
2917 use singlestep breakpoint. */
2918 gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));
2920 /* Decide the set of threads to ask the target to resume. */
2921 if (tp->control.trap_expected)
2923 /* We're allowing a thread to run past a breakpoint it has
2924 hit, either by single-stepping the thread with the breakpoint
2925 removed, or by displaced stepping, with the breakpoint inserted.
2926 In the former case, we need to single-step only this thread,
2927 and keep others stopped, as they can miss this breakpoint if
2928 allowed to run. That's not really a problem for displaced
2929 stepping, but, we still keep other threads stopped, in case
2930 another thread is also stopped for a breakpoint waiting for
2931 its turn in the displaced stepping queue. */
2932 resume_ptid = inferior_ptid;
2934 else
2935 resume_ptid = internal_resume_ptid (user_step);
2937 if (execution_direction != EXEC_REVERSE
2938 && step && breakpoint_inserted_here_p (aspace, pc))
2940 /* There are two cases where we currently need to step a
2941 breakpoint instruction when we have a signal to deliver:
2943 - See handle_signal_stop where we handle random signals that
2944 could take us out of the stepping range. Normally, in
2945 that case we end up continuing (instead of stepping) over the
2946 signal handler with a breakpoint at PC, but there are cases
2947 where we should _always_ single-step, even if we have a
2948 step-resume breakpoint, like when a software watchpoint is
2949 set. Assuming single-stepping and delivering a signal at the
2950 same time would take us to the signal handler, then we could
2951 have removed the breakpoint at PC to step over it. However,
2952 some hardware step targets (like e.g., Mac OS) can't step
2953 into signal handlers, and for those, we need to leave the
2954 breakpoint at PC inserted, as otherwise if the handler
2955 recurses and executes PC again, it'll miss the breakpoint.
2956 So we leave the breakpoint inserted anyway, but we need to
2957 record that we tried to step a breakpoint instruction, so
2958 that adjust_pc_after_break doesn't end up confused.
2960 - In non-stop if we insert a breakpoint (e.g., a step-resume)
2961 in one thread after another thread that was stepping had been
2962 momentarily paused for a step-over. When we re-resume the
2963 stepping thread, it may be resumed from that address with a
2964 breakpoint that hasn't trapped yet. Seen with
2965 gdb.threads/non-stop-fair-events.exp, on targets that don't
2966 do displaced stepping. */
2968 infrun_debug_printf ("resume: [%s] stepped breakpoint",
2969 tp->ptid.to_string ().c_str ());
2971 tp->stepped_breakpoint = 1;
2973 /* Most targets can step a breakpoint instruction, thus
2974 executing it normally. But if this one cannot, just
2975 continue and we will hit it anyway. */
2976 if (gdbarch_cannot_step_breakpoint (gdbarch))
2977 step = false;
2980 if (tp->control.may_range_step)
2982 /* If we're resuming a thread with the PC out of the step
2983 range, then we're doing some nested/finer run control
2984 operation, like stepping the thread out of the dynamic
2985 linker or the displaced stepping scratch pad. We
2986 shouldn't have allowed a range step then. */
2987 gdb_assert (pc_in_thread_step_range (pc, tp));
2990 do_target_resume (resume_ptid, step, sig);
2991 tp->set_resumed (true);
2994 /* Resume the inferior. SIG is the signal to give the inferior
2995 (GDB_SIGNAL_0 for none). This is a wrapper around 'resume_1' that
2996 rolls back state on error. */
2998 static void
2999 resume (gdb_signal sig)
3003 resume_1 (sig);
3005 catch (const gdb_exception &ex)
3007 /* If resuming is being aborted for any reason, delete any
3008 single-step breakpoint resume_1 may have created, to avoid
3009 confusing the following resumption, and to avoid leaving
3010 single-step breakpoints perturbing other threads, in case
3011 we're running in non-stop mode. */
3012 if (inferior_ptid != null_ptid)
3013 delete_single_step_breakpoints (inferior_thread ());
3014 throw;
3019 /* Proceeding. */
3021 /* See infrun.h. */
3023 /* Counter that tracks number of user visible stops. This can be used
3024 to tell whether a command has proceeded the inferior past the
3025 current location. This allows e.g., inferior function calls in
3026 breakpoint commands to not interrupt the command list. When the
3027 call finishes successfully, the inferior is standing at the same
3028 breakpoint as if nothing happened (and so we don't call
3029 normal_stop). */
3030 static ULONGEST current_stop_id;
3032 /* See infrun.h. */
3034 ULONGEST
3035 get_stop_id (void)
3037 return current_stop_id;
3040 /* Called when we report a user visible stop. */
3042 static void
3043 new_stop_id (void)
3045 current_stop_id++;
3048 /* Clear out all variables saying what to do when inferior is continued.
3049 First do this, then set the ones you want, then call `proceed'. */
3051 static void
3052 clear_proceed_status_thread (struct thread_info *tp)
3054 infrun_debug_printf ("%s", tp->ptid.to_string ().c_str ());
3056 /* If we're starting a new sequence, then the previous finished
3057 single-step is no longer relevant. */
3058 if (tp->has_pending_waitstatus ())
3060 if (tp->stop_reason () == TARGET_STOPPED_BY_SINGLE_STEP)
3062 infrun_debug_printf ("pending event of %s was a finished step. "
3063 "Discarding.",
3064 tp->ptid.to_string ().c_str ());
3066 tp->clear_pending_waitstatus ();
3067 tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
3069 else
3071 infrun_debug_printf
3072 ("thread %s has pending wait status %s (currently_stepping=%d).",
3073 tp->ptid.to_string ().c_str (),
3074 tp->pending_waitstatus ().to_string ().c_str (),
3075 currently_stepping (tp));
3079 /* If this signal should not be seen by program, give it zero.
3080 Used for debugging signals. */
3081 if (!signal_pass_state (tp->stop_signal ()))
3082 tp->set_stop_signal (GDB_SIGNAL_0);
3084 tp->release_thread_fsm ();
3086 tp->control.trap_expected = 0;
3087 tp->control.step_range_start = 0;
3088 tp->control.step_range_end = 0;
3089 tp->control.may_range_step = 0;
3090 tp->control.step_frame_id = null_frame_id;
3091 tp->control.step_stack_frame_id = null_frame_id;
3092 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
3093 tp->control.step_start_function = nullptr;
3094 tp->stop_requested = false;
3096 tp->control.stop_step = 0;
3098 tp->control.proceed_to_finish = 0;
3100 tp->control.stepping_command = 0;
3102 /* Discard any remaining commands or status from previous stop. */
3103 bpstat_clear (&tp->control.stop_bpstat);
3106 /* Notify the current interpreter and observers that the target is about to
3107 proceed. */
3109 static void
3110 notify_about_to_proceed ()
3112 top_level_interpreter ()->on_about_to_proceed ();
3113 gdb::observers::about_to_proceed.notify ();
3116 void
3117 clear_proceed_status (int step)
3119 /* With scheduler-locking replay, stop replaying other threads if we're
3120 not replaying the user-visible resume ptid.
3122 This is a convenience feature to not require the user to explicitly
3123 stop replaying the other threads. We're assuming that the user's
3124 intent is to resume tracing the recorded process. */
3125 if (!non_stop && scheduler_mode == schedlock_replay
3126 && target_record_is_replaying (minus_one_ptid)
3127 && !target_record_will_replay (user_visible_resume_ptid (step),
3128 execution_direction))
3129 target_record_stop_replaying ();
3131 if (!non_stop && inferior_ptid != null_ptid)
3133 ptid_t resume_ptid = user_visible_resume_ptid (step);
3134 process_stratum_target *resume_target
3135 = user_visible_resume_target (resume_ptid);
3137 /* In all-stop mode, delete the per-thread status of all threads
3138 we're about to resume, implicitly and explicitly. */
3139 for (thread_info *tp : all_non_exited_threads (resume_target, resume_ptid))
3140 clear_proceed_status_thread (tp);
3143 if (inferior_ptid != null_ptid)
3145 struct inferior *inferior;
3147 if (non_stop)
3149 /* If in non-stop mode, only delete the per-thread status of
3150 the current thread. */
3151 clear_proceed_status_thread (inferior_thread ());
3154 inferior = current_inferior ();
3155 inferior->control.stop_soon = NO_STOP_QUIETLY;
3158 notify_about_to_proceed ();
3161 /* Returns true if TP is still stopped at a breakpoint that needs
3162 stepping-over in order to make progress. If the breakpoint is gone
3163 meanwhile, we can skip the whole step-over dance. */
3165 static bool
3166 thread_still_needs_step_over_bp (struct thread_info *tp)
3168 if (tp->stepping_over_breakpoint)
3170 struct regcache *regcache = get_thread_regcache (tp);
3172 if (breakpoint_here_p (tp->inf->aspace.get (),
3173 regcache_read_pc (regcache))
3174 == ordinary_breakpoint_here)
3175 return true;
3177 tp->stepping_over_breakpoint = 0;
3180 return false;
3183 /* Check whether thread TP still needs to start a step-over in order
3184 to make progress when resumed. Returns a bitwise or of enum
3185 step_over_what bits, indicating what needs to be stepped over. */
3187 static step_over_what
3188 thread_still_needs_step_over (struct thread_info *tp)
3190 step_over_what what = 0;
3192 if (thread_still_needs_step_over_bp (tp))
3193 what |= STEP_OVER_BREAKPOINT;
3195 if (tp->stepping_over_watchpoint
3196 && !target_have_steppable_watchpoint ())
3197 what |= STEP_OVER_WATCHPOINT;
3199 return what;
3202 /* Returns true if scheduler locking applies to thread TP, taking into
3203 account whether TP is running a step/next-like command. */
3205 static bool
3206 schedlock_applies (struct thread_info *tp)
3208 return (scheduler_mode == schedlock_on
3209 || (scheduler_mode == schedlock_step
3210 && tp->control.stepping_command)
3211 || (scheduler_mode == schedlock_replay
3212 && target_record_will_replay (minus_one_ptid,
3213 execution_direction)));
3216 /* When FORCE_P is false, set process_stratum_target::COMMIT_RESUMED_STATE
3217 in all target stacks that have threads executing and don't have threads
3218 with pending events.
3220 When FORCE_P is true, set process_stratum_target::COMMIT_RESUMED_STATE
3221 in all target stacks that have threads executing regardless of whether
3222 there are pending events or not.
3224 Passing FORCE_P as false makes sense when GDB is going to wait for
3225 events from all threads and will therefore spot the pending events.
3226 However, if GDB is only going to wait for events from select threads
3227 (i.e. when performing an inferior call) then a pending event on some
3228 other thread will not be spotted, and if we fail to commit the resume
3229 state for the thread performing the inferior call, then the inferior
3230 call will never complete (or even start). */
3232 static void
3233 maybe_set_commit_resumed_all_targets (bool force_p)
3235 scoped_restore_current_thread restore_thread;
3237 for (inferior *inf : all_non_exited_inferiors ())
3239 process_stratum_target *proc_target = inf->process_target ();
3241 if (proc_target->commit_resumed_state)
3243 /* We already set this in a previous iteration, via another
3244 inferior sharing the process_stratum target. */
3245 continue;
3248 /* If the target has no resumed threads, it would be useless to
3249 ask it to commit the resumed threads. */
3250 if (!proc_target->threads_executing)
3252 infrun_debug_printf ("not requesting commit-resumed for target "
3253 "%s, no resumed threads",
3254 proc_target->shortname ());
3255 continue;
3258 /* As an optimization, if a thread from this target has some
3259 status to report, handle it before requiring the target to
3260 commit its resumed threads: handling the status might lead to
3261 resuming more threads. */
3262 if (!force_p && proc_target->has_resumed_with_pending_wait_status ())
3264 infrun_debug_printf ("not requesting commit-resumed for target %s, a"
3265 " thread has a pending waitstatus",
3266 proc_target->shortname ());
3267 continue;
3270 switch_to_inferior_no_thread (inf);
3272 if (!force_p && target_has_pending_events ())
3274 infrun_debug_printf ("not requesting commit-resumed for target %s, "
3275 "target has pending events",
3276 proc_target->shortname ());
3277 continue;
3280 infrun_debug_printf ("enabling commit-resumed for target %s",
3281 proc_target->shortname ());
3283 proc_target->commit_resumed_state = true;
3287 /* See infrun.h. */
3289 void
3290 maybe_call_commit_resumed_all_targets ()
3292 scoped_restore_current_thread restore_thread;
3294 for (inferior *inf : all_non_exited_inferiors ())
3296 process_stratum_target *proc_target = inf->process_target ();
3298 if (!proc_target->commit_resumed_state)
3299 continue;
3301 switch_to_inferior_no_thread (inf);
3303 infrun_debug_printf ("calling commit_resumed for target %s",
3304 proc_target->shortname());
3306 target_commit_resumed ();
3310 /* To track nesting of scoped_disable_commit_resumed objects, ensuring
3311 that only the outermost one attempts to re-enable
3312 commit-resumed. */
3313 static bool enable_commit_resumed = true;
3315 /* See infrun.h. */
3317 scoped_disable_commit_resumed::scoped_disable_commit_resumed
3318 (const char *reason)
3319 : m_reason (reason),
3320 m_prev_enable_commit_resumed (enable_commit_resumed)
3322 infrun_debug_printf ("reason=%s", m_reason);
3324 enable_commit_resumed = false;
3326 for (inferior *inf : all_non_exited_inferiors ())
3328 process_stratum_target *proc_target = inf->process_target ();
3330 if (m_prev_enable_commit_resumed)
3332 /* This is the outermost instance: force all
3333 COMMIT_RESUMED_STATE to false. */
3334 proc_target->commit_resumed_state = false;
3336 else
3338 /* This is not the outermost instance, we expect
3339 COMMIT_RESUMED_STATE to have been cleared by the
3340 outermost instance. */
3341 gdb_assert (!proc_target->commit_resumed_state);
3346 /* See infrun.h. */
3348 void
3349 scoped_disable_commit_resumed::reset ()
3351 if (m_reset)
3352 return;
3353 m_reset = true;
3355 infrun_debug_printf ("reason=%s", m_reason);
3357 gdb_assert (!enable_commit_resumed);
3359 enable_commit_resumed = m_prev_enable_commit_resumed;
3361 if (m_prev_enable_commit_resumed)
3363 /* This is the outermost instance, re-enable
3364 COMMIT_RESUMED_STATE on the targets where it's possible. */
3365 maybe_set_commit_resumed_all_targets (false);
3367 else
3369 /* This is not the outermost instance, we expect
3370 COMMIT_RESUMED_STATE to still be false. */
3371 for (inferior *inf : all_non_exited_inferiors ())
3373 process_stratum_target *proc_target = inf->process_target ();
3374 gdb_assert (!proc_target->commit_resumed_state);
3379 /* See infrun.h. */
3381 scoped_disable_commit_resumed::~scoped_disable_commit_resumed ()
3383 reset ();
3386 /* See infrun.h. */
3388 void
3389 scoped_disable_commit_resumed::reset_and_commit ()
3391 reset ();
3392 maybe_call_commit_resumed_all_targets ();
3395 /* See infrun.h. */
3397 scoped_enable_commit_resumed::scoped_enable_commit_resumed
3398 (const char *reason, bool force_p)
3399 : m_reason (reason),
3400 m_prev_enable_commit_resumed (enable_commit_resumed)
3402 infrun_debug_printf ("reason=%s", m_reason);
3404 if (!enable_commit_resumed)
3406 enable_commit_resumed = true;
3408 /* Re-enable COMMIT_RESUMED_STATE on the targets where it's
3409 possible. */
3410 maybe_set_commit_resumed_all_targets (force_p);
3412 maybe_call_commit_resumed_all_targets ();
3416 /* See infrun.h. */
3418 scoped_enable_commit_resumed::~scoped_enable_commit_resumed ()
3420 infrun_debug_printf ("reason=%s", m_reason);
3422 gdb_assert (enable_commit_resumed);
3424 enable_commit_resumed = m_prev_enable_commit_resumed;
3426 if (!enable_commit_resumed)
3428 /* Force all COMMIT_RESUMED_STATE back to false. */
3429 for (inferior *inf : all_non_exited_inferiors ())
3431 process_stratum_target *proc_target = inf->process_target ();
3432 proc_target->commit_resumed_state = false;
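/* Usage sketch (illustrative only): a caller that needs its resumption
   committed immediately, e.g. around an inferior function call, could
   write:

     {
       scoped_enable_commit_resumed enable ("infcall", true);
       // Resume the thread doing the call and wait for it to stop.
     }

   Passing FORCE_P as true means that pending events on other threads
   do not prevent committing the resumed state (see
   maybe_set_commit_resumed_all_targets above). */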
3437 /* Check that all the targets we're about to resume are in non-stop
3438 mode. Ideally, we'd only care whether all targets support
3439 target-async, but we're not there yet. E.g., stop_all_threads
3440 doesn't know how to handle all-stop targets. Also, the remote
3441 protocol in all-stop mode is synchronous, irrespective of
3442 target-async, which means that things like a breakpoint re-set
3443 triggered by one target would try to read memory from all targets
3444 and fail. */
3446 static void
3447 check_multi_target_resumption (process_stratum_target *resume_target)
3449 if (!non_stop && resume_target == nullptr)
3451 scoped_restore_current_thread restore_thread;
3453 /* This is used to track whether we're resuming more than one
3454 target. */
3455 process_stratum_target *first_connection = nullptr;
3457 /* The first inferior we see with a target that does not work in
3458 always-non-stop mode. */
3459 inferior *first_not_non_stop = nullptr;
3461 for (inferior *inf : all_non_exited_inferiors ())
3463 switch_to_inferior_no_thread (inf);
3465 if (!target_has_execution ())
3466 continue;
3468 process_stratum_target *proc_target
3469 = current_inferior ()->process_target ();
3471 if (!target_is_non_stop_p ())
3472 first_not_non_stop = inf;
3474 if (first_connection == nullptr)
3475 first_connection = proc_target;
3476 else if (first_connection != proc_target
3477 && first_not_non_stop != nullptr)
3479 switch_to_inferior_no_thread (first_not_non_stop);
3481 proc_target = current_inferior ()->process_target ();
3483 error (_("Connection %d (%s) does not support "
3484 "multi-target resumption."),
3485 proc_target->connection_number,
3486 make_target_connection_string (proc_target).c_str ());
3492 /* Helper function for `proceed`. Check if thread TP is suitable for
3493 resuming, and, if it is, switch to the thread and call
3494 `keep_going_pass_signal`. If TP is not suitable for resuming then this
3495 function will just return without switching threads. */
3497 static void
3498 proceed_resume_thread_checked (thread_info *tp)
3500 if (!tp->inf->has_execution ())
3502 infrun_debug_printf ("[%s] target has no execution",
3503 tp->ptid.to_string ().c_str ());
3504 return;
3507 if (tp->resumed ())
3509 infrun_debug_printf ("[%s] resumed",
3510 tp->ptid.to_string ().c_str ());
3511 gdb_assert (tp->executing () || tp->has_pending_waitstatus ());
3512 return;
3515 if (thread_is_in_step_over_chain (tp))
3517 infrun_debug_printf ("[%s] needs step-over",
3518 tp->ptid.to_string ().c_str ());
3519 return;
3522 /* When handling a vfork GDB removes all breakpoints from the program
3523 space in which the vfork is being handled. If we are following the
3524 parent then GDB will set the thread_waiting_for_vfork_done member of
3525 the parent inferior. In this case we should take care to only resume
3526 the vfork parent thread; the kernel will hold this thread suspended
3527 until the vfork child has exited or execd, at which point the parent
3528 will be resumed and a VFORK_DONE event sent to GDB. */
3529 if (tp->inf->thread_waiting_for_vfork_done != nullptr)
3531 if (target_is_non_stop_p ())
3533 /* For non-stop targets, regardless of whether GDB is using
3534 all-stop or non-stop mode, threads are controlled
3535 individually.
3537 When a thread is handling a vfork, breakpoints are removed
3538 from the inferior (well, program space in fact), so it is
3539 critical that we don't try to resume any thread other than the
3540 vfork parent. */
3541 if (tp != tp->inf->thread_waiting_for_vfork_done)
3543 infrun_debug_printf ("[%s] thread %s of this inferior is "
3544 "waiting for vfork-done",
3545 tp->ptid.to_string ().c_str (),
3546 tp->inf->thread_waiting_for_vfork_done
3547 ->ptid.to_string ().c_str ());
3548 return;
3551 else
3553 /* For all-stop targets, when we attempt to resume the inferior,
3554 we will only resume the vfork parent thread, this is handled
3555 in internal_resume_ptid.
3557 Additionally, we will always be called with the vfork parent
3558 thread as the current thread (TP) thanks to follow_fork, as
3559 such the following assertion should hold.
3561 Beyond this there is nothing more that needs to be done
3562 here. */
3563 gdb_assert (tp == tp->inf->thread_waiting_for_vfork_done);
3567 /* When handling a vfork GDB removes all breakpoints from the program
3568 space in which the vfork is being handled. If we are following the
3569 child then GDB will set the vfork_child member of the vfork parent
3570 inferior. Once the child has either exited or execd then GDB will
3571 detach from the parent process. Until that point GDB should not
3572 resume any thread in the parent process. */
3573 if (tp->inf->vfork_child != nullptr)
3575 infrun_debug_printf ("[%s] thread is part of a vfork parent, child is %d",
3576 tp->ptid.to_string ().c_str (),
3577 tp->inf->vfork_child->pid);
3578 return;
3581 infrun_debug_printf ("resuming %s",
3582 tp->ptid.to_string ().c_str ());
3584 execution_control_state ecs (tp);
3585 switch_to_thread (tp);
3586 keep_going_pass_signal (&ecs);
3587 if (!ecs.wait_some_more)
3588 error (_("Command aborted."));
3591 /* Basic routine for continuing the program in various fashions.
3593 ADDR is the address to resume at, or -1 for resume where stopped.
3594 SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none,
3595 or GDB_SIGNAL_DEFAULT to act according to how it stopped.
3597 You should call clear_proceed_status before calling proceed. */
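/* Typical call sequence (sketch only), as an execution command such
   as "continue" would do:

     clear_proceed_status (0);
     proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);

   i.e. resume where the thread last stopped, handling the signal
   according to how the thread stopped. */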
3599 void
3600 proceed (CORE_ADDR addr, enum gdb_signal siggnal)
3602 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
3604 struct gdbarch *gdbarch;
3605 CORE_ADDR pc;
3607 /* If we're stopped at a fork/vfork, switch to either the parent or child
3608 thread as defined by the "set follow-fork-mode" command, or, if both
3609 the parent and child are controlled by GDB, and schedule-multiple is
3610 on, follow the child. If none of the above apply then we just proceed
3611 resuming the current thread. */
3612 if (!follow_fork ())
3614 /* The target for some reason decided not to resume. */
3615 normal_stop ();
3616 if (target_can_async_p ())
3617 inferior_event_handler (INF_EXEC_COMPLETE);
3618 return;
3621 /* We'll update this if & when we switch to a new thread. */
3622 update_previous_thread ();
3624 thread_info *cur_thr = inferior_thread ();
3625 infrun_debug_printf ("cur_thr = %s", cur_thr->ptid.to_string ().c_str ());
3627 regcache *regcache = get_thread_regcache (cur_thr);
3628 gdbarch = regcache->arch ();
3629 pc = regcache_read_pc_protected (regcache);
3631 /* Fill in with reasonable starting values. */
3632 init_thread_stepping_state (cur_thr);
3634 gdb_assert (!thread_is_in_step_over_chain (cur_thr));
3636 ptid_t resume_ptid
3637 = user_visible_resume_ptid (cur_thr->control.stepping_command);
3638 process_stratum_target *resume_target
3639 = user_visible_resume_target (resume_ptid);
3641 check_multi_target_resumption (resume_target);
3643 if (addr == (CORE_ADDR) -1)
3645 const address_space *aspace = cur_thr->inf->aspace.get ();
3647 if (cur_thr->stop_pc_p ()
3648 && pc == cur_thr->stop_pc ()
3649 && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
3650 && execution_direction != EXEC_REVERSE)
3651 /* There is a breakpoint at the address we will resume at,
3652 step one instruction before inserting breakpoints so that
3653 we do not stop right away (and report a second hit at this
3654 breakpoint).
3656 Note, we don't do this in reverse, because we won't
3657 actually be executing the breakpoint insn anyway.
3658 We'll be (un-)executing the previous instruction. */
3659 cur_thr->stepping_over_breakpoint = 1;
3660 else if (gdbarch_single_step_through_delay_p (gdbarch)
3661 && gdbarch_single_step_through_delay (gdbarch,
3662 get_current_frame ()))
3663 /* We stepped onto an instruction that needs to be stepped
3664 again before re-inserting the breakpoint, do so. */
3665 cur_thr->stepping_over_breakpoint = 1;
3667 else
3669 regcache_write_pc (regcache, addr);
3672 if (siggnal != GDB_SIGNAL_DEFAULT)
3673 cur_thr->set_stop_signal (siggnal);
3675 /* If an exception is thrown from this point on, make sure to
3676 propagate GDB's knowledge of the executing state to the
3677 frontend/user running state. */
3678 scoped_finish_thread_state finish_state (resume_target, resume_ptid);
3680 /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
3681 threads (e.g., we might need to set threads stepping over
3682 breakpoints first), from the user/frontend's point of view, all
3683 threads in RESUME_PTID are now running. Unless we're calling an
3684 inferior function, as in that case we pretend the inferior
3685 doesn't run at all. */
3686 if (!cur_thr->control.in_infcall)
3687 set_running (resume_target, resume_ptid, true);
3689 infrun_debug_printf ("addr=%s, signal=%s, resume_ptid=%s",
3690 paddress (gdbarch, addr),
3691 gdb_signal_to_symbol_string (siggnal),
3692 resume_ptid.to_string ().c_str ());
3694 annotate_starting ();
3696 /* Make sure that output from GDB appears before output from the
3697 inferior. */
3698 gdb_flush (gdb_stdout);
3700 /* Since we've marked the inferior running, give it the terminal. A
3701 QUIT/Ctrl-C from here on is forwarded to the target (which can
3702 still detect attempts to unblock a stuck connection with repeated
3703 Ctrl-C from within target_pass_ctrlc). */
3704 target_terminal::inferior ();
3706 /* In a multi-threaded task we may select another thread and
3707 then continue or step.
3709 But if a thread that we're resuming had stopped at a breakpoint,
3710 it will immediately cause another breakpoint stop without any
3711 execution (i.e. it will report a breakpoint hit incorrectly). So
3712 we must step over it first.
3714 Look for threads other than the current (TP) that reported a
3715 breakpoint hit and haven't been resumed yet since. */
3717 /* If scheduler locking applies, we can avoid iterating over all
3718 threads. */
3719 if (!non_stop && !schedlock_applies (cur_thr))
3721 for (thread_info *tp : all_non_exited_threads (resume_target,
3722 resume_ptid))
3724 switch_to_thread_no_regs (tp);
3726 /* Ignore the current thread here. It's handled
3727 afterwards. */
3728 if (tp == cur_thr)
3729 continue;
3731 if (!thread_still_needs_step_over (tp))
3732 continue;
3734 gdb_assert (!thread_is_in_step_over_chain (tp));
3736 infrun_debug_printf ("need to step-over [%s] first",
3737 tp->ptid.to_string ().c_str ());
3739 global_thread_step_over_chain_enqueue (tp);
3742 switch_to_thread (cur_thr);
3745 /* Enqueue the current thread last, so that we move all other
3746 threads over their breakpoints first. */
3747 if (cur_thr->stepping_over_breakpoint)
3748 global_thread_step_over_chain_enqueue (cur_thr);
3750 /* If the thread isn't started, we'll still need to set its prev_pc,
3751 so that switch_back_to_stepped_thread knows the thread hasn't
3752 advanced. Must do this before resuming any thread, as in
3753 all-stop/remote, once we resume we can't send any other packet
3754 until the target stops again. */
3755 cur_thr->prev_pc = regcache_read_pc_protected (regcache);
3758 scoped_disable_commit_resumed disable_commit_resumed ("proceeding");
3759 bool step_over_started = start_step_over ();
3761 if (step_over_info_valid_p ())
3763 /* Either this thread started a new in-line step over, or some
3764 other thread was already doing one. In either case, don't
3765 resume anything else until the step-over is finished. */
3767 else if (step_over_started && !target_is_non_stop_p ())
3769 /* A new displaced stepping sequence was started. In all-stop,
3770 we can't talk to the target anymore until it next stops. */
3772 else if (!non_stop && target_is_non_stop_p ())
3774 INFRUN_SCOPED_DEBUG_START_END
3775 ("resuming threads, all-stop-on-top-of-non-stop");
3777 /* In all-stop, but the target is always in non-stop mode.
3778 Start all other threads that are implicitly resumed too. */
3779 for (thread_info *tp : all_non_exited_threads (resume_target,
3780 resume_ptid))
3782 switch_to_thread_no_regs (tp);
3783 proceed_resume_thread_checked (tp);
3786 else
3787 proceed_resume_thread_checked (cur_thr);
3789 disable_commit_resumed.reset_and_commit ();
3792 finish_state.release ();
3794 /* If we've switched threads above, switch back to the previously
3795 current thread. We don't want the user to see a different
3796 selected thread. */
3797 switch_to_thread (cur_thr);
3799 /* Tell the event loop to wait for it to stop. If the target
3800 supports asynchronous execution, it'll do this from within
3801 target_resume. */
3802 if (!target_can_async_p ())
3803 mark_async_event_handler (infrun_async_inferior_event_token);
3807 /* Start remote-debugging of a machine over a serial link. */
3809 void
3810 start_remote (int from_tty)
3812 inferior *inf = current_inferior ();
3813 inf->control.stop_soon = STOP_QUIETLY_REMOTE;
3815 /* Always go on waiting for the target, regardless of the mode. */
3816 /* FIXME: cagney/1999-09-23: At present it isn't possible to
3817 indicate to wait_for_inferior that a target should timeout if
3818 nothing is returned (instead of just blocking). Because of this,
3819 targets expecting an immediate response need to, internally, set
3820 things up so that the target_wait() is forced to eventually
3821 timeout. */
3822 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
3823 differentiate to its caller what the state of the target is after
3824 the initial open has been performed. Here we're assuming that
3825 the target has stopped. It should be possible to eventually have
3826 target_open() return to the caller an indication that the target
3827 is currently running and GDB state should be set to the same as
3828 for an async run. */
3829 wait_for_inferior (inf);
3831 /* Now that the inferior has stopped, do any bookkeeping like
3832 loading shared libraries. We want to do this before normal_stop,
3833 so that the displayed frame is up to date. */
3834 post_create_inferior (from_tty);
3836 normal_stop ();
3839 /* Initialize static vars when a new inferior begins. */
3841 void
3842 init_wait_for_inferior (void)
3844 /* These are meaningless until the first time through wait_for_inferior. */
3846 breakpoint_init_inferior (current_inferior (), inf_starting);
3848 clear_proceed_status (0);
3850 nullify_last_target_wait_ptid ();
3852 update_previous_thread ();
3857 static void handle_inferior_event (struct execution_control_state *ecs);
3859 static void handle_step_into_function (struct gdbarch *gdbarch,
3860 struct execution_control_state *ecs);
3861 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
3862 struct execution_control_state *ecs);
3863 static void handle_signal_stop (struct execution_control_state *ecs);
3864 static void check_exception_resume (struct execution_control_state *,
3865 const frame_info_ptr &);
3867 static void end_stepping_range (struct execution_control_state *ecs);
3868 static void stop_waiting (struct execution_control_state *ecs);
3869 static void keep_going (struct execution_control_state *ecs);
3870 static void process_event_stop_test (struct execution_control_state *ecs);
3871 static bool switch_back_to_stepped_thread (struct execution_control_state *ecs);
3873 /* This function is attached as a "thread_stop_requested" observer.
3874 Cleanup local state that assumed the PTID was to be resumed, and
3875 report the stop to the frontend. */
3877 static void
3878 infrun_thread_stop_requested (ptid_t ptid)
3880 process_stratum_target *curr_target = current_inferior ()->process_target ();
3882 /* PTID was requested to stop. If the thread was already stopped,
3883 but the user/frontend doesn't know about that yet (e.g., the
3884 thread had been temporarily paused for some step-over), set up
3885 for reporting the stop now. */
3886 for (thread_info *tp : all_threads (curr_target, ptid))
3888 if (tp->state != THREAD_RUNNING)
3889 continue;
3890 if (tp->executing ())
3891 continue;
3893 /* Remove matching threads from the step-over queue, so
3894 start_step_over doesn't try to resume them
3895 automatically. */
3896 if (thread_is_in_step_over_chain (tp))
3897 global_thread_step_over_chain_remove (tp);
3899 /* If the thread is stopped, but the user/frontend doesn't
3900 know about that yet, queue a pending event, as if the
3901 thread had just stopped now. Unless the thread already had
3902 a pending event. */
3903 if (!tp->has_pending_waitstatus ())
3905 target_waitstatus ws;
3906 ws.set_stopped (GDB_SIGNAL_0);
3907 tp->set_pending_waitstatus (ws);
3910 /* Clear the inline-frame state, since we're re-processing the
3911 stop. */
3912 clear_inline_frame_state (tp);
3914 /* If this thread was paused because some other thread was
3915 doing an inline-step over, let that finish first. Once
3916 that happens, we'll restart all threads and consume pending
3917 stop events then. */
3918 if (step_over_info_valid_p ())
3919 continue;
3921 /* Otherwise we can process the (new) pending event now. Set
3922 it so this pending event is considered by
3923 do_target_wait. */
3924 tp->set_resumed (true);
3928 /* Delete the step resume, single-step and longjmp/exception resume
3929 breakpoints of TP. */
3931 static void
3932 delete_thread_infrun_breakpoints (struct thread_info *tp)
3934 delete_step_resume_breakpoint (tp);
3935 delete_exception_resume_breakpoint (tp);
3936 delete_single_step_breakpoints (tp);
3939 /* If the target still has execution, call FUNC for each thread that
3940 just stopped. In all-stop, that's all the non-exited threads; in
3941 non-stop, that's the current thread, only. */
3943 typedef void (*for_each_just_stopped_thread_callback_func)
3944 (struct thread_info *tp);
3946 static void
3947 for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
3949 if (!target_has_execution () || inferior_ptid == null_ptid)
3950 return;
3952 if (target_is_non_stop_p ())
3954 /* If in non-stop mode, only the current thread stopped. */
3955 func (inferior_thread ());
3957 else
3959 /* In all-stop mode, all threads have stopped. */
3960 for (thread_info *tp : all_non_exited_threads ())
3961 func (tp);
3965 /* Delete the step resume and longjmp/exception resume breakpoints of
3966 the threads that just stopped. */
3968 static void
3969 delete_just_stopped_threads_infrun_breakpoints (void)
3971 for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
3974 /* Delete the single-step breakpoints of the threads that just
3975 stopped. */
3977 static void
3978 delete_just_stopped_threads_single_step_breakpoints (void)
3980 for_each_just_stopped_thread (delete_single_step_breakpoints);
3983 /* See infrun.h. */
3985 void
3986 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
3987 const struct target_waitstatus &ws)
3989 infrun_debug_printf ("target_wait (%s [%s], status) =",
3990 waiton_ptid.to_string ().c_str (),
3991 target_pid_to_str (waiton_ptid).c_str ());
3992 infrun_debug_printf (" %s [%s],",
3993 result_ptid.to_string ().c_str (),
3994 target_pid_to_str (result_ptid).c_str ());
3995 infrun_debug_printf (" %s", ws.to_string ().c_str ());
3998 /* Select a thread at random, out of those which are resumed and have
3999 had events. */
4001 static struct thread_info *
4002 random_pending_event_thread (inferior *inf, ptid_t waiton_ptid)
4004 process_stratum_target *proc_target = inf->process_target ();
4005 thread_info *thread
4006 = proc_target->random_resumed_with_pending_wait_status (inf, waiton_ptid);
4008 if (thread == nullptr)
4010 infrun_debug_printf ("None found.");
4011 return nullptr;
4014 infrun_debug_printf ("Found %s.", thread->ptid.to_string ().c_str ());
4015 gdb_assert (thread->resumed ());
4016 gdb_assert (thread->has_pending_waitstatus ());
4018 return thread;
4021 /* Wrapper for target_wait that first checks whether threads have
4022 pending statuses to report before actually asking the target for
4023 more events. INF is the inferior we're using to call target_wait
4024 on. */
4026 static ptid_t
4027 do_target_wait_1 (inferior *inf, ptid_t ptid,
4028 target_waitstatus *status, target_wait_flags options)
4030 struct thread_info *tp;
4032 /* We know that we are looking for an event in the target of inferior
4033 INF, but we don't know which thread the event might come from. As
4034 such we want to make sure that INFERIOR_PTID is reset so that none of
4035 the wait code relies on it - doing so is always a mistake. */
4036 switch_to_inferior_no_thread (inf);
4038 /* First check if there is a resumed thread with a wait status
4039 pending. */
4040 if (ptid == minus_one_ptid || ptid.is_pid ())
4042 tp = random_pending_event_thread (inf, ptid);
4044 else
4046 infrun_debug_printf ("Waiting for specific thread %s.",
4047 ptid.to_string ().c_str ());
4049 /* We have a specific thread to check. */
4050 tp = inf->find_thread (ptid);
4051 gdb_assert (tp != nullptr);
4052 if (!tp->has_pending_waitstatus ())
4053 tp = nullptr;
4056 if (tp != nullptr
4057 && (tp->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
4058 || tp->stop_reason () == TARGET_STOPPED_BY_HW_BREAKPOINT))
4060 struct regcache *regcache = get_thread_regcache (tp);
4061 struct gdbarch *gdbarch = regcache->arch ();
4062 CORE_ADDR pc;
4063 int discard = 0;
4065 pc = regcache_read_pc (regcache);
4067 if (pc != tp->stop_pc ())
4069 infrun_debug_printf ("PC of %s changed. was=%s, now=%s",
4070 tp->ptid.to_string ().c_str (),
4071 paddress (gdbarch, tp->stop_pc ()),
4072 paddress (gdbarch, pc));
4073 discard = 1;
4075 else if (!breakpoint_inserted_here_p (tp->inf->aspace.get (), pc))
4077 infrun_debug_printf ("previous breakpoint of %s, at %s gone",
4078 tp->ptid.to_string ().c_str (),
4079 paddress (gdbarch, pc));
4081 discard = 1;
4084 if (discard)
4086 infrun_debug_printf ("pending event of %s cancelled.",
4087 tp->ptid.to_string ().c_str ());
4089 tp->clear_pending_waitstatus ();
4090 target_waitstatus ws;
4091 ws.set_spurious ();
4092 tp->set_pending_waitstatus (ws);
4093 tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
4097 if (tp != nullptr)
4099 infrun_debug_printf ("Using pending wait status %s for %s.",
4100 tp->pending_waitstatus ().to_string ().c_str (),
4101 tp->ptid.to_string ().c_str ());
4103 /* Now that we've selected our final event LWP, un-adjust its PC
4104 if it was a software breakpoint (and the target doesn't
4105 always adjust the PC itself). */
4106 if (tp->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
4107 && !target_supports_stopped_by_sw_breakpoint ())
4109 struct regcache *regcache;
4110 struct gdbarch *gdbarch;
4111 int decr_pc;
4113 regcache = get_thread_regcache (tp);
4114 gdbarch = regcache->arch ();
4116 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
4117 if (decr_pc != 0)
4119 CORE_ADDR pc;
4121 pc = regcache_read_pc (regcache);
4122 regcache_write_pc (regcache, pc + decr_pc);
4126 tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
4127 *status = tp->pending_waitstatus ();
4128 tp->clear_pending_waitstatus ();
4130 /* Wake up the event loop again, until all pending events are
4131 processed. */
4132 if (target_is_async_p ())
4133 mark_async_event_handler (infrun_async_inferior_event_token);
4134 return tp->ptid;
4137 /* But if we don't find one, we'll have to wait. */
4139 /* We can't ask a non-async target to do a non-blocking wait, so this will be
4140 a blocking wait. */
4141 if (!target_can_async_p ())
4142 options &= ~TARGET_WNOHANG;
4144 return target_wait (ptid, status, options);
4147 /* Wrapper for target_wait that first checks whether threads have
4148 pending statuses to report before actually asking the target for
4149 more events. Polls for events from all inferiors/targets. */
4151 static bool
4152 do_target_wait (ptid_t wait_ptid, execution_control_state *ecs,
4153 target_wait_flags options)
4155 int num_inferiors = 0;
4156 int random_selector;
4158 /* For fairness, we pick the first inferior/target to poll at random
4159 out of all inferiors that may report events, and then continue
4160 polling the rest of the inferior list starting from that one in a
4161 circular fashion until the whole list is polled once. */
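/* For example (illustrative): with three matching inferiors and a
   random pick of the second one, the polling order is inferior #2,
   then #3, then wrapping around to #1, stopping at the first one
   whose target reports an event. */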
4163 ptid_t wait_ptid_pid {wait_ptid.pid ()};
4164 auto inferior_matches = [&wait_ptid_pid] (inferior *inf)
4166 return (inf->process_target () != nullptr
4167 && ptid_t (inf->pid).matches (wait_ptid_pid));
4170 /* First see how many matching inferiors we have. */
4171 for (inferior *inf : all_inferiors ())
4172 if (inferior_matches (inf))
4173 num_inferiors++;
4175 if (num_inferiors == 0)
4177 ecs->ws.set_ignore ();
4178 return false;
4181 /* Now randomly pick an inferior out of those that matched. */
4182 random_selector = (int)
4183 ((num_inferiors * (double) rand ()) / (RAND_MAX + 1.0));
4185 if (num_inferiors > 1)
4186 infrun_debug_printf ("Found %d inferiors, starting at #%d",
4187 num_inferiors, random_selector);
4189 /* Select the Nth inferior that matched. */
4191 inferior *selected = nullptr;
4193 for (inferior *inf : all_inferiors ())
4194 if (inferior_matches (inf))
4195 if (random_selector-- == 0)
4197 selected = inf;
4198 break;
4201 /* Now poll for events out of each of the matching inferior's
4202 targets, starting from the selected one. */
4204 auto do_wait = [&] (inferior *inf)
4206 ecs->ptid = do_target_wait_1 (inf, wait_ptid, &ecs->ws, options);
4207 ecs->target = inf->process_target ();
4208 return (ecs->ws.kind () != TARGET_WAITKIND_IGNORE);
4211 /* Needed in 'all-stop + target-non-stop' mode, because we end up
4212 here spuriously after the target is all stopped and we've already
4213 reported the stop to the user, polling for events. */
4214 scoped_restore_current_thread restore_thread;
4216 intrusive_list_iterator<inferior> start
4217 = inferior_list.iterator_to (*selected);
4219 for (intrusive_list_iterator<inferior> it = start;
4220 it != inferior_list.end ();
4221 ++it)
4223 inferior *inf = &*it;
4225 if (inferior_matches (inf) && do_wait (inf))
4226 return true;
4229 for (intrusive_list_iterator<inferior> it = inferior_list.begin ();
4230 it != start;
4231 ++it)
4233 inferior *inf = &*it;
4235 if (inferior_matches (inf) && do_wait (inf))
4236 return true;
4239 ecs->ws.set_ignore ();
4240 return false;
4243 /* An event reported by wait_one. */
4245 struct wait_one_event
4247 /* The target the event came out of. */
4248 process_stratum_target *target;
4250 /* The PTID the event was for. */
4251 ptid_t ptid;
4253 /* The waitstatus. */
4254 target_waitstatus ws;
4257 static bool handle_one (const wait_one_event &event);
4258 static int finish_step_over (struct execution_control_state *ecs);
4260 /* Prepare and stabilize the inferior for detaching it. E.g.,
4261 detaching while a thread is displaced stepping is a recipe for
4262 crashing it, as nothing would readjust the PC out of the scratch
4263 pad. */
4265 void
4266 prepare_for_detach (void)
4268 struct inferior *inf = current_inferior ();
4269 ptid_t pid_ptid = ptid_t (inf->pid);
4270 scoped_restore_current_thread restore_thread;
4272 scoped_restore restore_detaching = make_scoped_restore (&inf->detaching, true);
4274 /* Remove all threads of INF from the global step-over chain. We
4275 want to stop any ongoing step-over, not start any new one. */
4276 thread_step_over_list_safe_range range
4277 = make_thread_step_over_list_safe_range (global_thread_step_over_list);
4279 for (thread_info *tp : range)
4280 if (tp->inf == inf)
4282 infrun_debug_printf ("removing thread %s from global step over chain",
4283 tp->ptid.to_string ().c_str ());
4284 global_thread_step_over_chain_remove (tp);
4287 /* If we were already in the middle of an inline step-over, and the
4288 thread stepping belongs to the inferior we're detaching, we need
4289 to restart the threads of other inferiors. */
4290 if (step_over_info.thread != -1)
4292 infrun_debug_printf ("inline step-over in-process while detaching");
4294 thread_info *thr = find_thread_global_id (step_over_info.thread);
4295 if (thr->inf == inf)
4297 /* Since we removed threads of INF from the step-over chain,
4298 we know this won't start a step-over for INF. */
4299 clear_step_over_info ();
4301 if (target_is_non_stop_p ())
4303 /* Start a new step-over in another thread if there's
4304 one that needs it. */
4305 start_step_over ();
4307 /* Restart all other threads (except the
4308 previously-stepping thread, since that one is still
4309 running). */
4310 if (!step_over_info_valid_p ())
4311 restart_threads (thr);
4316 if (displaced_step_in_progress (inf))
4318 infrun_debug_printf ("displaced-stepping in-process while detaching");
4320 /* Stop threads currently displaced stepping, aborting it. */
4322 for (thread_info *thr : inf->non_exited_threads ())
4324 if (thr->displaced_step_state.in_progress ())
4326 if (thr->executing ())
4328 if (!thr->stop_requested)
4330 target_stop (thr->ptid);
4331 thr->stop_requested = true;
4334 else
4335 thr->set_resumed (false);
4339 while (displaced_step_in_progress (inf))
4341 wait_one_event event;
4343 event.target = inf->process_target ();
4344 event.ptid = do_target_wait_1 (inf, pid_ptid, &event.ws, 0);
4346 if (debug_infrun)
4347 print_target_wait_results (pid_ptid, event.ptid, event.ws);
4349 handle_one (event);
4352 /* It's OK to leave some of the threads of INF stopped, since
4353 they'll be detached shortly. */
4357 /* If all-stop, but there exists a non-stop target, stop all threads
4358 now that we're presenting the stop to the user. */
4360 static void
4361 stop_all_threads_if_all_stop_mode ()
4363 if (!non_stop && exists_non_stop_target ())
4364 stop_all_threads ("presenting stop to user in all-stop");
4367 /* Wait for control to return from inferior to debugger.
4369 If the inferior gets a signal, we may decide to start it up again
4370 instead of returning. That is why there is a loop in this function.
4371 When this function actually returns it means the inferior
4372 should be left stopped and GDB should read more commands. */
4374 static void
4375 wait_for_inferior (inferior *inf)
4377 infrun_debug_printf ("wait_for_inferior ()");
4379 SCOPE_EXIT { delete_just_stopped_threads_infrun_breakpoints (); };
4381 /* If an error happens while handling the event, propagate GDB's
4382 knowledge of the executing state to the frontend/user running
4383 state. */
4384 scoped_finish_thread_state finish_state
4385 (inf->process_target (), minus_one_ptid);
4387 while (1)
4389 execution_control_state ecs;
4391 overlay_cache_invalid = 1;
4393 /* Flush target cache before starting to handle each event.
4394 Target was running and cache could be stale. This is just a
4395 heuristic. Running threads may modify target memory, but we
4396 don't get any event. */
4397 target_dcache_invalidate (current_program_space->aspace);
4399 ecs.ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs.ws, 0);
4400 ecs.target = inf->process_target ();
4402 if (debug_infrun)
4403 print_target_wait_results (minus_one_ptid, ecs.ptid, ecs.ws);
4405 /* Now figure out what to do with the result. */
4406 handle_inferior_event (&ecs);
4408 if (!ecs.wait_some_more)
4409 break;
4412 stop_all_threads_if_all_stop_mode ();
4414 /* No error, don't finish the state yet. */
4415 finish_state.release ();
4418 /* Cleanup that reinstalls the readline callback handler, if the
4419 target is running in the background. If while handling the target
4420 event something triggered a secondary prompt, like e.g., a
4421 pagination prompt, we'll have removed the callback handler (see
4422 gdb_readline_wrapper_line). Need to do this as we go back to the
4423 event loop, ready to process further input. Note this has no
4424 effect if the handler hasn't actually been removed, because calling
4425 rl_callback_handler_install resets the line buffer, thus losing
4426 input. */
4428 static void
4429 reinstall_readline_callback_handler_cleanup ()
4431 struct ui *ui = current_ui;
4433 if (!ui->async)
4435 /* We're not going back to the top level event loop yet. Don't
4436 install the readline callback, as it'd prep the terminal,
4437 readline-style (raw, noecho) (e.g., --batch). We'll install
4438 it the next time the prompt is displayed, when we're ready
4439 for input. */
4440 return;
4443 if (ui->command_editing && ui->prompt_state != PROMPT_BLOCKED)
4444 gdb_rl_callback_handler_reinstall ();
4447 /* Clean up the FSMs of threads that are now stopped. In non-stop,
4448 that's just the event thread. In all-stop, that's all threads. In
4449 all-stop, threads that had a pending exit no longer have a reason
4450 to be around, as their FSMs/commands are canceled, so we delete
4451 them. This avoids "info threads" listing such threads as if they
4452 were alive (and failing to read their registers), the user being
4453 able to select and resume them (and that failing), etc. */
4455 static void
4456 clean_up_just_stopped_threads_fsms (struct execution_control_state *ecs)
4458 /* The first clean_up call below assumes the event thread is the current
4459 one. */
4460 if (ecs->event_thread != nullptr)
4461 gdb_assert (ecs->event_thread == inferior_thread ());
4463 if (ecs->event_thread != nullptr
4464 && ecs->event_thread->thread_fsm () != nullptr)
4465 ecs->event_thread->thread_fsm ()->clean_up (ecs->event_thread);
4467 if (!non_stop)
4469 scoped_restore_current_thread restore_thread;
4471 for (thread_info *thr : all_threads_safe ())
4473 if (thr->state == THREAD_EXITED)
4474 continue;
4476 if (thr == ecs->event_thread)
4477 continue;
4479 if (thr->thread_fsm () != nullptr)
4481 switch_to_thread (thr);
4482 thr->thread_fsm ()->clean_up (thr);
4485 /* As we are cancelling the command/FSM of this thread,
4486 whatever was the reason we needed to report a thread
4487 exited event to the user, that reason is gone. Delete
4488 the thread, so that the user doesn't see it in the thread
4489 list, the next proceed doesn't try to resume it, etc. */
4490 if (thr->has_pending_waitstatus ()
4491 && (thr->pending_waitstatus ().kind ()
4492 == TARGET_WAITKIND_THREAD_EXITED))
4493 delete_thread (thr);
4498 /* Helper for all_uis_check_sync_execution_done that works on the
4499 current UI. */
4501 static void
4502 check_curr_ui_sync_execution_done (void)
4504 struct ui *ui = current_ui;
4506 if (ui->prompt_state == PROMPT_NEEDED
4507 && ui->async
4508 && !gdb_in_secondary_prompt_p (ui))
4510 target_terminal::ours ();
4511 top_level_interpreter ()->on_sync_execution_done ();
4512 ui->register_file_handler ();
4516 /* See infrun.h. */
4518 void
4519 all_uis_check_sync_execution_done (void)
4521 SWITCH_THRU_ALL_UIS ()
4523 check_curr_ui_sync_execution_done ();
4527 /* See infrun.h. */
4529 void
4530 all_uis_on_sync_execution_starting (void)
4532 SWITCH_THRU_ALL_UIS ()
4534 if (current_ui->prompt_state == PROMPT_NEEDED)
4535 async_disable_stdin ();
4539 /* A quit_handler callback installed while we're handling inferior
4540 events. */
4542 static void
4543 infrun_quit_handler ()
4545 if (target_terminal::is_ours ())
4547 /* Do nothing.
4549 default_quit_handler would throw a quit in this case, but if
4550 we're handling an event while we have the terminal, it means
4551 the target is running a background execution command, and
4552 thus when users press Ctrl-C, they mean to interrupt
4553 whatever command they were executing in the command line.
4554 E.g.:
4556 (gdb) c&
4557 (gdb) foo bar whatever<ctrl-c>
4559 That Ctrl-C should clear the input line, not interrupt event
4560 handling if it happens that the user types Ctrl-C at just the
4561 "wrong" time!
4563 It's as if background event handling were done by a
4564 separate background thread.
4566 To be clear, the Ctrl-C is not lost -- it will be processed
4567 by the next QUIT call once we're out of fetch_inferior_event
4568 again. */
4570 else
4572 if (check_quit_flag ())
4573 target_pass_ctrlc ();
4577 /* Asynchronous version of wait_for_inferior. It is called by the
4578 event loop whenever a change of state is detected on the file
4579 descriptor corresponding to the target. It can be called more than
4580 once to complete a single execution command. If it is the last time
4582 that this function is called for a single execution command, then
4583 report to the user that the inferior has stopped, and do the
4584 necessary cleanups. */
4586 void
4587 fetch_inferior_event ()
4589 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
4591 execution_control_state ecs;
4592 int cmd_done = 0;
4594 /* Events are always processed with the main UI as current UI. This
4595 way, warnings, debug output, etc. are always consistently sent to
4596 the main console. */
4597 scoped_restore save_ui = make_scoped_restore (&current_ui, main_ui);
4599 /* Temporarily disable pagination. Otherwise, the user would be
4600 given an option to press 'q' to quit, which would cause an early
4601 exit and could leave GDB in a half-baked state. */
4602 scoped_restore save_pagination
4603 = make_scoped_restore (&pagination_enabled, false);
4605 /* Install a quit handler that does nothing if we have the terminal
4606 (meaning the target is running a background execution command),
4607 so that Ctrl-C never interrupts GDB before the event is fully
4608 handled. */
4609 scoped_restore restore_quit_handler
4610 = make_scoped_restore (&quit_handler, infrun_quit_handler);
4612 /* Make sure a SIGINT does not interrupt an extension language while
4613 we're handling an event. That could interrupt a Python unwinder
4614 or a Python observer or some such. A Ctrl-C should either be
4615 forwarded to the inferior if the inferior has the terminal, or,
4616 if GDB has the terminal, should interrupt the command the user is
4617 typing in the CLI. */
4618 scoped_disable_cooperative_sigint_handling restore_coop_sigint;
4620 /* End up with readline processing input, if necessary. */
4622 SCOPE_EXIT { reinstall_readline_callback_handler_cleanup (); };
4624 /* We're handling a live event, so make sure we're doing live
4625 debugging. If we're looking at traceframes while the target is
4626 running, we're going to need to get back to that mode after
4627 handling the event. */
4628 std::optional<scoped_restore_current_traceframe> maybe_restore_traceframe;
4629 if (non_stop)
4631 maybe_restore_traceframe.emplace ();
4632 set_current_traceframe (-1);
4635 /* The user/frontend should not notice a thread switch due to
4636 internal events. Make sure we revert to the user selected
4637 thread and frame after handling the event and running any
4638 breakpoint commands. */
4639 scoped_restore_current_thread restore_thread;
4641 overlay_cache_invalid = 1;
4642 /* Flush target cache before starting to handle each event. Target
4643 was running and cache could be stale. This is just a heuristic.
4644 Running threads may modify target memory, but we don't get any
4645 event. */
4646 target_dcache_invalidate (current_program_space->aspace);
4648 scoped_restore save_exec_dir
4649 = make_scoped_restore (&execution_direction,
4650 target_execution_direction ());
4652 /* Allow targets to pause their resumed threads while we handle
4653 the event. */
4654 scoped_disable_commit_resumed disable_commit_resumed ("handling event");
4656 /* Is the current thread performing an inferior function call as part
4657 of a breakpoint condition evaluation? */
4658 bool in_cond_eval = (inferior_ptid != null_ptid
4659 && inferior_thread ()->control.in_cond_eval);
4661 /* If the thread is in the middle of the condition evaluation, wait for
4662 an event from the current thread. Otherwise, wait for an event from
4663 any thread. */
4664 ptid_t waiton_ptid = in_cond_eval ? inferior_ptid : minus_one_ptid;
4666 if (!do_target_wait (waiton_ptid, &ecs, TARGET_WNOHANG))
4668 infrun_debug_printf ("do_target_wait returned no event");
4669 disable_commit_resumed.reset_and_commit ();
4670 return;
4673 gdb_assert (ecs.ws.kind () != TARGET_WAITKIND_IGNORE);
4675 /* Switch to the inferior that generated the event, so we can do
4676 target calls. If the event was not associated with a ptid, just switch to the target that reported it. */
4677 if (ecs.ptid != null_ptid
4678 && ecs.ptid != minus_one_ptid)
4679 switch_to_inferior_no_thread (find_inferior_ptid (ecs.target, ecs.ptid));
4680 else
4681 switch_to_target_no_thread (ecs.target);
4683 if (debug_infrun)
4684 print_target_wait_results (minus_one_ptid, ecs.ptid, ecs.ws);
4686 /* If an error happens while handling the event, propagate GDB's
4687 knowledge of the executing state to the frontend/user running
4688 state. */
4689 ptid_t finish_ptid = !target_is_non_stop_p () ? minus_one_ptid : ecs.ptid;
4690 scoped_finish_thread_state finish_state (ecs.target, finish_ptid);
4692 /* These scope exits run before the scoped_restore_current_thread above,
4693 so that they still apply to the thread which threw the exception. */
4694 auto defer_bpstat_clear
4695 = make_scope_exit (bpstat_clear_actions);
4696 auto defer_delete_threads
4697 = make_scope_exit (delete_just_stopped_threads_infrun_breakpoints);
4699 int stop_id = get_stop_id ();
4701 /* Now figure out what to do with the result. */
4702 handle_inferior_event (&ecs);
4704 if (!ecs.wait_some_more)
4706 struct inferior *inf = find_inferior_ptid (ecs.target, ecs.ptid);
4707 bool should_stop = true;
4708 struct thread_info *thr = ecs.event_thread;
4710 delete_just_stopped_threads_infrun_breakpoints ();
4712 if (thr != nullptr && thr->thread_fsm () != nullptr)
4713 should_stop = thr->thread_fsm ()->should_stop (thr);
4715 if (!should_stop)
4717 keep_going (&ecs);
4719 else
4721 bool should_notify_stop = true;
4722 bool proceeded = false;
4724 /* If the thread that stopped just completed an inferior
4725 function call as part of a condition evaluation, then we
4726 don't want to stop all the other threads. */
4727 if (ecs.event_thread == nullptr
4728 || !ecs.event_thread->control.in_cond_eval)
4729 stop_all_threads_if_all_stop_mode ();
4731 clean_up_just_stopped_threads_fsms (&ecs);
4733 if (stop_id != get_stop_id ())
4735 /* If the stop-id has changed then a stop has already been
4736 presented to the user in handle_inferior_event, this is
4737 likely a failed inferior call. As the stop has already
4738 been announced then we should not notify again.
4740 Also, if the prompt state is not PROMPT_NEEDED then GDB
4741 will not be ready for user input after this function. */
4742 should_notify_stop = false;
4743 gdb_assert (current_ui->prompt_state == PROMPT_NEEDED);
4745 else if (thr != nullptr && thr->thread_fsm () != nullptr)
4746 should_notify_stop
4747 = thr->thread_fsm ()->should_notify_stop ();
4749 if (should_notify_stop)
4751 /* We may not find an inferior if this was a process exit. */
4752 if (inf == nullptr || inf->control.stop_soon == NO_STOP_QUIETLY)
4753 proceeded = normal_stop ();
4756 if (!proceeded && !in_cond_eval)
4758 inferior_event_handler (INF_EXEC_COMPLETE);
4759 cmd_done = 1;
4762 /* If we got a TARGET_WAITKIND_NO_RESUMED event, then the
4763 previously selected thread is gone. We have two
4764 choices - switch to no thread selected, or restore the
4765 previously selected thread (now exited). We chose the latter, just
4766 later, just because that's what GDB used to do. After
4767 this, "info threads" says "The current thread <Thread
4768 ID 2> has terminated." instead of "No thread
4769 selected.". */
4770 if (!non_stop
4771 && cmd_done
4772 && ecs.ws.kind () != TARGET_WAITKIND_NO_RESUMED)
4773 restore_thread.dont_restore ();
4777 defer_delete_threads.release ();
4778 defer_bpstat_clear.release ();
4780 /* No error, don't finish the thread states yet. */
4781 finish_state.release ();
4783 disable_commit_resumed.reset_and_commit ();
4785 /* This scope is used to ensure that readline callbacks are
4786 reinstalled here. */
4789 /* Handling this event might have caused some inferiors to become prunable.
4790 For example, the exit of an inferior that was automatically added. Try
4791 to get rid of them. Keeping those around slows down things linearly.
4793 Note that this never removes the current inferior. Therefore, call this
4794 after RESTORE_THREAD went out of scope, in case the event inferior (which was
4795 temporarily made the current inferior) is meant to be deleted.
4797 Call this before all_uis_check_sync_execution_done, so that notifications about
4798 removed inferiors appear before the prompt. */
4799 prune_inferiors ();
4801 /* If a UI was in sync execution mode, and now isn't, restore its
4802 prompt (a synchronous execution command has finished, and we're
4803 ready for input). */
4804 all_uis_check_sync_execution_done ();
4806 if (cmd_done
4807 && exec_done_display_p
4808 && (inferior_ptid == null_ptid
4809 || inferior_thread ()->state != THREAD_RUNNING))
4810 gdb_printf (_("completed.\n"));
4813 /* See infrun.h. */
4815 void
4816 set_step_info (thread_info *tp, const frame_info_ptr &frame,
4817 struct symtab_and_line sal)
4819 /* This can be removed once this function no longer implicitly relies on the
4820 inferior_ptid value. */
4821 gdb_assert (inferior_ptid == tp->ptid);
4823 tp->control.step_frame_id = get_frame_id (frame);
4824 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
4826 tp->current_symtab = sal.symtab;
4827 tp->current_line = sal.line;
4829 infrun_debug_printf
4830 ("symtab = %s, line = %d, step_frame_id = %s, step_stack_frame_id = %s",
4831 tp->current_symtab != nullptr ? tp->current_symtab->filename : "<null>",
4832 tp->current_line,
4833 tp->control.step_frame_id.to_string ().c_str (),
4834 tp->control.step_stack_frame_id.to_string ().c_str ());
4837 /* Clear context switchable stepping state. */
4839 void
4840 init_thread_stepping_state (struct thread_info *tss)
4842 tss->stepped_breakpoint = 0;
4843 tss->stepping_over_breakpoint = 0;
4844 tss->stepping_over_watchpoint = 0;
4845 tss->step_after_step_resume_breakpoint = 0;
4848 /* See infrun.h. */
4850 void
4851 set_last_target_status (process_stratum_target *target, ptid_t ptid,
4852 const target_waitstatus &status)
4854 target_last_proc_target = target;
4855 target_last_wait_ptid = ptid;
4856 target_last_waitstatus = status;
4859 /* See infrun.h. */
4861 void
4862 get_last_target_status (process_stratum_target **target, ptid_t *ptid,
4863 target_waitstatus *status)
4865 if (target != nullptr)
4866 *target = target_last_proc_target;
4867 if (ptid != nullptr)
4868 *ptid = target_last_wait_ptid;
4869 if (status != nullptr)
4870 *status = target_last_waitstatus;
4873 /* See infrun.h. */
4875 void
4876 nullify_last_target_wait_ptid (void)
4878 target_last_proc_target = nullptr;
4879 target_last_wait_ptid = minus_one_ptid;
4880 target_last_waitstatus = {};
4883 /* Switch thread contexts. */
4885 static void
4886 context_switch (execution_control_state *ecs)
4888 if (ecs->ptid != inferior_ptid
4889 && (inferior_ptid == null_ptid
4890 || ecs->event_thread != inferior_thread ()))
4892 infrun_debug_printf ("Switching context from %s to %s",
4893 inferior_ptid.to_string ().c_str (),
4894 ecs->ptid.to_string ().c_str ());
4897 switch_to_thread (ecs->event_thread);
4900 /* If the target can't tell whether we've hit breakpoints
4901 (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
4902 check whether that could have been caused by a breakpoint. If so,
4903 adjust the PC, per gdbarch_decr_pc_after_break. */
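/* For example (illustrative): on x86, a software breakpoint is an
   int3 whose trap leaves the PC one byte past the breakpoint address,
   and gdbarch_decr_pc_after_break is 1, so we write back PC - 1 once
   we decide the SIGTRAP was caused by our breakpoint. */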
4905 static void
4906 adjust_pc_after_break (struct thread_info *thread,
4907 const target_waitstatus &ws)
4909 struct regcache *regcache;
4910 struct gdbarch *gdbarch;
4911 CORE_ADDR breakpoint_pc, decr_pc;
4913 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
4914 we aren't, just return.
4916 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
4917 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
4918 implemented by software breakpoints should be handled through the normal
4919 breakpoint layer.
4921 NOTE drow/2004-01-31: On some targets, breakpoints may generate
4922 different signals (SIGILL or SIGEMT for instance), but it is less
4923 clear where the PC is pointing afterwards. It may not match
4924 gdbarch_decr_pc_after_break. I don't know any specific target that
4925 generates these signals at breakpoints (the code has been in GDB since at
4926 least 1992) so I cannot guess how to handle them here.
4928 In earlier versions of GDB, a target with
4929 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
4930 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
4931 target with both of these set in GDB history, and it seems unlikely to be
4932 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
4934 if (ws.kind () != TARGET_WAITKIND_STOPPED)
4935 return;
4937 if (ws.sig () != GDB_SIGNAL_TRAP)
4938 return;
4940 /* In reverse execution, when a breakpoint is hit, the instruction
4941 under it has already been de-executed. The reported PC always
4942 points at the breakpoint address, so adjusting it further would
4943 be wrong. E.g., consider this case on a decr_pc_after_break == 1
4944 architecture:
4946 B1 0x08000000 : INSN1
4947 B2 0x08000001 : INSN2
4948 0x08000002 : INSN3
4949 PC -> 0x08000003 : INSN4
4951 Say you're stopped at 0x08000003 as above. Reverse continuing
4952 from that point should hit B2 as below. Reading the PC when the
4953 SIGTRAP is reported should read 0x08000001 and INSN2 should have
4954 been de-executed already.
4956 B1 0x08000000 : INSN1
4957 B2 PC -> 0x08000001 : INSN2
4958 0x08000002 : INSN3
4959 0x08000003 : INSN4
4961 We can't apply the same logic as for forward execution, because
4962 we would wrongly adjust the PC to 0x08000000, since there's a
4963 breakpoint at PC - 1. We'd then report a hit on B1, although
4964 INSN1 hadn't been de-executed yet. Doing nothing is the correct
4965 behavior. */
4966 if (execution_direction == EXEC_REVERSE)
4967 return;
4969 /* If the target can tell whether the thread hit a SW breakpoint,
4970 trust it. Targets that can tell also adjust the PC
4971 themselves. */
4972 if (target_supports_stopped_by_sw_breakpoint ())
4973 return;
4975 /* Note that relying on whether a breakpoint is planted in memory to
4976 determine this can fail. E.g., the breakpoint could have been
4977 removed since. Or the thread could have been told to step an
4978 instruction the size of a breakpoint instruction, and only
4979 _after_ was a breakpoint inserted at its address. */
4981 /* If this target does not decrement the PC after breakpoints, then
4982 we have nothing to do. */
4983 regcache = get_thread_regcache (thread);
4984 gdbarch = regcache->arch ();
4986 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
4987 if (decr_pc == 0)
4988 return;
4990 const address_space *aspace = thread->inf->aspace.get ();
4992 /* Find the location where (if we've hit a breakpoint) the
4993 breakpoint would be. */
4994 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
4996 /* If the target can't tell whether a software breakpoint triggered,
4997 fall back to figuring it out based on breakpoints we think were
4998 inserted in the target, and on whether the thread was stepped or
4999 continued. */
5001 /* Check whether there actually is a software breakpoint inserted at
5002 that location.
5004 If in non-stop mode, a race condition is possible where we've
5005 removed a breakpoint, but stop events for that breakpoint were
5006 already queued and arrive later. To suppress those spurious
5007 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
5008 and retire them after a number of stop events are reported. Note
5009 this is a heuristic and can thus get confused. The real fix is
5010 to get the "stopped by SW BP and needs adjustment" info out of
5011 the target/kernel (and thus never reach here; see above). */
5012 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
5013 || (target_is_non_stop_p ()
5014 && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
5016 std::optional<scoped_restore_tmpl<int>> restore_operation_disable;
5018 if (record_full_is_used ())
5019 restore_operation_disable.emplace
5020 (record_full_gdb_operation_disable_set ());
5022 /* When using hardware single-step, a SIGTRAP is reported for both
5023 a completed single-step and a software breakpoint. Need to
5024 differentiate between the two, as the latter needs adjusting
5025 but the former does not.
5027 The SIGTRAP can be due to a completed hardware single-step only if
5028 - we didn't insert software single-step breakpoints
5029 - this thread is currently being stepped
5031 If any of these events did not occur, we must have stopped due
5032 to hitting a software breakpoint, and have to back up to the
5033 breakpoint address.
5035 As a special case, we could have hardware single-stepped a
5036 software breakpoint. In this case (prev_pc == breakpoint_pc),
5037 we also need to back up to the breakpoint address. */
5039 if (thread_has_single_step_breakpoints_set (thread)
5040 || !currently_stepping (thread)
5041 || (thread->stepped_breakpoint
5042 && thread->prev_pc == breakpoint_pc))
5043 regcache_write_pc (regcache, breakpoint_pc);
5047 static bool
5048 stepped_in_from (const frame_info_ptr &initial_frame, frame_id step_frame_id)
5050 frame_info_ptr frame = initial_frame;
5052 for (frame = get_prev_frame (frame);
5053 frame != nullptr;
5054 frame = get_prev_frame (frame))
5056 if (get_frame_id (frame) == step_frame_id)
5057 return true;
5059 if (get_frame_type (frame) != INLINE_FRAME)
5060 break;
5063 return false;
5066 /* Look for an inline frame that is marked for skip.
5067 If PREV_FRAME is TRUE start at the previous frame,
5068 otherwise start at the current frame. Stop at the
5069 first non-inline frame, or at the frame where the
5070 step started. */
5072 static bool
5073 inline_frame_is_marked_for_skip (bool prev_frame, struct thread_info *tp)
5075 frame_info_ptr frame = get_current_frame ();
5077 if (prev_frame)
5078 frame = get_prev_frame (frame);
5080 for (; frame != nullptr; frame = get_prev_frame (frame))
5082 const char *fn = nullptr;
5083 symtab_and_line sal;
5084 struct symbol *sym;
5086 if (get_frame_id (frame) == tp->control.step_frame_id)
5087 break;
5088 if (get_frame_type (frame) != INLINE_FRAME)
5089 break;
5091 sal = find_frame_sal (frame);
5092 sym = get_frame_function (frame);
5094 if (sym != nullptr)
5095 fn = sym->print_name ();
5097 if (sal.line != 0
5098 && function_name_is_marked_for_skip (fn, sal))
5099 return true;
5102 return false;
5105 /* If the event thread has the stop requested flag set, pretend it
5106 stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
5107 target_stop). */
5109 static bool
5110 handle_stop_requested (struct execution_control_state *ecs)
5112 if (ecs->event_thread->stop_requested)
5114 ecs->ws.set_stopped (GDB_SIGNAL_0);
5115 handle_signal_stop (ecs);
5116 return true;
5118 return false;
5121 /* Auxiliary function that handles syscall entry/return events.
5122 It returns true if the inferior should keep going (and GDB
5123 should ignore the event), or false if the event deserves to be
5124 processed. */
5126 static bool
5127 handle_syscall_event (struct execution_control_state *ecs)
5129 struct regcache *regcache;
5130 int syscall_number;
5132 context_switch (ecs);
5134 regcache = get_thread_regcache (ecs->event_thread);
5135 syscall_number = ecs->ws.syscall_number ();
5136 ecs->event_thread->set_stop_pc (regcache_read_pc (regcache));
5138 if (catch_syscall_enabled ()
5139 && catching_syscall_number (syscall_number))
5141 infrun_debug_printf ("syscall number=%d", syscall_number);
5143 ecs->event_thread->control.stop_bpstat
5144 = bpstat_stop_status_nowatch (ecs->event_thread->inf->aspace.get (),
5145 ecs->event_thread->stop_pc (),
5146 ecs->event_thread, ecs->ws);
5148 if (handle_stop_requested (ecs))
5149 return false;
5151 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
5153 /* Catchpoint hit. */
5154 return false;
5158 if (handle_stop_requested (ecs))
5159 return false;
5161 /* If no catchpoint triggered for this, then keep going. */
5162 keep_going (ecs);
5164 return true;
5167 /* Lazily fill in the execution_control_state's stop_func_* fields. */
5169 static void
5170 fill_in_stop_func (struct gdbarch *gdbarch,
5171 struct execution_control_state *ecs)
5173 if (!ecs->stop_func_filled_in)
5175 const block *block;
5176 const general_symbol_info *gsi;
5178 /* Don't care about return value; stop_func_start and stop_func_name
5179 will both be 0 if it doesn't work. */
5180 find_pc_partial_function_sym (ecs->event_thread->stop_pc (),
5181 &gsi,
5182 &ecs->stop_func_start,
5183 &ecs->stop_func_end,
5184 &block);
5185 ecs->stop_func_name = gsi == nullptr ? nullptr : gsi->print_name ();
5187 /* The call to find_pc_partial_function_sym, above, will set
5188 stop_func_start and stop_func_end to the start and end
5189 of the range containing the stop pc. If this range
5190 contains the entry pc for the block (which is always the
5191 case for contiguous blocks), advance stop_func_start past
5192 the function's start offset and entrypoint. Note that
5193 stop_func_start is NOT advanced when in a range of a
5194 non-contiguous block that does not contain the entry pc. */
5195 if (block != nullptr
5196 && ecs->stop_func_start <= block->entry_pc ()
5197 && block->entry_pc () < ecs->stop_func_end)
5199 ecs->stop_func_start
5200 += gdbarch_deprecated_function_start_offset (gdbarch);
5202 /* PowerPC functions have a Local Entry Point (LEP) and a Global
5203 Entry Point (GEP). There is only one Entry Point (GEP = LEP) for
5204 other architectures. */
5205 ecs->stop_func_alt_start = ecs->stop_func_start;
5207 if (gdbarch_skip_entrypoint_p (gdbarch))
5208 ecs->stop_func_start
5209 = gdbarch_skip_entrypoint (gdbarch, ecs->stop_func_start);
5212 ecs->stop_func_filled_in = 1;
5217 /* Return the STOP_SOON field of the inferior pointed at by ECS. */
5219 static enum stop_kind
5220 get_inferior_stop_soon (execution_control_state *ecs)
5222 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
5224 gdb_assert (inf != nullptr);
5225 return inf->control.stop_soon;
5228 /* Poll for one event out of the current target. Store the resulting
5229 waitstatus in WS, and return the event ptid. Does not block. */
5231 static ptid_t
5232 poll_one_curr_target (struct target_waitstatus *ws)
5234 ptid_t event_ptid;
5236 overlay_cache_invalid = 1;
5238 /* Flush target cache before starting to handle each event.
5239 Target was running and cache could be stale. This is just a
5240 heuristic. Running threads may modify target memory, but we
5241 don't get any event. */
5242 target_dcache_invalidate (current_program_space->aspace);
5244 event_ptid = target_wait (minus_one_ptid, ws, TARGET_WNOHANG);
5246 if (debug_infrun)
5247 print_target_wait_results (minus_one_ptid, event_ptid, *ws);
5249 return event_ptid;
5252 /* Wait for one event out of any target. */
5254 static wait_one_event
5255 wait_one ()
5257 while (1)
5259 for (inferior *inf : all_inferiors ())
5261 process_stratum_target *target = inf->process_target ();
5262 if (target == nullptr
5263 || !target->is_async_p ()
5264 || !target->threads_executing)
5265 continue;
5267 switch_to_inferior_no_thread (inf);
5269 wait_one_event event;
5270 event.target = target;
5271 event.ptid = poll_one_curr_target (&event.ws);
5273 if (event.ws.kind () == TARGET_WAITKIND_NO_RESUMED)
5275 /* If nothing is resumed, remove the target from the
5276 event loop. */
5277 target_async (false);
5279 else if (event.ws.kind () != TARGET_WAITKIND_IGNORE)
5280 return event;
5283 /* Block waiting for some event. */
5285 fd_set readfds;
5286 int nfds = 0;
5288 FD_ZERO (&readfds);
5290 for (inferior *inf : all_inferiors ())
5292 process_stratum_target *target = inf->process_target ();
5293 if (target == nullptr
5294 || !target->is_async_p ()
5295 || !target->threads_executing)
5296 continue;
5298 int fd = target->async_wait_fd ();
5299 FD_SET (fd, &readfds);
5300 if (nfds <= fd)
5301 nfds = fd + 1;
5304 if (nfds == 0)
5306 /* No waitable targets left. All must be stopped. */
5307 infrun_debug_printf ("no waitable targets left");
5309 target_waitstatus ws;
5310 ws.set_no_resumed ();
5311 return {nullptr, minus_one_ptid, std::move (ws)};
5314 QUIT;
5316 int numfds = interruptible_select (nfds, &readfds, 0, nullptr, 0);
5317 if (numfds < 0)
5319 if (errno == EINTR)
5320 continue;
5321 else
5322 perror_with_name ("interruptible_select");
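/* Editorial sketch (not part of infrun.c): a self-contained
   illustration of the blocking half of wait_one above: select() over
   a set of event file descriptors, rebuilding the set and retrying
   whenever the call is interrupted by a signal, much like the EINTR
   handling around interruptible_select.  The fds argument is a
   hypothetical stand-in for the per-target async_wait_fd values.  */

#include <sys/select.h>
#include <cerrno>
#include <vector>

static int
sketch_block_on_event_fds (const std::vector<int> &fds)
{
  while (true)
    {
      fd_set readfds;
      int nfds = 0;

      FD_ZERO (&readfds);
      for (int fd : fds)
	{
	  FD_SET (fd, &readfds);
	  if (nfds <= fd)
	    nfds = fd + 1;
	}

      /* Block until at least one descriptor is readable.  */
      int n = select (nfds, &readfds, nullptr, nullptr, nullptr);
      if (n >= 0)
	return n;
      if (errno != EINTR)
	return -1;
      /* Interrupted by a signal: loop, rebuild the set, and retry.  */
    }
}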
5327 /* Save the thread's event and stop reason to process it later. */
5329 static void
5330 save_waitstatus (struct thread_info *tp, const target_waitstatus &ws)
5332 infrun_debug_printf ("saving status %s for %s",
5333 ws.to_string ().c_str (),
5334 tp->ptid.to_string ().c_str ());
5336 /* Record for later. */
5337 tp->set_pending_waitstatus (ws);
5339 if (ws.kind () == TARGET_WAITKIND_STOPPED
5340 && ws.sig () == GDB_SIGNAL_TRAP)
5342 struct regcache *regcache = get_thread_regcache (tp);
5343 const address_space *aspace = tp->inf->aspace.get ();
5344 CORE_ADDR pc = regcache_read_pc (regcache);
5346 adjust_pc_after_break (tp, tp->pending_waitstatus ());
5348 scoped_restore_current_thread restore_thread;
5349 switch_to_thread (tp);
5351 if (target_stopped_by_watchpoint ())
5352 tp->set_stop_reason (TARGET_STOPPED_BY_WATCHPOINT);
5353 else if (target_supports_stopped_by_sw_breakpoint ()
5354 && target_stopped_by_sw_breakpoint ())
5355 tp->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT);
5356 else if (target_supports_stopped_by_hw_breakpoint ()
5357 && target_stopped_by_hw_breakpoint ())
5358 tp->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT);
5359 else if (!target_supports_stopped_by_hw_breakpoint ()
5360 && hardware_breakpoint_inserted_here_p (aspace, pc))
5361 tp->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT);
5362 else if (!target_supports_stopped_by_sw_breakpoint ()
5363 && software_breakpoint_inserted_here_p (aspace, pc))
5364 tp->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT);
5365 else if (!thread_has_single_step_breakpoints_set (tp)
5366 && currently_stepping (tp))
5367 tp->set_stop_reason (TARGET_STOPPED_BY_SINGLE_STEP);
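/* Editorial sketch (not part of infrun.c): an illustration of the
   fallback order used above when recording a stop reason: believe the
   target when it is able to report the reason itself, and only infer
   from GDB's own breakpoint bookkeeping when it is not.  All names
   here are hypothetical.  */

enum class sketch_stop_reason
{
  unknown,
  sw_breakpoint
};

static sketch_stop_reason
sketch_classify_sw_bp_stop (bool target_can_report_sw_bp,
			    bool target_reports_sw_bp_hit,
			    bool gdb_has_sw_bp_at_stop_pc)
{
  if (target_can_report_sw_bp)
    /* Trust the target's own report.  */
    return (target_reports_sw_bp_hit
	    ? sketch_stop_reason::sw_breakpoint
	    : sketch_stop_reason::unknown);

  /* The target can't tell; fall back to what GDB believes is inserted
     at the stop PC.  */
  return (gdb_has_sw_bp_at_stop_pc
	  ? sketch_stop_reason::sw_breakpoint
	  : sketch_stop_reason::unknown);
}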
5371 /* Mark the non-executing threads accordingly. In all-stop, all
5372 threads of all processes are stopped when we get any event
5373 reported. In non-stop mode, only the event thread stops. */
5375 static void
5376 mark_non_executing_threads (process_stratum_target *target,
5377 ptid_t event_ptid,
5378 const target_waitstatus &ws)
5380 ptid_t mark_ptid;
5382 if (!target_is_non_stop_p ())
5383 mark_ptid = minus_one_ptid;
5384 else if (ws.kind () == TARGET_WAITKIND_SIGNALLED
5385 || ws.kind () == TARGET_WAITKIND_EXITED)
5387 /* If we're handling a process exit in non-stop mode, even
5388 though threads haven't been deleted yet, one would think
5389 that there is nothing to do, as threads of the dead process
5390 will be soon deleted, and threads of any other process were
5391 left running. However, on some targets, threads survive a
5392 process exit event. E.g., for the "checkpoint" command,
5393 when the current checkpoint/fork exits, linux-fork.c
5394 automatically switches to another fork from within
5395 target_mourn_inferior, by associating the same
5396 inferior/thread to another fork. We haven't mourned yet at
5397 this point, but we must mark any threads left in the
5398 process as not-executing so that finish_thread_state marks
5399 them stopped (in the user's perspective) if/when we present
5400 the stop to the user. */
5401 mark_ptid = ptid_t (event_ptid.pid ());
5403 else
5404 mark_ptid = event_ptid;
5406 set_executing (target, mark_ptid, false);
5408 /* Likewise the resumed flag. */
5409 set_resumed (target, mark_ptid, false);
5412 /* Handle one event after stopping threads. If the eventing thread
5413 reports back any interesting event, we leave it pending. If the
5414 eventing thread was in the middle of a displaced step, we
5415 cancel/finish it, and unless the thread's inferior is being
5416 detached, put the thread back in the step-over chain. Returns true
5417 if there are no resumed threads left in the target (thus there's no
5418 point in waiting further), false otherwise. */
5420 static bool
5421 handle_one (const wait_one_event &event)
5423 infrun_debug_printf
5424 ("%s %s", event.ws.to_string ().c_str (),
5425 event.ptid.to_string ().c_str ());
5427 if (event.ws.kind () == TARGET_WAITKIND_NO_RESUMED)
5429 /* All resumed threads exited. */
5430 return true;
5432 else if (event.ws.kind () == TARGET_WAITKIND_THREAD_EXITED
5433 || event.ws.kind () == TARGET_WAITKIND_EXITED
5434 || event.ws.kind () == TARGET_WAITKIND_SIGNALLED)
5436 /* One thread/process exited/signalled. */
5438 thread_info *t = nullptr;
5440 /* The target may have reported just a pid. If so, try
5441 the first non-exited thread. */
5442 if (event.ptid.is_pid ())
5444 int pid = event.ptid.pid ();
5445 inferior *inf = find_inferior_pid (event.target, pid);
5446 for (thread_info *tp : inf->non_exited_threads ())
5448 t = tp;
5449 break;
5452 /* If there is no available thread, the event would
5453 have to be appended to a per-inferior event list,
5454 which does not exist (and if it did, we'd have
5455 to adjust the run control commands to be able to
5456 resume such an inferior). We assert here instead
5457 of going into an infinite loop. */
5458 gdb_assert (t != nullptr);
5460 infrun_debug_printf
5461 ("using %s", t->ptid.to_string ().c_str ());
5463 else
5465 t = event.target->find_thread (event.ptid);
5466 /* Check if this is the first time we see this thread.
5467 Don't bother adding if it individually exited. */
5468 if (t == nullptr
5469 && event.ws.kind () != TARGET_WAITKIND_THREAD_EXITED)
5470 t = add_thread (event.target, event.ptid);
5473 if (t != nullptr)
5475 /* Set the threads as non-executing to avoid
5476 another stop attempt on them. */
5477 switch_to_thread_no_regs (t);
5478 mark_non_executing_threads (event.target, event.ptid,
5479 event.ws);
5480 save_waitstatus (t, event.ws);
5481 t->stop_requested = false;
5483 if (event.ws.kind () == TARGET_WAITKIND_THREAD_EXITED)
5485 if (displaced_step_finish (t, event.ws)
5486 != DISPLACED_STEP_FINISH_STATUS_OK)
5488 gdb_assert_not_reached ("displaced_step_finish on "
5489 "exited thread failed");
5494 else
5496 thread_info *t = event.target->find_thread (event.ptid);
5497 if (t == nullptr)
5498 t = add_thread (event.target, event.ptid);
5500 t->stop_requested = false;
5501 t->set_executing (false);
5502 t->set_resumed (false);
5503 t->control.may_range_step = 0;
5505 /* This may be the first time we see the inferior report
5506 a stop. */
5507 if (t->inf->needs_setup)
5509 switch_to_thread_no_regs (t);
5510 setup_inferior (0);
5513 if (event.ws.kind () == TARGET_WAITKIND_STOPPED
5514 && event.ws.sig () == GDB_SIGNAL_0)
5516 /* We caught the event that we intended to catch, so
5517 there's no event to save as pending. */
5519 if (displaced_step_finish (t, event.ws)
5520 == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED)
5522 /* Add it back to the step-over queue. */
5523 infrun_debug_printf
5524 ("displaced-step of %s canceled",
5525 t->ptid.to_string ().c_str ());
5527 t->control.trap_expected = 0;
5528 if (!t->inf->detaching)
5529 global_thread_step_over_chain_enqueue (t);
5532 else
5534 struct regcache *regcache;
5536 infrun_debug_printf
5537 ("target_wait %s, saving status for %s",
5538 event.ws.to_string ().c_str (),
5539 t->ptid.to_string ().c_str ());
5541 /* Record for later. */
5542 save_waitstatus (t, event.ws);
5544 if (displaced_step_finish (t, event.ws)
5545 == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED)
5547 /* Add it back to the step-over queue. */
5548 t->control.trap_expected = 0;
5549 if (!t->inf->detaching)
5550 global_thread_step_over_chain_enqueue (t);
5553 regcache = get_thread_regcache (t);
5554 t->set_stop_pc (regcache_read_pc (regcache));
5556 infrun_debug_printf ("saved stop_pc=%s for %s "
5557 "(currently_stepping=%d)",
5558 paddress (current_inferior ()->arch (),
5559 t->stop_pc ()),
5560 t->ptid.to_string ().c_str (),
5561 currently_stepping (t));
5565 return false;
5568 /* Helper for stop_all_threads. wait_one waits for events until it
5569 sees a TARGET_WAITKIND_NO_RESUMED event. When it sees one, it
5570 disables target_async for the target to stop waiting for events
5571 from it. TARGET_WAITKIND_NO_RESUMED can be delayed, though;
5572 consider debugging against gdbserver:
5574 #1 - Threads 1-5 are running, and thread 1 hits a breakpoint.
5576 #2 - gdb processes the breakpoint hit for thread 1, stops all
5577 threads, and steps thread 1 over the breakpoint. While
5578 stopping threads, some other threads reported interesting
5579 events, which were left pending in the thread's objects
5580 (infrun's queue).
5582 #3 - Thread 1 exits (it stepped an exit syscall), and gdbserver
5583 reports the thread exit for thread 1. The event ends up in
5584 remote's stop reply queue.
5586 #4 - That was the last resumed thread, so gdbserver reports
5587 no-resumed, and that event also ends up in remote's stop
5588 reply queue, queued after the thread exit from #3.
5590 #5 - gdb processes the thread exit event, which finishes the
5591 step-over, and so gdb restarts all threads (threads with
5592 pending events are left marked resumed, but aren't set
5593 executing). The no-resumed event is still left pending in
5594 the remote stop reply queue.
5596 #6 - Since there are now resumed threads with pending breakpoint
5597 hits, gdb picks one at random to process next.
5599 #7 - gdb picks the breakpoint hit for thread 2 this time, and that
5600 breakpoint also needs to be stepped over, so gdb stops all
5601 threads again.
5603 #8 - stop_all_threads counts the number of expected stops and calls
5604 wait_one once for each.
5606 #9 - The first wait_one call collects the no-resumed event from #4
5607 above.
5609 #10 - Seeing the no-resumed event, wait_one disables target async
5610 for the remote target, to stop waiting for events from it.
5611 wait_one from here on always returns no-resumed directly
5612 without reaching the target.
5614 #11 - stop_all_threads still hasn't seen all the stops it expects,
5615 so it does another pass.
5617 #12 - Since the remote target is not async (disabled in #10),
5618 wait_one doesn't wait on it, so it won't see the expected
5619 stops, and instead returns no-resumed directly.
5621 #13 - stop_all_threads still hasn't seen all the stops, so it
5622 does another pass. goto #12, looping forever.
5624 To handle this, we explicitly (re-)enable target async on all
5625 targets that can async every time stop_all_threads goes to wait for
5626 the expected stops. */
5628 static void
5629 reenable_target_async ()
5631 for (inferior *inf : all_inferiors ())
5633 process_stratum_target *target = inf->process_target ();
5634 if (target != nullptr
5635 && target->threads_executing
5636 && target->can_async_p ()
5637 && !target->is_async_p ())
5639 switch_to_inferior_no_thread (inf);
5640 target_async (1);
5645 /* See infrun.h. */
5647 void
5648 stop_all_threads (const char *reason, inferior *inf)
5650 /* We may need multiple passes to discover all threads. */
5651 int pass;
5652 int iterations = 0;
5654 gdb_assert (exists_non_stop_target ());
5656 INFRUN_SCOPED_DEBUG_START_END ("reason=%s, inf=%d", reason,
5657 inf != nullptr ? inf->num : -1);
5659 infrun_debug_show_threads ("non-exited threads",
5660 all_non_exited_threads ());
5662 scoped_restore_current_thread restore_thread;
5664 /* Enable thread events on relevant targets. */
5665 for (auto *target : all_non_exited_process_targets ())
5667 if (inf != nullptr && inf->process_target () != target)
5668 continue;
5670 switch_to_target_no_thread (target);
5671 target_thread_events (true);
5674 SCOPE_EXIT
5676 /* Disable thread events on relevant targets. */
5677 for (auto *target : all_non_exited_process_targets ())
5679 if (inf != nullptr && inf->process_target () != target)
5680 continue;
5682 switch_to_target_no_thread (target);
5683 target_thread_events (false);
5686 /* Use debug_prefixed_printf directly to get a meaningful function
5687 name. */
5688 if (debug_infrun)
5689 debug_prefixed_printf ("infrun", "stop_all_threads", "done");
5692 /* Request threads to stop, and then wait for the stops. Because
5693 threads we already know about can spawn more threads while we're
5694 trying to stop them, and we only learn about new threads when we
5695 update the thread list, do this in a loop, and keep iterating
5696 until two passes find no threads that need to be stopped. */
5697 for (pass = 0; pass < 2; pass++, iterations++)
5699 infrun_debug_printf ("pass=%d, iterations=%d", pass, iterations);
5700 while (1)
5702 int waits_needed = 0;
5704 for (auto *target : all_non_exited_process_targets ())
5706 if (inf != nullptr && inf->process_target () != target)
5707 continue;
5709 switch_to_target_no_thread (target);
5710 update_thread_list ();
5713 /* Go through all threads looking for threads that we need
5714 to tell the target to stop. */
5715 for (thread_info *t : all_non_exited_threads ())
5717 if (inf != nullptr && t->inf != inf)
5718 continue;
5720 /* For a single-target setting with an all-stop target,
5721 we would not even arrive here. For a multi-target
5722 setting, until GDB is able to handle a mixture of
5723 all-stop and non-stop targets, simply skip all-stop
5724 targets' threads. This should be fine due to the
5725 protection of 'check_multi_target_resumption'. */
5727 switch_to_thread_no_regs (t);
5728 if (!target_is_non_stop_p ())
5729 continue;
5731 if (t->executing ())
5733 /* If already stopping, don't request a stop again.
5734 We just haven't seen the notification yet. */
5735 if (!t->stop_requested)
5737 infrun_debug_printf (" %s executing, need stop",
5738 t->ptid.to_string ().c_str ());
5739 target_stop (t->ptid);
5740 t->stop_requested = true;
5742 else
5744 infrun_debug_printf (" %s executing, already stopping",
5745 t->ptid.to_string ().c_str ());
5748 if (t->stop_requested)
5749 waits_needed++;
5751 else
5753 infrun_debug_printf (" %s not executing",
5754 t->ptid.to_string ().c_str ());
5756 /* The thread may not be executing, but still be
5757 resumed with a pending status to process. */
5758 t->set_resumed (false);
5762 if (waits_needed == 0)
5763 break;
5765 /* If we find new threads on the second iteration, start
5766 over. We want to see two iterations in a row with all
5767 threads stopped. */
5768 if (pass > 0)
5769 pass = -1;
5771 reenable_target_async ();
5773 for (int i = 0; i < waits_needed; i++)
5775 wait_one_event event = wait_one ();
5776 if (handle_one (event))
5777 break;
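/* Editorial sketch (not part of infrun.c): the convergence rule used
   by stop_all_threads above, reduced to its core.  Because stopping
   threads can reveal new ones, keep iterating until two passes in a
   row find nothing left to stop.  request_missing_stops is a
   hypothetical callback that asks still-running threads to stop and
   returns how many stop notifications remain to be collected; the
   actual collection of those stops is elided.  */

static void
sketch_stop_until_quiescent (int (*request_missing_stops) ())
{
  for (int pass = 0; pass < 2; pass++)
    {
      if (request_missing_stops () == 0)
	continue;

      /* Work was found: require two further clean passes before
	 declaring convergence.  */
      pass = -1;
    }
}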
5783 /* Handle a TARGET_WAITKIND_NO_RESUMED event. Return true if we
5784 handled the event and should continue waiting. Return false if we
5785 should stop and report the event to the user. */
5787 static bool
5788 handle_no_resumed (struct execution_control_state *ecs)
5790 if (target_can_async_p ())
5792 bool any_sync = false;
5794 for (ui *ui : all_uis ())
5796 if (ui->prompt_state == PROMPT_BLOCKED)
5798 any_sync = true;
5799 break;
5802 if (!any_sync)
5804 /* There were no unwaited-for children left in the target, but
5805 we're not synchronously waiting for events either. Just
5806 ignore. */
5808 infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED (ignoring: bg)");
5809 prepare_to_wait (ecs);
5810 return true;
5814 /* Otherwise, if we were running a synchronous execution command, we
5815 may need to cancel it and give the user back the terminal.
5817 In non-stop mode, the target can't tell whether we've already
5818 consumed previous stop events, so it can end up sending us a
5819 no-resumed event like so:
5821 #0 - thread 1 is left stopped
5823 #1 - thread 2 is resumed and hits breakpoint
5824 -> TARGET_WAITKIND_STOPPED
5826 #2 - thread 3 is resumed and exits
5827 this is the last resumed thread, so
5828 -> TARGET_WAITKIND_NO_RESUMED
5830 #3 - gdb processes stop for thread 2 and decides to re-resume
5833 #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
5834 thread 2 is now resumed, so the event should be ignored.
5836 IOW, if the stop for thread 2 doesn't end a foreground command,
5837 then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
5838 event. But it could be that the event meant that thread 2 itself
5839 (or whatever other thread was the last resumed thread) exited.
5841 To address this we refresh the thread list and check whether we
5842 have resumed threads _now_. In the example above, this removes
5843 thread 3 from the thread list. If thread 2 was re-resumed, we
5844 ignore this event. If we find no thread resumed, then we cancel
5845 the synchronous command and show "no unwaited-for " to the
5846 user. */
5848 inferior *curr_inf = current_inferior ();
5850 scoped_restore_current_thread restore_thread;
5851 update_thread_list ();
5853 /* If:
5855 - the current target has no thread executing, and
5856 - the current inferior is native, and
5857 - the current inferior is the one which has the terminal, and
5858 - we did nothing,
5860 then a Ctrl-C from this point on would remain stuck in the
5861 kernel, until a thread resumes and dequeues it. That would
5862 result in the GDB CLI not reacting to Ctrl-C, not able to
5863 interrupt the program. To address this, if the current inferior
5864 no longer has any thread executing, we give the terminal to some
5865 other inferior that has at least one thread executing. */
5866 bool swap_terminal = true;
5868 /* Whether to ignore this TARGET_WAITKIND_NO_RESUMED event, or
5869 whether to report it to the user. */
5870 bool ignore_event = false;
5872 for (thread_info *thread : all_non_exited_threads ())
5874 if (swap_terminal && thread->executing ())
5876 if (thread->inf != curr_inf)
5878 target_terminal::ours ();
5880 switch_to_thread (thread);
5881 target_terminal::inferior ();
5883 swap_terminal = false;
5886 if (!ignore_event && thread->resumed ())
5888 /* Either there were no unwaited-for children left in the
5889 target at some point, but there are now, or some target
5890 other than the eventing one has unwaited-for children
5891 left. Just ignore. */
5892 infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED "
5893 "(ignoring: found resumed)");
5895 ignore_event = true;
5898 if (ignore_event && !swap_terminal)
5899 break;
5902 if (ignore_event)
5904 switch_to_inferior_no_thread (curr_inf);
5905 prepare_to_wait (ecs);
5906 return true;
5909 /* Go ahead and report the event. */
5910 return false;
5913 /* Handle a TARGET_WAITKIND_THREAD_EXITED event. Return true if we
5914 handled the event and should continue waiting. Return false if we
5915 should stop and report the event to the user. */
5917 static bool
5918 handle_thread_exited (execution_control_state *ecs)
5920 context_switch (ecs);
5922 /* Clear these so we don't re-start the thread stepping over a
5923 breakpoint/watchpoint. */
5924 ecs->event_thread->stepping_over_breakpoint = 0;
5925 ecs->event_thread->stepping_over_watchpoint = 0;
5927 /* If the thread had an FSM, then abort the command. But only after
5928 finishing the step over, as in non-stop mode, aborting this
5929 thread's command should not interfere with other threads. We
5930 must check this before finish_step_over, however, which may
5931 update the thread list and delete the event thread. */
5932 bool abort_cmd = (ecs->event_thread->thread_fsm () != nullptr);
5934 /* Mark the thread exited right now, because finish_step_over may
5935 update the thread list and that may delete the thread silently
5936 (depending on target), while we always want to emit the "[Thread
5937 ... exited]" notification. Don't actually delete the thread yet,
5938 because we need to pass its pointer down to finish_step_over. */
5939 set_thread_exited (ecs->event_thread);
5941 /* Maybe the thread was doing a step-over, if so release
5942 resources and start any further pending step-overs.
5944 If we are on a non-stop target and the thread was doing an
5945 in-line step, this also restarts the other threads. */
5946 int ret = finish_step_over (ecs);
5948 /* finish_step_over returns true if it moves ecs' wait status
5949 back into the thread, so that we go handle another pending
5950 event before this one. But we know it never does that if
5951 the event thread has exited. */
5952 gdb_assert (ret == 0);
5954 if (abort_cmd)
5956 /* We're stopping for the thread exit event. Switch to the
5957 event thread again, as finish_step_over may have switched
5958 threads. */
5959 switch_to_thread (ecs->event_thread);
5960 ecs->event_thread = nullptr;
5961 return false;
5964 /* If finish_step_over started a new in-line step-over, don't
5965 try to restart anything else. */
5966 if (step_over_info_valid_p ())
5968 delete_thread (ecs->event_thread);
5969 return true;
5972 /* Maybe we are on an all-stop target and we got this event
5973 while doing a step-like command on another thread. If so,
5974 go back to doing that. If this thread was stepping,
5975 switch_back_to_stepped_thread will consider that the thread
5976 was interrupted mid-step and will try to keep stepping it. We
5977 don't want that, the thread is gone. So clear the proceed
5978 status so it doesn't do that. */
5979 clear_proceed_status_thread (ecs->event_thread);
5980 if (switch_back_to_stepped_thread (ecs))
5982 delete_thread (ecs->event_thread);
5983 return true;
5986 inferior *inf = ecs->event_thread->inf;
5987 bool slock_applies = schedlock_applies (ecs->event_thread);
5989 delete_thread (ecs->event_thread);
5990 ecs->event_thread = nullptr;
5992 /* Continue handling the event as if we had gotten a
5993 TARGET_WAITKIND_NO_RESUMED. */
5994 auto handle_as_no_resumed = [ecs] ()
5996 /* handle_no_resumed doesn't really look at the event kind, but
5997 normal_stop does. */
5998 ecs->ws.set_no_resumed ();
5999 ecs->event_thread = nullptr;
6000 ecs->ptid = minus_one_ptid;
6002 /* Re-record the last target status. */
6003 set_last_target_status (ecs->target, ecs->ptid, ecs->ws);
6005 return handle_no_resumed (ecs);
6008 /* If we are on an all-stop target, the target has stopped all
6009 threads to report the event. We don't actually want to
6010 stop, so restart the threads. */
6011 if (!target_is_non_stop_p ())
6013 if (slock_applies)
6015 /* Since the target is !non-stop, everything is stopped
6016 at this point, and we can't assume we'll get further
6017 events until we resume the target again. Handle this
6018 event like if it were a TARGET_WAITKIND_NO_RESUMED. Note
6019 this refreshes the thread list and checks whether there
6020 are other resumed threads before deciding whether to
6021 print "no-unwaited-for left". This is important because
6022 the user could have done:
6024 (gdb) set scheduler-locking on
6025 (gdb) thread 1
6026 (gdb) c&
6027 (gdb) thread 2
6028 (gdb) c
6030 ... and only one of the threads exited. */
6031 return handle_as_no_resumed ();
6033 else
6035 /* Switch to the first non-exited thread we can find, and
6036 resume. */
6037 auto range = inf->non_exited_threads ();
6038 if (range.begin () == range.end ())
6040 /* Looks like the target reported a
6041 TARGET_WAITKIND_THREAD_EXITED for its last known
6042 thread. */
6043 return handle_as_no_resumed ();
6045 thread_info *non_exited_thread = *range.begin ();
6046 switch_to_thread (non_exited_thread);
6047 insert_breakpoints ();
6048 resume (GDB_SIGNAL_0);
6052 prepare_to_wait (ecs);
6053 return true;
6056 /* Given an execution control state that has been freshly filled in by
6057 an event from the inferior, figure out what it means and take
6058 appropriate action.
6060 The alternatives are:
6062 1) stop_waiting and return; to really stop and return to the
6063 debugger.
6065 2) keep_going and return; to wait for the next event (set
6066 ecs->event_thread->stepping_over_breakpoint to 1 to single step
6067 once). */
6069 static void
6070 handle_inferior_event (struct execution_control_state *ecs)
6072 /* Make sure that all temporary struct value objects that were
6073 created during the handling of the event get deleted at the
6074 end. */
6075 scoped_value_mark free_values;
6077 infrun_debug_printf ("%s", ecs->ws.to_string ().c_str ());
6079 if (ecs->ws.kind () == TARGET_WAITKIND_IGNORE)
6081 /* We had an event in the inferior, but we are not interested in
6082 handling it at this level. The lower layers have already
6083 done what needs to be done, if anything.
6085 One of the possible circumstances for this is when the
6086 inferior produces output for the console. The inferior has
6087 not stopped, and we are ignoring the event. Another possible
6088 circumstance is any event which the lower level knows will be
6089 reported multiple times without an intervening resume. */
6090 prepare_to_wait (ecs);
6091 return;
6094 if (ecs->ws.kind () == TARGET_WAITKIND_NO_RESUMED
6095 && handle_no_resumed (ecs))
6096 return;
6098 /* Cache the last target/ptid/waitstatus. */
6099 set_last_target_status (ecs->target, ecs->ptid, ecs->ws);
6101 /* Always clear state belonging to the previous time we stopped. */
6102 stop_stack_dummy = STOP_NONE;
6104 if (ecs->ws.kind () == TARGET_WAITKIND_NO_RESUMED)
6106 /* No unwaited-for children left. IOW, all resumed children
6107 have exited. */
6108 stop_waiting (ecs);
6109 return;
6112 if (ecs->ws.kind () != TARGET_WAITKIND_EXITED
6113 && ecs->ws.kind () != TARGET_WAITKIND_SIGNALLED)
6115 ecs->event_thread = ecs->target->find_thread (ecs->ptid);
6116 /* If it's a new thread, add it to the thread database. */
6117 if (ecs->event_thread == nullptr)
6118 ecs->event_thread = add_thread (ecs->target, ecs->ptid);
6120 /* Disable range stepping. If the next step request could use a
6121 range, this will end up being re-enabled then. */
6122 ecs->event_thread->control.may_range_step = 0;
6125 /* Dependent on valid ECS->EVENT_THREAD. */
6126 adjust_pc_after_break (ecs->event_thread, ecs->ws);
6128 /* Dependent on the current PC value modified by adjust_pc_after_break. */
6129 reinit_frame_cache ();
6131 breakpoint_retire_moribund ();
6133 /* First, distinguish signals caused by the debugger from signals
6134 that have to do with the program's own actions. Note that
6135 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
6136 on the operating system version. Here we detect when a SIGILL or
6137 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
6138 something similar for SIGSEGV, since a SIGSEGV will be generated
6139 when we're trying to execute a breakpoint instruction on a
6140 non-executable stack. This happens for call dummy breakpoints
6141 for architectures like SPARC that place call dummies on the
6142 stack. */
6143 if (ecs->ws.kind () == TARGET_WAITKIND_STOPPED
6144 && (ecs->ws.sig () == GDB_SIGNAL_ILL
6145 || ecs->ws.sig () == GDB_SIGNAL_SEGV
6146 || ecs->ws.sig () == GDB_SIGNAL_EMT))
6148 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
6150 if (breakpoint_inserted_here_p (ecs->event_thread->inf->aspace.get (),
6151 regcache_read_pc (regcache)))
6153 infrun_debug_printf ("Treating signal as SIGTRAP");
6154 ecs->ws.set_stopped (GDB_SIGNAL_TRAP);
6158 mark_non_executing_threads (ecs->target, ecs->ptid, ecs->ws);
6160 switch (ecs->ws.kind ())
6162 case TARGET_WAITKIND_LOADED:
6164 context_switch (ecs);
6165 /* Ignore gracefully during startup of the inferior, as it might
6166 be the shell which has just loaded some objects; otherwise
6167 add the symbols for the newly loaded objects. Also ignore at
6168 the beginning of an attach or remote session; we will query
6169 the full list of libraries once the connection is
6170 established. */
6172 stop_kind stop_soon = get_inferior_stop_soon (ecs);
6173 if (stop_soon == NO_STOP_QUIETLY)
6175 struct regcache *regcache;
6177 regcache = get_thread_regcache (ecs->event_thread);
6179 handle_solib_event ();
6181 ecs->event_thread->set_stop_pc (regcache_read_pc (regcache));
6182 address_space *aspace = ecs->event_thread->inf->aspace.get ();
6183 ecs->event_thread->control.stop_bpstat
6184 = bpstat_stop_status_nowatch (aspace,
6185 ecs->event_thread->stop_pc (),
6186 ecs->event_thread, ecs->ws);
6188 if (handle_stop_requested (ecs))
6189 return;
6191 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
6193 /* A catchpoint triggered. */
6194 process_event_stop_test (ecs);
6195 return;
6198 /* If requested, stop when the dynamic linker notifies
6199 gdb of events. This allows the user to get control
6200 and place breakpoints in initializer routines for
6201 dynamically loaded objects (among other things). */
6202 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6203 if (stop_on_solib_events)
6205 /* Make sure we print "Stopped due to solib-event" in
6206 normal_stop. */
6207 stop_print_frame = true;
6209 stop_waiting (ecs);
6210 return;
6214 /* If we are skipping through a shell, or through shared library
6215 loading that we aren't interested in, resume the program. If
6216 we're running the program normally, also resume. */
6217 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
6219 /* Loading of shared libraries might have changed breakpoint
6220 addresses. Make sure new breakpoints are inserted. */
6221 if (stop_soon == NO_STOP_QUIETLY)
6222 insert_breakpoints ();
6223 resume (GDB_SIGNAL_0);
6224 prepare_to_wait (ecs);
6225 return;
6228 /* But stop if we're attaching or setting up a remote
6229 connection. */
6230 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
6231 || stop_soon == STOP_QUIETLY_REMOTE)
6233 infrun_debug_printf ("quietly stopped");
6234 stop_waiting (ecs);
6235 return;
6238 internal_error (_("unhandled stop_soon: %d"), (int) stop_soon);
6241 case TARGET_WAITKIND_SPURIOUS:
6242 if (handle_stop_requested (ecs))
6243 return;
6244 context_switch (ecs);
6245 resume (GDB_SIGNAL_0);
6246 prepare_to_wait (ecs);
6247 return;
6249 case TARGET_WAITKIND_THREAD_CREATED:
6250 if (handle_stop_requested (ecs))
6251 return;
6252 context_switch (ecs);
6253 if (!switch_back_to_stepped_thread (ecs))
6254 keep_going (ecs);
6255 return;
6257 case TARGET_WAITKIND_THREAD_EXITED:
6258 if (handle_thread_exited (ecs))
6259 return;
6260 stop_waiting (ecs);
6261 break;
6263 case TARGET_WAITKIND_EXITED:
6264 case TARGET_WAITKIND_SIGNALLED:
6266 /* Depending on the system, ecs->ptid may point to a thread or
6267 to a process. On some targets, target_mourn_inferior may
6268 need to have access to the just-exited thread. That is the
6269 case of GNU/Linux's "checkpoint" support, for example.
6270 Call the switch_to_xxx routine as appropriate. */
6271 thread_info *thr = ecs->target->find_thread (ecs->ptid);
6272 if (thr != nullptr)
6273 switch_to_thread (thr);
6274 else
6276 inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
6277 switch_to_inferior_no_thread (inf);
6280 handle_vfork_child_exec_or_exit (0);
6281 target_terminal::ours (); /* Must do this before mourn anyway. */
6283 /* Clear any previous state of convenience variables. */
6284 clear_exit_convenience_vars ();
6286 if (ecs->ws.kind () == TARGET_WAITKIND_EXITED)
6288 /* Record the exit code in the convenience variable $_exitcode, so
6289 that the user can inspect this again later. */
6290 set_internalvar_integer (lookup_internalvar ("_exitcode"),
6291 (LONGEST) ecs->ws.exit_status ());
6293 /* Also record this in the inferior itself. */
6294 current_inferior ()->has_exit_code = true;
6295 current_inferior ()->exit_code = (LONGEST) ecs->ws.exit_status ();
6297 /* Support the --return-child-result option. */
6298 return_child_result_value = ecs->ws.exit_status ();
6300 interps_notify_exited (ecs->ws.exit_status ());
6302 else
6304 struct gdbarch *gdbarch = current_inferior ()->arch ();
6306 if (gdbarch_gdb_signal_to_target_p (gdbarch))
6308 /* Set the value of the internal variable $_exitsignal,
6309 which holds the signal uncaught by the inferior. */
6310 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
6311 gdbarch_gdb_signal_to_target (gdbarch,
6312 ecs->ws.sig ()));
6314 else
6316 /* We don't have access to the target's method used for
6317 converting between signal numbers (GDB's internal
6318 representation <-> target's representation).
6319 Therefore, we cannot do a good job at displaying this
6320 information to the user. It's better to just warn
6321 her about it (if infrun debugging is enabled), and
6322 give up. */
6323 infrun_debug_printf ("Cannot fill $_exitsignal with the correct "
6324 "signal number.");
6327 interps_notify_signal_exited (ecs->ws.sig ());
6330 gdb_flush (gdb_stdout);
6331 target_mourn_inferior (inferior_ptid);
6332 stop_print_frame = false;
6333 stop_waiting (ecs);
6334 return;
6336 case TARGET_WAITKIND_FORKED:
6337 case TARGET_WAITKIND_VFORKED:
6338 case TARGET_WAITKIND_THREAD_CLONED:
6340 displaced_step_finish (ecs->event_thread, ecs->ws);
6342 /* Start a new step-over in another thread if there's one that
6343 needs it. */
6344 start_step_over ();
6346 context_switch (ecs);
6348 /* Immediately detach breakpoints from the child before there's
6349 any chance of letting the user delete breakpoints from the
6350 breakpoint lists. If we don't do this early, it's easy to
6351 leave left-over traps in the child, viz: "break foo; catch
6352 fork; c; <fork>; del; c; <child calls foo>". We only follow
6353 the fork on the last `continue', and by that time the
6354 breakpoint at "foo" is long gone from the breakpoint table.
6355 If we vforked, then we don't need to unpatch here, since both
6356 parent and child are sharing the same memory pages; we'll
6357 need to unpatch at follow/detach time instead to be certain
6358 that new breakpoints added between catchpoint hit time and
6359 vfork follow are detached. */
6360 if (ecs->ws.kind () == TARGET_WAITKIND_FORKED)
6362 /* This won't actually modify the breakpoint list, but will
6363 physically remove the breakpoints from the child. */
6364 detach_breakpoints (ecs->ws.child_ptid ());
6367 delete_just_stopped_threads_single_step_breakpoints ();
6369 /* In case the event is caught by a catchpoint, remember that
6370 the event is to be followed at the next resume of the thread,
6371 and not immediately. */
6372 ecs->event_thread->pending_follow = ecs->ws;
6374 ecs->event_thread->set_stop_pc
6375 (regcache_read_pc (get_thread_regcache (ecs->event_thread)));
6377 ecs->event_thread->control.stop_bpstat
6378 = bpstat_stop_status_nowatch (ecs->event_thread->inf->aspace.get (),
6379 ecs->event_thread->stop_pc (),
6380 ecs->event_thread, ecs->ws);
6382 if (handle_stop_requested (ecs))
6383 return;
6385 /* If no catchpoint triggered for this, then keep going. Note
6386 that we're interested in knowing the bpstat actually causes a
6387 stop, not just if it may explain the signal. Software
6388 watchpoints, for example, always appear in the bpstat. */
6389 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
6391 bool follow_child
6392 = (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED
6393 && follow_fork_mode_string == follow_fork_mode_child);
6395 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6397 process_stratum_target *targ
6398 = ecs->event_thread->inf->process_target ();
6400 bool should_resume;
6401 if (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED)
6402 should_resume = follow_fork ();
6403 else
6405 should_resume = true;
6406 inferior *inf = ecs->event_thread->inf;
6407 inf->top_target ()->follow_clone (ecs->ws.child_ptid ());
6408 ecs->event_thread->pending_follow.set_spurious ();
6411 /* Note that one of these may be an invalid pointer,
6412 depending on detach_fork. */
6413 thread_info *parent = ecs->event_thread;
6414 thread_info *child = targ->find_thread (ecs->ws.child_ptid ());
6416 /* At this point, the parent is marked running, and the
6417 child is marked stopped. */
6419 /* If not resuming the parent, mark it stopped. */
6420 if (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED
6421 && follow_child && !detach_fork && !non_stop && !sched_multi)
6422 parent->set_running (false);
6424 /* If resuming the child, mark it running. */
6425 if ((ecs->ws.kind () == TARGET_WAITKIND_THREAD_CLONED
6426 && !schedlock_applies (ecs->event_thread))
6427 || (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED
6428 && (follow_child
6429 || (!detach_fork && (non_stop || sched_multi)))))
6430 child->set_running (true);
6432 /* In non-stop mode, also resume the other branch. */
6433 if ((ecs->ws.kind () == TARGET_WAITKIND_THREAD_CLONED
6434 && target_is_non_stop_p ()
6435 && !schedlock_applies (ecs->event_thread))
6436 || (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED
6437 && (!detach_fork && (non_stop
6438 || (sched_multi
6439 && target_is_non_stop_p ())))))
6441 if (follow_child)
6442 switch_to_thread (parent);
6443 else
6444 switch_to_thread (child);
6446 ecs->event_thread = inferior_thread ();
6447 ecs->ptid = inferior_ptid;
6448 keep_going (ecs);
6451 if (follow_child)
6452 switch_to_thread (child);
6453 else
6454 switch_to_thread (parent);
6456 ecs->event_thread = inferior_thread ();
6457 ecs->ptid = inferior_ptid;
6459 if (should_resume)
6461 /* Never call switch_back_to_stepped_thread if we are waiting for
6462 vfork-done (waiting for an external vfork child to exec or
6463 exit). We will resume only the vforking thread for the purpose
6464 of collecting the vfork-done event, and we will restart any
6465 step once the critical shared address space window is done. */
6466 if ((!follow_child
6467 && detach_fork
6468 && parent->inf->thread_waiting_for_vfork_done != nullptr)
6469 || !switch_back_to_stepped_thread (ecs))
6470 keep_going (ecs);
6472 else
6473 stop_waiting (ecs);
6474 return;
6476 process_event_stop_test (ecs);
6477 return;
6479 case TARGET_WAITKIND_VFORK_DONE:
6480 /* Done with the shared memory region. Re-insert breakpoints in
6481 the parent, and keep going. */
6483 context_switch (ecs);
6485 handle_vfork_done (ecs->event_thread);
6486 gdb_assert (inferior_thread () == ecs->event_thread);
6488 if (handle_stop_requested (ecs))
6489 return;
6491 if (!switch_back_to_stepped_thread (ecs))
6493 gdb_assert (inferior_thread () == ecs->event_thread);
6494 /* This also takes care of reinserting breakpoints in the
6495 previously locked inferior. */
6496 keep_going (ecs);
6498 return;
6500 case TARGET_WAITKIND_EXECD:
6502 /* Note we can't read registers yet (the stop_pc), because we
6503 don't yet know the inferior's post-exec architecture.
6504 'stop_pc' is explicitly read below instead. */
6505 switch_to_thread_no_regs (ecs->event_thread);
6507 /* Do whatever is necessary to the parent branch of the vfork. */
6508 handle_vfork_child_exec_or_exit (1);
6510 /* This causes the eventpoints and symbol table to be reset.
6511 Must do this now, before trying to determine whether to
6512 stop. */
6513 follow_exec (inferior_ptid, ecs->ws.execd_pathname ());
6515 /* In follow_exec we may have deleted the original thread and
6516 created a new one. Make sure that the event thread is the
6517 execd thread for that case (this is a nop otherwise). */
6518 ecs->event_thread = inferior_thread ();
6520 ecs->event_thread->set_stop_pc
6521 (regcache_read_pc (get_thread_regcache (ecs->event_thread)));
6523 ecs->event_thread->control.stop_bpstat
6524 = bpstat_stop_status_nowatch (ecs->event_thread->inf->aspace.get (),
6525 ecs->event_thread->stop_pc (),
6526 ecs->event_thread, ecs->ws);
6528 if (handle_stop_requested (ecs))
6529 return;
6531 /* If no catchpoint triggered for this, then keep going. */
6532 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
6534 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6535 keep_going (ecs);
6536 return;
6538 process_event_stop_test (ecs);
6539 return;
6541 /* Be careful not to try to gather much state about a thread
6542 that's in a syscall. It's frequently a losing proposition. */
6543 case TARGET_WAITKIND_SYSCALL_ENTRY:
6544 /* Getting the current syscall number. */
6545 if (handle_syscall_event (ecs) == 0)
6546 process_event_stop_test (ecs);
6547 return;
6549 /* Before examining the threads further, step this thread to
6550 get it entirely out of the syscall. (We get notice of the
6551 event when the thread is just on the verge of exiting a
6552 syscall. Stepping one instruction seems to get it back
6553 into user code.) */
6554 case TARGET_WAITKIND_SYSCALL_RETURN:
6555 if (handle_syscall_event (ecs) == 0)
6556 process_event_stop_test (ecs);
6557 return;
6559 case TARGET_WAITKIND_STOPPED:
6560 handle_signal_stop (ecs);
6561 return;
6563 case TARGET_WAITKIND_NO_HISTORY:
6564 /* Reverse execution: target ran out of history info. */
6566 /* Switch to the stopped thread. */
6567 context_switch (ecs);
6568 infrun_debug_printf ("stopped");
6570 delete_just_stopped_threads_single_step_breakpoints ();
6571 ecs->event_thread->set_stop_pc
6572 (regcache_read_pc (get_thread_regcache (inferior_thread ())));
6574 if (handle_stop_requested (ecs))
6575 return;
6577 interps_notify_no_history ();
6578 stop_waiting (ecs);
6579 return;
6583 /* Restart threads back to what they were trying to do when we
6584 paused them (because of an in-line step-over or vfork, for example).
6585 The EVENT_THREAD thread is ignored (not restarted).
6587 If INF is non-nullptr, only resume threads from INF. */
6589 static void
6590 restart_threads (struct thread_info *event_thread, inferior *inf)
6592 INFRUN_SCOPED_DEBUG_START_END ("event_thread=%s, inf=%d",
6593 event_thread->ptid.to_string ().c_str (),
6594 inf != nullptr ? inf->num : -1);
6596 gdb_assert (!step_over_info_valid_p ());
6598 /* In case the instruction just stepped spawned a new thread. */
6599 update_thread_list ();
6601 for (thread_info *tp : all_non_exited_threads ())
6603 if (inf != nullptr && tp->inf != inf)
6604 continue;
6606 if (tp->inf->detaching)
6608 infrun_debug_printf ("restart threads: [%s] inferior detaching",
6609 tp->ptid.to_string ().c_str ());
6610 continue;
6613 switch_to_thread_no_regs (tp);
6615 if (tp == event_thread)
6617 infrun_debug_printf ("restart threads: [%s] is event thread",
6618 tp->ptid.to_string ().c_str ());
6619 continue;
6622 if (!(tp->state == THREAD_RUNNING || tp->control.in_infcall))
6624 infrun_debug_printf ("restart threads: [%s] not meant to be running",
6625 tp->ptid.to_string ().c_str ());
6626 continue;
6629 if (tp->resumed ())
6631 infrun_debug_printf ("restart threads: [%s] resumed",
6632 tp->ptid.to_string ().c_str ());
6633 gdb_assert (tp->executing () || tp->has_pending_waitstatus ());
6634 continue;
6637 if (thread_is_in_step_over_chain (tp))
6639 infrun_debug_printf ("restart threads: [%s] needs step-over",
6640 tp->ptid.to_string ().c_str ());
6641 gdb_assert (!tp->resumed ());
6642 continue;
6646 if (tp->has_pending_waitstatus ())
6648 infrun_debug_printf ("restart threads: [%s] has pending status",
6649 tp->ptid.to_string ().c_str ());
6650 tp->set_resumed (true);
6651 continue;
6654 gdb_assert (!tp->stop_requested);
6656 /* If some thread needs to start a step-over at this point, it
6657 should still be in the step-over queue, and thus skipped
6658 above. */
6659 if (thread_still_needs_step_over (tp))
6661 internal_error ("thread [%s] needs a step-over, but not in "
6662 "step-over queue\n",
6663 tp->ptid.to_string ().c_str ());
6666 if (currently_stepping (tp))
6668 infrun_debug_printf ("restart threads: [%s] was stepping",
6669 tp->ptid.to_string ().c_str ());
6670 keep_going_stepped_thread (tp);
6672 else
6674 infrun_debug_printf ("restart threads: [%s] continuing",
6675 tp->ptid.to_string ().c_str ());
6676 execution_control_state ecs (tp);
6677 switch_to_thread (tp);
6678 keep_going_pass_signal (&ecs);
6683 /* Callback for iterate_over_threads. Find a resumed thread that has
6684 a pending waitstatus. */
6686 static int
6687 resumed_thread_with_pending_status (struct thread_info *tp,
6688 void *arg)
6690 return tp->resumed () && tp->has_pending_waitstatus ();
6693 /* Called when we get an event that may finish an in-line or
6694 out-of-line (displaced stepping) step-over started previously.
6695 Return true if the event is processed and we should go back to the
6696 event loop; false if the caller should continue processing the
6697 event. */
6699 static int
6700 finish_step_over (struct execution_control_state *ecs)
6702 displaced_step_finish (ecs->event_thread, ecs->ws);
6704 bool had_step_over_info = step_over_info_valid_p ();
6706 if (had_step_over_info)
6708 /* If we're stepping over a breakpoint with all threads locked,
6709 then only the thread that was stepped should be reporting
6710 back an event. */
6711 gdb_assert (ecs->event_thread->control.trap_expected);
6713 update_thread_events_after_step_over (ecs->event_thread, ecs->ws);
6715 clear_step_over_info ();
6718 if (!target_is_non_stop_p ())
6719 return 0;
6721 /* Start a new step-over in another thread if there's one that
6722 needs it. */
6723 start_step_over ();
6725 /* If we were stepping over a breakpoint before, and haven't started
6726 a new in-line step-over sequence, then restart all other threads
6727 (except the event thread). We can't do this in all-stop, as then
6728 e.g., we wouldn't be able to issue any other remote packet until
6729 these other threads stop. */
6730 if (had_step_over_info && !step_over_info_valid_p ())
6732 struct thread_info *pending;
6734 /* If we only have threads with pending statuses, the restart
6735 below won't restart any thread and so nothing re-inserts the
6736 breakpoint we just stepped over. But we need it inserted
6737 when we later process the pending events, otherwise if
6738 another thread has a pending event for this breakpoint too,
6739 we'd discard its event (because the breakpoint that
6740 originally caused the event was no longer inserted). */
6741 context_switch (ecs);
6742 insert_breakpoints ();
6744 restart_threads (ecs->event_thread);
6746 /* If we have events pending, go through handle_inferior_event
6747 again, picking up a pending event at random. This avoids
6748 thread starvation. */
6750 /* But not if we just stepped over a watchpoint in order to let
6751 the instruction execute so we can evaluate its expression.
6752 The set of watchpoints that triggered is recorded in the
6753 breakpoint objects themselves (see bp->watchpoint_triggered).
6754 If we processed another event first, that other event could
6755 clobber this info. */
6756 if (ecs->event_thread->stepping_over_watchpoint)
6757 return 0;
6759 /* The code below is meant to avoid one thread hogging the event
6760 loop by doing constant in-line step-overs. If the stepping
6761 thread exited, there's no risk for this to happen, so we can
6762 safely let our caller process the event immediately. */
6763 if (ecs->ws.kind () == TARGET_WAITKIND_THREAD_EXITED)
6764 return 0;
6766 pending = iterate_over_threads (resumed_thread_with_pending_status,
6767 nullptr);
6768 if (pending != nullptr)
6770 struct thread_info *tp = ecs->event_thread;
6771 struct regcache *regcache;
6773 infrun_debug_printf ("found resumed threads with "
6774 "pending events, saving status");
6776 gdb_assert (pending != tp);
6778 /* Record the event thread's event for later. */
6779 save_waitstatus (tp, ecs->ws);
6780 /* This was cleared early, by handle_inferior_event. Set it
6781 so this pending event is considered by
6782 do_target_wait. */
6783 tp->set_resumed (true);
6785 gdb_assert (!tp->executing ());
6787 regcache = get_thread_regcache (tp);
6788 tp->set_stop_pc (regcache_read_pc (regcache));
6790 infrun_debug_printf ("saved stop_pc=%s for %s "
6791 "(currently_stepping=%d)",
6792 paddress (current_inferior ()->arch (),
6793 tp->stop_pc ()),
6794 tp->ptid.to_string ().c_str (),
6795 currently_stepping (tp));
6797 /* This in-line step-over finished; clear this so we won't
6798 start a new one. This is what handle_signal_stop would
6799 do, if we returned false. */
6800 tp->stepping_over_breakpoint = 0;
6802 /* Wake up the event loop again. */
6803 mark_async_event_handler (infrun_async_inferior_event_token);
6805 prepare_to_wait (ecs);
6806 return 1;
6810 return 0;
6813 /* See infrun.h. */
6815 void
6816 notify_signal_received (gdb_signal sig)
6818 interps_notify_signal_received (sig);
6819 gdb::observers::signal_received.notify (sig);
6822 /* See infrun.h. */
6824 void
6825 notify_normal_stop (bpstat *bs, int print_frame)
6827 interps_notify_normal_stop (bs, print_frame);
6828 gdb::observers::normal_stop.notify (bs, print_frame);
6831 /* See infrun.h. */
6833 void notify_user_selected_context_changed (user_selected_what selection)
6835 interps_notify_user_selected_context_changed (selection);
6836 gdb::observers::user_selected_context_changed.notify (selection);
6839 /* Come here when the program has stopped with a signal. */
6841 static void
6842 handle_signal_stop (struct execution_control_state *ecs)
6844 frame_info_ptr frame;
6845 struct gdbarch *gdbarch;
6846 int stopped_by_watchpoint;
6847 enum stop_kind stop_soon;
6848 int random_signal;
6850 gdb_assert (ecs->ws.kind () == TARGET_WAITKIND_STOPPED);
6852 ecs->event_thread->set_stop_signal (ecs->ws.sig ());
6854 /* Do we need to clean up the state of a thread that has
6855 completed a displaced single-step? (Doing so usually affects
6856 the PC, so do it here, before we set stop_pc.) */
6857 if (finish_step_over (ecs))
6858 return;
6860 /* If we either finished a single-step or hit a breakpoint, but
6861 the user wanted this thread to be stopped, pretend we got a
6862 SIG0 (generic unsignaled stop). */
6863 if (ecs->event_thread->stop_requested
6864 && ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
6865 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6867 ecs->event_thread->set_stop_pc
6868 (regcache_read_pc (get_thread_regcache (ecs->event_thread)));
6870 context_switch (ecs);
6872 if (deprecated_context_hook)
6873 deprecated_context_hook (ecs->event_thread->global_num);
6875 if (debug_infrun)
6877 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
6878 struct gdbarch *reg_gdbarch = regcache->arch ();
6880 infrun_debug_printf
6881 ("stop_pc=%s", paddress (reg_gdbarch, ecs->event_thread->stop_pc ()));
6882 if (target_stopped_by_watchpoint ())
6884 CORE_ADDR addr;
6886 infrun_debug_printf ("stopped by watchpoint");
6888 if (target_stopped_data_address (current_inferior ()->top_target (),
6889 &addr))
6890 infrun_debug_printf ("stopped data address=%s",
6891 paddress (reg_gdbarch, addr));
6892 else
6893 infrun_debug_printf ("(no data address available)");
6897 /* This originates from start_remote(), start_inferior() and
6898 shared libraries hook functions. */
6899 stop_soon = get_inferior_stop_soon (ecs);
6900 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
6902 infrun_debug_printf ("quietly stopped");
6903 stop_print_frame = true;
6904 stop_waiting (ecs);
6905 return;
6908 /* This originates from attach_command(). We need to overwrite
6909 the stop_signal here, because some kernels don't ignore a
6910 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
6911 See more comments in inferior.h. On the other hand, if we
6912 get a non-SIGSTOP, report it to the user - assume the backend
6913 will handle the SIGSTOP if it should show up later.
6915 Also consider that the attach is complete when we see a
6916 SIGTRAP. Some systems (e.g. Windows) and stubs supporting
6917 target extended-remote report it instead of a SIGSTOP
6918 (e.g. gdbserver). We already rely on SIGTRAP being our
6919 signal, so this is no exception.
6921 Also consider that the attach is complete when we see a
6922 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
6923 the target to stop all threads of the inferior, in case the
6924 low level attach operation doesn't stop them implicitly. If
6925 they weren't stopped implicitly, then the stub will report a
6926 GDB_SIGNAL_0, meaning: stopped for no particular reason
6927 other than GDB's request. */
6928 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
6929 && (ecs->event_thread->stop_signal () == GDB_SIGNAL_STOP
6930 || ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
6931 || ecs->event_thread->stop_signal () == GDB_SIGNAL_0))
6933 stop_print_frame = true;
6934 stop_waiting (ecs);
6935 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6936 return;
6939 /* At this point, get hold of the now-current thread's frame. */
6940 frame = get_current_frame ();
6941 gdbarch = get_frame_arch (frame);
6943 /* Pull the single step breakpoints out of the target. */
6944 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
6946 struct regcache *regcache;
6947 CORE_ADDR pc;
6949 regcache = get_thread_regcache (ecs->event_thread);
6950 const address_space *aspace = ecs->event_thread->inf->aspace.get ();
6952 pc = regcache_read_pc (regcache);
6954 /* However, before doing so, if this single-step breakpoint was
6955 actually for another thread, set this thread up for moving
6956 past it. */
6957 if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
6958 aspace, pc))
6960 if (single_step_breakpoint_inserted_here_p (aspace, pc))
6962 infrun_debug_printf ("[%s] hit another thread's single-step "
6963 "breakpoint",
6964 ecs->ptid.to_string ().c_str ());
6965 ecs->hit_singlestep_breakpoint = 1;
6968 else
6970 infrun_debug_printf ("[%s] hit its single-step breakpoint",
6971 ecs->ptid.to_string ().c_str ());
6974 delete_just_stopped_threads_single_step_breakpoints ();
6976 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
6977 && ecs->event_thread->control.trap_expected
6978 && ecs->event_thread->stepping_over_watchpoint)
6979 stopped_by_watchpoint = 0;
6980 else
6981 stopped_by_watchpoint = watchpoints_triggered (ecs->ws);
6983 /* If necessary, step over this watchpoint. We'll be back to display
6984 it in a moment. */
6985 if (stopped_by_watchpoint
6986 && (target_have_steppable_watchpoint ()
6987 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
6989 /* At this point, we are stopped at an instruction which has
6990 attempted to write to a piece of memory under control of
6991 a watchpoint. The instruction hasn't actually executed
6992 yet. If we were to evaluate the watchpoint expression
6993 now, we would get the old value, and therefore no change
6994 would seem to have occurred.
6996 In order to make watchpoints work `right', we really need
6997 to complete the memory write, and then evaluate the
6998 watchpoint expression. We do this by single-stepping the
6999 target.
7001 It may not be necessary to disable the watchpoint to step over
7002 it. For example, the PA can (with some kernel cooperation)
7003 single step over a watchpoint without disabling the watchpoint.
7005 It is far more common to need to disable a watchpoint to step
7006 the inferior over it. If we have non-steppable watchpoints,
7007 we must disable the current watchpoint; it's simplest to
7008 disable all watchpoints.
7010 Any breakpoint at PC must also be stepped over -- if there's
7011 one, it will have already triggered before the watchpoint
7012 triggered, and we either already reported it to the user, or
7013 it didn't cause a stop and we called keep_going. In either
7014 case, if there was a breakpoint at PC, we must be trying to
7015 step past it. */
7016 ecs->event_thread->stepping_over_watchpoint = 1;
7017 keep_going (ecs);
7018 return;
7021 ecs->event_thread->stepping_over_breakpoint = 0;
7022 ecs->event_thread->stepping_over_watchpoint = 0;
7023 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
7024 ecs->event_thread->control.stop_step = 0;
7025 stop_print_frame = true;
7026 stopped_by_random_signal = 0;
7027 bpstat *stop_chain = nullptr;
7029 /* Hide inlined functions starting here, unless we just performed stepi or
7030 nexti. After stepi and nexti, always show the innermost frame (not any
7031 inline function call sites). */
7032 if (ecs->event_thread->control.step_range_end != 1)
7034 const address_space *aspace = ecs->event_thread->inf->aspace.get ();
7036 /* skip_inline_frames is expensive, so we avoid it if we can
7037 determine that the address is one where functions cannot have
7038 been inlined. This improves performance with inferiors that
7039 load a lot of shared libraries, because the solib event
7040 breakpoint is defined as the address of a function (i.e. not
7041 inline). Note that we have to check the previous PC as well
7042 as the current one to catch cases when we have just
7043 single-stepped off a breakpoint prior to reinstating it.
7044 Note that we're assuming that the code we single-step to is
7045 not inline, but that's not definitive: there's nothing
7046 preventing the event breakpoint function from containing
7047 inlined code, and the single-step ending up there. If the
7048 user had set a breakpoint on that inlined code, the missing
7049 skip_inline_frames call would break things. Fortunately
7050 that's an extremely unlikely scenario. */
7051 if (!pc_at_non_inline_function (aspace,
7052 ecs->event_thread->stop_pc (),
7053 ecs->ws)
7054 && !(ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
7055 && ecs->event_thread->control.trap_expected
7056 && pc_at_non_inline_function (aspace,
7057 ecs->event_thread->prev_pc,
7058 ecs->ws)))
7060 stop_chain = build_bpstat_chain (aspace,
7061 ecs->event_thread->stop_pc (),
7062 ecs->ws);
7063 skip_inline_frames (ecs->event_thread, stop_chain);
7067 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
7068 && ecs->event_thread->control.trap_expected
7069 && gdbarch_single_step_through_delay_p (gdbarch)
7070 && currently_stepping (ecs->event_thread))
7072 /* We're trying to step off a breakpoint. Turns out that we're
7073 also on an instruction that needs to be stepped multiple
7074 times before it has fully executed. E.g., architectures
7075 with a delay slot. It needs to be stepped twice, once for
7076 the instruction and once for the delay slot. */
7077 int step_through_delay
7078 = gdbarch_single_step_through_delay (gdbarch, frame);
7080 if (step_through_delay)
7081 infrun_debug_printf ("step through delay");
7083 if (ecs->event_thread->control.step_range_end == 0
7084 && step_through_delay)
7086 /* The user issued a continue when stopped at a breakpoint.
7087 Set up for another trap and get out of here. */
7088 ecs->event_thread->stepping_over_breakpoint = 1;
7089 keep_going (ecs);
7090 return;
7092 else if (step_through_delay)
7094 /* The user issued a step when stopped at a breakpoint.
7095 Maybe we should stop, maybe we should not - the delay
7096 slot *might* correspond to a line of source. In any
7097 case, don't decide that here, just set
7098 ecs->stepping_over_breakpoint, making sure we
7099 single-step again before breakpoints are re-inserted. */
7100 ecs->event_thread->stepping_over_breakpoint = 1;
7104 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
7105 handles this event. */
7106 ecs->event_thread->control.stop_bpstat
7107 = bpstat_stop_status (ecs->event_thread->inf->aspace.get (),
7108 ecs->event_thread->stop_pc (),
7109 ecs->event_thread, ecs->ws, stop_chain);
7111 /* The following is needed in case a breakpoint condition called
7112 a function. */
7113 stop_print_frame = true;
7115 /* This is where we handle "moribund" watchpoints. Unlike
7116 software breakpoint traps, hardware watchpoint traps are
7117 always distinguishable from random traps. If no high-level
7118 watchpoint is associated with the reported stop data address
7119 anymore, then the bpstat does not explain the signal ---
7120 simply make sure to ignore it if `stopped_by_watchpoint' is
7121 set. */
7123 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
7124 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
7125 GDB_SIGNAL_TRAP)
7126 && stopped_by_watchpoint)
7128 infrun_debug_printf ("no user watchpoint explains watchpoint SIGTRAP, "
7129 "ignoring");
7132 /* NOTE: cagney/2003-03-29: These checks for a random signal
7133 at one stage in the past included checks for an inferior
7134 function call's call dummy's return breakpoint. The original
7135 comment, that went with the test, read:
7137 ``End of a stack dummy. Some systems (e.g. Sony news) give
7138 another signal besides SIGTRAP, so check here as well as
7139 above.''
7141 If someone ever tries to get call dummys on a
7142 non-executable stack to work (where the target would stop
7143 with something like a SIGSEGV), then those tests might need
7144 to be re-instated. Given, however, that the tests were only
7145 enabled when momentary breakpoints were not being used, I
7146 suspect that it won't be the case.
7148 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
7149 be necessary for call dummies on a non-executable stack on
7150 SPARC. */
7152 /* See if the breakpoints module can explain the signal. */
7153 random_signal
7154 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
7155 ecs->event_thread->stop_signal ());
7157 /* Maybe this was a trap for a software breakpoint that has since
7158 been removed. */
7159 if (random_signal && target_stopped_by_sw_breakpoint ())
7161 if (gdbarch_program_breakpoint_here_p (gdbarch,
7162 ecs->event_thread->stop_pc ()))
7164 struct regcache *regcache;
7165 int decr_pc;
7167 /* Re-adjust PC to what the program would see if GDB was not
7168 debugging it. */
7169 regcache = get_thread_regcache (ecs->event_thread);
7170 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
7171 if (decr_pc != 0)
7173 std::optional<scoped_restore_tmpl<int>>
7174 restore_operation_disable;
7176 if (record_full_is_used ())
7177 restore_operation_disable.emplace
7178 (record_full_gdb_operation_disable_set ());
7180 regcache_write_pc (regcache,
7181 ecs->event_thread->stop_pc () + decr_pc);
7184 else
7186 /* A delayed software breakpoint event. Ignore the trap. */
7187 infrun_debug_printf ("delayed software breakpoint trap, ignoring");
7188 random_signal = 0;
7192 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
7193 has since been removed. */
7194 if (random_signal && target_stopped_by_hw_breakpoint ())
7196 /* A delayed hardware breakpoint event. Ignore the trap. */
7197 infrun_debug_printf ("delayed hardware breakpoint/watchpoint "
7198 "trap, ignoring");
7199 random_signal = 0;
7202 /* If not, perhaps stepping/nexting can. */
7203 if (random_signal)
7204 random_signal = !(ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
7205 && currently_stepping (ecs->event_thread));
7207 /* Perhaps the thread hit a single-step breakpoint of _another_
7208 thread. Single-step breakpoints are transparent to the
7209 breakpoints module. */
7210 if (random_signal)
7211 random_signal = !ecs->hit_singlestep_breakpoint;
7213 /* No? Perhaps we got a moribund watchpoint. */
7214 if (random_signal)
7215 random_signal = !stopped_by_watchpoint;
7217 /* Always stop if the user explicitly requested this thread to
7218 remain stopped. */
7219 if (ecs->event_thread->stop_requested)
7221 random_signal = 1;
7222 infrun_debug_printf ("user-requested stop");
7225 /* For the program's own signals, act according to
7226 the signal handling tables. */
7228 if (random_signal)
7230 /* Signal not for debugging purposes. */
7231 enum gdb_signal stop_signal = ecs->event_thread->stop_signal ();
7233 infrun_debug_printf ("random signal (%s)",
7234 gdb_signal_to_symbol_string (stop_signal));
7236 stopped_by_random_signal = 1;
7238 /* Always stop on signals if we're either just gaining control
7239 of the program, or the user explicitly requested this thread
7240 to remain stopped. */
7241 if (stop_soon != NO_STOP_QUIETLY
7242 || ecs->event_thread->stop_requested
7243 || signal_stop_state (ecs->event_thread->stop_signal ()))
7245 stop_waiting (ecs);
7246 return;
7249 /* Notify observers the signal has "handle print" set. Note we
7250 returned early above if stopping; normal_stop handles the
7251 printing in that case. */
7252 if (signal_print[ecs->event_thread->stop_signal ()])
7254 /* The signal table tells us to print about this signal. */
7255 target_terminal::ours_for_output ();
7256 notify_signal_received (ecs->event_thread->stop_signal ());
7257 target_terminal::inferior ();
7260 /* Clear the signal if it should not be passed. */
7261 if (signal_program[ecs->event_thread->stop_signal ()] == 0)
7262 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
7264 if (ecs->event_thread->prev_pc == ecs->event_thread->stop_pc ()
7265 && ecs->event_thread->control.trap_expected
7266 && ecs->event_thread->control.step_resume_breakpoint == nullptr)
7268 /* We were just starting a new sequence, attempting to
7269 single-step off of a breakpoint and expecting a SIGTRAP.
7270 Instead this signal arrives. This signal will take us out
7271 of the stepping range so GDB needs to remember to, when
7272 the signal handler returns, resume stepping off that
7273 breakpoint. */
7274 /* To simplify things, "continue" is forced to use the same
7275 code paths as single-step - set a breakpoint at the
7276 signal return address and then, once hit, step off that
7277 breakpoint. */
7278 infrun_debug_printf ("signal arrived while stepping over breakpoint");
7280 insert_hp_step_resume_breakpoint_at_frame (frame);
7281 ecs->event_thread->step_after_step_resume_breakpoint = 1;
7282 /* Reset trap_expected to ensure breakpoints are re-inserted. */
7283 ecs->event_thread->control.trap_expected = 0;
7285 /* If we were nexting/stepping some other thread, switch to
7286 it, so that we don't continue it, losing control. */
7287 if (!switch_back_to_stepped_thread (ecs))
7288 keep_going (ecs);
7289 return;
7292 if (ecs->event_thread->stop_signal () != GDB_SIGNAL_0
7293 && (pc_in_thread_step_range (ecs->event_thread->stop_pc (),
7294 ecs->event_thread)
7295 || ecs->event_thread->control.step_range_end == 1)
7296 && (get_stack_frame_id (frame)
7297 == ecs->event_thread->control.step_stack_frame_id)
7298 && ecs->event_thread->control.step_resume_breakpoint == nullptr)
7300 /* The inferior is about to take a signal that will take it
7301 out of the single step range. Set a breakpoint at the
7302 current PC (which is presumably where the signal handler
7303 will eventually return) and then allow the inferior to
7304 run free.
7306 Note that this is only needed for a signal delivered
7307 while in the single-step range. Nested signals aren't a
7308 problem as they eventually all return. */
7309 infrun_debug_printf ("signal may take us out of single-step range");
7311 clear_step_over_info ();
7312 insert_hp_step_resume_breakpoint_at_frame (frame);
7313 ecs->event_thread->step_after_step_resume_breakpoint = 1;
7314 /* Reset trap_expected to ensure breakpoints are re-inserted. */
7315 ecs->event_thread->control.trap_expected = 0;
7316 keep_going (ecs);
7317 return;
7320 /* Note: step_resume_breakpoint may be non-NULL. This occurs
7321 when either there's a nested signal, or when there's a
7322 pending signal enabled just as the signal handler returns
7323 (leaving the inferior at the step-resume-breakpoint without
7324 actually executing it). Either way continue until the
7325 breakpoint is really hit. */
7327 if (!switch_back_to_stepped_thread (ecs))
7329 infrun_debug_printf ("random signal, keep going");
7331 keep_going (ecs);
7333 return;
7336 process_event_stop_test (ecs);
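/* A condensed, illustrative sketch of the "random signal" decision
   made above.  The helper name is hypothetical, and the sketch leaves
   out the PC re-adjustment for removed software breakpoints, the
   delayed hardware breakpoint case, and the stop_requested override
   that the real code also applies.  */

static bool
signal_is_random_example (struct execution_control_state *ecs,
                          bool stopped_by_watchpoint)
{
  thread_info *tp = ecs->event_thread;

  /* The stop is "random" only if nothing GDB set up explains it: no
     breakpoint in the bpstat, not a single-step GDB requested, not
     another thread's single-step breakpoint, and not a (moribund)
     watchpoint.  */
  return (!bpstat_explains_signal (tp->control.stop_bpstat,
                                   tp->stop_signal ())
          && !(tp->stop_signal () == GDB_SIGNAL_TRAP
               && currently_stepping (tp))
          && !ecs->hit_singlestep_breakpoint
          && !stopped_by_watchpoint);
}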
7339 /* Return the address for the beginning of the line. */
7341 CORE_ADDR
7342 update_line_range_start (CORE_ADDR pc, struct execution_control_state *ecs)
7344 /* The line table may have multiple entries for the same source code line.
7345 Given the PC, check the line table and return the PC that corresponds
7346 to the line table entry for the source line that PC is in. */
7347 CORE_ADDR start_line_pc = ecs->event_thread->control.step_range_start;
7348 std::optional<CORE_ADDR> real_range_start;
7350 /* Call find_line_range_start to get the smallest address in the
7351 line table for the source line containing PC. */
7352 real_range_start = find_line_range_start (pc);
7354 if (real_range_start.has_value ())
7355 start_line_pc = *real_range_start;
7357 return start_line_pc;
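/* A minimal usage sketch (the helper name is hypothetical): a
   reverse-step caller can use update_line_range_start to pin the
   lower bound of the step range to the start of the current source
   line, mirroring what process_event_stop_test does further down.  */

static void
clamp_reverse_step_to_line_start (struct execution_control_state *ecs)
{
  thread_info *tp = ecs->event_thread;
  CORE_ADDR stop_pc = tp->stop_pc ();

  /* Ask the line table for the lowest PC belonging to the same
     source line and use it as the new range start.  */
  tp->control.step_range_start = update_line_range_start (stop_pc, ecs);
}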
7360 namespace {
7362 /* Helper class for process_event_stop_test implementing lazy evaluation. */
7363 template<typename T>
7364 class lazy_loader
7366 using fetcher_t = std::function<T ()>;
7368 public:
7369 explicit lazy_loader (fetcher_t &&f) : m_loader (std::move (f))
7372 T &operator* ()
7374 if (!m_value.has_value ())
7375 m_value.emplace (m_loader ());
7376 return m_value.value ();
7379 T *operator-> ()
7381 return &**this;
7384 private:
7385 std::optional<T> m_value;
7386 fetcher_t m_loader;
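/* A minimal usage sketch of the helper above (the variable name is
   hypothetical).  The frame ID is only computed on the first
   dereference, exactly like curr_frame_id in process_event_stop_test
   below.  */

static void
lazy_loader_example ()
{
  lazy_loader<frame_id> curr_id
    ([] () { return get_frame_id (get_current_frame ()); });

  /* get_frame_id is not called until this first dereference.  */
  if (frame_id_p (*curr_id))
    infrun_debug_printf ("current frame id is valid");
}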
7391 /* Come here when we've got some debug event / signal we can explain
7392 (IOW, not a random signal), and test whether it should cause a
7393 stop, or whether we should resume the inferior (transparently).
7394 E.g., could be a breakpoint whose condition evaluates false; we
7395 could be still stepping within the line; etc. */
7397 static void
7398 process_event_stop_test (struct execution_control_state *ecs)
7400 struct symtab_and_line stop_pc_sal;
7401 frame_info_ptr frame;
7402 struct gdbarch *gdbarch;
7403 CORE_ADDR jmp_buf_pc;
7404 struct bpstat_what what;
7406 /* Handle cases caused by hitting a breakpoint. */
7408 frame = get_current_frame ();
7409 gdbarch = get_frame_arch (frame);
7411 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
7413 if (what.call_dummy)
7415 stop_stack_dummy = what.call_dummy;
7418 /* A few breakpoint types have callbacks associated (e.g.,
7419 bp_jit_event). Run them now. */
7420 bpstat_run_callbacks (ecs->event_thread->control.stop_bpstat);
7422 /* Shorthand to make if statements smaller. */
7423 struct frame_id original_frame_id
7424 = ecs->event_thread->control.step_frame_id;
7425 lazy_loader<frame_id> curr_frame_id
7426 ([] () { return get_frame_id (get_current_frame ()); });
7428 switch (what.main_action)
7430 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
7431 /* If we hit the breakpoint at longjmp while stepping, we
7432 install a momentary breakpoint at the target of the
7433 jmp_buf. */
7435 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME");
7437 ecs->event_thread->stepping_over_breakpoint = 1;
7439 if (what.is_longjmp)
7441 struct value *arg_value;
7443 /* If we set the longjmp breakpoint via a SystemTap probe,
7444 then use it to extract the arguments. The destination PC
7445 is the third argument to the probe. */
7446 arg_value = probe_safe_evaluate_at_pc (frame, 2);
7447 if (arg_value)
7449 jmp_buf_pc = value_as_address (arg_value);
7450 jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
7452 else if (!gdbarch_get_longjmp_target_p (gdbarch)
7453 || !gdbarch_get_longjmp_target (gdbarch,
7454 frame, &jmp_buf_pc))
7456 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME "
7457 "(!gdbarch_get_longjmp_target)");
7458 keep_going (ecs);
7459 return;
7462 /* Insert a breakpoint at resume address. */
7463 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
7465 else
7466 check_exception_resume (ecs, frame);
7467 keep_going (ecs);
7468 return;
7470 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
7472 frame_info_ptr init_frame;
7474 /* There are several cases to consider.
7476 1. The initiating frame no longer exists. In this case we
7477 must stop, because the exception or longjmp has gone too
7478 far.
7480 2. The initiating frame exists, and is the same as the
7481 current frame. We stop, because the exception or longjmp
7482 has been caught.
7484 3. The initiating frame exists and is different from the
7485 current frame. This means the exception or longjmp has
7486 been caught beneath the initiating frame, so keep going.
7488 4. longjmp breakpoint has been placed just to protect
7489 against stale dummy frames and user is not interested in
7490 stopping around longjmps. */
7492 infrun_debug_printf ("BPSTAT_WHAT_CLEAR_LONGJMP_RESUME");
7494 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
7495 != nullptr);
7496 delete_exception_resume_breakpoint (ecs->event_thread);
7498 if (what.is_longjmp)
7500 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
7502 if (!frame_id_p (ecs->event_thread->initiating_frame))
7504 /* Case 4. */
7505 keep_going (ecs);
7506 return;
7510 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
7512 if (init_frame)
7514 if (*curr_frame_id == ecs->event_thread->initiating_frame)
7516 /* Case 2. Fall through. */
7518 else
7520 /* Case 3. */
7521 keep_going (ecs);
7522 return;
7526 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
7527 exists. */
7528 delete_step_resume_breakpoint (ecs->event_thread);
7530 end_stepping_range (ecs);
7532 return;
7534 case BPSTAT_WHAT_SINGLE:
7535 infrun_debug_printf ("BPSTAT_WHAT_SINGLE");
7536 ecs->event_thread->stepping_over_breakpoint = 1;
7537 /* Still need to check other stuff, at least the case where we
7538 are stepping and step out of the right range. */
7539 break;
7541 case BPSTAT_WHAT_STEP_RESUME:
7542 infrun_debug_printf ("BPSTAT_WHAT_STEP_RESUME");
7544 delete_step_resume_breakpoint (ecs->event_thread);
7545 if (ecs->event_thread->control.proceed_to_finish
7546 && execution_direction == EXEC_REVERSE)
7548 struct thread_info *tp = ecs->event_thread;
7550 /* We are finishing a function in reverse, and just hit the
7551 step-resume breakpoint at the start address of the
7552 function, and we're almost there -- just need to back up
7553 by one more single-step, which should take us back to the
7554 function call. */
7555 tp->control.step_range_start = tp->control.step_range_end = 1;
7556 keep_going (ecs);
7557 return;
7559 fill_in_stop_func (gdbarch, ecs);
7560 if (ecs->event_thread->stop_pc () == ecs->stop_func_start
7561 && execution_direction == EXEC_REVERSE)
7563 /* We are stepping over a function call in reverse, and just
7564 hit the step-resume breakpoint at the start address of
7565 the function. Go back to single-stepping, which should
7566 take us back to the function call. */
7567 ecs->event_thread->stepping_over_breakpoint = 1;
7568 keep_going (ecs);
7569 return;
7571 break;
7573 case BPSTAT_WHAT_STOP_NOISY:
7574 infrun_debug_printf ("BPSTAT_WHAT_STOP_NOISY");
7575 stop_print_frame = true;
7577 /* Assume the thread stopped for a breakpoint. We'll still check
7578 whether a/the breakpoint is there when the thread is next
7579 resumed. */
7580 ecs->event_thread->stepping_over_breakpoint = 1;
7582 stop_waiting (ecs);
7583 return;
7585 case BPSTAT_WHAT_STOP_SILENT:
7586 infrun_debug_printf ("BPSTAT_WHAT_STOP_SILENT");
7587 stop_print_frame = false;
7589 /* Assume the thread stopped for a breakpoint. We'll still check
7590 whether a/the breakpoint is there when the thread is next
7591 resumed. */
7592 ecs->event_thread->stepping_over_breakpoint = 1;
7593 stop_waiting (ecs);
7594 return;
7596 case BPSTAT_WHAT_HP_STEP_RESUME:
7597 infrun_debug_printf ("BPSTAT_WHAT_HP_STEP_RESUME");
7599 delete_step_resume_breakpoint (ecs->event_thread);
7600 if (ecs->event_thread->step_after_step_resume_breakpoint)
7602 /* Back when the step-resume breakpoint was inserted, we
7603 were trying to single-step off a breakpoint. Go back to
7604 doing that. */
7605 ecs->event_thread->step_after_step_resume_breakpoint = 0;
7606 ecs->event_thread->stepping_over_breakpoint = 1;
7607 keep_going (ecs);
7608 return;
7610 break;
7612 case BPSTAT_WHAT_KEEP_CHECKING:
7613 break;
7616 /* If we stepped a permanent breakpoint and we had a high priority
7617 step-resume breakpoint for the address we stepped, but we didn't
7618 hit it, then we must have stepped into the signal handler. The
7619 step-resume was only necessary to catch the case of _not_
7620 stepping into the handler, so delete it, and fall through to
7621 checking whether the step finished. */
7622 if (ecs->event_thread->stepped_breakpoint)
7624 struct breakpoint *sr_bp
7625 = ecs->event_thread->control.step_resume_breakpoint;
7627 if (sr_bp != nullptr
7628 && sr_bp->first_loc ().permanent
7629 && sr_bp->type == bp_hp_step_resume
7630 && sr_bp->first_loc ().address == ecs->event_thread->prev_pc)
7632 infrun_debug_printf ("stepped permanent breakpoint, stopped in handler");
7633 delete_step_resume_breakpoint (ecs->event_thread);
7634 ecs->event_thread->step_after_step_resume_breakpoint = 0;
7638 /* We come here if we hit a breakpoint but should not stop for it.
7639 Possibly we also were stepping and should stop for that. So fall
7640 through and test for stepping. But, if not stepping, do not
7641 stop. */
7643 /* In all-stop mode, if we're currently stepping but have stopped in
7644 some other thread, we need to switch back to the stepped thread. */
7645 if (switch_back_to_stepped_thread (ecs))
7646 return;
7648 if (ecs->event_thread->control.step_resume_breakpoint)
7650 infrun_debug_printf ("step-resume breakpoint is inserted");
7652 /* Having a step-resume breakpoint overrides anything
7653 else having to do with stepping commands until
7654 that breakpoint is reached. */
7655 keep_going (ecs);
7656 return;
7659 if (ecs->event_thread->control.step_range_end == 0)
7661 infrun_debug_printf ("no stepping, continue");
7662 /* Likewise if we aren't even stepping. */
7663 keep_going (ecs);
7664 return;
7667 fill_in_stop_func (gdbarch, ecs);
7669 /* If stepping through a line, keep going if still within it.
7671 Note that step_range_end is the address of the first instruction
7672 beyond the step range, and NOT the address of the last instruction
7673 within it!
7675 Note also that during reverse execution, we may be stepping
7676 through a function epilogue and therefore must detect when
7677 the current-frame changes in the middle of a line. */
7679 if (pc_in_thread_step_range (ecs->event_thread->stop_pc (),
7680 ecs->event_thread)
7681 && (execution_direction != EXEC_REVERSE
7682 || *curr_frame_id == original_frame_id))
7684 infrun_debug_printf
7685 ("stepping inside range [%s-%s]",
7686 paddress (gdbarch, ecs->event_thread->control.step_range_start),
7687 paddress (gdbarch, ecs->event_thread->control.step_range_end));
7689 /* Tentatively re-enable range stepping; `resume' disables it if
7690 necessary (e.g., if we're stepping over a breakpoint or we
7691 have software watchpoints). */
7692 ecs->event_thread->control.may_range_step = 1;
7694 /* When stepping backward, stop at beginning of line range
7695 (unless it's the function entry point, in which case
7696 keep going back to the call point). */
7697 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
7698 if (stop_pc == ecs->event_thread->control.step_range_start
7699 && stop_pc != ecs->stop_func_start
7700 && execution_direction == EXEC_REVERSE)
7701 end_stepping_range (ecs);
7702 else
7703 keep_going (ecs);
7705 return;
7708 /* We stepped out of the stepping range. */
7710 /* If we are stepping at the source level and entered the runtime
7711 loader dynamic symbol resolution code...
7713 EXEC_FORWARD: we keep on single stepping until we exit the run
7714 time loader code and reach the callee's address.
7716 EXEC_REVERSE: we've already executed the callee (backward), and
7717 the runtime loader code is handled just like any other
7718 undebuggable function call. Now we need only keep stepping
7719 backward through the trampoline code, and that's handled further
7720 down, so there is nothing for us to do here. */
7722 if (execution_direction != EXEC_REVERSE
7723 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7724 && in_solib_dynsym_resolve_code (ecs->event_thread->stop_pc ())
7725 && (ecs->event_thread->control.step_start_function == nullptr
7726 || !in_solib_dynsym_resolve_code (
7727 ecs->event_thread->control.step_start_function->value_block ()
7728 ->entry_pc ())))
7730 CORE_ADDR pc_after_resolver =
7731 gdbarch_skip_solib_resolver (gdbarch, ecs->event_thread->stop_pc ());
7733 infrun_debug_printf ("stepped into dynsym resolve code");
7735 if (pc_after_resolver)
7737 /* Set up a step-resume breakpoint at the address
7738 indicated by SKIP_SOLIB_RESOLVER. */
7739 symtab_and_line sr_sal;
7740 sr_sal.pc = pc_after_resolver;
7741 sr_sal.pspace = get_frame_program_space (frame);
7743 insert_step_resume_breakpoint_at_sal (gdbarch,
7744 sr_sal, null_frame_id);
7747 keep_going (ecs);
7748 return;
7751 /* Step through an indirect branch thunk. */
7752 if (ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
7753 && gdbarch_in_indirect_branch_thunk (gdbarch,
7754 ecs->event_thread->stop_pc ()))
7756 infrun_debug_printf ("stepped into indirect branch thunk");
7757 keep_going (ecs);
7758 return;
7761 if (ecs->event_thread->control.step_range_end != 1
7762 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7763 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
7764 && get_frame_type (frame) == SIGTRAMP_FRAME)
7766 infrun_debug_printf ("stepped into signal trampoline");
7767 /* The inferior, while doing a "step" or "next", has ended up in
7768 a signal trampoline (either by a signal being delivered or by
7769 the signal handler returning). Just single-step until the
7770 inferior leaves the trampoline (either by calling the handler
7771 or returning). */
7772 keep_going (ecs);
7773 return;
7776 /* If we're in the return path from a shared library trampoline,
7777 we want to proceed through the trampoline when stepping. */
7778 /* macro/2012-04-25: This needs to come before the subroutine
7779 call check below as on some targets return trampolines look
7780 like subroutine calls (MIPS16 return thunks). */
7781 if (gdbarch_in_solib_return_trampoline (gdbarch,
7782 ecs->event_thread->stop_pc (),
7783 ecs->stop_func_name)
7784 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
7786 /* Determine where this trampoline returns. */
7787 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
7788 CORE_ADDR real_stop_pc
7789 = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
7791 infrun_debug_printf ("stepped into solib return tramp");
7793 /* Only proceed through if we know where it's going. */
7794 if (real_stop_pc)
7796 /* And put the step-breakpoint there and go until there. */
7797 symtab_and_line sr_sal;
7798 sr_sal.pc = real_stop_pc;
7799 sr_sal.section = find_pc_overlay (sr_sal.pc);
7800 sr_sal.pspace = get_frame_program_space (frame);
7802 /* Do not specify what the fp should be when we stop since
7803 on some machines the prologue is where the new fp value
7804 is established. */
7805 insert_step_resume_breakpoint_at_sal (gdbarch,
7806 sr_sal, null_frame_id);
7808 /* Restart without fiddling with the step ranges or
7809 other state. */
7810 keep_going (ecs);
7811 return;
7815 /* Check for subroutine calls. The check for the current frame
7816 equalling the step ID is not necessary - the check of the
7817 previous frame's ID is sufficient - but it is a common case and
7818 cheaper than checking the previous frame's ID.
7820 NOTE: frame_id::operator== will never report two invalid frame IDs as
7821 being equal, so to get into this block, both the current and
7822 previous frame must have valid frame IDs. */
7823 /* The outer_frame_id check is a heuristic to detect stepping
7824 through startup code. If we step over an instruction which
7825 sets the stack pointer from an invalid value to a valid value,
7826 we may detect that as a subroutine call from the mythical
7827 "outermost" function. This could be fixed by marking
7828 outermost frames as !stack_p,code_p,special_p. Then the
7829 initial outermost frame, before sp was valid, would
7830 have code_addr == &_start. See the comment in frame_id::operator==
7831 for more. */
7833 /* We want "nexti" to step into, not over, signal handlers invoked
7834 by the kernel, therefore this subroutine check should not trigger
7835 for a signal handler invocation. On most platforms, this is already
7836 not the case, as the kernel puts a signal trampoline frame onto the
7837 stack to handle proper return after the handler, and therefore at this
7838 point, the current frame is a grandchild of the step frame, not a
7839 child. However, on some platforms, the kernel actually uses a
7840 trampoline to handle *invocation* of the handler. In that case,
7841 when executing the first instruction of the trampoline, this check
7842 would erroneously detect the trampoline invocation as a subroutine
7843 call. Fix this by checking for SIGTRAMP_FRAME. */
7844 if ((get_stack_frame_id (frame)
7845 != ecs->event_thread->control.step_stack_frame_id)
7846 && get_frame_type (frame) != SIGTRAMP_FRAME
7847 && ((frame_unwind_caller_id (frame)
7848 == ecs->event_thread->control.step_stack_frame_id)
7849 && ((ecs->event_thread->control.step_stack_frame_id
7850 != outer_frame_id)
7851 || (ecs->event_thread->control.step_start_function
7852 != find_pc_function (ecs->event_thread->stop_pc ())))))
7854 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
7855 CORE_ADDR real_stop_pc;
7857 infrun_debug_printf ("stepped into subroutine");
7859 if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
7861 /* I presume that step_over_calls is only 0 when we're
7862 supposed to be stepping at the assembly language level
7863 ("stepi"). Just stop. */
7864 /* And this works the same backward as frontward. MVS */
7865 end_stepping_range (ecs);
7866 return;
7869 /* Reverse stepping through solib trampolines. */
7871 if (execution_direction == EXEC_REVERSE
7872 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
7873 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
7874 || (ecs->stop_func_start == 0
7875 && in_solib_dynsym_resolve_code (stop_pc))))
7877 /* Any solib trampoline code can be handled in reverse
7878 by simply continuing to single-step. We have already
7879 executed the solib function (backwards), and a few
7880 steps will take us back through the trampoline to the
7881 caller. */
7882 keep_going (ecs);
7883 return;
7886 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
7888 /* We're doing a "next".
7890 Normal (forward) execution: set a breakpoint at the
7891 callee's return address (the address at which the caller
7892 will resume).
7894 Reverse (backward) execution: set the step-resume
7895 breakpoint at the start of the function that we just
7896 stepped into (backwards), and continue to there. When we
7897 get there, we'll need to single-step back to the caller. */
7899 if (execution_direction == EXEC_REVERSE)
7901 /* If we're already at the start of the function, we've either
7902 just stepped backward into a single instruction function,
7903 or stepped back out of a signal handler to the first instruction
7904 of the function. Just keep going, which will single-step back
7905 to the caller. */
7906 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
7908 /* Normal function call return (static or dynamic). */
7909 symtab_and_line sr_sal;
7910 sr_sal.pc = ecs->stop_func_start;
7911 sr_sal.pspace = get_frame_program_space (frame);
7912 insert_step_resume_breakpoint_at_sal (gdbarch,
7913 sr_sal, get_stack_frame_id (frame));
7916 else
7917 insert_step_resume_breakpoint_at_caller (frame);
7919 keep_going (ecs);
7920 return;
7923 /* If we are in a function call trampoline (a stub between the
7924 calling routine and the real function), locate the real
7925 function. That's what tells us (a) whether we want to step
7926 into it at all, and (b) what prologue we want to run to the
7927 end of, if we do step into it. */
7928 real_stop_pc = skip_language_trampoline (frame, stop_pc);
7929 if (real_stop_pc == 0)
7930 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
7931 if (real_stop_pc != 0)
7932 ecs->stop_func_start = real_stop_pc;
7934 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
7936 symtab_and_line sr_sal;
7937 sr_sal.pc = ecs->stop_func_start;
7938 sr_sal.pspace = get_frame_program_space (frame);
7940 insert_step_resume_breakpoint_at_sal (gdbarch,
7941 sr_sal, null_frame_id);
7942 keep_going (ecs);
7943 return;
7946 /* If we have line number information for the function we are
7947 thinking of stepping into and the function isn't on the skip
7948 list, step into it.
7950 If there are several symtabs at that PC (e.g. with include
7951 files), we just want to know whether *any* of them have line
7952 numbers. find_pc_line handles this. */
7954 struct symtab_and_line tmp_sal;
7956 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
7957 if (tmp_sal.line != 0
7958 && !function_name_is_marked_for_skip (ecs->stop_func_name,
7959 tmp_sal)
7960 && !inline_frame_is_marked_for_skip (true, ecs->event_thread))
7962 if (execution_direction == EXEC_REVERSE)
7963 handle_step_into_function_backward (gdbarch, ecs);
7964 else
7965 handle_step_into_function (gdbarch, ecs);
7966 return;
7970 /* If we have no line number and the step-stop-if-no-debug is
7971 set, we stop the step so that the user has a chance to switch
7972 to assembly mode. */
7973 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7974 && step_stop_if_no_debug)
7976 end_stepping_range (ecs);
7977 return;
7980 if (execution_direction == EXEC_REVERSE)
7982 /* If we're already at the start of the function, we've either just
7983 stepped backward into a single instruction function without line
7984 number info, or stepped back out of a signal handler to the first
7985 instruction of the function without line number info. Just keep
7986 going, which will single-step back to the caller. */
7987 if (ecs->stop_func_start != stop_pc)
7989 /* Set a breakpoint at callee's start address.
7990 From there we can step once and be back in the caller. */
7991 symtab_and_line sr_sal;
7992 sr_sal.pc = ecs->stop_func_start;
7993 sr_sal.pspace = get_frame_program_space (frame);
7994 insert_step_resume_breakpoint_at_sal (gdbarch,
7995 sr_sal, null_frame_id);
7998 else
7999 /* Set a breakpoint at callee's return address (the address
8000 at which the caller will resume). */
8001 insert_step_resume_breakpoint_at_caller (frame);
8003 keep_going (ecs);
8004 return;
8007 /* Reverse stepping through solib trampolines. */
8009 if (execution_direction == EXEC_REVERSE
8010 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
8012 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
8014 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
8015 || (ecs->stop_func_start == 0
8016 && in_solib_dynsym_resolve_code (stop_pc)))
8018 /* Any solib trampoline code can be handled in reverse
8019 by simply continuing to single-step. We have already
8020 executed the solib function (backwards), and a few
8021 steps will take us back through the trampoline to the
8022 caller. */
8023 keep_going (ecs);
8024 return;
8026 else if (in_solib_dynsym_resolve_code (stop_pc))
8028 /* Stepped backward into the solib dynsym resolver.
8029 Set a breakpoint at its start and continue, then
8030 one more step will take us out. */
8031 symtab_and_line sr_sal;
8032 sr_sal.pc = ecs->stop_func_start;
8033 sr_sal.pspace = get_frame_program_space (frame);
8034 insert_step_resume_breakpoint_at_sal (gdbarch,
8035 sr_sal, null_frame_id);
8036 keep_going (ecs);
8037 return;
8041 /* This always returns the sal for the inner-most frame when we are in a
8042 stack of inlined frames, even if GDB actually believes that it is in a
8043 more outer frame. This is checked for below by calls to
8044 inline_skipped_frames. */
8045 stop_pc_sal = find_pc_line (ecs->event_thread->stop_pc (), 0);
8047 /* NOTE: tausq/2004-05-24: This if block used to be done before all
8048 the trampoline processing logic, however, there are some trampolines
8049 that have no names, so we should do trampoline handling first. */
8050 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
8051 && ecs->stop_func_name == nullptr
8052 && stop_pc_sal.line == 0)
8054 infrun_debug_printf ("stepped into undebuggable function");
8056 /* The inferior just stepped into, or returned to, an
8057 undebuggable function (where there is no debugging information
8058 and no line number corresponding to the address where the
8059 inferior stopped). Since we want to skip this kind of code,
8060 we keep going until the inferior returns from this
8061 function - unless the user has asked us not to (via
8062 set step-mode) or we no longer know how to get back
8063 to the call site. */
8064 if (step_stop_if_no_debug
8065 || !frame_id_p (frame_unwind_caller_id (frame)))
8067 /* If we have no line number and the step-stop-if-no-debug
8068 is set, we stop the step so that the user has a chance to
8069 switch to assembly mode. */
8070 end_stepping_range (ecs);
8071 return;
8073 else
8075 /* Set a breakpoint at callee's return address (the address
8076 at which the caller will resume). */
8077 insert_step_resume_breakpoint_at_caller (frame);
8078 keep_going (ecs);
8079 return;
8083 if (execution_direction == EXEC_REVERSE
8084 && ecs->event_thread->control.proceed_to_finish
8085 && ecs->event_thread->stop_pc () >= ecs->stop_func_alt_start
8086 && ecs->event_thread->stop_pc () < ecs->stop_func_start)
8088 /* We are executing the reverse-finish command.
8089 If the system supports multiple entry points and we are finishing a
8090 function in reverse, then: if we are between the entry points,
8091 single-step back to the alternate entry point; if we are at the
8092 alternate entry point, we just need to back up by one more
8093 single-step, which should take us back to the function call. */
8094 ecs->event_thread->control.step_range_start
8095 = ecs->event_thread->control.step_range_end = 1;
8096 keep_going (ecs);
8097 return;
8101 if (ecs->event_thread->control.step_range_end == 1)
8103 /* It is stepi or nexti. We always want to stop stepping after
8104 one instruction. */
8105 infrun_debug_printf ("stepi/nexti");
8106 end_stepping_range (ecs);
8107 return;
8110 if (stop_pc_sal.line == 0)
8112 /* We have no line number information. That means to stop
8113 stepping (does this always happen right after one instruction,
8114 when we do "s" in a function with no line numbers,
8115 or can this happen as a result of a return or longjmp?). */
8116 infrun_debug_printf ("no line number info");
8117 end_stepping_range (ecs);
8118 return;
8121 /* Handle the case when subroutines have multiple ranges. When we step
8122 from one part to the next part of the same subroutine, all subroutine
8123 levels that begin here are skipped again. Compensate for this by
8124 removing those skipped subroutines, which were already executing
8125 from the user's perspective. */
8127 if (get_stack_frame_id (frame)
8128 == ecs->event_thread->control.step_stack_frame_id
8129 && inline_skipped_frames (ecs->event_thread) > 0
8130 && ecs->event_thread->control.step_frame_id.artificial_depth > 0
8131 && ecs->event_thread->control.step_frame_id.code_addr_p)
8133 int depth = 0;
8134 const struct block *prev
8135 = block_for_pc (ecs->event_thread->control.step_frame_id.code_addr);
8136 const struct block *curr = block_for_pc (ecs->event_thread->stop_pc ());
8137 while (curr != nullptr && !curr->contains (prev))
8139 if (curr->inlined_p ())
8140 depth++;
8141 else if (curr->function () != nullptr)
8142 break;
8143 curr = curr->superblock ();
8145 while (inline_skipped_frames (ecs->event_thread) > depth)
8146 step_into_inline_frame (ecs->event_thread);
8149 /* Look for "calls" to inlined functions, part one. If the inline
8150 frame machinery detected some skipped call sites, we have entered
8151 a new inline function. */
8153 if ((*curr_frame_id == original_frame_id)
8154 && inline_skipped_frames (ecs->event_thread))
8156 infrun_debug_printf ("stepped into inlined function");
8158 symtab_and_line call_sal = find_frame_sal (frame);
8160 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
8162 /* For "step", we're going to stop. But if the call site
8163 for this inlined function is on the same source line as
8164 we were previously stepping, go down into the function
8165 first. Otherwise stop at the call site. */
8167 if (call_sal.line == ecs->event_thread->current_line
8168 && call_sal.symtab == ecs->event_thread->current_symtab)
8170 step_into_inline_frame (ecs->event_thread);
8171 if (inline_frame_is_marked_for_skip (false, ecs->event_thread))
8173 keep_going (ecs);
8174 return;
8178 end_stepping_range (ecs);
8179 return;
8181 else
8183 /* For "next", we should stop at the call site if it is on a
8184 different source line. Otherwise continue through the
8185 inlined function. */
8186 if (call_sal.line == ecs->event_thread->current_line
8187 && call_sal.symtab == ecs->event_thread->current_symtab)
8188 keep_going (ecs);
8189 else
8190 end_stepping_range (ecs);
8191 return;
8195 /* Look for "calls" to inlined functions, part two. If we are still
8196 in the same real function we were stepping through, but we have
8197 to go further up to find the exact frame ID, we are stepping
8198 through a more inlined call beyond its call site. */
8200 if (get_frame_type (frame) == INLINE_FRAME
8201 && (*curr_frame_id != original_frame_id)
8202 && stepped_in_from (frame, original_frame_id))
8204 infrun_debug_printf ("stepping through inlined function");
8206 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL
8207 || inline_frame_is_marked_for_skip (false, ecs->event_thread))
8208 keep_going (ecs);
8209 else
8210 end_stepping_range (ecs);
8211 return;
8214 bool refresh_step_info = true;
8215 if ((ecs->event_thread->stop_pc () == stop_pc_sal.pc)
8216 && (ecs->event_thread->current_line != stop_pc_sal.line
8217 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
8219 /* We are at a different line. */
8221 if (stop_pc_sal.is_stmt)
8223 if (execution_direction == EXEC_REVERSE)
8225 /* We are stepping backwards; make sure we have reached the
8226 beginning of the line. */
8227 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
8228 CORE_ADDR start_line_pc
8229 = update_line_range_start (stop_pc, ecs);
8231 if (stop_pc != start_line_pc)
8233 /* Have not reached the beginning of the source code line.
8234 Set a step range. Execution should stop in any function
8235 calls we execute back into before reaching the beginning
8236 of the line. */
8237 ecs->event_thread->control.step_range_start
8238 = start_line_pc;
8239 ecs->event_thread->control.step_range_end = stop_pc;
8240 set_step_info (ecs->event_thread, frame, stop_pc_sal);
8241 keep_going (ecs);
8242 return;
8246 /* We are at the start of a statement.
8248 So stop. Note that we don't stop if we step into the middle of a
8249 statement. That is said to make things like for (;;) statements
8250 work better. */
8251 infrun_debug_printf ("stepped to a different line");
8252 end_stepping_range (ecs);
8253 return;
8255 else if (*curr_frame_id == original_frame_id)
8257 /* We are not at the start of a statement, and we have not changed
8258 frame.
8260 We ignore this line table entry, and continue stepping forward,
8261 looking for a better place to stop. */
8262 refresh_step_info = false;
8263 infrun_debug_printf ("stepped to a different line, but "
8264 "it's not the start of a statement");
8266 else
8268 /* We are not at the start of a statement, and we have changed frame.
8270 We ignore this line table entry, and continue stepping forward,
8271 looking for a better place to stop. Keep refresh_step_info at
8272 true to note that the frame has changed, but ignore the line
8273 number to make sure we don't ignore a subsequent entry with the
8274 same line number. */
8275 stop_pc_sal.line = 0;
8276 infrun_debug_printf ("stepped to a different frame, but "
8277 "it's not the start of a statement");
8281 if (execution_direction == EXEC_REVERSE
8282 && *curr_frame_id != original_frame_id
8283 && original_frame_id.code_addr_p && curr_frame_id->code_addr_p
8284 && original_frame_id.code_addr == curr_frame_id->code_addr)
8286 /* If we enter here, we're leaving a recursive function call. In this
8287 situation, we shouldn't refresh the step information, because if we
8288 do, we'll lose the frame_id of when we started stepping, and this
8289 will make GDB not know we need to print frame information. */
8290 refresh_step_info = false;
8291 infrun_debug_printf ("reverse stepping, left a recursive call, don't "
8292 "update step info so we remember we left a frame");
8295 /* We aren't done stepping.
8297 Optimize by setting the stepping range to the line.
8298 (We might not be in the original line, but if we entered a
8299 new line in mid-statement, we continue stepping. This makes
8300 things like for(;;) statements work better.)
8302 If we entered a SAL that indicates a non-statement line table entry,
8303 then we update the stepping range, but we don't update the step info,
8304 which includes things like the line number we are stepping away from.
8305 This means we will stop when we find a line table entry that is marked
8306 as is-statement, even if it matches the non-statement one we just
8307 stepped into. */
8309 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
8310 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
8311 ecs->event_thread->control.may_range_step = 1;
8312 infrun_debug_printf
8313 ("updated step range, start = %s, end = %s, may_range_step = %d",
8314 paddress (gdbarch, ecs->event_thread->control.step_range_start),
8315 paddress (gdbarch, ecs->event_thread->control.step_range_end),
8316 ecs->event_thread->control.may_range_step);
8317 if (refresh_step_info)
8318 set_step_info (ecs->event_thread, frame, stop_pc_sal);
8320 infrun_debug_printf ("keep going");
8322 if (execution_direction == EXEC_REVERSE)
8324 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
8326 /* Make sure the stop_pc is set to the beginning of the line. */
8327 if (stop_pc != ecs->event_thread->control.step_range_start)
8328 ecs->event_thread->control.step_range_start
8329 = update_line_range_start (stop_pc, ecs);
8332 keep_going (ecs);
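/* An illustrative sketch (a hypothetical helper, not the real
   pc_in_thread_step_range): the step-range tests above treat
   [step_range_start, step_range_end) as a half-open interval --
   step_range_end is the first address beyond the range, not the last
   address inside it.  */

static bool
pc_in_step_range_example (CORE_ADDR pc, const thread_info *tp)
{
  return (pc >= tp->control.step_range_start
          && pc < tp->control.step_range_end);
}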
8335 static bool restart_stepped_thread (process_stratum_target *resume_target,
8336 ptid_t resume_ptid);
8338 /* In all-stop mode, if we're currently stepping but have stopped in
8339 some other thread, we may need to switch back to the stepped
8340 thread. Returns true if we set the inferior running, false if we left
8341 it stopped (and the event needs further processing). */
8343 static bool
8344 switch_back_to_stepped_thread (struct execution_control_state *ecs)
8346 if (!target_is_non_stop_p ())
8348 /* If any thread is blocked on some internal breakpoint, and we
8349 simply need to step over that breakpoint to get it going
8350 again, do that first. */
8352 /* However, if we see an event for the stepping thread, then we
8353 know all other threads have been moved past their breakpoints
8354 already. Let the caller check whether the step is finished,
8355 etc., before deciding to move it past a breakpoint. */
8356 if (ecs->event_thread->control.step_range_end != 0)
8357 return false;
8359 /* Check if the current thread is blocked on an incomplete
8360 step-over, interrupted by a random signal. */
8361 if (ecs->event_thread->control.trap_expected
8362 && ecs->event_thread->stop_signal () != GDB_SIGNAL_TRAP)
8364 infrun_debug_printf
8365 ("need to finish step-over of [%s]",
8366 ecs->event_thread->ptid.to_string ().c_str ());
8367 keep_going (ecs);
8368 return true;
8371 /* Check if the current thread is blocked by a single-step
8372 breakpoint of another thread. */
8373 if (ecs->hit_singlestep_breakpoint)
8375 infrun_debug_printf ("need to step [%s] over single-step breakpoint",
8376 ecs->ptid.to_string ().c_str ());
8377 keep_going (ecs);
8378 return true;
8381 /* If this thread needs yet another step-over (e.g., stepping
8382 through a delay slot), do it first before moving on to
8383 another thread. */
8384 if (thread_still_needs_step_over (ecs->event_thread))
8386 infrun_debug_printf
8387 ("thread [%s] still needs step-over",
8388 ecs->event_thread->ptid.to_string ().c_str ());
8389 keep_going (ecs);
8390 return true;
8393 /* If scheduler locking applies even if not stepping, there's no
8394 need to walk over threads. Above we've checked whether the
8395 current thread is stepping. If some thread other than the
8396 event thread is stepping, then it must be that scheduler
8397 locking is not in effect. */
8398 if (schedlock_applies (ecs->event_thread))
8399 return false;
8401 /* Otherwise, we no longer expect a trap in the current thread.
8402 Clear the trap_expected flag before switching back -- this is
8403 what keep_going does as well, if we call it. */
8404 ecs->event_thread->control.trap_expected = 0;
8406 /* Likewise, clear the signal if it should not be passed. */
8407 if (!signal_program[ecs->event_thread->stop_signal ()])
8408 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
8410 if (restart_stepped_thread (ecs->target, ecs->ptid))
8412 prepare_to_wait (ecs);
8413 return true;
8416 switch_to_thread (ecs->event_thread);
8419 return false;
8422 /* Look for the thread that was stepping, and resume it.
8423 RESUME_TARGET / RESUME_PTID indicate the set of threads the caller
8424 is resuming. Return true if a thread was started, false
8425 otherwise. */
8427 static bool
8428 restart_stepped_thread (process_stratum_target *resume_target,
8429 ptid_t resume_ptid)
8431 /* Do all pending step-overs before actually proceeding with
8432 step/next/etc. */
8433 if (start_step_over ())
8434 return true;
8436 for (thread_info *tp : all_threads_safe ())
8438 if (tp->state == THREAD_EXITED)
8439 continue;
8441 if (tp->has_pending_waitstatus ())
8442 continue;
8444 /* Ignore threads of processes the caller is not
8445 resuming. */
8446 if (!sched_multi
8447 && (tp->inf->process_target () != resume_target
8448 || tp->inf->pid != resume_ptid.pid ()))
8449 continue;
8451 if (tp->control.trap_expected)
8453 infrun_debug_printf ("switching back to stepped thread (step-over)");
8455 if (keep_going_stepped_thread (tp))
8456 return true;
8460 for (thread_info *tp : all_threads_safe ())
8462 if (tp->state == THREAD_EXITED)
8463 continue;
8465 if (tp->has_pending_waitstatus ())
8466 continue;
8468 /* Ignore threads of processes the caller is not
8469 resuming. */
8470 if (!sched_multi
8471 && (tp->inf->process_target () != resume_target
8472 || tp->inf->pid != resume_ptid.pid ()))
8473 continue;
8475 /* Did we find the stepping thread? */
8476 if (tp->control.step_range_end)
8478 infrun_debug_printf ("switching back to stepped thread (stepping)");
8480 if (keep_going_stepped_thread (tp))
8481 return true;
8485 return false;
8488 /* See infrun.h. */
8490 void
8491 restart_after_all_stop_detach (process_stratum_target *proc_target)
8493 /* Note we don't check target_is_non_stop_p() here, because the
8494 current inferior may no longer have a process_stratum target
8495 pushed, as we just detached. */
8497 /* See if we have a THREAD_RUNNING thread that needs to be
8498 re-resumed. If we have any thread that is already executing,
8499 then we don't need to resume the target -- it has already been
8500 resumed. With the remote target (in all-stop), it's even
8501 impossible to issue another resumption if the target is already
8502 resumed, until the target reports a stop. */
8503 for (thread_info *thr : all_threads (proc_target))
8505 if (thr->state != THREAD_RUNNING)
8506 continue;
8508 /* If we have any thread that is already executing, then we
8509 don't need to resume the target -- it has already been
8510 resumed. */
8511 if (thr->executing ())
8512 return;
8514 /* If we have a pending event to process, skip resuming the
8515 target and go straight to processing it. */
8516 if (thr->resumed () && thr->has_pending_waitstatus ())
8517 return;
8520 /* Alright, we need to re-resume the target. If a thread was
8521 stepping, we need to restart it stepping. */
8522 if (restart_stepped_thread (proc_target, minus_one_ptid))
8523 return;
8525 /* Otherwise, find the first THREAD_RUNNING thread and resume
8526 it. */
8527 for (thread_info *thr : all_threads (proc_target))
8529 if (thr->state != THREAD_RUNNING)
8530 continue;
8532 execution_control_state ecs (thr);
8533 switch_to_thread (thr);
8534 keep_going (&ecs);
8535 return;
8539 /* Set a previously stepped thread back to stepping. Returns true on
8540 success, false if the resume is not possible (e.g., the thread
8541 vanished). */
8543 static bool
8544 keep_going_stepped_thread (struct thread_info *tp)
8546 frame_info_ptr frame;
8548 /* If the stepping thread exited, then don't try to switch back and
8549 resume it, which could fail in several different ways depending
8550 on the target. Instead, just keep going.
8552 We can find a dead stepping thread in the thread list in two
8553 cases:
8555 - The target supports thread exit events, and when the target
8556 tries to delete the thread from the thread list, inferior_ptid
8557 pointed at the exiting thread. In such a case, calling
8558 delete_thread does not really remove the thread from the list;
8559 instead, the thread is left listed, with 'exited' state.
8561 - The target's debug interface does not support thread exit
8562 events, and so we have no idea whatsoever if the previously
8563 stepping thread is still alive. For that reason, we need to
8564 synchronously query the target now. */
8566 if (tp->state == THREAD_EXITED || !target_thread_alive (tp->ptid))
8568 infrun_debug_printf ("not resuming previously stepped thread, it has "
8569 "vanished");
8571 delete_thread (tp);
8572 return false;
8575 infrun_debug_printf ("resuming previously stepped thread");
8577 execution_control_state ecs (tp);
8578 switch_to_thread (tp);
8580 tp->set_stop_pc (regcache_read_pc (get_thread_regcache (tp)));
8581 frame = get_current_frame ();
8583 /* If the PC of the thread we were trying to single-step has
8584 changed, then that thread has trapped or been signaled, but the
8585 event has not been reported to GDB yet. Re-poll the target
8586 looking for this particular thread's event (i.e. temporarily
8587 enable schedlock) by:
8589 - setting a break at the current PC
8590 - resuming that particular thread, only (by setting trap
8591 expected)
8593 This prevents us from continuously moving the single-step breakpoint
8594 forward, one instruction at a time, and overstepping. */
8596 if (tp->stop_pc () != tp->prev_pc)
8598 ptid_t resume_ptid;
8600 infrun_debug_printf ("expected thread advanced also (%s -> %s)",
8601 paddress (current_inferior ()->arch (), tp->prev_pc),
8602 paddress (current_inferior ()->arch (),
8603 tp->stop_pc ()));
8605 /* Clear the info of the previous step-over, as it's no longer
8606 valid (if the thread was trying to step over a breakpoint, it
8607 has already succeeded). It's what keep_going would do too,
8608 if we called it. Do this before trying to insert the sss
8609 breakpoint, otherwise if we were previously trying to step
8610 over this exact address in another thread, the breakpoint is
8611 skipped. */
8612 clear_step_over_info ();
8613 tp->control.trap_expected = 0;
8615 insert_single_step_breakpoint (get_frame_arch (frame),
8616 get_frame_address_space (frame),
8617 tp->stop_pc ());
8619 tp->set_resumed (true);
8620 resume_ptid = internal_resume_ptid (tp->control.stepping_command);
8621 do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
8623 else
8625 infrun_debug_printf ("expected thread still hasn't advanced");
8627 keep_going_pass_signal (&ecs);
8630 return true;
8633 /* Is thread TP in the middle of (software or hardware)
8634 single-stepping? (Note the result of this function must never be
8635 passed directly as target_resume's STEP parameter.) */
8637 static bool
8638 currently_stepping (struct thread_info *tp)
8640 return ((tp->control.step_range_end
8641 && tp->control.step_resume_breakpoint == nullptr)
8642 || tp->control.trap_expected
8643 || tp->stepped_breakpoint
8644 || bpstat_should_step ());
8647 /* Inferior has stepped into a subroutine call with source code that
8648 we should not step over. Step to the first line of code in
8649 it. */
8651 static void
8652 handle_step_into_function (struct gdbarch *gdbarch,
8653 struct execution_control_state *ecs)
8655 fill_in_stop_func (gdbarch, ecs);
8657 compunit_symtab *cust
8658 = find_pc_compunit_symtab (ecs->event_thread->stop_pc ());
8659 if (cust != nullptr && cust->language () != language_asm)
8660 ecs->stop_func_start
8661 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
8663 symtab_and_line stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
8664 /* Use the step_resume_break to step until the end of the prologue,
8665 even if that involves jumps (as it seems to on the vax under
8666 4.2). */
8667 /* If the prologue ends in the middle of a source line, continue to
8668 the end of that source line (if it is still within the function).
8669 Otherwise, just go to end of prologue. */
8670 if (stop_func_sal.end
8671 && stop_func_sal.pc != ecs->stop_func_start
8672 && stop_func_sal.end < ecs->stop_func_end)
8673 ecs->stop_func_start = stop_func_sal.end;
8675 /* Architectures which require breakpoint adjustment might not be able
8676 to place a breakpoint at the computed address. If so, the test
8677 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
8678 ecs->stop_func_start to an address at which a breakpoint may be
8679 legitimately placed.
8681 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
8682 made, GDB will enter an infinite loop when stepping through
8683 optimized code consisting of VLIW instructions which contain
8684 subinstructions corresponding to different source lines. On
8685 FR-V, it's not permitted to place a breakpoint on any but the
8686 first subinstruction of a VLIW instruction. When a breakpoint is
8687 set, GDB will adjust the breakpoint address to the beginning of
8688 the VLIW instruction. Thus, we need to make the corresponding
8689 adjustment here when computing the stop address. */
8691 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
8693 ecs->stop_func_start
8694 = gdbarch_adjust_breakpoint_address (gdbarch,
8695 ecs->stop_func_start);
8698 if (ecs->stop_func_start == ecs->event_thread->stop_pc ())
8700 /* We are already there: stop now. */
8701 end_stepping_range (ecs);
8702 return;
8704 else
8706 /* Put the step-breakpoint there and go until there. */
8707 symtab_and_line sr_sal;
8708 sr_sal.pc = ecs->stop_func_start;
8709 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
8710 sr_sal.pspace = get_frame_program_space (get_current_frame ());
8712 /* Do not specify what the fp should be when we stop since on
8713 some machines the prologue is where the new fp value is
8714 established. */
8715 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
8717 /* And make sure stepping stops right away then. */
8718 ecs->event_thread->control.step_range_end
8719 = ecs->event_thread->control.step_range_start;
8721 keep_going (ecs);
8724 /* Inferior has stepped backward into a subroutine call with source
8725 code that we should not step over. Step to the beginning of the
8726 last line of code in it. */
8728 static void
8729 handle_step_into_function_backward (struct gdbarch *gdbarch,
8730 struct execution_control_state *ecs)
8732 struct compunit_symtab *cust;
8733 struct symtab_and_line stop_func_sal;
8735 fill_in_stop_func (gdbarch, ecs);
8737 cust = find_pc_compunit_symtab (ecs->event_thread->stop_pc ());
8738 if (cust != nullptr && cust->language () != language_asm)
8739 ecs->stop_func_start
8740 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
8742 stop_func_sal = find_pc_line (ecs->event_thread->stop_pc (), 0);
8744 /* OK, we're just going to keep stepping here. */
8745 if (stop_func_sal.pc == ecs->event_thread->stop_pc ())
8747 /* We're there already. Just stop stepping now. */
8748 end_stepping_range (ecs);
8750 else
8752 /* Else just reset the step range and keep going.
8753 No step-resume breakpoint, they don't work for
8754 epilogues, which can have multiple entry paths. */
8755 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
8756 ecs->event_thread->control.step_range_end = stop_func_sal.end;
8757 keep_going (ecs);
8759 return;
8762 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
8763 This is used both to skip over functions and to skip over code. */
8765 static void
8766 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
8767 struct symtab_and_line sr_sal,
8768 struct frame_id sr_id,
8769 enum bptype sr_type)
8771 /* There should never be more than one step-resume or longjmp-resume
8772 breakpoint per thread, so we should never be setting a new
8773 step_resume_breakpoint when one is already active. */
8774 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == nullptr);
8775 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
8777 infrun_debug_printf ("inserting step-resume breakpoint at %s",
8778 paddress (gdbarch, sr_sal.pc));
8780 inferior_thread ()->control.step_resume_breakpoint
8781 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type).release ();
8784 void
8785 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
8786 struct symtab_and_line sr_sal,
8787 struct frame_id sr_id)
8789 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
8790 sr_sal, sr_id,
8791 bp_step_resume);
8794 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
8795 This is used to skip a potential signal handler.
8797 This is called with the interrupted function's frame. The signal
8798 handler, when it returns, will resume the interrupted function at
8799 RETURN_FRAME.pc. */
8801 static void
8802 insert_hp_step_resume_breakpoint_at_frame (const frame_info_ptr &return_frame)
8804 gdb_assert (return_frame != nullptr);
8806 struct gdbarch *gdbarch = get_frame_arch (return_frame);
8808 symtab_and_line sr_sal;
8809 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
8810 sr_sal.section = find_pc_overlay (sr_sal.pc);
8811 sr_sal.pspace = get_frame_program_space (return_frame);
8813 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
8814 get_stack_frame_id (return_frame),
8815 bp_hp_step_resume);
8818 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
8819 is used to skip a function after stepping into it (for "next" or if
8820 the called function has no debugging information).
8822 The current function has almost always been reached by single
8823 stepping a call or return instruction. NEXT_FRAME belongs to the
8824 current function, and the breakpoint will be set at the caller's
8825 resume address.
8827 This is a separate function rather than reusing
8828 insert_hp_step_resume_breakpoint_at_frame in order to avoid
8829 get_prev_frame, which may stop prematurely (see the implementation
8830 of frame_unwind_caller_id for an example). */
8832 static void
8833 insert_step_resume_breakpoint_at_caller (const frame_info_ptr &next_frame)
8835 /* We shouldn't have gotten here if we don't know where the call site
8836 is. */
8837 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
8839 struct gdbarch *gdbarch = frame_unwind_caller_arch (next_frame);
8841 symtab_and_line sr_sal;
8842 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
8843 frame_unwind_caller_pc (next_frame));
8844 sr_sal.section = find_pc_overlay (sr_sal.pc);
8845 sr_sal.pspace = frame_unwind_program_space (next_frame);
8847 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
8848 frame_unwind_caller_id (next_frame));
8851 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
8852 new breakpoint at the target of a jmp_buf. The handling of
8853 longjmp-resume uses the same mechanisms used for handling
8854 "step-resume" breakpoints. */
8856 static void
8857 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
8859 /* There should never be more than one longjmp-resume breakpoint per
8860 thread, so we should never be setting a new
8861 longjmp_resume_breakpoint when one is already active. */
8862 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == nullptr);
8864 infrun_debug_printf ("inserting longjmp-resume breakpoint at %s",
8865 paddress (gdbarch, pc));
8867 inferior_thread ()->control.exception_resume_breakpoint =
8868 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume).release ();
8871 /* Insert an exception resume breakpoint. TP is the thread throwing
8872 the exception. The block B is the block of the unwinder debug hook
8873 function. FRAME is the frame corresponding to the call to this
8874 function. SYM is the symbol of the function argument holding the
8875 target PC of the exception. */
8877 static void
8878 insert_exception_resume_breakpoint (struct thread_info *tp,
8879 const struct block *b,
8880 const frame_info_ptr &frame,
8881 struct symbol *sym)
8885 struct block_symbol vsym;
8886 struct value *value;
8887 CORE_ADDR handler;
8888 struct breakpoint *bp;
8890 vsym = lookup_symbol_search_name (sym->search_name (),
8891 b, SEARCH_VAR_DOMAIN);
8892 value = read_var_value (vsym.symbol, vsym.block, frame);
8893 /* If the value was optimized out, revert to the old behavior. */
8894 if (! value->optimized_out ())
8896 handler = value_as_address (value);
8898 infrun_debug_printf ("exception resume at %lx",
8899 (unsigned long) handler);
8901 /* set_momentary_breakpoint_at_pc creates a thread-specific
8902 breakpoint for the current inferior thread. */
8903 gdb_assert (tp == inferior_thread ());
8904 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
8905 handler,
8906 bp_exception_resume).release ();
8908 tp->control.exception_resume_breakpoint = bp;
8911 catch (const gdb_exception_error &e)
8913 /* We want to ignore errors here. */
8917 /* A helper for check_exception_resume that sets an
8918 exception-breakpoint based on a SystemTap probe. */
8920 static void
8921 insert_exception_resume_from_probe (struct thread_info *tp,
8922 const struct bound_probe *probe,
8923 const frame_info_ptr &frame)
8925 struct value *arg_value;
8926 CORE_ADDR handler;
8927 struct breakpoint *bp;
8929 arg_value = probe_safe_evaluate_at_pc (frame, 1);
8930 if (!arg_value)
8931 return;
8933 handler = value_as_address (arg_value);
8935 infrun_debug_printf ("exception resume at %s",
8936 paddress (probe->objfile->arch (), handler));
8938 /* set_momentary_breakpoint_at_pc creates a thread-specific breakpoint
8939 for the current inferior thread. */
8940 gdb_assert (tp == inferior_thread ());
8941 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
8942 handler, bp_exception_resume).release ();
8943 tp->control.exception_resume_breakpoint = bp;
8946 /* This is called when an exception has been intercepted. Check to
8947 see whether the exception's destination is of interest, and if so,
8948 set an exception resume breakpoint there. */
8950 static void
8951 check_exception_resume (struct execution_control_state *ecs,
8952 const frame_info_ptr &frame)
8954 struct bound_probe probe;
8955 struct symbol *func;
8957 /* First see if this exception unwinding breakpoint was set via a
8958 SystemTap probe point. If so, the probe has two arguments: the
8959 CFA and the HANDLER. We ignore the CFA, extract the handler, and
8960 set a breakpoint there. */
8961 probe = find_probe_by_pc (get_frame_pc (frame));
8962 if (probe.prob)
8964 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
8965 return;
8968 func = get_frame_function (frame);
8969 if (!func)
8970 return;
8974 const struct block *b;
8975 int argno = 0;
8977 /* The exception breakpoint is a thread-specific breakpoint on
8978 the unwinder's debug hook, declared as:
8980 void _Unwind_DebugHook (void *cfa, void *handler);
8982 The CFA argument indicates the frame to which control is
8983 about to be transferred. HANDLER is the destination PC.
8985 We ignore the CFA and set a temporary breakpoint at HANDLER.
8986 This is not extremely efficient but it avoids issues in gdb
8987 with computing the DWARF CFA, and it also works even in weird
8988 cases such as throwing an exception from inside a signal
8989 handler. */
8991 b = func->value_block ();
8992 for (struct symbol *sym : block_iterator_range (b))
8994 if (!sym->is_argument ())
8995 continue;
8997 if (argno == 0)
8998 ++argno;
8999 else
9001 insert_exception_resume_breakpoint (ecs->event_thread,
9002 b, frame, sym);
9003 break;
9007 catch (const gdb_exception_error &e)
9012 static void
9013 stop_waiting (struct execution_control_state *ecs)
9015 infrun_debug_printf ("stop_waiting");
9017 /* Let callers know we don't want to wait for the inferior anymore. */
9018 ecs->wait_some_more = 0;
9021 /* Like keep_going, but passes the signal to the inferior, even if the
9022 signal is set to nopass. */
9024 static void
9025 keep_going_pass_signal (struct execution_control_state *ecs)
9027 gdb_assert (ecs->event_thread->ptid == inferior_ptid);
9028 gdb_assert (!ecs->event_thread->resumed ());
9030 /* Save the pc before execution, to compare with pc after stop. */
9031 ecs->event_thread->prev_pc
9032 = regcache_read_pc_protected (get_thread_regcache (ecs->event_thread));
9034 if (ecs->event_thread->control.trap_expected)
9036 struct thread_info *tp = ecs->event_thread;
9038 infrun_debug_printf ("%s has trap_expected set, "
9039 "resuming to collect trap",
9040 tp->ptid.to_string ().c_str ());
9042 /* We haven't yet gotten our trap, and either: intercepted a
9043 non-signal event (e.g., a fork); or took a signal which we
9044 are supposed to pass through to the inferior. Simply
9045 continue. */
9046 resume (ecs->event_thread->stop_signal ());
9048 else if (step_over_info_valid_p ())
9050 /* Another thread is stepping over a breakpoint in-line. If
9051 this thread needs a step-over too, queue the request. In
9052 either case, this resume must be deferred for later. */
9053 struct thread_info *tp = ecs->event_thread;
9055 if (ecs->hit_singlestep_breakpoint
9056 || thread_still_needs_step_over (tp))
9058 infrun_debug_printf ("step-over already in progress: "
9059 "step-over for %s deferred",
9060 tp->ptid.to_string ().c_str ());
9061 global_thread_step_over_chain_enqueue (tp);
9063 else
9064 infrun_debug_printf ("step-over in progress: resume of %s deferred",
9065 tp->ptid.to_string ().c_str ());
9067 else
9069 regcache *regcache = get_thread_regcache (ecs->event_thread);
9070 int remove_bp;
9071 int remove_wps;
9072 step_over_what step_what;
9074 /* Either the trap was not expected, but we are continuing
9075 anyway (if we got a signal, the user asked it be passed to
9076 the child)
9077 -- or --
9078 We got our expected trap, but decided we should resume from it.
9081 We're going to run this baby now!
9083 Note that insert_breakpoints won't try to re-insert
9084 already inserted breakpoints. Therefore, we don't
9085 care if breakpoints were already inserted, or not. */
9087 /* If we need to step over a breakpoint, and we're not using
9088 displaced stepping to do so, insert all breakpoints
9089 (watchpoints, etc.) but the one we're stepping over, step one
9090 instruction, and then re-insert the breakpoint when that step
9091 is finished. */
9093 step_what = thread_still_needs_step_over (ecs->event_thread);
9095 remove_bp = (ecs->hit_singlestep_breakpoint
9096 || (step_what & STEP_OVER_BREAKPOINT));
9097 remove_wps = (step_what & STEP_OVER_WATCHPOINT);
9099 /* We can't use displaced stepping if we need to step past a
9100 watchpoint. The instruction copied to the scratch pad would
9101 still trigger the watchpoint. */
9102 if (remove_bp
9103 && (remove_wps || !use_displaced_stepping (ecs->event_thread)))
9105 set_step_over_info (ecs->event_thread->inf->aspace.get (),
9106 regcache_read_pc (regcache), remove_wps,
9107 ecs->event_thread->global_num);
9109 else if (remove_wps)
9110 set_step_over_info (nullptr, 0, remove_wps, -1);
9112 /* If we now need to do an in-line step-over, we need to stop
9113 all other threads. Note this must be done before
9114 insert_breakpoints below, because that removes the breakpoint
9115 we're about to step over, otherwise other threads could miss
9116 it. */
9117 if (step_over_info_valid_p () && target_is_non_stop_p ())
9118 stop_all_threads ("starting in-line step-over");
9120 /* Stop stepping if inserting breakpoints fails. */
9123 insert_breakpoints ();
9125 catch (const gdb_exception_error &e)
9127 exception_print (gdb_stderr, e);
9128 stop_waiting (ecs);
9129 clear_step_over_info ();
9130 return;
9133 ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);
9135 resume (ecs->event_thread->stop_signal ());
9138 prepare_to_wait (ecs);
9141 /* Called when we should continue running the inferior, because the
9142 current event doesn't cause a user-visible stop. This does the
9143 resuming part; waiting for the next event is done elsewhere. */
9145 static void
9146 keep_going (struct execution_control_state *ecs)
9148 if (ecs->event_thread->control.trap_expected
9149 && ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
9150 ecs->event_thread->control.trap_expected = 0;
9152 if (!signal_program[ecs->event_thread->stop_signal ()])
9153 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
9154 keep_going_pass_signal (ecs);
9157 /* This function normally comes after a resume, before
9158 handle_inferior_event exits. It takes care of any last bits of
9159 housekeeping, and sets the all-important wait_some_more flag. */
9161 static void
9162 prepare_to_wait (struct execution_control_state *ecs)
9164 infrun_debug_printf ("prepare_to_wait");
9166 ecs->wait_some_more = 1;
9168 /* If the target can't async, emulate it by marking the infrun event
9169 handler such that as soon as we get back to the event-loop, we
9170 immediately end up in fetch_inferior_event again calling
9171 target_wait. */
9172 if (!target_can_async_p ())
9173 mark_infrun_async_event_handler ();
9176 /* We are done with the step range of a step/next/si/ni command.
9177 Called once for each n of a "step n" operation. */
9179 static void
9180 end_stepping_range (struct execution_control_state *ecs)
9182 ecs->event_thread->control.stop_step = 1;
9183 stop_waiting (ecs);
9186 /* Several print_*_reason functions to print why the inferior has stopped.
9187 We always print something when the inferior exits, or receives a signal.
9188 The rest of the cases are dealt with later on in normal_stop and
9189 print_it_typical. Ideally there should be a call to one of these
9190 print_*_reason functions from handle_inferior_event each time
9191 stop_waiting is called.
9193 Note that we don't call these directly; instead, we delegate that to
9194 the interpreters, through observers. Interpreters then call these
9195 with whatever uiout is right. */
9197 void
9198 print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
9200 annotate_signalled ();
9201 if (uiout->is_mi_like_p ())
9202 uiout->field_string
9203 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
9204 uiout->text ("\nProgram terminated with signal ");
9205 annotate_signal_name ();
9206 uiout->field_string ("signal-name",
9207 gdb_signal_to_name (siggnal));
9208 annotate_signal_name_end ();
9209 uiout->text (", ");
9210 annotate_signal_string ();
9211 uiout->field_string ("signal-meaning",
9212 gdb_signal_to_string (siggnal));
9213 annotate_signal_string_end ();
9214 uiout->text (".\n");
9215 uiout->text ("The program no longer exists.\n");
9218 void
9219 print_exited_reason (struct ui_out *uiout, int exitstatus)
9221 struct inferior *inf = current_inferior ();
9222 std::string pidstr = target_pid_to_str (ptid_t (inf->pid));
9224 annotate_exited (exitstatus);
9225 if (exitstatus)
9227 if (uiout->is_mi_like_p ())
9228 uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED));
9229 std::string exit_code_str
9230 = string_printf ("0%o", (unsigned int) exitstatus);
9231 uiout->message ("[Inferior %s (%s) exited with code %pF]\n",
9232 plongest (inf->num), pidstr.c_str (),
9233 string_field ("exit-code", exit_code_str.c_str ()));
9235 else
9237 if (uiout->is_mi_like_p ())
9238 uiout->field_string
9239 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
9240 uiout->message ("[Inferior %s (%s) exited normally]\n",
9241 plongest (inf->num), pidstr.c_str ());
9245 void
9246 print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
9248 struct thread_info *thr = inferior_thread ();
9250 infrun_debug_printf ("signal = %s", gdb_signal_to_string (siggnal));
9252 annotate_signal ();
9254 if (uiout->is_mi_like_p ())
9256 else if (show_thread_that_caused_stop ())
9258 uiout->text ("\nThread ");
9259 uiout->field_string ("thread-id", print_thread_id (thr));
9261 const char *name = thread_name (thr);
9262 if (name != nullptr)
9264 uiout->text (" \"");
9265 uiout->field_string ("name", name);
9266 uiout->text ("\"");
9269 else
9270 uiout->text ("\nProgram");
9272 if (siggnal == GDB_SIGNAL_0 && !uiout->is_mi_like_p ())
9273 uiout->text (" stopped");
9274 else
9276 uiout->text (" received signal ");
9277 annotate_signal_name ();
9278 if (uiout->is_mi_like_p ())
9279 uiout->field_string
9280 ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
9281 uiout->field_string ("signal-name", gdb_signal_to_name (siggnal));
9282 annotate_signal_name_end ();
9283 uiout->text (", ");
9284 annotate_signal_string ();
9285 uiout->field_string ("signal-meaning", gdb_signal_to_string (siggnal));
9287 regcache *regcache = get_thread_regcache (thr);
9288 struct gdbarch *gdbarch = regcache->arch ();
9289 if (gdbarch_report_signal_info_p (gdbarch))
9290 gdbarch_report_signal_info (gdbarch, uiout, siggnal);
9292 annotate_signal_string_end ();
9294 uiout->text (".\n");
9297 void
9298 print_no_history_reason (struct ui_out *uiout)
9300 if (uiout->is_mi_like_p ())
9301 uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_NO_HISTORY));
9302 else if (execution_direction == EXEC_FORWARD)
9303 uiout->text ("\nReached end of recorded history; stopping.\nFollowing "
9304 "forward execution will be added to history.\n");
9305 else
9307 gdb_assert (execution_direction == EXEC_REVERSE);
9308 uiout->text ("\nReached end of recorded history; stopping.\nBackward "
9309 "execution from here not possible.\n");
9313 /* Print current location without a level number, if we have changed
9314 functions or hit a breakpoint. Print source line if we have one.
9315 bpstat_print contains the logic deciding in detail what to print,
9316 based on the event(s) that just occurred. */
9318 static void
9319 print_stop_location (const target_waitstatus &ws)
9321 int bpstat_ret;
9322 enum print_what source_flag;
9323 int do_frame_printing = 1;
9324 struct thread_info *tp = inferior_thread ();
9326 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws.kind ());
9327 switch (bpstat_ret)
9329 case PRINT_UNKNOWN:
9330 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
9331 should) carry around the function and does (or should) use
9332 that when doing a frame comparison. */
9333 if (tp->control.stop_step
9334 && (tp->control.step_frame_id
9335 == get_frame_id (get_current_frame ()))
9336 && (tp->control.step_start_function
9337 == find_pc_function (tp->stop_pc ())))
9339 symtab_and_line sal = find_frame_sal (get_selected_frame (nullptr));
9340 if (sal.symtab != tp->current_symtab)
9342 /* Finished step in same frame but into different file, print
9343 location and source line. */
9344 source_flag = SRC_AND_LOC;
9346 else
9348 /* Finished step in same frame and same file, just print source
9349 line. */
9350 source_flag = SRC_LINE;
9353 else
9355 /* Finished step into different frame, print location and source
9356 line. */
9357 source_flag = SRC_AND_LOC;
9359 break;
9360 case PRINT_SRC_AND_LOC:
9361 /* Print location and source line. */
9362 source_flag = SRC_AND_LOC;
9363 break;
9364 case PRINT_SRC_ONLY:
9365 source_flag = SRC_LINE;
9366 break;
9367 case PRINT_NOTHING:
9368 /* Something bogus. */
9369 source_flag = SRC_LINE;
9370 do_frame_printing = 0;
9371 break;
9372 default:
9373 internal_error (_("Unknown value."));
9376 /* The behavior of this routine with respect to the source
9377 flag is:
9378 SRC_LINE: Print only source line
9379 LOCATION: Print only location
9380 SRC_AND_LOC: Print location and source line. */
9381 if (do_frame_printing)
9382 print_stack_frame (get_selected_frame (nullptr), 0, source_flag, 1);
9385 /* See `print_stop_event` in infrun.h. */
9387 static void
9388 do_print_stop_event (struct ui_out *uiout, bool displays)
9390 struct target_waitstatus last;
9391 struct thread_info *tp;
9393 get_last_target_status (nullptr, nullptr, &last);
9396 scoped_restore save_uiout = make_scoped_restore (&current_uiout, uiout);
9398 print_stop_location (last);
9400 /* Display the auto-display expressions. */
9401 if (displays)
9402 do_displays ();
9405 tp = inferior_thread ();
9406 if (tp->thread_fsm () != nullptr
9407 && tp->thread_fsm ()->finished_p ())
9409 struct return_value_info *rv;
9411 rv = tp->thread_fsm ()->return_value ();
9412 if (rv != nullptr)
9413 print_return_value (uiout, rv);
9417 /* See infrun.h. This function itself sets up buffered output for the
9418 duration of do_print_stop_event, which performs the actual event
9419 printing. */
9421 void
9422 print_stop_event (struct ui_out *uiout, bool displays)
9424 do_with_buffered_output (do_print_stop_event, uiout, displays);
9427 /* See infrun.h. */
9429 void
9430 maybe_remove_breakpoints (void)
9432 if (!breakpoints_should_be_inserted_now () && target_has_execution ())
9434 if (remove_breakpoints ())
9436 target_terminal::ours_for_output ();
9437 gdb_printf (_("Cannot remove breakpoints because "
9438 "program is no longer writable.\nFurther "
9439 "execution is probably impossible.\n"));
9444 /* The execution context that just caused a normal stop. */
9446 struct stop_context
9448 stop_context ();
9450 DISABLE_COPY_AND_ASSIGN (stop_context);
9452 bool changed () const;
9454 /* The stop ID. */
9455 ULONGEST stop_id;
9457 /* The event PTID. */
9459 ptid_t ptid;
9461 /* If stopped for a thread event, this is the thread that caused the
9462 stop. */
9463 thread_info_ref thread;
9465 /* The inferior that caused the stop. */
9466 int inf_num;
9469 /* Initializes a new stop context. If stopped for a thread event, this
9470 takes a strong reference to the thread. */
9472 stop_context::stop_context ()
9474 stop_id = get_stop_id ();
9475 ptid = inferior_ptid;
9476 inf_num = current_inferior ()->num;
9478 if (inferior_ptid != null_ptid)
9480 /* Take a strong reference so that the thread can't be deleted
9481 yet. */
9482 thread = thread_info_ref::new_reference (inferior_thread ());
9486 /* Return true if the current context no longer matches the saved stop
9487 context. */
9489 bool
9490 stop_context::changed () const
9492 if (ptid != inferior_ptid)
9493 return true;
9494 if (inf_num != current_inferior ()->num)
9495 return true;
9496 if (thread != nullptr && thread->state != THREAD_STOPPED)
9497 return true;
9498 if (get_stop_id () != stop_id)
9499 return true;
9500 return false;
9503 /* See infrun.h. */
9505 bool
9506 normal_stop ()
9508 struct target_waitstatus last;
9510 get_last_target_status (nullptr, nullptr, &last);
9512 new_stop_id ();
9514 /* If an exception is thrown from this point on, make sure to
9515 propagate GDB's knowledge of the executing state to the
9516 frontend/user running state. A QUIT is an easy exception to see
9517 here, so do this before any filtered output. */
9519 ptid_t finish_ptid = null_ptid;
9521 if (!non_stop)
9522 finish_ptid = minus_one_ptid;
9523 else if (last.kind () == TARGET_WAITKIND_SIGNALLED
9524 || last.kind () == TARGET_WAITKIND_EXITED)
9526 /* On some targets, we may still have live threads in the
9527 inferior when we get a process exit event. E.g., for
9528 "checkpoint", when the current checkpoint/fork exits,
9529 linux-fork.c automatically switches to another fork from
9530 within target_mourn_inferior. */
9531 if (inferior_ptid != null_ptid)
9532 finish_ptid = ptid_t (inferior_ptid.pid ());
9534 else if (last.kind () != TARGET_WAITKIND_NO_RESUMED
9535 && last.kind () != TARGET_WAITKIND_THREAD_EXITED)
9536 finish_ptid = inferior_ptid;
9538 std::optional<scoped_finish_thread_state> maybe_finish_thread_state;
9539 if (finish_ptid != null_ptid)
9541 maybe_finish_thread_state.emplace
9542 (user_visible_resume_target (finish_ptid), finish_ptid);
9545 /* As we're presenting a stop, and potentially removing breakpoints,
9546 update the thread list so we can tell whether there are threads
9547 running on the target. With target remote, for example, we can
9548 only learn about new threads when we explicitly update the thread
9549 list. Do this before notifying the interpreters about signal
9550 stops, end of stepping ranges, etc., so that the "new thread"
9551 output is emitted before e.g., "Program received signal FOO",
9552 instead of after. */
9553 update_thread_list ();
9555 if (last.kind () == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
9556 notify_signal_received (inferior_thread ()->stop_signal ());
9558 /* As with the notification of thread events, we want to delay
9559 notifying the user that we've switched thread context until
9560 the inferior actually stops.
9562 There's no point in saying anything if the inferior has exited.
9563 Note that SIGNALLED here means "exited with a signal", not
9564 "received a signal".
9566 Also skip saying anything in non-stop mode. In that mode we
9567 don't want GDB to switch threads behind the user's back, to avoid
9568 races where the user is typing a command to apply to thread x,
9569 but GDB switches to thread y before the user finishes entering
9570 the command. For that reason, fetch_inferior_event installs a
9571 cleanup to restore the current thread back to the thread the user
9572 had selected right after this event is handled, so we're not
9573 really switching, only informing of a stop. */
9574 if (!non_stop)
9576 if ((last.kind () != TARGET_WAITKIND_SIGNALLED
9577 && last.kind () != TARGET_WAITKIND_EXITED
9578 && last.kind () != TARGET_WAITKIND_NO_RESUMED
9579 && last.kind () != TARGET_WAITKIND_THREAD_EXITED)
9580 && target_has_execution ()
9581 && previous_thread != inferior_thread ())
9583 SWITCH_THRU_ALL_UIS ()
9585 target_terminal::ours_for_output ();
9586 gdb_printf (_("[Switching to %s]\n"),
9587 target_pid_to_str (inferior_ptid).c_str ());
9588 annotate_thread_changed ();
9592 update_previous_thread ();
9595 if (last.kind () == TARGET_WAITKIND_NO_RESUMED
9596 || last.kind () == TARGET_WAITKIND_THREAD_EXITED)
9598 stop_print_frame = false;
9600 SWITCH_THRU_ALL_UIS ()
9601 if (current_ui->prompt_state == PROMPT_BLOCKED)
9603 target_terminal::ours_for_output ();
9604 if (last.kind () == TARGET_WAITKIND_NO_RESUMED)
9605 gdb_printf (_("No unwaited-for children left.\n"));
9606 else if (last.kind () == TARGET_WAITKIND_THREAD_EXITED)
9607 gdb_printf (_("Command aborted, thread exited.\n"));
9608 else
9609 gdb_assert_not_reached ("unhandled");
9613 /* Note: this depends on the update_thread_list call above. */
9614 maybe_remove_breakpoints ();
9616 /* If an auto-display called a function and that got a signal,
9617 delete that auto-display to avoid an infinite recursion. */
9619 if (stopped_by_random_signal)
9620 disable_current_display ();
9622 SWITCH_THRU_ALL_UIS ()
9624 async_enable_stdin ();
9627 /* Let the user/frontend see the threads as stopped. */
9628 maybe_finish_thread_state.reset ();
9630 /* Select innermost stack frame - i.e., current frame is frame 0,
9631 and current location is based on that. Handle the case where the
9632 dummy call is returning after being stopped. E.g. the dummy call
9633 previously hit a breakpoint. (If the dummy call returns
9634 normally, we won't reach here.) Do this before the stop hook is
9635 run, so that it doesn't get to see the temporary dummy frame,
9636 which is not where we'll present the stop. */
9637 if (has_stack_frames ())
9639 if (stop_stack_dummy == STOP_STACK_DUMMY)
9641 /* Pop the empty frame that contains the stack dummy. This
9642 also restores inferior state prior to the call (struct
9643 infcall_suspend_state). */
9644 frame_info_ptr frame = get_current_frame ();
9646 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
9647 frame_pop (frame);
9648 /* frame_pop calls reinit_frame_cache as the last thing it
9649 does which means there's now no selected frame. */
9652 select_frame (get_current_frame ());
9654 /* Set the current source location. */
9655 set_current_sal_from_frame (get_current_frame ());
9658 /* Look up the hook_stop and run it (CLI internally handles the problem
9659 of stop_command's pre-hook not existing). */
9660 stop_context saved_context;
9664 execute_cmd_pre_hook (stop_command);
9666 catch (const gdb_exception_error &ex)
9668 exception_fprintf (gdb_stderr, ex,
9669 "Error while running hook_stop:\n");
9672 /* If the stop hook resumes the target, then there's no point in
9673 trying to notify about the previous stop; its context is
9674 gone. Likewise if the command switches thread or inferior --
9675 the observers would print a stop for the wrong
9676 thread/inferior. */
9677 if (saved_context.changed ())
9678 return true;
9680 /* Notify observers about the stop. This is where the interpreters
9681 print the stop event. */
9682 notify_normal_stop ((inferior_ptid != null_ptid
9683 ? inferior_thread ()->control.stop_bpstat
9684 : nullptr),
9685 stop_print_frame);
9686 annotate_stopped ();
9688 if (target_has_execution ())
9690 if (last.kind () != TARGET_WAITKIND_SIGNALLED
9691 && last.kind () != TARGET_WAITKIND_EXITED
9692 && last.kind () != TARGET_WAITKIND_NO_RESUMED
9693 && last.kind () != TARGET_WAITKIND_THREAD_EXITED)
9694 /* Delete the breakpoint we stopped at, if it wants to be deleted.
9695 Delete any breakpoint that is to be deleted at the next stop. */
9696 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
9699 return false;
9703 signal_stop_state (int signo)
9705 return signal_stop[signo];
9709 signal_print_state (int signo)
9711 return signal_print[signo];
9715 signal_pass_state (int signo)
9717 return signal_program[signo];
9720 static void
9721 signal_cache_update (int signo)
9723 if (signo == -1)
9725 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
9726 signal_cache_update (signo);
9728 return;
9731 signal_pass[signo] = (signal_stop[signo] == 0
9732 && signal_print[signo] == 0
9733 && signal_program[signo] == 1
9734 && signal_catch[signo] == 0);
9738 signal_stop_update (int signo, int state)
9740 int ret = signal_stop[signo];
9742 signal_stop[signo] = state;
9743 signal_cache_update (signo);
9744 return ret;
9748 signal_print_update (int signo, int state)
9750 int ret = signal_print[signo];
9752 signal_print[signo] = state;
9753 signal_cache_update (signo);
9754 return ret;
9758 signal_pass_update (int signo, int state)
9760 int ret = signal_program[signo];
9762 signal_program[signo] = state;
9763 signal_cache_update (signo);
9764 return ret;
9767 /* Update the global 'signal_catch' from INFO and notify the
9768 target. */
9770 void
9771 signal_catch_update (const unsigned int *info)
9773 int i;
9775 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
9776 signal_catch[i] = info[i] > 0;
9777 signal_cache_update (-1);
9778 target_pass_signals (signal_pass);
9781 static void
9782 sig_print_header (void)
9784 gdb_printf (_("Signal Stop\tPrint\tPass "
9785 "to program\tDescription\n"));
9788 static void
9789 sig_print_info (enum gdb_signal oursig)
9791 const char *name = gdb_signal_to_name (oursig);
9792 int name_padding = 13 - strlen (name);
9794 if (name_padding <= 0)
9795 name_padding = 0;
9797 gdb_printf ("%s", name);
9798 gdb_printf ("%*.*s ", name_padding, name_padding, " ");
9799 gdb_printf ("%s\t", signal_stop[oursig] ? "Yes" : "No");
9800 gdb_printf ("%s\t", signal_print[oursig] ? "Yes" : "No");
9801 gdb_printf ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
9802 gdb_printf ("%s\n", gdb_signal_to_string (oursig));
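/* For illustration, a row produced by sig_print_info looks roughly
   like this, printed under the header from sig_print_header above
   (the values shown are typical defaults, not guaranteed):

     SIGINT        Yes      Yes     No              Interrupt  */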
9805 /* Specify how various signals in the inferior should be handled. */
9807 static void
9808 handle_command (const char *args, int from_tty)
9810 int digits, wordlen;
9811 int sigfirst, siglast;
9812 enum gdb_signal oursig;
9813 int allsigs;
9815 if (args == nullptr)
9817 error_no_arg (_("signal to handle"));
9820 /* Allocate and zero an array of flags for which signals to handle. */
9822 const size_t nsigs = GDB_SIGNAL_LAST;
9823 unsigned char sigs[nsigs] {};
9825 /* Break the command line up into args. */
9827 gdb_argv built_argv (args);
9829 /* Walk through the args, looking for signal oursigs, signal names, and
9830 actions. Signal numbers and signal names may be interspersed with
9831 actions, with the actions being performed for all signals cumulatively
9832 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
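/* For illustration, invocations this parser is meant to accept look
   like the following (hypothetical examples, not exercised here):

     handle SIGUSR1 nostop noprint pass
     handle 5-9 print stop

   where "5-9" uses the numeric <LOW>-<HIGH> range form mentioned
   above, and the action keywords apply to the signals named so far.  */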
9834 for (char *arg : built_argv)
9836 wordlen = strlen (arg);
9837 for (digits = 0; isdigit (arg[digits]); digits++)
9840 allsigs = 0;
9841 sigfirst = siglast = -1;
9843 if (wordlen >= 1 && !strncmp (arg, "all", wordlen))
9845 /* Apply action to all signals except those used by the
9846 debugger. Silently skip those. */
9847 allsigs = 1;
9848 sigfirst = 0;
9849 siglast = nsigs - 1;
9851 else if (wordlen >= 1 && !strncmp (arg, "stop", wordlen))
9853 SET_SIGS (nsigs, sigs, signal_stop);
9854 SET_SIGS (nsigs, sigs, signal_print);
9856 else if (wordlen >= 1 && !strncmp (arg, "ignore", wordlen))
9858 UNSET_SIGS (nsigs, sigs, signal_program);
9860 else if (wordlen >= 2 && !strncmp (arg, "print", wordlen))
9862 SET_SIGS (nsigs, sigs, signal_print);
9864 else if (wordlen >= 2 && !strncmp (arg, "pass", wordlen))
9866 SET_SIGS (nsigs, sigs, signal_program);
9868 else if (wordlen >= 3 && !strncmp (arg, "nostop", wordlen))
9870 UNSET_SIGS (nsigs, sigs, signal_stop);
9872 else if (wordlen >= 3 && !strncmp (arg, "noignore", wordlen))
9874 SET_SIGS (nsigs, sigs, signal_program);
9876 else if (wordlen >= 4 && !strncmp (arg, "noprint", wordlen))
9878 UNSET_SIGS (nsigs, sigs, signal_print);
9879 UNSET_SIGS (nsigs, sigs, signal_stop);
9881 else if (wordlen >= 4 && !strncmp (arg, "nopass", wordlen))
9883 UNSET_SIGS (nsigs, sigs, signal_program);
9885 else if (digits > 0)
9887 /* It is numeric. The numeric signal refers to our own
9888 internal signal numbering from target.h, not to host/target
9889 signal number. This is a feature; users really should be
9890 using symbolic names anyway, and the common ones like
9891 SIGHUP, SIGINT, SIGALRM, etc. will work right in any case. */
9893 sigfirst = siglast = (int)
9894 gdb_signal_from_command (atoi (arg));
9895 if (arg[digits] == '-')
9897 siglast = (int)
9898 gdb_signal_from_command (atoi (arg + digits + 1));
9900 if (sigfirst > siglast)
9902 /* Bet he didn't figure we'd think of this case... */
9903 std::swap (sigfirst, siglast);
9906 else
9908 oursig = gdb_signal_from_name (arg);
9909 if (oursig != GDB_SIGNAL_UNKNOWN)
9911 sigfirst = siglast = (int) oursig;
9913 else
9915 /* Not a number and not a recognized flag word => complain. */
9916 error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg);
9920 /* If any signal numbers or symbol names were found, set flags for
9921 which signals to apply actions to. */
9923 for (int signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
9925 switch ((enum gdb_signal) signum)
9927 case GDB_SIGNAL_TRAP:
9928 case GDB_SIGNAL_INT:
9929 if (!allsigs && !sigs[signum])
9931 if (query (_("%s is used by the debugger.\n\
9932 Are you sure you want to change it? "),
9933 gdb_signal_to_name ((enum gdb_signal) signum)))
9935 sigs[signum] = 1;
9937 else
9938 gdb_printf (_("Not confirmed, unchanged.\n"));
9940 break;
9941 case GDB_SIGNAL_0:
9942 case GDB_SIGNAL_DEFAULT:
9943 case GDB_SIGNAL_UNKNOWN:
9944 /* Make sure that "all" doesn't print these. */
9945 break;
9946 default:
9947 sigs[signum] = 1;
9948 break;
9953 for (int signum = 0; signum < nsigs; signum++)
9954 if (sigs[signum])
9956 signal_cache_update (-1);
9957 target_pass_signals (signal_pass);
9958 target_program_signals (signal_program);
9960 if (from_tty)
9962 /* Show the results. */
9963 sig_print_header ();
9964 for (; signum < nsigs; signum++)
9965 if (sigs[signum])
9966 sig_print_info ((enum gdb_signal) signum);
9969 break;
9973 /* Complete the "handle" command. */
9975 static void
9976 handle_completer (struct cmd_list_element *ignore,
9977 completion_tracker &tracker,
9978 const char *text, const char *word)
9980 static const char * const keywords[] =
9982 "all",
9983 "stop",
9984 "ignore",
9985 "print",
9986 "pass",
9987 "nostop",
9988 "noignore",
9989 "noprint",
9990 "nopass",
9991 nullptr,
9994 signal_completer (ignore, tracker, text, word);
9995 complete_on_enum (tracker, keywords, word, word);
9998 enum gdb_signal
9999 gdb_signal_from_command (int num)
10001 if (num >= 1 && num <= 15)
10002 return (enum gdb_signal) num;
10003 error (_("Only signals 1-15 are valid as numeric signals.\n\
10004 Use \"info signals\" for a list of symbolic signals."));
10007 /* Print current contents of the tables set by the handle command.
10008 It is possible we should just be printing signals actually used
10009 by the current target (but for things to work right when switching
10010 targets, all signals should be in the signal tables). */
10012 static void
10013 info_signals_command (const char *signum_exp, int from_tty)
10015 enum gdb_signal oursig;
10017 sig_print_header ();
10019 if (signum_exp)
10021 /* First see if this is a symbol name. */
10022 oursig = gdb_signal_from_name (signum_exp);
10023 if (oursig == GDB_SIGNAL_UNKNOWN)
10025 /* No, try numeric. */
10026 oursig =
10027 gdb_signal_from_command (parse_and_eval_long (signum_exp));
10029 sig_print_info (oursig);
10030 return;
10033 gdb_printf ("\n");
10034 /* These ugly casts brought to you by the native VAX compiler. */
10035 for (oursig = GDB_SIGNAL_FIRST;
10036 (int) oursig < (int) GDB_SIGNAL_LAST;
10037 oursig = (enum gdb_signal) ((int) oursig + 1))
10039 QUIT;
10041 if (oursig != GDB_SIGNAL_UNKNOWN
10042 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
10043 sig_print_info (oursig);
10046 gdb_printf (_("\nUse the \"%ps\" command to change these tables.\n"),
10047 styled_string (command_style.style (), "handle"));
10050 /* The $_siginfo convenience variable is a bit special. We don't know
10051 for sure the type of the value until we actually have a chance to
10052 fetch the data. The type can change depending on gdbarch, so it is
10053 also dependent on which thread you have selected. We deal with this by:
10055 1. making $_siginfo be an internalvar that creates a new value on
10056 access.
10058 2. making the value of $_siginfo be an lval_computed value. */
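/* As an illustration only (the member names come from the target's
   gdbarch-provided siginfo type, so they vary; these are typical for
   GNU/Linux):

     (gdb) print $_siginfo.si_signo
     (gdb) print $_siginfo._sifields._sigfault.si_addr  */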
10060 /* This function implements the lval_computed support for reading a
10061 $_siginfo value. */
10063 static void
10064 siginfo_value_read (struct value *v)
10066 LONGEST transferred;
10068 /* If we can access registers, we can also access $_siginfo, and
10069 vice versa. */
10070 validate_registers_access ();
10072 transferred =
10073 target_read (current_inferior ()->top_target (),
10074 TARGET_OBJECT_SIGNAL_INFO,
10075 nullptr,
10076 v->contents_all_raw ().data (),
10077 v->offset (),
10078 v->type ()->length ());
10080 if (transferred != v->type ()->length ())
10081 error (_("Unable to read siginfo"));
10084 /* This function implements the lval_computed support for writing a
10085 $_siginfo value. */
10087 static void
10088 siginfo_value_write (struct value *v, struct value *fromval)
10090 LONGEST transferred;
10092 /* If we can access registers, we can also access $_siginfo, and
10093 vice versa. */
10094 validate_registers_access ();
10096 transferred = target_write (current_inferior ()->top_target (),
10097 TARGET_OBJECT_SIGNAL_INFO,
10098 nullptr,
10099 fromval->contents_all_raw ().data (),
10100 v->offset (),
10101 fromval->type ()->length ());
10103 if (transferred != fromval->type ()->length ())
10104 error (_("Unable to write siginfo"));
10107 static const struct lval_funcs siginfo_value_funcs =
10109 siginfo_value_read,
10110 siginfo_value_write
10113 /* Return a new value with the correct type for the siginfo object of
10114 the current thread using architecture GDBARCH. Return a void value
10115 if there's no object available. */
10117 static struct value *
10118 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
10119 void *ignore)
10121 if (target_has_stack ()
10122 && inferior_ptid != null_ptid
10123 && gdbarch_get_siginfo_type_p (gdbarch))
10125 struct type *type = gdbarch_get_siginfo_type (gdbarch);
10127 return value::allocate_computed (type, &siginfo_value_funcs, nullptr);
10130 return value::allocate (builtin_type (gdbarch)->builtin_void);
10134 /* infcall_suspend_state contains state about the program itself like its
10135 registers and any signal it received when it last stopped.
10136 This state must be restored regardless of how the inferior function call
10137 ends (either successfully, or after it hits a breakpoint or signal)
10138 if the program is to properly continue where it left off. */
10140 class infcall_suspend_state
10142 public:
10143 /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
10144 once the inferior function call has finished. */
10145 infcall_suspend_state (struct gdbarch *gdbarch,
10146 const struct thread_info *tp,
10147 struct regcache *regcache)
10148 : m_registers (new readonly_detached_regcache (*regcache))
10150 tp->save_suspend_to (m_thread_suspend);
10152 gdb::unique_xmalloc_ptr<gdb_byte> siginfo_data;
10154 if (gdbarch_get_siginfo_type_p (gdbarch))
10156 struct type *type = gdbarch_get_siginfo_type (gdbarch);
10157 size_t len = type->length ();
10159 siginfo_data.reset ((gdb_byte *) xmalloc (len));
10161 if (target_read (current_inferior ()->top_target (),
10162 TARGET_OBJECT_SIGNAL_INFO, nullptr,
10163 siginfo_data.get (), 0, len) != len)
10165 /* Errors ignored. */
10166 siginfo_data.reset (nullptr);
10170 if (siginfo_data)
10172 m_siginfo_gdbarch = gdbarch;
10173 m_siginfo_data = std::move (siginfo_data);
10177 /* Return a pointer to the stored register state. */
10179 readonly_detached_regcache *registers () const
10181 return m_registers.get ();
10184 /* Restores the stored state into GDBARCH, TP, and REGCACHE. */
10186 void restore (struct gdbarch *gdbarch,
10187 struct thread_info *tp,
10188 struct regcache *regcache) const
10190 tp->restore_suspend_from (m_thread_suspend);
10192 if (m_siginfo_gdbarch == gdbarch)
10194 struct type *type = gdbarch_get_siginfo_type (gdbarch);
10196 /* Errors ignored. */
10197 target_write (current_inferior ()->top_target (),
10198 TARGET_OBJECT_SIGNAL_INFO, nullptr,
10199 m_siginfo_data.get (), 0, type->length ());
10202 /* The inferior can be gone if the user types "print exit(0)"
10203 (and perhaps other times). */
10204 if (target_has_execution ())
10205 /* NB: The register write goes through to the target. */
10206 regcache->restore (registers ());
10209 private:
10210 /* How the current thread stopped before the inferior function call was
10211 executed. */
10212 struct thread_suspend_state m_thread_suspend;
10214 /* The registers before the inferior function call was executed. */
10215 std::unique_ptr<readonly_detached_regcache> m_registers;
10217 /* Format of SIGINFO_DATA or NULL if it is not present. */
10218 struct gdbarch *m_siginfo_gdbarch = nullptr;
10220 /* The inferior format depends on SIGINFO_GDBARCH, and it has a length of
10221 gdbarch_get_siginfo_type ()->length (). For a different gdbarch the
10222 content would be invalid. */
10223 gdb::unique_xmalloc_ptr<gdb_byte> m_siginfo_data;
10226 infcall_suspend_state_up
10227 save_infcall_suspend_state ()
10229 struct thread_info *tp = inferior_thread ();
10230 regcache *regcache = get_thread_regcache (tp);
10231 struct gdbarch *gdbarch = regcache->arch ();
10233 infcall_suspend_state_up inf_state
10234 (new struct infcall_suspend_state (gdbarch, tp, regcache));
10236 /* Having saved the current state, adjust the thread state, discarding
10237 any stop signal information. The stop signal is not useful when
10238 starting an inferior function call, and run_inferior_call will not use
10239 the signal due to its `proceed' call with GDB_SIGNAL_0. */
10240 tp->set_stop_signal (GDB_SIGNAL_0);
10242 return inf_state;
10245 /* Restore inferior session state to INF_STATE. */
10247 void
10248 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
10250 struct thread_info *tp = inferior_thread ();
10251 regcache *regcache = get_thread_regcache (inferior_thread ());
10252 struct gdbarch *gdbarch = regcache->arch ();
10254 inf_state->restore (gdbarch, tp, regcache);
10255 discard_infcall_suspend_state (inf_state);
10258 void
10259 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
10261 delete inf_state;
10264 readonly_detached_regcache *
10265 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
10267 return inf_state->registers ();
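/* A rough sketch of how callers are expected to pair the routines
   above around an inferior function call (simplified; the real
   caller lives in the infcall code):

     infcall_suspend_state_up saved = save_infcall_suspend_state ();
     ... set up and run the dummy-frame call ...
     restore_infcall_suspend_state (saved.release ());

   restore_infcall_suspend_state deletes the state it is given, so
   ownership must be released from the unique pointer first.  */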
10270 /* infcall_control_state contains state regarding gdb's control of the
10271 inferior itself like stepping control. It also contains session state like
10272 the user's currently selected frame. */
10274 struct infcall_control_state
10276 struct thread_control_state thread_control;
10277 struct inferior_control_state inferior_control;
10279 /* Other fields: */
10280 enum stop_stack_kind stop_stack_dummy = STOP_NONE;
10281 int stopped_by_random_signal = 0;
10283 /* ID and level of the selected frame when the inferior function
10284 call was made. */
10285 struct frame_id selected_frame_id {};
10286 int selected_frame_level = -1;
10289 /* Save all of the information associated with the inferior<==>gdb
10290 connection. */
10292 infcall_control_state_up
10293 save_infcall_control_state ()
10295 infcall_control_state_up inf_status (new struct infcall_control_state);
10296 struct thread_info *tp = inferior_thread ();
10297 struct inferior *inf = current_inferior ();
10299 inf_status->thread_control = tp->control;
10300 inf_status->inferior_control = inf->control;
10302 tp->control.step_resume_breakpoint = nullptr;
10303 tp->control.exception_resume_breakpoint = nullptr;
10305 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
10306 chain. If caller's caller is walking the chain, they'll be happier if we
10307 hand them back the original chain when restore_infcall_control_state is
10308 called. */
10309 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
10311 /* Other fields: */
10312 inf_status->stop_stack_dummy = stop_stack_dummy;
10313 inf_status->stopped_by_random_signal = stopped_by_random_signal;
10315 save_selected_frame (&inf_status->selected_frame_id,
10316 &inf_status->selected_frame_level);
10318 return inf_status;
10321 /* Restore inferior session state to INF_STATUS. */
10323 void
10324 restore_infcall_control_state (struct infcall_control_state *inf_status)
10326 struct thread_info *tp = inferior_thread ();
10327 struct inferior *inf = current_inferior ();
10329 if (tp->control.step_resume_breakpoint)
10330 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
10332 if (tp->control.exception_resume_breakpoint)
10333 tp->control.exception_resume_breakpoint->disposition
10334 = disp_del_at_next_stop;
10336 /* Handle the bpstat_copy of the chain. */
10337 bpstat_clear (&tp->control.stop_bpstat);
10339 tp->control = inf_status->thread_control;
10340 inf->control = inf_status->inferior_control;
10342 /* Other fields: */
10343 stop_stack_dummy = inf_status->stop_stack_dummy;
10344 stopped_by_random_signal = inf_status->stopped_by_random_signal;
10346 if (target_has_stack ())
10348 restore_selected_frame (inf_status->selected_frame_id,
10349 inf_status->selected_frame_level);
10352 delete inf_status;
10355 void
10356 discard_infcall_control_state (struct infcall_control_state *inf_status)
10358 if (inf_status->thread_control.step_resume_breakpoint)
10359 inf_status->thread_control.step_resume_breakpoint->disposition
10360 = disp_del_at_next_stop;
10362 if (inf_status->thread_control.exception_resume_breakpoint)
10363 inf_status->thread_control.exception_resume_breakpoint->disposition
10364 = disp_del_at_next_stop;
10366 /* See save_infcall_control_state for info on stop_bpstat. */
10367 bpstat_clear (&inf_status->thread_control.stop_bpstat);
10369 delete inf_status;
10372 /* See infrun.h. */
10374 void
10375 clear_exit_convenience_vars (void)
10377 clear_internalvar (lookup_internalvar ("_exitsignal"));
10378 clear_internalvar (lookup_internalvar ("_exitcode"));
10382 /* User interface for reverse debugging:
10383 Set exec-direction / show exec-direction commands
10384 (returns error unless target implements to_set_exec_direction method). */
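/* Typical interactive usage, assuming a target that can execute in
   reverse (for example after enabling "record"):

     (gdb) record
     (gdb) continue
     (gdb) set exec-direction reverse
     (gdb) continue

   "show exec-direction" reports the current setting.  */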
10386 enum exec_direction_kind execution_direction = EXEC_FORWARD;
10387 static const char exec_forward[] = "forward";
10388 static const char exec_reverse[] = "reverse";
10389 static const char *exec_direction = exec_forward;
10390 static const char *const exec_direction_names[] = {
10391 exec_forward,
10392 exec_reverse,
10393 nullptr
10396 static void
10397 set_exec_direction_func (const char *args, int from_tty,
10398 struct cmd_list_element *cmd)
10400 if (target_can_execute_reverse ())
10402 if (!strcmp (exec_direction, exec_forward))
10403 execution_direction = EXEC_FORWARD;
10404 else if (!strcmp (exec_direction, exec_reverse))
10405 execution_direction = EXEC_REVERSE;
10407 else
10409 exec_direction = exec_forward;
10410 error (_("Target does not support this operation."));
10414 static void
10415 show_exec_direction_func (struct ui_file *out, int from_tty,
10416 struct cmd_list_element *cmd, const char *value)
10418 switch (execution_direction) {
10419 case EXEC_FORWARD:
10420 gdb_printf (out, _("Forward.\n"));
10421 break;
10422 case EXEC_REVERSE:
10423 gdb_printf (out, _("Reverse.\n"));
10424 break;
10425 default:
10426 internal_error (_("bogus execution_direction value: %d"),
10427 (int) execution_direction);
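/* Hedged CLI sketch of the exec-direction commands implemented above.
   A reverse-capable target such as "record full" is required, and the
   output wording is approximate:

     (gdb) record full
     (gdb) set exec-direction reverse
     (gdb) show exec-direction
     Reverse.
     (gdb) step                       # now steps backwards
     (gdb) set exec-direction forward  */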
10431 static void
10432 show_schedule_multiple (struct ui_file *file, int from_tty,
10433 struct cmd_list_element *c, const char *value)
10435 gdb_printf (file, _("Resuming the execution of threads "
10436 "of all processes is %s.\n"), value);
10439 /* Implementation of `siginfo' variable. */
10441 static const struct internalvar_funcs siginfo_funcs =
10443 siginfo_make_value,
10444 nullptr,
10447 /* Callback for infrun's target events source. This is marked when a
10448 thread has a pending status to process. */
10450 static void
10451 infrun_async_inferior_event_handler (gdb_client_data data)
10453 clear_async_event_handler (infrun_async_inferior_event_token);
10454 inferior_event_handler (INF_REG_EVENT);
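/* A minimal sketch (hypothetical helper, not part of this file) of how a
   producer schedules the handler above: marking the token makes the event
   loop invoke infrun_async_inferior_event_handler on its next iteration.  */

static void
example_queue_infrun_event ()
{
  /* Assumes infrun_async_inferior_event_token has already been created
     by _initialize_infrun below.  */
  mark_async_event_handler (infrun_async_inferior_event_token);
}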
10457 #if GDB_SELF_TEST
10458 namespace selftests
10461 /* Verify that when two threads with the same ptid exist (from two different
10462 targets) and one of them changes ptid, we only update inferior_ptid if
10463 it is appropriate. */
10465 static void
10466 infrun_thread_ptid_changed ()
10468 gdbarch *arch = current_inferior ()->arch ();
10470 /* The thread which inferior_ptid represents changes ptid. */
10472 scoped_restore_current_pspace_and_thread restore;
10474 scoped_mock_context<test_target_ops> target1 (arch);
10475 scoped_mock_context<test_target_ops> target2 (arch);
10477 ptid_t old_ptid (111, 222);
10478 ptid_t new_ptid (111, 333);
10480 target1.mock_inferior.pid = old_ptid.pid ();
10481 target1.mock_thread.ptid = old_ptid;
10482 target1.mock_inferior.ptid_thread_map.clear ();
10483 target1.mock_inferior.ptid_thread_map[old_ptid] = &target1.mock_thread;
10485 target2.mock_inferior.pid = old_ptid.pid ();
10486 target2.mock_thread.ptid = old_ptid;
10487 target2.mock_inferior.ptid_thread_map.clear ();
10488 target2.mock_inferior.ptid_thread_map[old_ptid] = &target2.mock_thread;
10490 auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
10491 set_current_inferior (&target1.mock_inferior);
10493 thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);
10495 gdb_assert (inferior_ptid == new_ptid);
10498 /* A thread with the same ptid as inferior_ptid, but from another target,
10499 changes ptid. */
10501 scoped_restore_current_pspace_and_thread restore;
10503 scoped_mock_context<test_target_ops> target1 (arch);
10504 scoped_mock_context<test_target_ops> target2 (arch);
10506 ptid_t old_ptid (111, 222);
10507 ptid_t new_ptid (111, 333);
10509 target1.mock_inferior.pid = old_ptid.pid ();
10510 target1.mock_thread.ptid = old_ptid;
10511 target1.mock_inferior.ptid_thread_map.clear ();
10512 target1.mock_inferior.ptid_thread_map[old_ptid] = &target1.mock_thread;
10514 target2.mock_inferior.pid = old_ptid.pid ();
10515 target2.mock_thread.ptid = old_ptid;
10516 target2.mock_inferior.ptid_thread_map.clear ();
10517 target2.mock_inferior.ptid_thread_map[old_ptid] = &target2.mock_thread;
10519 auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
10520 set_current_inferior (&target2.mock_inferior);
10522 thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);
10524 gdb_assert (inferior_ptid == old_ptid);
10528 } /* namespace selftests */
10530 #endif /* GDB_SELF_TEST */
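/* The unit test above can be exercised from a GDB configured with self
   tests via the "maintenance selftest" command; session approximate:

     (gdb) maintenance selftest infrun_thread_ptid_changed
     Running selftest infrun_thread_ptid_changed.
     Ran 1 unit tests, 0 failed  */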
10532 void _initialize_infrun ();
10533 void
10534 _initialize_infrun ()
10536 struct cmd_list_element *c;
10538 /* Register extra event sources in the event loop. */
10539 infrun_async_inferior_event_token
10540 = create_async_event_handler (infrun_async_inferior_event_handler, nullptr,
10541 "infrun");
10543 cmd_list_element *info_signals_cmd
10544 = add_info ("signals", info_signals_command, _("\
10545 What the debugger does when the program receives various signals.\n\
10546 Specify a signal as argument to print info on that signal only."));
10547 add_info_alias ("handle", info_signals_cmd, 0);
10549 c = add_com ("handle", class_run, handle_command, _("\
10550 Specify how to handle signals.\n\
10551 Usage: handle SIGNAL [ACTIONS]\n\
10552 Args are signals and actions to apply to those signals.\n\
10553 If no actions are specified, the current settings for the specified signals\n\
10554 will be displayed instead.\n\
10556 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
10557 from 1-15 are allowed for compatibility with old versions of GDB.\n\
10558 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
10559 The special arg \"all\" is recognized to mean all signals except those\n\
10560 used by the debugger, typically SIGTRAP and SIGINT.\n\
10562 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
10563 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
10564 Stop means reenter debugger if this signal happens (implies print).\n\
10565 Print means print a message if this signal happens.\n\
10566 Pass means let program see this signal; otherwise program doesn't know.\n\
10567 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
10568 Pass and Stop may be combined.\n\
10570 Multiple signals may be specified. Signal numbers and signal names\n\
10571 may be interspersed with actions, with the actions being performed for\n\
10572 all signals cumulatively specified."));
10573 set_cmd_completer (c, handle_completer);
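/* Hedged CLI sketch of the "handle" command registered above (column
   layout approximate):

     (gdb) handle SIGUSR1 nostop noprint pass
     Signal        Stop      Print   Pass to program Description
     SIGUSR1       No        No      Yes             User defined signal 1
     (gdb) handle 10-12 print         # numeric range, per the help text  */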
10575 stop_command = add_cmd ("stop", class_obscure,
10576 not_just_help_class_command, _("\
10577 There is no `stop' command, but you can set a hook on `stop'.\n\
10578 This allows you to set a list of commands to be run each time execution\n\
10579 of the program stops."), &cmdlist);
10581 add_setshow_boolean_cmd
10582 ("infrun", class_maintenance, &debug_infrun,
10583 _("Set inferior debugging."),
10584 _("Show inferior debugging."),
10585 _("When non-zero, inferior specific debugging is enabled."),
10586 nullptr, show_debug_infrun, &setdebuglist, &showdebuglist);
10588 add_setshow_boolean_cmd ("non-stop", no_class,
10589 &non_stop_1, _("\
10590 Set whether gdb controls the inferior in non-stop mode."), _("\
10591 Show whether gdb controls the inferior in non-stop mode."), _("\
10592 When debugging a multi-threaded program and this setting is\n\
10593 off (the default, also called all-stop mode), when one thread stops\n\
10594 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
10595 all other threads in the program while you interact with the thread of\n\
10596 interest. When you continue or step a thread, you can allow the other\n\
10597 threads to run, or have them remain stopped, but while you inspect any\n\
10598 thread's state, all threads stop.\n\
10600 In non-stop mode, when one thread stops, other threads can continue\n\
10601 to run freely. You'll be able to step each thread independently,\n\
10602 leave it stopped or free to run as needed."),
10603 set_non_stop,
10604 show_non_stop,
10605 &setlist,
10606 &showlist);
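/* Hedged CLI sketch: non-stop must be selected before the program runs,
   and is usually combined with background execution:

     (gdb) set non-stop on
     (gdb) run &
     (gdb) thread 2
     (gdb) interrupt                  # stops only the current thread  */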
10608 for (size_t i = 0; i < GDB_SIGNAL_LAST; i++)
10610 signal_stop[i] = 1;
10611 signal_print[i] = 1;
10612 signal_program[i] = 1;
10613 signal_catch[i] = 0;
10616 /* Signals caused by debugger's own actions should not be given to
10617 the program afterwards.
10619 Do not deliver GDB_SIGNAL_TRAP by default, except when the user
10620 explicitly specifies that it should be delivered to the target
10621 program. Typically, that would occur when a user is debugging a
10622 target monitor on a simulator: the target monitor sets a
10623 breakpoint; the simulator encounters this breakpoint and halts
10624 the simulation handing control to GDB; GDB, noting that the stop
10625 address doesn't map to any known breakpoint, returns control back
10626 to the simulator; the simulator then delivers the hardware
10627 equivalent of a GDB_SIGNAL_TRAP to the program being
10628 debugged. */
10629 signal_program[GDB_SIGNAL_TRAP] = 0;
10630 signal_program[GDB_SIGNAL_INT] = 0;
10632 /* Signals that are not errors should not normally enter the debugger. */
10633 signal_stop[GDB_SIGNAL_ALRM] = 0;
10634 signal_print[GDB_SIGNAL_ALRM] = 0;
10635 signal_stop[GDB_SIGNAL_VTALRM] = 0;
10636 signal_print[GDB_SIGNAL_VTALRM] = 0;
10637 signal_stop[GDB_SIGNAL_PROF] = 0;
10638 signal_print[GDB_SIGNAL_PROF] = 0;
10639 signal_stop[GDB_SIGNAL_CHLD] = 0;
10640 signal_print[GDB_SIGNAL_CHLD] = 0;
10641 signal_stop[GDB_SIGNAL_IO] = 0;
10642 signal_print[GDB_SIGNAL_IO] = 0;
10643 signal_stop[GDB_SIGNAL_POLL] = 0;
10644 signal_print[GDB_SIGNAL_POLL] = 0;
10645 signal_stop[GDB_SIGNAL_URG] = 0;
10646 signal_print[GDB_SIGNAL_URG] = 0;
10647 signal_stop[GDB_SIGNAL_WINCH] = 0;
10648 signal_print[GDB_SIGNAL_WINCH] = 0;
10649 signal_stop[GDB_SIGNAL_PRIO] = 0;
10650 signal_print[GDB_SIGNAL_PRIO] = 0;
10652 /* These signals are used internally by user-level thread
10653 implementations. (See signal(5) on Solaris.) Like the above
10654 signals, a healthy program receives and handles them as part of
10655 its normal operation. */
10656 signal_stop[GDB_SIGNAL_LWP] = 0;
10657 signal_print[GDB_SIGNAL_LWP] = 0;
10658 signal_stop[GDB_SIGNAL_WAITING] = 0;
10659 signal_print[GDB_SIGNAL_WAITING] = 0;
10660 signal_stop[GDB_SIGNAL_CANCEL] = 0;
10661 signal_print[GDB_SIGNAL_CANCEL] = 0;
10662 signal_stop[GDB_SIGNAL_LIBRT] = 0;
10663 signal_print[GDB_SIGNAL_LIBRT] = 0;
10665 /* Update cached state. */
10666 signal_cache_update (-1);
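/* With the defaults established above, a hedged illustration of what the
   user sees (table layout approximate):

     (gdb) info signals SIGCHLD
     Signal        Stop      Print   Pass to program Description
     SIGCHLD       No        No      Yes             Child status changed

   i.e. SIGCHLD is passed to the program silently, while a signal left at
   the stop/print defaults (e.g. SIGSEGV) halts the program and reports.  */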
10668 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
10669 &stop_on_solib_events, _("\
10670 Set stopping for shared library events."), _("\
10671 Show stopping for shared library events."), _("\
10672 If nonzero, gdb will give control to the user when the dynamic linker\n\
10673 notifies gdb of shared library events. The most common event of interest\n\
10674 to the user would be loading/unloading of a new library."),
10675 set_stop_on_solib_events,
10676 show_stop_on_solib_events,
10677 &setlist, &showlist);
10679 add_setshow_enum_cmd ("follow-fork-mode", class_run,
10680 follow_fork_mode_kind_names,
10681 &follow_fork_mode_string, _("\
10682 Set debugger response to a program call of fork or vfork."), _("\
10683 Show debugger response to a program call of fork or vfork."), _("\
10684 A fork or vfork creates a new process. follow-fork-mode can be:\n\
10685 parent - the original process is debugged after a fork\n\
10686 child - the new process is debugged after a fork\n\
10687 The unfollowed process will continue to run.\n\
10688 By default, the debugger will follow the parent process."),
10689 nullptr,
10690 show_follow_fork_mode_string,
10691 &setlist, &showlist);
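/* Hedged CLI sketch: combining this setting with "set detach-on-fork off"
   (registered further below) keeps both sides of the fork under GDB:

     (gdb) set follow-fork-mode child
     (gdb) set detach-on-fork off
     (gdb) run
     (gdb) info inferiors             # parent and child as separate inferiors  */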
10693 add_setshow_enum_cmd ("follow-exec-mode", class_run,
10694 follow_exec_mode_names,
10695 &follow_exec_mode_string, _("\
10696 Set debugger response to a program call of exec."), _("\
10697 Show debugger response to a program call of exec."), _("\
10698 An exec call replaces the program image of a process.\n\
10700 follow-exec-mode can be:\n\
10702 new - the debugger creates a new inferior and rebinds the process\n\
10703 to this new inferior. The program the process was running before\n\
10704 the exec call can be restarted afterwards by restarting the original\n\
10705 inferior.\n\
10707 same - the debugger keeps the process bound to the same inferior.\n\
10708 The new executable image replaces the previous executable loaded in\n\
10709 the inferior. Restarting the inferior after the exec call restarts\n\
10710 the executable the process was running after the exec call.\n\
10712 By default, the debugger will use the same inferior."),
10713 nullptr,
10714 show_follow_exec_mode_string,
10715 &setlist, &showlist);
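/* Hedged CLI sketch of follow-exec-mode.  With "new", an exec call makes
   the debugger create a fresh inferior and rebind the process to it, as
   the help text above describes:

     (gdb) set follow-exec-mode new
     (gdb) run
     (gdb) info inferiors             # old and new program images listed  */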
10717 add_setshow_enum_cmd ("scheduler-locking", class_run,
10718 scheduler_enums, &scheduler_mode, _("\
10719 Set mode for locking scheduler during execution."), _("\
10720 Show mode for locking scheduler during execution."), _("\
10721 off    == no locking (threads may preempt at any time)\n\
10722 on     == full locking (no thread except the current thread may run)\n\
10723           This applies to both normal execution and replay mode.\n\
10724 step   == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
10725           In this mode, other threads may run during other commands.\n\
10726           This applies to both normal execution and replay mode.\n\
10727 replay == scheduler locked in replay mode and unlocked during normal execution."),
10728 set_schedlock_func, /* traps on target vector */
10729 show_scheduler_mode,
10730 &setlist, &showlist);
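/* Hedged CLI sketch of scheduler locking (show output approximate):

     (gdb) set scheduler-locking step
     (gdb) show scheduler-locking
     Mode for locking scheduler during execution is "step".
     (gdb) next                       # other threads stay stopped here  */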
10732 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
10733 Set mode for resuming threads of all processes."), _("\
10734 Show mode for resuming threads of all processes."), _("\
10735 When on, execution commands (such as 'continue' or 'next') resume all\n\
10736 threads of all processes. When off (which is the default), execution\n\
10737 commands only resume the threads of the current process. The set of\n\
10738 threads that are resumed is further refined by the scheduler-locking\n\
10739 mode (see help set scheduler-locking)."),
10740 nullptr,
10741 show_schedule_multiple,
10742 &setlist, &showlist);
10744 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
10745 Set mode of the step operation."), _("\
10746 Show mode of the step operation."), _("\
10747 When set, stepping into a function that has no debug line information\n\
10748 stops at the first instruction of that function. Otherwise, the\n\
10749 function is skipped and the step command stops at a different source line."),
10750 nullptr,
10751 show_step_stop_if_no_debug,
10752 &setlist, &showlist);
10754 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
10755 &can_use_displaced_stepping, _("\
10756 Set debugger's willingness to use displaced stepping."), _("\
10757 Show debugger's willingness to use displaced stepping."), _("\
10758 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
10759 supported by the target architecture. If off, gdb will not use displaced\n\
10760 stepping to step over breakpoints, even if such is supported by the target\n\
10761 architecture. If auto (which is the default), gdb will use displaced stepping\n\
10762 if the target architecture supports it and non-stop mode is active, but will not\n\
10763 use it in all-stop mode (see help set non-stop)."),
10764 nullptr,
10765 show_can_use_displaced_stepping,
10766 &setlist, &showlist);
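/* Hedged CLI sketch of the displaced-stepping setting above.  The default
   "auto" uses displaced stepping only when the architecture supports it
   and non-stop mode is active, as the help text describes:

     (gdb) show displaced-stepping
     (gdb) set displaced-stepping on  */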
10768 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
10769 &exec_direction, _("Set direction of execution.\n\
10770 Options are 'forward' or 'reverse'."),
10771 _("Show direction of execution (forward/reverse)."),
10772 _("Tells gdb whether to execute forward or backward."),
10773 set_exec_direction_func, show_exec_direction_func,
10774 &setlist, &showlist);
10776 /* Set/show detach-on-fork: user-settable mode. */
10778 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
10779 Set whether gdb will detach the child of a fork."), _("\
10780 Show whether gdb will detach the child of a fork."), _("\
10781 Tells gdb whether to detach the child of a fork."),
10782 nullptr, nullptr, &setlist, &showlist);
10784 /* Set/show disable address space randomization mode. */
10786 add_setshow_boolean_cmd ("disable-randomization", class_support,
10787 &disable_randomization, _("\
10788 Set disabling of debuggee's virtual address space randomization."), _("\
10789 Show disabling of debuggee's virtual address space randomization."), _("\
10790 When this mode is on (which is the default), randomization of the virtual\n\
10791 address space is disabled. Standalone programs run with the randomization\n\
10792 enabled by default on some platforms."),
10793 &set_disable_randomization,
10794 &show_disable_randomization,
10795 &setlist, &showlist);
10797 /* ptid initializations */
10798 inferior_ptid = null_ptid;
10799 target_last_wait_ptid = minus_one_ptid;
10801 gdb::observers::thread_ptid_changed.attach (infrun_thread_ptid_changed,
10802 "infrun");
10803 gdb::observers::thread_stop_requested.attach (infrun_thread_stop_requested,
10804 "infrun");
10805 gdb::observers::inferior_exit.attach (infrun_inferior_exit, "infrun");
10806 gdb::observers::inferior_execd.attach (infrun_inferior_execd, "infrun");
10808 /* Explicitly create without lookup, since lookup would try to create
10809 a value of void type, and gdbarch isn't initialized yet when we get
10810 here. At this point, we're quite sure there isn't another
10811 convenience variable of the same name. */
10812 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, nullptr);
10814 add_setshow_boolean_cmd ("observer", no_class,
10815 &observer_mode_1, _("\
10816 Set whether gdb controls the inferior in observer mode."), _("\
10817 Show whether gdb controls the inferior in observer mode."), _("\
10818 In observer mode, GDB can get data from the inferior, but not\n\
10819 affect its execution. Registers and memory may not be changed,\n\
10820 breakpoints may not be set, and the program cannot be interrupted\n\
10821 or signalled."),
10822 set_observer_mode,
10823 show_observer_mode,
10824 &setlist,
10825 &showlist);
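/* Hedged CLI sketch of the observer setting registered above.  With it
   on, GDB limits itself to read-only inspection of the inferior, per the
   help text above:

     (gdb) set observer on
     (gdb) print some_variable        # reading state is still allowed
     (gdb) set observer off  */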
10827 #if GDB_SELF_TEST
10828 selftests::register_test ("infrun_thread_ptid_changed",
10829 selftests::infrun_thread_ptid_changed);
10830 #endif