Automatic date update in version.in
[binutils-gdb.git] / gdb / record-btrace.c
blob49cbf13ad4deeacae81b96f9fa87307ea06dee59
1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2024 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22 #include "record.h"
23 #include "record-btrace.h"
24 #include "gdbthread.h"
25 #include "target.h"
26 #include "cli/cli-cmds.h"
27 #include "disasm.h"
28 #include "observable.h"
29 #include "cli/cli-utils.h"
30 #include "source.h"
31 #include "top.h"
32 #include "ui-out.h"
33 #include "symtab.h"
34 #include "filenames.h"
35 #include "regcache.h"
36 #include "frame-unwind.h"
37 #include "hashtab.h"
38 #include "infrun.h"
39 #include "gdbsupport/event-loop.h"
40 #include "inf-loop.h"
41 #include "inferior.h"
42 #include <algorithm>
43 #include "gdbarch.h"
44 #include "cli/cli-style.h"
45 #include "async-event.h"
46 #include <forward_list>
47 #include "objfiles.h"
48 #include "interps.h"
50 static const target_info record_btrace_target_info = {
51 "record-btrace",
52 N_("Branch tracing target"),
53 N_("Collect control-flow trace and provide the execution history.")
56 /* The target_ops of record-btrace. */
58 class record_btrace_target final : public target_ops
60 public:
61 const target_info &info () const override
62 { return record_btrace_target_info; }
64 strata stratum () const override { return record_stratum; }
66 void close () override;
67 void async (bool) override;
69 void detach (inferior *inf, int from_tty) override
70 { record_detach (this, inf, from_tty); }
72 void disconnect (const char *, int) override;
74 void mourn_inferior () override
75 { record_mourn_inferior (this); }
77 void kill () override
78 { record_kill (this); }
80 enum record_method record_method (ptid_t ptid) override;
82 void stop_recording () override;
83 void info_record () override;
85 void insn_history (int size, gdb_disassembly_flags flags) override;
86 void insn_history_from (ULONGEST from, int size,
87 gdb_disassembly_flags flags) override;
88 void insn_history_range (ULONGEST begin, ULONGEST end,
89 gdb_disassembly_flags flags) override;
90 void call_history (int size, record_print_flags flags) override;
91 void call_history_from (ULONGEST begin, int size, record_print_flags flags)
92 override;
93 void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
94 override;
96 bool record_is_replaying (ptid_t ptid) override;
97 bool record_will_replay (ptid_t ptid, int dir) override;
98 void record_stop_replaying () override;
100 enum target_xfer_status xfer_partial (enum target_object object,
101 const char *annex,
102 gdb_byte *readbuf,
103 const gdb_byte *writebuf,
104 ULONGEST offset, ULONGEST len,
105 ULONGEST *xfered_len) override;
107 int insert_breakpoint (struct gdbarch *,
108 struct bp_target_info *) override;
109 int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
110 enum remove_bp_reason) override;
112 void fetch_registers (struct regcache *, int) override;
114 void store_registers (struct regcache *, int) override;
115 void prepare_to_store (struct regcache *) override;
117 const struct frame_unwind *get_unwinder () override;
119 const struct frame_unwind *get_tailcall_unwinder () override;
121 void resume (ptid_t, int, enum gdb_signal) override;
122 ptid_t wait (ptid_t, struct target_waitstatus *, target_wait_flags) override;
124 void stop (ptid_t) override;
125 void update_thread_list () override;
126 bool thread_alive (ptid_t ptid) override;
127 void goto_record_begin () override;
128 void goto_record_end () override;
129 void goto_record (ULONGEST insn) override;
131 bool can_execute_reverse () override;
133 bool stopped_by_sw_breakpoint () override;
134 bool supports_stopped_by_sw_breakpoint () override;
136 bool stopped_by_hw_breakpoint () override;
137 bool supports_stopped_by_hw_breakpoint () override;
139 enum exec_direction_kind execution_direction () override;
140 void prepare_to_generate_core () override;
141 void done_generating_core () override;
144 static record_btrace_target record_btrace_ops;
146 /* Initialize the record-btrace target ops. */
148 /* Token associated with a new-thread observer enabling branch tracing
149 for the new thread. */
150 static const gdb::observers::token record_btrace_thread_observer_token {};
152 /* Memory access types used in set/show record btrace replay-memory-access. */
153 static const char replay_memory_access_read_only[] = "read-only";
154 static const char replay_memory_access_read_write[] = "read-write";
155 static const char *const replay_memory_access_types[] =
157 replay_memory_access_read_only,
158 replay_memory_access_read_write,
159 NULL
162 /* The currently allowed replay memory access type. */
163 static const char *replay_memory_access = replay_memory_access_read_only;
165 /* The cpu state kinds. */
166 enum record_btrace_cpu_state_kind
168 CS_AUTO,
169 CS_NONE,
170 CS_CPU
173 /* The current cpu state. */
174 static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;
176 /* The current cpu for trace decode. */
177 static struct btrace_cpu record_btrace_cpu;
179 /* Command lists for "set/show record btrace". */
180 static struct cmd_list_element *set_record_btrace_cmdlist;
181 static struct cmd_list_element *show_record_btrace_cmdlist;
183 /* The execution direction of the last resume we got. See record-full.c. */
184 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
186 /* The async event handler for reverse/replay execution. */
187 static struct async_event_handler *record_btrace_async_inferior_event_handler;
189 /* A flag indicating that we are currently generating a core file. */
190 static int record_btrace_generating_corefile;
192 /* The current branch trace configuration. */
193 static struct btrace_config record_btrace_conf;
195 /* Command list for "record btrace". */
196 static struct cmd_list_element *record_btrace_cmdlist;
198 /* Command lists for "set/show record btrace bts". */
199 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
200 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
202 /* Command lists for "set/show record btrace pt". */
203 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
204 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
206 /* Command list for "set record btrace cpu". */
207 static struct cmd_list_element *set_record_btrace_cpu_cmdlist;
209 /* Print a record-btrace debug message. Use do ... while (0) to avoid
210 ambiguities when used in if statements. */
212 #define DEBUG(msg, args...) \
213 do \
215 if (record_debug != 0) \
216 gdb_printf (gdb_stdlog, \
217 "[record-btrace] " msg "\n", ##args); \
219 while (0)
222 /* Return the cpu configured by the user. Returns NULL if the cpu was
223 configured as auto. */
224 const struct btrace_cpu *
225 record_btrace_get_cpu (void)
227 switch (record_btrace_cpu_state)
229 case CS_AUTO:
230 return nullptr;
232 case CS_NONE:
233 record_btrace_cpu.vendor = CV_UNKNOWN;
234 [[fallthrough]];
235 case CS_CPU:
236 return &record_btrace_cpu;
239 error (_("Internal error: bad record btrace cpu state."));
242 /* Update the branch trace for the current thread and return a pointer to its
243 thread_info.
245 Throws an error if there is no thread or no trace. This function never
246 returns NULL. */
248 static struct thread_info *
249 require_btrace_thread (void)
251 DEBUG ("require");
253 if (inferior_ptid == null_ptid)
254 error (_("No thread."));
256 thread_info *tp = inferior_thread ();
258 validate_registers_access ();
260 btrace_fetch (tp, record_btrace_get_cpu ());
262 if (btrace_is_empty (tp))
263 error (_("No trace."));
265 return tp;
268 /* Update the branch trace for the current thread and return a pointer to its
269 branch trace information struct.
271 Throws an error if there is no thread or no trace. This function never
272 returns NULL. */
274 static struct btrace_thread_info *
275 require_btrace (void)
277 struct thread_info *tp;
279 tp = require_btrace_thread ();
281 return &tp->btrace;
284 /* The new thread observer. */
286 static void
287 record_btrace_on_new_thread (struct thread_info *tp)
289 /* Ignore this thread if its inferior is not recorded by us. */
290 target_ops *rec = tp->inf->target_at (record_stratum);
291 if (rec != &record_btrace_ops)
292 return;
296 btrace_enable (tp, &record_btrace_conf);
298 catch (const gdb_exception_error &error)
300 warning ("%s", error.what ());
304 /* Enable automatic tracing of new threads. */
306 static void
307 record_btrace_auto_enable (void)
309 DEBUG ("attach thread observer");
311 gdb::observers::new_thread.attach (record_btrace_on_new_thread,
312 record_btrace_thread_observer_token,
313 "record-btrace");
316 /* Disable automatic tracing of new threads. */
318 static void
319 record_btrace_auto_disable (void)
321 DEBUG ("detach thread observer");
323 gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
326 /* The record-btrace async event handler function. */
328 static void
329 record_btrace_handle_async_inferior_event (gdb_client_data data)
331 inferior_event_handler (INF_REG_EVENT);
334 /* See record-btrace.h. */
336 void
337 record_btrace_push_target (void)
339 const char *format;
341 record_btrace_auto_enable ();
343 current_inferior ()->push_target (&record_btrace_ops);
345 record_btrace_async_inferior_event_handler
346 = create_async_event_handler (record_btrace_handle_async_inferior_event,
347 NULL, "record-btrace");
348 record_btrace_generating_corefile = 0;
350 format = btrace_format_short_string (record_btrace_conf.format);
351 interps_notify_record_changed (current_inferior (), 1, "btrace", format);
354 /* Disable btrace on a set of threads on scope exit. */
356 struct scoped_btrace_disable
358 scoped_btrace_disable () = default;
360 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);
362 ~scoped_btrace_disable ()
364 for (thread_info *tp : m_threads)
365 btrace_disable (tp);
368 void add_thread (thread_info *thread)
370 m_threads.push_front (thread);
373 void discard ()
375 m_threads.clear ();
378 private:
379 std::forward_list<thread_info *> m_threads;
382 /* Open target record-btrace. */
384 static void
385 record_btrace_target_open (const char *args, int from_tty)
387 /* If we fail to enable btrace for one thread, disable it for the threads for
388 which it was successfully enabled. */
389 scoped_btrace_disable btrace_disable;
391 DEBUG ("open");
393 record_preopen ();
395 if (!target_has_execution ())
396 error (_("The program is not being run."));
398 for (thread_info *tp : current_inferior ()->non_exited_threads ())
399 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
401 btrace_enable (tp, &record_btrace_conf);
403 btrace_disable.add_thread (tp);
406 record_btrace_push_target ();
408 btrace_disable.discard ();
411 /* The stop_recording method of target record-btrace. */
413 void
414 record_btrace_target::stop_recording ()
416 DEBUG ("stop recording");
418 record_btrace_auto_disable ();
420 for (thread_info *tp : current_inferior ()->non_exited_threads ())
421 if (tp->btrace.target != NULL)
422 btrace_disable (tp);
425 /* The disconnect method of target record-btrace. */
427 void
428 record_btrace_target::disconnect (const char *args,
429 int from_tty)
431 struct target_ops *beneath = this->beneath ();
433 /* Do not stop recording, just clean up GDB side. */
434 current_inferior ()->unpush_target (this);
436 /* Forward disconnect. */
437 beneath->disconnect (args, from_tty);
440 /* The close method of target record-btrace. */
442 void
443 record_btrace_target::close ()
445 if (record_btrace_async_inferior_event_handler != NULL)
446 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
448 /* Make sure automatic recording gets disabled even if we did not stop
449 recording before closing the record-btrace target. */
450 record_btrace_auto_disable ();
452 /* We should have already stopped recording.
453 Tear down btrace in case we have not. */
454 for (thread_info *tp : current_inferior ()->non_exited_threads ())
455 btrace_teardown (tp);
458 /* The async method of target record-btrace. */
460 void
461 record_btrace_target::async (bool enable)
463 if (enable)
464 mark_async_event_handler (record_btrace_async_inferior_event_handler);
465 else
466 clear_async_event_handler (record_btrace_async_inferior_event_handler);
468 this->beneath ()->async (enable);
/* Adjusts the size and returns a human readable size suffix.

   If *SIZE is an exact multiple of 1 GiB/MiB/KiB, scale it down to that
   unit and return the matching suffix; otherwise leave *SIZE unchanged
   and return the empty string.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}
499 /* Print a BTS configuration. */
501 static void
502 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
504 const char *suffix;
505 unsigned int size;
507 size = conf->size;
508 if (size > 0)
510 suffix = record_btrace_adjust_size (&size);
511 gdb_printf (_("Buffer size: %u%s.\n"), size, suffix);
515 /* Print an Intel Processor Trace configuration. */
517 static void
518 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
520 const char *suffix;
521 unsigned int size;
523 size = conf->size;
524 if (size > 0)
526 suffix = record_btrace_adjust_size (&size);
527 gdb_printf (_("Buffer size: %u%s.\n"), size, suffix);
531 /* Print a branch tracing configuration. */
533 static void
534 record_btrace_print_conf (const struct btrace_config *conf)
536 gdb_printf (_("Recording format: %s.\n"),
537 btrace_format_string (conf->format));
539 switch (conf->format)
541 case BTRACE_FORMAT_NONE:
542 return;
544 case BTRACE_FORMAT_BTS:
545 record_btrace_print_bts_conf (&conf->bts);
546 return;
548 case BTRACE_FORMAT_PT:
549 record_btrace_print_pt_conf (&conf->pt);
550 return;
553 internal_error (_("Unknown branch trace format."));
556 /* The info_record method of target record-btrace. */
558 void
559 record_btrace_target::info_record ()
561 struct btrace_thread_info *btinfo;
562 const struct btrace_config *conf;
563 struct thread_info *tp;
564 unsigned int insns, calls, gaps;
566 DEBUG ("info");
568 if (inferior_ptid == null_ptid)
569 error (_("No thread."));
571 tp = inferior_thread ();
573 validate_registers_access ();
575 btinfo = &tp->btrace;
577 conf = ::btrace_conf (btinfo);
578 if (conf != NULL)
579 record_btrace_print_conf (conf);
581 btrace_fetch (tp, record_btrace_get_cpu ());
583 insns = 0;
584 calls = 0;
585 gaps = 0;
587 if (!btrace_is_empty (tp))
589 struct btrace_call_iterator call;
590 struct btrace_insn_iterator insn;
592 btrace_call_end (&call, btinfo);
593 btrace_call_prev (&call, 1);
594 calls = btrace_call_number (&call);
596 btrace_insn_end (&insn, btinfo);
597 insns = btrace_insn_number (&insn);
599 /* If the last instruction is not a gap, it is the current instruction
600 that is not actually part of the record. */
601 if (btrace_insn_get (&insn) != NULL)
602 insns -= 1;
604 gaps = btinfo->ngaps;
607 gdb_printf (_("Recorded %u instructions in %u functions (%u gaps) "
608 "for thread %s (%s).\n"), insns, calls, gaps,
609 print_thread_id (tp),
610 target_pid_to_str (tp->ptid).c_str ());
612 if (btrace_is_replaying (tp))
613 gdb_printf (_("Replay in progress. At instruction %u.\n"),
614 btrace_insn_number (btinfo->replay));
617 /* Print a decode error. */
619 static void
620 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
621 enum btrace_format format)
623 const char *errstr = btrace_decode_error (format, errcode);
625 uiout->text (_("["));
626 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
627 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
629 uiout->text (_("decode error ("));
630 uiout->field_signed ("errcode", errcode);
631 uiout->text (_("): "));
633 uiout->text (errstr);
634 uiout->text (_("]\n"));
/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end < line)
    range.end = line;

  return range;
}

/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
				  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
	  && (lhs.begin <= rhs.begin)
	  && (rhs.end <= lhs.end));
}
703 /* Find the line range associated with PC. */
705 static struct btrace_line_range
706 btrace_find_line_range (CORE_ADDR pc)
708 struct btrace_line_range range;
709 const linetable_entry *lines;
710 const linetable *ltable;
711 struct symtab *symtab;
712 int nlines, i;
714 symtab = find_pc_line_symtab (pc);
715 if (symtab == NULL)
716 return btrace_mk_line_range (NULL, 0, 0);
718 ltable = symtab->linetable ();
719 if (ltable == NULL)
720 return btrace_mk_line_range (symtab, 0, 0);
722 nlines = ltable->nitems;
723 lines = ltable->item;
724 if (nlines <= 0)
725 return btrace_mk_line_range (symtab, 0, 0);
727 struct objfile *objfile = symtab->compunit ()->objfile ();
728 unrelocated_addr unrel_pc
729 = unrelocated_addr (pc - objfile->text_section_offset ());
731 range = btrace_mk_line_range (symtab, 0, 0);
732 for (i = 0; i < nlines - 1; i++)
734 /* The test of is_stmt here was added when the is_stmt field was
735 introduced to the 'struct linetable_entry' structure. This
736 ensured that this loop maintained the same behaviour as before we
737 introduced is_stmt. That said, it might be that we would be
738 better off not checking is_stmt here, this would lead to us
739 possibly adding more line numbers to the range. At the time this
740 change was made I was unsure how to test this so chose to go with
741 maintaining the existing experience. */
742 if (lines[i].unrelocated_pc () == unrel_pc && lines[i].line != 0
743 && lines[i].is_stmt)
744 range = btrace_line_range_add (range, lines[i].line);
747 return range;
750 /* Print source lines in LINES to UIOUT.
752 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
753 instructions corresponding to that source line. When printing a new source
754 line, we do the cleanups for the open chain and open a new cleanup chain for
755 the new source line. If the source line range in LINES is not empty, this
756 function will leave the cleanup chain for the last printed source line open
757 so instructions can be added to it. */
759 static void
760 btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
761 std::optional<ui_out_emit_tuple> *src_and_asm_tuple,
762 std::optional<ui_out_emit_list> *asm_list,
763 gdb_disassembly_flags flags)
765 print_source_lines_flags psl_flags;
767 if (flags & DISASSEMBLY_FILENAME)
768 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
770 for (int line = lines.begin; line < lines.end; ++line)
772 asm_list->reset ();
774 src_and_asm_tuple->emplace (uiout, "src_and_asm_line");
776 print_source_lines (lines.symtab, line, line + 1, psl_flags);
778 asm_list->emplace (uiout, "line_asm_insn");
782 /* Disassemble a section of the recorded instruction trace. */
784 static void
785 btrace_insn_history (struct ui_out *uiout,
786 const struct btrace_thread_info *btinfo,
787 const struct btrace_insn_iterator *begin,
788 const struct btrace_insn_iterator *end,
789 gdb_disassembly_flags flags)
791 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
792 btrace_insn_number (begin), btrace_insn_number (end));
794 flags |= DISASSEMBLY_SPECULATIVE;
796 gdbarch *gdbarch = current_inferior ()->arch ();
797 btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);
799 ui_out_emit_list list_emitter (uiout, "asm_insns");
801 std::optional<ui_out_emit_tuple> src_and_asm_tuple;
802 std::optional<ui_out_emit_list> asm_list;
804 gdb_pretty_print_disassembler disasm (gdbarch, uiout);
806 for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
807 btrace_insn_next (&it, 1))
809 const struct btrace_insn *insn;
811 insn = btrace_insn_get (&it);
813 /* A NULL instruction indicates a gap in the trace. */
814 if (insn == NULL)
816 const struct btrace_config *conf;
818 conf = btrace_conf (btinfo);
820 /* We have trace so we must have a configuration. */
821 gdb_assert (conf != NULL);
823 uiout->field_fmt ("insn-number", "%u",
824 btrace_insn_number (&it));
825 uiout->text ("\t");
827 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
828 conf->format);
830 else if (insn->iclass == BTRACE_INSN_AUX)
832 if ((flags & DISASSEMBLY_OMIT_AUX_INSN) != 0)
833 continue;
835 uiout->field_fmt ("insn-number", "%u", btrace_insn_number (&it));
836 uiout->text ("\t");
837 /* Add 3 spaces to match the instructions and 2 to indent the aux
838 string to make it more visible. */
839 uiout->spaces (5);
840 uiout->text ("[");
841 uiout->field_fmt ("aux-data", "%s",
842 it.btinfo->aux_data.at
843 (insn->aux_data_index).c_str ());
844 uiout->text ("]\n");
846 else
848 struct disasm_insn dinsn;
850 if ((flags & DISASSEMBLY_SOURCE) != 0)
852 struct btrace_line_range lines;
854 lines = btrace_find_line_range (insn->pc);
855 if (!btrace_line_range_is_empty (lines)
856 && !btrace_line_range_contains_range (last_lines, lines))
858 btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
859 flags);
860 last_lines = lines;
862 else if (!src_and_asm_tuple.has_value ())
864 gdb_assert (!asm_list.has_value ());
866 src_and_asm_tuple.emplace (uiout, "src_and_asm_line");
868 /* No source information. */
869 asm_list.emplace (uiout, "line_asm_insn");
872 gdb_assert (src_and_asm_tuple.has_value ());
873 gdb_assert (asm_list.has_value ());
876 memset (&dinsn, 0, sizeof (dinsn));
877 dinsn.number = btrace_insn_number (&it);
878 dinsn.addr = insn->pc;
880 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
881 dinsn.is_speculative = 1;
883 disasm.pretty_print_insn (&dinsn, flags);
888 /* The insn_history method of target record-btrace. */
890 void
891 record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
893 struct btrace_thread_info *btinfo;
894 struct btrace_insn_history *history;
895 struct btrace_insn_iterator begin, end;
896 struct ui_out *uiout;
897 unsigned int context, covered;
899 uiout = current_uiout;
900 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
901 context = abs (size);
902 if (context == 0)
903 error (_("Bad record instruction-history-size."));
905 btinfo = require_btrace ();
906 history = btinfo->insn_history;
907 if (history == NULL)
909 struct btrace_insn_iterator *replay;
911 DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);
913 /* If we're replaying, we start at the replay position. Otherwise, we
914 start at the tail of the trace. */
915 replay = btinfo->replay;
916 if (replay != NULL)
917 begin = *replay;
918 else
919 btrace_insn_end (&begin, btinfo);
921 /* We start from here and expand in the requested direction. Then we
922 expand in the other direction, as well, to fill up any remaining
923 context. */
924 end = begin;
925 if (size < 0)
927 /* We want the current position covered, as well. */
928 covered = btrace_insn_next (&end, 1);
929 covered += btrace_insn_prev (&begin, context - covered);
930 covered += btrace_insn_next (&end, context - covered);
932 else
934 covered = btrace_insn_next (&end, context);
935 covered += btrace_insn_prev (&begin, context - covered);
938 else
940 begin = history->begin;
941 end = history->end;
943 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
944 btrace_insn_number (&begin), btrace_insn_number (&end));
946 if (size < 0)
948 end = begin;
949 covered = btrace_insn_prev (&begin, context);
951 else
953 begin = end;
954 covered = btrace_insn_next (&end, context);
958 if (covered > 0)
959 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
960 else
962 if (size < 0)
963 gdb_printf (_("At the start of the branch trace record.\n"));
964 else
965 gdb_printf (_("At the end of the branch trace record.\n"));
968 btrace_set_insn_history (btinfo, &begin, &end);
971 /* The insn_history_range method of target record-btrace. */
973 void
974 record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
975 gdb_disassembly_flags flags)
977 struct btrace_thread_info *btinfo;
978 struct btrace_insn_iterator begin, end;
979 struct ui_out *uiout;
980 unsigned int low, high;
981 int found;
983 uiout = current_uiout;
984 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
985 low = from;
986 high = to;
988 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);
990 /* Check for wrap-arounds. */
991 if (low != from || high != to)
992 error (_("Bad range."));
994 if (high < low)
995 error (_("Bad range."));
997 btinfo = require_btrace ();
999 found = btrace_find_insn_by_number (&begin, btinfo, low);
1000 if (found == 0)
1001 error (_("Range out of bounds."));
1003 found = btrace_find_insn_by_number (&end, btinfo, high);
1004 if (found == 0)
1006 /* Silently truncate the range. */
1007 btrace_insn_end (&end, btinfo);
1009 else
1011 /* We want both begin and end to be inclusive. */
1012 btrace_insn_next (&end, 1);
1015 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
1016 btrace_set_insn_history (btinfo, &begin, &end);
1019 /* The insn_history_from method of target record-btrace. */
1021 void
1022 record_btrace_target::insn_history_from (ULONGEST from, int size,
1023 gdb_disassembly_flags flags)
1025 ULONGEST begin, end, context;
1027 context = abs (size);
1028 if (context == 0)
1029 error (_("Bad record instruction-history-size."));
1031 if (size < 0)
1033 end = from;
1035 if (from < context)
1036 begin = 0;
1037 else
1038 begin = from - context + 1;
1040 else
1042 begin = from;
1043 end = from + context - 1;
1045 /* Check for wrap-around. */
1046 if (end < begin)
1047 end = ULONGEST_MAX;
1050 insn_history_range (begin, end, flags);
1053 /* Print the instruction number range for a function call history line. */
1055 static void
1056 btrace_call_history_insn_range (struct ui_out *uiout,
1057 const struct btrace_function *bfun)
1059 unsigned int begin, end, size;
1061 size = bfun->insn.size ();
1062 gdb_assert (size > 0);
1064 begin = bfun->insn_offset;
1065 end = begin + size - 1;
1067 uiout->field_unsigned ("insn begin", begin);
1068 uiout->text (",");
1069 uiout->field_unsigned ("insn end", end);
1072 /* Compute the lowest and highest source line for the instructions in BFUN
1073 and return them in PBEGIN and PEND.
1074 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
1075 result from inlining or macro expansion. */
1077 static void
1078 btrace_compute_src_line_range (const struct btrace_function *bfun,
1079 int *pbegin, int *pend)
1081 struct symtab *symtab;
1082 struct symbol *sym;
1083 int begin, end;
1085 begin = INT_MAX;
1086 end = INT_MIN;
1088 sym = bfun->sym;
1089 if (sym == NULL)
1090 goto out;
1092 symtab = sym->symtab ();
1094 for (const btrace_insn &insn : bfun->insn)
1096 struct symtab_and_line sal;
1098 sal = find_pc_line (insn.pc, 0);
1099 if (sal.symtab != symtab || sal.line == 0)
1100 continue;
1102 begin = std::min (begin, sal.line);
1103 end = std::max (end, sal.line);
1106 out:
1107 *pbegin = begin;
1108 *pend = end;
1111 /* Print the source line information for a function call history line. */
1113 static void
1114 btrace_call_history_src_line (struct ui_out *uiout,
1115 const struct btrace_function *bfun)
1117 struct symbol *sym;
1118 int begin, end;
1120 sym = bfun->sym;
1121 if (sym == NULL)
1122 return;
1124 uiout->field_string ("file",
1125 symtab_to_filename_for_display (sym->symtab ()),
1126 file_name_style.style ());
1128 btrace_compute_src_line_range (bfun, &begin, &end);
1129 if (end < begin)
1130 return;
1132 uiout->text (":");
1133 uiout->field_signed ("min line", begin);
1135 if (end == begin)
1136 return;
1138 uiout->text (",");
1139 uiout->field_signed ("max line", end);
1142 /* Get the name of a branch trace function. */
1144 static const char *
1145 btrace_get_bfun_name (const struct btrace_function *bfun)
1147 struct minimal_symbol *msym;
1148 struct symbol *sym;
1150 if (bfun == NULL)
1151 return "??";
1153 msym = bfun->msym;
1154 sym = bfun->sym;
1156 if (sym != NULL)
1157 return sym->print_name ();
1158 else if (msym != NULL)
1159 return msym->print_name ();
1160 else
1161 return "??";
1164 static void
1165 btrace_print_aux_insn (struct ui_out *uiout,
1166 const struct btrace_function *bfun,
1167 const struct btrace_thread_info *btinfo,
1168 int level)
1170 for (const btrace_insn &insn : bfun->insn)
1172 if (insn.iclass == BTRACE_INSN_AUX)
1174 /* Indent to the function level. */
1175 uiout->text ("\t");
1176 /* Adjust for RECORD_PRINT_INDENT_CALLS and indent one
1177 additional level. */
1178 for (int i = 0; i <= level; ++i)
1179 uiout->text (" ");
1181 uiout->text ("[");
1182 uiout->field_fmt ("aux-data", "%s",
1183 btinfo->aux_data.at (insn.aux_data_index).c_str ());
1184 uiout->text ("]\n");
/* Disassemble a section of the recorded function trace.

   Print one line per function segment in [BEGIN; END) to UIOUT, honoring
   the RECORD_PRINT_* bits in INT_FLAGS (indentation, instruction ranges,
   source lines, auxiliary data).  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;
      int level = 0;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      uiout->field_unsigned ("index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  A gap segment carries an error code
	 instead of instructions.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  level = bfun->level + btinfo->level;

	  for (int i = 0; i < level; ++i)
	    uiout->text ("  ");
	}

      /* Prefer the debug symbol over the minimal symbol; print "??" only
	 on CLI consoles, never in MI output.  */
      if (sym != NULL)
	uiout->field_string ("function", sym->print_name (),
			     function_name_style.style ());
      else if (msym != NULL)
	uiout->field_string ("function", msym->print_name (),
			     function_name_style.style ());
      else if (!uiout->is_mi_like_p ())
	uiout->field_string ("function", "??",
			     function_name_style.style ());

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  uiout->text (_("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  uiout->text (_("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      uiout->text ("\n");

      /* Also print auxiliary data recorded in this segment, unless
	 suppressed by RECORD_DONT_PRINT_AUX.  */
      if (((flags & RECORD_DONT_PRINT_AUX) == 0)
	  && ((bfun->flags & BFUN_CONTAINS_AUX) != 0))
	btrace_print_aux_insn (uiout, bfun, btinfo, level);
    }
}
/* The call_history method of target record-btrace.

   Print SIZE function segments of execution history; a negative SIZE
   moves backwards.  Successive calls continue from the previously shown
   range.  */

void
record_btrace_target::call_history (int size, record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  /* NOTE(review): the tuple is named "insn history" although this is the
     call history; MI consumers may depend on the existing name -- confirm
     before changing.  */
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();

  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", (int) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.btinfo = btinfo;
	  begin.index = replay->call_index;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from the previously printed range.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	gdb_printf (_("At the start of the branch trace record.\n"));
      else
	gdb_printf (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed range for the next invocation.  */
  btrace_set_call_history (btinfo, &begin, &end);
}
/* The call_history_range method of target record-btrace.

   Print the function segments numbered [FROM; TO], both inclusive.  */

void
record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
					  record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "func history");
  /* Segment numbers are unsigned int internally; narrowing here is
     checked for wrap-around just below.  */
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);
}
1406 /* The call_history_from method of target record-btrace. */
1408 void
1409 record_btrace_target::call_history_from (ULONGEST from, int size,
1410 record_print_flags flags)
1412 ULONGEST begin, end, context;
1414 context = abs (size);
1415 if (context == 0)
1416 error (_("Bad record function-call-history-size."));
1418 if (size < 0)
1420 end = from;
1422 if (from < context)
1423 begin = 0;
1424 else
1425 begin = from - context + 1;
1427 else
1429 begin = from;
1430 end = from + context - 1;
1432 /* Check for wrap-around. */
1433 if (end < begin)
1434 end = ULONGEST_MAX;
1437 call_history_range ( begin, end, flags);
1440 /* The record_method method of target record-btrace. */
1442 enum record_method
1443 record_btrace_target::record_method (ptid_t ptid)
1445 process_stratum_target *proc_target = current_inferior ()->process_target ();
1446 thread_info *const tp = proc_target->find_thread (ptid);
1448 if (tp == NULL)
1449 error (_("No thread."));
1451 if (tp->btrace.target == NULL)
1452 return RECORD_METHOD_NONE;
1454 return RECORD_METHOD_BTRACE;
1457 /* The record_is_replaying method of target record-btrace. */
1459 bool
1460 record_btrace_target::record_is_replaying (ptid_t ptid)
1462 process_stratum_target *proc_target = current_inferior ()->process_target ();
1463 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
1464 if (btrace_is_replaying (tp))
1465 return true;
1467 return false;
1470 /* The record_will_replay method of target record-btrace. */
1472 bool
1473 record_btrace_target::record_will_replay (ptid_t ptid, int dir)
1475 return dir == EXEC_REVERSE || record_is_replaying (ptid);
/* The xfer_partial method of target record-btrace.

   While replaying with read-only memory access, memory writes are refused
   and reads are restricted to read-only sections; everything else is
   forwarded to the target beneath.  */

enum target_xfer_status
record_btrace_target::xfer_partial (enum target_object object,
				    const char *annex, gdb_byte *readbuf,
				    const gdb_byte *writebuf, ULONGEST offset,
				    ULONGEST len, ULONGEST *xfered_len)
{
  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_is_replaying (inferior_ptid))
    {
      switch (object)
	{
	case TARGET_OBJECT_MEMORY:
	  {
	    const struct target_section *section;

	    /* We do not allow writing memory in general.  */
	    if (writebuf != NULL)
	      {
		*xfered_len = len;
		return TARGET_XFER_UNAVAILABLE;
	      }

	    /* We allow reading readonly memory.  */
	    section = target_section_by_addr (this, offset);
	    if (section != NULL)
	      {
		/* Check if the section we found is readonly.  */
		if ((bfd_section_flags (section->the_bfd_section)
		     & SEC_READONLY) != 0)
		  {
		    /* Truncate the request to fit into this section and
		       fall through to forward the (read) request.  */
		    len = std::min (len, section->endaddr - offset);
		    break;
		  }
	      }

	    /* Not (known to be) read-only -- unavailable during replay.  */
	    *xfered_len = len;
	    return TARGET_XFER_UNAVAILABLE;
	  }
	}
      /* Non-memory objects fall through and are forwarded unchanged.  */
    }

  /* Forward the request.  */
  return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
					 offset, len, xfered_len);
}
/* The insert_breakpoint method of target record-btrace.

   Breakpoint insertion writes to target memory, which is normally refused
   during replay; temporarily allow it.  */

int
record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
					 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  try
    {
      ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
    }
  catch (const gdb_exception &except)
    {
      /* Restore the access policy before propagating the error.  */
      replay_memory_access = old;
      throw;
    }
  replay_memory_access = old;

  return ret;
}
/* The remove_breakpoint method of target record-btrace.

   Mirror of insert_breakpoint: breakpoint removal also writes to target
   memory, so memory access is temporarily widened.  */

int
record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
					 struct bp_target_info *bp_tgt,
					 enum remove_bp_reason reason)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  try
    {
      ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
    }
  catch (const gdb_exception &except)
    {
      /* Restore the access policy before propagating the error.  */
      replay_memory_access = old;
      throw;
    }
  replay_memory_access = old;

  return ret;
}
1588 /* The fetch_registers method of target record-btrace. */
1590 void
1591 record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
1593 btrace_insn_iterator *replay = nullptr;
1595 /* Thread-db may ask for a thread's registers before GDB knows about the
1596 thread. We forward the request to the target beneath in this
1597 case. */
1598 thread_info *tp
1599 = current_inferior ()->process_target ()->find_thread (regcache->ptid ());
1600 if (tp != nullptr)
1601 replay = tp->btrace.replay;
1603 if (replay != nullptr && !record_btrace_generating_corefile)
1605 const struct btrace_insn *insn;
1606 struct gdbarch *gdbarch;
1607 int pcreg;
1609 gdbarch = regcache->arch ();
1610 pcreg = gdbarch_pc_regnum (gdbarch);
1611 if (pcreg < 0)
1612 return;
1614 /* We can only provide the PC register. */
1615 if (regno >= 0 && regno != pcreg)
1616 return;
1618 insn = btrace_insn_get (replay);
1619 gdb_assert (insn != NULL);
1621 regcache->raw_supply (regno, &insn->pc);
1623 else
1624 this->beneath ()->fetch_registers (regcache, regno);
1627 /* The store_registers method of target record-btrace. */
1629 void
1630 record_btrace_target::store_registers (struct regcache *regcache, int regno)
1632 if (!record_btrace_generating_corefile
1633 && record_is_replaying (regcache->ptid ()))
1634 error (_("Cannot write registers while replaying."));
1636 gdb_assert (may_write_registers);
1638 this->beneath ()->store_registers (regcache, regno);
1641 /* The prepare_to_store method of target record-btrace. */
1643 void
1644 record_btrace_target::prepare_to_store (struct regcache *regcache)
1646 if (!record_btrace_generating_corefile
1647 && record_is_replaying (regcache->ptid ()))
1648 return;
1650 this->beneath ()->prepare_to_store (regcache);
/* The branch trace frame cache.

   Associates a frame with the recorded function segment it represents,
   for use by the btrace frame unwinders below.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  Used as the hash table key; not owned.  */
  frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};
/* A struct btrace_frame_cache hash table indexed by NEXT.
   Entries are keyed on the FRAME pointer (see bfcache_hash/bfcache_eq)
   and removed again in record_btrace_frame_dealloc_cache.  */

static htab_t bfcache;
1671 /* hash_f for htab_create_alloc of bfcache. */
1673 static hashval_t
1674 bfcache_hash (const void *arg)
1676 const struct btrace_frame_cache *cache
1677 = (const struct btrace_frame_cache *) arg;
1679 return htab_hash_pointer (cache->frame);
1682 /* eq_f for htab_create_alloc of bfcache. */
1684 static int
1685 bfcache_eq (const void *arg1, const void *arg2)
1687 const struct btrace_frame_cache *cache1
1688 = (const struct btrace_frame_cache *) arg1;
1689 const struct btrace_frame_cache *cache2
1690 = (const struct btrace_frame_cache *) arg2;
1692 return cache1->frame == cache2->frame;
/* Create a new btrace frame cache for FRAME and register it in the global
   bfcache hash table.  The entry is allocated on the frame obstack and
   lives as long as the frame itself.  */

static struct btrace_frame_cache *
bfcache_new (const frame_info_ptr &frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame.get ();

  /* The frame must not already have an entry.  */
  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}
1713 /* Extract the branch trace function from a branch trace frame. */
1715 static const struct btrace_function *
1716 btrace_get_frame_function (const frame_info_ptr &frame)
1718 const struct btrace_frame_cache *cache;
1719 struct btrace_frame_cache pattern;
1720 void **slot;
1722 pattern.frame = frame.get ();
1724 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1725 if (slot == NULL)
1726 return NULL;
1728 cache = (const struct btrace_frame_cache *) *slot;
1729 return cache->bfun;
1732 /* Implement stop_reason method for record_btrace_frame_unwind. */
1734 static enum unwind_stop_reason
1735 record_btrace_frame_unwind_stop_reason (const frame_info_ptr &this_frame,
1736 void **this_cache)
1738 const struct btrace_frame_cache *cache;
1739 const struct btrace_function *bfun;
1741 cache = (const struct btrace_frame_cache *) *this_cache;
1742 bfun = cache->bfun;
1743 gdb_assert (bfun != NULL);
1745 if (bfun->up == 0)
1746 return UNWIND_UNAVAILABLE;
1748 return UNWIND_NO_REASON;
/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (const frame_info_ptr &this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_call_iterator it;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Walk PREV links back to the first segment of this function invocation
     so that all its segments share one frame id.  */
  while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
    bfun = btrace_call_get (&it);

  code = get_frame_func (this_frame);
  /* The segment number makes the id unique per recorded invocation.  */
  special = bfun->number;

  /* The stack contents are not available during replay, so build an id
     with an unavailable stack address.  */
  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}
/* Implement prev_register method for record_btrace_frame_unwind.

   The branch trace records no register contents; the only register that
   can be reconstructed for the caller is the PC.  */

static struct value *
record_btrace_frame_prev_register (const frame_info_ptr &this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  struct btrace_call_iterator it;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  caller = btrace_call_get (&it);

  /* If the up link was created by a return into CALLER, its segment
     starts with the instruction we return to.  Otherwise the link was
     created by the call itself, and execution resumes after CALLER's
     last (call) instruction.  */
  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    pc = caller->insn.front ().pc;
  else
    {
      pc = caller->insn.back ().pc;
      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
/* Implement sniffer method for record_btrace_frame_unwind.

   Claims the frame if it corresponds to a recorded function segment:
   either the current replay position (innermost frame) or the caller of
   an already-claimed btrace frame.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     const frame_info_ptr &this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  frame_info_ptr next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = inferior_thread ();

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: use the current replay position, if any.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = &replay->btinfo->functions[replay->call_index];
    }
  else
    {
      /* Outer frame: follow the callee's up link.  Tail calls are left
	 to the tailcall sniffer.  */
      const struct btrace_function *callee;
      struct btrace_call_iterator it;

      callee = btrace_get_frame_function (next);
      if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
	return 0;

      if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
	return 0;

      bfun = btrace_call_get (&it);
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
/* Implement sniffer method for record_btrace_tailcall_frame_unwind.

   Claims a frame whose callee (the next, inner frame) was entered via a
   recorded tail call.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      const frame_info_ptr &this_frame,
				      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct btrace_call_iterator it;
  frame_info_ptr next;
  struct thread_info *tinfo;

  /* A tail-call frame is never innermost: there must be a next frame.  */
  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  /* Only claim the frame if the callee was entered via a tail call.  */
  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  tinfo = inferior_thread ();
  if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
    return 0;

  bfun = btrace_call_get (&it);

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tinfo;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1923 static void
1924 record_btrace_frame_dealloc_cache (frame_info *self, void *this_cache)
1926 struct btrace_frame_cache *cache;
1927 void **slot;
1929 cache = (struct btrace_frame_cache *) this_cache;
1931 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1932 gdb_assert (slot != NULL);
1934 htab_remove_elt (bfcache, cache);
/* btrace recording does not store previous memory content, neither the stack
   frames content.  Any unwinding would return erroneous results as the stack
   contents no longer matches the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  "record-btrace",
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,	/* unwind_data */
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
/* Unwinder for frames entered via a recorded tail call; shares all
   callbacks with record_btrace_frame_unwind except the sniffer.  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  "record-btrace tailcall",
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,	/* unwind_data */
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
/* Implement the get_unwinder method.  Return the btrace unwinder used for
   normal frames while replaying.  */

const struct frame_unwind *
record_btrace_target::get_unwinder ()
{
  return &record_btrace_frame_unwind;
}
/* Implement the get_tailcall_unwinder method.  Return the btrace unwinder
   used for tail-call frames while replaying.  */

const struct frame_unwind *
record_btrace_target::get_tailcall_unwinder ()
{
  return &record_btrace_tailcall_frame_unwind;
}
1983 /* Return a human-readable string for FLAG. */
1985 static const char *
1986 btrace_thread_flag_to_str (btrace_thread_flags flag)
1988 switch (flag)
1990 case BTHR_STEP:
1991 return "step";
1993 case BTHR_RSTEP:
1994 return "reverse-step";
1996 case BTHR_CONT:
1997 return "cont";
1999 case BTHR_RCONT:
2000 return "reverse-cont";
2002 case BTHR_STOP:
2003 return "stop";
2006 return "<invalid>";
/* Indicate that TP should be resumed according to FLAG.

   Only records the intent in TP's btrace flags; the actual stepping
   happens later, in the wait method.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
	 tp->ptid.to_string ().c_str (), flag,
	 btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp, record_btrace_get_cpu ());

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}
/* Get the current frame id for TP.  */

static struct frame_id
get_thread_current_frame_id (struct thread_info *tp)
{
  /* Set current thread, which is implicitly used by
     get_current_frame.  */
  scoped_restore_current_thread restore_thread;

  switch_to_thread (tp);

  process_stratum_target *proc_target = tp->inf->process_target ();

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  bool executing = tp->executing ();
  set_executing (proc_target, inferior_ptid, false);
  SCOPE_EXIT
    {
      /* Restore the previous execution state on every return path.  */
      set_executing (proc_target, inferior_ptid, executing);
    };

  return get_frame_id (get_current_frame ());
}
/* Start replaying a thread.  Return the new replay iterator, which is also
   stored in TP's btrace info; on error, all replay state is rolled back.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->functions.empty ())
    error (_("No trace."));

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  try
    {
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame_id = get_thread_current_frame_id (tp);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = (frame_id == tp->control.step_frame_id);
      upd_step_stack_frame_id = (frame_id == tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
	{
	  unsigned int steps;

	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    error (_("No trace."));
	}

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_thread (tp);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame_id = get_thread_current_frame_id (tp);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }
  catch (const gdb_exception &except)
    {
      /* Undo the replay state before propagating the error.  */
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_thread (tp);

      throw;
    }

  return replay;
}
2136 /* Stop replaying a thread. */
2138 static void
2139 record_btrace_stop_replaying (struct thread_info *tp)
2141 struct btrace_thread_info *btinfo;
2143 btinfo = &tp->btrace;
2145 xfree (btinfo->replay);
2146 btinfo->replay = NULL;
2148 /* Make sure we're not leaving any stale registers. */
2149 registers_changed_thread (tp);
2152 /* Stop replaying TP if it is at the end of its execution history. */
2154 static void
2155 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2157 struct btrace_insn_iterator *replay, end;
2158 struct btrace_thread_info *btinfo;
2160 btinfo = &tp->btrace;
2161 replay = btinfo->replay;
2163 if (replay == NULL)
2164 return;
2166 btrace_insn_end (&end, btinfo);
2168 if (btrace_insn_cmp (replay, &end) == 0)
2169 record_btrace_stop_replaying (tp);
/* The resume method of target record-btrace.  */

void
record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
{
  enum btrace_thread_flag flag, cflag;

  DEBUG ("resume %s: %s%s", ptid.to_string ().c_str (),
	 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
	 step ? "step" : "cont");

  /* Store the execution direction of the last resume.

     If there is more than one resume call, we have to rely on infrun
     to not change the execution direction in-between.  */
  record_btrace_resume_exec_dir = ::execution_direction;

  /* As long as we're not replaying, just forward the request.

     For non-stop targets this means that no thread is replaying.  In order to
     make progress, we may need to explicitly move replaying threads to the end
     of their execution history.  */
  if ((::execution_direction != EXEC_REVERSE)
      && !record_is_replaying (minus_one_ptid))
    {
      this->beneath ()->resume (ptid, step, signal);
      return;
    }

  /* Compute the btrace thread flag for the requested move.  */
  if (::execution_direction == EXEC_REVERSE)
    {
      flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
      cflag = BTHR_RCONT;
    }
  else
    {
      flag = step == 0 ? BTHR_CONT : BTHR_STEP;
      cflag = BTHR_CONT;
    }

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.

     For all-stop targets, we only step INFERIOR_PTID and continue others.  */

  process_stratum_target *proc_target = current_inferior ()->process_target ();

  if (!target_is_non_stop_p ())
    {
      gdb_assert (inferior_ptid.matches (ptid));

      for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
	{
	  if (tp->ptid.matches (inferior_ptid))
	    record_btrace_resume_thread (tp, flag);
	  else
	    record_btrace_resume_thread (tp, cflag);
	}
    }
  else
    {
      for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
	record_btrace_resume_thread (tp, flag);
    }

  /* Async support.  */
  if (target_can_async_p ())
    {
      target_async (true);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}
2246 /* Cancel resuming TP. */
2248 static void
2249 record_btrace_cancel_resume (struct thread_info *tp)
2251 btrace_thread_flags flags;
2253 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2254 if (flags == 0)
2255 return;
2257 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2258 print_thread_id (tp),
2259 tp->ptid.to_string ().c_str (), flags.raw (),
2260 btrace_thread_flag_to_str (flags));
2262 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2263 record_btrace_stop_replaying_at_end (tp);
/* Return a target_waitstatus indicating that we ran out of history.  */

static struct target_waitstatus
btrace_step_no_history (void)
{
  struct target_waitstatus status;

  status.set_no_history ();

  return status;
}

/* Return a target_waitstatus indicating that a step finished.  */

static struct target_waitstatus
btrace_step_stopped (void)
{
  struct target_waitstatus status;

  /* Report SIGTRAP, as a live target would after a completed step.  */
  status.set_stopped (GDB_SIGNAL_TRAP);

  return status;
}

/* Return a target_waitstatus indicating that a thread was stopped as
   requested.  */

static struct target_waitstatus
btrace_step_stopped_on_request (void)
{
  struct target_waitstatus status;

  /* No signal: the stop was requested, not caused by a trap.  */
  status.set_stopped (GDB_SIGNAL_0);

  return status;
}

/* Return a target_waitstatus indicating a spurious stop.  */

static struct target_waitstatus
btrace_step_spurious (void)
{
  struct target_waitstatus status;

  status.set_spurious ();

  return status;
}

/* Return a target_waitstatus indicating that the thread was not resumed.  */

static struct target_waitstatus
btrace_step_no_resumed (void)
{
  struct target_waitstatus status;

  status.set_no_resumed ();

  return status;
}

/* Return a target_waitstatus indicating that we should wait again.  */

static struct target_waitstatus
btrace_step_again (void)
{
  struct target_waitstatus status;

  status.set_ignore ();

  return status;
}
2339 /* Clear the record histories. */
2341 static void
2342 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2344 xfree (btinfo->insn_history);
2345 xfree (btinfo->call_history);
2347 btinfo->insn_history = NULL;
2348 btinfo->call_history = NULL;
/* Check whether TP's current replay position is at a breakpoint.
   Returns non-zero if so; also updates BTINFO's stop reason.  */

static int
record_btrace_replay_at_breakpoint (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  const struct btrace_insn *insn;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Not replaying -- we cannot be at a recorded breakpoint.  */
  if (replay == NULL)
    return 0;

  /* A gap in the trace has no instruction (and hence no PC) to check.  */
  insn = btrace_insn_get (replay);
  if (insn == NULL)
    return 0;

  return record_check_stopped_by_breakpoint (tp->inf->aspace.get (), insn->pc,
					     &btinfo->stop_reason);
}
/* Step one instruction in forward direction.  */

static struct target_waitstatus
record_btrace_single_step_forward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end, start;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* We're done if we're not replaying.  */
  if (replay == NULL)
    return btrace_step_no_history ();

  /* Check if we're stepping a breakpoint.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  /* Skip gaps during replay.  If we end up at a gap (at the end of the trace),
     jump back to the instruction at which we started.  If we're stepping a
     BTRACE_INSN_AUX instruction, print the auxiliary data and skip the
     instruction.  */

  start = *replay;

  for (;;)
    {
      unsigned int steps;

      /* We will bail out here if we continue stepping after reaching the end
	 of the execution history.  */
      steps = btrace_insn_next (replay, 1);
      if (steps == 0)
	{
	  /* Restore the position we started from before reporting.  */
	  *replay = start;
	  return btrace_step_no_history ();
	}

      /* A NULL instruction is a gap in the trace; keep skipping.  */
      const struct btrace_insn *insn = btrace_insn_get (replay);
      if (insn == nullptr)
	continue;

      /* If we're stepping a BTRACE_INSN_AUX instruction, print the auxiliary
	 data and skip the instruction.  */
      if (insn->iclass == BTRACE_INSN_AUX)
	{
	  gdb_printf ("[%s]\n",
		      btinfo->aux_data.at (insn->aux_data_index).c_str ());
	  continue;
	}

      /* We have an instruction, we are done.  */
      break;
    }

  /* Determine the end of the instruction trace.  */
  btrace_insn_end (&end, btinfo);

  /* The execution trace contains (and ends with) the current instruction.
     This instruction has not been executed, yet, so the trace really ends
     one instruction earlier.  */
  if (btrace_insn_cmp (replay, &end) == 0)
    return btrace_step_no_history ();

  return btrace_step_spurious ();
}
/* Step one instruction in backward direction.  */

static struct target_waitstatus
record_btrace_single_step_backward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, start;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Start replaying if we're not already doing so.  */
  if (replay == NULL)
    replay = record_btrace_start_replaying (tp);

  /* If we can't step any further, we reached the end of the history.
     Skip gaps during replay.  If we end up at a gap (at the beginning of
     the trace), jump back to the instruction at which we started.
     If we're stepping a BTRACE_INSN_AUX instruction, print the auxiliary
     data and skip the instruction.  */
  start = *replay;

  for (;;)
    {
      unsigned int steps;

      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	{
	  /* Restore the position we started from before reporting.  */
	  *replay = start;
	  return btrace_step_no_history ();
	}

      /* A NULL instruction is a gap in the trace; keep skipping.  */
      const struct btrace_insn *insn = btrace_insn_get (replay);
      if (insn == nullptr)
	continue;

      /* Check if we're stepping a BTRACE_INSN_AUX instruction and skip it.  */
      if (insn->iclass == BTRACE_INSN_AUX)
	{
	  gdb_printf ("[%s]\n",
		      btinfo->aux_data.at (insn->aux_data_index).c_str ());
	  continue;
	}

      /* We have an instruction, we are done.  */
      break;
    }

  /* Check if we're stepping a breakpoint.

     For reverse-stepping, this check is after the step.  There is logic in
     infrun.c that handles reverse-stepping separately.  See, for example,
     proceed and adjust_pc_after_break.

     This code assumes that for reverse-stepping, PC points to the last
     de-executed instruction, whereas for forward-stepping PC points to the
     next to-be-executed instruction.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  return btrace_step_spurious ();
}
/* Step a single thread.

   Consumes the thread's pending BTHR_MOVE/BTHR_STOP request and performs
   one step in the requested direction, returning the resulting wait
   status.  For continue-style requests, the request is re-armed after a
   successful step so the caller keeps stepping this thread.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct target_waitstatus status;
  btrace_thread_flags flags;

  btinfo = &tp->btrace;

  /* Fetch and clear the stepping request; it is consumed by this step.  */
  flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);

  DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
	 tp->ptid.to_string ().c_str (), flags.raw (),
	 btrace_thread_flag_to_str (flags));

  /* We can't step without an execution history.  */
  if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
    return btrace_step_no_history ();

  switch (flags)
    {
    default:
      internal_error (_("invalid stepping type."));

    case BTHR_STOP:
      return btrace_step_stopped_on_request ();

    case BTHR_STEP:
      /* A spurious status means the step completed normally; report a
	 stop.  Any other status (e.g. no-history) falls out of the switch
	 and is handled below.  */
      status = record_btrace_single_step_forward (tp);
      if (status.kind () != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      status = record_btrace_single_step_backward (tp);
      if (status.kind () != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_CONT:
      /* A normal (spurious) step re-arms the request so this thread is
	 stepped again on the next iteration.  */
      status = record_btrace_single_step_forward (tp);
      if (status.kind () != TARGET_WAITKIND_SPURIOUS)
	break;

      btinfo->flags |= flags;
      return btrace_step_again ();

    case BTHR_RCONT:
      status = record_btrace_single_step_backward (tp);
      if (status.kind () != TARGET_WAITKIND_SPURIOUS)
	break;

      btinfo->flags |= flags;
      return btrace_step_again ();
    }

  /* We keep threads moving at the end of their execution history.  The wait
     method will stop the thread for whom the event is reported.  */
  if (status.kind () == TARGET_WAITKIND_NO_HISTORY)
    btinfo->flags |= flags;

  return status;
}
2575 /* Announce further events if necessary. */
2577 static void
2578 record_btrace_maybe_mark_async_event
2579 (const std::vector<thread_info *> &moving,
2580 const std::vector<thread_info *> &no_history)
2582 bool more_moving = !moving.empty ();
2583 bool more_no_history = !no_history.empty ();;
2585 if (!more_moving && !more_no_history)
2586 return;
2588 if (more_moving)
2589 DEBUG ("movers pending");
2591 if (more_no_history)
2592 DEBUG ("no-history pending");
2594 mark_async_event_handler (record_btrace_async_inferior_event_handler);
/* The wait method of target record-btrace.

   While replaying, steps the requested threads until one of them reports
   an event; otherwise forwards the request to the target beneath.  */

ptid_t
record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
			    target_wait_flags options)
{
  std::vector<thread_info *> moving;
  std::vector<thread_info *> no_history;

  /* Clear this, if needed we'll re-mark it below.  */
  clear_async_event_handler (record_btrace_async_inferior_event_handler);

  DEBUG ("wait %s (0x%x)", ptid.to_string ().c_str (),
	 (unsigned) options);

  /* As long as we're not replaying, just forward the request.  */
  if ((::execution_direction != EXEC_REVERSE)
      && !record_is_replaying (minus_one_ptid))
    {
      return this->beneath ()->wait (ptid, status, options);
    }

  /* Keep a work list of moving threads.  */
  process_stratum_target *proc_target = current_inferior ()->process_target ();
  for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
    if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
      moving.push_back (tp);

  /* No thread has a pending move/stop request: report "no resumed".  */
  if (moving.empty ())
    {
      *status = btrace_step_no_resumed ();

      DEBUG ("wait ended by %s: %s", null_ptid.to_string ().c_str (),
	     status->to_string ().c_str ());

      return null_ptid;
    }

  /* Step moving threads one by one, one step each, until either one thread
     reports an event or we run out of threads to step.

     When stepping more than one thread, chances are that some threads reach
     the end of their execution history earlier than others.  If we reported
     this immediately, all-stop on top of non-stop would stop all threads and
     resume the same threads next time.  And we would report the same thread
     having reached the end of its execution history again.

     In the worst case, this would starve the other threads.  But even if other
     threads would be allowed to make progress, this would result in far too
     many intermediate stops.

     We therefore delay the reporting of "no execution history" until we have
     nothing else to report.  By this time, all threads should have moved to
     either the beginning or the end of their execution history.  There will
     be a single user-visible stop.  */
  struct thread_info *eventing = NULL;
  while ((eventing == NULL) && !moving.empty ())
    {
      for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
	{
	  thread_info *tp = moving[ix];

	  *status = record_btrace_step_thread (tp);

	  switch (status->kind ())
	    {
	    case TARGET_WAITKIND_IGNORE:
	      /* No event; keep this thread on the work list.  */
	      ix++;
	      break;

	    case TARGET_WAITKIND_NO_HISTORY:
	      /* Defer reporting; see the big comment above.  */
	      no_history.push_back (ordered_remove (moving, ix));
	      break;

	    default:
	      eventing = unordered_remove (moving, ix);
	      break;
	    }
	}
    }

  if (eventing == NULL)
    {
      /* We started with at least one moving thread.  This thread must have
	 either stopped or reached the end of its execution history.

	 In the former case, EVENTING must not be NULL.
	 In the latter case, NO_HISTORY must not be empty.  */
      gdb_assert (!no_history.empty ());

      /* We kept threads moving at the end of their execution history.  Stop
	 EVENTING now that we are going to report its stop.  */
      eventing = unordered_remove (no_history, 0);
      eventing->btrace.flags &= ~BTHR_MOVE;

      *status = btrace_step_no_history ();
    }

  gdb_assert (eventing != NULL);

  /* We kept threads replaying at the end of their execution history.  Stop
     replaying EVENTING now that we are going to report its stop.  */
  record_btrace_stop_replaying_at_end (eventing);

  /* Stop all other threads. */
  if (!target_is_non_stop_p ())
    {
      for (thread_info *tp : current_inferior ()->non_exited_threads ())
	record_btrace_cancel_resume (tp);
    }

  /* In async mode, we need to announce further events.  */
  if (target_is_async_p ())
    record_btrace_maybe_mark_async_event (moving, no_history);

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&eventing->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_thread (eventing);

  DEBUG ("wait ended by thread %s (%s): %s",
	 print_thread_id (eventing),
	 eventing->ptid.to_string ().c_str (),
	 status->to_string ().c_str ());

  return eventing->ptid;
}
2726 /* The stop method of target record-btrace. */
2728 void
2729 record_btrace_target::stop (ptid_t ptid)
2731 DEBUG ("stop %s", ptid.to_string ().c_str ());
2733 /* As long as we're not replaying, just forward the request. */
2734 if ((::execution_direction != EXEC_REVERSE)
2735 && !record_is_replaying (minus_one_ptid))
2737 this->beneath ()->stop (ptid);
2739 else
2741 process_stratum_target *proc_target
2742 = current_inferior ()->process_target ();
2744 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2746 tp->btrace.flags &= ~BTHR_MOVE;
2747 tp->btrace.flags |= BTHR_STOP;
/* The can_execute_reverse method of target record-btrace.  */

bool
record_btrace_target::can_execute_reverse ()
{
  /* Recorded execution history can always be replayed backwards.  */
  return true;
}
/* The stopped_by_sw_breakpoint method of target record-btrace.  */

bool
record_btrace_target::stopped_by_sw_breakpoint ()
{
  /* During replay, answer from the recorded stop reason; otherwise
     forward to the target beneath.  */
  if (record_is_replaying (minus_one_ptid))
    {
      struct thread_info *tp = inferior_thread ();

      return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
    }

  return this->beneath ()->stopped_by_sw_breakpoint ();
}
/* The supports_stopped_by_sw_breakpoint method of target
   record-btrace.  */

bool
record_btrace_target::supports_stopped_by_sw_breakpoint ()
{
  /* During replay we track the stop reason ourselves; otherwise defer to
     the target beneath.  */
  if (record_is_replaying (minus_one_ptid))
    return true;

  return this->beneath ()->supports_stopped_by_sw_breakpoint ();
}
/* The stopped_by_hw_breakpoint method of target record-btrace.
   (Fixed: the comment previously said "stopped_by_sw_breakpoint".)  */

bool
record_btrace_target::stopped_by_hw_breakpoint ()
{
  /* During replay, answer from the recorded stop reason; otherwise
     forward to the target beneath.  */
  if (record_is_replaying (minus_one_ptid))
    {
      struct thread_info *tp = inferior_thread ();

      return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
    }

  return this->beneath ()->stopped_by_hw_breakpoint ();
}
/* The supports_stopped_by_hw_breakpoint method of target
   record-btrace.  */

bool
record_btrace_target::supports_stopped_by_hw_breakpoint ()
{
  /* During replay we track the stop reason ourselves; otherwise defer to
     the target beneath.  */
  if (record_is_replaying (minus_one_ptid))
    return true;

  return this->beneath ()->supports_stopped_by_hw_breakpoint ();
}
/* The update_thread_list method of target record-btrace.  */

void
record_btrace_target::update_thread_list ()
{
  /* We don't add or remove threads during replay.  */
  if (record_is_replaying (minus_one_ptid))
    return;

  /* Forward the request.  */
  this->beneath ()->update_thread_list ();
}
/* The thread_alive method of target record-btrace.  */

bool
record_btrace_target::thread_alive (ptid_t ptid)
{
  /* We don't add or remove threads during replay.  */
  if (record_is_replaying (minus_one_ptid))
    return true;

  /* Forward the request.  */
  return this->beneath ()->thread_alive (ptid);
}
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.

   Clears the record histories, updates TP's stop PC and re-prints the
   current frame after the replay position changed.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	return;  /* Already at the requested replay position.  */

      *btinfo->replay = *it;
      /* The replay position changed; cached register values are stale.  */
      registers_changed_thread (tp);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);

  tp->set_stop_pc (regcache_read_pc (get_thread_regcache (tp)));
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
2871 /* The goto_record_begin method of target record-btrace. */
2873 void
2874 record_btrace_target::goto_record_begin ()
2876 struct thread_info *tp;
2877 struct btrace_insn_iterator begin;
2879 tp = require_btrace_thread ();
2881 btrace_insn_begin (&begin, &tp->btrace);
2883 /* Skip gaps at the beginning of the trace. */
2884 while (btrace_insn_get (&begin) == NULL)
2886 unsigned int steps;
2888 steps = btrace_insn_next (&begin, 1);
2889 if (steps == 0)
2890 error (_("No trace."));
2893 record_btrace_set_replay (tp, &begin);
/* The goto_record_end method of target record-btrace.  */

void
record_btrace_target::goto_record_end ()
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  /* A NULL iterator means "stop replaying", i.e. resume at the end of the
     recorded history.  */
  record_btrace_set_replay (tp, NULL);
}
2908 /* The goto_record method of target record-btrace. */
2910 void
2911 record_btrace_target::goto_record (ULONGEST insn_number)
2913 struct thread_info *tp;
2914 struct btrace_insn_iterator it;
2915 unsigned int number;
2916 int found;
2918 number = insn_number;
2920 /* Check for wrap-arounds. */
2921 if (number != insn_number)
2922 error (_("Instruction number out of range."));
2924 tp = require_btrace_thread ();
2926 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2928 /* Check if the instruction could not be found or is a gap or an
2929 auxiliary instruction. */
2930 if (found == 0)
2931 error (_("No such instruction."));
2933 const struct btrace_insn *insn = btrace_insn_get (&it);
2934 if (insn == NULL)
2935 error (_("No such instruction."));
2936 if (insn->iclass == BTRACE_INSN_AUX)
2937 error (_("Can't go to an auxiliary instruction."));
2939 record_btrace_set_replay (tp, &it);
/* The record_stop_replaying method of target record-btrace.  */

void
record_btrace_target::record_stop_replaying ()
{
  /* Stop replaying every live thread of the current inferior.  */
  for (thread_info *tp : current_inferior ()->non_exited_threads ())
    record_btrace_stop_replaying (tp);
}
/* The execution_direction target method.  */

enum exec_direction_kind
record_btrace_target::execution_direction ()
{
  /* Report the direction of the last resume request.  */
  return record_btrace_resume_exec_dir;
}
/* The prepare_to_generate_core target method.  */

void
record_btrace_target::prepare_to_generate_core ()
{
  record_btrace_generating_corefile = 1;
}
/* The done_generating_core target method.  */

void
record_btrace_target::done_generating_core ()
{
  record_btrace_generating_corefile = 0;
}
/* Start recording in BTS format.  */

static void
cmd_record_btrace_bts_start (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_BTS;

  try
    {
      execute_command ("target record-btrace", from_tty);
    }
  catch (const gdb_exception &exception)
    {
      /* Reset the format so a failed attempt does not leave stale
	 configuration behind.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw;
    }
}
/* Start recording in Intel Processor Trace format.  */

static void
cmd_record_btrace_pt_start (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  try
    {
      execute_command ("target record-btrace", from_tty);
    }
  catch (const gdb_exception &exception)
    {
      /* Reset the format so a failed attempt does not leave stale
	 configuration behind.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw;
    }
}
/* Alias for "target record".

   Tries Intel Processor Trace format first and falls back to BTS format
   if starting the target fails.  */

static void
cmd_record_btrace_start (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  try
    {
      execute_command ("target record-btrace", from_tty);
    }
  catch (const gdb_exception_error &exception)
    {
      /* PT failed; retry with BTS.  */
      record_btrace_conf.format = BTRACE_FORMAT_BTS;

      try
	{
	  execute_command ("target record-btrace", from_tty);
	}
      catch (const gdb_exception &ex)
	{
	  /* Both formats failed; reset and re-raise.  */
	  record_btrace_conf.format = BTRACE_FORMAT_NONE;
	  throw;
	}
    }
}
/* The "show record btrace replay-memory-access" command.  */

static void
cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
			       struct cmd_list_element *c, const char *value)
{
  /* Note: prints the global setting directly; the VALUE parameter is
     unused here.  */
  gdb_printf (file, _("Replay memory access is %s.\n"),
	      replay_memory_access);
}
/* The "set record btrace cpu none" command.  */

static void
cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
{
  if (args != nullptr && *args != 0)
    error (_("Trailing junk: '%s'."), args);

  record_btrace_cpu_state = CS_NONE;
}
/* The "set record btrace cpu auto" command.  */

static void
cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
{
  if (args != nullptr && *args != 0)
    error (_("Trailing junk: '%s'."), args);

  record_btrace_cpu_state = CS_AUTO;
}
3079 /* The "set record btrace cpu" command. */
3081 static void
3082 cmd_set_record_btrace_cpu (const char *args, int from_tty)
3084 if (args == nullptr)
3085 args = "";
3087 /* We use a hard-coded vendor string for now. */
3088 unsigned int family, model, stepping;
3089 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3090 &model, &l1, &stepping, &l2);
3091 if (matches == 3)
3093 if (strlen (args) != l2)
3094 error (_("Trailing junk: '%s'."), args + l2);
3096 else if (matches == 2)
3098 if (strlen (args) != l1)
3099 error (_("Trailing junk: '%s'."), args + l1);
3101 stepping = 0;
3103 else
3104 error (_("Bad format. See \"help set record btrace cpu\"."));
3106 if (USHRT_MAX < family)
3107 error (_("Cpu family too big."));
3109 if (UCHAR_MAX < model)
3110 error (_("Cpu model too big."));
3112 if (UCHAR_MAX < stepping)
3113 error (_("Cpu stepping too big."));
3115 record_btrace_cpu.vendor = CV_INTEL;
3116 record_btrace_cpu.family = family;
3117 record_btrace_cpu.model = model;
3118 record_btrace_cpu.stepping = stepping;
3120 record_btrace_cpu_state = CS_CPU;
/* The "show record btrace cpu" command.  */

static void
cmd_show_record_btrace_cpu (const char *args, int from_tty)
{
  if (args != nullptr && *args != 0)
    error (_("Trailing junk: '%s'."), args);

  switch (record_btrace_cpu_state)
    {
    case CS_AUTO:
      gdb_printf (_("btrace cpu is 'auto'.\n"));
      return;

    case CS_NONE:
      gdb_printf (_("btrace cpu is 'none'.\n"));
      return;

    case CS_CPU:
      switch (record_btrace_cpu.vendor)
	{
	case CV_INTEL:
	  if (record_btrace_cpu.stepping == 0)
	    gdb_printf (_("btrace cpu is 'intel: %u/%u'.\n"),
			record_btrace_cpu.family,
			record_btrace_cpu.model);
	  else
	    gdb_printf (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
			record_btrace_cpu.family,
			record_btrace_cpu.model,
			record_btrace_cpu.stepping);
	  return;
	}
    }

  /* Reached only for an unknown state or vendor value.  */
  error (_("Internal error: bad cpu state."));
}
/* The "record bts buffer-size" show value function.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  gdb_printf (file, _("The record/replay bts buffer size is %s.\n"),
	      value);
}
/* The "record pt buffer-size" show value function.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  gdb_printf (file, _("The record/replay pt buffer size is %s.\n"),
	      value);
}
/* Backing storage for the "record pt event-tracing" setting; copied into
   record_btrace_conf.pt.event_tracing by the set hook.  */
static bool event_tracing = false;

/* The "record pt event-tracing" show value function.  */

static void
show_record_pt_event_tracing_value (struct ui_file *file, int from_tty,
				    struct cmd_list_element *c,
				    const char *value)
{
#if (LIBIPT_VERSION >= 0x201)
  gdb_printf (file, _("record pt event-tracing is %s.\n"), value);
#else
  /* Event tracing requires libipt >= 2.1.  */
  gdb_printf (_("Event-tracing is not supported by GDB.\n"));
#endif /* defined (LIBIPT_VERSION >= 0x201) */
}
/* The "record pt event-tracing" set value function.  */

static void
set_record_pt_event_tracing_value (const char *args, int from_tty,
				   cmd_list_element *c)
{
#if (LIBIPT_VERSION >= 0x201)
  /* Propagate the command-backed variable into the recording
     configuration.  */
  record_btrace_conf.pt.event_tracing = event_tracing;
#else
  /* Event tracing requires libipt >= 2.1.  */
  gdb_printf (_("Event-tracing is not supported by GDB.\n"));
#endif /* defined (LIBIPT_VERSION >= 0x201) */
}
3213 /* Initialize btrace commands. */
3215 void _initialize_record_btrace ();
3216 void
3217 _initialize_record_btrace ()
3219 cmd_list_element *record_btrace_cmd
3220 = add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3221 _("Start branch trace recording."),
3222 &record_btrace_cmdlist, 0, &record_cmdlist);
3223 add_alias_cmd ("b", record_btrace_cmd, class_obscure, 1, &record_cmdlist);
3225 cmd_list_element *record_btrace_bts_cmd
3226 = add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3227 _("\
3228 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3229 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3230 This format may not be available on all processors."),
3231 &record_btrace_cmdlist);
3232 add_alias_cmd ("bts", record_btrace_bts_cmd, class_obscure, 1,
3233 &record_cmdlist);
3235 cmd_list_element *record_btrace_pt_cmd
3236 = add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3237 _("\
3238 Start branch trace recording in Intel Processor Trace format.\n\n\
3239 This format may not be available on all processors."),
3240 &record_btrace_cmdlist);
3241 add_alias_cmd ("pt", record_btrace_pt_cmd, class_obscure, 1, &record_cmdlist);
3243 add_setshow_prefix_cmd ("btrace", class_support,
3244 _("Set record options."),
3245 _("Show record options."),
3246 &set_record_btrace_cmdlist,
3247 &show_record_btrace_cmdlist,
3248 &set_record_cmdlist, &show_record_cmdlist);
3250 add_setshow_enum_cmd ("replay-memory-access", no_class,
3251 replay_memory_access_types, &replay_memory_access, _("\
3252 Set what memory accesses are allowed during replay."), _("\
3253 Show what memory accesses are allowed during replay."),
3254 _("Default is READ-ONLY.\n\n\
3255 The btrace record target does not trace data.\n\
3256 The memory therefore corresponds to the live target and not\n\
3257 to the current replay position.\n\n\
3258 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3259 When READ-WRITE, allow accesses to read-only and read-write memory during\n\
3260 replay."),
3261 NULL, cmd_show_replay_memory_access,
3262 &set_record_btrace_cmdlist,
3263 &show_record_btrace_cmdlist);
3265 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3266 _("\
3267 Set the cpu to be used for trace decode.\n\n\
3268 The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3269 For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
3270 When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3271 The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3272 When GDB does not support that cpu, this option can be used to enable\n\
3273 workarounds for a similar cpu that GDB supports.\n\n\
3274 When set to \"none\", errata workarounds are disabled."),
3275 &set_record_btrace_cpu_cmdlist,
3277 &set_record_btrace_cmdlist);
3279 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3280 Automatically determine the cpu to be used for trace decode."),
3281 &set_record_btrace_cpu_cmdlist);
3283 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3284 Do not enable errata workarounds for trace decode."),
3285 &set_record_btrace_cpu_cmdlist);
3287 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3288 Show the cpu to be used for trace decode."),
3289 &show_record_btrace_cmdlist);
3291 add_setshow_prefix_cmd ("bts", class_support,
3292 _("Set record btrace bts options."),
3293 _("Show record btrace bts options."),
3294 &set_record_btrace_bts_cmdlist,
3295 &show_record_btrace_bts_cmdlist,
3296 &set_record_btrace_cmdlist,
3297 &show_record_btrace_cmdlist);
3299 add_setshow_uinteger_cmd ("buffer-size", no_class,
3300 &record_btrace_conf.bts.size,
3301 _("Set the record/replay bts buffer size."),
3302 _("Show the record/replay bts buffer size."), _("\
3303 When starting recording request a trace buffer of this size.\n\
3304 The actual buffer size may differ from the requested size.\n\
3305 Use \"info record\" to see the actual buffer size.\n\n\
3306 Bigger buffers allow longer recording but also take more time to process\n\
3307 the recorded execution trace.\n\n\
3308 The trace buffer size may not be changed while recording."), NULL,
3309 show_record_bts_buffer_size_value,
3310 &set_record_btrace_bts_cmdlist,
3311 &show_record_btrace_bts_cmdlist);
3313 add_setshow_prefix_cmd ("pt", class_support,
3314 _("Set record btrace pt options."),
3315 _("Show record btrace pt options."),
3316 &set_record_btrace_pt_cmdlist,
3317 &show_record_btrace_pt_cmdlist,
3318 &set_record_btrace_cmdlist,
3319 &show_record_btrace_cmdlist);
3321 add_setshow_uinteger_cmd ("buffer-size", no_class,
3322 &record_btrace_conf.pt.size,
3323 _("Set the record/replay pt buffer size."),
3324 _("Show the record/replay pt buffer size."), _("\
3325 Bigger buffers allow longer recording but also take more time to process\n\
3326 the recorded execution.\n\
3327 The actual buffer size may differ from the requested size. Use \"info record\"\n\
3328 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3329 &set_record_btrace_pt_cmdlist,
3330 &show_record_btrace_pt_cmdlist);
3332 add_setshow_boolean_cmd ("event-tracing", no_class, &event_tracing,
3333 _("Set event-tracing for record pt."),
3334 _("Show event-tracing for record pt."),
3335 _("\
3336 Use \"on\" to enable event tracing for recordings with Intel Processor Trace,\n\
3337 and \"off\" to disable it.\n\
3338 Without an argument, event tracing is enabled. Changing this setting has no\n\
3339 effect on an active recording."),
3340 set_record_pt_event_tracing_value,
3341 show_record_pt_event_tracing_value,
3342 &set_record_btrace_pt_cmdlist,
3343 &show_record_btrace_pt_cmdlist);
3345 add_target (record_btrace_target_info, record_btrace_target_open);
3347 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3348 xcalloc, xfree);
3350 record_btrace_conf.bts.size = 64 * 1024;
3351 record_btrace_conf.pt.size = 16 * 1024;
3352 #if (LIBIPT_VERSION >= 0x200)
3353 record_btrace_conf.pt.ptwrite = true;
3354 #else
3355 record_btrace_conf.pt.ptwrite = false;
3356 #endif
3357 record_btrace_conf.pt.event_tracing = false;