/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2024 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "gdbsupport/gdb_vecs.h"
#include "record.h"
#include "record-btrace.h"
#include "gdbthread.h"
#include "target.h"
#include "cli/cli-cmds.h"
#include "disasm.h"
#include "observable.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "top.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "gdbsupport/event-loop.h"
#include "inf-loop.h"
#include "inferior.h"
#include <algorithm>
#include "gdbarch.h"
#include "cli/cli-style.h"
#include "async-event.h"
#include <forward_list>
#include "objfiles.h"
#include "interps.h"

static const target_info record_btrace_target_info = {
  "record-btrace",
  N_("Branch tracing target"),
  N_("Collect control-flow trace and provide the execution history.")
};

/* The target_ops of record-btrace.  */

class record_btrace_target final : public target_ops
{
public:
  const target_info &info () const override
  { return record_btrace_target_info; }

  strata stratum () const override { return record_stratum; }

  void close () override;
  void async (bool) override;

  void detach (inferior *inf, int from_tty) override
  { record_detach (this, inf, from_tty); }

  void disconnect (const char *, int) override;

  void mourn_inferior () override
  { record_mourn_inferior (this); }

  void kill () override
  { record_kill (this); }

  enum record_method record_method (ptid_t ptid) override;

  void stop_recording () override;
  void info_record () override;

  void insn_history (int size, gdb_disassembly_flags flags) override;
  void insn_history_from (ULONGEST from, int size,
			  gdb_disassembly_flags flags) override;
  void insn_history_range (ULONGEST begin, ULONGEST end,
			   gdb_disassembly_flags flags) override;
  void call_history (int size, record_print_flags flags) override;
  void call_history_from (ULONGEST begin, int size, record_print_flags flags)
    override;
  void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
    override;

  bool record_is_replaying (ptid_t ptid) override;
  bool record_will_replay (ptid_t ptid, int dir) override;
  void record_stop_replaying () override;

  enum target_xfer_status xfer_partial (enum target_object object,
					const char *annex,
					gdb_byte *readbuf,
					const gdb_byte *writebuf,
					ULONGEST offset, ULONGEST len,
					ULONGEST *xfered_len) override;

  int insert_breakpoint (struct gdbarch *,
			 struct bp_target_info *) override;
  int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
			 enum remove_bp_reason) override;

  void fetch_registers (struct regcache *, int) override;

  void store_registers (struct regcache *, int) override;
  void prepare_to_store (struct regcache *) override;

  const struct frame_unwind *get_unwinder () override;

  const struct frame_unwind *get_tailcall_unwinder () override;

  void resume (ptid_t, int, enum gdb_signal) override;
  ptid_t wait (ptid_t, struct target_waitstatus *, target_wait_flags) override;

  void stop (ptid_t) override;
  void update_thread_list () override;
  bool thread_alive (ptid_t ptid) override;
  void goto_record_begin () override;
  void goto_record_end () override;
  void goto_record (ULONGEST insn) override;

  bool can_execute_reverse () override;

  bool stopped_by_sw_breakpoint () override;
  bool supports_stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;
  bool supports_stopped_by_hw_breakpoint () override;

  enum exec_direction_kind execution_direction () override;
  void prepare_to_generate_core () override;
  void done_generating_core () override;
};

static record_btrace_target record_btrace_ops;

/* Initialize the record-btrace target ops.  */

/* Token associated with a new-thread observer enabling branch tracing
   for the new thread.  */
static const gdb::observers::token record_btrace_thread_observer_token {};

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;
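
/* As an illustrative session (not authoritative syntax), a user can relax
   the default restriction while replaying with

     (gdb) set record btrace replay-memory-access read-write

   and return to the safe default with "read-only".  The strings above are
   the values accepted by that command.  */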
/* The cpu state kinds.  */
enum record_btrace_cpu_state_kind
{
  CS_AUTO,
  CS_NONE,
  CS_CPU
};

/* The current cpu state.  */
static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;

/* The current cpu for trace decode.  */
static struct btrace_cpu record_btrace_cpu;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Command list for "set record btrace cpu".  */
static struct cmd_list_element *set_record_btrace_cpu_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	gdb_printf (gdb_stdlog,						\
		    "[record-btrace] " msg "\n", ##args);		\
    }									\
  while (0)
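
/* A minimal sketch of why the do ... while (0) wrapper matters: it makes
   the expansion a single statement, so a use such as

     if (some_condition)
       DEBUG ("open");
     else
       do_something_else ();

   parses as intended.  With a bare braced block instead, the semicolon
   after DEBUG (...) would terminate the `if' early and orphan the
   `else'.  */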
/* Return the cpu configured by the user.  Returns NULL if the cpu was
   configured as auto.  */
const struct btrace_cpu *
record_btrace_get_cpu (void)
{
  switch (record_btrace_cpu_state)
    {
    case CS_AUTO:
      return nullptr;

    case CS_NONE:
      record_btrace_cpu.vendor = CV_UNKNOWN;
      [[fallthrough]];
    case CS_CPU:
      return &record_btrace_cpu;
    }

  error (_("Internal error: bad record btrace cpu state."));
}
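
/* For illustration of the state machine above: "set record btrace cpu none"
   selects CS_NONE, so this function returns a btrace_cpu with vendor
   CV_UNKNOWN, which the decoder treats as "apply no cpu-specific errata
   workarounds"; "set record btrace cpu auto" selects CS_AUTO and returns
   nullptr.  The commands themselves are defined elsewhere in this file.  */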
/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  DEBUG ("require");

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();

  validate_registers_access ();

  btrace_fetch (tp, record_btrace_get_cpu ());

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* The new thread observer.  */

static void
record_btrace_on_new_thread (struct thread_info *tp)
{
  /* Ignore this thread if its inferior is not recorded by us.  */
  target_ops *rec = tp->inf->target_at (record_stratum);
  if (rec != &record_btrace_ops)
    return;

  try
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  catch (const gdb_exception_error &error)
    {
      warning ("%s", error.what ());
    }
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  gdb::observers::new_thread.attach (record_btrace_on_new_thread,
				     record_btrace_thread_observer_token,
				     "record-btrace");
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  DEBUG ("detach thread observer");

  gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
}

/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT);
}

/* See record-btrace.h.  */

void
record_btrace_push_target (void)
{
  const char *format;

  record_btrace_auto_enable ();

  current_inferior ()->push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
				  NULL, "record-btrace");
  record_btrace_generating_corefile = 0;

  format = btrace_format_short_string (record_btrace_conf.format);
  interps_notify_record_changed (current_inferior (), 1, "btrace", format);
}

/* Disable btrace on a set of threads on scope exit.  */

struct scoped_btrace_disable
{
  scoped_btrace_disable () = default;

  DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);

  ~scoped_btrace_disable ()
  {
    for (thread_info *tp : m_threads)
      btrace_disable (tp);
  }

  void add_thread (thread_info *thread)
  {
    m_threads.push_front (thread);
  }

  void discard ()
  {
    m_threads.clear ();
  }

private:
  std::forward_list<thread_info *> m_threads;
};

/* Open target record-btrace.  */

static void
record_btrace_target_open (const char *args, int from_tty)
{
  /* If we fail to enable btrace for one thread, disable it for the threads for
     which it was successfully enabled.  */
  scoped_btrace_disable btrace_disable;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution ())
    error (_("The program is not being run."));

  for (thread_info *tp : current_inferior ()->non_exited_threads ())
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
	btrace_enable (tp, &record_btrace_conf);

	btrace_disable.add_thread (tp);
      }

  record_btrace_push_target ();

  btrace_disable.discard ();
}
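
/* A sketch of a typical session reaching the function above:

     (gdb) start
     (gdb) record btrace

   On success, every matching non-exited thread of the current inferior is
   recorded.  If btrace_enable throws for one thread, the
   scoped_btrace_disable destructor rolls back the threads enabled so far;
   discard is reached, and the rollback skipped, only when all of them
   succeeded.  */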
/* The stop_recording method of target record-btrace.  */

void
record_btrace_target::stop_recording ()
{
  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  for (thread_info *tp : current_inferior ()->non_exited_threads ())
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The disconnect method of target record-btrace.  */

void
record_btrace_target::disconnect (const char *args,
				  int from_tty)
{
  struct target_ops *beneath = this->beneath ();

  /* Do not stop recording, just clean up GDB side.  */
  current_inferior ()->unpush_target (this);

  /* Forward disconnect.  */
  beneath->disconnect (args, from_tty);
}

/* The close method of target record-btrace.  */

void
record_btrace_target::close ()
{
  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  for (thread_info *tp : current_inferior ()->non_exited_threads ())
    btrace_teardown (tp);
}

/* The async method of target record-btrace.  */

void
record_btrace_target::async (bool enable)
{
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  this->beneath ()->async (enable);
}

/* Adjusts the size and returns a human-readable size suffix.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}
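
/* Example behavior of record_btrace_adjust_size: a size of 4096 has its low
   10 bits clear, so *size becomes 4 and "kB" is returned; 2097152 (2 MiB)
   yields 2 and "MB"; a size such as 4097 matches no case and is returned
   unchanged with an empty suffix.  */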
/* Print a BTS configuration.  */

static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      gdb_printf (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print an Intel Processor Trace configuration.  */

static void
record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      gdb_printf (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print a branch tracing configuration.  */

static void
record_btrace_print_conf (const struct btrace_config *conf)
{
  gdb_printf (_("Recording format: %s.\n"),
	      btrace_format_string (conf->format));

  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      record_btrace_print_bts_conf (&conf->bts);
      return;

    case BTRACE_FORMAT_PT:
      record_btrace_print_pt_conf (&conf->pt);
      return;
    }

  internal_error (_("Unknown branch trace format."));
}

/* The info_record method of target record-btrace.  */

void
record_btrace_target::info_record ()
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  tp = inferior_thread ();

  validate_registers_access ();

  btinfo = &tp->btrace;

  conf = ::btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp, record_btrace_get_cpu ());

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
	 that is not actually part of the record.  */
      if (btrace_insn_get (&insn) != NULL)
	insns -= 1;

      gaps = btinfo->ngaps;
    }

  gdb_printf (_("Recorded %u instructions in %u functions (%u gaps) "
		"for thread %s (%s).\n"), insns, calls, gaps,
	      print_thread_id (tp),
	      target_pid_to_str (tp->ptid).c_str ());

  if (btrace_is_replaying (tp))
    gdb_printf (_("Replay in progress.  At instruction %u.\n"),
		btrace_insn_number (btinfo->replay));
}

/* Print a decode error.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
			    enum btrace_format format)
{
  const char *errstr = btrace_decode_error (format, errcode);

  uiout->text (_("["));
  /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT.  */
  if (!(format == BTRACE_FORMAT_PT && errcode > 0))
    {
      uiout->text (_("decode error ("));
      uiout->field_signed ("errcode", errcode);
      uiout->text (_("): "));
    }
  uiout->text (errstr);
  uiout->text (_("]\n"));
}
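
/* The resulting output looks like "[decode error (<errcode>): <message>]"
   for real errors, or just "[<message>]" for Intel PT notifications, which
   use positive error codes.  (Illustrative; the exact message comes from
   btrace_decode_error.)  */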
/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end < line)
    range.end = line;

  return range;
}
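
/* A short example of how ranges grow: starting from the empty range [0; 0)
   built by btrace_mk_line_range, adding line 42 yields [42; 43); adding
   line 40 then extends it to [40; 43).  The end stays exclusive, matching
   the comments in struct btrace_line_range.  */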
/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
				  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
	  && (lhs.begin <= rhs.begin)
	  && (rhs.end <= lhs.end));
}

/* Find the line range associated with PC.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  const linetable_entry *lines;
  const linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = symtab->linetable ();
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  struct objfile *objfile = symtab->compunit ()->objfile ();
  unrelocated_addr unrel_pc
    = unrelocated_addr (pc - objfile->text_section_offset ());

  range = btrace_mk_line_range (symtab, 0, 0);
  for (i = 0; i < nlines - 1; i++)
    {
      /* The test of is_stmt here was added when the is_stmt field was
	 introduced to the 'struct linetable_entry' structure.  This
	 ensured that this loop maintained the same behavior as before we
	 introduced is_stmt.  That said, it might be that we would be
	 better off not checking is_stmt here, this would lead to us
	 possibly adding more line numbers to the range.  At the time this
	 change was made I was unsure how to test this so chose to go with
	 maintaining the existing experience.  */
      if (lines[i].unrelocated_pc () == unrel_pc && lines[i].line != 0
	  && lines[i].is_stmt)
	range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}

/* Print source lines in LINES to UIOUT.

   UI_ITEM_CHAIN is a cleanup chain for the last source line and the
   instructions corresponding to that source line.  When printing a new source
   line, we do the cleanups for the open chain and open a new cleanup chain for
   the new source line.  If the source line range in LINES is not empty, this
   function will leave the cleanup chain for the last printed source line open
   so instructions can be added to it.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
		    std::optional<ui_out_emit_tuple> *src_and_asm_tuple,
		    std::optional<ui_out_emit_list> *asm_list,
		    gdb_disassembly_flags flags)
{
  print_source_lines_flags psl_flags;

  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (int line = lines.begin; line < lines.end; ++line)
    {
      asm_list->reset ();

      src_and_asm_tuple->emplace (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      asm_list->emplace (uiout, "line_asm_insn");
    }
}

/* Disassemble a section of the recorded instruction trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end,
		     gdb_disassembly_flags flags)
{
  DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
	 btrace_insn_number (begin), btrace_insn_number (end));

  flags |= DISASSEMBLY_SPECULATIVE;

  gdbarch *gdbarch = current_inferior ()->arch ();
  btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);

  ui_out_emit_list list_emitter (uiout, "asm_insns");

  std::optional<ui_out_emit_tuple> src_and_asm_tuple;
  std::optional<ui_out_emit_list> asm_list;

  gdb_pretty_print_disassembler disasm (gdbarch, uiout);

  for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
       btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  uiout->field_fmt ("insn-number", "%u",
			    btrace_insn_number (&it));
	  uiout->text ("\t");

	  btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
				      conf->format);
	}
      else if (insn->iclass == BTRACE_INSN_AUX)
	{
	  if ((flags & DISASSEMBLY_OMIT_AUX_INSN) != 0)
	    continue;

	  uiout->field_fmt ("insn-number", "%u", btrace_insn_number (&it));
	  uiout->text ("\t");
	  /* Add 3 spaces to match the instructions and 2 to indent the aux
	     string to make it more visible.  */
	  uiout->spaces (5);
	  uiout->text ("[");
	  uiout->field_fmt ("aux-data", "%s",
			    it.btinfo->aux_data.at
			      (insn->aux_data_index).c_str ());
	  uiout->text ("]\n");
	}
      else
	{
	  struct disasm_insn dinsn;

	  if ((flags & DISASSEMBLY_SOURCE) != 0)
	    {
	      struct btrace_line_range lines;

	      lines = btrace_find_line_range (insn->pc);
	      if (!btrace_line_range_is_empty (lines)
		  && !btrace_line_range_contains_range (last_lines, lines))
		{
		  btrace_print_lines (lines, uiout, &src_and_asm_tuple,
				      &asm_list, flags);
		  last_lines = lines;
		}
	      else if (!src_and_asm_tuple.has_value ())
		{
		  gdb_assert (!asm_list.has_value ());

		  src_and_asm_tuple.emplace (uiout, "src_and_asm_line");

		  /* No source information.  */
		  asm_list.emplace (uiout, "line_asm_insn");
		}

	      gdb_assert (src_and_asm_tuple.has_value ());
	      gdb_assert (asm_list.has_value ());
	    }

	  memset (&dinsn, 0, sizeof (dinsn));
	  dinsn.number = btrace_insn_number (&it);
	  dinsn.addr = insn->pc;

	  if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	    dinsn.is_speculative = 1;

	  disasm.pretty_print_insn (&dinsn, flags);
	}
    }
}

/* The insn_history method of target record-btrace.  */

void
record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	gdb_printf (_("At the start of the branch trace record.\n"));
      else
	gdb_printf (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The insn_history_range method of target record-btrace.  */

void
record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
					  gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);
}
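
/* The wrap-around check above guards the narrowing from ULONGEST to
   unsigned int.  For example, a request with FROM = 0x100000000 would
   truncate LOW to 0; LOW != FROM then catches the loss and reports
   "Bad range." instead of silently printing the wrong window.  */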
/* The insn_history_from method of target record-btrace.  */

void
record_btrace_target::insn_history_from (ULONGEST from, int size,
					 gdb_disassembly_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  insn_history_range (begin, end, flags);
}
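
/* Worked example for the computation above: with FROM = 100 and SIZE = -10,
   the requested range is [91; 100] (BEGIN = FROM - CONTEXT + 1); with
   SIZE = 10 it is [100; 109], clamped to ULONGEST_MAX if FROM + CONTEXT - 1
   wraps.  */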
/* Print the instruction number range for a function call history line.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = bfun->insn.size ();
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  uiout->field_unsigned ("insn begin", begin);
  uiout->text (",");
  uiout->field_unsigned ("insn end", end);
}

/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
			       int *pbegin, int *pend)
{
  struct symtab *symtab;
  struct symbol *sym;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = sym->symtab ();

  for (const btrace_insn &insn : bfun->insn)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn.pc, 0);
      if (sal.symtab != symtab || sal.line == 0)
	continue;

      begin = std::min (begin, sal.line);
      end = std::max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  uiout->field_string ("file",
		       symtab_to_filename_for_display (sym->symtab ()),
		       file_name_style.style ());

  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  uiout->text (":");
  uiout->field_signed ("min line", begin);

  if (end == begin)
    return;

  uiout->text (",");
  uiout->field_signed ("max line", end);
}

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return sym->print_name ();
  else if (msym != NULL)
    return msym->print_name ();
  else
    return "??";
}

static void
btrace_print_aux_insn (struct ui_out *uiout,
		       const struct btrace_function *bfun,
		       const struct btrace_thread_info *btinfo,
		       int level)
{
  for (const btrace_insn &insn : bfun->insn)
    {
      if (insn.iclass == BTRACE_INSN_AUX)
	{
	  /* Indent to the function level.  */
	  uiout->text ("\t");
	  /* Adjust for RECORD_PRINT_INDENT_CALLS and indent one
	     additional level.  */
	  for (int i = 0; i <= level; ++i)
	    uiout->text ("  ");

	  uiout->text ("[");
	  uiout->field_fmt ("aux-data", "%s",
			    btinfo->aux_data.at (insn.aux_data_index).c_str ());
	  uiout->text ("]\n");
	}
    }
}

/* Disassemble a section of the recorded function trace.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;
      int level = 0;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      uiout->field_unsigned ("index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  level = bfun->level + btinfo->level;

	  for (int i = 0; i < level; ++i)
	    uiout->text ("  ");
	}

      if (sym != NULL)
	uiout->field_string ("function", sym->print_name (),
			     function_name_style.style ());
      else if (msym != NULL)
	uiout->field_string ("function", msym->print_name (),
			     function_name_style.style ());
      else if (!uiout->is_mi_like_p ())
	uiout->field_string ("function", "??",
			     function_name_style.style ());

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  uiout->text (_("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  uiout->text (_("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      uiout->text ("\n");

      if (((flags & RECORD_DONT_PRINT_AUX) == 0)
	  && ((bfun->flags & BFUN_CONTAINS_AUX) != 0))
	btrace_print_aux_insn (uiout, bfun, btinfo, level);
    }
}

/* The call_history method of target record-btrace.  */

void
record_btrace_target::call_history (int size, record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", (int) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.btinfo = btinfo;
	  begin.index = replay->call_index;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
	}
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	gdb_printf (_("At the start of the branch trace record.\n"));
      else
	gdb_printf (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
}

/* The call_history_range method of target record-btrace.  */

void
record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
					  record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);
}

/* The call_history_from method of target record-btrace.  */

void
record_btrace_target::call_history_from (ULONGEST from, int size,
					 record_print_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  call_history_range (begin, end, flags);
}

/* The record_method method of target record-btrace.  */

enum record_method
record_btrace_target::record_method (ptid_t ptid)
{
  process_stratum_target *proc_target = current_inferior ()->process_target ();
  thread_info *const tp = proc_target->find_thread (ptid);

  if (tp == NULL)
    error (_("No thread."));

  if (tp->btrace.target == NULL)
    return RECORD_METHOD_NONE;

  return RECORD_METHOD_BTRACE;
}

/* The record_is_replaying method of target record-btrace.  */

bool
record_btrace_target::record_is_replaying (ptid_t ptid)
{
  process_stratum_target *proc_target = current_inferior ()->process_target ();
  for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
    if (btrace_is_replaying (tp))
      return true;

  return false;
}

/* The record_will_replay method of target record-btrace.  */

bool
record_btrace_target::record_will_replay (ptid_t ptid, int dir)
{
  return dir == EXEC_REVERSE || record_is_replaying (ptid);
}

/* The xfer_partial method of target record-btrace.  */

enum target_xfer_status
record_btrace_target::xfer_partial (enum target_object object,
				    const char *annex, gdb_byte *readbuf,
				    const gdb_byte *writebuf, ULONGEST offset,
				    ULONGEST len, ULONGEST *xfered_len)
{
  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_is_replaying (inferior_ptid))
    {
      switch (object)
	{
	case TARGET_OBJECT_MEMORY:
	  {
	    const struct target_section *section;

	    /* We do not allow writing memory in general.  */
	    if (writebuf != NULL)
	      {
		*xfered_len = len;
		return TARGET_XFER_UNAVAILABLE;
	      }

	    /* We allow reading readonly memory.  */
	    section = target_section_by_addr (this, offset);
	    if (section != NULL)
	      {
		/* Check if the section we found is readonly.  */
		if ((bfd_section_flags (section->the_bfd_section)
		     & SEC_READONLY) != 0)
		  {
		    /* Truncate the request to fit into this section.  */
		    len = std::min (len, section->endaddr - offset);
		    break;
		  }
	      }

	    *xfered_len = len;
	    return TARGET_XFER_UNAVAILABLE;
	  }
	}
    }

  /* Forward the request.  */
  return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
					 offset, len, xfered_len);
}
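
/* To summarize the replay semantics above: while replaying with the default
   "read-only" access, a write request is answered with
   TARGET_XFER_UNAVAILABLE, and a read is forwarded only if it hits a
   SEC_READONLY section (truncated to that section); everything else, and
   all accesses in "read-write" mode or while generating a core file, goes
   straight to the target beneath.  */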
/* The insert_breakpoint method of target record-btrace.  */

int
record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
					 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  try
    {
      ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
    }
  catch (const gdb_exception &except)
    {
      replay_memory_access = old;
      throw;
    }
  replay_memory_access = old;

  return ret;
}

/* The remove_breakpoint method of target record-btrace.  */

int
record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
					 struct bp_target_info *bp_tgt,
					 enum remove_bp_reason reason)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  try
    {
      ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
    }
  catch (const gdb_exception &except)
    {
      replay_memory_access = old;
      throw;
    }
  replay_memory_access = old;

  return ret;
}

/* The fetch_registers method of target record-btrace.  */

void
record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
{
  btrace_insn_iterator *replay = nullptr;

  /* Thread-db may ask for a thread's registers before GDB knows about the
     thread.  We forward the request to the target beneath in this
     case.  */
  thread_info *tp
    = current_inferior ()->process_target ()->find_thread (regcache->ptid ());
  if (tp != nullptr)
    replay = tp->btrace.replay;

  if (replay != nullptr && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = regcache->arch ();
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
	return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
	return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache->raw_supply (regno, &insn->pc);
    }
  else
    this->beneath ()->fetch_registers (regcache, regno);
}

/* The store_registers method of target record-btrace.  */

void
record_btrace_target::store_registers (struct regcache *regcache, int regno)
{
  if (!record_btrace_generating_corefile
      && record_is_replaying (regcache->ptid ()))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers);

  this->beneath ()->store_registers (regcache, regno);
}

/* The prepare_to_store method of target record-btrace.  */

void
record_btrace_target::prepare_to_store (struct regcache *regcache)
{
  if (!record_btrace_generating_corefile
      && record_is_replaying (regcache->ptid ()))
    return;

  this->beneath ()->prepare_to_store (regcache);
}

/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */
static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache
    = (const struct btrace_frame_cache *) arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1
    = (const struct btrace_frame_cache *) arg1;
  const struct btrace_frame_cache *cache2
    = (const struct btrace_frame_cache *) arg2;

  return cache1->frame == cache2->frame;
}

/* Create a new btrace frame cache.  */

static struct btrace_frame_cache *
bfcache_new (const frame_info_ptr &frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame.get ();

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}

/* Extract the branch trace function from a branch trace frame.  */

static const struct btrace_function *
btrace_get_frame_function (const frame_info_ptr &frame)
{
  const struct btrace_frame_cache *cache;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame.get ();

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = (const struct btrace_frame_cache *) *slot;
  return cache->bfun;
}

/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (const frame_info_ptr &this_frame,
					void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == 0)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (const frame_info_ptr &this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_call_iterator it;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
    bfun = btrace_call_get (&it);

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}

/* Implement prev_register method for record_btrace_frame_unwind.  */

static struct value *
record_btrace_frame_prev_register (const frame_info_ptr &this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  struct btrace_call_iterator it;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  caller = btrace_call_get (&it);

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    pc = caller->insn.front ().pc;
  else
    {
      pc = caller->insn.back ().pc;
      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
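
/* One way to read the two cases above: when BFUN_UP_LINKS_TO_RET is set,
   the up link was created by a return, so the caller's segment starts at
   the return address and its first recorded instruction is the unwound PC.
   Otherwise the link was created by the call itself: the caller's last
   recorded instruction is the call, and the unwound PC is that address
   plus the instruction's length.  */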
1827 /* Implement sniffer method for record_btrace_frame_unwind. */
1829 static int
1830 record_btrace_frame_sniffer (const struct frame_unwind *self,
1831 const frame_info_ptr &this_frame,
1832 void **this_cache)
1834 const struct btrace_function *bfun;
1835 struct btrace_frame_cache *cache;
1836 struct thread_info *tp;
1837 frame_info_ptr next;
1839 /* THIS_FRAME does not contain a reference to its thread. */
1840 tp = inferior_thread ();
1842 bfun = NULL;
1843 next = get_next_frame (this_frame);
1844 if (next == NULL)
1846 const struct btrace_insn_iterator *replay;
1848 replay = tp->btrace.replay;
1849 if (replay != NULL)
1850 bfun = &replay->btinfo->functions[replay->call_index];
1852 else
1854 const struct btrace_function *callee;
1855 struct btrace_call_iterator it;
1857 callee = btrace_get_frame_function (next);
1858 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1859 return 0;
1861 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1862 return 0;
1864 bfun = btrace_call_get (&it);
1867 if (bfun == NULL)
1868 return 0;
1870 DEBUG ("[frame] sniffed frame for %s on level %d",
1871 btrace_get_bfun_name (bfun), bfun->level);
1873 /* This is our frame. Initialize the frame cache. */
1874 cache = bfcache_new (this_frame);
1875 cache->tp = tp;
1876 cache->bfun = bfun;
1878 *this_cache = cache;
1879 return 1;
1882 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1884 static int
1885 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1886 const frame_info_ptr &this_frame,
1887 void **this_cache)
1889 const struct btrace_function *bfun, *callee;
1890 struct btrace_frame_cache *cache;
1891 struct btrace_call_iterator it;
1892 frame_info_ptr next;
1893 struct thread_info *tinfo;
1895 next = get_next_frame (this_frame);
1896 if (next == NULL)
1897 return 0;
1899 callee = btrace_get_frame_function (next);
1900 if (callee == NULL)
1901 return 0;
1903 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1904 return 0;
1906 tinfo = inferior_thread ();
1907 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
1908 return 0;
1910 bfun = btrace_call_get (&it);
1912 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1913 btrace_get_bfun_name (bfun), bfun->level);
1915 /* This is our frame. Initialize the frame cache. */
1916 cache = bfcache_new (this_frame);
1917 cache->tp = tinfo;
1918 cache->bfun = bfun;
1920 *this_cache = cache;
1921 return 1;
1924 static void
1925 record_btrace_frame_dealloc_cache (frame_info *self, void *this_cache)
1927 struct btrace_frame_cache *cache;
1928 void **slot;
1930 cache = (struct btrace_frame_cache *) this_cache;
1932 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1933 gdb_assert (slot != NULL);
1935 htab_remove_elt (bfcache, cache);
1938 /* btrace recording does not store previous memory content, neither the stack
1939 frames content. Any unwinding would return erroneous results as the stack
1940 contents no longer matches the changed PC value restored from history.
1941 Therefore this unwinder reports any possibly unwound registers as
1942 <unavailable>. */
1944 const struct frame_unwind_legacy record_btrace_frame_unwind (
1945 "record-btrace",
1946 NORMAL_FRAME,
1947 FRAME_UNWIND_GDB,
1948 record_btrace_frame_unwind_stop_reason,
1949 record_btrace_frame_this_id,
1950 record_btrace_frame_prev_register,
1951 NULL,
1952 record_btrace_frame_sniffer,
1953 record_btrace_frame_dealloc_cache
1956 const struct frame_unwind_legacy record_btrace_tailcall_frame_unwind (
1957 "record-btrace tailcall",
1958 TAILCALL_FRAME,
1959 FRAME_UNWIND_GDB,
1960 record_btrace_frame_unwind_stop_reason,
1961 record_btrace_frame_this_id,
1962 record_btrace_frame_prev_register,
1963 NULL,
1964 record_btrace_tailcall_frame_sniffer,
1965 record_btrace_frame_dealloc_cache
1968 /* Implement the get_unwinder method. */
1970 const struct frame_unwind *
1971 record_btrace_target::get_unwinder ()
1973 return &record_btrace_frame_unwind;
1976 /* Implement the get_tailcall_unwinder method. */
1978 const struct frame_unwind *
1979 record_btrace_target::get_tailcall_unwinder ()
1981 return &record_btrace_tailcall_frame_unwind;
1984 /* Return a human-readable string for FLAG. */
1986 static const char *
1987 btrace_thread_flag_to_str (btrace_thread_flags flag)
1989 switch (flag)
1991 case BTHR_STEP:
1992 return "step";
1994 case BTHR_RSTEP:
1995 return "reverse-step";
1997 case BTHR_CONT:
1998 return "cont";
2000 case BTHR_RCONT:
2001 return "reverse-cont";
2003 case BTHR_STOP:
2004 return "stop";
2007 return "<invalid>";
2010 /* Indicate that TP should be resumed according to FLAG. */
2012 static void
2013 record_btrace_resume_thread (struct thread_info *tp,
2014 enum btrace_thread_flag flag)
2016 struct btrace_thread_info *btinfo;
2018 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
2019 tp->ptid.to_string ().c_str (), flag,
2020 btrace_thread_flag_to_str (flag));
2022 btinfo = &tp->btrace;
2024 /* Fetch the latest branch trace. */
2025 btrace_fetch (tp, record_btrace_get_cpu ());
2027 /* A resume request overwrites a preceding resume or stop request. */
2028 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2029 btinfo->flags |= flag;
2032 /* Get the current frame for TP. */
2034 static struct frame_id
2035 get_thread_current_frame_id (struct thread_info *tp)
2037 /* Set current thread, which is implicitly used by
2038 get_current_frame. */
2039 scoped_restore_current_thread restore_thread;
2041 switch_to_thread (tp);
2043 process_stratum_target *proc_target = tp->inf->process_target ();
2045 /* Clear the executing flag to allow changes to the current frame.
2046 We are not actually running, yet. We just started a reverse execution
2047 command or a record goto command.
2048 For the latter, EXECUTING is false and this has no effect.
2049 For the former, EXECUTING is true and we're in wait, about to
2050 move the thread. Since we need to recompute the stack, we temporarily
2051 set EXECUTING to false. */
2052 bool executing = tp->executing ();
2053 set_executing (proc_target, inferior_ptid, false);
2054 SCOPE_EXIT
2056 set_executing (proc_target, inferior_ptid, executing);
2058 return get_frame_id (get_current_frame ());
2061 /* Start replaying a thread. */
2063 static struct btrace_insn_iterator *
2064 record_btrace_start_replaying (struct thread_info *tp)
2066 struct btrace_insn_iterator *replay;
2067 struct btrace_thread_info *btinfo;
2069 btinfo = &tp->btrace;
2070 replay = NULL;
2072 /* We can't start replaying without trace. */
2073 if (btinfo->functions.empty ())
2074 error (_("No trace."));
2076 /* GDB stores the current frame_id when stepping in order to detects steps
2077 into subroutines.
2078 Since frames are computed differently when we're replaying, we need to
2079 recompute those stored frames and fix them up so we can still detect
2080 subroutines after we started replaying. */
2083 struct frame_id frame_id;
2084 int upd_step_frame_id, upd_step_stack_frame_id;
2086 /* The current frame without replaying - computed via normal unwind. */
2087 frame_id = get_thread_current_frame_id (tp);
2089 /* Check if we need to update any stepping-related frame id's. */
2090 upd_step_frame_id = (frame_id == tp->control.step_frame_id);
2091 upd_step_stack_frame_id = (frame_id == tp->control.step_stack_frame_id);
2093 /* We start replaying at the end of the branch trace. This corresponds
2094 to the current instruction. */
2095 replay = XNEW (struct btrace_insn_iterator);
2096 btrace_insn_end (replay, btinfo);
2098 /* Skip gaps at the end of the trace. */
2099 while (btrace_insn_get (replay) == NULL)
2101 unsigned int steps;
2103 steps = btrace_insn_prev (replay, 1);
2104 if (steps == 0)
2105 error (_("No trace."));
2108 /* We're not replaying, yet. */
2109 gdb_assert (btinfo->replay == NULL);
2110 btinfo->replay = replay;
2112 /* Make sure we're not using any stale registers. */
2113 registers_changed_thread (tp);
2115 /* The current frame with replaying - computed via btrace unwind. */
2116 frame_id = get_thread_current_frame_id (tp);
2118 /* Replace stepping related frames where necessary. */
2119 if (upd_step_frame_id)
2120 tp->control.step_frame_id = frame_id;
2121 if (upd_step_stack_frame_id)
2122 tp->control.step_stack_frame_id = frame_id;
2124 catch (const gdb_exception &except)
2126 xfree (btinfo->replay);
2127 btinfo->replay = NULL;
2129 registers_changed_thread (tp);
2131 throw;
2134 return replay;
2137 /* Stop replaying a thread. */
2139 static void
2140 record_btrace_stop_replaying (struct thread_info *tp)
2142 struct btrace_thread_info *btinfo;
2144 btinfo = &tp->btrace;
2146 xfree (btinfo->replay);
2147 btinfo->replay = NULL;
2149 /* Make sure we're not leaving any stale registers. */
2150 registers_changed_thread (tp);
2153 /* Stop replaying TP if it is at the end of its execution history. */
2155 static void
2156 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2158 struct btrace_insn_iterator *replay, end;
2159 struct btrace_thread_info *btinfo;
2161 btinfo = &tp->btrace;
2162 replay = btinfo->replay;
2164 if (replay == NULL)
2165 return;
2167 btrace_insn_end (&end, btinfo);
2169 if (btrace_insn_cmp (replay, &end) == 0)
2170 record_btrace_stop_replaying (tp);
2173 /* The resume method of target record-btrace. */
2175 void
2176 record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
2178 enum btrace_thread_flag flag, cflag;
2180 DEBUG ("resume %s: %s%s", ptid.to_string ().c_str (),
2181 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
2182 step ? "step" : "cont");
2184 /* Store the execution direction of the last resume.
2186 If there is more than one resume call, we have to rely on infrun
2187 to not change the execution direction in-between. */
2188 record_btrace_resume_exec_dir = ::execution_direction;
2190 /* As long as we're not replaying, just forward the request.
2192 For non-stop targets this means that no thread is replaying. In order to
2193 make progress, we may need to explicitly move replaying threads to the end
2194 of their execution history. */
2195 if ((::execution_direction != EXEC_REVERSE)
2196 && !record_is_replaying (minus_one_ptid))
2198 this->beneath ()->resume (ptid, step, signal);
2199 return;
2202 /* Compute the btrace thread flag for the requested move. */
2203 if (::execution_direction == EXEC_REVERSE)
2205 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2206 cflag = BTHR_RCONT;
2208 else
2210 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2211 cflag = BTHR_CONT;
2214 /* We just indicate the resume intent here. The actual stepping happens in
2215 record_btrace_wait below.
2217 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2219 process_stratum_target *proc_target = current_inferior ()->process_target ();
2221 if (!target_is_non_stop_p ())
2223 gdb_assert (inferior_ptid.matches (ptid));
2225 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2227 if (tp->ptid.matches (inferior_ptid))
2228 record_btrace_resume_thread (tp, flag);
2229 else
2230 record_btrace_resume_thread (tp, cflag);
2233 else
2235 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2236 record_btrace_resume_thread (tp, flag);
2239 /* Async support. */
2240 if (target_can_async_p ())
2242 target_async (true);
2243 mark_async_event_handler (record_btrace_async_inferior_event_handler);
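/* In summary, the resume request above maps to thread flags as follows:

     forward step     -> BTHR_STEP  (other threads in all-stop: BTHR_CONT)
     forward continue -> BTHR_CONT
     reverse step     -> BTHR_RSTEP (other threads in all-stop: BTHR_RCONT)
     reverse continue -> BTHR_RCONT

   The flags only record intent; the actual motion happens in the wait
   method below. */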
2247 /* Cancel resuming TP. */
2249 static void
2250 record_btrace_cancel_resume (struct thread_info *tp)
2252 btrace_thread_flags flags;
2254 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2255 if (flags == 0)
2256 return;
2258 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2259 print_thread_id (tp),
2260 tp->ptid.to_string ().c_str (), flags.raw (),
2261 btrace_thread_flag_to_str (flags));
2263 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2264 record_btrace_stop_replaying_at_end (tp);
2267 /* Return a target_waitstatus indicating that we ran out of history. */
2269 static struct target_waitstatus
2270 btrace_step_no_history (void)
2272 struct target_waitstatus status;
2274 status.set_no_history ();
2276 return status;
2279 /* Return a target_waitstatus indicating that a step finished. */
2281 static struct target_waitstatus
2282 btrace_step_stopped (void)
2284 struct target_waitstatus status;
2286 status.set_stopped (GDB_SIGNAL_TRAP);
2288 return status;
2291 /* Return a target_waitstatus indicating that a thread was stopped as
2292 requested. */
2294 static struct target_waitstatus
2295 btrace_step_stopped_on_request (void)
2297 struct target_waitstatus status;
2299 status.set_stopped (GDB_SIGNAL_0);
2301 return status;
2304 /* Return a target_waitstatus indicating a spurious stop. */
2306 static struct target_waitstatus
2307 btrace_step_spurious (void)
2309 struct target_waitstatus status;
2311 status.set_spurious ();
2313 return status;
2316 /* Return a target_waitstatus indicating that the thread was not resumed. */
2318 static struct target_waitstatus
2319 btrace_step_no_resumed (void)
2321 struct target_waitstatus status;
2323 status.set_no_resumed ();
2325 return status;
2328 /* Return a target_waitstatus indicating that we should wait again. */
2330 static struct target_waitstatus
2331 btrace_step_again (void)
2333 struct target_waitstatus status;
2335 status.set_ignore ();
2337 return status;
2340 /* Clear the record histories. */
2342 static void
2343 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2345 xfree (btinfo->insn_history);
2346 xfree (btinfo->call_history);
2348 btinfo->insn_history = NULL;
2349 btinfo->call_history = NULL;
2352 /* Check whether TP's current replay position is at a breakpoint. */
2354 static int
2355 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2357 struct btrace_insn_iterator *replay;
2358 struct btrace_thread_info *btinfo;
2359 const struct btrace_insn *insn;
2361 btinfo = &tp->btrace;
2362 replay = btinfo->replay;
2364 if (replay == NULL)
2365 return 0;
2367 insn = btrace_insn_get (replay);
2368 if (insn == NULL)
2369 return 0;
2371 return record_check_stopped_by_breakpoint (tp->inf->aspace.get (), insn->pc,
2372 &btinfo->stop_reason);
2375 /* Step one instruction in forward direction. */
2377 static struct target_waitstatus
2378 record_btrace_single_step_forward (struct thread_info *tp)
2380 struct btrace_insn_iterator *replay, end, start;
2381 struct btrace_thread_info *btinfo;
2383 btinfo = &tp->btrace;
2384 replay = btinfo->replay;
2386 /* We're done if we're not replaying. */
2387 if (replay == NULL)
2388 return btrace_step_no_history ();
2390 /* Check if we're stepping a breakpoint. */
2391 if (record_btrace_replay_at_breakpoint (tp))
2392 return btrace_step_stopped ();
2394 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2395 jump back to the instruction at which we started. If we're stepping a
2396 BTRACE_INSN_AUX instruction, print the auxiliary data and skip the
2397 instruction. */
2399 start = *replay;
2401 for (;;)
2403 unsigned int steps;
2405 /* We will bail out here if we continue stepping after reaching the end
2406 of the execution history. */
2407 steps = btrace_insn_next (replay, 1);
2408 if (steps == 0)
2410 *replay = start;
2411 return btrace_step_no_history ();
2414 const struct btrace_insn *insn = btrace_insn_get (replay);
2415 if (insn == nullptr)
2416 continue;
2418 /* If we're stepping a BTRACE_INSN_AUX instruction, print the auxiliary
2419 data and skip the instruction. */
2420 if (insn->iclass == BTRACE_INSN_AUX)
2422 gdb_printf ("[%s]\n",
2423 btinfo->aux_data.at (insn->aux_data_index).c_str ());
2424 continue;
2427 /* We have an instruction, we are done. */
2428 break;
2431 /* Determine the end of the instruction trace. */
2432 btrace_insn_end (&end, btinfo);
2434 /* The execution trace contains (and ends with) the current instruction.
2435 This instruction has not yet been executed, so the trace really ends
2436 one instruction earlier. */
2437 if (btrace_insn_cmp (replay, &end) == 0)
2438 return btrace_step_no_history ();
2440 return btrace_step_spurious ();
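/* Example: if the next trace entry is an auxiliary instruction, say the
   recorded payload of a ptwrite, the loop above prints the stored aux
   string in brackets on its own line and replay then resumes with the next
   real instruction; the bracket text itself depends on whatever produced
   the aux data. */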
2443 /* Step one instruction in backward direction. */
2445 static struct target_waitstatus
2446 record_btrace_single_step_backward (struct thread_info *tp)
2448 struct btrace_insn_iterator *replay, start;
2449 struct btrace_thread_info *btinfo;
2451 btinfo = &tp->btrace;
2452 replay = btinfo->replay;
2454 /* Start replaying if we're not already doing so. */
2455 if (replay == NULL)
2456 replay = record_btrace_start_replaying (tp);
2458 /* If we can't step any further, we reached the end of the history.
2459 Skip gaps during replay. If we end up at a gap (at the beginning of
2460 the trace), jump back to the instruction at which we started.
2461 If we're stepping a BTRACE_INSN_AUX instruction, print the auxiliary
2462 data and skip the instruction. */
2463 start = *replay;
2465 for (;;)
2467 unsigned int steps;
2469 steps = btrace_insn_prev (replay, 1);
2470 if (steps == 0)
2472 *replay = start;
2473 return btrace_step_no_history ();
2476 const struct btrace_insn *insn = btrace_insn_get (replay);
2477 if (insn == nullptr)
2478 continue;
2480 /* Check if we're stepping a BTRACE_INSN_AUX instruction and skip it. */
2481 if (insn->iclass == BTRACE_INSN_AUX)
2483 gdb_printf ("[%s]\n",
2484 btinfo->aux_data.at (insn->aux_data_index).c_str ());
2485 continue;
2488 /* We have an instruction, we are done. */
2489 break;
2492 /* Check if we're stepping a breakpoint.
2494 For reverse-stepping, this check is after the step. There is logic in
2495 infrun.c that handles reverse-stepping separately. See, for example,
2496 proceed and adjust_pc_after_break.
2498 This code assumes that for reverse-stepping, PC points to the last
2499 de-executed instruction, whereas for forward-stepping PC points to the
2500 next to-be-executed instruction. */
2501 if (record_btrace_replay_at_breakpoint (tp))
2502 return btrace_step_stopped ();
2504 return btrace_step_spurious ();
2507 /* Step a single thread. */
2509 static struct target_waitstatus
2510 record_btrace_step_thread (struct thread_info *tp)
2512 struct btrace_thread_info *btinfo;
2513 struct target_waitstatus status;
2514 btrace_thread_flags flags;
2516 btinfo = &tp->btrace;
2518 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2519 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2521 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2522 tp->ptid.to_string ().c_str (), flags.raw (),
2523 btrace_thread_flag_to_str (flags));
2525 /* We can't step without an execution history. */
2526 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2527 return btrace_step_no_history ();
2529 switch (flags)
2531 default:
2532 internal_error (_("invalid stepping type."));
2534 case BTHR_STOP:
2535 return btrace_step_stopped_on_request ();
2537 case BTHR_STEP:
2538 status = record_btrace_single_step_forward (tp);
2539 if (status.kind () != TARGET_WAITKIND_SPURIOUS)
2540 break;
2542 return btrace_step_stopped ();
2544 case BTHR_RSTEP:
2545 status = record_btrace_single_step_backward (tp);
2546 if (status.kind () != TARGET_WAITKIND_SPURIOUS)
2547 break;
2549 return btrace_step_stopped ();
2551 case BTHR_CONT:
2552 status = record_btrace_single_step_forward (tp);
2553 if (status.kind () != TARGET_WAITKIND_SPURIOUS)
2554 break;
2556 btinfo->flags |= flags;
2557 return btrace_step_again ();
2559 case BTHR_RCONT:
2560 status = record_btrace_single_step_backward (tp);
2561 if (status.kind () != TARGET_WAITKIND_SPURIOUS)
2562 break;
2564 btinfo->flags |= flags;
2565 return btrace_step_again ();
2568 /* We keep threads moving at the end of their execution history. The wait
2569 method will stop the thread for which the event is reported. */
2570 if (status.kind () == TARGET_WAITKIND_NO_HISTORY)
2571 btinfo->flags |= flags;
2573 return status;
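/* Summarizing the switch above: BTHR_STOP reports a stop with GDB_SIGNAL_0,
   BTHR_STEP/BTHR_RSTEP turn a spurious single step into a GDB_SIGNAL_TRAP
   stop, and BTHR_CONT/BTHR_RCONT re-arm themselves via btrace_step_again
   until a single step reports something other than spurious. */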
2576 /* Announce further events if necessary. */
2578 static void
2579 record_btrace_maybe_mark_async_event
2580 (const std::vector<thread_info *> &moving,
2581 const std::vector<thread_info *> &no_history)
2583 bool more_moving = !moving.empty ();
2584 bool more_no_history = !no_history.empty ();
2586 if (!more_moving && !more_no_history)
2587 return;
2589 if (more_moving)
2590 DEBUG ("movers pending");
2592 if (more_no_history)
2593 DEBUG ("no-history pending");
2595 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2598 /* The wait method of target record-btrace. */
2600 ptid_t
2601 record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
2602 target_wait_flags options)
2604 std::vector<thread_info *> moving;
2605 std::vector<thread_info *> no_history;
2607 /* Clear this; if needed we'll re-mark it below. */
2608 clear_async_event_handler (record_btrace_async_inferior_event_handler);
2610 DEBUG ("wait %s (0x%x)", ptid.to_string ().c_str (),
2611 (unsigned) options);
2613 /* As long as we're not replaying, just forward the request. */
2614 if ((::execution_direction != EXEC_REVERSE)
2615 && !record_is_replaying (minus_one_ptid))
2617 return this->beneath ()->wait (ptid, status, options);
2620 /* Keep a work list of moving threads. */
2621 process_stratum_target *proc_target = current_inferior ()->process_target ();
2622 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2623 if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
2624 moving.push_back (tp);
2626 if (moving.empty ())
2628 *status = btrace_step_no_resumed ();
2630 DEBUG ("wait ended by %s: %s", null_ptid.to_string ().c_str (),
2631 status->to_string ().c_str ());
2633 return null_ptid;
2636 /* Step moving threads one by one, one step each, until either one thread
2637 reports an event or we run out of threads to step.
2639 When stepping more than one thread, chances are that some threads reach
2640 the end of their execution history earlier than others. If we reported
2641 this immediately, all-stop on top of non-stop would stop all threads and
2642 resume the same threads next time. And we would report the same thread
2643 having reached the end of its execution history again.
2645 In the worst case, this would starve the other threads. But even if other
2646 threads were allowed to make progress, this would result in far too
2647 many intermediate stops.
2649 We therefore delay the reporting of "no execution history" until we have
2650 nothing else to report. By this time, all threads should have moved to
2651 either the beginning or the end of their execution history. There will
2652 be a single user-visible stop. */
2653 struct thread_info *eventing = NULL;
2654 while ((eventing == NULL) && !moving.empty ())
2656 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
2658 thread_info *tp = moving[ix];
2660 *status = record_btrace_step_thread (tp);
2662 switch (status->kind ())
2664 case TARGET_WAITKIND_IGNORE:
2665 ix++;
2666 break;
2668 case TARGET_WAITKIND_NO_HISTORY:
2669 no_history.push_back (ordered_remove (moving, ix));
2670 break;
2672 default:
2673 eventing = unordered_remove (moving, ix);
2674 break;
2679 if (eventing == NULL)
2681 /* We started with at least one moving thread. This thread must have
2682 either stopped or reached the end of its execution history.
2684 In the former case, EVENTING must not be NULL.
2685 In the latter case, NO_HISTORY must not be empty. */
2686 gdb_assert (!no_history.empty ());
2688 /* We kept threads moving at the end of their execution history. Stop
2689 EVENTING now that we are going to report its stop. */
2690 eventing = unordered_remove (no_history, 0);
2691 eventing->btrace.flags &= ~BTHR_MOVE;
2693 *status = btrace_step_no_history ();
2696 gdb_assert (eventing != NULL);
2698 /* We kept threads replaying at the end of their execution history. Stop
2699 replaying EVENTING now that we are going to report its stop. */
2700 record_btrace_stop_replaying_at_end (eventing);
2702 /* Stop all other threads. */
2703 if (!target_is_non_stop_p ())
2705 for (thread_info *tp : current_inferior ()->non_exited_threads ())
2706 record_btrace_cancel_resume (tp);
2709 /* In async mode, we need to announce further events. */
2710 if (target_is_async_p ())
2711 record_btrace_maybe_mark_async_event (moving, no_history);
2713 /* Start record histories anew from the current position. */
2714 record_btrace_clear_histories (&eventing->btrace);
2716 /* We moved the replay position but did not update registers. */
2717 registers_changed_thread (eventing);
2719 DEBUG ("wait ended by thread %s (%s): %s",
2720 print_thread_id (eventing),
2721 eventing->ptid.to_string ().c_str (),
2722 status->to_string ().c_str ());
2724 return eventing->ptid;
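/* Example scenario: threads A and B are both reverse-continued. B reaches
   the beginning of its history first and is parked on the no_history list
   while A keeps moving. If A then hits a breakpoint, A's stop is reported
   and B stays pending; B's "no history" event is only reported once no
   thread has anything else to report, yielding a single visible stop. */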
2727 /* The stop method of target record-btrace. */
2729 void
2730 record_btrace_target::stop (ptid_t ptid)
2732 DEBUG ("stop %s", ptid.to_string ().c_str ());
2734 /* As long as we're not replaying, just forward the request. */
2735 if ((::execution_direction != EXEC_REVERSE)
2736 && !record_is_replaying (minus_one_ptid))
2738 this->beneath ()->stop (ptid);
2740 else
2742 process_stratum_target *proc_target
2743 = current_inferior ()->process_target ();
2745 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2747 tp->btrace.flags &= ~BTHR_MOVE;
2748 tp->btrace.flags |= BTHR_STOP;
2753 /* The can_execute_reverse method of target record-btrace. */
2755 bool
2756 record_btrace_target::can_execute_reverse ()
2758 return true;
2761 /* The stopped_by_sw_breakpoint method of target record-btrace. */
2763 bool
2764 record_btrace_target::stopped_by_sw_breakpoint ()
2766 if (record_is_replaying (minus_one_ptid))
2768 struct thread_info *tp = inferior_thread ();
2770 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2773 return this->beneath ()->stopped_by_sw_breakpoint ();
2776 /* The supports_stopped_by_sw_breakpoint method of target
2777 record-btrace. */
2779 bool
2780 record_btrace_target::supports_stopped_by_sw_breakpoint ()
2782 if (record_is_replaying (minus_one_ptid))
2783 return true;
2785 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
2788 /* The stopped_by_hw_breakpoint method of target record-btrace. */
2790 bool
2791 record_btrace_target::stopped_by_hw_breakpoint ()
2793 if (record_is_replaying (minus_one_ptid))
2795 struct thread_info *tp = inferior_thread ();
2797 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2800 return this->beneath ()->stopped_by_hw_breakpoint ();
2803 /* The supports_stopped_by_hw_breakpoint method of target
2804 record-btrace. */
2806 bool
2807 record_btrace_target::supports_stopped_by_hw_breakpoint ()
2809 if (record_is_replaying (minus_one_ptid))
2810 return true;
2812 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
2815 /* The update_thread_list method of target record-btrace. */
2817 void
2818 record_btrace_target::update_thread_list ()
2820 /* We don't add or remove threads during replay. */
2821 if (record_is_replaying (minus_one_ptid))
2822 return;
2824 /* Forward the request. */
2825 this->beneath ()->update_thread_list ();
2828 /* The thread_alive method of target record-btrace. */
2830 bool
2831 record_btrace_target::thread_alive (ptid_t ptid)
2833 /* We don't add or remove threads during replay. */
2834 if (record_is_replaying (minus_one_ptid))
2835 return true;
2837 /* Forward the request. */
2838 return this->beneath ()->thread_alive (ptid);
2841 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2842 is stopped. */
2844 static void
2845 record_btrace_set_replay (struct thread_info *tp,
2846 const struct btrace_insn_iterator *it)
2848 struct btrace_thread_info *btinfo;
2850 btinfo = &tp->btrace;
2852 if (it == NULL)
2853 record_btrace_stop_replaying (tp);
2854 else
2856 if (btinfo->replay == NULL)
2857 record_btrace_start_replaying (tp);
2858 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2859 return;
2861 *btinfo->replay = *it;
2862 registers_changed_thread (tp);
2865 /* Start anew from the new replay position. */
2866 record_btrace_clear_histories (btinfo);
2868 tp->set_stop_pc (regcache_read_pc (get_thread_regcache (tp)));
2869 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
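/* Usage sketch: the goto_record* methods below drive this with an iterator
   positioned beforehand, e.g.

     struct btrace_insn_iterator it;
     if (btrace_find_insn_by_number (&it, &tp->btrace, number))
       record_btrace_set_replay (tp, &it);

   and with NULL to leave replay mode altogether. */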
2872 /* The goto_record_begin method of target record-btrace. */
2874 void
2875 record_btrace_target::goto_record_begin ()
2877 struct thread_info *tp;
2878 struct btrace_insn_iterator begin;
2880 tp = require_btrace_thread ();
2882 btrace_insn_begin (&begin, &tp->btrace);
2884 /* Skip gaps at the beginning of the trace. */
2885 while (btrace_insn_get (&begin) == NULL)
2887 unsigned int steps;
2889 steps = btrace_insn_next (&begin, 1);
2890 if (steps == 0)
2891 error (_("No trace."));
2894 record_btrace_set_replay (tp, &begin);
2897 /* The goto_record_end method of target record-btrace. */
2899 void
2900 record_btrace_target::goto_record_end ()
2902 struct thread_info *tp;
2904 tp = require_btrace_thread ();
2906 record_btrace_set_replay (tp, NULL);
2909 /* The goto_record method of target record-btrace. */
2911 void
2912 record_btrace_target::goto_record (ULONGEST insn_number)
2914 struct thread_info *tp;
2915 struct btrace_insn_iterator it;
2916 unsigned int number;
2917 int found;
2919 number = insn_number;
2921 /* Check for wrap-arounds. */
2922 if (number != insn_number)
2923 error (_("Instruction number out of range."));
2925 tp = require_btrace_thread ();
2927 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2929 /* Check if the instruction could not be found or is a gap or an
2930 auxiliary instruction. */
2931 if (found == 0)
2932 error (_("No such instruction."));
2934 const struct btrace_insn *insn = btrace_insn_get (&it);
2935 if (insn == NULL)
2936 error (_("No such instruction."));
2937 if (insn->iclass == BTRACE_INSN_AUX)
2938 error (_("Can't go to an auxiliary instruction."));
2940 record_btrace_set_replay (tp, &it);
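/* Worked example for the wrap-around check above: on a host where unsigned
   int is 32 bits, an INSN_NUMBER of 0x100000001 narrows to 1, so NUMBER !=
   INSN_NUMBER and the command errors out instead of silently jumping to
   instruction 1. */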
2943 /* The record_stop_replaying method of target record-btrace. */
2945 void
2946 record_btrace_target::record_stop_replaying ()
2948 for (thread_info *tp : current_inferior ()->non_exited_threads ())
2949 record_btrace_stop_replaying (tp);
2952 /* The execution_direction target method. */
2954 enum exec_direction_kind
2955 record_btrace_target::execution_direction ()
2957 return record_btrace_resume_exec_dir;
2960 /* The prepare_to_generate_core target method. */
2962 void
2963 record_btrace_target::prepare_to_generate_core ()
2965 record_btrace_generating_corefile = 1;
2968 /* The done_generating_core target method. */
2970 void
2971 record_btrace_target::done_generating_core ()
2973 record_btrace_generating_corefile = 0;
2976 /* Start recording in BTS format. */
2978 static void
2979 cmd_record_btrace_bts_start (const char *args, int from_tty)
2981 if (args != NULL && *args != 0)
2982 error (_("Invalid argument."));
2984 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2988 execute_command ("target record-btrace", from_tty);
2990 catch (const gdb_exception &exception)
2992 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2993 throw;
2997 /* Start recording in Intel Processor Trace format. */
2999 static void
3000 cmd_record_btrace_pt_start (const char *args, int from_tty)
3002 if (args != NULL && *args != 0)
3003 error (_("Invalid argument."));
3005 record_btrace_conf.format = BTRACE_FORMAT_PT;
3009 execute_command ("target record-btrace", from_tty);
3011 catch (const gdb_exception &exception)
3013 record_btrace_conf.format = BTRACE_FORMAT_NONE;
3014 throw;
3018 /* Alias for "target record". */
3020 static void
3021 cmd_record_btrace_start (const char *args, int from_tty)
3023 if (args != NULL && *args != 0)
3024 error (_("Invalid argument."));
3026 record_btrace_conf.format = BTRACE_FORMAT_PT;
3030 execute_command ("target record-btrace", from_tty);
3032 catch (const gdb_exception_error &exception)
3034 record_btrace_conf.format = BTRACE_FORMAT_BTS;
3038 execute_command ("target record-btrace", from_tty);
3040 catch (const gdb_exception &ex)
3042 record_btrace_conf.format = BTRACE_FORMAT_NONE;
3043 throw;
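/* Thus plain "record btrace" first tries Intel Processor Trace and falls
   back to BTS if the pt target cannot be opened; only when both formats
   fail does the error propagate, with the format reset to
   BTRACE_FORMAT_NONE. On a processor without Intel PT this behaves like
   "record btrace bts". */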
3048 /* The "show record btrace replay-memory-access" command. */
3050 static void
3051 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
3052 struct cmd_list_element *c, const char *value)
3054 gdb_printf (file, _("Replay memory access is %s.\n"),
3055 replay_memory_access);
3058 /* The "set record btrace cpu none" command. */
3060 static void
3061 cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
3063 if (args != nullptr && *args != 0)
3064 error (_("Trailing junk: '%s'."), args);
3066 record_btrace_cpu_state = CS_NONE;
3069 /* The "set record btrace cpu auto" command. */
3071 static void
3072 cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
3074 if (args != nullptr && *args != 0)
3075 error (_("Trailing junk: '%s'."), args);
3077 record_btrace_cpu_state = CS_AUTO;
3080 /* The "set record btrace cpu" command. */
3082 static void
3083 cmd_set_record_btrace_cpu (const char *args, int from_tty)
3085 if (args == nullptr)
3086 args = "";
3088 /* We use a hard-coded vendor string for now. */
3089 unsigned int family, model, stepping;
3090 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3091 &model, &l1, &stepping, &l2);
3092 if (matches == 3)
3094 if (strlen (args) != l2)
3095 error (_("Trailing junk: '%s'."), args + l2);
3097 else if (matches == 2)
3099 if (strlen (args) != l1)
3100 error (_("Trailing junk: '%s'."), args + l1);
3102 stepping = 0;
3104 else
3105 error (_("Bad format. See \"help set record btrace cpu\"."));
3107 if (USHRT_MAX < family)
3108 error (_("Cpu family too big."));
3110 if (UCHAR_MAX < model)
3111 error (_("Cpu model too big."));
3113 if (UCHAR_MAX < stepping)
3114 error (_("Cpu stepping too big."));
3116 record_btrace_cpu.vendor = CV_INTEL;
3117 record_btrace_cpu.family = family;
3118 record_btrace_cpu.model = model;
3119 record_btrace_cpu.stepping = stepping;
3121 record_btrace_cpu_state = CS_CPU;
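/* Examples accepted by the parser above:

     set record btrace cpu intel: 6/158      (family 6, model 158)
     set record btrace cpu intel: 6/158/10   (stepping 10)

   Anything else, including trailing text after the identifier, is
   rejected with an error. */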
3124 /* The "show record btrace cpu" command. */
3126 static void
3127 cmd_show_record_btrace_cpu (const char *args, int from_tty)
3129 if (args != nullptr && *args != 0)
3130 error (_("Trailing junk: '%s'."), args);
3132 switch (record_btrace_cpu_state)
3134 case CS_AUTO:
3135 gdb_printf (_("btrace cpu is 'auto'.\n"));
3136 return;
3138 case CS_NONE:
3139 gdb_printf (_("btrace cpu is 'none'.\n"));
3140 return;
3142 case CS_CPU:
3143 switch (record_btrace_cpu.vendor)
3145 case CV_INTEL:
3146 if (record_btrace_cpu.stepping == 0)
3147 gdb_printf (_("btrace cpu is 'intel: %u/%u'.\n"),
3148 record_btrace_cpu.family,
3149 record_btrace_cpu.model);
3150 else
3151 gdb_printf (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3152 record_btrace_cpu.family,
3153 record_btrace_cpu.model,
3154 record_btrace_cpu.stepping);
3155 return;
3159 error (_("Internal error: bad cpu state."));
3162 /* The "record bts buffer-size" show value function. */
3164 static void
3165 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3166 struct cmd_list_element *c,
3167 const char *value)
3169 gdb_printf (file, _("The record/replay bts buffer size is %s.\n"),
3170 value);
3173 /* The "record pt buffer-size" show value function. */
3175 static void
3176 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3177 struct cmd_list_element *c,
3178 const char *value)
3180 gdb_printf (file, _("The record/replay pt buffer size is %s.\n"),
3181 value);
3185 static bool event_tracing = false;
3187 /* The "record pt event-tracing" show value function. */
3189 static void
3190 show_record_pt_event_tracing_value (struct ui_file *file, int from_tty,
3191 struct cmd_list_element *c,
3192 const char *value)
3194 #if (LIBIPT_VERSION >= 0x201)
3195 gdb_printf (file, _("record pt event-tracing is %s.\n"), value);
3196 #else
3197 gdb_printf (file, _("Event-tracing is not supported by GDB.\n"));
3198 #endif /* LIBIPT_VERSION >= 0x201 */
3201 /* The "record pt event-tracing" set value function. */
3203 static void
3204 set_record_pt_event_tracing_value (const char *args, int from_tty,
3205 cmd_list_element *c)
3207 #if (LIBIPT_VERSION >= 0x201)
3208 record_btrace_conf.pt.event_tracing = event_tracing;
3209 #else
3210 gdb_printf (_("Event-tracing is not supported by GDB.\n"));
3211 #endif /* LIBIPT_VERSION >= 0x201 */
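/* With libipt 2.1 or newer (LIBIPT_VERSION 0x201), the feature can be
   toggled before starting a recording, e.g.

     set record pt event-tracing on
     record btrace pt

   With older libipt versions, both the set and show paths merely print
   that event tracing is unsupported. */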
3214 /* Initialize btrace commands. */
3216 void _initialize_record_btrace ();
3217 void
3218 _initialize_record_btrace ()
3220 cmd_list_element *record_btrace_cmd
3221 = add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3222 _("Start branch trace recording."),
3223 &record_btrace_cmdlist, 0, &record_cmdlist);
3224 add_alias_cmd ("b", record_btrace_cmd, class_obscure, 1, &record_cmdlist);
3226 cmd_list_element *record_btrace_bts_cmd
3227 = add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3228 _("\
3229 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3230 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3231 This format may not be available on all processors."),
3232 &record_btrace_cmdlist);
3233 add_alias_cmd ("bts", record_btrace_bts_cmd, class_obscure, 1,
3234 &record_cmdlist);
3236 cmd_list_element *record_btrace_pt_cmd
3237 = add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3238 _("\
3239 Start branch trace recording in Intel Processor Trace format.\n\n\
3240 This format may not be available on all processors."),
3241 &record_btrace_cmdlist);
3242 add_alias_cmd ("pt", record_btrace_pt_cmd, class_obscure, 1, &record_cmdlist);
3244 add_setshow_prefix_cmd ("btrace", class_support,
3245 _("Set record options."),
3246 _("Show record options."),
3247 &set_record_btrace_cmdlist,
3248 &show_record_btrace_cmdlist,
3249 &set_record_cmdlist, &show_record_cmdlist);
3251 add_setshow_enum_cmd ("replay-memory-access", no_class,
3252 replay_memory_access_types, &replay_memory_access, _("\
3253 Set what memory accesses are allowed during replay."), _("\
3254 Show what memory accesses are allowed during replay."),
3255 _("Default is READ-ONLY.\n\n\
3256 The btrace record target does not trace data.\n\
3257 The memory therefore corresponds to the live target and not\n\
3258 to the current replay position.\n\n\
3259 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3260 When READ-WRITE, allow accesses to read-only and read-write memory during\n\
3261 replay."),
3262 NULL, cmd_show_replay_memory_access,
3263 &set_record_btrace_cmdlist,
3264 &show_record_btrace_cmdlist);
3266 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3267 _("\
3268 Set the cpu to be used for trace decode.\n\n\
3269 The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3270 For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
3271 When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3272 The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3273 When GDB does not support that cpu, this option can be used to enable\n\
3274 workarounds for a similar cpu that GDB supports.\n\n\
3275 When set to \"none\", errata workarounds are disabled."),
3276 &set_record_btrace_cpu_cmdlist,
3278 &set_record_btrace_cmdlist);
3280 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3281 Automatically determine the cpu to be used for trace decode."),
3282 &set_record_btrace_cpu_cmdlist);
3284 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3285 Do not enable errata workarounds for trace decode."),
3286 &set_record_btrace_cpu_cmdlist);
3288 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3289 Show the cpu to be used for trace decode."),
3290 &show_record_btrace_cmdlist);
3292 add_setshow_prefix_cmd ("bts", class_support,
3293 _("Set record btrace bts options."),
3294 _("Show record btrace bts options."),
3295 &set_record_btrace_bts_cmdlist,
3296 &show_record_btrace_bts_cmdlist,
3297 &set_record_btrace_cmdlist,
3298 &show_record_btrace_cmdlist);
3300 add_setshow_uinteger_cmd ("buffer-size", no_class,
3301 &record_btrace_conf.bts.size,
3302 _("Set the record/replay bts buffer size."),
3303 _("Show the record/replay bts buffer size."), _("\
3304 When starting recording request a trace buffer of this size.\n\
3305 The actual buffer size may differ from the requested size.\n\
3306 Use \"info record\" to see the actual buffer size.\n\n\
3307 Bigger buffers allow longer recording but also take more time to process\n\
3308 the recorded execution trace.\n\n\
3309 The trace buffer size may not be changed while recording."), NULL,
3310 show_record_bts_buffer_size_value,
3311 &set_record_btrace_bts_cmdlist,
3312 &show_record_btrace_bts_cmdlist);
3314 add_setshow_prefix_cmd ("pt", class_support,
3315 _("Set record btrace pt options."),
3316 _("Show record btrace pt options."),
3317 &set_record_btrace_pt_cmdlist,
3318 &show_record_btrace_pt_cmdlist,
3319 &set_record_btrace_cmdlist,
3320 &show_record_btrace_cmdlist);
3322 add_setshow_uinteger_cmd ("buffer-size", no_class,
3323 &record_btrace_conf.pt.size,
3324 _("Set the record/replay pt buffer size."),
3325 _("Show the record/replay pt buffer size."), _("\
3326 Bigger buffers allow longer recording but also take more time to process\n\
3327 the recorded execution.\n\
3328 The actual buffer size may differ from the requested size. Use \"info record\"\n\
3329 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3330 &set_record_btrace_pt_cmdlist,
3331 &show_record_btrace_pt_cmdlist);
3333 add_setshow_boolean_cmd ("event-tracing", no_class, &event_tracing,
3334 _("Set event-tracing for record pt."),
3335 _("Show event-tracing for record pt."),
3336 _("\
3337 Use \"on\" to enable event tracing for recordings with Intel Processor Trace,\n\
3338 and \"off\" to disable it.\n\
3339 Without an argument, event tracing is enabled. Changing this setting has no\n\
3340 effect on an active recording."),
3341 set_record_pt_event_tracing_value,
3342 show_record_pt_event_tracing_value,
3343 &set_record_btrace_pt_cmdlist,
3344 &show_record_btrace_pt_cmdlist);
3346 add_target (record_btrace_target_info, record_btrace_target_open);
3348 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3349 xcalloc, xfree);
3351 record_btrace_conf.bts.size = 64 * 1024;
3352 record_btrace_conf.pt.size = 16 * 1024;
3353 #if (LIBIPT_VERSION >= 0x200)
3354 record_btrace_conf.pt.ptwrite = true;
3355 #else
3356 record_btrace_conf.pt.ptwrite = false;
3357 #endif
3358 record_btrace_conf.pt.event_tracing = false;