1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2020 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "record-btrace.h"
25 #include "gdbthread.h"
29 #include "observable.h"
30 #include "cli/cli-utils.h"
34 #include "filenames.h"
36 #include "frame-unwind.h"
39 #include "gdbsupport/event-loop.h"
44 #include "cli/cli-style.h"
45 #include "async-event.h"
46 #include <forward_list>
48 static const target_info record_btrace_target_info
= {
50 N_("Branch tracing target"),
51 N_("Collect control-flow trace and provide the execution history.")
54 /* The target_ops of record-btrace. */
56 class record_btrace_target final
: public target_ops
59 const target_info
&info () const override
60 { return record_btrace_target_info
; }
62 strata
stratum () const override
{ return record_stratum
; }
64 void close () override
;
65 void async (int) override
;
67 void detach (inferior
*inf
, int from_tty
) override
68 { record_detach (this, inf
, from_tty
); }
70 void disconnect (const char *, int) override
;
72 void mourn_inferior () override
73 { record_mourn_inferior (this); }
76 { record_kill (this); }
78 enum record_method
record_method (ptid_t ptid
) override
;
80 void stop_recording () override
;
81 void info_record () override
;
83 void insn_history (int size
, gdb_disassembly_flags flags
) override
;
84 void insn_history_from (ULONGEST from
, int size
,
85 gdb_disassembly_flags flags
) override
;
86 void insn_history_range (ULONGEST begin
, ULONGEST end
,
87 gdb_disassembly_flags flags
) override
;
88 void call_history (int size
, record_print_flags flags
) override
;
89 void call_history_from (ULONGEST begin
, int size
, record_print_flags flags
)
91 void call_history_range (ULONGEST begin
, ULONGEST end
, record_print_flags flags
)
94 bool record_is_replaying (ptid_t ptid
) override
;
95 bool record_will_replay (ptid_t ptid
, int dir
) override
;
96 void record_stop_replaying () override
;
98 enum target_xfer_status
xfer_partial (enum target_object object
,
101 const gdb_byte
*writebuf
,
102 ULONGEST offset
, ULONGEST len
,
103 ULONGEST
*xfered_len
) override
;
105 int insert_breakpoint (struct gdbarch
*,
106 struct bp_target_info
*) override
;
107 int remove_breakpoint (struct gdbarch
*, struct bp_target_info
*,
108 enum remove_bp_reason
) override
;
110 void fetch_registers (struct regcache
*, int) override
;
112 void store_registers (struct regcache
*, int) override
;
113 void prepare_to_store (struct regcache
*) override
;
115 const struct frame_unwind
*get_unwinder () override
;
117 const struct frame_unwind
*get_tailcall_unwinder () override
;
119 void commit_resume () override
;
120 void resume (ptid_t
, int, enum gdb_signal
) override
;
121 ptid_t
wait (ptid_t
, struct target_waitstatus
*, target_wait_flags
) override
;
123 void stop (ptid_t
) override
;
124 void update_thread_list () override
;
125 bool thread_alive (ptid_t ptid
) override
;
126 void goto_record_begin () override
;
127 void goto_record_end () override
;
128 void goto_record (ULONGEST insn
) override
;
130 bool can_execute_reverse () override
;
132 bool stopped_by_sw_breakpoint () override
;
133 bool supports_stopped_by_sw_breakpoint () override
;
135 bool stopped_by_hw_breakpoint () override
;
136 bool supports_stopped_by_hw_breakpoint () override
;
138 enum exec_direction_kind
execution_direction () override
;
139 void prepare_to_generate_core () override
;
140 void done_generating_core () override
;
143 static record_btrace_target record_btrace_ops
;
145 /* Initialize the record-btrace target ops. */
147 /* Token associated with a new-thread observer enabling branch tracing
148 for the new thread. */
149 static const gdb::observers::token record_btrace_thread_observer_token
{};
151 /* Memory access types used in set/show record btrace replay-memory-access. */
152 static const char replay_memory_access_read_only
[] = "read-only";
153 static const char replay_memory_access_read_write
[] = "read-write";
154 static const char *const replay_memory_access_types
[] =
156 replay_memory_access_read_only
,
157 replay_memory_access_read_write
,
161 /* The currently allowed replay memory access type. */
162 static const char *replay_memory_access
= replay_memory_access_read_only
;
164 /* The cpu state kinds. */
165 enum record_btrace_cpu_state_kind
172 /* The current cpu state. */
173 static enum record_btrace_cpu_state_kind record_btrace_cpu_state
= CS_AUTO
;
175 /* The current cpu for trace decode. */
176 static struct btrace_cpu record_btrace_cpu
;
178 /* Command lists for "set/show record btrace". */
179 static struct cmd_list_element
*set_record_btrace_cmdlist
;
180 static struct cmd_list_element
*show_record_btrace_cmdlist
;
182 /* The execution direction of the last resume we got. See record-full.c. */
183 static enum exec_direction_kind record_btrace_resume_exec_dir
= EXEC_FORWARD
;
185 /* The async event handler for reverse/replay execution. */
186 static struct async_event_handler
*record_btrace_async_inferior_event_handler
;
188 /* A flag indicating that we are currently generating a core file. */
189 static int record_btrace_generating_corefile
;
191 /* The current branch trace configuration. */
192 static struct btrace_config record_btrace_conf
;
194 /* Command list for "record btrace". */
195 static struct cmd_list_element
*record_btrace_cmdlist
;
197 /* Command lists for "set/show record btrace bts". */
198 static struct cmd_list_element
*set_record_btrace_bts_cmdlist
;
199 static struct cmd_list_element
*show_record_btrace_bts_cmdlist
;
201 /* Command lists for "set/show record btrace pt". */
202 static struct cmd_list_element
*set_record_btrace_pt_cmdlist
;
203 static struct cmd_list_element
*show_record_btrace_pt_cmdlist
;
205 /* Command list for "set record btrace cpu". */
206 static struct cmd_list_element
*set_record_btrace_cpu_cmdlist
;
208 /* Print a record-btrace debug message. Use do ... while (0) to avoid
209 ambiguities when used in if statements. */
/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */
#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	fprintf_unfiltered (gdb_stdlog,					\
			    "[record-btrace] " msg "\n", ##args);	\
    }									\
  while (0)
221 /* Return the cpu configured by the user. Returns NULL if the cpu was
222 configured as auto. */
223 const struct btrace_cpu
*
224 record_btrace_get_cpu (void)
226 switch (record_btrace_cpu_state
)
232 record_btrace_cpu
.vendor
= CV_UNKNOWN
;
235 return &record_btrace_cpu
;
238 error (_("Internal error: bad record btrace cpu state."));
241 /* Update the branch trace for the current thread and return a pointer to its
244 Throws an error if there is no thread or no trace. This function never
247 static struct thread_info
*
248 require_btrace_thread (void)
252 if (inferior_ptid
== null_ptid
)
253 error (_("No thread."));
255 thread_info
*tp
= inferior_thread ();
257 validate_registers_access ();
259 btrace_fetch (tp
, record_btrace_get_cpu ());
261 if (btrace_is_empty (tp
))
262 error (_("No trace."));
267 /* Update the branch trace for the current thread and return a pointer to its
268 branch trace information struct.
270 Throws an error if there is no thread or no trace. This function never
273 static struct btrace_thread_info
*
274 require_btrace (void)
276 struct thread_info
*tp
;
278 tp
= require_btrace_thread ();
283 /* Enable branch tracing for one thread. Warn on errors. */
286 record_btrace_enable_warn (struct thread_info
*tp
)
288 /* Ignore this thread if its inferior is not recorded by us. */
289 target_ops
*rec
= tp
->inf
->target_at (record_stratum
);
290 if (rec
!= &record_btrace_ops
)
295 btrace_enable (tp
, &record_btrace_conf
);
297 catch (const gdb_exception_error
&error
)
299 warning ("%s", error
.what ());
303 /* Enable automatic tracing of new threads. */
306 record_btrace_auto_enable (void)
308 DEBUG ("attach thread observer");
310 gdb::observers::new_thread
.attach (record_btrace_enable_warn
,
311 record_btrace_thread_observer_token
);
314 /* Disable automatic tracing of new threads. */
317 record_btrace_auto_disable (void)
319 DEBUG ("detach thread observer");
321 gdb::observers::new_thread
.detach (record_btrace_thread_observer_token
);
324 /* The record-btrace async event handler function. */
327 record_btrace_handle_async_inferior_event (gdb_client_data data
)
329 inferior_event_handler (INF_REG_EVENT
);
332 /* See record-btrace.h. */
335 record_btrace_push_target (void)
339 record_btrace_auto_enable ();
341 push_target (&record_btrace_ops
);
343 record_btrace_async_inferior_event_handler
344 = create_async_event_handler (record_btrace_handle_async_inferior_event
,
345 NULL
, "record-btrace");
346 record_btrace_generating_corefile
= 0;
348 format
= btrace_format_short_string (record_btrace_conf
.format
);
349 gdb::observers::record_changed
.notify (current_inferior (), 1, "btrace", format
);
352 /* Disable btrace on a set of threads on scope exit. */
354 struct scoped_btrace_disable
356 scoped_btrace_disable () = default;
358 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable
);
360 ~scoped_btrace_disable ()
362 for (thread_info
*tp
: m_threads
)
366 void add_thread (thread_info
*thread
)
368 m_threads
.push_front (thread
);
377 std::forward_list
<thread_info
*> m_threads
;
380 /* Open target record-btrace. */
383 record_btrace_target_open (const char *args
, int from_tty
)
385 /* If we fail to enable btrace for one thread, disable it for the threads for
386 which it was successfully enabled. */
387 scoped_btrace_disable btrace_disable
;
393 if (!target_has_execution ())
394 error (_("The program is not being run."));
396 for (thread_info
*tp
: current_inferior ()->non_exited_threads ())
397 if (args
== NULL
|| *args
== 0 || number_is_in_list (args
, tp
->global_num
))
399 btrace_enable (tp
, &record_btrace_conf
);
401 btrace_disable
.add_thread (tp
);
404 record_btrace_push_target ();
406 btrace_disable
.discard ();
409 /* The stop_recording method of target record-btrace. */
412 record_btrace_target::stop_recording ()
414 DEBUG ("stop recording");
416 record_btrace_auto_disable ();
418 for (thread_info
*tp
: current_inferior ()->non_exited_threads ())
419 if (tp
->btrace
.target
!= NULL
)
423 /* The disconnect method of target record-btrace. */
426 record_btrace_target::disconnect (const char *args
,
429 struct target_ops
*beneath
= this->beneath ();
431 /* Do not stop recording, just clean up GDB side. */
432 unpush_target (this);
434 /* Forward disconnect. */
435 beneath
->disconnect (args
, from_tty
);
438 /* The close method of target record-btrace. */
441 record_btrace_target::close ()
443 if (record_btrace_async_inferior_event_handler
!= NULL
)
444 delete_async_event_handler (&record_btrace_async_inferior_event_handler
);
446 /* Make sure automatic recording gets disabled even if we did not stop
447 recording before closing the record-btrace target. */
448 record_btrace_auto_disable ();
450 /* We should have already stopped recording.
451 Tear down btrace in case we have not. */
452 for (thread_info
*tp
: current_inferior ()->non_exited_threads ())
453 btrace_teardown (tp
);
456 /* The async method of target record-btrace. */
459 record_btrace_target::async (int enable
)
462 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
464 clear_async_event_handler (record_btrace_async_inferior_event_handler
);
466 this->beneath ()->async (enable
);
469 /* Adjusts the size and returns a human readable size suffix. */
/* Adjusts the size and returns a human readable size suffix.
   *SIZE is reduced to the largest unit (GB, MB, kB) that divides it
   evenly; the matching suffix is returned ("" if none applies).  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }

  return "";
}
497 /* Print a BTS configuration. */
500 record_btrace_print_bts_conf (const struct btrace_config_bts
*conf
)
508 suffix
= record_btrace_adjust_size (&size
);
509 printf_unfiltered (_("Buffer size: %u%s.\n"), size
, suffix
);
513 /* Print an Intel Processor Trace configuration. */
516 record_btrace_print_pt_conf (const struct btrace_config_pt
*conf
)
524 suffix
= record_btrace_adjust_size (&size
);
525 printf_unfiltered (_("Buffer size: %u%s.\n"), size
, suffix
);
529 /* Print a branch tracing configuration. */
532 record_btrace_print_conf (const struct btrace_config
*conf
)
534 printf_unfiltered (_("Recording format: %s.\n"),
535 btrace_format_string (conf
->format
));
537 switch (conf
->format
)
539 case BTRACE_FORMAT_NONE
:
542 case BTRACE_FORMAT_BTS
:
543 record_btrace_print_bts_conf (&conf
->bts
);
546 case BTRACE_FORMAT_PT
:
547 record_btrace_print_pt_conf (&conf
->pt
);
551 internal_error (__FILE__
, __LINE__
, _("Unknown branch trace format."));
554 /* The info_record method of target record-btrace. */
557 record_btrace_target::info_record ()
559 struct btrace_thread_info
*btinfo
;
560 const struct btrace_config
*conf
;
561 struct thread_info
*tp
;
562 unsigned int insns
, calls
, gaps
;
566 if (inferior_ptid
== null_ptid
)
567 error (_("No thread."));
569 tp
= inferior_thread ();
571 validate_registers_access ();
573 btinfo
= &tp
->btrace
;
575 conf
= ::btrace_conf (btinfo
);
577 record_btrace_print_conf (conf
);
579 btrace_fetch (tp
, record_btrace_get_cpu ());
585 if (!btrace_is_empty (tp
))
587 struct btrace_call_iterator call
;
588 struct btrace_insn_iterator insn
;
590 btrace_call_end (&call
, btinfo
);
591 btrace_call_prev (&call
, 1);
592 calls
= btrace_call_number (&call
);
594 btrace_insn_end (&insn
, btinfo
);
595 insns
= btrace_insn_number (&insn
);
597 /* If the last instruction is not a gap, it is the current instruction
598 that is not actually part of the record. */
599 if (btrace_insn_get (&insn
) != NULL
)
602 gaps
= btinfo
->ngaps
;
605 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
606 "for thread %s (%s).\n"), insns
, calls
, gaps
,
607 print_thread_id (tp
),
608 target_pid_to_str (tp
->ptid
).c_str ());
610 if (btrace_is_replaying (tp
))
611 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
612 btrace_insn_number (btinfo
->replay
));
615 /* Print a decode error. */
618 btrace_ui_out_decode_error (struct ui_out
*uiout
, int errcode
,
619 enum btrace_format format
)
621 const char *errstr
= btrace_decode_error (format
, errcode
);
623 uiout
->text (_("["));
624 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
625 if (!(format
== BTRACE_FORMAT_PT
&& errcode
> 0))
627 uiout
->text (_("decode error ("));
628 uiout
->field_signed ("errcode", errcode
);
629 uiout
->text (_("): "));
631 uiout
->text (errstr
);
632 uiout
->text (_("]\n"));
635 /* A range of source lines. */
/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};
651 static struct btrace_line_range
652 btrace_mk_line_range (struct symtab
*symtab
, int begin
, int end
)
654 struct btrace_line_range range
;
656 range
.symtab
= symtab
;
663 /* Add a line to a line range. */
665 static struct btrace_line_range
666 btrace_line_range_add (struct btrace_line_range range
, int line
)
668 if (range
.end
<= range
.begin
)
670 /* This is the first entry. */
672 range
.end
= line
+ 1;
674 else if (line
< range
.begin
)
676 else if (range
.end
< line
)
682 /* Return non-zero if RANGE is empty, zero otherwise. */
685 btrace_line_range_is_empty (struct btrace_line_range range
)
687 return range
.end
<= range
.begin
;
690 /* Return non-zero if LHS contains RHS, zero otherwise. */
693 btrace_line_range_contains_range (struct btrace_line_range lhs
,
694 struct btrace_line_range rhs
)
696 return ((lhs
.symtab
== rhs
.symtab
)
697 && (lhs
.begin
<= rhs
.begin
)
698 && (rhs
.end
<= lhs
.end
));
701 /* Find the line range associated with PC. */
703 static struct btrace_line_range
704 btrace_find_line_range (CORE_ADDR pc
)
706 struct btrace_line_range range
;
707 struct linetable_entry
*lines
;
708 struct linetable
*ltable
;
709 struct symtab
*symtab
;
712 symtab
= find_pc_line_symtab (pc
);
714 return btrace_mk_line_range (NULL
, 0, 0);
716 ltable
= SYMTAB_LINETABLE (symtab
);
718 return btrace_mk_line_range (symtab
, 0, 0);
720 nlines
= ltable
->nitems
;
721 lines
= ltable
->item
;
723 return btrace_mk_line_range (symtab
, 0, 0);
725 range
= btrace_mk_line_range (symtab
, 0, 0);
726 for (i
= 0; i
< nlines
- 1; i
++)
728 /* The test of is_stmt here was added when the is_stmt field was
729 introduced to the 'struct linetable_entry' structure. This
730 ensured that this loop maintained the same behaviour as before we
731 introduced is_stmt. That said, it might be that we would be
732 better off not checking is_stmt here, this would lead to us
733 possibly adding more line numbers to the range. At the time this
734 change was made I was unsure how to test this so chose to go with
735 maintaining the existing experience. */
736 if ((lines
[i
].pc
== pc
) && (lines
[i
].line
!= 0)
737 && (lines
[i
].is_stmt
== 1))
738 range
= btrace_line_range_add (range
, lines
[i
].line
);
744 /* Print source lines in LINES to UIOUT.
746 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
747 instructions corresponding to that source line. When printing a new source
748 line, we do the cleanups for the open chain and open a new cleanup chain for
749 the new source line. If the source line range in LINES is not empty, this
750 function will leave the cleanup chain for the last printed source line open
751 so instructions can be added to it. */
754 btrace_print_lines (struct btrace_line_range lines
, struct ui_out
*uiout
,
755 gdb::optional
<ui_out_emit_tuple
> *src_and_asm_tuple
,
756 gdb::optional
<ui_out_emit_list
> *asm_list
,
757 gdb_disassembly_flags flags
)
759 print_source_lines_flags psl_flags
;
761 if (flags
& DISASSEMBLY_FILENAME
)
762 psl_flags
|= PRINT_SOURCE_LINES_FILENAME
;
764 for (int line
= lines
.begin
; line
< lines
.end
; ++line
)
768 src_and_asm_tuple
->emplace (uiout
, "src_and_asm_line");
770 print_source_lines (lines
.symtab
, line
, line
+ 1, psl_flags
);
772 asm_list
->emplace (uiout
, "line_asm_insn");
776 /* Disassemble a section of the recorded instruction trace. */
779 btrace_insn_history (struct ui_out
*uiout
,
780 const struct btrace_thread_info
*btinfo
,
781 const struct btrace_insn_iterator
*begin
,
782 const struct btrace_insn_iterator
*end
,
783 gdb_disassembly_flags flags
)
785 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags
,
786 btrace_insn_number (begin
), btrace_insn_number (end
));
788 flags
|= DISASSEMBLY_SPECULATIVE
;
790 struct gdbarch
*gdbarch
= target_gdbarch ();
791 btrace_line_range last_lines
= btrace_mk_line_range (NULL
, 0, 0);
793 ui_out_emit_list
list_emitter (uiout
, "asm_insns");
795 gdb::optional
<ui_out_emit_tuple
> src_and_asm_tuple
;
796 gdb::optional
<ui_out_emit_list
> asm_list
;
798 gdb_pretty_print_disassembler
disasm (gdbarch
, uiout
);
800 for (btrace_insn_iterator it
= *begin
; btrace_insn_cmp (&it
, end
) != 0;
801 btrace_insn_next (&it
, 1))
803 const struct btrace_insn
*insn
;
805 insn
= btrace_insn_get (&it
);
807 /* A NULL instruction indicates a gap in the trace. */
810 const struct btrace_config
*conf
;
812 conf
= btrace_conf (btinfo
);
814 /* We have trace so we must have a configuration. */
815 gdb_assert (conf
!= NULL
);
817 uiout
->field_fmt ("insn-number", "%u",
818 btrace_insn_number (&it
));
821 btrace_ui_out_decode_error (uiout
, btrace_insn_get_error (&it
),
826 struct disasm_insn dinsn
;
828 if ((flags
& DISASSEMBLY_SOURCE
) != 0)
830 struct btrace_line_range lines
;
832 lines
= btrace_find_line_range (insn
->pc
);
833 if (!btrace_line_range_is_empty (lines
)
834 && !btrace_line_range_contains_range (last_lines
, lines
))
836 btrace_print_lines (lines
, uiout
, &src_and_asm_tuple
, &asm_list
,
840 else if (!src_and_asm_tuple
.has_value ())
842 gdb_assert (!asm_list
.has_value ());
844 src_and_asm_tuple
.emplace (uiout
, "src_and_asm_line");
846 /* No source information. */
847 asm_list
.emplace (uiout
, "line_asm_insn");
850 gdb_assert (src_and_asm_tuple
.has_value ());
851 gdb_assert (asm_list
.has_value ());
854 memset (&dinsn
, 0, sizeof (dinsn
));
855 dinsn
.number
= btrace_insn_number (&it
);
856 dinsn
.addr
= insn
->pc
;
858 if ((insn
->flags
& BTRACE_INSN_FLAG_SPECULATIVE
) != 0)
859 dinsn
.is_speculative
= 1;
861 disasm
.pretty_print_insn (&dinsn
, flags
);
866 /* The insn_history method of target record-btrace. */
869 record_btrace_target::insn_history (int size
, gdb_disassembly_flags flags
)
871 struct btrace_thread_info
*btinfo
;
872 struct btrace_insn_history
*history
;
873 struct btrace_insn_iterator begin
, end
;
874 struct ui_out
*uiout
;
875 unsigned int context
, covered
;
877 uiout
= current_uiout
;
878 ui_out_emit_tuple
tuple_emitter (uiout
, "insn history");
879 context
= abs (size
);
881 error (_("Bad record instruction-history-size."));
883 btinfo
= require_btrace ();
884 history
= btinfo
->insn_history
;
887 struct btrace_insn_iterator
*replay
;
889 DEBUG ("insn-history (0x%x): %d", (unsigned) flags
, size
);
891 /* If we're replaying, we start at the replay position. Otherwise, we
892 start at the tail of the trace. */
893 replay
= btinfo
->replay
;
897 btrace_insn_end (&begin
, btinfo
);
899 /* We start from here and expand in the requested direction. Then we
900 expand in the other direction, as well, to fill up any remaining
905 /* We want the current position covered, as well. */
906 covered
= btrace_insn_next (&end
, 1);
907 covered
+= btrace_insn_prev (&begin
, context
- covered
);
908 covered
+= btrace_insn_next (&end
, context
- covered
);
912 covered
= btrace_insn_next (&end
, context
);
913 covered
+= btrace_insn_prev (&begin
, context
- covered
);
918 begin
= history
->begin
;
921 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags
, size
,
922 btrace_insn_number (&begin
), btrace_insn_number (&end
));
927 covered
= btrace_insn_prev (&begin
, context
);
932 covered
= btrace_insn_next (&end
, context
);
937 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
941 printf_unfiltered (_("At the start of the branch trace record.\n"));
943 printf_unfiltered (_("At the end of the branch trace record.\n"));
946 btrace_set_insn_history (btinfo
, &begin
, &end
);
949 /* The insn_history_range method of target record-btrace. */
952 record_btrace_target::insn_history_range (ULONGEST from
, ULONGEST to
,
953 gdb_disassembly_flags flags
)
955 struct btrace_thread_info
*btinfo
;
956 struct btrace_insn_iterator begin
, end
;
957 struct ui_out
*uiout
;
958 unsigned int low
, high
;
961 uiout
= current_uiout
;
962 ui_out_emit_tuple
tuple_emitter (uiout
, "insn history");
966 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags
, low
, high
);
968 /* Check for wrap-arounds. */
969 if (low
!= from
|| high
!= to
)
970 error (_("Bad range."));
973 error (_("Bad range."));
975 btinfo
= require_btrace ();
977 found
= btrace_find_insn_by_number (&begin
, btinfo
, low
);
979 error (_("Range out of bounds."));
981 found
= btrace_find_insn_by_number (&end
, btinfo
, high
);
984 /* Silently truncate the range. */
985 btrace_insn_end (&end
, btinfo
);
989 /* We want both begin and end to be inclusive. */
990 btrace_insn_next (&end
, 1);
993 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
994 btrace_set_insn_history (btinfo
, &begin
, &end
);
997 /* The insn_history_from method of target record-btrace. */
1000 record_btrace_target::insn_history_from (ULONGEST from
, int size
,
1001 gdb_disassembly_flags flags
)
1003 ULONGEST begin
, end
, context
;
1005 context
= abs (size
);
1007 error (_("Bad record instruction-history-size."));
1016 begin
= from
- context
+ 1;
1021 end
= from
+ context
- 1;
1023 /* Check for wrap-around. */
1028 insn_history_range (begin
, end
, flags
);
1031 /* Print the instruction number range for a function call history line. */
1034 btrace_call_history_insn_range (struct ui_out
*uiout
,
1035 const struct btrace_function
*bfun
)
1037 unsigned int begin
, end
, size
;
1039 size
= bfun
->insn
.size ();
1040 gdb_assert (size
> 0);
1042 begin
= bfun
->insn_offset
;
1043 end
= begin
+ size
- 1;
1045 uiout
->field_unsigned ("insn begin", begin
);
1047 uiout
->field_unsigned ("insn end", end
);
1050 /* Compute the lowest and highest source line for the instructions in BFUN
1051 and return them in PBEGIN and PEND.
1052 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
1053 result from inlining or macro expansion. */
1056 btrace_compute_src_line_range (const struct btrace_function
*bfun
,
1057 int *pbegin
, int *pend
)
1059 struct symtab
*symtab
;
1070 symtab
= symbol_symtab (sym
);
1072 for (const btrace_insn
&insn
: bfun
->insn
)
1074 struct symtab_and_line sal
;
1076 sal
= find_pc_line (insn
.pc
, 0);
1077 if (sal
.symtab
!= symtab
|| sal
.line
== 0)
1080 begin
= std::min (begin
, sal
.line
);
1081 end
= std::max (end
, sal
.line
);
1089 /* Print the source line information for a function call history line. */
1092 btrace_call_history_src_line (struct ui_out
*uiout
,
1093 const struct btrace_function
*bfun
)
1102 uiout
->field_string ("file",
1103 symtab_to_filename_for_display (symbol_symtab (sym
)),
1104 file_name_style
.style ());
1106 btrace_compute_src_line_range (bfun
, &begin
, &end
);
1111 uiout
->field_signed ("min line", begin
);
1117 uiout
->field_signed ("max line", end
);
1120 /* Get the name of a branch trace function. */
1123 btrace_get_bfun_name (const struct btrace_function
*bfun
)
1125 struct minimal_symbol
*msym
;
1135 return sym
->print_name ();
1136 else if (msym
!= NULL
)
1137 return msym
->print_name ();
1142 /* Disassemble a section of the recorded function trace. */
1145 btrace_call_history (struct ui_out
*uiout
,
1146 const struct btrace_thread_info
*btinfo
,
1147 const struct btrace_call_iterator
*begin
,
1148 const struct btrace_call_iterator
*end
,
1151 struct btrace_call_iterator it
;
1152 record_print_flags flags
= (enum record_print_flag
) int_flags
;
1154 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags
, btrace_call_number (begin
),
1155 btrace_call_number (end
));
1157 for (it
= *begin
; btrace_call_cmp (&it
, end
) < 0; btrace_call_next (&it
, 1))
1159 const struct btrace_function
*bfun
;
1160 struct minimal_symbol
*msym
;
1163 bfun
= btrace_call_get (&it
);
1167 /* Print the function index. */
1168 uiout
->field_unsigned ("index", bfun
->number
);
1171 /* Indicate gaps in the trace. */
1172 if (bfun
->errcode
!= 0)
1174 const struct btrace_config
*conf
;
1176 conf
= btrace_conf (btinfo
);
1178 /* We have trace so we must have a configuration. */
1179 gdb_assert (conf
!= NULL
);
1181 btrace_ui_out_decode_error (uiout
, bfun
->errcode
, conf
->format
);
1186 if ((flags
& RECORD_PRINT_INDENT_CALLS
) != 0)
1188 int level
= bfun
->level
+ btinfo
->level
, i
;
1190 for (i
= 0; i
< level
; ++i
)
1195 uiout
->field_string ("function", sym
->print_name (),
1196 function_name_style
.style ());
1197 else if (msym
!= NULL
)
1198 uiout
->field_string ("function", msym
->print_name (),
1199 function_name_style
.style ());
1200 else if (!uiout
->is_mi_like_p ())
1201 uiout
->field_string ("function", "??",
1202 function_name_style
.style ());
1204 if ((flags
& RECORD_PRINT_INSN_RANGE
) != 0)
1206 uiout
->text (_("\tinst "));
1207 btrace_call_history_insn_range (uiout
, bfun
);
1210 if ((flags
& RECORD_PRINT_SRC_LINE
) != 0)
1212 uiout
->text (_("\tat "));
1213 btrace_call_history_src_line (uiout
, bfun
);
1220 /* The call_history method of target record-btrace. */
1223 record_btrace_target::call_history (int size
, record_print_flags flags
)
1225 struct btrace_thread_info
*btinfo
;
1226 struct btrace_call_history
*history
;
1227 struct btrace_call_iterator begin
, end
;
1228 struct ui_out
*uiout
;
1229 unsigned int context
, covered
;
1231 uiout
= current_uiout
;
1232 ui_out_emit_tuple
tuple_emitter (uiout
, "insn history");
1233 context
= abs (size
);
1235 error (_("Bad record function-call-history-size."));
1237 btinfo
= require_btrace ();
1238 history
= btinfo
->call_history
;
1239 if (history
== NULL
)
1241 struct btrace_insn_iterator
*replay
;
1243 DEBUG ("call-history (0x%x): %d", (int) flags
, size
);
1245 /* If we're replaying, we start at the replay position. Otherwise, we
1246 start at the tail of the trace. */
1247 replay
= btinfo
->replay
;
1250 begin
.btinfo
= btinfo
;
1251 begin
.index
= replay
->call_index
;
1254 btrace_call_end (&begin
, btinfo
);
1256 /* We start from here and expand in the requested direction. Then we
1257 expand in the other direction, as well, to fill up any remaining
1262 /* We want the current position covered, as well. */
1263 covered
= btrace_call_next (&end
, 1);
1264 covered
+= btrace_call_prev (&begin
, context
- covered
);
1265 covered
+= btrace_call_next (&end
, context
- covered
);
1269 covered
= btrace_call_next (&end
, context
);
1270 covered
+= btrace_call_prev (&begin
, context
- covered
);
1275 begin
= history
->begin
;
1278 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags
, size
,
1279 btrace_call_number (&begin
), btrace_call_number (&end
));
1284 covered
= btrace_call_prev (&begin
, context
);
1289 covered
= btrace_call_next (&end
, context
);
1294 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
1298 printf_unfiltered (_("At the start of the branch trace record.\n"));
1300 printf_unfiltered (_("At the end of the branch trace record.\n"));
1303 btrace_set_call_history (btinfo
, &begin
, &end
);
1306 /* The call_history_range method of target record-btrace. */
1309 record_btrace_target::call_history_range (ULONGEST from
, ULONGEST to
,
1310 record_print_flags flags
)
1312 struct btrace_thread_info
*btinfo
;
1313 struct btrace_call_iterator begin
, end
;
1314 struct ui_out
*uiout
;
1315 unsigned int low
, high
;
1318 uiout
= current_uiout
;
1319 ui_out_emit_tuple
tuple_emitter (uiout
, "func history");
1323 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags
, low
, high
);
1325 /* Check for wrap-arounds. */
1326 if (low
!= from
|| high
!= to
)
1327 error (_("Bad range."));
1330 error (_("Bad range."));
1332 btinfo
= require_btrace ();
1334 found
= btrace_find_call_by_number (&begin
, btinfo
, low
);
1336 error (_("Range out of bounds."));
1338 found
= btrace_find_call_by_number (&end
, btinfo
, high
);
1341 /* Silently truncate the range. */
1342 btrace_call_end (&end
, btinfo
);
1346 /* We want both begin and end to be inclusive. */
1347 btrace_call_next (&end
, 1);
1350 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
1351 btrace_set_call_history (btinfo
, &begin
, &end
);
1354 /* The call_history_from method of target record-btrace. */
1357 record_btrace_target::call_history_from (ULONGEST from
, int size
,
1358 record_print_flags flags
)
1360 ULONGEST begin
, end
, context
;
1362 context
= abs (size
);
1364 error (_("Bad record function-call-history-size."));
1373 begin
= from
- context
+ 1;
1378 end
= from
+ context
- 1;
1380 /* Check for wrap-around. */
1385 call_history_range ( begin
, end
, flags
);
1388 /* The record_method method of target record-btrace. */
1391 record_btrace_target::record_method (ptid_t ptid
)
1393 process_stratum_target
*proc_target
= current_inferior ()->process_target ();
1394 thread_info
*const tp
= find_thread_ptid (proc_target
, ptid
);
1397 error (_("No thread."));
1399 if (tp
->btrace
.target
== NULL
)
1400 return RECORD_METHOD_NONE
;
1402 return RECORD_METHOD_BTRACE
;
1405 /* The record_is_replaying method of target record-btrace. */
1408 record_btrace_target::record_is_replaying (ptid_t ptid
)
1410 process_stratum_target
*proc_target
= current_inferior ()->process_target ();
1411 for (thread_info
*tp
: all_non_exited_threads (proc_target
, ptid
))
1412 if (btrace_is_replaying (tp
))
1418 /* The record_will_replay method of target record-btrace. */
1421 record_btrace_target::record_will_replay (ptid_t ptid
, int dir
)
1423 return dir
== EXEC_REVERSE
|| record_is_replaying (ptid
);
1426 /* The xfer_partial method of target record-btrace. */
1428 enum target_xfer_status
1429 record_btrace_target::xfer_partial (enum target_object object
,
1430 const char *annex
, gdb_byte
*readbuf
,
1431 const gdb_byte
*writebuf
, ULONGEST offset
,
1432 ULONGEST len
, ULONGEST
*xfered_len
)
1434 /* Filter out requests that don't make sense during replay. */
1435 if (replay_memory_access
== replay_memory_access_read_only
1436 && !record_btrace_generating_corefile
1437 && record_is_replaying (inferior_ptid
))
1441 case TARGET_OBJECT_MEMORY
:
1443 struct target_section
*section
;
1445 /* We do not allow writing memory in general. */
1446 if (writebuf
!= NULL
)
1449 return TARGET_XFER_UNAVAILABLE
;
1452 /* We allow reading readonly memory. */
1453 section
= target_section_by_addr (this, offset
);
1454 if (section
!= NULL
)
1456 /* Check if the section we found is readonly. */
1457 if ((bfd_section_flags (section
->the_bfd_section
)
1458 & SEC_READONLY
) != 0)
1460 /* Truncate the request to fit into this section. */
1461 len
= std::min (len
, section
->endaddr
- offset
);
1467 return TARGET_XFER_UNAVAILABLE
;
1472 /* Forward the request. */
1473 return this->beneath ()->xfer_partial (object
, annex
, readbuf
, writebuf
,
1474 offset
, len
, xfered_len
);
1477 /* The insert_breakpoint method of target record-btrace. */
1480 record_btrace_target::insert_breakpoint (struct gdbarch
*gdbarch
,
1481 struct bp_target_info
*bp_tgt
)
1486 /* Inserting breakpoints requires accessing memory. Allow it for the
1487 duration of this function. */
1488 old
= replay_memory_access
;
1489 replay_memory_access
= replay_memory_access_read_write
;
1494 ret
= this->beneath ()->insert_breakpoint (gdbarch
, bp_tgt
);
1496 catch (const gdb_exception
&except
)
1498 replay_memory_access
= old
;
1501 replay_memory_access
= old
;
1506 /* The remove_breakpoint method of target record-btrace. */
1509 record_btrace_target::remove_breakpoint (struct gdbarch
*gdbarch
,
1510 struct bp_target_info
*bp_tgt
,
1511 enum remove_bp_reason reason
)
1516 /* Removing breakpoints requires accessing memory. Allow it for the
1517 duration of this function. */
1518 old
= replay_memory_access
;
1519 replay_memory_access
= replay_memory_access_read_write
;
1524 ret
= this->beneath ()->remove_breakpoint (gdbarch
, bp_tgt
, reason
);
1526 catch (const gdb_exception
&except
)
1528 replay_memory_access
= old
;
1531 replay_memory_access
= old
;
1536 /* The fetch_registers method of target record-btrace. */
1539 record_btrace_target::fetch_registers (struct regcache
*regcache
, int regno
)
1541 btrace_insn_iterator
*replay
= nullptr;
1543 /* Thread-db may ask for a thread's registers before GDB knows about the
1544 thread. We forward the request to the target beneath in this
1546 thread_info
*tp
= find_thread_ptid (regcache
->target (), regcache
->ptid ());
1548 replay
= tp
->btrace
.replay
;
1550 if (replay
!= nullptr && !record_btrace_generating_corefile
)
1552 const struct btrace_insn
*insn
;
1553 struct gdbarch
*gdbarch
;
1556 gdbarch
= regcache
->arch ();
1557 pcreg
= gdbarch_pc_regnum (gdbarch
);
1561 /* We can only provide the PC register. */
1562 if (regno
>= 0 && regno
!= pcreg
)
1565 insn
= btrace_insn_get (replay
);
1566 gdb_assert (insn
!= NULL
);
1568 regcache
->raw_supply (regno
, &insn
->pc
);
1571 this->beneath ()->fetch_registers (regcache
, regno
);
1574 /* The store_registers method of target record-btrace. */
1577 record_btrace_target::store_registers (struct regcache
*regcache
, int regno
)
1579 if (!record_btrace_generating_corefile
1580 && record_is_replaying (regcache
->ptid ()))
1581 error (_("Cannot write registers while replaying."));
1583 gdb_assert (may_write_registers
);
1585 this->beneath ()->store_registers (regcache
, regno
);
1588 /* The prepare_to_store method of target record-btrace. */
1591 record_btrace_target::prepare_to_store (struct regcache
*regcache
)
1593 if (!record_btrace_generating_corefile
1594 && record_is_replaying (regcache
->ptid ()))
1597 this->beneath ()->prepare_to_store (regcache
);
1600 /* The branch trace frame cache. */
1602 struct btrace_frame_cache
1605 struct thread_info
*tp
;
1607 /* The frame info. */
1608 struct frame_info
*frame
;
1610 /* The branch trace function segment. */
1611 const struct btrace_function
*bfun
;
1614 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1616 static htab_t bfcache
;
1618 /* hash_f for htab_create_alloc of bfcache. */
1621 bfcache_hash (const void *arg
)
1623 const struct btrace_frame_cache
*cache
1624 = (const struct btrace_frame_cache
*) arg
;
1626 return htab_hash_pointer (cache
->frame
);
1629 /* eq_f for htab_create_alloc of bfcache. */
1632 bfcache_eq (const void *arg1
, const void *arg2
)
1634 const struct btrace_frame_cache
*cache1
1635 = (const struct btrace_frame_cache
*) arg1
;
1636 const struct btrace_frame_cache
*cache2
1637 = (const struct btrace_frame_cache
*) arg2
;
1639 return cache1
->frame
== cache2
->frame
;
1642 /* Create a new btrace frame cache. */
1644 static struct btrace_frame_cache
*
1645 bfcache_new (struct frame_info
*frame
)
1647 struct btrace_frame_cache
*cache
;
1650 cache
= FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache
);
1651 cache
->frame
= frame
;
1653 slot
= htab_find_slot (bfcache
, cache
, INSERT
);
1654 gdb_assert (*slot
== NULL
);
1660 /* Extract the branch trace function from a branch trace frame. */
1662 static const struct btrace_function
*
1663 btrace_get_frame_function (struct frame_info
*frame
)
1665 const struct btrace_frame_cache
*cache
;
1666 struct btrace_frame_cache pattern
;
1669 pattern
.frame
= frame
;
1671 slot
= htab_find_slot (bfcache
, &pattern
, NO_INSERT
);
1675 cache
= (const struct btrace_frame_cache
*) *slot
;
1679 /* Implement stop_reason method for record_btrace_frame_unwind. */
1681 static enum unwind_stop_reason
1682 record_btrace_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1685 const struct btrace_frame_cache
*cache
;
1686 const struct btrace_function
*bfun
;
1688 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1690 gdb_assert (bfun
!= NULL
);
1693 return UNWIND_UNAVAILABLE
;
1695 return UNWIND_NO_REASON
;
1698 /* Implement this_id method for record_btrace_frame_unwind. */
1701 record_btrace_frame_this_id (struct frame_info
*this_frame
, void **this_cache
,
1702 struct frame_id
*this_id
)
1704 const struct btrace_frame_cache
*cache
;
1705 const struct btrace_function
*bfun
;
1706 struct btrace_call_iterator it
;
1707 CORE_ADDR code
, special
;
1709 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1712 gdb_assert (bfun
!= NULL
);
1714 while (btrace_find_call_by_number (&it
, &cache
->tp
->btrace
, bfun
->prev
) != 0)
1715 bfun
= btrace_call_get (&it
);
1717 code
= get_frame_func (this_frame
);
1718 special
= bfun
->number
;
1720 *this_id
= frame_id_build_unavailable_stack_special (code
, special
);
1722 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1723 btrace_get_bfun_name (cache
->bfun
),
1724 core_addr_to_string_nz (this_id
->code_addr
),
1725 core_addr_to_string_nz (this_id
->special_addr
));
1728 /* Implement prev_register method for record_btrace_frame_unwind. */
1730 static struct value
*
1731 record_btrace_frame_prev_register (struct frame_info
*this_frame
,
1735 const struct btrace_frame_cache
*cache
;
1736 const struct btrace_function
*bfun
, *caller
;
1737 struct btrace_call_iterator it
;
1738 struct gdbarch
*gdbarch
;
1742 gdbarch
= get_frame_arch (this_frame
);
1743 pcreg
= gdbarch_pc_regnum (gdbarch
);
1744 if (pcreg
< 0 || regnum
!= pcreg
)
1745 throw_error (NOT_AVAILABLE_ERROR
,
1746 _("Registers are not available in btrace record history"));
1748 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1750 gdb_assert (bfun
!= NULL
);
1752 if (btrace_find_call_by_number (&it
, &cache
->tp
->btrace
, bfun
->up
) == 0)
1753 throw_error (NOT_AVAILABLE_ERROR
,
1754 _("No caller in btrace record history"));
1756 caller
= btrace_call_get (&it
);
1758 if ((bfun
->flags
& BFUN_UP_LINKS_TO_RET
) != 0)
1759 pc
= caller
->insn
.front ().pc
;
1762 pc
= caller
->insn
.back ().pc
;
1763 pc
+= gdb_insn_length (gdbarch
, pc
);
1766 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1767 btrace_get_bfun_name (bfun
), bfun
->level
,
1768 core_addr_to_string_nz (pc
));
1770 return frame_unwind_got_address (this_frame
, regnum
, pc
);
1773 /* Implement sniffer method for record_btrace_frame_unwind. */
1776 record_btrace_frame_sniffer (const struct frame_unwind
*self
,
1777 struct frame_info
*this_frame
,
1780 const struct btrace_function
*bfun
;
1781 struct btrace_frame_cache
*cache
;
1782 struct thread_info
*tp
;
1783 struct frame_info
*next
;
1785 /* THIS_FRAME does not contain a reference to its thread. */
1786 tp
= inferior_thread ();
1789 next
= get_next_frame (this_frame
);
1792 const struct btrace_insn_iterator
*replay
;
1794 replay
= tp
->btrace
.replay
;
1796 bfun
= &replay
->btinfo
->functions
[replay
->call_index
];
1800 const struct btrace_function
*callee
;
1801 struct btrace_call_iterator it
;
1803 callee
= btrace_get_frame_function (next
);
1804 if (callee
== NULL
|| (callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) != 0)
1807 if (btrace_find_call_by_number (&it
, &tp
->btrace
, callee
->up
) == 0)
1810 bfun
= btrace_call_get (&it
);
1816 DEBUG ("[frame] sniffed frame for %s on level %d",
1817 btrace_get_bfun_name (bfun
), bfun
->level
);
1819 /* This is our frame. Initialize the frame cache. */
1820 cache
= bfcache_new (this_frame
);
1824 *this_cache
= cache
;
1828 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1831 record_btrace_tailcall_frame_sniffer (const struct frame_unwind
*self
,
1832 struct frame_info
*this_frame
,
1835 const struct btrace_function
*bfun
, *callee
;
1836 struct btrace_frame_cache
*cache
;
1837 struct btrace_call_iterator it
;
1838 struct frame_info
*next
;
1839 struct thread_info
*tinfo
;
1841 next
= get_next_frame (this_frame
);
1845 callee
= btrace_get_frame_function (next
);
1849 if ((callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1852 tinfo
= inferior_thread ();
1853 if (btrace_find_call_by_number (&it
, &tinfo
->btrace
, callee
->up
) == 0)
1856 bfun
= btrace_call_get (&it
);
1858 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1859 btrace_get_bfun_name (bfun
), bfun
->level
);
1861 /* This is our frame. Initialize the frame cache. */
1862 cache
= bfcache_new (this_frame
);
1866 *this_cache
= cache
;
1871 record_btrace_frame_dealloc_cache (struct frame_info
*self
, void *this_cache
)
1873 struct btrace_frame_cache
*cache
;
1876 cache
= (struct btrace_frame_cache
*) this_cache
;
1878 slot
= htab_find_slot (bfcache
, cache
, NO_INSERT
);
1879 gdb_assert (slot
!= NULL
);
1881 htab_remove_elt (bfcache
, cache
);
1884 /* btrace recording does not store previous memory content, neither the stack
1885 frames content. Any unwinding would return erroneous results as the stack
1886 contents no longer matches the changed PC value restored from history.
1887 Therefore this unwinder reports any possibly unwound registers as
1890 const struct frame_unwind record_btrace_frame_unwind
=
1893 record_btrace_frame_unwind_stop_reason
,
1894 record_btrace_frame_this_id
,
1895 record_btrace_frame_prev_register
,
1897 record_btrace_frame_sniffer
,
1898 record_btrace_frame_dealloc_cache
1901 const struct frame_unwind record_btrace_tailcall_frame_unwind
=
1904 record_btrace_frame_unwind_stop_reason
,
1905 record_btrace_frame_this_id
,
1906 record_btrace_frame_prev_register
,
1908 record_btrace_tailcall_frame_sniffer
,
1909 record_btrace_frame_dealloc_cache
1912 /* Implement the get_unwinder method. */
1914 const struct frame_unwind
*
1915 record_btrace_target::get_unwinder ()
1917 return &record_btrace_frame_unwind
;
1920 /* Implement the get_tailcall_unwinder method. */
1922 const struct frame_unwind
*
1923 record_btrace_target::get_tailcall_unwinder ()
1925 return &record_btrace_tailcall_frame_unwind
;
1928 /* Return a human-readable string for FLAG. */
1931 btrace_thread_flag_to_str (btrace_thread_flags flag
)
1939 return "reverse-step";
1945 return "reverse-cont";
1954 /* Indicate that TP should be resumed according to FLAG. */
1957 record_btrace_resume_thread (struct thread_info
*tp
,
1958 enum btrace_thread_flag flag
)
1960 struct btrace_thread_info
*btinfo
;
1962 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp
),
1963 target_pid_to_str (tp
->ptid
).c_str (), flag
,
1964 btrace_thread_flag_to_str (flag
));
1966 btinfo
= &tp
->btrace
;
1968 /* Fetch the latest branch trace. */
1969 btrace_fetch (tp
, record_btrace_get_cpu ());
1971 /* A resume request overwrites a preceding resume or stop request. */
1972 btinfo
->flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
1973 btinfo
->flags
|= flag
;
1976 /* Get the current frame for TP. */
1978 static struct frame_id
1979 get_thread_current_frame_id (struct thread_info
*tp
)
1984 /* Set current thread, which is implicitly used by
1985 get_current_frame. */
1986 scoped_restore_current_thread restore_thread
;
1988 switch_to_thread (tp
);
1990 process_stratum_target
*proc_target
= tp
->inf
->process_target ();
1992 /* Clear the executing flag to allow changes to the current frame.
1993 We are not actually running, yet. We just started a reverse execution
1994 command or a record goto command.
1995 For the latter, EXECUTING is false and this has no effect.
1996 For the former, EXECUTING is true and we're in wait, about to
1997 move the thread. Since we need to recompute the stack, we temporarily
1998 set EXECUTING to false. */
1999 executing
= tp
->executing
;
2000 set_executing (proc_target
, inferior_ptid
, false);
2005 id
= get_frame_id (get_current_frame ());
2007 catch (const gdb_exception
&except
)
2009 /* Restore the previous execution state. */
2010 set_executing (proc_target
, inferior_ptid
, executing
);
2015 /* Restore the previous execution state. */
2016 set_executing (proc_target
, inferior_ptid
, executing
);
2021 /* Start replaying a thread. */
2023 static struct btrace_insn_iterator
*
2024 record_btrace_start_replaying (struct thread_info
*tp
)
2026 struct btrace_insn_iterator
*replay
;
2027 struct btrace_thread_info
*btinfo
;
2029 btinfo
= &tp
->btrace
;
2032 /* We can't start replaying without trace. */
2033 if (btinfo
->functions
.empty ())
2036 /* GDB stores the current frame_id when stepping in order to detects steps
2038 Since frames are computed differently when we're replaying, we need to
2039 recompute those stored frames and fix them up so we can still detect
2040 subroutines after we started replaying. */
2043 struct frame_id frame_id
;
2044 int upd_step_frame_id
, upd_step_stack_frame_id
;
2046 /* The current frame without replaying - computed via normal unwind. */
2047 frame_id
= get_thread_current_frame_id (tp
);
2049 /* Check if we need to update any stepping-related frame id's. */
2050 upd_step_frame_id
= frame_id_eq (frame_id
,
2051 tp
->control
.step_frame_id
);
2052 upd_step_stack_frame_id
= frame_id_eq (frame_id
,
2053 tp
->control
.step_stack_frame_id
);
2055 /* We start replaying at the end of the branch trace. This corresponds
2056 to the current instruction. */
2057 replay
= XNEW (struct btrace_insn_iterator
);
2058 btrace_insn_end (replay
, btinfo
);
2060 /* Skip gaps at the end of the trace. */
2061 while (btrace_insn_get (replay
) == NULL
)
2065 steps
= btrace_insn_prev (replay
, 1);
2067 error (_("No trace."));
2070 /* We're not replaying, yet. */
2071 gdb_assert (btinfo
->replay
== NULL
);
2072 btinfo
->replay
= replay
;
2074 /* Make sure we're not using any stale registers. */
2075 registers_changed_thread (tp
);
2077 /* The current frame with replaying - computed via btrace unwind. */
2078 frame_id
= get_thread_current_frame_id (tp
);
2080 /* Replace stepping related frames where necessary. */
2081 if (upd_step_frame_id
)
2082 tp
->control
.step_frame_id
= frame_id
;
2083 if (upd_step_stack_frame_id
)
2084 tp
->control
.step_stack_frame_id
= frame_id
;
2086 catch (const gdb_exception
&except
)
2088 xfree (btinfo
->replay
);
2089 btinfo
->replay
= NULL
;
2091 registers_changed_thread (tp
);
2099 /* Stop replaying a thread. */
2102 record_btrace_stop_replaying (struct thread_info
*tp
)
2104 struct btrace_thread_info
*btinfo
;
2106 btinfo
= &tp
->btrace
;
2108 xfree (btinfo
->replay
);
2109 btinfo
->replay
= NULL
;
2111 /* Make sure we're not leaving any stale registers. */
2112 registers_changed_thread (tp
);
2115 /* Stop replaying TP if it is at the end of its execution history. */
2118 record_btrace_stop_replaying_at_end (struct thread_info
*tp
)
2120 struct btrace_insn_iterator
*replay
, end
;
2121 struct btrace_thread_info
*btinfo
;
2123 btinfo
= &tp
->btrace
;
2124 replay
= btinfo
->replay
;
2129 btrace_insn_end (&end
, btinfo
);
2131 if (btrace_insn_cmp (replay
, &end
) == 0)
2132 record_btrace_stop_replaying (tp
);
2135 /* The resume method of target record-btrace. */
2138 record_btrace_target::resume (ptid_t ptid
, int step
, enum gdb_signal signal
)
2140 enum btrace_thread_flag flag
, cflag
;
2142 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid
).c_str (),
2143 ::execution_direction
== EXEC_REVERSE
? "reverse-" : "",
2144 step
? "step" : "cont");
2146 /* Store the execution direction of the last resume.
2148 If there is more than one resume call, we have to rely on infrun
2149 to not change the execution direction in-between. */
2150 record_btrace_resume_exec_dir
= ::execution_direction
;
2152 /* As long as we're not replaying, just forward the request.
2154 For non-stop targets this means that no thread is replaying. In order to
2155 make progress, we may need to explicitly move replaying threads to the end
2156 of their execution history. */
2157 if ((::execution_direction
!= EXEC_REVERSE
)
2158 && !record_is_replaying (minus_one_ptid
))
2160 this->beneath ()->resume (ptid
, step
, signal
);
2164 /* Compute the btrace thread flag for the requested move. */
2165 if (::execution_direction
== EXEC_REVERSE
)
2167 flag
= step
== 0 ? BTHR_RCONT
: BTHR_RSTEP
;
2172 flag
= step
== 0 ? BTHR_CONT
: BTHR_STEP
;
2176 /* We just indicate the resume intent here. The actual stepping happens in
2177 record_btrace_wait below.
2179 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2181 process_stratum_target
*proc_target
= current_inferior ()->process_target ();
2183 if (!target_is_non_stop_p ())
2185 gdb_assert (inferior_ptid
.matches (ptid
));
2187 for (thread_info
*tp
: all_non_exited_threads (proc_target
, ptid
))
2189 if (tp
->ptid
.matches (inferior_ptid
))
2190 record_btrace_resume_thread (tp
, flag
);
2192 record_btrace_resume_thread (tp
, cflag
);
2197 for (thread_info
*tp
: all_non_exited_threads (proc_target
, ptid
))
2198 record_btrace_resume_thread (tp
, flag
);
2201 /* Async support. */
2202 if (target_can_async_p ())
2205 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
2209 /* The commit_resume method of target record-btrace. */
2212 record_btrace_target::commit_resume ()
2214 if ((::execution_direction
!= EXEC_REVERSE
)
2215 && !record_is_replaying (minus_one_ptid
))
2216 beneath ()->commit_resume ();
2219 /* Cancel resuming TP. */
2222 record_btrace_cancel_resume (struct thread_info
*tp
)
2224 btrace_thread_flags flags
;
2226 flags
= tp
->btrace
.flags
& (BTHR_MOVE
| BTHR_STOP
);
2230 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2231 print_thread_id (tp
),
2232 target_pid_to_str (tp
->ptid
).c_str (), flags
.raw (),
2233 btrace_thread_flag_to_str (flags
));
2235 tp
->btrace
.flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
2236 record_btrace_stop_replaying_at_end (tp
);
2239 /* Return a target_waitstatus indicating that we ran out of history. */
2241 static struct target_waitstatus
2242 btrace_step_no_history (void)
2244 struct target_waitstatus status
;
2246 status
.kind
= TARGET_WAITKIND_NO_HISTORY
;
2251 /* Return a target_waitstatus indicating that a step finished. */
2253 static struct target_waitstatus
2254 btrace_step_stopped (void)
2256 struct target_waitstatus status
;
2258 status
.kind
= TARGET_WAITKIND_STOPPED
;
2259 status
.value
.sig
= GDB_SIGNAL_TRAP
;
2264 /* Return a target_waitstatus indicating that a thread was stopped as
2267 static struct target_waitstatus
2268 btrace_step_stopped_on_request (void)
2270 struct target_waitstatus status
;
2272 status
.kind
= TARGET_WAITKIND_STOPPED
;
2273 status
.value
.sig
= GDB_SIGNAL_0
;
2278 /* Return a target_waitstatus indicating a spurious stop. */
2280 static struct target_waitstatus
2281 btrace_step_spurious (void)
2283 struct target_waitstatus status
;
2285 status
.kind
= TARGET_WAITKIND_SPURIOUS
;
2290 /* Return a target_waitstatus indicating that the thread was not resumed. */
2292 static struct target_waitstatus
2293 btrace_step_no_resumed (void)
2295 struct target_waitstatus status
;
2297 status
.kind
= TARGET_WAITKIND_NO_RESUMED
;
2302 /* Return a target_waitstatus indicating that we should wait again. */
2304 static struct target_waitstatus
2305 btrace_step_again (void)
2307 struct target_waitstatus status
;
2309 status
.kind
= TARGET_WAITKIND_IGNORE
;
2314 /* Clear the record histories. */
2317 record_btrace_clear_histories (struct btrace_thread_info
*btinfo
)
2319 xfree (btinfo
->insn_history
);
2320 xfree (btinfo
->call_history
);
2322 btinfo
->insn_history
= NULL
;
2323 btinfo
->call_history
= NULL
;
2326 /* Check whether TP's current replay position is at a breakpoint. */
2329 record_btrace_replay_at_breakpoint (struct thread_info
*tp
)
2331 struct btrace_insn_iterator
*replay
;
2332 struct btrace_thread_info
*btinfo
;
2333 const struct btrace_insn
*insn
;
2335 btinfo
= &tp
->btrace
;
2336 replay
= btinfo
->replay
;
2341 insn
= btrace_insn_get (replay
);
2345 return record_check_stopped_by_breakpoint (tp
->inf
->aspace
, insn
->pc
,
2346 &btinfo
->stop_reason
);
2349 /* Step one instruction in forward direction. */
2351 static struct target_waitstatus
2352 record_btrace_single_step_forward (struct thread_info
*tp
)
2354 struct btrace_insn_iterator
*replay
, end
, start
;
2355 struct btrace_thread_info
*btinfo
;
2357 btinfo
= &tp
->btrace
;
2358 replay
= btinfo
->replay
;
2360 /* We're done if we're not replaying. */
2362 return btrace_step_no_history ();
2364 /* Check if we're stepping a breakpoint. */
2365 if (record_btrace_replay_at_breakpoint (tp
))
2366 return btrace_step_stopped ();
2368 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2369 jump back to the instruction at which we started. */
2375 /* We will bail out here if we continue stepping after reaching the end
2376 of the execution history. */
2377 steps
= btrace_insn_next (replay
, 1);
2381 return btrace_step_no_history ();
2384 while (btrace_insn_get (replay
) == NULL
);
2386 /* Determine the end of the instruction trace. */
2387 btrace_insn_end (&end
, btinfo
);
2389 /* The execution trace contains (and ends with) the current instruction.
2390 This instruction has not been executed, yet, so the trace really ends
2391 one instruction earlier. */
2392 if (btrace_insn_cmp (replay
, &end
) == 0)
2393 return btrace_step_no_history ();
2395 return btrace_step_spurious ();
2398 /* Step one instruction in backward direction. */
2400 static struct target_waitstatus
2401 record_btrace_single_step_backward (struct thread_info
*tp
)
2403 struct btrace_insn_iterator
*replay
, start
;
2404 struct btrace_thread_info
*btinfo
;
2406 btinfo
= &tp
->btrace
;
2407 replay
= btinfo
->replay
;
2409 /* Start replaying if we're not already doing so. */
2411 replay
= record_btrace_start_replaying (tp
);
2413 /* If we can't step any further, we reached the end of the history.
2414 Skip gaps during replay. If we end up at a gap (at the beginning of
2415 the trace), jump back to the instruction at which we started. */
2421 steps
= btrace_insn_prev (replay
, 1);
2425 return btrace_step_no_history ();
2428 while (btrace_insn_get (replay
) == NULL
);
2430 /* Check if we're stepping a breakpoint.
2432 For reverse-stepping, this check is after the step. There is logic in
2433 infrun.c that handles reverse-stepping separately. See, for example,
2434 proceed and adjust_pc_after_break.
2436 This code assumes that for reverse-stepping, PC points to the last
2437 de-executed instruction, whereas for forward-stepping PC points to the
2438 next to-be-executed instruction. */
2439 if (record_btrace_replay_at_breakpoint (tp
))
2440 return btrace_step_stopped ();
2442 return btrace_step_spurious ();
2445 /* Step a single thread. */
2447 static struct target_waitstatus
2448 record_btrace_step_thread (struct thread_info
*tp
)
2450 struct btrace_thread_info
*btinfo
;
2451 struct target_waitstatus status
;
2452 btrace_thread_flags flags
;
2454 btinfo
= &tp
->btrace
;
2456 flags
= btinfo
->flags
& (BTHR_MOVE
| BTHR_STOP
);
2457 btinfo
->flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
2459 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp
),
2460 target_pid_to_str (tp
->ptid
).c_str (), flags
.raw (),
2461 btrace_thread_flag_to_str (flags
));
2463 /* We can't step without an execution history. */
2464 if ((flags
& BTHR_MOVE
) != 0 && btrace_is_empty (tp
))
2465 return btrace_step_no_history ();
2470 internal_error (__FILE__
, __LINE__
, _("invalid stepping type."));
2473 return btrace_step_stopped_on_request ();
2476 status
= record_btrace_single_step_forward (tp
);
2477 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2480 return btrace_step_stopped ();
2483 status
= record_btrace_single_step_backward (tp
);
2484 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2487 return btrace_step_stopped ();
2490 status
= record_btrace_single_step_forward (tp
);
2491 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2494 btinfo
->flags
|= flags
;
2495 return btrace_step_again ();
2498 status
= record_btrace_single_step_backward (tp
);
2499 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2502 btinfo
->flags
|= flags
;
2503 return btrace_step_again ();
2506 /* We keep threads moving at the end of their execution history. The wait
2507 method will stop the thread for whom the event is reported. */
2508 if (status
.kind
== TARGET_WAITKIND_NO_HISTORY
)
2509 btinfo
->flags
|= flags
;
2514 /* Announce further events if necessary. */
2517 record_btrace_maybe_mark_async_event
2518 (const std::vector
<thread_info
*> &moving
,
2519 const std::vector
<thread_info
*> &no_history
)
2521 bool more_moving
= !moving
.empty ();
2522 bool more_no_history
= !no_history
.empty ();;
2524 if (!more_moving
&& !more_no_history
)
2528 DEBUG ("movers pending");
2530 if (more_no_history
)
2531 DEBUG ("no-history pending");
2533 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
2536 /* The wait method of target record-btrace. */
2539 record_btrace_target::wait (ptid_t ptid
, struct target_waitstatus
*status
,
2540 target_wait_flags options
)
2542 std::vector
<thread_info
*> moving
;
2543 std::vector
<thread_info
*> no_history
;
2545 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid
).c_str (),
2546 (unsigned) options
);
2548 /* As long as we're not replaying, just forward the request. */
2549 if ((::execution_direction
!= EXEC_REVERSE
)
2550 && !record_is_replaying (minus_one_ptid
))
2552 return this->beneath ()->wait (ptid
, status
, options
);
2555 /* Keep a work list of moving threads. */
2556 process_stratum_target
*proc_target
= current_inferior ()->process_target ();
2557 for (thread_info
*tp
: all_non_exited_threads (proc_target
, ptid
))
2558 if ((tp
->btrace
.flags
& (BTHR_MOVE
| BTHR_STOP
)) != 0)
2559 moving
.push_back (tp
);
2561 if (moving
.empty ())
2563 *status
= btrace_step_no_resumed ();
2565 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid
).c_str (),
2566 target_waitstatus_to_string (status
).c_str ());
2571 /* Step moving threads one by one, one step each, until either one thread
2572 reports an event or we run out of threads to step.
2574 When stepping more than one thread, chances are that some threads reach
2575 the end of their execution history earlier than others. If we reported
2576 this immediately, all-stop on top of non-stop would stop all threads and
2577 resume the same threads next time. And we would report the same thread
2578 having reached the end of its execution history again.
2580 In the worst case, this would starve the other threads. But even if other
2581 threads would be allowed to make progress, this would result in far too
2582 many intermediate stops.
2584 We therefore delay the reporting of "no execution history" until we have
2585 nothing else to report. By this time, all threads should have moved to
2586 either the beginning or the end of their execution history. There will
2587 be a single user-visible stop. */
2588 struct thread_info
*eventing
= NULL
;
2589 while ((eventing
== NULL
) && !moving
.empty ())
2591 for (unsigned int ix
= 0; eventing
== NULL
&& ix
< moving
.size ();)
2593 thread_info
*tp
= moving
[ix
];
2595 *status
= record_btrace_step_thread (tp
);
2597 switch (status
->kind
)
2599 case TARGET_WAITKIND_IGNORE
:
2603 case TARGET_WAITKIND_NO_HISTORY
:
2604 no_history
.push_back (ordered_remove (moving
, ix
));
2608 eventing
= unordered_remove (moving
, ix
);
2614 if (eventing
== NULL
)
2616 /* We started with at least one moving thread. This thread must have
2617 either stopped or reached the end of its execution history.
2619 In the former case, EVENTING must not be NULL.
2620 In the latter case, NO_HISTORY must not be empty. */
2621 gdb_assert (!no_history
.empty ());
2623 /* We kept threads moving at the end of their execution history. Stop
2624 EVENTING now that we are going to report its stop. */
2625 eventing
= unordered_remove (no_history
, 0);
2626 eventing
->btrace
.flags
&= ~BTHR_MOVE
;
2628 *status
= btrace_step_no_history ();
2631 gdb_assert (eventing
!= NULL
);
2633 /* We kept threads replaying at the end of their execution history. Stop
2634 replaying EVENTING now that we are going to report its stop. */
2635 record_btrace_stop_replaying_at_end (eventing
);
2637 /* Stop all other threads. */
2638 if (!target_is_non_stop_p ())
2640 for (thread_info
*tp
: current_inferior ()->non_exited_threads ())
2641 record_btrace_cancel_resume (tp
);
2644 /* In async mode, we need to announce further events. */
2645 if (target_is_async_p ())
2646 record_btrace_maybe_mark_async_event (moving
, no_history
);
2648 /* Start record histories anew from the current position. */
2649 record_btrace_clear_histories (&eventing
->btrace
);
2651 /* We moved the replay position but did not update registers. */
2652 registers_changed_thread (eventing
);
2654 DEBUG ("wait ended by thread %s (%s): %s",
2655 print_thread_id (eventing
),
2656 target_pid_to_str (eventing
->ptid
).c_str (),
2657 target_waitstatus_to_string (status
).c_str ());
2659 return eventing
->ptid
;
2662 /* The stop method of target record-btrace. */
2665 record_btrace_target::stop (ptid_t ptid
)
2667 DEBUG ("stop %s", target_pid_to_str (ptid
).c_str ());
2669 /* As long as we're not replaying, just forward the request. */
2670 if ((::execution_direction
!= EXEC_REVERSE
)
2671 && !record_is_replaying (minus_one_ptid
))
2673 this->beneath ()->stop (ptid
);
2677 process_stratum_target
*proc_target
2678 = current_inferior ()->process_target ();
2680 for (thread_info
*tp
: all_non_exited_threads (proc_target
, ptid
))
2682 tp
->btrace
.flags
&= ~BTHR_MOVE
;
2683 tp
->btrace
.flags
|= BTHR_STOP
;
2688 /* The can_execute_reverse method of target record-btrace. */
2691 record_btrace_target::can_execute_reverse ()
2696 /* The stopped_by_sw_breakpoint method of target record-btrace. */
2699 record_btrace_target::stopped_by_sw_breakpoint ()
2701 if (record_is_replaying (minus_one_ptid
))
2703 struct thread_info
*tp
= inferior_thread ();
2705 return tp
->btrace
.stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
;
2708 return this->beneath ()->stopped_by_sw_breakpoint ();
2711 /* The supports_stopped_by_sw_breakpoint method of target
2715 record_btrace_target::supports_stopped_by_sw_breakpoint ()
2717 if (record_is_replaying (minus_one_ptid
))
2720 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
2723 /* The stopped_by_sw_breakpoint method of target record-btrace. */
2726 record_btrace_target::stopped_by_hw_breakpoint ()
2728 if (record_is_replaying (minus_one_ptid
))
2730 struct thread_info
*tp
= inferior_thread ();
2732 return tp
->btrace
.stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
;
2735 return this->beneath ()->stopped_by_hw_breakpoint ();
2738 /* The supports_stopped_by_hw_breakpoint method of target
2742 record_btrace_target::supports_stopped_by_hw_breakpoint ()
2744 if (record_is_replaying (minus_one_ptid
))
2747 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
2750 /* The update_thread_list method of target record-btrace. */
2753 record_btrace_target::update_thread_list ()
2755 /* We don't add or remove threads during replay. */
2756 if (record_is_replaying (minus_one_ptid
))
2759 /* Forward the request. */
2760 this->beneath ()->update_thread_list ();
2763 /* The thread_alive method of target record-btrace. */
2766 record_btrace_target::thread_alive (ptid_t ptid
)
2768 /* We don't add or remove threads during replay. */
2769 if (record_is_replaying (minus_one_ptid
))
2772 /* Forward the request. */
2773 return this->beneath ()->thread_alive (ptid
);
2776 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2780 record_btrace_set_replay (struct thread_info
*tp
,
2781 const struct btrace_insn_iterator
*it
)
2783 struct btrace_thread_info
*btinfo
;
2785 btinfo
= &tp
->btrace
;
2788 record_btrace_stop_replaying (tp
);
2791 if (btinfo
->replay
== NULL
)
2792 record_btrace_start_replaying (tp
);
2793 else if (btrace_insn_cmp (btinfo
->replay
, it
) == 0)
2796 *btinfo
->replay
= *it
;
2797 registers_changed_thread (tp
);
2800 /* Start anew from the new replay position. */
2801 record_btrace_clear_histories (btinfo
);
2803 inferior_thread ()->suspend
.stop_pc
2804 = regcache_read_pc (get_current_regcache ());
2805 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
2808 /* The goto_record_begin method of target record-btrace. */
2811 record_btrace_target::goto_record_begin ()
2813 struct thread_info
*tp
;
2814 struct btrace_insn_iterator begin
;
2816 tp
= require_btrace_thread ();
2818 btrace_insn_begin (&begin
, &tp
->btrace
);
2820 /* Skip gaps at the beginning of the trace. */
2821 while (btrace_insn_get (&begin
) == NULL
)
2825 steps
= btrace_insn_next (&begin
, 1);
2827 error (_("No trace."));
2830 record_btrace_set_replay (tp
, &begin
);
2833 /* The goto_record_end method of target record-btrace. */
2836 record_btrace_target::goto_record_end ()
2838 struct thread_info
*tp
;
2840 tp
= require_btrace_thread ();
2842 record_btrace_set_replay (tp
, NULL
);
2845 /* The goto_record method of target record-btrace. */
2848 record_btrace_target::goto_record (ULONGEST insn
)
2850 struct thread_info
*tp
;
2851 struct btrace_insn_iterator it
;
2852 unsigned int number
;
2857 /* Check for wrap-arounds. */
2859 error (_("Instruction number out of range."));
2861 tp
= require_btrace_thread ();
2863 found
= btrace_find_insn_by_number (&it
, &tp
->btrace
, number
);
2865 /* Check if the instruction could not be found or is a gap. */
2866 if (found
== 0 || btrace_insn_get (&it
) == NULL
)
2867 error (_("No such instruction."));
2869 record_btrace_set_replay (tp
, &it
);
2872 /* The record_stop_replaying method of target record-btrace. */
2875 record_btrace_target::record_stop_replaying ()
2877 for (thread_info
*tp
: current_inferior ()->non_exited_threads ())
2878 record_btrace_stop_replaying (tp
);
2881 /* The execution_direction target method. */
2883 enum exec_direction_kind
2884 record_btrace_target::execution_direction ()
2886 return record_btrace_resume_exec_dir
;
2889 /* The prepare_to_generate_core target method. */
2892 record_btrace_target::prepare_to_generate_core ()
2894 record_btrace_generating_corefile
= 1;
2897 /* The done_generating_core target method. */
2900 record_btrace_target::done_generating_core ()
2902 record_btrace_generating_corefile
= 0;
2905 /* Start recording in BTS format. */
2908 cmd_record_btrace_bts_start (const char *args
, int from_tty
)
2910 if (args
!= NULL
&& *args
!= 0)
2911 error (_("Invalid argument."));
2913 record_btrace_conf
.format
= BTRACE_FORMAT_BTS
;
2917 execute_command ("target record-btrace", from_tty
);
2919 catch (const gdb_exception
&exception
)
2921 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2926 /* Start recording in Intel Processor Trace format. */
2929 cmd_record_btrace_pt_start (const char *args
, int from_tty
)
2931 if (args
!= NULL
&& *args
!= 0)
2932 error (_("Invalid argument."));
2934 record_btrace_conf
.format
= BTRACE_FORMAT_PT
;
2938 execute_command ("target record-btrace", from_tty
);
2940 catch (const gdb_exception
&exception
)
2942 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2947 /* Alias for "target record". */
2950 cmd_record_btrace_start (const char *args
, int from_tty
)
2952 if (args
!= NULL
&& *args
!= 0)
2953 error (_("Invalid argument."));
2955 record_btrace_conf
.format
= BTRACE_FORMAT_PT
;
2959 execute_command ("target record-btrace", from_tty
);
2961 catch (const gdb_exception
&exception
)
2963 record_btrace_conf
.format
= BTRACE_FORMAT_BTS
;
2967 execute_command ("target record-btrace", from_tty
);
2969 catch (const gdb_exception
&ex
)
2971 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2977 /* The "show record btrace replay-memory-access" command. */
2980 cmd_show_replay_memory_access (struct ui_file
*file
, int from_tty
,
2981 struct cmd_list_element
*c
, const char *value
)
2983 fprintf_filtered (gdb_stdout
, _("Replay memory access is %s.\n"),
2984 replay_memory_access
);
2987 /* The "set record btrace cpu none" command. */
2990 cmd_set_record_btrace_cpu_none (const char *args
, int from_tty
)
2992 if (args
!= nullptr && *args
!= 0)
2993 error (_("Trailing junk: '%s'."), args
);
2995 record_btrace_cpu_state
= CS_NONE
;
2998 /* The "set record btrace cpu auto" command. */
3001 cmd_set_record_btrace_cpu_auto (const char *args
, int from_tty
)
3003 if (args
!= nullptr && *args
!= 0)
3004 error (_("Trailing junk: '%s'."), args
);
3006 record_btrace_cpu_state
= CS_AUTO
;
3009 /* The "set record btrace cpu" command. */
3012 cmd_set_record_btrace_cpu (const char *args
, int from_tty
)
3014 if (args
== nullptr)
3017 /* We use a hard-coded vendor string for now. */
3018 unsigned int family
, model
, stepping
;
3019 int l1
, l2
, matches
= sscanf (args
, "intel: %u/%u%n/%u%n", &family
,
3020 &model
, &l1
, &stepping
, &l2
);
3023 if (strlen (args
) != l2
)
3024 error (_("Trailing junk: '%s'."), args
+ l2
);
3026 else if (matches
== 2)
3028 if (strlen (args
) != l1
)
3029 error (_("Trailing junk: '%s'."), args
+ l1
);
3034 error (_("Bad format. See \"help set record btrace cpu\"."));
3036 if (USHRT_MAX
< family
)
3037 error (_("Cpu family too big."));
3039 if (UCHAR_MAX
< model
)
3040 error (_("Cpu model too big."));
3042 if (UCHAR_MAX
< stepping
)
3043 error (_("Cpu stepping too big."));
3045 record_btrace_cpu
.vendor
= CV_INTEL
;
3046 record_btrace_cpu
.family
= family
;
3047 record_btrace_cpu
.model
= model
;
3048 record_btrace_cpu
.stepping
= stepping
;
3050 record_btrace_cpu_state
= CS_CPU
;
3053 /* The "show record btrace cpu" command. */
3056 cmd_show_record_btrace_cpu (const char *args
, int from_tty
)
3058 if (args
!= nullptr && *args
!= 0)
3059 error (_("Trailing junk: '%s'."), args
);
3061 switch (record_btrace_cpu_state
)
3064 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3068 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3072 switch (record_btrace_cpu
.vendor
)
3075 if (record_btrace_cpu
.stepping
== 0)
3076 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3077 record_btrace_cpu
.family
,
3078 record_btrace_cpu
.model
);
3080 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3081 record_btrace_cpu
.family
,
3082 record_btrace_cpu
.model
,
3083 record_btrace_cpu
.stepping
);
3088 error (_("Internal error: bad cpu state."));
3091 /* The "record bts buffer-size" show value function. */
3094 show_record_bts_buffer_size_value (struct ui_file
*file
, int from_tty
,
3095 struct cmd_list_element
*c
,
3098 fprintf_filtered (file
, _("The record/replay bts buffer size is %s.\n"),
3102 /* The "record pt buffer-size" show value function. */
3105 show_record_pt_buffer_size_value (struct ui_file
*file
, int from_tty
,
3106 struct cmd_list_element
*c
,
3109 fprintf_filtered (file
, _("The record/replay pt buffer size is %s.\n"),
3113 /* Initialize btrace commands. */
3115 void _initialize_record_btrace ();
3117 _initialize_record_btrace ()
3119 add_prefix_cmd ("btrace", class_obscure
, cmd_record_btrace_start
,
3120 _("Start branch trace recording."), &record_btrace_cmdlist
,
3121 "record btrace ", 0, &record_cmdlist
);
3122 add_alias_cmd ("b", "btrace", class_obscure
, 1, &record_cmdlist
);
3124 add_cmd ("bts", class_obscure
, cmd_record_btrace_bts_start
,
3126 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3127 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3128 This format may not be available on all processors."),
3129 &record_btrace_cmdlist
);
3130 add_alias_cmd ("bts", "btrace bts", class_obscure
, 1, &record_cmdlist
);
3132 add_cmd ("pt", class_obscure
, cmd_record_btrace_pt_start
,
3134 Start branch trace recording in Intel Processor Trace format.\n\n\
3135 This format may not be available on all processors."),
3136 &record_btrace_cmdlist
);
3137 add_alias_cmd ("pt", "btrace pt", class_obscure
, 1, &record_cmdlist
);
3139 add_basic_prefix_cmd ("btrace", class_support
,
3140 _("Set record options."), &set_record_btrace_cmdlist
,
3141 "set record btrace ", 0, &set_record_cmdlist
);
3143 add_show_prefix_cmd ("btrace", class_support
,
3144 _("Show record options."), &show_record_btrace_cmdlist
,
3145 "show record btrace ", 0, &show_record_cmdlist
);
3147 add_setshow_enum_cmd ("replay-memory-access", no_class
,
3148 replay_memory_access_types
, &replay_memory_access
, _("\
3149 Set what memory accesses are allowed during replay."), _("\
3150 Show what memory accesses are allowed during replay."),
3151 _("Default is READ-ONLY.\n\n\
3152 The btrace record target does not trace data.\n\
3153 The memory therefore corresponds to the live target and not \
3154 to the current replay position.\n\n\
3155 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3156 When READ-WRITE, allow accesses to read-only and read-write memory during \
3158 NULL
, cmd_show_replay_memory_access
,
3159 &set_record_btrace_cmdlist
,
3160 &show_record_btrace_cmdlist
);
3162 add_prefix_cmd ("cpu", class_support
, cmd_set_record_btrace_cpu
,
3164 Set the cpu to be used for trace decode.\n\n\
3165 The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3166 For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
3167 When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3168 The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3169 When GDB does not support that cpu, this option can be used to enable\n\
3170 workarounds for a similar cpu that GDB supports.\n\n\
3171 When set to \"none\", errata workarounds are disabled."),
3172 &set_record_btrace_cpu_cmdlist
,
3173 "set record btrace cpu ", 1,
3174 &set_record_btrace_cmdlist
);
3176 add_cmd ("auto", class_support
, cmd_set_record_btrace_cpu_auto
, _("\
3177 Automatically determine the cpu to be used for trace decode."),
3178 &set_record_btrace_cpu_cmdlist
);
3180 add_cmd ("none", class_support
, cmd_set_record_btrace_cpu_none
, _("\
3181 Do not enable errata workarounds for trace decode."),
3182 &set_record_btrace_cpu_cmdlist
);
3184 add_cmd ("cpu", class_support
, cmd_show_record_btrace_cpu
, _("\
3185 Show the cpu to be used for trace decode."),
3186 &show_record_btrace_cmdlist
);
3188 add_basic_prefix_cmd ("bts", class_support
,
3189 _("Set record btrace bts options."),
3190 &set_record_btrace_bts_cmdlist
,
3191 "set record btrace bts ", 0,
3192 &set_record_btrace_cmdlist
);
3194 add_show_prefix_cmd ("bts", class_support
,
3195 _("Show record btrace bts options."),
3196 &show_record_btrace_bts_cmdlist
,
3197 "show record btrace bts ", 0,
3198 &show_record_btrace_cmdlist
);
3200 add_setshow_uinteger_cmd ("buffer-size", no_class
,
3201 &record_btrace_conf
.bts
.size
,
3202 _("Set the record/replay bts buffer size."),
3203 _("Show the record/replay bts buffer size."), _("\
3204 When starting recording request a trace buffer of this size. \
3205 The actual buffer size may differ from the requested size. \
3206 Use \"info record\" to see the actual buffer size.\n\n\
3207 Bigger buffers allow longer recording but also take more time to process \
3208 the recorded execution trace.\n\n\
3209 The trace buffer size may not be changed while recording."), NULL
,
3210 show_record_bts_buffer_size_value
,
3211 &set_record_btrace_bts_cmdlist
,
3212 &show_record_btrace_bts_cmdlist
);
3214 add_basic_prefix_cmd ("pt", class_support
,
3215 _("Set record btrace pt options."),
3216 &set_record_btrace_pt_cmdlist
,
3217 "set record btrace pt ", 0,
3218 &set_record_btrace_cmdlist
);
3220 add_show_prefix_cmd ("pt", class_support
,
3221 _("Show record btrace pt options."),
3222 &show_record_btrace_pt_cmdlist
,
3223 "show record btrace pt ", 0,
3224 &show_record_btrace_cmdlist
);
3226 add_setshow_uinteger_cmd ("buffer-size", no_class
,
3227 &record_btrace_conf
.pt
.size
,
3228 _("Set the record/replay pt buffer size."),
3229 _("Show the record/replay pt buffer size."), _("\
3230 Bigger buffers allow longer recording but also take more time to process \
3231 the recorded execution.\n\
3232 The actual buffer size may differ from the requested size. Use \"info record\" \
3233 to see the actual buffer size."), NULL
, show_record_pt_buffer_size_value
,
3234 &set_record_btrace_pt_cmdlist
,
3235 &show_record_btrace_pt_cmdlist
);
3237 add_target (record_btrace_target_info
, record_btrace_target_open
);
3239 bfcache
= htab_create_alloc (50, bfcache_hash
, bfcache_eq
, NULL
,
3242 record_btrace_conf
.bts
.size
= 64 * 1024;
3243 record_btrace_conf
.pt
.size
= 16 * 1024;