/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2024 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "gdbthread.h"
#include "filenames.h"
#include "gdbsupport/rsp-low.h"
#include "cli/cli-cmds.h"
#include "cli/cli-utils.h"
#include "extension.h"

/* For maintenance commands.  */
#include "record-btrace.h"
/* Command lists for btrace maintenance commands.  */
static struct cmd_list_element *maint_btrace_cmdlist;
static struct cmd_list_element *maint_btrace_set_cmdlist;
static struct cmd_list_element *maint_btrace_show_cmdlist;
static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
static struct cmd_list_element *maint_btrace_pt_show_cmdlist;
/* Control whether to skip PAD packets when computing the packet history.  */
static bool maint_btrace_pt_skip_pad = true;

static void btrace_add_pc (struct thread_info *tp);
/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	gdb_printf (gdb_stdlog,						\
		    "[btrace] " msg "\n", ##args);			\
    }									\
  while (0)
#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
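
/* For illustration (sketch, not part of the original source): both macros
   expand to a complete statement, so they are safe in unbraced if/else:

     if (tp != nullptr)
       DEBUG ("enable thread %s", print_thread_id (tp));
     else
       DEBUG ("no thread");

   Without the do ... while (0) wrapper, the trailing semicolon would end
   the if statement early and steal the else branch.  */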
/* Return the function name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_function_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return sym->print_name ();

  if (msym != NULL)
    return msym->print_name ();

  return "<unknown>";
}
/* Return the file name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_filename (const struct btrace_function *bfun)
{
  struct symbol *sym;
  const char *filename;

  sym = bfun->sym;

  if (sym != NULL)
    filename = symtab_to_filename_for_display (sym->symtab ());
  else
    filename = "<unknown>";

  return filename;
}

/* Return a string representation of the address of an instruction.
   This function never returns NULL.  */

static const char *
ftrace_print_insn_addr (const struct btrace_insn *insn)
{
  if (insn == NULL)
    return "<nil>";

  return core_addr_to_string_nz (insn->pc);
}
/* Print an ftrace debug status message.  */

static void
ftrace_debug (const struct btrace_function *bfun, const char *prefix)
{
  const char *fun, *file;
  unsigned int ibegin, iend;
  int level;

  fun = ftrace_print_function_name (bfun);
  file = ftrace_print_filename (bfun);
  level = bfun->level;

  ibegin = bfun->insn_offset;
  iend = ibegin + bfun->insn.size ();

  DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
		prefix, fun, file, level, ibegin, iend);
}
/* Return the number of instructions in a given function call segment.  */

static unsigned int
ftrace_call_num_insn (const struct btrace_function *bfun)
{
  if (bfun == NULL)
    return 0;

  /* A gap is always counted as one instruction.  */
  if (bfun->errcode != 0)
    return 1;

  return bfun->insn.size ();
}
/* Return the function segment with the given NUMBER or NULL if no such segment
   exists.  BTINFO is the branch trace information for the current thread.  */

static struct btrace_function *
ftrace_find_call_by_number (struct btrace_thread_info *btinfo,
			    unsigned int number)
{
  if (number == 0 || number > btinfo->functions.size ())
    return NULL;

  return &btinfo->functions[number - 1];
}
/* A const version of the function above.  */

static const struct btrace_function *
ftrace_find_call_by_number (const struct btrace_thread_info *btinfo,
			    unsigned int number)
{
  if (number == 0 || number > btinfo->functions.size ())
    return NULL;

  return &btinfo->functions[number - 1];
}
/* Return non-zero if BFUN does not match MFUN and FUN,
   return zero otherwise.  */

static int
ftrace_function_switched (const struct btrace_function *bfun,
			  const struct minimal_symbol *mfun,
			  const struct symbol *fun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  /* If the minimal symbol changed, we certainly switched functions.  */
  if (mfun != NULL && msym != NULL
      && strcmp (mfun->linkage_name (), msym->linkage_name ()) != 0)
    return 1;

  /* If the symbol changed, we certainly switched functions.  */
  if (fun != NULL && sym != NULL)
    {
      const char *bfname, *fname;

      /* Check the function name.  */
      if (strcmp (fun->linkage_name (), sym->linkage_name ()) != 0)
	return 1;

      /* Check the location of those functions, as well.  */
      bfname = symtab_to_fullname (sym->symtab ());
      fname = symtab_to_fullname (fun->symtab ());
      if (filename_cmp (fname, bfname) != 0)
	return 1;
    }

  /* If we lost symbol information, we switched functions.  */
  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
    return 1;

  /* If we gained symbol information, we switched functions.  */
  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
    return 1;

  return 0;
}
/* Allocate and initialize a new branch trace function segment at the end of
   the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.
   This invalidates all struct btrace_function pointers currently held.  */

static struct btrace_function *
ftrace_new_function (struct btrace_thread_info *btinfo,
		     struct minimal_symbol *mfun,
		     struct symbol *fun)
{
  int level;
  unsigned int number, insn_offset;

  if (btinfo->functions.empty ())
    {
      /* Start counting NUMBER and INSN_OFFSET at one.  */
      level = 0;
      number = 1;
      insn_offset = 1;
    }
  else
    {
      const struct btrace_function *prev = &btinfo->functions.back ();
      level = prev->level;
      number = prev->number + 1;
      insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
    }

  return &btinfo->functions.emplace_back (mfun, fun, number, insn_offset,
					  level);
}
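
/* For illustration (not part of the original source): appending segments
   with two and three instructions chains NUMBER and INSN_OFFSET like so
   (a gap counts as one instruction via ftrace_call_num_insn):

     segment   number   insn_offset
     first          1             1
     second         2             3    = 1 + 2
     third          3             6    = 3 + 3  */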
/* Update the UP field of a function segment.  */

static void
ftrace_update_caller (struct btrace_function *bfun,
		      struct btrace_function *caller,
		      btrace_function_flags flags)
{
  if (bfun->up != 0)
    ftrace_debug (bfun, "updating caller");

  bfun->up = caller->number;
  bfun->flags = flags;

  ftrace_debug (bfun, "set caller");
  ftrace_debug (caller, "..to");
}
/* Fix up the caller for all segments of a function.  */

static void
ftrace_fixup_caller (struct btrace_thread_info *btinfo,
		     struct btrace_function *bfun,
		     struct btrace_function *caller,
		     btrace_function_flags flags)
{
  unsigned int prev, next;

  prev = bfun->prev;
  next = bfun->next;
  ftrace_update_caller (bfun, caller, flags);

  /* Update all function segments belonging to the same function.  */
  for (; prev != 0; prev = bfun->prev)
    {
      bfun = ftrace_find_call_by_number (btinfo, prev);
      ftrace_update_caller (bfun, caller, flags);
    }

  for (; next != 0; next = bfun->next)
    {
      bfun = ftrace_find_call_by_number (btinfo, next);
      ftrace_update_caller (bfun, caller, flags);
    }
}
/* Add a new function segment for a call at the end of the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_call (struct btrace_thread_info *btinfo,
		 struct minimal_symbol *mfun,
		 struct symbol *fun)
{
  const unsigned int length = btinfo->functions.size ();
  struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);

  bfun->up = length;
  bfun->level += 1;

  ftrace_debug (bfun, "new call");

  return bfun;
}
/* Add a new function segment for a tail call at the end of the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_tailcall (struct btrace_thread_info *btinfo,
		     struct minimal_symbol *mfun,
		     struct symbol *fun)
{
  const unsigned int length = btinfo->functions.size ();
  struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);

  bfun->up = length;
  bfun->level += 1;
  bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;

  ftrace_debug (bfun, "new tail call");

  return bfun;
}
/* Return the caller of BFUN or NULL if there is none.  This function skips
   tail calls in the call chain.  BTINFO is the branch trace information for
   the current thread.  */

static struct btrace_function *
ftrace_get_caller (struct btrace_thread_info *btinfo,
		   struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
    if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
      return ftrace_find_call_by_number (btinfo, bfun->up);

  return NULL;
}
/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
   symbol information.  BTINFO is the branch trace information for the current
   thread.  */

static struct btrace_function *
ftrace_find_caller (struct btrace_thread_info *btinfo,
		    struct btrace_function *bfun,
		    struct minimal_symbol *mfun,
		    struct symbol *fun)
{
  for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
    {
      /* Skip functions with incompatible symbol information.  */
      if (ftrace_function_switched (bfun, mfun, fun))
	continue;

      /* This is the function segment we're looking for.  */
      break;
    }

  return bfun;
}
/* Find the innermost caller in the back trace of BFUN, skipping all
   function segments that do not end with a call instruction (e.g.
   tail calls ending with a jump).  BTINFO is the branch trace information for
   the current thread.  */

static struct btrace_function *
ftrace_find_call (struct btrace_thread_info *btinfo,
		  struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
    {
      /* Skip gaps.  */
      if (bfun->errcode != 0)
	continue;

      btrace_insn &last = bfun->insn.back ();

      if (last.iclass == BTRACE_INSN_CALL)
	break;
    }

  return bfun;
}
/* Add a continuation segment for a function into which we return at the end of
   the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_return (struct btrace_thread_info *btinfo,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  struct btrace_function *prev, *bfun, *caller;

  bfun = ftrace_new_function (btinfo, mfun, fun);
  prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_call_by_number (btinfo, prev->up);
  caller = ftrace_find_caller (btinfo, caller, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
	 function instance.  */
      gdb_assert (caller->next == 0);

      caller->next = bfun->number;
      bfun->prev = caller->number;

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
	 wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call_by_number (btinfo, prev->up);
      caller = ftrace_find_call (btinfo, caller);
      if (caller == NULL)
	{
	  /* There is no call in PREV's back trace.  We assume that the
	     branch trace did not include it.  */

	  /* Let's find the topmost function and add a new caller for it.
	     This should handle a series of initial tail calls.  */
	  while (prev->up != 0)
	    prev = ftrace_find_call_by_number (btinfo, prev->up);

	  bfun->level = prev->level - 1;

	  /* Fix up the call stack for PREV.  */
	  ftrace_fixup_caller (btinfo, prev, bfun, BFUN_UP_LINKS_TO_RET);

	  ftrace_debug (bfun, "new return - no caller");
	}
      else
	{
	  /* There is a call in PREV's back trace to which we should have
	     returned but didn't.  Let's start a new, separate back trace
	     from PREV's level.  */
	  bfun->level = prev->level - 1;

	  /* We fix up the back trace for PREV but leave other function segments
	     on the same level as they are.
	     This should handle things like schedule () correctly where we're
	     switching contexts.  */
	  prev->up = bfun->number;
	  prev->flags = BFUN_UP_LINKS_TO_RET;

	  ftrace_debug (bfun, "new return - unknown caller");
	}
    }

  return bfun;
}
/* Add a new function segment for a function switch at the end of the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_switch (struct btrace_thread_info *btinfo,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  struct btrace_function *prev, *bfun;

  /* This is an unexplained function switch.  We can't really be sure about the
     call stack, yet the best I can think of right now is to preserve it.  */
  bfun = ftrace_new_function (btinfo, mfun, fun);
  prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);
  bfun->up = prev->up;
  bfun->flags = prev->flags;

  ftrace_debug (bfun, "new switch");

  return bfun;
}
/* Add a new function segment for a gap in the trace due to a decode error at
   the end of the trace.
   BTINFO is the branch trace information for the current thread.
   ERRCODE is the format-specific error code.  */

static struct btrace_function *
ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode,
		std::vector<unsigned int> &gaps)
{
  struct btrace_function *bfun;

  if (btinfo->functions.empty ())
    bfun = ftrace_new_function (btinfo, NULL, NULL);
  else
    {
      /* We hijack the previous function segment if it was empty.  */
      bfun = &btinfo->functions.back ();
      if (bfun->errcode != 0 || !bfun->insn.empty ())
	bfun = ftrace_new_function (btinfo, NULL, NULL);
    }

  bfun->errcode = errcode;
  gaps.push_back (bfun->number);

  ftrace_debug (bfun, "new gap");

  return bfun;
}
/* Update the current function segment at the end of the trace in BTINFO with
   respect to the instruction at PC.  This may create new function segments.
   Return the chronologically latest function segment, never NULL.  */

static struct btrace_function *
ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc)
{
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_function *bfun;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bound_minimal_symbol bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function, we create one.  */
  if (btinfo->functions.empty ())
    return ftrace_new_function (btinfo, mfun, fun);

  /* If we had a gap before, we create a function.  */
  bfun = &btinfo->functions.back ();
  if (bfun->errcode != 0)
    return ftrace_new_function (btinfo, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  btrace_insn *last = NULL;
  if (!bfun->insn.empty ())
    last = &bfun->insn.back ();

  if (last != NULL)
    {
      switch (last->iclass)
	{
	case BTRACE_INSN_RETURN:
	  {
	    const char *fname;

	    /* On some systems, _dl_runtime_resolve returns to the resolved
	       function instead of jumping to it.  From our perspective,
	       however, this is a tailcall.
	       If we treated it as return, we wouldn't be able to find the
	       resolved function in our stack back trace.  Hence, we would
	       lose the current stack back trace and start anew with an empty
	       back trace.  When the resolved function returns, we would then
	       create a stack back trace with the same function names but
	       different frame id's.  This will confuse stepping.  */
	    fname = ftrace_print_function_name (bfun);
	    if (strcmp (fname, "_dl_runtime_resolve") == 0)
	      return ftrace_new_tailcall (btinfo, mfun, fun);

	    return ftrace_new_return (btinfo, mfun, fun);
	  }

	case BTRACE_INSN_CALL:
	  /* Ignore calls to the next instruction.  They are used for PIC.  */
	  if (last->pc + last->size == pc)
	    break;

	  return ftrace_new_call (btinfo, mfun, fun);

	case BTRACE_INSN_JUMP:
	  {
	    CORE_ADDR start;

	    start = get_pc_function_start (pc);

	    /* A jump to the start of a function is (typically) a tail call.  */
	    if (start == pc)
	      return ftrace_new_tailcall (btinfo, mfun, fun);

	    /* Some versions of _Unwind_RaiseException use an indirect
	       jump to 'return' to the exception handler of the caller
	       handling the exception instead of a return.  Let's restrict
	       this heuristic to that and related functions.  */
	    const char *fname = ftrace_print_function_name (bfun);
	    if (strncmp (fname, "_Unwind_", strlen ("_Unwind_")) == 0)
	      {
		struct btrace_function *caller
		  = ftrace_find_call_by_number (btinfo, bfun->up);
		caller = ftrace_find_caller (btinfo, caller, mfun, fun);
		if (caller != NULL)
		  return ftrace_new_return (btinfo, mfun, fun);
	      }

	    /* If we can't determine the function for PC, we treat a jump at
	       the end of the block as tail call if we're switching functions
	       and as an intra-function branch if we don't.  */
	    if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
	      return ftrace_new_tailcall (btinfo, mfun, fun);

	    break;
	  }
	}
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
		    ftrace_print_insn_addr (last),
		    ftrace_print_function_name (bfun),
		    ftrace_print_filename (bfun));

      return ftrace_new_switch (btinfo, mfun, fun);
    }

  return bfun;
}
/* Add the instruction at PC to BFUN's instructions.  */

static void
ftrace_update_insns (struct btrace_function *bfun, const btrace_insn &insn)
{
  bfun->insn.push_back (insn);

  if (insn.iclass == BTRACE_INSN_AUX)
    bfun->flags |= BFUN_CONTAINS_AUX;

  if (record_debug > 1)
    ftrace_debug (bfun, "update insn");
}
/* Classify the instruction at PC.  */

static enum btrace_insn_class
ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum btrace_insn_class iclass;

  iclass = BTRACE_INSN_OTHER;
  try
    {
      if (gdbarch_insn_is_call (gdbarch, pc))
	iclass = BTRACE_INSN_CALL;
      else if (gdbarch_insn_is_ret (gdbarch, pc))
	iclass = BTRACE_INSN_RETURN;
      else if (gdbarch_insn_is_jump (gdbarch, pc))
	iclass = BTRACE_INSN_JUMP;
    }
  catch (const gdb_exception_error &error)
    {
    }

  return iclass;
}
/* Try to match the back trace at LHS to the back trace at RHS.  Returns the
   number of matching function segments or zero if the back traces do not
   match.  BTINFO is the branch trace information for the current thread.  */

static int
ftrace_match_backtrace (struct btrace_thread_info *btinfo,
			struct btrace_function *lhs,
			struct btrace_function *rhs)
{
  int matches;

  for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
    {
      if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
	return 0;

      lhs = ftrace_get_caller (btinfo, lhs);
      rhs = ftrace_get_caller (btinfo, rhs);
    }

  return matches;
}
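
/* For illustration (not part of the original source): with back traces
   bar <- foo <- main on both sides, the walk above returns 3.  If any
   level disagrees, e.g. RHS reads bar <- baz <- main, it returns 0 since
   the segments cannot belong to the same call stack.  */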
/* Add ADJUSTMENT to the level of BFUN and succeeding function segments.
   BTINFO is the branch trace information for the current thread.  */

static void
ftrace_fixup_level (struct btrace_thread_info *btinfo,
		    struct btrace_function *bfun, int adjustment)
{
  if (adjustment == 0)
    return;

  DEBUG_FTRACE ("fixup level (%+d)", adjustment);
  ftrace_debug (bfun, "..bfun");

  while (bfun != NULL)
    {
      bfun->level += adjustment;
      bfun = ftrace_find_call_by_number (btinfo, bfun->number + 1);
    }
}
/* Recompute the global level offset.  Traverse the function trace and compute
   the global level offset as the negative of the minimal function level.  */

static void
ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
{
  int level = INT_MAX;

  if (btinfo->functions.empty ())
    return;

  unsigned int length = btinfo->functions.size () - 1;
  for (unsigned int i = 0; i < length; ++i)
    level = std::min (level, btinfo->functions[i].level);

  /* The last function segment contains the current instruction, which is not
     really part of the trace.  If it contains just this one instruction, we
     ignore the segment.  */
  struct btrace_function *last = &btinfo->functions.back ();
  if (last->insn.size () != 1)
    level = std::min (level, last->level);

  DEBUG_FTRACE ("setting global level offset: %d", -level);
  btinfo->level = -level;
}
/* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
   ftrace_connect_backtrace.  BTINFO is the branch trace information for the
   current thread.  */

static void
ftrace_connect_bfun (struct btrace_thread_info *btinfo,
		     struct btrace_function *prev,
		     struct btrace_function *next)
{
  DEBUG_FTRACE ("connecting...");
  ftrace_debug (prev, "..prev");
  ftrace_debug (next, "..next");

  /* The function segments are not yet connected.  */
  gdb_assert (prev->next == 0);
  gdb_assert (next->prev == 0);

  prev->next = next->number;
  next->prev = prev->number;

  /* We may have moved NEXT to a different function level.  */
  ftrace_fixup_level (btinfo, next, prev->level - next->level);

  /* If we run out of back trace for one, let's use the other's.  */
  if (prev->up == 0)
    {
      const btrace_function_flags flags = next->flags;

      next = ftrace_find_call_by_number (btinfo, next->up);
      if (next != NULL)
	{
	  DEBUG_FTRACE ("using next's callers");
	  ftrace_fixup_caller (btinfo, prev, next, flags);
	}
    }
  else if (next->up == 0)
    {
      const btrace_function_flags flags = prev->flags;

      prev = ftrace_find_call_by_number (btinfo, prev->up);
      if (prev != NULL)
	{
	  DEBUG_FTRACE ("using prev's callers");
	  ftrace_fixup_caller (btinfo, next, prev, flags);
	}
    }
  else
    {
      /* PREV may have a tailcall caller, NEXT can't.  If it does, fixup the up
	 link to add the tail callers to NEXT's back trace.

	 This removes NEXT->UP from NEXT's back trace.  It will be added back
	 when connecting NEXT and PREV's callers - provided they exist.

	 If PREV's back trace consists of a series of tail calls without an
	 actual call, there will be no further connection and NEXT's caller will
	 be removed for good.  To catch this case, we handle it here and connect
	 the top of PREV's back trace to NEXT's caller.  */
      if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
	{
	  struct btrace_function *caller;
	  btrace_function_flags next_flags, prev_flags;

	  /* We checked NEXT->UP above so CALLER can't be NULL.  */
	  caller = ftrace_find_call_by_number (btinfo, next->up);
	  next_flags = next->flags;
	  prev_flags = prev->flags;

	  DEBUG_FTRACE ("adding prev's tail calls to next");

	  prev = ftrace_find_call_by_number (btinfo, prev->up);
	  ftrace_fixup_caller (btinfo, next, prev, prev_flags);

	  for (; prev != NULL; prev = ftrace_find_call_by_number (btinfo,
								  prev->up))
	    {
	      /* At the end of PREV's back trace, continue with CALLER.  */
	      if (prev->up == 0)
		{
		  DEBUG_FTRACE ("fixing up link for tailcall chain");
		  ftrace_debug (prev, "..top");
		  ftrace_debug (caller, "..up");

		  ftrace_fixup_caller (btinfo, prev, caller, next_flags);

		  /* If we skipped any tail calls, this may move CALLER to a
		     different function level.

		     Note that changing CALLER's level is only OK because we
		     know that this is the last iteration of the bottom-to-top
		     walk in ftrace_connect_backtrace.

		     Otherwise we will fix up CALLER's level when we connect it
		     to PREV's caller in the next iteration.  */
		  ftrace_fixup_level (btinfo, caller,
				      prev->level - caller->level - 1);
		  break;
		}

	      /* There's nothing to do if we find a real call.  */
	      if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
		{
		  DEBUG_FTRACE ("will fix up link in next iteration");
		  break;
		}
	    }
	}
    }
}
/* Connect function segments on the same level in the back trace at LHS and RHS.
   The back traces at LHS and RHS are expected to match according to
   ftrace_match_backtrace.  BTINFO is the branch trace information for the
   current thread.  */

static void
ftrace_connect_backtrace (struct btrace_thread_info *btinfo,
			  struct btrace_function *lhs,
			  struct btrace_function *rhs)
{
  while (lhs != NULL && rhs != NULL)
    {
      struct btrace_function *prev, *next;

      gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));

      /* Connecting LHS and RHS may change the up link.  */
      prev = lhs;
      next = rhs;

      lhs = ftrace_get_caller (btinfo, lhs);
      rhs = ftrace_get_caller (btinfo, rhs);

      ftrace_connect_bfun (btinfo, prev, next);
    }
}
/* Bridge the gap between two function segments left and right of a gap if their
   respective back traces match in at least MIN_MATCHES functions.  BTINFO is
   the branch trace information for the current thread.

   Returns non-zero if the gap could be bridged, zero otherwise.  */

static int
ftrace_bridge_gap (struct btrace_thread_info *btinfo,
		   struct btrace_function *lhs, struct btrace_function *rhs,
		   int min_matches)
{
  struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
  int best_matches;

  DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
		rhs->insn_offset - 1, min_matches);

  best_matches = 0;
  best_l = NULL;
  best_r = NULL;

  /* We search the back traces of LHS and RHS for valid connections and connect
     the two function segments that give the longest combined back trace.  */

  for (cand_l = lhs; cand_l != NULL;
       cand_l = ftrace_get_caller (btinfo, cand_l))
    for (cand_r = rhs; cand_r != NULL;
	 cand_r = ftrace_get_caller (btinfo, cand_r))
      {
	int matches;

	matches = ftrace_match_backtrace (btinfo, cand_l, cand_r);
	if (best_matches < matches)
	  {
	    best_matches = matches;
	    best_l = cand_l;
	    best_r = cand_r;
	  }
      }

  /* We need at least MIN_MATCHES matches.  */
  gdb_assert (min_matches > 0);
  if (best_matches < min_matches)
    return 0;

  DEBUG_FTRACE ("..matches: %d", best_matches);

  /* We will fix up the level of BEST_R and succeeding function segments such
     that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.

     This will ignore the level of RHS and following if BEST_R != RHS.  I.e. if
     BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).

     To catch this, we already fix up the level here where we can start at RHS
     instead of at BEST_R.  We will ignore the level fixup when connecting
     BEST_L to BEST_R as they will already be on the same level.  */
  ftrace_fixup_level (btinfo, rhs, best_l->level - best_r->level);

  ftrace_connect_backtrace (btinfo, best_l, best_r);

  return best_matches;
}
/* Try to bridge gaps due to overflow or decode errors by connecting the
   function segments that are separated by the gap.  */

static void
btrace_bridge_gaps (struct thread_info *tp, std::vector<unsigned int> &gaps)
{
  struct btrace_thread_info *btinfo = &tp->btrace;
  std::vector<unsigned int> remaining;
  int min_matches;

  DEBUG ("bridge gaps");

  /* We require a minimum amount of matches for bridging a gap.  The number of
     required matches will be lowered with each iteration.

     The more matches the higher our confidence that the bridging is correct.
     For big gaps or small traces, however, it may not be feasible to require a
     high number of matches.  */
  for (min_matches = 5; min_matches > 0; --min_matches)
    {
      /* Let's try to bridge as many gaps as we can.  In some cases, we need to
	 skip a gap and revisit it again after we closed later gaps.  */
      while (!gaps.empty ())
	{
	  for (const unsigned int number : gaps)
	    {
	      struct btrace_function *gap, *lhs, *rhs;
	      int bridged;

	      gap = ftrace_find_call_by_number (btinfo, number);

	      /* We may have a sequence of gaps if we run from one error into
		 the next as we try to re-sync onto the trace stream.  Ignore
		 all but the leftmost gap in such a sequence.

		 Also ignore gaps at the beginning of the trace.  */
	      lhs = ftrace_find_call_by_number (btinfo, gap->number - 1);
	      if (lhs == NULL || lhs->errcode != 0)
		continue;

	      /* Skip gaps to the right.  */
	      rhs = ftrace_find_call_by_number (btinfo, gap->number + 1);
	      while (rhs != NULL && rhs->errcode != 0)
		rhs = ftrace_find_call_by_number (btinfo, rhs->number + 1);

	      /* Ignore gaps at the end of the trace.  */
	      if (rhs == NULL)
		continue;

	      bridged = ftrace_bridge_gap (btinfo, lhs, rhs, min_matches);

	      /* Keep track of gaps we were not able to bridge and try again.
		 If we just pushed them to the end of GAPS we would risk an
		 infinite loop in case we simply cannot bridge a gap.  */
	      if (bridged == 0)
		remaining.push_back (number);
	    }

	  /* Let's see if we made any progress.  */
	  if (remaining.size () == gaps.size ())
	    break;

	  gaps.clear ();
	  gaps.swap (remaining);
	}

      /* We get here if either GAPS is empty or if GAPS equals REMAINING.  */
      if (gaps.empty ())
	break;

      remaining.clear ();
    }

  /* We may omit this in some cases.  Not sure it is worth the extra
     complication, though.  */
  ftrace_compute_global_level_offset (btinfo);
}
/* Compute the function branch trace from BTS trace.  */

static void
btrace_compute_ftrace_bts (struct thread_info *tp,
			   const struct btrace_data_bts *btrace,
			   std::vector<unsigned int> &gaps)
{
  /* We may end up doing target calls that require the current thread to be TP,
     for example reading memory through gdb_insn_length.  Make sure TP is the
     current thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (tp);

  struct btrace_thread_info *btinfo;
  unsigned int blk;
  int level;

  gdbarch *gdbarch = current_inferior ()->arch ();
  btinfo = &tp->btrace;
  blk = btrace->blocks->size ();

  if (btinfo->functions.empty ())
    level = INT_MAX;
  else
    level = -btinfo->level;

  while (blk != 0)
    {
      CORE_ADDR pc;

      blk -= 1;

      const btrace_block &block = btrace->blocks->at (blk);
      pc = block.begin;

      for (;;)
	{
	  struct btrace_function *bfun;
	  struct btrace_insn insn;
	  int size;

	  /* We should hit the end of the block.  Warn if we went too far.  */
	  if (block.end < pc)
	    {
	      /* Indicate the gap in the trace.  */
	      bfun = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW, gaps);

	      warning (_("Recorded trace may be corrupted at instruction "
			 "%u (pc = %s)."), bfun->insn_offset - 1,
		       core_addr_to_string_nz (pc));

	      break;
	    }

	  bfun = ftrace_update_function (btinfo, pc);

	  /* Maintain the function level offset.
	     For all but the last block, we do it here.  */
	  if (blk != 0)
	    level = std::min (level, bfun->level);

	  size = 0;
	  try
	    {
	      size = gdb_insn_length (gdbarch, pc);
	    }
	  catch (const gdb_exception_error &error)
	    {
	    }

	  insn.pc = pc;
	  insn.size = size;
	  insn.iclass = ftrace_classify_insn (gdbarch, pc);
	  insn.flags = 0;

	  ftrace_update_insns (bfun, insn);

	  /* We're done once we pushed the instruction at the end.  */
	  if (block.end == pc)
	    break;

	  /* We can't continue if we fail to compute the size.  */
	  if (size <= 0)
	    {
	      /* Indicate the gap in the trace.  We just added INSN so we're
		 not at the beginning.  */
	      bfun = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE, gaps);

	      warning (_("Recorded trace may be incomplete at instruction %u "
			 "(pc = %s)."), bfun->insn_offset - 1,
		       core_addr_to_string_nz (pc));

	      break;
	    }

	  pc += size;

	  /* Maintain the function level offset.
	     For the last block, we do it here to not consider the last
	     instruction.
	     Since the last instruction corresponds to the current instruction
	     and is not really part of the execution history, it shouldn't
	     affect the level.  */
	  if (blk == 0)
	    level = std::min (level, bfun->level);
	}
    }

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  btinfo->level = -level;
}
#if defined (HAVE_LIBIPT)

/* Return the btrace instruction class for the libipt instruction class
   ICLASS.  */

static enum btrace_insn_class
pt_reclassify_insn (enum pt_insn_class iclass)
{
  switch (iclass)
    {
    case ptic_call:
      return BTRACE_INSN_CALL;

    case ptic_return:
      return BTRACE_INSN_RETURN;

    case ptic_jump:
      return BTRACE_INSN_JUMP;

    default:
      return BTRACE_INSN_OTHER;
    }
}
/* Return the btrace instruction flags for INSN.  */

static btrace_insn_flags
pt_btrace_insn_flags (const struct pt_insn &insn)
{
  btrace_insn_flags flags = 0;

  if (insn.speculative)
    flags |= BTRACE_INSN_FLAG_SPECULATIVE;

  return flags;
}
/* Return the btrace instruction for INSN.  */

static btrace_insn
pt_btrace_insn (const struct pt_insn &insn)
{
  return {(CORE_ADDR) insn.ip, (gdb_byte) insn.size,
	  pt_reclassify_insn (insn.iclass),
	  pt_btrace_insn_flags (insn)};
}
#if defined (HAVE_PT_INSN_EVENT)
/* Helper for events that will result in an aux_insn.  */

static void
handle_pt_aux_insn (btrace_thread_info *btinfo, btrace_function *bfun,
		    std::string &aux_str, CORE_ADDR ip)
{
  btinfo->aux_data.emplace_back (std::move (aux_str));
  bfun = ftrace_update_function (btinfo, ip);

  btrace_insn insn {btinfo->aux_data.size () - 1, 0,
		    BTRACE_INSN_AUX, 0};

  ftrace_update_insns (bfun, insn);
}

#endif /* defined (HAVE_PT_INSN_EVENT) */
/* Handle instruction decode events (libipt-v2).  */

static int
handle_pt_insn_events (struct btrace_thread_info *btinfo,
		       struct pt_insn_decoder *decoder,
		       std::vector<unsigned int> &gaps, int status)
{
#if defined (HAVE_PT_INSN_EVENT)
  while (status & pts_event_pending)
    {
      struct btrace_function *bfun;
      struct pt_event event;
      uint64_t offset;

      status = pt_insn_event (decoder, &event, sizeof (event));
      if (status < 0)
	break;

      switch (event.type)
	{
	default:
	  break;

	case ptev_enabled:
	  if (event.status_update != 0)
	    break;

	  if (event.variant.enabled.resumed == 0 && !btinfo->functions.empty ())
	    {
	      bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);

	      pt_insn_get_offset (decoder, &offset);

	      warning (_("Non-contiguous trace at instruction %u (offset = 0x%"
			 PRIx64 ")."), bfun->insn_offset - 1, offset);
	    }

	  break;

	case ptev_overflow:
	  bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);

	  pt_insn_get_offset (decoder, &offset);

	  warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ")."),
		   bfun->insn_offset - 1, offset);

	  break;

#if defined (HAVE_STRUCT_PT_EVENT_VARIANT_PTWRITE)
	case ptev_ptwrite:
	  {
	    uint64_t pc = 0;
	    std::optional<std::string> ptw_string;

	    /* Lookup the PC if available.  The event often doesn't provide
	       one, so we look into the last function segment as well.
	       Looking further back makes limited sense for ptwrite.  */
	    if (event.ip_suppressed == 0)
	      pc = event.variant.ptwrite.ip;
	    else if (!btinfo->functions.empty ())
	      {
		std::vector<btrace_insn> &insns
		  = btinfo->functions.back ().insn;
		for (auto insn = insns.rbegin (); insn != insns.rend ();
		     ++insn)
		  {
		    switch (insn->iclass)
		      {
		      case BTRACE_INSN_AUX:
			continue;

		      case BTRACE_INSN_OTHER:
		      case BTRACE_INSN_CALL:
		      case BTRACE_INSN_RETURN:
		      case BTRACE_INSN_JUMP:
			pc = insn->pc;
			break;
		      /* No default to rely on compiler warnings.  */
		      }
		    break;
		  }
	      }

	    if (pc == 0)
	      warning (_("Failed to determine the PC for ptwrite."));

	    if (btinfo->ptw_callback_fun != nullptr)
	      ptw_string
		= btinfo->ptw_callback_fun (event.variant.ptwrite.payload,
					    pc, btinfo->ptw_context);

	    if (ptw_string.has_value () && (*ptw_string).empty ())
	      continue;

	    if (!ptw_string.has_value ())
	      ptw_string = hex_string (event.variant.ptwrite.payload);

	    handle_pt_aux_insn (btinfo, bfun, *ptw_string, pc);

	    break;
	  }
#endif /* defined (HAVE_STRUCT_PT_EVENT_VARIANT_PTWRITE) */
	}
    }
#endif /* defined (HAVE_PT_INSN_EVENT) */

  return status;
}
/* Handle events indicated by flags in INSN (libipt-v1).  */

static void
handle_pt_insn_event_flags (struct btrace_thread_info *btinfo,
			    struct pt_insn_decoder *decoder,
			    const struct pt_insn &insn,
			    std::vector<unsigned int> &gaps)
{
#if defined (HAVE_STRUCT_PT_INSN_ENABLED)
  /* Tracing is disabled and re-enabled each time we enter the kernel.  Most
     times, we continue from the same instruction we stopped before.  This is
     indicated via the RESUMED instruction flag.  The ENABLED instruction flag
     means that we continued from some other instruction.  Indicate this as a
     trace gap except when tracing just started.  */
  if (insn.enabled && !btinfo->functions.empty ())
    {
      struct btrace_function *bfun;
      uint64_t offset;

      bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);

      pt_insn_get_offset (decoder, &offset);

      warning (_("Non-contiguous trace at instruction %u (offset = 0x%" PRIx64
		 ", pc = 0x%" PRIx64 ")."), bfun->insn_offset - 1, offset,
	       insn.ip);
    }
#endif /* defined (HAVE_STRUCT_PT_INSN_ENABLED) */

#if defined (HAVE_STRUCT_PT_INSN_RESYNCED)
  /* Indicate trace overflows.  */
  if (insn.resynced)
    {
      struct btrace_function *bfun;
      uint64_t offset;

      bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);

      pt_insn_get_offset (decoder, &offset);

      warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ", pc = 0x%"
		 PRIx64 ")."), bfun->insn_offset - 1, offset, insn.ip);
    }
#endif /* defined (HAVE_STRUCT_PT_INSN_RESYNCED) */
}
/* Add function branch trace to BTINFO using DECODER.  */

static void
ftrace_add_pt (struct btrace_thread_info *btinfo,
	       struct pt_insn_decoder *decoder,
	       int *plevel,
	       std::vector<unsigned int> &gaps)
{
  struct btrace_function *bfun;
  uint64_t offset;
  int status;

  /* Register the ptwrite filter.  */
  apply_ext_lang_ptwrite_filter (btinfo);

  for (;;)
    {
      struct pt_insn insn;

      status = pt_insn_sync_forward (decoder);
      if (status < 0)
	{
	  if (status != -pte_eos)
	    warning (_("Failed to synchronize onto the Intel Processor "
		       "Trace stream: %s."), pt_errstr (pt_errcode (status)));
	  break;
	}

      for (;;)
	{
	  /* Handle events from the previous iteration or synchronization.  */
	  status = handle_pt_insn_events (btinfo, decoder, gaps, status);
	  if (status < 0)
	    break;

	  status = pt_insn_next (decoder, &insn, sizeof (insn));
	  if (status < 0)
	    break;

	  /* Handle events indicated by flags in INSN.  */
	  handle_pt_insn_event_flags (btinfo, decoder, insn, gaps);

	  bfun = ftrace_update_function (btinfo, insn.ip);

	  /* Maintain the function level offset.  */
	  *plevel = std::min (*plevel, bfun->level);

	  ftrace_update_insns (bfun, pt_btrace_insn (insn));
	}

      if (status == -pte_eos)
	break;

      /* Indicate the gap in the trace.  */
      bfun = ftrace_new_gap (btinfo, status, gaps);

      pt_insn_get_offset (decoder, &offset);

      warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
		 ", pc = 0x%" PRIx64 "): %s."), status, bfun->insn_offset - 1,
	       offset, insn.ip, pt_errstr (pt_errcode (status)));
    }
}
/* A callback function to allow the trace decoder to read the inferior's
   memory.  */

static int
btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
			    const struct pt_asid *asid, uint64_t pc,
			    void *context)
{
  int result, errcode;

  result = (int) size;
  try
    {
      errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
      if (errcode != 0)
	result = -pte_nomap;
    }
  catch (const gdb_exception_error &error)
    {
      result = -pte_nomap;
    }

  return result;
}
/* Translate the vendor from one enum to another.  */

static enum pt_cpu_vendor
pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
{
  switch (vendor)
    {
    default:
      return pcv_unknown;

    case CV_INTEL:
      return pcv_intel;
    }
}
/* Finalize the function branch trace after decode.  */

static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
				       struct thread_info *tp, int level)
{
  pt_insn_free_decoder (decoder);

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  tp->btrace.level = -level;

  /* Add a single last instruction entry for the current PC.
     This allows us to compute the backtrace at the current PC using both
     standard unwind and btrace unwind.
     This extra entry is ignored by all record commands.  */
  btrace_add_pc (tp);
}
/* Compute the function branch trace from Intel Processor Trace
   format.  */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
			  const struct btrace_data_pt *btrace,
			  std::vector<unsigned int> &gaps)
{
  /* We may end up doing target calls that require the current thread to be TP,
     for example reading memory through btrace_pt_readmem_callback.  Make sure
     TP is the current thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (tp);

  struct btrace_thread_info *btinfo;
  struct pt_insn_decoder *decoder;
  struct pt_config config;
  int level, errcode;

  if (btrace->size == 0)
    return;

  btinfo = &tp->btrace;
  if (btinfo->functions.empty ())
    level = INT_MAX;
  else
    level = -btinfo->level;

  pt_config_init (&config);
  config.begin = btrace->data;
  config.end = btrace->data + btrace->size;

  /* We treat an unknown vendor as 'no errata'.  */
  if (btrace->config.cpu.vendor != CV_UNKNOWN)
    {
      config.cpu.vendor
	= pt_translate_cpu_vendor (btrace->config.cpu.vendor);
      config.cpu.family = btrace->config.cpu.family;
      config.cpu.model = btrace->config.cpu.model;
      config.cpu.stepping = btrace->config.cpu.stepping;

      errcode = pt_cpu_errata (&config.errata, &config.cpu);
      if (errcode < 0)
	error (_("Failed to configure the Intel Processor Trace "
		 "decoder: %s."), pt_errstr (pt_errcode (errcode)));
    }

  decoder = pt_insn_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel Processor Trace decoder."));

  try
    {
      struct pt_image *image;

      image = pt_insn_get_image (decoder);
      if (image == NULL)
	error (_("Failed to configure the Intel Processor Trace decoder."));

      errcode = pt_image_set_callback (image, btrace_pt_readmem_callback,
				       NULL);
      if (errcode < 0)
	error (_("Failed to configure the Intel Processor Trace decoder: "
		 "%s."), pt_errstr (pt_errcode (errcode)));

      ftrace_add_pt (btinfo, decoder, &level, gaps);
    }
  catch (const gdb_exception &error)
    {
      /* Indicate a gap in the trace if we quit trace processing.  */
      if (error.reason == RETURN_QUIT && !btinfo->functions.empty ())
	ftrace_new_gap (btinfo, BDE_PT_USER_QUIT, gaps);

      btrace_finalize_ftrace_pt (decoder, tp, level);

      throw;
    }

  btrace_finalize_ftrace_pt (decoder, tp, level);
}
#else /* defined (HAVE_LIBIPT) */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
			  const struct btrace_data_pt *btrace,
			  std::vector<unsigned int> &gaps)
{
  internal_error (_("Unexpected branch trace format."));
}

#endif /* defined (HAVE_LIBIPT) */
/* Compute the function branch trace from a block branch trace BTRACE for
   a thread given by BTINFO.  If CPU is not NULL, overwrite the cpu in the
   branch trace configuration.  This is currently only used for the PT
   format.  */

static void
btrace_compute_ftrace_1 (struct thread_info *tp,
			 struct btrace_data *btrace,
			 const struct btrace_cpu *cpu,
			 std::vector<unsigned int> &gaps)
{
  DEBUG ("compute ftrace");

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
      return;

    case BTRACE_FORMAT_PT:
      /* Overwrite the cpu we use for enabling errata workarounds.  */
      if (cpu != nullptr)
	btrace->variant.pt.config.cpu = *cpu;

      btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
      return;
    }

  internal_error (_("Unknown branch trace format."));
}
static void
btrace_finalize_ftrace (struct thread_info *tp,
			std::vector<unsigned int> &gaps)
{
  if (!gaps.empty ())
    {
      tp->btrace.ngaps += gaps.size ();
      btrace_bridge_gaps (tp, gaps);
    }
}
static void
btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace,
		       const struct btrace_cpu *cpu)
{
  std::vector<unsigned int> gaps;

  try
    {
      btrace_compute_ftrace_1 (tp, btrace, cpu, gaps);
    }
  catch (const gdb_exception &error)
    {
      btrace_finalize_ftrace (tp, gaps);

      throw;
    }

  btrace_finalize_ftrace (tp, gaps);
}
/* Add an entry for the current PC.  */

static void
btrace_add_pc (struct thread_info *tp)
{
  struct btrace_data btrace;
  struct regcache *regcache;
  CORE_ADDR pc;

  regcache = get_thread_regcache (tp);
  pc = regcache_read_pc (regcache);

  btrace.format = BTRACE_FORMAT_BTS;
  btrace.variant.bts.blocks = new std::vector<btrace_block>;

  btrace.variant.bts.blocks->emplace_back (pc, pc);

  btrace_compute_ftrace (tp, &btrace, NULL);
}
/* See btrace.h.  */

void
btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
{
  if (tp->btrace.target != NULL)
    error (_("Recording already enabled on thread %s (%s)."),
	   print_thread_id (tp), target_pid_to_str (tp->ptid).c_str ());

#if !defined (HAVE_LIBIPT)
  if (conf->format == BTRACE_FORMAT_PT)
    error (_("Intel Processor Trace support was disabled at compile time."));
#endif /* !defined (HAVE_LIBIPT) */

  DEBUG ("enable thread %s (%s)", print_thread_id (tp),
	 tp->ptid.to_string ().c_str ());

  tp->btrace.target = target_enable_btrace (tp, conf);

  if (tp->btrace.target == NULL)
    error (_("Failed to enable recording on thread %s (%s)."),
	   print_thread_id (tp), target_pid_to_str (tp->ptid).c_str ());

  /* We need to undo the enable in case of errors.  */
  try
    {
      /* Add an entry for the current PC so we start tracing from where we
	 enabled it.

	 If we can't access TP's registers, TP is most likely running.  In this
	 case, we can't really say where tracing was enabled so it should be
	 safe to simply skip this step.

	 This is not relevant for BTRACE_FORMAT_PT since the trace will already
	 start at the PC at which tracing was enabled.  */
      if (conf->format != BTRACE_FORMAT_PT
	  && can_access_registers_thread (tp))
	btrace_add_pc (tp);
    }
  catch (const gdb_exception &exception)
    {
      btrace_disable (tp);

      throw;
    }
}
/* See btrace.h.  */

const struct btrace_config *
btrace_conf (const struct btrace_thread_info *btinfo)
{
  if (btinfo->target == NULL)
    return NULL;

  return target_btrace_conf (btinfo->target);
}
/* See btrace.h.  */

void
btrace_disable (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;

  if (btp->target == NULL)
    error (_("Recording not enabled on thread %s (%s)."),
	   print_thread_id (tp), target_pid_to_str (tp->ptid).c_str ());

  DEBUG ("disable thread %s (%s)", print_thread_id (tp),
	 tp->ptid.to_string ().c_str ());

  target_disable_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}
/* See btrace.h.  */

void
btrace_teardown (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;

  if (btp->target == NULL)
    return;

  DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
	 tp->ptid.to_string ().c_str ());

  target_teardown_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}
/* Stitch branch trace in BTS format.  */

static int
btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *last_bfun;
  btrace_block *first_new_block;

  btinfo = &tp->btrace;
  gdb_assert (!btinfo->functions.empty ());
  gdb_assert (!btrace->blocks->empty ());

  last_bfun = &btinfo->functions.back ();

  /* If the existing trace ends with a gap, we just glue the traces
     together.  We need to drop the last (i.e. chronologically first) block
     of the new trace, though, since we can't fill in the start address.  */
  if (last_bfun->insn.empty ())
    {
      btrace->blocks->pop_back ();
      return 0;
    }

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector.  */
  first_new_block = &btrace->blocks->back ();
  const btrace_insn &last_insn = last_bfun->insn.back ();

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
       1. we executed the instruction and some branch brought us back.
       2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC.  Remove it.  */
  if (first_new_block->end == last_insn.pc && btrace->blocks->size () == 1)
    {
      btrace->blocks->pop_back ();
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (&last_insn),
	 core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block.  This should not occur in practice.  */
  if (first_new_block->end < last_insn.pc)
    {
      warning (_("Error while trying to read delta trace.  Falling back to "
		 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace.  */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn.pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling.  */
  DEBUG ("pruning insn at %s for stitching",
	 ftrace_print_insn_addr (&last_insn));

  last_bfun->insn.pop_back ();

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace.  */

  /* The only case where this would hurt is if the entire trace consisted
     of just that one instruction.  If we remove it, we might turn the now
     empty btrace function segment into a gap.  But we don't want gaps at
     the beginning.  To avoid this, we remove the entire old trace.  */
  if (last_bfun->number == 1 && last_bfun->insn.empty ())
    btrace_clear (tp);

  return 0;
}
/* Adjust the block trace in order to stitch old and new trace together.
   BTRACE is the new delta trace between the last and the current stop.
   TP is the traced thread.
   May modify BTRACE as well as the existing trace in TP.
   Return 0 on success, -1 otherwise.  */

static int
btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
{
  /* If we don't have trace, there's nothing to do.  */
  if (btrace->empty ())
    return 0;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return btrace_stitch_bts (&btrace->variant.bts, tp);

    case BTRACE_FORMAT_PT:
      /* Delta reads are not supported.  */
      return -1;
    }

  internal_error (_("Unknown branch trace format."));
}
/* Clear the branch trace histories in BTINFO.  */

static void
btrace_clear_history (struct btrace_thread_info *btinfo)
{
  xfree (btinfo->insn_history);
  xfree (btinfo->call_history);
  xfree (btinfo->replay);

  btinfo->insn_history = NULL;
  btinfo->call_history = NULL;
  btinfo->replay = NULL;

  btinfo->aux_data.clear ();
}
/* Clear the branch trace maintenance histories in BTINFO.  */

static void
btrace_maint_clear (struct btrace_thread_info *btinfo)
{
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      btinfo->maint.variant.bts.packet_history.begin = 0;
      btinfo->maint.variant.bts.packet_history.end = 0;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      delete btinfo->maint.variant.pt.packets;

      btinfo->maint.variant.pt.packets = NULL;
      btinfo->maint.variant.pt.packet_history.begin = 0;
      btinfo->maint.variant.pt.packet_history.end = 0;
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
/* See btrace.h.  */

const char *
btrace_decode_error (enum btrace_format format, int errcode)
{
  switch (format)
    {
    case BTRACE_FORMAT_BTS:
      switch (errcode)
	{
	case BDE_BTS_OVERFLOW:
	  return _("instruction overflow");

	case BDE_BTS_INSN_SIZE:
	  return _("unknown instruction");

	default:
	  break;
	}
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      switch (errcode)
	{
	case BDE_PT_USER_QUIT:
	  return _("trace decode cancelled");

	case BDE_PT_DISABLED:
	  return _("disabled");

	case BDE_PT_OVERFLOW:
	  return _("overflow");

	default:
	  if (errcode < 0)
	    return pt_errstr (pt_errcode (errcode));
	  break;
	}
      break;
#endif /* defined (HAVE_LIBIPT) */

    default:
      break;
    }

  return _("unknown");
}
/* See btrace.h.  */

void
btrace_fetch (struct thread_info *tp, const struct btrace_cpu *cpu)
{
  struct btrace_thread_info *btinfo;
  struct btrace_target_info *tinfo;
  struct btrace_data btrace;
  int errcode;

  DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
	 tp->ptid.to_string ().c_str ());

  btinfo = &tp->btrace;
  tinfo = btinfo->target;
  if (tinfo == NULL)
    return;

  /* There's no way we could get new trace while replaying.
     On the other hand, delta trace would return a partial record with the
     current PC, which is the replay PC, not the last PC, as expected.  */
  if (btinfo->replay != NULL)
    return;

  /* With CLI usage, TP is always the current thread when we get here.
     However, since we can also store a gdb.Record object in Python
     referring to a different thread than the current one, we need to
     temporarily set the current thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (tp);

  /* We should not be called on running or exited threads.  */
  gdb_assert (can_access_registers_thread (tp));

  /* Let's first try to extend the trace we already have.  */
  if (!btinfo->functions.empty ())
    {
      errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
      if (errcode == 0)
	{
	  /* Success.  Let's try to stitch the traces together.  */
	  errcode = btrace_stitch_trace (&btrace, tp);
	}
      else
	{
	  /* We failed to read delta trace.  Let's try to read new trace.  */
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);

	  /* If we got any new trace, discard what we have.  */
	  if (errcode == 0 && !btrace.empty ())
	    btrace_clear (tp);
	}

      /* If we were not able to read the trace, we start over.  */
      if (errcode != 0)
	{
	  btrace_clear (tp);
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
	}
    }
  else
    errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  /* If we were not able to read the branch trace, signal an error.  */
  if (errcode != 0)
    error (_("Failed to read branch trace."));

  /* Compute the trace, provided we have any.  */
  if (!btrace.empty ())
    {
      /* Store the raw trace data.  The stored data will be cleared in
	 btrace_clear, so we always append the new trace.  */
      btrace_data_append (&btinfo->data, &btrace);
      btrace_maint_clear (btinfo);

      btrace_clear_history (btinfo);
      btrace_compute_ftrace (tp, &btrace, cpu);
    }
}
/* See btrace.h.  */

void
btrace_clear (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("clear thread %s (%s)", print_thread_id (tp),
	 tp->ptid.to_string ().c_str ());

  /* Make sure btrace frames that may hold a pointer into the branch
     trace data are destroyed.  */
  reinit_frame_cache ();

  btinfo = &tp->btrace;

  btinfo->functions.clear ();
  btinfo->ngaps = 0;

  /* Must clear the maint data before - it depends on BTINFO->DATA.  */
  btrace_maint_clear (btinfo);
  btinfo->data.clear ();
  btrace_clear_history (btinfo);
}
/* See btrace.h.  */

void
btrace_free_objfile (struct objfile *objfile)
{
  DEBUG ("free objfile");

  for (thread_info *tp : all_non_exited_threads ())
    btrace_clear (tp);
}
*
2101 btrace_insn_get (const struct btrace_insn_iterator
*it
)
2103 const struct btrace_function
*bfun
;
2104 unsigned int index
, end
;
2106 index
= it
->insn_index
;
2107 bfun
= &it
->btinfo
->functions
[it
->call_index
];
2109 /* Check if the iterator points to a gap in the trace. */
2110 if (bfun
->errcode
!= 0)
2113 /* The index is within the bounds of this function's instruction vector. */
2114 end
= bfun
->insn
.size ();
2115 gdb_assert (0 < end
);
2116 gdb_assert (index
< end
);
2118 return &bfun
->insn
[index
];
/* See btrace.h.  */

int
btrace_insn_get_error (const struct btrace_insn_iterator *it)
{
  return it->btinfo->functions[it->call_index].errcode;
}

/* See btrace.h.  */

unsigned int
btrace_insn_number (const struct btrace_insn_iterator *it)
{
  return it->btinfo->functions[it->call_index].insn_offset + it->insn_index;
}
/* See btrace.h.  */

void
btrace_insn_begin (struct btrace_insn_iterator *it,
		   const struct btrace_thread_info *btinfo)
{
  if (btinfo->functions.empty ())
    error (_("No trace."));

  it->btinfo = btinfo;
  it->call_index = 0;
  it->insn_index = 0;
}
/* See btrace.h.  */

void
btrace_insn_end (struct btrace_insn_iterator *it,
		 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;
  unsigned int length;

  if (btinfo->functions.empty ())
    error (_("No trace."));

  bfun = &btinfo->functions.back ();
  length = bfun->insn.size ();

  /* The last function may either be a gap or it contains the current
     instruction, which is one past the end of the execution trace; ignore
     it.  */
  if (length > 0)
    length -= 1;

  it->btinfo = btinfo;
  it->call_index = bfun->number - 1;
  it->insn_index = length;
}
/* See btrace.h.  */

unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = &it->btinfo->functions[it->call_index];
  steps = 0;
  index = it->insn_index;

  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = bfun->insn.size ();

      /* An empty function segment represents a gap in the trace.  We count
	 it as one instruction.  */
      if (end == 0)
	{
	  const struct btrace_function *next;

	  next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
	  if (next == NULL)
	    break;

	  stride -= 1;
	  steps += 1;

	  bfun = next;
	  index = 0;

	  continue;
	}

      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
      adv = std::min (space, stride);

      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
	{
	  const struct btrace_function *next;

	  next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
	  if (next == NULL)
	    {
	      /* We stepped past the last function.

		 Let's adjust the index to point to the last instruction in
		 the previous function.  */
	      index -= 1;
	      steps -= adv;
	      break;
	    }

	  /* We now point to the first instruction in the new function.  */
	  bfun = next;
	  index = 0;
	}

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->call_index = bfun->number - 1;
  it->insn_index = index;

  return steps;
}
/* See btrace.h.  */

unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = &it->btinfo->functions[it->call_index];
  steps = 0;
  index = it->insn_index;

  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one.  */
      if (index == 0)
	{
	  const struct btrace_function *prev;

	  prev = ftrace_find_call_by_number (it->btinfo, bfun->number - 1);
	  if (prev == NULL)
	    break;

	  /* We point to one after the last instruction in the new function.  */
	  bfun = prev;
	  index = bfun->insn.size ();

	  /* An empty function segment represents a gap in the trace.  We count
	     it as one instruction.  */
	  if (index == 0)
	    {
	      stride -= 1;
	      steps += 1;

	      continue;
	    }
	}

      /* Advance the iterator as far as possible within this segment.  */
      adv = std::min (index, stride);

      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->call_index = bfun->number - 1;
  it->insn_index = index;

  return steps;
}

/* See btrace.h.  */

int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
		 const struct btrace_insn_iterator *rhs)
{
  gdb_assert (lhs->btinfo == rhs->btinfo);

  if (lhs->call_index != rhs->call_index)
    return lhs->call_index - rhs->call_index;

  return lhs->insn_index - rhs->insn_index;
}

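/* Usage sketch (illustrative, assuming BTINFO holds a non-empty trace): a
   full forward traversal built from the iterator primitives above.

     struct btrace_insn_iterator it, end;
     btrace_insn_begin (&it, btinfo);
     btrace_insn_end (&end, btinfo);
     while (btrace_insn_cmp (&it, &end) < 0)
       {
	 // Visit btrace_insn_get (&it) here; a NULL result marks a gap.
	 if (btrace_insn_next (&it, 1) == 0)
	   break;
       }  */
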
/* See btrace.h.  */

int
btrace_find_insn_by_number (struct btrace_insn_iterator *it,
			    const struct btrace_thread_info *btinfo,
			    unsigned int number)
{
  const struct btrace_function *bfun;
  unsigned int upper, lower;

  if (btinfo->functions.empty ())
    return 0;

  lower = 0;
  bfun = &btinfo->functions[lower];
  if (number < bfun->insn_offset)
    return 0;

  upper = btinfo->functions.size () - 1;
  bfun = &btinfo->functions[upper];
  if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
    return 0;

  /* We assume that there are no holes in the numbering.  */
  for (;;)
    {
      const unsigned int average = lower + (upper - lower) / 2;

      bfun = &btinfo->functions[average];

      if (number < bfun->insn_offset)
	{
	  upper = average - 1;
	  continue;
	}

      if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
	{
	  lower = average + 1;
	  continue;
	}

      break;
    }

  it->btinfo = btinfo;
  it->call_index = bfun->number - 1;
  it->insn_index = number - bfun->insn_offset;

  return 1;
}

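/* Usage sketch (illustrative): the lookup above is a binary search over
   function segments ordered by INSN_OFFSET.  A typical call, assuming NUMBER
   was previously obtained from btrace_insn_number:

     struct btrace_insn_iterator it;
     if (btrace_find_insn_by_number (&it, btinfo, number) == 0)
       error (_("Instruction %u is not in the recorded trace."), number);  */
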
/* Returns true if the recording ends with a function segment that
   contains only a single (i.e. the current) instruction.  */

static bool
btrace_ends_with_single_insn (const struct btrace_thread_info *btinfo)
{
  const btrace_function *bfun;

  if (btinfo->functions.empty ())
    return false;

  bfun = &btinfo->functions.back ();
  if (bfun->errcode != 0)
    return false;

  return ftrace_call_num_insn (bfun) == 1;
}

/* See btrace.h.  */

const struct btrace_function *
btrace_call_get (const struct btrace_call_iterator *it)
{
  if (it->index >= it->btinfo->functions.size ())
    return NULL;

  return &it->btinfo->functions[it->index];
}

/* See btrace.h.  */

unsigned int
btrace_call_number (const struct btrace_call_iterator *it)
{
  const unsigned int length = it->btinfo->functions.size ();

  /* If the last function segment contains only a single instruction (i.e. the
     current instruction), skip it.  */
  if ((it->index == length) && btrace_ends_with_single_insn (it->btinfo))
    return length;

  return it->index + 1;
}

/* See btrace.h.  */

void
btrace_call_begin (struct btrace_call_iterator *it,
		   const struct btrace_thread_info *btinfo)
{
  if (btinfo->functions.empty ())
    error (_("No trace."));

  it->btinfo = btinfo;
  it->index = 0;
}

/* See btrace.h.  */

void
btrace_call_end (struct btrace_call_iterator *it,
		 const struct btrace_thread_info *btinfo)
{
  if (btinfo->functions.empty ())
    error (_("No trace."));

  it->btinfo = btinfo;
  it->index = btinfo->functions.size ();
}

/* See btrace.h.  */

unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
  const unsigned int length = it->btinfo->functions.size ();

  if (it->index + stride < length - 1)
    /* Default case: Simply advance the iterator.  */
    it->index += stride;
  else if (it->index + stride == length - 1)
    {
      /* We land exactly at the last function segment.  If it contains only one
	 instruction (i.e. the current instruction) it is not actually part of
	 the trace.  */
      if (btrace_ends_with_single_insn (it->btinfo))
	it->index = length;
      else
	it->index = length - 1;
    }
  else
    {
      /* We land past the last function segment and have to adjust the stride.
	 If the last function segment contains only one instruction (i.e. the
	 current instruction) it is not actually part of the trace.  */
      if (btrace_ends_with_single_insn (it->btinfo))
	stride = length - it->index - 1;
      else
	stride = length - it->index;

      it->index = length;
    }

  return stride;
}

/* See btrace.h.  */

unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
  const unsigned int length = it->btinfo->functions.size ();
  int steps = 0;

  gdb_assert (it->index <= length);

  if (stride == 0 || it->index == 0)
    return 0;

  /* If we are at the end, the first step is a special case.  If the last
     function segment contains only one instruction (i.e. the current
     instruction) it is not actually part of the trace.  To be able to step
     over this instruction, we need at least one more function segment.  */
  if ((it->index == length) && (length > 1))
    {
      if (btrace_ends_with_single_insn (it->btinfo))
	it->index = length - 2;
      else
	it->index = length - 1;

      steps = 1;
      stride -= 1;
    }

  stride = std::min (stride, it->index);

  it->index -= stride;
  return steps + stride;
}

/* See btrace.h.  */

int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
		 const struct btrace_call_iterator *rhs)
{
  gdb_assert (lhs->btinfo == rhs->btinfo);
  return (int) (lhs->index - rhs->index);
}

/* See btrace.h.  */

int
btrace_find_call_by_number (struct btrace_call_iterator *it,
			    const struct btrace_thread_info *btinfo,
			    unsigned int number)
{
  const unsigned int length = btinfo->functions.size ();

  if ((number == 0) || (number > length))
    return 0;

  it->btinfo = btinfo;
  it->index = number - 1;
  return 1;
}

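/* Usage sketch (illustrative, assuming BTINFO holds a non-empty trace):
   call-level iteration with the primitives above, mirroring the
   instruction-level loop earlier in this file.

     struct btrace_call_iterator it, end;
     btrace_call_begin (&it, btinfo);
     btrace_call_end (&end, btinfo);
     while (btrace_call_cmp (&it, &end) < 0)
       {
	 const struct btrace_function *bfun = btrace_call_get (&it);
	 if (bfun != NULL)
	   gdb_printf ("call %u\n", btrace_call_number (&it));
	 if (btrace_call_next (&it, 1) == 0)
	   break;
       }  */
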
/* See btrace.h.  */

void
btrace_set_insn_history (struct btrace_thread_info *btinfo,
			 const struct btrace_insn_iterator *begin,
			 const struct btrace_insn_iterator *end)
{
  if (btinfo->insn_history == NULL)
    btinfo->insn_history = XCNEW (struct btrace_insn_history);

  btinfo->insn_history->begin = *begin;
  btinfo->insn_history->end = *end;
}

/* See btrace.h.  */

void
btrace_set_call_history (struct btrace_thread_info *btinfo,
			 const struct btrace_call_iterator *begin,
			 const struct btrace_call_iterator *end)
{
  gdb_assert (begin->btinfo == end->btinfo);

  if (btinfo->call_history == NULL)
    btinfo->call_history = XCNEW (struct btrace_call_history);

  btinfo->call_history->begin = *begin;
  btinfo->call_history->end = *end;
}

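/* Usage sketch (illustrative): the two setters above remember a browsing
   window over the trace, e.g. the full range:

     struct btrace_call_iterator begin, end;
     btrace_call_begin (&begin, btinfo);
     btrace_call_end (&end, btinfo);
     btrace_set_call_history (btinfo, &begin, &end);  */
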
/* See btrace.h.  */

int
btrace_is_replaying (struct thread_info *tp)
{
  return tp->btrace.replay != NULL;
}

/* See btrace.h.  */

int
btrace_is_empty (struct thread_info *tp)
{
  struct btrace_insn_iterator begin, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (btinfo->functions.empty ())
    return 1;

  btrace_insn_begin (&begin, btinfo);
  btrace_insn_end (&end, btinfo);

  return btrace_insn_cmp (&begin, &end) == 0;
}

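/* Note (illustrative): "empty" above deliberately means begin == end rather
   than functions.empty (); a recording that holds nothing beyond the current
   instruction compares equal and is therefore reported as empty:

     if (!btrace_is_empty (tp))
       gdb_printf ("thread %s has trace data\n", print_thread_id (tp));  */
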
#if defined (HAVE_LIBIPT)

/* Print a single packet.  */

static void
pt_print_packet (const struct pt_packet *packet)
{
  switch (packet->type)
    {
    default:
      gdb_printf (("[??: %x]"), packet->type);
      break;

    case ppt_psb:
      gdb_printf (("psb"));
      break;

    case ppt_psbend:
      gdb_printf (("psbend"));
      break;

    case ppt_pad:
      gdb_printf (("pad"));
      break;

    case ppt_tip:
      gdb_printf (("tip %u: 0x%" PRIx64 ""),
		  packet->payload.ip.ipc,
		  packet->payload.ip.ip);
      break;

    case ppt_tip_pge:
      gdb_printf (("tip.pge %u: 0x%" PRIx64 ""),
		  packet->payload.ip.ipc,
		  packet->payload.ip.ip);
      break;

    case ppt_tip_pgd:
      gdb_printf (("tip.pgd %u: 0x%" PRIx64 ""),
		  packet->payload.ip.ipc,
		  packet->payload.ip.ip);
      break;

    case ppt_fup:
      gdb_printf (("fup %u: 0x%" PRIx64 ""),
		  packet->payload.ip.ipc,
		  packet->payload.ip.ip);
      break;

    case ppt_tnt_8:
      gdb_printf (("tnt-8 %u: 0x%" PRIx64 ""),
		  packet->payload.tnt.bit_size,
		  packet->payload.tnt.payload);
      break;

    case ppt_tnt_64:
      gdb_printf (("tnt-64 %u: 0x%" PRIx64 ""),
		  packet->payload.tnt.bit_size,
		  packet->payload.tnt.payload);
      break;

    case ppt_pip:
      gdb_printf (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
		  packet->payload.pip.nr ? (" nr") : (""));
      break;

    case ppt_tsc:
      gdb_printf (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
      break;

    case ppt_cbr:
      gdb_printf (("cbr %u"), packet->payload.cbr.ratio);
      break;

    case ppt_mode:
      switch (packet->payload.mode.leaf)
	{
	default:
	  gdb_printf (("mode %u"), packet->payload.mode.leaf);
	  break;

	case pt_mol_exec:
	  gdb_printf (("mode.exec%s%s"),
		      packet->payload.mode.bits.exec.csl
		      ? (" cs.l") : (""),
		      packet->payload.mode.bits.exec.csd
		      ? (" cs.d") : (""));
	  break;

	case pt_mol_tsx:
	  gdb_printf (("mode.tsx%s%s"),
		      packet->payload.mode.bits.tsx.intx
		      ? (" intx") : (""),
		      packet->payload.mode.bits.tsx.abrt
		      ? (" abrt") : (""));
	  break;
	}
      break;

    case ppt_ovf:
      gdb_printf (("ovf"));
      break;

    case ppt_stop:
      gdb_printf (("stop"));
      break;

    case ppt_vmcs:
      gdb_printf (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
      break;

    case ppt_tma:
      gdb_printf (("tma %x %x"), packet->payload.tma.ctc,
		  packet->payload.tma.fc);
      break;

    case ppt_mtc:
      gdb_printf (("mtc %x"), packet->payload.mtc.ctc);
      break;

    case ppt_cyc:
      gdb_printf (("cyc %" PRIx64 ""), packet->payload.cyc.value);
      break;

    case ppt_mnt:
      gdb_printf (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
      break;

#if (LIBIPT_VERSION >= 0x200)
    case ppt_ptw:
      gdb_printf (("ptw %u: 0x%" PRIx64 "%s"), packet->payload.ptw.plc,
		  packet->payload.ptw.payload,
		  packet->payload.ptw.ip ? (" ip") : (""));
      break;
#endif /* defined (LIBIPT_VERSION >= 0x200)  */
    }
}

/* Decode packets into MAINT using DECODER.  */

static void
btrace_maint_decode_pt (struct btrace_maint_info *maint,
			struct pt_packet_decoder *decoder)
{
  int errcode;

  if (maint->variant.pt.packets == NULL)
    maint->variant.pt.packets = new std::vector<btrace_pt_packet>;

  for (;;)
    {
      struct btrace_pt_packet packet;

      errcode = pt_pkt_sync_forward (decoder);
      if (errcode < 0)
	break;

      for (;;)
	{
	  pt_pkt_get_offset (decoder, &packet.offset);

	  errcode = pt_pkt_next (decoder, &packet.packet,
				 sizeof(packet.packet));
	  if (errcode < 0)
	    break;

	  if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
	    {
	      packet.errcode = pt_errcode (errcode);
	      maint->variant.pt.packets->push_back (packet);
	    }
	}

      if (errcode == -pte_eos)
	break;

      packet.errcode = pt_errcode (errcode);
      maint->variant.pt.packets->push_back (packet);

      warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
	       packet.offset, pt_errstr (packet.errcode));
    }

  if (errcode != -pte_eos)
    warning (_("Failed to synchronize onto the Intel Processor Trace "
	       "stream: %s."), pt_errstr (pt_errcode (errcode)));
}

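/* Note (illustrative): the nested loops above follow the standard libipt
   packet-decoder pattern - sync onto the stream, then drain packets until
   pt_pkt_next fails.  Stripped to its core, with DECODER an allocated
   pt_packet_decoder as in btrace_maint_update_pt_packets below:

     struct pt_packet packet;
     if (pt_pkt_sync_forward (decoder) >= 0)
       while (pt_pkt_next (decoder, &packet, sizeof (packet)) >= 0)
	 ;  // inspect packet.type and packet.payload here
*/
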
/* Update the packet history in BTINFO.  */

static void
btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
{
  struct pt_packet_decoder *decoder;
  const struct btrace_cpu *cpu;
  struct btrace_data_pt *pt;
  struct pt_config config;
  int errcode;

  pt = &btinfo->data.variant.pt;

  /* Nothing to do if there is no trace.  */
  if (pt->size == 0)
    return;

  memset (&config, 0, sizeof(config));

  config.size = sizeof (config);
  config.begin = pt->data;
  config.end = pt->data + pt->size;

  cpu = record_btrace_get_cpu ();
  if (cpu == nullptr)
    cpu = &pt->config.cpu;

  /* We treat an unknown vendor as 'no errata'.  */
  if (cpu->vendor != CV_UNKNOWN)
    {
      config.cpu.vendor = pt_translate_cpu_vendor (cpu->vendor);
      config.cpu.family = cpu->family;
      config.cpu.model = cpu->model;
      config.cpu.stepping = cpu->stepping;

      errcode = pt_cpu_errata (&config.errata, &config.cpu);
      if (errcode < 0)
	error (_("Failed to configure the Intel Processor Trace "
		 "decoder: %s."), pt_errstr (pt_errcode (errcode)));
    }

  decoder = pt_pkt_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel Processor Trace decoder."));

  try
    {
      btrace_maint_decode_pt (&btinfo->maint, decoder);
    }
  catch (const gdb_exception &except)
    {
      pt_pkt_free_decoder (decoder);

      if (except.reason < 0)
	throw;
    }

  pt_pkt_free_decoder (decoder);
}

#endif /* defined (HAVE_LIBIPT)  */

/* Update the packet maintenance information for BTINFO and store the
   low and high bounds into BEGIN and END, respectively.
   Store the current iterator state into FROM and TO.  */

static void
btrace_maint_update_packets (struct btrace_thread_info *btinfo,
			     unsigned int *begin, unsigned int *end,
			     unsigned int *from, unsigned int *to)
{
  switch (btinfo->data.format)
    {
    default:
      *begin = 0;
      *end = 0;
      *from = 0;
      *to = 0;
      break;

    case BTRACE_FORMAT_BTS:
      /* Nothing to do - we operate directly on BTINFO->DATA.  */
      *begin = 0;
      *end = btinfo->data.variant.bts.blocks->size ();
      *from = btinfo->maint.variant.bts.packet_history.begin;
      *to = btinfo->maint.variant.bts.packet_history.end;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      if (btinfo->maint.variant.pt.packets == nullptr)
	btinfo->maint.variant.pt.packets = new std::vector<btrace_pt_packet>;

      if (btinfo->maint.variant.pt.packets->empty ())
	btrace_maint_update_pt_packets (btinfo);

      *begin = 0;
      *end = btinfo->maint.variant.pt.packets->size ();
      *from = btinfo->maint.variant.pt.packet_history.begin;
      *to = btinfo->maint.variant.pt.packet_history.end;
      break;
#endif /* defined (HAVE_LIBIPT)  */
    }
}

/* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
   update the current iterator position.  */

static void
btrace_maint_print_packets (struct btrace_thread_info *btinfo,
			    unsigned int begin, unsigned int end)
{
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      {
	const std::vector<btrace_block> &blocks
	  = *btinfo->data.variant.bts.blocks;
	unsigned int blk;

	for (blk = begin; blk < end; ++blk)
	  {
	    const btrace_block &block = blocks.at (blk);

	    gdb_printf ("%u\tbegin: %s, end: %s\n", blk,
			core_addr_to_string_nz (block.begin),
			core_addr_to_string_nz (block.end));
	  }

	btinfo->maint.variant.bts.packet_history.begin = begin;
	btinfo->maint.variant.bts.packet_history.end = end;
      }
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
	const std::vector<btrace_pt_packet> &packets
	  = *btinfo->maint.variant.pt.packets;
	unsigned int pkt;

	for (pkt = begin; pkt < end; ++pkt)
	  {
	    const struct btrace_pt_packet &packet = packets.at (pkt);

	    gdb_printf ("%u\t", pkt);
	    gdb_printf ("0x%" PRIx64 "\t", packet.offset);

	    if (packet.errcode == pte_ok)
	      pt_print_packet (&packet.packet);
	    else
	      gdb_printf ("[error: %s]", pt_errstr (packet.errcode));

	    gdb_printf ("\n");
	  }

	btinfo->maint.variant.pt.packet_history.begin = begin;
	btinfo->maint.variant.pt.packet_history.end = end;
      }
      break;
#endif /* defined (HAVE_LIBIPT)  */
    }
}

/* Read a number from an argument string.  */

static unsigned int
get_uint (const char **arg)
{
  const char *begin, *pos;
  char *end;
  unsigned long number;

  begin = *arg;
  pos = skip_spaces (begin);

  if (!isdigit (*pos))
    error (_("Expected positive number, got: %s."), pos);

  number = strtoul (pos, &end, 10);
  if (number > UINT_MAX)
    error (_("Number too big."));

  *arg += (end - begin);

  return (unsigned int) number;
}

/* Read a context size from an argument string.  */

static int
get_context_size (const char **arg)
{
  const char *pos = skip_spaces (*arg);

  if (!isdigit (*pos))
    error (_("Expected positive number, got: %s."), pos);

  char *end;
  long result = strtol (pos, &end, 10);
  *arg = end;
  return result;
}

/* Complain about junk at the end of an argument string.  */

static void
no_chunk (const char *arg)
{
  if (*arg != 0)
    error (_("Junk after argument: %s."), arg);
}

3010 /* The "maintenance btrace packet-history" command. */
3013 maint_btrace_packet_history_cmd (const char *arg
, int from_tty
)
3015 struct btrace_thread_info
*btinfo
;
3016 unsigned int size
, begin
, end
, from
, to
;
3018 thread_info
*tp
= current_inferior ()->find_thread (inferior_ptid
);
3020 error (_("No thread."));
3023 btinfo
= &tp
->btrace
;
3025 btrace_maint_update_packets (btinfo
, &begin
, &end
, &from
, &to
);
3028 gdb_printf (_("No trace.\n"));
3032 if (arg
== NULL
|| *arg
== 0 || strcmp (arg
, "+") == 0)
3036 if (end
- from
< size
)
3040 else if (strcmp (arg
, "-") == 0)
3044 if (to
- begin
< size
)
3050 from
= get_uint (&arg
);
3052 error (_("'%u' is out of range."), from
);
3054 arg
= skip_spaces (arg
);
3057 arg
= skip_spaces (++arg
);
3062 size
= get_context_size (&arg
);
3066 if (end
- from
< size
)
3070 else if (*arg
== '-')
3073 size
= get_context_size (&arg
);
3077 /* Include the packet given as first argument. */
3081 if (to
- begin
< size
)
3087 to
= get_uint (&arg
);
3089 /* Include the packet at the second argument and silently
3090 truncate the range. */
3103 if (end
- from
< size
)
3111 btrace_maint_print_packets (btinfo
, from
, to
);
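/* Usage sketch (illustrative): the argument forms parsed above, as typed at
   the GDB prompt:

     (gdb) maint btrace packet-history         -- next ten packets
     (gdb) maint btrace packet-history -       -- previous ten packets
     (gdb) maint btrace packet-history 42      -- ten packets starting at 42
     (gdb) maint btrace packet-history 42,50   -- packets 42 through 50
     (gdb) maint btrace packet-history 42,+5   -- five packets starting at 42
     (gdb) maint btrace packet-history 42,-5   -- five packets ending at 42  */
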
3114 /* The "maintenance btrace clear-packet-history" command. */
3117 maint_btrace_clear_packet_history_cmd (const char *args
, int from_tty
)
3119 if (args
!= NULL
&& *args
!= 0)
3120 error (_("Invalid argument."));
3122 if (inferior_ptid
== null_ptid
)
3123 error (_("No thread."));
3125 thread_info
*tp
= inferior_thread ();
3126 btrace_thread_info
*btinfo
= &tp
->btrace
;
3128 /* Must clear the maint data before - it depends on BTINFO->DATA. */
3129 btrace_maint_clear (btinfo
);
3130 btinfo
->data
.clear ();
3133 /* The "maintenance btrace clear" command. */
3136 maint_btrace_clear_cmd (const char *args
, int from_tty
)
3138 if (args
!= NULL
&& *args
!= 0)
3139 error (_("Invalid argument."));
3141 if (inferior_ptid
== null_ptid
)
3142 error (_("No thread."));
3144 thread_info
*tp
= inferior_thread ();
3148 /* The "maintenance info btrace" command. */
3151 maint_info_btrace_cmd (const char *args
, int from_tty
)
3153 struct btrace_thread_info
*btinfo
;
3154 const struct btrace_config
*conf
;
3156 if (args
!= NULL
&& *args
!= 0)
3157 error (_("Invalid argument."));
3159 if (inferior_ptid
== null_ptid
)
3160 error (_("No thread."));
3162 thread_info
*tp
= inferior_thread ();
3164 btinfo
= &tp
->btrace
;
3166 conf
= btrace_conf (btinfo
);
3168 error (_("No btrace configuration."));
3170 gdb_printf (_("Format: %s.\n"),
3171 btrace_format_string (conf
->format
));
3173 switch (conf
->format
)
3178 case BTRACE_FORMAT_BTS
:
3179 gdb_printf (_("Number of packets: %zu.\n"),
3180 btinfo
->data
.variant
.bts
.blocks
->size ());
3183 #if defined (HAVE_LIBIPT)
3184 case BTRACE_FORMAT_PT
:
3186 struct pt_version version
;
3188 version
= pt_library_version ();
3189 gdb_printf (_("Version: %u.%u.%u%s.\n"), version
.major
,
3190 version
.minor
, version
.build
,
3191 version
.ext
!= NULL
? version
.ext
: "");
3193 btrace_maint_update_pt_packets (btinfo
);
3194 gdb_printf (_("Number of packets: %zu.\n"),
3195 ((btinfo
->maint
.variant
.pt
.packets
== nullptr)
3196 ? 0 : btinfo
->maint
.variant
.pt
.packets
->size ()));
3199 #endif /* defined (HAVE_LIBIPT) */
3203 /* The "maint show btrace pt skip-pad" show value function. */
3206 show_maint_btrace_pt_skip_pad (struct ui_file
*file
, int from_tty
,
3207 struct cmd_list_element
*c
,
3210 gdb_printf (file
, _("Skip PAD packets is %s.\n"), value
);
/* Initialize btrace maintenance commands.  */

void _initialize_btrace ();
void
_initialize_btrace ()
{
  add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
	   _("Info about branch tracing data."), &maintenanceinfolist);

  add_basic_prefix_cmd ("btrace", class_maintenance,
			_("Branch tracing maintenance commands."),
			&maint_btrace_cmdlist, 0, &maintenancelist);

  add_setshow_prefix_cmd ("btrace", class_maintenance,
			  _("Set branch tracing specific variables."),
			  _("Show branch tracing specific variables."),
			  &maint_btrace_set_cmdlist,
			  &maint_btrace_show_cmdlist,
			  &maintenance_set_cmdlist,
			  &maintenance_show_cmdlist);

  add_setshow_prefix_cmd ("pt", class_maintenance,
			  _("Set Intel Processor Trace specific variables."),
			  _("Show Intel Processor Trace specific variables."),
			  &maint_btrace_pt_set_cmdlist,
			  &maint_btrace_pt_show_cmdlist,
			  &maint_btrace_set_cmdlist,
			  &maint_btrace_show_cmdlist);

  add_setshow_boolean_cmd ("skip-pad", class_maintenance,
			   &maint_btrace_pt_skip_pad, _("\
Set whether PAD packets should be skipped in the btrace packet history."), _("\
Show whether PAD packets should be skipped in the btrace packet history."), _("\
When enabled, PAD packets are ignored in the btrace packet history."),
			   NULL, show_maint_btrace_pt_skip_pad,
			   &maint_btrace_pt_set_cmdlist,
			   &maint_btrace_pt_show_cmdlist);

  add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
	   _("Print the raw branch tracing data.\n\
With no argument, print ten more packets after the previous ten-line print.\n\
With '-' as argument print ten packets before a previous ten-line print.\n\
One argument specifies the starting packet of a ten-line print.\n\
Two arguments with comma between specify starting and ending packets to \
print.\n\
Preceded with '+'/'-' the second argument specifies the distance from the \
first."),
	   &maint_btrace_cmdlist);

  add_cmd ("clear-packet-history", class_maintenance,
	   maint_btrace_clear_packet_history_cmd,
	   _("Clears the branch tracing packet history.\n\
Discards the raw branch tracing data but not the execution history data."),
	   &maint_btrace_cmdlist);

  add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
	   _("Clears the branch tracing data.\n\
Discards the raw branch tracing data and the execution history data.\n\
The next 'record' command will fetch the branch tracing data anew."),
	   &maint_btrace_cmdlist);
}