/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2024 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
22 #include "btrace.h"
23 #include "gdbthread.h"
24 #include "inferior.h"
25 #include "target.h"
26 #include "record.h"
27 #include "symtab.h"
28 #include "disasm.h"
29 #include "source.h"
30 #include "filenames.h"
31 #include "regcache.h"
32 #include "gdbsupport/rsp-low.h"
33 #include "cli/cli-cmds.h"
34 #include "cli/cli-utils.h"
35 #include "extension.h"
36 #include "gdbarch.h"
38 /* For maintenance commands. */
39 #include "record-btrace.h"
41 #include <inttypes.h>
42 #include <ctype.h>
43 #include <algorithm>
44 #include <string>
/* Command lists for btrace maintenance commands.  */
static struct cmd_list_element *maint_btrace_cmdlist;
static struct cmd_list_element *maint_btrace_set_cmdlist;
static struct cmd_list_element *maint_btrace_show_cmdlist;
static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
static struct cmd_list_element *maint_btrace_pt_show_cmdlist;

/* Control whether to skip PAD packets when computing the packet history.  */
static bool maint_btrace_pt_skip_pad = true;

static void btrace_add_pc (struct thread_info *tp);
/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  */

#define DEBUG(msg, args...)					\
  do								\
    {								\
      if (record_debug != 0)					\
	gdb_printf (gdb_stdlog,					\
		    "[btrace] " msg "\n", ##args);		\
    }								\
  while (0)

#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
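/* Illustrative note on the do ... while (0) wrapper above: without it, a
   plain braced expansion would break when the macro is the sole statement
   of an if/else, e.g.

     if (cond)
       DEBUG ("hello");
     else
       do_other ();

   With a bare { ... } expansion, the trailing semicolon would terminate the
   if before the else, causing a compile error; the do/while form expands to
   a single statement that still requires the semicolon.  */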
/* Return the function name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_function_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return sym->print_name ();

  if (msym != NULL)
    return msym->print_name ();

  return "<unknown>";
}

/* Return the file name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_filename (const struct btrace_function *bfun)
{
  struct symbol *sym;
  const char *filename;

  sym = bfun->sym;

  if (sym != NULL)
    filename = symtab_to_filename_for_display (sym->symtab ());
  else
    filename = "<unknown>";

  return filename;
}

/* Return a string representation of the address of an instruction.
   This function never returns NULL.  */

static const char *
ftrace_print_insn_addr (const struct btrace_insn *insn)
{
  if (insn == NULL)
    return "<nil>";

  return core_addr_to_string_nz (insn->pc);
}

/* Print an ftrace debug status message.  */

static void
ftrace_debug (const struct btrace_function *bfun, const char *prefix)
{
  const char *fun, *file;
  unsigned int ibegin, iend;
  int level;

  fun = ftrace_print_function_name (bfun);
  file = ftrace_print_filename (bfun);
  level = bfun->level;

  ibegin = bfun->insn_offset;
  iend = ibegin + bfun->insn.size ();

  DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
		prefix, fun, file, level, ibegin, iend);
}
/* Return the number of instructions in a given function call segment.  */

static unsigned int
ftrace_call_num_insn (const struct btrace_function* bfun)
{
  if (bfun == NULL)
    return 0;

  /* A gap is always counted as one instruction.  */
  if (bfun->errcode != 0)
    return 1;

  return bfun->insn.size ();
}

/* Return the function segment with the given NUMBER or NULL if no such segment
   exists.  BTINFO is the branch trace information for the current thread.  */

static struct btrace_function *
ftrace_find_call_by_number (struct btrace_thread_info *btinfo,
			    unsigned int number)
{
  if (number == 0 || number > btinfo->functions.size ())
    return NULL;

  return &btinfo->functions[number - 1];
}

/* A const version of the function above.  */

static const struct btrace_function *
ftrace_find_call_by_number (const struct btrace_thread_info *btinfo,
			    unsigned int number)
{
  if (number == 0 || number > btinfo->functions.size ())
    return NULL;

  return &btinfo->functions[number - 1];
}
/* Return non-zero if BFUN does not match MFUN and FUN,
   return zero otherwise.  */

static int
ftrace_function_switched (const struct btrace_function *bfun,
			  const struct minimal_symbol *mfun,
			  const struct symbol *fun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  /* If the minimal symbol changed, we certainly switched functions.  */
  if (mfun != NULL && msym != NULL
      && strcmp (mfun->linkage_name (), msym->linkage_name ()) != 0)
    return 1;

  /* If the symbol changed, we certainly switched functions.  */
  if (fun != NULL && sym != NULL)
    {
      const char *bfname, *fname;

      /* Check the function name.  */
      if (strcmp (fun->linkage_name (), sym->linkage_name ()) != 0)
	return 1;

      /* Check the location of those functions, as well.  */
      bfname = symtab_to_fullname (sym->symtab ());
      fname = symtab_to_fullname (fun->symtab ());
      if (filename_cmp (fname, bfname) != 0)
	return 1;
    }

  /* If we lost symbol information, we switched functions.  */
  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
    return 1;

  /* If we gained symbol information, we switched functions.  */
  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
    return 1;

  return 0;
}
/* Allocate and initialize a new branch trace function segment at the end of
   the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.
   This invalidates all struct btrace_function pointers currently held.  */

static struct btrace_function *
ftrace_new_function (struct btrace_thread_info *btinfo,
		     struct minimal_symbol *mfun,
		     struct symbol *fun)
{
  int level;
  unsigned int number, insn_offset;

  if (btinfo->functions.empty ())
    {
      /* Start counting NUMBER and INSN_OFFSET at one.  */
      level = 0;
      number = 1;
      insn_offset = 1;
    }
  else
    {
      const struct btrace_function *prev = &btinfo->functions.back ();
      level = prev->level;
      number = prev->number + 1;
      insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
    }

  return &btinfo->functions.emplace_back (mfun, fun, number, insn_offset,
					  level);
}
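/* For example: on an empty trace, the segment created above gets
   NUMBER == 1 and INSN_OFFSET == 1, so both user-visible function-call and
   instruction numbering start at one.  A later segment's INSN_OFFSET
   advances by its predecessor's instruction count, where a gap counts as a
   single instruction (see ftrace_call_num_insn above).  */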
/* Update the UP field of a function segment.  */

static void
ftrace_update_caller (struct btrace_function *bfun,
		      struct btrace_function *caller,
		      btrace_function_flags flags)
{
  if (bfun->up != 0)
    ftrace_debug (bfun, "updating caller");

  bfun->up = caller->number;
  bfun->flags = flags;

  ftrace_debug (bfun, "set caller");
  ftrace_debug (caller, "..to");
}

/* Fix up the caller for all segments of a function.  */

static void
ftrace_fixup_caller (struct btrace_thread_info *btinfo,
		     struct btrace_function *bfun,
		     struct btrace_function *caller,
		     btrace_function_flags flags)
{
  unsigned int prev, next;

  prev = bfun->prev;
  next = bfun->next;
  ftrace_update_caller (bfun, caller, flags);

  /* Update all function segments belonging to the same function.  */
  for (; prev != 0; prev = bfun->prev)
    {
      bfun = ftrace_find_call_by_number (btinfo, prev);
      ftrace_update_caller (bfun, caller, flags);
    }

  for (; next != 0; next = bfun->next)
    {
      bfun = ftrace_find_call_by_number (btinfo, next);
      ftrace_update_caller (bfun, caller, flags);
    }
}

/* Add a new function segment for a call at the end of the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_call (struct btrace_thread_info *btinfo,
		 struct minimal_symbol *mfun,
		 struct symbol *fun)
{
  const unsigned int length = btinfo->functions.size ();
  struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);

  bfun->up = length;
  bfun->level += 1;

  ftrace_debug (bfun, "new call");

  return bfun;
}

/* Add a new function segment for a tail call at the end of the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_tailcall (struct btrace_thread_info *btinfo,
		     struct minimal_symbol *mfun,
		     struct symbol *fun)
{
  const unsigned int length = btinfo->functions.size ();
  struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);

  bfun->up = length;
  bfun->level += 1;
  bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;

  ftrace_debug (bfun, "new tail call");

  return bfun;
}
/* Return the caller of BFUN or NULL if there is none.  This function skips
   tail calls in the call chain.  BTINFO is the branch trace information for
   the current thread.  */

static struct btrace_function *
ftrace_get_caller (struct btrace_thread_info *btinfo,
		   struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
    if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
      return ftrace_find_call_by_number (btinfo, bfun->up);

  return NULL;
}
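/* For example: if segment C's up link is marked BFUN_UP_LINKS_TO_TAILCALL
   (C was reached from B via a tail call) and B was called normally from A,
   ftrace_get_caller walks past B's tail-call link and returns A, the frame
   a user would consider the caller.  */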
/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
   symbol information.  BTINFO is the branch trace information for the current
   thread.  */

static struct btrace_function *
ftrace_find_caller (struct btrace_thread_info *btinfo,
		    struct btrace_function *bfun,
		    struct minimal_symbol *mfun,
		    struct symbol *fun)
{
  for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
    {
      /* Skip functions with incompatible symbol information.  */
      if (ftrace_function_switched (bfun, mfun, fun))
	continue;

      /* This is the function segment we're looking for.  */
      break;
    }

  return bfun;
}

/* Find the innermost caller in the back trace of BFUN, skipping all
   function segments that do not end with a call instruction (e.g.
   tail calls ending with a jump).  BTINFO is the branch trace information for
   the current thread.  */

static struct btrace_function *
ftrace_find_call (struct btrace_thread_info *btinfo,
		  struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
    {
      /* Skip gaps.  */
      if (bfun->errcode != 0)
	continue;

      btrace_insn &last = bfun->insn.back ();

      if (last.iclass == BTRACE_INSN_CALL)
	break;
    }

  return bfun;
}

/* Add a continuation segment for a function into which we return at the end of
   the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_return (struct btrace_thread_info *btinfo,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  struct btrace_function *prev, *bfun, *caller;

  bfun = ftrace_new_function (btinfo, mfun, fun);
  prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_call_by_number (btinfo, prev->up);
  caller = ftrace_find_caller (btinfo, caller, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
	 function instance.  */
      gdb_assert (caller->next == 0);

      caller->next = bfun->number;
      bfun->prev = caller->number;

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
	 wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call_by_number (btinfo, prev->up);
      caller = ftrace_find_call (btinfo, caller);
      if (caller == NULL)
	{
	  /* There is no call in PREV's back trace.  We assume that the
	     branch trace did not include it.  */

	  /* Let's find the topmost function and add a new caller for it.
	     This should handle a series of initial tail calls.  */
	  while (prev->up != 0)
	    prev = ftrace_find_call_by_number (btinfo, prev->up);

	  bfun->level = prev->level - 1;

	  /* Fix up the call stack for PREV.  */
	  ftrace_fixup_caller (btinfo, prev, bfun, BFUN_UP_LINKS_TO_RET);

	  ftrace_debug (bfun, "new return - no caller");
	}
      else
	{
	  /* There is a call in PREV's back trace to which we should have
	     returned but didn't.  Let's start a new, separate back trace
	     from PREV's level.  */
	  bfun->level = prev->level - 1;

	  /* We fix up the back trace for PREV but leave other function
	     segments on the same level as they are.
	     This should handle things like schedule () correctly where we're
	     switching contexts.  */
	  prev->up = bfun->number;
	  prev->flags = BFUN_UP_LINKS_TO_RET;

	  ftrace_debug (bfun, "new return - unknown caller");
	}
    }

  return bfun;
}
/* Add a new function segment for a function switch at the end of the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_switch (struct btrace_thread_info *btinfo,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  struct btrace_function *prev, *bfun;

  /* This is an unexplained function switch.  We can't really be sure about the
     call stack, yet the best I can think of right now is to preserve it.  */
  bfun = ftrace_new_function (btinfo, mfun, fun);
  prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);
  bfun->up = prev->up;
  bfun->flags = prev->flags;

  ftrace_debug (bfun, "new switch");

  return bfun;
}

/* Add a new function segment for a gap in the trace due to a decode error at
   the end of the trace.
   BTINFO is the branch trace information for the current thread.
   ERRCODE is the format-specific error code.  */

static struct btrace_function *
ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode,
		std::vector<unsigned int> &gaps)
{
  struct btrace_function *bfun;

  if (btinfo->functions.empty ())
    bfun = ftrace_new_function (btinfo, NULL, NULL);
  else
    {
      /* We hijack the previous function segment if it was empty.  */
      bfun = &btinfo->functions.back ();
      if (bfun->errcode != 0 || !bfun->insn.empty ())
	bfun = ftrace_new_function (btinfo, NULL, NULL);
    }

  bfun->errcode = errcode;
  gaps.push_back (bfun->number);

  ftrace_debug (bfun, "new gap");

  return bfun;
}
/* Update the current function segment at the end of the trace in BTINFO with
   respect to the instruction at PC.  This may create new function segments.
   Return the chronologically latest function segment, never NULL.  */

static struct btrace_function *
ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc)
{
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_function *bfun;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bound_minimal_symbol bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function, we create one.  */
  if (btinfo->functions.empty ())
    return ftrace_new_function (btinfo, mfun, fun);

  /* If we had a gap before, we create a function.  */
  bfun = &btinfo->functions.back ();
  if (bfun->errcode != 0)
    return ftrace_new_function (btinfo, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  btrace_insn *last = NULL;
  if (!bfun->insn.empty ())
    last = &bfun->insn.back ();

  if (last != NULL)
    {
      switch (last->iclass)
	{
	case BTRACE_INSN_RETURN:
	  {
	    const char *fname;

	    /* On some systems, _dl_runtime_resolve returns to the resolved
	       function instead of jumping to it.  From our perspective,
	       however, this is a tailcall.
	       If we treated it as return, we wouldn't be able to find the
	       resolved function in our stack back trace.  Hence, we would
	       lose the current stack back trace and start anew with an empty
	       back trace.  When the resolved function returns, we would then
	       create a stack back trace with the same function names but
	       different frame id's.  This will confuse stepping.  */
	    fname = ftrace_print_function_name (bfun);
	    if (strcmp (fname, "_dl_runtime_resolve") == 0)
	      return ftrace_new_tailcall (btinfo, mfun, fun);

	    return ftrace_new_return (btinfo, mfun, fun);
	  }

	case BTRACE_INSN_CALL:
	  /* Ignore calls to the next instruction.  They are used for PIC.  */
	  if (last->pc + last->size == pc)
	    break;

	  return ftrace_new_call (btinfo, mfun, fun);

	case BTRACE_INSN_JUMP:
	  {
	    CORE_ADDR start;

	    start = get_pc_function_start (pc);

	    /* A jump to the start of a function is (typically) a tail call.  */
	    if (start == pc)
	      return ftrace_new_tailcall (btinfo, mfun, fun);

	    /* Some versions of _Unwind_RaiseException use an indirect
	       jump to 'return' to the exception handler of the caller
	       handling the exception instead of a return.  Let's restrict
	       this heuristic to that and related functions.  */
	    const char *fname = ftrace_print_function_name (bfun);
	    if (strncmp (fname, "_Unwind_", strlen ("_Unwind_")) == 0)
	      {
		struct btrace_function *caller
		  = ftrace_find_call_by_number (btinfo, bfun->up);
		caller = ftrace_find_caller (btinfo, caller, mfun, fun);
		if (caller != NULL)
		  return ftrace_new_return (btinfo, mfun, fun);
	      }

	    /* If we can't determine the function for PC, we treat a jump at
	       the end of the block as tail call if we're switching functions
	       and as an intra-function branch if we aren't.  */
	    if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
	      return ftrace_new_tailcall (btinfo, mfun, fun);

	    break;
	  }
	}
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
		    ftrace_print_insn_addr (last),
		    ftrace_print_function_name (bfun),
		    ftrace_print_filename (bfun));

      return ftrace_new_switch (btinfo, mfun, fun);
    }

  return bfun;
}
/* Add the instruction at PC to BFUN's instructions.  */

static void
ftrace_update_insns (struct btrace_function *bfun, const btrace_insn &insn)
{
  bfun->insn.push_back (insn);

  if (insn.iclass == BTRACE_INSN_AUX)
    bfun->flags |= BFUN_CONTAINS_AUX;

  if (record_debug > 1)
    ftrace_debug (bfun, "update insn");
}

/* Classify the instruction at PC.  */

static enum btrace_insn_class
ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum btrace_insn_class iclass;

  iclass = BTRACE_INSN_OTHER;
  try
    {
      if (gdbarch_insn_is_call (gdbarch, pc))
	iclass = BTRACE_INSN_CALL;
      else if (gdbarch_insn_is_ret (gdbarch, pc))
	iclass = BTRACE_INSN_RETURN;
      else if (gdbarch_insn_is_jump (gdbarch, pc))
	iclass = BTRACE_INSN_JUMP;
    }
  catch (const gdb_exception_error &error)
    {
    }

  return iclass;
}
/* Try to match the back trace at LHS to the back trace at RHS.  Returns the
   number of matching function segments or zero if the back traces do not
   match.  BTINFO is the branch trace information for the current thread.  */

static int
ftrace_match_backtrace (struct btrace_thread_info *btinfo,
			struct btrace_function *lhs,
			struct btrace_function *rhs)
{
  int matches;

  for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
    {
      if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
	return 0;

      lhs = ftrace_get_caller (btinfo, lhs);
      rhs = ftrace_get_caller (btinfo, rhs);
    }

  return matches;
}
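/* For example: with back traces main > foo > bar on both sides, the walk
   above pairs bar/bar, foo/foo and main/main and returns 3.  As soon as any
   pair disagrees in its symbol information, the back traces are considered
   unrelated and 0 is returned, regardless of how many frames matched
   before.  */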
/* Add ADJUSTMENT to the level of BFUN and succeeding function segments.
   BTINFO is the branch trace information for the current thread.  */

static void
ftrace_fixup_level (struct btrace_thread_info *btinfo,
		    struct btrace_function *bfun, int adjustment)
{
  if (adjustment == 0)
    return;

  DEBUG_FTRACE ("fixup level (%+d)", adjustment);
  ftrace_debug (bfun, "..bfun");

  while (bfun != NULL)
    {
      bfun->level += adjustment;
      bfun = ftrace_find_call_by_number (btinfo, bfun->number + 1);
    }
}

/* Recompute the global level offset.  Traverse the function trace and compute
   the global level offset as the negative of the minimal function level.  */

static void
ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
{
  int level = INT_MAX;

  if (btinfo == NULL)
    return;

  if (btinfo->functions.empty ())
    return;

  unsigned int length = btinfo->functions.size () - 1;
  for (unsigned int i = 0; i < length; ++i)
    level = std::min (level, btinfo->functions[i].level);

  /* The last function segment contains the current instruction, which is not
     really part of the trace.  If it contains just this one instruction, we
     ignore the segment.  */
  struct btrace_function *last = &btinfo->functions.back ();
  if (last->insn.size () != 1)
    level = std::min (level, last->level);

  DEBUG_FTRACE ("setting global level offset: %d", -level);
  btinfo->level = -level;
}
/* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
   ftrace_connect_backtrace.  BTINFO is the branch trace information for the
   current thread.  */

static void
ftrace_connect_bfun (struct btrace_thread_info *btinfo,
		     struct btrace_function *prev,
		     struct btrace_function *next)
{
  DEBUG_FTRACE ("connecting...");
  ftrace_debug (prev, "..prev");
  ftrace_debug (next, "..next");

  /* The function segments are not yet connected.  */
  gdb_assert (prev->next == 0);
  gdb_assert (next->prev == 0);

  prev->next = next->number;
  next->prev = prev->number;

  /* We may have moved NEXT to a different function level.  */
  ftrace_fixup_level (btinfo, next, prev->level - next->level);

  /* If we run out of back trace for one, let's use the other's.  */
  if (prev->up == 0)
    {
      const btrace_function_flags flags = next->flags;

      next = ftrace_find_call_by_number (btinfo, next->up);
      if (next != NULL)
	{
	  DEBUG_FTRACE ("using next's callers");
	  ftrace_fixup_caller (btinfo, prev, next, flags);
	}
    }
  else if (next->up == 0)
    {
      const btrace_function_flags flags = prev->flags;

      prev = ftrace_find_call_by_number (btinfo, prev->up);
      if (prev != NULL)
	{
	  DEBUG_FTRACE ("using prev's callers");
	  ftrace_fixup_caller (btinfo, next, prev, flags);
	}
    }
  else
    {
      /* PREV may have a tailcall caller, NEXT can't.  If it does, fixup the up
	 link to add the tail callers to NEXT's back trace.

	 This removes NEXT->UP from NEXT's back trace.  It will be added back
	 when connecting NEXT and PREV's callers - provided they exist.

	 If PREV's back trace consists of a series of tail calls without an
	 actual call, there will be no further connection and NEXT's caller
	 will be removed for good.  To catch this case, we handle it here and
	 connect the top of PREV's back trace to NEXT's caller.  */
      if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
	{
	  struct btrace_function *caller;
	  btrace_function_flags next_flags, prev_flags;

	  /* We checked NEXT->UP above so CALLER can't be NULL.  */
	  caller = ftrace_find_call_by_number (btinfo, next->up);
	  next_flags = next->flags;
	  prev_flags = prev->flags;

	  DEBUG_FTRACE ("adding prev's tail calls to next");

	  prev = ftrace_find_call_by_number (btinfo, prev->up);
	  ftrace_fixup_caller (btinfo, next, prev, prev_flags);

	  for (; prev != NULL; prev = ftrace_find_call_by_number (btinfo,
								  prev->up))
	    {
	      /* At the end of PREV's back trace, continue with CALLER.  */
	      if (prev->up == 0)
		{
		  DEBUG_FTRACE ("fixing up link for tailcall chain");
		  ftrace_debug (prev, "..top");
		  ftrace_debug (caller, "..up");

		  ftrace_fixup_caller (btinfo, prev, caller, next_flags);

		  /* If we skipped any tail calls, this may move CALLER to a
		     different function level.

		     Note that changing CALLER's level is only OK because we
		     know that this is the last iteration of the bottom-to-top
		     walk in ftrace_connect_backtrace.

		     Otherwise we will fix up CALLER's level when we connect it
		     to PREV's caller in the next iteration.  */
		  ftrace_fixup_level (btinfo, caller,
				      prev->level - caller->level - 1);
		  break;
		}

	      /* There's nothing to do if we find a real call.  */
	      if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
		{
		  DEBUG_FTRACE ("will fix up link in next iteration");
		  break;
		}
	    }
	}
    }
}
/* Connect function segments on the same level in the back trace at LHS and
   RHS.  The back traces at LHS and RHS are expected to match according to
   ftrace_match_backtrace.  BTINFO is the branch trace information for the
   current thread.  */

static void
ftrace_connect_backtrace (struct btrace_thread_info *btinfo,
			  struct btrace_function *lhs,
			  struct btrace_function *rhs)
{
  while (lhs != NULL && rhs != NULL)
    {
      struct btrace_function *prev, *next;

      gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));

      /* Connecting LHS and RHS may change the up link.  */
      prev = lhs;
      next = rhs;

      lhs = ftrace_get_caller (btinfo, lhs);
      rhs = ftrace_get_caller (btinfo, rhs);

      ftrace_connect_bfun (btinfo, prev, next);
    }
}
/* Bridge the gap between two function segments left and right of a gap if
   their respective back traces match in at least MIN_MATCHES functions.
   BTINFO is the branch trace information for the current thread.

   Returns non-zero if the gap could be bridged, zero otherwise.  */

static int
ftrace_bridge_gap (struct btrace_thread_info *btinfo,
		   struct btrace_function *lhs, struct btrace_function *rhs,
		   int min_matches)
{
  struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
  int best_matches;

  DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
		rhs->insn_offset - 1, min_matches);

  best_matches = 0;
  best_l = NULL;
  best_r = NULL;

  /* We search the back traces of LHS and RHS for valid connections and connect
     the two function segments that give the longest combined back trace.  */

  for (cand_l = lhs; cand_l != NULL;
       cand_l = ftrace_get_caller (btinfo, cand_l))
    for (cand_r = rhs; cand_r != NULL;
	 cand_r = ftrace_get_caller (btinfo, cand_r))
      {
	int matches;

	matches = ftrace_match_backtrace (btinfo, cand_l, cand_r);
	if (best_matches < matches)
	  {
	    best_matches = matches;
	    best_l = cand_l;
	    best_r = cand_r;
	  }
      }

  /* We need at least MIN_MATCHES matches.  */
  gdb_assert (min_matches > 0);
  if (best_matches < min_matches)
    return 0;

  DEBUG_FTRACE ("..matches: %d", best_matches);

  /* We will fix up the level of BEST_R and succeeding function segments such
     that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.

     This will ignore the level of RHS and following if BEST_R != RHS.  I.e. if
     BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).

     To catch this, we already fix up the level here where we can start at RHS
     instead of at BEST_R.  We will ignore the level fixup when connecting
     BEST_L to BEST_R as they will already be on the same level.  */
  ftrace_fixup_level (btinfo, rhs, best_l->level - best_r->level);

  ftrace_connect_backtrace (btinfo, best_l, best_r);

  return best_matches;
}
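/* For example: with MIN_MATCHES == 5 and back traces that agree in at least
   five frames on both sides of the gap, the candidate pair with the longest
   match wins and the traces are connected at that pair.  With only four
   agreeing frames, the gap is left open for a later, less demanding
   iteration of btrace_bridge_gaps below.  */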
/* Try to bridge gaps due to overflow or decode errors by connecting the
   function segments that are separated by the gap.  */

static void
btrace_bridge_gaps (struct thread_info *tp, std::vector<unsigned int> &gaps)
{
  struct btrace_thread_info *btinfo = &tp->btrace;
  std::vector<unsigned int> remaining;
  int min_matches;

  DEBUG ("bridge gaps");

  /* We require a minimum amount of matches for bridging a gap.  The number of
     required matches will be lowered with each iteration.

     The more matches the higher our confidence that the bridging is correct.
     For big gaps or small traces, however, it may not be feasible to require a
     high number of matches.  */
  for (min_matches = 5; min_matches > 0; --min_matches)
    {
      /* Let's try to bridge as many gaps as we can.  In some cases, we need to
	 skip a gap and revisit it again after we closed later gaps.  */
      while (!gaps.empty ())
	{
	  for (const unsigned int number : gaps)
	    {
	      struct btrace_function *gap, *lhs, *rhs;
	      int bridged;

	      gap = ftrace_find_call_by_number (btinfo, number);

	      /* We may have a sequence of gaps if we run from one error into
		 the next as we try to re-sync onto the trace stream.  Ignore
		 all but the leftmost gap in such a sequence.

		 Also ignore gaps at the beginning of the trace.  */
	      lhs = ftrace_find_call_by_number (btinfo, gap->number - 1);
	      if (lhs == NULL || lhs->errcode != 0)
		continue;

	      /* Skip gaps to the right.  */
	      rhs = ftrace_find_call_by_number (btinfo, gap->number + 1);
	      while (rhs != NULL && rhs->errcode != 0)
		rhs = ftrace_find_call_by_number (btinfo, rhs->number + 1);

	      /* Ignore gaps at the end of the trace.  */
	      if (rhs == NULL)
		continue;

	      bridged = ftrace_bridge_gap (btinfo, lhs, rhs, min_matches);

	      /* Keep track of gaps we were not able to bridge and try again.
		 If we just pushed them to the end of GAPS we would risk an
		 infinite loop in case we simply cannot bridge a gap.  */
	      if (bridged == 0)
		remaining.push_back (number);
	    }

	  /* Let's see if we made any progress.  */
	  if (remaining.size () == gaps.size ())
	    break;

	  gaps.clear ();
	  gaps.swap (remaining);
	}

      /* We get here if either GAPS is empty or if GAPS equals REMAINING.  */
      if (gaps.empty ())
	break;

      remaining.clear ();
    }

  /* We may omit this in some cases.  Not sure it is worth the extra
     complication, though.  */
  ftrace_compute_global_level_offset (btinfo);
}
/* Compute the function branch trace from BTS trace.  */

static void
btrace_compute_ftrace_bts (struct thread_info *tp,
			   const struct btrace_data_bts *btrace,
			   std::vector<unsigned int> &gaps)
{
  /* We may end up doing target calls that require the current thread to be TP,
     for example reading memory through gdb_insn_length.  Make sure TP is the
     current thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (tp);

  struct btrace_thread_info *btinfo;
  unsigned int blk;
  int level;

  gdbarch *gdbarch = current_inferior ()->arch ();
  btinfo = &tp->btrace;
  blk = btrace->blocks->size ();

  if (btinfo->functions.empty ())
    level = INT_MAX;
  else
    level = -btinfo->level;

  while (blk != 0)
    {
      CORE_ADDR pc;

      blk -= 1;

      const btrace_block &block = btrace->blocks->at (blk);
      pc = block.begin;

      for (;;)
	{
	  struct btrace_function *bfun;
	  struct btrace_insn insn;
	  int size;

	  /* We should hit the end of the block.  Warn if we went too far.  */
	  if (block.end < pc)
	    {
	      /* Indicate the gap in the trace.  */
	      bfun = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW, gaps);

	      warning (_("Recorded trace may be corrupted at instruction "
			 "%u (pc = %s)."), bfun->insn_offset - 1,
		       core_addr_to_string_nz (pc));

	      break;
	    }

	  bfun = ftrace_update_function (btinfo, pc);

	  /* Maintain the function level offset.
	     For all but the last block, we do it here.  */
	  if (blk != 0)
	    level = std::min (level, bfun->level);

	  size = 0;
	  try
	    {
	      size = gdb_insn_length (gdbarch, pc);
	    }
	  catch (const gdb_exception_error &error)
	    {
	    }

	  insn.pc = pc;
	  insn.size = size;
	  insn.iclass = ftrace_classify_insn (gdbarch, pc);
	  insn.flags = 0;

	  ftrace_update_insns (bfun, insn);

	  /* We're done once we pushed the instruction at the end.  */
	  if (block.end == pc)
	    break;

	  /* We can't continue if we fail to compute the size.  */
	  if (size <= 0)
	    {
	      /* Indicate the gap in the trace.  We just added INSN so we're
		 not at the beginning.  */
	      bfun = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE, gaps);

	      warning (_("Recorded trace may be incomplete at instruction %u "
			 "(pc = %s)."), bfun->insn_offset - 1,
		       core_addr_to_string_nz (pc));

	      break;
	    }

	  pc += size;

	  /* Maintain the function level offset.
	     For the last block, we do it here to not consider the last
	     instruction.
	     Since the last instruction corresponds to the current instruction
	     and is not really part of the execution history, it shouldn't
	     affect the level.  */
	  if (blk == 0)
	    level = std::min (level, bfun->level);
	}
    }

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  btinfo->level = -level;
}
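/* For example: BTS records branch source/destination pairs, so a block
   [begin, end] above is expanded into individual instructions by repeatedly
   asking gdb_insn_length for the size at PC and stepping PC forward until
   the block's end address is reached.  Blocks are stored most recent first,
   which is why the walk starts at the last vector index.  */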
#if defined (HAVE_LIBIPT)

static enum btrace_insn_class
pt_reclassify_insn (enum pt_insn_class iclass)
{
  switch (iclass)
    {
    case ptic_call:
      return BTRACE_INSN_CALL;

    case ptic_return:
      return BTRACE_INSN_RETURN;

    case ptic_jump:
      return BTRACE_INSN_JUMP;

    default:
      return BTRACE_INSN_OTHER;
    }
}

/* Return the btrace instruction flags for INSN.  */

static btrace_insn_flags
pt_btrace_insn_flags (const struct pt_insn &insn)
{
  btrace_insn_flags flags = 0;

  if (insn.speculative)
    flags |= BTRACE_INSN_FLAG_SPECULATIVE;

  return flags;
}

/* Return the btrace instruction for INSN.  */

static btrace_insn
pt_btrace_insn (const struct pt_insn &insn)
{
  return {(CORE_ADDR) insn.ip, (gdb_byte) insn.size,
	  pt_reclassify_insn (insn.iclass),
	  pt_btrace_insn_flags (insn)};
}

#if defined (HAVE_PT_INSN_EVENT)
/* Helper for events that will result in an aux_insn.  */

static void
handle_pt_aux_insn (btrace_thread_info *btinfo, btrace_function *bfun,
		    std::string &aux_str, CORE_ADDR ip)
{
  btinfo->aux_data.emplace_back (std::move (aux_str));
  bfun = ftrace_update_function (btinfo, ip);

  btrace_insn insn {btinfo->aux_data.size () - 1, 0,
		    BTRACE_INSN_AUX, 0};

  ftrace_update_insns (bfun, insn);
}

#endif /* defined (HAVE_PT_INSN_EVENT) */
/* Handle instruction decode events (libipt-v2).  */

static int
handle_pt_insn_events (struct btrace_thread_info *btinfo,
		       struct pt_insn_decoder *decoder,
		       std::vector<unsigned int> &gaps, int status)
{
#if defined (HAVE_PT_INSN_EVENT)
  while (status & pts_event_pending)
    {
      struct btrace_function *bfun;
      struct pt_event event;
      uint64_t offset;

      status = pt_insn_event (decoder, &event, sizeof (event));
      if (status < 0)
	break;

      switch (event.type)
	{
	default:
	  break;

	case ptev_enabled:
	  if (event.status_update != 0)
	    break;

	  if (event.variant.enabled.resumed == 0 && !btinfo->functions.empty ())
	    {
	      bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);

	      pt_insn_get_offset (decoder, &offset);

	      warning (_("Non-contiguous trace at instruction %u (offset = 0x%"
			 PRIx64 ")."), bfun->insn_offset - 1, offset);
	    }

	  break;

	case ptev_overflow:
	  bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);

	  pt_insn_get_offset (decoder, &offset);

	  warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ")."),
		   bfun->insn_offset - 1, offset);

	  break;

#if defined (HAVE_STRUCT_PT_EVENT_VARIANT_PTWRITE)
	case ptev_ptwrite:
	  {
	    uint64_t pc = 0;
	    std::optional<std::string> ptw_string;

	    /* Lookup the PC if available.  The event often doesn't provide
	       one, so we look into the last function segment as well.
	       Looking further back makes limited sense for ptwrite.  */
	    if (event.ip_suppressed == 0)
	      pc = event.variant.ptwrite.ip;
	    else if (!btinfo->functions.empty ())
	      {
		std::vector<btrace_insn> &insns
		  = btinfo->functions.back ().insn;
		for (auto insn = insns.rbegin (); insn != insns.rend ();
		     ++insn)
		  {
		    switch (insn->iclass)
		      {
		      case BTRACE_INSN_AUX:
			continue;

		      case BTRACE_INSN_OTHER:
		      case BTRACE_INSN_CALL:
		      case BTRACE_INSN_RETURN:
		      case BTRACE_INSN_JUMP:
			pc = insn->pc;
			break;
		      /* No default to rely on compiler warnings.  */
		      }
		    break;
		  }
	      }

	    if (pc == 0)
	      warning (_("Failed to determine the PC for ptwrite."));

	    if (btinfo->ptw_callback_fun != nullptr)
	      ptw_string
		= btinfo->ptw_callback_fun (event.variant.ptwrite.payload,
					    pc, btinfo->ptw_context);

	    if (ptw_string.has_value () && (*ptw_string).empty ())
	      continue;

	    if (!ptw_string.has_value ())
	      ptw_string = hex_string (event.variant.ptwrite.payload);

	    handle_pt_aux_insn (btinfo, bfun, *ptw_string, pc);

	    break;
	  }
#endif /* defined (HAVE_STRUCT_PT_EVENT_VARIANT_PTWRITE) */
	}
    }
#endif /* defined (HAVE_PT_INSN_EVENT) */

  return status;
}
/* Handle events indicated by flags in INSN (libipt-v1).  */

static void
handle_pt_insn_event_flags (struct btrace_thread_info *btinfo,
			    struct pt_insn_decoder *decoder,
			    const struct pt_insn &insn,
			    std::vector<unsigned int> &gaps)
{
#if defined (HAVE_STRUCT_PT_INSN_ENABLED)
  /* Tracing is disabled and re-enabled each time we enter the kernel.  Most
     times, we continue from the same instruction we stopped before.  This is
     indicated via the RESUMED instruction flag.  The ENABLED instruction flag
     means that we continued from some other instruction.  Indicate this as a
     trace gap except when tracing just started.  */
  if (insn.enabled && !btinfo->functions.empty ())
    {
      struct btrace_function *bfun;
      uint64_t offset;

      bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);

      pt_insn_get_offset (decoder, &offset);

      warning (_("Non-contiguous trace at instruction %u (offset = 0x%" PRIx64
		 ", pc = 0x%" PRIx64 ")."), bfun->insn_offset - 1, offset,
	       insn.ip);
    }
#endif /* defined (HAVE_STRUCT_PT_INSN_ENABLED) */

#if defined (HAVE_STRUCT_PT_INSN_RESYNCED)
  /* Indicate trace overflows.  */
  if (insn.resynced)
    {
      struct btrace_function *bfun;
      uint64_t offset;

      bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);

      pt_insn_get_offset (decoder, &offset);

      warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ", pc = 0x%"
		 PRIx64 ")."), bfun->insn_offset - 1, offset, insn.ip);
    }
#endif /* defined (HAVE_STRUCT_PT_INSN_RESYNCED) */
}
/* Add function branch trace to BTINFO using DECODER.  */

static void
ftrace_add_pt (struct btrace_thread_info *btinfo,
	       struct pt_insn_decoder *decoder,
	       int *plevel,
	       std::vector<unsigned int> &gaps)
{
  struct btrace_function *bfun;
  uint64_t offset;
  int status;

  /* Register the ptwrite filter.  */
  apply_ext_lang_ptwrite_filter (btinfo);

  for (;;)
    {
      struct pt_insn insn;

      status = pt_insn_sync_forward (decoder);
      if (status < 0)
	{
	  if (status != -pte_eos)
	    warning (_("Failed to synchronize onto the Intel Processor "
		       "Trace stream: %s."), pt_errstr (pt_errcode (status)));
	  break;
	}

      for (;;)
	{
	  /* Handle events from the previous iteration or synchronization.  */
	  status = handle_pt_insn_events (btinfo, decoder, gaps, status);
	  if (status < 0)
	    break;

	  status = pt_insn_next (decoder, &insn, sizeof (insn));
	  if (status < 0)
	    break;

	  /* Handle events indicated by flags in INSN.  */
	  handle_pt_insn_event_flags (btinfo, decoder, insn, gaps);

	  bfun = ftrace_update_function (btinfo, insn.ip);

	  /* Maintain the function level offset.  */
	  *plevel = std::min (*plevel, bfun->level);

	  ftrace_update_insns (bfun, pt_btrace_insn (insn));
	}

      if (status == -pte_eos)
	break;

      /* Indicate the gap in the trace.  */
      bfun = ftrace_new_gap (btinfo, status, gaps);

      pt_insn_get_offset (decoder, &offset);

      warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
		 ", pc = 0x%" PRIx64 "): %s."), status, bfun->insn_offset - 1,
	       offset, insn.ip, pt_errstr (pt_errcode (status)));
    }
}
/* A callback function to allow the trace decoder to read the inferior's
   memory.  */

static int
btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
			    const struct pt_asid *asid, uint64_t pc,
			    void *context)
{
  int result, errcode;

  result = (int) size;
  try
    {
      errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
      if (errcode != 0)
	result = -pte_nomap;
    }
  catch (const gdb_exception_error &error)
    {
      result = -pte_nomap;
    }

  return result;
}

/* Translate the vendor from one enum to another.  */

static enum pt_cpu_vendor
pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
{
  switch (vendor)
    {
    default:
      return pcv_unknown;

    case CV_INTEL:
      return pcv_intel;
    }
}
/* Finalize the function branch trace after decode.  */

static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
				       struct thread_info *tp, int level)
{
  pt_insn_free_decoder (decoder);

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  tp->btrace.level = -level;

  /* Add a single last instruction entry for the current PC.
     This allows us to compute the backtrace at the current PC using both
     standard unwind and btrace unwind.
     This extra entry is ignored by all record commands.  */
  btrace_add_pc (tp);
}
/* Compute the function branch trace from Intel Processor Trace
   format.  */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
			  const struct btrace_data_pt *btrace,
			  std::vector<unsigned int> &gaps)
{
  /* We may end up doing target calls that require the current thread to be TP,
     for example reading memory through btrace_pt_readmem_callback.  Make sure
     TP is the current thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (tp);

  struct btrace_thread_info *btinfo;
  struct pt_insn_decoder *decoder;
  struct pt_config config;
  int level, errcode;

  if (btrace->size == 0)
    return;

  btinfo = &tp->btrace;
  if (btinfo->functions.empty ())
    level = INT_MAX;
  else
    level = -btinfo->level;

  pt_config_init (&config);
  config.begin = btrace->data;
  config.end = btrace->data + btrace->size;

  /* We treat an unknown vendor as 'no errata'.  */
  if (btrace->config.cpu.vendor != CV_UNKNOWN)
    {
      config.cpu.vendor
	= pt_translate_cpu_vendor (btrace->config.cpu.vendor);
      config.cpu.family = btrace->config.cpu.family;
      config.cpu.model = btrace->config.cpu.model;
      config.cpu.stepping = btrace->config.cpu.stepping;

      errcode = pt_cpu_errata (&config.errata, &config.cpu);
      if (errcode < 0)
	error (_("Failed to configure the Intel Processor Trace "
		 "decoder: %s."), pt_errstr (pt_errcode (errcode)));
    }

  decoder = pt_insn_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel Processor Trace decoder."));

  try
    {
      struct pt_image *image;

      image = pt_insn_get_image (decoder);
      if (image == NULL)
	error (_("Failed to configure the Intel Processor Trace decoder."));

      errcode = pt_image_set_callback (image, btrace_pt_readmem_callback,
				       NULL);
      if (errcode < 0)
	error (_("Failed to configure the Intel Processor Trace decoder: "
		 "%s."), pt_errstr (pt_errcode (errcode)));

      ftrace_add_pt (btinfo, decoder, &level, gaps);
    }
  catch (const gdb_exception &error)
    {
      /* Indicate a gap in the trace if we quit trace processing.  */
      if (error.reason == RETURN_QUIT && !btinfo->functions.empty ())
	ftrace_new_gap (btinfo, BDE_PT_USER_QUIT, gaps);

      btrace_finalize_ftrace_pt (decoder, tp, level);

      throw;
    }

  btrace_finalize_ftrace_pt (decoder, tp, level);
}
#else /* defined (HAVE_LIBIPT)  */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
			  const struct btrace_data_pt *btrace,
			  std::vector<unsigned int> &gaps)
{
  internal_error (_("Unexpected branch trace format."));
}

#endif /* defined (HAVE_LIBIPT)  */
/* Compute the function branch trace from a block branch trace BTRACE for
   a thread given by BTINFO.  If CPU is not NULL, overwrite the cpu in the
   branch trace configuration.  This is currently only used for the PT
   format.  */

static void
btrace_compute_ftrace_1 (struct thread_info *tp,
			 struct btrace_data *btrace,
			 const struct btrace_cpu *cpu,
			 std::vector<unsigned int> &gaps)
{
  DEBUG ("compute ftrace");

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
      return;

    case BTRACE_FORMAT_PT:
      /* Overwrite the cpu we use for enabling errata workarounds.  */
      if (cpu != nullptr)
	btrace->variant.pt.config.cpu = *cpu;

      btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
      return;
    }

  internal_error (_("Unknown branch trace format."));
}
static void
btrace_finalize_ftrace (struct thread_info *tp, std::vector<unsigned int> &gaps)
{
  if (!gaps.empty ())
    {
      tp->btrace.ngaps += gaps.size ();
      btrace_bridge_gaps (tp, gaps);
    }
}

static void
btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace,
		       const struct btrace_cpu *cpu)
{
  std::vector<unsigned int> gaps;

  try
    {
      btrace_compute_ftrace_1 (tp, btrace, cpu, gaps);
    }
  catch (const gdb_exception &error)
    {
      btrace_finalize_ftrace (tp, gaps);

      throw;
    }

  btrace_finalize_ftrace (tp, gaps);
}
/* Add an entry for the current PC.  */

static void
btrace_add_pc (struct thread_info *tp)
{
  struct btrace_data btrace;
  struct regcache *regcache;
  CORE_ADDR pc;

  regcache = get_thread_regcache (tp);
  pc = regcache_read_pc (regcache);

  btrace.format = BTRACE_FORMAT_BTS;
  btrace.variant.bts.blocks = new std::vector<btrace_block>;

  btrace.variant.bts.blocks->emplace_back (pc, pc);

  btrace_compute_ftrace (tp, &btrace, NULL);
}
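/* For example: the synthetic BTS block created above begins and ends at the
   same PC, so btrace_compute_ftrace_bts decodes it into a single-instruction
   entry.  This is the extra entry for the current PC that the record
   commands ignore and that ftrace_compute_global_level_offset skips when
   the last segment holds exactly one instruction.  */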
/* See btrace.h.  */

void
btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
{
  if (tp->btrace.target != NULL)
    error (_("Recording already enabled on thread %s (%s)."),
	   print_thread_id (tp), target_pid_to_str (tp->ptid).c_str ());

#if !defined (HAVE_LIBIPT)
  if (conf->format == BTRACE_FORMAT_PT)
    error (_("Intel Processor Trace support was disabled at compile time."));
#endif /* !defined (HAVE_LIBIPT) */

  DEBUG ("enable thread %s (%s)", print_thread_id (tp),
	 tp->ptid.to_string ().c_str ());

  tp->btrace.target = target_enable_btrace (tp, conf);

  if (tp->btrace.target == NULL)
    error (_("Failed to enable recording on thread %s (%s)."),
	   print_thread_id (tp), target_pid_to_str (tp->ptid).c_str ());

  /* We need to undo the enable in case of errors.  */
  try
    {
      /* Add an entry for the current PC so we start tracing from where we
	 enabled it.

	 If we can't access TP's registers, TP is most likely running.  In this
	 case, we can't really say where tracing was enabled so it should be
	 safe to simply skip this step.

	 This is not relevant for BTRACE_FORMAT_PT since the trace will already
	 start at the PC at which tracing was enabled.  */
      if (conf->format != BTRACE_FORMAT_PT
	  && can_access_registers_thread (tp))
	btrace_add_pc (tp);
    }
  catch (const gdb_exception &exception)
    {
      btrace_disable (tp);

      throw;
    }
}
/* See btrace.h.  */

const struct btrace_config *
btrace_conf (const struct btrace_thread_info *btinfo)
{
  if (btinfo->target == NULL)
    return NULL;

  return target_btrace_conf (btinfo->target);
}

/* See btrace.h.  */

void
btrace_disable (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;

  if (btp->target == NULL)
    error (_("Recording not enabled on thread %s (%s)."),
	   print_thread_id (tp), target_pid_to_str (tp->ptid).c_str ());

  DEBUG ("disable thread %s (%s)", print_thread_id (tp),
	 tp->ptid.to_string ().c_str ());

  target_disable_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* See btrace.h.  */

void
btrace_teardown (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;

  if (btp->target == NULL)
    return;

  DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
	 tp->ptid.to_string ().c_str ());

  target_teardown_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}
/* Stitch branch trace in BTS format.  */

static int
btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *last_bfun;
  btrace_block *first_new_block;

  btinfo = &tp->btrace;
  gdb_assert (!btinfo->functions.empty ());
  gdb_assert (!btrace->blocks->empty ());

  last_bfun = &btinfo->functions.back ();

  /* If the existing trace ends with a gap, we just glue the traces
     together.  We need to drop the last (i.e. chronologically first) block
     of the new trace, though, since we can't fill in the start address.  */
  if (last_bfun->insn.empty ())
    {
      btrace->blocks->pop_back ();
      return 0;
    }

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector.  */
  first_new_block = &btrace->blocks->back ();
  const btrace_insn &last_insn = last_bfun->insn.back ();

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
       1. we executed the instruction and some branch brought us back.
       2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC.  Remove it.  */
  if (first_new_block->end == last_insn.pc && btrace->blocks->size () == 1)
    {
      btrace->blocks->pop_back ();
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (&last_insn),
	 core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block.  This should not occur in practice.  */
  if (first_new_block->end < last_insn.pc)
    {
      warning (_("Error while trying to read delta trace.  Falling back to "
		 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace.  */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn.pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling.  */
  DEBUG ("pruning insn at %s for stitching",
	 ftrace_print_insn_addr (&last_insn));

  last_bfun->insn.pop_back ();

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace.  */

  /* The only case where this would hurt is if the entire trace consisted
     of just that one instruction.  If we remove it, we might turn the now
     empty btrace function segment into a gap.  But we don't want gaps at
     the beginning.  To avoid this, we remove the entire old trace.  */
  if (last_bfun->number == 1 && last_bfun->insn.empty ())
    btrace_clear (tp);

  return 0;
}
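/* For example: if the old trace ends with the instruction at PC 0x1000 and
   the delta trace's chronologically first block is [0, 0x1004] (start
   address unknown), stitching rewrites that block to [0x1000, 0x1004] and
   pops the old trailing instruction, so re-decoding the block re-inserts
   the instruction at 0x1000 exactly once and the traces join without
   duplication.  */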
/* Adjust the block trace in order to stitch old and new trace together.
   BTRACE is the new delta trace between the last and the current stop.
   TP is the traced thread.
   May modify BTRACE as well as the existing trace in TP.
   Return 0 on success, -1 otherwise.  */

static int
btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
{
  /* If we don't have trace, there's nothing to do.  */
  if (btrace->empty ())
    return 0;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return btrace_stitch_bts (&btrace->variant.bts, tp);

    case BTRACE_FORMAT_PT:
      /* Delta reads are not supported.  */
      return -1;
    }

  internal_error (_("Unknown branch trace format."));
}
/* Clear the branch trace histories in BTINFO.  */

static void
btrace_clear_history (struct btrace_thread_info *btinfo)
{
  xfree (btinfo->insn_history);
  xfree (btinfo->call_history);
  xfree (btinfo->replay);

  btinfo->insn_history = NULL;
  btinfo->call_history = NULL;
  btinfo->replay = NULL;

  btinfo->aux_data.clear ();
}

/* Clear the branch trace maintenance histories in BTINFO.  */

static void
btrace_maint_clear (struct btrace_thread_info *btinfo)
{
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      btinfo->maint.variant.bts.packet_history.begin = 0;
      btinfo->maint.variant.bts.packet_history.end = 0;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      delete btinfo->maint.variant.pt.packets;

      btinfo->maint.variant.pt.packets = NULL;
      btinfo->maint.variant.pt.packet_history.begin = 0;
      btinfo->maint.variant.pt.packet_history.end = 0;
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
/* See btrace.h.  */

const char *
btrace_decode_error (enum btrace_format format, int errcode)
{
  switch (format)
    {
    case BTRACE_FORMAT_BTS:
      switch (errcode)
	{
	case BDE_BTS_OVERFLOW:
	  return _("instruction overflow");

	case BDE_BTS_INSN_SIZE:
	  return _("unknown instruction");

	default:
	  break;
	}
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      switch (errcode)
	{
	case BDE_PT_USER_QUIT:
	  return _("trace decode cancelled");

	case BDE_PT_DISABLED:
	  return _("disabled");

	case BDE_PT_OVERFLOW:
	  return _("overflow");

	default:
	  if (errcode < 0)
	    return pt_errstr (pt_errcode (errcode));
	  break;
	}
      break;
#endif /* defined (HAVE_LIBIPT) */

    default:
      break;
    }

  return _("unknown");
}
1982 /* See btrace.h. */
1984 void
1985 btrace_fetch (struct thread_info *tp, const struct btrace_cpu *cpu)
1987 struct btrace_thread_info *btinfo;
1988 struct btrace_target_info *tinfo;
1989 struct btrace_data btrace;
1990 int errcode;
1992 DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
1993 tp->ptid.to_string ().c_str ());
1995 btinfo = &tp->btrace;
1996 tinfo = btinfo->target;
1997 if (tinfo == NULL)
1998 return;
2000 /* There's no way we could get new trace while replaying.
2001 On the other hand, delta trace would return a partial record with the
2002 current PC, which is the replay PC, not the last PC, as expected. */
2003 if (btinfo->replay != NULL)
2004 return;
2006 /* With CLI usage, TP is always the current thread when we get here.
2007 However, since we can also store a gdb.Record object in Python
2008 referring to a different thread than the current one, we need to
2009 temporarily set the current thread. */
2010 scoped_restore_current_thread restore_thread;
2011 switch_to_thread (tp);
2013 /* We should not be called on running or exited threads. */
2014 gdb_assert (can_access_registers_thread (tp));
2016 /* Let's first try to extend the trace we already have. */
2017 if (!btinfo->functions.empty ())
2019 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
2020 if (errcode == 0)
2022 /* Success. Let's try to stitch the traces together. */
2023 errcode = btrace_stitch_trace (&btrace, tp);
2025 else
2027 /* We failed to read delta trace. Let's try to read new trace. */
2028 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);
2030 /* If we got any new trace, discard what we have. */
2031 if (errcode == 0 && !btrace.empty ())
2032 btrace_clear (tp);
2035 /* If we were not able to read the trace, we start over. */
2036 if (errcode != 0)
2038 btrace_clear (tp);
2039 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
2042 else
2043 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
2045 /* If we were not able to read the branch trace, signal an error. */
2046 if (errcode != 0)
2047 error (_("Failed to read branch trace."));
2049 /* Compute the trace, provided we have any. */
2050 if (!btrace.empty ())
2052 /* Store the raw trace data. The stored data will be cleared in
2053 btrace_clear, so we always append the new trace. */
2054 btrace_data_append (&btinfo->data, &btrace);
2055 btrace_maint_clear (btinfo);
2057 btrace_clear_history (btinfo);
2058 btrace_compute_ftrace (tp, &btrace, cpu);
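/* Usage sketch (illustrative only): a caller would typically fetch and then
   check whether anything was recorded, e.g.

     btrace_fetch (tp, record_btrace_get_cpu ());
     if (btrace_is_empty (tp))
       error (_("No trace."));
*/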
2062 /* See btrace.h. */
2064 void
2065 btrace_clear (struct thread_info *tp)
2067 struct btrace_thread_info *btinfo;
2069 DEBUG ("clear thread %s (%s)", print_thread_id (tp),
2070 tp->ptid.to_string ().c_str ());
2072 /* Make sure btrace frames that may hold a pointer into the branch
2073 trace data are destroyed. */
2074 reinit_frame_cache ();
2076 btinfo = &tp->btrace;
2078 btinfo->functions.clear ();
2079 btinfo->ngaps = 0;
2081 /* Clear the maint data first - it depends on BTINFO->DATA. */
2082 btrace_maint_clear (btinfo);
2083 btinfo->data.clear ();
2084 btrace_clear_history (btinfo);
2087 /* See btrace.h. */
2089 void
2090 btrace_free_objfile (struct objfile *objfile)
2092 DEBUG ("free objfile");
2094 for (thread_info *tp : all_non_exited_threads ())
2095 btrace_clear (tp);
2098 /* See btrace.h. */
2100 const struct btrace_insn *
2101 btrace_insn_get (const struct btrace_insn_iterator *it)
2103 const struct btrace_function *bfun;
2104 unsigned int index, end;
2106 index = it->insn_index;
2107 bfun = &it->btinfo->functions[it->call_index];
2109 /* Check if the iterator points to a gap in the trace. */
2110 if (bfun->errcode != 0)
2111 return NULL;
2113 /* The index is within the bounds of this function's instruction vector. */
2114 end = bfun->insn.size ();
2115 gdb_assert (0 < end);
2116 gdb_assert (index < end);
2118 return &bfun->insn[index];
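/* Example (illustrative sketch): readers must handle the gap case, in
   which btrace_insn_get returns NULL:

     const struct btrace_insn *insn = btrace_insn_get (&it);
     if (insn == NULL)
       warning (_("decode error (%d)"), btrace_insn_get_error (&it));
     else
       gdb_printf ("%s\n", core_addr_to_string_nz (insn->pc));
*/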
2121 /* See btrace.h. */
2123 int
2124 btrace_insn_get_error (const struct btrace_insn_iterator *it)
2126 return it->btinfo->functions[it->call_index].errcode;
2129 /* See btrace.h. */
2131 unsigned int
2132 btrace_insn_number (const struct btrace_insn_iterator *it)
2134 return it->btinfo->functions[it->call_index].insn_offset + it->insn_index;
2137 /* See btrace.h. */
2139 void
2140 btrace_insn_begin (struct btrace_insn_iterator *it,
2141 const struct btrace_thread_info *btinfo)
2143 if (btinfo->functions.empty ())
2144 error (_("No trace."));
2146 it->btinfo = btinfo;
2147 it->call_index = 0;
2148 it->insn_index = 0;
2151 /* See btrace.h. */
2153 void
2154 btrace_insn_end (struct btrace_insn_iterator *it,
2155 const struct btrace_thread_info *btinfo)
2157 const struct btrace_function *bfun;
2158 unsigned int length;
2160 if (btinfo->functions.empty ())
2161 error (_("No trace."));
2163 bfun = &btinfo->functions.back ();
2164 length = bfun->insn.size ();
2166 /* The last function segment is either a gap or contains the current
2167 instruction, which is one past the end of the execution trace; ignore
2168 it. */
2169 if (length > 0)
2170 length -= 1;
2172 it->btinfo = btinfo;
2173 it->call_index = bfun->number - 1;
2174 it->insn_index = length;
2177 /* See btrace.h. */
2179 unsigned int
2180 btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
2182 const struct btrace_function *bfun;
2183 unsigned int index, steps;
2185 bfun = &it->btinfo->functions[it->call_index];
2186 steps = 0;
2187 index = it->insn_index;
2189 while (stride != 0)
2191 unsigned int end, space, adv;
2193 end = bfun->insn.size ();
2195 /* An empty function segment represents a gap in the trace. We count
2196 it as one instruction. */
2197 if (end == 0)
2199 const struct btrace_function *next;
2201 next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
2202 if (next == NULL)
2203 break;
2205 stride -= 1;
2206 steps += 1;
2208 bfun = next;
2209 index = 0;
2211 continue;
2214 gdb_assert (0 < end);
2215 gdb_assert (index < end);
2217 /* Compute the number of instructions remaining in this segment. */
2218 space = end - index;
2220 /* Advance the iterator as far as possible within this segment. */
2221 adv = std::min (space, stride);
2222 stride -= adv;
2223 index += adv;
2224 steps += adv;
2226 /* Move to the next function if we're at the end of this one. */
2227 if (index == end)
2229 const struct btrace_function *next;
2231 next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
2232 if (next == NULL)
2234 /* We stepped past the last function.
2236 Let's adjust the index to point to the last instruction in
2237 the previous function. */
2238 index -= 1;
2239 steps -= 1;
2240 break;
2243 /* We now point to the first instruction in the new function. */
2244 bfun = next;
2245 index = 0;
2248 /* We did make progress. */
2249 gdb_assert (adv > 0);
2252 /* Update the iterator. */
2253 it->call_index = bfun->number - 1;
2254 it->insn_index = index;
2256 return steps;
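/* A complete forward walk over the recorded instructions might look like
   this (illustrative sketch, assuming BTINFO holds a non-empty trace):

     struct btrace_insn_iterator it;
     btrace_insn_begin (&it, btinfo);
     do
       {
         const struct btrace_insn *insn = btrace_insn_get (&it);
         if (insn != NULL)
           gdb_printf ("%s\n", core_addr_to_string_nz (insn->pc));
       }
     while (btrace_insn_next (&it, 1) != 0);
*/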
2259 /* See btrace.h. */
2261 unsigned int
2262 btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
2264 const struct btrace_function *bfun;
2265 unsigned int index, steps;
2267 bfun = &it->btinfo->functions[it->call_index];
2268 steps = 0;
2269 index = it->insn_index;
2271 while (stride != 0)
2273 unsigned int adv;
2275 /* Move to the previous function if we're at the start of this one. */
2276 if (index == 0)
2278 const struct btrace_function *prev;
2280 prev = ftrace_find_call_by_number (it->btinfo, bfun->number - 1);
2281 if (prev == NULL)
2282 break;
2284 /* We point to one after the last instruction in the new function. */
2285 bfun = prev;
2286 index = bfun->insn.size ();
2288 /* An empty function segment represents a gap in the trace. We count
2289 it as one instruction. */
2290 if (index == 0)
2292 stride -= 1;
2293 steps += 1;
2295 continue;
2299 /* Advance the iterator as far as possible within this segment. */
2300 adv = std::min (index, stride);
2302 stride -= adv;
2303 index -= adv;
2304 steps += adv;
2306 /* We did make progress. */
2307 gdb_assert (adv > 0);
2310 /* Update the iterator. */
2311 it->call_index = bfun->number - 1;
2312 it->insn_index = index;
2314 return steps;
2317 /* See btrace.h. */
2319 int
2320 btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
2321 const struct btrace_insn_iterator *rhs)
2323 gdb_assert (lhs->btinfo == rhs->btinfo);
2325 if (lhs->call_index != rhs->call_index)
2326 return lhs->call_index - rhs->call_index;
2328 return lhs->insn_index - rhs->insn_index;
2331 /* See btrace.h. */
2333 int
2334 btrace_find_insn_by_number (struct btrace_insn_iterator *it,
2335 const struct btrace_thread_info *btinfo,
2336 unsigned int number)
2338 const struct btrace_function *bfun;
2339 unsigned int upper, lower;
2341 if (btinfo->functions.empty ())
2342 return 0;
2344 lower = 0;
2345 bfun = &btinfo->functions[lower];
2346 if (number < bfun->insn_offset)
2347 return 0;
2349 upper = btinfo->functions.size () - 1;
2350 bfun = &btinfo->functions[upper];
2351 if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
2352 return 0;
2354 /* We assume that there are no holes in the numbering. */
2355 for (;;)
2357 const unsigned int average = lower + (upper - lower) / 2;
2359 bfun = &btinfo->functions[average];
2361 if (number < bfun->insn_offset)
2363 upper = average - 1;
2364 continue;
2367 if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
2369 lower = average + 1;
2370 continue;
2373 break;
2376 it->btinfo = btinfo;
2377 it->call_index = bfun->number - 1;
2378 it->insn_index = number - bfun->insn_offset;
2379 return 1;
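/* Usage sketch (illustrative): position an iterator at a given instruction
   number, e.g. one taken from the recorded instruction history:

     struct btrace_insn_iterator it;
     if (btrace_find_insn_by_number (&it, btinfo, number) == 0)
       error (_("Instruction number %u is out of range."), number);
*/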
2382 /* Returns true if the recording ends with a function segment that
2383 contains only a single (i.e. the current) instruction. */
2385 static bool
2386 btrace_ends_with_single_insn (const struct btrace_thread_info *btinfo)
2388 const btrace_function *bfun;
2390 if (btinfo->functions.empty ())
2391 return false;
2393 bfun = &btinfo->functions.back ();
2394 if (bfun->errcode != 0)
2395 return false;
2397 return ftrace_call_num_insn (bfun) == 1;
2400 /* See btrace.h. */
2402 const struct btrace_function *
2403 btrace_call_get (const struct btrace_call_iterator *it)
2405 if (it->index >= it->btinfo->functions.size ())
2406 return NULL;
2408 return &it->btinfo->functions[it->index];
2411 /* See btrace.h. */
2413 unsigned int
2414 btrace_call_number (const struct btrace_call_iterator *it)
2416 const unsigned int length = it->btinfo->functions.size ();
2418 /* If the last function segment contains only a single instruction (i.e. the
2419 current instruction), skip it. */
2420 if ((it->index == length) && btrace_ends_with_single_insn (it->btinfo))
2421 return length;
2423 return it->index + 1;
2426 /* See btrace.h. */
2428 void
2429 btrace_call_begin (struct btrace_call_iterator *it,
2430 const struct btrace_thread_info *btinfo)
2432 if (btinfo->functions.empty ())
2433 error (_("No trace."));
2435 it->btinfo = btinfo;
2436 it->index = 0;
2439 /* See btrace.h. */
2441 void
2442 btrace_call_end (struct btrace_call_iterator *it,
2443 const struct btrace_thread_info *btinfo)
2445 if (btinfo->functions.empty ())
2446 error (_("No trace."));
2448 it->btinfo = btinfo;
2449 it->index = btinfo->functions.size ();
2452 /* See btrace.h. */
2454 unsigned int
2455 btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
2457 const unsigned int length = it->btinfo->functions.size ();
2459 if (it->index + stride < length - 1)
2460 /* Default case: Simply advance the iterator. */
2461 it->index += stride;
2462 else if (it->index + stride == length - 1)
2464 /* We land exactly at the last function segment. If it contains only one
2465 instruction (i.e. the current instruction) it is not actually part of
2466 the trace. */
2467 if (btrace_ends_with_single_insn (it->btinfo))
2468 it->index = length;
2469 else
2470 it->index = length - 1;
2472 else
2474 /* We land past the last function segment and have to adjust the stride.
2475 If the last function segment contains only one instruction (i.e. the
2476 current instruction) it is not actually part of the trace. */
2477 if (btrace_ends_with_single_insn (it->btinfo))
2478 stride = length - it->index - 1;
2479 else
2480 stride = length - it->index;
2482 it->index = length;
2485 return stride;
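/* Walking the recorded call segments forward might look like this
   (illustrative sketch, assuming BTINFO holds a non-empty trace):

     struct btrace_call_iterator it;
     btrace_call_begin (&it, btinfo);
     while (btrace_call_get (&it) != NULL)
       {
         gdb_printf ("%u\n", btrace_call_number (&it));
         if (btrace_call_next (&it, 1) == 0)
           break;
       }
*/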
2488 /* See btrace.h. */
2490 unsigned int
2491 btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
2493 const unsigned int length = it->btinfo->functions.size ();
2494 int steps = 0;
2496 gdb_assert (it->index <= length);
2498 if (stride == 0 || it->index == 0)
2499 return 0;
2501 /* If we are at the end, the first step is a special case. If the last
2502 function segment contains only one instruction (i.e. the current
2503 instruction) it is not actually part of the trace. To be able to step
2504 over this instruction, we need at least one more function segment. */
2505 if ((it->index == length) && (length > 1))
2507 if (btrace_ends_with_single_insn (it->btinfo))
2508 it->index = length - 2;
2509 else
2510 it->index = length - 1;
2512 steps = 1;
2513 stride -= 1;
2516 stride = std::min (stride, it->index);
2518 it->index -= stride;
2519 return steps + stride;
2522 /* See btrace.h. */
2524 int
2525 btrace_call_cmp (const struct btrace_call_iterator *lhs,
2526 const struct btrace_call_iterator *rhs)
2528 gdb_assert (lhs->btinfo == rhs->btinfo);
2529 return (int) (lhs->index - rhs->index);
2532 /* See btrace.h. */
2534 int
2535 btrace_find_call_by_number (struct btrace_call_iterator *it,
2536 const struct btrace_thread_info *btinfo,
2537 unsigned int number)
2539 const unsigned int length = btinfo->functions.size ();
2541 if ((number == 0) || (number > length))
2542 return 0;
2544 it->btinfo = btinfo;
2545 it->index = number - 1;
2546 return 1;
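/* Usage sketch (illustrative): look up a function segment by its number
   in the recorded call history:

     struct btrace_call_iterator it;
     if (btrace_find_call_by_number (&it, btinfo, number) == 0)
       error (_("Function call number %u is out of range."), number);
*/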
2549 /* See btrace.h. */
2551 void
2552 btrace_set_insn_history (struct btrace_thread_info *btinfo,
2553 const struct btrace_insn_iterator *begin,
2554 const struct btrace_insn_iterator *end)
2556 if (btinfo->insn_history == NULL)
2557 btinfo->insn_history = XCNEW (struct btrace_insn_history);
2559 btinfo->insn_history->begin = *begin;
2560 btinfo->insn_history->end = *end;
2563 /* See btrace.h. */
2565 void
2566 btrace_set_call_history (struct btrace_thread_info *btinfo,
2567 const struct btrace_call_iterator *begin,
2568 const struct btrace_call_iterator *end)
2570 gdb_assert (begin->btinfo == end->btinfo);
2572 if (btinfo->call_history == NULL)
2573 btinfo->call_history = XCNEW (struct btrace_call_history);
2575 btinfo->call_history->begin = *begin;
2576 btinfo->call_history->end = *end;
2579 /* See btrace.h. */
2581 int
2582 btrace_is_replaying (struct thread_info *tp)
2584 return tp->btrace.replay != NULL;
2587 /* See btrace.h. */
2589 int
2590 btrace_is_empty (struct thread_info *tp)
2592 struct btrace_insn_iterator begin, end;
2593 struct btrace_thread_info *btinfo;
2595 btinfo = &tp->btrace;
2597 if (btinfo->functions.empty ())
2598 return 1;
2600 btrace_insn_begin (&begin, btinfo);
2601 btrace_insn_end (&end, btinfo);
2603 return btrace_insn_cmp (&begin, &end) == 0;
2606 #if defined (HAVE_LIBIPT)
2608 /* Print a single packet. */
2610 static void
2611 pt_print_packet (const struct pt_packet *packet)
2613 switch (packet->type)
2615 default:
2616 gdb_printf (("[??: %x]"), packet->type);
2617 break;
2619 case ppt_psb:
2620 gdb_printf (("psb"));
2621 break;
2623 case ppt_psbend:
2624 gdb_printf (("psbend"));
2625 break;
2627 case ppt_pad:
2628 gdb_printf (("pad"));
2629 break;
2631 case ppt_tip:
2632 gdb_printf (("tip %u: 0x%" PRIx64 ""),
2633 packet->payload.ip.ipc,
2634 packet->payload.ip.ip);
2635 break;
2637 case ppt_tip_pge:
2638 gdb_printf (("tip.pge %u: 0x%" PRIx64 ""),
2639 packet->payload.ip.ipc,
2640 packet->payload.ip.ip);
2641 break;
2643 case ppt_tip_pgd:
2644 gdb_printf (("tip.pgd %u: 0x%" PRIx64 ""),
2645 packet->payload.ip.ipc,
2646 packet->payload.ip.ip);
2647 break;
2649 case ppt_fup:
2650 gdb_printf (("fup %u: 0x%" PRIx64 ""),
2651 packet->payload.ip.ipc,
2652 packet->payload.ip.ip);
2653 break;
2655 case ppt_tnt_8:
2656 gdb_printf (("tnt-8 %u: 0x%" PRIx64 ""),
2657 packet->payload.tnt.bit_size,
2658 packet->payload.tnt.payload);
2659 break;
2661 case ppt_tnt_64:
2662 gdb_printf (("tnt-64 %u: 0x%" PRIx64 ""),
2663 packet->payload.tnt.bit_size,
2664 packet->payload.tnt.payload);
2665 break;
2667 case ppt_pip:
2668 gdb_printf (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
2669 packet->payload.pip.nr ? (" nr") : (""));
2670 break;
2672 case ppt_tsc:
2673 gdb_printf (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
2674 break;
2676 case ppt_cbr:
2677 gdb_printf (("cbr %u"), packet->payload.cbr.ratio);
2678 break;
2680 case ppt_mode:
2681 switch (packet->payload.mode.leaf)
2683 default:
2684 gdb_printf (("mode %u"), packet->payload.mode.leaf);
2685 break;
2687 case pt_mol_exec:
2688 gdb_printf (("mode.exec%s%s"),
2689 packet->payload.mode.bits.exec.csl
2690 ? (" cs.l") : (""),
2691 packet->payload.mode.bits.exec.csd
2692 ? (" cs.d") : (""));
2693 break;
2695 case pt_mol_tsx:
2696 gdb_printf (("mode.tsx%s%s"),
2697 packet->payload.mode.bits.tsx.intx
2698 ? (" intx") : (""),
2699 packet->payload.mode.bits.tsx.abrt
2700 ? (" abrt") : (""));
2701 break;
2703 break;
2705 case ppt_ovf:
2706 gdb_printf (("ovf"));
2707 break;
2709 case ppt_stop:
2710 gdb_printf (("stop"));
2711 break;
2713 case ppt_vmcs:
2714 gdb_printf (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
2715 break;
2717 case ppt_tma:
2718 gdb_printf (("tma %x %x"), packet->payload.tma.ctc,
2719 packet->payload.tma.fc);
2720 break;
2722 case ppt_mtc:
2723 gdb_printf (("mtc %x"), packet->payload.mtc.ctc);
2724 break;
2726 case ppt_cyc:
2727 gdb_printf (("cyc %" PRIx64 ""), packet->payload.cyc.value);
2728 break;
2730 case ppt_mnt:
2731 gdb_printf (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
2732 break;
2734 #if (LIBIPT_VERSION >= 0x200)
2735 case ppt_ptw:
2736 gdb_printf (("ptw %u: 0x%" PRIx64 "%s"), packet->payload.ptw.plc,
2737 packet->payload.ptw.payload,
2738 packet->payload.ptw.ip ? (" ip") : (""));
2739 break;
2740 #endif /* (LIBIPT_VERSION >= 0x200) */
2744 /* Decode packets into MAINT using DECODER. */
2746 static void
2747 btrace_maint_decode_pt (struct btrace_maint_info *maint,
2748 struct pt_packet_decoder *decoder)
2750 int errcode;
2752 if (maint->variant.pt.packets == NULL)
2753 maint->variant.pt.packets = new std::vector<btrace_pt_packet>;
2755 for (;;)
2757 struct btrace_pt_packet packet;
2759 errcode = pt_pkt_sync_forward (decoder);
2760 if (errcode < 0)
2761 break;
2763 for (;;)
2765 pt_pkt_get_offset (decoder, &packet.offset);
2767 errcode = pt_pkt_next (decoder, &packet.packet,
2768 sizeof (packet.packet));
2769 if (errcode < 0)
2770 break;
2772 if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
2774 packet.errcode = pt_errcode (errcode);
2775 maint->variant.pt.packets->push_back (packet);
2779 if (errcode == -pte_eos)
2780 break;
2782 packet.errcode = pt_errcode (errcode);
2783 maint->variant.pt.packets->push_back (packet);
2785 warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
2786 packet.offset, pt_errstr (packet.errcode));
2789 if (errcode != -pte_eos)
2790 warning (_("Failed to synchronize onto the Intel Processor Trace "
2791 "stream: %s."), pt_errstr (pt_errcode (errcode)));
2794 /* Update the packet history in BTINFO. */
2796 static void
2797 btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
2799 struct pt_packet_decoder *decoder;
2800 const struct btrace_cpu *cpu;
2801 struct btrace_data_pt *pt;
2802 struct pt_config config;
2803 int errcode;
2805 pt = &btinfo->data.variant.pt;
2807 /* Nothing to do if there is no trace. */
2808 if (pt->size == 0)
2809 return;
2811 memset (&config, 0, sizeof (config));
2813 config.size = sizeof (config);
2814 config.begin = pt->data;
2815 config.end = pt->data + pt->size;
2817 cpu = record_btrace_get_cpu ();
2818 if (cpu == nullptr)
2819 cpu = &pt->config.cpu;
2821 /* We treat an unknown vendor as 'no errata'. */
2822 if (cpu->vendor != CV_UNKNOWN)
2824 config.cpu.vendor = pt_translate_cpu_vendor (cpu->vendor);
2825 config.cpu.family = cpu->family;
2826 config.cpu.model = cpu->model;
2827 config.cpu.stepping = cpu->stepping;
2829 errcode = pt_cpu_errata (&config.errata, &config.cpu);
2830 if (errcode < 0)
2831 error (_("Failed to configure the Intel Processor Trace "
2832 "decoder: %s."), pt_errstr (pt_errcode (errcode)));
2835 decoder = pt_pkt_alloc_decoder (&config);
2836 if (decoder == NULL)
2837 error (_("Failed to allocate the Intel Processor Trace decoder."));
2839 try
2841 btrace_maint_decode_pt (&btinfo->maint, decoder);
2843 catch (const gdb_exception &except)
2845 pt_pkt_free_decoder (decoder);
2847 if (except.reason < 0)
2848 throw;
2851 pt_pkt_free_decoder (decoder);
2854 #endif /* defined (HAVE_LIBIPT) */
2856 /* Update the packet maintenance information for BTINFO and store the
2857 low and high bounds into BEGIN and END, respectively.
2858 Store the current iterator state into FROM and TO. */
2860 static void
2861 btrace_maint_update_packets (struct btrace_thread_info *btinfo,
2862 unsigned int *begin, unsigned int *end,
2863 unsigned int *from, unsigned int *to)
2865 switch (btinfo->data.format)
2867 default:
2868 *begin = 0;
2869 *end = 0;
2870 *from = 0;
2871 *to = 0;
2872 break;
2874 case BTRACE_FORMAT_BTS:
2875 /* Nothing to do - we operate directly on BTINFO->DATA. */
2876 *begin = 0;
2877 *end = btinfo->data.variant.bts.blocks->size ();
2878 *from = btinfo->maint.variant.bts.packet_history.begin;
2879 *to = btinfo->maint.variant.bts.packet_history.end;
2880 break;
2882 #if defined (HAVE_LIBIPT)
2883 case BTRACE_FORMAT_PT:
2884 if (btinfo->maint.variant.pt.packets == nullptr)
2885 btinfo->maint.variant.pt.packets = new std::vector<btrace_pt_packet>;
2887 if (btinfo->maint.variant.pt.packets->empty ())
2888 btrace_maint_update_pt_packets (btinfo);
2890 *begin = 0;
2891 *end = btinfo->maint.variant.pt.packets->size ();
2892 *from = btinfo->maint.variant.pt.packet_history.begin;
2893 *to = btinfo->maint.variant.pt.packet_history.end;
2894 break;
2895 #endif /* defined (HAVE_LIBIPT) */
2899 /* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
2900 update the current iterator position. */
2902 static void
2903 btrace_maint_print_packets (struct btrace_thread_info *btinfo,
2904 unsigned int begin, unsigned int end)
2906 switch (btinfo->data.format)
2908 default:
2909 break;
2911 case BTRACE_FORMAT_BTS:
2913 const std::vector<btrace_block> &blocks
2914 = *btinfo->data.variant.bts.blocks;
2915 unsigned int blk;
2917 for (blk = begin; blk < end; ++blk)
2919 const btrace_block &block = blocks.at (blk);
2921 gdb_printf ("%u\tbegin: %s, end: %s\n", blk,
2922 core_addr_to_string_nz (block.begin),
2923 core_addr_to_string_nz (block.end));
2926 btinfo->maint.variant.bts.packet_history.begin = begin;
2927 btinfo->maint.variant.bts.packet_history.end = end;
2929 break;
2931 #if defined (HAVE_LIBIPT)
2932 case BTRACE_FORMAT_PT:
2934 const std::vector<btrace_pt_packet> &packets
2935 = *btinfo->maint.variant.pt.packets;
2936 unsigned int pkt;
2938 for (pkt = begin; pkt < end; ++pkt)
2940 const struct btrace_pt_packet &packet = packets.at (pkt);
2942 gdb_printf ("%u\t", pkt);
2943 gdb_printf ("0x%" PRIx64 "\t", packet.offset);
2945 if (packet.errcode == pte_ok)
2946 pt_print_packet (&packet.packet);
2947 else
2948 gdb_printf ("[error: %s]", pt_errstr (packet.errcode));
2950 gdb_printf ("\n");
2953 btinfo->maint.variant.pt.packet_history.begin = begin;
2954 btinfo->maint.variant.pt.packet_history.end = end;
2956 break;
2957 #endif /* defined (HAVE_LIBIPT) */
2961 /* Read a number from an argument string. */
2963 static unsigned int
2964 get_uint (const char **arg)
2966 const char *begin, *pos;
2967 char *end;
2968 unsigned long number;
2970 begin = *arg;
2971 pos = skip_spaces (begin);
2973 if (!isdigit (*pos))
2974 error (_("Expected positive number, got: %s."), pos);
2976 number = strtoul (pos, &end, 10);
2977 if (number > UINT_MAX)
2978 error (_("Number too big."));
2980 *arg += (end - begin);
2982 return (unsigned int) number;
2985 /* Read a context size from an argument string. */
2987 static int
2988 get_context_size (const char **arg)
2990 const char *pos = skip_spaces (*arg);
2992 if (!isdigit (*pos))
2993 error (_("Expected positive number, got: %s."), pos);
2995 char *end;
2996 long result = strtol (pos, &end, 10);
2997 *arg = end;
2998 return result;
3001 /* Complain about junk at the end of an argument string. */
3003 static void
3004 no_chunk (const char *arg)
3006 if (*arg != 0)
3007 error (_("Junk after argument: %s."), arg);
3010 /* The "maintenance btrace packet-history" command. */
3012 static void
3013 maint_btrace_packet_history_cmd (const char *arg, int from_tty)
3015 struct btrace_thread_info *btinfo;
3016 unsigned int size, begin, end, from, to;
3018 thread_info *tp = current_inferior ()->find_thread (inferior_ptid);
3019 if (tp == NULL)
3020 error (_("No thread."));
3022 size = 10;
3023 btinfo = &tp->btrace;
3025 btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
3026 if (begin == end)
3028 gdb_printf (_("No trace.\n"));
3029 return;
3032 if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
3034 from = to;
3036 if (end - from < size)
3037 size = end - from;
3038 to = from + size;
3040 else if (strcmp (arg, "-") == 0)
3042 to = from;
3044 if (to - begin < size)
3045 size = to - begin;
3046 from = to - size;
3048 else
3050 from = get_uint (&arg);
3051 if (end <= from)
3052 error (_("'%u' is out of range."), from);
3054 arg = skip_spaces (arg);
3055 if (*arg == ',')
3057 arg = skip_spaces (++arg);
3059 if (*arg == '+')
3061 arg += 1;
3062 size = get_context_size (&arg);
3064 no_chunk (arg);
3066 if (end - from < size)
3067 size = end - from;
3068 to = from + size;
3070 else if (*arg == '-')
3072 arg += 1;
3073 size = get_context_size (&arg);
3075 no_chunk (arg);
3077 /* Include the packet given as the first argument. */
3078 from += 1;
3079 to = from;
3081 if (to - begin < size)
3082 size = to - begin;
3083 from = to - size;
3085 else
3087 to = get_uint (&arg);
3089 /* Include the packet at the second argument and silently
3090 truncate the range. */
3091 if (to < end)
3092 to += 1;
3093 else
3094 to = end;
3096 no_chunk (arg);
3099 else
3101 no_chunk (arg);
3103 if (end - from < size)
3104 size = end - from;
3105 to = from + size;
3108 dont_repeat ();
3111 btrace_maint_print_packets (btinfo, from, to);
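/* Example invocations (illustrative):

     (gdb) maint btrace packet-history           # the next ten packets
     (gdb) maint btrace packet-history -         # the preceding ten packets
     (gdb) maint btrace packet-history 10,20     # packets 10 through 20
     (gdb) maint btrace packet-history 10,+5     # five packets starting at 10
*/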
3114 /* The "maintenance btrace clear-packet-history" command. */
3116 static void
3117 maint_btrace_clear_packet_history_cmd (const char *args, int from_tty)
3119 if (args != NULL && *args != 0)
3120 error (_("Invalid argument."));
3122 if (inferior_ptid == null_ptid)
3123 error (_("No thread."));
3125 thread_info *tp = inferior_thread ();
3126 btrace_thread_info *btinfo = &tp->btrace;
3128 /* Clear the maint data first - it depends on BTINFO->DATA. */
3129 btrace_maint_clear (btinfo);
3130 btinfo->data.clear ();
3133 /* The "maintenance btrace clear" command. */
3135 static void
3136 maint_btrace_clear_cmd (const char *args, int from_tty)
3138 if (args != NULL && *args != 0)
3139 error (_("Invalid argument."));
3141 if (inferior_ptid == null_ptid)
3142 error (_("No thread."));
3144 thread_info *tp = inferior_thread ();
3145 btrace_clear (tp);
3148 /* The "maintenance info btrace" command. */
3150 static void
3151 maint_info_btrace_cmd (const char *args, int from_tty)
3153 struct btrace_thread_info *btinfo;
3154 const struct btrace_config *conf;
3156 if (args != NULL && *args != 0)
3157 error (_("Invalid argument."));
3159 if (inferior_ptid == null_ptid)
3160 error (_("No thread."));
3162 thread_info *tp = inferior_thread ();
3164 btinfo = &tp->btrace;
3166 conf = btrace_conf (btinfo);
3167 if (conf == NULL)
3168 error (_("No btrace configuration."));
3170 gdb_printf (_("Format: %s.\n"),
3171 btrace_format_string (conf->format));
3173 switch (conf->format)
3175 default:
3176 break;
3178 case BTRACE_FORMAT_BTS:
3179 gdb_printf (_("Number of packets: %zu.\n"),
3180 btinfo->data.variant.bts.blocks->size ());
3181 break;
3183 #if defined (HAVE_LIBIPT)
3184 case BTRACE_FORMAT_PT:
3186 struct pt_version version;
3188 version = pt_library_version ();
3189 gdb_printf (_("Version: %u.%u.%u%s.\n"), version.major,
3190 version.minor, version.build,
3191 version.ext != NULL ? version.ext : "");
3193 btrace_maint_update_pt_packets (btinfo);
3194 gdb_printf (_("Number of packets: %zu.\n"),
3195 ((btinfo->maint.variant.pt.packets == nullptr)
3196 ? 0 : btinfo->maint.variant.pt.packets->size ()));
3198 break;
3199 #endif /* defined (HAVE_LIBIPT) */
3203 /* The "maint show btrace pt skip-pad" show value function. */
3205 static void
3206 show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
3207 struct cmd_list_element *c,
3208 const char *value)
3210 gdb_printf (file, _("Skip PAD packets is %s.\n"), value);
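/* For example (illustrative):

     (gdb) maint show btrace pt skip-pad
     Skip PAD packets is on.
*/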
3214 /* Initialize btrace maintenance commands. */
3216 void _initialize_btrace ();
3217 void
3218 _initialize_btrace ()
3220 add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
3221 _("Info about branch tracing data."), &maintenanceinfolist);
3223 add_basic_prefix_cmd ("btrace", class_maintenance,
3224 _("Branch tracing maintenance commands."),
3225 &maint_btrace_cmdlist, 0, &maintenancelist);
3227 add_setshow_prefix_cmd ("btrace", class_maintenance,
3228 _("Set branch tracing specific variables."),
3229 _("Show branch tracing specific variables."),
3230 &maint_btrace_set_cmdlist,
3231 &maint_btrace_show_cmdlist,
3232 &maintenance_set_cmdlist,
3233 &maintenance_show_cmdlist);
3235 add_setshow_prefix_cmd ("pt", class_maintenance,
3236 _("Set Intel Processor Trace specific variables."),
3237 _("Show Intel Processor Trace specific variables."),
3238 &maint_btrace_pt_set_cmdlist,
3239 &maint_btrace_pt_show_cmdlist,
3240 &maint_btrace_set_cmdlist,
3241 &maint_btrace_show_cmdlist);
3243 add_setshow_boolean_cmd ("skip-pad", class_maintenance,
3244 &maint_btrace_pt_skip_pad, _("\
3245 Set whether PAD packets should be skipped in the btrace packet history."), _("\
3246 Show whether PAD packets should be skipped in the btrace packet history."), _("\
3247 When enabled, PAD packets are ignored in the btrace packet history."),
3248 NULL, show_maint_btrace_pt_skip_pad,
3249 &maint_btrace_pt_set_cmdlist,
3250 &maint_btrace_pt_show_cmdlist);
3252 add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
3253 _("Print the raw branch tracing data.\n\
3254 With no argument, print ten more packets after the previous ten-line print.\n\
3255 With '-' as argument, print ten packets before the previous ten-line print.\n\
3256 One argument specifies the starting packet of a ten-line print.\n\
3257 Two arguments with comma between specify starting and ending packets to \
3258 print.\n\
3259 Preceded with '+'/'-' the second argument specifies the distance from the \
3260 first."),
3261 &maint_btrace_cmdlist);
3263 add_cmd ("clear-packet-history", class_maintenance,
3264 maint_btrace_clear_packet_history_cmd,
3265 _("Clears the branch tracing packet history.\n\
3266 Discards the raw branch tracing data but not the execution history data."),
3267 &maint_btrace_cmdlist);
3269 add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
3270 _("Clears the branch tracing data.\n\
3271 Discards the raw branch tracing data and the execution history data.\n\
3272 The next 'record' command will fetch the branch tracing data anew."),
3273 &maint_btrace_cmdlist);