/* Linux-dependent part of branch trace support for GDB, and GDBserver.

   Copyright (C) 2013-2019 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "common/common-defs.h"
#include "linux-btrace.h"
#include "common/common-regcache.h"
#include "common/gdb_wait.h"
#include "x86-cpuid.h"
#include "common/filestuff.h"
#include "common/scoped_fd.h"
#include "common/scoped_mmap.h"

#include <sys/syscall.h>

#if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
#include <unistd.h>
#include <sys/mman.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <sys/types.h>
/* A branch trace record in perf_event.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  __u64 from;

  /* The linear address of the branch destination.  */
  __u64 to;
};

/* A perf_event branch trace sample.  */
struct perf_event_sample
{
  /* The perf_event sample header.  */
  struct perf_event_header header;

  /* The perf_event branch tracing payload.  */
  struct perf_event_bts bts;
};
/* Identify the cpu we're running on.  */

static struct btrace_cpu
btrace_this_cpu (void)
{
  struct btrace_cpu cpu;
  unsigned int eax, ebx, ecx, edx;
  int ok;

  memset (&cpu, 0, sizeof (cpu));

  ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
  if (ok != 0)
    {
      if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
          && edx == signature_INTEL_edx)
        {
          unsigned int cpuid, ignore;

          ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
          if (ok != 0)
            {
              cpu.vendor = CV_INTEL;

              cpu.family = (cpuid >> 8) & 0xf;
              cpu.model = (cpuid >> 4) & 0xf;

              if (cpu.family == 0x6)
                cpu.model += (cpuid >> 12) & 0xf0;
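              /* For illustration: CPUID.01H:EAX == 0x000306a9 yields family
                 0x6, model 0xa, and extended model 0x3, i.e. model 0x3a (an
                 Ivy Bridge part).  */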
            }
        }
    }

  return cpu;
}

/* Return non-zero if there is new data in PEVENT; zero otherwise.  */

static int
perf_event_new_data (const struct perf_event_buffer *pev)
{
  return *pev->data_head != pev->last_head;
}
/* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
   to the memory holding the copy.
   The caller is responsible for freeing the memory.  */

static gdb_byte *
perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
                 size_t size)
{
  const gdb_byte *begin, *end, *start, *stop;
  gdb_byte *buffer;
  size_t buffer_size;
  __u64 data_tail;

  if (size == 0)
    return NULL;

  /* We should never ask for more data than the buffer can hold.  */
  buffer_size = pev->size;
  gdb_assert (size <= buffer_size);

  /* If we ask for more data than we seem to have, we wrap around and read
     data from the end of the buffer.  This is already handled by the %
     BUFFER_SIZE operation, below.  Here, we just need to make sure that we
     don't underflow.

     Note that this is perfectly OK for perf event buffers where data_head
     doesn't grow indefinitely and instead wraps around to remain within the
     buffer's boundaries.  */
  if (data_head < size)
    data_head += buffer_size;

  gdb_assert (size <= data_head);
  data_tail = data_head - size;

  begin = pev->mem;
  start = begin + data_tail % buffer_size;
  stop = begin + data_head % buffer_size;
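  /* For example, with BUFFER_SIZE == 8, DATA_HEAD == 10, and SIZE == 4,
     DATA_TAIL is 6: START points at offset 6 and STOP at offset 2, so the
     copy wraps around the end of the buffer and is assembled from two
     pieces below.  (Numbers are made up for illustration.)  */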
  buffer = (gdb_byte *) xmalloc (size);

  if (start < stop)
    memcpy (buffer, start, stop - start);
  else
    {
      end = begin + buffer_size;

      memcpy (buffer, start, end - start);
      memcpy (buffer + (end - start), begin, stop - begin);
    }

  return buffer;
}
/* Copy the perf event buffer data from PEV.
   Store a pointer to the copy into DATA and its size in SIZE.  */

static void
perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
                     size_t *psize)
{
  size_t size;
  __u64 data_head;

  data_head = *pev->data_head;
  size = pev->size;

  *data = perf_event_read (pev, data_head, size);
  *psize = size;

  pev->last_head = data_head;
}
/* Try to determine the start address of the Linux kernel.  */

static uint64_t
linux_determine_kernel_start (void)
{
  static uint64_t kernel_start;
  static bool cached;

  if (cached)
    return kernel_start;

  cached = true;

  gdb_file_up file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
  if (file == nullptr)
    return kernel_start;

  while (!feof (file.get ()))
    {
      char buffer[1024], symbol[8], *line;
      uint64_t addr;
      int match;

      line = fgets (buffer, sizeof (buffer), file.get ());
      if (line == NULL)
        break;

      match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
      if (match != 2)
        continue;

      if (strcmp (symbol, "_text") == 0)
        {
          kernel_start = addr;
          break;
        }
    }

  return kernel_start;
}
/* Check whether an address is in the kernel.  */

static inline int
perf_event_is_kernel_addr (uint64_t addr)
{
  uint64_t kernel_start;

  kernel_start = linux_determine_kernel_start ();
  if (kernel_start != 0ull)
    return (addr >= kernel_start);

  /* If we don't know the kernel's start address, let's check the most
     significant bit.  This will work at least for 64-bit kernels.  */
  return ((addr & (1ull << 63)) != 0);
}
/* Check whether a perf event record should be skipped.  */

static inline int
perf_event_skip_bts_record (const struct perf_event_bts *bts)
{
  /* The hardware may report branches from kernel into user space.  Branches
     from user into kernel space will be suppressed.  We filter the former to
     provide a consistent branch trace excluding kernel.  */
  return perf_event_is_kernel_addr (bts->from);
}
/* Perform a few consistency checks on a perf event sample record.  This is
   meant to catch cases when we get out of sync with the perf event stream.  */

static inline int
perf_event_sample_ok (const struct perf_event_sample *sample)
{
  if (sample->header.type != PERF_RECORD_SAMPLE)
    return 0;

  if (sample->header.size != sizeof (*sample))
    return 0;

  return 1;
}
/* Branch trace is collected in a circular buffer [begin; end) as pairs of from
   and to addresses (plus a header).

   Start points into that buffer at the next sample position.
   We read the collected samples backwards from start.

   While reading the samples, we convert the information into a list of blocks.
   For two adjacent samples s1 and s2, we form a block b such that b.begin =
   s1.to and b.end = s2.from.

   In case the buffer overflows during sampling, one sample may have its lower
   part at the end and its upper part at the beginning of the buffer.  */
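/* For example, if sample s1 == { from: 0x100, to: 0x200 } is followed by
   s2 == { from: 0x210, to: 0x400 }, we form the block [0x200; 0x210]: the
   inferior entered the block at 0x200 and branched away at 0x210.
   (Addresses are made up for illustration.)  */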
static VEC (btrace_block_s) *
perf_event_read_bts (struct btrace_target_info* tinfo, const uint8_t *begin,
                     const uint8_t *end, const uint8_t *start, size_t size)
{
  VEC (btrace_block_s) *btrace = NULL;
  struct perf_event_sample sample;
  size_t read = 0;
  struct btrace_block block = { 0, 0 };
  struct regcache *regcache;

  gdb_assert (begin <= start);
  gdb_assert (start <= end);

  /* The first block ends at the current pc.  */
  regcache = get_thread_regcache_for_ptid (tinfo->ptid);
  block.end = regcache_read_pc (regcache);
  /* The buffer may contain a partial record as its last entry (i.e. when the
     buffer size is not a multiple of the sample size).  */
  read = sizeof (sample) - 1;
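  /* That is, the loop below only iterates while at least one more complete
     sample fits into the not-yet-read part of the buffer.  */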
  for (; read < size; read += sizeof (sample))
    {
      const struct perf_event_sample *psample;

      /* Find the next perf_event sample in a backwards traversal.  */
      start -= sizeof (sample);

      /* If we're still inside the buffer, we're done.  */
      if (begin <= start)
        psample = (const struct perf_event_sample *) start;
      else
        {
          int missing;

          /* We're to the left of the ring buffer, we will wrap around and
             reappear at the very right of the ring buffer.  */
          missing = (begin - start);
          start = (end - missing);

          /* If the entire sample is missing, we're done.  */
          if (missing == sizeof (sample))
            psample = (const struct perf_event_sample *) start;
          else
            {
              uint8_t *stack;

              /* The sample wrapped around.  The lower part is at the end and
                 the upper part is at the beginning of the buffer.  */
              stack = (uint8_t *) &sample;

              /* Copy the two parts so we have a contiguous sample.  */
              memcpy (stack, start, missing);
              memcpy (stack + missing, begin, sizeof (sample) - missing);

              psample = &sample;
            }
        }
      if (!perf_event_sample_ok (psample))
        {
          warning (_("Branch trace may be incomplete."));
          break;
        }

      if (perf_event_skip_bts_record (&psample->bts))
        continue;

      /* We found a valid sample, so we can complete the current block.  */
      block.begin = psample->bts.to;

      VEC_safe_push (btrace_block_s, btrace, &block);

      /* Start the next block.  */
      block.end = psample->bts.from;
    }

  /* Push the last block (i.e. the first one of inferior execution), as well.
     We don't know where it ends, but we know where it starts.  If we're
     reading delta trace, we can fill in the start address later on.
     Otherwise we will prune it.  */
  block.begin = 0;
  VEC_safe_push (btrace_block_s, btrace, &block);

  return btrace;
}
/* Check whether an Intel cpu supports BTS.  */

static int
intel_supports_bts (const struct btrace_cpu *cpu)
{
  switch (cpu->family)
    {
    case 0x6:
      switch (cpu->model)
        {
        case 0x1a: /* Nehalem */
        case 0x25: /* Westmere */
        case 0x2a: /* Sandy Bridge */
        case 0x3a: /* Ivy Bridge */

          /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
             "from" information after an EIST transition, T-states, C1E, or
             Adaptive Thermal Throttling.  */
          return 0;
        }
    }

  return 1;
}
/* Check whether the cpu supports BTS.  */

static int
cpu_supports_bts (void)
{
  struct btrace_cpu cpu;

  cpu = btrace_this_cpu ();
  switch (cpu.vendor)
    {
    default:
      /* Don't know about others.  Let's assume they do.  */
      return 1;

    case CV_INTEL:
      return intel_supports_bts (&cpu);
    }
}
/* The perf_event_open syscall failed.  Try to print a helpful error
   message.  */

static void
diagnose_perf_event_open_fail ()
{
  switch (errno)
    {
    case EPERM:
    case EACCES:
      {
        static const char filename[] = "/proc/sys/kernel/perf_event_paranoid";
        gdb_file_up file = gdb_fopen_cloexec (filename, "r");
        if (file.get () == nullptr)
          break;

        int level, found = fscanf (file.get (), "%d", &level);
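        /* A perf_event_paranoid level above 2 disallows perf_event_open for
           unprivileged processes entirely; level 3 is a common downstream
           kernel patch rather than a mainline setting.  */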
        if (found == 1 && level > 2)
          error (_("You do not have permission to record the process.  "
                   "Try setting %s to 2 or less."), filename);
      }

      break;
    }

  error (_("Failed to start recording: %s"), safe_strerror (errno));
}
/* Enable branch tracing in BTS format.  */

static struct btrace_target_info *
linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
{
  struct btrace_tinfo_bts *bts;
  size_t size, pages;
  __u64 data_offset;
  int pid, pg;

  if (!cpu_supports_bts ())
    error (_("BTS support has been disabled for the target cpu."));

  gdb::unique_xmalloc_ptr<btrace_target_info> tinfo
    (XCNEW (btrace_target_info));
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_BTS;
  bts = &tinfo->variant.bts;

  bts->attr.size = sizeof (bts->attr);
  bts->attr.type = PERF_TYPE_HARDWARE;
  bts->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
  bts->attr.sample_period = 1;
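  /* With a sample period of one, every branch instruction produces a
     sample.  */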
  /* We sample from and to address.  */
  bts->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;

  bts->attr.exclude_kernel = 1;
  bts->attr.exclude_hv = 1;
  bts->attr.exclude_idle = 1;

  pid = ptid.lwp ();
  if (pid == 0)
    pid = ptid.pid ();

  errno = 0;
  scoped_fd fd (syscall (SYS_perf_event_open, &bts->attr, pid, -1, -1, 0));
  if (fd.get () < 0)
    diagnose_perf_event_open_fail ();

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);
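  /* For example, a request of five pages (0b101) becomes six after bit 0,
     eight after bit 1, and the loop then terminates at PAGES == 1 << 3.  */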
  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  scoped_mmap data;
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      size = (size_t) data_size;
      length = size + PAGE_SIZE;

      /* Check for overflows.  */
      if ((__u64) length != data_size + PAGE_SIZE)
        continue;

      errno = 0;
      /* The number of pages we request needs to be a power of two.  */
      data.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (), 0);
      if (data.get () != MAP_FAILED)
        break;
    }

  if (pages == 0)
    error (_("Failed to map trace buffer: %s."), safe_strerror (errno));
  struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
    data.get ();
  data_offset = PAGE_SIZE;
#if defined (PERF_ATTR_SIZE_VER5)
  if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
    {
      __u64 data_size;

      data_offset = header->data_offset;
      data_size = header->data_size;

      size = (unsigned int) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
        error (_("Failed to determine trace buffer size."));
    }
#endif /* defined (PERF_ATTR_SIZE_VER5) */
  bts->bts.size = size;
  bts->bts.data_head = &header->data_head;
  bts->bts.mem = (const uint8_t *) data.release () + data_offset;
  bts->bts.last_head = 0ull;
  bts->header = header;
  bts->file = fd.release ();

  tinfo->conf.bts.size = (unsigned int) size;
  return tinfo.release ();
}
#if defined (PERF_ATTR_SIZE_VER5)

/* Determine the event type.  */

static int
perf_event_pt_event_type ()
{
  static const char filename[]
    = "/sys/bus/event_source/devices/intel_pt/type";

  errno = 0;
  gdb_file_up file = gdb_fopen_cloexec (filename, "r");
  if (file.get () == nullptr)
    error (_("Failed to open %s: %s."), filename, safe_strerror (errno));

  int type, found = fscanf (file.get (), "%d", &type);
  if (found != 1)
    error (_("Failed to read the PT event type from %s."), filename);

  return type;
}
/* Enable branch tracing in Intel Processor Trace format.  */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  struct btrace_tinfo_pt *pt;
  size_t pages;
  int pid, pg;

  pid = ptid.lwp ();
  if (pid == 0)
    pid = ptid.pid ();

  gdb::unique_xmalloc_ptr<btrace_target_info> tinfo
    (XCNEW (btrace_target_info));
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_PT;
  pt = &tinfo->variant.pt;

  pt->attr.size = sizeof (pt->attr);
  pt->attr.type = perf_event_pt_event_type ();

  pt->attr.exclude_kernel = 1;
  pt->attr.exclude_hv = 1;
  pt->attr.exclude_idle = 1;

  errno = 0;
  scoped_fd fd (syscall (SYS_perf_event_open, &pt->attr, pid, -1, -1, 0));
  if (fd.get () < 0)
    diagnose_perf_event_open_fail ();

  /* Allocate the configuration page.  */
  scoped_mmap data (nullptr, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd.get (), 0);
  if (data.get () == MAP_FAILED)
    error (_("Failed to map trace user page: %s."), safe_strerror (errno));

  struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
    data.get ();

  header->aux_offset = header->data_offset + header->data_size;
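  /* The AUX area holding the actual PT trace is a separate mapping: its
     offset (and, in the loop below, its size) is announced to the kernel
     via the user page before the second mmap is performed.  */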
  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);
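  /* Same adjustment as for the BTS buffer above: e.g. three pages (0b11)
     become four.  */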
  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  scoped_mmap aux;
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      length = (size_t) data_size;

      /* Check for overflows.  */
      if ((__u64) length != data_size)
        continue;

      header->aux_size = data_size;

      errno = 0;
      aux.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (),
                 header->aux_offset);
      if (aux.get () != MAP_FAILED)
        break;
    }

  if (pages == 0)
    error (_("Failed to map trace buffer: %s."), safe_strerror (errno));
  pt->pt.size = aux.size ();
  pt->pt.mem = (const uint8_t *) aux.release ();
  pt->pt.data_head = &header->aux_head;
  pt->header = (struct perf_event_mmap_page *) data.release ();
  gdb_assert (pt->header == header);
  pt->file = fd.release ();

  tinfo->conf.pt.size = (unsigned int) pt->pt.size;
  return tinfo.release ();
}
#else /* !defined (PERF_ATTR_SIZE_VER5) */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  error (_("Intel Processor Trace support was disabled at compile time."));
}

#endif /* !defined (PERF_ATTR_SIZE_VER5) */
/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      error (_("Bad branch trace format."));

    default:
      error (_("Unknown branch trace format."));

    case BTRACE_FORMAT_BTS:
      return linux_enable_bts (ptid, &conf->bts);

    case BTRACE_FORMAT_PT:
      return linux_enable_pt (ptid, &conf->pt);
    }
}
/* Disable BTS tracing.  */

static enum btrace_error
linux_disable_bts (struct btrace_tinfo_bts *tinfo)
{
  munmap ((void *) tinfo->header, tinfo->bts.size + PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* Disable Intel Processor Trace tracing.  */

static enum btrace_error
linux_disable_pt (struct btrace_tinfo_pt *tinfo)
{
  munmap ((void *) tinfo->pt.mem, tinfo->pt.size);
  munmap ((void *) tinfo->header, PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}
/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  enum btrace_error errcode;

  errcode = BTRACE_ERR_NOT_SUPPORTED;
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      errcode = linux_disable_bts (&tinfo->variant.bts);
      break;

    case BTRACE_FORMAT_PT:
      errcode = linux_disable_pt (&tinfo->variant.pt);
      break;
    }

  if (errcode == BTRACE_ERR_NONE)
    xfree (tinfo);

  return errcode;
}
/* Read branch trace data in BTS format for the thread given by TINFO into
   BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_bts (struct btrace_data_bts *btrace,
                struct btrace_target_info *tinfo,
                enum btrace_read_type type)
{
  struct perf_event_buffer *pevent;
  const uint8_t *begin, *end, *start;
  size_t buffer_size, size;
  __u64 data_head, data_tail;
  unsigned int retries = 5;

  pevent = &tinfo->variant.bts.bts;

  /* For delta reads, we return at least the partial last block containing
     the current pc.  */
  if (type == BTRACE_READ_NEW && !perf_event_new_data (pevent))
    return BTRACE_ERR_NONE;

  buffer_size = pevent->size;
  data_tail = pevent->last_head;

  /* We may need to retry reading the trace.  See below.  */
  while (retries--)
    {
      data_head = *pevent->data_head;

      /* Delete any leftover trace from the previous iteration.  */
      VEC_free (btrace_block_s, btrace->blocks);

      if (type == BTRACE_READ_DELTA)
        {
          __u64 data_size;

          /* Determine the number of bytes to read and check for buffer
             overflows.  */

          /* Check for data head overflows.  We might be able to recover from
             those but they are very unlikely and it's not really worth the
             effort.  */
          if (data_head < data_tail)
            return BTRACE_ERR_OVERFLOW;

          /* If the buffer is smaller than the trace delta, we overflowed.  */
          data_size = data_head - data_tail;
          if (buffer_size < data_size)
            return BTRACE_ERR_OVERFLOW;

          /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t.  */
          size = (size_t) data_size;
        }
      else
        {
          /* Read the entire buffer.  */
          size = buffer_size;

          /* Adjust the size if the buffer has not overflowed, yet.  */
          if (data_head < size)
            size = (size_t) data_head;
        }

      /* Data_head keeps growing; the buffer itself is circular.  */
      begin = pevent->mem;
      start = begin + data_head % buffer_size;

      if (data_head <= buffer_size)
        end = start;
      else
        end = begin + pevent->size;

      btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);

      /* The stopping thread notifies its ptracer before it is scheduled out.
         On multi-core systems, the debugger might therefore run while the
         kernel might be writing the last branch trace records.

         Let's check whether the data head moved while we read the trace.  */
      if (data_head == *pevent->data_head)
        break;
    }

  pevent->last_head = data_head;

  /* Prune the incomplete last block (i.e. the first one of inferior execution)
     if we're not doing a delta read.  There is no way of filling in its zeroed
     BEGIN element.  */
  if (!VEC_empty (btrace_block_s, btrace->blocks)
      && type != BTRACE_READ_DELTA)
    VEC_pop (btrace_block_s, btrace->blocks);

  return BTRACE_ERR_NONE;
}
/* Fill in the Intel Processor Trace configuration information.  */

static void
linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
{
  conf->cpu = btrace_this_cpu ();
}
/* Read branch trace data in Intel Processor Trace format for the thread
   given by TINFO into BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_pt (struct btrace_data_pt *btrace,
               struct btrace_target_info *tinfo,
               enum btrace_read_type type)
{
  struct perf_event_buffer *pt;

  pt = &tinfo->variant.pt.pt;

  linux_fill_btrace_pt_config (&btrace->config);

  switch (type)
    {
    case BTRACE_READ_DELTA:
      /* We don't support delta reads.  The data head (i.e. aux_head) wraps
         around to stay inside the aux buffer.  */
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_READ_NEW:
      if (!perf_event_new_data (pt))
        return BTRACE_ERR_NONE;

      /* Fall through.  */
    case BTRACE_READ_ALL:
      perf_event_read_all (pt, &btrace->data, &btrace->size);
      return BTRACE_ERR_NONE;
    }

  internal_error (__FILE__, __LINE__, _("Unknown btrace read type."));
}
/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_FORMAT_BTS:
      /* We read btrace in BTS format.  */
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;

      return linux_read_bts (&btrace->variant.bts, tinfo, type);

    case BTRACE_FORMAT_PT:
      /* We read btrace in Intel Processor Trace format.  */
      btrace->format = BTRACE_FORMAT_PT;
      btrace->variant.pt.data = NULL;
      btrace->variant.pt.size = 0;

      return linux_read_pt (&btrace->variant.pt, tinfo, type);
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}
/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return &tinfo->conf;
}
#else /* !HAVE_LINUX_PERF_EVENT_H */

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  return NULL;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return NULL;
}

#endif /* !HAVE_LINUX_PERF_EVENT_H */