/* Linux-dependent part of branch trace support for GDB, and GDBserver.

   Copyright (C) 2013-2018 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "common-defs.h"
#include "linux-btrace.h"
#include "common-regcache.h"
#include "gdb_wait.h"
#include "x86-cpuid.h"
#include "filestuff.h"

#include <inttypes.h>

#include <sys/syscall.h>

#if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)

#include <unistd.h>
#include <sys/mman.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <sys/types.h>
#include <signal.h>
/* A branch trace record in perf_event.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};

/* A perf_event branch trace sample.  */
struct perf_event_sample
{
  /* The perf_event sample header.  */
  struct perf_event_header header;

  /* The perf_event branch tracing payload.  */
  struct perf_event_bts bts;
};
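
/* Note (editorial): with PERF_SAMPLE_IP | PERF_SAMPLE_ADDR and a sample
   period of 1 (see the attribute setup below), every record has this fixed
   header-plus-address-pair layout, which is what makes the fixed-stride
   backwards traversal in perf_event_read_bts possible.  */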
/* Identify the cpu we're running on.  */

static struct btrace_cpu
btrace_this_cpu (void)
{
  struct btrace_cpu cpu;
  unsigned int eax, ebx, ecx, edx;
  int ok;

  memset (&cpu, 0, sizeof (cpu));

  ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
  if (ok != 0)
    {
      if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
	  && edx == signature_INTEL_edx)
	{
	  unsigned int cpuid, ignore;

	  ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
	  if (ok != 0)
	    {
	      cpu.vendor = CV_INTEL;

	      cpu.family = (cpuid >> 8) & 0xf;
	      cpu.model = (cpuid >> 4) & 0xf;
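
	      /* CPUID leaf 1 encodes the model in bits 4-7 and the extended
		 model in bits 16-19; for family 6, the extended model forms
		 the high nibble of the model number.  */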
	      if (cpu.family == 0x6)
		cpu.model += (cpuid >> 12) & 0xf0;
	    }
	}
    }

  return cpu;
}
/* Return non-zero if there is new data in PEVENT; zero otherwise.  */

static int
perf_event_new_data (const struct perf_event_buffer *pev)
{
  return *pev->data_head != pev->last_head;
}
/* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
   to the memory holding the copy.
   The caller is responsible for freeing the memory.  */

static gdb_byte *
perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
		 size_t size)
{
  const gdb_byte *begin, *end, *start, *stop;
  gdb_byte *buffer;
  size_t buffer_size;
  __u64 data_tail;

  if (size == 0)
    return NULL;

  /* We should never ask for more data than the buffer can hold.  */
  buffer_size = pev->size;
  gdb_assert (size <= buffer_size);

  /* If we ask for more data than we seem to have, we wrap around and read
     data from the end of the buffer.  This is already handled by the %
     BUFFER_SIZE operation, below.  Here, we just need to make sure that we
     don't underflow.

     Note that this is perfectly OK for perf event buffers where data_head
     doesn't grow indefinitely and instead wraps around to remain within the
     buffer's boundaries.  */
  if (data_head < size)
    data_head += buffer_size;

  gdb_assert (size <= data_head);
  data_tail = data_head - size;

  begin = pev->mem;
  start = begin + data_tail % buffer_size;
  stop = begin + data_head % buffer_size;
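
  /* For example (illustration only): with a 4096-byte buffer, DATA_HEAD 100,
     and SIZE 512, DATA_HEAD is first bumped to 4196 so DATA_TAIL becomes
     3684; the copy below then wraps, taking 412 bytes from the end of the
     buffer followed by 100 bytes from its beginning.  */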
  buffer = (gdb_byte *) xmalloc (size);

  if (start < stop)
    memcpy (buffer, start, stop - start);
  else
    {
      end = begin + buffer_size;

      memcpy (buffer, start, end - start);
      memcpy (buffer + (end - start), begin, stop - begin);
    }

  return buffer;
}
/* Copy the perf event buffer data from PEV.
   Store a pointer to the copy into DATA and its size in SIZE.  */

static void
perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
		     size_t *psize)
{
  size_t size;
  __u64 data_head;

  data_head = *pev->data_head;
  size = pev->size;

  *data = perf_event_read (pev, data_head, size);
  *psize = size;

  pev->last_head = data_head;
}
/* Determine the event type.
   Returns zero on success and fills in TYPE; returns -1 otherwise.  */

static int
perf_event_pt_event_type (int *type)
{
  FILE *file;
  int found;

  file = fopen ("/sys/bus/event_source/devices/intel_pt/type", "r");
  if (file == NULL)
    return -1;

  found = fscanf (file, "%d", type);
  fclose (file);

  if (found == 1)
    return 0;
  return -1;
}
/* Try to determine the start address of the Linux kernel.  */

static uint64_t
linux_determine_kernel_start (void)
{
  static uint64_t kernel_start;
  static int cached;

  if (cached != 0)
    return kernel_start;

  cached = 1;

  gdb_file_up file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
  if (file == NULL)
    return kernel_start;

  while (!feof (file.get ()))
    {
      char buffer[1024], symbol[8], *line;
      uint64_t addr;
      int match;

      line = fgets (buffer, sizeof (buffer), file.get ());
      if (line == NULL)
	break;

      match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
      if (match != 2)
	continue;

      if (strcmp (symbol, "_text") == 0)
	{
	  kernel_start = addr;
	  break;
	}
    }

  return kernel_start;
}
/* Check whether an address is in the kernel.  */

static inline int
perf_event_is_kernel_addr (uint64_t addr)
{
  uint64_t kernel_start;

  kernel_start = linux_determine_kernel_start ();
  if (kernel_start != 0ull)
    return (addr >= kernel_start);

  /* If we don't know the kernel's start address, let's check the most
     significant bit.  This will work at least for 64-bit kernels.  */
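  /* (On x86-64, kernel addresses occupy the upper half of the canonical
     address space, so bit 63 is set for kernel addresses.)  */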
  return ((addr & (1ull << 63)) != 0);
}

/* Check whether a perf event record should be skipped.  */

static inline int
perf_event_skip_bts_record (const struct perf_event_bts *bts)
{
  /* The hardware may report branches from kernel into user space.  Branches
     from user into kernel space will be suppressed.  We filter the former to
     provide a consistent branch trace excluding kernel.  */
  return perf_event_is_kernel_addr (bts->from);
}
/* Perform a few consistency checks on a perf event sample record.  This is
   meant to catch cases when we get out of sync with the perf event stream.  */

static inline int
perf_event_sample_ok (const struct perf_event_sample *sample)
{
  if (sample->header.type != PERF_RECORD_SAMPLE)
    return 0;

  if (sample->header.size != sizeof (*sample))
    return 0;

  return 1;
}
/* Branch trace is collected in a circular buffer [begin; end) as pairs of from
   and to addresses (plus a header).

   Start points into that buffer at the next sample position.
   We read the collected samples backwards from start.

   While reading the samples, we convert the information into a list of blocks.
   For two adjacent samples s1 and s2, we form a block b such that b.begin =
   s1.to and b.end = s2.from.

   In case the buffer overflows during sampling, one sample may have its lower
   part at the end and its upper part at the beginning of the buffer.  */
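
/* For example (illustration only): for consecutive samples
   s1 = (from 0x100, to 0x200) and s2 = (from 0x250, to 0x300), execution ran
   linearly from 0x200 to 0x250, so we record the block [0x200; 0x250].  */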
static VEC (btrace_block_s) *
perf_event_read_bts (struct btrace_target_info *tinfo, const uint8_t *begin,
		     const uint8_t *end, const uint8_t *start, size_t size)
{
  VEC (btrace_block_s) *btrace = NULL;
  struct perf_event_sample sample;
  size_t read = 0;
  struct btrace_block block = { 0, 0 };
  struct regcache *regcache;

  gdb_assert (begin <= start);
  gdb_assert (start <= end);

  /* The first block ends at the current pc.  */
  regcache = get_thread_regcache_for_ptid (tinfo->ptid);
  block.end = regcache_read_pc (regcache);

  /* The buffer may contain a partial record as its last entry (i.e. when the
     buffer size is not a multiple of the sample size).  */
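  /* For instance (illustrative numbers): assuming 24-byte samples (an 8-byte
     header plus two 8-byte addresses), a 4096-byte buffer holds 170 complete
     samples and ends in a 16-byte partial record we must not read.  */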
  read = sizeof (sample) - 1;

  for (; read < size; read += sizeof (sample))
    {
      const struct perf_event_sample *psample;

      /* Find the next perf_event sample in a backwards traversal.  */
      start -= sizeof (sample);

      /* If we're still inside the buffer, we're done.  */
      if (begin <= start)
	psample = (const struct perf_event_sample *) start;
      else
	{
	  int missing;

	  /* We're to the left of the ring buffer, we will wrap around and
	     reappear at the very right of the ring buffer.  */
	  missing = (begin - start);
	  start = (end - missing);

	  /* If the entire sample is missing, we're done.  */
	  if (missing == sizeof (sample))
	    psample = (const struct perf_event_sample *) start;
	  else
	    {
	      uint8_t *stack;

	      /* The sample wrapped around.  The lower part is at the end and
		 the upper part is at the beginning of the buffer.  */
	      stack = (uint8_t *) &sample;

	      /* Copy the two parts so we have a contiguous sample.  */
	      memcpy (stack, start, missing);
	      memcpy (stack + missing, begin, sizeof (sample) - missing);

	      psample = &sample;
	    }
	}

      if (!perf_event_sample_ok (psample))
	{
	  warning (_("Branch trace may be incomplete."));
	  break;
	}
      if (perf_event_skip_bts_record (&psample->bts))
	continue;

      /* We found a valid sample, so we can complete the current block.  */
      block.begin = psample->bts.to;

      VEC_safe_push (btrace_block_s, btrace, &block);

      /* Start the next block.  */
      block.end = psample->bts.from;
    }

  /* Push the last block (i.e. the first one of inferior execution), as well.
     We don't know where it ends, but we know where it starts.  If we're
     reading delta trace, we can fill in the start address later on.
     Otherwise we will prune it.  */
  block.begin = 0;
  VEC_safe_push (btrace_block_s, btrace, &block);

  return btrace;
}
/* Check whether the kernel supports BTS.  */

static int
kernel_supports_bts (void)
{
  struct perf_event_attr attr;
  pid_t child, pid;
  int status, file;

  errno = 0;
  child = fork ();
  switch (child)
    {
    case -1:
      warning (_("test bts: cannot fork: %s."), safe_strerror (errno));
      return 0;

    case 0:
      status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      if (status != 0)
	{
	  warning (_("test bts: cannot PTRACE_TRACEME: %s."),
		   safe_strerror (errno));
	  _exit (1);
	}

      status = raise (SIGTRAP);
      if (status != 0)
	{
	  warning (_("test bts: cannot raise SIGTRAP: %s."),
		   safe_strerror (errno));
	  _exit (1);
	}

      _exit (1);

    default: /* parent */
      pid = waitpid (child, &status, 0);
      if (pid != child)
	{
	  warning (_("test bts: bad pid %ld, error: %s."),
		   (long) pid, safe_strerror (errno));
	  return 0;
	}

      if (!WIFSTOPPED (status))
	{
	  warning (_("test bts: expected stop. status: %d."),
		   status);
	  return 0;
	}

      memset (&attr, 0, sizeof (attr));

      attr.type = PERF_TYPE_HARDWARE;
      attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
      attr.sample_period = 1;
      attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
      attr.exclude_kernel = 1;
      attr.exclude_hv = 1;
      attr.exclude_idle = 1;
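
      /* The perf_event_open syscall takes (attr, pid, cpu, group_fd, flags);
	 pid = child with cpu = -1 counts the child process on any cpu.  */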
      file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
      if (file >= 0)
	close (file);

      kill (child, SIGKILL);
      ptrace (PTRACE_KILL, child, NULL, NULL);

      pid = waitpid (child, &status, 0);
      if (pid != child)
	{
	  warning (_("test bts: bad pid %ld, error: %s."),
		   (long) pid, safe_strerror (errno));
	  if (!WIFSIGNALED (status))
	    warning (_("test bts: expected killed. status: %d."),
		     status);
	}

      return (file >= 0);
    }
}
/* Check whether the kernel supports Intel Processor Trace.  */

static int
kernel_supports_pt (void)
{
  struct perf_event_attr attr;
  pid_t child, pid;
  int status, file, type;

  errno = 0;
  child = fork ();
  switch (child)
    {
    case -1:
      warning (_("test pt: cannot fork: %s."), safe_strerror (errno));
      return 0;

    case 0:
      status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      if (status != 0)
	{
	  warning (_("test pt: cannot PTRACE_TRACEME: %s."),
		   safe_strerror (errno));
	  _exit (1);
	}

      status = raise (SIGTRAP);
      if (status != 0)
	{
	  warning (_("test pt: cannot raise SIGTRAP: %s."),
		   safe_strerror (errno));
	  _exit (1);
	}

      _exit (1);

    default: /* parent */
      pid = waitpid (child, &status, 0);
      if (pid != child)
	{
	  warning (_("test pt: bad pid %ld, error: %s."),
		   (long) pid, safe_strerror (errno));
	  return 0;
	}

      if (!WIFSTOPPED (status))
	{
	  warning (_("test pt: expected stop. status: %d."),
		   status);
	  return 0;
	}

      status = perf_event_pt_event_type (&type);
      if (status != 0)
	file = -1;
      else
	{
	  memset (&attr, 0, sizeof (attr));

	  attr.size = sizeof (attr);
	  attr.type = type;
	  attr.exclude_kernel = 1;
	  attr.exclude_hv = 1;
	  attr.exclude_idle = 1;

	  file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
	  if (file >= 0)
	    close (file);
	}

      kill (child, SIGKILL);
      ptrace (PTRACE_KILL, child, NULL, NULL);

      pid = waitpid (child, &status, 0);
      if (pid != child)
	{
	  warning (_("test pt: bad pid %ld, error: %s."),
		   (long) pid, safe_strerror (errno));
	  if (!WIFSIGNALED (status))
	    warning (_("test pt: expected killed. status: %d."),
		     status);
	}

      return (file >= 0);
    }
}
/* Check whether an Intel cpu supports BTS.  */

static int
intel_supports_bts (const struct btrace_cpu *cpu)
{
  switch (cpu->family)
    {
    case 0x6:
      switch (cpu->model)
	{
	case 0x1a: /* Nehalem */
	case 0x1f:
	case 0x1e:
	case 0x2e:
	case 0x25: /* Westmere */
	case 0x2c:
	case 0x2f:
	case 0x2a: /* Sandy Bridge */
	case 0x2d:
	case 0x3a: /* Ivy Bridge */

	  /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
	     "from" information after an EIST transition, T-states, C1E, or
	     Adaptive Thermal Throttling.  */
	  return 0;
	}
    }

  return 1;
}
/* Check whether the cpu supports BTS.  */

static int
cpu_supports_bts (void)
{
  struct btrace_cpu cpu;

  cpu = btrace_this_cpu ();
  switch (cpu.vendor)
    {
    default:
      /* Don't know about others.  Let's assume they do.  */
      return 1;

    case CV_INTEL:
      return intel_supports_bts (&cpu);
    }
}
/* Check whether the linux target supports BTS.  */

static int
linux_supports_bts (void)
{
  static int cached;

  if (cached == 0)
    {
      if (!kernel_supports_bts ())
	cached = -1;
      else if (!cpu_supports_bts ())
	cached = -1;
      else
	cached = 1;
    }

  return cached > 0;
}

/* Check whether the linux target supports Intel Processor Trace.  */

static int
linux_supports_pt (void)
{
  static int cached;

  if (cached == 0)
    {
      if (!kernel_supports_pt ())
	cached = -1;
      else
	cached = 1;
    }

  return cached > 0;
}
/* See linux-btrace.h.  */

int
linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
{
  switch (format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return linux_supports_bts ();

    case BTRACE_FORMAT_PT:
      return linux_supports_pt ();
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format"));
}
/* Enable branch tracing in BTS format.  */

static struct btrace_target_info *
linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
{
  struct perf_event_mmap_page *header;
  struct btrace_target_info *tinfo;
  struct btrace_tinfo_bts *bts;
  size_t size, pages;
  __u64 data_offset;
  int pid, pg;

  tinfo = XCNEW (struct btrace_target_info);
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_BTS;
  bts = &tinfo->variant.bts;

  bts->attr.size = sizeof (bts->attr);
  bts->attr.type = PERF_TYPE_HARDWARE;
  bts->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
  bts->attr.sample_period = 1;

  /* We sample from and to address.  */
  bts->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;

  bts->attr.exclude_kernel = 1;
  bts->attr.exclude_hv = 1;
  bts->attr.exclude_idle = 1;

  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  errno = 0;
  bts->file = syscall (SYS_perf_event_open, &bts->attr, pid, -1, -1, 0);
  if (bts->file < 0)
    goto err_out;

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
	   + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
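  /* (E.g., a request of 5 pages is rounded up: 5 -> 6 -> 8.)  */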
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);
  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
	continue;

      size = (size_t) data_size;
      length = size + PAGE_SIZE;

      /* Check for overflows.  */
      if ((__u64) length != data_size + PAGE_SIZE)
	continue;

      /* The number of pages we request needs to be a power of two.  */
      header = ((struct perf_event_mmap_page *)
		mmap (NULL, length, PROT_READ, MAP_SHARED, bts->file, 0));
      if (header != MAP_FAILED)
	break;
    }

  if (pages == 0)
    goto err_file;

  data_offset = PAGE_SIZE;

#if defined (PERF_ATTR_SIZE_VER5)
  if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
    {
      __u64 data_size;

      data_offset = header->data_offset;
      data_size = header->data_size;

      size = (unsigned int) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
	{
	  munmap ((void *) header, size + PAGE_SIZE);
	  goto err_file;
	}
    }
#endif /* defined (PERF_ATTR_SIZE_VER5) */
  bts->header = header;
  bts->bts.mem = ((const uint8_t *) header) + data_offset;
  bts->bts.size = size;
  bts->bts.data_head = &header->data_head;
  bts->bts.last_head = 0ull;

  tinfo->conf.bts.size = (unsigned int) size;
  return tinfo;

 err_file:
  /* We were not able to allocate any buffer.  */
  close (bts->file);

 err_out:
  xfree (tinfo);
  return NULL;
}
#if defined (PERF_ATTR_SIZE_VER5)

/* Enable branch tracing in Intel Processor Trace format.  */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  struct perf_event_mmap_page *header;
  struct btrace_target_info *tinfo;
  struct btrace_tinfo_pt *pt;
  size_t pages, size;
  int pid, pg, errcode, type;

  if (conf->size == 0)
    return NULL;

  errcode = perf_event_pt_event_type (&type);
  if (errcode != 0)
    return NULL;

  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  tinfo = XCNEW (struct btrace_target_info);
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_PT;
  pt = &tinfo->variant.pt;

  pt->attr.size = sizeof (pt->attr);
  pt->attr.type = type;

  pt->attr.exclude_kernel = 1;
  pt->attr.exclude_hv = 1;
  pt->attr.exclude_idle = 1;

  errno = 0;
  pt->file = syscall (SYS_perf_event_open, &pt->attr, pid, -1, -1, 0);
  if (pt->file < 0)
    goto err;

  /* Allocate the configuration page.  */
  header = ((struct perf_event_mmap_page *)
	    mmap (NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
		  pt->file, 0));
  if (header == MAP_FAILED)
    goto err_file;

  header->aux_offset = header->data_offset + header->data_size;
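
  /* The AUX area holds the actual Intel PT trace.  It is mapped separately
     from the data buffer; we tell the kernel where it starts and how big it
     is via aux_offset and aux_size in the configuration page.  */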
  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
	   + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
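  /* (Same rounding as in linux_enable_bts above.)  */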
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);
  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
	continue;

      size = (size_t) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
	continue;

      header->aux_size = data_size;
      length = size;

      pt->pt.mem = ((const uint8_t *)
		    mmap (NULL, length, PROT_READ, MAP_SHARED, pt->file,
			  header->aux_offset));
      if (pt->pt.mem != MAP_FAILED)
	break;
    }

  if (pages == 0)
    goto err_conf;

  pt->header = header;
  pt->pt.size = size;
  pt->pt.data_head = &header->aux_head;

  tinfo->conf.pt.size = (unsigned int) size;
  return tinfo;

 err_conf:
  munmap ((void *) header, PAGE_SIZE);

 err_file:
  close (pt->file);

 err:
  xfree (tinfo);
  return NULL;
}
#else /* !defined (PERF_ATTR_SIZE_VER5) */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  return NULL;
}

#endif /* !defined (PERF_ATTR_SIZE_VER5) */
/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  struct btrace_target_info *tinfo;

  tinfo = NULL;
  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      tinfo = linux_enable_bts (ptid, &conf->bts);
      break;

    case BTRACE_FORMAT_PT:
      tinfo = linux_enable_pt (ptid, &conf->pt);
      break;
    }

  return tinfo;
}
/* Disable BTS tracing.  */

static enum btrace_error
linux_disable_bts (struct btrace_tinfo_bts *tinfo)
{
  munmap ((void *) tinfo->header, tinfo->bts.size + PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* Disable Intel Processor Trace tracing.  */

static enum btrace_error
linux_disable_pt (struct btrace_tinfo_pt *tinfo)
{
  munmap ((void *) tinfo->pt.mem, tinfo->pt.size);
  munmap ((void *) tinfo->header, PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}
/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  enum btrace_error errcode;

  errcode = BTRACE_ERR_NOT_SUPPORTED;
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      errcode = linux_disable_bts (&tinfo->variant.bts);
      break;

    case BTRACE_FORMAT_PT:
      errcode = linux_disable_pt (&tinfo->variant.pt);
      break;
    }

  if (errcode == BTRACE_ERR_NONE)
    xfree (tinfo);

  return errcode;
}
/* Read branch trace data in BTS format for the thread given by TINFO into
   BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_bts (struct btrace_data_bts *btrace,
		struct btrace_target_info *tinfo,
		enum btrace_read_type type)
{
  struct perf_event_buffer *pevent;
  const uint8_t *begin, *end, *start;
  size_t buffer_size, size;
  __u64 data_head, data_tail;
  unsigned int retries = 5;

  pevent = &tinfo->variant.bts.bts;

  /* For delta reads, we return at least the partial last block containing
     the current pc.  */
  if (type == BTRACE_READ_NEW && !perf_event_new_data (pevent))
    return BTRACE_ERR_NONE;

  buffer_size = pevent->size;
  data_tail = pevent->last_head;

  /* We may need to retry reading the trace.  See below.  */
  while (retries--)
    {
      data_head = *pevent->data_head;

      /* Delete any leftover trace from the previous iteration.  */
      VEC_free (btrace_block_s, btrace->blocks);

      if (type == BTRACE_READ_DELTA)
	{
	  __u64 data_size;

	  /* Determine the number of bytes to read and check for buffer
	     overflows.  */

	  /* Check for data head overflows.  We might be able to recover from
	     those but they are very unlikely and it's not really worth the
	     effort.  */
	  if (data_head < data_tail)
	    return BTRACE_ERR_OVERFLOW;

	  /* If the buffer is smaller than the trace delta, we overflowed.  */
	  data_size = data_head - data_tail;
	  if (buffer_size < data_size)
	    return BTRACE_ERR_OVERFLOW;

	  /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t.  */
	  size = (size_t) data_size;
	}
      else
	{
	  /* Read the entire buffer.  */
	  size = buffer_size;

	  /* Adjust the size if the buffer has not overflowed, yet.  */
	  if (data_head < size)
	    size = (size_t) data_head;
	}

      /* Data_head keeps growing; the buffer itself is circular.  */
      begin = pevent->mem;
      start = begin + data_head % buffer_size;

      if (data_head <= buffer_size)
	end = start;
      else
	end = begin + pevent->size;

      btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);

      /* The stopping thread notifies its ptracer before it is scheduled out.
	 On multi-core systems, the debugger might therefore run while the
	 kernel might be writing the last branch trace records.

	 Let's check whether the data head moved while we read the trace.  */
      if (data_head == *pevent->data_head)
	break;
    }

  pevent->last_head = data_head;

  /* Prune the incomplete last block (i.e. the first one of inferior execution)
     if we're not doing a delta read.  There is no way of filling in its zeroed
     BEGIN element.  */
  if (!VEC_empty (btrace_block_s, btrace->blocks)
      && type != BTRACE_READ_DELTA)
    VEC_pop (btrace_block_s, btrace->blocks);

  return BTRACE_ERR_NONE;
}
/* Fill in the Intel Processor Trace configuration information.  */

static void
linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
{
  conf->cpu = btrace_this_cpu ();
}

/* Read branch trace data in Intel Processor Trace format for the thread
   given by TINFO into BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_pt (struct btrace_data_pt *btrace,
	       struct btrace_target_info *tinfo,
	       enum btrace_read_type type)
{
  struct perf_event_buffer *pt;

  pt = &tinfo->variant.pt.pt;

  linux_fill_btrace_pt_config (&btrace->config);

  switch (type)
    {
    case BTRACE_READ_DELTA:
      /* We don't support delta reads.  The data head (i.e. aux_head) wraps
	 around to stay inside the aux buffer.  */
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_READ_NEW:
      if (!perf_event_new_data (pt))
	return BTRACE_ERR_NONE;

      /* Fall through.  */
    case BTRACE_READ_ALL:
      perf_event_read_all (pt, &btrace->data, &btrace->size);
      return BTRACE_ERR_NONE;
    }

  internal_error (__FILE__, __LINE__, _("Unknown btrace read type."));
}
/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
		   struct btrace_target_info *tinfo,
		   enum btrace_read_type type)
{
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_FORMAT_BTS:
      /* We read btrace in BTS format.  */
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;

      return linux_read_bts (&btrace->variant.bts, tinfo, type);

    case BTRACE_FORMAT_PT:
      /* We read btrace in Intel Processor Trace format.  */
      btrace->format = BTRACE_FORMAT_PT;
      btrace->variant.pt.data = NULL;
      btrace->variant.pt.size = 0;

      return linux_read_pt (&btrace->variant.pt, tinfo, type);
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}
/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return &tinfo->conf;
}
#else /* !HAVE_LINUX_PERF_EVENT_H */

/* See linux-btrace.h.  */

int
linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
{
  return 0;
}

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  return NULL;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
		   struct btrace_target_info *tinfo,
		   enum btrace_read_type type)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return NULL;
}

#endif /* !HAVE_LINUX_PERF_EVENT_H */