/* Linux-dependent part of branch trace support for GDB, and GDBserver.

   Copyright (C) 2013-2018 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "common-defs.h"
#include "linux-btrace.h"
#include "common-regcache.h"
#include "gdb_wait.h"
#include "x86-cpuid.h"
#include "filestuff.h"

#include <inttypes.h>

#include <sys/syscall.h>

#if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
#include <unistd.h>
#include <sys/mman.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <sys/types.h>
#include <signal.h>

/* A branch trace record in perf_event.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};

/* A perf_event branch trace sample.  */
struct perf_event_sample
{
  /* The perf_event sample header.  */
  struct perf_event_header header;

  /* The perf_event branch tracing payload.  */
  struct perf_event_bts bts;
};

/* Identify the cpu we're running on.  */
static struct btrace_cpu
btrace_this_cpu (void)
{
  struct btrace_cpu cpu;
  unsigned int eax, ebx, ecx, edx;
  int ok;

  memset (&cpu, 0, sizeof (cpu));

  ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
  if (ok != 0)
    {
      if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
          && edx == signature_INTEL_edx)
        {
          unsigned int cpuid, ignore;

          ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
          if (ok != 0)
            {
              cpu.vendor = CV_INTEL;

              cpu.family = (cpuid >> 8) & 0xf;
              cpu.model = (cpuid >> 4) & 0xf;

              if (cpu.family == 0x6)
                cpu.model += (cpuid >> 12) & 0xf0;
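
              /* For example, a leaf-0x1 EAX value of 0x000306a9
                 (illustrative) yields family 0x6 and model 0xa, extended
                 above by 0x30 to 0x3a.  */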
            }
        }
    }

  return cpu;
}

/* Return non-zero if there is new data in PEVENT; zero otherwise.  */

static int
perf_event_new_data (const struct perf_event_buffer *pev)
{
  return *pev->data_head != pev->last_head;
}

/* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
   to the memory holding the copy.
   The caller is responsible for freeing the memory.  */

static gdb_byte *
perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
                 size_t size)
{
  const gdb_byte *begin, *end, *start, *stop;
  gdb_byte *buffer;
  size_t buffer_size;
  __u64 data_tail;

  if (size == 0)
    return NULL;

  /* We should never ask for more data than the buffer can hold.  */
  buffer_size = pev->size;
  gdb_assert (size <= buffer_size);

  /* If we ask for more data than we seem to have, we wrap around and read
     data from the end of the buffer.  This is already handled by the %
     BUFFER_SIZE operation, below.  Here, we just need to make sure that we
     don't underflow.

     Note that this is perfectly OK for perf event buffers where data_head
     doesn't grow indefinitely and instead wraps around to remain within the
     buffer's boundaries.  */
  if (data_head < size)
    data_head += buffer_size;
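
  /* For example (illustrative numbers): with BUFFER_SIZE == 4096, SIZE == 512,
     and DATA_HEAD == 256, DATA_HEAD is first biased to 4352 so that DATA_TAIL
     becomes 3840; the copy below then wraps around from the last 256 bytes of
     the buffer to its first 256 bytes.  */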

  gdb_assert (size <= data_head);
  data_tail = data_head - size;

  begin = pev->mem;
  start = begin + data_tail % buffer_size;
  stop = begin + data_head % buffer_size;

  buffer = (gdb_byte *) xmalloc (size);

  if (start < stop)
    memcpy (buffer, start, stop - start);
  else
    {
      end = begin + buffer_size;

      memcpy (buffer, start, end - start);
      memcpy (buffer + (end - start), begin, stop - begin);
    }

  return buffer;
}

/* Copy the perf event buffer data from PEV.
   Store a pointer to the copy into DATA and its size in PSIZE.  */

static void
perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
                     size_t *psize)
{
  size_t size;
  __u64 data_head;

  data_head = *pev->data_head;
  size = pev->size;

  *data = perf_event_read (pev, data_head, size);
  *psize = size;

  pev->last_head = data_head;
}

/* Determine the event type.
   Returns zero on success and fills in TYPE; returns -1 otherwise.  */
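
/* The sysfs file read below holds the PMU type number the kernel assigned
   to the intel_pt event source: a small decimal integer, e.g. "8" (the
   exact value varies from system to system).  */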

static int
perf_event_pt_event_type (int *type)
{
  FILE *file;
  int found;

  file = fopen ("/sys/bus/event_source/devices/intel_pt/type", "r");
  if (file == NULL)
    return -1;

  found = fscanf (file, "%d", type);
  fclose (file);

  if (found == 1)
    return 0;
  return -1;
}

/* Try to determine the start address of the Linux kernel.  */

static uint64_t
linux_determine_kernel_start (void)
{
  static uint64_t kernel_start;
  static int cached;

  if (cached != 0)
    return kernel_start;

  cached = 1;

  gdb_file_up file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
  if (file == NULL)
    return kernel_start;

  while (!feof (file.get ()))
    {
      char buffer[1024], symbol[8], *line;
      uint64_t addr;
      int match;

      line = fgets (buffer, sizeof (buffer), file.get ());
      if (line == NULL)
        break;

      match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
      if (match != 2)
        continue;

      if (strcmp (symbol, "_text") == 0)
        {
          kernel_start = addr;
          break;
        }
    }

  return kernel_start;
}

/* Check whether an address is in the kernel.  */

static inline int
perf_event_is_kernel_addr (uint64_t addr)
{
  uint64_t kernel_start;

  kernel_start = linux_determine_kernel_start ();
  if (kernel_start != 0ull)
    return (addr >= kernel_start);

  /* If we don't know the kernel's start address, let's check the most
     significant bit.  This will work at least for 64-bit kernels.  */
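  /* E.g. a typical x86_64 kernel text address such as 0xffffffff81000000
     (illustrative) has bit 63 set, while 64-bit user-space addresses
     do not.  */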
  return ((addr & (1ull << 63)) != 0);
}

/* Check whether a perf event record should be skipped.  */

static inline int
perf_event_skip_bts_record (const struct perf_event_bts *bts)
{
  /* The hardware may report branches from kernel into user space.  Branches
     from user into kernel space will be suppressed.  We filter the former to
     provide a consistent branch trace excluding the kernel.  */
  return perf_event_is_kernel_addr (bts->from);
}

/* Perform a few consistency checks on a perf event sample record.  This is
   meant to catch cases when we get out of sync with the perf event stream.  */

static inline int
perf_event_sample_ok (const struct perf_event_sample *sample)
{
  if (sample->header.type != PERF_RECORD_SAMPLE)
    return 0;

  if (sample->header.size != sizeof (*sample))
    return 0;

  return 1;
}

/* Branch trace is collected in a circular buffer [begin; end) as pairs of from
   and to addresses (plus a header).

   Start points into that buffer at the next sample position.
   We read the collected samples backwards from start.

   While reading the samples, we convert the information into a list of blocks.
   For two adjacent samples s1 and s2, we form a block b such that b.begin =
   s1.to and b.end = s2.from.

   In case the buffer overflows during sampling, one sample may have its lower
   part at the end and its upper part at the beginning of the buffer.  */
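
/* For example, given two adjacent samples s1 = { .from = 0x400100,
   .to = 0x400200 } and s2 = { .from = 0x400250, .to = 0x400300 }
   (hypothetical addresses), we form the block [0x400200; 0x400250]:
   execution entered at s1.to and ran sequentially up to the branch
   taken at s2.from.  */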

static VEC (btrace_block_s) *
perf_event_read_bts (struct btrace_target_info *tinfo, const uint8_t *begin,
                     const uint8_t *end, const uint8_t *start, size_t size)
{
  VEC (btrace_block_s) *btrace = NULL;
  struct perf_event_sample sample;
  size_t read = 0;
  struct btrace_block block = { 0, 0 };
  struct regcache *regcache;

  gdb_assert (begin <= start);
  gdb_assert (start <= end);

  /* The first block ends at the current pc.  */
  regcache = get_thread_regcache_for_ptid (tinfo->ptid);
  block.end = regcache_read_pc (regcache);

  /* The buffer may contain a partial record as its last entry (i.e. when the
     buffer size is not a multiple of the sample size).  */
  read = sizeof (sample) - 1;

  for (; read < size; read += sizeof (sample))
    {
      const struct perf_event_sample *psample;

      /* Find the next perf_event sample in a backwards traversal.  */
      start -= sizeof (sample);

      /* If we're still inside the buffer, we're done.  */
      if (begin <= start)
        psample = (const struct perf_event_sample *) start;
      else
        {
          int missing;

          /* We're to the left of the ring buffer, we will wrap around and
             reappear at the very right of the ring buffer.  */
          missing = (begin - start);
          start = (end - missing);

          /* If the entire sample is missing, we're done.  */
          if (missing == sizeof (sample))
            psample = (const struct perf_event_sample *) start;
          else
            {
              uint8_t *stack;

              /* The sample wrapped around.  The lower part is at the end and
                 the upper part is at the beginning of the buffer.  */
              stack = (uint8_t *) &sample;

              /* Copy the two parts so we have a contiguous sample.  */
              memcpy (stack, start, missing);
              memcpy (stack + missing, begin, sizeof (sample) - missing);

              psample = &sample;
            }
        }

      if (!perf_event_sample_ok (psample))
        {
          warning (_("Branch trace may be incomplete."));
          break;
        }

      if (perf_event_skip_bts_record (&psample->bts))
        continue;

      /* We found a valid sample, so we can complete the current block.  */
      block.begin = psample->bts.to;

      VEC_safe_push (btrace_block_s, btrace, &block);

      /* Start the next block.  */
      block.end = psample->bts.from;
    }

  /* Push the last block (i.e. the first one of inferior execution), as well.
     We don't know where it ends, but we know where it starts.  If we're
     reading delta trace, we can fill in the start address later on.
     Otherwise we will prune it.  */
  block.begin = 0;
  VEC_safe_push (btrace_block_s, btrace, &block);

  return btrace;
}

/* Check whether the kernel supports BTS.  */
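
/* We probe by forking a child that stops itself with a SIGTRAP and then
   trying to open a BTS-style perf event on it; if perf_event_open accepts
   the attributes, the kernel supports BTS.  */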

static int
kernel_supports_bts (void)
{
  struct perf_event_attr attr;
  pid_t child, pid;
  int status, file;

  errno = 0;
  child = fork ();
  switch (child)
    {
    case -1:
      warning (_("test bts: cannot fork: %s."), safe_strerror (errno));
      return 0;

    case 0:
      status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      if (status != 0)
        {
          warning (_("test bts: cannot PTRACE_TRACEME: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      status = raise (SIGTRAP);
      if (status != 0)
        {
          warning (_("test bts: cannot raise SIGTRAP: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      _exit (1);

    default:
      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test bts: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          return 0;
        }

      if (!WIFSTOPPED (status))
        {
          warning (_("test bts: expected stop. status: %d."),
                   status);
          return 0;
        }

      memset (&attr, 0, sizeof (attr));

      attr.type = PERF_TYPE_HARDWARE;
      attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
      attr.sample_period = 1;
      attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
      attr.exclude_kernel = 1;
      attr.exclude_hv = 1;
      attr.exclude_idle = 1;

      file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
      if (file >= 0)
        close (file);

      kill (child, SIGKILL);
      ptrace (PTRACE_KILL, child, NULL, NULL);

      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test bts: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          if (!WIFSIGNALED (status))
            warning (_("test bts: expected killed. status: %d."),
                     status);
        }

      return (file >= 0);
    }
}

/* Check whether the kernel supports Intel Processor Trace.  */

static int
kernel_supports_pt (void)
{
  struct perf_event_attr attr;
  pid_t child, pid;
  int status, file, type;

  errno = 0;
  child = fork ();
  switch (child)
    {
    case -1:
      warning (_("test pt: cannot fork: %s."), safe_strerror (errno));
      return 0;

    case 0:
      status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      if (status != 0)
        {
          warning (_("test pt: cannot PTRACE_TRACEME: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      status = raise (SIGTRAP);
      if (status != 0)
        {
          warning (_("test pt: cannot raise SIGTRAP: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      _exit (1);

    default:
      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test pt: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          return 0;
        }

      if (!WIFSTOPPED (status))
        {
          warning (_("test pt: expected stop. status: %d."),
                   status);
          return 0;
        }

      status = perf_event_pt_event_type (&type);
      if (status != 0)
        file = -1;
      else
        {
          memset (&attr, 0, sizeof (attr));

          attr.size = sizeof (attr);
          attr.type = type;
          attr.exclude_kernel = 1;
          attr.exclude_hv = 1;
          attr.exclude_idle = 1;

          file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
          if (file >= 0)
            close (file);
        }

      kill (child, SIGKILL);
      ptrace (PTRACE_KILL, child, NULL, NULL);

      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test pt: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          if (!WIFSIGNALED (status))
            warning (_("test pt: expected killed. status: %d."),
                     status);
        }

      return (file >= 0);
    }
}

/* Check whether an Intel cpu supports BTS.  */

static int
intel_supports_bts (const struct btrace_cpu *cpu)
{
  switch (cpu->family)
    {
    case 0x6:
      switch (cpu->model)
        {
        case 0x1a: /* Nehalem */
        case 0x1f:
        case 0x1e:
        case 0x2e:
        case 0x25: /* Westmere */
        case 0x2c:
        case 0x2f:
        case 0x2a: /* Sandy Bridge */
        case 0x2d:
        case 0x3a: /* Ivy Bridge */

          /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
             "from" information after an EIST transition, T-states, C1E, or
             Adaptive Thermal Throttling.  */
          return 0;
        }
    }

  return 1;
}

/* Check whether the cpu supports BTS.  */

static int
cpu_supports_bts (void)
{
  struct btrace_cpu cpu;

  cpu = btrace_this_cpu ();
  switch (cpu.vendor)
    {
    default:
      /* Don't know about others.  Let's assume they do.  */
      return 1;

    case CV_INTEL:
      return intel_supports_bts (&cpu);
    }
}

/* Check whether the linux target supports BTS.  */

static int
linux_supports_bts (void)
{
  static int cached;

  if (cached == 0)
    {
      if (!kernel_supports_bts ())
        cached = -1;
      else if (!cpu_supports_bts ())
        cached = -1;
      else
        cached = 1;
    }

  return cached > 0;
}

/* Check whether the linux target supports Intel Processor Trace.  */

static int
linux_supports_pt (void)
{
  static int cached;

  if (cached == 0)
    {
      if (!kernel_supports_pt ())
        cached = -1;
      else
        cached = 1;
    }

  return cached > 0;
}

/* See linux-btrace.h.  */

int
linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
{
  switch (format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return linux_supports_bts ();

    case BTRACE_FORMAT_PT:
      return linux_supports_pt ();
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format"));
}

/* Enable branch tracing in BTS format.  */

static struct btrace_target_info *
linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
{
  struct perf_event_mmap_page *header;
  struct btrace_target_info *tinfo;
  struct btrace_tinfo_bts *bts;
  size_t size, pages;
  __u64 data_offset;
  int pid, pg;

  tinfo = XCNEW (struct btrace_target_info);
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_BTS;
  bts = &tinfo->variant.bts;

  bts->attr.size = sizeof (bts->attr);
  bts->attr.type = PERF_TYPE_HARDWARE;
  bts->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
  bts->attr.sample_period = 1;

  /* We sample the from and to addresses.  */
  bts->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;

  bts->attr.exclude_kernel = 1;
  bts->attr.exclude_hv = 1;
  bts->attr.exclude_idle = 1;

  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  errno = 0;
  bts->file = syscall (SYS_perf_event_open, &bts->attr, pid, -1, -1, 0);
  if (bts->file < 0)
    goto err_out;

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);
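
  /* E.g. a request of 5 pages becomes 6 at bit 0 and 8 at bit 1; the loop
     then terminates at bit 3 since 8 == 1 << 3.  */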

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      size = (size_t) data_size;
      length = size + PAGE_SIZE;

      /* Check for overflows.  */
      if ((__u64) length != data_size + PAGE_SIZE)
        continue;

      /* The number of pages we request needs to be a power of two.  */
      header = ((struct perf_event_mmap_page *)
                mmap (NULL, length, PROT_READ, MAP_SHARED, bts->file, 0));
      if (header != MAP_FAILED)
        break;
    }

  if (pages == 0)
    goto err_file;

  data_offset = PAGE_SIZE;

#if defined (PERF_ATTR_SIZE_VER5)
  if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
    {
      __u64 data_size;

      data_offset = header->data_offset;
      data_size = header->data_size;

      size = (unsigned int) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
        {
          munmap ((void *) header, size + PAGE_SIZE);
          goto err_file;
        }
    }
#endif /* defined (PERF_ATTR_SIZE_VER5) */

  bts->header = header;
  bts->bts.mem = ((const uint8_t *) header) + data_offset;
  bts->bts.size = size;
  bts->bts.data_head = &header->data_head;
  bts->bts.last_head = 0ull;

  tinfo->conf.bts.size = (unsigned int) size;
  return tinfo;

 err_file:
  /* We were not able to allocate any buffer.  */
  close (bts->file);

 err_out:
  xfree (tinfo);
  return NULL;
}

#if defined (PERF_ATTR_SIZE_VER5)

/* Enable branch tracing in Intel Processor Trace format.  */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  struct perf_event_mmap_page *header;
  struct btrace_target_info *tinfo;
  struct btrace_tinfo_pt *pt;
  size_t pages, size;
  int pid, pg, errcode, type;

  if (conf->size == 0)
    return NULL;

  errcode = perf_event_pt_event_type (&type);
  if (errcode != 0)
    return NULL;

  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  tinfo = XCNEW (struct btrace_target_info);
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_PT;
  pt = &tinfo->variant.pt;

  pt->attr.size = sizeof (pt->attr);
  pt->attr.type = type;

  pt->attr.exclude_kernel = 1;
  pt->attr.exclude_hv = 1;
  pt->attr.exclude_idle = 1;

  errno = 0;
  pt->file = syscall (SYS_perf_event_open, &pt->attr, pid, -1, -1, 0);
  if (pt->file < 0)
    goto err;

  /* Allocate the configuration page.  */
  header = ((struct perf_event_mmap_page *)
            mmap (NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
                  pt->file, 0));
  if (header == MAP_FAILED)
    goto err_file;

  header->aux_offset = header->data_offset + header->data_size;
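
  /* The trace itself goes into the separate AUX area, which we place right
     behind the data section within the perf event file mapping; aux_offset
     and aux_size (set below) tell the kernel where it will live.  */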

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      size = (size_t) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
        continue;

      header->aux_size = data_size;
      length = size;

      pt->pt.mem = ((const uint8_t *)
                    mmap (NULL, length, PROT_READ, MAP_SHARED, pt->file,
                          header->aux_offset));
      if (pt->pt.mem != MAP_FAILED)
        break;
    }

  if (pages == 0)
    goto err_conf;

  pt->header = header;
  pt->pt.size = size;
  pt->pt.data_head = &header->aux_head;

  tinfo->conf.pt.size = (unsigned int) size;
  return tinfo;

 err_conf:
  munmap ((void *) header, PAGE_SIZE);

 err_file:
  close (pt->file);

 err:
  xfree (tinfo);
  return NULL;
}

#else /* !defined (PERF_ATTR_SIZE_VER5) */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  errno = EOPNOTSUPP;
  return NULL;
}

#endif /* !defined (PERF_ATTR_SIZE_VER5) */

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  struct btrace_target_info *tinfo;

  tinfo = NULL;
  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      tinfo = linux_enable_bts (ptid, &conf->bts);
      break;

    case BTRACE_FORMAT_PT:
      tinfo = linux_enable_pt (ptid, &conf->pt);
      break;
    }

  return tinfo;
}
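
/* A minimal usage sketch (illustrative, not part of this file): callers
   enable tracing, read it while the thread is stopped, and disable it
   again, e.g.

     struct btrace_config conf;

     memset (&conf, 0, sizeof (conf));
     conf.format = BTRACE_FORMAT_BTS;
     conf.bts.size = 64 * 1024;

     struct btrace_target_info *tinfo = linux_enable_btrace (ptid, &conf);
     if (tinfo != NULL)
       {
         // ... read trace via linux_read_btrace () while the thread
         //     is stopped ...
         linux_disable_btrace (tinfo);
       }
*/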

/* Disable BTS tracing.  */

static enum btrace_error
linux_disable_bts (struct btrace_tinfo_bts *tinfo)
{
  munmap ((void *) tinfo->header, tinfo->bts.size + PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* Disable Intel Processor Trace tracing.  */

static enum btrace_error
linux_disable_pt (struct btrace_tinfo_pt *tinfo)
{
  munmap ((void *) tinfo->pt.mem, tinfo->pt.size);
  munmap ((void *) tinfo->header, PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  enum btrace_error errcode;

  errcode = BTRACE_ERR_NOT_SUPPORTED;
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      errcode = linux_disable_bts (&tinfo->variant.bts);
      break;

    case BTRACE_FORMAT_PT:
      errcode = linux_disable_pt (&tinfo->variant.pt);
      break;
    }

  if (errcode == BTRACE_ERR_NONE)
    xfree (tinfo);

  return errcode;
}

/* Read branch trace data in BTS format for the thread given by TINFO into
   BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_bts (struct btrace_data_bts *btrace,
                struct btrace_target_info *tinfo,
                enum btrace_read_type type)
{
  struct perf_event_buffer *pevent;
  const uint8_t *begin, *end, *start;
  size_t buffer_size, size;
  __u64 data_head, data_tail;
  unsigned int retries = 5;

  pevent = &tinfo->variant.bts.bts;

  /* For delta reads, we return at least the partial last block containing
     the current PC.  */
  if (type == BTRACE_READ_NEW && !perf_event_new_data (pevent))
    return BTRACE_ERR_NONE;

  buffer_size = pevent->size;
  data_tail = pevent->last_head;

  /* We may need to retry reading the trace.  See below.  */
  while (retries--)
    {
      data_head = *pevent->data_head;

      /* Delete any leftover trace from the previous iteration.  */
      VEC_free (btrace_block_s, btrace->blocks);

      if (type == BTRACE_READ_DELTA)
        {
          __u64 data_size;

          /* Determine the number of bytes to read and check for buffer
             overflows.  */

          /* Check for data head overflows.  We might be able to recover from
             those but they are very unlikely and it's not really worth the
             effort, I think.  */
          if (data_head < data_tail)
            return BTRACE_ERR_OVERFLOW;

          /* If the buffer is smaller than the trace delta, we overflowed.  */
          data_size = data_head - data_tail;
          if (buffer_size < data_size)
            return BTRACE_ERR_OVERFLOW;

          /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t.  */
          size = (size_t) data_size;
        }
      else
        {
          /* Read the entire buffer.  */
          size = buffer_size;

          /* Adjust the size if the buffer has not overflowed, yet.  */
          if (data_head < size)
            size = (size_t) data_head;
        }

      /* Data_head keeps growing; the buffer itself is circular.  */
      begin = pevent->mem;
      start = begin + data_head % buffer_size;

      if (data_head <= buffer_size)
        end = start;
      else
        end = begin + pevent->size;

      btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);

      /* The stopping thread notifies its ptracer before it is scheduled out.
         On multi-core systems, the debugger might therefore run while the
         kernel is still writing the last branch trace records.

         Let's check whether the data head moved while we read the trace.  */
      if (data_head == *pevent->data_head)
        break;
    }

  pevent->last_head = data_head;

  /* Prune the incomplete last block (i.e. the first one of inferior execution)
     if we're not doing a delta read.  There is no way of filling in its zeroed
     BEGIN element.  */
  if (!VEC_empty (btrace_block_s, btrace->blocks)
      && type != BTRACE_READ_DELTA)
    VEC_pop (btrace_block_s, btrace->blocks);

  return BTRACE_ERR_NONE;
}

/* Fill in the Intel Processor Trace configuration information.  */

static void
linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
{
  conf->cpu = btrace_this_cpu ();
}

/* Read branch trace data in Intel Processor Trace format for the thread
   given by TINFO into BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_pt (struct btrace_data_pt *btrace,
               struct btrace_target_info *tinfo,
               enum btrace_read_type type)
{
  struct perf_event_buffer *pt;

  pt = &tinfo->variant.pt.pt;

  linux_fill_btrace_pt_config (&btrace->config);

  switch (type)
    {
    case BTRACE_READ_DELTA:
      /* We don't support delta reads.  The data head (i.e. aux_head) wraps
         around to stay inside the aux buffer.  */
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_READ_NEW:
      if (!perf_event_new_data (pt))
        return BTRACE_ERR_NONE;

      /* Fall through.  */
    case BTRACE_READ_ALL:
      perf_event_read_all (pt, &btrace->data, &btrace->size);
      return BTRACE_ERR_NONE;
    }

  internal_error (__FILE__, __LINE__, _("Unknown btrace read type."));
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_FORMAT_BTS:
      /* We read btrace in BTS format.  */
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;

      return linux_read_bts (&btrace->variant.bts, tinfo, type);

    case BTRACE_FORMAT_PT:
      /* We read btrace in Intel Processor Trace format.  */
      btrace->format = BTRACE_FORMAT_PT;
      btrace->variant.pt.data = NULL;
      btrace->variant.pt.size = 0;

      return linux_read_pt (&btrace->variant.pt, tinfo, type);
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return &tinfo->conf;
}

#else /* !HAVE_LINUX_PERF_EVENT_H */

/* See linux-btrace.h.  */

int
linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
{
  return 0;
}

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  return NULL;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return NULL;
}

#endif /* !HAVE_LINUX_PERF_EVENT_H */