/*
 * intel-bts.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>

#include "thread-stack.h"
#include "intel-pt-decoder/intel-pt-insn-decoder.h"
#include "intel-bts.h"
#define MAX_TIMESTAMP (~0ULL)

#define INTEL_BTS_ERR_NOINSN	5
#define INTEL_BTS_ERR_LOST	9

#if __BYTE_ORDER == __BIG_ENDIAN
#define le64_to_cpu bswap_64
#else
#define le64_to_cpu
#endif
struct intel_bts {
	struct auxtrace			auxtrace;
	struct auxtrace_queues		queues;
	struct auxtrace_heap		heap;
	struct perf_session		*session;
	struct machine			*machine;
	/* ... */
	struct perf_tsc_conversion	tc;
	bool				cap_user_time_zero;
	struct itrace_synth_opts	synth_opts;
	/* ... */
	u64				branches_sample_type;
	/* ... */
	size_t				branches_event_size;
	unsigned long			num_events;
};
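/*
 * Per-queue decoder state: the auxtrace layer creates one queue per CPU
 * (or per thread when not tracing system-wide).
 */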
struct intel_bts_queue {
	struct intel_bts	*bts;
	unsigned int		queue_nr;
	struct auxtrace_buffer	*buffer;
	/* ... */
	struct intel_pt_insn	intel_pt_insn;
	/* ... */
};
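/*
 * Dump raw BTS records for debug output: each record is three u64 values
 * (from address, to address, misc flags), printed as hex plus a decoded
 * from -> to line.
 */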
static void intel_bts_dump(struct intel_bts *bts __maybe_unused,
			   unsigned char *buf, size_t len)
{
	struct branch *branch;
	size_t i, pos = 0, br_sz = sizeof(struct branch), sz;
	const char *color = PERF_COLOR_BLUE;

	color_fprintf(stdout, color,
		      ". ... Intel BTS data: size %zu bytes\n",
		      len);

	while (len) {
		if (len >= br_sz)
			sz = br_sz;
		else
			sz = len;
		printf(".");
		color_fprintf(stdout, color, " %08x: ", pos);
		for (i = 0; i < sz; i++)
			color_fprintf(stdout, color, " %02x", buf[i]);
		for (; i < br_sz; i++)
			color_fprintf(stdout, color, "   ");
		if (len >= br_sz) {
			branch = (struct branch *)buf;
			color_fprintf(stdout, color, " %"PRIx64" -> %"PRIx64" %s\n",
				      le64_to_cpu(branch->from),
				      le64_to_cpu(branch->to),
				      le64_to_cpu(branch->misc) & 0x10 ?
				      "pred" : "miss");
		} else {
			color_fprintf(stdout, color, " Bad record!\n");
		}
		pos += sz;
		buf += sz;
		len -= sz;
	}
}
static void intel_bts_dump_event(struct intel_bts *bts, unsigned char *buf,
				 size_t len)
{
	printf(".\n");
	intel_bts_dump(bts, buf, len);
}
static int intel_bts_lost(struct intel_bts *bts, struct perf_sample *sample)
{
	union perf_event event;
	int err;

	auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
			     INTEL_BTS_ERR_LOST, sample->cpu, sample->pid,
			     sample->tid, 0, "Lost trace data");

	err = perf_session__deliver_synth_event(bts->session, &event, NULL);
	if (err)
		pr_err("Intel BTS: failed to deliver error event, error %d\n",
		       err);

	return err;
}
static struct intel_bts_queue *intel_bts_alloc_queue(struct intel_bts *bts,
						     unsigned int queue_nr)
{
	struct intel_bts_queue *btsq;

	btsq = zalloc(sizeof(struct intel_bts_queue));
	if (!btsq)
		return NULL;

	btsq->bts = bts;
	btsq->queue_nr = queue_nr;
	btsq->pid = -1;
	btsq->tid = -1;
	btsq->cpu = -1;

	return btsq;
}
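/*
 * Attach per-queue state to an auxtrace queue and, when not in sampling mode,
 * put the queue's first buffer on the timestamp-ordered heap.
 */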
static int intel_bts_setup_queue(struct intel_bts *bts,
				 struct auxtrace_queue *queue,
				 unsigned int queue_nr)
{
	struct intel_bts_queue *btsq = queue->priv;

	if (list_empty(&queue->head))
		return 0;

	if (!btsq) {
		btsq = intel_bts_alloc_queue(bts, queue_nr);
		if (!btsq)
			return -ENOMEM;
		queue->priv = btsq;

		if (queue->cpu != -1)
			btsq->cpu = queue->cpu;
		btsq->tid = queue->tid;
	}

	if (bts->sampling_mode)
		return 0;

	if (!btsq->on_heap && !btsq->buffer) {
		int ret;

		btsq->buffer = auxtrace_buffer__next(queue, NULL);
		if (!btsq->buffer)
			return 0;

		ret = auxtrace_heap__add(&bts->heap, queue_nr,
					 btsq->buffer->reference);
		if (ret)
			return ret;
		btsq->on_heap = true;
	}

	return 0;
}
static int intel_bts_setup_queues(struct intel_bts *bts)
{
	unsigned int i;
	int ret;

	for (i = 0; i < bts->queues.nr_queues; i++) {
		ret = intel_bts_setup_queue(bts, &bts->queues.queue_array[i],
					    i);
		if (ret)
			return ret;
	}
	return 0;
}
static inline int intel_bts_update_queues(struct intel_bts *bts)
{
	if (bts->queues.new_data) {
		bts->queues.new_data = false;
		return intel_bts_setup_queues(bts);
	}
	return 0;
}
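/*
 * In snapshot mode, consecutive snapshots can capture overlapping data, so
 * find where buffer 'b' stops repeating the tail of the previous buffer 'a'.
 */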
static unsigned char *intel_bts_find_overlap(unsigned char *buf_a, size_t len_a,
					     unsigned char *buf_b, size_t len_b)
{
	size_t offs, len;

	if (len_a > len_b)
		offs = len_a - len_b;
	else
		offs = 0;

	for (; offs < len_a; offs += sizeof(struct branch)) {
		len = len_a - offs;
		if (!memcmp(buf_a + offs, buf_b, len))
			return buf_b + len;
	}

	return buf_b;
}
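/*
 * Trim the start of buffer 'b' (via use_data/use_size) so records already
 * present at the end of the previous buffer are not processed twice.
 */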
static int intel_bts_do_fix_overlap(struct auxtrace_queue *queue,
				    struct auxtrace_buffer *b)
{
	struct auxtrace_buffer *a;
	unsigned char *start;

	if (b->list.prev == &queue->head)
		return 0;

	a = list_entry(b->list.prev, struct auxtrace_buffer, list);

	start = intel_bts_find_overlap(a->data, a->size, b->data, b->size);
	if (!start)
		return -EINVAL;
	b->use_size = b->data + b->size - start;
	b->use_data = start;

	return 0;
}
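/* Classify an address as kernel or user space for the sample cpumode. */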
static inline u8 intel_bts_cpumode(struct intel_bts *bts, uint64_t ip)
{
	return machine__kernel_ip(bts->machine, ip) ?
	       PERF_RECORD_MISC_KERNEL :
	       PERF_RECORD_MISC_USER;
}
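/*
 * Synthesize one PERF_RECORD_SAMPLE for a branch record, carrying the from/to
 * addresses, sample flags and decoded instruction length.
 */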
static int intel_bts_synth_branch_sample(struct intel_bts_queue *btsq,
					 struct branch *branch)
{
	int ret;
	struct intel_bts *bts = btsq->bts;
	union perf_event event;
	struct perf_sample sample = { .ip = 0, };

	if (bts->synth_opts.initial_skip &&
	    bts->num_events++ <= bts->synth_opts.initial_skip)
		return 0;

	sample.ip = le64_to_cpu(branch->from);
	sample.cpumode = intel_bts_cpumode(bts, sample.ip);
	sample.pid = btsq->pid;
	sample.tid = btsq->tid;
	sample.addr = le64_to_cpu(branch->to);
	sample.id = btsq->bts->branches_id;
	sample.stream_id = btsq->bts->branches_id;
	sample.period = 1;
	sample.cpu = btsq->cpu;
	sample.flags = btsq->sample_flags;
	sample.insn_len = btsq->intel_pt_insn.length;
	memcpy(sample.insn, btsq->intel_pt_insn.buf, INTEL_PT_INSN_BUF_SZ);

	event.sample.header.type = PERF_RECORD_SAMPLE;
	event.sample.header.misc = sample.cpumode;
	event.sample.header.size = sizeof(struct perf_event_header);

	if (bts->synth_opts.inject) {
		event.sample.header.size = bts->branches_event_size;
		ret = perf_event__synthesize_sample(&event,
						    bts->branches_sample_type,
						    0, &sample);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(bts->session, &event, &sample);
	if (ret)
		pr_err("Intel BTS: failed to deliver branch event, error %d\n",
		       ret);

	return ret;
}
static int intel_bts_get_next_insn(struct intel_bts_queue *btsq, u64 ip)
{
	struct machine *machine = btsq->bts->machine;
	struct thread *thread;
	struct addr_location al;
	unsigned char buf[INTEL_PT_INSN_BUF_SZ];
	ssize_t len;
	int x86_64;
	uint8_t cpumode;
	int err = -1;

	if (machine__kernel_ip(machine, ip))
		cpumode = PERF_RECORD_MISC_KERNEL;
	else
		cpumode = PERF_RECORD_MISC_USER;

	thread = machine__find_thread(machine, -1, btsq->tid);
	if (!thread)
		return -1;

	if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso)
		goto out_put;

	len = dso__data_read_addr(al.map->dso, al.map, machine, ip, buf,
				  INTEL_PT_INSN_BUF_SZ);
	if (len <= 0)
		goto out_put;

	/* Load maps to ensure dso->is_64_bit has been updated */
	map__load(al.map);

	x86_64 = al.map->dso->is_64_bit;

	if (intel_pt_get_insn(buf, len, x86_64, &btsq->intel_pt_insn))
		goto out_put;

	err = 0;
out_put:
	thread__put(thread);
	return err;
}
static int intel_bts_synth_error(struct intel_bts *bts, int cpu, pid_t pid,
				 pid_t tid, u64 ip)
{
	union perf_event event;
	int err;

	auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
			     INTEL_BTS_ERR_NOINSN, cpu, pid, tid, ip,
			     "Failed to get instruction");

	err = perf_session__deliver_synth_event(bts->session, &event, NULL);
	if (err)
		pr_err("Intel BTS: failed to deliver error event, error %d\n",
		       err);

	return err;
}
static int intel_bts_get_branch_type(struct intel_bts_queue *btsq,
				     struct branch *branch)
{
	int err;

	if (!branch->from) {
		if (branch->to)
			btsq->sample_flags = PERF_IP_FLAG_BRANCH |
					     PERF_IP_FLAG_TRACE_BEGIN;
		else
			btsq->sample_flags = 0;
		btsq->intel_pt_insn.length = 0;
	} else if (!branch->to) {
		btsq->sample_flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_TRACE_END;
		btsq->intel_pt_insn.length = 0;
	} else {
		err = intel_bts_get_next_insn(btsq, branch->from);
		if (err) {
			btsq->sample_flags = 0;
			btsq->intel_pt_insn.length = 0;
			if (!btsq->bts->synth_opts.errors)
				return 0;
			err = intel_bts_synth_error(btsq->bts, btsq->cpu,
						    btsq->pid, btsq->tid,
						    le64_to_cpu(branch->from));
			return err;
		}
		btsq->sample_flags = intel_pt_insn_type(btsq->intel_pt_insn.op);
		/* Check for an async branch into the kernel */
		if (!machine__kernel_ip(btsq->bts->machine, branch->from) &&
		    machine__kernel_ip(btsq->bts->machine, branch->to) &&
		    btsq->sample_flags != (PERF_IP_FLAG_BRANCH |
					   PERF_IP_FLAG_CALL |
					   PERF_IP_FLAG_SYSCALLRET))
			btsq->sample_flags = PERF_IP_FLAG_BRANCH |
					     PERF_IP_FLAG_CALL |
					     PERF_IP_FLAG_ASYNC |
					     PERF_IP_FLAG_INTERRUPT;
	}

	return 0;
}
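/*
 * Walk every branch record in one AUX buffer: classify it, feed it to the
 * thread stack, apply the calls/returns filter and synthesize branch samples.
 */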
static int intel_bts_process_buffer(struct intel_bts_queue *btsq,
				    struct auxtrace_buffer *buffer,
				    struct thread *thread)
{
	struct branch *branch;
	size_t sz, bsz = sizeof(struct branch);
	u32 filter = btsq->bts->branches_filter;
	int err = 0;

	if (buffer->use_data) {
		sz = buffer->use_size;
		branch = buffer->use_data;
	} else {
		sz = buffer->size;
		branch = buffer->data;
	}

	if (!btsq->bts->sample_branches)
		return 0;

	for (; sz > bsz; branch += 1, sz -= bsz) {
		if (!branch->from && !branch->to)
			continue;
		intel_bts_get_branch_type(btsq, branch);
		if (btsq->bts->synth_opts.thread_stack)
			thread_stack__event(thread, btsq->sample_flags,
					    le64_to_cpu(branch->from),
					    le64_to_cpu(branch->to),
					    btsq->intel_pt_insn.length,
					    buffer->buffer_nr + 1);
		if (filter && !(filter & btsq->sample_flags))
			continue;
		err = intel_bts_synth_branch_sample(btsq, branch);
		if (err)
			break;
	}

	return err;
}
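/*
 * Process the next data buffer for one queue: resolve the thread, map in the
 * buffer contents, fix snapshot overlap, process its branch records, then
 * report the timestamp of the following buffer so the heap stays ordered.
 */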
static int intel_bts_process_queue(struct intel_bts_queue *btsq, u64 *timestamp)
{
	struct auxtrace_buffer *buffer = btsq->buffer, *old_buffer = buffer;
	struct auxtrace_queue *queue;
	struct thread *thread;
	int err;

	if (btsq->done)
		return 1;

	if (btsq->pid == -1) {
		thread = machine__find_thread(btsq->bts->machine, -1,
					      btsq->tid);
		if (thread)
			btsq->pid = thread->pid_;
	} else {
		thread = machine__findnew_thread(btsq->bts->machine, btsq->pid,
						 btsq->tid);
	}

	queue = &btsq->bts->queues.queue_array[btsq->queue_nr];

	if (!buffer)
		buffer = auxtrace_buffer__next(queue, NULL);

	if (!buffer) {
		if (!btsq->bts->sampling_mode)
			btsq->done = 1;
		err = 1;
		goto out_put;
	}

	/* Currently there is no support for split buffers */
	if (buffer->consecutive) {
		err = -EINVAL;
		goto out_put;
	}

	if (!buffer->data) {
		int fd = perf_data__fd(btsq->bts->session->data);

		buffer->data = auxtrace_buffer__get_data(buffer, fd);
		if (!buffer->data) {
			err = -ENOMEM;
			goto out_put;
		}
	}

	if (btsq->bts->snapshot_mode && !buffer->consecutive &&
	    intel_bts_do_fix_overlap(queue, buffer)) {
		err = -ENOMEM;
		goto out_put;
	}

	if (!btsq->bts->synth_opts.callchain &&
	    !btsq->bts->synth_opts.thread_stack && thread &&
	    (!old_buffer || btsq->bts->sampling_mode ||
	     (btsq->bts->snapshot_mode && !buffer->consecutive)))
		thread_stack__set_trace_nr(thread, buffer->buffer_nr + 1);

	err = intel_bts_process_buffer(btsq, buffer, thread);

	auxtrace_buffer__drop_data(buffer);

	btsq->buffer = auxtrace_buffer__next(queue, buffer);
	if (btsq->buffer) {
		if (timestamp)
			*timestamp = btsq->buffer->reference;
	} else {
		if (!btsq->bts->sampling_mode)
			btsq->done = 1;
	}
out_put:
	thread__put(thread);
	return err;
}
static int intel_bts_flush_queue(struct intel_bts_queue *btsq)
{
	u64 ts = 0;
	int ret;

	while (1) {
		ret = intel_bts_process_queue(btsq, &ts);
		if (ret < 0)
			return ret;
		if (ret)
			break;
	}
	return 0;
}
static int intel_bts_process_tid_exit(struct intel_bts *bts, pid_t tid)
{
	struct auxtrace_queues *queues = &bts->queues;
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		struct auxtrace_queue *queue = &bts->queues.queue_array[i];
		struct intel_bts_queue *btsq = queue->priv;

		if (btsq && btsq->tid == tid)
			return intel_bts_flush_queue(btsq);
	}
	return 0;
}
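/*
 * Pop queues off the timestamp-ordered heap and process them until all
 * remaining queued data is newer than 'timestamp'.
 */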
static int intel_bts_process_queues(struct intel_bts *bts, u64 timestamp)
{
	while (1) {
		unsigned int queue_nr;
		struct auxtrace_queue *queue;
		struct intel_bts_queue *btsq;
		u64 ts = 0;
		int ret;

		if (!bts->heap.heap_cnt)
			return 0;

		if (bts->heap.heap_array[0].ordinal > timestamp)
			return 0;

		queue_nr = bts->heap.heap_array[0].queue_nr;
		queue = &bts->queues.queue_array[queue_nr];
		btsq = queue->priv;

		auxtrace_heap__pop(&bts->heap);

		ret = intel_bts_process_queue(btsq, &ts);
		if (ret < 0) {
			auxtrace_heap__add(&bts->heap, queue_nr, ts);
			return ret;
		}

		if (!ret) {
			ret = auxtrace_heap__add(&bts->heap, queue_nr, ts);
			if (ret < 0)
				return ret;
		} else {
			btsq->on_heap = false;
		}
	}
}
static int intel_bts_process_event(struct perf_session *session,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct perf_tool *tool)
{
	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
					     auxtrace);
	u64 timestamp;
	int err;

	if (dump_trace)
		return 0;

	if (!tool->ordered_events) {
		pr_err("Intel BTS requires ordered events\n");
		return -EINVAL;
	}

	if (sample->time && sample->time != (u64)-1)
		timestamp = perf_time_to_tsc(sample->time, &bts->tc);
	else
		timestamp = 0;

	err = intel_bts_update_queues(bts);
	if (err)
		return err;

	err = intel_bts_process_queues(bts, timestamp);
	if (err)
		return err;
	if (event->header.type == PERF_RECORD_EXIT) {
		err = intel_bts_process_tid_exit(bts, event->fork.tid);
		if (err)
			return err;
	}

	if (event->header.type == PERF_RECORD_AUX &&
	    (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
	    bts->synth_opts.errors)
		err = intel_bts_lost(bts, sample);

	return err;
}
static int intel_bts_process_auxtrace_event(struct perf_session *session,
					    union perf_event *event,
					    struct perf_tool *tool __maybe_unused)
{
	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
					     auxtrace);

	if (bts->sampling_mode)
		return 0;

	if (!bts->data_queued) {
		struct auxtrace_buffer *buffer;
		off_t data_offset;
		int fd = perf_data__fd(session->data);
		int err;

		if (perf_data__is_pipe(session->data)) {
			data_offset = 0;
		} else {
			data_offset = lseek(fd, 0, SEEK_CUR);
			if (data_offset == -1)
				return -errno;
		}

		err = auxtrace_queues__add_event(&bts->queues, session, event,
						 data_offset, &buffer);
		if (err)
			return err;

		/* Dump here now we have copied a piped trace out of the pipe */
		if (dump_trace) {
			if (auxtrace_buffer__get_data(buffer, fd)) {
				intel_bts_dump_event(bts, buffer->data,
						     buffer->size);
				auxtrace_buffer__put_data(buffer);
			}
		}
	}

	return 0;
}
static int intel_bts_flush(struct perf_session *session,
			   struct perf_tool *tool __maybe_unused)
{
	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
					     auxtrace);
	int ret;

	if (dump_trace || bts->sampling_mode)
		return 0;

	if (!tool->ordered_events)
		return -EINVAL;

	ret = intel_bts_update_queues(bts);
	if (ret < 0)
		return ret;

	return intel_bts_process_queues(bts, MAX_TIMESTAMP);
}
static void intel_bts_free_queue(void *priv)
{
	struct intel_bts_queue *btsq = priv;

	if (!btsq)
		return;
	free(btsq);
}
static void intel_bts_free_events(struct perf_session *session)
{
	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
					     auxtrace);
	struct auxtrace_queues *queues = &bts->queues;
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		intel_bts_free_queue(queues->queue_array[i].priv);
		queues->queue_array[i].priv = NULL;
	}
	auxtrace_queues__free(queues);
}
static void intel_bts_free(struct perf_session *session)
{
	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
					     auxtrace);

	auxtrace_heap__free(&bts->heap);
	intel_bts_free_events(session);
	session->auxtrace = NULL;
	free(bts);
}
struct intel_bts_synth {
	struct perf_tool dummy_tool;
	struct perf_session *session;
};
static int intel_bts_event_synth(struct perf_tool *tool,
				 union perf_event *event,
				 struct perf_sample *sample __maybe_unused,
				 struct machine *machine __maybe_unused)
{
	struct intel_bts_synth *intel_bts_synth =
			container_of(tool, struct intel_bts_synth, dummy_tool);

	return perf_session__deliver_synth_event(intel_bts_synth->session,
						 event, NULL);
}
static int intel_bts_synth_event(struct perf_session *session,
				 struct perf_event_attr *attr, u64 id)
{
	struct intel_bts_synth intel_bts_synth;

	memset(&intel_bts_synth, 0, sizeof(struct intel_bts_synth));
	intel_bts_synth.session = session;

	return perf_event__synthesize_attr(&intel_bts_synth.dummy_tool, attr, 1,
					   &id, intel_bts_event_synth);
}
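/*
 * Build the perf_event_attr for the synthesized 'branches' samples from the
 * selected BTS event's attribute and register its id with the session.
 */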
static int intel_bts_synth_events(struct intel_bts *bts,
				  struct perf_session *session)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	bool found = false;
	u64 id;
	int err;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == bts->pmu_type && evsel->ids) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_debug("There are no selected events with Intel BTS data\n");
		return 0;
	}

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.size = sizeof(struct perf_event_attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK;
	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			    PERF_SAMPLE_PERIOD;
	attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
	attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
	attr.exclude_user = evsel->attr.exclude_user;
	attr.exclude_kernel = evsel->attr.exclude_kernel;
	attr.exclude_hv = evsel->attr.exclude_hv;
	attr.exclude_host = evsel->attr.exclude_host;
	attr.exclude_guest = evsel->attr.exclude_guest;
	attr.sample_id_all = evsel->attr.sample_id_all;
	attr.read_format = evsel->attr.read_format;

	id = evsel->id[0] + 1000000000;
	if (!id)
		id = 1;

	if (bts->synth_opts.branches) {
		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
		attr.sample_period = 1;
		attr.sample_type |= PERF_SAMPLE_ADDR;
		pr_debug("Synthesizing 'branches' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
			 id, (u64)attr.sample_type);
		err = intel_bts_synth_event(session, &attr, id);
		if (err) {
			pr_err("%s: failed to synthesize 'branches' event type\n",
			       __func__);
			return err;
		}
		bts->sample_branches = true;
		bts->branches_sample_type = attr.sample_type;
		bts->branches_id = id;
		/*
		 * We only use sample types from PERF_SAMPLE_MASK so we can use
		 * __perf_evsel__sample_size() here.
		 */
		bts->branches_event_size = sizeof(struct sample_event) +
				__perf_evsel__sample_size(attr.sample_type);
	}

	return 0;
}
static const char * const intel_bts_info_fmts[] = {
	[INTEL_BTS_PMU_TYPE]		= "  PMU Type           %"PRId64"\n",
	[INTEL_BTS_TIME_SHIFT]		= "  Time Shift         %"PRIu64"\n",
	[INTEL_BTS_TIME_MULT]		= "  Time Multiplier    %"PRIu64"\n",
	[INTEL_BTS_TIME_ZERO]		= "  Time Zero          %"PRIu64"\n",
	[INTEL_BTS_CAP_USER_TIME_ZERO]	= "  Cap Time Zero      %"PRId64"\n",
	[INTEL_BTS_SNAPSHOT_MODE]	= "  Snapshot mode      %"PRId64"\n",
};
static void intel_bts_print_info(u64 *arr, int start, int finish)
{
	int i;

	if (!dump_trace)
		return;

	for (i = start; i <= finish; i++)
		fprintf(stdout, intel_bts_info_fmts[i], arr[i]);
}
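/*
 * Set up BTS decoding for a session from the PERF_RECORD_AUXTRACE_INFO event:
 * allocate the intel_bts state, copy the TSC conversion parameters, hook up
 * the auxtrace callbacks, synthesize event attributes and queue the data.
 */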
int intel_bts_process_auxtrace_info(union perf_event *event,
				    struct perf_session *session)
{
	struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info;
	size_t min_sz = sizeof(u64) * INTEL_BTS_SNAPSHOT_MODE;
	struct intel_bts *bts;
	int err;

	if (auxtrace_info->header.size < sizeof(struct auxtrace_info_event) +
					min_sz)
		return -EINVAL;

	bts = zalloc(sizeof(struct intel_bts));
	if (!bts)
		return -ENOMEM;

	err = auxtrace_queues__init(&bts->queues);
	if (err)
		goto err_free;

	bts->session = session;
	bts->machine = &session->machines.host; /* No kvm support */
	bts->auxtrace_type = auxtrace_info->type;
	bts->pmu_type = auxtrace_info->priv[INTEL_BTS_PMU_TYPE];
	bts->tc.time_shift = auxtrace_info->priv[INTEL_BTS_TIME_SHIFT];
	bts->tc.time_mult = auxtrace_info->priv[INTEL_BTS_TIME_MULT];
	bts->tc.time_zero = auxtrace_info->priv[INTEL_BTS_TIME_ZERO];
	bts->cap_user_time_zero =
			auxtrace_info->priv[INTEL_BTS_CAP_USER_TIME_ZERO];
	bts->snapshot_mode = auxtrace_info->priv[INTEL_BTS_SNAPSHOT_MODE];

	bts->sampling_mode = false;

	bts->auxtrace.process_event = intel_bts_process_event;
	bts->auxtrace.process_auxtrace_event = intel_bts_process_auxtrace_event;
	bts->auxtrace.flush_events = intel_bts_flush;
	bts->auxtrace.free_events = intel_bts_free_events;
	bts->auxtrace.free = intel_bts_free;
	session->auxtrace = &bts->auxtrace;

	intel_bts_print_info(&auxtrace_info->priv[0], INTEL_BTS_PMU_TYPE,
			     INTEL_BTS_SNAPSHOT_MODE);

	if (dump_trace)
		return 0;

	if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
		bts->synth_opts = *session->itrace_synth_opts;
	} else {
		itrace_synth_opts__set_default(&bts->synth_opts);
		if (session->itrace_synth_opts)
			bts->synth_opts.thread_stack =
				session->itrace_synth_opts->thread_stack;
	}

	if (bts->synth_opts.calls)
		bts->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
					PERF_IP_FLAG_TRACE_END;
	if (bts->synth_opts.returns)
		bts->branches_filter |= PERF_IP_FLAG_RETURN |
					PERF_IP_FLAG_TRACE_BEGIN;

	err = intel_bts_synth_events(bts, session);
	if (err)
		goto err_free_queues;

	err = auxtrace_queues__process_index(&bts->queues, session);
	if (err)
		goto err_free_queues;

	if (bts->queues.populated)
		bts->data_queued = true;

	return 0;

err_free_queues:
	auxtrace_queues__free(&bts->queues);
	session->auxtrace = NULL;
err_free:
	free(bts);
	return err;
}