/*
 * intel-bts.c: Intel BTS (Branch Trace Store) support
 * Copyright (c) 2013-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>

#include "thread-stack.h"
#include "intel-pt-decoder/intel-pt-insn-decoder.h"
#include "intel-bts.h"
#define MAX_TIMESTAMP (~0ULL)

#define INTEL_BTS_ERR_NOINSN	5
#define INTEL_BTS_ERR_LOST	9

#if __BYTE_ORDER == __BIG_ENDIAN
#define le64_to_cpu bswap_64
#else
#define le64_to_cpu
#endif
struct intel_bts {
	struct auxtrace			auxtrace;
	struct auxtrace_queues		queues;
	struct auxtrace_heap		heap;
	u32				auxtrace_type;
	struct perf_session		*session;
	struct machine			*machine;
	bool				sampling_mode;
	bool				snapshot_mode;
	bool				data_queued;
	u32				pmu_type;
	struct perf_tsc_conversion	tc;
	bool				cap_user_time_zero;
	struct itrace_synth_opts	synth_opts;
	bool				sample_branches;
	u32				branches_filter;
	u64				branches_sample_type;
	u64				branches_id;
	size_t				branches_event_size;
	bool				synth_needs_swap;
	unsigned long			num_events;
};
struct intel_bts_queue {
	struct intel_bts	*bts;
	unsigned int		queue_nr;
	struct auxtrace_buffer	*buffer;
	bool			on_heap;
	bool			done;
	pid_t			pid;
	pid_t			tid;
	int			cpu;
	struct intel_pt_insn	intel_pt_insn;
	u32			sample_flags;
};
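
/*
 * Dump raw BTS data for debugging. Each record is a struct branch (from,
 * to, misc), stored little-endian; any short record left at the end of the
 * buffer is reported as bad.
 */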
static void intel_bts_dump(struct intel_bts *bts __maybe_unused,
			   unsigned char *buf, size_t len)
{
	struct branch *branch;
	size_t i, pos = 0, br_sz = sizeof(struct branch), sz;
	const char *color = PERF_COLOR_BLUE;

	color_fprintf(stdout, color,
		      ". ... Intel BTS data: size %zu bytes\n",
		      len);

	while (len) {
		if (len >= br_sz)
			sz = br_sz;
		else
			sz = len;
		color_fprintf(stdout, color, " %08x: ", pos);
		for (i = 0; i < sz; i++)
			color_fprintf(stdout, color, " %02x", buf[i]);
		for (; i < br_sz; i++)
			color_fprintf(stdout, color, "   ");
		if (len >= br_sz) {
			branch = (struct branch *)buf;
			color_fprintf(stdout, color, " %"PRIx64" -> %"PRIx64" %s\n",
				      le64_to_cpu(branch->from),
				      le64_to_cpu(branch->to),
				      le64_to_cpu(branch->misc) & 0x10 ?
				      "pred" : "miss");
		} else {
			color_fprintf(stdout, color, " Bad record!\n");
		}
		pos += sz;
		buf += sz;
		len -= sz;
	}
}
static void intel_bts_dump_event(struct intel_bts *bts, unsigned char *buf,
				 size_t len)
{
	intel_bts_dump(bts, buf, len);
}
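
/*
 * Report lost trace data (e.g. a truncated AUX buffer) by synthesizing an
 * itrace error event and delivering it to the session.
 */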
static int intel_bts_lost(struct intel_bts *bts, struct perf_sample *sample)
{
	union perf_event event;
	int err;

	auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
			     INTEL_BTS_ERR_LOST, sample->cpu, sample->pid,
			     sample->tid, 0, "Lost trace data");

	err = perf_session__deliver_synth_event(bts->session, &event, NULL);
	if (err)
		pr_err("Intel BTS: failed to deliver error event, error %d\n",
		       err);

	return err;
}
static struct intel_bts_queue *intel_bts_alloc_queue(struct intel_bts *bts,
						     unsigned int queue_nr)
{
	struct intel_bts_queue *btsq;

	btsq = zalloc(sizeof(struct intel_bts_queue));
	if (!btsq)
		return NULL;

	btsq->bts = bts;
	btsq->queue_nr = queue_nr;
	btsq->pid = -1;
	btsq->tid = -1;
	btsq->cpu = -1;

	return btsq;
}
static int intel_bts_setup_queue(struct intel_bts *bts,
				 struct auxtrace_queue *queue,
				 unsigned int queue_nr)
{
	struct intel_bts_queue *btsq = queue->priv;

	if (list_empty(&queue->head))
		return 0;

	if (!btsq) {
		btsq = intel_bts_alloc_queue(bts, queue_nr);
		if (!btsq)
			return -ENOMEM;
		queue->priv = btsq;

		if (queue->cpu != -1)
			btsq->cpu = queue->cpu;
		btsq->tid = queue->tid;
	}

	if (bts->sampling_mode)
		return 0;

	if (!btsq->on_heap && !btsq->buffer) {
		int ret;

		btsq->buffer = auxtrace_buffer__next(queue, NULL);
		if (!btsq->buffer)
			return 0;

		ret = auxtrace_heap__add(&bts->heap, queue_nr,
					 btsq->buffer->reference);
		if (ret)
			return ret;
		btsq->on_heap = true;
	}

	return 0;
}
static int intel_bts_setup_queues(struct intel_bts *bts)
{
	unsigned int i;
	int ret;

	for (i = 0; i < bts->queues.nr_queues; i++) {
		ret = intel_bts_setup_queue(bts, &bts->queues.queue_array[i],
					    i);
		if (ret)
			return ret;
	}
	return 0;
}
static inline int intel_bts_update_queues(struct intel_bts *bts)
{
	if (bts->queues.new_data) {
		bts->queues.new_data = false;
		return intel_bts_setup_queues(bts);
	}
	return 0;
}
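
/*
 * In snapshot mode successive snapshots of the same buffer can overlap.
 * Compare the tail of the previous buffer against the head of the new one
 * at record-aligned offsets, and return the first byte of genuinely new
 * data in buf_b.
 */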
static unsigned char *intel_bts_find_overlap(unsigned char *buf_a, size_t len_a,
					     unsigned char *buf_b, size_t len_b)
{
	size_t offs, len;

	if (len_a > len_b)
		offs = len_a - len_b;
	else
		offs = 0;

	for (; offs < len_a; offs += sizeof(struct branch)) {
		len = len_a - offs;
		if (!memcmp(buf_a + offs, buf_b, len))
			return buf_b + len;
	}

	return buf_b;
}
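
/*
 * Trim a buffer's usable region (use_data/use_size) so that data already
 * seen in the preceding buffer is not decoded twice.
 */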
static int intel_bts_do_fix_overlap(struct auxtrace_queue *queue,
				    struct auxtrace_buffer *b)
{
	struct auxtrace_buffer *a;
	unsigned char *start;

	if (b->list.prev == &queue->head)
		return 0;
	a = list_entry(b->list.prev, struct auxtrace_buffer, list);
	start = intel_bts_find_overlap(a->data, a->size, b->data, b->size);
	if (!start)
		return -EINVAL;
	b->use_size = b->data + b->size - start;
	b->use_data = start;
	return 0;
}
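
/*
 * Turn one BTS record into a synthesized PERF_RECORD_SAMPLE: ip is the
 * branch source, addr the branch target. In inject mode the sample is also
 * written back into the event itself.
 */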
static int intel_bts_synth_branch_sample(struct intel_bts_queue *btsq,
					 struct branch *branch)
{
	int ret;
	struct intel_bts *bts = btsq->bts;
	union perf_event event;
	struct perf_sample sample = { .ip = 0, };

	if (bts->synth_opts.initial_skip &&
	    bts->num_events++ <= bts->synth_opts.initial_skip)
		return 0;

	event.sample.header.type = PERF_RECORD_SAMPLE;
	event.sample.header.misc = PERF_RECORD_MISC_USER;
	event.sample.header.size = sizeof(struct perf_event_header);

	sample.cpumode = PERF_RECORD_MISC_USER;
	sample.ip = le64_to_cpu(branch->from);
	sample.pid = btsq->pid;
	sample.tid = btsq->tid;
	sample.addr = le64_to_cpu(branch->to);
	sample.id = btsq->bts->branches_id;
	sample.stream_id = btsq->bts->branches_id;
	sample.cpu = btsq->cpu;
	sample.flags = btsq->sample_flags;
	sample.insn_len = btsq->intel_pt_insn.length;
	memcpy(sample.insn, btsq->intel_pt_insn.buf, INTEL_PT_INSN_BUF_SZ);

	if (bts->synth_opts.inject) {
		event.sample.header.size = bts->branches_event_size;
		ret = perf_event__synthesize_sample(&event,
						    bts->branches_sample_type,
						    0, &sample,
						    bts->synth_needs_swap);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(bts->session, &event, &sample);
	if (ret)
		pr_err("Intel BTS: failed to deliver branch event, error %d\n",
		       ret);

	return ret;
}
static int intel_bts_get_next_insn(struct intel_bts_queue *btsq, u64 ip)
{
	struct machine *machine = btsq->bts->machine;
	struct thread *thread;
	struct addr_location al;
	unsigned char buf[INTEL_PT_INSN_BUF_SZ];
	ssize_t len;
	int x86_64;
	uint8_t cpumode;
	int err = -1;

	if (machine__kernel_ip(machine, ip))
		cpumode = PERF_RECORD_MISC_KERNEL;
	else
		cpumode = PERF_RECORD_MISC_USER;

	thread = machine__find_thread(machine, -1, btsq->tid);
	if (!thread)
		return -1;

	thread__find_addr_map(thread, cpumode, MAP__FUNCTION, ip, &al);
	if (!al.map || !al.map->dso)
		goto out_put;

	len = dso__data_read_addr(al.map->dso, al.map, machine, ip, buf,
				  INTEL_PT_INSN_BUF_SZ);
	if (len <= 0)
		goto out_put;

	/* Load maps to ensure dso->is_64_bit has been updated */
	map__load(al.map);

	x86_64 = al.map->dso->is_64_bit;

	if (intel_pt_get_insn(buf, len, x86_64, &btsq->intel_pt_insn))
		goto out_put;

	err = 0;
out_put:
	thread__put(thread);
	return err;
}
static int intel_bts_synth_error(struct intel_bts *bts, int cpu, pid_t pid,
				 pid_t tid, u64 ip)
{
	union perf_event event;
	int err;

	auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
			     INTEL_BTS_ERR_NOINSN, cpu, pid, tid, ip,
			     "Failed to get instruction");

	err = perf_session__deliver_synth_event(bts->session, &event, NULL);
	if (err)
		pr_err("Intel BTS: failed to deliver error event, error %d\n",
		       err);

	return err;
}
static int intel_bts_get_branch_type(struct intel_bts_queue *btsq,
				     struct branch *branch)
{
	int err;

	if (!branch->from) {
		if (branch->to)
			btsq->sample_flags = PERF_IP_FLAG_BRANCH |
					     PERF_IP_FLAG_TRACE_BEGIN;
		else
			btsq->sample_flags = 0;
		btsq->intel_pt_insn.length = 0;
	} else if (!branch->to) {
		btsq->sample_flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_TRACE_END;
		btsq->intel_pt_insn.length = 0;
	} else {
		err = intel_bts_get_next_insn(btsq, branch->from);
		if (err) {
			btsq->sample_flags = 0;
			btsq->intel_pt_insn.length = 0;
			if (!btsq->bts->synth_opts.errors)
				return 0;
			err = intel_bts_synth_error(btsq->bts, btsq->cpu,
						    btsq->pid, btsq->tid,
						    le64_to_cpu(branch->from));
			return err;
		}
		btsq->sample_flags = intel_pt_insn_type(btsq->intel_pt_insn.op);
		/* Check for an async branch into the kernel */
		if (!machine__kernel_ip(btsq->bts->machine, branch->from) &&
		    machine__kernel_ip(btsq->bts->machine, branch->to) &&
		    btsq->sample_flags != (PERF_IP_FLAG_BRANCH |
					   PERF_IP_FLAG_CALL |
					   PERF_IP_FLAG_SYSCALLRET))
			btsq->sample_flags = PERF_IP_FLAG_BRANCH |
					     PERF_IP_FLAG_CALL |
					     PERF_IP_FLAG_ASYNC |
					     PERF_IP_FLAG_INTERRUPT;
	}

	return 0;
}
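
/*
 * Walk a buffer record by record: classify each branch, feed it to the
 * thread stack if enabled, apply the branch filter and synthesize a sample.
 */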
static int intel_bts_process_buffer(struct intel_bts_queue *btsq,
				    struct auxtrace_buffer *buffer,
				    struct thread *thread)
{
	struct branch *branch;
	size_t sz, bsz = sizeof(struct branch);
	u32 filter = btsq->bts->branches_filter;
	int err = 0;

	if (buffer->use_data) {
		sz = buffer->use_size;
		branch = buffer->use_data;
	} else {
		sz = buffer->size;
		branch = buffer->data;
	}

	if (!btsq->bts->sample_branches)
		return 0;

	for (; sz > bsz; branch += 1, sz -= bsz) {
		if (!branch->from && !branch->to)
			continue;
		intel_bts_get_branch_type(btsq, branch);
		if (btsq->bts->synth_opts.thread_stack)
			thread_stack__event(thread, btsq->sample_flags,
					    le64_to_cpu(branch->from),
					    le64_to_cpu(branch->to),
					    btsq->intel_pt_insn.length,
					    buffer->buffer_nr + 1);
		if (filter && !(filter & btsq->sample_flags))
			continue;
		err = intel_bts_synth_branch_sample(btsq, branch);
		if (err)
			break;
	}
	return err;
}
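
/*
 * Process the current buffer of a queue: map its data if needed, fix
 * snapshot overlap, decode it, then advance to the next buffer and report
 * that buffer's timestamp back to the caller.
 */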
static int intel_bts_process_queue(struct intel_bts_queue *btsq, u64 *timestamp)
{
	struct auxtrace_buffer *buffer = btsq->buffer, *old_buffer = buffer;
	struct auxtrace_queue *queue;
	struct thread *thread;
	int err;

	if (btsq->done)
		return 1;

	if (btsq->pid == -1) {
		thread = machine__find_thread(btsq->bts->machine, -1,
					      btsq->tid);
		if (thread)
			btsq->pid = thread->pid_;
	} else {
		thread = machine__findnew_thread(btsq->bts->machine, btsq->pid,
						 btsq->tid);
	}

	queue = &btsq->bts->queues.queue_array[btsq->queue_nr];

	if (!buffer)
		buffer = auxtrace_buffer__next(queue, NULL);

	if (!buffer) {
		if (!btsq->bts->sampling_mode)
			btsq->done = 1;
		err = 1;
		goto out_put;
	}

	/* Currently there is no support for split buffers */
	if (buffer->consecutive) {
		err = -EINVAL;
		goto out_put;
	}

	if (!buffer->data) {
		int fd = perf_data_file__fd(btsq->bts->session->file);

		buffer->data = auxtrace_buffer__get_data(buffer, fd);
		if (!buffer->data) {
			err = -ENOMEM;
			goto out_put;
		}
	}

	if (btsq->bts->snapshot_mode && !buffer->consecutive &&
	    intel_bts_do_fix_overlap(queue, buffer)) {
		err = -ENOMEM;
		goto out_put;
	}

	if (!btsq->bts->synth_opts.callchain &&
	    !btsq->bts->synth_opts.thread_stack && thread &&
	    (!old_buffer || btsq->bts->sampling_mode ||
	     (btsq->bts->snapshot_mode && !buffer->consecutive)))
		thread_stack__set_trace_nr(thread, buffer->buffer_nr + 1);

	err = intel_bts_process_buffer(btsq, buffer, thread);

	auxtrace_buffer__drop_data(buffer);

	btsq->buffer = auxtrace_buffer__next(queue, buffer);
	if (btsq->buffer) {
		if (timestamp)
			*timestamp = btsq->buffer->reference;
	} else {
		if (!btsq->bts->sampling_mode)
			btsq->done = 1;
	}
out_put:
	thread__put(thread);
	return err;
}
static int intel_bts_flush_queue(struct intel_bts_queue *btsq)
{
	u64 ts = 0;
	int ret;

	while (1) {
		ret = intel_bts_process_queue(btsq, &ts);
		if (ret < 0)
			return ret;
		if (ret)
			break;
	}
	return 0;
}
static int intel_bts_process_tid_exit(struct intel_bts *bts, pid_t tid)
{
	struct auxtrace_queues *queues = &bts->queues;
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		struct auxtrace_queue *queue = &bts->queues.queue_array[i];
		struct intel_bts_queue *btsq = queue->priv;

		if (btsq && btsq->tid == tid)
			return intel_bts_flush_queue(btsq);
	}
	return 0;
}
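
/*
 * Pop queues off the auxtrace heap in timestamp order and process them up
 * to the given timestamp, re-adding each queue with its next buffer's
 * timestamp.
 */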
static int intel_bts_process_queues(struct intel_bts *bts, u64 timestamp)
{
	unsigned int queue_nr;
	struct auxtrace_queue *queue;
	struct intel_bts_queue *btsq;
	u64 ts = 0;
	int ret;

	while (1) {
		if (!bts->heap.heap_cnt)
			return 0;

		if (bts->heap.heap_array[0].ordinal > timestamp)
			return 0;

		queue_nr = bts->heap.heap_array[0].queue_nr;
		queue = &bts->queues.queue_array[queue_nr];
		btsq = queue->priv;

		auxtrace_heap__pop(&bts->heap);

		ret = intel_bts_process_queue(btsq, &ts);
		if (ret < 0) {
			auxtrace_heap__add(&bts->heap, queue_nr, ts);
			return ret;
		}

		if (!ret) {
			ret = auxtrace_heap__add(&bts->heap, queue_nr, ts);
			if (ret < 0)
				return ret;
		} else {
			btsq->on_heap = false;
		}
	}
}
static int intel_bts_process_event(struct perf_session *session,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct perf_tool *tool)
{
	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
					     auxtrace);
	u64 timestamp;
	int err;

	if (dump_trace)
		return 0;

	if (!tool->ordered_events) {
		pr_err("Intel BTS requires ordered events\n");
		return -EINVAL;
	}

	if (sample->time && sample->time != (u64)-1)
		timestamp = perf_time_to_tsc(sample->time, &bts->tc);
	else
		timestamp = 0;

	err = intel_bts_update_queues(bts);
	if (err)
		return err;

	err = intel_bts_process_queues(bts, timestamp);
	if (err)
		return err;
	if (event->header.type == PERF_RECORD_EXIT) {
		err = intel_bts_process_tid_exit(bts, event->fork.tid);
		if (err)
			return err;
	}

	if (event->header.type == PERF_RECORD_AUX &&
	    (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
	    bts->synth_opts.errors)
		err = intel_bts_lost(bts, sample);

	return err;
}
static int intel_bts_process_auxtrace_event(struct perf_session *session,
					    union perf_event *event,
					    struct perf_tool *tool __maybe_unused)
{
	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
					     auxtrace);

	if (bts->sampling_mode)
		return 0;

	if (!bts->data_queued) {
		struct auxtrace_buffer *buffer;
		off_t data_offset;
		int fd = perf_data_file__fd(session->file);
		int err;

		if (perf_data_file__is_pipe(session->file)) {
			data_offset = 0;
		} else {
			data_offset = lseek(fd, 0, SEEK_CUR);
			if (data_offset == -1)
				return -errno;
		}

		err = auxtrace_queues__add_event(&bts->queues, session, event,
						 data_offset, &buffer);
		if (err)
			return err;

		/* Dump here now we have copied a piped trace out of the pipe */
		if (dump_trace) {
			if (auxtrace_buffer__get_data(buffer, fd)) {
				intel_bts_dump_event(bts, buffer->data,
						     buffer->size);
				auxtrace_buffer__put_data(buffer);
			}
		}
	}

	return 0;
}
static int intel_bts_flush(struct perf_session *session,
			   struct perf_tool *tool __maybe_unused)
{
	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
					     auxtrace);
	int ret;

	if (dump_trace || bts->sampling_mode)
		return 0;

	if (!tool->ordered_events)
		return -EINVAL;

	ret = intel_bts_update_queues(bts);
	if (ret < 0)
		return ret;

	return intel_bts_process_queues(bts, MAX_TIMESTAMP);
}
static void intel_bts_free_queue(void *priv)
{
	struct intel_bts_queue *btsq = priv;

	if (!btsq)
		return;
	free(btsq);
}
static void intel_bts_free_events(struct perf_session *session)
{
	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
					     auxtrace);
	struct auxtrace_queues *queues = &bts->queues;
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		intel_bts_free_queue(queues->queue_array[i].priv);
		queues->queue_array[i].priv = NULL;
	}
	auxtrace_queues__free(queues);
}
static void intel_bts_free(struct perf_session *session)
{
	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
					     auxtrace);

	auxtrace_heap__free(&bts->heap);
	intel_bts_free_events(session);
	session->auxtrace = NULL;
	free(bts);
}
struct intel_bts_synth {
	struct perf_tool dummy_tool;
	struct perf_session *session;
};
static int intel_bts_event_synth(struct perf_tool *tool,
				 union perf_event *event,
				 struct perf_sample *sample __maybe_unused,
				 struct machine *machine __maybe_unused)
{
	struct intel_bts_synth *intel_bts_synth =
			container_of(tool, struct intel_bts_synth, dummy_tool);

	return perf_session__deliver_synth_event(intel_bts_synth->session,
						 event, NULL);
}
static int intel_bts_synth_event(struct perf_session *session,
				 struct perf_event_attr *attr, u64 id)
{
	struct intel_bts_synth intel_bts_synth;

	memset(&intel_bts_synth, 0, sizeof(struct intel_bts_synth));
	intel_bts_synth.session = session;

	return perf_event__synthesize_attr(&intel_bts_synth.dummy_tool, attr, 1,
					   &id, intel_bts_event_synth);
}
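
/*
 * Set up synthesis of 'branches' samples: copy the attribute template from
 * the evsel that carries the BTS data, and pick a sample id well away from
 * existing ids (evsel->id[0] + 1000000000).
 */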
static int intel_bts_synth_events(struct intel_bts *bts,
				  struct perf_session *session)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	bool found = false;
	u64 id;
	int err;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == bts->pmu_type && evsel->ids) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_debug("There are no selected events with Intel BTS data\n");
		return 0;
	}

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.size = sizeof(struct perf_event_attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK;
	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			    PERF_SAMPLE_PERIOD;
	attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
	attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
	attr.exclude_user = evsel->attr.exclude_user;
	attr.exclude_kernel = evsel->attr.exclude_kernel;
	attr.exclude_hv = evsel->attr.exclude_hv;
	attr.exclude_host = evsel->attr.exclude_host;
	attr.exclude_guest = evsel->attr.exclude_guest;
	attr.sample_id_all = evsel->attr.sample_id_all;
	attr.read_format = evsel->attr.read_format;

	id = evsel->id[0] + 1000000000;
	if (!id)
		id = 1;

	if (bts->synth_opts.branches) {
		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
		attr.sample_period = 1;
		attr.sample_type |= PERF_SAMPLE_ADDR;
		pr_debug("Synthesizing 'branches' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
			 id, (u64)attr.sample_type);
		err = intel_bts_synth_event(session, &attr, id);
		if (err) {
			pr_err("%s: failed to synthesize 'branches' event type\n",
			       __func__);
			return err;
		}
		bts->sample_branches = true;
		bts->branches_sample_type = attr.sample_type;
		bts->branches_id = id;
		/*
		 * We only use sample types from PERF_SAMPLE_MASK so we can use
		 * __perf_evsel__sample_size() here.
		 */
		bts->branches_event_size = sizeof(struct sample_event) +
				__perf_evsel__sample_size(attr.sample_type);
	}

	bts->synth_needs_swap = evsel->needs_swap;

	return 0;
}
static const char * const intel_bts_info_fmts[] = {
	[INTEL_BTS_PMU_TYPE]		= "  PMU Type           %"PRId64"\n",
	[INTEL_BTS_TIME_SHIFT]		= "  Time Shift         %"PRIu64"\n",
	[INTEL_BTS_TIME_MULT]		= "  Time Multiplier    %"PRIu64"\n",
	[INTEL_BTS_TIME_ZERO]		= "  Time Zero          %"PRIu64"\n",
	[INTEL_BTS_CAP_USER_TIME_ZERO]	= "  Cap Time Zero      %"PRId64"\n",
	[INTEL_BTS_SNAPSHOT_MODE]	= "  Snapshot mode      %"PRId64"\n",
};
static void intel_bts_print_info(u64 *arr, int start, int finish)
{
	int i;

	if (!dump_trace)
		return;

	for (i = start; i <= finish; i++)
		fprintf(stdout, intel_bts_info_fmts[i], arr[i]);
}
u64 intel_bts_auxtrace_info_priv[INTEL_BTS_AUXTRACE_PRIV_SIZE];
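
/*
 * Entry point: parse the PERF_RECORD_AUXTRACE_INFO event written by
 * 'perf record', populate struct intel_bts from it, and hook the decoder
 * into the session's auxtrace callbacks.
 */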
int intel_bts_process_auxtrace_info(union perf_event *event,
				    struct perf_session *session)
{
	struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info;
	size_t min_sz = sizeof(u64) * INTEL_BTS_SNAPSHOT_MODE;
	struct intel_bts *bts;
	int err;

	if (auxtrace_info->header.size < sizeof(struct auxtrace_info_event) +
					min_sz)
		return -EINVAL;

	bts = zalloc(sizeof(struct intel_bts));
	if (!bts)
		return -ENOMEM;

	err = auxtrace_queues__init(&bts->queues);
	if (err)
		goto err_free;

	bts->session = session;
	bts->machine = &session->machines.host; /* No kvm support */
	bts->auxtrace_type = auxtrace_info->type;
	bts->pmu_type = auxtrace_info->priv[INTEL_BTS_PMU_TYPE];
	bts->tc.time_shift = auxtrace_info->priv[INTEL_BTS_TIME_SHIFT];
	bts->tc.time_mult = auxtrace_info->priv[INTEL_BTS_TIME_MULT];
	bts->tc.time_zero = auxtrace_info->priv[INTEL_BTS_TIME_ZERO];
	bts->cap_user_time_zero =
			auxtrace_info->priv[INTEL_BTS_CAP_USER_TIME_ZERO];
	bts->snapshot_mode = auxtrace_info->priv[INTEL_BTS_SNAPSHOT_MODE];

	bts->sampling_mode = false;

	bts->auxtrace.process_event = intel_bts_process_event;
	bts->auxtrace.process_auxtrace_event = intel_bts_process_auxtrace_event;
	bts->auxtrace.flush_events = intel_bts_flush;
	bts->auxtrace.free_events = intel_bts_free_events;
	bts->auxtrace.free = intel_bts_free;
	session->auxtrace = &bts->auxtrace;

	intel_bts_print_info(&auxtrace_info->priv[0], INTEL_BTS_PMU_TYPE,
			     INTEL_BTS_SNAPSHOT_MODE);

	if (dump_trace)
		return 0;

	if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
		bts->synth_opts = *session->itrace_synth_opts;
	} else {
		itrace_synth_opts__set_default(&bts->synth_opts);
		if (session->itrace_synth_opts)
			bts->synth_opts.thread_stack =
				session->itrace_synth_opts->thread_stack;
	}

	if (bts->synth_opts.calls)
		bts->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
					PERF_IP_FLAG_TRACE_END;
	if (bts->synth_opts.returns)
		bts->branches_filter |= PERF_IP_FLAG_RETURN |
					PERF_IP_FLAG_TRACE_BEGIN;

	err = intel_bts_synth_events(bts, session);
	if (err)
		goto err_free_queues;

	err = auxtrace_queues__process_index(&bts->queues, session);
	if (err)
		goto err_free_queues;

	if (bts->queues.populated)
		bts->data_queued = true;

	return 0;

err_free_queues:
	auxtrace_queues__free(&bts->queues);
	session->auxtrace = NULL;
err_free:
	free(bts);
	return err;
}