// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_pt.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/zalloc.h>

#include "thread-stack.h"
#include "callchain.h"
#include "util/synthetic-events.h"
#include "time-utils.h"

#include "../arch/x86/include/uapi/asm/perf_regs.h"

#include "intel-pt-decoder/intel-pt-log.h"
#include "intel-pt-decoder/intel-pt-decoder.h"
#include "intel-pt-decoder/intel-pt-insn-decoder.h"
#include "intel-pt-decoder/intel-pt-pkt-decoder.h"

#define MAX_TIMESTAMP (~0ULL)
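/*
 * Session-wide Intel PT decoding state: the auxtrace queues and heap used to
 * order trace data, TSC conversion parameters, itrace synthesis options, and
 * the sample types of the events that will be synthesized.
 */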
struct intel_pt {
	struct auxtrace auxtrace;
	struct auxtrace_queues queues;
	struct auxtrace_heap heap;
	struct perf_session *session;
	struct machine *machine;
	struct evsel *switch_evsel;
	struct thread *unknown_thread;
	bool timeless_decoding;
	int have_sched_switch;
	struct perf_tsc_conversion tc;
	bool cap_user_time_zero;
	struct itrace_synth_opts synth_opts;
	bool sample_instructions;
	u64 instructions_sample_type;
	u64 branches_sample_type;
	bool sample_transactions;
	u64 transactions_sample_type;
	u64 ptwrites_sample_type;
	bool sample_pwr_events;
	u64 pwr_events_sample_type;
	struct evsel *pebs_evsel;
	unsigned max_non_turbo_ratio;
	unsigned long num_events;
	struct addr_filters filts;
	struct range *time_ranges;
	unsigned int range_cnt;
};
enum switch_state {
	INTEL_PT_SS_NOT_TRACING,
	INTEL_PT_SS_UNKNOWN,
	INTEL_PT_SS_TRACING,
	INTEL_PT_SS_EXPECTING_SWITCH_EVENT,
	INTEL_PT_SS_EXPECTING_SWITCH_IP,
};
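/*
 * Per-queue (per-cpu or per-thread) decode state: the current and previous
 * auxtrace buffers, the most recent decoder state, and scratch buffers used
 * when synthesizing callchains, branch stacks and samples.
 */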
struct intel_pt_queue {
	unsigned int queue_nr;
	struct auxtrace_buffer *buffer;
	struct auxtrace_buffer *old_buffer;
	const struct intel_pt_state *state;
	struct ip_callchain *chain;
	struct branch_stack *last_branch;
	struct branch_stack *last_branch_rb;
	size_t last_branch_pos;
	union perf_event *event_buf;
	bool step_through_buffers;
	bool use_buffer_pid_tid;
	struct thread *thread;
	unsigned int sel_idx;
	u64 last_in_insn_cnt;
	u64 last_br_insn_cnt;
	unsigned int cbr_seen;
	char insn[INTEL_PT_INSN_BUF_SZ];
};
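/* Dump raw trace bytes alongside the decoded packet descriptions. */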
static void intel_pt_dump(struct intel_pt *pt __maybe_unused,
			  unsigned char *buf, size_t len)
{
	struct intel_pt_pkt packet;
	size_t pos = 0;
	int ret, pkt_len, i;
	char desc[INTEL_PT_PKT_DESC_MAX];
	const char *color = PERF_COLOR_BLUE;
	enum intel_pt_pkt_ctx ctx = INTEL_PT_NO_CTX;

	color_fprintf(stdout, color,
		      ". ... Intel Processor Trace data: size %zu bytes\n",
		      len);

	while (len) {
		ret = intel_pt_get_packet(buf, len, &packet, &ctx);
		pkt_len = ret > 0 ? ret : 1;
		color_fprintf(stdout, color, " %08x: ", pos);
		for (i = 0; i < pkt_len; i++)
			color_fprintf(stdout, color, " %02x", buf[i]);
		color_fprintf(stdout, color, " ");
		if (ret > 0) {
			ret = intel_pt_pkt_desc(&packet, desc,
						INTEL_PT_PKT_DESC_MAX);
			if (ret > 0)
				color_fprintf(stdout, color, " %s\n", desc);
		} else {
			color_fprintf(stdout, color, " Bad packet!\n");
			break;
		}
		pos += pkt_len;
		buf += pkt_len;
		len -= pkt_len;
	}
}
static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
				size_t len)
{
	intel_pt_dump(pt, buf, len);
}

static void intel_pt_log_event(union perf_event *event)
{
	FILE *f = intel_pt_log_fp();

	if (!intel_pt_enable_logging || !f)
		return;

	perf_event__fprintf(event, f);
}

static void intel_pt_dump_sample(struct perf_session *session,
				 struct perf_sample *sample)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	intel_pt_dump(pt, sample->aux_sample.data, sample->aux_sample.size);
}
static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
				   struct auxtrace_buffer *b)
{
	bool consecutive = false;
	void *start;

	start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
				      pt->have_tsc, &consecutive);
	if (!start)
		return -EINVAL;

	b->use_size = b->data + b->size - start;
	b->use_data = start;

	if (b->use_size && consecutive)
		b->consecutive = true;

	return 0;
}
static int intel_pt_get_buffer(struct intel_pt_queue *ptq,
			       struct auxtrace_buffer *buffer,
			       struct auxtrace_buffer *old_buffer,
			       struct intel_pt_buffer *b)
{
	bool might_overlap;

	if (!buffer->data) {
		int fd = perf_data__fd(ptq->pt->session->data);

		buffer->data = auxtrace_buffer__get_data(buffer, fd);
		if (!buffer->data)
			return -ENOMEM;
	}

	might_overlap = ptq->pt->snapshot_mode || ptq->pt->sampling_mode;
	if (might_overlap && !buffer->consecutive && old_buffer &&
	    intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
		return -ENOMEM;

	if (buffer->use_data) {
		b->len = buffer->use_size;
		b->buf = buffer->use_data;
	} else {
		b->len = buffer->size;
		b->buf = buffer->data;
	}
	b->ref_timestamp = buffer->reference;

	if (!old_buffer || (might_overlap && !buffer->consecutive)) {
		b->consecutive = false;
		b->trace_nr = buffer->buffer_nr + 1;
	} else {
		b->consecutive = true;
	}

	return 0;
}
/* Do not drop buffers with references - refer intel_pt_get_trace() */
static void intel_pt_lookahead_drop_buffer(struct intel_pt_queue *ptq,
					   struct auxtrace_buffer *buffer)
{
	if (!buffer || buffer == ptq->buffer || buffer == ptq->old_buffer)
		return;

	auxtrace_buffer__drop_data(buffer);
}

/* Must be serialized with respect to intel_pt_get_trace() */
static int intel_pt_lookahead(void *data, intel_pt_lookahead_cb_t cb,
			      void *cb_data)
{
	struct intel_pt_queue *ptq = data;
	struct auxtrace_buffer *buffer = ptq->buffer;
	struct auxtrace_buffer *old_buffer = ptq->old_buffer;
	struct auxtrace_queue *queue;
	int err = 0;

	queue = &ptq->pt->queues.queue_array[ptq->queue_nr];

	while (1) {
		struct intel_pt_buffer b = { .len = 0 };

		buffer = auxtrace_buffer__next(queue, buffer);
		if (!buffer)
			break;

		err = intel_pt_get_buffer(ptq, buffer, old_buffer, &b);
		if (err)
			break;

		if (b.len) {
			intel_pt_lookahead_drop_buffer(ptq, old_buffer);
			old_buffer = buffer;
		} else {
			intel_pt_lookahead_drop_buffer(ptq, buffer);
			continue;
		}

		err = cb(&b, cb_data);
		if (err)
			break;
	}

	if (buffer != old_buffer)
		intel_pt_lookahead_drop_buffer(ptq, buffer);
	intel_pt_lookahead_drop_buffer(ptq, old_buffer);

	return err;
}
/*
 * This function assumes data is processed sequentially only.
 * Must be serialized with respect to intel_pt_lookahead()
 */
static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
{
	struct intel_pt_queue *ptq = data;
	struct auxtrace_buffer *buffer = ptq->buffer;
	struct auxtrace_buffer *old_buffer = ptq->old_buffer;
	struct auxtrace_queue *queue;
	int err;

	queue = &ptq->pt->queues.queue_array[ptq->queue_nr];

	buffer = auxtrace_buffer__next(queue, buffer);
	if (!buffer) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		b->len = 0;
		return 0;
	}

	ptq->buffer = buffer;

	err = intel_pt_get_buffer(ptq, buffer, old_buffer, b);
	if (err)
		return err;

	if (ptq->step_through_buffers)
		ptq->stop = true;

	if (b->len) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		ptq->old_buffer = buffer;
	} else {
		auxtrace_buffer__drop_data(buffer);
		return intel_pt_get_trace(b, data);
	}

	return 0;
}
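/*
 * Decoded instructions are cached per dso, keyed by file offset, so that
 * walking the same code repeatedly does not re-read and re-decode it.  The
 * cache size is derived from the dso size and a configurable divisor.
 */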
struct intel_pt_cache_entry {
	struct auxtrace_cache_entry entry;
	u64 insn_cnt;
	u64 byte_cnt;
	enum intel_pt_insn_op op;
	enum intel_pt_insn_branch branch;
	int length;
	int32_t rel;
	char insn[INTEL_PT_INSN_BUF_SZ];
};

static int intel_pt_config_div(const char *var, const char *value, void *data)
{
	int *d = data;
	long val;

	if (!strcmp(var, "intel-pt.cache-divisor")) {
		val = strtol(value, NULL, 0);
		if (val > 0 && val <= INT_MAX)
			*d = val;
	}

	return 0;
}

static int intel_pt_cache_divisor(void)
{
	static int d;

	if (d)
		return d;

	perf_config(intel_pt_config_div, &d);

	if (!d)
		d = 64;

	return d;
}

static unsigned int intel_pt_cache_size(struct dso *dso,
					struct machine *machine)
{
	off_t size;

	size = dso__data_size(dso, machine);
	size /= intel_pt_cache_divisor();
	if (size > (1 << 21))
		return 21;
	return 32 - __builtin_clz(size);
}
static struct auxtrace_cache *intel_pt_cache(struct dso *dso,
					     struct machine *machine)
{
	struct auxtrace_cache *c;
	unsigned int bits;

	if (dso->auxtrace_cache)
		return dso->auxtrace_cache;

	bits = intel_pt_cache_size(dso, machine);

	/* Ignoring cache creation failure */
	c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200);

	dso->auxtrace_cache = c;

	return c;
}

static int intel_pt_cache_add(struct dso *dso, struct machine *machine,
			      u64 offset, u64 insn_cnt, u64 byte_cnt,
			      struct intel_pt_insn *intel_pt_insn)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);
	struct intel_pt_cache_entry *e;
	int err;

	if (!c)
		return -ENOMEM;

	e = auxtrace_cache__alloc_entry(c);
	if (!e)
		return -ENOMEM;

	e->insn_cnt = insn_cnt;
	e->byte_cnt = byte_cnt;
	e->op = intel_pt_insn->op;
	e->branch = intel_pt_insn->branch;
	e->length = intel_pt_insn->length;
	e->rel = intel_pt_insn->rel;
	memcpy(e->insn, intel_pt_insn->buf, INTEL_PT_INSN_BUF_SZ);

	err = auxtrace_cache__add(c, offset, &e->entry);
	if (err)
		auxtrace_cache__free_entry(c, e);

	return err;
}

static struct intel_pt_cache_entry *
intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);

	if (!c)
		return NULL;

	return auxtrace_cache__lookup(dso->auxtrace_cache, offset);
}
static inline u8 intel_pt_cpumode(struct intel_pt *pt, uint64_t ip)
{
	return ip >= pt->kernel_start ?
	       PERF_RECORD_MISC_KERNEL :
	       PERF_RECORD_MISC_USER;
}
524 static int intel_pt_walk_next_insn(struct intel_pt_insn
*intel_pt_insn
,
525 uint64_t *insn_cnt_ptr
, uint64_t *ip
,
526 uint64_t to_ip
, uint64_t max_insn_cnt
,
529 struct intel_pt_queue
*ptq
= data
;
530 struct machine
*machine
= ptq
->pt
->machine
;
531 struct thread
*thread
;
532 struct addr_location al
;
533 unsigned char buf
[INTEL_PT_INSN_BUF_SZ
];
537 u64 offset
, start_offset
, start_ip
;
541 intel_pt_insn
->length
= 0;
543 if (to_ip
&& *ip
== to_ip
)
546 cpumode
= intel_pt_cpumode(ptq
->pt
, *ip
);
548 thread
= ptq
->thread
;
550 if (cpumode
!= PERF_RECORD_MISC_KERNEL
)
552 thread
= ptq
->pt
->unknown_thread
;
556 if (!thread__find_map(thread
, cpumode
, *ip
, &al
) || !al
.map
->dso
)
559 if (al
.map
->dso
->data
.status
== DSO_DATA_STATUS_ERROR
&&
560 dso__data_status_seen(al
.map
->dso
,
561 DSO_DATA_STATUS_SEEN_ITRACE
))
564 offset
= al
.map
->map_ip(al
.map
, *ip
);
566 if (!to_ip
&& one_map
) {
567 struct intel_pt_cache_entry
*e
;
569 e
= intel_pt_cache_lookup(al
.map
->dso
, machine
, offset
);
571 (!max_insn_cnt
|| e
->insn_cnt
<= max_insn_cnt
)) {
572 *insn_cnt_ptr
= e
->insn_cnt
;
574 intel_pt_insn
->op
= e
->op
;
575 intel_pt_insn
->branch
= e
->branch
;
576 intel_pt_insn
->length
= e
->length
;
577 intel_pt_insn
->rel
= e
->rel
;
578 memcpy(intel_pt_insn
->buf
, e
->insn
,
579 INTEL_PT_INSN_BUF_SZ
);
580 intel_pt_log_insn_no_data(intel_pt_insn
, *ip
);
585 start_offset
= offset
;
588 /* Load maps to ensure dso->is_64_bit has been updated */
591 x86_64
= al
.map
->dso
->is_64_bit
;
594 len
= dso__data_read_offset(al
.map
->dso
, machine
,
596 INTEL_PT_INSN_BUF_SZ
);
600 if (intel_pt_get_insn(buf
, len
, x86_64
, intel_pt_insn
))
603 intel_pt_log_insn(intel_pt_insn
, *ip
);
607 if (intel_pt_insn
->branch
!= INTEL_PT_BR_NO_BRANCH
)
610 if (max_insn_cnt
&& insn_cnt
>= max_insn_cnt
)
613 *ip
+= intel_pt_insn
->length
;
615 if (to_ip
&& *ip
== to_ip
)
618 if (*ip
>= al
.map
->end
)
621 offset
+= intel_pt_insn
->length
;
626 *insn_cnt_ptr
= insn_cnt
;
632 * Didn't lookup in the 'to_ip' case, so do it now to prevent duplicate
636 struct intel_pt_cache_entry
*e
;
638 e
= intel_pt_cache_lookup(al
.map
->dso
, machine
, start_offset
);
643 /* Ignore cache errors */
644 intel_pt_cache_add(al
.map
->dso
, machine
, start_offset
, insn_cnt
,
645 *ip
- start_ip
, intel_pt_insn
);
650 *insn_cnt_ptr
= insn_cnt
;
static bool intel_pt_match_pgd_ip(struct intel_pt *pt, uint64_t ip,
				  uint64_t offset, const char *filename)
{
	struct addr_filter *filt;
	bool have_filter = false;
	bool hit_tracestop = false;
	bool hit_filter = false;

	list_for_each_entry(filt, &pt->filts.head, list) {
		if (filt->start)
			have_filter = true;

		if ((filename && !filt->filename) ||
		    (!filename && filt->filename) ||
		    (filename && strcmp(filename, filt->filename)))
			continue;

		if (!(offset >= filt->addr && offset < filt->addr + filt->size))
			continue;

		intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s hit filter: %s offset %#"PRIx64" size %#"PRIx64"\n",
			     ip, offset, filename ? filename : "[kernel]",
			     filt->start ? "filter" : "stop",
			     filt->addr, filt->size);

		if (filt->start)
			hit_filter = true;
		else
			hit_tracestop = true;
	}

	if (!hit_tracestop && !hit_filter)
		intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s is not in a filter region\n",
			     ip, offset, filename ? filename : "[kernel]");

	return hit_tracestop || (have_filter && !hit_filter);
}

static int __intel_pt_pgd_ip(uint64_t ip, void *data)
{
	struct intel_pt_queue *ptq = data;
	struct thread *thread;
	struct addr_location al;
	u8 cpumode;
	u64 offset;

	if (ip >= ptq->pt->kernel_start)
		return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);

	cpumode = PERF_RECORD_MISC_USER;

	thread = ptq->thread;
	if (!thread)
		return -EINVAL;

	if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso)
		return -EINVAL;

	offset = al.map->map_ip(al.map, ip);

	return intel_pt_match_pgd_ip(ptq->pt, ip, offset,
				     al.map->dso->long_name);
}

static bool intel_pt_pgd_ip(uint64_t ip, void *data)
{
	return __intel_pt_pgd_ip(ip, data) > 0;
}
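/*
 * The helpers below inspect the recorded event attributes to determine how
 * the trace was configured: whether the kernel was traced, whether return
 * compression or TSC packets were enabled, the MTC period, and so on.
 */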
static bool intel_pt_get_config(struct intel_pt *pt,
				struct perf_event_attr *attr, u64 *config)
{
	if (attr->type == pt->pmu_type) {
		if (config)
			*config = attr->config;
		return true;
	}

	return false;
}

static bool intel_pt_exclude_kernel(struct intel_pt *pt)
{
	struct evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, NULL) &&
		    !evsel->core.attr.exclude_kernel)
			return false;
	}
	return true;
}

static bool intel_pt_return_compression(struct intel_pt *pt)
{
	struct evsel *evsel;
	u64 config;

	if (!pt->noretcomp_bit)
		return true;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
		    (config & pt->noretcomp_bit))
			return false;
	}
	return true;
}

static bool intel_pt_branch_enable(struct intel_pt *pt)
{
	struct evsel *evsel;
	u64 config;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
		    (config & 1) && !(config & 0x2000))
			return true;
	}
	return false;
}

static unsigned int intel_pt_mtc_period(struct intel_pt *pt)
{
	struct evsel *evsel;
	unsigned int shift;
	u64 config;

	if (!pt->mtc_freq_bits)
		return 0;

	for (shift = 0, config = pt->mtc_freq_bits; !(config & 1); shift++)
		config >>= 1;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config))
			return (config & pt->mtc_freq_bits) >> shift;
	}
	return 0;
}

static bool intel_pt_timeless_decoding(struct intel_pt *pt)
{
	struct evsel *evsel;
	bool timeless_decoding = true;
	u64 config;

	if (!pt->tsc_bit || !pt->cap_user_time_zero)
		return true;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (!(evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
			return true;
		if (intel_pt_get_config(pt, &evsel->core.attr, &config)) {
			if (config & pt->tsc_bit)
				timeless_decoding = false;
			else
				return true;
		}
	}
	return timeless_decoding;
}

static bool intel_pt_tracing_kernel(struct intel_pt *pt)
{
	struct evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, NULL) &&
		    !evsel->core.attr.exclude_kernel)
			return true;
	}
	return false;
}

static bool intel_pt_have_tsc(struct intel_pt *pt)
{
	struct evsel *evsel;
	bool have_tsc = false;
	u64 config;

	if (!pt->tsc_bit)
		return false;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config)) {
			if (config & pt->tsc_bit)
				have_tsc = true;
			else
				return false;
		}
	}
	return have_tsc;
}

static bool intel_pt_sampling_mode(struct intel_pt *pt)
{
	struct evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if ((evsel->core.attr.sample_type & PERF_SAMPLE_AUX) &&
		    evsel->core.attr.aux_sample_size)
			return true;
	}
	return false;
}

static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns)
{
	u64 quot, rem;

	quot = ns / pt->tc.time_mult;
	rem  = ns % pt->tc.time_mult;
	return (quot << pt->tc.time_shift) + (rem << pt->tc.time_shift) /
	       pt->tc.time_mult;
}
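/*
 * Allocate per-queue decode state: scratch buffers for callchain and branch
 * stack synthesis, the event buffer, and the decoder itself, configured from
 * the session-wide synthesis options.
 */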
871 static struct intel_pt_queue
*intel_pt_alloc_queue(struct intel_pt
*pt
,
872 unsigned int queue_nr
)
874 struct intel_pt_params params
= { .get_trace
= 0, };
875 struct perf_env
*env
= pt
->machine
->env
;
876 struct intel_pt_queue
*ptq
;
878 ptq
= zalloc(sizeof(struct intel_pt_queue
));
882 if (pt
->synth_opts
.callchain
) {
883 size_t sz
= sizeof(struct ip_callchain
);
885 /* Add 1 to callchain_sz for callchain context */
886 sz
+= (pt
->synth_opts
.callchain_sz
+ 1) * sizeof(u64
);
887 ptq
->chain
= zalloc(sz
);
892 if (pt
->synth_opts
.last_branch
) {
893 size_t sz
= sizeof(struct branch_stack
);
895 sz
+= pt
->synth_opts
.last_branch_sz
*
896 sizeof(struct branch_entry
);
897 ptq
->last_branch
= zalloc(sz
);
898 if (!ptq
->last_branch
)
900 ptq
->last_branch_rb
= zalloc(sz
);
901 if (!ptq
->last_branch_rb
)
905 ptq
->event_buf
= malloc(PERF_SAMPLE_MAX_SIZE
);
910 ptq
->queue_nr
= queue_nr
;
911 ptq
->exclude_kernel
= intel_pt_exclude_kernel(pt
);
917 params
.get_trace
= intel_pt_get_trace
;
918 params
.walk_insn
= intel_pt_walk_next_insn
;
919 params
.lookahead
= intel_pt_lookahead
;
921 params
.return_compression
= intel_pt_return_compression(pt
);
922 params
.branch_enable
= intel_pt_branch_enable(pt
);
923 params
.max_non_turbo_ratio
= pt
->max_non_turbo_ratio
;
924 params
.mtc_period
= intel_pt_mtc_period(pt
);
925 params
.tsc_ctc_ratio_n
= pt
->tsc_ctc_ratio_n
;
926 params
.tsc_ctc_ratio_d
= pt
->tsc_ctc_ratio_d
;
928 if (pt
->filts
.cnt
> 0)
929 params
.pgd_ip
= intel_pt_pgd_ip
;
931 if (pt
->synth_opts
.instructions
) {
932 if (pt
->synth_opts
.period
) {
933 switch (pt
->synth_opts
.period_type
) {
934 case PERF_ITRACE_PERIOD_INSTRUCTIONS
:
936 INTEL_PT_PERIOD_INSTRUCTIONS
;
937 params
.period
= pt
->synth_opts
.period
;
939 case PERF_ITRACE_PERIOD_TICKS
:
940 params
.period_type
= INTEL_PT_PERIOD_TICKS
;
941 params
.period
= pt
->synth_opts
.period
;
943 case PERF_ITRACE_PERIOD_NANOSECS
:
944 params
.period_type
= INTEL_PT_PERIOD_TICKS
;
945 params
.period
= intel_pt_ns_to_ticks(pt
,
946 pt
->synth_opts
.period
);
953 if (!params
.period
) {
954 params
.period_type
= INTEL_PT_PERIOD_INSTRUCTIONS
;
959 if (env
->cpuid
&& !strncmp(env
->cpuid
, "GenuineIntel,6,92,", 18))
960 params
.flags
|= INTEL_PT_FUP_WITH_NLIP
;
962 ptq
->decoder
= intel_pt_decoder_new(¶ms
);
969 zfree(&ptq
->event_buf
);
970 zfree(&ptq
->last_branch
);
971 zfree(&ptq
->last_branch_rb
);
static void intel_pt_free_queue(void *priv)
{
	struct intel_pt_queue *ptq = priv;

	if (!ptq)
		return;
	thread__zput(ptq->thread);
	intel_pt_decoder_free(ptq->decoder);
	zfree(&ptq->event_buf);
	zfree(&ptq->last_branch);
	zfree(&ptq->last_branch_rb);
	zfree(&ptq->chain);
	free(ptq);
}

static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
				     struct auxtrace_queue *queue)
{
	struct intel_pt_queue *ptq = queue->priv;

	if (queue->tid == -1 || pt->have_sched_switch) {
		ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
		thread__zput(ptq->thread);
	}

	if (!ptq->thread && ptq->tid != -1)
		ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid);

	if (ptq->thread) {
		ptq->pid = ptq->thread->pid_;
		if (queue->cpu == -1)
			ptq->cpu = ptq->thread->cpu;
	}
}
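/*
 * Derive perf branch flags (call, interrupt, tx abort, trace begin/end, ...)
 * for the current decoder state, so that synthesized samples carry the same
 * flags as real branch records.
 */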
static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
{
	if (ptq->state->flags & INTEL_PT_ABORT_TX) {
		ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
	} else if (ptq->state->flags & INTEL_PT_ASYNC) {
		if (ptq->state->to_ip)
			ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
				     PERF_IP_FLAG_ASYNC |
				     PERF_IP_FLAG_INTERRUPT;
		else
			ptq->flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_TRACE_END;
	} else {
		if (ptq->state->from_ip)
			ptq->flags = intel_pt_insn_type(ptq->state->insn_op);
		else
			ptq->flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_TRACE_BEGIN;
		if (ptq->state->flags & INTEL_PT_IN_TX)
			ptq->flags |= PERF_IP_FLAG_IN_TX;
		ptq->insn_len = ptq->state->insn_len;
		memcpy(ptq->insn, ptq->state->insn, INTEL_PT_INSN_BUF_SZ);
	}

	if (ptq->state->type & INTEL_PT_TRACE_BEGIN)
		ptq->flags |= PERF_IP_FLAG_TRACE_BEGIN;
	if (ptq->state->type & INTEL_PT_TRACE_END)
		ptq->flags |= PERF_IP_FLAG_TRACE_END;
}
static void intel_pt_setup_time_range(struct intel_pt *pt,
				      struct intel_pt_queue *ptq)
{
	if (!pt->range_cnt)
		return;

	ptq->sel_timestamp = pt->time_ranges[0].start;

	if (ptq->sel_timestamp) {
		ptq->sel_start = true;
	} else {
		ptq->sel_timestamp = pt->time_ranges[0].end;
		ptq->sel_start = false;
	}
}
1060 static int intel_pt_setup_queue(struct intel_pt
*pt
,
1061 struct auxtrace_queue
*queue
,
1062 unsigned int queue_nr
)
1064 struct intel_pt_queue
*ptq
= queue
->priv
;
1066 if (list_empty(&queue
->head
))
1070 ptq
= intel_pt_alloc_queue(pt
, queue_nr
);
1075 if (queue
->cpu
!= -1)
1076 ptq
->cpu
= queue
->cpu
;
1077 ptq
->tid
= queue
->tid
;
1079 ptq
->cbr_seen
= UINT_MAX
;
1081 if (pt
->sampling_mode
&& !pt
->snapshot_mode
&&
1082 pt
->timeless_decoding
)
1083 ptq
->step_through_buffers
= true;
1085 ptq
->sync_switch
= pt
->sync_switch
;
1087 intel_pt_setup_time_range(pt
, ptq
);
1090 if (!ptq
->on_heap
&&
1091 (!ptq
->sync_switch
||
1092 ptq
->switch_state
!= INTEL_PT_SS_EXPECTING_SWITCH_EVENT
)) {
1093 const struct intel_pt_state
*state
;
1096 if (pt
->timeless_decoding
)
1099 intel_pt_log("queue %u getting timestamp\n", queue_nr
);
1100 intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
1101 queue_nr
, ptq
->cpu
, ptq
->pid
, ptq
->tid
);
1103 if (ptq
->sel_start
&& ptq
->sel_timestamp
) {
1104 ret
= intel_pt_fast_forward(ptq
->decoder
,
1105 ptq
->sel_timestamp
);
1111 state
= intel_pt_decode(ptq
->decoder
);
1113 if (state
->err
== INTEL_PT_ERR_NODATA
) {
1114 intel_pt_log("queue %u has no timestamp\n",
1120 if (state
->timestamp
)
1124 ptq
->timestamp
= state
->timestamp
;
1125 intel_pt_log("queue %u timestamp 0x%" PRIx64
"\n",
1126 queue_nr
, ptq
->timestamp
);
1128 ptq
->have_sample
= true;
1129 if (ptq
->sel_start
&& ptq
->sel_timestamp
&&
1130 ptq
->timestamp
< ptq
->sel_timestamp
)
1131 ptq
->have_sample
= false;
1132 intel_pt_sample_flags(ptq
);
1133 ret
= auxtrace_heap__add(&pt
->heap
, queue_nr
, ptq
->timestamp
);
1136 ptq
->on_heap
= true;
1142 static int intel_pt_setup_queues(struct intel_pt
*pt
)
1147 for (i
= 0; i
< pt
->queues
.nr_queues
; i
++) {
1148 ret
= intel_pt_setup_queue(pt
, &pt
->queues
.queue_array
[i
], i
);
static inline void intel_pt_copy_last_branch_rb(struct intel_pt_queue *ptq)
{
	struct branch_stack *bs_src = ptq->last_branch_rb;
	struct branch_stack *bs_dst = ptq->last_branch;
	size_t nr = 0;

	bs_dst->nr = bs_src->nr;

	if (!bs_src->nr)
		return;

	nr = ptq->pt->synth_opts.last_branch_sz - ptq->last_branch_pos;
	memcpy(&bs_dst->entries[0],
	       &bs_src->entries[ptq->last_branch_pos],
	       sizeof(struct branch_entry) * nr);

	if (bs_src->nr >= ptq->pt->synth_opts.last_branch_sz) {
		memcpy(&bs_dst->entries[nr],
		       &bs_src->entries[0],
		       sizeof(struct branch_entry) * ptq->last_branch_pos);
	}
}

static inline void intel_pt_reset_last_branch_rb(struct intel_pt_queue *ptq)
{
	ptq->last_branch_pos = 0;
	ptq->last_branch_rb->nr = 0;
}

static void intel_pt_update_last_branch_rb(struct intel_pt_queue *ptq)
{
	const struct intel_pt_state *state = ptq->state;
	struct branch_stack *bs = ptq->last_branch_rb;
	struct branch_entry *be;

	if (!ptq->last_branch_pos)
		ptq->last_branch_pos = ptq->pt->synth_opts.last_branch_sz;

	ptq->last_branch_pos -= 1;

	be = &bs->entries[ptq->last_branch_pos];
	be->from = state->from_ip;
	be->to = state->to_ip;
	be->flags.abort = !!(state->flags & INTEL_PT_ABORT_TX);
	be->flags.in_tx = !!(state->flags & INTEL_PT_IN_TX);
	/* No support for mispredict */
	be->flags.mispred = ptq->pt->mispred_all;

	if (bs->nr < ptq->pt->synth_opts.last_branch_sz)
		bs->nr += 1;
}

static inline bool intel_pt_skip_event(struct intel_pt *pt)
{
	return pt->synth_opts.initial_skip &&
	       pt->num_events++ < pt->synth_opts.initial_skip;
}
/*
 * Cannot count CBR as skipped because it won't go away until cbr == cbr_seen.
 * Also ensure CBR is first non-skipped event by allowing for 4 more samples
 * from this decoder state.
 */
static inline bool intel_pt_skip_cbr_event(struct intel_pt *pt)
{
	return pt->synth_opts.initial_skip &&
	       pt->num_events + 4 < pt->synth_opts.initial_skip;
}

static void intel_pt_prep_a_sample(struct intel_pt_queue *ptq,
				   union perf_event *event,
				   struct perf_sample *sample)
{
	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.size = sizeof(struct perf_event_header);

	sample->pid = ptq->pid;
	sample->tid = ptq->tid;
	sample->cpu = ptq->cpu;
	sample->insn_len = ptq->insn_len;
	memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
}

static void intel_pt_prep_b_sample(struct intel_pt *pt,
				   struct intel_pt_queue *ptq,
				   union perf_event *event,
				   struct perf_sample *sample)
{
	intel_pt_prep_a_sample(ptq, event, sample);

	if (!pt->timeless_decoding)
		sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc);

	sample->ip = ptq->state->from_ip;
	sample->cpumode = intel_pt_cpumode(pt, sample->ip);
	sample->addr = ptq->state->to_ip;
	sample->flags = ptq->flags;

	event->sample.header.misc = sample->cpumode;
}

static int intel_pt_inject_event(union perf_event *event,
				 struct perf_sample *sample, u64 type)
{
	event->header.size = perf_event__sample_event_size(sample, type, 0);
	return perf_event__synthesize_sample(event, type, 0, sample);
}

static inline int intel_pt_opt_inject(struct intel_pt *pt,
				      union perf_event *event,
				      struct perf_sample *sample, u64 type)
{
	if (!pt->synth_opts.inject)
		return 0;

	return intel_pt_inject_event(event, sample, type);
}

static int intel_pt_deliver_synth_b_event(struct intel_pt *pt,
					  union perf_event *event,
					  struct perf_sample *sample, u64 type)
{
	int ret;

	ret = intel_pt_opt_inject(pt, event, sample, type);
	if (ret)
		return ret;

	ret = perf_session__deliver_synth_event(pt->session, event, sample);
	if (ret)
		pr_err("Intel PT: failed to deliver event, error %d\n", ret);

	return ret;
}
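/*
 * Synthesize a branch sample for the current decoder state, optionally with a
 * dummy branch stack for perf report's branch sort mode and the instruction
 * and cycle counts used to report IPC.
 */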
1291 static int intel_pt_synth_branch_sample(struct intel_pt_queue
*ptq
)
1293 struct intel_pt
*pt
= ptq
->pt
;
1294 union perf_event
*event
= ptq
->event_buf
;
1295 struct perf_sample sample
= { .ip
= 0, };
1296 struct dummy_branch_stack
{
1298 struct branch_entry entries
;
1301 if (pt
->branches_filter
&& !(pt
->branches_filter
& ptq
->flags
))
1304 if (intel_pt_skip_event(pt
))
1307 intel_pt_prep_b_sample(pt
, ptq
, event
, &sample
);
1309 sample
.id
= ptq
->pt
->branches_id
;
1310 sample
.stream_id
= ptq
->pt
->branches_id
;
1313 * perf report cannot handle events without a branch stack when using
1314 * SORT_MODE__BRANCH so make a dummy one.
1316 if (pt
->synth_opts
.last_branch
&& sort__mode
== SORT_MODE__BRANCH
) {
1317 dummy_bs
= (struct dummy_branch_stack
){
1324 sample
.branch_stack
= (struct branch_stack
*)&dummy_bs
;
1327 sample
.cyc_cnt
= ptq
->ipc_cyc_cnt
- ptq
->last_br_cyc_cnt
;
1328 if (sample
.cyc_cnt
) {
1329 sample
.insn_cnt
= ptq
->ipc_insn_cnt
- ptq
->last_br_insn_cnt
;
1330 ptq
->last_br_insn_cnt
= ptq
->ipc_insn_cnt
;
1331 ptq
->last_br_cyc_cnt
= ptq
->ipc_cyc_cnt
;
1334 return intel_pt_deliver_synth_b_event(pt
, event
, &sample
,
1335 pt
->branches_sample_type
);
1338 static void intel_pt_prep_sample(struct intel_pt
*pt
,
1339 struct intel_pt_queue
*ptq
,
1340 union perf_event
*event
,
1341 struct perf_sample
*sample
)
1343 intel_pt_prep_b_sample(pt
, ptq
, event
, sample
);
1345 if (pt
->synth_opts
.callchain
) {
1346 thread_stack__sample(ptq
->thread
, ptq
->cpu
, ptq
->chain
,
1347 pt
->synth_opts
.callchain_sz
+ 1,
1348 sample
->ip
, pt
->kernel_start
);
1349 sample
->callchain
= ptq
->chain
;
1352 if (pt
->synth_opts
.last_branch
) {
1353 intel_pt_copy_last_branch_rb(ptq
);
1354 sample
->branch_stack
= ptq
->last_branch
;
1358 static inline int intel_pt_deliver_synth_event(struct intel_pt
*pt
,
1359 struct intel_pt_queue
*ptq
,
1360 union perf_event
*event
,
1361 struct perf_sample
*sample
,
1366 ret
= intel_pt_deliver_synth_b_event(pt
, event
, sample
, type
);
1368 if (pt
->synth_opts
.last_branch
)
1369 intel_pt_reset_last_branch_rb(ptq
);
1374 static int intel_pt_synth_instruction_sample(struct intel_pt_queue
*ptq
)
1376 struct intel_pt
*pt
= ptq
->pt
;
1377 union perf_event
*event
= ptq
->event_buf
;
1378 struct perf_sample sample
= { .ip
= 0, };
1380 if (intel_pt_skip_event(pt
))
1383 intel_pt_prep_sample(pt
, ptq
, event
, &sample
);
1385 sample
.id
= ptq
->pt
->instructions_id
;
1386 sample
.stream_id
= ptq
->pt
->instructions_id
;
1387 sample
.period
= ptq
->state
->tot_insn_cnt
- ptq
->last_insn_cnt
;
1389 sample
.cyc_cnt
= ptq
->ipc_cyc_cnt
- ptq
->last_in_cyc_cnt
;
1390 if (sample
.cyc_cnt
) {
1391 sample
.insn_cnt
= ptq
->ipc_insn_cnt
- ptq
->last_in_insn_cnt
;
1392 ptq
->last_in_insn_cnt
= ptq
->ipc_insn_cnt
;
1393 ptq
->last_in_cyc_cnt
= ptq
->ipc_cyc_cnt
;
1396 ptq
->last_insn_cnt
= ptq
->state
->tot_insn_cnt
;
1398 return intel_pt_deliver_synth_event(pt
, ptq
, event
, &sample
,
1399 pt
->instructions_sample_type
);
1402 static int intel_pt_synth_transaction_sample(struct intel_pt_queue
*ptq
)
1404 struct intel_pt
*pt
= ptq
->pt
;
1405 union perf_event
*event
= ptq
->event_buf
;
1406 struct perf_sample sample
= { .ip
= 0, };
1408 if (intel_pt_skip_event(pt
))
1411 intel_pt_prep_sample(pt
, ptq
, event
, &sample
);
1413 sample
.id
= ptq
->pt
->transactions_id
;
1414 sample
.stream_id
= ptq
->pt
->transactions_id
;
1416 return intel_pt_deliver_synth_event(pt
, ptq
, event
, &sample
,
1417 pt
->transactions_sample_type
);
1420 static void intel_pt_prep_p_sample(struct intel_pt
*pt
,
1421 struct intel_pt_queue
*ptq
,
1422 union perf_event
*event
,
1423 struct perf_sample
*sample
)
1425 intel_pt_prep_sample(pt
, ptq
, event
, sample
);
1428 * Zero IP is used to mean "trace start" but that is not the case for
1429 * power or PTWRITE events with no IP, so clear the flags.
1435 static int intel_pt_synth_ptwrite_sample(struct intel_pt_queue
*ptq
)
1437 struct intel_pt
*pt
= ptq
->pt
;
1438 union perf_event
*event
= ptq
->event_buf
;
1439 struct perf_sample sample
= { .ip
= 0, };
1440 struct perf_synth_intel_ptwrite raw
;
1442 if (intel_pt_skip_event(pt
))
1445 intel_pt_prep_p_sample(pt
, ptq
, event
, &sample
);
1447 sample
.id
= ptq
->pt
->ptwrites_id
;
1448 sample
.stream_id
= ptq
->pt
->ptwrites_id
;
1451 raw
.ip
= !!(ptq
->state
->flags
& INTEL_PT_FUP_IP
);
1452 raw
.payload
= cpu_to_le64(ptq
->state
->ptw_payload
);
1454 sample
.raw_size
= perf_synth__raw_size(raw
);
1455 sample
.raw_data
= perf_synth__raw_data(&raw
);
1457 return intel_pt_deliver_synth_event(pt
, ptq
, event
, &sample
,
1458 pt
->ptwrites_sample_type
);
1461 static int intel_pt_synth_cbr_sample(struct intel_pt_queue
*ptq
)
1463 struct intel_pt
*pt
= ptq
->pt
;
1464 union perf_event
*event
= ptq
->event_buf
;
1465 struct perf_sample sample
= { .ip
= 0, };
1466 struct perf_synth_intel_cbr raw
;
1469 if (intel_pt_skip_cbr_event(pt
))
1472 ptq
->cbr_seen
= ptq
->state
->cbr
;
1474 intel_pt_prep_p_sample(pt
, ptq
, event
, &sample
);
1476 sample
.id
= ptq
->pt
->cbr_id
;
1477 sample
.stream_id
= ptq
->pt
->cbr_id
;
1479 flags
= (u16
)ptq
->state
->cbr_payload
| (pt
->max_non_turbo_ratio
<< 16);
1480 raw
.flags
= cpu_to_le32(flags
);
1481 raw
.freq
= cpu_to_le32(raw
.cbr
* pt
->cbr2khz
);
1484 sample
.raw_size
= perf_synth__raw_size(raw
);
1485 sample
.raw_data
= perf_synth__raw_data(&raw
);
1487 return intel_pt_deliver_synth_event(pt
, ptq
, event
, &sample
,
1488 pt
->pwr_events_sample_type
);
1491 static int intel_pt_synth_mwait_sample(struct intel_pt_queue
*ptq
)
1493 struct intel_pt
*pt
= ptq
->pt
;
1494 union perf_event
*event
= ptq
->event_buf
;
1495 struct perf_sample sample
= { .ip
= 0, };
1496 struct perf_synth_intel_mwait raw
;
1498 if (intel_pt_skip_event(pt
))
1501 intel_pt_prep_p_sample(pt
, ptq
, event
, &sample
);
1503 sample
.id
= ptq
->pt
->mwait_id
;
1504 sample
.stream_id
= ptq
->pt
->mwait_id
;
1507 raw
.payload
= cpu_to_le64(ptq
->state
->mwait_payload
);
1509 sample
.raw_size
= perf_synth__raw_size(raw
);
1510 sample
.raw_data
= perf_synth__raw_data(&raw
);
1512 return intel_pt_deliver_synth_event(pt
, ptq
, event
, &sample
,
1513 pt
->pwr_events_sample_type
);
1516 static int intel_pt_synth_pwre_sample(struct intel_pt_queue
*ptq
)
1518 struct intel_pt
*pt
= ptq
->pt
;
1519 union perf_event
*event
= ptq
->event_buf
;
1520 struct perf_sample sample
= { .ip
= 0, };
1521 struct perf_synth_intel_pwre raw
;
1523 if (intel_pt_skip_event(pt
))
1526 intel_pt_prep_p_sample(pt
, ptq
, event
, &sample
);
1528 sample
.id
= ptq
->pt
->pwre_id
;
1529 sample
.stream_id
= ptq
->pt
->pwre_id
;
1532 raw
.payload
= cpu_to_le64(ptq
->state
->pwre_payload
);
1534 sample
.raw_size
= perf_synth__raw_size(raw
);
1535 sample
.raw_data
= perf_synth__raw_data(&raw
);
1537 return intel_pt_deliver_synth_event(pt
, ptq
, event
, &sample
,
1538 pt
->pwr_events_sample_type
);
1541 static int intel_pt_synth_exstop_sample(struct intel_pt_queue
*ptq
)
1543 struct intel_pt
*pt
= ptq
->pt
;
1544 union perf_event
*event
= ptq
->event_buf
;
1545 struct perf_sample sample
= { .ip
= 0, };
1546 struct perf_synth_intel_exstop raw
;
1548 if (intel_pt_skip_event(pt
))
1551 intel_pt_prep_p_sample(pt
, ptq
, event
, &sample
);
1553 sample
.id
= ptq
->pt
->exstop_id
;
1554 sample
.stream_id
= ptq
->pt
->exstop_id
;
1557 raw
.ip
= !!(ptq
->state
->flags
& INTEL_PT_FUP_IP
);
1559 sample
.raw_size
= perf_synth__raw_size(raw
);
1560 sample
.raw_data
= perf_synth__raw_data(&raw
);
1562 return intel_pt_deliver_synth_event(pt
, ptq
, event
, &sample
,
1563 pt
->pwr_events_sample_type
);
1566 static int intel_pt_synth_pwrx_sample(struct intel_pt_queue
*ptq
)
1568 struct intel_pt
*pt
= ptq
->pt
;
1569 union perf_event
*event
= ptq
->event_buf
;
1570 struct perf_sample sample
= { .ip
= 0, };
1571 struct perf_synth_intel_pwrx raw
;
1573 if (intel_pt_skip_event(pt
))
1576 intel_pt_prep_p_sample(pt
, ptq
, event
, &sample
);
1578 sample
.id
= ptq
->pt
->pwrx_id
;
1579 sample
.stream_id
= ptq
->pt
->pwrx_id
;
1582 raw
.payload
= cpu_to_le64(ptq
->state
->pwrx_payload
);
1584 sample
.raw_size
= perf_synth__raw_size(raw
);
1585 sample
.raw_data
= perf_synth__raw_data(&raw
);
1587 return intel_pt_deliver_synth_event(pt
, ptq
, event
, &sample
,
1588 pt
->pwr_events_sample_type
);
/*
 * PEBS gp_regs array indexes plus 1 so that 0 means not present. Refer
 * intel_pt_add_gp_regs().
 */
static const int pebs_gp_regs[] = {
	[PERF_REG_X86_FLAGS]	= 1,
	[PERF_REG_X86_IP]	= 2,
	[PERF_REG_X86_AX]	= 3,
	[PERF_REG_X86_CX]	= 4,
	[PERF_REG_X86_DX]	= 5,
	[PERF_REG_X86_BX]	= 6,
	[PERF_REG_X86_SP]	= 7,
	[PERF_REG_X86_BP]	= 8,
	[PERF_REG_X86_SI]	= 9,
	[PERF_REG_X86_DI]	= 10,
	[PERF_REG_X86_R8]	= 11,
	[PERF_REG_X86_R9]	= 12,
	[PERF_REG_X86_R10]	= 13,
	[PERF_REG_X86_R11]	= 14,
	[PERF_REG_X86_R12]	= 15,
	[PERF_REG_X86_R13]	= 16,
	[PERF_REG_X86_R14]	= 17,
	[PERF_REG_X86_R15]	= 18,
};
1616 static u64
*intel_pt_add_gp_regs(struct regs_dump
*intr_regs
, u64
*pos
,
1617 const struct intel_pt_blk_items
*items
,
1620 const u64
*gp_regs
= items
->val
[INTEL_PT_GP_REGS_POS
];
1621 u32 mask
= items
->mask
[INTEL_PT_GP_REGS_POS
];
1625 for (i
= 0, bit
= 1; i
< PERF_REG_X86_64_MAX
; i
++, bit
<<= 1) {
1626 /* Get the PEBS gp_regs array index */
1627 int n
= pebs_gp_regs
[i
] - 1;
1632 * Add only registers that were requested (i.e. 'regs_mask') and
1633 * that were provided (i.e. 'mask'), and update the resulting
1634 * mask (i.e. 'intr_regs->mask') accordingly.
1636 if (mask
& 1 << n
&& regs_mask
& bit
) {
1637 intr_regs
->mask
|= bit
;
1638 *pos
++ = gp_regs
[n
];
1645 #ifndef PERF_REG_X86_XMM0
1646 #define PERF_REG_X86_XMM0 32
1649 static void intel_pt_add_xmm(struct regs_dump
*intr_regs
, u64
*pos
,
1650 const struct intel_pt_blk_items
*items
,
1653 u32 mask
= items
->has_xmm
& (regs_mask
>> PERF_REG_X86_XMM0
);
1654 const u64
*xmm
= items
->xmm
;
1657 * If there are any XMM registers, then there should be all of them.
1658 * Nevertheless, follow the logic to add only registers that were
1659 * requested (i.e. 'regs_mask') and that were provided (i.e. 'mask'),
1660 * and update the resulting mask (i.e. 'intr_regs->mask') accordingly.
1662 intr_regs
->mask
|= (u64
)mask
<< PERF_REG_X86_XMM0
;
1664 for (; mask
; mask
>>= 1, xmm
++) {
1670 #define LBR_INFO_MISPRED (1ULL << 63)
1671 #define LBR_INFO_IN_TX (1ULL << 62)
1672 #define LBR_INFO_ABORT (1ULL << 61)
1673 #define LBR_INFO_CYCLES 0xffff
1675 /* Refer kernel's intel_pmu_store_pebs_lbrs() */
1676 static u64
intel_pt_lbr_flags(u64 info
)
1679 struct branch_flags flags
;
1683 .mispred
= !!(info
& LBR_INFO_MISPRED
),
1684 .predicted
= !(info
& LBR_INFO_MISPRED
),
1685 .in_tx
= !!(info
& LBR_INFO_IN_TX
),
1686 .abort
= !!(info
& LBR_INFO_ABORT
),
1687 .cycles
= info
& LBR_INFO_CYCLES
,
1694 static void intel_pt_add_lbrs(struct branch_stack
*br_stack
,
1695 const struct intel_pt_blk_items
*items
)
1702 to
= &br_stack
->entries
[0].from
;
1704 for (i
= INTEL_PT_LBR_0_POS
; i
<= INTEL_PT_LBR_2_POS
; i
++) {
1705 u32 mask
= items
->mask
[i
];
1706 const u64
*from
= items
->val
[i
];
1708 for (; mask
; mask
>>= 3, from
+= 3) {
1709 if ((mask
& 7) == 7) {
1712 *to
++ = intel_pt_lbr_flags(from
[2]);
1719 /* INTEL_PT_LBR_0, INTEL_PT_LBR_1 and INTEL_PT_LBR_2 */
1720 #define LBRS_MAX (INTEL_PT_BLK_ITEM_ID_CNT * 3)
1722 static int intel_pt_synth_pebs_sample(struct intel_pt_queue
*ptq
)
1724 const struct intel_pt_blk_items
*items
= &ptq
->state
->items
;
1725 struct perf_sample sample
= { .ip
= 0, };
1726 union perf_event
*event
= ptq
->event_buf
;
1727 struct intel_pt
*pt
= ptq
->pt
;
1728 struct evsel
*evsel
= pt
->pebs_evsel
;
1729 u64 sample_type
= evsel
->core
.attr
.sample_type
;
1730 u64 id
= evsel
->core
.id
[0];
1733 if (intel_pt_skip_event(pt
))
1736 intel_pt_prep_a_sample(ptq
, event
, &sample
);
1739 sample
.stream_id
= id
;
1741 if (!evsel
->core
.attr
.freq
)
1742 sample
.period
= evsel
->core
.attr
.sample_period
;
1744 /* No support for non-zero CS base */
1746 sample
.ip
= items
->ip
;
1747 else if (items
->has_rip
)
1748 sample
.ip
= items
->rip
;
1750 sample
.ip
= ptq
->state
->from_ip
;
1752 /* No support for guest mode at this time */
1753 cpumode
= sample
.ip
< ptq
->pt
->kernel_start
?
1754 PERF_RECORD_MISC_USER
:
1755 PERF_RECORD_MISC_KERNEL
;
1757 event
->sample
.header
.misc
= cpumode
| PERF_RECORD_MISC_EXACT_IP
;
1759 sample
.cpumode
= cpumode
;
1761 if (sample_type
& PERF_SAMPLE_TIME
) {
1764 if (items
->has_timestamp
)
1765 timestamp
= items
->timestamp
;
1766 else if (!pt
->timeless_decoding
)
1767 timestamp
= ptq
->timestamp
;
1769 sample
.time
= tsc_to_perf_time(timestamp
, &pt
->tc
);
1772 if (sample_type
& PERF_SAMPLE_CALLCHAIN
&&
1773 pt
->synth_opts
.callchain
) {
1774 thread_stack__sample(ptq
->thread
, ptq
->cpu
, ptq
->chain
,
1775 pt
->synth_opts
.callchain_sz
, sample
.ip
,
1777 sample
.callchain
= ptq
->chain
;
1780 if (sample_type
& PERF_SAMPLE_REGS_INTR
&&
1781 items
->mask
[INTEL_PT_GP_REGS_POS
]) {
1782 u64 regs
[sizeof(sample
.intr_regs
.mask
)];
1783 u64 regs_mask
= evsel
->core
.attr
.sample_regs_intr
;
1786 sample
.intr_regs
.abi
= items
->is_32_bit
?
1787 PERF_SAMPLE_REGS_ABI_32
:
1788 PERF_SAMPLE_REGS_ABI_64
;
1789 sample
.intr_regs
.regs
= regs
;
1791 pos
= intel_pt_add_gp_regs(&sample
.intr_regs
, regs
, items
, regs_mask
);
1793 intel_pt_add_xmm(&sample
.intr_regs
, pos
, items
, regs_mask
);
1796 if (sample_type
& PERF_SAMPLE_BRANCH_STACK
) {
1798 struct branch_stack br_stack
;
1799 struct branch_entry entries
[LBRS_MAX
];
1802 if (items
->mask
[INTEL_PT_LBR_0_POS
] ||
1803 items
->mask
[INTEL_PT_LBR_1_POS
] ||
1804 items
->mask
[INTEL_PT_LBR_2_POS
]) {
1805 intel_pt_add_lbrs(&br
.br_stack
, items
);
1806 sample
.branch_stack
= &br
.br_stack
;
1807 } else if (pt
->synth_opts
.last_branch
) {
1808 intel_pt_copy_last_branch_rb(ptq
);
1809 sample
.branch_stack
= ptq
->last_branch
;
1812 sample
.branch_stack
= &br
.br_stack
;
1816 if (sample_type
& PERF_SAMPLE_ADDR
&& items
->has_mem_access_address
)
1817 sample
.addr
= items
->mem_access_address
;
1819 if (sample_type
& PERF_SAMPLE_WEIGHT
) {
1821 * Refer kernel's setup_pebs_adaptive_sample_data() and
1822 * intel_hsw_weight().
1824 if (items
->has_mem_access_latency
)
1825 sample
.weight
= items
->mem_access_latency
;
1826 if (!sample
.weight
&& items
->has_tsx_aux_info
) {
1827 /* Cycles last block */
1828 sample
.weight
= (u32
)items
->tsx_aux_info
;
1832 if (sample_type
& PERF_SAMPLE_TRANSACTION
&& items
->has_tsx_aux_info
) {
1833 u64 ax
= items
->has_rax
? items
->rax
: 0;
1834 /* Refer kernel's intel_hsw_transaction() */
1835 u64 txn
= (u8
)(items
->tsx_aux_info
>> 32);
1837 /* For RTM XABORTs also log the abort code from AX */
1838 if (txn
& PERF_TXN_TRANSACTION
&& ax
& 1)
1839 txn
|= ((ax
>> 24) & 0xff) << PERF_TXN_ABORT_SHIFT
;
1840 sample
.transaction
= txn
;
1843 return intel_pt_deliver_synth_event(pt
, ptq
, event
, &sample
, sample_type
);
static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
				pid_t pid, pid_t tid, u64 ip, u64 timestamp)
{
	union perf_event event;
	char msg[MAX_AUXTRACE_ERROR_MSG];
	int err;

	intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG);

	auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
			     code, cpu, pid, tid, ip, msg, timestamp);

	err = perf_session__deliver_synth_event(pt->session, &event, NULL);
	if (err)
		pr_err("Intel Processor Trace: failed to deliver error event, error %d\n",
		       err);

	return err;
}

static int intel_ptq_synth_error(struct intel_pt_queue *ptq,
				 const struct intel_pt_state *state)
{
	struct intel_pt *pt = ptq->pt;
	u64 tm = ptq->timestamp;

	tm = pt->timeless_decoding ? 0 : tsc_to_perf_time(tm, &pt->tc);

	return intel_pt_synth_error(pt, state->err, ptq->cpu, ptq->pid,
				    ptq->tid, state->from_ip, tm);
}

static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq)
{
	struct auxtrace_queue *queue;
	pid_t tid = ptq->next_tid;
	int err;

	if (tid == -1)
		return 0;

	intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid);

	err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid);

	queue = &pt->queues.queue_array[ptq->queue_nr];
	intel_pt_set_pid_tid_cpu(pt, queue);

	ptq->next_tid = -1;

	return err;
}

static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip)
{
	struct intel_pt *pt = ptq->pt;

	return ip == pt->switch_ip &&
	       (ptq->flags & PERF_IP_FLAG_BRANCH) &&
	       !(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC |
			       PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT));
}

#define INTEL_PT_PWR_EVT (INTEL_PT_MWAIT_OP | INTEL_PT_PWR_ENTRY | \
			  INTEL_PT_EX_STOP | INTEL_PT_PWR_EXIT)
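/*
 * Emit the samples pending for the current decoder state: PEBS first (its
 * timestamp may precede the current one), then power events, instructions,
 * transactions, ptwrites and branches, then update switch tracking state.
 */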
1912 static int intel_pt_sample(struct intel_pt_queue
*ptq
)
1914 const struct intel_pt_state
*state
= ptq
->state
;
1915 struct intel_pt
*pt
= ptq
->pt
;
1918 if (!ptq
->have_sample
)
1921 ptq
->have_sample
= false;
1923 if (ptq
->state
->tot_cyc_cnt
> ptq
->ipc_cyc_cnt
) {
1925 * Cycle count and instruction count only go together to create
1926 * a valid IPC ratio when the cycle count changes.
1928 ptq
->ipc_insn_cnt
= ptq
->state
->tot_insn_cnt
;
1929 ptq
->ipc_cyc_cnt
= ptq
->state
->tot_cyc_cnt
;
1933 * Do PEBS first to allow for the possibility that the PEBS timestamp
1934 * precedes the current timestamp.
1936 if (pt
->sample_pebs
&& state
->type
& INTEL_PT_BLK_ITEMS
) {
1937 err
= intel_pt_synth_pebs_sample(ptq
);
1942 if (pt
->sample_pwr_events
) {
1943 if (ptq
->state
->cbr
!= ptq
->cbr_seen
) {
1944 err
= intel_pt_synth_cbr_sample(ptq
);
1948 if (state
->type
& INTEL_PT_PWR_EVT
) {
1949 if (state
->type
& INTEL_PT_MWAIT_OP
) {
1950 err
= intel_pt_synth_mwait_sample(ptq
);
1954 if (state
->type
& INTEL_PT_PWR_ENTRY
) {
1955 err
= intel_pt_synth_pwre_sample(ptq
);
1959 if (state
->type
& INTEL_PT_EX_STOP
) {
1960 err
= intel_pt_synth_exstop_sample(ptq
);
1964 if (state
->type
& INTEL_PT_PWR_EXIT
) {
1965 err
= intel_pt_synth_pwrx_sample(ptq
);
1972 if (pt
->sample_instructions
&& (state
->type
& INTEL_PT_INSTRUCTION
)) {
1973 err
= intel_pt_synth_instruction_sample(ptq
);
1978 if (pt
->sample_transactions
&& (state
->type
& INTEL_PT_TRANSACTION
)) {
1979 err
= intel_pt_synth_transaction_sample(ptq
);
1984 if (pt
->sample_ptwrites
&& (state
->type
& INTEL_PT_PTW
)) {
1985 err
= intel_pt_synth_ptwrite_sample(ptq
);
1990 if (!(state
->type
& INTEL_PT_BRANCH
))
1993 if (pt
->synth_opts
.callchain
|| pt
->synth_opts
.thread_stack
)
1994 thread_stack__event(ptq
->thread
, ptq
->cpu
, ptq
->flags
, state
->from_ip
,
1995 state
->to_ip
, ptq
->insn_len
,
1998 thread_stack__set_trace_nr(ptq
->thread
, ptq
->cpu
, state
->trace_nr
);
2000 if (pt
->sample_branches
) {
2001 err
= intel_pt_synth_branch_sample(ptq
);
2006 if (pt
->synth_opts
.last_branch
)
2007 intel_pt_update_last_branch_rb(ptq
);
2009 if (!ptq
->sync_switch
)
2012 if (intel_pt_is_switch_ip(ptq
, state
->to_ip
)) {
2013 switch (ptq
->switch_state
) {
2014 case INTEL_PT_SS_NOT_TRACING
:
2015 case INTEL_PT_SS_UNKNOWN
:
2016 case INTEL_PT_SS_EXPECTING_SWITCH_IP
:
2017 err
= intel_pt_next_tid(pt
, ptq
);
2020 ptq
->switch_state
= INTEL_PT_SS_TRACING
;
2023 ptq
->switch_state
= INTEL_PT_SS_EXPECTING_SWITCH_EVENT
;
2026 } else if (!state
->to_ip
) {
2027 ptq
->switch_state
= INTEL_PT_SS_NOT_TRACING
;
2028 } else if (ptq
->switch_state
== INTEL_PT_SS_NOT_TRACING
) {
2029 ptq
->switch_state
= INTEL_PT_SS_UNKNOWN
;
2030 } else if (ptq
->switch_state
== INTEL_PT_SS_UNKNOWN
&&
2031 state
->to_ip
== pt
->ptss_ip
&&
2032 (ptq
->flags
& PERF_IP_FLAG_CALL
)) {
2033 ptq
->switch_state
= INTEL_PT_SS_TRACING
;
2039 static u64
intel_pt_switch_ip(struct intel_pt
*pt
, u64
*ptss_ip
)
2041 struct machine
*machine
= pt
->machine
;
2043 struct symbol
*sym
, *start
;
2044 u64 ip
, switch_ip
= 0;
2050 map
= machine__kernel_map(machine
);
2057 start
= dso__first_symbol(map
->dso
);
2059 for (sym
= start
; sym
; sym
= dso__next_symbol(sym
)) {
2060 if (sym
->binding
== STB_GLOBAL
&&
2061 !strcmp(sym
->name
, "__switch_to")) {
2062 ip
= map
->unmap_ip(map
, sym
->start
);
2063 if (ip
>= map
->start
&& ip
< map
->end
) {
2070 if (!switch_ip
|| !ptss_ip
)
2073 if (pt
->have_sched_switch
== 1)
2074 ptss
= "perf_trace_sched_switch";
2076 ptss
= "__perf_event_task_sched_out";
2078 for (sym
= start
; sym
; sym
= dso__next_symbol(sym
)) {
2079 if (!strcmp(sym
->name
, ptss
)) {
2080 ip
= map
->unmap_ip(map
, sym
->start
);
2081 if (ip
>= map
->start
&& ip
< map
->end
) {
static void intel_pt_enable_sync_switch(struct intel_pt *pt)
{
	unsigned int i;

	pt->sync_switch = true;

	for (i = 0; i < pt->queues.nr_queues; i++) {
		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
		struct intel_pt_queue *ptq = queue->priv;

		if (ptq)
			ptq->sync_switch = true;
	}
}

/*
 * To filter against time ranges, it is only necessary to look at the next start
 * or end time.
 */
static bool intel_pt_next_time(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;

	if (ptq->sel_start) {
		/* Next time is an end time */
		ptq->sel_start = false;
		ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].end;
		return true;
	} else if (ptq->sel_idx + 1 < pt->range_cnt) {
		/* Next time is a start time */
		ptq->sel_start = true;
		ptq->sel_idx += 1;
		ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].start;
		return true;
	}

	/* No next time */
	return false;
}
2131 static int intel_pt_time_filter(struct intel_pt_queue
*ptq
, u64
*ff_timestamp
)
2136 if (ptq
->sel_start
) {
2137 if (ptq
->timestamp
>= ptq
->sel_timestamp
) {
2138 /* After start time, so consider next time */
2139 intel_pt_next_time(ptq
);
2140 if (!ptq
->sel_timestamp
) {
2144 /* Check against end time */
2147 /* Before start time, so fast forward */
2148 ptq
->have_sample
= false;
2149 if (ptq
->sel_timestamp
> *ff_timestamp
) {
2150 if (ptq
->sync_switch
) {
2151 intel_pt_next_tid(ptq
->pt
, ptq
);
2152 ptq
->switch_state
= INTEL_PT_SS_UNKNOWN
;
2154 *ff_timestamp
= ptq
->sel_timestamp
;
2155 err
= intel_pt_fast_forward(ptq
->decoder
,
2156 ptq
->sel_timestamp
);
2161 } else if (ptq
->timestamp
> ptq
->sel_timestamp
) {
2162 /* After end time, so consider next time */
2163 if (!intel_pt_next_time(ptq
)) {
2164 /* No next time range, so stop decoding */
2165 ptq
->have_sample
= false;
2166 ptq
->switch_state
= INTEL_PT_SS_NOT_TRACING
;
2169 /* Check against next start time */
2172 /* Before end time */
2178 static int intel_pt_run_decoder(struct intel_pt_queue
*ptq
, u64
*timestamp
)
2180 const struct intel_pt_state
*state
= ptq
->state
;
2181 struct intel_pt
*pt
= ptq
->pt
;
2182 u64 ff_timestamp
= 0;
2185 if (!pt
->kernel_start
) {
2186 pt
->kernel_start
= machine__kernel_start(pt
->machine
);
2187 if (pt
->per_cpu_mmaps
&&
2188 (pt
->have_sched_switch
== 1 || pt
->have_sched_switch
== 3) &&
2189 !pt
->timeless_decoding
&& intel_pt_tracing_kernel(pt
) &&
2190 !pt
->sampling_mode
) {
2191 pt
->switch_ip
= intel_pt_switch_ip(pt
, &pt
->ptss_ip
);
2192 if (pt
->switch_ip
) {
2193 intel_pt_log("switch_ip: %"PRIx64
" ptss_ip: %"PRIx64
"\n",
2194 pt
->switch_ip
, pt
->ptss_ip
);
2195 intel_pt_enable_sync_switch(pt
);
2200 intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
2201 ptq
->queue_nr
, ptq
->cpu
, ptq
->pid
, ptq
->tid
);
2203 err
= intel_pt_sample(ptq
);
2207 state
= intel_pt_decode(ptq
->decoder
);
2209 if (state
->err
== INTEL_PT_ERR_NODATA
)
2211 if (ptq
->sync_switch
&&
2212 state
->from_ip
>= pt
->kernel_start
) {
2213 ptq
->sync_switch
= false;
2214 intel_pt_next_tid(pt
, ptq
);
2216 if (pt
->synth_opts
.errors
) {
2217 err
= intel_ptq_synth_error(ptq
, state
);
2225 ptq
->have_sample
= true;
2226 intel_pt_sample_flags(ptq
);
2228 /* Use estimated TSC upon return to user space */
2230 (state
->from_ip
>= pt
->kernel_start
|| !state
->from_ip
) &&
2231 state
->to_ip
&& state
->to_ip
< pt
->kernel_start
) {
2232 intel_pt_log("TSC %"PRIx64
" est. TSC %"PRIx64
"\n",
2233 state
->timestamp
, state
->est_timestamp
);
2234 ptq
->timestamp
= state
->est_timestamp
;
2235 /* Use estimated TSC in unknown switch state */
2236 } else if (ptq
->sync_switch
&&
2237 ptq
->switch_state
== INTEL_PT_SS_UNKNOWN
&&
2238 intel_pt_is_switch_ip(ptq
, state
->to_ip
) &&
2239 ptq
->next_tid
== -1) {
2240 intel_pt_log("TSC %"PRIx64
" est. TSC %"PRIx64
"\n",
2241 state
->timestamp
, state
->est_timestamp
);
2242 ptq
->timestamp
= state
->est_timestamp
;
2243 } else if (state
->timestamp
> ptq
->timestamp
) {
2244 ptq
->timestamp
= state
->timestamp
;
2247 if (ptq
->sel_timestamp
) {
2248 err
= intel_pt_time_filter(ptq
, &ff_timestamp
);
2253 if (!pt
->timeless_decoding
&& ptq
->timestamp
>= *timestamp
) {
2254 *timestamp
= ptq
->timestamp
;
2261 static inline int intel_pt_update_queues(struct intel_pt
*pt
)
2263 if (pt
->queues
.new_data
) {
2264 pt
->queues
.new_data
= false;
2265 return intel_pt_setup_queues(pt
);
2270 static int intel_pt_process_queues(struct intel_pt
*pt
, u64 timestamp
)
2272 unsigned int queue_nr
;
2277 struct auxtrace_queue
*queue
;
2278 struct intel_pt_queue
*ptq
;
2280 if (!pt
->heap
.heap_cnt
)
2283 if (pt
->heap
.heap_array
[0].ordinal
>= timestamp
)
2286 queue_nr
= pt
->heap
.heap_array
[0].queue_nr
;
2287 queue
= &pt
->queues
.queue_array
[queue_nr
];
2290 intel_pt_log("queue %u processing 0x%" PRIx64
" to 0x%" PRIx64
"\n",
2291 queue_nr
, pt
->heap
.heap_array
[0].ordinal
,
2294 auxtrace_heap__pop(&pt
->heap
);
2296 if (pt
->heap
.heap_cnt
) {
2297 ts
= pt
->heap
.heap_array
[0].ordinal
+ 1;
2304 intel_pt_set_pid_tid_cpu(pt
, queue
);
2306 ret
= intel_pt_run_decoder(ptq
, &ts
);
2309 auxtrace_heap__add(&pt
->heap
, queue_nr
, ts
);
2314 ret
= auxtrace_heap__add(&pt
->heap
, queue_nr
, ts
);
2318 ptq
->on_heap
= false;
2325 static int intel_pt_process_timeless_queues(struct intel_pt
*pt
, pid_t tid
,
2328 struct auxtrace_queues
*queues
= &pt
->queues
;
2332 for (i
= 0; i
< queues
->nr_queues
; i
++) {
2333 struct auxtrace_queue
*queue
= &pt
->queues
.queue_array
[i
];
2334 struct intel_pt_queue
*ptq
= queue
->priv
;
2336 if (ptq
&& (tid
== -1 || ptq
->tid
== tid
)) {
2338 intel_pt_set_pid_tid_cpu(pt
, queue
);
2339 intel_pt_run_decoder(ptq
, &ts
);
2345 static void intel_pt_sample_set_pid_tid_cpu(struct intel_pt_queue
*ptq
,
2346 struct auxtrace_queue
*queue
,
2347 struct perf_sample
*sample
)
2349 struct machine
*m
= ptq
->pt
->machine
;
2351 ptq
->pid
= sample
->pid
;
2352 ptq
->tid
= sample
->tid
;
2353 ptq
->cpu
= queue
->cpu
;
2355 intel_pt_log("queue %u cpu %d pid %d tid %d\n",
2356 ptq
->queue_nr
, ptq
->cpu
, ptq
->pid
, ptq
->tid
);
2358 thread__zput(ptq
->thread
);
2363 if (ptq
->pid
== -1) {
2364 ptq
->thread
= machine__find_thread(m
, -1, ptq
->tid
);
2366 ptq
->pid
= ptq
->thread
->pid_
;
2370 ptq
->thread
= machine__findnew_thread(m
, ptq
->pid
, ptq
->tid
);
2373 static int intel_pt_process_timeless_sample(struct intel_pt
*pt
,
2374 struct perf_sample
*sample
)
2376 struct auxtrace_queue
*queue
;
2377 struct intel_pt_queue
*ptq
;
2380 queue
= auxtrace_queues__sample_queue(&pt
->queues
, sample
, pt
->session
);
2389 ptq
->time
= sample
->time
;
2390 intel_pt_sample_set_pid_tid_cpu(ptq
, queue
, sample
);
2391 intel_pt_run_decoder(ptq
, &ts
);
static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
{
	return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
				    sample->pid, sample->tid, 0, sample->time);
}
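/*
 * Map a cpu number to its queue.  Queues are normally indexed by cpu, so try
 * the direct index first, then search downwards and then upwards from it.
 */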
static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
{
	unsigned i, j;

	if (cpu < 0 || !pt->queues.nr_queues)
		return NULL;

	if ((unsigned)cpu >= pt->queues.nr_queues)
		i = pt->queues.nr_queues - 1;
	else
		i = cpu;

	if (pt->queues.queue_array[i].cpu == cpu)
		return pt->queues.queue_array[i].priv;

	for (j = 0; i > 0; j++) {
		if (pt->queues.queue_array[--i].cpu == cpu)
			return pt->queues.queue_array[i].priv;
	}

	for (; j < pt->queues.nr_queues; j++) {
		if (pt->queues.queue_array[j].cpu == cpu)
			return pt->queues.queue_array[j].priv;
	}

	return NULL;
}
2429 static int intel_pt_sync_switch(struct intel_pt
*pt
, int cpu
, pid_t tid
,
2432 struct intel_pt_queue
*ptq
;
2435 if (!pt
->sync_switch
)
2438 ptq
= intel_pt_cpu_to_ptq(pt
, cpu
);
2439 if (!ptq
|| !ptq
->sync_switch
)
2442 switch (ptq
->switch_state
) {
2443 case INTEL_PT_SS_NOT_TRACING
:
2445 case INTEL_PT_SS_UNKNOWN
:
2446 case INTEL_PT_SS_TRACING
:
2447 ptq
->next_tid
= tid
;
2448 ptq
->switch_state
= INTEL_PT_SS_EXPECTING_SWITCH_IP
;
2450 case INTEL_PT_SS_EXPECTING_SWITCH_EVENT
:
2451 if (!ptq
->on_heap
) {
2452 ptq
->timestamp
= perf_time_to_tsc(timestamp
,
2454 err
= auxtrace_heap__add(&pt
->heap
, ptq
->queue_nr
,
2458 ptq
->on_heap
= true;
2460 ptq
->switch_state
= INTEL_PT_SS_TRACING
;
2462 case INTEL_PT_SS_EXPECTING_SWITCH_IP
:
2463 intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu
);
2474 static int intel_pt_process_switch(struct intel_pt
*pt
,
2475 struct perf_sample
*sample
)
2477 struct evsel
*evsel
;
2481 evsel
= perf_evlist__id2evsel(pt
->session
->evlist
, sample
->id
);
2482 if (evsel
!= pt
->switch_evsel
)
2485 tid
= perf_evsel__intval(evsel
, sample
, "next_pid");
2488 intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64
" tsc %#"PRIx64
"\n",
2489 cpu
, tid
, sample
->time
, perf_time_to_tsc(sample
->time
,
2492 ret
= intel_pt_sync_switch(pt
, cpu
, tid
, sample
->time
);
2496 return machine__set_current_tid(pt
->machine
, cpu
, -1, tid
);
2499 static int intel_pt_context_switch_in(struct intel_pt
*pt
,
2500 struct perf_sample
*sample
)
2502 pid_t pid
= sample
->pid
;
2503 pid_t tid
= sample
->tid
;
2504 int cpu
= sample
->cpu
;
2506 if (pt
->sync_switch
) {
2507 struct intel_pt_queue
*ptq
;
2509 ptq
= intel_pt_cpu_to_ptq(pt
, cpu
);
2510 if (ptq
&& ptq
->sync_switch
) {
2512 switch (ptq
->switch_state
) {
2513 case INTEL_PT_SS_NOT_TRACING
:
2514 case INTEL_PT_SS_UNKNOWN
:
2515 case INTEL_PT_SS_TRACING
:
2517 case INTEL_PT_SS_EXPECTING_SWITCH_EVENT
:
2518 case INTEL_PT_SS_EXPECTING_SWITCH_IP
:
2519 ptq
->switch_state
= INTEL_PT_SS_TRACING
;
2528 * If the current tid has not been updated yet, ensure it is now that
2529 * a "switch in" event has occurred.
2531 if (machine__get_current_tid(pt
->machine
, cpu
) == tid
)
2534 return machine__set_current_tid(pt
->machine
, cpu
, pid
, tid
);
static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
				   struct perf_sample *sample)
{
	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
	pid_t pid, tid;
	int cpu, ret;

	cpu = sample->cpu;

	if (pt->have_sched_switch == 3) {
		if (!out)
			return intel_pt_context_switch_in(pt, sample);
		if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) {
			pr_err("Expecting CPU-wide context switch event\n");
			return -EINVAL;
		}
		pid = event->context_switch.next_prev_pid;
		tid = event->context_switch.next_prev_tid;
	} else {
		if (out)
			return 0;
		pid = sample->pid;
		tid = sample->tid;
	}

	if (tid == -1) {
		pr_err("context_switch event has no tid\n");
		return -EINVAL;
	}

	intel_pt_log("context_switch: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
		     cpu, pid, tid, sample->time, perf_time_to_tsc(sample->time,
		     &pt->tc));

	ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
	if (ret <= 0)
		return ret;

	return machine__set_current_tid(pt->machine, cpu, pid, tid);
}

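/* Record which task was running when tracing started on a CPU. */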
static int intel_pt_process_itrace_start(struct intel_pt *pt,
					 union perf_event *event,
					 struct perf_sample *sample)
{
	if (!pt->per_cpu_mmaps)
		return 0;

	intel_pt_log("itrace_start: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
		     sample->cpu, event->itrace_start.pid,
		     event->itrace_start.tid, sample->time,
		     perf_time_to_tsc(sample->time, &pt->tc));

	return machine__set_current_tid(pt->machine, sample->cpu,
					event->itrace_start.pid,
					event->itrace_start.tid);
}

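/*
 * Main event callback: convert the sample time to TSC, process queued trace
 * data up to that timestamp, then handle side-band events that affect
 * decoding (truncated AUX data, sched_switch, itrace_start, context
 * switches).
 */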
static int intel_pt_process_event(struct perf_session *session,
				  union perf_event *event,
				  struct perf_sample *sample,
				  struct perf_tool *tool)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	u64 timestamp;
	int err = 0;

	if (dump_trace)
		return 0;

	if (!tool->ordered_events) {
		pr_err("Intel Processor Trace requires ordered events\n");
		return -EINVAL;
	}

	if (sample->time && sample->time != (u64)-1)
		timestamp = perf_time_to_tsc(sample->time, &pt->tc);
	else
		timestamp = 0;

	if (timestamp || pt->timeless_decoding) {
		err = intel_pt_update_queues(pt);
		if (err)
			return err;
	}

	if (pt->timeless_decoding) {
		if (pt->sampling_mode) {
			if (sample->aux_sample.size)
				err = intel_pt_process_timeless_sample(pt,
								       sample);
		} else if (event->header.type == PERF_RECORD_EXIT) {
			err = intel_pt_process_timeless_queues(pt,
							       event->fork.tid,
							       sample->time);
		}
	} else if (timestamp) {
		err = intel_pt_process_queues(pt, timestamp);
	}
	if (err)
		return err;

	if (event->header.type == PERF_RECORD_AUX &&
	    (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
	    pt->synth_opts.errors) {
		err = intel_pt_lost(pt, sample);
		if (err)
			return err;
	}

	if (pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE)
		err = intel_pt_process_switch(pt, sample);
	else if (event->header.type == PERF_RECORD_ITRACE_START)
		err = intel_pt_process_itrace_start(pt, event, sample);
	else if (event->header.type == PERF_RECORD_SWITCH ||
		 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
		err = intel_pt_context_switch(pt, event, sample);

	intel_pt_log("event %u: cpu %d time %"PRIu64" tsc %#"PRIx64" ",
		     event->header.type, sample->cpu, sample->time, timestamp);
	intel_pt_log_event(event);

	return err;
}

static int intel_pt_flush(struct perf_session *session, struct perf_tool *tool)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	int ret;

	if (dump_trace)
		return 0;

	if (!tool->ordered_events)
		return -EINVAL;

	ret = intel_pt_update_queues(pt);
	if (ret < 0)
		return ret;

	if (pt->timeless_decoding)
		return intel_pt_process_timeless_queues(pt, -1,
							MAX_TIMESTAMP - 1);

	return intel_pt_process_queues(pt, MAX_TIMESTAMP);
}

static void intel_pt_free_events(struct perf_session *session)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	struct auxtrace_queues *queues = &pt->queues;
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		intel_pt_free_queue(queues->queue_array[i].priv);
		queues->queue_array[i].priv = NULL;
	}
	intel_pt_log_disable();
	auxtrace_queues__free(queues);
}

static void intel_pt_free(struct perf_session *session)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	auxtrace_heap__free(&pt->heap);
	intel_pt_free_events(session);
	session->auxtrace = NULL;
	thread__put(pt->unknown_thread);
	addr_filters__exit(&pt->filts);
	zfree(&pt->filter);
	zfree(&pt->time_ranges);
	free(pt);
}

static int intel_pt_process_auxtrace_event(struct perf_session *session,
					   union perf_event *event,
					   struct perf_tool *tool __maybe_unused)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	if (!pt->data_queued) {
		struct auxtrace_buffer *buffer;
		off_t data_offset;
		int fd = perf_data__fd(session->data);
		int err;

		if (perf_data__is_pipe(session->data)) {
			data_offset = 0;
		} else {
			data_offset = lseek(fd, 0, SEEK_CUR);
			if (data_offset == -1)
				return -errno;
		}

		err = auxtrace_queues__add_event(&pt->queues, session, event,
						 data_offset, &buffer);
		if (err)
			return err;

		/* Dump here now we have copied a piped trace out of the pipe */
		if (dump_trace) {
			if (auxtrace_buffer__get_data(buffer, fd)) {
				intel_pt_dump_event(pt, buffer->data,
						    buffer->size);
				auxtrace_buffer__put_data(buffer);
			}
		}
	}

	return 0;
}

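/*
 * Queue AUX area data for decoding: whole AUX buffers by file offset, or
 * individual AUX samples by timestamp.
 */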
static int intel_pt_queue_data(struct perf_session *session,
			       struct perf_sample *sample,
			       union perf_event *event, u64 data_offset)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	u64 timestamp;

	if (event)
		return auxtrace_queues__add_event(&pt->queues, session, event,
						  data_offset, NULL);

	if (sample->time && sample->time != (u64)-1)
		timestamp = perf_time_to_tsc(sample->time, &pt->tc);
	else
		timestamp = 0;

	return auxtrace_queues__add_sample(&pt->queues, session, sample,
					   data_offset, timestamp);
}

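/*
 * Dummy tool used to deliver synthesized event attributes into the session.
 */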
struct intel_pt_synth {
	struct perf_tool dummy_tool;
	struct perf_session *session;
};

static int intel_pt_event_synth(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample __maybe_unused,
				struct machine *machine __maybe_unused)
{
	struct intel_pt_synth *intel_pt_synth =
			container_of(tool, struct intel_pt_synth, dummy_tool);

	return perf_session__deliver_synth_event(intel_pt_synth->session, event,
						 NULL);
}

static int intel_pt_synth_event(struct perf_session *session, const char *name,
				struct perf_event_attr *attr, u64 id)
{
	struct intel_pt_synth intel_pt_synth;
	int err;

	pr_debug("Synthesizing '%s' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
		 name, id, (u64)attr->sample_type);

	memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth));
	intel_pt_synth.session = session;

	err = perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
					  &id, intel_pt_event_synth);
	if (err)
		pr_err("%s: failed to synthesize '%s' event type\n",
		       __func__, name);

	return err;
}

static void intel_pt_set_event_name(struct evlist *evlist, u64 id,
				    const char *name)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.id && evsel->core.id[0] == id) {
			if (evsel->name)
				zfree(&evsel->name);
			evsel->name = strdup(name);
			break;
		}
	}
}

static struct evsel *intel_pt_evsel(struct intel_pt *pt,
				    struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == pt->pmu_type && evsel->core.ids)
			return evsel;
	}

	return NULL;
}

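/*
 * Create a perf_event_attr for each event type that will be synthesized
 * (branches, instructions, transactions, ptwrite, power events).  The
 * sample_type is derived from the Intel PT event and ids are allocated
 * starting at the original id plus 1000000000.
 */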
static int intel_pt_synth_events(struct intel_pt *pt,
				 struct perf_session *session)
{
	struct evlist *evlist = session->evlist;
	struct evsel *evsel = intel_pt_evsel(pt, evlist);
	struct perf_event_attr attr;
	u64 id;
	int err;

	if (!evsel) {
		pr_debug("There are no selected events with Intel Processor Trace data\n");
		return 0;
	}

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.size = sizeof(struct perf_event_attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK;
	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			    PERF_SAMPLE_PERIOD;
	if (pt->timeless_decoding)
		attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
	else
		attr.sample_type |= PERF_SAMPLE_TIME;
	if (!pt->per_cpu_mmaps)
		attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
	attr.exclude_user = evsel->core.attr.exclude_user;
	attr.exclude_kernel = evsel->core.attr.exclude_kernel;
	attr.exclude_hv = evsel->core.attr.exclude_hv;
	attr.exclude_host = evsel->core.attr.exclude_host;
	attr.exclude_guest = evsel->core.attr.exclude_guest;
	attr.sample_id_all = evsel->core.attr.sample_id_all;
	attr.read_format = evsel->core.attr.read_format;

	id = evsel->core.id[0] + 1000000000;
	if (!id)
		id = 1;

	if (pt->synth_opts.branches) {
		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
		attr.sample_period = 1;
		attr.sample_type |= PERF_SAMPLE_ADDR;
		err = intel_pt_synth_event(session, "branches", &attr, id);
		if (err)
			return err;
		pt->sample_branches = true;
		pt->branches_sample_type = attr.sample_type;
		pt->branches_id = id;
		id += 1;
		attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
	}

	if (pt->synth_opts.callchain)
		attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
	if (pt->synth_opts.last_branch)
		attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;

	if (pt->synth_opts.instructions) {
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
			attr.sample_period =
				intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
		else
			attr.sample_period = pt->synth_opts.period;
		err = intel_pt_synth_event(session, "instructions", &attr, id);
		if (err)
			return err;
		pt->sample_instructions = true;
		pt->instructions_sample_type = attr.sample_type;
		pt->instructions_id = id;
		id += 1;
	}

	attr.sample_type &= ~(u64)PERF_SAMPLE_PERIOD;
	attr.sample_period = 1;

	if (pt->synth_opts.transactions) {
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		err = intel_pt_synth_event(session, "transactions", &attr, id);
		if (err)
			return err;
		pt->sample_transactions = true;
		pt->transactions_sample_type = attr.sample_type;
		pt->transactions_id = id;
		intel_pt_set_event_name(evlist, id, "transactions");
		id += 1;
	}

	attr.type = PERF_TYPE_SYNTH;
	attr.sample_type |= PERF_SAMPLE_RAW;

	if (pt->synth_opts.ptwrites) {
		attr.config = PERF_SYNTH_INTEL_PTWRITE;
		err = intel_pt_synth_event(session, "ptwrite", &attr, id);
		if (err)
			return err;
		pt->sample_ptwrites = true;
		pt->ptwrites_sample_type = attr.sample_type;
		pt->ptwrites_id = id;
		intel_pt_set_event_name(evlist, id, "ptwrite");
		id += 1;
	}

	if (pt->synth_opts.pwr_events) {
		pt->sample_pwr_events = true;
		pt->pwr_events_sample_type = attr.sample_type;

		attr.config = PERF_SYNTH_INTEL_CBR;
		err = intel_pt_synth_event(session, "cbr", &attr, id);
		if (err)
			return err;
		pt->cbr_id = id;
		intel_pt_set_event_name(evlist, id, "cbr");
		id += 1;
	}

	if (pt->synth_opts.pwr_events && (evsel->core.attr.config & 0x10)) {
		attr.config = PERF_SYNTH_INTEL_MWAIT;
		err = intel_pt_synth_event(session, "mwait", &attr, id);
		if (err)
			return err;
		pt->mwait_id = id;
		intel_pt_set_event_name(evlist, id, "mwait");
		id += 1;

		attr.config = PERF_SYNTH_INTEL_PWRE;
		err = intel_pt_synth_event(session, "pwre", &attr, id);
		if (err)
			return err;
		pt->pwre_id = id;
		intel_pt_set_event_name(evlist, id, "pwre");
		id += 1;

		attr.config = PERF_SYNTH_INTEL_EXSTOP;
		err = intel_pt_synth_event(session, "exstop", &attr, id);
		if (err)
			return err;
		pt->exstop_id = id;
		intel_pt_set_event_name(evlist, id, "exstop");
		id += 1;

		attr.config = PERF_SYNTH_INTEL_PWRX;
		err = intel_pt_synth_event(session, "pwrx", &attr, id);
		if (err)
			return err;
		pt->pwrx_id = id;
		intel_pt_set_event_name(evlist, id, "pwrx");
		id += 1;
	}

	return 0;
}

static void intel_pt_setup_pebs_events(struct intel_pt *pt)
{
	struct evsel *evsel;

	if (!pt->synth_opts.other_events)
		return;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (evsel->core.attr.aux_output && evsel->core.id) {
			pt->sample_pebs = true;
			pt->pebs_evsel = evsel;
			return;
		}
	}
}

static struct evsel *intel_pt_find_sched_switch(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry_reverse(evlist, evsel) {
		const char *name = perf_evsel__name(evsel);

		if (!strcmp(name, "sched:sched_switch"))
			return evsel;
	}

	return NULL;
}

static bool intel_pt_find_switch(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.context_switch)
			return true;
	}

	return false;
}

static int intel_pt_perf_config(const char *var, const char *value, void *data)
{
	struct intel_pt *pt = data;

	if (!strcmp(var, "intel-pt.mispred-all"))
		pt->mispred_all = perf_config_bool(var, value);

	return 0;
}

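/*
 * perf time <-> TSC conversion is not exactly invertible, so the two helpers
 * below search for the TSC value whose conversion back to perf time best
 * matches the requested boundary.
 */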
/* Find least TSC which converts to ns or later */
static u64 intel_pt_tsc_start(u64 ns, struct intel_pt *pt)
{
	u64 tsc, tm;

	tsc = perf_time_to_tsc(ns, &pt->tc);

	while (1) {
		tm = tsc_to_perf_time(tsc, &pt->tc);
		if (tm < ns)
			break;
		tsc -= 1;
	}

	while (tm < ns)
		tm = tsc_to_perf_time(++tsc, &pt->tc);

	return tsc;
}

/* Find greatest TSC which converts to ns or earlier */
static u64 intel_pt_tsc_end(u64 ns, struct intel_pt *pt)
{
	u64 tsc, tm;

	tsc = perf_time_to_tsc(ns, &pt->tc);

	while (1) {
		tm = tsc_to_perf_time(tsc, &pt->tc);
		if (tm > ns)
			break;
		tsc += 1;
	}

	while (tm > ns)
		tm = tsc_to_perf_time(--tsc, &pt->tc);

	return tsc;
}

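/* Convert the requested perf-time ranges to TSC ranges for the decoder. */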
static int intel_pt_setup_time_ranges(struct intel_pt *pt,
				      struct itrace_synth_opts *opts)
{
	struct perf_time_interval *p = opts->ptime_range;
	int n = opts->range_num;
	int i;

	if (!n || !p || pt->timeless_decoding)
		return 0;

	pt->time_ranges = calloc(n, sizeof(struct range));
	if (!pt->time_ranges)
		return -ENOMEM;

	pt->range_cnt = n;

	intel_pt_log("%s: %u range(s)\n", __func__, n);

	for (i = 0; i < n; i++) {
		struct range *r = &pt->time_ranges[i];
		u64 ts = p[i].start;
		u64 te = p[i].end;

		/*
		 * Take care to ensure the TSC range matches the perf-time range
		 * when converted back to perf-time.
		 */
		r->start = ts ? intel_pt_tsc_start(ts, pt) : 0;
		r->end   = te ? intel_pt_tsc_end(te, pt) : 0;

		intel_pt_log("range %d: perf time interval: %"PRIu64" to %"PRIu64"\n",
			     i, ts, te);
		intel_pt_log("range %d: TSC time interval: %#"PRIx64" to %#"PRIx64"\n",
			     i, r->start, r->end);
	}

	return 0;
}

static const char * const intel_pt_info_fmts[] = {
	[INTEL_PT_PMU_TYPE]		= "  PMU Type            %"PRId64"\n",
	[INTEL_PT_TIME_SHIFT]		= "  Time Shift          %"PRIu64"\n",
	[INTEL_PT_TIME_MULT]		= "  Time Multiplier     %"PRIu64"\n",
	[INTEL_PT_TIME_ZERO]		= "  Time Zero           %"PRIu64"\n",
	[INTEL_PT_CAP_USER_TIME_ZERO]	= "  Cap Time Zero       %"PRId64"\n",
	[INTEL_PT_TSC_BIT]		= "  TSC bit             %#"PRIx64"\n",
	[INTEL_PT_NORETCOMP_BIT]	= "  NoRETComp bit       %#"PRIx64"\n",
	[INTEL_PT_HAVE_SCHED_SWITCH]	= "  Have sched_switch   %"PRId64"\n",
	[INTEL_PT_SNAPSHOT_MODE]	= "  Snapshot mode       %"PRId64"\n",
	[INTEL_PT_PER_CPU_MMAPS]	= "  Per-cpu maps        %"PRId64"\n",
	[INTEL_PT_MTC_BIT]		= "  MTC bit             %#"PRIx64"\n",
	[INTEL_PT_TSC_CTC_N]		= "  TSC:CTC numerator   %"PRIu64"\n",
	[INTEL_PT_TSC_CTC_D]		= "  TSC:CTC denominator %"PRIu64"\n",
	[INTEL_PT_CYC_BIT]		= "  CYC bit             %#"PRIx64"\n",
	[INTEL_PT_MAX_NONTURBO_RATIO]	= "  Max non-turbo ratio %"PRIu64"\n",
	[INTEL_PT_FILTER_STR_LEN]	= "  Filter string len.  %"PRIu64"\n",
};

static void intel_pt_print_info(__u64 *arr, int start, int finish)
{
	int i;

	if (!dump_trace)
		return;

	for (i = start; i <= finish; i++)
		fprintf(stdout, intel_pt_info_fmts[i], arr[i]);
}

static void intel_pt_print_info_str(const char *name, const char *str)
{
	if (!dump_trace)
		return;

	fprintf(stdout, "  %-20s%s\n", name, str ? str : "");
}

static bool intel_pt_has(struct perf_record_auxtrace_info *auxtrace_info, int pos)
{
	return auxtrace_info->header.size >=
		sizeof(struct perf_record_auxtrace_info) + (sizeof(u64) * (pos + 1));
}

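/*
 * Set up Intel PT decoding for a session: read the auxtrace info private
 * data, initialize the intel_pt state, decide how context switches will be
 * handled, synthesize event attributes, and queue the trace data.
 */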
int intel_pt_process_auxtrace_info(union perf_event *event,
				   struct perf_session *session)
{
	struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
	size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS;
	struct intel_pt *pt;
	void *info_end;
	__u64 *info;
	int err;

	if (auxtrace_info->header.size < sizeof(struct perf_record_auxtrace_info) +
					min_sz)
		return -EINVAL;

	pt = zalloc(sizeof(struct intel_pt));
	if (!pt)
		return -ENOMEM;

	addr_filters__init(&pt->filts);

	err = perf_config(intel_pt_perf_config, pt);
	if (err)
		goto err_free;

	err = auxtrace_queues__init(&pt->queues);
	if (err)
		goto err_free;

	intel_pt_log_set_name(INTEL_PT_PMU_NAME);

	pt->session = session;
	pt->machine = &session->machines.host; /* No kvm support */
	pt->auxtrace_type = auxtrace_info->type;
	pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE];
	pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT];
	pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT];
	pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO];
	pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO];
	pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT];
	pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT];
	pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH];
	pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE];
	pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS];
	intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE,
			    INTEL_PT_PER_CPU_MMAPS);

	if (intel_pt_has(auxtrace_info, INTEL_PT_CYC_BIT)) {
		pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT];
		pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS];
		pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N];
		pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D];
		pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT];
		intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT,
				    INTEL_PT_CYC_BIT);
	}

	if (intel_pt_has(auxtrace_info, INTEL_PT_MAX_NONTURBO_RATIO)) {
		pt->max_non_turbo_ratio =
			auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO];
		intel_pt_print_info(&auxtrace_info->priv[0],
				    INTEL_PT_MAX_NONTURBO_RATIO,
				    INTEL_PT_MAX_NONTURBO_RATIO);
	}

	info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1;
	info_end = (void *)info + auxtrace_info->header.size;

	if (intel_pt_has(auxtrace_info, INTEL_PT_FILTER_STR_LEN)) {
		size_t len;

		len = auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN];
		intel_pt_print_info(&auxtrace_info->priv[0],
				    INTEL_PT_FILTER_STR_LEN,
				    INTEL_PT_FILTER_STR_LEN);
		if (len) {
			const char *filter = (const char *)info;

			len = roundup(len + 1, 8);
			info += len >> 3;
			if ((void *)info > info_end) {
				pr_err("%s: bad filter string length\n", __func__);
				err = -EINVAL;
				goto err_free_queues;
			}
			pt->filter = memdup(filter, len);
			if (!pt->filter) {
				err = -ENOMEM;
				goto err_free_queues;
			}
			if (session->header.needs_swap)
				mem_bswap_64(pt->filter, len);
			if (pt->filter[len - 1]) {
				pr_err("%s: filter string not null terminated\n", __func__);
				err = -EINVAL;
				goto err_free_queues;
			}
			err = addr_filters__parse_bare_filter(&pt->filts,
							      filter);
			if (err)
				goto err_free_queues;
		}
		intel_pt_print_info_str("Filter string", pt->filter);
	}

	pt->timeless_decoding = intel_pt_timeless_decoding(pt);
	if (pt->timeless_decoding && !pt->tc.time_mult)
		pt->tc.time_mult = 1;
	pt->have_tsc = intel_pt_have_tsc(pt);
	pt->sampling_mode = intel_pt_sampling_mode(pt);
	pt->est_tsc = !pt->timeless_decoding;

	pt->unknown_thread = thread__new(999999999, 999999999);
	if (!pt->unknown_thread) {
		err = -ENOMEM;
		goto err_free_queues;
	}

	/*
	 * Since this thread will not be kept in any rbtree nor in a
	 * list, initialize its list node so that at thread__put() the
	 * current thread lifetime assumption is kept and we don't segfault
	 * at list_del_init().
	 */
	INIT_LIST_HEAD(&pt->unknown_thread->node);

	err = thread__set_comm(pt->unknown_thread, "unknown", 0);
	if (err)
		goto err_delete_thread;
	if (thread__init_maps(pt->unknown_thread, pt->machine)) {
		err = -ENOMEM;
		goto err_delete_thread;
	}

	pt->auxtrace.process_event = intel_pt_process_event;
	pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event;
	pt->auxtrace.queue_data = intel_pt_queue_data;
	pt->auxtrace.dump_auxtrace_sample = intel_pt_dump_sample;
	pt->auxtrace.flush_events = intel_pt_flush;
	pt->auxtrace.free_events = intel_pt_free_events;
	pt->auxtrace.free = intel_pt_free;
	session->auxtrace = &pt->auxtrace;

	if (dump_trace)
		return 0;

	if (pt->have_sched_switch == 1) {
		pt->switch_evsel = intel_pt_find_sched_switch(session->evlist);
		if (!pt->switch_evsel) {
			pr_err("%s: missing sched_switch event\n", __func__);
			err = -EINVAL;
			goto err_delete_thread;
		}
	} else if (pt->have_sched_switch == 2 &&
		   !intel_pt_find_switch(session->evlist)) {
		pr_err("%s: missing context_switch attribute flag\n", __func__);
		err = -EINVAL;
		goto err_delete_thread;
	}

	if (session->itrace_synth_opts->set) {
		pt->synth_opts = *session->itrace_synth_opts;
	} else {
		itrace_synth_opts__set_default(&pt->synth_opts,
				session->itrace_synth_opts->default_no_sample);
		if (!session->itrace_synth_opts->default_no_sample &&
		    !session->itrace_synth_opts->inject) {
			pt->synth_opts.branches = false;
			pt->synth_opts.callchain = true;
		}
		pt->synth_opts.thread_stack =
				session->itrace_synth_opts->thread_stack;
	}

	if (pt->synth_opts.log)
		intel_pt_log_enable();

	/* Maximum non-turbo ratio is TSC freq / 100 MHz */
	if (pt->tc.time_mult) {
		u64 tsc_freq = intel_pt_ns_to_ticks(pt, 1000000000);

		if (!pt->max_non_turbo_ratio)
			pt->max_non_turbo_ratio =
					(tsc_freq + 50000000) / 100000000;
		intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq);
		intel_pt_log("Maximum non-turbo ratio %u\n",
			     pt->max_non_turbo_ratio);
		pt->cbr2khz = tsc_freq / pt->max_non_turbo_ratio / 1000;
	}

	err = intel_pt_setup_time_ranges(pt, session->itrace_synth_opts);
	if (err)
		goto err_delete_thread;

	if (pt->synth_opts.calls)
		pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
				       PERF_IP_FLAG_TRACE_END;
	if (pt->synth_opts.returns)
		pt->branches_filter |= PERF_IP_FLAG_RETURN |
				       PERF_IP_FLAG_TRACE_BEGIN;

	if (pt->synth_opts.callchain && !symbol_conf.use_callchain) {
		symbol_conf.use_callchain = true;
		if (callchain_register_param(&callchain_param) < 0) {
			symbol_conf.use_callchain = false;
			pt->synth_opts.callchain = false;
		}
	}

	err = intel_pt_synth_events(pt, session);
	if (err)
		goto err_delete_thread;

	intel_pt_setup_pebs_events(pt);

	if (pt->sampling_mode || list_empty(&session->auxtrace_index))
		err = auxtrace_queue_data(session, true, true);
	else
		err = auxtrace_queues__process_index(&pt->queues, session);
	if (err)
		goto err_delete_thread;

	if (pt->queues.populated)
		pt->data_queued = true;

	if (pt->timeless_decoding)
		pr_debug2("Intel PT decoding without timestamps\n");

	return 0;

err_delete_thread:
	thread__zput(pt->unknown_thread);
err_free_queues:
	intel_pt_log_disable();
	auxtrace_queues__free(&pt->queues);
	session->auxtrace = NULL;
err_free:
	addr_filters__exit(&pt->filts);
	zfree(&pt->filter);
	zfree(&pt->time_ranges);
	free(pt);
	return err;
}