// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_pt.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#include <errno.h>
#include <stdbool.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/zalloc.h>
#include <linux/err.h>
#include <cpuid.h>

#include "../../../util/session.h"
#include "../../../util/event.h"
#include "../../../util/evlist.h"
#include "../../../util/evsel.h"
#include "../../../util/evsel_config.h"
#include "../../../util/cpumap.h"
#include "../../../util/mmap.h"
#include <subcmd/parse-options.h>
#include "../../../util/parse-events.h"
#include "../../../util/pmus.h"
#include "../../../util/debug.h"
#include "../../../util/auxtrace.h"
#include "../../../util/perf_api_probe.h"
#include "../../../util/record.h"
#include "../../../util/target.h"
#include "../../../util/tsc.h"
#include <internal/lib.h> // page_size
#include "../../../util/intel-pt.h"
#include <api/fs/fs.h>

#define KiB(x) ((x) * 1024)
#define MiB(x) ((x) * 1024 * 1024)
#define KiB_MASK(x) (KiB(x) - 1)
#define MiB_MASK(x) (MiB(x) - 1)

#define INTEL_PT_PSB_PERIOD_NEAR	256

struct intel_pt_snapshot_ref {
	void	*ref_buf;
	size_t	ref_offset;
	bool	wrapped;
};

struct intel_pt_recording {
	struct auxtrace_record		itr;
	struct perf_pmu			*intel_pt_pmu;
	int				have_sched_switch;
	struct evlist			*evlist;
	bool				snapshot_mode;
	bool				snapshot_init_done;
	size_t				snapshot_size;
	size_t				snapshot_ref_buf_size;
	int				snapshot_ref_cnt;
	struct intel_pt_snapshot_ref	*snapshot_refs;
	size_t				priv_size;
};
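
/*
 * Parse a comma-separated Intel PT config term string (e.g. "tsc,mtc") into a
 * perf_event_attr config value, starting from the default already in *config.
 */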
static int intel_pt_parse_terms_with_default(const struct perf_pmu *pmu,
					     const char *str,
					     u64 *config)
{
	struct parse_events_terms terms;
	struct perf_event_attr attr = { .size = 0, };
	int err;

	parse_events_terms__init(&terms);
	err = parse_events_terms(&terms, str, /*input=*/ NULL);
	if (err)
		goto out_free;

	attr.config = *config;
	err = perf_pmu__config_terms(pmu, &attr, &terms, /*zero=*/true, /*apply_hardcoded=*/false,
				     /*err=*/NULL);
	if (err)
		goto out_free;

	*config = attr.config;
out_free:
	parse_events_terms__exit(&terms);
	return err;
}

static int intel_pt_parse_terms(const struct perf_pmu *pmu, const char *str,
				u64 *config)
{
	*config = 0;
	return intel_pt_parse_terms_with_default(pmu, str, config);
}
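
/*
 * Gather the bits of 'bits' selected by 'mask' into the low bits of the
 * result, preserving their order.
 */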
static u64 intel_pt_masked_bits(u64 mask, u64 bits)
{
	const u64 top_bit = 1ULL << 63;
	u64 res = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & top_bit) {
			res <<= 1;
			if (bits & top_bit)
				res |= 1;
		}
		mask <<= 1;
		bits <<= 1;
	}

	return res;
}

static int intel_pt_read_config(struct perf_pmu *intel_pt_pmu, const char *str,
				struct evlist *evlist, u64 *res)
{
	struct evsel *evsel;
	u64 mask;

	*res = 0;

	mask = perf_pmu__format_bits(intel_pt_pmu, str);
	if (!mask)
		return -EINVAL;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == intel_pt_pmu->type) {
			*res = intel_pt_masked_bits(mask, evsel->core.attr.config);
			return 0;
		}
	}

	return -EINVAL;
}
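
/*
 * Estimate the PSB (Packet Stream Boundary) period in bytes of trace, based
 * on the psb_period config term and the PMU capabilities.
 */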
static size_t intel_pt_psb_period(struct perf_pmu *intel_pt_pmu,
				  struct evlist *evlist)
{
	u64 val;
	int err, topa_multiple_entries;
	size_t psb_period;

	if (perf_pmu__scan_file(intel_pt_pmu, "caps/topa_multiple_entries",
				"%d", &topa_multiple_entries) != 1)
		topa_multiple_entries = 0;

	/*
	 * Use caps/topa_multiple_entries to indicate early hardware that had
	 * extra frequent PSBs.
	 */
	if (!topa_multiple_entries) {
		psb_period = 256;
		goto out;
	}

	err = intel_pt_read_config(intel_pt_pmu, "psb_period", evlist, &val);
	if (err)
		val = 0;

	psb_period = 1 << (val + 11);
out:
	pr_debug2("%s psb_period %zu\n", intel_pt_pmu->name, psb_period);
	return psb_period;
}
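
/*
 * Pick the highest set bit position that does not exceed 'target', falling
 * back to the lowest set bit if none qualifies. Returns -1 if 'bits' is zero.
 */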
static int intel_pt_pick_bit(int bits, int target)
{
	int pos, pick = -1;

	for (pos = 0; bits; bits >>= 1, pos++) {
		if (bits & 1) {
			if (pos <= target || pick < 0)
				pick = pos;
		}
	}

	return pick;
}
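
/*
 * Build the default Intel PT config as a term string (e.g.
 * "tsc,mtc,mtc_period=3,psb_period=3,pt,branch"), using only terms the PMU
 * capabilities say are supported, then parse it into a config value.
 */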
static u64 intel_pt_default_config(const struct perf_pmu *intel_pt_pmu)
{
	char buf[256];
	int mtc, mtc_periods = 0, mtc_period;
	int psb_cyc, psb_periods, psb_period;
	int pos = 0;
	u64 config;
	char c;
	int dirfd;

	dirfd = perf_pmu__event_source_devices_fd();

	pos += scnprintf(buf + pos, sizeof(buf) - pos, "tsc");

	if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "caps/mtc", "%d",
				   &mtc) != 1)
		mtc = 1;
	if (mtc) {
		if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "caps/mtc_periods", "%x",
					   &mtc_periods) != 1)
			mtc_periods = 0;
		if (mtc_periods) {
			mtc_period = intel_pt_pick_bit(mtc_periods, 3);
			pos += scnprintf(buf + pos, sizeof(buf) - pos,
					 ",mtc,mtc_period=%d", mtc_period);
		}
	}

	if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "caps/psb_cyc", "%d",
				   &psb_cyc) != 1)
		psb_cyc = 1;

	if (psb_cyc && mtc_periods) {
		if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "caps/psb_periods", "%x",
					   &psb_periods) != 1)
			psb_periods = 0;
		if (psb_periods) {
			psb_period = intel_pt_pick_bit(psb_periods, 3);
			pos += scnprintf(buf + pos, sizeof(buf) - pos,
					 ",psb_period=%d", psb_period);
		}
	}

	if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "format/pt", "%c", &c) == 1 &&
	    perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "format/branch", "%c", &c) == 1)
		pos += scnprintf(buf + pos, sizeof(buf) - pos, ",pt,branch");

	pr_debug2("%s default config: %s\n", intel_pt_pmu->name, buf);

	intel_pt_parse_terms(intel_pt_pmu, buf, &config);

	close(dirfd);

	return config;
}

static int intel_pt_parse_snapshot_options(struct auxtrace_record *itr,
					   struct record_opts *opts,
					   const char *str)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	unsigned long long snapshot_size = 0;
	char *endptr;

	if (str) {
		snapshot_size = strtoull(str, &endptr, 0);
		if (*endptr || snapshot_size > SIZE_MAX)
			return -1;
	}

	opts->auxtrace_snapshot_mode = true;
	opts->auxtrace_snapshot_size = snapshot_size;

	ptr->snapshot_size = snapshot_size;

	return 0;
}

void intel_pt_pmu_default_config(const struct perf_pmu *intel_pt_pmu,
				 struct perf_event_attr *attr)
{
	static u64 config;
	static bool initialized;

	if (!initialized) {
		config = intel_pt_default_config(intel_pt_pmu);
		initialized = true;
	}
	attr->config = config;
}

static const char *intel_pt_find_filter(struct evlist *evlist,
					struct perf_pmu *intel_pt_pmu)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == intel_pt_pmu->type)
			return evsel->filter;
	}

	return NULL;
}

static size_t intel_pt_filter_bytes(const char *filter)
{
	size_t len = filter ? strlen(filter) : 0;

	return len ? roundup(len + 1, 8) : 0;
}
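
/*
 * Size of the AUXTRACE_INFO private data: the fixed INTEL_PT_AUXTRACE_PRIV_MAX
 * array of u64 values, the address filter string (padded to a multiple of 8
 * bytes), and one extra u64 for the Event Trace capability.
 */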
static size_t
intel_pt_info_priv_size(struct auxtrace_record *itr, struct evlist *evlist)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	const char *filter = intel_pt_find_filter(evlist, ptr->intel_pt_pmu);

	ptr->priv_size = (INTEL_PT_AUXTRACE_PRIV_MAX * sizeof(u64)) +
			 intel_pt_filter_bytes(filter);
	ptr->priv_size += sizeof(u64); /* Cap Event Trace */

	return ptr->priv_size;
}

static void intel_pt_tsc_ctc_ratio(u32 *n, u32 *d)
{
	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

	__get_cpuid(0x15, &eax, &ebx, &ecx, &edx);
	*n = ebx;
	*d = eax;
}
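
/*
 * Fill the PERF_RECORD_AUXTRACE_INFO event with everything the decoder needs:
 * PMU config bits, TSC conversion parameters, the TSC:CTC ratio from CPUID
 * leaf 0x15, the address filter string and the Event Trace capability.
 */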
static int intel_pt_info_fill(struct auxtrace_record *itr,
			      struct perf_session *session,
			      struct perf_record_auxtrace_info *auxtrace_info,
			      size_t priv_size)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
	struct perf_event_mmap_page *pc;
	struct perf_tsc_conversion tc = { .time_mult = 0, };
	bool cap_user_time_zero = false, per_cpu_mmaps;
	u64 tsc_bit, mtc_bit, mtc_freq_bits, cyc_bit, noretcomp_bit;
	u32 tsc_ctc_ratio_n, tsc_ctc_ratio_d;
	unsigned long max_non_turbo_ratio;
	size_t filter_str_len;
	const char *filter;
	int event_trace;
	__u64 *info;
	int err;

	if (priv_size != ptr->priv_size)
		return -EINVAL;

	intel_pt_parse_terms(intel_pt_pmu, "tsc", &tsc_bit);
	intel_pt_parse_terms(intel_pt_pmu, "noretcomp", &noretcomp_bit);
	intel_pt_parse_terms(intel_pt_pmu, "mtc", &mtc_bit);
	mtc_freq_bits = perf_pmu__format_bits(intel_pt_pmu, "mtc_period");
	intel_pt_parse_terms(intel_pt_pmu, "cyc", &cyc_bit);

	intel_pt_tsc_ctc_ratio(&tsc_ctc_ratio_n, &tsc_ctc_ratio_d);

	if (perf_pmu__scan_file(intel_pt_pmu, "max_nonturbo_ratio",
				"%lu", &max_non_turbo_ratio) != 1)
		max_non_turbo_ratio = 0;
	if (perf_pmu__scan_file(intel_pt_pmu, "caps/event_trace",
				"%d", &event_trace) != 1)
		event_trace = 0;

	filter = intel_pt_find_filter(session->evlist, ptr->intel_pt_pmu);
	filter_str_len = filter ? strlen(filter) : 0;

	if (!session->evlist->core.nr_mmaps)
		return -EINVAL;

	pc = session->evlist->mmap[0].core.base;
	if (pc) {
		err = perf_read_tsc_conversion(pc, &tc);
		if (err) {
			if (err != -EOPNOTSUPP)
				return err;
		} else {
			cap_user_time_zero = tc.time_mult != 0;
		}
	}

	if (!cap_user_time_zero)
		ui__warning("Intel Processor Trace: TSC not available\n");

	per_cpu_mmaps = !perf_cpu_map__is_any_cpu_or_is_empty(session->evlist->core.user_requested_cpus);

	auxtrace_info->type = PERF_AUXTRACE_INTEL_PT;
	auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type;
	auxtrace_info->priv[INTEL_PT_TIME_SHIFT] = tc.time_shift;
	auxtrace_info->priv[INTEL_PT_TIME_MULT] = tc.time_mult;
	auxtrace_info->priv[INTEL_PT_TIME_ZERO] = tc.time_zero;
	auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO] = cap_user_time_zero;
	auxtrace_info->priv[INTEL_PT_TSC_BIT] = tsc_bit;
	auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT] = noretcomp_bit;
	auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH] = ptr->have_sched_switch;
	auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE] = ptr->snapshot_mode;
	auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS] = per_cpu_mmaps;
	auxtrace_info->priv[INTEL_PT_MTC_BIT] = mtc_bit;
	auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS] = mtc_freq_bits;
	auxtrace_info->priv[INTEL_PT_TSC_CTC_N] = tsc_ctc_ratio_n;
	auxtrace_info->priv[INTEL_PT_TSC_CTC_D] = tsc_ctc_ratio_d;
	auxtrace_info->priv[INTEL_PT_CYC_BIT] = cyc_bit;
	auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO] = max_non_turbo_ratio;
	auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] = filter_str_len;

	info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1;

	if (filter_str_len) {
		size_t len = intel_pt_filter_bytes(filter);

		strncpy((char *)info, filter, len);
		info += len >> 3;
	}

	*info++ = event_trace;

	return 0;
}

#ifdef HAVE_LIBTRACEEVENT
static int intel_pt_track_switches(struct evlist *evlist)
{
	const char *sched_switch = "sched:sched_switch";
	struct evsel *evsel;
	int err;

	if (!evlist__can_select_event(evlist, sched_switch))
		return -EPERM;

	evsel = evlist__add_sched_switch(evlist, true);
	if (IS_ERR(evsel)) {
		err = PTR_ERR(evsel);
		pr_debug2("%s: failed to create %s, error = %d\n",
			  __func__, sched_switch, err);
		return err;
	}

	evsel->immediate = true;

	return 0;
}
#endif

static bool intel_pt_exclude_guest(void)
{
	int pt_mode;

	if (sysfs__read_int("module/kvm_intel/parameters/pt_mode", &pt_mode))
		pt_mode = 0;

	return pt_mode == 1;
}
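
/*
 * Format the set bits of 'valid' as a human-readable list of values and
 * ranges, e.g. "0,2-5,8", for use in error messages about config terms.
 */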
static void intel_pt_valid_str(char *str, size_t len, u64 valid)
{
	unsigned int val, last = 0, state = 1;
	int p = 0;

	str[0] = '\0';

	for (val = 0; val <= 64; val++, valid >>= 1) {
		if (valid & 1) {
			last = val;
			switch (state) {
			case 0:
				p += scnprintf(str + p, len - p, ",");
				/* Fall through */
			case 1:
				p += scnprintf(str + p, len - p, "%u", val);
				state = 2;
				break;
			case 2:
				state = 3;
				break;
			case 3:
				state = 4;
				break;
			default:
				break;
			}
		} else {
			switch (state) {
			case 3:
				p += scnprintf(str + p, len - p, ",%u", last);
				break;
			case 4:
				p += scnprintf(str + p, len - p, "-%u", last);
				break;
			default:
				break;
			}
			if (state != 1)
				state = 0;
		}
	}
}
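
/*
 * Validate one config term value (e.g. mtc_period) against the bitmap of
 * valid values advertised in the corresponding caps file, printing the list
 * of valid values on error.
 */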
static int intel_pt_val_config_term(struct perf_pmu *intel_pt_pmu, int dirfd,
				    const char *caps, const char *name,
				    const char *supported, u64 config)
{
	char valid_str[256];
	unsigned int shift;
	unsigned long long valid;
	u64 bits;
	int ok;

	if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, caps, "%llx", &valid) != 1)
		valid = 0;

	if (supported &&
	    perf_pmu__scan_file_at(intel_pt_pmu, dirfd, supported, "%d", &ok) == 1 && !ok)
		valid = 0;

	valid |= 1; /* Assume period/threshold 0 is always valid */

	bits = perf_pmu__format_bits(intel_pt_pmu, name);

	config &= bits;

	for (shift = 0; bits && !(bits & 1); shift++)
		bits >>= 1;

	config >>= shift;

	if (config > 63)
		goto out_err;

	if (valid & (1 << config))
		return 0;
out_err:
	intel_pt_valid_str(valid_str, sizeof(valid_str), valid);
	pr_err("Invalid %s for %s. Valid values are: %s\n",
	       name, INTEL_PT_PMU_NAME, valid_str);
	return -EINVAL;
}

static int intel_pt_validate_config(struct perf_pmu *intel_pt_pmu,
				    struct evsel *evsel)
{
	int err, dirfd;
	char c;

	if (!evsel)
		return 0;

	dirfd = perf_pmu__event_source_devices_fd();
	if (dirfd < 0)
		return dirfd;

	/*
	 * If supported, force pass-through config term (pt=1) even if user
	 * sets pt=0, which avoids senseless kernel errors.
	 */
	if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "format/pt", "%c", &c) == 1 &&
	    !(evsel->core.attr.config & 1)) {
		pr_warning("pt=0 doesn't make sense, forcing pt=1\n");
		evsel->core.attr.config |= 1;
	}

	err = intel_pt_val_config_term(intel_pt_pmu, dirfd, "caps/cycle_thresholds",
				       "cyc_thresh", "caps/psb_cyc",
				       evsel->core.attr.config);
	if (err)
		goto out;

	err = intel_pt_val_config_term(intel_pt_pmu, dirfd, "caps/mtc_periods",
				       "mtc_period", "caps/mtc",
				       evsel->core.attr.config);
	if (err)
		goto out;

	err = intel_pt_val_config_term(intel_pt_pmu, dirfd, "caps/psb_periods",
				       "psb_period", "caps/psb_cyc",
				       evsel->core.attr.config);
out:
	close(dirfd);
	return err;
}

static void intel_pt_min_max_sample_sz(struct evlist *evlist,
				       size_t *min_sz, size_t *max_sz)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		size_t sz = evsel->core.attr.aux_sample_size;

		if (!sz)
			continue;
		if (min_sz && (sz < *min_sz || !*min_sz))
			*min_sz = sz;
		if (max_sz && sz > *max_sz)
			*max_sz = sz;
	}
}

/*
 * Currently, there is not enough information to disambiguate different PEBS
 * events, so only allow one.
 */
static bool intel_pt_too_many_aux_output(struct evlist *evlist)
{
	struct evsel *evsel;
	int aux_output_cnt = 0;

	evlist__for_each_entry(evlist, evsel)
		aux_output_cnt += !!evsel->core.attr.aux_output;

	if (aux_output_cnt > 1) {
		pr_err(INTEL_PT_PMU_NAME " supports at most one event with aux-output\n");
		return true;
	}

	return false;
}

static int intel_pt_recording_options(struct auxtrace_record *itr,
				      struct evlist *evlist,
				      struct record_opts *opts)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
	bool have_timing_info, need_immediate = false;
	struct evsel *evsel, *intel_pt_evsel = NULL;
	const struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
	bool privileged = perf_event_paranoid_check(-1);
	u64 tsc_bit;
	int err;

	ptr->evlist = evlist;
	ptr->snapshot_mode = opts->auxtrace_snapshot_mode;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == intel_pt_pmu->type) {
			if (intel_pt_evsel) {
				pr_err("There may be only one " INTEL_PT_PMU_NAME " event\n");
				return -EINVAL;
			}
			evsel->core.attr.freq = 0;
			evsel->core.attr.sample_period = 1;
			evsel->core.attr.exclude_guest = intel_pt_exclude_guest();
			evsel->no_aux_samples = true;
			evsel->needs_auxtrace_mmap = true;
			intel_pt_evsel = evsel;
			opts->full_auxtrace = true;
		}
	}

	if (opts->auxtrace_snapshot_mode && !opts->full_auxtrace) {
		pr_err("Snapshot mode (-S option) requires " INTEL_PT_PMU_NAME " PMU event (-e " INTEL_PT_PMU_NAME ")\n");
		return -EINVAL;
	}

	if (opts->auxtrace_snapshot_mode && opts->auxtrace_sample_mode) {
		pr_err("Snapshot mode (" INTEL_PT_PMU_NAME " PMU) and sample trace cannot be used together\n");
		return -EINVAL;
	}

	if (opts->use_clockid) {
		pr_err("Cannot use clockid (-k option) with " INTEL_PT_PMU_NAME "\n");
		return -EINVAL;
	}

	if (intel_pt_too_many_aux_output(evlist))
		return -EINVAL;

	if (!opts->full_auxtrace)
		return 0;

	if (opts->auxtrace_sample_mode)
		evsel__set_config_if_unset(intel_pt_pmu, intel_pt_evsel,
					   "psb_period", 0);

	err = intel_pt_validate_config(intel_pt_pmu, intel_pt_evsel);
	if (err)
		return err;

	/* Set default sizes for snapshot mode */
	if (opts->auxtrace_snapshot_mode) {
		size_t psb_period = intel_pt_psb_period(intel_pt_pmu, evlist);

		if (!opts->auxtrace_snapshot_size && !opts->auxtrace_mmap_pages) {
			if (privileged) {
				opts->auxtrace_mmap_pages = MiB(4) / page_size;
			} else {
				opts->auxtrace_mmap_pages = KiB(128) / page_size;
				if (opts->mmap_pages == UINT_MAX)
					opts->mmap_pages = KiB(256) / page_size;
			}
		} else if (!opts->auxtrace_mmap_pages && !privileged &&
			   opts->mmap_pages == UINT_MAX) {
			opts->mmap_pages = KiB(256) / page_size;
		}
		if (!opts->auxtrace_snapshot_size)
			opts->auxtrace_snapshot_size =
				opts->auxtrace_mmap_pages * (size_t)page_size;
		if (!opts->auxtrace_mmap_pages) {
			size_t sz = opts->auxtrace_snapshot_size;

			sz = round_up(sz, page_size) / page_size;
			opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
		}
		if (opts->auxtrace_snapshot_size >
				opts->auxtrace_mmap_pages * (size_t)page_size) {
			pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
			       opts->auxtrace_snapshot_size,
			       opts->auxtrace_mmap_pages * (size_t)page_size);
			return -EINVAL;
		}
		if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages) {
			pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
			return -EINVAL;
		}
		pr_debug2("Intel PT snapshot size: %zu\n",
			  opts->auxtrace_snapshot_size);
		if (psb_period &&
		    opts->auxtrace_snapshot_size <= psb_period +
						    INTEL_PT_PSB_PERIOD_NEAR)
			ui__warning("Intel PT snapshot size (%zu) may be too small for PSB period (%zu)\n",
				    opts->auxtrace_snapshot_size, psb_period);
	}

	/* Set default sizes for sample mode */
	if (opts->auxtrace_sample_mode) {
		size_t psb_period = intel_pt_psb_period(intel_pt_pmu, evlist);
		size_t min_sz = 0, max_sz = 0;

		intel_pt_min_max_sample_sz(evlist, &min_sz, &max_sz);
		if (!opts->auxtrace_mmap_pages && !privileged &&
		    opts->mmap_pages == UINT_MAX)
			opts->mmap_pages = KiB(256) / page_size;
		if (!opts->auxtrace_mmap_pages) {
			size_t sz = round_up(max_sz, page_size) / page_size;

			opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
		}
		if (max_sz > opts->auxtrace_mmap_pages * (size_t)page_size) {
			pr_err("Sample size %zu must not be greater than AUX area tracing mmap size %zu\n",
			       max_sz,
			       opts->auxtrace_mmap_pages * (size_t)page_size);
			return -EINVAL;
		}
		pr_debug2("Intel PT min. sample size: %zu max. sample size: %zu\n",
			  min_sz, max_sz);
		if (psb_period &&
		    min_sz <= psb_period + INTEL_PT_PSB_PERIOD_NEAR)
			ui__warning("Intel PT sample size (%zu) may be too small for PSB period (%zu)\n",
				    min_sz, psb_period);
	}

	/* Set default sizes for full trace mode */
	if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
		if (privileged) {
			opts->auxtrace_mmap_pages = MiB(4) / page_size;
		} else {
			opts->auxtrace_mmap_pages = KiB(128) / page_size;
			if (opts->mmap_pages == UINT_MAX)
				opts->mmap_pages = KiB(256) / page_size;
		}
	}

	/* Validate auxtrace_mmap_pages */
	if (opts->auxtrace_mmap_pages) {
		size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;
		size_t min_sz;

		if (opts->auxtrace_snapshot_mode || opts->auxtrace_sample_mode)
			min_sz = KiB(4);
		else
			min_sz = KiB(8);

		if (sz < min_sz || !is_power_of_2(sz)) {
			pr_err("Invalid mmap size for Intel Processor Trace: must be at least %zuKiB and a power of 2\n",
			       min_sz / 1024);
			return -EINVAL;
		}
	}
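
	/*
	 * When not in snapshot or sample mode, set the AUX area watermark to a
	 * quarter of the buffer size (capped at UINT_MAX) so the kernel
	 * signals AUX data at regular intervals as the buffer fills.
	 */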
	if (!opts->auxtrace_snapshot_mode && !opts->auxtrace_sample_mode) {
		size_t aw = opts->auxtrace_mmap_pages * (size_t)page_size / 4;
		u32 aux_watermark = aw > UINT_MAX ? UINT_MAX : aw;

		intel_pt_evsel->core.attr.aux_watermark = aux_watermark;
	}

	intel_pt_parse_terms(intel_pt_pmu, "tsc", &tsc_bit);

	if (opts->full_auxtrace && (intel_pt_evsel->core.attr.config & tsc_bit))
		have_timing_info = true;
	else
		have_timing_info = false;

	/*
	 * Per-cpu recording needs sched_switch events to distinguish different
	 * threads.
	 */
	if (have_timing_info && !perf_cpu_map__is_any_cpu_or_is_empty(cpus) &&
	    !record_opts__no_switch_events(opts)) {
		if (perf_can_record_switch_events()) {
			bool cpu_wide = !target__none(&opts->target) &&
					!target__has_task(&opts->target);

			if (!cpu_wide && perf_can_record_cpu_wide()) {
				struct evsel *switch_evsel;

				switch_evsel = evlist__add_dummy_on_all_cpus(evlist);
				if (!switch_evsel)
					return -ENOMEM;

				switch_evsel->core.attr.context_switch = 1;
				switch_evsel->immediate = true;

				evsel__set_sample_bit(switch_evsel, TID);
				evsel__set_sample_bit(switch_evsel, TIME);
				evsel__set_sample_bit(switch_evsel, CPU);
				evsel__reset_sample_bit(switch_evsel, BRANCH_STACK);

				opts->record_switch_events = false;
				ptr->have_sched_switch = 3;
			} else {
				opts->record_switch_events = true;
				need_immediate = true;
				if (cpu_wide)
					ptr->have_sched_switch = 3;
				else
					ptr->have_sched_switch = 2;
			}
		} else {
#ifdef HAVE_LIBTRACEEVENT
			err = intel_pt_track_switches(evlist);
			if (err == -EPERM)
				pr_debug2("Unable to select sched:sched_switch\n");
			else if (err)
				return err;
			else
				ptr->have_sched_switch = 1;
#endif
		}
	}

	if (have_timing_info && !intel_pt_evsel->core.attr.exclude_kernel &&
	    perf_can_record_text_poke_events() && perf_can_record_cpu_wide())
		opts->text_poke = true;

	if (intel_pt_evsel) {
		/*
		 * To obtain the auxtrace buffer file descriptor, the auxtrace
		 * event must come first.
		 */
		evlist__to_front(evlist, intel_pt_evsel);
		/*
		 * In the case of per-cpu mmaps, we need the CPU on the
		 * AUX event.
		 */
		if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus))
			evsel__set_sample_bit(intel_pt_evsel, CPU);
	}

	/* Add dummy event to keep tracking */
	if (opts->full_auxtrace) {
		bool need_system_wide_tracking;
		struct evsel *tracking_evsel;

		/*
		 * User space tasks can migrate between CPUs, so when tracing
		 * selected CPUs, sideband for all CPUs is still needed.
		 */
		need_system_wide_tracking = opts->target.cpu_list &&
					    !intel_pt_evsel->core.attr.exclude_user;

		tracking_evsel = evlist__add_aux_dummy(evlist, need_system_wide_tracking);
		if (!tracking_evsel)
			return -ENOMEM;

		evlist__set_tracking_event(evlist, tracking_evsel);

		if (need_immediate)
			tracking_evsel->immediate = true;

		/* In per-cpu case, always need the time of mmap events etc */
		if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus)) {
			evsel__set_sample_bit(tracking_evsel, TIME);
			/* And the CPU for switch events */
			evsel__set_sample_bit(tracking_evsel, CPU);
		}
		evsel__reset_sample_bit(tracking_evsel, BRANCH_STACK);
	}

	/*
	 * Warn the user when we do not have enough information to decode i.e.
	 * per-cpu with no sched_switch (except workload-only).
	 */
	if (!ptr->have_sched_switch && !perf_cpu_map__is_any_cpu_or_is_empty(cpus) &&
	    !target__none(&opts->target) &&
	    !intel_pt_evsel->core.attr.exclude_user)
		ui__warning("Intel Processor Trace decoding will not be possible except for kernel tracing!\n");

	return 0;
}
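
/*
 * Snapshot mode control: disable the Intel PT event while a snapshot is being
 * copied out, then re-enable it afterwards.
 */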
static int intel_pt_snapshot_start(struct auxtrace_record *itr)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	struct evsel *evsel;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->core.attr.type == ptr->intel_pt_pmu->type)
			return evsel__disable(evsel);
	}
	return -EINVAL;
}

static int intel_pt_snapshot_finish(struct auxtrace_record *itr)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	struct evsel *evsel;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->core.attr.type == ptr->intel_pt_pmu->type)
			return evsel__enable(evsel);
	}
	return -EINVAL;
}
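
/*
 * Grow the array of per-mmap snapshot references so that it covers at least
 * index 'idx', doubling its size as needed.
 */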
static int intel_pt_alloc_snapshot_refs(struct intel_pt_recording *ptr, int idx)
{
	const size_t sz = sizeof(struct intel_pt_snapshot_ref);
	int cnt = ptr->snapshot_ref_cnt, new_cnt = cnt * 2;
	struct intel_pt_snapshot_ref *refs;

	if (!new_cnt)
		new_cnt = 16;

	while (new_cnt <= idx)
		new_cnt *= 2;

	refs = calloc(new_cnt, sz);
	if (!refs)
		return -ENOMEM;

	memcpy(refs, ptr->snapshot_refs, cnt * sz);

	free(ptr->snapshot_refs);

	ptr->snapshot_refs = refs;
	ptr->snapshot_ref_cnt = new_cnt;

	return 0;
}

static void intel_pt_free_snapshot_refs(struct intel_pt_recording *ptr)
{
	int i;

	for (i = 0; i < ptr->snapshot_ref_cnt; i++)
		zfree(&ptr->snapshot_refs[i].ref_buf);
	zfree(&ptr->snapshot_refs);
}

static void intel_pt_recording_free(struct auxtrace_record *itr)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);

	intel_pt_free_snapshot_refs(ptr);
	free(ptr);
}

static int intel_pt_alloc_snapshot_ref(struct intel_pt_recording *ptr, int idx,
					size_t snapshot_buf_size)
{
	size_t ref_buf_size = ptr->snapshot_ref_buf_size;
	void *ref_buf;

	ref_buf = zalloc(ref_buf_size);
	if (!ref_buf)
		return -ENOMEM;

	ptr->snapshot_refs[idx].ref_buf = ref_buf;
	ptr->snapshot_refs[idx].ref_offset = snapshot_buf_size - ref_buf_size;

	return 0;
}
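
/*
 * Choose how many bytes at the end of the AUX buffer to remember as a
 * reference, so that a later snapshot can tell whether the buffer wrapped.
 * Aim for about two PSB periods, capped at 256KiB, and skip the reference
 * altogether when the snapshot is small or the reference would not help.
 */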
static size_t intel_pt_snapshot_ref_buf_size(struct intel_pt_recording *ptr,
					     size_t snapshot_buf_size)
{
	const size_t max_size = 256 * 1024;
	size_t buf_size = 0, psb_period;

	if (ptr->snapshot_size <= 64 * 1024)
		return 0;

	psb_period = intel_pt_psb_period(ptr->intel_pt_pmu, ptr->evlist);
	if (psb_period)
		buf_size = psb_period * 2;

	if (!buf_size || buf_size > max_size)
		buf_size = max_size;

	if (buf_size >= snapshot_buf_size)
		return 0;

	if (buf_size >= ptr->snapshot_size / 2)
		return 0;

	return buf_size;
}

static int intel_pt_snapshot_init(struct intel_pt_recording *ptr,
				  size_t snapshot_buf_size)
{
	if (ptr->snapshot_init_done)
		return 0;

	ptr->snapshot_init_done = true;

	ptr->snapshot_ref_buf_size = intel_pt_snapshot_ref_buf_size(ptr,
							snapshot_buf_size);

	return 0;
}

/**
 * intel_pt_compare_buffers - compare bytes in a buffer to a circular buffer.
 * @buf1: first buffer
 * @compare_size: number of bytes to compare
 * @buf2: second buffer (a circular buffer)
 * @offs2: offset in second buffer
 * @buf2_size: size of second buffer
 *
 * The comparison allows for the possibility that the bytes to compare in the
 * circular buffer are not contiguous. It is assumed that @compare_size <=
 * @buf2_size. This function returns %false if the bytes are identical, %true
 * otherwise.
 */
static bool intel_pt_compare_buffers(void *buf1, size_t compare_size,
				     void *buf2, size_t offs2, size_t buf2_size)
{
	size_t end2 = offs2 + compare_size, part_size;

	if (end2 <= buf2_size)
		return memcmp(buf1, buf2 + offs2, compare_size);

	part_size = end2 - buf2_size;
	if (memcmp(buf1, buf2 + offs2, part_size))
		return true;

	compare_size -= part_size;

	return memcmp(buf1 + part_size, buf2, compare_size);
}

static bool intel_pt_compare_ref(void *ref_buf, size_t ref_offset,
				 size_t ref_size, size_t buf_size,
				 void *data, size_t head)
{
	size_t ref_end = ref_offset + ref_size;

	if (ref_end > buf_size) {
		if (head > ref_offset || head < ref_end - buf_size)
			return true;
	} else if (head > ref_offset && head < ref_end) {
		return true;
	}

	return intel_pt_compare_buffers(ref_buf, ref_size, data, ref_offset,
					buf_size);
}

static void intel_pt_copy_ref(void *ref_buf, size_t ref_size, size_t buf_size,
			      void *data, size_t head)
{
	if (head >= ref_size) {
		memcpy(ref_buf, data + head - ref_size, ref_size);
	} else {
		memcpy(ref_buf, data, head);
		ref_size -= head;
		memcpy(ref_buf + head, data + buf_size - ref_size, ref_size);
	}
}

static bool intel_pt_wrapped(struct intel_pt_recording *ptr, int idx,
			     struct auxtrace_mmap *mm, unsigned char *data,
			     u64 head)
{
	struct intel_pt_snapshot_ref *ref = &ptr->snapshot_refs[idx];
	bool wrapped;

	wrapped = intel_pt_compare_ref(ref->ref_buf, ref->ref_offset,
				       ptr->snapshot_ref_buf_size, mm->len,
				       data, head);

	intel_pt_copy_ref(ref->ref_buf, ptr->snapshot_ref_buf_size, mm->len,
			  data, head);

	return wrapped;
}
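
/*
 * When there is no reference buffer, guess whether the AUX buffer has wrapped
 * by checking for non-zero data near the end of the buffer: a buffer that has
 * never wrapped is still zero there.
 */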
static bool intel_pt_first_wrap(u64 *data, size_t buf_size)
{
	int i, a, b;

	b = buf_size >> 3;
	a = b - 512;
	if (a < 0)
		a = 0;

	for (i = a; i < b; i++) {
		if (data[i])
			return true;
	}

	return false;
}

static int intel_pt_find_snapshot(struct auxtrace_record *itr, int idx,
				  struct auxtrace_mmap *mm, unsigned char *data,
				  u64 *head, u64 *old)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	bool wrapped;
	int err;

	pr_debug3("%s: mmap index %d old head %zu new head %zu\n",
		  __func__, idx, (size_t)*old, (size_t)*head);

	err = intel_pt_snapshot_init(ptr, mm->len);
	if (err)
		goto out_err;

	if (idx >= ptr->snapshot_ref_cnt) {
		err = intel_pt_alloc_snapshot_refs(ptr, idx);
		if (err)
			goto out_err;
	}

	if (ptr->snapshot_ref_buf_size) {
		if (!ptr->snapshot_refs[idx].ref_buf) {
			err = intel_pt_alloc_snapshot_ref(ptr, idx, mm->len);
			if (err)
				goto out_err;
		}
		wrapped = intel_pt_wrapped(ptr, idx, mm, data, *head);
	} else {
		wrapped = ptr->snapshot_refs[idx].wrapped;
		if (!wrapped && intel_pt_first_wrap((u64 *)data, mm->len)) {
			ptr->snapshot_refs[idx].wrapped = true;
			wrapped = true;
		}
	}

	/*
	 * In full trace mode 'head' continually increases. However in snapshot
	 * mode 'head' is an offset within the buffer. Here 'old' and 'head'
	 * are adjusted to match the full trace case which expects that 'old' is
	 * always less than 'head'.
	 */
	if (wrapped) {
		*old = *head;
		*head += mm->len;
	} else {
		if (mm->mask)
			*old &= mm->mask;
		else
			*old %= mm->len;
		if (*old > *head)
			*head += mm->len;
	}

	pr_debug3("%s: wrap-around %sdetected, adjusted old head %zu adjusted new head %zu\n",
		  __func__, wrapped ? "" : "not ", (size_t)*old, (size_t)*head);

	return 0;

out_err:
	pr_err("%s: failed, error %d\n", __func__, err);
	return err;
}

static u64 intel_pt_reference(struct auxtrace_record *itr __maybe_unused)
{
	return rdtsc();
}
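
/*
 * Allocate and initialize the Intel PT auxtrace_record, wiring up the
 * callbacks used by 'perf record' for AUX area tracing.
 */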
struct auxtrace_record *intel_pt_recording_init(int *err)
{
	struct perf_pmu *intel_pt_pmu = perf_pmus__find(INTEL_PT_PMU_NAME);
	struct intel_pt_recording *ptr;

	if (!intel_pt_pmu)
		return NULL;

	if (setenv("JITDUMP_USE_ARCH_TIMESTAMP", "1", 1)) {
		*err = -errno;
		return NULL;
	}

	ptr = zalloc(sizeof(struct intel_pt_recording));
	if (!ptr) {
		*err = -ENOMEM;
		return NULL;
	}

	ptr->intel_pt_pmu = intel_pt_pmu;
	ptr->itr.recording_options = intel_pt_recording_options;
	ptr->itr.info_priv_size = intel_pt_info_priv_size;
	ptr->itr.info_fill = intel_pt_info_fill;
	ptr->itr.free = intel_pt_recording_free;
	ptr->itr.snapshot_start = intel_pt_snapshot_start;
	ptr->itr.snapshot_finish = intel_pt_snapshot_finish;
	ptr->itr.find_snapshot = intel_pt_find_snapshot;
	ptr->itr.parse_snapshot_options = intel_pt_parse_snapshot_options;
	ptr->itr.reference = intel_pt_reference;
	ptr->itr.read_finish = auxtrace_record__read_finish;
	/*
	 * Decoding starts at a PSB packet. Minimum PSB period is 2K so 4K
	 * should give at least 1 PSB per sample.
	 */
	ptr->itr.default_aux_sample_size = 4096;

	return &ptr->itr;
}