// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel-bts.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#include <errno.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/zalloc.h>

#include "../../../util/cpumap.h"
#include "../../../util/event.h"
#include "../../../util/evsel.h"
#include "../../../util/evlist.h"
#include "../../../util/mmap.h"
#include "../../../util/session.h"
#include "../../../util/pmus.h"
#include "../../../util/debug.h"
#include "../../../util/record.h"
#include "../../../util/tsc.h"
#include "../../../util/auxtrace.h"
#include "../../../util/intel-bts.h"
#include <internal/lib.h> // page_size

#define KiB(x) ((x) * 1024)
#define MiB(x) ((x) * 1024 * 1024)
#define KiB_MASK(x) (KiB(x) - 1)
#define MiB_MASK(x) (MiB(x) - 1)
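
/* Per-AUX-mmap state used in snapshot mode to track buffer wrap-around */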
struct intel_bts_snapshot_ref {
	void	*ref_buf;
	size_t	ref_offset;
	bool	wrapped;
};

struct intel_bts_recording {
	struct auxtrace_record		itr;
	struct perf_pmu			*intel_bts_pmu;
	struct evlist			*evlist;
	bool				snapshot_mode;
	size_t				snapshot_size;
	int				snapshot_ref_cnt;
	struct intel_bts_snapshot_ref	*snapshot_refs;
};

static size_t
intel_bts_info_priv_size(struct auxtrace_record *itr __maybe_unused,
			 struct evlist *evlist __maybe_unused)
{
	return INTEL_BTS_AUXTRACE_PRIV_SIZE;
}
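
/*
 * Fill the PERF_RECORD_AUXTRACE_INFO private data with the BTS PMU type,
 * TSC conversion parameters and the snapshot mode flag.
 */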
static int intel_bts_info_fill(struct auxtrace_record *itr,
			       struct perf_session *session,
			       struct perf_record_auxtrace_info *auxtrace_info,
			       size_t priv_size)
{
	struct intel_bts_recording *btsr =
			container_of(itr, struct intel_bts_recording, itr);
	struct perf_pmu *intel_bts_pmu = btsr->intel_bts_pmu;
	struct perf_event_mmap_page *pc;
	struct perf_tsc_conversion tc = { .time_mult = 0, };
	bool cap_user_time_zero = false;
	int err;

	if (priv_size != INTEL_BTS_AUXTRACE_PRIV_SIZE)
		return -EINVAL;

	if (!session->evlist->core.nr_mmaps)
		return -EINVAL;

	pc = session->evlist->mmap[0].core.base;
	if (pc) {
		err = perf_read_tsc_conversion(pc, &tc);
		if (err) {
			if (err != -EOPNOTSUPP)
				return err;
		} else {
			cap_user_time_zero = tc.time_mult != 0;
		}
	}

	if (!cap_user_time_zero)
		ui__warning("Intel BTS: TSC not available\n");

	auxtrace_info->type = PERF_AUXTRACE_INTEL_BTS;
	auxtrace_info->priv[INTEL_BTS_PMU_TYPE] = intel_bts_pmu->type;
	auxtrace_info->priv[INTEL_BTS_TIME_SHIFT] = tc.time_shift;
	auxtrace_info->priv[INTEL_BTS_TIME_MULT] = tc.time_mult;
	auxtrace_info->priv[INTEL_BTS_TIME_ZERO] = tc.time_zero;
	auxtrace_info->priv[INTEL_BTS_CAP_USER_TIME_ZERO] = cap_user_time_zero;
	auxtrace_info->priv[INTEL_BTS_SNAPSHOT_MODE] = btsr->snapshot_mode;

	return 0;
}
static int intel_bts_recording_options(struct auxtrace_record *itr,
				       struct evlist *evlist,
				       struct record_opts *opts)
{
	struct intel_bts_recording *btsr =
			container_of(itr, struct intel_bts_recording, itr);
	struct perf_pmu *intel_bts_pmu = btsr->intel_bts_pmu;
	struct evsel *evsel, *intel_bts_evsel = NULL;
	const struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
	bool privileged = perf_event_paranoid_check(-1);

	if (opts->auxtrace_sample_mode) {
		pr_err("Intel BTS does not support AUX area sampling\n");
		return -EINVAL;
	}

	btsr->evlist = evlist;
	btsr->snapshot_mode = opts->auxtrace_snapshot_mode;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == intel_bts_pmu->type) {
			if (intel_bts_evsel) {
				pr_err("There may be only one " INTEL_BTS_PMU_NAME " event\n");
				return -EINVAL;
			}
			evsel->core.attr.freq = 0;
			evsel->core.attr.sample_period = 1;
			evsel->needs_auxtrace_mmap = true;
			intel_bts_evsel = evsel;
			opts->full_auxtrace = true;
		}
	}

	if (opts->auxtrace_snapshot_mode && !opts->full_auxtrace) {
		pr_err("Snapshot mode (-S option) requires " INTEL_BTS_PMU_NAME
		       " PMU event (-e " INTEL_BTS_PMU_NAME ")\n");
		return -EINVAL;
	}

	if (!opts->full_auxtrace)
		return 0;

	if (opts->full_auxtrace && !perf_cpu_map__is_any_cpu_or_is_empty(cpus)) {
		pr_err(INTEL_BTS_PMU_NAME " does not support per-cpu recording\n");
		return -EINVAL;
	}

	/* Set default sizes for snapshot mode */
	if (opts->auxtrace_snapshot_mode) {
		if (!opts->auxtrace_snapshot_size && !opts->auxtrace_mmap_pages) {
			if (privileged) {
				opts->auxtrace_mmap_pages = MiB(4) / page_size;
			} else {
				opts->auxtrace_mmap_pages = KiB(128) / page_size;
				if (opts->mmap_pages == UINT_MAX)
					opts->mmap_pages = KiB(256) / page_size;
			}
		} else if (!opts->auxtrace_mmap_pages && !privileged &&
			   opts->mmap_pages == UINT_MAX) {
			opts->mmap_pages = KiB(256) / page_size;
		}
		if (!opts->auxtrace_snapshot_size)
			opts->auxtrace_snapshot_size =
				opts->auxtrace_mmap_pages * (size_t)page_size;
		if (!opts->auxtrace_mmap_pages) {
			size_t sz = opts->auxtrace_snapshot_size;

			sz = round_up(sz, page_size) / page_size;
			opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
		}
		if (opts->auxtrace_snapshot_size >
				opts->auxtrace_mmap_pages * (size_t)page_size) {
			pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
			       opts->auxtrace_snapshot_size,
			       opts->auxtrace_mmap_pages * (size_t)page_size);
			return -EINVAL;
		}
		if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages) {
			pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
			return -EINVAL;
		}
		pr_debug2("Intel BTS snapshot size: %zu\n",
			  opts->auxtrace_snapshot_size);
	}

	/* Set default sizes for full trace mode */
	if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
		if (privileged) {
			opts->auxtrace_mmap_pages = MiB(4) / page_size;
		} else {
			opts->auxtrace_mmap_pages = KiB(128) / page_size;
			if (opts->mmap_pages == UINT_MAX)
				opts->mmap_pages = KiB(256) / page_size;
		}
	}

	/* Validate auxtrace_mmap_pages */
	if (opts->auxtrace_mmap_pages) {
		size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;
		size_t min_sz;

		if (opts->auxtrace_snapshot_mode)
			min_sz = KiB(4);
		else
			min_sz = KiB(8);

		if (sz < min_sz || !is_power_of_2(sz)) {
			pr_err("Invalid mmap size for Intel BTS: must be at least %zuKiB and a power of 2\n",
			       min_sz / 1024);
			return -EINVAL;
		}
	}

	if (intel_bts_evsel) {
		/*
		 * To obtain the auxtrace buffer file descriptor, the auxtrace event
		 * must come first.
		 */
		evlist__to_front(evlist, intel_bts_evsel);
		/*
		 * In the case of per-cpu mmaps, we need the CPU on the
		 * AUX event.
		 */
		if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus))
			evsel__set_sample_bit(intel_bts_evsel, CPU);
	}

	/* Add dummy event to keep tracking */
	if (opts->full_auxtrace) {
		struct evsel *tracking_evsel;
		int err;

		err = parse_event(evlist, "dummy:u");
		if (err)
			return err;

		tracking_evsel = evlist__last(evlist);

		evlist__set_tracking_event(evlist, tracking_evsel);

		tracking_evsel->core.attr.freq = 0;
		tracking_evsel->core.attr.sample_period = 1;
	}

	return 0;
}
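
/* Parse the snapshot size argument of the -S option; no argument selects the default size */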
static int intel_bts_parse_snapshot_options(struct auxtrace_record *itr,
					    struct record_opts *opts,
					    const char *str)
{
	struct intel_bts_recording *btsr =
			container_of(itr, struct intel_bts_recording, itr);
	unsigned long long snapshot_size = 0;
	char *endptr;

	if (str) {
		snapshot_size = strtoull(str, &endptr, 0);
		if (*endptr || snapshot_size > SIZE_MAX)
			return -1;
	}

	opts->auxtrace_snapshot_mode = true;
	opts->auxtrace_snapshot_size = snapshot_size;

	btsr->snapshot_size = snapshot_size;

	return 0;
}
static u64 intel_bts_reference(struct auxtrace_record *itr __maybe_unused)
{
	return rdtsc();
}
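
/*
 * Grow the snapshot_refs array, doubling its size, until it covers AUX
 * mmap index 'idx'.
 */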
static int intel_bts_alloc_snapshot_refs(struct intel_bts_recording *btsr,
					 int idx)
{
	const size_t sz = sizeof(struct intel_bts_snapshot_ref);
	int cnt = btsr->snapshot_ref_cnt, new_cnt = cnt * 2;
	struct intel_bts_snapshot_ref *refs;

	if (!new_cnt)
		new_cnt = 16;

	while (new_cnt <= idx)
		new_cnt *= 2;

	refs = calloc(new_cnt, sz);
	if (!refs)
		return -ENOMEM;

	memcpy(refs, btsr->snapshot_refs, cnt * sz);

	btsr->snapshot_refs = refs;
	btsr->snapshot_ref_cnt = new_cnt;

	return 0;
}

static void intel_bts_free_snapshot_refs(struct intel_bts_recording *btsr)
{
	int i;

	for (i = 0; i < btsr->snapshot_ref_cnt; i++)
		zfree(&btsr->snapshot_refs[i].ref_buf);
	zfree(&btsr->snapshot_refs);
}

static void intel_bts_recording_free(struct auxtrace_record *itr)
{
	struct intel_bts_recording *btsr =
			container_of(itr, struct intel_bts_recording, itr);

	intel_bts_free_snapshot_refs(btsr);
	free(btsr);
}

static int intel_bts_snapshot_start(struct auxtrace_record *itr)
{
	struct intel_bts_recording *btsr =
			container_of(itr, struct intel_bts_recording, itr);
	struct evsel *evsel;

	evlist__for_each_entry(btsr->evlist, evsel) {
		if (evsel->core.attr.type == btsr->intel_bts_pmu->type)
			return evsel__disable(evsel);
	}
	return -EINVAL;
}

static int intel_bts_snapshot_finish(struct auxtrace_record *itr)
{
	struct intel_bts_recording *btsr =
			container_of(itr, struct intel_bts_recording, itr);
	struct evsel *evsel;

	evlist__for_each_entry(btsr->evlist, evsel) {
		if (evsel->core.attr.type == btsr->intel_bts_pmu->type)
			return evsel__enable(evsel);
	}
	return -EINVAL;
}
static bool intel_bts_first_wrap(u64 *data, size_t buf_size)
{
	int i, a, b;

	b = buf_size >> 3;
	a = b - 512;
	if (a < 0)
		a = 0;

	for (i = a; i < b; i++) {
		if (data[i])
			return true;
	}

	return false;
}

static int intel_bts_find_snapshot(struct auxtrace_record *itr, int idx,
				   struct auxtrace_mmap *mm, unsigned char *data,
				   u64 *head, u64 *old)
{
	struct intel_bts_recording *btsr =
			container_of(itr, struct intel_bts_recording, itr);
	bool wrapped;
	int err;

	pr_debug3("%s: mmap index %d old head %zu new head %zu\n",
		  __func__, idx, (size_t)*old, (size_t)*head);

	if (idx >= btsr->snapshot_ref_cnt) {
		err = intel_bts_alloc_snapshot_refs(btsr, idx);
		if (err)
			goto out_err;
	}

	wrapped = btsr->snapshot_refs[idx].wrapped;
	if (!wrapped && intel_bts_first_wrap((u64 *)data, mm->len)) {
		btsr->snapshot_refs[idx].wrapped = true;
		wrapped = true;
	}

	/*
	 * In full trace mode 'head' continually increases.  However in snapshot
	 * mode 'head' is an offset within the buffer.  Here 'old' and 'head'
	 * are adjusted to match the full trace case which expects that 'old' is
	 * always less than 'head'.
	 */
	if (wrapped) {
		*old = *head;
		*head += mm->len;
	} else {
		if (mm->mask)
			*old &= mm->mask;
		else
			*old %= mm->len;
		if (*old > *head)
			*head += mm->len;
	}

	pr_debug3("%s: wrap-around %sdetected, adjusted old head %zu adjusted new head %zu\n",
		  __func__, wrapped ? "" : "not ", (size_t)*old, (size_t)*head);

	return 0;

out_err:
	pr_err("%s: failed, error %d\n", __func__, err);
	return err;
}
struct auxtrace_record *intel_bts_recording_init(int *err)
{
	struct perf_pmu *intel_bts_pmu = perf_pmus__find(INTEL_BTS_PMU_NAME);
	struct intel_bts_recording *btsr;

	if (!intel_bts_pmu)
		return NULL;

	if (setenv("JITDUMP_USE_ARCH_TIMESTAMP", "1", 1)) {
		*err = -errno;
		return NULL;
	}

	btsr = zalloc(sizeof(struct intel_bts_recording));
	if (!btsr) {
		*err = -ENOMEM;
		return NULL;
	}

	btsr->intel_bts_pmu = intel_bts_pmu;
	btsr->itr.recording_options = intel_bts_recording_options;
	btsr->itr.info_priv_size = intel_bts_info_priv_size;
	btsr->itr.info_fill = intel_bts_info_fill;
	btsr->itr.free = intel_bts_recording_free;
	btsr->itr.snapshot_start = intel_bts_snapshot_start;
	btsr->itr.snapshot_finish = intel_bts_snapshot_finish;
	btsr->itr.find_snapshot = intel_bts_find_snapshot;
	btsr->itr.parse_snapshot_options = intel_bts_parse_snapshot_options;
	btsr->itr.reference = intel_bts_reference;
	btsr->itr.read_finish = auxtrace_record__read_finish;
	btsr->itr.alignment = sizeof(struct branch);

	return &btsr->itr;
}