1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __PERF_RECORD_H
3 #define __PERF_RECORD_H
5 * The linux/stddef.h isn't need here, but is needed for __always_inline used
6 * in files included from uapi/linux/perf_event.h such as
7 * /usr/include/linux/swab.h and /usr/include/linux/byteorder/little_endian.h,
8 * detected in at least musl libc, used in Alpine Linux. -acme
11 #include <linux/stddef.h>
12 #include <perf/event.h>
13 #include <linux/types.h>
/*
 * Forward declarations for types only used through pointers in this header.
 * perf_tool/perf_sample/machine are added so the prototypes below don't
 * declare them inside a parameter list (which would scope them to the
 * prototype and trigger -Wvisibility style warnings).
 */
struct machine;
struct perf_event_attr;
struct perf_sample;
struct perf_tool;
/*
 * /usr/include/inttypes.h uses just 'lu' for PRIu64, but we end up defining
 * __u64 as long long unsigned int, and then -Werror=format= kicks in and
 * complains of the mismatched types, so use these two special extra PRI
 * macros to overcome that.
 */
/*
 * NOTE(review): without this LP64 conditional both sets of definitions were
 * active, which is a macro redefinition; restored the guard so exactly one
 * set is chosen per ABI.
 */
#ifdef __LP64__
#define PRI_lu64 "l" PRIu64
#define PRI_lx64 "l" PRIx64
#define PRI_ld64 "l" PRId64
#else
#define PRI_lu64 PRIu64
#define PRI_lx64 PRIx64
#define PRI_ld64 PRId64
#endif
/* The set of sample fields perf uses by default for samples. */
#define PERF_SAMPLE_MASK				\
	(PERF_SAMPLE_IP | PERF_SAMPLE_TID |		\
	 PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR |		\
	 PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID |	\
	 PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD |		\
	 PERF_SAMPLE_IDENTIFIER)
/* perf sample has 16 bits size limit */
#define PERF_SAMPLE_MAX_SIZE (1 << 16)
/*
 * Instruction-pointer/branch sample flags.  PERF_IP_FLAG_CHARS below holds
 * one printable character per flag, in bit order (BRANCH_MISS has no char).
 * NOTE(review): the 'enum {' / '};' wrapper lines were missing from this
 * copy of the file and have been restored.
 */
enum {
	PERF_IP_FLAG_BRANCH		= 1ULL << 0,
	PERF_IP_FLAG_CALL		= 1ULL << 1,
	PERF_IP_FLAG_RETURN		= 1ULL << 2,
	PERF_IP_FLAG_CONDITIONAL	= 1ULL << 3,
	PERF_IP_FLAG_SYSCALLRET		= 1ULL << 4,
	PERF_IP_FLAG_ASYNC		= 1ULL << 5,
	PERF_IP_FLAG_INTERRUPT		= 1ULL << 6,
	PERF_IP_FLAG_TX_ABORT		= 1ULL << 7,
	PERF_IP_FLAG_TRACE_BEGIN	= 1ULL << 8,
	PERF_IP_FLAG_TRACE_END		= 1ULL << 9,
	PERF_IP_FLAG_IN_TX		= 1ULL << 10,
	PERF_IP_FLAG_VMENTRY		= 1ULL << 11,
	PERF_IP_FLAG_VMEXIT		= 1ULL << 12,
	PERF_IP_FLAG_INTR_DISABLE	= 1ULL << 13,
	PERF_IP_FLAG_INTR_TOGGLE	= 1ULL << 14,
	PERF_IP_FLAG_BRANCH_MISS	= 1ULL << 15,
};
/* One character per PERF_IP_FLAG_* bit, used when printing sample flags. */
#define PERF_IP_FLAG_CHARS "bcrosyiABExghDt"
/*
 * All flag bits that denote some kind of branch.
 * NOTE(review): the CALL, ASYNC and VMEXIT members and the closing paren
 * were dropped from this copy of the file; restored from the flag list
 * above — confirm against the upstream header.
 */
#define PERF_BRANCH_MASK		(\
	PERF_IP_FLAG_BRANCH		|\
	PERF_IP_FLAG_CALL		|\
	PERF_IP_FLAG_RETURN		|\
	PERF_IP_FLAG_CONDITIONAL	|\
	PERF_IP_FLAG_SYSCALLRET		|\
	PERF_IP_FLAG_ASYNC		|\
	PERF_IP_FLAG_INTERRUPT		|\
	PERF_IP_FLAG_TX_ABORT		|\
	PERF_IP_FLAG_TRACE_BEGIN	|\
	PERF_IP_FLAG_TRACE_END		|\
	PERF_IP_FLAG_VMENTRY		|\
	PERF_IP_FLAG_VMEXIT)
/* "no information" memory data source, all fields set to their NA value. */
#define PERF_MEM_DATA_SRC_NONE \
	(PERF_MEM_S(OP, NA)   |\
	 PERF_MEM_S(LVL, NA)  |\
	 PERF_MEM_S(SNOOP, NA)|\
	 PERF_MEM_S(LOCK, NA) |\
	 PERF_MEM_S(TLB, NA)  |\
	 PERF_MEM_S(LVLNUM, NA))
/*
 * Attribute type for custom synthesized events — one past INT_MAX so it can
 * never collide with a kernel PERF_TYPE_* value (unsigned arithmetic, no
 * signed overflow).
 */
#define PERF_TYPE_SYNTH		(INT_MAX + 1U)
/*
 * Attribute config for custom synthesized events.
 * NOTE(review): the 'enum perf_synth_id {' / '};' wrapper lines were missing
 * from this copy of the file and have been restored.
 */
enum perf_synth_id {
	PERF_SYNTH_INTEL_PTWRITE,
	PERF_SYNTH_INTEL_MWAIT,
	PERF_SYNTH_INTEL_PWRE,
	PERF_SYNTH_INTEL_EXSTOP,
	PERF_SYNTH_INTEL_PWRX,
	PERF_SYNTH_INTEL_CBR,
	PERF_SYNTH_INTEL_PSB,
	PERF_SYNTH_INTEL_EVT,
	PERF_SYNTH_INTEL_IFLAG_CHG,
};
/*
 * Raw data formats for synthesized events. Note that 4 bytes of padding are
 * present to match the 'size' member of PERF_SAMPLE_RAW data which is always
 * 8-byte aligned. That means we must dereference raw_data with an offset of 4.
 * Refer perf_sample__synth_ptr() and perf_synth__raw_data(). It also means the
 * structure sizes are 4 bytes bigger than the raw_size, refer
 * perf_synth__raw_size().
 */
/*
 * NOTE(review): the bodies of the perf_synth_intel_* structs below have been
 * truncated in this copy of the file — only the opening lines and a few stray
 * members survive, with extraction line numbers embedded in the text.  The
 * bitfield layouts are the wire format of the synthesized raw samples, so do
 * NOT hand-reconstruct them: restore this span from the upstream header.
 */
121 struct perf_synth_intel_ptwrite
{
133 struct perf_synth_intel_mwait
{
147 struct perf_synth_intel_pwre
{
162 struct perf_synth_intel_exstop
{
173 struct perf_synth_intel_pwrx
{
/* NOTE(review): stray surviving member of the pwrx payload bitfield. */
178 u64 deepest_cstate
: 4,
187 struct perf_synth_intel_cbr
{
202 struct perf_synth_intel_psb
{
208 struct perf_synth_intel_evd
{
219 /* Intel PT Event Trace */
220 struct perf_synth_intel_evt
{
/* NOTE(review): trailing variable-length array — likely a flexible array member upstream; confirm. */
232 struct perf_synth_intel_evd evd
[0];
235 struct perf_synth_intel_iflag_chg
{
244 u64 branch_ip
; /* If via_branch */
/*
 * Skip the 4 bytes of padding at the start of a synthesized event's
 * PERF_SAMPLE_RAW payload (see the raw-data format comment above).
 * NOTE(review): the function body was truncated in this copy; restored as
 * 'p + 4' per that comment — confirm against upstream.
 */
static inline void *perf_synth__raw_data(void *p)
{
	return p + 4;
}
/* Raw size of a synth struct: 4 bytes smaller than sizeof, see comment above. */
#define perf_synth__raw_size(d) (sizeof(d) - 4)

/* True if the sample's raw payload is too small to hold synth struct 'd'. */
#define perf_sample__bad_synth_size(s, d) ((s)->raw_size < sizeof(d) - 4)
/*
 * Kinds of PERF_RECORD_STAT_ROUND records.
 * NOTE(review): the 'enum {' / '};' wrapper lines were missing from this
 * copy of the file and have been restored.
 */
enum {
	PERF_STAT_ROUND_TYPE__INTERVAL	= 0,
	PERF_STAT_ROUND_TYPE__FINAL	= 1,
};
void perf_event__print_totals(void);

struct perf_record_stat_config;
struct perf_stat_config;

/* Decode a PERF_RECORD_STAT_CONFIG event into a perf_stat_config. */
void perf_event__read_stat_config(struct perf_stat_config *config,
				  struct perf_record_stat_config *event);
/*
 * Per-record-type handlers, suitable as perf_tool callbacks — one for each
 * PERF_RECORD_* type named in the function, plus the generic
 * perf_event__process() dispatcher at the end.
 */
int perf_event__process_comm(const struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine);
int perf_event__process_lost(const struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine);
int perf_event__process_lost_samples(const struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct machine *machine);
int perf_event__process_aux(const struct perf_tool *tool,
			    union perf_event *event,
			    struct perf_sample *sample,
			    struct machine *machine);
int perf_event__process_itrace_start(const struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct machine *machine);
int perf_event__process_aux_output_hw_id(const struct perf_tool *tool,
					 union perf_event *event,
					 struct perf_sample *sample,
					 struct machine *machine);
int perf_event__process_switch(const struct perf_tool *tool,
			       union perf_event *event,
			       struct perf_sample *sample,
			       struct machine *machine);
int perf_event__process_namespaces(const struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine);
int perf_event__process_cgroup(const struct perf_tool *tool,
			       union perf_event *event,
			       struct perf_sample *sample,
			       struct machine *machine);
int perf_event__process_mmap(const struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine);
int perf_event__process_mmap2(const struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample,
			      struct machine *machine);
int perf_event__process_fork(const struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine);
int perf_event__process_exit(const struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine);
int perf_event__exit_del_thread(const struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct machine *machine);
int perf_event__process_ksymbol(const struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct machine *machine);
int perf_event__process_bpf(const struct perf_tool *tool,
			    union perf_event *event,
			    struct perf_sample *sample,
			    struct machine *machine);
int perf_event__process_text_poke(const struct perf_tool *tool,
				  union perf_event *event,
				  struct perf_sample *sample,
				  struct machine *machine);
int perf_event__process(const struct perf_tool *tool,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine);
344 bool is_bts_event(struct perf_event_attr
*attr
);
345 bool sample_addr_correlates_sym(struct perf_event_attr
*attr
);
347 const char *perf_event__name(unsigned int id
);
349 size_t perf_event__fprintf_comm(union perf_event
*event
, FILE *fp
);
350 size_t perf_event__fprintf_mmap(union perf_event
*event
, FILE *fp
);
351 size_t perf_event__fprintf_mmap2(union perf_event
*event
, FILE *fp
);
352 size_t perf_event__fprintf_task(union perf_event
*event
, FILE *fp
);
353 size_t perf_event__fprintf_aux(union perf_event
*event
, FILE *fp
);
354 size_t perf_event__fprintf_itrace_start(union perf_event
*event
, FILE *fp
);
355 size_t perf_event__fprintf_aux_output_hw_id(union perf_event
*event
, FILE *fp
);
356 size_t perf_event__fprintf_switch(union perf_event
*event
, FILE *fp
);
357 size_t perf_event__fprintf_thread_map(union perf_event
*event
, FILE *fp
);
358 size_t perf_event__fprintf_cpu_map(union perf_event
*event
, FILE *fp
);
359 size_t perf_event__fprintf_namespaces(union perf_event
*event
, FILE *fp
);
360 size_t perf_event__fprintf_cgroup(union perf_event
*event
, FILE *fp
);
361 size_t perf_event__fprintf_ksymbol(union perf_event
*event
, FILE *fp
);
362 size_t perf_event__fprintf_bpf(union perf_event
*event
, FILE *fp
);
363 size_t perf_event__fprintf_text_poke(union perf_event
*event
, struct machine
*machine
,FILE *fp
);
364 size_t perf_event__fprintf(union perf_event
*event
, struct machine
*machine
, FILE *fp
);
366 int kallsyms__get_function_start(const char *kallsyms_filename
,
367 const char *symbol_name
, u64
*addr
);
368 int kallsyms__get_symbol_start(const char *kallsyms_filename
,
369 const char *symbol_name
, u64
*addr
);
371 void event_attr_init(struct perf_event_attr
*attr
);
373 int perf_event_paranoid(void);
374 bool perf_event_paranoid_check(int max_level
);
376 extern int sysctl_perf_event_max_stack
;
377 extern int sysctl_perf_event_max_contexts_per_stack
;
378 extern unsigned int proc_map_timeout
;
380 #define PAGE_SIZE_NAME_LEN 32
381 char *get_page_size_name(u64 size
, char *str
);
383 void arch_perf_parse_sample_weight(struct perf_sample
*data
, const __u64
*array
, u64 type
);
384 void arch_perf_synthesize_sample_weight(const struct perf_sample
*data
, __u64
*array
, u64 type
);
385 const char *arch_perf_header_entry(const char *se_header
);
386 int arch_support_sort_key(const char *sort_key
);
388 static inline bool perf_event_header__cpumode_is_guest(u8 cpumode
)
390 return cpumode
== PERF_RECORD_MISC_GUEST_KERNEL
||
391 cpumode
== PERF_RECORD_MISC_GUEST_USER
;
394 static inline bool perf_event_header__misc_is_guest(u16 misc
)
396 return perf_event_header__cpumode_is_guest(misc
& PERF_RECORD_MISC_CPUMODE_MASK
);
399 static inline bool perf_event_header__is_guest(const struct perf_event_header
*header
)
401 return perf_event_header__misc_is_guest(header
->misc
);
404 static inline bool perf_event__is_guest(const union perf_event
*event
)
406 return perf_event_header__is_guest(&event
->header
);
409 #endif /* __PERF_RECORD_H */