/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <linux/bitops.h>
#include <api/fs/debugfs.h>
#include <traceevent/event-parse.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <sys/resource.h>
#include "thread_map.h"
#include "perf_regs.h"
#include "trace-event.h"
static struct {
	bool sample_id_all;
	bool exclude_guest;
	bool mmap2;
} perf_missing_features;

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}
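
/*
 * Illustrative note (not part of the original file): the sample size is just
 * 8 bytes per bit set in the sample_type mask, so for a hypothetical type the
 * arithmetic works out as:
 *
 *	u64 st = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME;
 *	int sz = __perf_evsel__sample_size(st);	// 3 bits set -> 3 * 8 == 24
 */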
/**
 * __perf_evsel__calc_id_pos - calculate id_pos.
 * @sample_type: sample type
 *
 * This function returns the position of the event id (PERF_SAMPLE_ID or
 * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct
 * sample_event.
 */
static int __perf_evsel__calc_id_pos(u64 sample_type)
{
	int idx = 0;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 0;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_IP)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TID)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TIME)
		idx += 1;

	if (sample_type & PERF_SAMPLE_ADDR)
		idx += 1;

	return idx;
}
/**
 * __perf_evsel__calc_is_pos - calculate is_pos.
 * @sample_type: sample type
 *
 * This function returns the position (counting backwards) of the event id
 * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
 * sample_id_all is used there is an id sample appended to non-sample events.
 */
static int __perf_evsel__calc_is_pos(u64 sample_type)
{
	int idx = 1;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 1;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_CPU)
		idx += 1;

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		idx += 1;

	return idx;
}
void perf_evsel__calc_id_pos(struct perf_evsel *evsel)
{
	evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type);
	evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type);
}
void hists__init(struct hists *hists)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
}
void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
				  enum perf_event_sample_format bit)
{
	if (!(evsel->attr.sample_type & bit)) {
		evsel->attr.sample_type |= bit;
		evsel->sample_size += sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}

void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
				    enum perf_event_sample_format bit)
{
	if (evsel->attr.sample_type & bit) {
		evsel->attr.sample_type &= ~bit;
		evsel->sample_size -= sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}
void perf_evsel__set_sample_id(struct perf_evsel *evsel,
			       bool can_sample_identifier)
{
	if (can_sample_identifier) {
		perf_evsel__reset_sample_bit(evsel, ID);
		perf_evsel__set_sample_bit(evsel, IDENTIFIER);
	} else {
		perf_evsel__set_sample_bit(evsel, ID);
	}
	evsel->attr.read_format |= PERF_FORMAT_ID;
}
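
/*
 * Usage sketch (illustrative only): callers normally go through the
 * perf_evsel__set_sample_bit()/perf_evsel__reset_sample_bit() wrappers from
 * evsel.h, which keep sample_size and id_pos/is_pos consistent, e.g.:
 *
 *	perf_evsel__set_sample_bit(evsel, CPU);
 *	perf_evsel__set_sample_bit(evsel, TIME);
 *	perf_evsel__set_sample_id(evsel, true);	// prefer PERF_SAMPLE_IDENTIFIER
 */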
void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx	= idx;
	evsel->attr	= *attr;
	evsel->leader	= evsel;
	INIT_LIST_HEAD(&evsel->node);
	hists__init(&evsel->hists);
	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
	perf_evsel__calc_id_pos(evsel);
}
struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	return evsel;
}
struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL) {
		struct perf_event_attr attr = {
			.type	     = PERF_TYPE_TRACEPOINT,
			.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		};

		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
			goto out_free;

		evsel->tp_format = trace_event__tp_format(sys, name);
		if (evsel->tp_format == NULL)
			goto out_free;

		event_attr_init(&attr);
		attr.config = evsel->tp_format->id;
		attr.sample_period = 1;
		perf_evsel__init(evsel, &attr, idx);
	}

	return evsel;

out_free:
	zfree(&evsel->name);
	free(evsel);
	return NULL;
}
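
/*
 * Usage sketch (illustrative, assuming the perf_evsel__newtp() convenience
 * wrapper from evsel.h): create a tracepoint evsel and later pull a field out
 * of a parsed sample with perf_evsel__intval(), defined further down:
 *
 *	struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch");
 *	u64 prev_pid;
 *
 *	if (evsel != NULL)
 *		prev_pid = perf_evsel__intval(evsel, &sample, "prev_pid");
 */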
const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};

static const char *__perf_evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
		return perf_evsel__hw_names[config];

	return "unknown-hardware";
}
static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod)	do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		} } while(0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';
	return r;
}
static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"cpu-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
	"dummy",
};

static const char *__perf_evsel__sw_name(u64 config)
{
	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
		return perf_evsel__sw_names[config];
	return "unknown-software";
}

static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
	int r;

	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");

	return r;
}
static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	struct perf_event_attr *attr = &evsel->attr;
	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
 { "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
 { "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
 { "LLC",	"L2",							},
 { "dTLB",	"d-tlb",	"Data-TLB",				},
 { "iTLB",	"i-tlb",	"Instruction-TLB",			},
 { "branch",	"branches",	"bpu",		"btb",		"bpc",	},
 { "node",								},
};

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
 { "load",	"loads",	"read",					},
 { "store",	"stores",	"write",				},
 { "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
};

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				       [PERF_EVSEL__MAX_ALIASES] = {
 { "refs",	"Reference",	"ops",		"access",		},
 { "misses",	"miss",							},
};
#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
 [C(L1D)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)]	= (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)]	= (CACHE_READ),
 [C(BPU)]	= (CACHE_READ),
 [C(NODE)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};
bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
	if (perf_evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */

	return false;	/* invalid */
}
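
/*
 * Illustrative example (not in the original file): a PERF_TYPE_HW_CACHE
 * config packs type, op and result into the low three bytes, so e.g.
 * "L1-dcache-load-misses" corresponds to:
 *
 *	config = PERF_COUNT_HW_CACHE_L1D |
 *		 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *		 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 *
 * and perf_evsel__is_cache_op_valid(PERF_COUNT_HW_CACHE_L1D,
 * PERF_COUNT_HW_CACHE_OP_READ) returns true for it.
 */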
int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
				 perf_evsel__hw_cache_op[op][0],
				 perf_evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
			 perf_evsel__hw_cache_op[op][1]);
}
static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
	u8 op, result, type = (config >>  0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	if (type > PERF_COUNT_HW_CACHE_MAX)
		goto out_err;

	op = (config >>  8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op > PERF_COUNT_HW_CACHE_OP_MAX)
		goto out_err;

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result > PERF_COUNT_HW_CACHE_RESULT_MAX)
		goto out_err;

	err = "invalid-cache";
	if (!perf_evsel__is_cache_op_valid(type, op))
		goto out_err;

	return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
	return scnprintf(bf, size, "%s", err);
}
static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}
static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}
const char *perf_evsel__name(struct perf_evsel *evsel)
{
	char bf[128];

	if (evsel->name)
		return evsel->name;

	switch (evsel->attr.type) {
	case PERF_TYPE_RAW:
		perf_evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		perf_evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		perf_evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
			  evsel->attr.type);
		break;
	}

	evsel->name = strdup(bf);

	return evsel->name ?: "unknown";
}
const char *perf_evsel__group_name(struct perf_evsel *evsel)
{
	return evsel->group_name ?: "anon group";
}
int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
{
	int ret;
	struct perf_evsel *pos;
	const char *group_name = perf_evsel__group_name(evsel);

	ret = scnprintf(buf, size, "%s", group_name);

	ret += scnprintf(buf + ret, size - ret, " { %s",
			 perf_evsel__name(evsel));

	for_each_group_member(pos, evsel)
		ret += scnprintf(buf + ret, size - ret, ", %s",
				 perf_evsel__name(pos));

	ret += scnprintf(buf + ret, size - ret, " }");

	return ret;
}
/*
 * The enable_on_exec/disabled value strategy:
 *
 * 1) For any type of traced program:
 *    - all independent events and group leaders are disabled
 *    - all group members are enabled
 *
 *    Group members are ruled by group leaders. They need to
 *    be enabled, because the group scheduling relies on that.
 *
 * 2) For traced programs executed by perf:
 *    - all independent events and group leaders have
 *      enable_on_exec set
 *    - we don't specifically enable or disable any event during
 *      the record command
 *
 *    Independent events and group leaders are initially disabled
 *    and get enabled by exec. Group members are ruled by group
 *    leaders as stated in 1).
 *
 * 3) For traced programs attached by perf (pid/tid):
 *    - we specifically enable or disable all events during
 *      the record command
 *
 *    When attaching events to an already running traced program we
 *    enable/disable events specifically, as there's no
 *    initial traced exec call.
 */
void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
{
	struct perf_evsel *leader = evsel->leader;
	struct perf_event_attr *attr = &evsel->attr;
	int track = !evsel->idx; /* only the first counter needs these */
	bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;

	attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;

	perf_evsel__set_sample_bit(evsel, IP);
	perf_evsel__set_sample_bit(evsel, TID);

	if (evsel->sample_read) {
		perf_evsel__set_sample_bit(evsel, READ);

		/*
		 * We need ID even in case of single event, because
		 * PERF_SAMPLE_READ processes ID specific data.
		 */
		perf_evsel__set_sample_id(evsel, false);

		/*
		 * Apply group format only if we belong to a group
		 * with more than one member.
		 */
		if (leader->nr_members > 1) {
			attr->read_format |= PERF_FORMAT_GROUP;
			attr->inherit = 0;
		}
	}

	/*
	 * We default some events to a default interval of 1. But keep
	 * it a weak assumption overridable by the user.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			perf_evsel__set_sample_bit(evsel, PERIOD);
			attr->freq	  = 1;
			attr->sample_freq = opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	/*
	 * Disable sampling for all group members other
	 * than leader in case leader 'leads' the sampling.
	 */
	if ((leader != evsel) && leader->sample_read) {
		attr->sample_freq   = 0;
		attr->sample_period = 0;
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		perf_evsel__set_sample_bit(evsel, ADDR);
		attr->mmap_data = track;
	}

	if (opts->call_graph) {
		perf_evsel__set_sample_bit(evsel, CALLCHAIN);

		if (opts->call_graph == CALLCHAIN_DWARF) {
			perf_evsel__set_sample_bit(evsel, REGS_USER);
			perf_evsel__set_sample_bit(evsel, STACK_USER);
			attr->sample_regs_user = PERF_REGS_MASK;
			attr->sample_stack_user = opts->stack_dump_size;
			attr->exclude_callchain_user = 1;
		}
	}

	if (target__has_cpu(&opts->target))
		perf_evsel__set_sample_bit(evsel, CPU);

	if (opts->period)
		perf_evsel__set_sample_bit(evsel, PERIOD);

	if (!perf_missing_features.sample_id_all &&
	    (opts->sample_time || !opts->no_inherit ||
	     target__has_cpu(&opts->target) || per_cpu))
		perf_evsel__set_sample_bit(evsel, TIME);

	if (opts->raw_samples) {
		perf_evsel__set_sample_bit(evsel, TIME);
		perf_evsel__set_sample_bit(evsel, RAW);
		perf_evsel__set_sample_bit(evsel, CPU);
	}

	if (opts->sample_address)
		perf_evsel__set_sample_bit(evsel, DATA_SRC);

	if (opts->no_buffering) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}

	if (opts->branch_stack) {
		perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type = opts->branch_stack;
	}

	if (opts->sample_weight)
		perf_evsel__set_sample_bit(evsel, WEIGHT);

	attr->mmap = track;
	attr->comm = track;

	if (opts->sample_transaction)
		perf_evsel__set_sample_bit(evsel, TRANSACTION);

	/*
	 * XXX see the function comment above
	 *
	 * Disabling only independent events or group leaders,
	 * keeping group members enabled.
	 */
	if (perf_evsel__is_group_leader(evsel))
		attr->disabled = 1;

	/*
	 * Setting enable_on_exec for independent events and
	 * group leaders for traced programs executed by perf.
	 */
	if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) &&
	    !opts->initial_delay)
		attr->enable_on_exec = 1;
}
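
/*
 * Illustrative sketch (an assumption, not original code): for something like
 * 'perf record -g -F 4000 ./workload', opts->freq and opts->call_graph are
 * set, so perf_evsel__config() ends up with roughly:
 *
 *	attr->freq = 1, attr->sample_freq = 4000;
 *	sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_PERIOD |
 *		       PERF_SAMPLE_CALLCHAIN;	// plus TIME/CPU depending on target
 */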
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}
static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthreads,
				 int ioc, void *arg)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			int fd = FD(evsel, cpu, thread),
			    err = ioctl(fd, ioc, arg);

			if (err)
				return err;
		}
	}

	return 0;
}
int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
			   const char *filter)
{
	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
				     PERF_EVENT_IOC_SET_FILTER,
				     (void *)filter);
}

int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
				     PERF_EVENT_IOC_ENABLE,
				     0);
}
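
/*
 * Usage sketch (illustrative only): the ioctl helpers above are applied to
 * every open fd of the evsel, e.g. to set a tracepoint filter after opening:
 *
 *	if (perf_evsel__set_filter(evsel, cpus->nr, threads->nr,
 *				   "prev_pid == 0") < 0)
 *		pr_err("failed to set filter\n");
 */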
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}
void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus)
{
	memset(evsel->counts, 0, (sizeof(*evsel->counts) +
				  (ncpus * sizeof(struct perf_counts_values))));
}

int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}
void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	zfree(&evsel->id);
}
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}
void perf_evsel__free_counts(struct perf_evsel *evsel)
{
	zfree(&evsel->counts);
}

void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	perf_evsel__free_fd(evsel);
	perf_evsel__free_id(evsel);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	close_cgroup(evsel->cgrp);
	zfree(&evsel->group_name);
	if (evsel->tp_format)
		pevent_free_format(evsel->tp_format);
	zfree(&evsel->name);
	free(evsel);
}
static inline void compute_deltas(struct perf_evsel *evsel,
				  int cpu,
				  struct perf_counts_values *count)
{
	struct perf_counts_values tmp;

	if (!evsel->prev_raw_counts)
		return;

	if (cpu == -1) {
		tmp = evsel->prev_raw_counts->aggr;
		evsel->prev_raw_counts->aggr = *count;
	} else {
		tmp = evsel->prev_raw_counts->cpu[cpu];
		evsel->prev_raw_counts->cpu[cpu] = *count;
	}

	count->val = count->val - tmp.val;
	count->ena = count->ena - tmp.ena;
	count->run = count->run - tmp.run;
}
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	compute_deltas(evsel, cpu, &count);

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;

	return 0;
}
int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	compute_deltas(evsel, -1, aggr);

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}
static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
{
	struct perf_evsel *leader = evsel->leader;
	int fd;

	if (perf_evsel__is_group_leader(evsel))
		return -1;

	/*
	 * Leader must be already processed/open,
	 * if not it's a bug.
	 */
	BUG_ON(!leader->fd);

	fd = FD(leader, cpu, thread);
	BUG_ON(fd == -1);

	return fd;
}
#define __PRINT_ATTR(fmt, cast, field)  \
	fprintf(fp, "  %-19s "fmt"\n", #field, cast attr->field)

#define PRINT_ATTR_U32(field)  __PRINT_ATTR("%u" , , field)
#define PRINT_ATTR_X32(field)  __PRINT_ATTR("%#x", , field)
#define PRINT_ATTR_U64(field)  __PRINT_ATTR("%" PRIu64, (uint64_t), field)
#define PRINT_ATTR_X64(field)  __PRINT_ATTR("%#"PRIx64, (uint64_t), field)

#define PRINT_ATTR2N(name1, field1, name2, field2)	\
	fprintf(fp, "  %-19s %u    %-19s %u\n",		\
		name1, attr->field1, name2, attr->field2)

#define PRINT_ATTR2(field1, field2) \
	PRINT_ATTR2N(#field1, field1, #field2, field2)
static size_t perf_event_attr__fprintf(struct perf_event_attr *attr, FILE *fp)
{
	size_t ret = 0;

	ret += fprintf(fp, "%.60s\n", graph_dotted_line);
	ret += fprintf(fp, "perf_event_attr:\n");

	ret += PRINT_ATTR_U32(type);
	ret += PRINT_ATTR_U32(size);
	ret += PRINT_ATTR_X64(config);
	ret += PRINT_ATTR_U64(sample_period);
	ret += PRINT_ATTR_U64(sample_freq);
	ret += PRINT_ATTR_X64(sample_type);
	ret += PRINT_ATTR_X64(read_format);

	ret += PRINT_ATTR2(disabled, inherit);
	ret += PRINT_ATTR2(pinned, exclusive);
	ret += PRINT_ATTR2(exclude_user, exclude_kernel);
	ret += PRINT_ATTR2(exclude_hv, exclude_idle);
	ret += PRINT_ATTR2(mmap, comm);
	ret += PRINT_ATTR2(freq, inherit_stat);
	ret += PRINT_ATTR2(enable_on_exec, task);
	ret += PRINT_ATTR2(watermark, precise_ip);
	ret += PRINT_ATTR2(mmap_data, sample_id_all);
	ret += PRINT_ATTR2(exclude_host, exclude_guest);
	ret += PRINT_ATTR2N("excl.callchain_kern", exclude_callchain_kernel,
			    "excl.callchain_user", exclude_callchain_user);
	ret += PRINT_ATTR_U32(mmap2);

	ret += PRINT_ATTR_U32(wakeup_events);
	ret += PRINT_ATTR_U32(wakeup_watermark);
	ret += PRINT_ATTR_X32(bp_type);
	ret += PRINT_ATTR_X64(bp_addr);
	ret += PRINT_ATTR_X64(config1);
	ret += PRINT_ATTR_U64(bp_len);
	ret += PRINT_ATTR_X64(config2);
	ret += PRINT_ATTR_X64(branch_sample_type);
	ret += PRINT_ATTR_X64(sample_regs_user);
	ret += PRINT_ATTR_U32(sample_stack_user);

	ret += fprintf(fp, "%.60s\n", graph_dotted_line);

	return ret;
}
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads)
{
	int cpu, thread;
	unsigned long flags = 0;
	int pid = -1, err;
	enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags = PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

fallback_missing_features:
	if (perf_missing_features.mmap2)
		evsel->attr.mmap2 = 0;
	if (perf_missing_features.exclude_guest)
		evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
retry_sample_id:
	if (perf_missing_features.sample_id_all)
		evsel->attr.sample_id_all = 0;

	if (verbose >= 2)
		perf_event_attr__fprintf(&evsel->attr, stderr);

	for (cpu = 0; cpu < cpus->nr; cpu++) {

		for (thread = 0; thread < threads->nr; thread++) {
			int group_fd;

			if (!evsel->cgrp)
				pid = threads->map[thread];

			group_fd = get_group_fd(evsel, cpu, thread);
retry_open:
			pr_debug2("perf_event_open: pid %d  cpu %d  group_fd %d  flags %#lx\n",
				  pid, cpus->map[cpu], group_fd, flags);

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0) {
				err = -errno;
				pr_debug2("perf_event_open failed, error %d\n",
					  err);
				goto try_fallback;
			}
			set_rlimit = NO_CHANGE;
		}
	}

	return 0;

try_fallback:
	/*
	 * perf stat needs between 5 and 22 fds per CPU. When we run out
	 * of them try to increase the limits.
	 */
	if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
		struct rlimit l;
		int old_errno = errno;

		if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
			if (set_rlimit == NO_CHANGE)
				l.rlim_cur = l.rlim_max;
			else {
				l.rlim_cur = l.rlim_max + 1000;
				l.rlim_max = l.rlim_cur;
			}
			if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
				set_rlimit++;
				errno = old_errno;
				goto retry_open;
			}
		}
		errno = old_errno;
	}

	if (err != -EINVAL || cpu > 0 || thread > 0)
		goto out_close;

	if (!perf_missing_features.mmap2 && evsel->attr.mmap2) {
		perf_missing_features.mmap2 = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.exclude_guest &&
		   (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
		perf_missing_features.exclude_guest = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.sample_id_all) {
		perf_missing_features.sample_id_all = true;
		goto retry_sample_id;
	}

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return err;
}
void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel, ncpus, nthreads);
	perf_evsel__free_fd(evsel);
}
static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr	= 1,
	.cpus	= { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};

int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads);
}

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
}
static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
				       const union perf_event *event,
				       struct perf_sample *sample)
{
	u64 type = evsel->attr.sample_type;
	const u64 *array = event->sample.array;
	bool swapped = evsel->needs_swap;
	union u64_swap u;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		sample->cpu = u.val32[0];
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		sample->pid = u.val32[0];
		sample->tid = u.val32[1];
	}

	return 0;
}
static inline bool overflow(const void *endp, u16 max_size, const void *offset,
			    u64 size)
{
	return size > max_size || offset + size > endp;
}

#define OVERFLOW_CHECK(offset, size, max_size)				\
	do {								\
		if (overflow(endp, (max_size), (offset), (size)))	\
			return -EFAULT;					\
	} while (0)

#define OVERFLOW_CHECK_u64(offset) \
	OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
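
/*
 * Illustrative note (not part of the original file): the variable-size sample
 * fields parsed below follow the same bounds-check/read/advance pattern, e.g.:
 *
 *	if (type & PERF_SAMPLE_WEIGHT) {
 *		OVERFLOW_CHECK_u64(array);
 *		data->weight = *array;
 *		array++;
 *	}
 */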
int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
			     struct perf_sample *data)
{
	u64 type = evsel->attr.sample_type;
	bool swapped = evsel->needs_swap;
	const u64 *array;
	u16 max_size = event->header.size;
	const void *endp = (void *)event + max_size;
	u64 sz;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!evsel->attr.sample_id_all)
			return 0;
		return perf_evsel__parse_id_sample(evsel, event, data);
	}

	array = event->sample.array;

	/*
	 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
	 * up to PERF_SAMPLE_PERIOD.  After that overflow() must be used to
	 * check the format does not go past the end of the event.
	 */
	if (evsel->sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		data->ip = *array;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		u64 read_format = evsel->attr.read_format;

		OVERFLOW_CHECK_u64(array);
		if (read_format & PERF_FORMAT_GROUP)
			data->read.group.nr = *array;
		else
			data->read.one.value = *array;

		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_enabled = *array;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_running = *array;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			const u64 max_group_nr = UINT64_MAX /
					sizeof(struct sample_read_value);

			if (data->read.group.nr > max_group_nr)
				return -EFAULT;
			sz = data->read.group.nr *
			     sizeof(struct sample_read_value);
			OVERFLOW_CHECK(array, sz, max_size);
			data->read.group.values =
					(struct sample_read_value *)array;
			array = (void *)array + sz;
		} else {
			OVERFLOW_CHECK_u64(array);
			data->read.one.id = *array;
			array++;
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);

		OVERFLOW_CHECK_u64(array);
		data->callchain = (struct ip_callchain *)array++;
		if (data->callchain->nr > max_callchain_nr)
			return -EFAULT;
		sz = data->callchain->nr * sizeof(u64);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		OVERFLOW_CHECK_u64(array);
		u.val64 = *array;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}
		data->raw_size = u.val32[0];
		array = (void *)array + sizeof(u32);

		OVERFLOW_CHECK(array, data->raw_size, max_size);
		data->raw_data = (void *)array;
		array = (void *)array + data->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		const u64 max_branch_nr = UINT64_MAX /
					  sizeof(struct branch_entry);

		OVERFLOW_CHECK_u64(array);
		data->branch_stack = (struct branch_stack *)array++;

		if (data->branch_stack->nr > max_branch_nr)
			return -EFAULT;
		sz = data->branch_stack->nr * sizeof(struct branch_entry);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		OVERFLOW_CHECK_u64(array);
		data->user_regs.abi = *array;
		array++;

		if (data->user_regs.abi) {
			u64 regs_user = evsel->attr.sample_regs_user;

			sz = hweight_long(regs_user) * sizeof(u64);
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_regs.regs = (u64 *)array;
			array = (void *)array + sz;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		OVERFLOW_CHECK_u64(array);
		sz = *array++;

		data->user_stack.offset = ((char *)(array - 1)
					   - (char *)event);

		if (!sz) {
			data->user_stack.size = 0;
		} else {
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_stack.data = (char *)array;
			array = (void *)array + sz;
			OVERFLOW_CHECK_u64(array);
			data->user_stack.size = *array++;
			if (WARN_ONCE(data->user_stack.size > sz,
				      "user stack dump failure\n"))
				return -EFAULT;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		OVERFLOW_CHECK_u64(array);
		data->weight = *array;
		array++;
	}

	data->data_src = PERF_MEM_DATA_SRC_NONE;
	if (type & PERF_SAMPLE_DATA_SRC) {
		OVERFLOW_CHECK_u64(array);
		data->data_src = *array;
		array++;
	}

	data->transaction = 0;
	if (type & PERF_SAMPLE_TRANSACTION) {
		OVERFLOW_CHECK_u64(array);
		data->transaction = *array;
		array++;
	}

	return 0;
}
size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
				     u64 sample_regs_user, u64 read_format)
{
	size_t sz, result = sizeof(struct sample_event);

	if (type & PERF_SAMPLE_IDENTIFIER)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_IP)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TIME)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ADDR)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_STREAM_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_CPU)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_PERIOD)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_READ) {
		result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			result += sizeof(u64);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		result += sizeof(u32);
		result += sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			result += sizeof(u64);
			sz = hweight_long(sample_regs_user) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		result += sizeof(u64);
		if (sz) {
			result += sz;
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_DATA_SRC)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TRANSACTION)
		result += sizeof(u64);

	return result;
}
int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  u64 sample_regs_user, u64 read_format,
				  const struct perf_sample *sample,
				  bool swapped)
{
	u64 *array;
	size_t sz;
	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		*array = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}

		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			*array = sample->read.group.nr;
		else
			*array = sample->read.one.value;
		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			*array = sample->read.time_enabled;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			*array = sample->read.time_running;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			memcpy(array, sample->read.group.values, sz);
			array = (void *)array + sz;
		} else {
			*array = sample->read.one.id;
			array++;
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		memcpy(array, sample->callchain, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		u.val32[0] = sample->raw_size;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array = (void *)array + sizeof(u32);

		memcpy(array, sample->raw_data, sample->raw_size);
		array = (void *)array + sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		memcpy(array, sample->branch_stack, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			*array++ = sample->user_regs.abi;
			sz = hweight_long(sample_regs_user) * sizeof(u64);
			memcpy(array, sample->user_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		*array++ = sz;
		if (sz) {
			memcpy(array, sample->user_stack.data, sz);
			array = (void *)array + sz;
			*array++ = sz;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		*array = sample->weight;
		array++;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		*array = sample->data_src;
		array++;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		*array = sample->transaction;
		array++;
	}

	return 0;
}
struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
{
	return pevent_find_field(evsel->tp_format, name);
}
void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
			 const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	int offset;

	if (!field)
		return NULL;

	offset = field->offset;

	if (field->flags & FIELD_IS_DYNAMIC) {
		offset = *(int *)(sample->raw_data + field->offset);
		offset &= 0xffff;
	}

	return sample->raw_data + offset;
}
u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
		       const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	void *ptr;
	u64 value;

	if (!field)
		return 0;

	ptr = sample->raw_data + field->offset;

	switch (field->size) {
	case 1:
		return *(u8 *)ptr;
	case 2:
		value = *(u16 *)ptr;
		break;
	case 4:
		value = *(u32 *)ptr;
		break;
	case 8:
		value = *(u64 *)ptr;
		break;
	default:
		return 0;
	}

	if (!evsel->needs_swap)
		return value;

	switch (field->size) {
	case 2:
		return bswap_16(value);
	case 4:
		return bswap_32(value);
	case 8:
		return bswap_64(value);
	default:
		return 0;
	}

	return 0;
}
static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
{
	va_list args;
	int ret = 0;

	if (!*first) {
		ret += fprintf(fp, ",");
	} else {
		ret += fprintf(fp, ":");
		*first = false;
	}

	va_start(args, fmt);
	ret += vfprintf(fp, fmt, args);
	va_end(args);
	return ret;
}

static int __if_fprintf(FILE *fp, bool *first, const char *field, u64 value)
{
	if (value == 0)
		return 0;

	return comma_fprintf(fp, first, " %s: %" PRIu64, field, value);
}
#define if_print(field) printed += __if_fprintf(fp, &first, #field, evsel->attr.field)

struct bit_names {
	int bit;
	const char *name;
};

static int bits__fprintf(FILE *fp, const char *field, u64 value,
			 struct bit_names *bits, bool *first)
{
	int i = 0, printed = comma_fprintf(fp, first, " %s: ", field);
	bool first_bit = true;

	do {
		if (value & bits[i].bit) {
			printed += fprintf(fp, "%s%s", first_bit ? "" : "|", bits[i].name);
			first_bit = false;
		}
	} while (bits[++i].name != NULL);

	return printed;
}
static int sample_type__fprintf(FILE *fp, bool *first, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_##n, #n }
	struct bit_names bits[] = {
		bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
		bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
		bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
		bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
		bit_name(IDENTIFIER),
		{ .name = NULL, }
	};
#undef bit_name
	return bits__fprintf(fp, "sample_type", value, bits, first);
}

static int read_format__fprintf(FILE *fp, bool *first, u64 value)
{
#define bit_name(n) { PERF_FORMAT_##n, #n }
	struct bit_names bits[] = {
		bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
		bit_name(ID), bit_name(GROUP),
		{ .name = NULL, }
	};
#undef bit_name
	return bits__fprintf(fp, "read_format", value, bits, first);
}
int perf_evsel__fprintf(struct perf_evsel *evsel,
			struct perf_attr_details *details, FILE *fp)
{
	bool first = true;
	int printed = 0;

	if (details->event_group) {
		struct perf_evsel *pos;

		if (!perf_evsel__is_group_leader(evsel))
			return 0;

		if (evsel->nr_members > 1)
			printed += fprintf(fp, "%s{", evsel->group_name ?: "");

		printed += fprintf(fp, "%s", perf_evsel__name(evsel));
		for_each_group_member(pos, evsel)
			printed += fprintf(fp, ",%s", perf_evsel__name(pos));

		if (evsel->nr_members > 1)
			printed += fprintf(fp, "}");
		goto out;
	}

	printed += fprintf(fp, "%s", perf_evsel__name(evsel));

	if (details->verbose || details->freq) {
		printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64,
					 (u64)evsel->attr.sample_freq);
	}

	if (details->verbose) {
		printed += sample_type__fprintf(fp, &first, evsel->attr.sample_type);
		if (evsel->attr.read_format)
			printed += read_format__fprintf(fp, &first, evsel->attr.read_format);
		if_print(exclusive);
		if_print(exclude_user);
		if_print(exclude_kernel);
		if_print(exclude_hv);
		if_print(exclude_idle);
		if_print(inherit_stat);
		if_print(enable_on_exec);
		if_print(watermark);
		if_print(precise_ip);
		if_print(mmap_data);
		if_print(sample_id_all);
		if_print(exclude_host);
		if_print(exclude_guest);
		if_print(__reserved_1);
		if_print(wakeup_events);
		if_print(branch_sample_type);
	}
out:
	fputc('\n', fp);
	return ++printed;
}
bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
			  char *msg, size_t msgsize)
{
	if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
	    evsel->attr.type   == PERF_TYPE_HARDWARE &&
	    evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
		/*
		 * If it's cycles then fall back to hrtimer based
		 * cpu-clock-tick sw counter, which is always available even if
		 * no PMU support.
		 *
		 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
		 * b0a873e).
		 */
		scnprintf(msg, msgsize, "%s",
"The cycles event is not supported, trying to fall back to cpu-clock-ticks");

		evsel->attr.type   = PERF_TYPE_SOFTWARE;
		evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;

		zfree(&evsel->name);
		return true;
	}

	return false;
}
int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
			      int err, char *msg, size_t size)
{
	switch (err) {
	case EPERM:
	case EACCES:
		return scnprintf(msg, size,
		 "You may not have permission to collect %sstats.\n"
		 "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
		 " -1 - Not paranoid at all\n"
		 "  0 - Disallow raw tracepoint access for unpriv\n"
		 "  1 - Disallow cpu events for unpriv\n"
		 "  2 - Disallow kernel profiling for unpriv",
				 target->system_wide ? "system-wide " : "");
	case ENOENT:
		return scnprintf(msg, size, "The %s event is not supported.",
				 perf_evsel__name(evsel));
	case EMFILE:
		return scnprintf(msg, size, "%s",
			 "Too many events are opened.\n"
			 "Try again after reducing the number of events.");
	case ENODEV:
		if (target->cpu_list)
			return scnprintf(msg, size, "%s",
	 "No such device - did you specify an out-of-range profile CPU?\n");
		break;
	case EOPNOTSUPP:
		if (evsel->attr.precise_ip)
			return scnprintf(msg, size, "%s",
	"\'precise\' request may not be supported. Try removing 'p' modifier.");
#if defined(__i386__) || defined(__x86_64__)
		if (evsel->attr.type == PERF_TYPE_HARDWARE)
			return scnprintf(msg, size, "%s",
	"No hardware sampling interrupt available.\n"
	"No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.");
#endif
		break;
	default:
		break;
	}

	return scnprintf(msg, size,
	"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
	"/bin/dmesg may provide additional information.\n"
	"No CONFIG_PERF_EVENTS=y kernel support configured?\n",
			 err, strerror(err), perf_evsel__name(evsel));
}