// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */
#include "util/mmap.h"
#include "thread_map.h"
#include <internal/lib.h> // page_size
#include "bpf-event.h"
#include "util/string2.h"
#include "parse-events.h"
#include <subcmd/parse-options.h>

#include <sys/ioctl.h>

#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>
#include <perf/mmap.h>

#include <internal/xyarray.h>

#ifdef LACKS_SIGQUEUE_PROTOTYPE
int sigqueue(pid_t pid, int sig, const union sigval value);
#endif

#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)
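/*
 * Descriptive note: FD() and SID() simply index the per-evsel xyarray storage
 * kept by libperf (core.fd and core.sample_id), using the (cpu, thread) pair
 * as coordinates.
 */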
void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
		  struct perf_thread_map *threads)
	perf_evlist__init(&evlist->core);
	perf_evlist__set_maps(&evlist->core, cpus, threads);
	evlist->workload.pid = -1;
	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
struct evlist *evlist__new(void)
	struct evlist *evlist = zalloc(sizeof(*evlist));

	evlist__init(evlist, NULL, NULL);

struct evlist *perf_evlist__new_default(void)
	struct evlist *evlist = evlist__new();

	if (evlist && perf_evlist__add_default(evlist)) {
		evlist__delete(evlist);

struct evlist *perf_evlist__new_dummy(void)
	struct evlist *evlist = evlist__new();

	if (evlist && perf_evlist__add_dummy(evlist)) {
		evlist__delete(evlist);
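/*
 * Typical lifecycle, as a sketch only (the exact sequence varies per tool;
 * this is illustrative, not copied from any builtin):
 *
 *	evlist = evlist__new();
 *	... add evsels, e.g. via parse_events() or evlist__add() ...
 *	perf_evlist__create_maps(evlist, &target);
 *	evlist__open(evlist);
 *	evlist__mmap(evlist, UINT_MAX);
 *	evlist__enable(evlist);
 *	... consume events ...
 *	evlist__delete(evlist);		// munmaps, closes and frees the evsels
 */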
/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos. For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct evlist *evlist)
	struct evsel *first = evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;

static void perf_evlist__update_id_pos(struct evlist *evlist)
	evlist__for_each_entry(evlist, evsel)
		perf_evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
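/*
 * Descriptive note: id_pos is the index of the sample ID within a
 * PERF_RECORD_SAMPLE's array, while is_pos locates it in the sample_id_all
 * trailer appended to non-sample records; both are derived from the evsels'
 * sample_type by perf_evsel__calc_id_pos().
 */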
static void evlist__purge(struct evlist *evlist)
	struct evsel *pos, *n;

	evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->core.node);

	evlist->core.nr_entries = 0;

void evlist__exit(struct evlist *evlist)
	zfree(&evlist->mmap);
	zfree(&evlist->overwrite_mmap);
	perf_evlist__exit(&evlist->core);

void evlist__delete(struct evlist *evlist)
	evlist__munmap(evlist);
	evlist__close(evlist);
	evlist__purge(evlist);
	evlist__exit(evlist);
void evlist__add(struct evlist *evlist, struct evsel *entry)
	entry->evlist = evlist;
	entry->idx = evlist->core.nr_entries;
	entry->tracking = !entry->idx;

	perf_evlist__add(&evlist->core, &entry->core);

	if (evlist->core.nr_entries == 1)
		perf_evlist__set_id_pos(evlist);

void evlist__remove(struct evlist *evlist, struct evsel *evsel)
	evsel->evlist = NULL;
	perf_evlist__remove(&evlist->core, &evsel->core);

void perf_evlist__splice_list_tail(struct evlist *evlist,
				   struct list_head *list)
	struct evsel *evsel, *temp;

	__evlist__for_each_entry_safe(list, temp, evsel) {
		list_del_init(&evsel->core.node);
		evlist__add(evlist, evsel);
int __evlist__set_tracepoints_handlers(struct evlist *evlist,
				       const struct evsel_str_handler *assocs, size_t nr_assocs)
	for (i = 0; i < nr_assocs; i++) {
		// Adding a handler for an event not in this evlist, just ignore it.
		evsel = perf_evlist__find_tracepoint_by_name(evlist, assocs[i].name);
		if (evsel->handler != NULL)
		evsel->handler = assocs[i].handler;
void __perf_evlist__set_leader(struct list_head *list)
	struct evsel *evsel, *leader;

	leader = list_entry(list->next, struct evsel, core.node);
	evsel = list_entry(list->prev, struct evsel, core.node);

	leader->core.nr_members = evsel->idx - leader->idx + 1;

	__evlist__for_each_entry(list, evsel) {
		evsel->leader = leader;

void perf_evlist__set_leader(struct evlist *evlist)
	if (evlist->core.nr_entries) {
		evlist->nr_groups = evlist->core.nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->core.entries);
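/*
 * Descriptive note: nr_members is computed from the idx delta between the
 * first and last entry on the list, which assumes the group members were
 * added with consecutive indices; every member then gets its ->leader pointer
 * set to the first entry.
 */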
int __perf_evlist__add_default(struct evlist *evlist, bool precise)
	struct evsel *evsel = perf_evsel__new_cycles(precise);

	evlist__add(evlist, evsel);

int perf_evlist__add_dummy(struct evlist *evlist)
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config	= PERF_COUNT_SW_DUMMY,
		.size	= sizeof(attr), /* to capture ABI version */
	struct evsel *evsel = perf_evsel__new_idx(&attr, evlist->core.nr_entries);

	evlist__add(evlist, evsel);
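/*
 * A dummy (PERF_COUNT_SW_DUMMY) event counts nothing; it exists so that a
 * ring buffer and its side-band records (mmap, comm, fork, ...) can be set up
 * even when no "real" counter is wanted.
 */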
static int evlist__add_attrs(struct evlist *evlist,
			     struct perf_event_attr *attrs, size_t nr_attrs)
	struct evsel *evsel, *n;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new_idx(attrs + i, evlist->core.nr_entries + i);
			goto out_delete_partial_list;
		list_add_tail(&evsel->core.node, &head);

	perf_evlist__splice_list_tail(evlist, &head);

out_delete_partial_list:
	__evlist__for_each_entry_safe(&head, n, evsel)
		evsel__delete(evsel);

int __perf_evlist__add_default_attrs(struct evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return evlist__add_attrs(evlist, attrs, nr_attrs);
perf_evlist__find_tracepoint_by_id(struct evlist *evlist, int id)
	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->core.attr.config == id)

perf_evlist__find_tracepoint_by_name(struct evlist *evlist,
	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->core.attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))

int perf_evlist__add_newtp(struct evlist *evlist,
			   const char *sys, const char *name, void *handler)
	struct evsel *evsel = perf_evsel__newtp(sys, name);

	evsel->handler = handler;
	evlist__add(evlist, evsel);
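/*
 * Illustrative use (sketch; the tracepoint and handler names below are made
 * up for the example):
 *
 *	perf_evlist__add_newtp(evlist, "sched", "sched_switch", my_sched_switch_handler);
 *
 * The handler is later looked up from evsel->handler when the sample is
 * delivered.
 */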
static int perf_evlist__nr_threads(struct evlist *evlist,
	if (evsel->core.system_wide)

	return perf_thread_map__nr(evlist->core.threads);

void evlist__cpu_iter_start(struct evlist *evlist)
	/*
	 * Reset the per evsel cpu_iter. This is needed because
	 * each evsel's cpumap may have a different index space,
	 * and some operations need the index to modify
	 * the FD xyarray (e.g. open, close)
	 */
	evlist__for_each_entry(evlist, pos)

bool evsel__cpu_iter_skip_no_inc(struct evsel *ev, int cpu)
	if (ev->cpu_iter >= ev->core.cpus->nr)
	if (cpu >= 0 && ev->core.cpus->map[ev->cpu_iter] != cpu)

bool evsel__cpu_iter_skip(struct evsel *ev, int cpu)
	if (!evsel__cpu_iter_skip_no_inc(ev, cpu)) {
void evlist__disable(struct evlist *evlist)
	struct affinity affinity;

	if (affinity__setup(&affinity) < 0)

	evlist__for_each_cpu(evlist, i, cpu) {
		affinity__set(&affinity, cpu);

		evlist__for_each_entry(evlist, pos) {
			if (evsel__cpu_iter_skip(pos, cpu))
			if (pos->disabled || !perf_evsel__is_group_leader(pos) || !pos->core.fd)
			evsel__disable_cpu(pos, pos->cpu_iter - 1);

	affinity__cleanup(&affinity);
	evlist__for_each_entry(evlist, pos) {
		if (!perf_evsel__is_group_leader(pos) || !pos->core.fd)
		pos->disabled = true;

	evlist->enabled = false;

void evlist__enable(struct evlist *evlist)
	struct affinity affinity;

	if (affinity__setup(&affinity) < 0)

	evlist__for_each_cpu(evlist, i, cpu) {
		affinity__set(&affinity, cpu);

		evlist__for_each_entry(evlist, pos) {
			if (evsel__cpu_iter_skip(pos, cpu))
			if (!perf_evsel__is_group_leader(pos) || !pos->core.fd)
			evsel__enable_cpu(pos, pos->cpu_iter - 1);

	affinity__cleanup(&affinity);
	evlist__for_each_entry(evlist, pos) {
		if (!perf_evsel__is_group_leader(pos) || !pos->core.fd)
		pos->disabled = false;

	evlist->enabled = true;

void perf_evlist__toggle_enable(struct evlist *evlist)
	(evlist->enabled ? evlist__disable : evlist__enable)(evlist);
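/*
 * Note on the loops above: iterating CPU-major and pinning to each CPU via
 * struct affinity before issuing the per-CPU enable/disable ioctls keeps the
 * syscalls local to the CPU that owns the events, avoiding cross-CPU IPIs on
 * large machines.
 */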
static int perf_evlist__enable_event_cpu(struct evlist *evlist,
					 struct evsel *evsel, int cpu)
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	for (thread = 0; thread < nr_threads; thread++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);

static int perf_evlist__enable_event_thread(struct evlist *evlist,
	int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);

int perf_evlist__enable_event_idx(struct evlist *evlist,
				  struct evsel *evsel, int idx)
	bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.cpus);

		return perf_evlist__enable_event_cpu(evlist, evsel, idx);

	return perf_evlist__enable_event_thread(evlist, evsel, idx);

int evlist__add_pollfd(struct evlist *evlist, int fd)
	return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN);

int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
	return perf_evlist__filter_pollfd(&evlist->core, revents_and_mask);

int evlist__poll(struct evlist *evlist, int timeout)
	return perf_evlist__poll(&evlist->core, timeout);
struct perf_sample_id *perf_evlist__id2sid(struct evlist *evlist, u64 id)
	struct hlist_head *head;
	struct perf_sample_id *sid;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->core.heads[hash];

	hlist_for_each_entry(sid, head, node)

struct evsel *perf_evlist__id2evsel(struct evlist *evlist, u64 id)
	struct perf_sample_id *sid;

	if (evlist->core.nr_entries == 1 || !id)
		return evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
		return container_of(sid->evsel, struct evsel, core);

	if (!perf_evlist__sample_id_all(evlist))
		return evlist__first(evlist);

struct evsel *perf_evlist__id2evsel_strict(struct evlist *evlist,
	struct perf_sample_id *sid;

	sid = perf_evlist__id2sid(evlist, id);
		return container_of(sid->evsel, struct evsel, core);
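/*
 * Both lookups above go through the same hash: sample IDs are hashed with
 * hash_64() into evlist->core.heads[] when the events are set up, so mapping
 * an ID back to its evsel is a bucket walk rather than a scan of the whole
 * evlist.
 */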
static int perf_evlist__event2id(struct evlist *evlist,
				 union perf_event *event, u64 *id)
	const __u64 *array = event->sample.array;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
		*id = array[evlist->id_pos];
		if (evlist->is_pos > n)

struct evsel *perf_evlist__event2evsel(struct evlist *evlist,
				       union perf_event *event)
	struct evsel *first = evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;

	if (evlist->core.nr_entries == 1)

	if (!first->core.attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)

	if (perf_evlist__event2id(evlist, event, &id))

	/* Synthesized events have an id of zero */

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->core.heads[hash];

	hlist_for_each_entry(sid, head, node) {
			return container_of(sid->evsel, struct evsel, core);
static int perf_evlist__set_paused(struct evlist *evlist, bool value)
	if (!evlist->overwrite_mmap)

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		int fd = evlist->overwrite_mmap[i].core.fd;

		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);

static int perf_evlist__pause(struct evlist *evlist)
	return perf_evlist__set_paused(evlist, true);

static int perf_evlist__resume(struct evlist *evlist)
	return perf_evlist__set_paused(evlist, false);

static void evlist__munmap_nofree(struct evlist *evlist)
		for (i = 0; i < evlist->core.nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i].core);

	if (evlist->overwrite_mmap)
		for (i = 0; i < evlist->core.nr_mmaps; i++)
			perf_mmap__munmap(&evlist->overwrite_mmap[i].core);

void evlist__munmap(struct evlist *evlist)
	evlist__munmap_nofree(evlist);
	zfree(&evlist->mmap);
	zfree(&evlist->overwrite_mmap);

static void perf_mmap__unmap_cb(struct perf_mmap *map)
	struct mmap *m = container_of(map, struct mmap, core);

static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
	map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap));

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		struct perf_mmap *prev = i ? &map[i - 1].core : NULL;

		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_mmap__consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		perf_mmap__init(&map[i].core, prev, overwrite, perf_mmap__unmap_cb);
perf_evlist__mmap_cb_idx(struct perf_evlist *_evlist,
			 struct perf_mmap_param *_mp,
			 int idx, bool per_cpu)
	struct evlist *evlist = container_of(_evlist, struct evlist, core);
	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);

	auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, idx, per_cpu);

static struct perf_mmap *
perf_evlist__mmap_cb_get(struct perf_evlist *_evlist, bool overwrite, int idx)
	struct evlist *evlist = container_of(_evlist, struct evlist, core);

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
		maps = evlist__alloc_mmap(evlist, overwrite);

			evlist->overwrite_mmap = maps;
			if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
				perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);

	return &maps[idx].core;

perf_evlist__mmap_cb_mmap(struct perf_mmap *_map, struct perf_mmap_param *_mp,
	struct mmap *map = container_of(_map, struct mmap, core);
	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);

	return mmap__mmap(map, mp, output, cpu);
unsigned long perf_event_mlock_kb_in_pages(void)
	if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
		/*
		 * Pick a once upon a time good value, i.e. things look
		 * strange since we can't read a sysctl value, but let's not
		 * die yet...
		 */

		max -= (page_size / 1024);

	pages = (max * 1024) / page_size;
	if (!is_power_of_2(pages))
		pages = rounddown_pow_of_two(pages);

size_t evlist__mmap_size(unsigned long pages)
	if (pages == UINT_MAX)
		pages = perf_event_mlock_kb_in_pages();
	else if (!is_power_of_2(pages))

	return (pages + 1) * page_size;
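/*
 * The "+ 1" accounts for the control page: a perf ring buffer is mapped as
 * one metadata/user page followed by 2^n data pages, so asking for "pages"
 * data pages means mapping pages + 1 pages in total.
 */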
static long parse_pages_arg(const char *str, unsigned long min,
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag = 'B', .mult = 1 },
		{ .tag = 'K', .mult = 1 << 10 },
		{ .tag = 'M', .mult = 1 << 20 },
		{ .tag = 'G', .mult = 1 << 30 },

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
		/* we got pages count value */
		pages = strtoul(str, &eptr, 10);

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		/* round pages up to next power of 2 */
		pages = roundup_pow_of_two(pages);

		unit_number__scnprintf(buf, sizeof(buf), pages * page_size);
		pr_info("rounding mmap pages size to %s (%lu pages)\n",

int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
	unsigned long max = UINT_MAX;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
		pr_err("Invalid argument for --mmap_pages/-m\n");

int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
				  int unset __maybe_unused)
	return __perf_evlist__parse_mmap_pages(opt->value, str);
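/*
 * Worked example (assuming a 4 KiB page_size): "-m 512K" parses via
 * parse_tag_value() to 524288 bytes, i.e. 128 pages, already a power of two;
 * "-m 100" is taken as a page count and rounded up to 128 pages, with the
 * pr_info() above reporting the rounding.
 */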
/**
 * evlist__mmap_ex - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 * @auxtrace_pages: auxtrace map length in pages
 * @auxtrace_overwrite: overwrite older auxtrace data?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail(). Using evlist__mmap_read() does this
 * automatically.
 *
 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
 * consumption using auxtrace_mmap__write_tail().
 *
 * Return: %0 on success, negative error code otherwise.
 */
int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
		    unsigned int auxtrace_pages,
		    bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush,
	/*
	 * Delay setting mp.prot: set it before calling perf_mmap__mmap.
	 * Its value is decided by evsel's write_backward.
	 * So &mp should not be passed through const pointer.
	 */
	struct mmap_params mp = {
		.nr_cblocks	= nr_cblocks,
		.affinity	= affinity,
		.comp_level	= comp_level
	struct perf_evlist_mmap_ops ops = {
		.idx  = perf_evlist__mmap_cb_idx,
		.get  = perf_evlist__mmap_cb_get,
		.mmap = perf_evlist__mmap_cb_mmap,

	evlist->core.mmap_len = evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->core.mmap_len);

	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len,
				   auxtrace_pages, auxtrace_overwrite);

	return perf_evlist__mmap_ops(&evlist->core, &ops, &mp.core);

int evlist__mmap(struct evlist *evlist, unsigned int pages)
	return evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0);
int perf_evlist__create_maps(struct evlist *evlist, struct target *target)
	bool all_threads = (target->per_thread && target->system_wide);
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;

	/*
	 * If both '-a' and '--per-thread' are passed to perf record, perf
	 * record overrides '--per-thread': target->per_thread = false and
	 * target->system_wide = true.
	 *
	 * If only '--per-thread' is passed to perf record,
	 * target->per_thread = true and target->system_wide = false.
	 *
	 * So target->per_thread && target->system_wide is false.
	 * For perf record, thread_map__new_str doesn't call
	 * thread_map__new_all_cpus. That keeps perf record's
	 * current behavior.
	 *
	 * For perf stat, target->per_thread and
	 * target->system_wide may both be true: it means collecting
	 * system-wide per-thread data, and thread_map__new_str will call
	 * thread_map__new_all_cpus to enumerate all threads.
	 */
	threads = thread_map__new_str(target->pid, target->tid, target->uid,

	if (target__uses_dummy_map(target))
		cpus = perf_cpu_map__dummy_new();
		cpus = perf_cpu_map__new(target->cpu_list);

		goto out_delete_threads;

	evlist->core.has_user_cpus = !!target->cpu_list;

	perf_evlist__set_maps(&evlist->core, cpus, threads);

	perf_thread_map__put(threads);
void __perf_evlist__set_sample_bit(struct evlist *evlist,
				   enum perf_event_sample_format bit)
	evlist__for_each_entry(evlist, evsel)
		__perf_evsel__set_sample_bit(evsel, bit);

void __perf_evlist__reset_sample_bit(struct evlist *evlist,
				     enum perf_event_sample_format bit)
	evlist__for_each_entry(evlist, evsel)
		__perf_evsel__reset_sample_bit(evsel, bit);

int perf_evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel)
	evlist__for_each_entry(evlist, evsel) {
		if (evsel->filter == NULL)

		/*
		 * Filters only work for tracepoint events, which don't have a
		 * cpu limit. So evlist and evsel should always be the same.
		 */
		err = perf_evsel__apply_filter(&evsel->core, evsel->filter);

int perf_evlist__set_tp_filter(struct evlist *evlist, const char *filter)
	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)

		err = perf_evsel__set_filter(evsel, filter);

int perf_evlist__append_tp_filter(struct evlist *evlist, const char *filter)
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)

		err = perf_evsel__append_tp_filter(evsel, filter);
char *asprintf__tp_filter_pids(size_t npids, pid_t *pids)
	for (i = 0; i < npids; ++i) {
			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)

int perf_evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
	char *filter = asprintf__tp_filter_pids(npids, pids);
	int ret = perf_evlist__set_tp_filter(evlist, filter);

int perf_evlist__set_tp_filter_pid(struct evlist *evlist, pid_t pid)
	return perf_evlist__set_tp_filter_pids(evlist, 1, &pid);

int perf_evlist__append_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
	char *filter = asprintf__tp_filter_pids(npids, pids);
	int ret = perf_evlist__append_tp_filter(evlist, filter);

int perf_evlist__append_tp_filter_pid(struct evlist *evlist, pid_t pid)
	return perf_evlist__append_tp_filter_pids(evlist, 1, &pid);
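/*
 * For example, pids {1234, 5678} produce the tracepoint filter string
 * "common_pid != 1234 && common_pid != 5678", which is then set (or appended)
 * on every PERF_TYPE_TRACEPOINT evsel in the list.
 */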
bool perf_evlist__valid_sample_type(struct evlist *evlist)
	if (evlist->core.nr_entries == 1)
	if (evlist->id_pos < 0 || evlist->is_pos < 0)

	evlist__for_each_entry(evlist, pos) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)

u64 __perf_evlist__combined_sample_type(struct evlist *evlist)
	struct evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	evlist__for_each_entry(evlist, evsel)
		evlist->combined_sample_type |= evsel->core.attr.sample_type;

	return evlist->combined_sample_type;

u64 perf_evlist__combined_sample_type(struct evlist *evlist)
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);

u64 perf_evlist__combined_branch_type(struct evlist *evlist)
	struct evsel *evsel;
	u64 branch_type = 0;

	evlist__for_each_entry(evlist, evsel)
		branch_type |= evsel->core.attr.branch_sample_type;
bool perf_evlist__valid_read_format(struct evlist *evlist)
	struct evsel *first = evlist__first(evlist), *pos = first;
	u64 read_format = first->core.attr.read_format;
	u64 sample_type = first->core.attr.sample_type;

	evlist__for_each_entry(evlist, pos) {
		if (read_format != pos->core.attr.read_format)

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {

u16 perf_evlist__id_hdr_size(struct evlist *evlist)
	struct evsel *first = evlist__first(evlist);
	struct perf_sample *data;

	if (!first->core.attr.sample_id_all)

	sample_type = first->core.attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
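/*
 * This is the size of the sample_id_all trailer the kernel appends to
 * non-sample records (MMAP, COMM, ...): e.g. with TID, TIME and IDENTIFIER
 * set it is 2*4 + 8 + 8 = 24 bytes, which callers use to step over the
 * trailer when parsing those records.
 */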
bool perf_evlist__valid_sample_id_all(struct evlist *evlist)
	struct evsel *first = evlist__first(evlist), *pos = first;

	evlist__for_each_entry_continue(evlist, pos) {
		if (first->core.attr.sample_id_all != pos->core.attr.sample_id_all)

bool perf_evlist__sample_id_all(struct evlist *evlist)
	struct evsel *first = evlist__first(evlist);
	return first->core.attr.sample_id_all;

void perf_evlist__set_selected(struct evlist *evlist,
			       struct evsel *evsel)
	evlist->selected = evsel;

void evlist__close(struct evlist *evlist)
	struct evsel *evsel;
	struct affinity affinity;

	/*
	 * With perf record core.cpus is usually NULL.
	 * Use the old method to handle this for now.
	 */
	if (!evlist->core.cpus) {
		evlist__for_each_entry_reverse(evlist, evsel)
			evsel__close(evsel);

	if (affinity__setup(&affinity) < 0)

	evlist__for_each_cpu(evlist, i, cpu) {
		affinity__set(&affinity, cpu);

		evlist__for_each_entry_reverse(evlist, evsel) {
			if (evsel__cpu_iter_skip(evsel, cpu))
			perf_evsel__close_cpu(&evsel->core, evsel->cpu_iter - 1);

	affinity__cleanup(&affinity);
	evlist__for_each_entry_reverse(evlist, evsel) {
		perf_evsel__free_fd(&evsel->core);
		perf_evsel__free_id(&evsel->core);
static int perf_evlist__create_syswide_maps(struct evlist *evlist)
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;

	/*
	 * Try reading /sys/devices/system/cpu/online to get
	 * an all-CPUs map.
	 *
	 * FIXME: -ENOMEM is the best we can do here, the cpu_map
	 * code needs an overhaul to properly forward the
	 * error, and we may not want to do that fallback to a
	 * default cpu identity map :-\
	 */
	cpus = perf_cpu_map__new(NULL);

	threads = perf_thread_map__new_dummy();

	perf_evlist__set_maps(&evlist->core, cpus, threads);

	perf_cpu_map__put(cpus);

int evlist__open(struct evlist *evlist)
	struct evsel *evsel;

	/*
	 * Default: one fd per CPU, all threads, aka systemwide
	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
	 */
	if (evlist->core.threads == NULL && evlist->core.cpus == NULL) {
		err = perf_evlist__create_syswide_maps(evlist);

	perf_evlist__update_id_pos(evlist);

	evlist__for_each_entry(evlist, evsel) {
		err = evsel__open(evsel, evsel->core.cpus, evsel->core.threads);

	evlist__close(evlist);
int perf_evlist__prepare_workload(struct evlist *evlist, struct target *target,
				  const char *argv[], bool pipe_output,
				  void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
	int child_ready_pipe[2], go_pipe[2];

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;

	if (!evlist->workload.pid) {
		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		ret = read(go_pipe[0], &bf, 1);
		/*
		 * The parent will ask for the execvp() to be performed by
		 * writing exactly one byte, in workload.cork_fd, usually via
		 * perf_evlist__start_workload().
		 *
		 * For cancelling the workload without actually running it,
		 * the parent will just close workload.cork_fd, without writing
		 * anything, i.e. read will return zero and we just exit()
		 */
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

			val.sival_int = errno;
			if (sigqueue(getppid(), SIGUSR1, val))

		struct sigaction act = {
			.sa_flags     = SA_SIGINFO,
			.sa_sigaction = exec_error,
		sigaction(SIGUSR1, &act, NULL);

	if (target__none(target)) {
		if (evlist->core.threads == NULL) {
			fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
				__func__, __LINE__);
			goto out_close_pipes;

		perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid);

	close(child_ready_pipe[1]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);

out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
int perf_evlist__start_workload(struct evlist *evlist)
	if (evlist->workload.cork_fd > 0) {
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
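/*
 * The "cork" above is the write end of the go_pipe created in
 * perf_evlist__prepare_workload(): the forked child blocks in read() on the
 * other end until exactly one byte is written here, after the parent has set
 * up its counters, and only then does the child execvp() the workload.
 */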
int perf_evlist__parse_sample(struct evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
	struct evsel *evsel = perf_evlist__event2evsel(evlist, event);

	return perf_evsel__parse_sample(evsel, event, sample);

int perf_evlist__parse_sample_timestamp(struct evlist *evlist,
					union perf_event *event,
	struct evsel *evsel = perf_evlist__event2evsel(evlist, event);

	return perf_evsel__parse_sample_timestamp(evsel, event, timestamp);
int perf_evlist__strerror_open(struct evlist *evlist,
			       int err, char *buf, size_t size)
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));

		printed = scnprintf(buf, size,
				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);

		value = perf_event_paranoid();

		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");

			printed += scnprintf(buf + printed, size - printed,
					     "For your workloads it needs to be <= 1\nHint:\t");
		printed += scnprintf(buf + printed, size - printed,
				     "For system wide tracing it needs to be set to -1.\n");

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
				     "Hint:\tThe current value is %d.", value);

		struct evsel *first = evlist__first(evlist);

		if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)

		if (first->core.attr.sample_freq < (u64)max_freq)

		printed = scnprintf(buf, size,
				    "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
				    "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
				    emsg, max_freq, first->core.attr.sample_freq);

		scnprintf(buf, size, "%s", emsg);
int perf_evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size)
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
	int pages_attempted = evlist->core.mmap_len / 1024, pages_max_per_user, printed = 0;

		sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
				     "Hint:\tTried using %zd kB.\n",
				     emsg, pages_max_per_user, pages_attempted);

		if (pages_attempted >= pages_max_per_user) {
			printed += scnprintf(buf + printed, size - printed,
					     "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
					     pages_max_per_user + pages_attempted);

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry using a smaller -m/--mmap-pages value.");

		scnprintf(buf, size, "%s", emsg);
void perf_evlist__to_front(struct evlist *evlist,
			   struct evsel *move_evsel)
	struct evsel *evsel, *n;

	if (move_evsel == evlist__first(evlist))

	evlist__for_each_entry_safe(evlist, n, evsel) {
		if (evsel->leader == move_evsel->leader)
			list_move_tail(&evsel->core.node, &move);

	list_splice(&move, &evlist->core.entries);

void perf_evlist__set_tracking_event(struct evlist *evlist,
				     struct evsel *tracking_evsel)
	struct evsel *evsel;

	if (tracking_evsel->tracking)

	evlist__for_each_entry(evlist, evsel) {
		if (evsel != tracking_evsel)
			evsel->tracking = false;

	tracking_evsel->tracking = true;

perf_evlist__find_evsel_by_str(struct evlist *evlist,
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (strcmp(str, evsel->name) == 0)
void perf_evlist__toggle_bkw_mmap(struct evlist *evlist,
				  enum bkw_mmap_state state)
	enum bkw_mmap_state old_state = evlist->bkw_mmap_state;

	if (!evlist->overwrite_mmap)

	switch (old_state) {
	case BKW_MMAP_NOTREADY: {
		if (state != BKW_MMAP_RUNNING)
	case BKW_MMAP_RUNNING: {
		if (state != BKW_MMAP_DATA_PENDING)
	case BKW_MMAP_DATA_PENDING: {
		if (state != BKW_MMAP_EMPTY)
	case BKW_MMAP_EMPTY: {
		if (state != BKW_MMAP_RUNNING)
		WARN_ONCE(1, "Shouldn't get there\n");

	evlist->bkw_mmap_state = state;

		perf_evlist__pause(evlist);
		perf_evlist__resume(evlist);
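/*
 * The backward-mmap state machine enforced above only allows the transitions
 * NOTREADY -> RUNNING -> DATA_PENDING -> EMPTY -> RUNNING; entering
 * DATA_PENDING pauses the overwrite ring buffers (so their contents can be
 * read) and going back to RUNNING resumes them.
 */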
bool perf_evlist__exclude_kernel(struct evlist *evlist)
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->core.attr.exclude_kernel)

/*
 * Events in data file are not collected in groups, but we still want
 * the group display. Set the artificial group and set the leader's
 * forced_leader flag to notify the display code.
 */
void perf_evlist__force_leader(struct evlist *evlist)
	if (!evlist->nr_groups) {
		struct evsel *leader = evlist__first(evlist);

		perf_evlist__set_leader(evlist);
		leader->forced_leader = true;
struct evsel *perf_evlist__reset_weak_group(struct evlist *evsel_list,
					    struct evsel *evsel,
	struct evsel *c2, *leader;
	bool is_open = true;

	leader = evsel->leader;
	pr_debug("Weak group for %s/%d failed\n",
		 leader->name, leader->core.nr_members);

	/*
	 * for_each_group_member doesn't work here because it doesn't
	 * include the first entry.
	 */
	evlist__for_each_entry(evsel_list, c2) {
		if (c2->leader == leader) {
			if (is_open && close)
				perf_evsel__close(&c2->core);
			c2->core.nr_members = 0;
			/*
			 * Set this for all former members of the group
			 * to indicate they get reopened.
			 */
			c2->reset_group = true;
int perf_evlist__add_sb_event(struct evlist **evlist,
			      struct perf_event_attr *attr,
			      perf_evsel__sb_cb_t cb,
	struct evsel *evsel;
	bool new_evlist = (*evlist) == NULL;

	if (*evlist == NULL)
		*evlist = evlist__new();
	if (*evlist == NULL)

	if (!attr->sample_id_all) {
		pr_warning("enabling sample_id_all for all side band events\n");
		attr->sample_id_all = 1;

	evsel = perf_evsel__new_idx(attr, (*evlist)->core.nr_entries);

	evsel->side_band.cb = cb;
	evsel->side_band.data = data;
	evlist__add(*evlist, evsel);

		evlist__delete(*evlist);
static void *perf_evlist__poll_thread(void *arg)
	struct evlist *evlist = arg;
	bool draining = false;

	/*
	 * In order to read symbols from other namespaces perf needs to call
	 * setns(2). This isn't permitted if the struct_fs has multiple users.
	 * unshare(2) the fs so that we may continue to setns into namespaces
	 * that we're observing when, for instance, reading the build-ids at
	 * the end of a 'perf record' session.
	 */

		bool got_data = false;

		if (evlist->thread.done)
			evlist__poll(evlist, 1000);

		for (i = 0; i < evlist->core.nr_mmaps; i++) {
			struct mmap *map = &evlist->mmap[i];
			union perf_event *event;

			if (perf_mmap__read_init(&map->core))
			while ((event = perf_mmap__read_event(&map->core)) != NULL) {
				struct evsel *evsel = perf_evlist__event2evsel(evlist, event);

				if (evsel && evsel->side_band.cb)
					evsel->side_band.cb(event, evsel->side_band.data);
					pr_warning("cannot locate proper evsel for the side band event\n");

				perf_mmap__consume(&map->core);
			perf_mmap__read_done(&map->core);

		if (draining && !got_data)
int perf_evlist__start_sb_thread(struct evlist *evlist,
				 struct target *target)
	struct evsel *counter;

	if (perf_evlist__create_maps(evlist, target))
		goto out_delete_evlist;

	evlist__for_each_entry(evlist, counter) {
		if (evsel__open(counter, evlist->core.cpus,
				evlist->core.threads) < 0)
			goto out_delete_evlist;

	if (evlist__mmap(evlist, UINT_MAX))
		goto out_delete_evlist;

	evlist__for_each_entry(evlist, counter) {
		if (evsel__enable(counter))
			goto out_delete_evlist;

	evlist->thread.done = 0;
	if (pthread_create(&evlist->thread.th, NULL, perf_evlist__poll_thread, evlist))
		goto out_delete_evlist;

	evlist__delete(evlist);

void perf_evlist__stop_sb_thread(struct evlist *evlist)
	evlist->thread.done = 1;
	pthread_join(evlist->thread.th, NULL);
	evlist__delete(evlist);
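/*
 * Side-band usage, as a sketch: a tool calls perf_evlist__add_sb_event() with
 * a dummy-style attr and a callback, then perf_evlist__start_sb_thread() to
 * create maps, open, mmap, enable and spawn perf_evlist__poll_thread(), and
 * finally perf_evlist__stop_sb_thread() to join the thread and free the
 * evlist.
 */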