/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <api/fs/fs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"

#include "parse-events.h"
#include "parse-options.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/err.h>
static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx);
static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx);

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	fdarray__init(&evlist->pollfd, 64);
	evlist->workload.pid = -1;
}
struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct perf_evlist *perf_evlist__new_default(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_default(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}
/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel)
		perf_evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}
static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	evlist__for_each_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		pos->evlist = NULL;
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	zfree(&evlist->mmap);
	fdarray__exit(&evlist->pollfd);
}
void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	cpu_map__put(evlist->cpus);
	thread_map__put(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}
static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
					  struct perf_evsel *evsel)
{
	/*
	 * We already have cpus for evsel (via PMU sysfs) so
	 * keep it, if there's no target cpu list defined.
	 */
	if (!evsel->own_cpus || evlist->has_user_cpus) {
		cpu_map__put(evsel->cpus);
		evsel->cpus = cpu_map__get(evlist->cpus);
	} else if (evsel->cpus != evsel->own_cpus) {
		cpu_map__put(evsel->cpus);
		evsel->cpus = cpu_map__get(evsel->own_cpus);
	}

	thread_map__put(evsel->threads);
	evsel->threads = thread_map__get(evlist->threads);
}

static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel)
		__perf_evlist__propagate_maps(evlist, evsel);
}
void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	entry->evlist = evlist;
	list_add_tail(&entry->node, &evlist->entries);
	entry->idx = evlist->nr_entries;
	entry->tracking = !entry->idx;

	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);

	__perf_evlist__propagate_maps(evlist, entry);
}

void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel)
{
	evsel->evlist = NULL;
	list_del_init(&evsel->node);
	evlist->nr_entries -= 1;
}
void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list)
{
	struct perf_evsel *evsel, *temp;

	__evlist__for_each_safe(list, temp, evsel) {
		list_del_init(&evsel->node);
		perf_evlist__add(evlist, evsel);
	}
}

void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	__evlist__for_each(list, evsel) {
		evsel->leader = leader;
	}
}
void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}
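/*
 * Probe the maximum precise_ip the running kernel/PMU accepts by issuing
 * trial sys_perf_event_open() calls, starting at 3 and decreasing until one
 * succeeds (or zero is reached).
 */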
void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr)
{
	attr->precise_ip = 3;

	while (attr->precise_ip != 0) {
		int fd = sys_perf_event_open(attr, 0, -1, -1, 0);

		if (fd != -1) {
			close(fd);
			break;
		}

		--attr->precise_ip;
	}
}
int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	perf_event_attr__set_max_precise_ip(&attr);

	evsel = perf_evsel__new(&attr);
	if (evsel == NULL)
		goto error;

	/* use asprintf() because free(evsel) assumes name is allocated */
	if (asprintf(&evsel->name, "cycles%.*s",
		     attr.precise_ip ? attr.precise_ip + 1 : 0, ":ppp") < 0)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}
static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head);

	return 0;

out_delete_partial_list:
	__evlist__for_each_safe(&head, n, evsel)
		perf_evsel__delete(evsel);
	return -1;
}
int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}
struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}
int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel = perf_evsel__newtp(sys, name);

	if (IS_ERR(evsel))
		return -1;

	evsel->handler = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}

static int perf_evlist__nr_threads(struct perf_evlist *evlist,
				   struct perf_evsel *evsel)
{
	if (evsel->system_wide)
		return 1;
	else
		return thread_map__nr(evlist->threads);
}
void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		evlist__for_each(evlist, pos) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			nr_threads = perf_evlist__nr_threads(evlist, pos);
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}

	evlist->enabled = false;
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		evlist__for_each(evlist, pos) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			nr_threads = perf_evlist__nr_threads(evlist, pos);
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}

	evlist->enabled = true;
}
void perf_evlist__toggle_enable(struct perf_evlist *evlist)
{
	(evlist->enabled ? perf_evlist__disable : perf_evlist__enable)(evlist);
}
int perf_evlist__disable_event(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	int cpu, thread, err;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return 0;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		for (thread = 0; thread < nr_threads; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_DISABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}

int perf_evlist__enable_event(struct perf_evlist *evlist,
			      struct perf_evsel *evsel)
{
	int cpu, thread, err;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		for (thread = 0; thread < nr_threads; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_ENABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}
static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
					 struct perf_evsel *evsel, int cpu)
{
	int thread, err;
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return -EINVAL;

	for (thread = 0; thread < nr_threads; thread++) {
		err = ioctl(FD(evsel, cpu, thread),
			    PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__enable_event_thread(struct perf_evlist *evlist,
					    struct perf_evsel *evsel,
					    int thread)
{
	int cpu, err;
	int nr_cpus = cpu_map__nr(evlist->cpus);

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
				  struct perf_evsel *evsel, int idx)
{
	bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus);

	if (per_cpu_mmaps)
		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
	else
		return perf_evlist__enable_event_thread(evlist, evsel, idx);
}
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = 0;
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		if (evsel->system_wide)
			nfds += nr_cpus;
		else
			nfds += nr_cpus * nr_threads;
	}

	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
	    fdarray__grow(&evlist->pollfd, nfds) < 0)
		return -ENOMEM;

	return 0;
}
static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, int idx)
{
	int pos = fdarray__add(&evlist->pollfd, fd, POLLIN | POLLERR | POLLHUP);
	/*
	 * Save the idx so that when we filter out fds POLLHUP'ed we can
	 * close the associated evlist->mmap[] entry.
	 */
	if (pos >= 0) {
		evlist->pollfd.priv[pos].idx = idx;

		fcntl(fd, F_SETFL, O_NONBLOCK);
	}

	return pos;
}

int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	return __perf_evlist__add_pollfd(evlist, fd, -1);
}
static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd)
{
	struct perf_evlist *evlist = container_of(fda, struct perf_evlist, pollfd);

	perf_evlist__mmap_put(evlist, fda->priv[fd].idx);
}

int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
	return fdarray__filter(&evlist->pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered);
}

int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
	return fdarray__poll(&evlist->pollfd, timeout);
}
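/*
 * Sample IDs are hashed into evlist->heads[] so that the id carried in a
 * sample can later be mapped back to the evsel (and cpu/tid) that produced it.
 */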
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}
static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}
static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
				     struct perf_evsel *evsel, int idx, int cpu,
				     int thread)
{
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->idx = idx;
	if (evlist->cpus && cpu >= 0)
		sid->cpu = evlist->cpus->map[cpu];
	else
		sid->cpu = -1;
	if (!evsel->system_wide && evlist->threads && thread >= 0)
		sid->tid = thread_map__pid(evlist->threads, thread);
	else
		sid->tid = -1;
}
struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1 || !id)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
						u64 id)
{
	struct perf_sample_id *sid;

	if (!id)
		return NULL;

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	return NULL;
}
static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		*id = array[n - evlist->is_pos];
	}
	return 0;
}
static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
						   union perf_event *event)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return first;

	if (!first->attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}
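/*
 * Read one event from the ring buffer at index 'idx'.  Events that wrap
 * around the end of the buffer are reassembled into md->event_copy so the
 * caller always sees a contiguous record.
 */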
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	u64 head;
	u64 old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!atomic_read(&md->refcnt))
		return NULL;

	head = perf_mmap__read_head(md);
	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *) md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	return event;
}
static bool perf_mmap__empty(struct perf_mmap *md)
{
	return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base;
}

static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx)
{
	atomic_inc(&evlist->mmap[idx].refcnt);
}

static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx)
{
	BUG_ON(atomic_read(&evlist->mmap[idx].refcnt) == 0);

	if (atomic_dec_and_test(&evlist->mmap[idx].refcnt))
		__perf_evlist__munmap(evlist, idx);
}

void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];

	if (!evlist->overwrite) {
		u64 old = md->prev;

		perf_mmap__write_tail(md, old);
	}

	if (atomic_read(&md->refcnt) == 1 && perf_mmap__empty(md))
		perf_evlist__mmap_put(evlist, idx);
}
int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(
			struct auxtrace_mmap_params *mp __maybe_unused,
			off_t auxtrace_offset __maybe_unused,
			unsigned int auxtrace_pages __maybe_unused,
			bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(
			struct auxtrace_mmap_params *mp __maybe_unused,
			struct perf_evlist *evlist __maybe_unused,
			int idx __maybe_unused,
			bool per_cpu __maybe_unused)
{
}
static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
	if (evlist->mmap[idx].base != NULL) {
		munmap(evlist->mmap[idx].base, evlist->mmap_len);
		evlist->mmap[idx].base = NULL;
		atomic_set(&evlist->mmap[idx].refcnt, 0);
	}
	auxtrace_mmap__munmap(&evlist->mmap[idx].auxtrace_mmap);
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	if (evlist->mmap == NULL)
		return;

	for (i = 0; i < evlist->nr_mmaps; i++)
		__perf_evlist__munmap(evlist, i);

	zfree(&evlist->mmap);
}
static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}
struct mmap_params {
	int prot;
	int mask;
	struct auxtrace_mmap_params auxtrace_mp;
};
static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
			       struct mmap_params *mp, int fd)
{
	/*
	 * The last one will be done at perf_evlist__mmap_consume(), so that we
	 * make sure we don't prevent tools from consuming every last event in
	 * the ring buffer.
	 *
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 *
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
	atomic_set(&evlist->mmap[idx].refcnt, 2);
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mp->mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, mp->prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	if (auxtrace_mmap__mmap(&evlist->mmap[idx].auxtrace_mmap,
				&mp->auxtrace_mp, evlist->mmap[idx].base, fd))
		return -1;

	return 0;
}
static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
				       struct mmap_params *mp, int cpu,
				       int thread, int *output)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		int fd;

		if (evsel->system_wide && thread)
			continue;

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;

			if (__perf_evlist__mmap(evlist, idx, mp, *output) < 0)
				return -1;
		} else {
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_evlist__mmap_get(evlist, idx);
		}

		/*
		 * The system_wide flag causes a selected event to be opened
		 * always without a pid.  Consequently it will never get a
		 * POLLHUP, but it is used for tracking in combination with
		 * other events, so it should not need to be polled anyway.
		 * Therefore don't add it for polling.
		 */
		if (!evsel->system_wide &&
		    __perf_evlist__add_pollfd(evlist, fd, idx) < 0) {
			perf_evlist__mmap_put(evlist, idx);
			return -1;
		}

		if (evsel->attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
						   fd) < 0)
				return -1;
			perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
						 thread);
		}
	}

	return 0;
}
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
				     struct mmap_params *mp)
{
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
					      true);

		for (thread = 0; thread < nr_threads; thread++) {
			if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
							thread, &output))
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < nr_cpus; cpu++)
		__perf_evlist__munmap(evlist, cpu);
	return -1;
}
static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
					struct mmap_params *mp)
{
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
					      false);

		if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
						&output))
			goto out_unmap;
	}

	return 0;

out_unmap:
	for (thread = 0; thread < nr_threads; thread++)
		__perf_evlist__munmap(evlist, thread);
	return -1;
}
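/*
 * The mmap covers 2^n data pages plus one extra page for the control header,
 * hence the "pages + 1" below and the power-of-two rounding.
 */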
static size_t perf_evlist__mmap_size(unsigned long pages)
{
	if (pages == UINT_MAX) {
		int max;

		if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
			/*
			 * Pick a once upon a time good value, i.e. things look
			 * strange since we can't read the sysctl value, but
			 * let's not fail; fall back to a reasonable default.
			 */
			max = 512;
		} else {
			max -= (page_size / 1024);
		}

		pages = (max * 1024) / page_size;
		if (!is_power_of_2(pages))
			pages = rounddown_pow_of_two(pages);
	} else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}
static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;

		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		/* round pages up to next power of 2 */
		pages = roundup_pow_of_two(pages);
		if (!pages)
			return -EINVAL;
		pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n",
			pages * page_size, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}
int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
{
	unsigned long max = UINT_MAX;
	long pages;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}

int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
				  int unset __maybe_unused)
{
	return __perf_evlist__parse_mmap_pages(opt->value, str);
}
/**
 * perf_evlist__mmap_ex - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 * @auxtrace_pages: auxtrace map length in pages
 * @auxtrace_overwrite: overwrite older auxtrace data?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail().  Using perf_evlist__mmap_read() does this
 * automatically.
 *
 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
 * consumption using auxtrace_mmap__write_tail().
 *
 * Return: %0 on success, negative error code otherwise.
 */
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
			 bool overwrite, unsigned int auxtrace_pages,
			 bool auxtrace_overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	struct mmap_params mp = {
		.prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
	};

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = perf_evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->mmap_len);
	mp.mask = evlist->mmap_len - page_size - 1;

	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len,
				   auxtrace_pages, auxtrace_overwrite);

	evlist__for_each(evlist, evsel) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, &mp);

	return perf_evlist__mmap_per_cpu(evlist, &mp);
}
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false);
}
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
{
	struct cpu_map *cpus;
	struct thread_map *threads;

	threads = thread_map__new_str(target->pid, target->tid, target->uid);
	if (!threads)
		return -1;

	if (target__uses_dummy_map(target))
		cpus = cpu_map__dummy_new();
	else
		cpus = cpu_map__new(target->cpu_list);

	if (!cpus)
		goto out_delete_threads;

	evlist->has_user_cpus = !!target->cpu_list;

	perf_evlist__set_maps(evlist, cpus, threads);

	return 0;

out_delete_threads:
	thread_map__put(threads);
	return -1;
}
void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
			   struct thread_map *threads)
{
	/*
	 * Allow for the possibility that one or another of the maps isn't being
	 * changed i.e. don't put it.  Note we are assuming the maps that are
	 * being applied are brand new and evlist is taking ownership of the
	 * original reference count of 1.  If that is not the case it is up to
	 * the caller to increase the reference count.
	 */
	if (cpus != evlist->cpus) {
		cpu_map__put(evlist->cpus);
		evlist->cpus = cpus;
	}

	if (threads != evlist->threads) {
		thread_map__put(evlist->threads);
		evlist->threads = threads;
	}

	perf_evlist__propagate_maps(evlist);
}
int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus    = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	evlist__for_each(evlist, evsel) {
		if (evsel->filter == NULL)
			continue;

		/*
		 * filters only work for tracepoint events, which don't have a
		 * cpu limit.  So evlist and evsel should always be the same.
		 */
		err = perf_evsel__apply_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err) {
			*err_evsel = evsel;
			break;
		}
	}

	return err;
}
int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;

	evlist__for_each(evlist, evsel) {
		err = perf_evsel__set_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}
int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter;
	int ret = -1;
	size_t i;

	for (i = 0; i < npids; ++i) {
		if (i == 0) {
			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
				return -1;
		} else {
			char *tmp;

			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
				goto out_free;

			free(filter);
			filter = tmp;
		}
	}

	ret = perf_evlist__set_filter(evlist, filter);
out_free:
	free(filter);
	return ret;
}

int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid)
{
	return perf_evlist__set_filter_pids(evlist, 1, &pid);
}
bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	if (evlist->nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	evlist__for_each(evlist, pos) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}
u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	evlist__for_each(evlist, evsel)
		evlist->combined_sample_type |= evsel->attr.sample_type;

	return evlist->combined_sample_type;
}

u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);
}

u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	u64 branch_type = 0;

	evlist__for_each(evlist, evsel)
		branch_type |= evsel->attr.branch_sample_type;

	return branch_type;
}
bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	evlist__for_each(evlist, pos) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}
u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.read_format;
}

u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}
bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	evlist__for_each_continue(evlist, pos) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}
void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);
	int n;

	evlist__for_each_reverse(evlist, evsel) {
		n = evsel->cpus ? evsel->cpus->nr : ncpus;
		perf_evsel__close(evsel, n, nthreads);
	}
}
static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
{
	struct cpu_map	  *cpus;
	struct thread_map *threads;
	int err = -ENOMEM;

	/*
	 * Try reading /sys/devices/system/cpu/online to get
	 * an all cpus map.
	 *
	 * FIXME: -ENOMEM is the best we can do here, the cpu_map
	 * code needs an overhaul to properly forward the
	 * error, and we may not want to do that fallback to a
	 * default cpu identity map :-\
	 */
	cpus = cpu_map__new(NULL);
	if (!cpus)
		goto out;

	threads = thread_map__new_dummy();
	if (!threads)
		goto out_put;

	perf_evlist__set_maps(evlist, cpus, threads);

	return 0;

out_put:
	cpu_map__put(cpus);
out:
	return err;
}
int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	/*
	 * Default: one fd per CPU, all threads, aka systemwide
	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
	 */
	if (evlist->threads == NULL && evlist->cpus == NULL) {
		err = perf_evlist__create_syswide_maps(evlist);
		if (err < 0)
			goto out_err;
	}

	perf_evlist__update_id_pos(evlist);

	evlist__for_each(evlist, evsel) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}
*evlist
, struct target
*target
,
1502 const char *argv
[], bool pipe_output
,
1503 void (*exec_error
)(int signo
, siginfo_t
*info
, void *ucontext
))
1505 int child_ready_pipe
[2], go_pipe
[2];
1508 if (pipe(child_ready_pipe
) < 0) {
1509 perror("failed to create 'ready' pipe");
1513 if (pipe(go_pipe
) < 0) {
1514 perror("failed to create 'go' pipe");
1515 goto out_close_ready_pipe
;
1518 evlist
->workload
.pid
= fork();
1519 if (evlist
->workload
.pid
< 0) {
1520 perror("failed to fork");
1521 goto out_close_pipes
;
1524 if (!evlist
->workload
.pid
) {
1530 signal(SIGTERM
, SIG_DFL
);
1532 close(child_ready_pipe
[0]);
1534 fcntl(go_pipe
[0], F_SETFD
, FD_CLOEXEC
);
1537 * Tell the parent we're ready to go
1539 close(child_ready_pipe
[1]);
1542 * Wait until the parent tells us to go.
1544 ret
= read(go_pipe
[0], &bf
, 1);
1546 * The parent will ask for the execvp() to be performed by
1547 * writing exactly one byte, in workload.cork_fd, usually via
1548 * perf_evlist__start_workload().
1550 * For cancelling the workload without actually running it,
1551 * the parent will just close workload.cork_fd, without writing
1552 * anything, i.e. read will return zero and we just exit()
1557 perror("unable to read pipe");
1561 execvp(argv
[0], (char **)argv
);
1566 val
.sival_int
= errno
;
1567 if (sigqueue(getppid(), SIGUSR1
, val
))
1575 struct sigaction act
= {
1576 .sa_flags
= SA_SIGINFO
,
1577 .sa_sigaction
= exec_error
,
1579 sigaction(SIGUSR1
, &act
, NULL
);
1582 if (target__none(target
)) {
1583 if (evlist
->threads
== NULL
) {
1584 fprintf(stderr
, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
1585 __func__
, __LINE__
);
1586 goto out_close_pipes
;
1588 thread_map__set_pid(evlist
->threads
, 0, evlist
->workload
.pid
);
1591 close(child_ready_pipe
[1]);
1594 * wait for child to settle
1596 if (read(child_ready_pipe
[0], &bf
, 1) == -1) {
1597 perror("unable to read pipe");
1598 goto out_close_pipes
;
1601 fcntl(go_pipe
[1], F_SETFD
, FD_CLOEXEC
);
1602 evlist
->workload
.cork_fd
= go_pipe
[1];
1603 close(child_ready_pipe
[0]);
1609 out_close_ready_pipe
:
1610 close(child_ready_pipe
[0]);
1611 close(child_ready_pipe
[1]);
int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}
*evlist
, union perf_event
*event
,
1635 struct perf_sample
*sample
)
1637 struct perf_evsel
*evsel
= perf_evlist__event2evsel(evlist
, event
);
1641 return perf_evsel__parse_sample(evsel
, event
, sample
);
1644 size_t perf_evlist__fprintf(struct perf_evlist
*evlist
, FILE *fp
)
1646 struct perf_evsel
*evsel
;
1649 evlist__for_each(evlist
, evsel
) {
1650 printed
+= fprintf(fp
, "%s%s", evsel
->idx
? ", " : "",
1651 perf_evsel__name(evsel
));
1654 return printed
+ fprintf(fp
, "\n");
int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
			       int err, char *buf, size_t size)
{
	int printed, value;
	char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf));

	switch (err) {
	case EACCES:
	case EPERM:
		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);

		value = perf_event_paranoid();

		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");

		if (value >= 2) {
			printed += scnprintf(buf + printed, size - printed,
					     "For your workloads it needs to be <= 1\nHint:\t");
		}
		printed += scnprintf(buf + printed, size - printed,
				     "For system wide tracing it needs to be set to -1.\n");

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
				     "Hint:\tThe current value is %d.", value);
		break;
	default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}
int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf));
	int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0;

	switch (err) {
	case EPERM:
		sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
		printed += scnprintf(buf + printed, size - printed,
				     "Error:\t%s.\n"
				     "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
				     "Hint:\tTried using %zd kB.\n",
				     emsg, pages_max_per_user, pages_attempted);

		if (pages_attempted >= pages_max_per_user) {
			printed += scnprintf(buf + printed, size - printed,
					     "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
					     pages_max_per_user + pages_attempted);
		}

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry using a smaller -m/--mmap-pages value.");
		break;
	default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}
void perf_evlist__to_front(struct perf_evlist *evlist,
			   struct perf_evsel *move_evsel)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(move);

	if (move_evsel == perf_evlist__first(evlist))
		return;

	evlist__for_each_safe(evlist, n, evsel) {
		if (evsel->leader == move_evsel->leader)
			list_move_tail(&evsel->node, &move);
	}

	list_splice(&move, &evlist->entries);
}

void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
				     struct perf_evsel *tracking_evsel)
{
	struct perf_evsel *evsel;

	if (tracking_evsel->tracking)
		return;

	evlist__for_each(evlist, evsel) {
		if (evsel != tracking_evsel)
			evsel->tracking = false;
	}

	tracking_evsel->tracking = true;
}