tools/perf/util/evlist.c

/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <api/fs/fs.h>
#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include "units.h"
#include "asm/bug.h"
#include <signal.h>
#include <unistd.h>

#include "parse-events.h"
#include <subcmd/parse-options.h>

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/err.h>

#ifdef LACKS_SIGQUEUE_PROTOTYPE
int sigqueue(pid_t pid, int sig, const union sigval value);
#endif

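/*
 * FD() and SID() index the per-(cpu, thread) file descriptor and
 * struct perf_sample_id slots that each evsel keeps in xyarrays.
 */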
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	fdarray__init(&evlist->pollfd, 64);
	evlist->workload.pid = -1;
	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct perf_evlist *perf_evlist__new_default(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_default(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

struct perf_evlist *perf_evlist__new_dummy(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_dummy(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		perf_evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		pos->evlist = NULL;
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	zfree(&evlist->mmap);
	zfree(&evlist->overwrite_mmap);
	fdarray__exit(&evlist->pollfd);
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	if (evlist == NULL)
		return;

	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	cpu_map__put(evlist->cpus);
	thread_map__put(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
					  struct perf_evsel *evsel)
{
	/*
	 * We already have cpus for evsel (via PMU sysfs) so
	 * keep it, if there's no target cpu list defined.
	 */
	if (!evsel->own_cpus || evlist->has_user_cpus) {
		cpu_map__put(evsel->cpus);
		evsel->cpus = cpu_map__get(evlist->cpus);
	} else if (evsel->cpus != evsel->own_cpus) {
		cpu_map__put(evsel->cpus);
		evsel->cpus = cpu_map__get(evsel->own_cpus);
	}

	thread_map__put(evsel->threads);
	evsel->threads = thread_map__get(evlist->threads);
}

static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		__perf_evlist__propagate_maps(evlist, evsel);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	entry->evlist = evlist;
	list_add_tail(&entry->node, &evlist->entries);
	entry->idx = evlist->nr_entries;
	entry->tracking = !entry->idx;

	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);

	__perf_evlist__propagate_maps(evlist, entry);
}

void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel)
{
	evsel->evlist = NULL;
	list_del_init(&evsel->node);
	evlist->nr_entries -= 1;
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list)
{
	struct perf_evsel *evsel, *temp;

	__evlist__for_each_entry_safe(list, temp, evsel) {
		list_del_init(&evsel->node);
		perf_evlist__add(evlist, evsel);
	}
}

void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	__evlist__for_each_entry(list, evsel) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}

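/*
 * Probe the maximum usable precise_ip by trying to open a throwaway
 * event on the current task with decreasing precision until the kernel
 * accepts it (or precision reaches 0).
 */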
void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr)
{
	attr->precise_ip = 3;

	while (attr->precise_ip != 0) {
		int fd = sys_perf_event_open(attr, 0, -1, -1, 0);
		if (fd != -1) {
			close(fd);
			break;
		}
		--attr->precise_ip;
	}
}

int __perf_evlist__add_default(struct perf_evlist *evlist, bool precise)
{
	struct perf_evsel *evsel = perf_evsel__new_cycles(precise);

	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);
	return 0;
}

int perf_evlist__add_dummy(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_DUMMY,
		.size	= sizeof(attr), /* to capture ABI version */
	};
	struct perf_evsel *evsel = perf_evsel__new_idx(&attr, evlist->nr_entries);

	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);
	return 0;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head);

	return 0;

out_delete_partial_list:
	__evlist__for_each_entry_safe(&head, n, evsel)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel = perf_evsel__newtp(sys, name);

	if (IS_ERR(evsel))
		return -1;

	evsel->handler = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}

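/*
 * system_wide evsels are opened once per CPU with a single dummy thread,
 * so they contribute exactly one fd per CPU no matter how many threads
 * the evlist is targeting.
 */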
static int perf_evlist__nr_threads(struct perf_evlist *evlist,
				   struct perf_evsel *evsel)
{
	if (evsel->system_wide)
		return 1;
	else
		return thread_map__nr(evlist->threads);
}

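/*
 * Only group leaders are toggled below: group members are scheduled
 * together with their leader, so toggling the leader should be enough
 * to toggle the whole group.
 */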
void perf_evlist__disable(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
			continue;
		perf_evsel__disable(pos);
	}

	evlist->enabled = false;
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
			continue;
		perf_evsel__enable(pos);
	}

	evlist->enabled = true;
}

void perf_evlist__toggle_enable(struct perf_evlist *evlist)
{
	(evlist->enabled ? perf_evlist__disable : perf_evlist__enable)(evlist);
}

static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
					 struct perf_evsel *evsel, int cpu)
{
	int thread;
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return -EINVAL;

	for (thread = 0; thread < nr_threads; thread++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__enable_event_thread(struct perf_evlist *evlist,
					    struct perf_evsel *evsel,
					    int thread)
{
	int cpu;
	int nr_cpus = cpu_map__nr(evlist->cpus);

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

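/*
 * @idx is a CPU index when the evlist mmaps per CPU (non-empty cpu map)
 * and a thread index otherwise.
 */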
int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
				  struct perf_evsel *evsel, int idx)
{
	bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus);

	if (per_cpu_mmaps)
		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
	else
		return perf_evlist__enable_event_thread(evlist, evsel, idx);
}

int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = 0;
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->system_wide)
			nfds += nr_cpus;
		else
			nfds += nr_cpus * nr_threads;
	}

	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
	    fdarray__grow(&evlist->pollfd, nfds) < 0)
		return -ENOMEM;

	return 0;
}

static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
				     struct perf_mmap *map, short revent)
{
	int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP);
	/*
	 * Save the idx so that when we filter out fds POLLHUP'ed we can
	 * close the associated evlist->mmap[] entry.
	 */
	if (pos >= 0) {
		evlist->pollfd.priv[pos].ptr = map;

		fcntl(fd, F_SETFL, O_NONBLOCK);
	}

	return pos;
}

int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	return __perf_evlist__add_pollfd(evlist, fd, NULL, POLLIN);
}

static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
					 void *arg __maybe_unused)
{
	struct perf_mmap *map = fda->priv[fd].ptr;

	if (map)
		perf_mmap__put(map);
}

int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
	return fdarray__filter(&evlist->pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered, NULL);
}

int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
	return fdarray__poll(&evlist->pollfd, timeout);
}

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

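/*
 * Two ways to learn the kernel-assigned event id for an fd: the
 * PERF_EVENT_IOC_ID ioctl on recent kernels, or, as a fallback, parsing
 * the id out of a read() of the counter according to read_format.
 */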
int perf_evlist__id_add_fd(struct perf_evlist *evlist,
			   struct perf_evsel *evsel,
			   int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
				     struct perf_evsel *evsel, int idx, int cpu,
				     int thread)
{
	struct perf_sample_id *sid = SID(evsel, cpu, thread);
	sid->idx = idx;
	if (evlist->cpus && cpu >= 0)
		sid->cpu = evlist->cpus->map[cpu];
	else
		sid->cpu = -1;
	if (!evsel->system_wide && evlist->threads && thread >= 0)
		sid->tid = thread_map__pid(evlist->threads, thread);
	else
		sid->tid = -1;
}

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1 || !id)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
						u64 id)
{
	struct perf_sample_id *sid;

	if (!id)
		return NULL;

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	return NULL;
}

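/*
 * For PERF_RECORD_SAMPLE the id sits id_pos u64s from the start of the
 * record body; for other record types carrying a sample_id_all trailer,
 * is_pos counts u64s back from the end of the record.
 */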
static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
					    union perf_event *event)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return first;

	if (!first->attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}

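/*
 * PERF_EVENT_IOC_PAUSE_OUTPUT stops the kernel from writing into the
 * overwrite (backward) ring buffers, so a consistent snapshot can be
 * read out before resuming.
 */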
static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
{
	int i;

	if (!evlist->overwrite_mmap)
		return 0;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		int fd = evlist->overwrite_mmap[i].fd;
		int err;

		if (fd < 0)
			continue;
		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__pause(struct perf_evlist *evlist)
{
	return perf_evlist__set_paused(evlist, true);
}

static int perf_evlist__resume(struct perf_evlist *evlist)
{
	return perf_evlist__set_paused(evlist, false);
}

static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
{
	int i;

	if (evlist->mmap)
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i]);

	if (evlist->overwrite_mmap)
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->overwrite_mmap[i]);
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	perf_evlist__munmap_nofree(evlist);
	zfree(&evlist->mmap);
	zfree(&evlist->overwrite_mmap);
}

static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist,
						 bool overwrite)
{
	int i;
	struct perf_mmap *map;

	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		map[i].fd = -1;
		map[i].overwrite = overwrite;
		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_mmap__consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		refcount_set(&map[i].refcnt, 0);
	}
	return map;
}

static bool
perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
			 struct perf_evsel *evsel)
{
	if (evsel->attr.write_backward)
		return false;
	return true;
}

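/*
 * One ring buffer per idx: the first fd mapped on a given cpu/thread
 * becomes *output and every other fd is redirected into it with
 * PERF_EVENT_IOC_SET_OUTPUT, taking a reference on the map.
 * write_backward evsels go to a separate overwrite map with PROT_WRITE
 * cleared.
 */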
static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
				       struct mmap_params *mp, int cpu_idx,
				       int thread, int *_output, int *_output_overwrite)
{
	struct perf_evsel *evsel;
	int revent;
	int evlist_cpu = cpu_map__cpu(evlist->cpus, cpu_idx);

	evlist__for_each_entry(evlist, evsel) {
		struct perf_mmap *maps = evlist->mmap;
		int *output = _output;
		int fd;
		int cpu;

		mp->prot = PROT_READ | PROT_WRITE;
		if (evsel->attr.write_backward) {
			output = _output_overwrite;
			maps = evlist->overwrite_mmap;

			if (!maps) {
				maps = perf_evlist__alloc_mmap(evlist, true);
				if (!maps)
					return -1;
				evlist->overwrite_mmap = maps;
				if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
					perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
			}
			mp->prot &= ~PROT_WRITE;
		}

		if (evsel->system_wide && thread)
			continue;

		cpu = cpu_map__idx(evsel->cpus, evlist_cpu);
		if (cpu == -1)
			continue;

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;

			if (perf_mmap__mmap(&maps[idx], mp, *output, evlist_cpu) < 0)
				return -1;
		} else {
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_mmap__get(&maps[idx]);
		}

		revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0;

		/*
		 * The system_wide flag causes a selected event to be opened
		 * always without a pid.  Consequently it will never get a
		 * POLLHUP, but it is used for tracking in combination with
		 * other events, so it should not need to be polled anyway.
		 * Therefore don't add it for polling.
		 */
		if (!evsel->system_wide &&
		    __perf_evlist__add_pollfd(evlist, fd, &maps[idx], revent) < 0) {
			perf_mmap__put(&maps[idx]);
			return -1;
		}

		if (evsel->attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
						   fd) < 0)
				return -1;
			perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
						 thread);
		}
	}

	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
				     struct mmap_params *mp)
{
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;
		int output_overwrite = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
					      true);

		for (thread = 0; thread < nr_threads; thread++) {
			if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
							thread, &output, &output_overwrite))
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	perf_evlist__munmap_nofree(evlist);
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
					struct mmap_params *mp)
{
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;
		int output_overwrite = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
					      false);

		if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
						&output, &output_overwrite))
			goto out_unmap;
	}

	return 0;

out_unmap:
	perf_evlist__munmap_nofree(evlist);
	return -1;
}

unsigned long perf_event_mlock_kb_in_pages(void)
{
	unsigned long pages;
	int max;

	if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
		/*
		 * Pick a once upon a time good value, i.e. things look
		 * strange since we can't read a sysctl value, but lets not
		 * die yet...
		 */
		max = 512;
	} else {
		max -= (page_size / 1024);
	}

	pages = (max * 1024) / page_size;
	if (!is_power_of_2(pages))
		pages = rounddown_pow_of_two(pages);

	return pages;
}

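/*
 * pages == UINT_MAX means "use the perf_event_mlock_kb default"; any
 * other value must already be a power of 2. The extra page holds the
 * struct perf_event_mmap_page control header.
 */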
size_t perf_evlist__mmap_size(unsigned long pages)
{
	if (pages == UINT_MAX)
		pages = perf_event_mlock_kb_in_pages();
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}

static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;
		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		char buf[100];

		/* round pages up to next power of 2 */
		pages = roundup_pow_of_two(pages);
		if (!pages)
			return -EINVAL;

		unit_number__scnprintf(buf, sizeof(buf), pages * page_size);
		pr_info("rounding mmap pages size to %s (%lu pages)\n",
			buf, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}

int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
{
	unsigned long max = UINT_MAX;
	long pages;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}

int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
				  int unset __maybe_unused)
{
	return __perf_evlist__parse_mmap_pages(opt->value, str);
}

/**
 * perf_evlist__mmap_ex - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @auxtrace_pages: auxtrace map length in pages
 * @auxtrace_overwrite: overwrite older auxtrace data?
 *
 * The user needs to signal event consumption using
 * perf_mmap__write_tail().  Using perf_evlist__mmap_read() does this
 * automatically.
 *
 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
 * consumption using auxtrace_mmap__write_tail().
 *
 * Return: %0 on success, negative error code otherwise.
 */
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
			 unsigned int auxtrace_pages,
			 bool auxtrace_overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	/*
	 * Delay setting mp.prot: set it before calling perf_mmap__mmap.
	 * Its value is decided by evsel's write_backward.
	 * So &mp should not be passed through const pointer.
	 */
	struct mmap_params mp;

	if (!evlist->mmap)
		evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
	if (!evlist->mmap)
		return -ENOMEM;

	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->mmap_len = perf_evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->mmap_len);
	mp.mask = evlist->mmap_len - page_size - 1;

	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len,
				   auxtrace_pages, auxtrace_overwrite);

	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, &mp);

	return perf_evlist__mmap_per_cpu(evlist, &mp);
}

int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages)
{
	return perf_evlist__mmap_ex(evlist, pages, 0, false);
}

int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
{
	bool all_threads = (target->per_thread && target->system_wide);
	struct cpu_map *cpus;
	struct thread_map *threads;

	/*
	 * If both '-a' and '--per-thread' are given to perf record, '-a'
	 * overrides '--per-thread': target->per_thread = false and
	 * target->system_wide = true.
	 *
	 * If only '--per-thread' is given, target->per_thread = true and
	 * target->system_wide = false.
	 *
	 * So for perf record, target->per_thread && target->system_wide is
	 * always false and thread_map__new_str() doesn't call
	 * thread_map__new_all_cpus(), which keeps perf record's current
	 * behavior.
	 *
	 * perf stat allows both target->per_thread and target->system_wide
	 * to be true, meaning "collect system-wide per-thread data"; in
	 * that case thread_map__new_str() calls thread_map__new_all_cpus()
	 * to enumerate all threads.
	 */
	threads = thread_map__new_str(target->pid, target->tid, target->uid,
				      all_threads);

	if (!threads)
		return -1;

	if (target__uses_dummy_map(target))
		cpus = cpu_map__dummy_new();
	else
		cpus = cpu_map__new(target->cpu_list);

	if (!cpus)
		goto out_delete_threads;

	evlist->has_user_cpus = !!target->cpu_list;

	perf_evlist__set_maps(evlist, cpus, threads);

	return 0;

out_delete_threads:
	thread_map__put(threads);
	return -1;
}

void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
			   struct thread_map *threads)
{
	/*
	 * Allow for the possibility that one or another of the maps isn't being
	 * changed i.e. don't put it.  Note we are assuming the maps that are
	 * being applied are brand new and evlist is taking ownership of the
	 * original reference count of 1.  If that is not the case it is up to
	 * the caller to increase the reference count.
	 */
	if (cpus != evlist->cpus) {
		cpu_map__put(evlist->cpus);
		evlist->cpus = cpu_map__get(cpus);
	}

	if (threads != evlist->threads) {
		thread_map__put(evlist->threads);
		evlist->threads = thread_map__get(threads);
	}

	perf_evlist__propagate_maps(evlist);
}

void __perf_evlist__set_sample_bit(struct perf_evlist *evlist,
				   enum perf_event_sample_format bit)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		__perf_evsel__set_sample_bit(evsel, bit);
}

void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist,
				     enum perf_event_sample_format bit)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		__perf_evsel__reset_sample_bit(evsel, bit);
}

int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel)
{
	struct perf_evsel *evsel;
	int err = 0;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->filter == NULL)
			continue;

		/*
		 * Filters only work for tracepoint events, which don't have a
		 * cpu limit, so the evsel cpu map is always the evlist's.
		 */
		err = perf_evsel__apply_filter(evsel, evsel->filter);
		if (err) {
			*err_evsel = evsel;
			break;
		}
	}

	return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		err = perf_evsel__set_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter = NULL; /* stays NULL if npids == 0 */
	int ret = -1;
	size_t i;

	for (i = 0; i < npids; ++i) {
		if (i == 0) {
			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
				return -1;
		} else {
			char *tmp;

			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
				goto out_free;

			free(filter);
			filter = tmp;
		}
	}

	ret = perf_evlist__set_filter(evlist, filter);
out_free:
	free(filter);
	return ret;
}

int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid)
{
	return perf_evlist__set_filter_pids(evlist, 1, &pid);
}

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	if (evlist->nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	evlist__for_each_entry(evlist, pos) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	evlist__for_each_entry(evlist, evsel)
		evlist->combined_sample_type |= evsel->attr.sample_type;

	return evlist->combined_sample_type;
}

u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);
}

u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	u64 branch_type = 0;

	evlist__for_each_entry(evlist, evsel)
		branch_type |= evsel->attr.branch_sample_type;
	return branch_type;
}

bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	evlist__for_each_entry(evlist, pos) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	return first->attr.read_format;
}

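/*
 * Size of the optional trailer (TID, TIME, ID, ...) that the kernel
 * appends to every non-sample record when sample_id_all is set.
 */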
u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}

bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	evlist__for_each_entry_continue(evlist, pos) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry_reverse(evlist, evsel)
		perf_evsel__close(evsel);
}

static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
{
	struct cpu_map	  *cpus;
	struct thread_map *threads;
	int err = -ENOMEM;

	/*
	 * Try reading /sys/devices/system/cpu/online to get
	 * an all cpus map.
	 *
	 * FIXME: -ENOMEM is the best we can do here, the cpu_map
	 * code needs an overhaul to properly forward the
	 * error, and we may not want to do that fallback to a
	 * default cpu identity map :-\
	 */
	cpus = cpu_map__new(NULL);
	if (!cpus)
		goto out;

	threads = thread_map__new_dummy();
	if (!threads)
		goto out_put;

	perf_evlist__set_maps(evlist, cpus, threads);
	err = 0;
out:
	return err;
out_put:
	cpu_map__put(cpus);
	goto out;
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	/*
	 * Default: one fd per CPU, all threads, aka systemwide
	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
	 */
	if (evlist->threads == NULL && evlist->cpus == NULL) {
		err = perf_evlist__create_syswide_maps(evlist);
		if (err < 0)
			goto out_err;
	}

	perf_evlist__update_id_pos(evlist);

	evlist__for_each_entry(evlist, evsel) {
		err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}

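/*
 * Fork the workload now but keep it "corked": the child blocks on a read
 * of go_pipe until perf_evlist__start_workload() writes one byte (or
 * cancels by closing the fd), and only then execvp()s the command, so
 * events can be set up on the child's pid before it runs.
 */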
int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
				  const char *argv[], bool pipe_output,
				  void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		int ret;

		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		ret = read(go_pipe[0], &bf, 1);
		/*
		 * The parent will ask for the execvp() to be performed by
		 * writing exactly one byte, in workload.cork_fd, usually via
		 * perf_evlist__start_workload().
		 *
		 * For cancelling the workload without actually running it,
		 * the parent will just close workload.cork_fd, without writing
		 * anything, i.e. read will return zero and we just exit()
		 * here.
		 */
		if (ret != 1) {
			if (ret == -1)
				perror("unable to read pipe");
			exit(ret);
		}

		execvp(argv[0], (char **)argv);

		if (exec_error) {
			union sigval val;

			val.sival_int = errno;
			if (sigqueue(getppid(), SIGUSR1, val))
				perror(argv[0]);
		} else
			perror(argv[0]);
		exit(-1);
	}

	if (exec_error) {
		struct sigaction act = {
			.sa_flags     = SA_SIGINFO,
			.sa_sigaction = exec_error,
		};
		sigaction(SIGUSR1, &act, NULL);
	}

	if (target__none(target)) {
		if (evlist->threads == NULL) {
			fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
				__func__, __LINE__);
			goto out_close_pipes;
		}
		thread_map__set_pid(evlist->threads, 0, evlist->workload.pid);
	}

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return perf_evsel__parse_sample(evsel, event, sample);
}

int perf_evlist__parse_sample_timestamp(struct perf_evlist *evlist,
					union perf_event *event,
					u64 *timestamp)
{
	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return perf_evsel__parse_sample_timestamp(evsel, event, timestamp);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	evlist__for_each_entry(evlist, evsel) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}

int perf_evlist__strerror_open(struct perf_evlist *evlist,
			       int err, char *buf, size_t size)
{
	int printed, value;
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));

	switch (err) {
	case EACCES:
	case EPERM:
		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);

		value = perf_event_paranoid();

		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");

		if (value >= 2) {
			printed += scnprintf(buf + printed, size - printed,
					     "For your workloads it needs to be <= 1\nHint:\t");
		}
		printed += scnprintf(buf + printed, size - printed,
				     "For system wide tracing it needs to be set to -1.\n");

		printed += scnprintf(buf + printed, size - printed,
				    "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
				    "Hint:\tThe current value is %d.", value);
		break;
	case EINVAL: {
		struct perf_evsel *first = perf_evlist__first(evlist);
		int max_freq;

		if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
			goto out_default;

		if (first->attr.sample_freq < (u64)max_freq)
			goto out_default;

		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
				    "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
				    emsg, max_freq, first->attr.sample_freq);
		break;
	}
	default:
out_default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
	int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0;

	switch (err) {
	case EPERM:
		sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
		printed += scnprintf(buf + printed, size - printed,
				     "Error:\t%s.\n"
				     "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
				     "Hint:\tTried using %zd kB.\n",
				     emsg, pages_max_per_user, pages_attempted);

		if (pages_attempted >= pages_max_per_user) {
			printed += scnprintf(buf + printed, size - printed,
					     "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
					     pages_max_per_user + pages_attempted);
		}

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry using a smaller -m/--mmap-pages value.");
		break;
	default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

void perf_evlist__to_front(struct perf_evlist *evlist,
			   struct perf_evsel *move_evsel)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(move);

	if (move_evsel == perf_evlist__first(evlist))
		return;

	evlist__for_each_entry_safe(evlist, n, evsel) {
		if (evsel->leader == move_evsel->leader)
			list_move_tail(&evsel->node, &move);
	}

	list_splice(&move, &evlist->entries);
}

void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
				     struct perf_evsel *tracking_evsel)
{
	struct perf_evsel *evsel;

	if (tracking_evsel->tracking)
		return;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel != tracking_evsel)
			evsel->tracking = false;
	}

	tracking_evsel->tracking = true;
}

struct perf_evsel *
perf_evlist__find_evsel_by_str(struct perf_evlist *evlist,
			       const char *str)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->name)
			continue;
		if (strcmp(str, evsel->name) == 0)
			return evsel;
	}

	return NULL;
}

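/*
 * Legal transitions for the backward (overwrite) mmap state machine:
 *
 *   NOTREADY     -> RUNNING       (no ring buffer action)
 *   RUNNING      -> DATA_PENDING  (pause output for a stable read)
 *   DATA_PENDING -> EMPTY         (no ring buffer action)
 *   EMPTY        -> RUNNING       (resume output)
 *
 * Anything else is silently rejected via state_err.
 */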
void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist,
				  enum bkw_mmap_state state)
{
	enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
	enum action {
		NONE,
		PAUSE,
		RESUME,
	} action = NONE;

	if (!evlist->overwrite_mmap)
		return;

	switch (old_state) {
	case BKW_MMAP_NOTREADY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		break;
	}
	case BKW_MMAP_RUNNING: {
		if (state != BKW_MMAP_DATA_PENDING)
			goto state_err;
		action = PAUSE;
		break;
	}
	case BKW_MMAP_DATA_PENDING: {
		if (state != BKW_MMAP_EMPTY)
			goto state_err;
		break;
	}
	case BKW_MMAP_EMPTY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		action = RESUME;
		break;
	}
	default:
		WARN_ONCE(1, "Shouldn't get there\n");
	}

	evlist->bkw_mmap_state = state;

	switch (action) {
	case PAUSE:
		perf_evlist__pause(evlist);
		break;
	case RESUME:
		perf_evlist__resume(evlist);
		break;
	case NONE:
	default:
		break;
	}

state_err:
	return;
}

bool perf_evlist__exclude_kernel(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->attr.exclude_kernel)
			return false;
	}

	return true;
}

/*
 * Events in the data file are not collected in groups, but we still want
 * the group display. Set the artificial group and set the leader's
 * forced_leader flag to notify the display code.
 */
void perf_evlist__force_leader(struct perf_evlist *evlist)
{
	if (!evlist->nr_groups) {
		struct perf_evsel *leader = perf_evlist__first(evlist);

		perf_evlist__set_leader(evlist);
		leader->forced_leader = true;
	}
}