treewide: remove redundant IS_ERR() before error code check
tools/lib/perf/evlist.c
// SPDX-License-Identifier: GPL-2.0
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <sys/ioctl.h>
#include <internal/evlist.h>
#include <internal/evsel.h>
#include <internal/xyarray.h>
#include <internal/mmap.h>
#include <internal/cpumap.h>
#include <internal/threadmap.h>
#include <internal/lib.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <signal.h>
#include <poll.h>
#include <sys/mman.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <api/fd/array.h>

void perf_evlist__init(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	evlist->nr_entries = 0;
	fdarray__init(&evlist->pollfd, 64);
}

static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
					  struct perf_evsel *evsel)
{
	/*
	 * We already have cpus for evsel (via PMU sysfs) so
	 * keep it, if there's no target cpu list defined.
	 */
	if (!evsel->own_cpus || evlist->has_user_cpus) {
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evlist->cpus);
	} else if (evsel->cpus != evsel->own_cpus) {
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
	}

	perf_thread_map__put(evsel->threads);
	evsel->threads = perf_thread_map__get(evlist->threads);
	evlist->all_cpus = perf_cpu_map__merge(evlist->all_cpus, evsel->cpus);
}

static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_evsel(evlist, evsel)
		__perf_evlist__propagate_maps(evlist, evsel);
}

void perf_evlist__add(struct perf_evlist *evlist,
		      struct perf_evsel *evsel)
{
	list_add_tail(&evsel->node, &evlist->entries);
	evlist->nr_entries += 1;
	__perf_evlist__propagate_maps(evlist, evsel);
}

void perf_evlist__remove(struct perf_evlist *evlist,
			 struct perf_evsel *evsel)
{
	list_del_init(&evsel->node);
	evlist->nr_entries -= 1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist);

	return evlist;
}

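/*
 * Example usage (a minimal sketch built on the public libperf API from
 * <perf/evlist.h> and <perf/evsel.h>; error handling omitted):
 *
 *	struct perf_event_attr attr = {
 *		.type   = PERF_TYPE_SOFTWARE,
 *		.config = PERF_COUNT_SW_CPU_CLOCK,
 *	};
 *	struct perf_evlist *evlist = perf_evlist__new();
 *	struct perf_evsel *evsel = perf_evsel__new(&attr);
 *
 *	perf_evlist__add(evlist, evsel);
 *	...
 *	perf_evlist__delete(evlist);	(purges and frees the evsels too)
 */
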
struct perf_evsel *
perf_evlist__next(struct perf_evlist *evlist, struct perf_evsel *prev)
{
	struct perf_evsel *next;

	if (!prev) {
		next = list_first_entry(&evlist->entries,
					struct perf_evsel,
					node);
	} else {
		next = list_next_entry(prev, node);
	}

	/* Empty list is noticed here so don't need checking on entry. */
	if (&next->node == &evlist->entries)
		return NULL;

	return next;
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	perf_evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	perf_cpu_map__put(evlist->cpus);
	perf_thread_map__put(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
	fdarray__exit(&evlist->pollfd);
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	if (evlist == NULL)
		return;

	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__set_maps(struct perf_evlist *evlist,
			   struct perf_cpu_map *cpus,
			   struct perf_thread_map *threads)
{
	/*
	 * Allow for the possibility that one or another of the maps isn't being
	 * changed i.e. don't put it. Note we are assuming the maps that are
	 * being applied are brand new and evlist is taking ownership of the
	 * original reference count of 1. If that is not the case it is up to
	 * the caller to increase the reference count.
	 */
	if (cpus != evlist->cpus) {
		perf_cpu_map__put(evlist->cpus);
		evlist->cpus = perf_cpu_map__get(cpus);
	}

	if (threads != evlist->threads) {
		perf_thread_map__put(evlist->threads);
		evlist->threads = perf_thread_map__get(threads);
	}

	if (!evlist->all_cpus && cpus)
		evlist->all_cpus = perf_cpu_map__get(cpus);

	perf_evlist__propagate_maps(evlist);
}

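/*
 * Example (sketch, using the map constructors from <perf/cpumap.h> and
 * <perf/threadmap.h>): measure the calling thread on all online CPUs.
 * The caller keeps its own reference to each map and drops it when done:
 *
 *	struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
 *	struct perf_thread_map *threads = perf_thread_map__new_dummy();
 *
 *	perf_thread_map__set_pid(threads, 0, 0);
 *	perf_evlist__set_maps(evlist, cpus, threads);
 *	...
 *	perf_evlist__delete(evlist);
 *	perf_cpu_map__put(cpus);
 *	perf_thread_map__put(threads);
 */
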
int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	perf_evlist__for_each_entry(evlist, evsel) {
		err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;

out_err:
	perf_evlist__close(evlist);
	return err;
}

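/*
 * Typical counting flow (sketch; perf_evsel__read() is part of the
 * public API in <perf/evsel.h>):
 *
 *	perf_evlist__open(evlist);
 *	perf_evlist__enable(evlist);
 *	... run the workload ...
 *	perf_evlist__disable(evlist);
 *	perf_evlist__for_each_evsel(evlist, evsel) {
 *		struct perf_counts_values counts;
 *
 *		perf_evsel__read(evsel, 0, 0, &counts);
 *	}
 *	perf_evlist__close(evlist);
 */
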
void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry_reverse(evlist, evsel)
		perf_evsel__close(evsel);
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel)
		perf_evsel__enable(evsel);
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel)
		perf_evsel__disable(evsel);
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	return first->attr.read_format;
}

#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist,
			 struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

int perf_evlist__id_add_fd(struct perf_evlist *evlist,
			   struct perf_evsel *evsel,
			   int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

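/*
 * For reference (from the perf_event_open(2) read format): a non-group
 * read() on the event fd returns up to four u64s, laid out as
 *
 *	{ value, time_enabled, time_running, id }
 *
 * with time_enabled/time_running present only when the matching
 * PERF_FORMAT_TOTAL_TIME_* bit is set, which is exactly what the id_idx
 * arithmetic above steps over to reach the id.
 */
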
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = perf_cpu_map__nr(evlist->cpus);
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nfds = 0;
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel) {
		if (evsel->system_wide)
			nfds += nr_cpus;
		else
			nfds += nr_cpus * nr_threads;
	}

	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
	    fdarray__grow(&evlist->pollfd, nfds) < 0)
		return -ENOMEM;

	return 0;
}

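/*
 * E.g. with 4 CPUs and 2 monitored threads, a regular event contributes
 * 4 * 2 = 8 fds (one per cpu/thread pair) while a system-wide event
 * contributes only 4 (one per cpu); the pollfd array is grown to hold
 * the sum over all evsels.
 */
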
int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
			    void *ptr, short revent)
{
	int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP);

	if (pos >= 0) {
		evlist->pollfd.priv[pos].ptr = ptr;
		fcntl(fd, F_SETFL, O_NONBLOCK);
	}

	return pos;
}

static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
					 void *arg __maybe_unused)
{
	struct perf_mmap *map = fda->priv[fd].ptr;

	if (map)
		perf_mmap__put(map);
}

int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
	return fdarray__filter(&evlist->pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered, NULL);
}

int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
	return fdarray__poll(&evlist->pollfd, timeout);
}

static struct perf_mmap* perf_evlist__alloc_mmap(struct perf_evlist *evlist, bool overwrite)
{
	int i;
	struct perf_mmap *map;

	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct perf_mmap *prev = i ? &map[i - 1] : NULL;

		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_mmap__consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		perf_mmap__init(&map[i], prev, overwrite, NULL);
	}

	return map;
}

static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
				     struct perf_evsel *evsel, int idx, int cpu,
				     int thread)
{
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->idx = idx;
	if (evlist->cpus && cpu >= 0)
		sid->cpu = evlist->cpus->map[cpu];
	else
		sid->cpu = -1;
	if (!evsel->system_wide && evlist->threads && thread >= 0)
		sid->tid = perf_thread_map__pid(evlist->threads, thread);
	else
		sid->tid = -1;
}

static struct perf_mmap*
perf_evlist__mmap_cb_get(struct perf_evlist *evlist, bool overwrite, int idx)
{
	struct perf_mmap *maps;

	maps = overwrite ? evlist->mmap_ovw : evlist->mmap;

	if (!maps) {
		maps = perf_evlist__alloc_mmap(evlist, overwrite);
		if (!maps)
			return NULL;

		if (overwrite)
			evlist->mmap_ovw = maps;
		else
			evlist->mmap = maps;
	}

	return &maps[idx];
}

#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))

static int
perf_evlist__mmap_cb_mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
			  int output, int cpu)
{
	return perf_mmap__mmap(map, mp, output, cpu);
}

static void perf_evlist__set_mmap_first(struct perf_evlist *evlist, struct perf_mmap *map,
					bool overwrite)
{
	if (overwrite)
		evlist->mmap_ovw_first = map;
	else
		evlist->mmap_first = map;
}

static int
mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
	       int idx, struct perf_mmap_param *mp, int cpu_idx,
	       int thread, int *_output, int *_output_overwrite)
{
	int evlist_cpu = perf_cpu_map__cpu(evlist->cpus, cpu_idx);
	struct perf_evsel *evsel;
	int revent;

	perf_evlist__for_each_entry(evlist, evsel) {
		bool overwrite = evsel->attr.write_backward;
		struct perf_mmap *map;
		int *output, fd, cpu;

		if (evsel->system_wide && thread)
			continue;

		cpu = perf_cpu_map__idx(evsel->cpus, evlist_cpu);
		if (cpu == -1)
			continue;

		map = ops->get(evlist, overwrite, idx);
		if (map == NULL)
			return -ENOMEM;

		if (overwrite) {
			mp->prot = PROT_READ;
			output = _output_overwrite;
		} else {
			mp->prot = PROT_READ | PROT_WRITE;
			output = _output;
		}

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;

			/*
			 * The last one will be done at perf_mmap__consume(), so that we
			 * make sure we don't prevent tools from consuming every last event in
			 * the ring buffer.
			 *
			 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
			 * anymore, but the last events for it are still in the ring buffer,
			 * waiting to be consumed.
			 *
			 * Tools can choose to ignore this at their own discretion, but the
			 * evlist layer can't just drop it when filtering events in
			 * perf_evlist__filter_pollfd().
			 */
			refcount_set(&map->refcnt, 2);

			if (ops->mmap(map, mp, *output, evlist_cpu) < 0)
				return -1;

			if (!idx)
				perf_evlist__set_mmap_first(evlist, map, overwrite);
		} else {
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_mmap__get(map);
		}

		revent = !overwrite ? POLLIN : 0;

		if (!evsel->system_wide &&
		    perf_evlist__add_pollfd(evlist, fd, map, revent) < 0) {
			perf_mmap__put(map);
			return -1;
		}

		if (evsel->attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
						   fd) < 0)
				return -1;
			perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
						 thread);
		}
	}

	return 0;
}

static int
mmap_per_thread(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
		struct perf_mmap_param *mp)
{
	int thread;
	int nr_threads = perf_thread_map__nr(evlist->threads);

	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;
		int output_overwrite = -1;

		if (ops->idx)
			ops->idx(evlist, mp, thread, false);

		if (mmap_per_evsel(evlist, ops, thread, mp, 0, thread,
				   &output, &output_overwrite))
			goto out_unmap;
	}

	return 0;

out_unmap:
	perf_evlist__munmap(evlist);
	return -1;
}

static int
mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
	     struct perf_mmap_param *mp)
{
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nr_cpus = perf_cpu_map__nr(evlist->cpus);
	int cpu, thread;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;
		int output_overwrite = -1;

		if (ops->idx)
			ops->idx(evlist, mp, cpu, true);

		for (thread = 0; thread < nr_threads; thread++) {
			if (mmap_per_evsel(evlist, ops, cpu, mp, cpu,
					   thread, &output, &output_overwrite))
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	perf_evlist__munmap(evlist);
	return -1;
}

static int perf_evlist__nr_mmaps(struct perf_evlist *evlist)
{
	int nr_mmaps;

	nr_mmaps = perf_cpu_map__nr(evlist->cpus);
	if (perf_cpu_map__empty(evlist->cpus))
		nr_mmaps = perf_thread_map__nr(evlist->threads);

	return nr_mmaps;
}

int perf_evlist__mmap_ops(struct perf_evlist *evlist,
			  struct perf_evlist_mmap_ops *ops,
			  struct perf_mmap_param *mp)
{
	struct perf_evsel *evsel;
	const struct perf_cpu_map *cpus = evlist->cpus;
	const struct perf_thread_map *threads = evlist->threads;

	if (!ops || !ops->get || !ops->mmap)
		return -EINVAL;

	mp->mask = evlist->mmap_len - page_size - 1;

	evlist->nr_mmaps = perf_evlist__nr_mmaps(evlist);

	perf_evlist__for_each_entry(evlist, evsel) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	if (perf_cpu_map__empty(cpus))
		return mmap_per_thread(evlist, ops, mp);

	return mmap_per_cpu(evlist, ops, mp);
}

int perf_evlist__mmap(struct perf_evlist *evlist, int pages)
{
	struct perf_mmap_param mp;
	struct perf_evlist_mmap_ops ops = {
		.get  = perf_evlist__mmap_cb_get,
		.mmap = perf_evlist__mmap_cb_mmap,
	};

	evlist->mmap_len = (pages + 1) * page_size;

	return perf_evlist__mmap_ops(evlist, &ops, &mp);
}

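/*
 * Sketch of mapping and draining the rings with the public mmap API
 * (assuming the perf_mmap__read_*() helpers from <perf/mmap.h> and the
 * perf_evlist__for_each_mmap() iterator from <perf/evlist.h>):
 *
 *	struct perf_mmap *map;
 *	union perf_event *event;
 *
 *	perf_evlist__mmap(evlist, 4);	(4 + 1 pages per ring buffer)
 *
 *	perf_evlist__for_each_mmap(evlist, map, false) {
 *		if (perf_mmap__read_init(map) < 0)
 *			continue;
 *		while ((event = perf_mmap__read_event(map)) != NULL) {
 *			... process event ...
 *			perf_mmap__consume(map);
 *		}
 *		perf_mmap__read_done(map);
 *	}
 *
 *	perf_evlist__munmap(evlist);
 */
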
void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	if (evlist->mmap) {
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i]);
	}

	if (evlist->mmap_ovw) {
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap_ovw[i]);
	}

	zfree(&evlist->mmap);
	zfree(&evlist->mmap_ovw);
}

struct perf_mmap*
perf_evlist__next_mmap(struct perf_evlist *evlist, struct perf_mmap *map,
		       bool overwrite)
{
	if (map)
		return map->next;

	return overwrite ? evlist->mmap_ovw_first : evlist->mmap_first;
}
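
/*
 * This is the primitive behind the perf_evlist__for_each_mmap()
 * iterator in <perf/evlist.h>; open-coded, a walk over the regular
 * (non-overwrite) rings reads:
 *
 *	struct perf_mmap *map = NULL;
 *
 *	while ((map = perf_evlist__next_mmap(evlist, map, false)) != NULL) {
 *		... use map ...
 *	}
 */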