/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include "thread_map.h"
#include "parse-events.h"
#include <linux/bitops.h>
#include <linux/hash.h>
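/*
 * Helpers to fetch, for a given evsel, the per (cpu, thread) file descriptor
 * and the matching struct perf_sample_id slot out of their xyarray storage.
 */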
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
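/*
 * Initialize an evlist: empty event list, empty sample id hash buckets, the
 * cpu and thread maps it will operate on, and no forked workload yet.
 */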
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
                       struct thread_map *threads)
        for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
                INIT_HLIST_HEAD(&evlist->heads[i]);
        INIT_LIST_HEAD(&evlist->entries);
        perf_evlist__set_maps(evlist, cpus, threads);
        evlist->workload.pid = -1;

struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
                                     struct thread_map *threads)
        struct perf_evlist *evlist = zalloc(sizeof(*evlist));

        perf_evlist__init(evlist, cpus, threads);
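/*
 * Apply the record options to every event in the list; when more than one
 * event is being recorded, PERF_SAMPLE_ID is forced so samples can be
 * demultiplexed later.
 */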
void perf_evlist__config_attrs(struct perf_evlist *evlist,
                               struct perf_record_opts *opts)
        struct perf_evsel *evsel;

        if (evlist->cpus->map[0] < 0)
                opts->no_inherit = true;

        list_for_each_entry(evsel, &evlist->entries, node) {
                perf_evsel__config(evsel, opts);

                if (evlist->nr_entries > 1)
                        evsel->attr.sample_type |= PERF_SAMPLE_ID;

static void perf_evlist__purge(struct perf_evlist *evlist)
        struct perf_evsel *pos, *n;

        list_for_each_entry_safe(pos, n, &evlist->entries, node) {
                list_del_init(&pos->node);
                perf_evsel__delete(pos);

        evlist->nr_entries = 0;

void perf_evlist__exit(struct perf_evlist *evlist)
        evlist->pollfd = NULL;

void perf_evlist__delete(struct perf_evlist *evlist)
        perf_evlist__purge(evlist);
        perf_evlist__exit(evlist);

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
        list_add_tail(&entry->node, &evlist->entries);

static void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
                                          struct list_head *list,
        list_splice_tail(list, &evlist->entries);
        evlist->nr_entries += nr_entries;
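/*
 * Add the default event, a hardware "cycles" counter, to the evlist.
 */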
int perf_evlist__add_default(struct perf_evlist *evlist)
        struct perf_event_attr attr = {
                .type   = PERF_TYPE_HARDWARE,
                .config = PERF_COUNT_HW_CPU_CYCLES,
        struct perf_evsel *evsel;

        event_attr_init(&attr);

        evsel = perf_evsel__new(&attr, 0);

        /* use strdup() because free(evsel) assumes name is allocated */
        evsel->name = strdup("cycles");

        perf_evlist__add(evlist, evsel);

        perf_evsel__delete(evsel);
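/*
 * Create one evsel per attribute and append them to the evlist; on failure
 * the partially built list is torn down again.
 */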
int perf_evlist__add_attrs(struct perf_evlist *evlist,
                           struct perf_event_attr *attrs, size_t nr_attrs)
        struct perf_evsel *evsel, *n;

        for (i = 0; i < nr_attrs; i++) {
                evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
                        goto out_delete_partial_list;
                list_add_tail(&evsel->node, &head);

        perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

out_delete_partial_list:
        list_for_each_entry_safe(evsel, n, &head, node)
                perf_evsel__delete(evsel);
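/*
 * Resolve a tracepoint name to its numeric id by reading the "id" file
 * under tracing_events_path.
 */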
static int trace_event__id(const char *evname)
        char *filename, *colon;

        if (asprintf(&filename, "%s/%s/id", tracing_events_path, evname) < 0)

        colon = strrchr(filename, ':');

        fd = open(filename, O_RDONLY);
        if (read(fd, id, sizeof(id)) > 0)

int perf_evlist__add_tracepoints(struct perf_evlist *evlist,
                                 const char *tracepoints[],
                                 size_t nr_tracepoints)
        struct perf_event_attr *attrs = zalloc(nr_tracepoints * sizeof(*attrs));

        for (i = 0; i < nr_tracepoints; i++) {
                err = trace_event__id(tracepoints[i]);

                attrs[i].type          = PERF_TYPE_TRACEPOINT;
                attrs[i].config        = err;
                attrs[i].sample_type   = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
                attrs[i].sample_period = 1;

        err = perf_evlist__add_attrs(evlist, attrs, nr_tracepoints);
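/*
 * Find the evsel for a tracepoint given its numeric id, as returned by
 * trace_event__id().
 */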
static struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
        struct perf_evsel *evsel;

        list_for_each_entry(evsel, &evlist->entries, node) {
                if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
                    (int)evsel->attr.config == id)

int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist,
                                          const struct perf_evsel_str_handler *assocs,
        struct perf_evsel *evsel;

        for (i = 0; i < nr_assocs; i++) {
                err = trace_event__id(assocs[i].name);

                evsel = perf_evlist__find_tracepoint_by_id(evlist, err);

                if (evsel->handler.func != NULL)

                evsel->handler.func = assocs[i].handler;
void perf_evlist__disable(struct perf_evlist *evlist)
        struct perf_evsel *pos;

        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
                list_for_each_entry(pos, &evlist->entries, node) {
                        for (thread = 0; thread < evlist->threads->nr; thread++)
                                ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_DISABLE);

void perf_evlist__enable(struct perf_evlist *evlist)
        struct perf_evsel *pos;

        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
                list_for_each_entry(pos, &evlist->entries, node) {
                        for (thread = 0; thread < evlist->threads->nr; thread++)
                                ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_ENABLE);

static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
        int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
        evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
        return evlist->pollfd != NULL ? 0 : -ENOMEM;

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
        fcntl(fd, F_SETFL, O_NONBLOCK);
        evlist->pollfd[evlist->nr_fds].fd = fd;
        evlist->pollfd[evlist->nr_fds].events = POLLIN;
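/*
 * The sample id hash maps the u64 id the kernel puts in each sample back to
 * the evsel that produced it; perf_evlist__id2evsel() does the lookup.
 */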
static void perf_evlist__id_hash(struct perf_evlist *evlist,
                                 struct perf_evsel *evsel,
                                 int cpu, int thread, u64 id)
        struct perf_sample_id *sid = SID(evsel, cpu, thread);

        hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
        hlist_add_head(&sid->node, &evlist->heads[hash]);

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
                         int cpu, int thread, u64 id)
        perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
        evsel->id[evsel->ids++] = id;

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
                                  struct perf_evsel *evsel,
                                  int cpu, int thread, int fd)
        u64 read_data[4] = { 0, };
        int id_idx = 1; /* The first entry is the counter value */

        if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
            read(fd, &read_data, sizeof(read_data)) == -1)

        if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
        if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)

        perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
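/*
 * Map a sample id back to its evsel. With a single event there is nothing to
 * look up; otherwise walk the hash bucket computed from the id.
 */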
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
        struct hlist_head *head;
        struct hlist_node *pos;
        struct perf_sample_id *sid;

        if (evlist->nr_entries == 1)
                return list_entry(evlist->entries.next, struct perf_evsel, node);

        hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
        head = &evlist->heads[hash];

        hlist_for_each_entry(sid, pos, head, node)

        if (!perf_evlist__sample_id_all(evlist))
                return list_entry(evlist->entries.next, struct perf_evsel, node);
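/*
 * Pull the next event out of the mmap ring buffer for map 'idx'. In
 * overwrite mode the kernel may overrun us, so the read restarts at the
 * current head when we fall too far behind; events that wrap around the end
 * of the buffer are reassembled into evlist->event_copy.
 */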
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
        /* XXX Move this to perf.c, making it generally available */
        unsigned int page_size = sysconf(_SC_PAGE_SIZE);
        struct perf_mmap *md = &evlist->mmap[idx];
        unsigned int head = perf_mmap__read_head(md);
        unsigned int old = md->prev;
        unsigned char *data = md->base + page_size;
        union perf_event *event = NULL;

        if (evlist->overwrite) {
                /*
                 * If we're further behind than half the buffer, there's a chance
                 * the writer will bite our tail and mess up the samples under us.
                 *
                 * If we somehow ended up ahead of the head, we got messed up.
                 *
                 * In either case, truncate and restart at head.
                 */
                int diff = head - old;
                if (diff > md->mask / 2 || diff < 0) {
                        fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

                        /*
                         * head points to a known good entry, start there.
                         */

                event = (union perf_event *)&data[old & md->mask];
                size = event->header.size;

                /*
                 * Event straddles the mmap boundary -- header should always
                 * be inside due to u64 alignment of output.
                 */
                if ((old & md->mask) + size != ((old + size) & md->mask)) {
                        unsigned int offset = old;
                        unsigned int len = min(sizeof(*event), size), cpy;
                        void *dst = &evlist->event_copy;

                                cpy = min(md->mask + 1 - (offset & md->mask), len);
                                memcpy(dst, &data[offset & md->mask], cpy);

                        event = &evlist->event_copy;

        if (!evlist->overwrite)
                perf_mmap__write_tail(md, old);
void perf_evlist__munmap(struct perf_evlist *evlist)
        for (i = 0; i < evlist->nr_mmaps; i++) {
                if (evlist->mmap[i].base != NULL) {
                        munmap(evlist->mmap[i].base, evlist->mmap_len);
                        evlist->mmap[i].base = NULL;

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
        evlist->nr_mmaps = evlist->cpus->nr;
        if (evlist->cpus->map[0] == -1)
                evlist->nr_mmaps = evlist->threads->nr;
        evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
        return evlist->mmap != NULL ? 0 : -ENOMEM;
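/*
 * mmap a single ring buffer for index 'idx' and register the owning fd for
 * polling.
 */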
static int __perf_evlist__mmap(struct perf_evlist *evlist,
                               int idx, int prot, int mask, int fd)
        evlist->mmap[idx].prev = 0;
        evlist->mmap[idx].mask = mask;
        evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
        if (evlist->mmap[idx].base == MAP_FAILED) {
                evlist->mmap[idx].base = NULL;

        perf_evlist__add_pollfd(evlist, fd);
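/*
 * Per cpu (and per thread) mmaps: the first fd mapped for a given cpu or
 * thread owns the ring buffer, every later fd is redirected into it with
 * PERF_EVENT_IOC_SET_OUTPUT.
 */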
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
        struct perf_evsel *evsel;

        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
                for (thread = 0; thread < evlist->threads->nr; thread++) {
                        list_for_each_entry(evsel, &evlist->entries, node) {
                                int fd = FD(evsel, cpu, thread);

                                        if (__perf_evlist__mmap(evlist, cpu,
                                                                prot, mask, output) < 0)
                                        if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)

                                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                                    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)

        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
                if (evlist->mmap[cpu].base != NULL) {
                        munmap(evlist->mmap[cpu].base, evlist->mmap_len);
                        evlist->mmap[cpu].base = NULL;

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
        struct perf_evsel *evsel;

        for (thread = 0; thread < evlist->threads->nr; thread++) {
                list_for_each_entry(evsel, &evlist->entries, node) {
                        int fd = FD(evsel, 0, thread);

                                if (__perf_evlist__mmap(evlist, thread,
                                                        prot, mask, output) < 0)
                                if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)

                        if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                            perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)

        for (thread = 0; thread < evlist->threads->nr; thread++) {
                if (evlist->mmap[thread].base != NULL) {
                        munmap(evlist->mmap[thread].base, evlist->mmap_len);
                        evlist->mmap[thread].base = NULL;
/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__read_on_cpu does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
        unsigned int page_size = sysconf(_SC_PAGE_SIZE);
        struct perf_evsel *evsel;
        const struct cpu_map *cpus = evlist->cpus;
        const struct thread_map *threads = evlist->threads;
        int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

        /* 512 kiB: default amount of unprivileged mlocked memory */
        if (pages == UINT_MAX)
                pages = (512 * 1024) / page_size;
        else if (!is_power_of_2(pages))

        mask = pages * page_size - 1;

        if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)

        if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)

        evlist->overwrite = overwrite;
        evlist->mmap_len = (pages + 1) * page_size;

        list_for_each_entry(evsel, &evlist->entries, node) {
                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                    evsel->sample_id == NULL &&
                    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)

        if (evlist->cpus->map[0] == -1)
                return perf_evlist__mmap_per_thread(evlist, prot, mask);

        return perf_evlist__mmap_per_cpu(evlist, prot, mask);
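/*
 * Build the thread and cpu maps this evlist will monitor from the target
 * pid/tid and the cpu list given on the command line.
 */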
int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
                             pid_t target_tid, const char *cpu_list)
        evlist->threads = thread_map__new(target_pid, target_tid);

        if (evlist->threads == NULL)

        if (cpu_list == NULL && target_tid != -1)
                evlist->cpus = cpu_map__dummy_new();
                evlist->cpus = cpu_map__new(cpu_list);

        if (evlist->cpus == NULL)
                goto out_delete_threads;

        thread_map__delete(evlist->threads);

void perf_evlist__delete_maps(struct perf_evlist *evlist)
        cpu_map__delete(evlist->cpus);
        thread_map__delete(evlist->threads);
        evlist->threads = NULL;

int perf_evlist__set_filters(struct perf_evlist *evlist)
        const struct thread_map *threads = evlist->threads;
        const struct cpu_map *cpus = evlist->cpus;
        struct perf_evsel *evsel;

        list_for_each_entry(evsel, &evlist->entries, node) {
                filter = evsel->filter;

                for (cpu = 0; cpu < cpus->nr; cpu++) {
                        for (thread = 0; thread < threads->nr; thread++) {
                                fd = FD(evsel, cpu, thread);
                                err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist)
        struct perf_evsel *pos, *first;

        pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

        list_for_each_entry_continue(pos, &evlist->entries, node) {
                if (first->attr.sample_type != pos->attr.sample_type)

u64 perf_evlist__sample_type(const struct perf_evlist *evlist)
        struct perf_evsel *first;

        first = list_entry(evlist->entries.next, struct perf_evsel, node);
        return first->attr.sample_type;

u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist)
        struct perf_evsel *first;
        struct perf_sample *data;

        first = list_entry(evlist->entries.next, struct perf_evsel, node);

        if (!first->attr.sample_id_all)

        sample_type = first->attr.sample_type;

        if (sample_type & PERF_SAMPLE_TID)
                size += sizeof(data->tid) * 2;

        if (sample_type & PERF_SAMPLE_TIME)
                size += sizeof(data->time);

        if (sample_type & PERF_SAMPLE_ID)
                size += sizeof(data->id);

        if (sample_type & PERF_SAMPLE_STREAM_ID)
                size += sizeof(data->stream_id);

        if (sample_type & PERF_SAMPLE_CPU)
                size += sizeof(data->cpu) * 2;

bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist)
        struct perf_evsel *pos, *first;

        pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

        list_for_each_entry_continue(pos, &evlist->entries, node) {
                if (first->attr.sample_id_all != pos->attr.sample_id_all)

bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
        struct perf_evsel *first;

        first = list_entry(evlist->entries.next, struct perf_evsel, node);
        return first->attr.sample_id_all;

void perf_evlist__set_selected(struct perf_evlist *evlist,
                               struct perf_evsel *evsel)
        evlist->selected = evsel;
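/*
 * Open a counter fd for every evsel on each cpu/thread in the maps. Roughly,
 * the call order used by the tools is: perf_evlist__new(),
 * perf_evlist__create_maps(), perf_evlist__add_default() or
 * perf_evlist__add_attrs(), perf_evlist__open(), perf_evlist__mmap(), then
 * perf_evlist__mmap_read() in a poll loop.
 */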
int perf_evlist__open(struct perf_evlist *evlist, bool group)
        struct perf_evsel *evsel, *first;
        int err, ncpus, nthreads;

        first = list_entry(evlist->entries.next, struct perf_evsel, node);

        list_for_each_entry(evsel, &evlist->entries, node) {
                struct xyarray *group_fd = NULL;

                if (group && evsel != first)
                        group_fd = first->fd;

                err = perf_evsel__open(evsel, evlist->cpus, evlist->threads,

        ncpus = evlist->cpus ? evlist->cpus->nr : 1;
        nthreads = evlist->threads ? evlist->threads->nr : 1;

        list_for_each_entry_reverse(evsel, &evlist->entries, node)
                perf_evsel__close(evsel, ncpus, nthreads);
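/*
 * Fork the workload to be profiled. The child signals readiness over the
 * 'ready' pipe, then blocks on the 'go' pipe before exec'ing the real
 * command; perf_evlist__start_workload() closes the write end ("cork") to
 * let it run.
 */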
int perf_evlist__prepare_workload(struct perf_evlist *evlist,
                                  struct perf_record_opts *opts,
        int child_ready_pipe[2], go_pipe[2];

        if (pipe(child_ready_pipe) < 0) {
                perror("failed to create 'ready' pipe");

        if (pipe(go_pipe) < 0) {
                perror("failed to create 'go' pipe");
                goto out_close_ready_pipe;

        evlist->workload.pid = fork();
        if (evlist->workload.pid < 0) {
                perror("failed to fork");
                goto out_close_pipes;

        if (!evlist->workload.pid) {
                if (opts->pipe_output)

                close(child_ready_pipe[0]);
                fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

                /*
                 * Do a dummy execvp to get the PLT entry resolved,
                 * so we avoid the resolver overhead on the real
                 */
                execvp("", (char **)argv);

                /*
                 * Tell the parent we're ready to go
                 */
                close(child_ready_pipe[1]);

                /*
                 * Wait until the parent tells us to go.
                 */
                if (read(go_pipe[0], &bf, 1) == -1)
                        perror("unable to read pipe");

                execvp(argv[0], (char **)argv);

                kill(getppid(), SIGUSR1);

        if (!opts->system_wide && opts->target_tid == -1 && opts->target_pid == -1)
                evlist->threads->map[0] = evlist->workload.pid;

        close(child_ready_pipe[1]);
        /*
         * wait for child to settle
         */
        if (read(child_ready_pipe[0], &bf, 1) == -1) {
                perror("unable to read pipe");
                goto out_close_pipes;

        evlist->workload.cork_fd = go_pipe[1];
        close(child_ready_pipe[0]);

out_close_ready_pipe:
        close(child_ready_pipe[0]);
        close(child_ready_pipe[1]);

int perf_evlist__start_workload(struct perf_evlist *evlist)
        if (evlist->workload.cork_fd > 0) {
                /*
                 * Remove the cork, let it rip!
                 */
                return close(evlist->workload.cork_fd);