/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <lk/debugfs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include <unistd.h>

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>
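/*
 * FD() and SID() look up, for a given evsel, the per-cpu/per-thread perf
 * event file descriptor and sample_id slot in the xyarrays allocated when
 * the event was created.
 */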
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}
struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}
/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}
static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node)
		perf_evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}
static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}
void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	bool set_id_pos = !evlist->nr_entries;

	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
	if (set_id_pos)
		perf_evlist__set_id_pos(evlist);
}
void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	list_for_each_entry(evsel, list, node) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}
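/*
 * Add the default event: a hardware CPU cycles counter, named "cycles" with
 * an allocated string so that perf_evsel__delete() can free it.
 */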
int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}
static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}
struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}
int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel;

	evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
	if (evsel == NULL)
		return -1;

	evsel->handler.func = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}
void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}
int perf_evlist__disable_event(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	int cpu, thread, err;

	if (!evsel->fd)
		return 0;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		for (thread = 0; thread < evlist->threads->nr; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_DISABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}

int perf_evlist__enable_event(struct perf_evlist *evlist,
			      struct perf_evsel *evsel)
{
	int cpu, thread, err;

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		for (thread = 0; thread < evlist->threads->nr; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_ENABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}
static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = nr_cpus * nr_threads * evlist->nr_entries;

	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}
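/*
 * Sample ids are hashed into evlist->heads so that the id carried in a
 * record can be mapped back to the evsel that produced it; see
 * perf_evlist__id2sid() and perf_evlist__id2evsel() below.
 */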
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}
static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}
struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}
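/*
 * id_pos/is_pos are the positions (in u64 words) of the event id inside a
 * record: id_pos is counted from the start of a sample, is_pos from the end
 * of a non-sample record carrying a sample_id_all trailer.  They are computed
 * per evsel and cached on the evlist, so the id can be picked out of a raw
 * record without a full parse.
 */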
static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
						   union perf_event *event)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return first;

	if (!first->attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}
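/*
 * Read one event from the ring buffer of map 'idx'.  In overwrite mode the
 * kernel may have lapped our last position, so we resynchronize at the
 * current head; an event that straddles the end of the buffer is copied into
 * md->event_copy so callers always see a contiguous record.
 */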
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	return event;
}
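/*
 * Tell the kernel that everything up to md->prev has been consumed by
 * advancing the tail pointer; only needed when the buffer is not in
 * overwrite mode.
 */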
void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
	if (!evlist->overwrite) {
		struct perf_mmap *md = &evlist->mmap[idx];
		unsigned int old = md->prev;

		perf_mmap__write_tail(md, old);
	}
}

static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
	if (evlist->mmap[idx].base != NULL) {
		munmap(evlist->mmap[idx].base, evlist->mmap_len);
		evlist->mmap[idx].base = NULL;
	}
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++)
		__perf_evlist__munmap(evlist, i);

	free(evlist->mmap);
	evlist->mmap = NULL;
}
static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}
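/*
 * One ring buffer per cpu: the first fd on each cpu gets mmapped and every
 * other fd on that cpu is redirected into it via PERF_EVENT_IOC_SET_OUTPUT.
 */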
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < nr_cpus; cpu++)
		__perf_evlist__munmap(evlist, cpu);
	return -1;
}
static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < nr_threads; thread++)
		__perf_evlist__munmap(evlist, thread);
	return -1;
}
/**
 * perf_evlist__mmap - Create per cpu maps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__mmap_consume() does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return -EINVAL;

	mask = pages * page_size - 1;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}
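/*
 * Rough sketch of how a tool typically uses these pieces together (names are
 * the functions defined in this file; target setup and error handling
 * omitted):
 *
 *	struct perf_evlist *evlist = perf_evlist__new();
 *
 *	perf_evlist__add_default(evlist);
 *	perf_evlist__create_maps(evlist, &target);
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, UINT_MAX, false);
 *
 *	while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
 *		... parse with perf_evlist__parse_sample() ...
 *		perf_evlist__mmap_consume(evlist, idx);
 *	}
 */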
int perf_evlist__create_maps(struct perf_evlist *evlist,
			     struct perf_target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (perf_target__has_task(target))
		evlist->cpus = cpu_map__dummy_new();
	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}
int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}
bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	if (evlist->nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	list_for_each_entry(pos, &evlist->entries, node) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	list_for_each_entry(evsel, &evlist->entries, node)
		evlist->combined_sample_type |= evsel->attr.sample_type;

	return evlist->combined_sample_type;
}

u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);
}
bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	return first->attr.read_format;
}
u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}

bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	return first->attr.sample_id_all;
}
void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);
}
int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	perf_evlist__update_id_pos(evlist);

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}
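/*
 * Fork the workload but keep it "corked": the child blocks reading go_pipe
 * until perf_evlist__start_workload() writes a byte to cork_fd, so counters
 * can be set up before the traced program starts executing.
 */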
int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_target *target,
				  const char *argv[], bool pipe_output,
				  bool want_signal)
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		if (want_signal)
			kill(getppid(), SIGUSR1);
		exit(-1);
	}

	if (perf_target__none(target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}
int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}
int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	list_for_each_entry(evsel, &evlist->entries, node) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}