/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"
#include "evlist.h"
#include "evsel.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

/* Per-evsel lookup of the fd / sample_id slot for a given (cpu, thread) pair. */
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
                       struct thread_map *threads)
{
        int i;

        for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
                INIT_HLIST_HEAD(&evlist->heads[i]);
        INIT_LIST_HEAD(&evlist->entries);
        perf_evlist__set_maps(evlist, cpus, threads);
}
struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
                                     struct thread_map *threads)
{
        struct perf_evlist *evlist = zalloc(sizeof(*evlist));

        if (evlist != NULL)
                perf_evlist__init(evlist, cpus, threads);

        return evlist;
}
static void perf_evlist__purge(struct perf_evlist *evlist)
{
        struct perf_evsel *pos, *n;

        list_for_each_entry_safe(pos, n, &evlist->entries, node) {
                list_del_init(&pos->node);
                perf_evsel__delete(pos);
        }

        evlist->nr_entries = 0;
}
void perf_evlist__exit(struct perf_evlist *evlist)
{
        free(evlist->mmap);
        free(evlist->pollfd);
        evlist->mmap = NULL;
        evlist->pollfd = NULL;
}
void perf_evlist__delete(struct perf_evlist *evlist)
{
        perf_evlist__purge(evlist);
        perf_evlist__exit(evlist);
        free(evlist);
}
void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
        list_add_tail(&entry->node, &evlist->entries);
        ++evlist->nr_entries;
}
int perf_evlist__add_default(struct perf_evlist *evlist)
{
        struct perf_event_attr attr = {
                .type   = PERF_TYPE_HARDWARE,
                .config = PERF_COUNT_HW_CPU_CYCLES,
        };
        struct perf_evsel *evsel = perf_evsel__new(&attr, 0);

        if (evsel == NULL)
                goto error;

        /* use strdup() because free(evsel) assumes name is allocated */
        evsel->name = strdup("cycles");
        if (!evsel->name)
                goto error_free;

        perf_evlist__add(evlist, evsel);
        return 0;
error_free:
        perf_evsel__delete(evsel);
error:
        return -ENOMEM;
}
void perf_evlist__disable(struct perf_evlist *evlist)
{
        int cpu, thread;
        struct perf_evsel *pos;

        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
                list_for_each_entry(pos, &evlist->entries, node) {
                        for (thread = 0; thread < evlist->threads->nr; thread++)
                                ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_DISABLE);
                }
        }
}
void perf_evlist__enable(struct perf_evlist *evlist)
{
        int cpu, thread;
        struct perf_evsel *pos;

        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
                list_for_each_entry(pos, &evlist->entries, node) {
                        for (thread = 0; thread < evlist->threads->nr; thread++)
                                ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_ENABLE);
                }
        }
}
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
        int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
        evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
        return evlist->pollfd != NULL ? 0 : -ENOMEM;
}
void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
        fcntl(fd, F_SETFL, O_NONBLOCK);
        evlist->pollfd[evlist->nr_fds].fd = fd;
        evlist->pollfd[evlist->nr_fds].events = POLLIN;
        evlist->nr_fds++;
}
static void perf_evlist__id_hash(struct perf_evlist *evlist,
                                 struct perf_evsel *evsel,
                                 int cpu, int thread, u64 id)
{
        int hash;
        struct perf_sample_id *sid = SID(evsel, cpu, thread);

        sid->id = id;
        sid->evsel = evsel;
        hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
        hlist_add_head(&sid->node, &evlist->heads[hash]);
}
void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
                         int cpu, int thread, u64 id)
{
        perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
        evsel->id[evsel->ids++] = id;
}
static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
                                  struct perf_evsel *evsel,
                                  int cpu, int thread, int fd)
{
        u64 read_data[4] = { 0, };
        int id_idx = 1; /* The first entry is the counter value */

        if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
            read(fd, &read_data, sizeof(read_data)) == -1)
                return -1;

        if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                ++id_idx;
        if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                ++id_idx;

        perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
        return 0;
}
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
        struct hlist_head *head;
        struct hlist_node *pos;
        struct perf_sample_id *sid;
        int hash;

        if (evlist->nr_entries == 1)
                return list_entry(evlist->entries.next, struct perf_evsel, node);

        hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
        head = &evlist->heads[hash];

        hlist_for_each_entry(sid, pos, head, node)
                if (sid->id == id)
                        return sid->evsel;

        return NULL;
}
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
        /* XXX Move this to perf.c, making it generally available */
        unsigned int page_size = sysconf(_SC_PAGE_SIZE);
        struct perf_mmap *md = &evlist->mmap[idx];
        unsigned int head = perf_mmap__read_head(md);
        unsigned int old = md->prev;
        unsigned char *data = md->base + page_size;
        union perf_event *event = NULL;

        if (evlist->overwrite) {
                /*
                 * If we're further behind than half the buffer, there's a chance
                 * the writer will bite our tail and mess up the samples under us.
                 *
                 * If we somehow ended up ahead of the head, we got messed up.
                 *
                 * In either case, truncate and restart at head.
                 */
                int diff = head - old;
                if (diff > md->mask / 2 || diff < 0) {
                        fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

                        /*
                         * head points to a known good entry, start there.
                         */
                        old = head;
                }
        }

        if (old != head) {
                size_t size;

                event = (union perf_event *)&data[old & md->mask];
                size = event->header.size;

                /*
                 * Event straddles the mmap boundary -- header should always
                 * be inside due to u64 alignment of output.
                 */
                if ((old & md->mask) + size != ((old + size) & md->mask)) {
                        unsigned int offset = old;
                        unsigned int len = min(sizeof(*event), size), cpy;
                        void *dst = &evlist->event_copy;

                        do {
                                cpy = min(md->mask + 1 - (offset & md->mask), len);
                                memcpy(dst, &data[offset & md->mask], cpy);
                                offset += cpy;
                                dst += cpy;
                                len -= cpy;
                        } while (len);

                        event = &evlist->event_copy;
                }

                old += size;
        }

        md->prev = old;

        if (!evlist->overwrite)
                perf_mmap__write_tail(md, old);

        return event;
}
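/*
 * A minimal consumption sketch, assuming the caller already created and
 * mmaped the evlist (process_event() stands in for whatever the tool does
 * with each record): drain one ring by calling perf_evlist__mmap_read()
 * until it returns NULL.  With overwrite == false the tail is advanced for
 * us above, so the loop only has to look at each event once:
 *
 *	union perf_event *event;
 *
 *	while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL)
 *		process_event(event);
 */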
void perf_evlist__munmap(struct perf_evlist *evlist)
{
        int i;

        for (i = 0; i < evlist->nr_mmaps; i++) {
                if (evlist->mmap[i].base != NULL) {
                        munmap(evlist->mmap[i].base, evlist->mmap_len);
                        evlist->mmap[i].base = NULL;
                }
        }

        free(evlist->mmap);
        evlist->mmap = NULL;
}
int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
        evlist->nr_mmaps = evlist->cpus->nr;
        if (evlist->cpus->map[0] == -1)
                evlist->nr_mmaps = evlist->threads->nr;
        evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
        return evlist->mmap != NULL ? 0 : -ENOMEM;
}
static int __perf_evlist__mmap(struct perf_evlist *evlist,
                               int idx, int prot, int mask, int fd)
{
        evlist->mmap[idx].prev = 0;
        evlist->mmap[idx].mask = mask;
        evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
                                      MAP_SHARED, fd, 0);
        if (evlist->mmap[idx].base == MAP_FAILED)
                return -1;

        perf_evlist__add_pollfd(evlist, fd);
        return 0;
}
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
        struct perf_evsel *evsel;
        int cpu, thread;

        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
                int output = -1;

                for (thread = 0; thread < evlist->threads->nr; thread++) {
                        list_for_each_entry(evsel, &evlist->entries, node) {
                                int fd = FD(evsel, cpu, thread);

                                if (output == -1) {
                                        output = fd;
                                        if (__perf_evlist__mmap(evlist, cpu,
                                                                prot, mask, output) < 0)
                                                goto out_unmap;
                                } else {
                                        if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
                                                goto out_unmap;
                                }

                                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                                    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
                                        goto out_unmap;
                        }
                }
        }

        return 0;

out_unmap:
        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
                if (evlist->mmap[cpu].base != NULL) {
                        munmap(evlist->mmap[cpu].base, evlist->mmap_len);
                        evlist->mmap[cpu].base = NULL;
                }
        }
        return -1;
}
static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
        struct perf_evsel *evsel;
        int thread;

        for (thread = 0; thread < evlist->threads->nr; thread++) {
                int output = -1;

                list_for_each_entry(evsel, &evlist->entries, node) {
                        int fd = FD(evsel, 0, thread);

                        if (output == -1) {
                                output = fd;
                                if (__perf_evlist__mmap(evlist, thread,
                                                        prot, mask, output) < 0)
                                        goto out_unmap;
                        } else {
                                if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
                                        goto out_unmap;
                        }

                        if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                            perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
                                goto out_unmap;
                }
        }

        return 0;

out_unmap:
        for (thread = 0; thread < evlist->threads->nr; thread++) {
                if (evlist->mmap[thread].base != NULL) {
                        munmap(evlist->mmap[thread].base, evlist->mmap_len);
                        evlist->mmap[thread].base = NULL;
                }
        }
        return -1;
}
/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__read_on_cpu does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
{
        unsigned int page_size = sysconf(_SC_PAGE_SIZE);
        int mask = pages * page_size - 1;
        struct perf_evsel *evsel;
        const struct cpu_map *cpus = evlist->cpus;
        const struct thread_map *threads = evlist->threads;
        int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);

        if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
                return -ENOMEM;

        if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
                return -ENOMEM;

        evlist->overwrite = overwrite;
        evlist->mmap_len = (pages + 1) * page_size;

        list_for_each_entry(evsel, &evlist->entries, node) {
                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                    evsel->sample_id == NULL &&
                    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
                        return -ENOMEM;
        }

        if (evlist->cpus->map[0] == -1)
                return perf_evlist__mmap_per_thread(evlist, prot, mask);

        return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}
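/*
 * Usage sketch: roughly how callers such as builtin-record.c are expected to
 * wire these helpers together.  Error handling is omitted, and done,
 * process_event, mmap_pages, target_pid, target_tid and cpu_list stand in
 * for caller-provided names; the counters themselves still have to be opened
 * on every evsel (perf_evsel__open() and friends) before mmaping:
 *
 *	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
 *	int i;
 *
 *	perf_evlist__create_maps(evlist, target_pid, target_tid, cpu_list);
 *	perf_evlist__add_default(evlist);	(adds the "cycles" event)
 *
 *	... open the events on each evsel in evlist->entries ...
 *
 *	perf_evlist__mmap(evlist, mmap_pages, false);
 *
 *	while (!done) {
 *		poll(evlist->pollfd, evlist->nr_fds, -1);
 *		for (i = 0; i < evlist->nr_mmaps; i++) {
 *			union perf_event *event;
 *
 *			while ((event = perf_evlist__mmap_read(evlist, i)) != NULL)
 *				process_event(event);
 *		}
 *	}
 */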
int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
                             pid_t target_tid, const char *cpu_list)
{
        evlist->threads = thread_map__new(target_pid, target_tid);

        if (evlist->threads == NULL)
                return -1;

        if (cpu_list == NULL && target_tid != -1)
                evlist->cpus = cpu_map__dummy_new();
        else
                evlist->cpus = cpu_map__new(cpu_list);

        if (evlist->cpus == NULL)
                goto out_delete_threads;

        return 0;

out_delete_threads:
        thread_map__delete(evlist->threads);
        return -1;
}
void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
        cpu_map__delete(evlist->cpus);
        thread_map__delete(evlist->threads);
        evlist->cpus = NULL;
        evlist->threads = NULL;
}
int perf_evlist__set_filters(struct perf_evlist *evlist)
{
        const struct thread_map *threads = evlist->threads;
        const struct cpu_map *cpus = evlist->cpus;
        struct perf_evsel *evsel;
        char *filter;
        int thread;
        int cpu;
        int err;
        int fd;

        list_for_each_entry(evsel, &evlist->entries, node) {
                filter = evsel->filter;
                if (!filter)
                        continue;
                for (cpu = 0; cpu < cpus->nr; cpu++) {
                        for (thread = 0; thread < threads->nr; thread++) {
                                fd = FD(evsel, cpu, thread);
                                err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
                                if (err)
                                        return err;
                        }
                }
        }

        return 0;
}
bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist)
{
        struct perf_evsel *pos, *first;

        pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

        list_for_each_entry_continue(pos, &evlist->entries, node) {
                if (first->attr.sample_type != pos->attr.sample_type)
                        return false;
        }

        return true;
}
u64 perf_evlist__sample_type(const struct perf_evlist *evlist)
{
        struct perf_evsel *first;

        first = list_entry(evlist->entries.next, struct perf_evsel, node);
        return first->attr.sample_type;
}
bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist)
{
        struct perf_evsel *pos, *first;

        pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

        list_for_each_entry_continue(pos, &evlist->entries, node) {
                if (first->attr.sample_id_all != pos->attr.sample_id_all)
                        return false;
        }

        return true;
}
bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
{
        struct perf_evsel *first;

        first = list_entry(evlist->entries.next, struct perf_evsel, node);
        return first->attr.sample_id_all;
}
void perf_evlist__set_selected(struct perf_evlist *evlist,
                               struct perf_evsel *evsel)
{
        evlist->selected = evsel;
}