/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "evsel.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"
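/*
 * FD(e, x, y) resolves to the event file descriptor stored in the evsel's
 * fd xyarray for cpu index 'x' and thread index 'y'.
 */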
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx  = idx;
	evsel->attr = *attr;
	INIT_LIST_HEAD(&evsel->node);
}
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	return evsel;
}
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
	return evsel->fd != NULL ? 0 : -ENOMEM;
}
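/*
 * Allocate the sample ID bookkeeping: an ncpus x nthreads xyarray of
 * struct perf_sample_id plus a flat array for the u64 IDs themselves,
 * used elsewhere in perf to map samples back to the evsel they belong to.
 */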
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}
void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}
void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	free(evsel->id);
	evsel->id = NULL;
}
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}
void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	xyarray__delete(evsel->fd);
	xyarray__delete(evsel->sample_id);
	free(evsel->id);
}
void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	close_cgroup(evsel->cgrp);
	free(evsel);
}
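/*
 * Read one (cpu, thread) counter. When 'scale' is set the read format is
 * { value, time_enabled, time_running } (nv == 3 u64s) and the value is
 * scaled up by time_enabled/time_running if the counter was not running
 * the whole time; otherwise only the raw value (nv == 1) is read.
 */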
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}
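/*
 * Read and sum the counter over all (cpu, thread) fds into counts->aggr.
 * counts->scaled ends up -1 if the counter never ran, 1 if the aggregate
 * had to be scaled by time_enabled/time_running, and 0 otherwise.
 */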
int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}
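/*
 * Open one event fd per (cpu, thread) pair with sys_perf_event_open().
 * In cgroup mode the cgroup fd is passed as the pid together with
 * PERF_FLAG_PID_CGROUP; with 'group' set, the first fd opened on each cpu
 * becomes the group leader for the remaining threads on that cpu.
 */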
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads, bool group, bool inherit)
{
	int cpu, thread;
	unsigned long flags = 0;
	int pid = -1;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -1;

	if (evsel->cgrp) {
		flags = PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		int group_fd = -1;
		/*
		 * Don't allow mmap() of inherited per-task counters. This
		 * would create a performance issue due to all children writing
		 * to the same buffer.
		 *
		 * Proper fix is not to pass 'inherit' to perf_evsel__open*,
		 * but a 'flags' parameter, with 'group' folded there as well,
		 * then introduce a PERF_O_{MMAP,GROUP,INHERIT} enum, and if
		 * O_MMAP is set, emit a warning if cpu < 0 and O_INHERIT is
		 * set. Let's go for the minimal fix first, though.
		 */
		evsel->attr.inherit = (cpus->map[cpu] >= 0) && inherit;

		for (thread = 0; thread < threads->nr; thread++) {

			if (!evsel->cgrp)
				pid = threads->map[thread];

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0)
				goto out_close;

			if (group && group_fd == -1)
				group_fd = FD(evsel, cpu, thread);
		}
	}

	return 0;

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return -1;
}

static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr	= 1,
	.cpus	= { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};
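/*
 * perf_evsel__open() accepts a NULL cpu or thread map: the dummy maps above
 * have a single -1 entry, which sys_perf_event_open() takes as "any cpu"
 * (for the cpu map) or "all threads on that cpu" (for the thread map),
 * while the open loops still iterate exactly once for that dimension.
 */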
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads, bool group, bool inherit)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads, group, inherit);
}
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus, bool group, bool inherit)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group, inherit);
}
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads, bool group, bool inherit)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group, inherit);
}
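/*
 * Typical caller flow, as an illustrative sketch only (not code from this
 * file; attr setup and error handling are abbreviated, and 'threads' is an
 * already-built thread map; see builtin-stat.c for real usage):
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *	struct perf_evsel *evsel = perf_evsel__new(&attr, 0);
 *
 *	if (evsel == NULL ||
 *	    perf_evsel__open_per_thread(evsel, threads, false, false) < 0)
 *		goto out_error;
 *
 *	__perf_evsel__read_on_cpu(evsel, 0, 0, false);
 *	perf_evsel__close_fd(evsel, 1, threads->nr);
 *	perf_evsel__delete(evsel);
 */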
static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
				       struct perf_sample *sample)
{
	const u64 *array = event->sample.array;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_CPU) {
		u32 *p = (u32 *)array;
		sample->cpu = *p;
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u32 *p = (u32 *)array;
		sample->pid = p[0];
		sample->tid = p[1];
	}

	return 0;
}
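/*
 * Decode a sample according to the PERF_SAMPLE_* bits in 'type', consuming
 * the fields in the order the kernel lays them out. Non-sample records are
 * handed to perf_event__parse_id_sample(), which walks the trailing
 * sample_id_all block backwards from the end of the event.
 */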
int perf_event__parse_sample(const union perf_event *event, u64 type,
			     bool sample_id_all, struct perf_sample *data)
{
	const u64 *array;

	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!sample_id_all)
			return 0;
		return perf_event__parse_id_sample(event, type, data);
	}

	array = event->sample.array;

	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u32 *p = (u32 *)array;
		data->pid = p[0];
		data->tid = p[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u32 *p = (u32 *)array;
		data->cpu = *p;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
		return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		data->callchain = (struct ip_callchain *)array;
		array += 1 + data->callchain->nr;
	}

	if (type & PERF_SAMPLE_RAW) {
		u32 *p = (u32 *)array;
		data->raw_size = *p;
		p++;
		data->raw_data = p;
	}

	return 0;
}