/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "evsel.h"
#include "evlist.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}
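/*
 * Illustrative example (not in the original source): for a sample_type of
 * PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME, three bits of
 * PERF_SAMPLE_MASK are set, so the fixed-size part of each sample record
 * occupies 3 * sizeof(u64) == 24 bytes.
 */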
void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx  = idx;
	evsel->attr = *attr;
	INIT_LIST_HEAD(&evsel->node);
	hists__init(&evsel->hists);
}
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	return evsel;
}
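/*
 * Usage sketch (hypothetical caller, not part of this file):
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *	struct perf_evsel *evsel = perf_evsel__new(&attr, 0);
 *
 *	if (evsel == NULL)
 *		return -ENOMEM;		(zalloc() failed)
 *	...
 *	perf_evsel__delete(evsel);
 */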
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}
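/*
 * The fd table built above is an ncpus x nthreads matrix of ints; the FD()
 * macro at the top of this file indexes it, so e.g. FD(evsel, 2, 0) is the
 * event fd for CPU 2 / thread 0. A value of -1 marks a slot whose event has
 * not been opened yet (or failed to open).
 */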
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}
void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}
void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	free(evsel->id);
	evsel->id = NULL;
}
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}
void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	xyarray__delete(evsel->fd);
	xyarray__delete(evsel->sample_id);
	free(evsel->id);
}
void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	close_cgroup(evsel->cgrp);
	free(evsel->name);
	free(evsel);
}
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}
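/*
 * Scaling example (illustrative): with val = 1000, ena = 2000 and run = 1000
 * the event was scheduled onto the PMU for only half of its enabled time, so
 * the estimated count is 1000 * 2000 / 1000 + 0.5 -> 2000.
 */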
int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}
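/*
 * Summary of the scaled field set above: 0 means the event ran for its whole
 * enabled time and the aggregate is exact, 1 means it was multiplexed and the
 * value was extrapolated as in __perf_evsel__read_on_cpu(), and -1 means the
 * event never ran at all.
 */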
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads, bool group)
{
	int cpu, thread;
	unsigned long flags = 0;
	int pid = -1;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags = PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		int group_fd = -1;

		for (thread = 0; thread < threads->nr; thread++) {

			if (!evsel->cgrp)
				pid = threads->map[thread];

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0)
				goto out_close;

			if (group && group_fd == -1)
				group_fd = FD(evsel, cpu, thread);
		}
	}

	return 0;

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return -1;
}
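/*
 * Grouping note: when group is true, the first fd opened on each CPU is
 * remembered as group_fd, so every later event on that CPU is opened into
 * the first one's group and the whole group gets scheduled onto the PMU
 * atomically.
 */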
static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr	= 1,
	.cpus	= { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads, bool group)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads, group);
}
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus, bool group)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group);
}
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads, bool group)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group);
}
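/*
 * Usage sketch (hypothetical caller, not part of this file), assuming a
 * thread_map for the target process was built elsewhere:
 *
 *	if (perf_evsel__open_per_thread(evsel, threads, false) < 0)
 *		return -1;
 *
 *	... run the workload ...
 *
 *	if (__perf_evsel__read_on_cpu(evsel, 0, 0, true) == 0)
 *		printf("count: %" PRIu64 "\n", evsel->counts->cpu[0].val);
 *
 *	perf_evsel__close_fd(evsel, 1, threads->nr);
 */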
static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
				       struct perf_sample *sample)
{
	const u64 *array = event->sample.array;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_CPU) {
		u32 *p = (u32 *)array;
		sample->cpu = *p;
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u32 *p = (u32 *)array;
		sample->pid = p[0];
		sample->tid = p[1];
	}

	return 0;
}
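/*
 * Note on the backward walk above: with sample_id_all set, non-sample records
 * carry the id fields as a trailer at the end of the event, laid out in TID,
 * TIME, ID, STREAM_ID, CPU order. Parsing therefore starts at the last u64 of
 * the record and peels the fields off in reverse.
 */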
static bool sample_overlap(const union perf_event *event,
			   const void *offset, u64 size)
{
	const void *base = event;

	if (offset + size > base + event->header.size)
		return true;

	return false;
}
int perf_event__parse_sample(const union perf_event *event, u64 type,
			     int sample_size, bool sample_id_all,
			     struct perf_sample *data, bool swapped)
{
	const u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union {
		u64 val64;
		u32 val32[2];
	} u;

	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!sample_id_all)
			return 0;
		return perf_event__parse_id_sample(event, type, data);
	}

	array = event->sample.array;

	if (sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	data->addr = 0;
	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
		return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		if (sample_overlap(event, array, sizeof(data->callchain->nr)))
			return -EFAULT;

		data->callchain = (struct ip_callchain *)array;

		if (sample_overlap(event, array, data->callchain->nr))
			return -EFAULT;

		array += 1 + data->callchain->nr;
	}

	if (type & PERF_SAMPLE_RAW) {
		const u64 *pdata;

		u.val64 = *array;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		if (sample_overlap(event, array, sizeof(u32)))
			return -EFAULT;

		data->raw_size = u.val32[0];
		pdata = (void *) array + sizeof(u32);

		if (sample_overlap(event, pdata, data->raw_size))
			return -EFAULT;

		data->raw_data = (void *) pdata;
	}

	return 0;
}