/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include "evsel.h"
#include "evlist.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
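
/*
 * Illustrative note (not part of the original file): FD() resolves the
 * file descriptor slot for a (cpu, thread) pair in the evsel's fd
 * xyarray, and since it dereferences the entry it can be used as an
 * lvalue as well, e.g.:
 *
 *	FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr, pid,
 *						     cpu, -1, 0);
 *	...
 *	close(FD(evsel, cpu, thread));
 */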

void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx = idx;
	evsel->attr = *attr;
	INIT_LIST_HEAD(&evsel->node);
}

struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	return evsel;
}
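
/*
 * Illustrative usage sketch (not part of the original file): a caller
 * such as builtin-stat.c would typically fill a perf_event_attr,
 * allocate the evsel and later hand it to one of the
 * perf_evsel__open*() variants:
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *	struct perf_evsel *counter = perf_evsel__new(&attr, 0);
 *
 *	if (counter == NULL)
 *		return -ENOMEM;
 */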

int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
	return evsel->fd != NULL ? 0 : -ENOMEM;
}

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}
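
/*
 * Layout note (illustrative, not part of the original file): the single
 * zalloc() above places the per-cpu values directly behind struct
 * perf_counts, which is what makes the evsel->counts->cpu[cpu] accesses
 * further down valid; for ncpus == 2 the allocation looks like:
 *
 *	[ struct perf_counts | cpu[0] | cpu[1] ]
 */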

void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	free(evsel->id);
	evsel->id = NULL;
}

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}

void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	xyarray__delete(evsel->fd);
	xyarray__delete(evsel->sample_id);
	free(evsel->id);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	close_cgroup(evsel->cgrp);
	free(evsel->name);
	free(evsel);
}

int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}
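
/*
 * Worked example (illustrative, not part of the original file): when
 * events are multiplexed, a counter may run for only part of the time
 * it was enabled, and the scaling above extrapolates the raw value.
 * With val = 1000, ena = 200000 and run = 100000 the counter ran for
 * half of the enabled time, so:
 *
 *	count.val = (u64)(1000.0 * 200000 / 100000 + 0.5) = 2000
 */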

int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}

static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads, bool group, bool inherit)
{
	int cpu, thread;
	unsigned long flags = 0;
	int pid = -1;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -1;

	if (evsel->cgrp) {
		flags = PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		int group_fd = -1;
		/*
		 * Don't allow mmap() of inherited per-task counters. This
		 * would create a performance issue due to all children writing
		 * to the same buffer.
		 *
		 * FIXME:
		 * Proper fix is not to pass 'inherit' to perf_evsel__open*,
		 * but a 'flags' parameter, with 'group' folded there as well,
		 * then introduce a PERF_O_{MMAP,GROUP,INHERIT} enum, and if
		 * O_MMAP is set, emit a warning if cpu < 0 and O_INHERIT is
		 * set. Let's go for the minimal fix first, though.
		 */
		evsel->attr.inherit = (cpus->map[cpu] >= 0) && inherit;

		for (thread = 0; thread < threads->nr; thread++) {

			if (!evsel->cgrp)
				pid = threads->map[thread];

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0)
				goto out_close;

			if (group && group_fd == -1)
				group_fd = FD(evsel, cpu, thread);
		}
	}

	return 0;

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return -1;
}
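
/*
 * Illustrative note (not part of the original file): with 'group' set,
 * the first counter opened on each cpu becomes the group leader
 * (group_fd is -1 only for that first sys_perf_event_open() call), and
 * every later counter on the same cpu is opened as its sibling:
 *
 *	leader  = sys_perf_event_open(&attr, pid0, cpu, -1, 0);
 *	sibling = sys_perf_event_open(&attr, pid1, cpu, leader, 0);
 */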

static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr	= 1,
	.cpus	= { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};

int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads, bool group, bool inherit)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads, group, inherit);
}

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus, bool group, bool inherit)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group, inherit);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads, bool group, bool inherit)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group, inherit);
}
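
/*
 * Illustrative usage sketch (not part of the original file), assuming a
 * thread map built elsewhere (e.g. via thread_map__new()): counting on
 * an existing set of threads, with no per-cpu breakdown and no
 * grouping or inheritance:
 *
 *	if (perf_evsel__open_per_thread(evsel, threads, false, false) < 0)
 *		goto out_error;
 */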

static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
				       struct perf_sample *sample)
{
	const u64 *array = event->sample.array;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_CPU) {
		u32 *p = (u32 *)array;
		sample->cpu = *p;
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u32 *p = (u32 *)array;
		sample->pid = p[0];
		sample->tid = p[1];
	}

	return 0;
}
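
/*
 * Layout note (illustrative, not part of the original file): with
 * attr.sample_id_all set, the kernel appends the id fields to the tail
 * of non-sample records in the same PERF_SAMPLE_* order that
 * perf_event__parse_sample() below uses, which is why the parser above
 * starts at the last u64 of the event and walks backwards. For
 * TID | TIME | ID | CPU the record tail looks like:
 *
 *	... payload ... | pid,tid | time | id | cpu,res |
 *	                                        ^ parsing starts here
 */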

int perf_event__parse_sample(const union perf_event *event, u64 type,
			     bool sample_id_all, struct perf_sample *data)
{
	const u64 *array;

	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!sample_id_all)
			return 0;
		return perf_event__parse_id_sample(event, type, data);
	}

	array = event->sample.array;

	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u32 *p = (u32 *)array;
		data->pid = p[0];
		data->tid = p[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	data->id = -1ULL;
	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u32 *p = (u32 *)array;
		data->cpu = *p;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
		return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		data->callchain = (struct ip_callchain *)array;
		array += 1 + data->callchain->nr;
	}

	if (type & PERF_SAMPLE_RAW) {
		u32 *p = (u32 *)array;
		data->raw_size = *p;
		p++;
		data->raw_data = p;
	}

	return 0;
}