/*
 * builtin-stat.c
 *
 * Builtin stat command: Give a precise performance counter summary
 * overview of any workload, CPU or specific PID.
 *
 * Sample output:

   $ perf stat ~/hackbench 10

   Performance counter stats for '/home/mingo/hackbench':

       1255.538611  task clock ticks     #      10.143 CPU utilization factor
             54011  context switches     #       0.043 M/sec
               385  CPU migrations       #       0.000 M/sec
             17755  pagefaults           #       0.014 M/sec
        3808323185  CPU cycles           #    3033.219 M/sec
        1575111190  instructions         #    1254.530 M/sec
          17367895  cache references     #      13.833 M/sec
           7674421  cache misses         #       6.112 M/sec

   Wall-clock time elapsed:   123.786620 msecs

 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *   Jaswinder Singh Rajput <jaswinder@kernel.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "perf.h"
#include "builtin.h"

#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"

#include <sys/prctl.h>
#include <math.h>		/* for sqrt() in stddev_stats() */
static struct perf_counter_attr default_attrs[] = {

	{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK	},
	{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES	},
	{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS	},
	{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS	},

	{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES	},
	{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS	},
	{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES	},
	{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES	},

};
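
/*
 * Note: these eight default events correspond, in order, to the eight
 * counter lines of the sample output in the header comment above.
 */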
#define MAX_RUN			100	/* hard cap enforced on the -r/--repeat option */

static int		system_wide	=  0;
static int		verbose		=  0;
static unsigned int	nr_cpus		=  0;
static int		run_idx		=  0;

static int		run_count	=  1;
static int		inherit		=  1;
static int		scale		=  1;	/* -c/--scale, on by default */
static int		target_pid	= -1;
static int		null_run	=  0;

static int		fd[MAX_NR_CPUS][MAX_COUNTERS];

static u64		runtime_nsecs[MAX_RUN];
static u64		walltime_nsecs[MAX_RUN];
static u64		runtime_cycles[MAX_RUN];

static u64		event_res[MAX_RUN][MAX_COUNTERS][3];
static u64		event_scaled[MAX_RUN][MAX_COUNTERS];
struct stats
{
	double sum;
	double sum_sq;
};

static double avg_stats(struct stats *stats)
{
	return stats->sum / run_count;
}
/*
 * stddev = sqrt(1/N (\Sum n_i^2) - avg(n)^2)
 */
static double stddev_stats(struct stats *stats)
{
	double avg = stats->sum / run_count;

	return sqrt(stats->sum_sq/run_count - avg*avg);
}
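
/*
 * avg_stats() and stddev_stats() work single-pass: update_stats() below
 * only accumulates sum and sum_sq, so no per-run samples have to be
 * revisited. A made-up example for run times {10, 12, 14}:
 *
 *	sum = 36, sum_sq = 440, avg = 36/3 = 12
 *	stddev = sqrt(440/3 - 12*12) ~= 1.63
 *
 * Note this is the population (1/N) form of the standard deviation.
 */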
struct stats		event_res_stats[MAX_COUNTERS][3];
struct stats		event_scaled_stats[MAX_COUNTERS];
struct stats		runtime_nsecs_stats;
struct stats		walltime_nsecs_stats;
struct stats		runtime_cycles_stats;
#define MATCH_EVENT(t, c, counter)			\
	(attrs[counter].type == PERF_TYPE_##t &&	\
	 attrs[counter].config == PERF_COUNT_##c)

#define ERR_PERF_OPEN \
"Error: counter %d, sys_perf_counter_open() syscall returned with %d (%s)\n"
static void create_perf_stat_counter(int counter, int pid)
{
	struct perf_counter_attr *attr = attrs + counter;

	if (scale)
		attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
				    PERF_FORMAT_TOTAL_TIME_RUNNING;

	if (system_wide) {
		unsigned int cpu;

		for (cpu = 0; cpu < nr_cpus; cpu++) {
			fd[cpu][counter] = sys_perf_counter_open(attr, -1, cpu, -1, 0);
			if (fd[cpu][counter] < 0 && verbose)
				fprintf(stderr, ERR_PERF_OPEN, counter,
					fd[cpu][counter], strerror(errno));
		}
	} else {
		attr->inherit		= inherit;
		attr->disabled		= 1;
		attr->enable_on_exec	= 1;

		fd[0][counter] = sys_perf_counter_open(attr, pid, -1, -1, 0);
		if (fd[0][counter] < 0 && verbose)
			fprintf(stderr, ERR_PERF_OPEN, counter,
				fd[0][counter], strerror(errno));
	}
}
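
/*
 * The disabled + enable_on_exec pair above means the per-task counters
 * are created switched off and only start ticking when the forked child
 * finally exec()s the measured command, so perf's own setup work is not
 * counted.
 */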
/*
 * Does the counter have nsecs as a unit?
 */
static inline int nsec_counter(int counter)
{
	if (MATCH_EVENT(SOFTWARE, SW_CPU_CLOCK, counter) ||
	    MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter))
		return 1;

	return 0;
}
/*
 * Read out the results of a single counter:
 */
static void read_counter(int counter)
{
	u64 *count, single_count[3];
	unsigned int cpu;
	size_t res, nv;

	count = event_res[run_idx][counter];

	count[0] = count[1] = count[2] = 0;

	nv = scale ? 3 : 1;
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		if (fd[cpu][counter] < 0)
			continue;

		res = read(fd[cpu][counter], single_count, nv * sizeof(u64));
		assert(res == nv * sizeof(u64));

		close(fd[cpu][counter]);
		fd[cpu][counter] = -1;

		count[0] += single_count[0];
		if (scale) {
			count[1] += single_count[1];
			count[2] += single_count[2];
		}
	}

	if (scale) {
		if (count[2] == 0) {
			event_scaled[run_idx][counter] = -1;
			count[0] = 0;
			return;
		}

		if (count[2] < count[1]) {
			event_scaled[run_idx][counter] = 1;
			count[0] = (unsigned long long)
				((double)count[0] * count[1] / count[2] + 0.5);
		}
	}
	/*
	 * Save the full runtime - to allow normalization during printout:
	 */
	if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter))
		runtime_nsecs[run_idx] = count[0];
	if (MATCH_EVENT(HARDWARE, HW_CPU_CYCLES, counter))
		runtime_cycles[run_idx] = count[0];
}
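
/*
 * A made-up example of the scaling above: if the kernel could keep a
 * counter scheduled on the PMU for only half of the time it was enabled
 * (count[2] = 5ms running vs. count[1] = 10ms enabled, say because more
 * counters were requested than the hardware can host at once), a raw
 * count[0] of 1000 is extrapolated to 1000 * 10/5 ~= 2000, and the
 * printout marks the value with "(scaled from 50.00%)".
 */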
static int run_perf_stat(int argc __used, const char **argv)
{
	unsigned long long t0, t1;
	int status = 0;
	int counter;
	int pid;
	int child_ready_pipe[2], go_pipe[2];
	char buf;

	if (!system_wide)
		nr_cpus = 1;

	if (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0) {
		perror("failed to create pipes");
		exit(1);
	}

	if ((pid = fork()) < 0)
		perror("failed to fork");

	if (!pid) {
		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Do a dummy execvp to get the PLT entry resolved,
		 * so we avoid the resolver overhead on the real
		 * execvp call.
		 */
		execvp("", (char **)argv);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &buf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		exit(-1);
	}

	/*
	 * Wait for the child to be ready to exec.
	 */
	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	if (read(child_ready_pipe[0], &buf, 1) == -1)
		perror("unable to read pipe");
	close(child_ready_pipe[0]);

	for (counter = 0; counter < nr_counters; counter++)
		create_perf_stat_counter(counter, pid);

	/*
	 * Enable counters and exec the command:
	 */
	t0 = rdclock();
	close(go_pipe[1]);

	wait(&status);

	t1 = rdclock();

	walltime_nsecs[run_idx] = t1 - t0;

	for (counter = 0; counter < nr_counters; counter++)
		read_counter(counter);

	return WEXITSTATUS(status);
}
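
/*
 * Design note on the two pipes above: the child signals readiness by
 * closing child_ready_pipe[1] (the parent's read() then returns EOF),
 * and blocks on go_pipe until the parent has created all counters and
 * closes go_pipe[1]. Since go_pipe[0] is marked FD_CLOEXEC, the real
 * execvp() closes it automatically in the measured command.
 */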
static void print_noise(double avg, double stddev)
{
	if (run_count > 1)
		fprintf(stderr, "   ( +- %7.3f%% )", 100*stddev / avg);
}
static void nsec_printout(int counter, double avg, double stddev)
{
	double msecs = avg / 1e6;

	fprintf(stderr, " %14.6f  %-24s", msecs, event_name(counter));

	if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter)) {
		fprintf(stderr, " # %10.3f CPUs ",
				avg / avg_stats(&walltime_nsecs_stats));
	}
	print_noise(avg, stddev);
}
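
/*
 * The "CPUs" ratio above is task-clock time divided by wall-clock time,
 * i.e. the average number of CPUs that were busy on the workload's
 * behalf - the "CPU utilization factor" line in the sample output in
 * the header comment.
 */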
static void abs_printout(int counter, double avg, double stddev)
{
	fprintf(stderr, " %14.0f  %-24s", avg, event_name(counter));

	if (MATCH_EVENT(HARDWARE, HW_INSTRUCTIONS, counter)) {
		fprintf(stderr, " # %10.3f IPC  ",
				avg / avg_stats(&runtime_cycles_stats));
	} else {
		fprintf(stderr, " # %10.3f M/sec",
				1000.0 * avg / avg_stats(&runtime_nsecs_stats));
	}
	print_noise(avg, stddev);
}
/*
 * Print out the results of a single counter:
 */
static void print_counter(int counter)
{
	double avg, stddev;
	int scaled;

	avg    = avg_stats(&event_res_stats[counter][0]);
	stddev = stddev_stats(&event_res_stats[counter][0]);
	scaled = avg_stats(&event_scaled_stats[counter]);

	if (scaled == -1) {
		fprintf(stderr, " %14s  %-24s\n",
			"<not counted>", event_name(counter));
		return;
	}

	if (nsec_counter(counter))
		nsec_printout(counter, avg, stddev);
	else
		abs_printout(counter, avg, stddev);

	if (scaled) {
		double avg_enabled, avg_running;

		avg_enabled = avg_stats(&event_res_stats[counter][1]);
		avg_running = avg_stats(&event_res_stats[counter][2]);

		fprintf(stderr, "  (scaled from %.2f%%)",
				100 * avg_running / avg_enabled);
	}

	fprintf(stderr, "\n");
}
static void update_stats(const char *name, int idx, struct stats *stats, u64 *val)
{
	double sq = *val;

	stats->sum += *val;
	stats->sum_sq += sq * sq;

	if (verbose > 1)
		fprintf(stderr, "debug: %20s[%d]: %Ld\n", name, idx, *val);
}
/*
 * Calculate the averages and noises:
 */
static void calc_avg(void)
{
	int i, j;

	if (verbose > 1)
		fprintf(stderr, "\n");

	for (i = 0; i < run_count; i++) {
		update_stats("runtime", 0, &runtime_nsecs_stats, runtime_nsecs + i);
		update_stats("walltime", 0, &walltime_nsecs_stats, walltime_nsecs + i);
		update_stats("runtime_cycles", 0, &runtime_cycles_stats, runtime_cycles + i);

		for (j = 0; j < nr_counters; j++) {
			update_stats("counter/0", j,
				event_res_stats[j]+0, event_res[i][j]+0);
			update_stats("counter/1", j,
				event_res_stats[j]+1, event_res[i][j]+1);
			update_stats("counter/2", j,
				event_res_stats[j]+2, event_res[i][j]+2);
			if (event_scaled[i][j] != (u64)-1)
				update_stats("scaled", j,
					event_scaled_stats + j, event_scaled[i]+j);
		}
	}
}
static void print_stat(int argc, const char **argv)
{
	int i, counter;

	calc_avg();

	fflush(stdout);

	fprintf(stderr, "\n");
	fprintf(stderr, " Performance counter stats for \'%s", argv[0]);

	for (i = 1; i < argc; i++)
		fprintf(stderr, " %s", argv[i]);

	fprintf(stderr, "\'");
	if (run_count > 1)
		fprintf(stderr, " (%d runs)", run_count);
	fprintf(stderr, ":\n\n");

	for (counter = 0; counter < nr_counters; counter++)
		print_counter(counter);

	fprintf(stderr, "\n");
	fprintf(stderr, " %14.9f  seconds time elapsed",
			avg_stats(&walltime_nsecs_stats)/1e9);
	if (run_count > 1) {
		fprintf(stderr, "   ( +- %7.3f%% )",
				100*stddev_stats(&walltime_nsecs_stats) /
				avg_stats(&walltime_nsecs_stats));
	}
	fprintf(stderr, "\n\n");
}
static volatile int signr = -1;

static void skip_signal(int signo)
{
	signr = signo;
}

static void sig_atexit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	kill(getpid(), signr);
}
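
/*
 * The sig_atexit() idiom above re-raises the recorded signal with the
 * default handler restored, so the parent shell sees perf terminated by
 * the real signal instead of a plain exit().
 */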
static const char * const stat_usage[] = {
	"perf stat [<options>] <command>",
	NULL
};
static const struct option options[] = {
	OPT_CALLBACK('e', "event", NULL, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events),
	OPT_BOOLEAN('i', "inherit", &inherit,
		    "child tasks inherit counters"),
	OPT_INTEGER('p', "pid", &target_pid,
		    "stat events on existing pid"),
	OPT_BOOLEAN('a', "all-cpus", &system_wide,
		    "system-wide collection from all CPUs"),
	OPT_BOOLEAN('c', "scale", &scale,
		    "scale/normalize counters"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_INTEGER('r', "repeat", &run_count,
		    "repeat command and print average + stddev (max: 100)"),
	OPT_BOOLEAN('n', "null", &null_run,
		    "null run - don't start any counters"),
	OPT_END()
};
int cmd_stat(int argc, const char **argv, const char *prefix __used)
{
	int status;

	argc = parse_options(argc, argv, options, stat_usage,
		PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(stat_usage, options);
	if (run_count <= 0 || run_count > MAX_RUN)
		usage_with_options(stat_usage, options);

	/* Set attrs and nr_counters if no event is selected and !null_run */
	if (!null_run && !nr_counters) {
		memcpy(attrs, default_attrs, sizeof(default_attrs));
		nr_counters = ARRAY_SIZE(default_attrs);
	}

	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	assert(nr_cpus <= MAX_NR_CPUS);
	assert((int)nr_cpus >= 0);

	/*
	 * We don't want to block the signals - that would cause
	 * child tasks to inherit that and Ctrl-C would not work.
	 * What we want is for Ctrl-C to work in the exec()-ed
	 * task, but being ignored by perf stat itself:
	 */
	atexit(sig_atexit);
	signal(SIGINT,  skip_signal);
	signal(SIGALRM, skip_signal);
	signal(SIGABRT, skip_signal);

	status = 0;
	for (run_idx = 0; run_idx < run_count; run_idx++) {
		if (run_count != 1 && verbose)
			fprintf(stderr, "[ perf stat: executing run #%d ... ]\n",
				run_idx + 1);
		status = run_perf_stat(argc, argv);
	}

	print_stat(argc, argv);

	return status;
}