// SPDX-License-Identifier: GPL-2.0-only
/*
 * builtin-stat.c
 *
 * Builtin stat command: Give a precise performance counters summary
 * overview about any workload, CPU or specific PID.
 *
 * Sample output:
 *
 *   $ perf stat ./hackbench 10
 *
 *  Performance counter stats for './hackbench 10':
 *
 *        1708.761321 task-clock                #   11.037 CPUs utilized
 *             41,190 context-switches          #    0.024 M/sec
 *              6,735 CPU-migrations            #    0.004 M/sec
 *             17,318 page-faults               #    0.010 M/sec
 *      5,205,202,243 cycles                    #    3.046 GHz
 *      3,856,436,920 stalled-cycles-frontend   #   74.09% frontend cycles idle
 *      1,600,790,871 stalled-cycles-backend    #   30.75% backend cycles idle
 *      2,603,501,247 instructions              #    0.50  insns per cycle
 *                                              #    1.48  stalled cycles per insn
 *        484,357,498 branches                  #  283.455 M/sec
 *          6,388,934 branch-misses             #    1.32% of all branches
 *
 *        0.154822978  seconds time elapsed
 *
 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *   Jaswinder Singh Rajput <jaswinder@kernel.org>
 */
#include "util/cgroup.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/header.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/counts.h"
#include "util/group.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/string2.h"
#include "util/metricgroup.h"
#include "util/synthetic-events.h"
#include "util/target.h"
#include "util/time-utils.h"
#include "util/affinity.h"
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>
#include <sys/prctl.h>
#include <sys/types.h>
#include <sys/resource.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <perf/evlist.h>

#define DEFAULT_SEPARATOR	" "
#define FREEZE_ON_SMI_PATH	"devices/cpu/freeze_on_smi"
static void print_counters(struct timespec *ts, int argc, const char **argv);
/* Default events used for perf stat -T */
static const char *transaction_attrs = {
    "task-clock,"
    "{instructions,cycles,cpu/cycles-t/,cpu/tx-start/,cpu/el-start/,cpu/cycles-ct/}"
};

/* More limited version when the CPU does not have all events. */
static const char *transaction_limited_attrs = {
    "task-clock,"
    "{instructions,cycles,cpu/cycles-t/,cpu/tx-start/}"
};
static const char *topdown_attrs[] = {
    "topdown-total-slots",
    "topdown-slots-retired",
    "topdown-recovery-bubbles",
    "topdown-fetch-bubbles",
    "topdown-slots-issued",
    NULL,
};

static const char *smi_cost_attrs = {
    "{msr/aperf/,msr/smi/,cycles}"
};
static struct evlist *evsel_list;

static struct target target = {
    .uid = UINT_MAX,
};

#define METRIC_ONLY_LEN 20
static volatile pid_t child_pid = -1;
static int detailed_run = 0;
static bool transaction_run;
static bool topdown_run = false;
static bool smi_cost = false;
static bool smi_reset = false;
static int big_num_opt = -1;
static bool group = false;
static const char *pre_cmd = NULL;
static const char *post_cmd = NULL;
static bool sync_run = false;
static bool forever = false;
static bool force_metric_only = false;
static struct timespec ref_time;
static bool append_file;
static bool interval_count;
static const char *output_name;
static int output_fd;
struct perf_stat {
    bool			 record;
    struct perf_data		 data;
    struct perf_session		*session;
    u64				 bytes_written;
    struct perf_tool		 tool;
    bool			 maps_allocated;
    struct perf_cpu_map		*cpus;
    struct perf_thread_map	*threads;
    enum aggr_mode		 aggr_mode;
};

static struct perf_stat perf_stat;
#define STAT_RECORD perf_stat.record

static volatile int done = 0;
static struct perf_stat_config stat_config = {
    .aggr_mode		= AGGR_GLOBAL,
    .scale		= true,
    .unit_width		= 4, /* strlen("unit") */
    .run_count		= 1,
    .metric_only_len	= METRIC_ONLY_LEN,
    .walltime_nsecs_stats = &walltime_nsecs_stats,
    .big_num		= true,
};
static inline void diff_timespec(struct timespec *r, struct timespec *a,
                                 struct timespec *b)
{
    r->tv_sec = a->tv_sec - b->tv_sec;
    if (a->tv_nsec < b->tv_nsec) {
        r->tv_nsec = a->tv_nsec + NSEC_PER_SEC - b->tv_nsec;
        r->tv_sec--;
    } else {
        r->tv_nsec = a->tv_nsec - b->tv_nsec;
    }
}
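/*
 * Illustrative example (editorial, not from the original source): when the
 * nanosecond field underflows, one second is borrowed from tv_sec:
 *
 *	struct timespec a = { .tv_sec = 5, .tv_nsec = 200000000 };
 *	struct timespec b = { .tv_sec = 3, .tv_nsec = 700000000 };
 *	struct timespec r;
 *
 *	diff_timespec(&r, &a, &b);
 *	// r.tv_sec == 1, r.tv_nsec == 500000000, i.e. 1.5s
 */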
static void perf_stat__reset_stats(void)
{
    int i;

    perf_evlist__reset_stats(evsel_list);
    perf_stat__reset_shadow_stats();

    for (i = 0; i < stat_config.stats_num; i++)
        perf_stat__reset_shadow_per_stat(&stat_config.stats[i]);
}
static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
                                     union perf_event *event,
                                     struct perf_sample *sample __maybe_unused,
                                     struct machine *machine __maybe_unused)
{
    if (perf_data__write(&perf_stat.data, event, event->header.size) < 0) {
        pr_err("failed to write perf data, error: %m\n");
        return -1;
    }

    perf_stat.bytes_written += event->header.size;
    return 0;
}
static int write_stat_round_event(u64 tm, u64 type)
{
    return perf_event__synthesize_stat_round(NULL, tm, type,
                                             process_synthesized_event,
                                             NULL);
}

#define WRITE_STAT_ROUND_EVENT(time, interval) \
    write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval)
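/*
 * Illustrative expansion (editorial, not from the original source): the
 * token-pasting macro above turns
 *
 *	WRITE_STAT_ROUND_EVENT(900000000, INTERVAL)
 *
 * into
 *
 *	write_stat_round_event(900000000, PERF_STAT_ROUND_TYPE__INTERVAL)
 */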
#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)
static int
perf_evsel__write_stat_event(struct evsel *counter, u32 cpu, u32 thread,
                             struct perf_counts_values *count)
{
    struct perf_sample_id *sid = SID(counter, cpu, thread);

    return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count,
                                       process_synthesized_event, NULL);
}
static int read_single_counter(struct evsel *counter, int cpu,
                               int thread, struct timespec *rs)
{
    if (counter->tool_event == PERF_TOOL_DURATION_TIME) {
        u64 val = rs->tv_nsec + rs->tv_sec*1000000000ULL;
        struct perf_counts_values *count =
            perf_counts(counter->counts, cpu, thread);
        count->ena = count->run = val;
        count->val = val;
        return 0;
    }
    return perf_evsel__read_counter(counter, cpu, thread);
}
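/*
 * Note (editorial): for the synthetic duration_time tool event the value is
 * simply the elapsed wall-clock time in nanoseconds. Setting ena == run
 * makes the enabled/running scaling ratio exactly 1.0, so this value is
 * never rescaled by the multiplexing correction.
 */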
/*
 * Read out the results of a single counter:
 * do not aggregate counts across CPUs in system-wide mode
 */
static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu)
{
    int nthreads = perf_thread_map__nr(evsel_list->core.threads);
    int thread;

    if (!counter->supported)
        return -ENOENT;

    if (counter->core.system_wide)
        nthreads = 1;

    for (thread = 0; thread < nthreads; thread++) {
        struct perf_counts_values *count;

        count = perf_counts(counter->counts, cpu, thread);

        /*
         * The leader's group read loads data into its group members
         * (via perf_evsel__read_counter()) and sets their count->loaded.
         */
        if (!perf_counts__is_loaded(counter->counts, cpu, thread) &&
            read_single_counter(counter, cpu, thread, rs)) {
            counter->counts->scaled = -1;
            perf_counts(counter->counts, cpu, thread)->ena = 0;
            perf_counts(counter->counts, cpu, thread)->run = 0;
            return -1;
        }

        perf_counts__set_loaded(counter->counts, cpu, thread, false);

        if (STAT_RECORD) {
            if (perf_evsel__write_stat_event(counter, cpu, thread, count)) {
                pr_err("failed to write stat event\n");
                return -1;
            }
        }

        if (verbose > 1) {
            fprintf(stat_config.output,
                    "%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
                    perf_evsel__name(counter),
                    cpu,
                    count->val, count->ena, count->run);
        }
    }

    return 0;
}
static void read_counters(struct timespec *rs)
{
    struct evsel *counter;
    struct affinity affinity;
    int i, ncpus, cpu;

    if (affinity__setup(&affinity) < 0)
        return;

    ncpus = perf_cpu_map__nr(evsel_list->core.all_cpus);
    if (!target__has_cpu(&target) || target__has_per_thread(&target))
        ncpus = 1;
    evlist__for_each_cpu(evsel_list, i, cpu) {
        if (i >= ncpus)
            break;
        affinity__set(&affinity, cpu);

        evlist__for_each_entry(evsel_list, counter) {
            if (evsel__cpu_iter_skip(counter, cpu))
                continue;
            if (!counter->err) {
                counter->err = read_counter_cpu(counter, rs,
                                                counter->cpu_iter - 1);
            }
        }
    }
    affinity__cleanup(&affinity);

    evlist__for_each_entry(evsel_list, counter) {
        if (counter->err)
            pr_debug("failed to read counter %s\n", counter->name);
        if (counter->err == 0 && perf_stat_process_counter(&stat_config, counter))
            pr_warning("failed to process counter %s\n", counter->name);
        counter->err = 0;
    }
}
static void process_interval(void)
{
    struct timespec ts, rs;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    diff_timespec(&rs, &ts, &ref_time);

    read_counters(&rs);

    if (STAT_RECORD) {
        if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL))
            pr_err("failed to write stat round event\n");
    }

    init_stats(&walltime_nsecs_stats);
    update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000);
    print_counters(&rs, 0, NULL);
}
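/*
 * Note (editorial): walltime_nsecs_stats is re-initialized on every
 * interval, so each -I printout reports statistics for its own window
 * rather than for the cumulative run; stat_config.interval is in ms,
 * hence the * 1000000 conversion to nanoseconds.
 */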
static void enable_counters(void)
{
    if (stat_config.initial_delay)
        usleep(stat_config.initial_delay * USEC_PER_MSEC);

    /*
     * We need to enable counters only if:
     * - we don't have a tracee (attaching to task or cpu)
     * - we have an initial delay configured
     */
    if (!target__none(&target) || stat_config.initial_delay)
        evlist__enable(evsel_list);
}
static void disable_counters(void)
{
    /*
     * If we don't have a tracee (attaching to task or cpu), counters may
     * still be running. To get accurate group ratios, we must stop groups
     * from counting before reading their constituent counters.
     */
    if (!target__none(&target))
        evlist__disable(evsel_list);
}
static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *info,
                                        void *ucontext __maybe_unused)
{
    workload_exec_errno = info->si_value.sival_int;
}
static bool perf_evsel__should_store_id(struct evsel *counter)
{
    return STAT_RECORD || counter->core.attr.read_format & PERF_FORMAT_ID;
}
static bool is_target_alive(struct target *_target,
                            struct perf_thread_map *threads)
{
    struct stat st;
    int i;

    if (!target__has_task(_target))
        return true;

    for (i = 0; i < threads->nr; i++) {
        char path[PATH_MAX];

        scnprintf(path, PATH_MAX, "%s/%d", procfs__mountpoint(),
                  threads->map[i].pid);

        if (!stat(path, &st))
            return true;
    }

    return false;
}
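/*
 * Illustrative example (editorial, not from the original source): with
 * procfs mounted at /proc and a monitored pid of 1234, the liveness check
 * above reduces to
 *
 *	stat("/proc/1234", &st) == 0
 *
 * i.e. the target is considered alive while its /proc entry exists.
 */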
enum counter_recovery { COUNTER_SKIP, COUNTER_RETRY, COUNTER_FATAL };
static enum counter_recovery stat_handle_error(struct evsel *counter)
{
    char msg[BUFSIZ];

    /*
     * PPC returns ENXIO for HW counters until 2.6.37
     * (behavior changed with commit b0a873e).
     */
    if (errno == EINVAL || errno == ENOSYS ||
        errno == ENOENT || errno == EOPNOTSUPP ||
        errno == ENXIO) {
        if (verbose > 0)
            ui__warning("%s event is not supported by the kernel.\n",
                        perf_evsel__name(counter));
        counter->supported = false;
        /*
         * errored is a sticky flag that means one of the counter's
         * cpu event had a problem and needs to be reexamined.
         */
        counter->errored = true;

        if ((counter->leader != counter) ||
            !(counter->leader->core.nr_members > 1))
            return COUNTER_SKIP;
    } else if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
        if (verbose > 0)
            ui__warning("%s\n", msg);
        return COUNTER_RETRY;
    } else if (target__has_per_thread(&target) &&
               evsel_list->core.threads &&
               evsel_list->core.threads->err_thread != -1) {
        /*
         * For the global --per-thread case, skip the current
         * error thread.
         */
        if (!thread_map__remove(evsel_list->core.threads,
                                evsel_list->core.threads->err_thread)) {
            evsel_list->core.threads->err_thread = -1;
            return COUNTER_RETRY;
        }
    }

    perf_evsel__open_strerror(counter, &target,
                              errno, msg, sizeof(msg));
    ui__error("%s\n", msg);

    if (child_pid != -1)
        kill(child_pid, SIGTERM);
    return COUNTER_FATAL;
}
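/*
 * Note (editorial): the open loop in __run_perf_stat() below maps these
 * outcomes as follows: COUNTER_FATAL aborts the run, COUNTER_RETRY jumps
 * back to the open attempt (after a fallback or after removing a dead
 * thread), and COUNTER_SKIP leaves the event disabled and moves on.
 */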
static int __run_perf_stat(int argc, const char **argv, int run_idx)
{
    int interval = stat_config.interval;
    int times = stat_config.times;
    int timeout = stat_config.timeout;
    char msg[BUFSIZ];
    unsigned long long t0, t1;
    struct evsel *counter;
    struct timespec ts;
    size_t l;
    int status = 0;
    const bool forks = (argc > 0);
    bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
    struct affinity affinity;
    int i, cpu;
    bool second_pass = false;

    if (interval) {
        ts.tv_sec = interval / USEC_PER_MSEC;
        ts.tv_nsec = (interval % USEC_PER_MSEC) * NSEC_PER_MSEC;
    } else if (timeout) {
        ts.tv_sec = timeout / USEC_PER_MSEC;
        ts.tv_nsec = (timeout % USEC_PER_MSEC) * NSEC_PER_MSEC;
    } else {
        ts.tv_sec = 1;
        ts.tv_nsec = 0;
    }

    if (forks) {
        if (perf_evlist__prepare_workload(evsel_list, &target, argv, is_pipe,
                                          workload_exec_failed_signal) < 0) {
            perror("failed to prepare workload");
            return -1;
        }
        child_pid = evsel_list->workload.pid;
    }

    if (group)
        perf_evlist__set_leader(evsel_list);

    if (affinity__setup(&affinity) < 0)
        return -1;

    evlist__for_each_cpu(evsel_list, i, cpu) {
        affinity__set(&affinity, cpu);

        evlist__for_each_entry(evsel_list, counter) {
            if (evsel__cpu_iter_skip(counter, cpu))
                continue;
            if (counter->reset_group || counter->errored)
                continue;
try_again:
            if (create_perf_stat_counter(counter, &stat_config, &target,
                                         counter->cpu_iter - 1) < 0) {

                /*
                 * Weak group failed. We cannot just undo this here
                 * because earlier CPUs might be in group mode, and the kernel
                 * doesn't support mixing group and non group reads. Defer
                 * it to later.
                 * Don't close here because we're in the wrong affinity.
                 */
                if ((errno == EINVAL || errno == EBADF) &&
                    counter->leader != counter &&
                    counter->weak_group) {
                    perf_evlist__reset_weak_group(evsel_list, counter, false);
                    assert(counter->reset_group);
                    second_pass = true;
                    continue;
                }

                switch (stat_handle_error(counter)) {
                case COUNTER_FATAL:
                    return -1;
                case COUNTER_RETRY:
                    goto try_again;
                case COUNTER_SKIP:
                    continue;
                default:
                    break;
                }
            }
            counter->supported = true;
        }
    }

    if (second_pass) {
        /*
         * Now redo all the weak group after closing them,
         * and also close errored counters.
         */
        evlist__for_each_cpu(evsel_list, i, cpu) {
            affinity__set(&affinity, cpu);
            /* First close errored or weak retry */
            evlist__for_each_entry(evsel_list, counter) {
                if (!counter->reset_group && !counter->errored)
                    continue;
                if (evsel__cpu_iter_skip_no_inc(counter, cpu))
                    continue;
                perf_evsel__close_cpu(&counter->core, counter->cpu_iter);
            }
            /* Now reopen weak */
            evlist__for_each_entry(evsel_list, counter) {
                if (!counter->reset_group && !counter->errored)
                    continue;
                if (evsel__cpu_iter_skip(counter, cpu))
                    continue;
                if (!counter->reset_group)
                    continue;
try_again_reset:
                pr_debug2("reopening weak %s\n", perf_evsel__name(counter));
                if (create_perf_stat_counter(counter, &stat_config, &target,
                                             counter->cpu_iter - 1) < 0) {

                    switch (stat_handle_error(counter)) {
                    case COUNTER_FATAL:
                        return -1;
                    case COUNTER_RETRY:
                        goto try_again_reset;
                    case COUNTER_SKIP:
                        continue;
                    default:
                        break;
                    }
                }
                counter->supported = true;
            }
        }
    }
    affinity__cleanup(&affinity);

    evlist__for_each_entry(evsel_list, counter) {
        if (!counter->supported) {
            perf_evsel__free_fd(&counter->core);
            continue;
        }

        l = strlen(counter->unit);
        if (l > stat_config.unit_width)
            stat_config.unit_width = l;

        if (perf_evsel__should_store_id(counter) &&
            perf_evsel__store_ids(counter, evsel_list))
            return -1;
    }

    if (perf_evlist__apply_filters(evsel_list, &counter)) {
        pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
               counter->filter, perf_evsel__name(counter), errno,
               str_error_r(errno, msg, sizeof(msg)));
        return -1;
    }

    if (STAT_RECORD) {
        int err, fd = perf_data__fd(&perf_stat.data);

        if (is_pipe) {
            err = perf_header__write_pipe(perf_data__fd(&perf_stat.data));
        } else {
            err = perf_session__write_header(perf_stat.session, evsel_list,
                                             fd, false);
        }

        if (err < 0)
            return err;

        err = perf_event__synthesize_stat_events(&stat_config, NULL, evsel_list,
                                                 process_synthesized_event, is_pipe);
        if (err < 0)
            return err;
    }

    /*
     * Enable counters and exec the command:
     */
    t0 = rdclock();
    clock_gettime(CLOCK_MONOTONIC, &ref_time);

    if (forks) {
        perf_evlist__start_workload(evsel_list);
        enable_counters();

        if (interval || timeout) {
            while (!waitpid(child_pid, &status, WNOHANG)) {
                nanosleep(&ts, NULL);
                if (timeout)
                    break;
                process_interval();
                if (interval_count && !(--times))
                    break;
            }
        }
        if (child_pid != -1)
            wait4(child_pid, &status, 0, &stat_config.ru_data);

        if (workload_exec_errno) {
            const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
            pr_err("Workload failed: %s\n", emsg);
            return -1;
        }

        if (WIFSIGNALED(status))
            psignal(WTERMSIG(status), argv[0]);
    } else {
        enable_counters();
        while (!done) {
            nanosleep(&ts, NULL);
            if (!is_target_alive(&target, evsel_list->core.threads))
                break;
            if (timeout)
                break;
            if (interval) {
                process_interval();
                if (interval_count && !(--times))
                    break;
            }
        }
    }

    disable_counters();

    t1 = rdclock();

    if (stat_config.walltime_run_table)
        stat_config.walltime_run[run_idx] = t1 - t0;

    update_stats(&walltime_nsecs_stats, t1 - t0);

    /*
     * Closing a group leader splits the group, and as we only disable
     * group leaders, results in remaining events becoming enabled. To
     * avoid arbitrary skew, we must read all counters before closing any
     * group leaders.
     */
    read_counters(&(struct timespec) { .tv_nsec = t1 - t0 });

    /*
     * For STAT_RECORD we need to keep evsel_list alive, because it is
     * processed later; the evsel_list is closed once that is done.
     */
    if (!STAT_RECORD)
        evlist__close(evsel_list);

    return WEXITSTATUS(status);
}
static int run_perf_stat(int argc, const char **argv, int run_idx)
{
    int ret;

    if (pre_cmd) {
        ret = system(pre_cmd);
        if (ret)
            return ret;
    }

    if (sync_run)
        sync();

    ret = __run_perf_stat(argc, argv, run_idx);
    if (ret)
        return ret;

    if (post_cmd) {
        ret = system(post_cmd);
        if (ret)
            return ret;
    }

    return ret;
}
static void print_counters(struct timespec *ts, int argc, const char **argv)
{
    /* Do not print anything if we record to the pipe. */
    if (STAT_RECORD && perf_stat.data.is_pipe)
        return;

    perf_evlist__print_counters(evsel_list, &stat_config, &target,
                                ts, argc, argv);
}
static volatile int signr = -1;

static void skip_signal(int signo)
{
    if ((child_pid == -1) || stat_config.interval)
        done = 1;

    signr = signo;
    /*
     * Render child_pid harmless: a racing SIGCHLD combined with fast PID
     * recycling must not lead us to SIGTERM a random process later on.
     */
    child_pid = -1;
}
static void sig_atexit(void)
{
    sigset_t set, oset;

    /*
     * Avoid a race with the SIGCHLD handler in skip_signal(), which
     * modifies child_pid: the goal is to never send SIGTERM to a
     * random process.
     */
    sigemptyset(&set);
    sigaddset(&set, SIGCHLD);
    sigprocmask(SIG_BLOCK, &set, &oset);

    if (child_pid != -1)
        kill(child_pid, SIGTERM);

    sigprocmask(SIG_SETMASK, &oset, NULL);

    if (signr == -1)
        return;

    signal(signr, SIG_DFL);
    kill(getpid(), signr);
}
static int stat__set_big_num(const struct option *opt __maybe_unused,
                             const char *s __maybe_unused, int unset)
{
    big_num_opt = unset ? 0 : 1;
    return 0;
}

static int enable_metric_only(const struct option *opt __maybe_unused,
                              const char *s __maybe_unused, int unset)
{
    force_metric_only = true;
    stat_config.metric_only = !unset;
    return 0;
}
static int parse_metric_groups(const struct option *opt,
                               const char *str,
                               int unset __maybe_unused)
{
    return metricgroup__parse_groups(opt, str, &stat_config.metric_events);
}
static struct option stat_options[] = {
    OPT_BOOLEAN('T', "transaction", &transaction_run,
                "hardware transaction statistics"),
    OPT_CALLBACK('e', "event", &evsel_list, "event",
                 "event selector. use 'perf list' to list available events",
                 parse_events_option),
    OPT_CALLBACK(0, "filter", &evsel_list, "filter",
                 "event filter", parse_filter),
    OPT_BOOLEAN('i', "no-inherit", &stat_config.no_inherit,
                "child tasks do not inherit counters"),
    OPT_STRING('p', "pid", &target.pid, "pid",
               "stat events on existing process id"),
    OPT_STRING('t', "tid", &target.tid, "tid",
               "stat events on existing thread id"),
    OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
                "system-wide collection from all CPUs"),
    OPT_BOOLEAN('g', "group", &group,
                "put the counters into a counter group"),
    OPT_BOOLEAN(0, "scale", &stat_config.scale,
                "Use --no-scale to disable counter scaling for multiplexing"),
    OPT_INCR('v', "verbose", &verbose,
             "be more verbose (show counter open errors, etc)"),
    OPT_INTEGER('r', "repeat", &stat_config.run_count,
                "repeat command and print average + stddev (max: 100, forever: 0)"),
    OPT_BOOLEAN(0, "table", &stat_config.walltime_run_table,
                "display details about each run (only with -r option)"),
    OPT_BOOLEAN('n', "null", &stat_config.null_run,
                "null run - don't start any counters"),
    OPT_INCR('d', "detailed", &detailed_run,
             "detailed run - start a lot of events"),
    OPT_BOOLEAN('S', "sync", &sync_run,
                "call sync() before starting a run"),
    OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
                       "print large numbers with thousands' separators",
                       stat__set_big_num),
    OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
               "list of cpus to monitor in system-wide mode"),
    OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode,
                 "disable CPU count aggregation", AGGR_NONE),
    OPT_BOOLEAN(0, "no-merge", &stat_config.no_merge, "Do not merge identically named events"),
    OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator",
               "print counts with custom separator"),
    OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
                 "monitor event in cgroup name only", parse_cgroups),
    OPT_STRING('o', "output", &output_name, "file", "output file name"),
    OPT_BOOLEAN(0, "append", &append_file, "append to the output file"),
    OPT_INTEGER(0, "log-fd", &output_fd,
                "log output to fd, instead of stderr"),
    OPT_STRING(0, "pre", &pre_cmd, "command",
               "command to run prior to the measured command"),
    OPT_STRING(0, "post", &post_cmd, "command",
               "command to run after the measured command"),
    OPT_UINTEGER('I', "interval-print", &stat_config.interval,
                 "print counts at regular interval in ms "
                 "(overhead is possible for values <= 100ms)"),
    OPT_INTEGER(0, "interval-count", &stat_config.times,
                "print counts for a fixed number of times"),
    OPT_BOOLEAN(0, "interval-clear", &stat_config.interval_clear,
                "clear screen between intervals"),
    OPT_UINTEGER(0, "timeout", &stat_config.timeout,
                 "stop workload and print counts after a timeout period in ms (>= 10ms)"),
    OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
                 "aggregate counts per processor socket", AGGR_SOCKET),
    OPT_SET_UINT(0, "per-die", &stat_config.aggr_mode,
                 "aggregate counts per processor die", AGGR_DIE),
    OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode,
                 "aggregate counts per physical processor core", AGGR_CORE),
    OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode,
                 "aggregate counts per thread", AGGR_THREAD),
    OPT_SET_UINT(0, "per-node", &stat_config.aggr_mode,
                 "aggregate counts per numa node", AGGR_NODE),
    OPT_UINTEGER('D', "delay", &stat_config.initial_delay,
                 "ms to wait before starting measurement after program start"),
    OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL,
                       "Only print computed metrics. No raw values", enable_metric_only),
    OPT_BOOLEAN(0, "topdown", &topdown_run,
                "measure topdown level 1 statistics"),
    OPT_BOOLEAN(0, "smi-cost", &smi_cost,
                "measure SMI cost"),
    OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list",
                 "monitor specified metrics or metric groups (separated by ,)",
                 parse_metric_groups),
    OPT_BOOLEAN_FLAG(0, "all-kernel", &stat_config.all_kernel,
                     "Configure all used events to run in kernel space.",
                     PARSE_OPT_EXCLUSIVE),
    OPT_BOOLEAN_FLAG(0, "all-user", &stat_config.all_user,
                     "Configure all used events to run in user space.",
                     PARSE_OPT_EXCLUSIVE),
    OPT_END()
};
static int perf_stat__get_socket(struct perf_stat_config *config __maybe_unused,
                                 struct perf_cpu_map *map, int cpu)
{
    return cpu_map__get_socket(map, cpu, NULL);
}

static int perf_stat__get_die(struct perf_stat_config *config __maybe_unused,
                              struct perf_cpu_map *map, int cpu)
{
    return cpu_map__get_die(map, cpu, NULL);
}

static int perf_stat__get_core(struct perf_stat_config *config __maybe_unused,
                               struct perf_cpu_map *map, int cpu)
{
    return cpu_map__get_core(map, cpu, NULL);
}

static int perf_stat__get_node(struct perf_stat_config *config __maybe_unused,
                               struct perf_cpu_map *map, int cpu)
{
    return cpu_map__get_node(map, cpu, NULL);
}
static int perf_stat__get_aggr(struct perf_stat_config *config,
                               aggr_get_id_t get_id, struct perf_cpu_map *map, int idx)
{
    int cpu;

    if (idx >= map->nr)
        return -1;

    cpu = map->map[idx];

    if (config->cpus_aggr_map->map[cpu] == -1)
        config->cpus_aggr_map->map[cpu] = get_id(config, map, idx);

    return config->cpus_aggr_map->map[cpu];
}
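/*
 * Note (editorial): cpus_aggr_map memoizes the aggregation id per CPU.
 * perf_cpu_map__empty_new() (see perf_stat_init_aggr_mode() below)
 * initializes every slot to -1, so each get_id() callback runs at most
 * once per CPU; later lookups are served from the cache.
 */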
static int perf_stat__get_socket_cached(struct perf_stat_config *config,
                                        struct perf_cpu_map *map, int idx)
{
    return perf_stat__get_aggr(config, perf_stat__get_socket, map, idx);
}

static int perf_stat__get_die_cached(struct perf_stat_config *config,
                                     struct perf_cpu_map *map, int idx)
{
    return perf_stat__get_aggr(config, perf_stat__get_die, map, idx);
}

static int perf_stat__get_core_cached(struct perf_stat_config *config,
                                      struct perf_cpu_map *map, int idx)
{
    return perf_stat__get_aggr(config, perf_stat__get_core, map, idx);
}

static int perf_stat__get_node_cached(struct perf_stat_config *config,
                                      struct perf_cpu_map *map, int idx)
{
    return perf_stat__get_aggr(config, perf_stat__get_node, map, idx);
}
static bool term_percore_set(void)
{
    struct evsel *counter;

    evlist__for_each_entry(evsel_list, counter) {
        if (counter->percore)
            return true;
    }

    return false;
}
static int perf_stat_init_aggr_mode(void)
{
    int nr;

    switch (stat_config.aggr_mode) {
    case AGGR_SOCKET:
        if (cpu_map__build_socket_map(evsel_list->core.cpus, &stat_config.aggr_map)) {
            perror("cannot build socket map");
            return -1;
        }
        stat_config.aggr_get_id = perf_stat__get_socket_cached;
        break;
    case AGGR_DIE:
        if (cpu_map__build_die_map(evsel_list->core.cpus, &stat_config.aggr_map)) {
            perror("cannot build die map");
            return -1;
        }
        stat_config.aggr_get_id = perf_stat__get_die_cached;
        break;
    case AGGR_CORE:
        if (cpu_map__build_core_map(evsel_list->core.cpus, &stat_config.aggr_map)) {
            perror("cannot build core map");
            return -1;
        }
        stat_config.aggr_get_id = perf_stat__get_core_cached;
        break;
    case AGGR_NODE:
        if (cpu_map__build_node_map(evsel_list->core.cpus, &stat_config.aggr_map)) {
            perror("cannot build node map");
            return -1;
        }
        stat_config.aggr_get_id = perf_stat__get_node_cached;
        break;
    case AGGR_NONE:
        if (term_percore_set()) {
            if (cpu_map__build_core_map(evsel_list->core.cpus,
                                        &stat_config.aggr_map)) {
                perror("cannot build core map");
                return -1;
            }
            stat_config.aggr_get_id = perf_stat__get_core_cached;
        }
        break;
    case AGGR_GLOBAL:
    case AGGR_THREAD:
    case AGGR_UNSET:
    default:
        break;
    }

    /*
     * The evsel_list->cpus is the base we operate on,
     * taking the highest cpu number to be the size of
     * the aggregation translation cpumap.
     */
    nr = perf_cpu_map__max(evsel_list->core.cpus);
    stat_config.cpus_aggr_map = perf_cpu_map__empty_new(nr + 1);
    return stat_config.cpus_aggr_map ? 0 : -ENOMEM;
}
static void perf_stat__exit_aggr_mode(void)
{
    perf_cpu_map__put(stat_config.aggr_map);
    perf_cpu_map__put(stat_config.cpus_aggr_map);
    stat_config.aggr_map = NULL;
    stat_config.cpus_aggr_map = NULL;
}
static inline int perf_env__get_cpu(struct perf_env *env, struct perf_cpu_map *map, int idx)
{
    int cpu;

    if (idx > map->nr)
        return -1;

    cpu = map->map[idx];

    if (cpu >= env->nr_cpus_avail)
        return -1;

    return cpu;
}
*map
, int idx
, void *data
)
1096 struct perf_env
*env
= data
;
1097 int cpu
= perf_env__get_cpu(env
, map
, idx
);
1099 return cpu
== -1 ? -1 : env
->cpu
[cpu
].socket_id
;
static int perf_env__get_die(struct perf_cpu_map *map, int idx, void *data)
{
    struct perf_env *env = data;
    int die_id = -1, cpu = perf_env__get_cpu(env, map, idx);

    if (cpu != -1) {
        /*
         * Encode socket in bit range 15:8.
         * die_id is relative to the socket and we need a global id,
         * so we combine socket + die id.
         */
        if (WARN_ONCE(env->cpu[cpu].socket_id >> 8, "The socket id number is too big.\n"))
            return -1;

        if (WARN_ONCE(env->cpu[cpu].die_id >> 8, "The die id number is too big.\n"))
            return -1;

        die_id = (env->cpu[cpu].socket_id << 8) | (env->cpu[cpu].die_id & 0xff);
    }

    return die_id;
}
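/*
 * Illustrative example (editorial, not from the original source):
 * socket_id 3 and die_id 1 encode to (3 << 8) | (1 & 0xff) == 0x301,
 * a die id that is unique across the whole system instead of per socket.
 */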
static int perf_env__get_core(struct perf_cpu_map *map, int idx, void *data)
{
    struct perf_env *env = data;
    int core = -1, cpu = perf_env__get_cpu(env, map, idx);

    if (cpu != -1) {
        /*
         * Encode socket in bit range 31:24 and die id in bit range
         * 23:16. core_id is relative to socket and die and we need a
         * global id, so we combine socket + die id + core id.
         */
        if (WARN_ONCE(env->cpu[cpu].socket_id >> 8, "The socket id number is too big.\n"))
            return -1;

        if (WARN_ONCE(env->cpu[cpu].die_id >> 8, "The die id number is too big.\n"))
            return -1;

        if (WARN_ONCE(env->cpu[cpu].core_id >> 16, "The core id number is too big.\n"))
            return -1;

        core = (env->cpu[cpu].socket_id << 24) |
               (env->cpu[cpu].die_id << 16) |
               (env->cpu[cpu].core_id & 0xffff);
    }

    return core;
}
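/*
 * Illustrative example (editorial, not from the original source): socket 1,
 * die 0, core 2 encode to (1 << 24) | (0 << 16) | 2 == 0x01000002, so cores
 * sharing a core_id on different sockets/dies remain distinct.
 */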
static int perf_env__get_node(struct perf_cpu_map *map, int idx, void *data)
{
    int cpu = perf_env__get_cpu(data, map, idx);

    return perf_env__numa_node(data, cpu);
}
static int perf_env__build_socket_map(struct perf_env *env, struct perf_cpu_map *cpus,
                                      struct perf_cpu_map **sockp)
{
    return cpu_map__build_map(cpus, sockp, perf_env__get_socket, env);
}

static int perf_env__build_die_map(struct perf_env *env, struct perf_cpu_map *cpus,
                                   struct perf_cpu_map **diep)
{
    return cpu_map__build_map(cpus, diep, perf_env__get_die, env);
}

static int perf_env__build_core_map(struct perf_env *env, struct perf_cpu_map *cpus,
                                    struct perf_cpu_map **corep)
{
    return cpu_map__build_map(cpus, corep, perf_env__get_core, env);
}

static int perf_env__build_node_map(struct perf_env *env, struct perf_cpu_map *cpus,
                                    struct perf_cpu_map **nodep)
{
    return cpu_map__build_map(cpus, nodep, perf_env__get_node, env);
}
static int perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused,
                                      struct perf_cpu_map *map, int idx)
{
    return perf_env__get_socket(map, idx, &perf_stat.session->header.env);
}

static int perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused,
                                   struct perf_cpu_map *map, int idx)
{
    return perf_env__get_die(map, idx, &perf_stat.session->header.env);
}

static int perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused,
                                    struct perf_cpu_map *map, int idx)
{
    return perf_env__get_core(map, idx, &perf_stat.session->header.env);
}

static int perf_stat__get_node_file(struct perf_stat_config *config __maybe_unused,
                                    struct perf_cpu_map *map, int idx)
{
    return perf_env__get_node(map, idx, &perf_stat.session->header.env);
}
static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
{
    struct perf_env *env = &st->session->header.env;

    switch (stat_config.aggr_mode) {
    case AGGR_SOCKET:
        if (perf_env__build_socket_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) {
            perror("cannot build socket map");
            return -1;
        }
        stat_config.aggr_get_id = perf_stat__get_socket_file;
        break;
    case AGGR_DIE:
        if (perf_env__build_die_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) {
            perror("cannot build die map");
            return -1;
        }
        stat_config.aggr_get_id = perf_stat__get_die_file;
        break;
    case AGGR_CORE:
        if (perf_env__build_core_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) {
            perror("cannot build core map");
            return -1;
        }
        stat_config.aggr_get_id = perf_stat__get_core_file;
        break;
    case AGGR_NODE:
        if (perf_env__build_node_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) {
            perror("cannot build node map");
            return -1;
        }
        stat_config.aggr_get_id = perf_stat__get_node_file;
        break;
    case AGGR_NONE:
    case AGGR_GLOBAL:
    case AGGR_THREAD:
    case AGGR_UNSET:
    default:
        break;
    }

    return 0;
}
static int topdown_filter_events(const char **attr, char **str, bool use_group)
{
    int off = 0;
    int i;
    int len = 0;
    char *s;

    for (i = 0; attr[i]; i++) {
        if (pmu_have_event("cpu", attr[i])) {
            len += strlen(attr[i]) + 1;
            attr[i - off] = attr[i];
        } else
            off++;
    }
    attr[i - off] = NULL;

    *str = malloc(len + 1 + 2);
    if (!*str)
        return -1;
    s = *str;
    if (i - off == 0) {
        *s = 0;
        return -1;
    }
    if (use_group)
        *s++ = '{';
    for (i = 0; attr[i]; i++) {
        strcpy(s, attr[i]);
        s += strlen(s);
        *s++ = ',';
    }
    if (use_group) {
        s[-1] = '}';
        *s = 0;
    } else
        s[-1] = 0;
    return 0;
}
__weak bool arch_topdown_check_group(bool *warn)
{
    *warn = false;
    return false;
}

__weak void arch_topdown_group_warn(void)
{
}
/*
 * Add default attributes, if there were no attributes specified or
 * if -d/--detailed, -d -d or -d -d -d is used:
 */
static int add_default_attributes(void)
{
    int err;
    struct perf_event_attr default_attrs0[] = {

  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK	},
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES },
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS	},
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS	},

  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES	},
};
    struct perf_event_attr frontend_attrs[] = {
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
};
    struct perf_event_attr backend_attrs[] = {
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};
    struct perf_event_attr default_attrs1[] = {
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS	},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES	},
};
/*
 * Detailed stats (-d), covering the L1 and last level data caches:
 */
    struct perf_event_attr detailed_attrs[] = {

  { .type = PERF_TYPE_HW_CACHE,
    .config =
         PERF_COUNT_HW_CACHE_L1D		<<  0  |
        (PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
        (PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)	},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
         PERF_COUNT_HW_CACHE_L1D		<<  0  |
        (PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
        (PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)	},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
         PERF_COUNT_HW_CACHE_LL			<<  0  |
        (PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
        (PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)	},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
         PERF_COUNT_HW_CACHE_LL			<<  0  |
        (PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
        (PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)	},
};
/*
 * Very detailed stats (-d -d), covering the instruction cache and the TLB caches:
 */
    struct perf_event_attr very_detailed_attrs[] = {

  { .type = PERF_TYPE_HW_CACHE,
    .config =
         PERF_COUNT_HW_CACHE_L1I		<<  0  |
        (PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
        (PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)	},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
         PERF_COUNT_HW_CACHE_L1I		<<  0  |
        (PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
        (PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)	},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
         PERF_COUNT_HW_CACHE_DTLB		<<  0  |
        (PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
        (PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)	},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
         PERF_COUNT_HW_CACHE_DTLB		<<  0  |
        (PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
        (PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)	},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
         PERF_COUNT_HW_CACHE_ITLB		<<  0  |
        (PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
        (PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)	},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
         PERF_COUNT_HW_CACHE_ITLB		<<  0  |
        (PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
        (PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)	},
};
/*
 * Very, very detailed stats (-d -d -d), adding prefetch events:
 */
    struct perf_event_attr very_very_detailed_attrs[] = {

  { .type = PERF_TYPE_HW_CACHE,
    .config =
         PERF_COUNT_HW_CACHE_L1D		<<  0  |
        (PERF_COUNT_HW_CACHE_OP_PREFETCH	<<  8) |
        (PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)	},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
         PERF_COUNT_HW_CACHE_L1D		<<  0  |
        (PERF_COUNT_HW_CACHE_OP_PREFETCH	<<  8) |
        (PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)	},
};
    struct parse_events_error errinfo;
    /* Set attrs if no event is selected and !null_run: */
    if (stat_config.null_run)
        return 0;

    bzero(&errinfo, sizeof(errinfo));
    if (transaction_run) {
        /* Handle -T as -M transaction. Once platform specific metrics
         * support has been added to the json files, all architectures
         * will use this approach. To determine transaction support
         * on an architecture, test for such a metric name.
         */
        if (metricgroup__has_metric("transaction")) {
            struct option opt = { .value = &evsel_list };

            return metricgroup__parse_groups(&opt, "transaction",
                                             &stat_config.metric_events);
        }

        if (pmu_have_event("cpu", "cycles-ct") &&
            pmu_have_event("cpu", "el-start"))
            err = parse_events(evsel_list, transaction_attrs,
                               &errinfo);
        else
            err = parse_events(evsel_list,
                               transaction_limited_attrs,
                               &errinfo);
        if (err) {
            fprintf(stderr, "Cannot set up transaction events\n");
            parse_events_print_error(&errinfo, transaction_attrs);
            return -1;
        }
        return 0;
    }
    if (smi_cost) {
        int smi;

        if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) {
            fprintf(stderr, "freeze_on_smi is not supported.\n");
            return -1;
        }

        if (!smi) {
            if (sysfs__write_int(FREEZE_ON_SMI_PATH, 1) < 0) {
                fprintf(stderr, "Failed to set freeze_on_smi.\n");
                return -1;
            }
            smi_reset = true;
        }

        if (pmu_have_event("msr", "aperf") &&
            pmu_have_event("msr", "smi")) {
            if (!force_metric_only)
                stat_config.metric_only = true;
            err = parse_events(evsel_list, smi_cost_attrs, &errinfo);
        } else {
            fprintf(stderr, "To measure SMI cost, it needs "
                    "msr/aperf/, msr/smi/ and cpu/cycles/ support\n");
            parse_events_print_error(&errinfo, smi_cost_attrs);
            return -1;
        }
        if (err) {
            parse_events_print_error(&errinfo, smi_cost_attrs);
            fprintf(stderr, "Cannot set up SMI cost events\n");
            return -1;
        }
        return 0;
    }
    if (topdown_run) {
        char *str = NULL;
        bool warn = false;

        if (stat_config.aggr_mode != AGGR_GLOBAL &&
            stat_config.aggr_mode != AGGR_CORE) {
            pr_err("top down event configuration requires --per-core mode\n");
            return -1;
        }
        stat_config.aggr_mode = AGGR_CORE;
        if (nr_cgroups || !target__has_cpu(&target)) {
            pr_err("top down event configuration requires system-wide mode (-a)\n");
            return -1;
        }

        if (!force_metric_only)
            stat_config.metric_only = true;
        if (topdown_filter_events(topdown_attrs, &str,
                                  arch_topdown_check_group(&warn)) < 0) {
            pr_err("Out of memory\n");
            return -1;
        }
        if (topdown_attrs[0] && str) {
            if (warn)
                arch_topdown_group_warn();
            err = parse_events(evsel_list, str, &errinfo);
            if (err) {
                fprintf(stderr,
                        "Cannot set up top down events %s: %d\n",
                        str, err);
                parse_events_print_error(&errinfo, str);
                free(str);
                return -1;
            }
        } else {
            fprintf(stderr, "System does not support topdown\n");
            return -1;
        }
        free(str);
    }
    if (!evsel_list->core.nr_entries) {
        if (target__has_cpu(&target))
            default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK;

        if (perf_evlist__add_default_attrs(evsel_list, default_attrs0) < 0)
            return -1;
        if (pmu_have_event("cpu", "stalled-cycles-frontend")) {
            if (perf_evlist__add_default_attrs(evsel_list,
                                               frontend_attrs) < 0)
                return -1;
        }
        if (pmu_have_event("cpu", "stalled-cycles-backend")) {
            if (perf_evlist__add_default_attrs(evsel_list,
                                               backend_attrs) < 0)
                return -1;
        }
        if (perf_evlist__add_default_attrs(evsel_list, default_attrs1) < 0)
            return -1;
    }

    /* Detailed events get appended to the event list: */

    if (detailed_run < 1)
        return 0;

    /* Append detailed run extra attributes: */
    if (perf_evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
        return -1;

    if (detailed_run < 2)
        return 0;

    /* Append very detailed run extra attributes: */
    if (perf_evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
        return -1;

    if (detailed_run < 3)
        return 0;

    /* Append very, very detailed run extra attributes: */
    return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
}
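/*
 * Note (editorial): detail levels stack. A plain "perf stat" uses the
 * default attrs above; "-d" appends detailed_attrs, "-d -d" additionally
 * appends very_detailed_attrs, and "-d -d -d" also appends
 * very_very_detailed_attrs.
 */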
static const char * const stat_record_usage[] = {
    "perf stat record [<options>]",
    NULL,
};
static void init_features(struct perf_session *session)
{
    int feat;

    for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
        perf_header__set_feat(&session->header, feat);

    perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
    perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
    perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
    perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
    perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
}
static int __cmd_record(int argc, const char **argv)
{
    struct perf_session *session;
    struct perf_data *data = &perf_stat.data;

    argc = parse_options(argc, argv, stat_options, stat_record_usage,
                         PARSE_OPT_STOP_AT_NON_OPTION);

    if (output_name)
        data->path = output_name;

    if (stat_config.run_count != 1 || forever) {
        pr_err("Cannot use -r option with perf stat record.\n");
        return -1;
    }

    session = perf_session__new(data, false, NULL);
    if (IS_ERR(session)) {
        pr_err("Perf session creation failed\n");
        return PTR_ERR(session);
    }

    init_features(session);

    session->evlist = evsel_list;
    perf_stat.session = session;
    perf_stat.record = true;
    return argc;
}
static int process_stat_round_event(struct perf_session *session,
                                    union perf_event *event)
{
    struct perf_record_stat_round *stat_round = &event->stat_round;
    struct evsel *counter;
    struct timespec tsh, *ts = NULL;
    const char **argv = session->header.env.cmdline_argv;
    int argc = session->header.env.nr_cmdline;

    evlist__for_each_entry(evsel_list, counter)
        perf_stat_process_counter(&stat_config, counter);

    if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL)
        update_stats(&walltime_nsecs_stats, stat_round->time);

    if (stat_config.interval && stat_round->time) {
        tsh.tv_sec  = stat_round->time / NSEC_PER_SEC;
        tsh.tv_nsec = stat_round->time % NSEC_PER_SEC;
        ts = &tsh;
    }

    print_counters(ts, argc, argv);
    return 0;
}
*session
,
1653 union perf_event
*event
)
1655 struct perf_tool
*tool
= session
->tool
;
1656 struct perf_stat
*st
= container_of(tool
, struct perf_stat
, tool
);
1658 perf_event__read_stat_config(&stat_config
, &event
->stat_config
);
1660 if (perf_cpu_map__empty(st
->cpus
)) {
1661 if (st
->aggr_mode
!= AGGR_UNSET
)
1662 pr_warning("warning: processing task data, aggregation mode not set\n");
1666 if (st
->aggr_mode
!= AGGR_UNSET
)
1667 stat_config
.aggr_mode
= st
->aggr_mode
;
1669 if (perf_stat
.data
.is_pipe
)
1670 perf_stat_init_aggr_mode();
1672 perf_stat_init_aggr_mode_file(st
);
1677 static int set_maps(struct perf_stat
*st
)
1679 if (!st
->cpus
|| !st
->threads
)
1682 if (WARN_ONCE(st
->maps_allocated
, "stats double allocation\n"))
1685 perf_evlist__set_maps(&evsel_list
->core
, st
->cpus
, st
->threads
);
1687 if (perf_evlist__alloc_stats(evsel_list
, true))
1690 st
->maps_allocated
= true;
static
int process_thread_map_event(struct perf_session *session,
                             union perf_event *event)
{
    struct perf_tool *tool = session->tool;
    struct perf_stat *st = container_of(tool, struct perf_stat, tool);

    if (st->threads) {
        pr_warning("Extra thread map event, ignoring.\n");
        return 0;
    }

    st->threads = thread_map__new_event(&event->thread_map);
    if (!st->threads)
        return -ENOMEM;

    return set_maps(st);
}
static
int process_cpu_map_event(struct perf_session *session,
                          union perf_event *event)
{
    struct perf_tool *tool = session->tool;
    struct perf_stat *st = container_of(tool, struct perf_stat, tool);
    struct perf_cpu_map *cpus;

    if (st->cpus) {
        pr_warning("Extra cpu map event, ignoring.\n");
        return 0;
    }

    cpus = cpu_map__new_data(&event->cpu_map.data);
    if (!cpus)
        return -ENOMEM;

    st->cpus = cpus;
    return set_maps(st);
}
static int runtime_stat_new(struct perf_stat_config *config, int nthreads)
{
    int i;

    config->stats = calloc(nthreads, sizeof(struct runtime_stat));
    if (!config->stats)
        return -1;

    config->stats_num = nthreads;

    for (i = 0; i < nthreads; i++)
        runtime_stat__init(&config->stats[i]);

    return 0;
}
static void runtime_stat_delete(struct perf_stat_config *config)
{
    int i;

    if (!config->stats)
        return;

    for (i = 0; i < config->stats_num; i++)
        runtime_stat__exit(&config->stats[i]);

    zfree(&config->stats);
}
static const char * const stat_report_usage[] = {
    "perf stat report [<options>]",
    NULL,
};
static struct perf_stat perf_stat = {
    .tool = {
        .attr		= perf_event__process_attr,
        .event_update	= perf_event__process_event_update,
        .thread_map	= process_thread_map_event,
        .cpu_map	= process_cpu_map_event,
        .stat_config	= process_stat_config_event,
        .stat		= perf_event__process_stat_event,
        .stat_round	= process_stat_round_event,
    },
    .aggr_mode = AGGR_UNSET,
};
static int __cmd_report(int argc, const char **argv)
{
    struct perf_session *session;
    const struct option options[] = {
    OPT_STRING('i', "input", &input_name, "file", "input file name"),
    OPT_SET_UINT(0, "per-socket", &perf_stat.aggr_mode,
                 "aggregate counts per processor socket", AGGR_SOCKET),
    OPT_SET_UINT(0, "per-die", &perf_stat.aggr_mode,
                 "aggregate counts per processor die", AGGR_DIE),
    OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode,
                 "aggregate counts per physical processor core", AGGR_CORE),
    OPT_SET_UINT(0, "per-node", &perf_stat.aggr_mode,
                 "aggregate counts per numa node", AGGR_NODE),
    OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode,
                 "disable CPU count aggregation", AGGR_NONE),
    OPT_END()
    };
    struct stat st;
    int ret;

    argc = parse_options(argc, argv, options, stat_report_usage, 0);

    if (!input_name || !strlen(input_name)) {
        if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
            input_name = "-";
        else
            input_name = "perf.data";
    }

    perf_stat.data.path = input_name;
    perf_stat.data.mode = PERF_DATA_MODE_READ;

    session = perf_session__new(&perf_stat.data, false, &perf_stat.tool);
    if (IS_ERR(session))
        return PTR_ERR(session);

    perf_stat.session = session;
    stat_config.output = stderr;
    evsel_list = session->evlist;

    ret = perf_session__process_events(session);
    if (ret)
        return ret;

    perf_session__delete(session);
    return 0;
}
)
1832 * Make system wide (-a) the default target if
1833 * no target was specified and one of following
1834 * conditions is met:
1836 * - there's no workload specified
1837 * - there is workload specified but all requested
1838 * events are system wide events
1840 if (!target__none(&target
))
1844 target
.system_wide
= true;
1846 struct evsel
*counter
;
1848 evlist__for_each_entry(evsel_list
, counter
) {
1849 if (!counter
->core
.system_wide
)
1853 if (evsel_list
->core
.nr_entries
)
1854 target
.system_wide
= true;
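/*
 * Illustrative example (editorial, not from the original source): a run
 * with no workload and no -p/-t/-C target falls back to system-wide
 * counting, and so does a workload run whose every requested event is
 * itself system-wide (e.g. uncore PMU events), since such events cannot
 * be counted per task anyway.
 */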
int cmd_stat(int argc, const char **argv)
{
    const char * const stat_usage[] = {
        "perf stat [<options>] [<command>]",
        NULL
    };
    int status = -EINVAL, run_idx;
    const char *mode;
    FILE *output = stderr;
    unsigned int interval, timeout;
    const char * const stat_subcommands[] = { "record", "report" };

    setlocale(LC_ALL, "");

    evsel_list = evlist__new();
    if (evsel_list == NULL)
        return -ENOMEM;

    parse_events__shrink_config_terms();

    /* String-parsing callback-based options would segfault when negated */
    set_option_flag(stat_options, 'e', "event", PARSE_OPT_NONEG);
    set_option_flag(stat_options, 'M', "metrics", PARSE_OPT_NONEG);
    set_option_flag(stat_options, 'G', "cgroup", PARSE_OPT_NONEG);

    argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands,
                                    (const char **) stat_usage,
                                    PARSE_OPT_STOP_AT_NON_OPTION);
    perf_stat__collect_metric_expr(evsel_list);
    perf_stat__init_shadow_stats();

    if (stat_config.csv_sep) {
        stat_config.csv_output = true;
        if (!strcmp(stat_config.csv_sep, "\\t"))
            stat_config.csv_sep = "\t";
    } else
        stat_config.csv_sep = DEFAULT_SEPARATOR;

    if (argc && !strncmp(argv[0], "rec", 3)) {
        argc = __cmd_record(argc, argv);
        if (argc < 0)
            return -1;
    } else if (argc && !strncmp(argv[0], "rep", 3))
        return __cmd_report(argc, argv);
    interval = stat_config.interval;
    timeout = stat_config.timeout;

    /*
     * For the record command the -o option is already taken care of.
     */
    if (!STAT_RECORD && output_name && strcmp(output_name, "-"))
        output = NULL;

    if (output_name && output_fd) {
        fprintf(stderr, "cannot use both --output and --log-fd\n");
        parse_options_usage(stat_usage, stat_options, "o", 1);
        parse_options_usage(NULL, stat_options, "log-fd", 0);
        goto out;
    }

    if (stat_config.metric_only && stat_config.aggr_mode == AGGR_THREAD) {
        fprintf(stderr, "--metric-only is not supported with --per-thread\n");
        goto out;
    }

    if (stat_config.metric_only && stat_config.run_count > 1) {
        fprintf(stderr, "--metric-only is not supported with -r\n");
        goto out;
    }

    if (stat_config.walltime_run_table && stat_config.run_count <= 1) {
        fprintf(stderr, "--table is only supported with -r\n");
        parse_options_usage(stat_usage, stat_options, "r", 1);
        parse_options_usage(NULL, stat_options, "table", 0);
        goto out;
    }

    if (output_fd < 0) {
        fprintf(stderr, "argument to --log-fd must be > 0\n");
        parse_options_usage(stat_usage, stat_options, "log-fd", 0);
        goto out;
    }

    if (!output) {
        struct timespec tm;
        mode = append_file ? "a" : "w";

        output = fopen(output_name, mode);
        if (!output) {
            perror("failed to create output file");
            return -1;
        }
        clock_gettime(CLOCK_REALTIME, &tm);
        fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
    } else if (output_fd > 0) {
        mode = append_file ? "a" : "w";
        output = fdopen(output_fd, mode);
        if (!output) {
            perror("Failed opening logfd");
            return -errno;
        }
    }

    stat_config.output = output;

    /*
     * Let the spreadsheet do the pretty-printing.
     */
    if (stat_config.csv_output) {
        /* User explicitly passed -B? */
        if (big_num_opt == 1) {
            fprintf(stderr, "-B option not supported with -x\n");
            parse_options_usage(stat_usage, stat_options, "B", 1);
            parse_options_usage(NULL, stat_options, "x", 1);
            goto out;
        } else /* Nope, so disable big number formatting */
            stat_config.big_num = false;
    } else if (big_num_opt == 0) /* User passed --no-big-num */
        stat_config.big_num = false;

    setup_system_wide(argc);
    /*
     * Display user/system times only for a single
     * run and when there's a specified tracee.
     */
    if ((stat_config.run_count == 1) && target__none(&target))
        stat_config.ru_display = true;

    if (stat_config.run_count < 0) {
        pr_err("Run count must be a positive number\n");
        parse_options_usage(stat_usage, stat_options, "r", 1);
        goto out;
    } else if (stat_config.run_count == 0) {
        forever = true;
        stat_config.run_count = 1;
    }

    if (stat_config.walltime_run_table) {
        stat_config.walltime_run = zalloc(stat_config.run_count * sizeof(stat_config.walltime_run[0]));
        if (!stat_config.walltime_run) {
            pr_err("failed to setup -r option");
            goto out;
        }
    }

    if ((stat_config.aggr_mode == AGGR_THREAD) &&
        !target__has_task(&target)) {
        if (!target.system_wide || target.cpu_list) {
            fprintf(stderr, "The --per-thread option is only "
                    "available when monitoring via -p -t -a "
                    "options or only --per-thread.\n");
            parse_options_usage(NULL, stat_options, "p", 1);
            parse_options_usage(NULL, stat_options, "t", 1);
            goto out;
        }
    }

    /*
     * no_aggr and cgroup are for system-wide only;
     * --per-thread is aggregated per thread, we don't mix it with cpu mode.
     */
    if (((stat_config.aggr_mode != AGGR_GLOBAL &&
          stat_config.aggr_mode != AGGR_THREAD) || nr_cgroups) &&
        !target__has_cpu(&target)) {
        fprintf(stderr, "both cgroup and no-aggregation "
                "modes only available in system-wide mode\n");

        parse_options_usage(stat_usage, stat_options, "G", 1);
        parse_options_usage(NULL, stat_options, "A", 1);
        parse_options_usage(NULL, stat_options, "a", 1);
        goto out;
    }
    if (add_default_attributes())
        goto out;

    target__validate(&target);

    if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide))
        target.per_thread = true;

    if (perf_evlist__create_maps(evsel_list, &target) < 0) {
        if (target__has_task(&target)) {
            pr_err("Problems finding threads of monitor\n");
            parse_options_usage(stat_usage, stat_options, "p", 1);
            parse_options_usage(NULL, stat_options, "t", 1);
        } else if (target__has_cpu(&target)) {
            perror("failed to parse CPUs map");
            parse_options_usage(stat_usage, stat_options, "C", 1);
            parse_options_usage(NULL, stat_options, "a", 1);
        }
        goto out;
    }

    /*
     * Initialize thread_map with comm names,
     * so we could print it out on output.
     */
    if (stat_config.aggr_mode == AGGR_THREAD) {
        thread_map__read_comms(evsel_list->core.threads);
        if (target.system_wide) {
            if (runtime_stat_new(&stat_config,
                                 perf_thread_map__nr(evsel_list->core.threads))) {
                goto out;
            }
        }
    }

    if (stat_config.aggr_mode == AGGR_NODE)
        cpu__setup_cpunode_map();

    if (stat_config.times && interval)
        interval_count = true;
    else if (stat_config.times && !interval) {
        pr_err("interval-count option should be used together with "
               "interval-print.\n");
        parse_options_usage(stat_usage, stat_options, "interval-count", 0);
        parse_options_usage(stat_usage, stat_options, "I", 1);
        goto out;
    }

    if (timeout && timeout < 100) {
        if (timeout < 10) {
            pr_err("timeout must be >= 10ms.\n");
            parse_options_usage(stat_usage, stat_options, "timeout", 0);
            goto out;
        } else
            pr_warning("timeout < 100ms. "
                       "The overhead percentage could be high in some cases. "
                       "Please proceed with caution.\n");
    }
    if (timeout && interval) {
        pr_err("timeout option is not supported with interval-print.\n");
        parse_options_usage(stat_usage, stat_options, "timeout", 0);
        parse_options_usage(stat_usage, stat_options, "I", 1);
        goto out;
    }

    if (perf_evlist__alloc_stats(evsel_list, interval))
        goto out;

    if (perf_stat_init_aggr_mode())
        goto out;
    /*
     * Set sample_type to PERF_SAMPLE_IDENTIFIER, which should be harmless
     * while avoiding that older tools show confusing messages.
     *
     * However for pipe sessions we need to keep it zero,
     * because script's perf_evsel__check_attr is triggered
     * by attr->sample_type != 0, and we can't run it on
     * stat sessions.
     */
    stat_config.identifier = !(STAT_RECORD && perf_stat.data.is_pipe);

    /*
     * We don't want to block the signals - that would cause
     * child tasks to inherit that and Ctrl-C would not work.
     * What we want is for Ctrl-C to work in the exec()-ed
     * task, but being ignored by perf stat itself:
     */
    atexit(sig_atexit);
    if (!forever)
        signal(SIGINT,  skip_signal);
    signal(SIGCHLD, skip_signal);
    signal(SIGALRM, skip_signal);
    signal(SIGABRT, skip_signal);
    status = 0;
    for (run_idx = 0; forever || run_idx < stat_config.run_count; run_idx++) {
        if (stat_config.run_count != 1 && verbose > 0)
            fprintf(output, "[ perf stat: executing run #%d ... ]\n",
                    run_idx + 1);

        if (run_idx != 0)
            perf_evlist__reset_prev_raw_counts(evsel_list);

        status = run_perf_stat(argc, argv, run_idx);
        if (forever && status != -1 && !interval) {
            print_counters(NULL, argc, argv);
            perf_stat__reset_stats();
        }
    }

    if (!forever && status != -1 && !interval)
        print_counters(NULL, argc, argv);
    if (STAT_RECORD) {
        /*
         * We synthesize the kernel mmap record just so that older tools
         * don't emit warnings about not being able to resolve symbols
         * due to /proc/sys/kernel/kptr_restrict settings and instead provide
         * a saner message about no samples being in the perf.data file.
         *
         * This also serves to suppress a warning about f_header.data.size == 0
         * in header.c at the moment 'perf stat record' gets introduced, which
         * is not really needed once we start adding the stat specific PERF_RECORD_
         * records, but the need to suppress the kptr_restrict messages in older
         * tools remain  -acme
         */
        int fd = perf_data__fd(&perf_stat.data);
        int err = perf_event__synthesize_kernel_mmap((void *)&perf_stat,
                                                     process_synthesized_event,
                                                     &perf_stat.session->machines.host);
        if (err) {
            pr_warning("Couldn't synthesize the kernel mmap record, harmless, "
                       "older tools may produce warnings about this file.\n");
        }

        if (!interval) {
            if (WRITE_STAT_ROUND_EVENT(walltime_nsecs_stats.max, FINAL))
                pr_err("failed to write stat round event\n");
        }

        if (!perf_stat.data.is_pipe) {
            perf_stat.session->header.data_size += perf_stat.bytes_written;
            perf_session__write_header(perf_stat.session, evsel_list, fd, true);
        }

        evlist__close(evsel_list);
        perf_session__delete(perf_stat.session);
    }

    perf_stat__exit_aggr_mode();
    perf_evlist__free_stats(evsel_list);
out:
    zfree(&stat_config.walltime_run);

    if (smi_cost && smi_reset)
        sysfs__write_int(FREEZE_ON_SMI_PATH, 0);

    evlist__delete(evsel_list);

    runtime_stat_delete(&stat_config);

    return status;
}