4 * Builtin stat command: Give a precise performance counters summary
5 * overview about any workload, CPU or specific PID.
9 $ perf stat ./hackbench 10
13 Performance counter stats for './hackbench 10':
15 1708.761321 task-clock # 11.037 CPUs utilized
16 41,190 context-switches # 0.024 M/sec
17 6,735 CPU-migrations # 0.004 M/sec
18 17,318 page-faults # 0.010 M/sec
19 5,205,202,243 cycles # 3.046 GHz
20 3,856,436,920 stalled-cycles-frontend # 74.09% frontend cycles idle
21 1,600,790,871 stalled-cycles-backend # 30.75% backend cycles idle
22 2,603,501,247 instructions # 0.50 insns per cycle
23 # 1.48 stalled cycles per insn
24 484,357,498 branches # 283.455 M/sec
25 6,388,934 branch-misses # 1.32% of all branches
27 0.154822978 seconds time elapsed
30 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
32 * Improvements and fixes by:
34 * Arjan van de Ven <arjan@linux.intel.com>
35 * Yanmin Zhang <yanmin.zhang@intel.com>
36 * Wu Fengguang <fengguang.wu@intel.com>
37 * Mike Galbraith <efault@gmx.de>
38 * Paul Mackerras <paulus@samba.org>
39 * Jaswinder Singh Rajput <jaswinder@kernel.org>
41 * Released under the GPL v2. (and only v2, not any later version)
46 #include "util/cgroup.h"
47 #include "util/util.h"
48 #include <subcmd/parse-options.h>
49 #include "util/parse-events.h"
51 #include "util/event.h"
52 #include "util/evlist.h"
53 #include "util/evsel.h"
54 #include "util/debug.h"
55 #include "util/drv_configs.h"
56 #include "util/color.h"
57 #include "util/stat.h"
58 #include "util/header.h"
59 #include "util/cpumap.h"
60 #include "util/thread.h"
61 #include "util/thread_map.h"
62 #include "util/counts.h"
63 #include "util/group.h"
64 #include "util/session.h"
65 #include "util/tool.h"
66 #include "util/string2.h"
67 #include "util/metricgroup.h"
71 #include <linux/time64.h>
72 #include <api/fs/fs.h>
76 #include <sys/prctl.h>
80 #include <sys/types.h>
85 #include <sys/resource.h>
88 #include "sane_ctype.h"
90 #define DEFAULT_SEPARATOR " "
91 #define CNTR_NOT_SUPPORTED "<not supported>"
92 #define CNTR_NOT_COUNTED "<not counted>"
93 #define FREEZE_ON_SMI_PATH "devices/cpu/freeze_on_smi"
95 static void print_counters(struct timespec
*ts
, int argc
, const char **argv
);
97 /* Default events used for perf stat -T */
98 static const char *transaction_attrs
= {
110 /* More limited version when the CPU does not have all events. */
111 static const char * transaction_limited_attrs
= {
121 static const char * topdown_attrs
[] = {
122 "topdown-total-slots",
123 "topdown-slots-retired",
124 "topdown-recovery-bubbles",
125 "topdown-fetch-bubbles",
126 "topdown-slots-issued",
130 static const char *smi_cost_attrs
= {
138 static struct perf_evlist
*evsel_list
;
140 static struct rblist metric_events
;
142 static struct target target
= {
146 typedef int (*aggr_get_id_t
)(struct cpu_map
*m
, int cpu
);
148 #define METRIC_ONLY_LEN 20
150 static int run_count
= 1;
151 static bool no_inherit
= false;
152 static volatile pid_t child_pid
= -1;
153 static bool null_run
= false;
154 static int detailed_run
= 0;
155 static bool transaction_run
;
156 static bool topdown_run
= false;
157 static bool smi_cost
= false;
158 static bool smi_reset
= false;
159 static bool big_num
= true;
160 static int big_num_opt
= -1;
161 static const char *csv_sep
= NULL
;
162 static bool csv_output
= false;
163 static bool group
= false;
164 static const char *pre_cmd
= NULL
;
165 static const char *post_cmd
= NULL
;
166 static bool sync_run
= false;
167 static unsigned int initial_delay
= 0;
168 static unsigned int unit_width
= 4; /* strlen("unit") */
169 static bool forever
= false;
170 static bool metric_only
= false;
171 static bool force_metric_only
= false;
172 static bool no_merge
= false;
173 static bool walltime_run_table
= false;
174 static struct timespec ref_time
;
175 static struct cpu_map
*aggr_map
;
176 static aggr_get_id_t aggr_get_id
;
177 static bool append_file
;
178 static bool interval_count
;
179 static bool interval_clear
;
180 static const char *output_name
;
181 static int output_fd
;
182 static int print_free_counters_hint
;
183 static int print_mixed_hw_group_error
;
184 static u64
*walltime_run
;
185 static bool ru_display
= false;
186 static struct rusage ru_data
;
187 static unsigned int metric_only_len
= METRIC_ONLY_LEN
;
191 struct perf_data data
;
192 struct perf_session
*session
;
194 struct perf_tool tool
;
196 struct cpu_map
*cpus
;
197 struct thread_map
*threads
;
198 enum aggr_mode aggr_mode
;
201 static struct perf_stat perf_stat
;
202 #define STAT_RECORD perf_stat.record
204 static volatile int done
= 0;
206 static struct perf_stat_config stat_config
= {
207 .aggr_mode
= AGGR_GLOBAL
,
211 static bool is_duration_time(struct perf_evsel
*evsel
)
213 return !strcmp(evsel
->name
, "duration_time");
216 static inline void diff_timespec(struct timespec
*r
, struct timespec
*a
,
219 r
->tv_sec
= a
->tv_sec
- b
->tv_sec
;
220 if (a
->tv_nsec
< b
->tv_nsec
) {
221 r
->tv_nsec
= a
->tv_nsec
+ NSEC_PER_SEC
- b
->tv_nsec
;
224 r
->tv_nsec
= a
->tv_nsec
- b
->tv_nsec
;
228 static void perf_stat__reset_stats(void)
232 perf_evlist__reset_stats(evsel_list
);
233 perf_stat__reset_shadow_stats();
235 for (i
= 0; i
< stat_config
.stats_num
; i
++)
236 perf_stat__reset_shadow_per_stat(&stat_config
.stats
[i
]);
239 static int create_perf_stat_counter(struct perf_evsel
*evsel
)
241 struct perf_event_attr
*attr
= &evsel
->attr
;
242 struct perf_evsel
*leader
= evsel
->leader
;
244 if (stat_config
.scale
) {
245 attr
->read_format
= PERF_FORMAT_TOTAL_TIME_ENABLED
|
246 PERF_FORMAT_TOTAL_TIME_RUNNING
;
250 * The event is part of non trivial group, let's enable
251 * the group read (for leader) and ID retrieval for all
254 if (leader
->nr_members
> 1)
255 attr
->read_format
|= PERF_FORMAT_ID
|PERF_FORMAT_GROUP
;
257 attr
->inherit
= !no_inherit
;
260 * Some events get initialized with sample_(period/type) set,
261 * like tracepoints. Clear it up for counting.
263 attr
->sample_period
= 0;
266 * But set sample_type to PERF_SAMPLE_IDENTIFIER, which should be harmless
267 * while avoiding that older tools show confusing messages.
269 * However for pipe sessions we need to keep it zero,
270 * because script's perf_evsel__check_attr is triggered
271 * by attr->sample_type != 0, and we can't run it on
274 if (!(STAT_RECORD
&& perf_stat
.data
.is_pipe
))
275 attr
->sample_type
= PERF_SAMPLE_IDENTIFIER
;
278 * Disabling all counters initially, they will be enabled
279 * either manually by us or by kernel via enable_on_exec
282 if (perf_evsel__is_group_leader(evsel
)) {
286 * In case of initial_delay we enable tracee
289 if (target__none(&target
) && !initial_delay
)
290 attr
->enable_on_exec
= 1;
293 if (target__has_cpu(&target
) && !target__has_per_thread(&target
))
294 return perf_evsel__open_per_cpu(evsel
, perf_evsel__cpus(evsel
));
296 return perf_evsel__open_per_thread(evsel
, evsel_list
->threads
);
299 static int process_synthesized_event(struct perf_tool
*tool __maybe_unused
,
300 union perf_event
*event
,
301 struct perf_sample
*sample __maybe_unused
,
302 struct machine
*machine __maybe_unused
)
304 if (perf_data__write(&perf_stat
.data
, event
, event
->header
.size
) < 0) {
305 pr_err("failed to write perf data, error: %m\n");
309 perf_stat
.bytes_written
+= event
->header
.size
;
313 static int write_stat_round_event(u64 tm
, u64 type
)
315 return perf_event__synthesize_stat_round(NULL
, tm
, type
,
316 process_synthesized_event
,
320 #define WRITE_STAT_ROUND_EVENT(time, interval) \
321 write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval)
323 #define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
326 perf_evsel__write_stat_event(struct perf_evsel
*counter
, u32 cpu
, u32 thread
,
327 struct perf_counts_values
*count
)
329 struct perf_sample_id
*sid
= SID(counter
, cpu
, thread
);
331 return perf_event__synthesize_stat(NULL
, cpu
, thread
, sid
->id
, count
,
332 process_synthesized_event
, NULL
);
336 * Read out the results of a single counter:
337 * do not aggregate counts across CPUs in system-wide mode
339 static int read_counter(struct perf_evsel
*counter
)
341 int nthreads
= thread_map__nr(evsel_list
->threads
);
342 int ncpus
, cpu
, thread
;
344 if (target__has_cpu(&target
) && !target__has_per_thread(&target
))
345 ncpus
= perf_evsel__nr_cpus(counter
);
349 if (!counter
->supported
)
352 if (counter
->system_wide
)
355 for (thread
= 0; thread
< nthreads
; thread
++) {
356 for (cpu
= 0; cpu
< ncpus
; cpu
++) {
357 struct perf_counts_values
*count
;
359 count
= perf_counts(counter
->counts
, cpu
, thread
);
362 * The leader's group read loads data into its group members
363 * (via perf_evsel__read_counter) and sets threir count->loaded.
365 if (!count
->loaded
&&
366 perf_evsel__read_counter(counter
, cpu
, thread
)) {
367 counter
->counts
->scaled
= -1;
368 perf_counts(counter
->counts
, cpu
, thread
)->ena
= 0;
369 perf_counts(counter
->counts
, cpu
, thread
)->run
= 0;
373 count
->loaded
= false;
376 if (perf_evsel__write_stat_event(counter
, cpu
, thread
, count
)) {
377 pr_err("failed to write stat event\n");
383 fprintf(stat_config
.output
,
384 "%s: %d: %" PRIu64
" %" PRIu64
" %" PRIu64
"\n",
385 perf_evsel__name(counter
),
387 count
->val
, count
->ena
, count
->run
);
395 static void read_counters(void)
397 struct perf_evsel
*counter
;
400 evlist__for_each_entry(evsel_list
, counter
) {
401 ret
= read_counter(counter
);
403 pr_debug("failed to read counter %s\n", counter
->name
);
405 if (ret
== 0 && perf_stat_process_counter(&stat_config
, counter
))
406 pr_warning("failed to process counter %s\n", counter
->name
);
410 static void process_interval(void)
412 struct timespec ts
, rs
;
416 clock_gettime(CLOCK_MONOTONIC
, &ts
);
417 diff_timespec(&rs
, &ts
, &ref_time
);
420 if (WRITE_STAT_ROUND_EVENT(rs
.tv_sec
* NSEC_PER_SEC
+ rs
.tv_nsec
, INTERVAL
))
421 pr_err("failed to write stat round event\n");
424 init_stats(&walltime_nsecs_stats
);
425 update_stats(&walltime_nsecs_stats
, stat_config
.interval
* 1000000);
426 print_counters(&rs
, 0, NULL
);
429 static void enable_counters(void)
432 usleep(initial_delay
* USEC_PER_MSEC
);
435 * We need to enable counters only if:
436 * - we don't have tracee (attaching to task or cpu)
437 * - we have initial delay configured
439 if (!target__none(&target
) || initial_delay
)
440 perf_evlist__enable(evsel_list
);
443 static void disable_counters(void)
446 * If we don't have tracee (attaching to task or cpu), counters may
447 * still be running. To get accurate group ratios, we must stop groups
448 * from counting before reading their constituent counters.
450 if (!target__none(&target
))
451 perf_evlist__disable(evsel_list
);
454 static volatile int workload_exec_errno
;
457 * perf_evlist__prepare_workload will send a SIGUSR1
458 * if the fork fails, since we asked by setting its
459 * want_signal to true.
461 static void workload_exec_failed_signal(int signo __maybe_unused
, siginfo_t
*info
,
462 void *ucontext __maybe_unused
)
464 workload_exec_errno
= info
->si_value
.sival_int
;
467 static int perf_stat_synthesize_config(bool is_pipe
)
472 err
= perf_event__synthesize_attrs(NULL
, perf_stat
.session
,
473 process_synthesized_event
);
475 pr_err("Couldn't synthesize attrs.\n");
480 err
= perf_event__synthesize_extra_attr(NULL
,
482 process_synthesized_event
,
485 err
= perf_event__synthesize_thread_map2(NULL
, evsel_list
->threads
,
486 process_synthesized_event
,
489 pr_err("Couldn't synthesize thread map.\n");
493 err
= perf_event__synthesize_cpu_map(NULL
, evsel_list
->cpus
,
494 process_synthesized_event
, NULL
);
496 pr_err("Couldn't synthesize thread map.\n");
500 err
= perf_event__synthesize_stat_config(NULL
, &stat_config
,
501 process_synthesized_event
, NULL
);
503 pr_err("Couldn't synthesize config.\n");
510 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
512 static int __store_counter_ids(struct perf_evsel
*counter
)
516 for (cpu
= 0; cpu
< xyarray__max_x(counter
->fd
); cpu
++) {
517 for (thread
= 0; thread
< xyarray__max_y(counter
->fd
);
519 int fd
= FD(counter
, cpu
, thread
);
521 if (perf_evlist__id_add_fd(evsel_list
, counter
,
522 cpu
, thread
, fd
) < 0)
530 static int store_counter_ids(struct perf_evsel
*counter
)
532 struct cpu_map
*cpus
= counter
->cpus
;
533 struct thread_map
*threads
= counter
->threads
;
535 if (perf_evsel__alloc_id(counter
, cpus
->nr
, threads
->nr
))
538 return __store_counter_ids(counter
);
541 static bool perf_evsel__should_store_id(struct perf_evsel
*counter
)
543 return STAT_RECORD
|| counter
->attr
.read_format
& PERF_FORMAT_ID
;
546 static struct perf_evsel
*perf_evsel__reset_weak_group(struct perf_evsel
*evsel
)
548 struct perf_evsel
*c2
, *leader
;
551 leader
= evsel
->leader
;
552 pr_debug("Weak group for %s/%d failed\n",
553 leader
->name
, leader
->nr_members
);
556 * for_each_group_member doesn't work here because it doesn't
557 * include the first entry.
559 evlist__for_each_entry(evsel_list
, c2
) {
562 if (c2
->leader
== leader
) {
564 perf_evsel__close(c2
);
572 static int __run_perf_stat(int argc
, const char **argv
, int run_idx
)
574 int interval
= stat_config
.interval
;
575 int times
= stat_config
.times
;
576 int timeout
= stat_config
.timeout
;
578 unsigned long long t0
, t1
;
579 struct perf_evsel
*counter
;
583 const bool forks
= (argc
> 0);
584 bool is_pipe
= STAT_RECORD
? perf_stat
.data
.is_pipe
: false;
585 struct perf_evsel_config_term
*err_term
;
588 ts
.tv_sec
= interval
/ USEC_PER_MSEC
;
589 ts
.tv_nsec
= (interval
% USEC_PER_MSEC
) * NSEC_PER_MSEC
;
590 } else if (timeout
) {
591 ts
.tv_sec
= timeout
/ USEC_PER_MSEC
;
592 ts
.tv_nsec
= (timeout
% USEC_PER_MSEC
) * NSEC_PER_MSEC
;
599 if (perf_evlist__prepare_workload(evsel_list
, &target
, argv
, is_pipe
,
600 workload_exec_failed_signal
) < 0) {
601 perror("failed to prepare workload");
604 child_pid
= evsel_list
->workload
.pid
;
608 perf_evlist__set_leader(evsel_list
);
610 evlist__for_each_entry(evsel_list
, counter
) {
612 if (create_perf_stat_counter(counter
) < 0) {
614 /* Weak group failed. Reset the group. */
615 if ((errno
== EINVAL
|| errno
== EBADF
) &&
616 counter
->leader
!= counter
&&
617 counter
->weak_group
) {
618 counter
= perf_evsel__reset_weak_group(counter
);
623 * PPC returns ENXIO for HW counters until 2.6.37
624 * (behavior changed with commit b0a873e).
626 if (errno
== EINVAL
|| errno
== ENOSYS
||
627 errno
== ENOENT
|| errno
== EOPNOTSUPP
||
630 ui__warning("%s event is not supported by the kernel.\n",
631 perf_evsel__name(counter
));
632 counter
->supported
= false;
634 if ((counter
->leader
!= counter
) ||
635 !(counter
->leader
->nr_members
> 1))
637 } else if (perf_evsel__fallback(counter
, errno
, msg
, sizeof(msg
))) {
639 ui__warning("%s\n", msg
);
641 } else if (target__has_per_thread(&target
) &&
642 evsel_list
->threads
&&
643 evsel_list
->threads
->err_thread
!= -1) {
645 * For global --per-thread case, skip current
648 if (!thread_map__remove(evsel_list
->threads
,
649 evsel_list
->threads
->err_thread
)) {
650 evsel_list
->threads
->err_thread
= -1;
655 perf_evsel__open_strerror(counter
, &target
,
656 errno
, msg
, sizeof(msg
));
657 ui__error("%s\n", msg
);
660 kill(child_pid
, SIGTERM
);
664 counter
->supported
= true;
666 l
= strlen(counter
->unit
);
670 if (perf_evsel__should_store_id(counter
) &&
671 store_counter_ids(counter
))
675 if (perf_evlist__apply_filters(evsel_list
, &counter
)) {
676 pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
677 counter
->filter
, perf_evsel__name(counter
), errno
,
678 str_error_r(errno
, msg
, sizeof(msg
)));
682 if (perf_evlist__apply_drv_configs(evsel_list
, &counter
, &err_term
)) {
683 pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
684 err_term
->val
.drv_cfg
, perf_evsel__name(counter
), errno
,
685 str_error_r(errno
, msg
, sizeof(msg
)));
690 int err
, fd
= perf_data__fd(&perf_stat
.data
);
693 err
= perf_header__write_pipe(perf_data__fd(&perf_stat
.data
));
695 err
= perf_session__write_header(perf_stat
.session
, evsel_list
,
702 err
= perf_stat_synthesize_config(is_pipe
);
708 * Enable counters and exec the command:
711 clock_gettime(CLOCK_MONOTONIC
, &ref_time
);
714 perf_evlist__start_workload(evsel_list
);
717 if (interval
|| timeout
) {
718 while (!waitpid(child_pid
, &status
, WNOHANG
)) {
719 nanosleep(&ts
, NULL
);
723 if (interval_count
&& !(--times
))
727 wait4(child_pid
, &status
, 0, &ru_data
);
729 if (workload_exec_errno
) {
730 const char *emsg
= str_error_r(workload_exec_errno
, msg
, sizeof(msg
));
731 pr_err("Workload failed: %s\n", emsg
);
735 if (WIFSIGNALED(status
))
736 psignal(WTERMSIG(status
), argv
[0]);
740 nanosleep(&ts
, NULL
);
745 if (interval_count
&& !(--times
))
755 if (walltime_run_table
)
756 walltime_run
[run_idx
] = t1
- t0
;
758 update_stats(&walltime_nsecs_stats
, t1
- t0
);
761 * Closing a group leader splits the group, and as we only disable
762 * group leaders, results in remaining events becoming enabled. To
763 * avoid arbitrary skew, we must read all counters before closing any
767 perf_evlist__close(evsel_list
);
769 return WEXITSTATUS(status
);
772 static int run_perf_stat(int argc
, const char **argv
, int run_idx
)
777 ret
= system(pre_cmd
);
785 ret
= __run_perf_stat(argc
, argv
, run_idx
);
790 ret
= system(post_cmd
);
798 static void print_running(u64 run
, u64 ena
)
801 fprintf(stat_config
.output
, "%s%" PRIu64
"%s%.2f",
805 ena
? 100.0 * run
/ ena
: 100.0);
806 } else if (run
!= ena
) {
807 fprintf(stat_config
.output
, " (%.2f%%)", 100.0 * run
/ ena
);
811 static void print_noise_pct(double total
, double avg
)
813 double pct
= rel_stddev_stats(total
, avg
);
816 fprintf(stat_config
.output
, "%s%.2f%%", csv_sep
, pct
);
818 fprintf(stat_config
.output
, " ( +-%6.2f%% )", pct
);
821 static void print_noise(struct perf_evsel
*evsel
, double avg
)
823 struct perf_stat_evsel
*ps
;
829 print_noise_pct(stddev_stats(&ps
->res_stats
[0]), avg
);
832 static void aggr_printout(struct perf_evsel
*evsel
, int id
, int nr
)
834 switch (stat_config
.aggr_mode
) {
836 fprintf(stat_config
.output
, "S%d-C%*d%s%*d%s",
837 cpu_map__id_to_socket(id
),
839 cpu_map__id_to_cpu(id
),
846 fprintf(stat_config
.output
, "S%*d%s%*d%s",
855 fprintf(stat_config
.output
, "CPU%*d%s",
857 perf_evsel__cpus(evsel
)->map
[id
], csv_sep
);
860 fprintf(stat_config
.output
, "%*s-%*d%s",
862 thread_map__comm(evsel
->threads
, id
),
864 thread_map__pid(evsel
->threads
, id
),
880 struct perf_evsel
*evsel
;
883 #define METRIC_LEN 35
885 static void new_line_std(void *ctx
)
887 struct outstate
*os
= ctx
;
892 static void do_new_line_std(struct outstate
*os
)
895 fputs(os
->prefix
, os
->fh
);
896 aggr_printout(os
->evsel
, os
->id
, os
->nr
);
897 if (stat_config
.aggr_mode
== AGGR_NONE
)
898 fprintf(os
->fh
, " ");
899 fprintf(os
->fh
, " ");
902 static void print_metric_std(void *ctx
, const char *color
, const char *fmt
,
903 const char *unit
, double val
)
905 struct outstate
*os
= ctx
;
908 bool newline
= os
->newline
;
912 if (unit
== NULL
|| fmt
== NULL
) {
913 fprintf(out
, "%-*s", METRIC_LEN
, "");
920 n
= fprintf(out
, " # ");
922 n
+= color_fprintf(out
, color
, fmt
, val
);
924 n
+= fprintf(out
, fmt
, val
);
925 fprintf(out
, " %-*s", METRIC_LEN
- n
- 1, unit
);
928 static void new_line_csv(void *ctx
)
930 struct outstate
*os
= ctx
;
935 fprintf(os
->fh
, "%s%s", os
->prefix
, csv_sep
);
936 aggr_printout(os
->evsel
, os
->id
, os
->nr
);
937 for (i
= 0; i
< os
->nfields
; i
++)
938 fputs(csv_sep
, os
->fh
);
941 static void print_metric_csv(void *ctx
,
942 const char *color __maybe_unused
,
943 const char *fmt
, const char *unit
, double val
)
945 struct outstate
*os
= ctx
;
947 char buf
[64], *vals
, *ends
;
949 if (unit
== NULL
|| fmt
== NULL
) {
950 fprintf(out
, "%s%s", csv_sep
, csv_sep
);
953 snprintf(buf
, sizeof(buf
), fmt
, val
);
954 ends
= vals
= ltrim(buf
);
955 while (isdigit(*ends
) || *ends
== '.')
958 while (isspace(*unit
))
960 fprintf(out
, "%s%s%s%s", csv_sep
, vals
, csv_sep
, unit
);
963 /* Filter out some columns that don't work well in metrics only mode */
965 static bool valid_only_metric(const char *unit
)
969 if (strstr(unit
, "/sec") ||
970 strstr(unit
, "hz") ||
971 strstr(unit
, "Hz") ||
972 strstr(unit
, "CPUs utilized"))
977 static const char *fixunit(char *buf
, struct perf_evsel
*evsel
,
980 if (!strncmp(unit
, "of all", 6)) {
981 snprintf(buf
, 1024, "%s %s", perf_evsel__name(evsel
),
988 static void print_metric_only(void *ctx
, const char *color
, const char *fmt
,
989 const char *unit
, double val
)
991 struct outstate
*os
= ctx
;
993 char buf
[1024], str
[1024];
994 unsigned mlen
= metric_only_len
;
996 if (!valid_only_metric(unit
))
998 unit
= fixunit(buf
, os
->evsel
, unit
);
999 if (mlen
< strlen(unit
))
1000 mlen
= strlen(unit
) + 1;
1003 mlen
+= strlen(color
) + sizeof(PERF_COLOR_RESET
) - 1;
1005 color_snprintf(str
, sizeof(str
), color
?: "", fmt
, val
);
1006 fprintf(out
, "%*s ", mlen
, str
);
1009 static void print_metric_only_csv(void *ctx
, const char *color __maybe_unused
,
1011 const char *unit
, double val
)
1013 struct outstate
*os
= ctx
;
1015 char buf
[64], *vals
, *ends
;
1018 if (!valid_only_metric(unit
))
1020 unit
= fixunit(tbuf
, os
->evsel
, unit
);
1021 snprintf(buf
, sizeof buf
, fmt
, val
);
1022 ends
= vals
= ltrim(buf
);
1023 while (isdigit(*ends
) || *ends
== '.')
1026 fprintf(out
, "%s%s", vals
, csv_sep
);
1029 static void new_line_metric(void *ctx __maybe_unused
)
1033 static void print_metric_header(void *ctx
, const char *color __maybe_unused
,
1034 const char *fmt __maybe_unused
,
1035 const char *unit
, double val __maybe_unused
)
1037 struct outstate
*os
= ctx
;
1040 if (!valid_only_metric(unit
))
1042 unit
= fixunit(tbuf
, os
->evsel
, unit
);
1044 fprintf(os
->fh
, "%s%s", unit
, csv_sep
);
1046 fprintf(os
->fh
, "%*s ", metric_only_len
, unit
);
1049 static int first_shadow_cpu(struct perf_evsel
*evsel
, int id
)
1056 if (stat_config
.aggr_mode
== AGGR_NONE
)
1059 if (stat_config
.aggr_mode
== AGGR_GLOBAL
)
1062 for (i
= 0; i
< perf_evsel__nr_cpus(evsel
); i
++) {
1063 int cpu2
= perf_evsel__cpus(evsel
)->map
[i
];
1065 if (aggr_get_id(evsel_list
->cpus
, cpu2
) == id
)
1071 static void abs_printout(int id
, int nr
, struct perf_evsel
*evsel
, double avg
)
1073 FILE *output
= stat_config
.output
;
1074 double sc
= evsel
->scale
;
1078 fmt
= floor(sc
) != sc
? "%.2f%s" : "%.0f%s";
1081 fmt
= floor(sc
) != sc
? "%'18.2f%s" : "%'18.0f%s";
1083 fmt
= floor(sc
) != sc
? "%18.2f%s" : "%18.0f%s";
1086 aggr_printout(evsel
, id
, nr
);
1088 fprintf(output
, fmt
, avg
, csv_sep
);
1091 fprintf(output
, "%-*s%s",
1092 csv_output
? 0 : unit_width
,
1093 evsel
->unit
, csv_sep
);
1095 fprintf(output
, "%-*s", csv_output
? 0 : 25, perf_evsel__name(evsel
));
1098 fprintf(output
, "%s%s", csv_sep
, evsel
->cgrp
->name
);
1101 static bool is_mixed_hw_group(struct perf_evsel
*counter
)
1103 struct perf_evlist
*evlist
= counter
->evlist
;
1104 u32 pmu_type
= counter
->attr
.type
;
1105 struct perf_evsel
*pos
;
1107 if (counter
->nr_members
< 2)
1110 evlist__for_each_entry(evlist
, pos
) {
1111 /* software events can be part of any hardware group */
1112 if (pos
->attr
.type
== PERF_TYPE_SOFTWARE
)
1114 if (pmu_type
== PERF_TYPE_SOFTWARE
) {
1115 pmu_type
= pos
->attr
.type
;
1118 if (pmu_type
!= pos
->attr
.type
)
1125 static void printout(int id
, int nr
, struct perf_evsel
*counter
, double uval
,
1126 char *prefix
, u64 run
, u64 ena
, double noise
,
1127 struct runtime_stat
*st
)
1129 struct perf_stat_output_ctx out
;
1130 struct outstate os
= {
1131 .fh
= stat_config
.output
,
1132 .prefix
= prefix
? prefix
: "",
1137 print_metric_t pm
= print_metric_std
;
1141 nl
= new_line_metric
;
1143 pm
= print_metric_only_csv
;
1145 pm
= print_metric_only
;
1149 if (csv_output
&& !metric_only
) {
1150 static int aggr_fields
[] = {
1158 pm
= print_metric_csv
;
1161 os
.nfields
+= aggr_fields
[stat_config
.aggr_mode
];
1165 if (run
== 0 || ena
== 0 || counter
->counts
->scaled
== -1) {
1167 pm(&os
, NULL
, "", "", 0);
1170 aggr_printout(counter
, id
, nr
);
1172 fprintf(stat_config
.output
, "%*s%s",
1173 csv_output
? 0 : 18,
1174 counter
->supported
? CNTR_NOT_COUNTED
: CNTR_NOT_SUPPORTED
,
1177 if (counter
->supported
) {
1178 print_free_counters_hint
= 1;
1179 if (is_mixed_hw_group(counter
))
1180 print_mixed_hw_group_error
= 1;
1183 fprintf(stat_config
.output
, "%-*s%s",
1184 csv_output
? 0 : unit_width
,
1185 counter
->unit
, csv_sep
);
1187 fprintf(stat_config
.output
, "%*s",
1188 csv_output
? 0 : -25,
1189 perf_evsel__name(counter
));
1192 fprintf(stat_config
.output
, "%s%s",
1193 csv_sep
, counter
->cgrp
->name
);
1196 pm(&os
, NULL
, NULL
, "", 0);
1197 print_noise(counter
, noise
);
1198 print_running(run
, ena
);
1200 pm(&os
, NULL
, NULL
, "", 0);
1205 abs_printout(id
, nr
, counter
, uval
);
1207 out
.print_metric
= pm
;
1210 out
.force_header
= false;
1212 if (csv_output
&& !metric_only
) {
1213 print_noise(counter
, noise
);
1214 print_running(run
, ena
);
1217 perf_stat__print_shadow_stats(counter
, uval
,
1218 first_shadow_cpu(counter
, id
),
1219 &out
, &metric_events
, st
);
1220 if (!csv_output
&& !metric_only
) {
1221 print_noise(counter
, noise
);
1222 print_running(run
, ena
);
1226 static void aggr_update_shadow(void)
1230 struct perf_evsel
*counter
;
1232 for (s
= 0; s
< aggr_map
->nr
; s
++) {
1233 id
= aggr_map
->map
[s
];
1234 evlist__for_each_entry(evsel_list
, counter
) {
1236 for (cpu
= 0; cpu
< perf_evsel__nr_cpus(counter
); cpu
++) {
1237 s2
= aggr_get_id(evsel_list
->cpus
, cpu
);
1240 val
+= perf_counts(counter
->counts
, cpu
, 0)->val
;
1242 perf_stat__update_shadow_stats(counter
, val
,
1243 first_shadow_cpu(counter
, id
),
1249 static void uniquify_event_name(struct perf_evsel
*counter
)
1254 if (counter
->uniquified_name
||
1255 !counter
->pmu_name
|| !strncmp(counter
->name
, counter
->pmu_name
,
1256 strlen(counter
->pmu_name
)))
1259 config
= strchr(counter
->name
, '/');
1261 if (asprintf(&new_name
,
1262 "%s%s", counter
->pmu_name
, config
) > 0) {
1263 free(counter
->name
);
1264 counter
->name
= new_name
;
1267 if (asprintf(&new_name
,
1268 "%s [%s]", counter
->name
, counter
->pmu_name
) > 0) {
1269 free(counter
->name
);
1270 counter
->name
= new_name
;
1274 counter
->uniquified_name
= true;
1277 static void collect_all_aliases(struct perf_evsel
*counter
,
1278 void (*cb
)(struct perf_evsel
*counter
, void *data
,
1282 struct perf_evsel
*alias
;
1284 alias
= list_prepare_entry(counter
, &(evsel_list
->entries
), node
);
1285 list_for_each_entry_continue (alias
, &evsel_list
->entries
, node
) {
1286 if (strcmp(perf_evsel__name(alias
), perf_evsel__name(counter
)) ||
1287 alias
->scale
!= counter
->scale
||
1288 alias
->cgrp
!= counter
->cgrp
||
1289 strcmp(alias
->unit
, counter
->unit
) ||
1290 perf_evsel__is_clock(alias
) != perf_evsel__is_clock(counter
))
1292 alias
->merged_stat
= true;
1293 cb(alias
, data
, false);
1297 static bool collect_data(struct perf_evsel
*counter
,
1298 void (*cb
)(struct perf_evsel
*counter
, void *data
,
1302 if (counter
->merged_stat
)
1304 cb(counter
, data
, true);
1306 uniquify_event_name(counter
);
1307 else if (counter
->auto_merge_stats
)
1308 collect_all_aliases(counter
, cb
, data
);
1319 static void aggr_cb(struct perf_evsel
*counter
, void *data
, bool first
)
1321 struct aggr_data
*ad
= data
;
1324 for (cpu
= 0; cpu
< perf_evsel__nr_cpus(counter
); cpu
++) {
1325 struct perf_counts_values
*counts
;
1327 s2
= aggr_get_id(perf_evsel__cpus(counter
), cpu
);
1332 counts
= perf_counts(counter
->counts
, cpu
, 0);
1334 * When any result is bad, make them all to give
1335 * consistent output in interval mode.
1337 if (counts
->ena
== 0 || counts
->run
== 0 ||
1338 counter
->counts
->scaled
== -1) {
1343 ad
->val
+= counts
->val
;
1344 ad
->ena
+= counts
->ena
;
1345 ad
->run
+= counts
->run
;
1349 static void print_aggr(char *prefix
)
1351 FILE *output
= stat_config
.output
;
1352 struct perf_evsel
*counter
;
1358 if (!(aggr_map
|| aggr_get_id
))
1361 aggr_update_shadow();
1364 * With metric_only everything is on a single line.
1365 * Without each counter has its own line.
1367 for (s
= 0; s
< aggr_map
->nr
; s
++) {
1368 struct aggr_data ad
;
1369 if (prefix
&& metric_only
)
1370 fprintf(output
, "%s", prefix
);
1372 ad
.id
= id
= aggr_map
->map
[s
];
1374 evlist__for_each_entry(evsel_list
, counter
) {
1375 if (is_duration_time(counter
))
1378 ad
.val
= ad
.ena
= ad
.run
= 0;
1380 if (!collect_data(counter
, aggr_cb
, &ad
))
1386 if (first
&& metric_only
) {
1388 aggr_printout(counter
, id
, nr
);
1390 if (prefix
&& !metric_only
)
1391 fprintf(output
, "%s", prefix
);
1393 uval
= val
* counter
->scale
;
1394 printout(id
, nr
, counter
, uval
, prefix
, run
, ena
, 1.0,
1397 fputc('\n', output
);
1400 fputc('\n', output
);
1404 static int cmp_val(const void *a
, const void *b
)
1406 return ((struct perf_aggr_thread_value
*)b
)->val
-
1407 ((struct perf_aggr_thread_value
*)a
)->val
;
1410 static struct perf_aggr_thread_value
*sort_aggr_thread(
1411 struct perf_evsel
*counter
,
1412 int nthreads
, int ncpus
,
1415 int cpu
, thread
, i
= 0;
1417 struct perf_aggr_thread_value
*buf
;
1419 buf
= calloc(nthreads
, sizeof(struct perf_aggr_thread_value
));
1423 for (thread
= 0; thread
< nthreads
; thread
++) {
1424 u64 ena
= 0, run
= 0, val
= 0;
1426 for (cpu
= 0; cpu
< ncpus
; cpu
++) {
1427 val
+= perf_counts(counter
->counts
, cpu
, thread
)->val
;
1428 ena
+= perf_counts(counter
->counts
, cpu
, thread
)->ena
;
1429 run
+= perf_counts(counter
->counts
, cpu
, thread
)->run
;
1432 uval
= val
* counter
->scale
;
1435 * Skip value 0 when enabling --per-thread globally,
1436 * otherwise too many 0 output.
1438 if (uval
== 0.0 && target__has_per_thread(&target
))
1441 buf
[i
].counter
= counter
;
1450 qsort(buf
, i
, sizeof(struct perf_aggr_thread_value
), cmp_val
);
1458 static void print_aggr_thread(struct perf_evsel
*counter
, char *prefix
)
1460 FILE *output
= stat_config
.output
;
1461 int nthreads
= thread_map__nr(counter
->threads
);
1462 int ncpus
= cpu_map__nr(counter
->cpus
);
1463 int thread
, sorted_threads
, id
;
1464 struct perf_aggr_thread_value
*buf
;
1466 buf
= sort_aggr_thread(counter
, nthreads
, ncpus
, &sorted_threads
);
1468 perror("cannot sort aggr thread");
1472 for (thread
= 0; thread
< sorted_threads
; thread
++) {
1474 fprintf(output
, "%s", prefix
);
1476 id
= buf
[thread
].id
;
1477 if (stat_config
.stats
)
1478 printout(id
, 0, buf
[thread
].counter
, buf
[thread
].uval
,
1479 prefix
, buf
[thread
].run
, buf
[thread
].ena
, 1.0,
1480 &stat_config
.stats
[id
]);
1482 printout(id
, 0, buf
[thread
].counter
, buf
[thread
].uval
,
1483 prefix
, buf
[thread
].run
, buf
[thread
].ena
, 1.0,
1485 fputc('\n', output
);
1492 double avg
, avg_enabled
, avg_running
;
1495 static void counter_aggr_cb(struct perf_evsel
*counter
, void *data
,
1496 bool first __maybe_unused
)
1498 struct caggr_data
*cd
= data
;
1499 struct perf_stat_evsel
*ps
= counter
->stats
;
1501 cd
->avg
+= avg_stats(&ps
->res_stats
[0]);
1502 cd
->avg_enabled
+= avg_stats(&ps
->res_stats
[1]);
1503 cd
->avg_running
+= avg_stats(&ps
->res_stats
[2]);
1507 * Print out the results of a single counter:
1508 * aggregated counts in system-wide mode
1510 static void print_counter_aggr(struct perf_evsel
*counter
, char *prefix
)
1512 FILE *output
= stat_config
.output
;
1514 struct caggr_data cd
= { .avg
= 0.0 };
1516 if (!collect_data(counter
, counter_aggr_cb
, &cd
))
1519 if (prefix
&& !metric_only
)
1520 fprintf(output
, "%s", prefix
);
1522 uval
= cd
.avg
* counter
->scale
;
1523 printout(-1, 0, counter
, uval
, prefix
, cd
.avg_running
, cd
.avg_enabled
,
1526 fprintf(output
, "\n");
1529 static void counter_cb(struct perf_evsel
*counter
, void *data
,
1530 bool first __maybe_unused
)
1532 struct aggr_data
*ad
= data
;
1534 ad
->val
+= perf_counts(counter
->counts
, ad
->cpu
, 0)->val
;
1535 ad
->ena
+= perf_counts(counter
->counts
, ad
->cpu
, 0)->ena
;
1536 ad
->run
+= perf_counts(counter
->counts
, ad
->cpu
, 0)->run
;
1540 * Print out the results of a single counter:
1541 * does not use aggregated count in system-wide
1543 static void print_counter(struct perf_evsel
*counter
, char *prefix
)
1545 FILE *output
= stat_config
.output
;
1550 for (cpu
= 0; cpu
< perf_evsel__nr_cpus(counter
); cpu
++) {
1551 struct aggr_data ad
= { .cpu
= cpu
};
1553 if (!collect_data(counter
, counter_cb
, &ad
))
1560 fprintf(output
, "%s", prefix
);
1562 uval
= val
* counter
->scale
;
1563 printout(cpu
, 0, counter
, uval
, prefix
, run
, ena
, 1.0,
1566 fputc('\n', output
);
1570 static void print_no_aggr_metric(char *prefix
)
1574 struct perf_evsel
*counter
;
1578 nrcpus
= evsel_list
->cpus
->nr
;
1579 for (cpu
= 0; cpu
< nrcpus
; cpu
++) {
1583 fputs(prefix
, stat_config
.output
);
1584 evlist__for_each_entry(evsel_list
, counter
) {
1585 if (is_duration_time(counter
))
1588 aggr_printout(counter
, cpu
, 0);
1591 val
= perf_counts(counter
->counts
, cpu
, 0)->val
;
1592 ena
= perf_counts(counter
->counts
, cpu
, 0)->ena
;
1593 run
= perf_counts(counter
->counts
, cpu
, 0)->run
;
1595 uval
= val
* counter
->scale
;
1596 printout(cpu
, 0, counter
, uval
, prefix
, run
, ena
, 1.0,
1599 fputc('\n', stat_config
.output
);
1603 static int aggr_header_lens
[] = {
1611 static const char *aggr_header_csv
[] = {
1612 [AGGR_CORE
] = "core,cpus,",
1613 [AGGR_SOCKET
] = "socket,cpus",
1614 [AGGR_NONE
] = "cpu,",
1615 [AGGR_THREAD
] = "comm-pid,",
1619 static void print_metric_headers(const char *prefix
, bool no_indent
)
1621 struct perf_stat_output_ctx out
;
1622 struct perf_evsel
*counter
;
1623 struct outstate os
= {
1624 .fh
= stat_config
.output
1628 fprintf(stat_config
.output
, "%s", prefix
);
1630 if (!csv_output
&& !no_indent
)
1631 fprintf(stat_config
.output
, "%*s",
1632 aggr_header_lens
[stat_config
.aggr_mode
], "");
1634 if (stat_config
.interval
)
1635 fputs("time,", stat_config
.output
);
1636 fputs(aggr_header_csv
[stat_config
.aggr_mode
],
1637 stat_config
.output
);
1640 /* Print metrics headers only */
1641 evlist__for_each_entry(evsel_list
, counter
) {
1642 if (is_duration_time(counter
))
1646 out
.print_metric
= print_metric_header
;
1647 out
.new_line
= new_line_metric
;
1648 out
.force_header
= true;
1650 perf_stat__print_shadow_stats(counter
, 0,
1656 fputc('\n', stat_config
.output
);
1659 static void print_interval(char *prefix
, struct timespec
*ts
)
1661 FILE *output
= stat_config
.output
;
1662 static int num_print_interval
;
1665 puts(CONSOLE_CLEAR
);
1667 sprintf(prefix
, "%6lu.%09lu%s", ts
->tv_sec
, ts
->tv_nsec
, csv_sep
);
1669 if ((num_print_interval
== 0 && !csv_output
) || interval_clear
) {
1670 switch (stat_config
.aggr_mode
) {
1672 fprintf(output
, "# time socket cpus");
1674 fprintf(output
, " counts %*s events\n", unit_width
, "unit");
1677 fprintf(output
, "# time core cpus");
1679 fprintf(output
, " counts %*s events\n", unit_width
, "unit");
1682 fprintf(output
, "# time CPU ");
1684 fprintf(output
, " counts %*s events\n", unit_width
, "unit");
1687 fprintf(output
, "# time comm-pid");
1689 fprintf(output
, " counts %*s events\n", unit_width
, "unit");
1693 fprintf(output
, "# time");
1695 fprintf(output
, " counts %*s events\n", unit_width
, "unit");
1701 if ((num_print_interval
== 0 || interval_clear
) && metric_only
)
1702 print_metric_headers(" ", true);
1703 if (++num_print_interval
== 25)
1704 num_print_interval
= 0;
1707 static void print_header(int argc
, const char **argv
)
1709 FILE *output
= stat_config
.output
;
1715 fprintf(output
, "\n");
1716 fprintf(output
, " Performance counter stats for ");
1717 if (target
.system_wide
)
1718 fprintf(output
, "\'system wide");
1719 else if (target
.cpu_list
)
1720 fprintf(output
, "\'CPU(s) %s", target
.cpu_list
);
1721 else if (!target__has_task(&target
)) {
1722 fprintf(output
, "\'%s", argv
? argv
[0] : "pipe");
1723 for (i
= 1; argv
&& (i
< argc
); i
++)
1724 fprintf(output
, " %s", argv
[i
]);
1725 } else if (target
.pid
)
1726 fprintf(output
, "process id \'%s", target
.pid
);
1728 fprintf(output
, "thread id \'%s", target
.tid
);
1730 fprintf(output
, "\'");
1732 fprintf(output
, " (%d runs)", run_count
);
1733 fprintf(output
, ":\n\n");
1737 static int get_precision(double num
)
1742 return lround(ceil(-log10(num
)));
1745 static void print_table(FILE *output
, int precision
, double avg
)
1748 int idx
, indent
= 0;
1750 scnprintf(tmp
, 64, " %17.*f", precision
, avg
);
1751 while (tmp
[indent
] == ' ')
1754 fprintf(output
, "%*s# Table of individual measurements:\n", indent
, "");
1756 for (idx
= 0; idx
< run_count
; idx
++) {
1757 double run
= (double) walltime_run
[idx
] / NSEC_PER_SEC
;
1758 int h
, n
= 1 + abs((int) (100.0 * (run
- avg
)/run
) / 5);
1760 fprintf(output
, " %17.*f (%+.*f) ",
1761 precision
, run
, precision
, run
- avg
);
1763 for (h
= 0; h
< n
; h
++)
1764 fprintf(output
, "#");
1766 fprintf(output
, "\n");
1769 fprintf(output
, "\n%*s# Final result:\n", indent
, "");
1772 static double timeval2double(struct timeval
*t
)
1774 return t
->tv_sec
+ (double) t
->tv_usec
/USEC_PER_SEC
;
1777 static void print_footer(void)
1779 double avg
= avg_stats(&walltime_nsecs_stats
) / NSEC_PER_SEC
;
1780 FILE *output
= stat_config
.output
;
1784 fprintf(output
, "\n");
1786 if (run_count
== 1) {
1787 fprintf(output
, " %17.9f seconds time elapsed", avg
);
1790 double ru_utime
= timeval2double(&ru_data
.ru_utime
);
1791 double ru_stime
= timeval2double(&ru_data
.ru_stime
);
1793 fprintf(output
, "\n\n");
1794 fprintf(output
, " %17.9f seconds user\n", ru_utime
);
1795 fprintf(output
, " %17.9f seconds sys\n", ru_stime
);
1798 double sd
= stddev_stats(&walltime_nsecs_stats
) / NSEC_PER_SEC
;
1800 * Display at most 2 more significant
1801 * digits than the stddev inaccuracy.
1803 int precision
= get_precision(sd
) + 2;
1805 if (walltime_run_table
)
1806 print_table(output
, precision
, avg
);
1808 fprintf(output
, " %17.*f +- %.*f seconds time elapsed",
1809 precision
, avg
, precision
, sd
);
1811 print_noise_pct(sd
, avg
);
1813 fprintf(output
, "\n\n");
1815 if (print_free_counters_hint
&&
1816 sysctl__read_int("kernel/nmi_watchdog", &n
) >= 0 &&
1819 "Some events weren't counted. Try disabling the NMI watchdog:\n"
1820 " echo 0 > /proc/sys/kernel/nmi_watchdog\n"
1822 " echo 1 > /proc/sys/kernel/nmi_watchdog\n");
1824 if (print_mixed_hw_group_error
)
1826 "The events in group usually have to be from "
1827 "the same PMU. Try reorganizing the group.\n");
1830 static void print_counters(struct timespec
*ts
, int argc
, const char **argv
)
1832 int interval
= stat_config
.interval
;
1833 struct perf_evsel
*counter
;
1834 char buf
[64], *prefix
= NULL
;
1836 /* Do not print anything if we record to the pipe. */
1837 if (STAT_RECORD
&& perf_stat
.data
.is_pipe
)
1841 print_interval(prefix
= buf
, ts
);
1843 print_header(argc
, argv
);
1846 static int num_print_iv
;
1848 if (num_print_iv
== 0 && !interval
)
1849 print_metric_headers(prefix
, false);
1850 if (num_print_iv
++ == 25)
1852 if (stat_config
.aggr_mode
== AGGR_GLOBAL
&& prefix
)
1853 fprintf(stat_config
.output
, "%s", prefix
);
1856 switch (stat_config
.aggr_mode
) {
1862 evlist__for_each_entry(evsel_list
, counter
) {
1863 if (is_duration_time(counter
))
1865 print_aggr_thread(counter
, prefix
);
1869 evlist__for_each_entry(evsel_list
, counter
) {
1870 if (is_duration_time(counter
))
1872 print_counter_aggr(counter
, prefix
);
1875 fputc('\n', stat_config
.output
);
1879 print_no_aggr_metric(prefix
);
1881 evlist__for_each_entry(evsel_list
, counter
) {
1882 if (is_duration_time(counter
))
1884 print_counter(counter
, prefix
);
1893 if (!interval
&& !csv_output
)
1896 fflush(stat_config
.output
);
1899 static volatile int signr
= -1;
1901 static void skip_signal(int signo
)
1903 if ((child_pid
== -1) || stat_config
.interval
)
1908 * render child_pid harmless
1909 * won't send SIGTERM to a random
1910 * process in case of race condition
1911 * and fast PID recycling
1916 static void sig_atexit(void)
1921 * avoid race condition with SIGCHLD handler
1922 * in skip_signal() which is modifying child_pid
1923 * goal is to avoid send SIGTERM to a random
1927 sigaddset(&set
, SIGCHLD
);
1928 sigprocmask(SIG_BLOCK
, &set
, &oset
);
1930 if (child_pid
!= -1)
1931 kill(child_pid
, SIGTERM
);
1933 sigprocmask(SIG_SETMASK
, &oset
, NULL
);
1938 signal(signr
, SIG_DFL
);
1939 kill(getpid(), signr
);
1942 static int stat__set_big_num(const struct option
*opt __maybe_unused
,
1943 const char *s __maybe_unused
, int unset
)
1945 big_num_opt
= unset
? 0 : 1;
1949 static int enable_metric_only(const struct option
*opt __maybe_unused
,
1950 const char *s __maybe_unused
, int unset
)
1952 force_metric_only
= true;
1953 metric_only
= !unset
;
1957 static int parse_metric_groups(const struct option
*opt
,
1959 int unset __maybe_unused
)
1961 return metricgroup__parse_groups(opt
, str
, &metric_events
);
1964 static struct option stat_options
[] = {
1965 OPT_BOOLEAN('T', "transaction", &transaction_run
,
1966 "hardware transaction statistics"),
1967 OPT_CALLBACK('e', "event", &evsel_list
, "event",
1968 "event selector. use 'perf list' to list available events",
1969 parse_events_option
),
1970 OPT_CALLBACK(0, "filter", &evsel_list
, "filter",
1971 "event filter", parse_filter
),
1972 OPT_BOOLEAN('i', "no-inherit", &no_inherit
,
1973 "child tasks do not inherit counters"),
1974 OPT_STRING('p', "pid", &target
.pid
, "pid",
1975 "stat events on existing process id"),
1976 OPT_STRING('t', "tid", &target
.tid
, "tid",
1977 "stat events on existing thread id"),
1978 OPT_BOOLEAN('a', "all-cpus", &target
.system_wide
,
1979 "system-wide collection from all CPUs"),
1980 OPT_BOOLEAN('g', "group", &group
,
1981 "put the counters into a counter group"),
1982 OPT_BOOLEAN('c', "scale", &stat_config
.scale
, "scale/normalize counters"),
1983 OPT_INCR('v', "verbose", &verbose
,
1984 "be more verbose (show counter open errors, etc)"),
1985 OPT_INTEGER('r', "repeat", &run_count
,
1986 "repeat command and print average + stddev (max: 100, forever: 0)"),
1987 OPT_BOOLEAN(0, "table", &walltime_run_table
,
1988 "display details about each run (only with -r option)"),
1989 OPT_BOOLEAN('n', "null", &null_run
,
1990 "null run - dont start any counters"),
1991 OPT_INCR('d', "detailed", &detailed_run
,
1992 "detailed run - start a lot of events"),
1993 OPT_BOOLEAN('S', "sync", &sync_run
,
1994 "call sync() before starting a run"),
1995 OPT_CALLBACK_NOOPT('B', "big-num", NULL
, NULL
,
1996 "print large numbers with thousands\' separators",
1998 OPT_STRING('C', "cpu", &target
.cpu_list
, "cpu",
1999 "list of cpus to monitor in system-wide"),
2000 OPT_SET_UINT('A', "no-aggr", &stat_config
.aggr_mode
,
2001 "disable CPU count aggregation", AGGR_NONE
),
2002 OPT_BOOLEAN(0, "no-merge", &no_merge
, "Do not merge identical named events"),
2003 OPT_STRING('x', "field-separator", &csv_sep
, "separator",
2004 "print counts with custom separator"),
2005 OPT_CALLBACK('G', "cgroup", &evsel_list
, "name",
2006 "monitor event in cgroup name only", parse_cgroups
),
2007 OPT_STRING('o', "output", &output_name
, "file", "output file name"),
2008 OPT_BOOLEAN(0, "append", &append_file
, "append to the output file"),
2009 OPT_INTEGER(0, "log-fd", &output_fd
,
2010 "log output to fd, instead of stderr"),
2011 OPT_STRING(0, "pre", &pre_cmd
, "command",
2012 "command to run prior to the measured command"),
2013 OPT_STRING(0, "post", &post_cmd
, "command",
2014 "command to run after to the measured command"),
2015 OPT_UINTEGER('I', "interval-print", &stat_config
.interval
,
2016 "print counts at regular interval in ms "
2017 "(overhead is possible for values <= 100ms)"),
2018 OPT_INTEGER(0, "interval-count", &stat_config
.times
,
2019 "print counts for fixed number of times"),
2020 OPT_BOOLEAN(0, "interval-clear", &interval_clear
,
2021 "clear screen in between new interval"),
2022 OPT_UINTEGER(0, "timeout", &stat_config
.timeout
,
2023 "stop workload and print counts after a timeout period in ms (>= 10ms)"),
2024 OPT_SET_UINT(0, "per-socket", &stat_config
.aggr_mode
,
2025 "aggregate counts per processor socket", AGGR_SOCKET
),
2026 OPT_SET_UINT(0, "per-core", &stat_config
.aggr_mode
,
2027 "aggregate counts per physical processor core", AGGR_CORE
),
2028 OPT_SET_UINT(0, "per-thread", &stat_config
.aggr_mode
,
2029 "aggregate counts per thread", AGGR_THREAD
),
2030 OPT_UINTEGER('D', "delay", &initial_delay
,
2031 "ms to wait before starting measurement after program start"),
2032 OPT_CALLBACK_NOOPT(0, "metric-only", &metric_only
, NULL
,
2033 "Only print computed metrics. No raw values", enable_metric_only
),
2034 OPT_BOOLEAN(0, "topdown", &topdown_run
,
2035 "measure topdown level 1 statistics"),
2036 OPT_BOOLEAN(0, "smi-cost", &smi_cost
,
2037 "measure SMI cost"),
2038 OPT_CALLBACK('M', "metrics", &evsel_list
, "metric/metric group list",
2039 "monitor specified metrics or metric groups (separated by ,)",
2040 parse_metric_groups
),
2044 static int perf_stat__get_socket(struct cpu_map
*map
, int cpu
)
2046 return cpu_map__get_socket(map
, cpu
, NULL
);
2049 static int perf_stat__get_core(struct cpu_map
*map
, int cpu
)
2051 return cpu_map__get_core(map
, cpu
, NULL
);
2054 static int cpu_map__get_max(struct cpu_map
*map
)
2058 for (i
= 0; i
< map
->nr
; i
++) {
2059 if (map
->map
[i
] > max
)
2066 static struct cpu_map
*cpus_aggr_map
;
2068 static int perf_stat__get_aggr(aggr_get_id_t get_id
, struct cpu_map
*map
, int idx
)
2075 cpu
= map
->map
[idx
];
2077 if (cpus_aggr_map
->map
[cpu
] == -1)
2078 cpus_aggr_map
->map
[cpu
] = get_id(map
, idx
);
2080 return cpus_aggr_map
->map
[cpu
];
2083 static int perf_stat__get_socket_cached(struct cpu_map
*map
, int idx
)
2085 return perf_stat__get_aggr(perf_stat__get_socket
, map
, idx
);
2088 static int perf_stat__get_core_cached(struct cpu_map
*map
, int idx
)
2090 return perf_stat__get_aggr(perf_stat__get_core
, map
, idx
);
2093 static int perf_stat_init_aggr_mode(void)
2097 switch (stat_config
.aggr_mode
) {
2099 if (cpu_map__build_socket_map(evsel_list
->cpus
, &aggr_map
)) {
2100 perror("cannot build socket map");
2103 aggr_get_id
= perf_stat__get_socket_cached
;
2106 if (cpu_map__build_core_map(evsel_list
->cpus
, &aggr_map
)) {
2107 perror("cannot build core map");
2110 aggr_get_id
= perf_stat__get_core_cached
;
2121 * The evsel_list->cpus is the base we operate on,
2122 * taking the highest cpu number to be the size of
2123 * the aggregation translate cpumap.
2125 nr
= cpu_map__get_max(evsel_list
->cpus
);
2126 cpus_aggr_map
= cpu_map__empty_new(nr
+ 1);
2127 return cpus_aggr_map
? 0 : -ENOMEM
;
2130 static void perf_stat__exit_aggr_mode(void)
2132 cpu_map__put(aggr_map
);
2133 cpu_map__put(cpus_aggr_map
);
2135 cpus_aggr_map
= NULL
;
2138 static inline int perf_env__get_cpu(struct perf_env
*env
, struct cpu_map
*map
, int idx
)
2145 cpu
= map
->map
[idx
];
2147 if (cpu
>= env
->nr_cpus_avail
)
2153 static int perf_env__get_socket(struct cpu_map
*map
, int idx
, void *data
)
2155 struct perf_env
*env
= data
;
2156 int cpu
= perf_env__get_cpu(env
, map
, idx
);
2158 return cpu
== -1 ? -1 : env
->cpu
[cpu
].socket_id
;
2161 static int perf_env__get_core(struct cpu_map
*map
, int idx
, void *data
)
2163 struct perf_env
*env
= data
;
2164 int core
= -1, cpu
= perf_env__get_cpu(env
, map
, idx
);
2167 int socket_id
= env
->cpu
[cpu
].socket_id
;
2170 * Encode socket in upper 16 bits
2171 * core_id is relative to socket, and
2172 * we need a global id. So we combine
2175 core
= (socket_id
<< 16) | (env
->cpu
[cpu
].core_id
& 0xffff);
2181 static int perf_env__build_socket_map(struct perf_env
*env
, struct cpu_map
*cpus
,
2182 struct cpu_map
**sockp
)
2184 return cpu_map__build_map(cpus
, sockp
, perf_env__get_socket
, env
);
2187 static int perf_env__build_core_map(struct perf_env
*env
, struct cpu_map
*cpus
,
2188 struct cpu_map
**corep
)
2190 return cpu_map__build_map(cpus
, corep
, perf_env__get_core
, env
);
2193 static int perf_stat__get_socket_file(struct cpu_map
*map
, int idx
)
2195 return perf_env__get_socket(map
, idx
, &perf_stat
.session
->header
.env
);
2198 static int perf_stat__get_core_file(struct cpu_map
*map
, int idx
)
2200 return perf_env__get_core(map
, idx
, &perf_stat
.session
->header
.env
);
2203 static int perf_stat_init_aggr_mode_file(struct perf_stat
*st
)
2205 struct perf_env
*env
= &st
->session
->header
.env
;
2207 switch (stat_config
.aggr_mode
) {
2209 if (perf_env__build_socket_map(env
, evsel_list
->cpus
, &aggr_map
)) {
2210 perror("cannot build socket map");
2213 aggr_get_id
= perf_stat__get_socket_file
;
2216 if (perf_env__build_core_map(env
, evsel_list
->cpus
, &aggr_map
)) {
2217 perror("cannot build core map");
2220 aggr_get_id
= perf_stat__get_core_file
;
2233 static int topdown_filter_events(const char **attr
, char **str
, bool use_group
)
2240 for (i
= 0; attr
[i
]; i
++) {
2241 if (pmu_have_event("cpu", attr
[i
])) {
2242 len
+= strlen(attr
[i
]) + 1;
2243 attr
[i
- off
] = attr
[i
];
2247 attr
[i
- off
] = NULL
;
2249 *str
= malloc(len
+ 1 + 2);
2259 for (i
= 0; attr
[i
]; i
++) {
2272 __weak
bool arch_topdown_check_group(bool *warn
)
2278 __weak
void arch_topdown_group_warn(void)
2283 * Add default attributes, if there were no attributes specified or
2284 * if -d/--detailed, -d -d or -d -d -d is used:
2286 static int add_default_attributes(void)
2289 struct perf_event_attr default_attrs0
[] = {
2291 { .type
= PERF_TYPE_SOFTWARE
, .config
= PERF_COUNT_SW_TASK_CLOCK
},
2292 { .type
= PERF_TYPE_SOFTWARE
, .config
= PERF_COUNT_SW_CONTEXT_SWITCHES
},
2293 { .type
= PERF_TYPE_SOFTWARE
, .config
= PERF_COUNT_SW_CPU_MIGRATIONS
},
2294 { .type
= PERF_TYPE_SOFTWARE
, .config
= PERF_COUNT_SW_PAGE_FAULTS
},
2296 { .type
= PERF_TYPE_HARDWARE
, .config
= PERF_COUNT_HW_CPU_CYCLES
},
2298 struct perf_event_attr frontend_attrs
[] = {
2299 { .type
= PERF_TYPE_HARDWARE
, .config
= PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
},
2301 struct perf_event_attr backend_attrs
[] = {
2302 { .type
= PERF_TYPE_HARDWARE
, .config
= PERF_COUNT_HW_STALLED_CYCLES_BACKEND
},
2304 struct perf_event_attr default_attrs1
[] = {
2305 { .type
= PERF_TYPE_HARDWARE
, .config
= PERF_COUNT_HW_INSTRUCTIONS
},
2306 { .type
= PERF_TYPE_HARDWARE
, .config
= PERF_COUNT_HW_BRANCH_INSTRUCTIONS
},
2307 { .type
= PERF_TYPE_HARDWARE
, .config
= PERF_COUNT_HW_BRANCH_MISSES
},
2312 * Detailed stats (-d), covering the L1 and last level data caches:
2314 struct perf_event_attr detailed_attrs
[] = {
2316 { .type
= PERF_TYPE_HW_CACHE
,
2318 PERF_COUNT_HW_CACHE_L1D
<< 0 |
2319 (PERF_COUNT_HW_CACHE_OP_READ
<< 8) |
2320 (PERF_COUNT_HW_CACHE_RESULT_ACCESS
<< 16) },
2322 { .type
= PERF_TYPE_HW_CACHE
,
2324 PERF_COUNT_HW_CACHE_L1D
<< 0 |
2325 (PERF_COUNT_HW_CACHE_OP_READ
<< 8) |
2326 (PERF_COUNT_HW_CACHE_RESULT_MISS
<< 16) },
2328 { .type
= PERF_TYPE_HW_CACHE
,
2330 PERF_COUNT_HW_CACHE_LL
<< 0 |
2331 (PERF_COUNT_HW_CACHE_OP_READ
<< 8) |
2332 (PERF_COUNT_HW_CACHE_RESULT_ACCESS
<< 16) },
2334 { .type
= PERF_TYPE_HW_CACHE
,
2336 PERF_COUNT_HW_CACHE_LL
<< 0 |
2337 (PERF_COUNT_HW_CACHE_OP_READ
<< 8) |
2338 (PERF_COUNT_HW_CACHE_RESULT_MISS
<< 16) },
2342 * Very detailed stats (-d -d), covering the instruction cache and the TLB caches:
2344 struct perf_event_attr very_detailed_attrs
[] = {
2346 { .type
= PERF_TYPE_HW_CACHE
,
2348 PERF_COUNT_HW_CACHE_L1I
<< 0 |
2349 (PERF_COUNT_HW_CACHE_OP_READ
<< 8) |
2350 (PERF_COUNT_HW_CACHE_RESULT_ACCESS
<< 16) },
2352 { .type
= PERF_TYPE_HW_CACHE
,
2354 PERF_COUNT_HW_CACHE_L1I
<< 0 |
2355 (PERF_COUNT_HW_CACHE_OP_READ
<< 8) |
2356 (PERF_COUNT_HW_CACHE_RESULT_MISS
<< 16) },
2358 { .type
= PERF_TYPE_HW_CACHE
,
2360 PERF_COUNT_HW_CACHE_DTLB
<< 0 |
2361 (PERF_COUNT_HW_CACHE_OP_READ
<< 8) |
2362 (PERF_COUNT_HW_CACHE_RESULT_ACCESS
<< 16) },
2364 { .type
= PERF_TYPE_HW_CACHE
,
2366 PERF_COUNT_HW_CACHE_DTLB
<< 0 |
2367 (PERF_COUNT_HW_CACHE_OP_READ
<< 8) |
2368 (PERF_COUNT_HW_CACHE_RESULT_MISS
<< 16) },
2370 { .type
= PERF_TYPE_HW_CACHE
,
2372 PERF_COUNT_HW_CACHE_ITLB
<< 0 |
2373 (PERF_COUNT_HW_CACHE_OP_READ
<< 8) |
2374 (PERF_COUNT_HW_CACHE_RESULT_ACCESS
<< 16) },
2376 { .type
= PERF_TYPE_HW_CACHE
,
2378 PERF_COUNT_HW_CACHE_ITLB
<< 0 |
2379 (PERF_COUNT_HW_CACHE_OP_READ
<< 8) |
2380 (PERF_COUNT_HW_CACHE_RESULT_MISS
<< 16) },
2385 * Very, very detailed stats (-d -d -d), adding prefetch events:
2387 struct perf_event_attr very_very_detailed_attrs
[] = {
2389 { .type
= PERF_TYPE_HW_CACHE
,
2391 PERF_COUNT_HW_CACHE_L1D
<< 0 |
2392 (PERF_COUNT_HW_CACHE_OP_PREFETCH
<< 8) |
2393 (PERF_COUNT_HW_CACHE_RESULT_ACCESS
<< 16) },
2395 { .type
= PERF_TYPE_HW_CACHE
,
2397 PERF_COUNT_HW_CACHE_L1D
<< 0 |
2398 (PERF_COUNT_HW_CACHE_OP_PREFETCH
<< 8) |
2399 (PERF_COUNT_HW_CACHE_RESULT_MISS
<< 16) },
2401 struct parse_events_error errinfo
;
2403 /* Set attrs if no event is selected and !null_run: */
2407 if (transaction_run
) {
2408 /* Handle -T as -M transaction. Once platform specific metrics
2409 * support has been added to the json files, all archictures
2410 * will use this approach. To determine transaction support
2411 * on an architecture test for such a metric name.
2413 if (metricgroup__has_metric("transaction")) {
2414 struct option opt
= { .value
= &evsel_list
};
2416 return metricgroup__parse_groups(&opt
, "transaction",
2420 if (pmu_have_event("cpu", "cycles-ct") &&
2421 pmu_have_event("cpu", "el-start"))
2422 err
= parse_events(evsel_list
, transaction_attrs
,
2425 err
= parse_events(evsel_list
,
2426 transaction_limited_attrs
,
2429 fprintf(stderr
, "Cannot set up transaction events\n");
2430 parse_events_print_error(&errinfo
, transaction_attrs
);
2439 if (sysfs__read_int(FREEZE_ON_SMI_PATH
, &smi
) < 0) {
2440 fprintf(stderr
, "freeze_on_smi is not supported.\n");
2445 if (sysfs__write_int(FREEZE_ON_SMI_PATH
, 1) < 0) {
2446 fprintf(stderr
, "Failed to set freeze_on_smi.\n");
2452 if (pmu_have_event("msr", "aperf") &&
2453 pmu_have_event("msr", "smi")) {
2454 if (!force_metric_only
)
2456 err
= parse_events(evsel_list
, smi_cost_attrs
, &errinfo
);
2458 fprintf(stderr
, "To measure SMI cost, it needs "
2459 "msr/aperf/, msr/smi/ and cpu/cycles/ support\n");
2460 parse_events_print_error(&errinfo
, smi_cost_attrs
);
2464 fprintf(stderr
, "Cannot set up SMI cost events\n");
2474 if (stat_config
.aggr_mode
!= AGGR_GLOBAL
&&
2475 stat_config
.aggr_mode
!= AGGR_CORE
) {
2476 pr_err("top down event configuration requires --per-core mode\n");
2479 stat_config
.aggr_mode
= AGGR_CORE
;
2480 if (nr_cgroups
|| !target__has_cpu(&target
)) {
2481 pr_err("top down event configuration requires system-wide mode (-a)\n");
2485 if (!force_metric_only
)
2487 if (topdown_filter_events(topdown_attrs
, &str
,
2488 arch_topdown_check_group(&warn
)) < 0) {
2489 pr_err("Out of memory\n");
2492 if (topdown_attrs
[0] && str
) {
2494 arch_topdown_group_warn();
2495 err
= parse_events(evsel_list
, str
, &errinfo
);
2498 "Cannot set up top down events %s: %d\n",
2501 parse_events_print_error(&errinfo
, str
);
2505 fprintf(stderr
, "System does not support topdown\n");
2511 if (!evsel_list
->nr_entries
) {
2512 if (target__has_cpu(&target
))
2513 default_attrs0
[0].config
= PERF_COUNT_SW_CPU_CLOCK
;
2515 if (perf_evlist__add_default_attrs(evsel_list
, default_attrs0
) < 0)
2517 if (pmu_have_event("cpu", "stalled-cycles-frontend")) {
2518 if (perf_evlist__add_default_attrs(evsel_list
,
2519 frontend_attrs
) < 0)
2522 if (pmu_have_event("cpu", "stalled-cycles-backend")) {
2523 if (perf_evlist__add_default_attrs(evsel_list
,
2527 if (perf_evlist__add_default_attrs(evsel_list
, default_attrs1
) < 0)
2531 /* Detailed events get appended to the event list: */
2533 if (detailed_run
< 1)
2536 /* Append detailed run extra attributes: */
2537 if (perf_evlist__add_default_attrs(evsel_list
, detailed_attrs
) < 0)
2540 if (detailed_run
< 2)
2543 /* Append very detailed run extra attributes: */
2544 if (perf_evlist__add_default_attrs(evsel_list
, very_detailed_attrs
) < 0)
2547 if (detailed_run
< 3)
2550 /* Append very, very detailed run extra attributes: */
2551 return perf_evlist__add_default_attrs(evsel_list
, very_very_detailed_attrs
);
2554 static const char * const stat_record_usage
[] = {
2555 "perf stat record [<options>]",
2559 static void init_features(struct perf_session
*session
)
2563 for (feat
= HEADER_FIRST_FEATURE
; feat
< HEADER_LAST_FEATURE
; feat
++)
2564 perf_header__set_feat(&session
->header
, feat
);
2566 perf_header__clear_feat(&session
->header
, HEADER_BUILD_ID
);
2567 perf_header__clear_feat(&session
->header
, HEADER_TRACING_DATA
);
2568 perf_header__clear_feat(&session
->header
, HEADER_BRANCH_STACK
);
2569 perf_header__clear_feat(&session
->header
, HEADER_AUXTRACE
);
static int __cmd_record(int argc, const char **argv)
{
	struct perf_session *session;
	struct perf_data *data = &perf_stat.data;

	argc = parse_options(argc, argv, stat_options, stat_record_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);

	if (output_name)
		data->file.path = output_name;

	if (run_count != 1 || forever) {
		pr_err("Cannot use -r option with perf stat record.\n");
		return -1;
	}

	session = perf_session__new(data, false, NULL);
	if (session == NULL) {
		pr_err("Perf session creation failed.\n");
		return -1;
	}

	init_features(session);

	session->evlist   = evsel_list;
	perf_stat.session  = session;
	perf_stat.record   = true;
	return argc;
}
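
/*
 * Replay a PERF_RECORD_STAT_ROUND from a recorded session: aggregate each
 * counter and print either the interval or the final numbers.
 */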
static int process_stat_round_event(struct perf_tool *tool __maybe_unused,
				    union perf_event *event,
				    struct perf_session *session)
{
	struct stat_round_event *stat_round = &event->stat_round;
	struct perf_evsel *counter;
	struct timespec tsh, *ts = NULL;
	const char **argv = session->header.env.cmdline_argv;
	int argc = session->header.env.nr_cmdline;

	evlist__for_each_entry(evsel_list, counter)
		perf_stat_process_counter(&stat_config, counter);

	if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL)
		update_stats(&walltime_nsecs_stats, stat_round->time);

	if (stat_config.interval && stat_round->time) {
		tsh.tv_sec  = stat_round->time / NSEC_PER_SEC;
		tsh.tv_nsec = stat_round->time % NSEC_PER_SEC;
		ts = &tsh;
	}

	print_counters(ts, argc, argv);
	return 0;
}
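
/*
 * Pick up the stat config that 'perf stat record' stored in the file; an
 * aggregation mode given on the 'perf stat report' command line takes
 * precedence over the recorded one.
 */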
static
int process_stat_config_event(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_session *session __maybe_unused)
{
	struct perf_stat *st = container_of(tool, struct perf_stat, tool);

	perf_event__read_stat_config(&stat_config, &event->stat_config);

	if (cpu_map__empty(st->cpus)) {
		if (st->aggr_mode != AGGR_UNSET)
			pr_warning("warning: processing task data, aggregation mode not set\n");
		return 0;
	}

	if (st->aggr_mode != AGGR_UNSET)
		stat_config.aggr_mode = st->aggr_mode;

	if (perf_stat.data.is_pipe)
		perf_stat_init_aggr_mode();
	else
		perf_stat_init_aggr_mode_file(st);

	return 0;
}
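
/*
 * Once both the cpu and thread maps have been replayed from the file,
 * attach them to the evlist and allocate the counter storage.
 */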
static int set_maps(struct perf_stat *st)
{
	if (!st->cpus || !st->threads)
		return 0;

	if (WARN_ONCE(st->maps_allocated, "stats double allocation\n"))
		return -EINVAL;

	perf_evlist__set_maps(evsel_list, st->cpus, st->threads);

	if (perf_evlist__alloc_stats(evsel_list, true))
		return -ENOMEM;

	st->maps_allocated = true;
	return 0;
}
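
/* Replay the recorded thread map; only the first such event is honoured. */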
static
int process_thread_map_event(struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_session *session __maybe_unused)
{
	struct perf_stat *st = container_of(tool, struct perf_stat, tool);

	if (st->threads) {
		pr_warning("Extra thread map event, ignoring.\n");
		return 0;
	}

	st->threads = thread_map__new_event(&event->thread_map);
	if (!st->threads)
		return -ENOMEM;

	return set_maps(st);
}
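
/* Replay the recorded cpu map; only the first such event is honoured. */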
static
int process_cpu_map_event(struct perf_tool *tool,
			  union perf_event *event,
			  struct perf_session *session __maybe_unused)
{
	struct perf_stat *st = container_of(tool, struct perf_stat, tool);
	struct cpu_map *cpus;

	if (st->cpus) {
		pr_warning("Extra cpu map event, ignoring.\n");
		return 0;
	}

	cpus = cpu_map__new_data(&event->cpu_map.data);
	if (!cpus)
		return -ENOMEM;

	st->cpus = cpus;
	return set_maps(st);
}
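
/*
 * --per-thread on a system-wide target keeps one runtime_stat per thread so
 * that derived metrics are computed per thread instead of globally.
 */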
static int runtime_stat_new(struct perf_stat_config *config, int nthreads)
{
	int i;

	config->stats = calloc(nthreads, sizeof(struct runtime_stat));
	if (!config->stats)
		return -1;

	config->stats_num = nthreads;

	for (i = 0; i < nthreads; i++)
		runtime_stat__init(&config->stats[i]);

	return 0;
}

static void runtime_stat_delete(struct perf_stat_config *config)
{
	int i;

	if (!config->stats)
		return;

	for (i = 0; i < config->stats_num; i++)
		runtime_stat__exit(&config->stats[i]);

	free(config->stats);
}

static const char * const stat_report_usage[] = {
	"perf stat report [<options>]",
	NULL,
};
static struct perf_stat perf_stat = {
	.tool = {
		.attr		= perf_event__process_attr,
		.event_update	= perf_event__process_event_update,
		.thread_map	= process_thread_map_event,
		.cpu_map	= process_cpu_map_event,
		.stat_config	= process_stat_config_event,
		.stat		= perf_event__process_stat_event,
		.stat_round	= process_stat_round_event,
	},
	.aggr_mode	= AGGR_UNSET,
};
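
/*
 * 'perf stat report' entry point; a typical invocation might look like:
 *
 *   perf stat report -i perf.data --per-core
 */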
static int __cmd_report(int argc, const char **argv)
{
	struct perf_session *session;
	const struct option options[] = {
	OPT_STRING('i', "input", &input_name, "file", "input file name"),
	OPT_SET_UINT(0, "per-socket", &perf_stat.aggr_mode,
		     "aggregate counts per processor socket", AGGR_SOCKET),
	OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode,
		     "aggregate counts per physical processor core", AGGR_CORE),
	OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode,
		     "disable CPU count aggregation", AGGR_NONE),
	OPT_END()
	};
	struct stat st;
	int ret;

	argc = parse_options(argc, argv, options, stat_report_usage, 0);

	if (!input_name || !strlen(input_name)) {
		if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
			input_name = "-";
		else
			input_name = "perf.data";
	}

	perf_stat.data.file.path = input_name;
	perf_stat.data.mode      = PERF_DATA_MODE_READ;

	session = perf_session__new(&perf_stat.data, false, &perf_stat.tool);
	if (session == NULL)
		return -1;

	perf_stat.session  = session;
	stat_config.output = stderr;
	evsel_list         = session->evlist;

	ret = perf_session__process_events(session);
	if (ret)
		return ret;

	perf_session__delete(session);
	return 0;
}

static void setup_system_wide(int forks)
{
	/*
	 * Make system wide (-a) the default target if
	 * no target was specified and one of following
	 * conditions is met:
	 *
	 *   - there's no workload specified
	 *   - there is workload specified but all requested
	 *     events are system wide events
	 */
	if (!target__none(&target))
		return;

	if (!forks)
		target.system_wide = true;
	else {
		struct perf_evsel *counter;

		evlist__for_each_entry(evsel_list, counter) {
			if (!counter->system_wide)
				return;
		}

		if (evsel_list->nr_entries)
			target.system_wide = true;
	}
}
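
/*
 * Main 'perf stat' entry point: parse options, pick the event list (user
 * supplied, or the default/detailed sets from add_default_attributes()),
 * create the cpu/thread maps for the target, run the workload run_count
 * times and print the counters at the end (or at every -I interval).
 */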
int cmd_stat(int argc, const char **argv)
{
	const char * const stat_usage[] = {
		"perf stat [<options>] [<command>]",
		NULL
	};
	int status = -EINVAL, run_idx;
	const char *mode;
	FILE *output = stderr;
	unsigned int interval, timeout;
	const char * const stat_subcommands[] = { "record", "report" };

	setlocale(LC_ALL, "");

	evsel_list = perf_evlist__new();
	if (evsel_list == NULL)
		return -ENOMEM;

	parse_events__shrink_config_terms();

	/* String-parsing callback-based options would segfault when negated */
	set_option_flag(stat_options, 'e', "event", PARSE_OPT_NONEG);
	set_option_flag(stat_options, 'M', "metrics", PARSE_OPT_NONEG);
	set_option_flag(stat_options, 'G', "cgroup", PARSE_OPT_NONEG);

	argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands,
					(const char **) stat_usage,
					PARSE_OPT_STOP_AT_NON_OPTION);
	perf_stat__collect_metric_expr(evsel_list);
	perf_stat__init_shadow_stats();

	if (csv_sep) {
		csv_output = true;
		if (!strcmp(csv_sep, "\\t"))
			csv_sep = "\t";
	} else
		csv_sep = DEFAULT_SEPARATOR;

	if (argc && !strncmp(argv[0], "rec", 3)) {
		argc = __cmd_record(argc, argv);
		if (argc < 0)
			return -1;
	} else if (argc && !strncmp(argv[0], "rep", 3))
		return __cmd_report(argc, argv);

	interval = stat_config.interval;
	timeout = stat_config.timeout;

	/*
	 * For record command the -o is already taken care of.
	 */
	if (!STAT_RECORD && output_name && strcmp(output_name, "-"))
		output = NULL;

	if (output_name && output_fd) {
		fprintf(stderr, "cannot use both --output and --log-fd\n");
		parse_options_usage(stat_usage, stat_options, "o", 1);
		parse_options_usage(NULL, stat_options, "log-fd", 0);
		goto out;
	}

	if (metric_only && stat_config.aggr_mode == AGGR_THREAD) {
		fprintf(stderr, "--metric-only is not supported with --per-thread\n");
		goto out;
	}

	if (metric_only && run_count > 1) {
		fprintf(stderr, "--metric-only is not supported with -r\n");
		goto out;
	}

	if (walltime_run_table && run_count <= 1) {
		fprintf(stderr, "--table is only supported with -r\n");
		parse_options_usage(stat_usage, stat_options, "r", 1);
		parse_options_usage(NULL, stat_options, "table", 0);
		goto out;
	}

	if (output_fd < 0) {
		fprintf(stderr, "argument to --log-fd must be > 0\n");
		parse_options_usage(stat_usage, stat_options, "log-fd", 0);
		goto out;
	}

	if (!output) {
		struct timespec tm;
		mode = append_file ? "a" : "w";

		output = fopen(output_name, mode);
		if (!output) {
			perror("failed to create output file");
			return -1;
		}
		clock_gettime(CLOCK_REALTIME, &tm);
		fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
	} else if (output_fd > 0) {
		mode = append_file ? "a" : "w";
		output = fdopen(output_fd, mode);
		if (!output) {
			perror("Failed opening logfd");
			return -errno;
		}
	}

	stat_config.output = output;

	/*
	 * let the spreadsheet do the pretty-printing
	 */
	if (csv_output) {
		/* User explicitly passed -B? */
		if (big_num_opt == 1) {
			fprintf(stderr, "-B option not supported with -x\n");
			parse_options_usage(stat_usage, stat_options, "B", 1);
			parse_options_usage(NULL, stat_options, "x", 1);
			goto out;
		} else /* Nope, so disable big number formatting */
			big_num = false;
	} else if (big_num_opt == 0) /* User passed --no-big-num */
		big_num = false;

	setup_system_wide(argc);

	/*
	 * Display user/system times only for single
	 * run and when there's specified tracee.
	 */
	if ((run_count == 1) && target__none(&target))
		ru_display = true;

	if (run_count < 0) {
		pr_err("Run count must be a positive number\n");
		parse_options_usage(stat_usage, stat_options, "r", 1);
		goto out;
	} else if (run_count == 0) {
		forever = true;
		run_count = 1;
	}

	if (walltime_run_table) {
		walltime_run = zalloc(run_count * sizeof(walltime_run[0]));
		if (!walltime_run) {
			pr_err("failed to setup -r option");
			goto out;
		}
	}

	if ((stat_config.aggr_mode == AGGR_THREAD) &&
	    !target__has_task(&target)) {
		if (!target.system_wide || target.cpu_list) {
			fprintf(stderr, "The --per-thread option is only "
				"available when monitoring via -p -t -a "
				"options or only --per-thread.\n");
			parse_options_usage(NULL, stat_options, "p", 1);
			parse_options_usage(NULL, stat_options, "t", 1);
			goto out;
		}
	}

	/*
	 * no_aggr, cgroup are for system-wide only
	 * --per-thread is aggregated per thread, we don't mix it with cpu mode
	 */
	if (((stat_config.aggr_mode != AGGR_GLOBAL &&
	      stat_config.aggr_mode != AGGR_THREAD) || nr_cgroups) &&
	    !target__has_cpu(&target)) {
		fprintf(stderr, "both cgroup and no-aggregation "
			"modes only available in system-wide mode\n");

		parse_options_usage(stat_usage, stat_options, "G", 1);
		parse_options_usage(NULL, stat_options, "A", 1);
		parse_options_usage(NULL, stat_options, "a", 1);
		goto out;
	}

	if (add_default_attributes())
		goto out;

	target__validate(&target);

	if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide))
		target.per_thread = true;

	if (perf_evlist__create_maps(evsel_list, &target) < 0) {
		if (target__has_task(&target)) {
			pr_err("Problems finding threads of monitor\n");
			parse_options_usage(stat_usage, stat_options, "p", 1);
			parse_options_usage(NULL, stat_options, "t", 1);
		} else if (target__has_cpu(&target)) {
			perror("failed to parse CPUs map");
			parse_options_usage(stat_usage, stat_options, "C", 1);
			parse_options_usage(NULL, stat_options, "a", 1);
		}
		goto out;
	}

	/*
	 * Initialize thread_map with comm names,
	 * so we could print it out on output.
	 */
	if (stat_config.aggr_mode == AGGR_THREAD) {
		thread_map__read_comms(evsel_list->threads);
		if (target.system_wide) {
			if (runtime_stat_new(&stat_config,
					     thread_map__nr(evsel_list->threads))) {
				goto out;
			}
		}
	}

	if (stat_config.times && interval)
		interval_count = true;
	else if (stat_config.times && !interval) {
		pr_err("interval-count option should be used together with "
		       "interval-print.\n");
		parse_options_usage(stat_usage, stat_options, "interval-count", 0);
		parse_options_usage(stat_usage, stat_options, "I", 1);
		goto out;
	}

	if (timeout && timeout < 100) {
		if (timeout < 10) {
			pr_err("timeout must be >= 10ms.\n");
			parse_options_usage(stat_usage, stat_options, "timeout", 0);
			goto out;
		} else
			pr_warning("timeout < 100ms. "
				   "The overhead percentage could be high in some cases. "
				   "Please proceed with caution.\n");
	}
	if (timeout && interval) {
		pr_err("timeout option is not supported with interval-print.\n");
		parse_options_usage(stat_usage, stat_options, "timeout", 0);
		parse_options_usage(stat_usage, stat_options, "I", 1);
		goto out;
	}

	if (perf_evlist__alloc_stats(evsel_list, interval))
		goto out;

	if (perf_stat_init_aggr_mode())
		goto out;

	/*
	 * We don't want to block the signals - that would cause
	 * child tasks to inherit that and Ctrl-C would not work.
	 * What we want is for Ctrl-C to work in the exec()-ed
	 * task, but being ignored by perf stat itself:
	 */
	atexit(sig_atexit);
	if (!forever)
		signal(SIGINT,  skip_signal);
	signal(SIGCHLD, skip_signal);
	signal(SIGALRM, skip_signal);
	signal(SIGABRT, skip_signal);
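
	/*
	 * Run the workload run_count times (or forever with -r 0); in the
	 * forever case the counters are printed and reset after every run.
	 */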
	status = 0;
	for (run_idx = 0; forever || run_idx < run_count; run_idx++) {
		if (run_count != 1 && verbose > 0)
			fprintf(output, "[ perf stat: executing run #%d ... ]\n",
				run_idx + 1);

		status = run_perf_stat(argc, argv, run_idx);
		if (forever && status != -1) {
			print_counters(NULL, argc, argv);
			perf_stat__reset_stats();
		}
	}

	if (!forever && status != -1 && !interval)
		print_counters(NULL, argc, argv);

	if (STAT_RECORD) {
		/*
		 * We synthesize the kernel mmap record just so that older tools
		 * don't emit warnings about not being able to resolve symbols
		 * due to /proc/sys/kernel/kptr_restrict settings and instead provide
		 * a saner message about no samples being in the perf.data file.
		 *
		 * This also serves to suppress a warning about f_header.data.size == 0
		 * in header.c at the moment 'perf stat record' gets introduced, which
		 * is not really needed once we start adding the stat specific PERF_RECORD_
		 * records, but the need to suppress the kptr_restrict messages in older
		 * tools remains  -acme
		 */
		int fd = perf_data__fd(&perf_stat.data);
		int err = perf_event__synthesize_kernel_mmap((void *)&perf_stat,
							     process_synthesized_event,
							     &perf_stat.session->machines.host);
		if (err) {
			pr_warning("Couldn't synthesize the kernel mmap record, harmless, "
				   "older tools may produce warnings about this file.\n");
		}

		if (!interval) {
			if (WRITE_STAT_ROUND_EVENT(walltime_nsecs_stats.max, FINAL))
				pr_err("failed to write stat round event\n");
		}

		if (!perf_stat.data.is_pipe) {
			perf_stat.session->header.data_size += perf_stat.bytes_written;
			perf_session__write_header(perf_stat.session, evsel_list, fd, true);
		}

		perf_session__delete(perf_stat.session);
	}

	perf_stat__exit_aggr_mode();
	perf_evlist__free_stats(evsel_list);
out:
	free(walltime_run);

	if (smi_cost && smi_reset)
		sysfs__write_int(FREEZE_ON_SMI_PATH, 0);

	perf_evlist__delete(evsel_list);

	runtime_stat_delete(&stat_config);

	return status;
}