/*
 * Builtin stat command: Give a precise performance counter summary
 * overview of any workload, CPU or specific PID.
 *
 * Sample output:
 *
 *   $ perf stat ./hackbench 10
 *
 *   Performance counter stats for './hackbench 10':
 *
 *        1708.761321 task-clock                #   11.037 CPUs utilized
 *             41,190 context-switches          #    0.024 M/sec
 *              6,735 CPU-migrations            #    0.004 M/sec
 *             17,318 page-faults               #    0.010 M/sec
 *      5,205,202,243 cycles                    #    3.046 GHz
 *      3,856,436,920 stalled-cycles-frontend   #   74.09% frontend cycles idle
 *      1,600,790,871 stalled-cycles-backend    #   30.75% backend  cycles idle
 *      2,603,501,247 instructions              #    0.50  insns per cycle
 *                                              #    1.48  stalled cycles per insn
 *        484,357,498 branches                  #  283.455 M/sec
 *          6,388,934 branch-misses             #    1.32% of all branches
 *
 *        0.154822978 seconds time elapsed
 *
 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *   Jaswinder Singh Rajput <jaswinder@kernel.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/header.h"
#include "util/cpumap.h"
#include "util/thread.h"
#include "util/thread_map.h"

#include <sys/prctl.h>
#define DEFAULT_SEPARATOR	" "
#define CNTR_NOT_SUPPORTED	"<not supported>"
#define CNTR_NOT_COUNTED	"<not counted>"
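
/*
 * The two placeholder strings above are what the print_counter*() routines
 * below emit in the value column when a counter never actually counted
 * (run == 0 || ena == 0) or could not be opened at all
 * (counter->supported == false).
 */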
static void print_stat(int argc, const char **argv);
static void print_counter_aggr(struct perf_evsel *counter, char *prefix);
static void print_counter(struct perf_evsel *counter, char *prefix);
static void print_aggr(char *prefix);
static struct perf_evlist	*evsel_list;

static struct perf_target	target = {
	.uid	= UINT_MAX,
};

static int			run_count	= 1;
static bool			no_inherit	= false;
static bool			scale		= true;
static enum aggr_mode		aggr_mode	= AGGR_GLOBAL;
static pid_t			child_pid	= -1;
static bool			null_run	= false;
static int			detailed_run	= 0;
static bool			big_num		= true;
static int			big_num_opt	= -1;
static const char		*csv_sep	= NULL;
static bool			csv_output	= false;
static bool			group		= false;
static FILE			*output		= NULL;
static const char		*pre_cmd	= NULL;
static const char		*post_cmd	= NULL;
static bool			sync_run	= false;
static unsigned int		interval	= 0;
static bool			forever		= false;
static struct timespec		ref_time;
static struct cpu_map		*aggr_map;
static int			(*aggr_get_id)(struct cpu_map *m, int cpu);

static volatile int done = 0;

struct perf_stat {
	struct stats	res_stats[3];
};
static inline void diff_timespec(struct timespec *r, struct timespec *a,
				 struct timespec *b)
{
	r->tv_sec = a->tv_sec - b->tv_sec;
	if (a->tv_nsec < b->tv_nsec) {
		/* borrow one second's worth of nanoseconds */
		r->tv_nsec = a->tv_nsec + 1000000000L - b->tv_nsec;
		r->tv_sec--;
	} else {
		r->tv_nsec = a->tv_nsec - b->tv_nsec;
	}
}
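
/*
 * Worked example of the borrow path above: a = {5, 100000000} (5.1s) minus
 * b = {3, 900000000} (3.9s) gives tv_nsec = 100000000 + 1000000000 -
 * 900000000 = 200000000 and tv_sec = 5 - 3 - 1 = 1, i.e. r = 1.2s.
 */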
static inline struct cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
{
	return (evsel->cpus && !target.cpu_list) ? evsel->cpus : evsel_list->cpus;
}

static inline int perf_evsel__nr_cpus(struct perf_evsel *evsel)
{
	return perf_evsel__cpus(evsel)->nr;
}
static void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
{
	memset(evsel->priv, 0, sizeof(struct perf_stat));
}

static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
{
	evsel->priv = zalloc(sizeof(struct perf_stat));
	return evsel->priv == NULL ? -ENOMEM : 0;
}

static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
{
	free(evsel->priv);
	evsel->priv = NULL;
}
static int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel)
{
	void *addr;
	size_t sz;

	sz = sizeof(*evsel->counts) +
	     (perf_evsel__nr_cpus(evsel) * sizeof(struct perf_counts_values));

	addr = zalloc(sz);
	if (!addr)
		return -ENOMEM;

	evsel->prev_raw_counts = addr;

	return 0;
}

static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
{
	free(evsel->prev_raw_counts);
	evsel->prev_raw_counts = NULL;
}
static void perf_evlist__free_stats(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		perf_evsel__free_stat_priv(evsel);
		perf_evsel__free_counts(evsel);
		perf_evsel__free_prev_raw_counts(evsel);
	}
}

static int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (perf_evsel__alloc_stat_priv(evsel) < 0 ||
		    perf_evsel__alloc_counts(evsel, perf_evsel__nr_cpus(evsel)) < 0 ||
		    (alloc_raw && perf_evsel__alloc_prev_raw_counts(evsel) < 0))
			goto out_free;
	}

	return 0;

out_free:
	perf_evlist__free_stats(evlist);
	return -1;
}
static struct stats runtime_nsecs_stats[MAX_NR_CPUS];
static struct stats runtime_cycles_stats[MAX_NR_CPUS];
static struct stats runtime_stalled_cycles_front_stats[MAX_NR_CPUS];
static struct stats runtime_stalled_cycles_back_stats[MAX_NR_CPUS];
static struct stats runtime_branches_stats[MAX_NR_CPUS];
static struct stats runtime_cacherefs_stats[MAX_NR_CPUS];
static struct stats runtime_l1_dcache_stats[MAX_NR_CPUS];
static struct stats runtime_l1_icache_stats[MAX_NR_CPUS];
static struct stats runtime_ll_cache_stats[MAX_NR_CPUS];
static struct stats runtime_itlb_cache_stats[MAX_NR_CPUS];
static struct stats runtime_dtlb_cache_stats[MAX_NR_CPUS];
static struct stats walltime_nsecs_stats;
static void perf_stat__reset_stats(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		perf_evsel__reset_stat_priv(evsel);
		perf_evsel__reset_counts(evsel, perf_evsel__nr_cpus(evsel));
	}

	memset(runtime_nsecs_stats, 0, sizeof(runtime_nsecs_stats));
	memset(runtime_cycles_stats, 0, sizeof(runtime_cycles_stats));
	memset(runtime_stalled_cycles_front_stats, 0,
	       sizeof(runtime_stalled_cycles_front_stats));
	memset(runtime_stalled_cycles_back_stats, 0,
	       sizeof(runtime_stalled_cycles_back_stats));
	memset(runtime_branches_stats, 0, sizeof(runtime_branches_stats));
	memset(runtime_cacherefs_stats, 0, sizeof(runtime_cacherefs_stats));
	memset(runtime_l1_dcache_stats, 0, sizeof(runtime_l1_dcache_stats));
	memset(runtime_l1_icache_stats, 0, sizeof(runtime_l1_icache_stats));
	memset(runtime_ll_cache_stats, 0, sizeof(runtime_ll_cache_stats));
	memset(runtime_itlb_cache_stats, 0, sizeof(runtime_itlb_cache_stats));
	memset(runtime_dtlb_cache_stats, 0, sizeof(runtime_dtlb_cache_stats));
	memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
}
static int create_perf_stat_counter(struct perf_evsel *evsel)
{
	struct perf_event_attr *attr = &evsel->attr;

	if (scale)
		attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
				    PERF_FORMAT_TOTAL_TIME_RUNNING;

	attr->inherit = !no_inherit;

	if (perf_target__has_cpu(&target))
		return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));

	if (!perf_target__has_task(&target) &&
	    perf_evsel__is_group_leader(evsel)) {
		attr->disabled = 1;
		attr->enable_on_exec = 1;
	}

	return perf_evsel__open_per_thread(evsel, evsel_list->threads);
}
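
/*
 * A note on the disabled/enable_on_exec pair above: for a forked workload
 * the group leader is created disabled and the kernel only flips it on when
 * the child calls exec(), so fork and startup overhead stays out of the
 * counts.
 */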
/*
 * Does the counter have nsecs as a unit?
 */
static inline int nsec_counter(struct perf_evsel *evsel)
{
	if (perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) ||
	    perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
		return 1;

	return 0;
}
/*
 * Update various tracking values we maintain to print
 * more semantic information such as miss/hit ratios,
 * instruction rates, etc:
 */
static void update_shadow_stats(struct perf_evsel *counter, u64 *count)
{
	if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK))
		update_stats(&runtime_nsecs_stats[0], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
		update_stats(&runtime_cycles_stats[0], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
		update_stats(&runtime_stalled_cycles_front_stats[0], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
		update_stats(&runtime_stalled_cycles_back_stats[0], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
		update_stats(&runtime_branches_stats[0], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
		update_stats(&runtime_cacherefs_stats[0], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
		update_stats(&runtime_l1_dcache_stats[0], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
		update_stats(&runtime_l1_icache_stats[0], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL))
		update_stats(&runtime_ll_cache_stats[0], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
		update_stats(&runtime_dtlb_cache_stats[0], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
		update_stats(&runtime_itlb_cache_stats[0], count[0]);
}
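
/*
 * These shadow stats are what the ratio printing below divides against.
 * E.g. in the sample run from the header comment, 2,603,501,247
 * instructions against 5,205,202,243 recorded cycles is reported by
 * abs_printout() as "0.50 insns per cycle".
 */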
/*
 * Read out the results of a single counter:
 * aggregate counts across CPUs in system-wide mode
 */
static int read_counter_aggr(struct perf_evsel *counter)
{
	struct perf_stat *ps = counter->priv;
	u64 *count = counter->counts->aggr.values;
	int i;

	if (__perf_evsel__read(counter, perf_evsel__nr_cpus(counter),
			       thread_map__nr(evsel_list->threads), scale) < 0)
		return -1;

	for (i = 0; i < 3; i++)
		update_stats(&ps->res_stats[i], count[i]);

	if (verbose) {
		fprintf(output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			perf_evsel__name(counter), count[0], count[1], count[2]);
	}

	/*
	 * Save the full runtime - to allow normalization during printout:
	 */
	update_shadow_stats(counter, count);

	return 0;
}
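
/*
 * Layout reminder: since create_perf_stat_counter() requests
 * PERF_FORMAT_TOTAL_TIME_ENABLED/RUNNING, count[0] is the raw value,
 * count[1] the time enabled and count[2] the time running; the
 * running/enabled ratio is what print_counter_aggr() shows as the trailing
 * "[xx.xx%]" multiplexing note.
 */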
/*
 * Read out the results of a single counter:
 * do not aggregate counts across CPUs in system-wide mode
 */
static int read_counter(struct perf_evsel *counter)
{
	u64 *count;
	int cpu;

	for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
		if (__perf_evsel__read_on_cpu(counter, cpu, 0, scale) < 0)
			return -1;

		count = counter->counts->cpu[cpu].values;

		update_shadow_stats(counter, count);
	}

	return 0;
}
static void print_interval(void)
{
	static int num_print_interval;
	struct perf_evsel *counter;
	struct perf_stat *ps;
	struct timespec ts, rs;
	char prefix[64];

	if (aggr_mode == AGGR_GLOBAL) {
		list_for_each_entry(counter, &evsel_list->entries, node) {
			ps = counter->priv;
			memset(ps->res_stats, 0, sizeof(ps->res_stats));
			read_counter_aggr(counter);
		}
	} else {
		list_for_each_entry(counter, &evsel_list->entries, node) {
			ps = counter->priv;
			memset(ps->res_stats, 0, sizeof(ps->res_stats));
			read_counter(counter);
		}
	}

	clock_gettime(CLOCK_MONOTONIC, &ts);
	diff_timespec(&rs, &ts, &ref_time);
	sprintf(prefix, "%6lu.%09lu%s", rs.tv_sec, rs.tv_nsec, csv_sep);

	if (num_print_interval == 0 && !csv_output) {
		switch (aggr_mode) {
		case AGGR_SOCKET:
			fprintf(output, "#           time socket cpus             counts events\n");
			break;
		case AGGR_CORE:
			fprintf(output, "#           time core         cpus             counts events\n");
			break;
		case AGGR_NONE:
			fprintf(output, "#           time CPU                 counts events\n");
			break;
		case AGGR_GLOBAL:
		default:
			fprintf(output, "#           time             counts events\n");
		}
	}

	/* re-print the header every 25 interval lines */
	if (++num_print_interval == 25)
		num_print_interval = 0;

	switch (aggr_mode) {
	case AGGR_CORE:
	case AGGR_SOCKET:
		print_aggr(prefix);
		break;
	case AGGR_NONE:
		list_for_each_entry(counter, &evsel_list->entries, node)
			print_counter(counter, prefix);
		break;
	case AGGR_GLOBAL:
	default:
		list_for_each_entry(counter, &evsel_list->entries, node)
			print_counter_aggr(counter, prefix);
	}
}
static int __run_perf_stat(int argc, const char **argv)
{
	char msg[512];
	unsigned long long t0, t1;
	struct perf_evsel *counter;
	struct timespec ts;
	int status = 0;
	const bool forks = (argc > 0);

	if (interval) {
		ts.tv_sec = interval / 1000;
		ts.tv_nsec = (interval % 1000) * 1000000;
	} else {
		ts.tv_sec = 1;
		ts.tv_nsec = 0;
	}

	if (forks) {
		if (perf_evlist__prepare_workload(evsel_list, &target, argv,
						  false, false) < 0) {
			perror("failed to prepare workload");
			return -1;
		}
		child_pid = evsel_list->workload.pid;
	}

	if (group)
		perf_evlist__set_leader(evsel_list);

	list_for_each_entry(counter, &evsel_list->entries, node) {
		if (create_perf_stat_counter(counter) < 0) {
			/*
			 * PPC returns ENXIO for HW counters until 2.6.37
			 * (behavior changed with commit b0a873e).
			 */
			if (errno == EINVAL || errno == ENOSYS ||
			    errno == ENOENT || errno == EOPNOTSUPP ||
			    errno == ENXIO) {
				if (verbose)
					ui__warning("%s event is not supported by the kernel.\n",
						    perf_evsel__name(counter));
				counter->supported = false;
				continue;
			}

			perf_evsel__open_strerror(counter, &target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);

			if (child_pid != -1)
				kill(child_pid, SIGTERM);

			return -1;
		}
		counter->supported = true;
	}

	if (perf_evlist__apply_filters(evsel_list)) {
		error("failed to set filter with %d (%s)\n", errno,
		      strerror(errno));
		return -1;
	}

	/*
	 * Enable counters and exec the command:
	 */
	t0 = rdclock();
	clock_gettime(CLOCK_MONOTONIC, &ref_time);

	if (forks) {
		perf_evlist__start_workload(evsel_list);

		if (interval) {
			while (!waitpid(child_pid, &status, WNOHANG)) {
				nanosleep(&ts, NULL);
				print_interval();
			}
		}
		wait(&status);

		if (WIFSIGNALED(status))
			psignal(WTERMSIG(status), argv[0]);
	} else {
		while (!done) {
			nanosleep(&ts, NULL);
			if (interval)
				print_interval();
		}
	}

	t1 = rdclock();

	update_stats(&walltime_nsecs_stats, t1 - t0);

	if (aggr_mode == AGGR_GLOBAL) {
		list_for_each_entry(counter, &evsel_list->entries, node) {
			read_counter_aggr(counter);
			perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter),
					     thread_map__nr(evsel_list->threads));
		}
	} else {
		list_for_each_entry(counter, &evsel_list->entries, node) {
			read_counter(counter);
			perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter), 1);
		}
	}

	return WEXITSTATUS(status);
}
static int run_perf_stat(int argc __maybe_unused, const char **argv)
{
	int ret;

	if (pre_cmd) {
		ret = system(pre_cmd);
		if (ret)
			return ret;
	}

	if (sync_run)
		sync();

	ret = __run_perf_stat(argc, argv);
	if (ret)
		return ret;

	if (post_cmd) {
		ret = system(post_cmd);
		if (ret)
			return ret;
	}

	return ret;
}
static void print_noise_pct(double total, double avg)
{
	double pct = rel_stddev_stats(total, avg);

	if (csv_output)
		fprintf(output, "%s%.2f%%", csv_sep, pct);
	else
		fprintf(output, "  ( +-%6.2f%% )", pct);
}

static void print_noise(struct perf_evsel *evsel, double avg)
{
	struct perf_stat *ps;

	if (run_count == 1)
		return;

	ps = evsel->priv;
	print_noise_pct(stddev_stats(&ps->res_stats[0]), avg);
}
static void aggr_printout(struct perf_evsel *evsel, int id, int nr)
{
	switch (aggr_mode) {
	case AGGR_CORE:
		fprintf(output, "S%d-C%*d%s%*d%s",
			cpu_map__id_to_socket(id),
			csv_output ? 0 : -8,
			cpu_map__id_to_cpu(id),
			csv_sep,
			csv_output ? 0 : 4,
			nr,
			csv_sep);
		break;
	case AGGR_SOCKET:
		fprintf(output, "S%*d%s%*d%s",
			csv_output ? 0 : -5,
			id,
			csv_sep,
			csv_output ? 0 : 4,
			nr,
			csv_sep);
		break;
	case AGGR_NONE:
		fprintf(output, "CPU%*d%s",
			csv_output ? 0 : -4,
			perf_evsel__cpus(evsel)->map[id], csv_sep);
		break;
	case AGGR_GLOBAL:
	default:
		break;
	}
}
static void nsec_printout(int cpu, int nr, struct perf_evsel *evsel, double avg)
{
	double msecs = avg / 1e6;
	const char *fmt = csv_output ? "%.6f%s%s" : "%18.6f%s%-25s";

	aggr_printout(evsel, cpu, nr);

	fprintf(output, fmt, msecs, csv_sep, perf_evsel__name(evsel));

	if (evsel->cgrp)
		fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);

	if (csv_output || interval)
		return;

	if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
		fprintf(output, " # %8.3f CPUs utilized          ",
			avg / avg_stats(&walltime_nsecs_stats));
	else
		fprintf(output, "   ");
}
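
/*
 * Cross-check with the sample output in the header comment: 1708.761321 ms
 * of task-clock over 0.154822978 s of wall time prints as
 * 1708761321 / 154822978 ~= 11.037 "CPUs utilized".
 */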
/* used for get_ratio_color() */
enum grc_type {
	GRC_STALLED_CYCLES_FE,
	GRC_STALLED_CYCLES_BE,
	GRC_CACHE_MISSES,
	GRC_MAX_NR
};

static const char *get_ratio_color(enum grc_type type, double ratio)
{
	static const double grc_table[GRC_MAX_NR][3] = {
		[GRC_STALLED_CYCLES_FE]	= { 50.0, 30.0, 10.0 },
		[GRC_STALLED_CYCLES_BE]	= { 75.0, 50.0, 20.0 },
		[GRC_CACHE_MISSES]	= { 20.0, 10.0,  5.0 },
	};
	const char *color = PERF_COLOR_NORMAL;

	if (ratio > grc_table[type][0])
		color = PERF_COLOR_RED;
	else if (ratio > grc_table[type][1])
		color = PERF_COLOR_MAGENTA;
	else if (ratio > grc_table[type][2])
		color = PERF_COLOR_YELLOW;

	return color;
}
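
/*
 * Example: the 74.09% frontend-idle ratio in the header sample exceeds the
 * 50.0 threshold in grc_table[GRC_STALLED_CYCLES_FE][0], so that column
 * would come out in PERF_COLOR_RED.
 */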
static void print_stalled_cycles_frontend(int cpu,
					  struct perf_evsel *evsel
					  __maybe_unused, double avg)
{
	double total, ratio = 0.0;
	const char *color;

	total = avg_stats(&runtime_cycles_stats[cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_STALLED_CYCLES_FE, ratio);

	fprintf(output, " #  ");
	color_fprintf(output, color, "%6.2f%%", ratio);
	fprintf(output, " frontend cycles idle   ");
}

static void print_stalled_cycles_backend(int cpu,
					 struct perf_evsel *evsel
					 __maybe_unused, double avg)
{
	double total, ratio = 0.0;
	const char *color;

	total = avg_stats(&runtime_cycles_stats[cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio);

	fprintf(output, " #  ");
	color_fprintf(output, color, "%6.2f%%", ratio);
	fprintf(output, " backend  cycles idle   ");
}

static void print_branch_misses(int cpu,
				struct perf_evsel *evsel __maybe_unused,
				double avg)
{
	double total, ratio = 0.0;
	const char *color;

	total = avg_stats(&runtime_branches_stats[cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	fprintf(output, " #  ");
	color_fprintf(output, color, "%6.2f%%", ratio);
	fprintf(output, " of all branches        ");
}

static void print_l1_dcache_misses(int cpu,
				   struct perf_evsel *evsel __maybe_unused,
				   double avg)
{
	double total, ratio = 0.0;
	const char *color;

	total = avg_stats(&runtime_l1_dcache_stats[cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	fprintf(output, " #  ");
	color_fprintf(output, color, "%6.2f%%", ratio);
	fprintf(output, " of all L1-dcache hits  ");
}

static void print_l1_icache_misses(int cpu,
				   struct perf_evsel *evsel __maybe_unused,
				   double avg)
{
	double total, ratio = 0.0;
	const char *color;

	total = avg_stats(&runtime_l1_icache_stats[cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	fprintf(output, " #  ");
	color_fprintf(output, color, "%6.2f%%", ratio);
	fprintf(output, " of all L1-icache hits  ");
}

static void print_dtlb_cache_misses(int cpu,
				    struct perf_evsel *evsel __maybe_unused,
				    double avg)
{
	double total, ratio = 0.0;
	const char *color;

	total = avg_stats(&runtime_dtlb_cache_stats[cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	fprintf(output, " #  ");
	color_fprintf(output, color, "%6.2f%%", ratio);
	fprintf(output, " of all dTLB cache hits ");
}

static void print_itlb_cache_misses(int cpu,
				    struct perf_evsel *evsel __maybe_unused,
				    double avg)
{
	double total, ratio = 0.0;
	const char *color;

	total = avg_stats(&runtime_itlb_cache_stats[cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	fprintf(output, " #  ");
	color_fprintf(output, color, "%6.2f%%", ratio);
	fprintf(output, " of all iTLB cache hits ");
}

static void print_ll_cache_misses(int cpu,
				  struct perf_evsel *evsel __maybe_unused,
				  double avg)
{
	double total, ratio = 0.0;
	const char *color;

	total = avg_stats(&runtime_ll_cache_stats[cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	fprintf(output, " #  ");
	color_fprintf(output, color, "%6.2f%%", ratio);
	fprintf(output, " of all LL-cache hits   ");
}
static void abs_printout(int cpu, int nr, struct perf_evsel *evsel, double avg)
{
	double total, ratio = 0.0;
	const char *fmt;

	if (csv_output)
		fmt = "%.0f%s%s";
	else if (big_num)
		fmt = "%'18.0f%s%-25s";
	else
		fmt = "%18.0f%s%-25s";

	aggr_printout(evsel, cpu, nr);

	/* in global aggregation mode the shadow stats live at index 0 */
	if (aggr_mode == AGGR_GLOBAL)
		cpu = 0;

	fprintf(output, fmt, avg, csv_sep, perf_evsel__name(evsel));

	if (evsel->cgrp)
		fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);

	if (csv_output || interval)
		return;

	if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
		total = avg_stats(&runtime_cycles_stats[cpu]);
		if (total)
			ratio = avg / total;

		fprintf(output, " #   %5.2f  insns per cycle        ", ratio);

		total = avg_stats(&runtime_stalled_cycles_front_stats[cpu]);
		total = max(total, avg_stats(&runtime_stalled_cycles_back_stats[cpu]));

		if (total && avg) {
			ratio = total / avg;
			fprintf(output, "\n                                             #   %5.2f  stalled cycles per insn", ratio);
		}

	} else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES) &&
		   runtime_branches_stats[cpu].n != 0) {
		print_branch_misses(cpu, evsel, avg);
	} else if (
		/*
		 * HW_CACHE events encode cache id, op and result into
		 * attr.config: id | (op << 8) | (result << 16).
		 */
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config ==  ( PERF_COUNT_HW_CACHE_L1D |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
		   runtime_l1_dcache_stats[cpu].n != 0) {
		print_l1_dcache_misses(cpu, evsel, avg);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config ==  ( PERF_COUNT_HW_CACHE_L1I |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
		   runtime_l1_icache_stats[cpu].n != 0) {
		print_l1_icache_misses(cpu, evsel, avg);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config ==  ( PERF_COUNT_HW_CACHE_DTLB |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
		   runtime_dtlb_cache_stats[cpu].n != 0) {
		print_dtlb_cache_misses(cpu, evsel, avg);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config ==  ( PERF_COUNT_HW_CACHE_ITLB |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
		   runtime_itlb_cache_stats[cpu].n != 0) {
		print_itlb_cache_misses(cpu, evsel, avg);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config ==  ( PERF_COUNT_HW_CACHE_LL |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
		   runtime_ll_cache_stats[cpu].n != 0) {
		print_ll_cache_misses(cpu, evsel, avg);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES) &&
		   runtime_cacherefs_stats[cpu].n != 0) {
		total = avg_stats(&runtime_cacherefs_stats[cpu]);

		if (total)
			ratio = avg * 100 / total;

		fprintf(output, " # %8.3f %% of all cache refs    ", ratio);

	} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
		print_stalled_cycles_frontend(cpu, evsel, avg);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
		print_stalled_cycles_backend(cpu, evsel, avg);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
		total = avg_stats(&runtime_nsecs_stats[cpu]);

		if (total)
			ratio = 1.0 * avg / total;

		fprintf(output, " # %8.3f GHz                    ", ratio);
	} else if (runtime_nsecs_stats[cpu].n != 0) {
		char unit = 'M';

		total = avg_stats(&runtime_nsecs_stats[cpu]);

		if (total)
			ratio = 1000.0 * avg / total;
		if (ratio < 0.001) {
			ratio *= 1000;
			unit = 'K';
		}

		fprintf(output, " # %8.3f %c/sec                  ", ratio, unit);
	} else {
		fprintf(output, "                                   ");
	}
}
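
/*
 * The GHz branch above can also be checked against the header sample:
 * 5,205,202,243 cycles / 1,708,761,321 ns of task-clock ~= 3.046 GHz.
 */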
static void print_aggr(char *prefix)
{
	struct perf_evsel *counter;
	int cpu, s, s2, id, nr;
	u64 ena, run, val;

	if (!(aggr_map || aggr_get_id))
		return;

	for (s = 0; s < aggr_map->nr; s++) {
		id = aggr_map->map[s];
		list_for_each_entry(counter, &evsel_list->entries, node) {
			val = ena = run = 0;
			nr = 0;
			for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
				s2 = aggr_get_id(evsel_list->cpus, cpu);
				if (s2 != id)
					continue;
				val += counter->counts->cpu[cpu].val;
				ena += counter->counts->cpu[cpu].ena;
				run += counter->counts->cpu[cpu].run;
				nr++;
			}
			if (prefix)
				fprintf(output, "%s", prefix);

			if (run == 0 || ena == 0) {
				aggr_printout(counter, id, nr);

				fprintf(output, "%*s%s%*s",
					csv_output ? 0 : 18,
					counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
					csv_sep,
					csv_output ? 0 : -24,
					perf_evsel__name(counter));

				if (counter->cgrp)
					fprintf(output, "%s%s",
						csv_sep, counter->cgrp->name);

				fputc('\n', output);
				continue;
			}

			if (nsec_counter(counter))
				nsec_printout(id, nr, counter, val);
			else
				abs_printout(id, nr, counter, val);

			if (!csv_output)
				print_noise(counter, 1.0);

			if (run != ena)
				fprintf(output, "  (%.2f%%)",
					100.0 * run / ena);

			fputc('\n', output);
		}
	}
}
/*
 * Print out the results of a single counter:
 * aggregated counts in system-wide mode
 */
static void print_counter_aggr(struct perf_evsel *counter, char *prefix)
{
	struct perf_stat *ps = counter->priv;
	double avg = avg_stats(&ps->res_stats[0]);
	int scaled = counter->counts->scaled;

	if (prefix)
		fprintf(output, "%s", prefix);

	if (scaled == -1) {
		fprintf(output, "%*s%s%*s",
			csv_output ? 0 : 18,
			counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
			csv_sep,
			csv_output ? 0 : -24,
			perf_evsel__name(counter));

		if (counter->cgrp)
			fprintf(output, "%s%s", csv_sep, counter->cgrp->name);

		fputc('\n', output);
		return;
	}

	if (nsec_counter(counter))
		nsec_printout(-1, 0, counter, avg);
	else
		abs_printout(-1, 0, counter, avg);

	print_noise(counter, avg);

	if (csv_output) {
		fputc('\n', output);
		return;
	}

	if (scaled) {
		double avg_enabled, avg_running;

		avg_enabled = avg_stats(&ps->res_stats[1]);
		avg_running = avg_stats(&ps->res_stats[2]);

		fprintf(output, " [%5.2f%%]", 100 * avg_running / avg_enabled);
	}
	fprintf(output, "\n");
}
/*
 * Print out the results of a single counter:
 * does not use aggregated counts in system-wide mode
 */
static void print_counter(struct perf_evsel *counter, char *prefix)
{
	u64 ena, run, val;
	int cpu;

	for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
		val = counter->counts->cpu[cpu].val;
		ena = counter->counts->cpu[cpu].ena;
		run = counter->counts->cpu[cpu].run;

		if (prefix)
			fprintf(output, "%s", prefix);

		if (run == 0 || ena == 0) {
			fprintf(output, "CPU%*d%s%*s%s%*s",
				csv_output ? 0 : -4,
				perf_evsel__cpus(counter)->map[cpu], csv_sep,
				csv_output ? 0 : 18,
				counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
				csv_sep,
				csv_output ? 0 : -24,
				perf_evsel__name(counter));

			if (counter->cgrp)
				fprintf(output, "%s%s",
					csv_sep, counter->cgrp->name);

			fputc('\n', output);
			continue;
		}

		if (nsec_counter(counter))
			nsec_printout(cpu, 0, counter, val);
		else
			abs_printout(cpu, 0, counter, val);

		if (!csv_output)
			print_noise(counter, 1.0);

		if (run != ena)
			fprintf(output, "  (%.2f%%)", 100.0 * run / ena);

		fputc('\n', output);
	}
}
static void print_stat(int argc, const char **argv)
{
	struct perf_evsel *counter;
	int i;

	if (!csv_output) {
		fprintf(output, "\n");
		fprintf(output, " Performance counter stats for ");
		if (!perf_target__has_task(&target)) {
			fprintf(output, "\'%s", argv[0]);
			for (i = 1; i < argc; i++)
				fprintf(output, " %s", argv[i]);
		} else if (target.pid)
			fprintf(output, "process id \'%s", target.pid);
		else
			fprintf(output, "thread id \'%s", target.tid);

		fprintf(output, "\'");
		if (run_count > 1)
			fprintf(output, " (%d runs)", run_count);
		fprintf(output, ":\n\n");
	}

	switch (aggr_mode) {
	case AGGR_CORE:
	case AGGR_SOCKET:
		print_aggr(NULL);
		break;
	case AGGR_GLOBAL:
		list_for_each_entry(counter, &evsel_list->entries, node)
			print_counter_aggr(counter, NULL);
		break;
	case AGGR_NONE:
		list_for_each_entry(counter, &evsel_list->entries, node)
			print_counter(counter, NULL);
		break;
	default:
		break;
	}

	if (!csv_output) {
		fprintf(output, "\n");
		fprintf(output, " %17.9f seconds time elapsed",
			avg_stats(&walltime_nsecs_stats)/1e9);
		if (run_count > 1) {
			fprintf(output, "                                        ");
			print_noise_pct(stddev_stats(&walltime_nsecs_stats),
					avg_stats(&walltime_nsecs_stats));
		}
		fprintf(output, "\n\n");
	}
}
static volatile int signr = -1;

static void skip_signal(int signo)
{
	if ((child_pid == -1) || interval)
		done = 1;

	signr = signo;
}

static void sig_atexit(void)
{
	if (child_pid != -1)
		kill(child_pid, SIGTERM);

	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	kill(getpid(), signr);
}
static int stat__set_big_num(const struct option *opt __maybe_unused,
			     const char *s __maybe_unused, int unset)
{
	big_num_opt = unset ? 0 : 1;
	return 0;
}
static int perf_stat_init_aggr_mode(void)
{
	switch (aggr_mode) {
	case AGGR_SOCKET:
		if (cpu_map__build_socket_map(evsel_list->cpus, &aggr_map)) {
			perror("cannot build socket map");
			return -1;
		}
		aggr_get_id = cpu_map__get_socket;
		break;
	case AGGR_CORE:
		if (cpu_map__build_core_map(evsel_list->cpus, &aggr_map)) {
			perror("cannot build core map");
			return -1;
		}
		aggr_get_id = cpu_map__get_core;
		break;
	case AGGR_NONE:
	case AGGR_GLOBAL:
	default:
		break;
	}
	return 0;
}
/*
 * Add default attributes, if there were no attributes specified or
 * if -d/--detailed, -d -d or -d -d -d is used:
 */
static int add_default_attributes(void)
{
	struct perf_event_attr default_attrs[] = {

  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK	},
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES },
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS	},
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS	},

  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES	},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS	},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES	},

};
/*
 * Detailed stats (-d), covering the L1 and last level data caches:
 */
	struct perf_event_attr detailed_attrs[] = {

  { .type = PERF_TYPE_HW_CACHE,
    .config = PERF_COUNT_HW_CACHE_L1D << 0 |
	      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
	      (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)	},

  { .type = PERF_TYPE_HW_CACHE,
    .config = PERF_COUNT_HW_CACHE_L1D << 0 |
	      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
	      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)	},

  { .type = PERF_TYPE_HW_CACHE,
    .config = PERF_COUNT_HW_CACHE_LL << 0 |
	      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
	      (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)	},

  { .type = PERF_TYPE_HW_CACHE,
    .config = PERF_COUNT_HW_CACHE_LL << 0 |
	      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
	      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)	},

};
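
/*
 * Reminder on the encoding used by all the HW_CACHE entries here (see
 * include/uapi/linux/perf_event.h): attr.config packs three fields as
 * (perf_hw_cache_id) | (perf_hw_cache_op_id << 8) |
 * (perf_hw_cache_op_result_id << 16), so e.g. L1D | (OP_READ << 8) |
 * (RESULT_MISS << 16) selects L1-dcache read misses.
 */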
/*
 * Very detailed stats (-d -d), covering the instruction cache and the
 * TLB caches:
 */
	struct perf_event_attr very_detailed_attrs[] = {

  { .type = PERF_TYPE_HW_CACHE,
    .config = PERF_COUNT_HW_CACHE_L1I << 0 |
	      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
	      (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)	},

  { .type = PERF_TYPE_HW_CACHE,
    .config = PERF_COUNT_HW_CACHE_L1I << 0 |
	      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
	      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)	},

  { .type = PERF_TYPE_HW_CACHE,
    .config = PERF_COUNT_HW_CACHE_DTLB << 0 |
	      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
	      (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)	},

  { .type = PERF_TYPE_HW_CACHE,
    .config = PERF_COUNT_HW_CACHE_DTLB << 0 |
	      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
	      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)	},

  { .type = PERF_TYPE_HW_CACHE,
    .config = PERF_COUNT_HW_CACHE_ITLB << 0 |
	      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
	      (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)	},

  { .type = PERF_TYPE_HW_CACHE,
    .config = PERF_COUNT_HW_CACHE_ITLB << 0 |
	      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
	      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)	},

};
/*
 * Very, very detailed stats (-d -d -d), adding prefetch events:
 */
	struct perf_event_attr very_very_detailed_attrs[] = {

  { .type = PERF_TYPE_HW_CACHE,
    .config = PERF_COUNT_HW_CACHE_L1D << 0 |
	      (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
	      (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)	},

  { .type = PERF_TYPE_HW_CACHE,
    .config = PERF_COUNT_HW_CACHE_L1D << 0 |
	      (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
	      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)	},

};
	/* Set attrs if no event is selected and !null_run: */
	if (null_run)
		return 0;

	if (!evsel_list->nr_entries) {
		if (perf_evlist__add_default_attrs(evsel_list, default_attrs) < 0)
			return -1;
	}

	/* Detailed events get appended to the event list: */

	if (detailed_run < 1)
		return 0;

	/* Append detailed run extra attributes: */
	if (perf_evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
		return -1;

	if (detailed_run < 2)
		return 0;

	/* Append very detailed run extra attributes: */
	if (perf_evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
		return -1;

	if (detailed_run < 3)
		return 0;

	/* Append very, very detailed run extra attributes: */
	return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
}
int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
{
	bool append_file = false;
	int output_fd = 0;
	const char *output_name = NULL;
	const struct option options[] = {
	OPT_CALLBACK('e', "event", &evsel_list, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &evsel_list, "filter",
		     "event filter", parse_filter),
	OPT_BOOLEAN('i', "no-inherit", &no_inherit,
		    "child tasks do not inherit counters"),
	OPT_STRING('p', "pid", &target.pid, "pid",
		   "stat events on existing process id"),
	OPT_STRING('t', "tid", &target.tid, "tid",
		   "stat events on existing thread id"),
	OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_BOOLEAN('g', "group", &group,
		    "put the counters into a counter group"),
	OPT_BOOLEAN('c', "scale", &scale, "scale/normalize counters"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show counter open errors, etc)"),
	OPT_INTEGER('r', "repeat", &run_count,
		    "repeat command and print average + stddev (max: 100, forever: 0)"),
	OPT_BOOLEAN('n', "null", &null_run,
		    "null run - don't start any counters"),
	OPT_INCR('d', "detailed", &detailed_run,
		 "detailed run - start a lot of events"),
	OPT_BOOLEAN('S', "sync", &sync_run,
		    "call sync() before starting a run"),
	OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
			   "print large numbers with thousands' separators",
			   stat__set_big_num),
	OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
		   "list of cpus to monitor in system-wide"),
	OPT_SET_UINT('A', "no-aggr", &aggr_mode,
		     "disable CPU count aggregation", AGGR_NONE),
	OPT_STRING('x', "field-separator", &csv_sep, "separator",
		   "print counts with custom separator"),
	OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
		     "monitor event in cgroup name only", parse_cgroups),
	OPT_STRING('o', "output", &output_name, "file", "output file name"),
	OPT_BOOLEAN(0, "append", &append_file, "append to the output file"),
	OPT_INTEGER(0, "log-fd", &output_fd,
		    "log output to fd, instead of stderr"),
	OPT_STRING(0, "pre", &pre_cmd, "command",
		   "command to run prior to the measured command"),
	OPT_STRING(0, "post", &post_cmd, "command",
		   "command to run after the measured command"),
	OPT_UINTEGER('I', "interval-print", &interval,
		     "print counts at regular interval in ms (>= 100)"),
	OPT_SET_UINT(0, "per-socket", &aggr_mode,
		     "aggregate counts per processor socket", AGGR_SOCKET),
	OPT_SET_UINT(0, "per-core", &aggr_mode,
		     "aggregate counts per physical processor core", AGGR_CORE),
	OPT_END()
	};
	const char * const stat_usage[] = {
		"perf stat [<options>] [<command>]",
		NULL
	};
	int status = -ENOMEM, run_idx;
	const char *mode;
, "");
1411 evsel_list
= perf_evlist__new();
1412 if (evsel_list
== NULL
)
1415 argc
= parse_options(argc
, argv
, options
, stat_usage
,
1416 PARSE_OPT_STOP_AT_NON_OPTION
);
1419 if (output_name
&& strcmp(output_name
, "-"))
1422 if (output_name
&& output_fd
) {
1423 fprintf(stderr
, "cannot use both --output and --log-fd\n");
1424 usage_with_options(stat_usage
, options
);
1427 if (output_fd
< 0) {
1428 fprintf(stderr
, "argument to --log-fd must be a > 0\n");
1429 usage_with_options(stat_usage
, options
);
1434 mode
= append_file
? "a" : "w";
1436 output
= fopen(output_name
, mode
);
1438 perror("failed to create output file");
1441 clock_gettime(CLOCK_REALTIME
, &tm
);
1442 fprintf(output
, "# started on %s\n", ctime(&tm
.tv_sec
));
1443 } else if (output_fd
> 0) {
1444 mode
= append_file
? "a" : "w";
1445 output
= fdopen(output_fd
, mode
);
1447 perror("Failed opening logfd");
1454 if (!strcmp(csv_sep
, "\\t"))
1457 csv_sep
= DEFAULT_SEPARATOR
;
1460 * let the spreadsheet do the pretty-printing
1463 /* User explicitly passed -B? */
1464 if (big_num_opt
== 1) {
1465 fprintf(stderr
, "-B option not supported with -x\n");
1466 usage_with_options(stat_usage
, options
);
1467 } else /* Nope, so disable big number formatting */
1469 } else if (big_num_opt
== 0) /* User passed --no-big-num */
1472 if (!argc
&& !perf_target__has_task(&target
))
1473 usage_with_options(stat_usage
, options
);
1474 if (run_count
< 0) {
1475 usage_with_options(stat_usage
, options
);
1476 } else if (run_count
== 0) {
1481 /* no_aggr, cgroup are for system-wide only */
1482 if ((aggr_mode
!= AGGR_GLOBAL
|| nr_cgroups
)
1483 && !perf_target__has_cpu(&target
)) {
1484 fprintf(stderr
, "both cgroup and no-aggregation "
1485 "modes only available in system-wide mode\n");
1487 usage_with_options(stat_usage
, options
);
1491 if (add_default_attributes())
1494 perf_target__validate(&target
);
1496 if (perf_evlist__create_maps(evsel_list
, &target
) < 0) {
1497 if (perf_target__has_task(&target
))
1498 pr_err("Problems finding threads of monitor\n");
1499 if (perf_target__has_cpu(&target
))
1500 perror("failed to parse CPUs map");
1502 usage_with_options(stat_usage
, options
);
1505 if (interval
&& interval
< 100) {
1506 pr_err("print interval must be >= 100ms\n");
1507 usage_with_options(stat_usage
, options
);
1511 if (perf_evlist__alloc_stats(evsel_list
, interval
))
1514 if (perf_stat_init_aggr_mode())
1518 * We dont want to block the signals - that would cause
1519 * child tasks to inherit that and Ctrl-C would not work.
1520 * What we want is for Ctrl-C to work in the exec()-ed
1521 * task, but being ignored by perf stat itself:
1525 signal(SIGINT
, skip_signal
);
1526 signal(SIGCHLD
, skip_signal
);
1527 signal(SIGALRM
, skip_signal
);
1528 signal(SIGABRT
, skip_signal
);
1531 for (run_idx
= 0; forever
|| run_idx
< run_count
; run_idx
++) {
1532 if (run_count
!= 1 && verbose
)
1533 fprintf(output
, "[ perf stat: executing run #%d ... ]\n",
1536 status
= run_perf_stat(argc
, argv
);
1537 if (forever
&& status
!= -1) {
1538 print_stat(argc
, argv
);
1539 perf_stat__reset_stats(evsel_list
);
1543 if (!forever
&& status
!= -1 && !interval
)
1544 print_stat(argc
, argv
);
1546 perf_evlist__free_stats(evsel_list
);
1548 perf_evlist__delete_maps(evsel_list
);
1550 perf_evlist__delete(evsel_list
);
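
/*
 * Usage sketch for the repeat loop above, matching the -r option help text:
 * "perf stat -r 5 ./workload" runs the workload five times and adds the
 * ( +- x.xx% ) noise column to each counter line, while "perf stat -r 0
 * ./workload" sets forever and prints a full summary after every run until
 * interrupted.
 */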