// SPDX-License-Identifier: GPL-2.0-only
/*
 * Builtin top command: Display a continuously updated profile of
 * any workload, CPU or specific PID.
 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *		 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Improvements and fixes by:
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 */
#include "util/annotate.h"
#include "util/bpf-event.h"
#include "util/cgroup.h"
#include "util/config.h"
#include "util/color.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/evsel_config.h"
#include "util/event.h"
#include "util/machine.h"
#include "util/mmap.h"
#include "util/session.h"
#include "util/thread.h"
#include "util/symbol.h"
#include "util/synthetic-events.h"
#include "util/util.h"
#include <linux/rbtree.h>
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/callchain.h"
#include "util/cpumap.h"
#include "util/sort.h"
#include "util/string2.h"
#include "util/term.h"
#include "util/intlist.h"
#include "util/parse-branch-options.h"
#include "arch/common.h"
#include "util/debug.h"
#include "util/ordered-events.h"
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/prctl.h>
#include <sys/utsname.h>
#include <linux/stringify.h>
#include <linux/time64.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <perf/mmap.h>
static volatile sig_atomic_t done;
static volatile sig_atomic_t resize;

#define HEADER_LINE_NR  5
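
/*
 * The stdio display can only show as many entries as fit on the terminal:
 * everything below the HEADER_LINE_NR header lines printed above the table.
 */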
static void perf_top__update_print_entries(struct perf_top *top)
{
	top->print_entries = top->winsize.ws_row - HEADER_LINE_NR;
}
static void winch_sig(int sig __maybe_unused)
{
	resize = 1;
}

static void perf_top__resize(struct perf_top *top)
{
	get_term_dimensions(&top->winsize);
	perf_top__update_print_entries(top);
}
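
/*
 * Set up annotation state for 'he' so per-instruction samples can be
 * accumulated while the symbol is shown in the details view.
 */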
static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
{
	struct annotation *notes;
	struct evsel *evsel;
	struct symbol *sym;
	struct dso *dso;
	char msg[BUFSIZ];
	int err;

	if (!he || !he->ms.sym)
		return -1;

	sym = he->ms.sym;
	dso = map__dso(he->ms.map);
	evsel = hists_to_evsel(he->hists);

	/*
	 * We can't annotate with just /proc/kallsyms
	 */
	if (dso__symtab_type(dso) == DSO_BINARY_TYPE__KALLSYMS && !dso__is_kcore(dso)) {
		pr_err("Can't annotate %s: No vmlinux file was found in the "
		       "path\n", sym->name);
		return -1;
	}

	notes = symbol__annotation(sym);
	annotation__lock(notes);

	if (!symbol__hists(sym, top->evlist->core.nr_entries)) {
		annotation__unlock(notes);
		pr_err("Not enough memory for annotating '%s' symbol!\n",
		       sym->name);
		return -1;
	}

	err = symbol__annotate(&he->ms, evsel, NULL);
	if (err == 0) {
		top->sym_filter_entry = he;
	} else {
		symbol__strerror_disassemble(&he->ms, err, msg, sizeof(msg));
		pr_err("Couldn't annotate %s: %s\n", sym->name, msg);
	}

	annotation__unlock(notes);
	return err;
}
static void __zero_source_counters(struct hist_entry *he)
{
	struct symbol *sym = he->ms.sym;

	symbol__annotate_zero_histograms(sym);
}
static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
{
	struct utsname uts;
	int err = uname(&uts);
	struct dso *dso = map__dso(map);

	ui__warning("Out of bounds address found:\n\n"
		    "Addr: %" PRIx64 "\n"
		    "DSO: %s %c\n"
		    "Map: %" PRIx64 "-%" PRIx64 "\n"
		    "Symbol: %" PRIx64 "-%" PRIx64 " %c %s\n"
		    "Arch: %s\n"
		    "Kernel: %s\n"
		    "Tools: %s\n\n"
		    "Not all samples will be on the annotation output.\n\n"
		    "Please report to linux-kernel@vger.kernel.org\n",
		    ip, dso__long_name(dso), dso__symtab_origin(dso),
		    map__start(map), map__end(map), sym->start, sym->end,
		    sym->binding == STB_GLOBAL ? 'g' :
		    sym->binding == STB_LOCAL ? 'l' : 'w', sym->name,
		    err ? "[unknown]" : uts.machine,
		    err ? "[unknown]" : uts.release, perf_version_string);
	if (use_browser <= 0)
		sleep(5);

	map__set_erange_warned(map);
}
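
/*
 * Account one sample address against the annotation histogram of the symbol
 * it hit, warning (once per map) about addresses that fall outside the map.
 */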
static void perf_top__record_precise_ip(struct perf_top *top,
					struct hist_entry *he,
					struct perf_sample *sample,
					struct evsel *evsel, u64 ip)
	EXCLUSIVE_LOCKS_REQUIRED(he->hists->lock)
{
	struct annotation *notes;
	struct symbol *sym = he->ms.sym;
	int err = 0;

	if (sym == NULL || (use_browser == 0 &&
			    (top->sym_filter_entry == NULL ||
			     top->sym_filter_entry->ms.sym != sym)))
		return;

	notes = symbol__annotation(sym);

	if (!annotation__trylock(notes))
		return;

	err = hist_entry__inc_addr_samples(he, sample, evsel, ip);

	annotation__unlock(notes);

	if (unlikely(err)) {
		/*
		 * This function is now called with he->hists->lock held.
		 * Release it before going to sleep.
		 */
		mutex_unlock(&he->hists->lock);

		if (err == -ERANGE && !map__erange_warned(he->ms.map))
			ui__warn_map_erange(he->ms.map, sym, ip);
		else if (err == -ENOMEM) {
			pr_err("Not enough memory for annotating '%s' symbol!\n",
			       sym->name);
			sleep(1);
		}

		mutex_lock(&he->hists->lock);
	}
}
static void perf_top__show_details(struct perf_top *top)
{
	struct hist_entry *he = top->sym_filter_entry;
	struct evsel *evsel;
	struct annotation *notes;
	struct symbol *symbol;
	int more;

	if (!he)
		return;

	evsel = hists_to_evsel(he->hists);

	symbol = he->ms.sym;
	notes = symbol__annotation(symbol);

	annotation__lock(notes);

	symbol__calc_percent(symbol, evsel);

	if (notes->src == NULL)
		goto out_unlock;

	printf("Showing %s for %s\n", evsel__name(top->sym_evsel), symbol->name);
	printf(" Events Pcnt (>=%d%%)\n", annotate_opts.min_pcnt);

	more = symbol__annotate_printf(&he->ms, top->sym_evsel);

	if (top->evlist->enabled) {
		if (top->zero)
			symbol__annotate_zero_histogram(symbol, top->sym_evsel->core.idx);
		else
			symbol__annotate_decay_histogram(symbol, top->sym_evsel->core.idx);
	}
	if (more != 0)
		printf("%d lines not displayed, maybe increase display entries [e]\n", more);
out_unlock:
	annotation__unlock(notes);
}
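
/*
 * Rebuild the histogram trees for every event: decay or drop stale entries,
 * collapse duplicates, keep group members matched to their leader and resort
 * everything for output.
 */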
static void perf_top__resort_hists(struct perf_top *t)
{
	struct evlist *evlist = t->evlist;
	struct evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		struct hists *hists = evsel__hists(pos);

		/*
		 * unlink existing entries so that they can be linked
		 * in a correct order in hists__match() below.
		 */
		hists__unlink(hists);

		if (evlist->enabled) {
			if (t->zero) {
				hists__delete_entries(hists);
			} else {
				hists__decay_entries(hists, t->hide_user_symbols,
						     t->hide_kernel_symbols);
			}
		}

		hists__collapse_resort(hists, NULL);

		/* Non-group events are considered as leader */
		if (symbol_conf.event_group && !evsel__is_group_leader(pos)) {
			struct hists *leader_hists = evsel__hists(evsel__leader(pos));

			hists__match(leader_hists, hists);
			hists__link(leader_hists, hists);
		}
	}

	evlist__for_each_entry(evlist, pos) {
		evsel__output_resort(pos, NULL);
	}
}
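
/* Redraw the stdio screen: header, lost-event warning and the symbol table. */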
static void perf_top__print_sym_table(struct perf_top *top)
{
	char bf[160];
	int printed = 0;
	const int win_width = top->winsize.ws_col - 1;
	struct evsel *evsel = top->sym_evsel;
	struct hists *hists = evsel__hists(evsel);

	perf_top__header_snprintf(top, bf, sizeof(bf));
	printf("%s\n", bf);

	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);

	if (!top->record_opts.overwrite &&
	    (top->evlist->stats.nr_lost_warned !=
	     top->evlist->stats.nr_events[PERF_RECORD_LOST])) {
		top->evlist->stats.nr_lost_warned =
			top->evlist->stats.nr_events[PERF_RECORD_LOST];
		color_fprintf(stdout, PERF_COLOR_RED,
			      "WARNING: LOST %d chunks, Check IO/CPU overload",
			      top->evlist->stats.nr_lost_warned);
		++printed;
	}

	if (top->sym_filter_entry) {
		perf_top__show_details(top);
		return;
	}

	perf_top__resort_hists(top);

	hists__output_recalc_col_len(hists, top->print_entries - printed);
	putchar('\n');
	hists__fprintf(hists, false, top->print_entries - printed, win_width,
		       top->min_percent, stdout, !symbol_conf.use_callchain);
}
static void prompt_integer(int *target, const char *msg)
{
	char *buf = NULL, *p;
	size_t dummy = 0;
	int tmp;

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		return;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	tmp = strtoul(buf, NULL, 10);
	*target = tmp;
	free(buf);
}

static void prompt_percent(int *target, const char *msg)
{
	int tmp = 0;

	prompt_integer(&tmp, msg);
	if (tmp >= 0 && tmp <= 100)
		*target = tmp;
}
static void perf_top__prompt_symbol(struct perf_top *top, const char *msg)
{
	char *buf = NULL, *p;
	struct hist_entry *syme = top->sym_filter_entry, *n, *found = NULL;
	struct hists *hists = evsel__hists(top->sym_evsel);
	struct rb_node *next;
	size_t dummy = 0;

	/* zero counters of active symbol */
	if (syme) {
		__zero_source_counters(syme);
		top->sym_filter_entry = NULL;
	}

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		goto out_free;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	next = rb_first_cached(&hists->entries);
	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (n->ms.sym && !strcmp(buf, n->ms.sym->name)) {
			found = n;
			break;
		}
		next = rb_next(&n->rb_node);
	}

	if (!found) {
		fprintf(stderr, "Sorry, %s is not active.\n", buf);
		sleep(1);
	} else
		perf_top__parse_source(top, found);

out_free:
	free(buf);
}
static void perf_top__print_mapped_keys(struct perf_top *top)
{
	char *name = NULL;

	if (top->sym_filter_entry) {
		struct symbol *sym = top->sym_filter_entry->ms.sym;

		name = sym->name;
	}

	fprintf(stdout, "\nMapped keys:\n");
	fprintf(stdout, "\t[d] display refresh delay. \t(%d)\n", top->delay_secs);
	fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", top->print_entries);

	if (top->evlist->core.nr_entries > 1)
		fprintf(stdout, "\t[E] active event counter. \t(%s)\n", evsel__name(top->sym_evsel));

	fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top->count_filter);

	fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", annotate_opts.min_pcnt);
	fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name ?: "NULL");
	fprintf(stdout, "\t[S] stop annotation.\n");

	fprintf(stdout,
		"\t[K] hide kernel symbols. \t(%s)\n",
		top->hide_kernel_symbols ? "yes" : "no");
	fprintf(stdout,
		"\t[U] hide user symbols. \t(%s)\n",
		top->hide_user_symbols ? "yes" : "no");
	fprintf(stdout, "\t[z] toggle sample zeroing. \t(%d)\n", top->zero ? 1 : 0);
	fprintf(stdout, "\t[qQ] quit.\n");
}
static int perf_top__key_mapped(struct perf_top *top, int c)
{
	switch (c) {
	case 'd':
	case 'e':
	case 'f':
	case 'z':
	case 'q':
	case 'Q':
	case 'K':
	case 'U':
	case 'F':
	case 's':
	case 'S':
		return 1;
	case 'E':
		return top->evlist->core.nr_entries > 1 ? 1 : 0;
	default:
		break;
	}

	return 0;
}
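
/*
 * Interactive stdio mode: act on a single keypress, prompting for additional
 * input where needed. Returns false when the user asks to quit.
 */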
static bool perf_top__handle_keypress(struct perf_top *top, int c)
{
	bool ret = true;

	if (!perf_top__key_mapped(top, c)) {
		struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
		struct termios save;

		perf_top__print_mapped_keys(top);
		fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
		fflush(stdout);

		set_term_quiet_input(&save);

		poll(&stdin_poll, 1, -1);
		c = getc(stdin);

		tcsetattr(0, TCSAFLUSH, &save);
		if (!perf_top__key_mapped(top, c))
			return ret;
	}

	switch (c) {
	case 'd':
		prompt_integer(&top->delay_secs, "Enter display delay");
		if (top->delay_secs < 1)
			top->delay_secs = 1;
		break;
	case 'e':
		prompt_integer(&top->print_entries, "Enter display entries (lines)");
		if (top->print_entries == 0) {
			perf_top__resize(top);
			signal(SIGWINCH, winch_sig);
		} else {
			signal(SIGWINCH, SIG_DFL);
		}
		break;
	case 'E':
		if (top->evlist->core.nr_entries > 1) {
			/* Select 0 as the default event: */
			int counter = 0;

			fprintf(stderr, "\nAvailable events:");

			evlist__for_each_entry(top->evlist, top->sym_evsel)
				fprintf(stderr, "\n\t%d %s", top->sym_evsel->core.idx, evsel__name(top->sym_evsel));

			prompt_integer(&counter, "Enter details event counter");

			if (counter >= top->evlist->core.nr_entries) {
				top->sym_evsel = evlist__first(top->evlist);
				fprintf(stderr, "Sorry, no such event, using %s.\n", evsel__name(top->sym_evsel));
				break;
			}
			evlist__for_each_entry(top->evlist, top->sym_evsel)
				if (top->sym_evsel->core.idx == counter)
					break;
		} else
			top->sym_evsel = evlist__first(top->evlist);
		break;
	case 'f':
		prompt_integer(&top->count_filter, "Enter display event count filter");
		break;
	case 'F':
		prompt_percent(&annotate_opts.min_pcnt,
			       "Enter details display event filter (percent)");
		break;
	case 'K':
		top->hide_kernel_symbols = !top->hide_kernel_symbols;
		break;
	case 'q':
	case 'Q':
		printf("exiting.\n");
		if (top->dump_symtab)
			perf_session__fprintf_dsos(top->session, stderr);
		ret = false;
		break;
	case 's':
		perf_top__prompt_symbol(top, "Enter details symbol");
		break;
	case 'S':
		if (!top->sym_filter_entry)
			break;
		else {
			struct hist_entry *syme = top->sym_filter_entry;

			top->sym_filter_entry = NULL;
			__zero_source_counters(syme);
		}
		break;
	case 'U':
		top->hide_user_symbols = !top->hide_user_symbols;
		break;
	case 'z':
		top->zero = !top->zero;
		break;
	default:
		break;
	}

	return ret;
}
static void perf_top__sort_new_samples(void *arg)
{
	struct perf_top *t = arg;

	if (t->evlist->selected != NULL)
		t->sym_evsel = t->evlist->selected;

	perf_top__resort_hists(t);

	if (t->lost || t->drop)
		pr_warning("Too slow to read ring buffer (change period (-c/-F) or limit CPUs (-C)\n");
}
static void stop_top(void)
{
	session_done = 1;
	done = 1;
}
static void *display_thread_tui(void *arg)
{
	struct evsel *pos;
	struct perf_top *top = arg;
	const char *help = "For a higher level overview, try: perf top --sort comm,dso";
	struct hist_browser_timer hbt = {
		.timer	 = perf_top__sort_new_samples,
		.arg	 = top,
		.refresh = top->delay_secs,
	};
	int ret;

	/* In order to read symbols from other namespaces perf needs to call
	 * setns(2). This isn't permitted if the struct_fs has multiple users.
	 * unshare(2) the fs so that we may continue to setns into namespaces
	 * that we're observing.
	 */
	unshare(CLONE_FS);

	prctl(PR_SET_NAME, "perf-top-UI", 0, 0, 0);

repeat:
	perf_top__sort_new_samples(top);

	/*
	 * Initialize the uid_filter_str, in the future the TUI will allow
	 * Zooming in/out UIDs. For now just use whatever the user passed
	 * via --uid.
	 */
	evlist__for_each_entry(top->evlist, pos) {
		struct hists *hists = evsel__hists(pos);

		hists->uid_filter_str = top->record_opts.target.uid_str;
	}

	ret = evlist__tui_browse_hists(top->evlist, help, &hbt, top->min_percent,
				       &top->session->header.env, !top->record_opts.overwrite);
	if (ret == K_RELOAD) {
		top->zero = true;
		goto repeat;
	} else
		stop_top();

	return NULL;
}
static void display_sig(int sig __maybe_unused)
{
	stop_top();
}

static void display_setup_sig(void)
{
	signal(SIGSEGV, sighandler_dump_stack);
	signal(SIGFPE, sighandler_dump_stack);
	signal(SIGINT, display_sig);
	signal(SIGQUIT, display_sig);
	signal(SIGTERM, display_sig);
}
static void *display_thread(void *arg)
{
	struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
	struct termios save;
	struct perf_top *top = arg;
	int delay_msecs, c;

	/* In order to read symbols from other namespaces perf needs to call
	 * setns(2). This isn't permitted if the struct_fs has multiple users.
	 * unshare(2) the fs so that we may continue to setns into namespaces
	 * that we're observing.
	 */
	unshare(CLONE_FS);

	prctl(PR_SET_NAME, "perf-top-UI", 0, 0, 0);

	display_setup_sig();
	pthread__unblock_sigwinch();
repeat:
	delay_msecs = top->delay_secs * MSEC_PER_SEC;
	set_term_quiet_input(&save);
	/* trash return */
	clearerr(stdin);
	if (poll(&stdin_poll, 1, 0) > 0)
		getc(stdin);

	while (!done) {
		perf_top__print_sym_table(top);
		/*
		 * Either timeout expired or we got an EINTR due to SIGWINCH,
		 * refresh screen in both cases.
		 */
		switch (poll(&stdin_poll, 1, delay_msecs)) {
		case 0:
			continue;
		case -1:
			if (errno == EINTR)
				continue;
			/* fall through */
		default:
			c = getc(stdin);
			tcsetattr(0, TCSAFLUSH, &save);

			if (perf_top__handle_keypress(top, c))
				goto repeat;
			stop_top();
		}
	}

	tcsetattr(0, TCSAFLUSH, &save);
	return NULL;
}
static int hist_iter__top_callback(struct hist_entry_iter *iter,
				   struct addr_location *al, bool single,
				   void *arg)
	EXCLUSIVE_LOCKS_REQUIRED(iter->he->hists->lock)
{
	struct perf_top *top = arg;
	struct evsel *evsel = iter->evsel;

	if (perf_hpp_list.sym && single)
		perf_top__record_precise_ip(top, iter->he, iter->sample, evsel, al->addr);

	hist__account_cycles(iter->sample->branch_stack, al, iter->sample,
			     !(top->record_opts.branch_stack & PERF_SAMPLE_BRANCH_ANY),
			     NULL, evsel);
	return 0;
}
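
/*
 * Resolve a PERF_RECORD_SAMPLE to thread/map/symbol and feed it into the
 * histogram of the event that generated it, warning once when kernel samples
 * cannot be resolved.
 */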
static void perf_event__process_sample(const struct perf_tool *tool,
				       const union perf_event *event,
				       struct evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_top *top = container_of(tool, struct perf_top, tool);
	struct addr_location al;

	if (!machine && perf_guest) {
		static struct intlist *seen;

		if (!seen)
			seen = intlist__new(NULL);

		if (!intlist__has_entry(seen, sample->pid)) {
			pr_err("Can't find guest [%d]'s kernel information\n",
			       sample->pid);
			intlist__add(seen, sample->pid);
		}
		return;
	}

	if (!machine) {
		pr_err("%u unprocessable samples recorded.\r",
		       top->session->evlist->stats.nr_unprocessable_samples++);
		return;
	}

	if (event->header.misc & PERF_RECORD_MISC_EXACT_IP)
		top->exact_samples++;

	addr_location__init(&al);
	if (machine__resolve(machine, &al, sample) < 0)
		goto out;

	if (top->stitch_lbr)
		thread__set_lbr_stitch_enable(al.thread, true);

	if (!machine->kptr_restrict_warned &&
	    symbol_conf.kptr_restrict &&
	    al.cpumode == PERF_RECORD_MISC_KERNEL) {
		if (!evlist__exclude_kernel(top->session->evlist)) {
			ui__warning(
"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
"Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
"Kernel%s samples will not be resolved.\n",
				    al.map && map__has_symbols(al.map) ?
				    " modules" : "");
			if (use_browser <= 0)
				sleep(5);
		}
		machine->kptr_restrict_warned = true;
	}

	if (al.sym == NULL && al.map != NULL) {
		const char *msg = "Kernel samples will not be resolved.\n";
		/*
		 * As we do lazy loading of symtabs we only will know if the
		 * specified vmlinux file is invalid when we actually have a
		 * hit in kernel space and then try to load it. So if we get
		 * here and there are _no_ symbols in the DSO backing the
		 * kernel map, bail out.
		 *
		 * We may never get here, for instance, if we use -K/
		 * --hide-kernel-symbols, even if the user specifies an
		 * invalid --vmlinux ;-)
		 */
		if (!machine->kptr_restrict_warned && !top->vmlinux_warned &&
		    __map__is_kernel(al.map) && map__has_symbols(al.map)) {
			if (symbol_conf.vmlinux_name) {
				char serr[256];

				dso__strerror_load(map__dso(al.map), serr, sizeof(serr));
				ui__warning("The %s file can't be used: %s\n%s",
					    symbol_conf.vmlinux_name, serr, msg);
			} else {
				ui__warning("A vmlinux file was not found.\n%s",
					    msg);
			}

			if (use_browser <= 0)
				sleep(5);
			top->vmlinux_warned = true;
		}
	}

	if (al.sym == NULL || !al.sym->idle) {
		struct hists *hists = evsel__hists(evsel);
		struct hist_entry_iter iter = {
			.evsel		= evsel,
			.sample		= sample,
			.add_entry_cb	= hist_iter__top_callback,
		};

		if (symbol_conf.cumulate_callchain)
			iter.ops = &hist_iter_cumulative;
		else
			iter.ops = &hist_iter_normal;

		mutex_lock(&hists->lock);

		if (hist_entry_iter__add(&iter, &al, top->max_stack, top) < 0)
			pr_err("Problem incrementing symbol period, skipping event\n");

		mutex_unlock(&hists->lock);
	}

out:
	addr_location__exit(&al);
}
static void
perf_top__process_lost(struct perf_top *top, union perf_event *event,
		       struct evsel *evsel)
{
	top->lost += event->lost.lost;
	top->lost_total += event->lost.lost;
	evsel->evlist->stats.total_lost += event->lost.lost;
}

static void
perf_top__process_lost_samples(struct perf_top *top,
			       union perf_event *event,
			       struct evsel *evsel)
{
	top->lost += event->lost_samples.lost;
	top->lost_total += event->lost_samples.lost;
	evsel->evlist->stats.total_lost_samples += event->lost_samples.lost;
}
static u64 last_timestamp;
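
/*
 * Drain one mmap ring buffer: timestamp each event and queue it on the "in"
 * ordered-events queue that the process thread flushes in timestamp order.
 */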
static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
{
	struct record_opts *opts = &top->record_opts;
	struct evlist *evlist = top->evlist;
	struct mmap *md;
	union perf_event *event;

	md = opts->overwrite ? &evlist->overwrite_mmap[idx] : &evlist->mmap[idx];
	if (perf_mmap__read_init(&md->core) < 0)
		return;

	while ((event = perf_mmap__read_event(&md->core)) != NULL) {
		int ret;

		ret = evlist__parse_sample_timestamp(evlist, event, &last_timestamp);
		if (ret && ret != -1)
			break;

		ret = ordered_events__queue(top->qe.in, event, last_timestamp, 0, NULL);
		if (ret)
			break;

		perf_mmap__consume(&md->core);

		if (top->qe.rotate) {
			mutex_lock(&top->qe.mutex);
			top->qe.rotate = false;
			cond_signal(&top->qe.cond);
			mutex_unlock(&top->qe.mutex);
		}
	}

	perf_mmap__read_done(&md->core);
}
static void perf_top__mmap_read(struct perf_top *top)
{
	bool overwrite = top->record_opts.overwrite;
	struct evlist *evlist = top->evlist;
	int i;

	if (overwrite)
		evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING);

	for (i = 0; i < top->evlist->core.nr_mmaps; i++)
		perf_top__mmap_read_idx(top, i);

	if (overwrite) {
		evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
		evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
	}
}
/*
 * Check per-event overwrite term.
 * perf top should support consistent term for all events.
 * - All events don't have per-event term
 *   E.g. "cpu/cpu-cycles/,cpu/instructions/"
 *   Nothing change, return 0.
 * - All events have same per-event term
 *   E.g. "cpu/cpu-cycles,no-overwrite/,cpu/instructions,no-overwrite/
 *   Using the per-event setting to replace the opts->overwrite if
 *   they are different, then return 0.
 * - Events have different per-event term
 *   E.g. "cpu/cpu-cycles,overwrite/,cpu/instructions,no-overwrite/"
 *   Return -1
 * - Some of the event set per-event term, but some not.
 *   E.g. "cpu/cpu-cycles/,cpu/instructions,no-overwrite/"
 *   Return -1
 */
static int perf_top__overwrite_check(struct perf_top *top)
{
	struct record_opts *opts = &top->record_opts;
	struct evlist *evlist = top->evlist;
	struct evsel_config_term *term;
	struct list_head *config_terms;
	struct evsel *evsel;
	int set, overwrite = -1;

	evlist__for_each_entry(evlist, evsel) {
		set = -1;
		config_terms = &evsel->config_terms;
		list_for_each_entry(term, config_terms, list) {
			if (term->type == EVSEL__CONFIG_TERM_OVERWRITE)
				set = term->val.overwrite ? 1 : 0;
		}

		/* no term for current and previous event (likely) */
		if ((overwrite < 0) && (set < 0))
			continue;

		/* has term for both current and previous event, compare */
		if ((overwrite >= 0) && (set >= 0) && (overwrite != set))
			return -1;

		/* no term for current event but has term for previous one */
		if ((overwrite >= 0) && (set < 0))
			return -1;

		/* has term for current event */
		if ((overwrite < 0) && (set >= 0)) {
			/* if it's first event, set overwrite */
			if (evsel == evlist__first(evlist))
				overwrite = set;
			else
				return -1;
		}
	}

	if ((overwrite >= 0) && (opts->overwrite != overwrite))
		opts->overwrite = overwrite;

	return 0;
}
static int perf_top_overwrite_fallback(struct perf_top *top,
				       struct evsel *evsel)
{
	struct record_opts *opts = &top->record_opts;
	struct evlist *evlist = top->evlist;
	struct evsel *counter;

	if (!opts->overwrite)
		return 0;

	/* only fall back when first event fails */
	if (evsel != evlist__first(evlist))
		return 0;

	evlist__for_each_entry(evlist, counter)
		counter->core.attr.write_backward = false;
	opts->overwrite = false;
	pr_debug2("fall back to non-overwrite mode\n");
	return 1;
}
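
/*
 * Open all counters, falling back from overwrite (backward ring buffer) mode
 * or to a weaker event encoding when the initial open fails, then apply
 * filters and mmap the ring buffers.
 */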
static int perf_top__start_counters(struct perf_top *top)
{
	char msg[BUFSIZ];
	struct evsel *counter;
	struct evlist *evlist = top->evlist;
	struct record_opts *opts = &top->record_opts;

	if (perf_top__overwrite_check(top)) {
		ui__error("perf top only support consistent per-event "
			  "overwrite setting for all events\n");
		goto out_err;
	}

	evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, counter) {
try_again:
		if (evsel__open(counter, counter->core.cpus,
				counter->core.threads) < 0) {

			/*
			 * Specially handle overwrite fall back.
			 * Because perf top is the only tool which has
			 * overwrite mode by default, support
			 * both overwrite and non-overwrite mode, and
			 * require consistent mode for all events.
			 *
			 * May move it to generic code with more tools
			 * have similar attribute.
			 */
			if (perf_missing_features.write_backward &&
			    perf_top_overwrite_fallback(top, counter))
				goto try_again;

			if (evsel__fallback(counter, &opts->target, errno, msg, sizeof(msg))) {
				if (verbose > 0)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			evsel__open_strerror(counter, &opts->target, errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out_err;
		}
	}

	if (evlist__apply_filters(evlist, &counter, &opts->target)) {
		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
			counter->filter ?: "BPF", evsel__name(counter), errno,
			str_error_r(errno, msg, sizeof(msg)));
		goto out_err;
	}

	if (evlist__mmap(evlist, opts->mmap_pages) < 0) {
		ui__error("Failed to mmap with %d (%s)\n",
			  errno, str_error_r(errno, msg, sizeof(msg)));
		goto out_err;
	}

	return 0;

out_err:
	return -1;
}
static int callchain_param__setup_sample_type(struct callchain_param *callchain)
{
	if (callchain->mode != CHAIN_NONE) {
		if (callchain_register_param(callchain) < 0) {
			ui__error("Can't register callchain params.\n");
			return -EINVAL;
		}
	}

	return 0;
}
static struct ordered_events *rotate_queues(struct perf_top *top)
{
	struct ordered_events *in = top->qe.in;

	if (top->qe.in == &top->qe.data[1])
		top->qe.in = &top->qe.data[0];
	else
		top->qe.in = &top->qe.data[1];

	return in;
}
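
/*
 * Double buffering of the ordered-events queues: the mmap reader fills
 * top->qe.in while this thread flushes the other queue; the two are swapped
 * under top->qe.mutex once the reader acknowledges the rotation.
 */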
static void *process_thread(void *arg)
{
	struct perf_top *top = arg;

	while (!done) {
		struct ordered_events *out, *in = top->qe.in;

		if (!in->nr_events) {
			usleep(100);
			continue;
		}

		out = rotate_queues(top);

		mutex_lock(&top->qe.mutex);
		top->qe.rotate = true;
		cond_wait(&top->qe.cond, &top->qe.mutex);
		mutex_unlock(&top->qe.mutex);

		if (ordered_events__flush(out, OE_FLUSH__TOP))
			pr_err("failed to process events\n");
	}

	return NULL;
}
/*
 * Allow only 'top->delay_secs' seconds behind samples.
 */
static int should_drop(struct ordered_event *qevent, struct perf_top *top)
{
	union perf_event *event = qevent->event;
	u64 delay_timestamp;

	if (event->header.type != PERF_RECORD_SAMPLE)
		return false;

	delay_timestamp = qevent->timestamp + top->delay_secs * NSEC_PER_SEC;
	return delay_timestamp < last_timestamp;
}
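
/*
 * Called by ordered_events__flush() for each queued event: drop samples that
 * are too far behind, pick the machine the sample belongs to and dispatch it
 * to the sample/lost handlers above.
 */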
static int deliver_event(struct ordered_events *qe,
			 struct ordered_event *qevent)
{
	struct perf_top *top = qe->data;
	struct evlist *evlist = top->evlist;
	struct perf_session *session = top->session;
	union perf_event *event = qevent->event;
	struct perf_sample sample;
	struct evsel *evsel;
	struct machine *machine;
	int ret = -1;

	if (should_drop(qevent, top)) {
		top->drop++;
		top->drop_total++;
		return 0;
	}

	ret = evlist__parse_sample(evlist, event, &sample);
	if (ret) {
		pr_err("Can't parse sample, err = %d\n", ret);
		goto next_event;
	}

	evsel = evlist__id2evsel(session->evlist, sample.id);
	assert(evsel != NULL);

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evswitch__discard(&top->evswitch, evsel))
			return 0;
		++top->samples;
	}

	switch (sample.cpumode) {
	case PERF_RECORD_MISC_USER:
		++top->us_samples;
		if (top->hide_user_symbols)
			goto next_event;
		machine = &session->machines.host;
		break;
	case PERF_RECORD_MISC_KERNEL:
		++top->kernel_samples;
		if (top->hide_kernel_symbols)
			goto next_event;
		machine = &session->machines.host;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		++top->guest_kernel_samples;
		machine = perf_session__find_machine(session,
						     sample.pid);
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		++top->guest_us_samples;
		/*
		 * TODO: we don't process guest user from host side
		 * except simple counting.
		 */
		goto next_event;
	default:
		if (event->header.type == PERF_RECORD_SAMPLE)
			goto next_event;
		machine = &session->machines.host;
		break;
	}

	if (event->header.type == PERF_RECORD_SAMPLE) {
		perf_event__process_sample(&top->tool, event, evsel,
					   &sample, machine);
	} else if (event->header.type == PERF_RECORD_LOST) {
		perf_top__process_lost(top, event, evsel);
	} else if (event->header.type == PERF_RECORD_LOST_SAMPLES) {
		perf_top__process_lost_samples(top, event, evsel);
	} else if (event->header.type < PERF_RECORD_MAX) {
		events_stats__inc(&session->evlist->stats, event->header.type);
		machine__process_event(machine, event, &sample);
	} else
		++session->evlist->stats.nr_unknown_events;

	ret = 0;
next_event:
	return ret;
}
static void init_process_thread(struct perf_top *top)
{
	ordered_events__init(&top->qe.data[0], deliver_event, top);
	ordered_events__init(&top->qe.data[1], deliver_event, top);
	ordered_events__set_copy_on_queue(&top->qe.data[0], true);
	ordered_events__set_copy_on_queue(&top->qe.data[1], true);
	top->qe.in = &top->qe.data[0];
	mutex_init(&top->qe.mutex);
	cond_init(&top->qe.cond);
}

static void exit_process_thread(struct perf_top *top)
{
	ordered_events__free(&top->qe.data[0]);
	ordered_events__free(&top->qe.data[1]);
	mutex_destroy(&top->qe.mutex);
	cond_destroy(&top->qe.cond);
}
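
/*
 * Main monitoring loop: synthesize pre-existing threads, start the counters,
 * spawn the display and process threads, then keep draining the mmap ring
 * buffers until the user quits.
 */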
static int __cmd_top(struct perf_top *top)
{
	struct record_opts *opts = &top->record_opts;
	pthread_t thread, thread_process;
	int ret;

	if (!annotate_opts.objdump_path) {
		ret = perf_env__lookup_objdump(&top->session->header.env,
					       &annotate_opts.objdump_path);
		if (ret)
			return ret;
	}

	ret = callchain_param__setup_sample_type(&callchain_param);
	if (ret)
		return ret;

	if (perf_session__register_idle_thread(top->session) < 0)
		return ret;

	if (top->nr_threads_synthesize > 1)
		perf_set_multithreaded();

	init_process_thread(top);

	if (opts->record_namespaces)
		top->tool.namespace_events = true;
	if (opts->record_cgroup) {
#ifdef HAVE_FILE_HANDLE
		top->tool.cgroup_events = true;
#else
		pr_err("cgroup tracking is not supported.\n");
		return -1;
#endif
	}

	ret = perf_event__synthesize_bpf_events(top->session, perf_event__process,
						&top->session->machines.host,
						&top->record_opts);
	if (ret < 0)
		pr_debug("Couldn't synthesize BPF events: Pre-existing BPF programs won't have symbols resolved.\n");

	ret = perf_event__synthesize_cgroups(&top->tool, perf_event__process,
					     &top->session->machines.host);
	if (ret < 0)
		pr_debug("Couldn't synthesize cgroup events.\n");

	machine__synthesize_threads(&top->session->machines.host, &opts->target,
				    top->evlist->core.threads, true, false,
				    top->nr_threads_synthesize);

	if (top->nr_threads_synthesize > 1)
		perf_set_singlethreaded();

	if (perf_hpp_list.socket) {
		ret = perf_env__read_cpu_topology_map(&perf_env);
		if (ret < 0) {
			char errbuf[BUFSIZ];
			const char *err = str_error_r(-ret, errbuf, sizeof(errbuf));

			ui__error("Could not read the CPU topology map: %s\n", err);
			return ret;
		}
	}

	evlist__uniquify_name(top->evlist);
	ret = perf_top__start_counters(top);
	if (ret)
		return ret;

	top->session->evlist = top->evlist;
	perf_session__set_id_hdr_size(top->session);

	/*
	 * When perf is starting the traced process, all the events (apart from
	 * group members) have enable_on_exec=1 set, so don't spoil it by
	 * prematurely enabling them.
	 *
	 * XXX 'top' still doesn't start workloads like record, trace, but should,
	 * so leave the check here.
	 */
	if (!target__none(&opts->target))
		evlist__enable(top->evlist);

	if (pthread_create(&thread_process, NULL, process_thread, top)) {
		ui__error("Could not create process thread.\n");
		return -1;
	}

	if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
							     display_thread), top)) {
		ui__error("Could not create display thread.\n");
		goto out_join_thread;
	}

	if (top->realtime_prio) {
		struct sched_param param;

		param.sched_priority = top->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			ui__error("Could not set realtime priority.\n");
			goto out_join;
		}
	}

	/* Wait for a minimal set of events before starting the snapshot */
	evlist__poll(top->evlist, 100);

	perf_top__mmap_read(top);

	while (!done) {
		u64 hits = top->samples;

		perf_top__mmap_read(top);

		if (opts->overwrite || (hits == top->samples))
			ret = evlist__poll(top->evlist, 100);

		if (resize) {
			perf_top__resize(top);
			resize = 0;
		}
	}

	ret = 0;
out_join:
	pthread_join(thread, NULL);
out_join_thread:
	cond_signal(&top->qe.cond);
	pthread_join(thread_process, NULL);
	perf_set_singlethreaded();
	exit_process_thread(top);
	return ret;
}
static int
callchain_opt(const struct option *opt, const char *arg, int unset)
{
	symbol_conf.use_callchain = true;
	return record_callchain_opt(opt, arg, unset);
}

static int
parse_callchain_opt(const struct option *opt, const char *arg, int unset)
{
	struct callchain_param *callchain = opt->value;

	callchain->enabled = !unset;
	callchain->record_mode = CALLCHAIN_FP;

	/*
	 * --no-call-graph
	 */
	if (unset) {
		symbol_conf.use_callchain = false;
		callchain->record_mode = CALLCHAIN_NONE;
		return 0;
	}

	return parse_callchain_top_opt(arg);
}
static int perf_top_config(const char *var, const char *value, void *cb __maybe_unused)
{
	if (!strcmp(var, "top.call-graph")) {
		var = "call-graph.record-mode";
		return perf_default_config(var, value, cb);
	}
	if (!strcmp(var, "top.children")) {
		symbol_conf.cumulate_callchain = perf_config_bool(var, value);
		return 0;
	}

	return 0;
}
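
/*
 * Illustrative ~/.perfconfig snippet handled by perf_top_config() above; the
 * values shown are examples, not defaults:
 *
 *	[top]
 *		call-graph = fp
 *		children = true
 */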
static int
parse_percent_limit(const struct option *opt, const char *arg,
		    int unset __maybe_unused)
{
	struct perf_top *top = opt->value;

	top->min_percent = strtof(arg, NULL);
	return 0;
}

const char top_callchain_help[] = CALLCHAIN_RECORD_HELP CALLCHAIN_REPORT_HELP
	"\n\t\t\t\tDefault: fp,graph,0.5,caller,function";
int cmd_top(int argc, const char **argv)
{
	char errbuf[BUFSIZ];
	struct perf_top top = {
		.record_opts = {
			.mmap_pages	= UINT_MAX,
			.user_freq	= UINT_MAX,
			.user_interval	= ULLONG_MAX,
			.freq		= 4000, /* 4 KHz */
			/*
			 * FIXME: This will lose PERF_RECORD_MMAP and other metadata
			 * when we pause, fix that and reenable. Probably using a
			 * separate evlist with a dummy event, i.e. a non-overwrite
			 * ring buffer just for metadata events, while PERF_RECORD_SAMPLE
			 * stays in overwrite mode. -acme
			 */
			.overwrite	= 0,
			.sample_time	= true,
			.sample_time_set = true,
		},
		.max_stack	     = sysctl__max_stack(),
		.nr_threads_synthesize = UINT_MAX,
	};
	struct parse_events_option_args parse_events_option_args = {
		.evlistp = &top.evlist,
	};
	bool branch_call_mode = false;
	struct record_opts *opts = &top.record_opts;
	struct target *target = &opts->target;
	const char *disassembler_style = NULL, *objdump_path = NULL, *addr2line_path = NULL;
	const struct option options[] = {
	OPT_CALLBACK('e', "event", &parse_events_option_args, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &top.evlist, "filter",
		     "event filter", parse_filter),
	OPT_U64('c', "count", &opts->user_interval, "event period to sample"),
	OPT_STRING('p', "pid", &target->pid, "pid",
		   "profile events on existing process id"),
	OPT_STRING('t', "tid", &target->tid, "tid",
		   "profile events on existing thread id"),
	OPT_BOOLEAN('a', "all-cpus", &target->system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &target->cpu_list, "cpu",
		   "list of cpus to monitor"),
	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
		    "don't load vmlinux even if found"),
	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
		   "file", "kallsyms pathname"),
	OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols,
		    "hide kernel symbols"),
	OPT_CALLBACK('m', "mmap-pages", &opts->mmap_pages, "pages",
		     "number of mmap data pages", evlist__parse_mmap_pages),
	OPT_INTEGER('r', "realtime", &top.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_INTEGER('d', "delay", &top.delay_secs,
		    "number of seconds to delay between refreshes"),
	OPT_BOOLEAN('D', "dump-symtab", &top.dump_symtab,
		    "dump the symbol table used for profiling"),
	OPT_INTEGER('f', "count-filter", &top.count_filter,
		    "only display functions with more events than this"),
	OPT_BOOLEAN('i', "no-inherit", &opts->no_inherit,
		    "child tasks do not inherit counters"),
	OPT_STRING(0, "sym-annotate", &top.sym_filter, "symbol name",
		   "symbol to annotate"),
	OPT_BOOLEAN('z', "zero", &top.zero, "zero history across updates"),
	OPT_CALLBACK('F', "freq", &top.record_opts, "freq or 'max'",
		     "profile at this frequency",
		     record__parse_freq),
	OPT_INTEGER('E', "entries", &top.print_entries,
		    "display this many functions"),
	OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols,
		    "hide user symbols"),
#ifdef HAVE_SLANG_SUPPORT
	OPT_BOOLEAN(0, "tui", &top.use_tui, "Use the TUI interface"),
#endif
	OPT_BOOLEAN(0, "stdio", &top.use_stdio, "Use the stdio interface"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show counter open errors, etc)"),
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		   "sort by key(s): pid, comm, dso, symbol, parent, cpu, srcline, ..."
		   " Please refer the man page for the complete list."),
	OPT_STRING(0, "fields", &field_order, "key[,keys...]",
		   "output field(s): overhead, period, sample plus all of sort keys"),
	OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
		    "Show a column with the number of samples"),
	OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
			   NULL, "enables call-graph recording and display",
			   &callchain_opt),
	OPT_CALLBACK(0, "call-graph", &callchain_param,
		     "record_mode[,record_size],print_type,threshold[,print_limit],order,sort_key[,branch]",
		     top_callchain_help, &parse_callchain_opt),
	OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
		    "Accumulate callchains of children and show total overhead as well"),
	OPT_INTEGER(0, "max-stack", &top.max_stack,
		    "Set the maximum stack depth when parsing the callchain. "
		    "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
	OPT_CALLBACK(0, "ignore-callees", NULL, "regex",
		     "ignore callees of these functions in call graphs",
		     report_parse_ignore_callees_opt),
	OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
		    "Show a column with the sum of periods"),
	OPT_STRING(0, "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
		   "only consider symbols in these dsos"),
	OPT_STRING(0, "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
		   "only consider symbols in these comms"),
	OPT_STRING(0, "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
		   "only consider these symbols"),
	OPT_BOOLEAN(0, "source", &annotate_opts.annotate_src,
		    "Interleave source code with assembly code (default)"),
	OPT_BOOLEAN(0, "asm-raw", &annotate_opts.show_asm_raw,
		    "Display raw encoding of assembly instructions (default)"),
	OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
		    "Enable kernel symbol demangling"),
	OPT_BOOLEAN(0, "no-bpf-event", &top.record_opts.no_bpf_event, "do not record bpf events"),
	OPT_STRING(0, "objdump", &objdump_path, "path",
		   "objdump binary to use for disassembly and annotations"),
	OPT_STRING(0, "addr2line", &addr2line_path, "path",
		   "addr2line binary to use for line numbers"),
	OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
		   "Specify disassembler style (e.g. -M intel for intel syntax)"),
	OPT_STRING(0, "prefix", &annotate_opts.prefix, "prefix",
		   "Add prefix to source file path names in programs (with --prefix-strip)"),
	OPT_STRING(0, "prefix-strip", &annotate_opts.prefix_strip, "N",
		   "Strip first N entries of source file path name in programs (with --prefix)"),
	OPT_STRING('u', "uid", &target->uid_str, "user", "user to profile"),
	OPT_CALLBACK(0, "percent-limit", &top, "percent",
		     "Don't show entries under that percent", parse_percent_limit),
	OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
		     "How to display percentage of filtered entries", parse_filter_percentage),
	OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
		   "width[,width...]",
		   "don't try to adjust column width, use these fixed values"),
	OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
		     "per thread proc mmap processing timeout in ms"),
	OPT_CALLBACK_NOOPT('b', "branch-any", &opts->branch_stack,
			   "branch any", "sample any taken branches",
			   parse_branch_stack),
	OPT_CALLBACK('j', "branch-filter", &opts->branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN(0, "branch-history", &branch_call_mode,
		    "add last branch records to call history"),
	OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace,
		    "Show raw trace event output (do not use print fmt or plugins)"),
	OPT_BOOLEAN('H', "hierarchy", &symbol_conf.report_hierarchy,
		    "Show entries in a hierarchy"),
	OPT_BOOLEAN(0, "overwrite", &top.record_opts.overwrite,
		    "Use a backward ring buffer, default: no"),
	OPT_BOOLEAN(0, "force", &symbol_conf.force, "don't complain, do it"),
	OPT_UINTEGER(0, "num-thread-synthesize", &top.nr_threads_synthesize,
		     "number of thread to run event synthesize"),
	OPT_CALLBACK('G', "cgroup", &top.evlist, "name",
		     "monitor event in cgroup name only", parse_cgroups),
	OPT_BOOLEAN(0, "namespaces", &opts->record_namespaces,
		    "Record namespaces events"),
	OPT_BOOLEAN(0, "all-cgroups", &opts->record_cgroup,
		    "Record cgroup events"),
	OPT_INTEGER(0, "group-sort-idx", &symbol_conf.group_sort_idx,
		    "Sort the output by the event at the index n in group. "
		    "If n is invalid, sort by the first event. "
		    "WARNING: should be used on grouped events."),
	OPT_BOOLEAN(0, "stitch-lbr", &top.stitch_lbr,
		    "Enable LBR callgraph stitching approach"),
#ifdef HAVE_LIBPFM
	OPT_CALLBACK(0, "pfm-events", &top.evlist, "event",
		     "libpfm4 event selector. use 'perf list' to list available events",
		     parse_libpfm_events_option),
#endif
	OPTS_EVSWITCH(&top.evswitch),
	OPT_END()
	};
	const char * const top_usage[] = {
		"perf top [<options>]",
		NULL
	};
	int status = hists__init();

	if (status < 0)
		return status;

	annotation_options__init();

	annotate_opts.min_pcnt = 5;
	annotate_opts.context  = 4;

	top.evlist = evlist__new();
	if (top.evlist == NULL)
		return -ENOMEM;

	status = perf_config(perf_top_config, &top);
	if (status)
		return status;
	/*
	 * Since the per arch annotation init routine may need the cpuid, read
	 * it here, since we are not getting this from the perf.data header.
	 */
	status = perf_env__read_cpuid(&perf_env);
	if (status) {
		/*
		 * Some arches do not provide a get_cpuid(), so just use pr_debug, otherwise
		 * warn the user explicitly.
		 */
		eprintf(status == ENOSYS ? 1 : 0, verbose,
			"Couldn't read the cpuid for this machine: %s\n",
			str_error_r(errno, errbuf, sizeof(errbuf)));
	}
	top.evlist->env = &perf_env;
	argc = parse_options(argc, argv, options, top_usage, 0);
	if (argc)
		usage_with_options(top_usage, options);

	if (disassembler_style) {
		annotate_opts.disassembler_style = strdup(disassembler_style);
		if (!annotate_opts.disassembler_style)
			return -ENOMEM;
	}
	if (objdump_path) {
		annotate_opts.objdump_path = strdup(objdump_path);
		if (!annotate_opts.objdump_path)
			return -ENOMEM;
	}
	if (addr2line_path) {
		symbol_conf.addr2line_path = strdup(addr2line_path);
		if (!symbol_conf.addr2line_path)
			return -ENOMEM;
	}

	status = symbol__validate_sym_arguments();
	if (status)
		goto out_delete_evlist;

	if (annotate_check_args() < 0)
		goto out_delete_evlist;

	if (!top.evlist->core.nr_entries) {
		bool can_profile_kernel = perf_event_paranoid_check(1);
		int err = parse_event(top.evlist, can_profile_kernel ? "cycles:P" : "cycles:Pu");

		if (err)
			goto out_delete_evlist;
	}

	status = evswitch__init(&top.evswitch, top.evlist, stderr);
	if (status)
		goto out_delete_evlist;

	if (symbol_conf.report_hierarchy) {
		/* disable incompatible options */
		symbol_conf.event_group = false;
		symbol_conf.cumulate_callchain = false;

		if (field_order) {
			pr_err("Error: --hierarchy and --fields options cannot be used together\n");
			parse_options_usage(top_usage, options, "fields", 0);
			parse_options_usage(NULL, options, "hierarchy", 0);
			goto out_delete_evlist;
		}
	}

	if (top.stitch_lbr && !(callchain_param.record_mode == CALLCHAIN_LBR)) {
		pr_err("Error: --stitch-lbr must be used with --call-graph lbr\n");
		goto out_delete_evlist;
	}

	if (nr_cgroups > 0 && opts->record_cgroup) {
		pr_err("--cgroup and --all-cgroups cannot be used together\n");
		goto out_delete_evlist;
	}
	if (branch_call_mode) {
		if (!opts->branch_stack)
			opts->branch_stack = PERF_SAMPLE_BRANCH_ANY;
		symbol_conf.use_callchain = true;
		callchain_param.key = CCKEY_ADDRESS;
		callchain_param.branch_callstack = true;
		callchain_param.enabled = true;
		if (callchain_param.record_mode == CALLCHAIN_NONE)
			callchain_param.record_mode = CALLCHAIN_FP;
		callchain_register_param(&callchain_param);
		if (!sort_order)
			sort_order = "srcline,symbol,dso";
	}

	if (opts->branch_stack && callchain_param.enabled)
		symbol_conf.show_branchflag_count = true;

	sort__mode = SORT_MODE__TOP;
	/* display thread wants entries to be collapsed in a different tree */
	perf_hpp_list.need_collapse = 1;

	if (top.use_stdio)
		use_browser = 0;
#ifdef HAVE_SLANG_SUPPORT
	else if (top.use_tui)
		use_browser = 1;
#endif

	setup_browser(false);

	if (setup_sorting(top.evlist) < 0) {
		if (sort_order)
			parse_options_usage(top_usage, options, "s", 1);
		if (field_order)
			parse_options_usage(sort_order ? NULL : top_usage,
					    options, "fields", 0);
		goto out_delete_evlist;
	}
	status = target__validate(target);
	if (status) {
		target__strerror(target, status, errbuf, BUFSIZ);
		ui__warning("%s\n", errbuf);
	}

	status = target__parse_uid(target);
	if (status) {
		int saved_errno = errno;

		target__strerror(target, status, errbuf, BUFSIZ);
		ui__error("%s\n", errbuf);

		status = -saved_errno;
		goto out_delete_evlist;
	}

	if (target__none(target))
		target->system_wide = true;

	if (evlist__create_maps(top.evlist, target) < 0) {
		ui__error("Couldn't create thread/CPU maps: %s\n",
			  errno == ENOENT ? "No such process" : str_error_r(errno, errbuf, sizeof(errbuf)));
		status = -errno;
		goto out_delete_evlist;
	}

	if (top.delay_secs < 1)
		top.delay_secs = 1;

	if (record_opts__config(opts)) {
		status = -EINVAL;
		goto out_delete_evlist;
	}

	top.sym_evsel = evlist__first(top.evlist);

	if (!callchain_param.enabled) {
		symbol_conf.cumulate_callchain = false;
		perf_hpp__cancel_cumulate();
	}

	if (symbol_conf.cumulate_callchain && !callchain_param.order_set)
		callchain_param.order = ORDER_CALLER;

	status = symbol__annotation_init();
	if (status < 0)
		goto out_delete_evlist;

	annotation_config__init();

	symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
	status = symbol__init(NULL);
	if (status < 0)
		goto out_delete_evlist;

	sort__setup_elide(stdout);
	get_term_dimensions(&top.winsize);
	if (top.print_entries == 0) {
		perf_top__update_print_entries(&top);
		signal(SIGWINCH, winch_sig);
	}

	top.session = perf_session__new(NULL, NULL);
	if (IS_ERR(top.session)) {
		status = PTR_ERR(top.session);
		top.session = NULL;
		goto out_delete_evlist;
	}

#ifdef HAVE_LIBBPF_SUPPORT
	if (!top.record_opts.no_bpf_event) {
		top.sb_evlist = evlist__new();

		if (top.sb_evlist == NULL) {
			pr_err("Couldn't create side band evlist.\n.");
			status = -EINVAL;
			goto out_delete_evlist;
		}

		if (evlist__add_bpf_sb_event(top.sb_evlist, &perf_env)) {
			pr_err("Couldn't ask for PERF_RECORD_BPF_EVENT side band events.\n.");
			status = -EINVAL;
			goto out_delete_evlist;
		}
	}
#endif

	if (evlist__start_sb_thread(top.sb_evlist, target)) {
		pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
		opts->no_bpf_event = true;
	}

	status = __cmd_top(&top);

	if (!opts->no_bpf_event)
		evlist__stop_sb_thread(top.sb_evlist);

out_delete_evlist:
	evlist__delete(top.evlist);
	perf_session__delete(top.session);
	annotation_options__exit();