/*
 * builtin-top.c
 *
 * Builtin top command: Display a continuously updated profile of
 * any workload, CPU or specific PID.
 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *               2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "builtin.h"
#include "perf.h"

#include "util/annotate.h"
#include "util/cache.h"
#include "util/color.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include "util/top.h"
#include "util/util.h"
#include <linux/rbtree.h>
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/cpumap.h"
#include "util/xyarray.h"
#include "util/sort.h"
#include "util/intlist.h"

#include "util/debug.h"

#include <assert.h>
#include <elf.h>
#include <stdio.h>
#include <termios.h>
#include <unistd.h>
#include <inttypes.h>
#include <errno.h>
#include <sched.h>

#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/prctl.h>
#include <sys/utsname.h>
#include <sys/mman.h>

#include <linux/unistd.h>
#include <linux/types.h>
void get_term_dimensions(struct winsize *ws)
{
        char *s = getenv("LINES");

        if (s != NULL) {
                ws->ws_row = atoi(s);
                s = getenv("COLUMNS");
                if (s != NULL) {
                        ws->ws_col = atoi(s);
                        if (ws->ws_row && ws->ws_col)
                                return;
                }
        }
#ifdef TIOCGWINSZ
        if (ioctl(1, TIOCGWINSZ, ws) == 0 &&
            ws->ws_row && ws->ws_col)
                return;
#endif
        ws->ws_row = 25;
        ws->ws_col = 80;
}
static void perf_top__update_print_entries(struct perf_top *top)
{
        if (top->print_entries > 9)
                top->print_entries -= 9;
}
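
/*
 * SIGWINCH handling: re-read the terminal dimensions and clamp the number
 * of printed entries so the symbol table keeps fitting on the screen.
 */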
static void perf_top__sig_winch(int sig __used, siginfo_t *info __used, void *arg)
{
        struct perf_top *top = arg;

        get_term_dimensions(&top->winsize);
        if (!top->print_entries
            || (top->print_entries + 4) > top->winsize.ws_row) {
                top->print_entries = top->winsize.ws_row;
        } else {
                top->print_entries += 4;
                top->winsize.ws_row = top->print_entries;
        }
        perf_top__update_print_entries(top);
}
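
/*
 * Set up annotation for the selected histogram entry: allocate the
 * per-symbol histograms under notes->lock and run symbol__annotate().
 * A kallsyms-only kernel map cannot be annotated, hence the vmlinux check.
 */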
static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
{
        struct symbol *sym;
        struct annotation *notes;
        struct map *map;
        int err = -1;

        if (!he || !he->ms.sym)
                return -1;

        sym = he->ms.sym;
        map = he->ms.map;

        /*
         * We can't annotate with just /proc/kallsyms
         */
        if (map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS) {
                pr_err("Can't annotate %s: No vmlinux file was found in the "
                       "path\n", sym->name);
                sleep(1);
                return -1;
        }

        notes = symbol__annotation(sym);
        if (notes->src != NULL) {
                pthread_mutex_lock(&notes->lock);
                goto out_assign;
        }

        pthread_mutex_lock(&notes->lock);

        if (symbol__alloc_hist(sym) < 0) {
                pthread_mutex_unlock(&notes->lock);
                pr_err("Not enough memory for annotating '%s' symbol!\n",
                       sym->name);
                sleep(1);
                return err;
        }

        err = symbol__annotate(sym, map, 0);
        if (err == 0) {
out_assign:
                top->sym_filter_entry = he;
        }

        pthread_mutex_unlock(&notes->lock);
        return err;
}
static void __zero_source_counters(struct hist_entry *he)
{
        struct symbol *sym = he->ms.sym;

        symbol__annotate_zero_histograms(sym);
}
static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
{
        struct utsname uts;
        int err = uname(&uts);

        ui__warning("Out of bounds address found:\n\n"
                    "Addr: %" PRIx64 "\n"
                    "DSO: %s %c\n"
                    "Map: %" PRIx64 "-%" PRIx64 "\n"
                    "Symbol: %" PRIx64 "-%" PRIx64 " %c %s\n"
                    "Arch: %s\n"
                    "Kernel: %s\n"
                    "Tools: %s\n\n"
                    "Not all samples will be on the annotation output.\n\n"
                    "Please report to linux-kernel@vger.kernel.org\n",
                    ip, map->dso->long_name, dso__symtab_origin(map->dso),
                    map->start, map->end, sym->start, sym->end,
                    sym->binding == STB_GLOBAL ? 'g' :
                    sym->binding == STB_LOCAL  ? 'l' : 'w', sym->name,
                    err ? "[unknown]" : uts.machine,
                    err ? "[unknown]" : uts.release, perf_version_string);
        if (use_browser <= 0)
                sleep(5);

        map->erange_warned = true;
}
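
/*
 * Account a sample against the annotated symbol's per-address histogram.
 * notes->lock is only trylock'ed so the sampling path never blocks behind
 * the display thread; an -ERANGE result triggers the out-of-bounds warning
 * above, once per map.
 */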
static void perf_top__record_precise_ip(struct perf_top *top,
                                        struct hist_entry *he,
                                        int counter, u64 ip)
{
        struct annotation *notes;
        struct symbol *sym;
        int err;

        if (he == NULL || he->ms.sym == NULL ||
            ((top->sym_filter_entry == NULL ||
              top->sym_filter_entry->ms.sym != he->ms.sym) && use_browser != 1))
                return;

        sym = he->ms.sym;
        notes = symbol__annotation(sym);

        if (pthread_mutex_trylock(&notes->lock))
                return;

        if (notes->src == NULL && symbol__alloc_hist(sym) < 0) {
                pthread_mutex_unlock(&notes->lock);
                pr_err("Not enough memory for annotating '%s' symbol!\n",
                       sym->name);
                sleep(1);
                return;
        }

        ip = he->ms.map->map_ip(he->ms.map, ip);
        err = symbol__inc_addr_samples(sym, he->ms.map, counter, ip);

        pthread_mutex_unlock(&notes->lock);

        if (err == -ERANGE && !he->ms.map->erange_warned)
                ui__warn_map_erange(he->ms.map, sym, ip);
}
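
/*
 * Print the annotated view of the currently selected symbol, honouring the
 * percent filter and either zeroing or decaying its histogram depending on
 * the 'z' toggle.
 */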
static void perf_top__show_details(struct perf_top *top)
{
        struct hist_entry *he = top->sym_filter_entry;
        struct annotation *notes;
        struct symbol *symbol;
        int more;

        if (!he)
                return;

        symbol = he->ms.sym;
        notes = symbol__annotation(symbol);

        pthread_mutex_lock(&notes->lock);

        if (notes->src == NULL)
                goto out_unlock;

        printf("Showing %s for %s\n", perf_evsel__name(top->sym_evsel), symbol->name);
        printf("  Events  Pcnt (>=%d%%)\n", top->sym_pcnt_filter);

        more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel->idx,
                                       0, top->sym_pcnt_filter, top->print_entries, 4);
        if (top->zero)
                symbol__annotate_zero_histogram(symbol, top->sym_evsel->idx);
        else
                symbol__annotate_decay_histogram(symbol, top->sym_evsel->idx);
        if (more != 0)
                printf("%d lines not displayed, maybe increase display entries [e]\n", more);
out_unlock:
        pthread_mutex_unlock(&notes->lock);
}
static const char CONSOLE_CLEAR[] = "\e[H\e[2J";
static struct hist_entry *perf_evsel__add_hist_entry(struct perf_evsel *evsel,
                                                     struct addr_location *al,
                                                     struct perf_sample *sample)
{
        struct hist_entry *he;

        he = __hists__add_entry(&evsel->hists, al, NULL, sample->period);
        if (he == NULL)
                return NULL;

        hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
        return he;
}
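
/*
 * One refresh of the stdio interface: clear the screen, print the header,
 * warn about newly lost chunks, then resort/decay the histogram and print
 * as many entries as fit in the current terminal window.
 */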
static void perf_top__print_sym_table(struct perf_top *top)
{
        char bf[160];
        int printed = 0;
        const int win_width = top->winsize.ws_col - 1;

        puts(CONSOLE_CLEAR);

        perf_top__header_snprintf(top, bf, sizeof(bf));
        printf("%s\n", bf);

        perf_top__reset_sample_counters(top);

        printf("%-*.*s\n", win_width, win_width, graph_dotted_line);

        if (top->sym_evsel->hists.stats.nr_lost_warned !=
            top->sym_evsel->hists.stats.nr_events[PERF_RECORD_LOST]) {
                top->sym_evsel->hists.stats.nr_lost_warned =
                        top->sym_evsel->hists.stats.nr_events[PERF_RECORD_LOST];
                color_fprintf(stdout, PERF_COLOR_RED,
                              "WARNING: LOST %d chunks, Check IO/CPU overload",
                              top->sym_evsel->hists.stats.nr_lost_warned);
                ++printed;
        }

        if (top->sym_filter_entry) {
                perf_top__show_details(top);
                return;
        }

        hists__collapse_resort_threaded(&top->sym_evsel->hists);
        hists__output_resort_threaded(&top->sym_evsel->hists);
        hists__decay_entries_threaded(&top->sym_evsel->hists,
                                      top->hide_user_symbols,
                                      top->hide_kernel_symbols);
        hists__output_recalc_col_len(&top->sym_evsel->hists,
                                     top->winsize.ws_row - 3);
        putchar('\n');
        hists__fprintf(&top->sym_evsel->hists, NULL, false, false,
                       top->winsize.ws_row - 4 - printed, win_width, stdout);
}
static void prompt_integer(int *target, const char *msg)
{
        char *buf = malloc(0), *p;
        size_t dummy = 0;
        int tmp;

        fprintf(stdout, "\n%s: ", msg);
        if (getline(&buf, &dummy, stdin) < 0)
                return;

        p = strchr(buf, '\n');
        if (p)
                *p = 0;

        p = buf;
        while (*p) {
                if (!isdigit(*p))
                        goto out_free;
                p++;
        }
        tmp = strtoul(buf, NULL, 10);
        *target = tmp;
out_free:
        free(buf);
}
static void prompt_percent(int *target, const char *msg)
{
        int tmp = 0;

        prompt_integer(&tmp, msg);
        if (tmp >= 0 && tmp <= 100)
                *target = tmp;
}
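
/*
 * Prompt for a symbol name and, if it is present in the current histogram,
 * make it the annotation target, zeroing the counters of whatever symbol
 * was selected before.
 */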
static void perf_top__prompt_symbol(struct perf_top *top, const char *msg)
{
        char *buf = malloc(0), *p;
        struct hist_entry *syme = top->sym_filter_entry, *n, *found = NULL;
        struct rb_node *next;
        size_t dummy = 0;

        /* zero counters of active symbol */
        if (syme) {
                __zero_source_counters(syme);
                top->sym_filter_entry = NULL;
        }

        fprintf(stdout, "\n%s: ", msg);
        if (getline(&buf, &dummy, stdin) < 0)
                goto out_free;

        p = strchr(buf, '\n');
        if (p)
                *p = 0;

        next = rb_first(&top->sym_evsel->hists.entries);
        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (n->ms.sym && !strcmp(buf, n->ms.sym->name)) {
                        found = n;
                        break;
                }
                next = rb_next(&n->rb_node);
        }

        if (!found) {
                fprintf(stderr, "Sorry, %s is not active.\n", buf);
                sleep(1);
        } else
                perf_top__parse_source(top, found);

out_free:
        free(buf);
}
static void perf_top__print_mapped_keys(struct perf_top *top)
{
        char *name = NULL;

        if (top->sym_filter_entry) {
                struct symbol *sym = top->sym_filter_entry->ms.sym;
                name = sym->name;
        }

        fprintf(stdout, "\nMapped keys:\n");
        fprintf(stdout, "\t[d] display refresh delay. \t(%d)\n", top->delay_secs);
        fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", top->print_entries);

        if (top->evlist->nr_entries > 1)
                fprintf(stdout, "\t[E] active event counter. \t(%s)\n", perf_evsel__name(top->sym_evsel));

        fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top->count_filter);

        fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", top->sym_pcnt_filter);
        fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name ?: "NULL");
        fprintf(stdout, "\t[S] stop annotation.\n");

        fprintf(stdout,
                "\t[K] hide kernel symbols. \t(%s)\n",
                top->hide_kernel_symbols ? "yes" : "no");
        fprintf(stdout,
                "\t[U] hide user symbols. \t(%s)\n",
                top->hide_user_symbols ? "yes" : "no");
        fprintf(stdout, "\t[z] toggle sample zeroing. \t(%d)\n", top->zero ? 1 : 0);
        fprintf(stdout, "\t[qQ] quit.\n");
}
static int perf_top__key_mapped(struct perf_top *top, int c)
{
        switch (c) {
                case 'd':
                case 'e':
                case 'f':
                case 'z':
                case 'q':
                case 'Q':
                case 'K':
                case 'U':
                case 'F':
                case 's':
                case 'S':
                        return 1;
                case 'E':
                        return top->evlist->nr_entries > 1 ? 1 : 0;
                default:
                        break;
        }

        return 0;
}
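
/*
 * Interactive key dispatch for the stdio interface: unmapped keys bring up
 * the mapped-keys help and read one more character in non-canonical,
 * no-echo mode before dispatching.
 */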
static void perf_top__handle_keypress(struct perf_top *top, int c)
{
        if (!perf_top__key_mapped(top, c)) {
                struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
                struct termios tc, save;

                perf_top__print_mapped_keys(top);
                fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
                fflush(stdout);

                tcgetattr(0, &save);
                tc = save;
                tc.c_lflag &= ~(ICANON | ECHO);
                tc.c_cc[VMIN] = 0;
                tc.c_cc[VTIME] = 0;
                tcsetattr(0, TCSANOW, &tc);

                poll(&stdin_poll, 1, -1);
                c = getc(stdin);

                tcsetattr(0, TCSAFLUSH, &save);
                if (!perf_top__key_mapped(top, c))
                        return;
        }

        switch (c) {
                case 'd':
                        prompt_integer(&top->delay_secs, "Enter display delay");
                        if (top->delay_secs < 1)
                                top->delay_secs = 1;
                        break;
                case 'e':
                        prompt_integer(&top->print_entries, "Enter display entries (lines)");
                        if (top->print_entries == 0) {
                                struct sigaction act = {
                                        .sa_sigaction = perf_top__sig_winch,
                                        .sa_flags     = SA_SIGINFO,
                                };
                                perf_top__sig_winch(SIGWINCH, NULL, top);
                                sigaction(SIGWINCH, &act, NULL);
                        } else {
                                perf_top__sig_winch(SIGWINCH, NULL, top);
                                signal(SIGWINCH, SIG_DFL);
                        }
                        break;
                case 'E':
                        if (top->evlist->nr_entries > 1) {
                                /* Select 0 as the default event: */
                                int counter = 0;

                                fprintf(stderr, "\nAvailable events:");

                                list_for_each_entry(top->sym_evsel, &top->evlist->entries, node)
                                        fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, perf_evsel__name(top->sym_evsel));

                                prompt_integer(&counter, "Enter details event counter");

                                if (counter >= top->evlist->nr_entries) {
                                        top->sym_evsel = list_entry(top->evlist->entries.next, struct perf_evsel, node);
                                        fprintf(stderr, "Sorry, no such event, using %s.\n", perf_evsel__name(top->sym_evsel));
                                        sleep(1);
                                        break;
                                }
                                list_for_each_entry(top->sym_evsel, &top->evlist->entries, node)
                                        if (top->sym_evsel->idx == counter)
                                                break;
                        } else
                                top->sym_evsel = list_entry(top->evlist->entries.next, struct perf_evsel, node);
                        break;
                case 'f':
                        prompt_integer(&top->count_filter, "Enter display event count filter");
                        break;
                case 'F':
                        prompt_percent(&top->sym_pcnt_filter,
                                       "Enter details display event filter (percent)");
                        break;
                case 'K':
                        top->hide_kernel_symbols = !top->hide_kernel_symbols;
                        break;
                case 'q':
                case 'Q':
                        printf("exiting.\n");
                        if (top->dump_symtab)
                                perf_session__fprintf_dsos(top->session, stderr);
                        exit(0);
                case 's':
                        perf_top__prompt_symbol(top, "Enter details symbol");
                        break;
                case 'S':
                        if (!top->sym_filter_entry)
                                break;
                        else {
                                struct hist_entry *syme = top->sym_filter_entry;

                                top->sym_filter_entry = NULL;
                                __zero_source_counters(syme);
                        }
                        break;
                case 'U':
                        top->hide_user_symbols = !top->hide_user_symbols;
                        break;
                case 'z':
                        top->zero = !top->zero;
                        break;
                default:
                        break;
        }
}
static void perf_top__sort_new_samples(void *arg)
{
        struct perf_top *t = arg;

        perf_top__reset_sample_counters(t);

        if (t->evlist->selected != NULL)
                t->sym_evsel = t->evlist->selected;

        hists__collapse_resort_threaded(&t->sym_evsel->hists);
        hists__output_resort_threaded(&t->sym_evsel->hists);
        hists__decay_entries_threaded(&t->sym_evsel->hists,
                                      t->hide_user_symbols,
                                      t->hide_kernel_symbols);
}
static void *display_thread_tui(void *arg)
{
        struct perf_evsel *pos;
        struct perf_top *top = arg;
        const char *help = "For a higher level overview, try: perf top --sort comm,dso";

        perf_top__sort_new_samples(top);

        /*
         * Initialize the uid_filter_str, in the future the TUI will allow
         * Zooming in/out UIDs. For now just use whatever the user passed
         * via --uid.
         */
        list_for_each_entry(pos, &top->evlist->entries, node)
                pos->hists.uid_filter_str = top->target.uid_str;

        perf_evlist__tui_browse_hists(top->evlist, help,
                                      perf_top__sort_new_samples,
                                      top, top->delay_secs);

        exit_browser(0);
        exit(0);
        return NULL;
}
static void *display_thread(void *arg)
{
        struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
        struct termios tc, save;
        struct perf_top *top = arg;
        int delay_msecs, c;

        tcgetattr(0, &save);
        tc = save;
        tc.c_lflag &= ~(ICANON | ECHO);
        tc.c_cc[VMIN] = 0;
        tc.c_cc[VTIME] = 0;

        pthread__unblock_sigwinch();
repeat:
        delay_msecs = top->delay_secs * 1000;
        tcsetattr(0, TCSANOW, &tc);
        /* trash return */
        getc(stdin);

        while (1) {
                perf_top__print_sym_table(top);
                /*
                 * Either timeout expired or we got an EINTR due to SIGWINCH,
                 * refresh screen in both cases.
                 */
                switch (poll(&stdin_poll, 1, delay_msecs)) {
                case 0:
                        continue;
                case -1:
                        if (errno == EINTR)
                                continue;
                        /* Fall thru */
                default:
                        c = getc(stdin);
                        tcsetattr(0, TCSAFLUSH, &save);

                        perf_top__handle_keypress(top, c);
                        goto repeat;
                }
        }

        return NULL;
}
/* Tag samples to be skipped. */
static const char *skip_symbols[] = {
        "mwait_idle_with_hints",
        "ppc64_runlatch_off",
        "pseries_dedicated_idle_sleep",
        NULL
};
static int symbol_filter(struct map *map __used, struct symbol *sym)
{
        const char *name = sym->name;
        int i;

        /*
         * ppc64 uses function descriptors and appends a '.' to the
         * start of every instruction address. Remove it.
         */
        if (name[0] == '.')
                name++;

        if (!strcmp(name, "_text") ||
            !strcmp(name, "_etext") ||
            !strcmp(name, "_sinittext") ||
            !strncmp("init_module", name, 11) ||
            !strncmp("cleanup_module", name, 14) ||
            strstr(name, "_text_start") ||
            strstr(name, "_text_end"))
                return 1;

        for (i = 0; skip_symbols[i]; i++) {
                if (!strcmp(skip_symbols[i], name)) {
                        sym->ignore = true;
                        break;
                }
        }

        return 0;
}
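
/*
 * Per-sample processing: resolve the sample to a map/symbol, emit the
 * one-time kptr_restrict and missing-vmlinux warnings when kernel samples
 * cannot be resolved, then add the histogram entry, append the callchain
 * if requested and record precise IPs for annotation.
 */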
static void perf_event__process_sample(struct perf_tool *tool,
                                       const union perf_event *event,
                                       struct perf_evsel *evsel,
                                       struct perf_sample *sample,
                                       struct machine *machine)
{
        struct perf_top *top = container_of(tool, struct perf_top, tool);
        struct symbol *parent = NULL;
        u64 ip = event->ip.ip;
        struct addr_location al;
        int err;

        if (!machine && perf_guest) {
                static struct intlist *seen;

                if (!seen)
                        seen = intlist__new();

                if (!intlist__has_entry(seen, event->ip.pid)) {
                        pr_err("Can't find guest [%d]'s kernel information\n",
                               event->ip.pid);
                        intlist__add(seen, event->ip.pid);
                }
                return;
        }

        if (!machine) {
                pr_err("%u unprocessable samples recorded.",
                       top->session->hists.stats.nr_unprocessable_samples++);
                return;
        }

        if (event->header.misc & PERF_RECORD_MISC_EXACT_IP)
                top->exact_samples++;

        if (perf_event__preprocess_sample(event, machine, &al, sample,
                                          symbol_filter) < 0 ||
            al.filtered)
                return;

        if (!top->kptr_restrict_warned &&
            symbol_conf.kptr_restrict &&
            al.cpumode == PERF_RECORD_MISC_KERNEL) {
                ui__warning(
"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
"Check /proc/sys/kernel/kptr_restrict.\n\n"
"Kernel%s samples will not be resolved.\n",
                            !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ?
                            " modules" : "");
                if (use_browser <= 0)
                        sleep(5);
                top->kptr_restrict_warned = true;
        }

        if (al.sym == NULL) {
                const char *msg = "Kernel samples will not be resolved.\n";
                /*
                 * As we do lazy loading of symtabs we only will know if the
                 * specified vmlinux file is invalid when we actually have a
                 * hit in kernel space and then try to load it. So if we get
                 * here and there are _no_ symbols in the DSO backing the
                 * kernel map, bail out.
                 *
                 * We may never get here, for instance, if we use -K/
                 * --hide-kernel-symbols, even if the user specifies an
                 * invalid --vmlinux ;-)
                 */
                if (!top->kptr_restrict_warned && !top->vmlinux_warned &&
                    al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
                    RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
                        if (symbol_conf.vmlinux_name) {
                                ui__warning("The %s file can't be used.\n%s",
                                            symbol_conf.vmlinux_name, msg);
                        } else {
                                ui__warning("A vmlinux file was not found.\n%s",
                                            msg);
                        }

                        if (use_browser <= 0)
                                sleep(5);
                        top->vmlinux_warned = true;
                }
        }

        if (al.sym == NULL || !al.sym->ignore) {
                struct hist_entry *he;

                if ((sort__has_parent || symbol_conf.use_callchain) &&
                    sample->callchain) {
                        err = machine__resolve_callchain(machine, al.thread,
                                                         sample->callchain, &parent);
                        if (err)
                                return;
                }

                he = perf_evsel__add_hist_entry(evsel, &al, sample);
                if (he == NULL) {
                        pr_err("Problem incrementing symbol period, skipping event\n");
                        return;
                }

                if (symbol_conf.use_callchain) {
                        err = callchain_append(he->callchain, &callchain_cursor,
                                               sample->period);
                        if (err)
                                return;
                }

                if (top->sort_has_symbols)
                        perf_top__record_precise_ip(top, he, evsel->idx, ip);
        }
}
static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
{
        struct perf_sample sample;
        struct perf_evsel *evsel;
        struct perf_session *session = top->session;
        union perf_event *event;
        struct machine *machine;
        u8 origin;
        int ret;

        while ((event = perf_evlist__mmap_read(top->evlist, idx)) != NULL) {
                ret = perf_evlist__parse_sample(top->evlist, event, &sample, false);
                if (ret) {
                        pr_err("Can't parse sample, err = %d\n", ret);
                        continue;
                }

                evsel = perf_evlist__id2evsel(session->evlist, sample.id);
                assert(evsel != NULL);

                origin = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

                if (event->header.type == PERF_RECORD_SAMPLE)
                        ++top->samples;

                switch (origin) {
                case PERF_RECORD_MISC_USER:
                        ++top->us_samples;
                        if (top->hide_user_symbols)
                                continue;
                        machine = perf_session__find_host_machine(session);
                        break;
                case PERF_RECORD_MISC_KERNEL:
                        ++top->kernel_samples;
                        if (top->hide_kernel_symbols)
                                continue;
                        machine = perf_session__find_host_machine(session);
                        break;
                case PERF_RECORD_MISC_GUEST_KERNEL:
                        ++top->guest_kernel_samples;
                        machine = perf_session__find_machine(session, event->ip.pid);
                        break;
                case PERF_RECORD_MISC_GUEST_USER:
                        ++top->guest_us_samples;
                        /*
                         * TODO: we don't process guest user from host side
                         * except simple counting.
                         */
                default:
                        continue;
                }

                if (event->header.type == PERF_RECORD_SAMPLE) {
                        perf_event__process_sample(&top->tool, event, evsel,
                                                   &sample, machine);
                } else if (event->header.type < PERF_RECORD_MAX) {
                        hists__inc_nr_events(&evsel->hists, event->header.type);
                        perf_event__process(&top->tool, event, &sample, machine);
                } else
                        ++session->hists.stats.nr_unknown_events;
        }
}
*top
)
881 for (i
= 0; i
< top
->evlist
->nr_mmaps
; i
++)
882 perf_top__mmap_read_idx(top
, i
);
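
/*
 * Configure and open one counter per event: build sample_type from the
 * requested options, then open with a fallback chain for older kernels
 * (no exclude_guest/exclude_host, no sample_id_all) and, for the default
 * cycles event, a final fallback to the software cpu-clock event.
 */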
static void perf_top__start_counters(struct perf_top *top)
{
        struct perf_evsel *counter, *first;
        struct perf_evlist *evlist = top->evlist;

        first = list_entry(evlist->entries.next, struct perf_evsel, node);

        list_for_each_entry(counter, &evlist->entries, node) {
                struct perf_event_attr *attr = &counter->attr;
                struct xyarray *group_fd = NULL;

                if (top->group && counter != first)
                        group_fd = first->fd;

                attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;

                if (top->freq) {
                        attr->sample_type |= PERF_SAMPLE_PERIOD;
                        attr->freq        = 1;
                        attr->sample_freq = top->freq;
                }

                if (evlist->nr_entries > 1) {
                        attr->sample_type |= PERF_SAMPLE_ID;
                        attr->read_format |= PERF_FORMAT_ID;
                }

                if (perf_target__has_cpu(&top->target))
                        attr->sample_type |= PERF_SAMPLE_CPU;

                if (symbol_conf.use_callchain)
                        attr->sample_type |= PERF_SAMPLE_CALLCHAIN;

                attr->mmap = 1;
                attr->comm = 1;
                attr->inherit = top->inherit;
fallback_missing_features:
                if (top->exclude_guest_missing)
                        attr->exclude_guest = attr->exclude_host = 0;
retry_sample_id:
                attr->sample_id_all = top->sample_id_all_missing ? 0 : 1;
try_again:
                if (perf_evsel__open(counter, top->evlist->cpus,
                                     top->evlist->threads, top->group,
                                     group_fd) < 0) {
                        int err = errno;

                        if (err == EPERM || err == EACCES) {
                                ui__error_paranoid();
                                goto out_err;
                        } else if (err == EINVAL) {
                                if (!top->exclude_guest_missing &&
                                    (attr->exclude_guest || attr->exclude_host)) {
                                        pr_debug("Old kernel, cannot exclude "
                                                 "guest or host samples.\n");
                                        top->exclude_guest_missing = true;
                                        goto fallback_missing_features;
                                } else if (!top->sample_id_all_missing) {
                                        /*
                                         * Old kernel, no attr->sample_id_all field
                                         */
                                        top->sample_id_all_missing = true;
                                        goto retry_sample_id;
                                }
                        }
                        /*
                         * If it's cycles then fall back to hrtimer
                         * based cpu-clock-tick sw counter, which
                         * is always available even if no PMU support:
                         */
                        if ((err == ENOENT || err == ENXIO) &&
                            (attr->type == PERF_TYPE_HARDWARE) &&
                            (attr->config == PERF_COUNT_HW_CPU_CYCLES)) {

                                if (verbose)
                                        ui__warning("Cycles event not supported,\n"
                                                    "trying to fall back to cpu-clock-ticks\n");

                                attr->type = PERF_TYPE_SOFTWARE;
                                attr->config = PERF_COUNT_SW_CPU_CLOCK;
                                if (counter->name) {
                                        free(counter->name);
                                        counter->name = NULL;
                                }
                                goto try_again;
                        }

                        if (err == ENOENT) {
                                ui__error("The %s event is not supported.\n",
                                          perf_evsel__name(counter));
                                goto out_err;
                        } else if (err == EMFILE) {
                                ui__error("Too many events are opened.\n"
                                          "Try again after reducing the number of events\n");
                                goto out_err;
                        }

                        ui__error("The sys_perf_event_open() syscall "
                                  "returned with %d (%s). /bin/dmesg "
                                  "may provide additional information.\n"
                                  "No CONFIG_PERF_EVENTS=y kernel support "
                                  "configured?\n", err, strerror(err));
                        goto out_err;
                }
        }

        if (perf_evlist__mmap(evlist, top->mmap_pages, false) < 0) {
                ui__error("Failed to mmap with %d (%s)\n",
                          errno, strerror(errno));
                goto out_err;
        }

        return;

out_err:
        exit_browser(0);
        exit(0);
}
static int perf_top__setup_sample_type(struct perf_top *top)
{
        if (!top->sort_has_symbols) {
                if (symbol_conf.use_callchain) {
                        ui__error("Selected -g but \"sym\" not present in --sort/-s.");
                        return -EINVAL;
                }
        } else if (!top->dont_use_callchains && callchain_param.mode != CHAIN_NONE) {
                if (callchain_register_param(&callchain_param) < 0) {
                        ui__error("Can't register callchain params.\n");
                        return -EINVAL;
                }
        }

        return 0;
}
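
/*
 * Main driver: create the session, synthesize the already running threads,
 * start the counters, spawn the display thread (TUI or stdio) and then loop
 * reading the mmap ring buffers, polling when no new samples arrive.
 */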
static int __cmd_top(struct perf_top *top)
{
        pthread_t thread;
        int ret;
        /*
         * FIXME: perf_session__new should allow passing a O_MMAP, so that all this
         * mmap reading, etc is encapsulated in it. Use O_WRONLY for now.
         */
        top->session = perf_session__new(NULL, O_WRONLY, false, false, NULL);
        if (top->session == NULL)
                return -ENOMEM;

        ret = perf_top__setup_sample_type(top);
        if (ret)
                goto out_delete;

        if (perf_target__has_task(&top->target))
                perf_event__synthesize_thread_map(&top->tool, top->evlist->threads,
                                                  perf_event__process,
                                                  &top->session->host_machine);
        else
                perf_event__synthesize_threads(&top->tool, perf_event__process,
                                               &top->session->host_machine);
        perf_top__start_counters(top);
        top->session->evlist = top->evlist;
        perf_session__set_id_hdr_size(top->session);

        /* Wait for a minimal set of events before starting the snapshot */
        poll(top->evlist->pollfd, top->evlist->nr_fds, 100);

        perf_top__mmap_read(top);

        if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
                                                              display_thread), top)) {
                ui__error("Could not create display thread.\n");
                exit(-1);
        }

        if (top->realtime_prio) {
                struct sched_param param;

                param.sched_priority = top->realtime_prio;
                if (sched_setscheduler(0, SCHED_FIFO, &param)) {
                        ui__error("Could not set realtime priority.\n");
                        exit(-1);
                }
        }

        while (1) {
                u64 hits = top->samples;

                perf_top__mmap_read(top);

                if (hits == top->samples)
                        ret = poll(top->evlist->pollfd, top->evlist->nr_fds, 100);
        }

out_delete:
        perf_session__delete(top->session);
        top->session = NULL;

        return 0;
}
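
/*
 * Parse the -G/--call-graph argument:
 * "output_type[,min_percent[,print_limit][,call_order]]", e.g. the default
 * "fractal,0.5,callee".  "none" disables callchain accumulation altogether.
 */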
static int
parse_callchain_opt(const struct option *opt, const char *arg, int unset)
{
        struct perf_top *top = (struct perf_top *)opt->value;
        char *tok, *tok2;
        char *endptr;

        /*
         * --no-call-graph
         */
        if (unset) {
                top->dont_use_callchains = true;
                return 0;
        }

        symbol_conf.use_callchain = true;

        if (!arg)
                return 0;

        tok = strtok((char *)arg, ",");
        if (!tok)
                return -1;

        /* get the output mode */
        if (!strncmp(tok, "graph", strlen(arg)))
                callchain_param.mode = CHAIN_GRAPH_ABS;

        else if (!strncmp(tok, "flat", strlen(arg)))
                callchain_param.mode = CHAIN_FLAT;

        else if (!strncmp(tok, "fractal", strlen(arg)))
                callchain_param.mode = CHAIN_GRAPH_REL;

        else if (!strncmp(tok, "none", strlen(arg))) {
                callchain_param.mode = CHAIN_NONE;
                symbol_conf.use_callchain = false;

                return 0;
        } else
                return -1;

        /* get the min percentage */
        tok = strtok(NULL, ",");
        if (!tok)
                goto setup;

        callchain_param.min_percent = strtod(tok, &endptr);
        if (tok == endptr)
                return -1;

        /* get the print limit */
        tok2 = strtok(NULL, ",");
        if (!tok2)
                goto setup;

        if (tok2[0] != 'c') {
                callchain_param.print_limit = strtod(tok2, &endptr);
                tok2 = strtok(NULL, ",");
                if (!tok2)
                        goto setup;
        }

        /* get the call chain order */
        if (!strcmp(tok2, "caller"))
                callchain_param.order = ORDER_CALLER;
        else if (!strcmp(tok2, "callee"))
                callchain_param.order = ORDER_CALLEE;
        else
                return -1;
setup:
        if (callchain_register_param(&callchain_param) < 0) {
                fprintf(stderr, "Can't register callchain params\n");
                return -1;
        }
        return 0;
}
static const char * const top_usage[] = {
        "perf top [<options>]",
        NULL
};
int cmd_top(int argc, const char **argv, const char *prefix __used)
{
        struct perf_evsel *pos;
        int status;
        char errbuf[BUFSIZ];
        struct perf_top top = {
                .freq            = 4000, /* 4 KHz */
                .sym_pcnt_filter = 5,
        };
        char callchain_default_opt[] = "fractal,0.5,callee";
        const struct option options[] = {
        OPT_CALLBACK('e', "event", &top.evlist, "event",
                     "event selector. use 'perf list' to list available events",
                     parse_events_option),
        OPT_INTEGER('c', "count", &top.default_interval,
                    "event period to sample"),
        OPT_STRING('p', "pid", &top.target.pid, "pid",
                   "profile events on existing process id"),
        OPT_STRING('t', "tid", &top.target.tid, "tid",
                   "profile events on existing thread id"),
        OPT_BOOLEAN('a', "all-cpus", &top.target.system_wide,
                    "system-wide collection from all CPUs"),
        OPT_STRING('C', "cpu", &top.target.cpu_list, "cpu",
                   "list of cpus to monitor"),
        OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
                   "file", "vmlinux pathname"),
        OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols,
                    "hide kernel symbols"),
        OPT_UINTEGER('m', "mmap-pages", &top.mmap_pages, "number of mmap data pages"),
        OPT_INTEGER('r', "realtime", &top.realtime_prio,
                    "collect data with this RT SCHED_FIFO priority"),
        OPT_INTEGER('d', "delay", &top.delay_secs,
                    "number of seconds to delay between refreshes"),
        OPT_BOOLEAN('D', "dump-symtab", &top.dump_symtab,
                    "dump the symbol table used for profiling"),
        OPT_INTEGER('f', "count-filter", &top.count_filter,
                    "only display functions with more events than this"),
        OPT_BOOLEAN('g', "group", &top.group,
                    "put the counters into a counter group"),
        OPT_BOOLEAN('i', "inherit", &top.inherit,
                    "child tasks inherit counters"),
        OPT_STRING(0, "sym-annotate", &top.sym_filter, "symbol name",
                   "symbol to annotate"),
        OPT_BOOLEAN('z', "zero", &top.zero,
                    "zero history across updates"),
        OPT_INTEGER('F', "freq", &top.freq,
                    "profile at this frequency"),
        OPT_INTEGER('E', "entries", &top.print_entries,
                    "display this many functions"),
        OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols,
                    "hide user symbols"),
        OPT_BOOLEAN(0, "tui", &top.use_tui, "Use the TUI interface"),
        OPT_BOOLEAN(0, "stdio", &top.use_stdio, "Use the stdio interface"),
        OPT_INCR('v', "verbose", &verbose,
                 "be more verbose (show counter open errors, etc)"),
        OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
                   "sort by key(s): pid, comm, dso, symbol, parent"),
        OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
                    "Show a column with the number of samples"),
        OPT_CALLBACK_DEFAULT('G', "call-graph", &top, "output_type,min_percent,call_order",
                             "Display callchains using output_type (graph, flat, fractal, or none), min percent threshold and callchain order. "
                             "Default: fractal,0.5,callee", &parse_callchain_opt,
                             callchain_default_opt),
        OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
                    "Show a column with the sum of periods"),
        OPT_STRING(0, "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
                   "only consider symbols in these dsos"),
        OPT_STRING(0, "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
                   "only consider symbols in these comms"),
        OPT_STRING(0, "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
                   "only consider these symbols"),
        OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src,
                    "Interleave source code with assembly code (default)"),
        OPT_BOOLEAN(0, "asm-raw", &symbol_conf.annotate_asm_raw,
                    "Display raw encoding of assembly instructions (default)"),
        OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
                   "Specify disassembler style (e.g. -M intel for intel syntax)"),
        OPT_STRING('u', "uid", &top.target.uid_str, "user", "user to profile"),
        OPT_END()
        };

        top.evlist = perf_evlist__new(NULL, NULL);
        if (top.evlist == NULL)
                return -ENOMEM;

        symbol_conf.exclude_other = false;

        argc = parse_options(argc, argv, options, top_usage, 0);
        if (argc)
                usage_with_options(top_usage, options);

        if (sort_order == default_sort_order)
                sort_order = "dso,symbol";

        setup_sorting(top_usage, options);

        if (top.use_stdio)
                use_browser = 0;
        else if (top.use_tui)
                use_browser = 1;

        setup_browser(false);

        status = perf_target__validate(&top.target);
        if (status) {
                perf_target__strerror(&top.target, status, errbuf, BUFSIZ);
                ui__warning("%s", errbuf);
        }

        status = perf_target__parse_uid(&top.target);
        if (status) {
                int saved_errno = errno;

                perf_target__strerror(&top.target, status, errbuf, BUFSIZ);
                ui__error("%s", errbuf);

                status = -saved_errno;
                goto out_delete_evlist;
        }

        if (perf_target__none(&top.target))
                top.target.system_wide = true;

        if (perf_evlist__create_maps(top.evlist, &top.target) < 0)
                usage_with_options(top_usage, options);

        if (!top.evlist->nr_entries &&
            perf_evlist__add_default(top.evlist) < 0) {
                ui__error("Not enough memory for event selector list\n");
                return -ENOMEM;
        }

        symbol_conf.nr_events = top.evlist->nr_entries;

        if (top.delay_secs < 1)
                top.delay_secs = 1;

        /*
         * User specified count overrides default frequency.
         */
        if (top.default_interval)
                top.freq = 0;
        else if (top.freq) {
                top.default_interval = top.freq;
        } else {
                ui__error("frequency and count are zero, aborting\n");
                exit(EXIT_FAILURE);
        }

        list_for_each_entry(pos, &top.evlist->entries, node) {
                /*
                 * Fill in the ones not specifically initialized via -c:
                 */
                if (!pos->attr.sample_period)
                        pos->attr.sample_period = top.default_interval;
        }

        top.sym_evsel = list_entry(top.evlist->entries.next, struct perf_evsel, node);

        symbol_conf.priv_size = sizeof(struct annotation);

        symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
        if (symbol__init() < 0)
                return -1;

        sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, "dso", stdout);
        sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, "comm", stdout);
        sort_entry__setup_elide(&sort_sym, symbol_conf.sym_list, "symbol", stdout);

        /*
         * Avoid annotation data structures overhead when symbols aren't on the
         * sort list.
         */
        top.sort_has_symbols = sort_sym.list.next != NULL;

        get_term_dimensions(&top.winsize);
        if (top.print_entries == 0) {
                struct sigaction act = {
                        .sa_sigaction = perf_top__sig_winch,
                        .sa_flags     = SA_SIGINFO,
                };
                perf_top__update_print_entries(&top);
                sigaction(SIGWINCH, &act, NULL);
        }

        status = __cmd_top(&top);

out_delete_evlist:
        perf_evlist__delete(top.evlist);

        return status;
}