 * Builtin top command: Display a continuously updated profile of
 * any workload, CPU or specific PID.
 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
#include "util/annotate.h"
#include "util/cache.h"
#include "util/color.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include "util/util.h"
#include <linux/rbtree.h>
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/cpumap.h"
#include "util/xyarray.h"
#include "util/debug.h"

#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/prctl.h>

#include <linux/unistd.h>
#include <linux/types.h>
static struct perf_top top = {
	.display_weighted	= -1,
	.active_symbols		= LIST_HEAD_INIT(top.active_symbols),
	.active_symbols_lock	= PTHREAD_MUTEX_INITIALIZER,
	.active_symbols_cond	= PTHREAD_COND_INITIALIZER,
	.freq			= 1000, /* 1 KHz */
};

static bool		system_wide		= false;

static bool		use_tui, use_stdio;

static int		default_interval	= 0;

static bool		kptr_restrict_warned;
static bool		vmlinux_warned;
static bool		inherit			= false;
static int		realtime_prio		= 0;
static bool		group			= false;
static unsigned int	page_size;
static unsigned int	mmap_pages		= 128;

static bool		dump_symtab		= false;

static struct winsize	winsize;

static const char	*sym_filter		= NULL;
struct sym_entry	*sym_filter_entry_sched	= NULL;
static int		sym_pcnt_filter		= 5;
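/*
 * Work out the terminal geometry: the LINES/COLUMNS environment variables
 * are honoured first, with the TIOCGWINSZ ioctl on stdout as the fallback
 * when they are unset or zero.
 */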
void get_term_dimensions(struct winsize *ws)
{
	char *s = getenv("LINES");

	if (s != NULL) {
		ws->ws_row = atoi(s);
		s = getenv("COLUMNS");
		if (s != NULL) {
			ws->ws_col = atoi(s);
			if (ws->ws_row && ws->ws_col)
				return;
		}
	}

	if (ioctl(1, TIOCGWINSZ, ws) == 0 &&
	    ws->ws_row && ws->ws_col)
		return;

	ws->ws_row = 25;
	ws->ws_col = 80;
}
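/*
 * Recompute how many symbol lines fit on screen from the terminal height;
 * the nine rows that get subtracted presumably cover the header and column
 * titles.  sig_winch_handler() reruns this whenever the terminal is resized.
 */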
static void update_print_entries(struct winsize *ws)
{
	top.print_entries = ws->ws_row;

	if (top.print_entries > 9)
		top.print_entries -= 9;
}
static void sig_winch_handler(int sig __used)
{
	get_term_dimensions(&winsize);
	update_print_entries(&winsize);
}
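/*
 * Set up annotation state for the symbol selected with -s/--sym-annotate.
 * Annotation needs a real symbol table, so a DSO backed only by
 * /proc/kallsyms is rejected; the per-event histograms are allocated
 * lazily under notes->lock, one slot per event in the evlist.
 */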
static int parse_source(struct sym_entry *syme)
{
	struct symbol *sym;
	struct annotation *notes;
	struct map *map;
	int err = -1;

	sym = sym_entry__symbol(syme);
	map = syme->map;

	/*
	 * We can't annotate with just /proc/kallsyms
	 */
	if (map->dso->symtab_type == SYMTAB__KALLSYMS) {
		pr_err("Can't annotate %s: No vmlinux file was found in the "
		       "path\n", sym->name);
		return -1;
	}

	notes = symbol__annotation(sym);
	if (notes->src != NULL) {
		pthread_mutex_lock(&notes->lock);
		goto out_assign;
	}

	pthread_mutex_lock(&notes->lock);

	if (symbol__alloc_hist(sym, top.evlist->nr_entries) < 0) {
		pthread_mutex_unlock(&notes->lock);
		pr_err("Not enough memory for annotating '%s' symbol!\n",
		       sym->name);
		return err;
	}

	err = symbol__annotate(sym, syme->map, 0);
	if (err == 0) {
out_assign:
		top.sym_filter_entry = syme;
	}

	pthread_mutex_unlock(&notes->lock);
	return err;
}
static void __zero_source_counters(struct sym_entry *syme)
{
	struct symbol *sym = sym_entry__symbol(syme);
	symbol__annotate_zero_histograms(sym);
}
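/*
 * Feed one sample address into the annotation histogram of the symbol
 * currently being annotated.  Only hits on top.sym_filter_entry count, and
 * the trylock makes the sampling path skip the update rather than block
 * while the display side holds notes->lock.
 */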
static void record_precise_ip(struct sym_entry *syme, struct map *map,
			      int counter, u64 ip)
{
	struct annotation *notes;
	struct symbol *sym;

	if (syme != top.sym_filter_entry)
		return;

	sym = sym_entry__symbol(syme);
	notes = symbol__annotation(sym);

	if (pthread_mutex_trylock(&notes->lock))
		return;

	ip = map->map_ip(map, ip);
	symbol__inc_addr_samples(sym, map, counter, ip);

	pthread_mutex_unlock(&notes->lock);
}
static void show_details(struct sym_entry *syme)
{
	struct annotation *notes;
	struct symbol *symbol;
	int more;

	if (!syme)
		return;

	symbol = sym_entry__symbol(syme);
	notes = symbol__annotation(symbol);

	pthread_mutex_lock(&notes->lock);

	if (notes->src == NULL)
		goto out_unlock;

	printf("Showing %s for %s\n", event_name(top.sym_evsel), symbol->name);
	printf("  Events  Pcnt (>=%d%%)\n", sym_pcnt_filter);

	more = symbol__annotate_printf(symbol, syme->map, top.sym_evsel->idx,
				       0, sym_pcnt_filter, top.print_entries, 4);
	if (top.zero)
		symbol__annotate_zero_histogram(symbol, top.sym_evsel->idx);
	else
		symbol__annotate_decay_histogram(symbol, top.sym_evsel->idx);
	if (more != 0)
		printf("%d lines not displayed, maybe increase display entries [e]\n", more);
out_unlock:
	pthread_mutex_unlock(&notes->lock);
}
static const char	CONSOLE_CLEAR[] = "\e[H\e[2J";
static void __list_insert_active_sym(struct sym_entry *syme)
{
	list_add(&syme->node, &top.active_symbols);
}
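/*
 * One refresh of the stdio interface: print the header, collect the decayed
 * samples into an rb-tree, and emit up to top.print_entries symbols that
 * pass top.count_filter.  Column widths are clamped so symbol and DSO names
 * still fit the current terminal width.
 */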
static void print_sym_table(struct perf_session *session)
{
	char bf[160];
	int printed = 0;
	struct rb_node *nd;
	struct sym_entry *syme;
	struct rb_root tmp = RB_ROOT;
	const int win_width = winsize.ws_col - 1;
	int sym_width, dso_width, dso_short_width;
	float sum_ksamples = perf_top__decay_samples(&top, &tmp);

	puts(CONSOLE_CLEAR);

	perf_top__header_snprintf(&top, bf, sizeof(bf));
	printf("%s", bf);

	perf_top__reset_sample_counters(&top);

	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);

	if (session->hists.stats.total_lost != 0) {
		color_fprintf(stdout, PERF_COLOR_RED, "WARNING:");
		printf(" LOST %" PRIu64 " events, Check IO/CPU overload\n",
		       session->hists.stats.total_lost);
	}

	if (top.sym_filter_entry) {
		show_details(top.sym_filter_entry);
		return;
	}

	perf_top__find_widths(&top, &tmp, &dso_width, &dso_short_width,
			      &sym_width);

	if (sym_width + dso_width > winsize.ws_col - 29) {
		dso_width = dso_short_width;
		if (sym_width + dso_width > winsize.ws_col - 29)
			sym_width = winsize.ws_col - dso_width - 29;
	}

	putchar('\n');
	if (top.evlist->nr_entries == 1)
		printf("             samples  pcnt");
	else
		printf("   weight    samples  pcnt");

	if (verbose)
		printf("         RIP       ");
	printf(" %-*.*s DSO\n", sym_width, sym_width, "function");
	printf("   %s    _______ _____",
	       top.evlist->nr_entries == 1 ? "      " : "______");
	if (verbose)
		printf(" ________________");
	printf(" %-*.*s", sym_width, sym_width, graph_line);
	printf("  %-*.*s", dso_width, dso_width, graph_line);
	puts("\n");

	for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
		struct symbol *sym;
		double pcnt;

		syme = rb_entry(nd, struct sym_entry, rb_node);
		sym = sym_entry__symbol(syme);
		if (++printed > top.print_entries ||
		    (int)syme->snap_count < top.count_filter)
			continue;

		pcnt = 100.0 - (100.0 * ((sum_ksamples - syme->snap_count) /
					 sum_ksamples));

		if (top.evlist->nr_entries == 1 || !top.display_weighted)
			printf("%20.2f ", syme->weight);
		else
			printf("%9.1f %10ld ", syme->weight, syme->snap_count);

		percent_color_fprintf(stdout, "%4.1f%%", pcnt);
		if (verbose)
			printf(" %016" PRIx64, sym->start);
		printf(" %-*.*s", sym_width, sym_width, sym->name);
		printf(" %-*.*s\n", dso_width, dso_width,
		       dso_width >= syme->map->dso->long_name_len ?
					syme->map->dso->long_name :
					syme->map->dso->short_name);
	}
}
static void prompt_integer(int *target, const char *msg)
{
	char *buf = malloc(0), *p;
	size_t dummy = 0;
	int tmp;

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		goto out_free;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	tmp = strtoul(buf, NULL, 10);
	*target = tmp;
out_free:
	free(buf);
}
static void prompt_percent(int *target, const char *msg)
{
	int tmp = 0;

	prompt_integer(&tmp, msg);
	if (tmp >= 0 && tmp <= 100)
		*target = tmp;
}
static void prompt_symbol(struct sym_entry **target, const char *msg)
{
	char *buf = malloc(0), *p;
	struct sym_entry *syme = *target, *n, *found = NULL;
	size_t dummy = 0;

	/* zero counters of active symbol */
	if (syme) {
		__zero_source_counters(syme);
		*target = NULL;
	}

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		goto out_free;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	pthread_mutex_lock(&top.active_symbols_lock);
	syme = list_entry(top.active_symbols.next, struct sym_entry, node);
	pthread_mutex_unlock(&top.active_symbols_lock);

	list_for_each_entry_safe_from(syme, n, &top.active_symbols, node) {
		struct symbol *sym = sym_entry__symbol(syme);

		if (!strcmp(buf, sym->name)) {
			found = syme;
			break;
		}
	}

	if (!found)
		fprintf(stderr, "Sorry, %s is not active.\n", buf);
	else
		parse_source(found);

out_free:
	free(buf);
}
static void print_mapped_keys(void)
{
	char *name = NULL;

	if (top.sym_filter_entry) {
		struct symbol *sym = sym_entry__symbol(top.sym_filter_entry);
		name = sym->name;
	}

	fprintf(stdout, "\nMapped keys:\n");
	fprintf(stdout, "\t[d]     display refresh delay.             \t(%d)\n", top.delay_secs);
	fprintf(stdout, "\t[e]     display entries (lines).           \t(%d)\n", top.print_entries);

	if (top.evlist->nr_entries > 1)
		fprintf(stdout, "\t[E]     active event counter.              \t(%s)\n", event_name(top.sym_evsel));

	fprintf(stdout, "\t[f]     profile display filter (count).    \t(%d)\n", top.count_filter);

	fprintf(stdout, "\t[F]     annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter);
	fprintf(stdout, "\t[s]     annotate symbol.                   \t(%s)\n", name ?: "NULL");
	fprintf(stdout, "\t[S]     stop annotation.\n");

	if (top.evlist->nr_entries > 1)
		fprintf(stdout, "\t[w]     toggle display weighted/count[E]r. \t(%d)\n", top.display_weighted ? 1 : 0);

	fprintf(stdout,
		"\t[K]     hide kernel symbols.               \t(%s)\n",
		top.hide_kernel_symbols ? "yes" : "no");
	fprintf(stdout,
		"\t[U]     hide user symbols.                 \t(%s)\n",
		top.hide_user_symbols ? "yes" : "no");
	fprintf(stdout, "\t[z]     toggle sample zeroing.             \t(%d)\n", top.zero ? 1 : 0);
	fprintf(stdout, "\t[qQ]    quit.\n");
}
static int key_mapped(int c)
{
	switch (c) {
	case 'E':
	case 'w':
		return top.evlist->nr_entries > 1 ? 1 : 0;
	default:
		break;
	}

	return 0;
}
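/*
 * React to a single keypress.  For an unmapped key the terminal is put into
 * non-canonical, no-echo mode and the user is prompted for a selection;
 * mapped keys mostly just update fields of the global 'top' state that the
 * display loop picks up on its next refresh.
 */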
static void handle_keypress(struct perf_session *session, int c)
{
	if (!key_mapped(c)) {
		struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
		struct termios tc, save;

		fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
		fflush(stdout);

		tcgetattr(0, &save);
		tc = save;
		tc.c_lflag &= ~(ICANON | ECHO);
		tcsetattr(0, TCSANOW, &tc);

		poll(&stdin_poll, 1, -1);
		c = getc(stdin);

		tcsetattr(0, TCSAFLUSH, &save);
		if (!key_mapped(c))
			return;
	}

	switch (c) {
	case 'd':
		prompt_integer(&top.delay_secs, "Enter display delay");
		if (top.delay_secs < 1)
			top.delay_secs = 1;
		break;
	case 'e':
		prompt_integer(&top.print_entries, "Enter display entries (lines)");
		if (top.print_entries == 0) {
			sig_winch_handler(SIGWINCH);
			signal(SIGWINCH, sig_winch_handler);
		} else
			signal(SIGWINCH, SIG_DFL);
		break;
	case 'E':
		if (top.evlist->nr_entries > 1) {
			/* Select 0 as the default event: */
			int counter = 0;

			fprintf(stderr, "\nAvailable events:");

			list_for_each_entry(top.sym_evsel, &top.evlist->entries, node)
				fprintf(stderr, "\n\t%d %s", top.sym_evsel->idx, event_name(top.sym_evsel));

			prompt_integer(&counter, "Enter details event counter");

			if (counter >= top.evlist->nr_entries) {
				top.sym_evsel = list_entry(top.evlist->entries.next, struct perf_evsel, node);
				fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(top.sym_evsel));
				break;
			}
			list_for_each_entry(top.sym_evsel, &top.evlist->entries, node)
				if (top.sym_evsel->idx == counter)
					break;
		} else
			top.sym_evsel = list_entry(top.evlist->entries.next, struct perf_evsel, node);
		break;
	case 'f':
		prompt_integer(&top.count_filter, "Enter display event count filter");
		break;
	case 'F':
		prompt_percent(&sym_pcnt_filter, "Enter details display event filter (percent)");
		break;
	case 'K':
		top.hide_kernel_symbols = !top.hide_kernel_symbols;
		break;
	case 'q':
	case 'Q':
		printf("exiting.\n");
		if (dump_symtab)
			perf_session__fprintf_dsos(session, stderr);
		exit(0);
	case 's':
		prompt_symbol(&top.sym_filter_entry, "Enter details symbol");
		break;
	case 'S':
		if (!top.sym_filter_entry)
			break;
		else {
			struct sym_entry *syme = top.sym_filter_entry;

			top.sym_filter_entry = NULL;
			__zero_source_counters(syme);
		}
		break;
	case 'U':
		top.hide_user_symbols = !top.hide_user_symbols;
		break;
	case 'w':
		top.display_weighted = ~top.display_weighted;
		break;
	case 'z':
		top.zero = !top.zero;
		break;
	default:
		break;
	}
}
static void *display_thread_tui(void *arg __used)
{
	int err = 0;

	pthread_mutex_lock(&top.active_symbols_lock);
	while (list_empty(&top.active_symbols)) {
		err = pthread_cond_wait(&top.active_symbols_cond,
					&top.active_symbols_lock);
		if (err)
			break;
	}
	pthread_mutex_unlock(&top.active_symbols_lock);

	if (!err)
		perf_top__tui_browser(&top);

	return NULL;
}
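/*
 * stdio display loop: repaint the symbol table every top.delay_secs seconds
 * until a key arrives on stdin, then restore the terminal, hand the key to
 * handle_keypress() and start over.  The poll() timeout doubles as the
 * refresh timer.
 */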
static void *display_thread(void *arg __used)
{
	struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
	struct termios tc, save;
	int delay_msecs, c;
	struct perf_session *session = (struct perf_session *) arg;

	tcgetattr(0, &save);
	tc = save;
	tc.c_lflag &= ~(ICANON | ECHO);

repeat:
	delay_msecs = top.delay_secs * 1000;
	tcsetattr(0, TCSANOW, &tc);

	do {
		print_sym_table(session);
	} while (!poll(&stdin_poll, 1, delay_msecs) == 1);

	c = getc(stdin);
	tcsetattr(0, TCSAFLUSH, &save);

	handle_keypress(session, c);
	goto repeat;

	return NULL;
}
/* Tag samples to be skipped. */
static const char *skip_symbols[] = {
	"mwait_idle_with_hints",
	"ppc64_runlatch_off",
	"pseries_dedicated_idle_sleep",
	NULL
};
static int symbol_filter(struct map *map, struct symbol *sym)
{
	struct sym_entry *syme;
	const char *name = sym->name;
	int i;

	/*
	 * ppc64 uses function descriptors and appends a '.' to the
	 * start of every instruction address. Remove it.
	 */
	if (name[0] == '.')
		name++;

	if (!strcmp(name, "_text") ||
	    !strcmp(name, "_etext") ||
	    !strcmp(name, "_sinittext") ||
	    !strncmp("init_module", name, 11) ||
	    !strncmp("cleanup_module", name, 14) ||
	    strstr(name, "_text_start") ||
	    strstr(name, "_text_end"))
		return 1;

	syme = symbol__priv(sym);
	syme->map = map;
	symbol__annotate_init(map, sym);

	if (!top.sym_filter_entry && sym_filter && !strcmp(name, sym_filter)) {
		/* schedule initial sym_filter_entry setup */
		sym_filter_entry_sched = syme;
		sym_filter = NULL;
	}

	for (i = 0; skip_symbols[i]; i++) {
		if (!strcmp(skip_symbols[i], name)) {
			sym->ignore = true;
			break;
		}
	}

	return 0;
}
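/*
 * Handle one PERF_RECORD_SAMPLE: bump the per-cpumode counters, resolve the
 * sample to a machine (host, guest kernel or guest user), then to a map and
 * symbol, and credit the owning sym_entry.  Newly hit symbols are linked
 * onto top.active_symbols so the display threads can find them.
 */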
static void perf_event__process_sample(const union perf_event *event,
				       struct perf_sample *sample,
				       struct perf_session *session)
{
	u64 ip = event->ip.ip;
	struct sym_entry *syme;
	struct addr_location al;
	struct machine *machine;
	u8 origin = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	++top.samples;

	switch (origin) {
	case PERF_RECORD_MISC_USER:
		++top.us_samples;
		if (top.hide_user_symbols)
			return;
		machine = perf_session__find_host_machine(session);
		break;
	case PERF_RECORD_MISC_KERNEL:
		++top.kernel_samples;
		if (top.hide_kernel_symbols)
			return;
		machine = perf_session__find_host_machine(session);
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		++top.guest_kernel_samples;
		machine = perf_session__find_machine(session, event->ip.pid);
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		++top.guest_us_samples;
		/*
		 * TODO: we don't process guest user from host side
		 * except simple counting.
		 */
		return;
	default:
		return;
	}

	if (!machine && perf_guest) {
		pr_err("Can't find guest [%d]'s kernel information\n",
		       event->ip.pid);
		return;
	}

	if (event->header.misc & PERF_RECORD_MISC_EXACT_IP)
		top.exact_samples++;

	if (perf_event__preprocess_sample(event, session, &al, sample,
					  symbol_filter) < 0 ||
	    al.filtered)
		return;

	if (!kptr_restrict_warned &&
	    symbol_conf.kptr_restrict &&
	    al.cpumode == PERF_RECORD_MISC_KERNEL) {
		ui__warning(
"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
"Check /proc/sys/kernel/kptr_restrict.\n\n"
"Kernel%s samples will not be resolved.\n",
			    !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ?
			    " modules" : "");
		if (use_browser <= 0)
			sleep(5);
		kptr_restrict_warned = true;
	}

	if (al.sym == NULL) {
		const char *msg = "Kernel samples will not be resolved.\n";
		/*
		 * As we do lazy loading of symtabs we only will know if the
		 * specified vmlinux file is invalid when we actually have a
		 * hit in kernel space and then try to load it. So if we get
		 * here and there are _no_ symbols in the DSO backing the
		 * kernel map, bail out.
		 *
		 * We may never get here, for instance, if we use -K/
		 * --hide-kernel-symbols, even if the user specifies an
		 * invalid --vmlinux ;-)
		 */
		if (!kptr_restrict_warned && !vmlinux_warned &&
		    al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
		    RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
			if (symbol_conf.vmlinux_name) {
				ui__warning("The %s file can't be used.\n%s",
					    symbol_conf.vmlinux_name, msg);
			} else {
				ui__warning("A vmlinux file was not found.\n%s",
					    msg);
			}

			if (use_browser <= 0)
				sleep(5);
			vmlinux_warned = true;
		}

		return;
	}

	/* let's see, whether we need to install initial sym_filter_entry */
	if (sym_filter_entry_sched) {
		top.sym_filter_entry = sym_filter_entry_sched;
		sym_filter_entry_sched = NULL;
		if (parse_source(top.sym_filter_entry) < 0) {
			struct symbol *sym = sym_entry__symbol(top.sym_filter_entry);

			pr_err("Can't annotate %s", sym->name);
			if (top.sym_filter_entry->map->dso->symtab_type == SYMTAB__KALLSYMS) {
				pr_err(": No vmlinux file was found in the path:\n");
				machine__fprintf_vmlinux_path(machine, stderr);
			}
		}
	}

	syme = symbol__priv(al.sym);
	if (!al.sym->ignore) {
		struct perf_evsel *evsel;

		evsel = perf_evlist__id2evsel(top.evlist, sample->id);
		assert(evsel != NULL);
		syme->count[evsel->idx]++;
		record_precise_ip(syme, al.map, evsel->idx, ip);
		pthread_mutex_lock(&top.active_symbols_lock);
		if (list_empty(&syme->node) || !syme->node.next) {
			static bool first = true;
			__list_insert_active_sym(syme);
			if (first) {
				pthread_cond_broadcast(&top.active_symbols_cond);
				first = false;
			}
		}
		pthread_mutex_unlock(&top.active_symbols_lock);
	}
}
static void perf_session__mmap_read_idx(struct perf_session *self, int idx)
{
	struct perf_sample sample;
	union perf_event *event;
	int ret;

	while ((event = perf_evlist__mmap_read(top.evlist, idx)) != NULL) {
		ret = perf_session__parse_sample(self, event, &sample);
		if (ret) {
			pr_err("Can't parse sample, err = %d\n", ret);
			continue;
		}

		if (event->header.type == PERF_RECORD_SAMPLE)
			perf_event__process_sample(event, &sample, self);
		else
			perf_event__process(event, &sample, self);
	}
}
static void perf_session__mmap_read(struct perf_session *self)
{
	int i;

	for (i = 0; i < top.evlist->nr_mmaps; i++)
		perf_session__mmap_read_idx(self, i);
}
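/*
 * Open one perf event per evsel on the monitored CPUs/threads and mmap the
 * ring buffers.  Sampling runs in frequency mode when top.freq is set; with
 * more than one event, PERF_SAMPLE_ID/PERF_FORMAT_ID are added so samples
 * can be matched back to their evsel.  A hardware cycles event the kernel
 * rejects is retried as the software cpu-clock event.
 */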
static void start_counters(struct perf_evlist *evlist)
{
	struct perf_evsel *counter;

	list_for_each_entry(counter, &evlist->entries, node) {
		struct perf_event_attr *attr = &counter->attr;

		attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;

		if (top.freq) {
			attr->sample_type |= PERF_SAMPLE_PERIOD;
			attr->freq	  = 1;
			attr->sample_freq = top.freq;
		}

		if (evlist->nr_entries > 1) {
			attr->sample_type |= PERF_SAMPLE_ID;
			attr->read_format |= PERF_FORMAT_ID;
		}

		attr->inherit = inherit;
try_again:
		if (perf_evsel__open(counter, top.evlist->cpus,
				     top.evlist->threads, group) < 0) {
			int err = errno;

			if (err == EPERM || err == EACCES) {
				ui__warning_paranoid();
				goto out_err;
			}
			/*
			 * If it's cycles then fall back to hrtimer
			 * based cpu-clock-tick sw counter, which
			 * is always available even if no PMU support:
			 */
			if (attr->type == PERF_TYPE_HARDWARE &&
			    attr->config == PERF_COUNT_HW_CPU_CYCLES) {
				ui__warning("Cycles event not supported,\n"
					    "trying to fall back to cpu-clock-ticks\n");

				attr->type = PERF_TYPE_SOFTWARE;
				attr->config = PERF_COUNT_SW_CPU_CLOCK;
				goto try_again;
			}

			if (err == ENOENT) {
				ui__warning("The %s event is not supported.\n",
					    event_name(counter));
				goto out_err;
			}

			ui__warning("The sys_perf_event_open() syscall "
				    "returned with %d (%s). /bin/dmesg "
				    "may provide additional information.\n"
				    "No CONFIG_PERF_EVENTS=y kernel support "
				    "configured?\n", err, strerror(err));
			goto out_err;
		}
	}

	if (perf_evlist__mmap(evlist, mmap_pages, false) < 0) {
		ui__warning("Failed to mmap with %d (%s)\n",
			    errno, strerror(errno));
		goto out_err;
	}

	return;

out_err:
	exit_browser(0);
	exit(0);
}
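/*
 * Main collection loop: synthesize records for already running tasks, start
 * the counters, kick off the display thread (stdio or TUI), then keep
 * draining the mmap ring buffers, polling whenever no new samples arrived.
 */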
static int __cmd_top(void)
{
	pthread_t thread;
	int ret __used;
	/*
	 * FIXME: perf_session__new should allow passing a O_MMAP, so that all this
	 * mmap reading, etc is encapsulated in it. Use O_WRONLY for now.
	 */
	struct perf_session *session = perf_session__new(NULL, O_WRONLY, false, false, NULL);
	if (session == NULL)
		return -ENOMEM;

	if (top.target_tid != -1)
		perf_event__synthesize_thread_map(top.evlist->threads,
						  perf_event__process, session);
	else
		perf_event__synthesize_threads(perf_event__process, session);

	start_counters(top.evlist);
	session->evlist = top.evlist;
	perf_session__update_sample_type(session);

	/* Wait for a minimal set of events before starting the snapshot */
	poll(top.evlist->pollfd, top.evlist->nr_fds, 100);

	perf_session__mmap_read(session);

	if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
							     display_thread), session)) {
		printf("Could not create display thread.\n");
		exit(-1);
	}

	if (realtime_prio) {
		struct sched_param param;

		param.sched_priority = realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			printf("Could not set realtime priority.\n");
			exit(-1);
		}
	}

	while (1) {
		u64 hits = top.samples;

		perf_session__mmap_read(session);

		if (hits == top.samples)
			ret = poll(top.evlist->pollfd, top.evlist->nr_fds, 100);
	}

	return 0;
}
static const char * const top_usage[] = {
	"perf top [<options>]",
	NULL
};
static const struct option options[] = {
	OPT_CALLBACK('e', "event", &top.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_INTEGER('c', "count", &default_interval,
		    "event period to sample"),
	OPT_INTEGER('p', "pid", &top.target_pid,
		    "profile events on existing process id"),
	OPT_INTEGER('t', "tid", &top.target_tid,
		    "profile events on existing thread id"),
	OPT_BOOLEAN('a', "all-cpus", &system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &top.cpu_list, "cpu",
		   "list of cpus to monitor"),
	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols,
		    "hide kernel symbols"),
	OPT_UINTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"),
	OPT_INTEGER('r', "realtime", &realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_INTEGER('d', "delay", &top.delay_secs,
		    "number of seconds to delay between refreshes"),
	OPT_BOOLEAN('D', "dump-symtab", &dump_symtab,
		    "dump the symbol table used for profiling"),
	OPT_INTEGER('f', "count-filter", &top.count_filter,
		    "only display functions with more events than this"),
	OPT_BOOLEAN('g', "group", &group,
		    "put the counters into a counter group"),
	OPT_BOOLEAN('i', "inherit", &inherit,
		    "child tasks inherit counters"),
	OPT_STRING('s', "sym-annotate", &sym_filter, "symbol name",
		   "symbol to annotate"),
	OPT_BOOLEAN('z', "zero", &top.zero,
		    "zero history across updates"),
	OPT_INTEGER('F', "freq", &top.freq,
		    "profile at this frequency"),
	OPT_INTEGER('E', "entries", &top.print_entries,
		    "display this many functions"),
	OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols,
		    "hide user symbols"),
	OPT_BOOLEAN(0, "tui", &use_tui, "Use the TUI interface"),
	OPT_BOOLEAN(0, "stdio", &use_stdio, "Use the stdio interface"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show counter open errors, etc)"),
	OPT_END()
};
int cmd_top(int argc, const char **argv, const char *prefix __used)
{
	struct perf_evsel *pos;
	int status = -ENOMEM;

	top.evlist = perf_evlist__new(NULL, NULL);
	if (top.evlist == NULL)
		return -ENOMEM;

	page_size = sysconf(_SC_PAGE_SIZE);

	argc = parse_options(argc, argv, options, top_usage, 0);
	if (argc)
		usage_with_options(top_usage, options);

	/*
	 * XXX For now start disabled, only using TUI if explicitely asked for.
	 * Change that when handle_keys equivalent gets written, live annotation
	 * done etc.
	 */
	use_browser = 0;

	if (use_stdio)
		use_browser = 0;
	else if (use_tui)
		use_browser = 1;

	setup_browser(false);

	/* CPU and PID are mutually exclusive */
	if (top.target_tid > 0 && top.cpu_list) {
		printf("WARNING: PID switch overriding CPU\n");
		top.cpu_list = NULL;
	}

	if (top.target_pid != -1)
		top.target_tid = top.target_pid;

	if (perf_evlist__create_maps(top.evlist, top.target_pid,
				     top.target_tid, top.cpu_list) < 0)
		usage_with_options(top_usage, options);

	if (!top.evlist->nr_entries &&
	    perf_evlist__add_default(top.evlist) < 0) {
		pr_err("Not enough memory for event selector list\n");
		return -ENOMEM;
	}

	if (top.delay_secs < 1)
		top.delay_secs = 1;

	/*
	 * User specified count overrides default frequency.
	 */
	if (default_interval)
		top.freq = 0;
	else if (top.freq) {
		default_interval = top.freq;
	} else {
		fprintf(stderr, "frequency and count are zero, aborting\n");
		exit(EXIT_FAILURE);
	}

	list_for_each_entry(pos, &top.evlist->entries, node) {
		if (perf_evsel__alloc_fd(pos, top.evlist->cpus->nr,
					 top.evlist->threads->nr) < 0)
			goto out_free_fd;
		/*
		 * Fill in the ones not specifically initialized via -c:
		 */
		if (pos->attr.sample_period)
			continue;

		pos->attr.sample_period = default_interval;
	}

	if (perf_evlist__alloc_pollfd(top.evlist) < 0 ||
	    perf_evlist__alloc_mmap(top.evlist) < 0)
		goto out_free_fd;

	top.sym_evsel = list_entry(top.evlist->entries.next, struct perf_evsel, node);

	symbol_conf.priv_size = (sizeof(struct sym_entry) + sizeof(struct annotation) +
				 (top.evlist->nr_entries + 1) * sizeof(unsigned long));

	symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
	if (symbol__init() < 0)
		return -1;

	get_term_dimensions(&winsize);
	if (top.print_entries == 0) {
		update_print_entries(&winsize);
		signal(SIGWINCH, sig_winch_handler);
	}

	status = __cmd_top();
out_free_fd:
	perf_evlist__delete(top.evlist);

	return status;
}