/*
 * builtin-top.c
 *
 * Builtin top command: Display a continuously updated profile of
 * any workload, CPU or specific PID.
 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util/symbol.h"
#include "util/color.h"
#include "util/util.h"
#include <linux/rbtree.h>
#include "util/parse-options.h"
#include "util/parse-events.h"

#include "util/debug.h"

#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/prctl.h>

#include <linux/unistd.h>
#include <linux/types.h>
static int		fd[MAX_NR_CPUS][MAX_COUNTERS];

static int		system_wide		=  0;

static int		default_interval	= 100000;

static int		count_filter		=  5;
static int		print_entries		= 15;

static int		target_pid		= -1;
static int		inherit			=  0;
static int		profile_cpu		= -1;
static int		nr_cpus			=  0;
static unsigned int	realtime_prio		=  0;

static unsigned int	page_size;
static unsigned int	mmap_pages		= 16;

static int		delay_secs		=  2;

static int		dump_symtab;
struct source_line {
	u64			eip;
	unsigned long		count[MAX_COUNTERS];
	char			*line;
	struct source_line	*next;
};
static char		*sym_filter		= NULL;
struct sym_entry	*sym_filter_entry	= NULL;
static int		sym_pcnt_filter		=  5;
static int		sym_counter		=  0;
static int		display_weighted	= -1;
static u64		min_ip;
static u64		max_ip = -1ll;
struct sym_entry {
	struct rb_node		rb_node;
	struct list_head	node;
	unsigned long		count[MAX_COUNTERS];
	unsigned long		snap_count;
	double			weight;
	int			skip;
	struct source_line	*source;
	struct source_line	*lines;
	struct source_line	**lines_tail;
	pthread_mutex_t		source_lock;
};
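
/*
 * Source-level annotation for the symbol selected via -s or the 's' key:
 * parse_source() runs objdump over the symbol's address range and keeps one
 * source_line per output line (a "NNNNNNNN:" prefix of 8 or 16 hex digits
 * marks a disassembled instruction address) so per-line hit counts can be
 * maintained by the sampling path.
 */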
static void parse_source(struct sym_entry *syme)
{
	struct symbol *sym;
	struct module *module;
	struct section *section = NULL;
	FILE *file;
	char command[PATH_MAX*2];
	const char *path = vmlinux_name;
	u64 start, end, len;

	if (!syme)
		return;

	if (syme->lines) {
		pthread_mutex_lock(&syme->source_lock);
		goto out_assign;
	}

	sym = (struct symbol *)(syme + 1);
	module = sym->module;

	if (module)
		path = module->path;
	if (!path)
		return;

	start = sym->obj_start;
	if (!start)
		start = sym->start;

	if (module) {
		section = module->sections->find_section(module->sections, ".text");
		if (section)
			start -= section->vma;
	}

	end = start + sym->end - sym->start + 1;
	len = sym->end - sym->start;

	sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s",
		start, end, path);

	file = popen(command, "r");
	if (!file)
		return;

	pthread_mutex_lock(&syme->source_lock);
	syme->lines_tail = &syme->lines;
	while (!feof(file)) {
		struct source_line *src;
		size_t dummy = 0;
		char *c;

		src = malloc(sizeof(struct source_line));
		if (!src)
			break;
		memset(src, 0, sizeof(struct source_line));

		if (getline(&src->line, &dummy, file) < 0)
			break;

		c = strchr(src->line, '\n');
		if (c)
			*c = 0;

		*syme->lines_tail = src;
		syme->lines_tail = &src->next;

		if (strlen(src->line) > 8 && src->line[8] == ':') {
			src->eip = strtoull(src->line, NULL, 16);
			if (section)
				src->eip += section->vma;
		}
		if (strlen(src->line) > 8 && src->line[16] == ':') {
			src->eip = strtoull(src->line, NULL, 16);
			if (section)
				src->eip += section->vma;
		}
	}
	pclose(file);
out_assign:
	sym_filter_entry = syme;
	pthread_mutex_unlock(&syme->source_lock);
}
static void __zero_source_counters(struct sym_entry *syme)
{
	int i;
	struct source_line *line;

	for (line = syme->lines; line; line = line->next) {
		for (i = 0; i < nr_counters; i++)
			line->count[i] = 0;
	}
}
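
/*
 * Per-source-line accounting for the annotated symbol: called on every
 * sample whose IP falls inside sym_filter_entry, it bumps the matching
 * source_line counter without blocking the sampling path (trylock only).
 */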
static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip)
{
	struct source_line *line;

	if (syme != sym_filter_entry)
		return;

	if (pthread_mutex_trylock(&syme->source_lock))
		return;

	for (line = syme->lines; line; line = line->next) {
		if (line->eip == ip) {
			line->count[counter]++;
			break;
		}
	}

	pthread_mutex_unlock(&syme->source_lock);
}
static void lookup_sym_source(struct sym_entry *syme)
{
	struct symbol *symbol = (struct symbol *)(syme + 1);
	struct source_line *line;
	char pattern[PATH_MAX];
	char *idx;

	sprintf(pattern, "<%s>:", symbol->name);

	if (symbol->module) {
		idx = strstr(pattern, "\t");
		if (idx)
			*idx = 0;
	}

	pthread_mutex_lock(&syme->source_lock);
	for (line = syme->lines; line; line = line->next) {
		if (strstr(line->line, pattern)) {
			syme->source = line;
			break;
		}
	}
	pthread_mutex_unlock(&syme->source_lock);
}
static void show_lines(struct source_line *queue, int count, int total)
{
	int i;
	struct source_line *line;

	line = queue;
	for (i = 0; i < count; i++) {
		float pcnt = 100.0*(float)line->count[sym_counter]/(float)total;

		printf("%8li %4.1f%%\t%s\n", line->count[sym_counter], pcnt, line->line);
		line = line->next;
	}
}
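
/*
 * show_details() walks the annotated source of the selected symbol keeping a
 * small queue of up to TRACE_COUNT context lines; whenever a line's share of
 * the samples reaches sym_pcnt_filter the queued block is printed, and every
 * line's count is then decayed (or zeroed) for the next refresh.
 */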
#define TRACE_COUNT	3
static void show_details(struct sym_entry *syme)
{
	struct symbol *symbol;
	struct source_line *line;
	struct source_line *line_queue = NULL;
	int displayed = 0;
	int line_queue_count = 0, total = 0, more = 0;

	if (!syme)
		return;

	if (!syme->source)
		lookup_sym_source(syme);

	if (!syme->source)
		return;

	symbol = (struct symbol *)(syme + 1);
	printf("Showing %s for %s\n", event_name(sym_counter), symbol->name);
	printf("  Events  Pcnt (>=%d%%)\n", sym_pcnt_filter);

	pthread_mutex_lock(&syme->source_lock);
	line = syme->source;
	while (line) {
		total += line->count[sym_counter];
		line = line->next;
	}

	line = syme->source;
	while (line) {
		float pcnt = 0.0;

		if (!line_queue_count)
			line_queue = line;
		line_queue_count++;

		if (line->count[sym_counter])
			pcnt = 100.0 * line->count[sym_counter] / (float)total;
		if (pcnt >= (float)sym_pcnt_filter) {
			if (displayed <= print_entries)
				show_lines(line_queue, line_queue_count, total);
			else
				more++;
			displayed += line_queue_count;
			line_queue_count = 0;
			line_queue = NULL;
		} else if (line_queue_count > TRACE_COUNT) {
			line_queue = line_queue->next;
			line_queue_count--;
		}

		line->count[sym_counter] = zero ? 0 : line->count[sym_counter] * 7 / 8;
		line = line->next;
	}
	pthread_mutex_unlock(&syme->source_lock);

	if (more)
		printf("%d lines not displayed, maybe increase display entries [e]\n", more);
}
/*
 * Symbols are added here in record_ip() and are removed again once their
 * counts have decayed away.
 */
static LIST_HEAD(active_symbols);
static pthread_mutex_t active_symbols_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * Ordering weight: count-1 * count-2 * ... / count-n
 */
static double sym_weight(const struct sym_entry *sym)
{
	double weight = sym->snap_count;
	int counter;

	if (!display_weighted)
		return weight;

	for (counter = 1; counter < nr_counters-1; counter++)
		weight *= sym->count[counter];

	weight /= (sym->count[counter] + 1);

	return weight;
}
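
/*
 * Illustration (hypothetical numbers): with three counters, snap_count = 100,
 * count[1] = 4 and count[2] = 2, weighted mode yields 100 * 4 / (2 + 1) ~= 133,
 * so symbols hot in the leading counters but cold in the last one sort higher.
 */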
static long		samples;
static long		userspace_samples;
static const char	CONSOLE_CLEAR[] = "\e[H\e[2J";
static void __list_insert_active_sym(struct sym_entry *syme)
{
	list_add(&syme->node, &active_symbols);
}

static void list_remove_active_sym(struct sym_entry *syme)
{
	pthread_mutex_lock(&active_symbols_lock);
	list_del_init(&syme->node);
	pthread_mutex_unlock(&active_symbols_lock);
}
static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se)
{
	struct rb_node **p = &tree->rb_node;
	struct rb_node *parent = NULL;
	struct sym_entry *iter;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct sym_entry, rb_node);

		if (se->weight > iter->weight)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&se->rb_node, parent, p);
	rb_insert_color(&se->rb_node, tree);
}
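
/*
 * Rebuilt on every refresh: the active symbols are snapshotted, sorted into a
 * temporary rb-tree by weight, decayed (or zeroed), and the top entries are
 * printed together with the sample-rate header.
 */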
static void print_sym_table(void)
{
	int printed = 0, j;
	int counter, snap = !display_weighted ? sym_counter : 0;
	float samples_per_sec = samples/delay_secs;
	float ksamples_per_sec = (samples-userspace_samples)/delay_secs;
	float sum_ksamples = 0.0;
	struct sym_entry *syme, *n;
	struct rb_root tmp = RB_ROOT;
	struct rb_node *nd;

	samples = userspace_samples = 0;

	/* Sort the active symbols */
	pthread_mutex_lock(&active_symbols_lock);
	syme = list_entry(active_symbols.next, struct sym_entry, node);
	pthread_mutex_unlock(&active_symbols_lock);

	list_for_each_entry_safe_from(syme, n, &active_symbols, node) {
		syme->snap_count = syme->count[snap];
		if (syme->snap_count != 0) {
			syme->weight = sym_weight(syme);
			rb_insert_active_sym(&tmp, syme);
			sum_ksamples += syme->snap_count;

			for (j = 0; j < nr_counters; j++)
				syme->count[j] = zero ? 0 : syme->count[j] * 7 / 8;
		} else
			list_remove_active_sym(syme);
	}

	puts(CONSOLE_CLEAR);

	printf(
"------------------------------------------------------------------------------\n");
	printf( "   PerfTop:%8.0f irqs/sec  kernel:%4.1f%% [",
		samples_per_sec,
		100.0 - (100.0*((samples_per_sec-ksamples_per_sec)/samples_per_sec)));

	if (nr_counters == 1 || !display_weighted) {
		printf("%Ld", (u64)attrs[0].sample_period);
		printf(" ");
	}

	if (!display_weighted)
		printf("%s", event_name(sym_counter));
	else for (counter = 0; counter < nr_counters; counter++) {
		if (counter)
			printf("/");

		printf("%s", event_name(counter));
	}

	printf( "], ");

	if (target_pid != -1)
		printf(" (target_pid: %d", target_pid);
	else
		printf(" (all");

	if (profile_cpu != -1)
		printf(", cpu: %d)\n", profile_cpu);
	else {
		if (target_pid != -1)
			printf(")\n");
		else
			printf(", %d CPUs)\n", nr_cpus);
	}

	printf("------------------------------------------------------------------------------\n\n");

	if (sym_filter_entry) {
		show_details(sym_filter_entry);
		return;
	}

	if (nr_counters == 1)
		printf("             samples    pcnt");
	else
		printf("   weight    samples    pcnt");

	if (verbose)
		printf("         RIP       ");
	printf("   kernel function\n");
	printf("   %s    _______   _____",
	       nr_counters == 1 ? "      " : "______");
	if (verbose)
		printf("   ________________");
	printf("   _______________\n\n");

	for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
		struct symbol *sym;
		double pcnt;

		syme = rb_entry(nd, struct sym_entry, rb_node);
		sym = (struct symbol *)(syme + 1);

		if (++printed > print_entries || (int)syme->snap_count < count_filter)
			continue;

		pcnt = 100.0 - (100.0 * ((sum_ksamples - syme->snap_count) /
					 sum_ksamples));

		if (nr_counters == 1 || !display_weighted)
			printf("%20.2f - ", syme->weight);
		else
			printf("%9.1f %10ld - ", syme->weight, syme->snap_count);

		percent_color_fprintf(stdout, "%4.1f%%", pcnt);
		if (verbose)
			printf(" - %016llx", sym->start);
		printf(" : %s", sym->name);
		if (sym->module)
			printf("\t[%s]", sym->module->name);
		printf("\n");
	}
}
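
/*
 * Interactive prompts for the mapped keys: each helper reads one line from
 * stdin and updates the corresponding tunable (delay, entry count, filters,
 * annotated symbol) used by the display thread.
 */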
static void prompt_integer(int *target, const char *msg)
{
	char *buf = malloc(0), *p;
	size_t dummy = 0;
	int tmp;

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		return;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	tmp = strtoul(buf, NULL, 10);
	*target = tmp;
	free(buf);
}
static void prompt_percent(int *target, const char *msg)
{
	int tmp = 0;

	prompt_integer(&tmp, msg);
	if (tmp >= 0 && tmp <= 100)
		*target = tmp;
}
static void prompt_symbol(struct sym_entry **target, const char *msg)
{
	char *buf = malloc(0), *p;
	struct sym_entry *syme = *target, *n, *found = NULL;
	size_t dummy = 0;

	/* zero counters of active symbol */
	if (syme) {
		pthread_mutex_lock(&syme->source_lock);
		__zero_source_counters(syme);
		*target = NULL;
		pthread_mutex_unlock(&syme->source_lock);
	}

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		goto out_free;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	pthread_mutex_lock(&active_symbols_lock);
	syme = list_entry(active_symbols.next, struct sym_entry, node);
	pthread_mutex_unlock(&active_symbols_lock);

	list_for_each_entry_safe_from(syme, n, &active_symbols, node) {
		struct symbol *sym = (struct symbol *)(syme + 1);

		if (!strcmp(buf, sym->name)) {
			found = syme;
			break;
		}
	}

	if (!found) {
		fprintf(stderr, "Sorry, %s is not active.\n", sym_filter);
		sleep(1);
	} else
		parse_source(found);

out_free:
	free(buf);
}
static void print_mapped_keys(void)
{
	char *name = NULL;

	if (sym_filter_entry) {
		struct symbol *sym = (struct symbol *)(sym_filter_entry+1);
		name = sym->name;
	}

	fprintf(stdout, "\nMapped keys:\n");
	fprintf(stdout, "\t[d]     display refresh delay.             \t(%d)\n", delay_secs);
	fprintf(stdout, "\t[e]     display entries (lines).           \t(%d)\n", print_entries);

	if (nr_counters > 1)
		fprintf(stdout, "\t[E]     active event counter.              \t(%s)\n", event_name(sym_counter));

	fprintf(stdout, "\t[f]     profile display filter (count).    \t(%d)\n", count_filter);

	if (vmlinux_name) {
		fprintf(stdout, "\t[F]     annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter);
		fprintf(stdout, "\t[s]     annotate symbol.                   \t(%s)\n", name ?: "NULL");
		fprintf(stdout, "\t[S]     stop annotation.\n");
	}

	if (nr_counters > 1)
		fprintf(stdout, "\t[w]     toggle display weighted/count[E]r. \t(%d)\n", display_weighted ? 1 : 0);

	fprintf(stdout, "\t[z]     toggle sample zeroing.             \t(%d)\n", zero ? 1 : 0);
	fprintf(stdout, "\t[qQ]    quit.\n");
}
static int key_mapped(int c)
{
	switch (c) {
		case 'd':
		case 'e':
		case 'f':
		case 'z':
		case 'q':
		case 'Q':
			return 1;
		case 'E':
		case 'w':
			return nr_counters > 1 ? 1 : 0;
		case 'F':
		case 's':
		case 'S':
			return vmlinux_name ? 1 : 0;
		default:
			break;
	}

	return 0;
}
static void handle_keypress(int c)
{
	if (!key_mapped(c)) {
		struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
		struct termios tc, save;

		print_mapped_keys();
		fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
		fflush(stdout);

		tcgetattr(0, &save);
		tc = save;
		tc.c_lflag &= ~(ICANON | ECHO);
		tc.c_cc[VMIN] = 0;
		tc.c_cc[VTIME] = 0;
		tcsetattr(0, TCSANOW, &tc);

		poll(&stdin_poll, 1, -1);
		c = getc(stdin);

		tcsetattr(0, TCSAFLUSH, &save);
		if (!key_mapped(c))
			return;
	}

	switch (c) {
		case 'd':
			prompt_integer(&delay_secs, "Enter display delay");
			break;
		case 'e':
			prompt_integer(&print_entries, "Enter display entries (lines)");
			break;
		case 'E':
			if (nr_counters > 1) {
				int i;

				fprintf(stderr, "\nAvailable events:");
				for (i = 0; i < nr_counters; i++)
					fprintf(stderr, "\n\t%d %s", i, event_name(i));

				prompt_integer(&sym_counter, "Enter details event counter");

				if (sym_counter >= nr_counters) {
					fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(0));
					sym_counter = 0;
					sleep(1);
				}
			} else sym_counter = 0;
			break;
		case 'f':
			prompt_integer(&count_filter, "Enter display event count filter");
			break;
		case 'F':
			prompt_percent(&sym_pcnt_filter, "Enter details display event filter (percent)");
			break;
		case 'q':
		case 'Q':
			printf("exiting.\n");
			exit(0);
		case 's':
			prompt_symbol(&sym_filter_entry, "Enter details symbol");
			break;
		case 'S':
			if (!sym_filter_entry)
				break;
			else {
				struct sym_entry *syme = sym_filter_entry;

				pthread_mutex_lock(&syme->source_lock);
				sym_filter_entry = NULL;
				__zero_source_counters(syme);
				pthread_mutex_unlock(&syme->source_lock);
			}
			break;
		case 'w':
			display_weighted = ~display_weighted;
			break;
		case 'z':
			zero = ~zero;
			break;
		default:
			break;
	}
}
static void *display_thread(void *arg __used)
{
	struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
	struct termios tc, save;
	int delay_msecs, c;

	tcgetattr(0, &save);
	tc = save;
	tc.c_lflag &= ~(ICANON | ECHO);
	tc.c_cc[VMIN] = 0;
	tc.c_cc[VTIME] = 0;

repeat:
	delay_msecs = delay_secs * 1000;
	tcsetattr(0, TCSANOW, &tc);

	do {
		print_sym_table();
	} while (!poll(&stdin_poll, 1, delay_msecs) == 1);

	c = getc(stdin);
	tcsetattr(0, TCSAFLUSH, &save);

	handle_keypress(c);
	goto repeat;

	return NULL;
}
/* Tag samples to be skipped. */
static const char *skip_symbols[] = {
	"mwait_idle_with_hints",
	"ppc64_runlatch_off",
	"pseries_dedicated_idle_sleep",
	NULL
};
static int symbol_filter(struct dso *self, struct symbol *sym)
{
	struct sym_entry *syme;
	const char *name = sym->name;
	int i;

	/*
	 * ppc64 uses function descriptors and appends a '.' to the
	 * start of every instruction address. Remove it.
	 */
	if (name[0] == '.')
		name++;

	if (!strcmp(name, "_text") ||
	    !strcmp(name, "_etext") ||
	    !strcmp(name, "_sinittext") ||
	    !strncmp("init_module", name, 11) ||
	    !strncmp("cleanup_module", name, 14) ||
	    strstr(name, "_text_start") ||
	    strstr(name, "_text_end"))
		return 1;

	syme = dso__sym_priv(self, sym);
	pthread_mutex_init(&syme->source_lock, NULL);
	if (!sym_filter_entry && sym_filter && !strcmp(name, sym_filter))
		sym_filter_entry = syme;

	for (i = 0; skip_symbols[i]; i++) {
		if (!strcmp(skip_symbols[i], name)) {
			syme->skip = 1;
			break;
		}
	}

	return 0;
}
static struct dso *kernel_dso;

static int parse_symbols(void)
{
	struct rb_node *node;
	struct symbol *sym;
	int use_modules = vmlinux_name ? 1 : 0;

	kernel_dso = dso__new("[kernel]", sizeof(struct sym_entry));
	if (kernel_dso == NULL)
		return -1;

	if (dso__load_kernel(kernel_dso, vmlinux_name, symbol_filter, verbose, use_modules) <= 0)
		goto out_delete_dso;

	node = rb_first(&kernel_dso->syms);
	sym = rb_entry(node, struct symbol, rb_node);
	min_ip = sym->start;

	node = rb_last(&kernel_dso->syms);
	sym = rb_entry(node, struct symbol, rb_node);
	max_ip = sym->end;

	if (dump_symtab)
		dso__fprintf(kernel_dso, stderr);

	return 0;

out_delete_dso:
	dso__delete(kernel_dso);
	kernel_dso = NULL;
	return -1;
}
/*
 * Look up the sampled IP in the kernel symbol tree and record the hit:
 */
static void record_ip(u64 ip, int counter)
{
	struct symbol *sym = dso__find_symbol(kernel_dso, ip);

	if (sym != NULL) {
		struct sym_entry *syme = dso__sym_priv(kernel_dso, sym);

		if (!syme->skip) {
			syme->count[counter]++;
			record_precise_ip(syme, counter, ip);
			pthread_mutex_lock(&active_symbols_lock);
			if (list_empty(&syme->node) || !syme->node.next)
				__list_insert_active_sym(syme);
			pthread_mutex_unlock(&active_symbols_lock);
			return;
		}
	}

	samples--;
}
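
/*
 * process_event() only resolves kernel-mode IPs against the kernel DSO;
 * user-mode samples are tallied separately so the header can report the
 * kernel/userspace split.
 */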
static void process_event(u64 ip, int counter, int user)
{
	samples++;

	if (user) {
		userspace_samples++;
		return;
	}

	record_ip(ip, counter);
}
struct mmap_data {
	int			counter;
	void			*base;
	unsigned int		mask;
	unsigned int		prev;
};

static unsigned int mmap_read_head(struct mmap_data *md)
{
	struct perf_event_mmap_page *pc = md->base;
	int head;

	head = pc->data_head;
	rmb();

	return head;
}
struct timeval last_read, this_read;
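
/*
 * Drain one counter's mmap ring buffer. Records that wrap around the end of
 * the buffer are first copied piecewise into event_copy so they can be parsed
 * from contiguous memory.
 */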
static void mmap_read_counter(struct mmap_data *md)
{
	unsigned int head = mmap_read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	int diff;

	gettimeofday(&this_read, NULL);

	/*
	 * If we're further behind than half the buffer, there's a chance
	 * the writer will bite our tail and mess up the samples under us.
	 *
	 * If we somehow ended up ahead of the head, we got messed up.
	 *
	 * In either case, truncate and restart at head.
	 */
	diff = head - old;
	if (diff > md->mask / 2 || diff < 0) {
		struct timeval iv;
		unsigned long msecs;

		timersub(&this_read, &last_read, &iv);
		msecs = iv.tv_sec*1000 + iv.tv_usec/1000;

		fprintf(stderr, "WARNING: failed to keep up with mmap data."
				"  Last read %lu msecs ago.\n", msecs);

		/*
		 * head points to a known good entry, start there.
		 */
		old = head;
	}

	last_read = this_read;

	for (; old != head;) {
		event_t *event = (event_t *)&data[old & md->mask];

		event_t event_copy;

		size_t size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &event_copy;
		}

		old += size;

		if (event->header.type == PERF_RECORD_SAMPLE) {
			int user =
	(event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK) == PERF_RECORD_MISC_USER;
			process_event(event->ip.ip, md->counter, user);
		}
	}

	md->prev = old;
}
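
/*
 * One pollfd and one mmap ring per (cpu, counter) pair; mmap_read() simply
 * walks the whole matrix once per iteration of the main loop.
 */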
static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS];
static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS];

static void mmap_read(void)
{
	int i, counter;

	for (i = 0; i < nr_cpus; i++) {
		for (counter = 0; counter < nr_counters; counter++)
			mmap_read_counter(&mmap_array[i][counter]);
	}
}
static void start_counter(int i, int counter)
{
	struct perf_event_attr *attr;
	int cpu;

	cpu = profile_cpu;
	if (target_pid == -1 && profile_cpu == -1)
		cpu = i;

	attr = attrs + counter;

	attr->sample_type	= PERF_SAMPLE_IP | PERF_SAMPLE_TID;
	attr->inherit		= (cpu < 0) && inherit;

try_again:
	fd[i][counter] = sys_perf_event_open(attr, target_pid, cpu, group_fd, 0);

	if (fd[i][counter] < 0) {
		int err = errno;

		if (err == EPERM || err == EACCES)
			die("No permission - are you root?\n");
		/*
		 * If it's cycles then fall back to hrtimer
		 * based cpu-clock-tick sw counter, which
		 * is always available even if no PMU support:
		 */
		if (attr->type == PERF_TYPE_HARDWARE
			&& attr->config == PERF_COUNT_HW_CPU_CYCLES) {

			if (verbose)
				warning(" ... trying to fall back to cpu-clock-ticks\n");

			attr->type = PERF_TYPE_SOFTWARE;
			attr->config = PERF_COUNT_SW_CPU_CLOCK;
			goto try_again;
		}
		error("perfcounter syscall returned with %d (%s)\n",
			fd[i][counter], strerror(err));
		die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
	}

	assert(fd[i][counter] >= 0);
	fcntl(fd[i][counter], F_SETFL, O_NONBLOCK);

	/*
	 * First counter acts as the group leader:
	 */
	if (group && group_fd == -1)
		group_fd = fd[i][counter];

	event_array[nr_poll].fd = fd[i][counter];
	event_array[nr_poll].events = POLLIN;
	nr_poll++;

	mmap_array[i][counter].counter = counter;
	mmap_array[i][counter].prev = 0;
	mmap_array[i][counter].mask = mmap_pages*page_size - 1;
	mmap_array[i][counter].base = mmap(NULL, (mmap_pages+1)*page_size,
			PROT_READ, MAP_SHARED, fd[i][counter], 0);
	if (mmap_array[i][counter].base == MAP_FAILED)
		die("failed to mmap with %d (%s)\n", errno, strerror(errno));
}
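
/*
 * Main collection loop: open and mmap all counters, spawn the display
 * thread, optionally switch to SCHED_FIFO, then alternate between draining
 * the ring buffers and sleeping in poll() until new samples arrive.
 */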
static int __cmd_top(void)
{
	pthread_t thread;
	int i, counter;
	int ret;

	for (i = 0; i < nr_cpus; i++) {
		group_fd = -1;
		for (counter = 0; counter < nr_counters; counter++)
			start_counter(i, counter);
	}

	/* Wait for a minimal set of events before starting the snapshot */
	poll(event_array, nr_poll, 100);

	mmap_read();

	if (pthread_create(&thread, NULL, display_thread, NULL)) {
		printf("Could not create display thread.\n");
		exit(-1);
	}

	if (realtime_prio) {
		struct sched_param param;

		param.sched_priority = realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			printf("Could not set realtime priority.\n");
			exit(-1);
		}
	}

	while (1) {
		int hits = samples;

		mmap_read();

		if (hits == samples)
			ret = poll(event_array, nr_poll, 100);
	}

	return 0;
}
static const char * const top_usage[] = {
	"perf top [<options>]",
	NULL
};

static const struct option options[] = {
	OPT_CALLBACK('e', "event", NULL, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events),
	OPT_INTEGER('c', "count", &default_interval,
		    "event period to sample"),
	OPT_INTEGER('p', "pid", &target_pid,
		    "profile events on existing pid"),
	OPT_BOOLEAN('a', "all-cpus", &system_wide,
		    "system-wide collection from all CPUs"),
	OPT_INTEGER('C', "CPU", &profile_cpu,
		    "CPU to profile on"),
	OPT_STRING('k', "vmlinux", &vmlinux_name, "file", "vmlinux pathname"),
	OPT_INTEGER('m', "mmap-pages", &mmap_pages,
		    "number of mmap data pages"),
	OPT_INTEGER('r', "realtime", &realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_INTEGER('d', "delay", &delay_secs,
		    "number of seconds to delay between refreshes"),
	OPT_BOOLEAN('D', "dump-symtab", &dump_symtab,
		    "dump the symbol table used for profiling"),
	OPT_INTEGER('f', "count-filter", &count_filter,
		    "only display functions with more events than this"),
	OPT_BOOLEAN('g', "group", &group,
		    "put the counters into a counter group"),
	OPT_BOOLEAN('i', "inherit", &inherit,
		    "child tasks inherit counters"),
	OPT_STRING('s', "sym-annotate", &sym_filter, "symbol name",
		    "symbol to annotate - requires -k option"),
	OPT_BOOLEAN('z', "zero", &zero,
		    "zero history across updates"),
	OPT_INTEGER('F', "freq", &freq,
		    "profile at this frequency"),
	OPT_INTEGER('E', "entries", &print_entries,
		    "display this many functions"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_END()
};
int cmd_top(int argc, const char **argv, const char *prefix __used)
{
	int counter;

	page_size = sysconf(_SC_PAGE_SIZE);

	argc = parse_options(argc, argv, options, top_usage, 0);
	if (argc)
		usage_with_options(top_usage, options);

	if (freq) {
		default_interval = freq;
		freq = 1;
	}

	/* CPU and PID are mutually exclusive */
	if (target_pid != -1 && profile_cpu != -1) {
		printf("WARNING: PID switch overriding CPU\n");
		sleep(1);
		profile_cpu = -1;
	}

	parse_symbols();
	parse_source(sym_filter_entry);

	/*
	 * Fill in the ones not specifically initialized via -c:
	 */
	for (counter = 0; counter < nr_counters; counter++) {
		if (attrs[counter].sample_period)
			continue;

		attrs[counter].sample_period = default_interval;
	}

	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	assert(nr_cpus <= MAX_NR_CPUS);
	assert(nr_cpus >= 0);

	if (target_pid != -1 || profile_cpu != -1)
		nr_cpus = 1;

	return __cmd_top();
}