tools/perf/util/top.c
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Refactored from builtin-top.c, see that file for further copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "cpumap.h"
#include "event.h"
#include "evlist.h"
#include "evsel.h"
#include "parse-events.h"
#include "symbol.h"
#include "top.h"
#include <inttypes.h>
/*
 * Ordering weight: count-1 * count-2 * ... / count-n
 */
static double sym_weight(const struct sym_entry *sym, struct perf_top *top)
{
	double weight = sym->snap_count;
	int counter;

	if (!top->display_weighted)
		return weight;

	for (counter = 1; counter < top->evlist->nr_entries - 1; counter++)
		weight *= sym->count[counter];

	weight /= (sym->count[counter] + 1);

	return weight;
}
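
/* Unlink a symbol from the active symbols list, under the list lock. */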
static void perf_top__remove_active_sym(struct perf_top *top, struct sym_entry *syme)
{
	pthread_mutex_lock(&top->active_symbols_lock);
	list_del_init(&syme->node);
	pthread_mutex_unlock(&top->active_symbols_lock);
}
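
/* Insert an entry into the display rbtree, ordered by descending weight. */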
static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se)
{
	struct rb_node **p = &tree->rb_node;
	struct rb_node *parent = NULL;
	struct sym_entry *iter;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct sym_entry, rb_node);

		if (se->weight > iter->weight)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&se->rb_node, parent, p);
	rb_insert_color(&se->rb_node, tree);
}
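
/*
 * snprintf() may return more than it actually wrote when output is truncated;
 * clamp the result to the buffer size so callers can keep appending at bf + ret.
 */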
#define SNPRINTF(buf, size, fmt, args...) \
({ \
	size_t r = snprintf(buf, size, fmt, ## args); \
	r > size ? size : r; \
})
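
/* Format the perf top header/status line into bf; returns the number of bytes written. */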
size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
{
	struct perf_evsel *counter;
	float samples_per_sec = top->samples / top->delay_secs;
	float ksamples_per_sec = top->kernel_samples / top->delay_secs;
	float esamples_percent = (100.0 * top->exact_samples) / top->samples;
	size_t ret = 0;

	if (!perf_guest) {
		ret = SNPRINTF(bf, size,
			       " PerfTop:%8.0f irqs/sec kernel:%4.1f%%"
			       " exact: %4.1f%% [", samples_per_sec,
			       100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) /
						 samples_per_sec)),
			       esamples_percent);
	} else {
		float us_samples_per_sec = top->us_samples / top->delay_secs;
		float guest_kernel_samples_per_sec = top->guest_kernel_samples / top->delay_secs;
		float guest_us_samples_per_sec = top->guest_us_samples / top->delay_secs;

		ret = SNPRINTF(bf, size,
			       " PerfTop:%8.0f irqs/sec kernel:%4.1f%% us:%4.1f%%"
			       " guest kernel:%4.1f%% guest us:%4.1f%%"
			       " exact: %4.1f%% [", samples_per_sec,
			       100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) /
						 samples_per_sec)),
			       100.0 - (100.0 * ((samples_per_sec - us_samples_per_sec) /
						 samples_per_sec)),
			       100.0 - (100.0 * ((samples_per_sec -
						  guest_kernel_samples_per_sec) /
						 samples_per_sec)),
			       100.0 - (100.0 * ((samples_per_sec -
						  guest_us_samples_per_sec) /
						 samples_per_sec)),
			       esamples_percent);
	}

	if (top->evlist->nr_entries == 1 || !top->display_weighted) {
		struct perf_evsel *first;
		first = list_entry(top->evlist->entries.next, struct perf_evsel, node);
		ret += SNPRINTF(bf + ret, size - ret, "%" PRIu64 "%s ",
				(uint64_t)first->attr.sample_period,
				top->freq ? "Hz" : "");
	}

	if (!top->display_weighted) {
		ret += SNPRINTF(bf + ret, size - ret, "%s",
				event_name(top->sym_evsel));
	} else {
		/*
		 * Don't let events eat all the space. Leaving 30 bytes
		 * for the rest should be enough.
		 */
		size_t last_pos = size - 30;

		list_for_each_entry(counter, &top->evlist->entries, node) {
			ret += SNPRINTF(bf + ret, size - ret, "%s%s",
					counter->idx ? "/" : "",
					event_name(counter));
			if (ret > last_pos) {
				sprintf(bf + last_pos - 3, "..");
				ret = last_pos - 1;
				break;
			}
		}
	}

	ret += SNPRINTF(bf + ret, size - ret, "], ");

	if (top->target_pid != -1)
		ret += SNPRINTF(bf + ret, size - ret, " (target_pid: %d",
				top->target_pid);
	else if (top->target_tid != -1)
		ret += SNPRINTF(bf + ret, size - ret, " (target_tid: %d",
				top->target_tid);
	else
		ret += SNPRINTF(bf + ret, size - ret, " (all");

	if (top->cpu_list)
		ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)",
				top->evlist->cpus->nr > 1 ? "s" : "", top->cpu_list);
	else {
		if (top->target_tid != -1)
			ret += SNPRINTF(bf + ret, size - ret, ")");
		else
			ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)",
					top->evlist->cpus->nr,
					top->evlist->cpus->nr > 1 ? "s" : "");
	}

	return ret;
}
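
/* Reset all sample counters (user, kernel, guest, exact) to zero. */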
void perf_top__reset_sample_counters(struct perf_top *top)
{
	top->samples = top->us_samples = top->kernel_samples =
	top->exact_samples = top->guest_kernel_samples =
	top->guest_us_samples = 0;
}
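
/*
 * Snapshot each active symbol's count, rebuild the display rbtree, and decay
 * the per-event counts (multiply by 7/8, or zero them when top->zero is set).
 * Returns the sum of the snapshot counts.
 */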
float perf_top__decay_samples(struct perf_top *top, struct rb_root *root)
{
	struct sym_entry *syme, *n;
	float sum_ksamples = 0.0;
	int snap = !top->display_weighted ? top->sym_evsel->idx : 0, j;

	/* Sort the active symbols */
	pthread_mutex_lock(&top->active_symbols_lock);
	syme = list_entry(top->active_symbols.next, struct sym_entry, node);
	pthread_mutex_unlock(&top->active_symbols_lock);

	top->rb_entries = 0;
	list_for_each_entry_safe_from(syme, n, &top->active_symbols, node) {
		syme->snap_count = syme->count[snap];
		if (syme->snap_count != 0) {

			if ((top->hide_user_symbols &&
			     syme->map->dso->kernel == DSO_TYPE_USER) ||
			    (top->hide_kernel_symbols &&
			     syme->map->dso->kernel == DSO_TYPE_KERNEL)) {
				perf_top__remove_active_sym(top, syme);
				continue;
			}
			syme->weight = sym_weight(syme, top);

			if ((int)syme->snap_count >= top->count_filter) {
				rb_insert_active_sym(root, syme);
				++top->rb_entries;
			}
			sum_ksamples += syme->snap_count;

			for (j = 0; j < top->evlist->nr_entries; j++)
				syme->count[j] = top->zero ? 0 : syme->count[j] * 7 / 8;
		} else
			perf_top__remove_active_sym(top, syme);
	}

	return sum_ksamples;
}
/*
 * Find the longest symbol name that will be displayed
 */
void perf_top__find_widths(struct perf_top *top, struct rb_root *root,
			   int *dso_width, int *dso_short_width, int *sym_width)
{
	struct rb_node *nd;
	int printed = 0;

	*sym_width = *dso_width = *dso_short_width = 0;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		struct sym_entry *syme = rb_entry(nd, struct sym_entry, rb_node);
		struct symbol *sym = sym_entry__symbol(syme);

		if (++printed > top->print_entries ||
		    (int)syme->snap_count < top->count_filter)
			continue;

		if (syme->map->dso->long_name_len > *dso_width)
			*dso_width = syme->map->dso->long_name_len;

		if (syme->map->dso->short_name_len > *dso_short_width)
			*dso_short_width = syme->map->dso->short_name_len;

		if (sym->namelen > *sym_width)
			*sym_width = sym->namelen;
	}
}