1 // SPDX-License-Identifier: GPL-2.0
2 #include <errno.h>
3 #include <inttypes.h>
4 #include <regex.h>
5 #include <stdlib.h>
6 #include <linux/mman.h>
7 #include <linux/time64.h>
8 #include "debug.h"
9 #include "dso.h"
10 #include "sort.h"
11 #include "hist.h"
12 #include "cacheline.h"
13 #include "comm.h"
14 #include "map.h"
15 #include "maps.h"
16 #include "symbol.h"
17 #include "map_symbol.h"
18 #include "branch.h"
19 #include "thread.h"
20 #include "evsel.h"
21 #include "evlist.h"
22 #include "srcline.h"
23 #include "strlist.h"
24 #include "strbuf.h"
25 #include "mem-events.h"
26 #include "mem-info.h"
27 #include "annotate.h"
28 #include "annotate-data.h"
29 #include "event.h"
30 #include "time-utils.h"
31 #include "cgroup.h"
32 #include "machine.h"
33 #include "trace-event.h"
34 #include <linux/kernel.h>
35 #include <linux/string.h>
37 #ifdef HAVE_LIBTRACEEVENT
38 #include <event-parse.h>
39 #endif
41 regex_t parent_regex;
42 const char default_parent_pattern[] = "^sys_|^do_page_fault";
43 const char *parent_pattern = default_parent_pattern;
44 const char *default_sort_order = "comm,dso,symbol";
45 const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
46 const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked,blocked,local_ins_lat,local_p_stage_cyc";
47 const char default_top_sort_order[] = "dso,symbol";
48 const char default_diff_sort_order[] = "dso,symbol";
49 const char default_tracepoint_sort_order[] = "trace";
50 const char *sort_order;
51 const char *field_order;
52 regex_t ignore_callees_regex;
53 int have_ignore_callees = 0;
54 enum sort_mode sort__mode = SORT_MODE__NORMAL;
55 static const char *const dynamic_headers[] = {"local_ins_lat", "ins_lat", "local_p_stage_cyc", "p_stage_cyc"};
56 static const char *const arch_specific_sort_keys[] = {"local_p_stage_cyc", "p_stage_cyc"};
58 /*
59 * Some architectures have Adjacent Cacheline Prefetch feature, which
60 * behaves like the cacheline size is doubled. Enable this flag to
61 * check things in double cacheline granularity.
62 */
63 bool chk_double_cl;
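/*
 * chk_double_cl is consumed by sort__dcacheline_cmp() and
 * hist_entry__dcacheline_snprintf() below, which pass it to cl_address()
 * so that two adjacent cachelines are grouped as one.
 */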
65 /*
66 * Replaces all occurrences of the character used with the:
67 *
68 * -t, --field-separator
69 *
70 * option, which uses a special separator character and doesn't pad with spaces,
71 * replacing all occurrences of this separator in symbol names (and other
72 * output) with a '.' character, so that it becomes the only invalid separator.
73 */
74 static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
76 int n;
77 va_list ap;
79 va_start(ap, fmt);
80 n = vsnprintf(bf, size, fmt, ap);
81 if (symbol_conf.field_sep && n > 0) {
82 char *sep = bf;
84 while (1) {
85 sep = strchr(sep, *symbol_conf.field_sep);
86 if (sep == NULL)
87 break;
88 *sep = '.';
91 va_end(ap);
93 if (n >= (int)size)
94 return size - 1;
95 return n;
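/*
 * Illustrative example: with "-t ," any ',' in the formatted output is emitted
 * as '.' (e.g. "foo,bar" becomes "foo.bar"), so ',' can only ever mean the
 * field separator.
 */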
98 static int64_t cmp_null(const void *l, const void *r)
100 if (!l && !r)
101 return 0;
102 else if (!l)
103 return -1;
104 else
105 return 1;
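/*
 * NULL ordering helper: both NULL compares equal, only 'l' NULL returns -1,
 * only 'r' NULL returns 1. Callers below swap the argument order to pick on
 * which side NULL entries end up.
 */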
108 /* --sort pid */
110 static int64_t
111 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
113 return thread__tid(right->thread) - thread__tid(left->thread);
116 static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
117 size_t size, unsigned int width)
119 const char *comm = thread__comm_str(he->thread);
121 width = max(7U, width) - 8;
122 return repsep_snprintf(bf, size, "%7d:%-*.*s", thread__tid(he->thread),
123 width, width, comm ?: "");
126 static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
128 const struct thread *th = arg;
130 if (type != HIST_FILTER__THREAD)
131 return -1;
133 return th && !RC_CHK_EQUAL(he->thread, th);
136 struct sort_entry sort_thread = {
137 .se_header = " Pid:Command",
138 .se_cmp = sort__thread_cmp,
139 .se_snprintf = hist_entry__thread_snprintf,
140 .se_filter = hist_entry__thread_filter,
141 .se_width_idx = HISTC_THREAD,
144 /* --sort simd */
146 static int64_t
147 sort__simd_cmp(struct hist_entry *left, struct hist_entry *right)
149 if (left->simd_flags.arch != right->simd_flags.arch)
150 return (int64_t) left->simd_flags.arch - right->simd_flags.arch;
152 return (int64_t) left->simd_flags.pred - right->simd_flags.pred;
155 static const char *hist_entry__get_simd_name(struct simd_flags *simd_flags)
157 u64 arch = simd_flags->arch;
159 if (arch & SIMD_OP_FLAGS_ARCH_SVE)
160 return "SVE";
161 else
162 return "n/a";
165 static int hist_entry__simd_snprintf(struct hist_entry *he, char *bf,
166 size_t size, unsigned int width __maybe_unused)
168 const char *name;
170 if (!he->simd_flags.arch)
171 return repsep_snprintf(bf, size, "");
173 name = hist_entry__get_simd_name(&he->simd_flags);
175 if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_EMPTY)
176 return repsep_snprintf(bf, size, "[e] %s", name);
177 else if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_PARTIAL)
178 return repsep_snprintf(bf, size, "[p] %s", name);
180 return repsep_snprintf(bf, size, "[.] %s", name);
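/*
 * Column legend: "[e]" - empty predicate, "[p]" - partial predicate,
 * "[.]" - neither flag set, followed by the SIMD arch name (or "n/a").
 */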
183 struct sort_entry sort_simd = {
184 .se_header = "Simd ",
185 .se_cmp = sort__simd_cmp,
186 .se_snprintf = hist_entry__simd_snprintf,
187 .se_width_idx = HISTC_SIMD,
190 /* --sort comm */
192 /*
193 * We can't use pointer comparison in functions below,
194 * because it gives different results based on pointer
195 * values, which could break some sorting assumptions.
196 */
197 static int64_t
198 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
200 return strcmp(comm__str(right->comm), comm__str(left->comm));
203 static int64_t
204 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
206 return strcmp(comm__str(right->comm), comm__str(left->comm));
209 static int64_t
210 sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
212 return strcmp(comm__str(right->comm), comm__str(left->comm));
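/*
 * se_cmp, se_collapse and se_sort are separate callbacks in struct sort_entry
 * (entry insertion, collapsing and output resort, respectively); for comm they
 * all reduce to the same strcmp() on the comm string.
 */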
215 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
216 size_t size, unsigned int width)
218 return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
221 struct sort_entry sort_comm = {
222 .se_header = "Command",
223 .se_cmp = sort__comm_cmp,
224 .se_collapse = sort__comm_collapse,
225 .se_sort = sort__comm_sort,
226 .se_snprintf = hist_entry__comm_snprintf,
227 .se_filter = hist_entry__thread_filter,
228 .se_width_idx = HISTC_COMM,
231 /* --sort dso */
233 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
235 struct dso *dso_l = map_l ? map__dso(map_l) : NULL;
236 struct dso *dso_r = map_r ? map__dso(map_r) : NULL;
237 const char *dso_name_l, *dso_name_r;
239 if (!dso_l || !dso_r)
240 return cmp_null(dso_r, dso_l);
242 if (verbose > 0) {
243 dso_name_l = dso__long_name(dso_l);
244 dso_name_r = dso__long_name(dso_r);
245 } else {
246 dso_name_l = dso__short_name(dso_l);
247 dso_name_r = dso__short_name(dso_r);
250 return strcmp(dso_name_l, dso_name_r);
253 static int64_t
254 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
256 return _sort__dso_cmp(right->ms.map, left->ms.map);
259 static int _hist_entry__dso_snprintf(struct map *map, char *bf,
260 size_t size, unsigned int width)
262 const struct dso *dso = map ? map__dso(map) : NULL;
263 const char *dso_name = "[unknown]";
265 if (dso)
266 dso_name = verbose > 0 ? dso__long_name(dso) : dso__short_name(dso);
268 return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
271 static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
272 size_t size, unsigned int width)
274 return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
277 static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
279 const struct dso *dso = arg;
281 if (type != HIST_FILTER__DSO)
282 return -1;
284 return dso && (!he->ms.map || map__dso(he->ms.map) != dso);
287 struct sort_entry sort_dso = {
288 .se_header = "Shared Object",
289 .se_cmp = sort__dso_cmp,
290 .se_snprintf = hist_entry__dso_snprintf,
291 .se_filter = hist_entry__dso_filter,
292 .se_width_idx = HISTC_DSO,
295 /* --sort symbol */
297 static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
299 return (int64_t)(right_ip - left_ip);
302 int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
304 if (!sym_l || !sym_r)
305 return cmp_null(sym_l, sym_r);
307 if (sym_l == sym_r)
308 return 0;
310 if (sym_l->inlined || sym_r->inlined) {
311 int ret = strcmp(sym_l->name, sym_r->name);
313 if (ret)
314 return ret;
315 if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
316 return 0;
319 if (sym_l->start != sym_r->start)
320 return (int64_t)(sym_r->start - sym_l->start);
322 return (int64_t)(sym_r->end - sym_l->end);
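/*
 * When either symbol is inlined, names are compared first: two instances that
 * share a name and have overlapping address ranges are treated as equal,
 * otherwise the comparison falls through to the start/end checks above.
 */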
325 static int64_t
326 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
328 int64_t ret;
330 if (!left->ms.sym && !right->ms.sym)
331 return _sort__addr_cmp(left->ip, right->ip);
333 /*
334 * comparing symbol address alone is not enough since it's a
335 * relative address within a dso.
336 */
337 if (!hists__has(left->hists, dso)) {
338 ret = sort__dso_cmp(left, right);
339 if (ret != 0)
340 return ret;
343 return _sort__sym_cmp(left->ms.sym, right->ms.sym);
346 static int64_t
347 sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
349 if (!left->ms.sym || !right->ms.sym)
350 return cmp_null(left->ms.sym, right->ms.sym);
352 return strcmp(right->ms.sym->name, left->ms.sym->name);
355 static int _hist_entry__sym_snprintf(struct map_symbol *ms,
356 u64 ip, char level, char *bf, size_t size,
357 unsigned int width)
359 struct symbol *sym = ms->sym;
360 struct map *map = ms->map;
361 size_t ret = 0;
363 if (verbose > 0) {
364 struct dso *dso = map ? map__dso(map) : NULL;
365 char o = dso ? dso__symtab_origin(dso) : '!';
366 u64 rip = ip;
368 if (dso && dso__kernel(dso) && dso__adjust_symbols(dso))
369 rip = map__unmap_ip(map, ip);
371 ret += repsep_snprintf(bf, size, "%-#*llx %c ",
372 BITS_PER_LONG / 4 + 2, rip, o);
375 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
376 if (sym && map) {
377 if (sym->type == STT_OBJECT) {
378 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
379 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
380 ip - map__unmap_ip(map, sym->start));
381 } else {
382 ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
383 width - ret,
384 sym->name);
385 if (sym->inlined)
386 ret += repsep_snprintf(bf + ret, size - ret,
387 " (inlined)");
389 } else {
390 size_t len = BITS_PER_LONG / 4;
391 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
392 len, ip);
395 return ret;
398 int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
400 return _hist_entry__sym_snprintf(&he->ms, he->ip,
401 he->level, bf, size, width);
404 static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
406 const char *sym = arg;
408 if (type != HIST_FILTER__SYMBOL)
409 return -1;
411 return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
414 struct sort_entry sort_sym = {
415 .se_header = "Symbol",
416 .se_cmp = sort__sym_cmp,
417 .se_sort = sort__sym_sort,
418 .se_snprintf = hist_entry__sym_snprintf,
419 .se_filter = hist_entry__sym_filter,
420 .se_width_idx = HISTC_SYMBOL,
423 /* --sort symoff */
425 static int64_t
426 sort__symoff_cmp(struct hist_entry *left, struct hist_entry *right)
428 int64_t ret;
430 ret = sort__sym_cmp(left, right);
431 if (ret)
432 return ret;
434 return left->ip - right->ip;
437 static int64_t
438 sort__symoff_sort(struct hist_entry *left, struct hist_entry *right)
440 int64_t ret;
442 ret = sort__sym_sort(left, right);
443 if (ret)
444 return ret;
446 return left->ip - right->ip;
449 static int
450 hist_entry__symoff_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
452 struct symbol *sym = he->ms.sym;
454 if (sym == NULL)
455 return repsep_snprintf(bf, size, "[%c] %-#.*llx", he->level, width - 4, he->ip);
457 return repsep_snprintf(bf, size, "[%c] %s+0x%llx", he->level, sym->name, he->ip - sym->start);
460 struct sort_entry sort_sym_offset = {
461 .se_header = "Symbol Offset",
462 .se_cmp = sort__symoff_cmp,
463 .se_sort = sort__symoff_sort,
464 .se_snprintf = hist_entry__symoff_snprintf,
465 .se_filter = hist_entry__sym_filter,
466 .se_width_idx = HISTC_SYMBOL_OFFSET,
469 /* --sort srcline */
471 char *hist_entry__srcline(struct hist_entry *he)
473 return map__srcline(he->ms.map, he->ip, he->ms.sym);
476 static int64_t
477 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
479 int64_t ret;
481 ret = _sort__addr_cmp(left->ip, right->ip);
482 if (ret)
483 return ret;
485 return sort__dso_cmp(left, right);
488 static int64_t
489 sort__srcline_collapse(struct hist_entry *left, struct hist_entry *right)
491 if (!left->srcline)
492 left->srcline = hist_entry__srcline(left);
493 if (!right->srcline)
494 right->srcline = hist_entry__srcline(right);
496 return strcmp(right->srcline, left->srcline);
499 static int64_t
500 sort__srcline_sort(struct hist_entry *left, struct hist_entry *right)
502 return sort__srcline_collapse(left, right);
505 static void
506 sort__srcline_init(struct hist_entry *he)
508 if (!he->srcline)
509 he->srcline = hist_entry__srcline(he);
512 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
513 size_t size, unsigned int width)
515 return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
518 struct sort_entry sort_srcline = {
519 .se_header = "Source:Line",
520 .se_cmp = sort__srcline_cmp,
521 .se_collapse = sort__srcline_collapse,
522 .se_sort = sort__srcline_sort,
523 .se_init = sort__srcline_init,
524 .se_snprintf = hist_entry__srcline_snprintf,
525 .se_width_idx = HISTC_SRCLINE,
528 /* --sort srcline_from */
530 static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
532 return map__srcline(ams->ms.map, ams->al_addr, ams->ms.sym);
535 static int64_t
536 sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
538 return left->branch_info->from.addr - right->branch_info->from.addr;
541 static int64_t
542 sort__srcline_from_collapse(struct hist_entry *left, struct hist_entry *right)
544 if (!left->branch_info->srcline_from)
545 left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);
547 if (!right->branch_info->srcline_from)
548 right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);
550 return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
553 static int64_t
554 sort__srcline_from_sort(struct hist_entry *left, struct hist_entry *right)
556 return sort__srcline_from_collapse(left, right);
559 static void sort__srcline_from_init(struct hist_entry *he)
561 if (!he->branch_info->srcline_from)
562 he->branch_info->srcline_from = addr_map_symbol__srcline(&he->branch_info->from);
565 static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
566 size_t size, unsigned int width)
568 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
571 struct sort_entry sort_srcline_from = {
572 .se_header = "From Source:Line",
573 .se_cmp = sort__srcline_from_cmp,
574 .se_collapse = sort__srcline_from_collapse,
575 .se_sort = sort__srcline_from_sort,
576 .se_init = sort__srcline_from_init,
577 .se_snprintf = hist_entry__srcline_from_snprintf,
578 .se_width_idx = HISTC_SRCLINE_FROM,
581 /* --sort srcline_to */
583 static int64_t
584 sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
586 return left->branch_info->to.addr - right->branch_info->to.addr;
589 static int64_t
590 sort__srcline_to_collapse(struct hist_entry *left, struct hist_entry *right)
592 if (!left->branch_info->srcline_to)
593 left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);
595 if (!right->branch_info->srcline_to)
596 right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);
598 return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
601 static int64_t
602 sort__srcline_to_sort(struct hist_entry *left, struct hist_entry *right)
604 return sort__srcline_to_collapse(left, right);
607 static void sort__srcline_to_init(struct hist_entry *he)
609 if (!he->branch_info->srcline_to)
610 he->branch_info->srcline_to = addr_map_symbol__srcline(&he->branch_info->to);
613 static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
614 size_t size, unsigned int width)
616 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
619 struct sort_entry sort_srcline_to = {
620 .se_header = "To Source:Line",
621 .se_cmp = sort__srcline_to_cmp,
622 .se_collapse = sort__srcline_to_collapse,
623 .se_sort = sort__srcline_to_sort,
624 .se_init = sort__srcline_to_init,
625 .se_snprintf = hist_entry__srcline_to_snprintf,
626 .se_width_idx = HISTC_SRCLINE_TO,
629 static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
630 size_t size, unsigned int width)
633 struct symbol *sym = he->ms.sym;
634 struct annotated_branch *branch;
635 double ipc = 0.0, coverage = 0.0;
636 char tmp[64];
638 if (!sym)
639 return repsep_snprintf(bf, size, "%-*s", width, "-");
641 branch = symbol__annotation(sym)->branch;
643 if (branch && branch->hit_cycles)
644 ipc = branch->hit_insn / ((double)branch->hit_cycles);
646 if (branch && branch->total_insn) {
647 coverage = branch->cover_insn * 100.0 /
648 ((double)branch->total_insn);
651 snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
652 return repsep_snprintf(bf, size, "%-*s", width, tmp);
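/*
 * Illustrative reading of the column: "2.50  [ 45.3%]" means 2.50 instructions
 * per hit cycle, with 45.3% of the symbol's instructions covered.
 */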
655 struct sort_entry sort_sym_ipc = {
656 .se_header = "IPC [IPC Coverage]",
657 .se_cmp = sort__sym_cmp,
658 .se_snprintf = hist_entry__sym_ipc_snprintf,
659 .se_width_idx = HISTC_SYMBOL_IPC,
662 static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he
663 __maybe_unused,
664 char *bf, size_t size,
665 unsigned int width)
667 char tmp[64];
669 snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-");
670 return repsep_snprintf(bf, size, "%-*s", width, tmp);
673 struct sort_entry sort_sym_ipc_null = {
674 .se_header = "IPC [IPC Coverage]",
675 .se_cmp = sort__sym_cmp,
676 .se_snprintf = hist_entry__sym_ipc_null_snprintf,
677 .se_width_idx = HISTC_SYMBOL_IPC,
680 /* --sort callchain_branch_predicted */
682 static int64_t
683 sort__callchain_branch_predicted_cmp(struct hist_entry *left __maybe_unused,
684 struct hist_entry *right __maybe_unused)
686 return 0;
689 static int hist_entry__callchain_branch_predicted_snprintf(
690 struct hist_entry *he, char *bf, size_t size, unsigned int width)
692 u64 branch_count, predicted_count;
693 double percent = 0.0;
694 char str[32];
696 callchain_branch_counts(he->callchain, &branch_count,
697 &predicted_count, NULL, NULL);
699 if (branch_count)
700 percent = predicted_count * 100.0 / branch_count;
702 snprintf(str, sizeof(str), "%.1f%%", percent);
703 return repsep_snprintf(bf, size, "%-*.*s", width, width, str);
706 struct sort_entry sort_callchain_branch_predicted = {
707 .se_header = "Predicted",
708 .se_cmp = sort__callchain_branch_predicted_cmp,
709 .se_snprintf = hist_entry__callchain_branch_predicted_snprintf,
710 .se_width_idx = HISTC_CALLCHAIN_BRANCH_PREDICTED,
713 /* --sort callchain_branch_abort */
715 static int64_t
716 sort__callchain_branch_abort_cmp(struct hist_entry *left __maybe_unused,
717 struct hist_entry *right __maybe_unused)
719 return 0;
722 static int hist_entry__callchain_branch_abort_snprintf(struct hist_entry *he,
723 char *bf, size_t size,
724 unsigned int width)
726 u64 branch_count, abort_count;
727 char str[32];
729 callchain_branch_counts(he->callchain, &branch_count,
730 NULL, &abort_count, NULL);
732 snprintf(str, sizeof(str), "%" PRId64, abort_count);
733 return repsep_snprintf(bf, size, "%-*.*s", width, width, str);
736 struct sort_entry sort_callchain_branch_abort = {
737 .se_header = "Abort",
738 .se_cmp = sort__callchain_branch_abort_cmp,
739 .se_snprintf = hist_entry__callchain_branch_abort_snprintf,
740 .se_width_idx = HISTC_CALLCHAIN_BRANCH_ABORT,
743 /* --sort callchain_branch_cycles */
745 static int64_t
746 sort__callchain_branch_cycles_cmp(struct hist_entry *left __maybe_unused,
747 struct hist_entry *right __maybe_unused)
749 return 0;
752 static int hist_entry__callchain_branch_cycles_snprintf(struct hist_entry *he,
753 char *bf, size_t size,
754 unsigned int width)
756 u64 branch_count, cycles_count, cycles = 0;
757 char str[32];
759 callchain_branch_counts(he->callchain, &branch_count,
760 NULL, NULL, &cycles_count);
762 if (branch_count)
763 cycles = cycles_count / branch_count;
765 snprintf(str, sizeof(str), "%" PRId64 "", cycles);
766 return repsep_snprintf(bf, size, "%-*.*s", width, width, str);
769 struct sort_entry sort_callchain_branch_cycles = {
770 .se_header = "Cycles",
771 .se_cmp = sort__callchain_branch_cycles_cmp,
772 .se_snprintf = hist_entry__callchain_branch_cycles_snprintf,
773 .se_width_idx = HISTC_CALLCHAIN_BRANCH_CYCLES,
776 /* --sort srcfile */
778 static char no_srcfile[1];
780 static char *hist_entry__get_srcfile(struct hist_entry *e)
782 char *sf, *p;
783 struct map *map = e->ms.map;
785 if (!map)
786 return no_srcfile;
788 sf = __get_srcline(map__dso(map), map__rip_2objdump(map, e->ip),
789 e->ms.sym, false, true, true, e->ip);
790 if (sf == SRCLINE_UNKNOWN)
791 return no_srcfile;
792 p = strchr(sf, ':');
793 if (p && *sf) {
794 *p = 0;
795 return sf;
797 free(sf);
798 return no_srcfile;
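/*
 * The result is the srcline with its ":<line>" suffix cut off, i.e. just the
 * source file name; unknown srclines fall back to the empty no_srcfile string.
 */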
801 static int64_t
802 sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
804 return sort__srcline_cmp(left, right);
807 static int64_t
808 sort__srcfile_collapse(struct hist_entry *left, struct hist_entry *right)
810 if (!left->srcfile)
811 left->srcfile = hist_entry__get_srcfile(left);
812 if (!right->srcfile)
813 right->srcfile = hist_entry__get_srcfile(right);
815 return strcmp(right->srcfile, left->srcfile);
818 static int64_t
819 sort__srcfile_sort(struct hist_entry *left, struct hist_entry *right)
821 return sort__srcfile_collapse(left, right);
824 static void sort__srcfile_init(struct hist_entry *he)
826 if (!he->srcfile)
827 he->srcfile = hist_entry__get_srcfile(he);
830 static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
831 size_t size, unsigned int width)
833 return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
836 struct sort_entry sort_srcfile = {
837 .se_header = "Source File",
838 .se_cmp = sort__srcfile_cmp,
839 .se_collapse = sort__srcfile_collapse,
840 .se_sort = sort__srcfile_sort,
841 .se_init = sort__srcfile_init,
842 .se_snprintf = hist_entry__srcfile_snprintf,
843 .se_width_idx = HISTC_SRCFILE,
846 /* --sort parent */
848 static int64_t
849 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
851 struct symbol *sym_l = left->parent;
852 struct symbol *sym_r = right->parent;
854 if (!sym_l || !sym_r)
855 return cmp_null(sym_l, sym_r);
857 return strcmp(sym_r->name, sym_l->name);
860 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
861 size_t size, unsigned int width)
863 return repsep_snprintf(bf, size, "%-*.*s", width, width,
864 he->parent ? he->parent->name : "[other]");
867 struct sort_entry sort_parent = {
868 .se_header = "Parent symbol",
869 .se_cmp = sort__parent_cmp,
870 .se_snprintf = hist_entry__parent_snprintf,
871 .se_width_idx = HISTC_PARENT,
874 /* --sort cpu */
876 static int64_t
877 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
879 return right->cpu - left->cpu;
882 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
883 size_t size, unsigned int width)
885 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
888 struct sort_entry sort_cpu = {
889 .se_header = "CPU",
890 .se_cmp = sort__cpu_cmp,
891 .se_snprintf = hist_entry__cpu_snprintf,
892 .se_width_idx = HISTC_CPU,
895 /* --sort cgroup_id */
897 static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
899 return (int64_t)(right_dev - left_dev);
902 static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
904 return (int64_t)(right_ino - left_ino);
907 static int64_t
908 sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
910 int64_t ret;
912 ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
913 if (ret != 0)
914 return ret;
916 return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
917 left->cgroup_id.ino);
920 static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
921 char *bf, size_t size,
922 unsigned int width __maybe_unused)
924 return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
925 he->cgroup_id.ino);
928 struct sort_entry sort_cgroup_id = {
929 .se_header = "cgroup id (dev/inode)",
930 .se_cmp = sort__cgroup_id_cmp,
931 .se_snprintf = hist_entry__cgroup_id_snprintf,
932 .se_width_idx = HISTC_CGROUP_ID,
935 /* --sort cgroup */
937 static int64_t
938 sort__cgroup_cmp(struct hist_entry *left, struct hist_entry *right)
940 return right->cgroup - left->cgroup;
943 static int hist_entry__cgroup_snprintf(struct hist_entry *he,
944 char *bf, size_t size,
945 unsigned int width __maybe_unused)
947 const char *cgrp_name = "N/A";
949 if (he->cgroup) {
950 struct cgroup *cgrp = cgroup__find(maps__machine(he->ms.maps)->env,
951 he->cgroup);
952 if (cgrp != NULL)
953 cgrp_name = cgrp->name;
954 else
955 cgrp_name = "unknown";
958 return repsep_snprintf(bf, size, "%s", cgrp_name);
961 struct sort_entry sort_cgroup = {
962 .se_header = "Cgroup",
963 .se_cmp = sort__cgroup_cmp,
964 .se_snprintf = hist_entry__cgroup_snprintf,
965 .se_width_idx = HISTC_CGROUP,
968 /* --sort socket */
970 static int64_t
971 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
973 return right->socket - left->socket;
976 static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
977 size_t size, unsigned int width)
979 return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
982 static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
984 int sk = *(const int *)arg;
986 if (type != HIST_FILTER__SOCKET)
987 return -1;
989 return sk >= 0 && he->socket != sk;
992 struct sort_entry sort_socket = {
993 .se_header = "Socket",
994 .se_cmp = sort__socket_cmp,
995 .se_snprintf = hist_entry__socket_snprintf,
996 .se_filter = hist_entry__socket_filter,
997 .se_width_idx = HISTC_SOCKET,
1000 /* --sort time */
1002 static int64_t
1003 sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
1005 return right->time - left->time;
1008 static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
1009 size_t size, unsigned int width)
1011 char he_time[32];
1013 if (symbol_conf.nanosecs)
1014 timestamp__scnprintf_nsec(he->time, he_time,
1015 sizeof(he_time));
1016 else
1017 timestamp__scnprintf_usec(he->time, he_time,
1018 sizeof(he_time));
1020 return repsep_snprintf(bf, size, "%-.*s", width, he_time);
1023 struct sort_entry sort_time = {
1024 .se_header = "Time",
1025 .se_cmp = sort__time_cmp,
1026 .se_snprintf = hist_entry__time_snprintf,
1027 .se_width_idx = HISTC_TIME,
1030 /* --sort trace */
1032 #ifdef HAVE_LIBTRACEEVENT
1033 static char *get_trace_output(struct hist_entry *he)
1035 struct trace_seq seq;
1036 struct evsel *evsel;
1037 struct tep_record rec = {
1038 .data = he->raw_data,
1039 .size = he->raw_size,
1042 evsel = hists_to_evsel(he->hists);
1044 trace_seq_init(&seq);
1045 if (symbol_conf.raw_trace) {
1046 tep_print_fields(&seq, he->raw_data, he->raw_size,
1047 evsel->tp_format);
1048 } else {
1049 tep_print_event(evsel->tp_format->tep,
1050 &seq, &rec, "%s", TEP_PRINT_INFO);
1052 /*
1053 * Trim the buffer, it starts at 4KB and we're not going to
1054 * add anything more to this buffer.
1055 */
1056 return realloc(seq.buffer, seq.len + 1);
1059 static int64_t
1060 sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
1062 struct evsel *evsel;
1064 evsel = hists_to_evsel(left->hists);
1065 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
1066 return 0;
1068 if (left->trace_output == NULL)
1069 left->trace_output = get_trace_output(left);
1070 if (right->trace_output == NULL)
1071 right->trace_output = get_trace_output(right);
1073 return strcmp(right->trace_output, left->trace_output);
1076 static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
1077 size_t size, unsigned int width)
1079 struct evsel *evsel;
1081 evsel = hists_to_evsel(he->hists);
1082 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
1083 return scnprintf(bf, size, "%-.*s", width, "N/A");
1085 if (he->trace_output == NULL)
1086 he->trace_output = get_trace_output(he);
1087 return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
1090 struct sort_entry sort_trace = {
1091 .se_header = "Trace output",
1092 .se_cmp = sort__trace_cmp,
1093 .se_snprintf = hist_entry__trace_snprintf,
1094 .se_width_idx = HISTC_TRACE,
1096 #endif /* HAVE_LIBTRACEEVENT */
1098 /* sort keys for branch stacks */
1100 static int64_t
1101 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
1103 if (!left->branch_info || !right->branch_info)
1104 return cmp_null(left->branch_info, right->branch_info);
1106 return _sort__dso_cmp(left->branch_info->from.ms.map,
1107 right->branch_info->from.ms.map);
1110 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
1111 size_t size, unsigned int width)
1113 if (he->branch_info)
1114 return _hist_entry__dso_snprintf(he->branch_info->from.ms.map,
1115 bf, size, width);
1116 else
1117 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1120 static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
1121 const void *arg)
1123 const struct dso *dso = arg;
1125 if (type != HIST_FILTER__DSO)
1126 return -1;
1128 return dso && (!he->branch_info || !he->branch_info->from.ms.map ||
1129 map__dso(he->branch_info->from.ms.map) != dso);
1132 static int64_t
1133 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
1135 if (!left->branch_info || !right->branch_info)
1136 return cmp_null(left->branch_info, right->branch_info);
1138 return _sort__dso_cmp(left->branch_info->to.ms.map,
1139 right->branch_info->to.ms.map);
1142 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
1143 size_t size, unsigned int width)
1145 if (he->branch_info)
1146 return _hist_entry__dso_snprintf(he->branch_info->to.ms.map,
1147 bf, size, width);
1148 else
1149 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1152 static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
1153 const void *arg)
1155 const struct dso *dso = arg;
1157 if (type != HIST_FILTER__DSO)
1158 return -1;
1160 return dso && (!he->branch_info || !he->branch_info->to.ms.map ||
1161 map__dso(he->branch_info->to.ms.map) != dso);
1164 static int64_t
1165 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
1167 struct addr_map_symbol *from_l, *from_r;
1169 if (!left->branch_info || !right->branch_info)
1170 return cmp_null(left->branch_info, right->branch_info);
1172 from_l = &left->branch_info->from;
1173 from_r = &right->branch_info->from;
1175 if (!from_l->ms.sym && !from_r->ms.sym)
1176 return _sort__addr_cmp(from_l->addr, from_r->addr);
1178 return _sort__sym_cmp(from_l->ms.sym, from_r->ms.sym);
1181 static int64_t
1182 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
1184 struct addr_map_symbol *to_l, *to_r;
1186 if (!left->branch_info || !right->branch_info)
1187 return cmp_null(left->branch_info, right->branch_info);
1189 to_l = &left->branch_info->to;
1190 to_r = &right->branch_info->to;
1192 if (!to_l->ms.sym && !to_r->ms.sym)
1193 return _sort__addr_cmp(to_l->addr, to_r->addr);
1195 return _sort__sym_cmp(to_l->ms.sym, to_r->ms.sym);
1198 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
1199 size_t size, unsigned int width)
1201 if (he->branch_info) {
1202 struct addr_map_symbol *from = &he->branch_info->from;
1204 return _hist_entry__sym_snprintf(&from->ms, from->al_addr,
1205 from->al_level, bf, size, width);
1208 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1211 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
1212 size_t size, unsigned int width)
1214 if (he->branch_info) {
1215 struct addr_map_symbol *to = &he->branch_info->to;
1217 return _hist_entry__sym_snprintf(&to->ms, to->al_addr,
1218 to->al_level, bf, size, width);
1221 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1224 static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
1225 const void *arg)
1227 const char *sym = arg;
1229 if (type != HIST_FILTER__SYMBOL)
1230 return -1;
1232 return sym && !(he->branch_info && he->branch_info->from.ms.sym &&
1233 strstr(he->branch_info->from.ms.sym->name, sym));
1236 static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
1237 const void *arg)
1239 const char *sym = arg;
1241 if (type != HIST_FILTER__SYMBOL)
1242 return -1;
1244 return sym && !(he->branch_info && he->branch_info->to.ms.sym &&
1245 strstr(he->branch_info->to.ms.sym->name, sym));
1248 struct sort_entry sort_dso_from = {
1249 .se_header = "Source Shared Object",
1250 .se_cmp = sort__dso_from_cmp,
1251 .se_snprintf = hist_entry__dso_from_snprintf,
1252 .se_filter = hist_entry__dso_from_filter,
1253 .se_width_idx = HISTC_DSO_FROM,
1256 struct sort_entry sort_dso_to = {
1257 .se_header = "Target Shared Object",
1258 .se_cmp = sort__dso_to_cmp,
1259 .se_snprintf = hist_entry__dso_to_snprintf,
1260 .se_filter = hist_entry__dso_to_filter,
1261 .se_width_idx = HISTC_DSO_TO,
1264 struct sort_entry sort_sym_from = {
1265 .se_header = "Source Symbol",
1266 .se_cmp = sort__sym_from_cmp,
1267 .se_snprintf = hist_entry__sym_from_snprintf,
1268 .se_filter = hist_entry__sym_from_filter,
1269 .se_width_idx = HISTC_SYMBOL_FROM,
1272 struct sort_entry sort_sym_to = {
1273 .se_header = "Target Symbol",
1274 .se_cmp = sort__sym_to_cmp,
1275 .se_snprintf = hist_entry__sym_to_snprintf,
1276 .se_filter = hist_entry__sym_to_filter,
1277 .se_width_idx = HISTC_SYMBOL_TO,
1280 static int _hist_entry__addr_snprintf(struct map_symbol *ms,
1281 u64 ip, char level, char *bf, size_t size,
1282 unsigned int width)
1284 struct symbol *sym = ms->sym;
1285 struct map *map = ms->map;
1286 size_t ret = 0, offs;
1288 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
1289 if (sym && map) {
1290 if (sym->type == STT_OBJECT) {
1291 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
1292 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
1293 ip - map__unmap_ip(map, sym->start));
1294 } else {
1295 ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
1296 width - ret,
1297 sym->name);
1298 offs = ip - sym->start;
1299 if (offs)
1300 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", offs);
1302 } else {
1303 size_t len = BITS_PER_LONG / 4;
1304 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
1305 len, ip);
1308 return ret;
1311 static int hist_entry__addr_from_snprintf(struct hist_entry *he, char *bf,
1312 size_t size, unsigned int width)
1314 if (he->branch_info) {
1315 struct addr_map_symbol *from = &he->branch_info->from;
1317 return _hist_entry__addr_snprintf(&from->ms, from->al_addr,
1318 he->level, bf, size, width);
1321 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1324 static int hist_entry__addr_to_snprintf(struct hist_entry *he, char *bf,
1325 size_t size, unsigned int width)
1327 if (he->branch_info) {
1328 struct addr_map_symbol *to = &he->branch_info->to;
1330 return _hist_entry__addr_snprintf(&to->ms, to->al_addr,
1331 he->level, bf, size, width);
1334 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1337 static int64_t
1338 sort__addr_from_cmp(struct hist_entry *left, struct hist_entry *right)
1340 struct addr_map_symbol *from_l;
1341 struct addr_map_symbol *from_r;
1342 int64_t ret;
1344 if (!left->branch_info || !right->branch_info)
1345 return cmp_null(left->branch_info, right->branch_info);
1347 from_l = &left->branch_info->from;
1348 from_r = &right->branch_info->from;
1350 /*
1351 * comparing symbol address alone is not enough since it's a
1352 * relative address within a dso.
1353 */
1354 ret = _sort__dso_cmp(from_l->ms.map, from_r->ms.map);
1355 if (ret != 0)
1356 return ret;
1358 return _sort__addr_cmp(from_l->addr, from_r->addr);
1361 static int64_t
1362 sort__addr_to_cmp(struct hist_entry *left, struct hist_entry *right)
1364 struct addr_map_symbol *to_l;
1365 struct addr_map_symbol *to_r;
1366 int64_t ret;
1368 if (!left->branch_info || !right->branch_info)
1369 return cmp_null(left->branch_info, right->branch_info);
1371 to_l = &left->branch_info->to;
1372 to_r = &right->branch_info->to;
1374 /*
1375 * comparing symbol address alone is not enough since it's a
1376 * relative address within a dso.
1377 */
1378 ret = _sort__dso_cmp(to_l->ms.map, to_r->ms.map);
1379 if (ret != 0)
1380 return ret;
1382 return _sort__addr_cmp(to_l->addr, to_r->addr);
1385 struct sort_entry sort_addr_from = {
1386 .se_header = "Source Address",
1387 .se_cmp = sort__addr_from_cmp,
1388 .se_snprintf = hist_entry__addr_from_snprintf,
1389 .se_filter = hist_entry__sym_from_filter, /* shared with sym_from */
1390 .se_width_idx = HISTC_ADDR_FROM,
1393 struct sort_entry sort_addr_to = {
1394 .se_header = "Target Address",
1395 .se_cmp = sort__addr_to_cmp,
1396 .se_snprintf = hist_entry__addr_to_snprintf,
1397 .se_filter = hist_entry__sym_to_filter, /* shared with sym_to */
1398 .se_width_idx = HISTC_ADDR_TO,
1402 static int64_t
1403 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
1405 unsigned char mp, p;
1407 if (!left->branch_info || !right->branch_info)
1408 return cmp_null(left->branch_info, right->branch_info);
1410 mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
1411 p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
1412 return mp || p;
1415 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
1416 size_t size, unsigned int width){
1417 static const char *out = "N/A";
1419 if (he->branch_info) {
1420 if (he->branch_info->flags.predicted)
1421 out = "N";
1422 else if (he->branch_info->flags.mispred)
1423 out = "Y";
1426 return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
1429 static int64_t
1430 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
1432 if (!left->branch_info || !right->branch_info)
1433 return cmp_null(left->branch_info, right->branch_info);
1435 return left->branch_info->flags.cycles -
1436 right->branch_info->flags.cycles;
1439 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
1440 size_t size, unsigned int width)
1442 if (!he->branch_info)
1443 return scnprintf(bf, size, "%-.*s", width, "N/A");
1444 if (he->branch_info->flags.cycles == 0)
1445 return repsep_snprintf(bf, size, "%-*s", width, "-");
1446 return repsep_snprintf(bf, size, "%-*hd", width,
1447 he->branch_info->flags.cycles);
1450 struct sort_entry sort_cycles = {
1451 .se_header = "Basic Block Cycles",
1452 .se_cmp = sort__cycles_cmp,
1453 .se_snprintf = hist_entry__cycles_snprintf,
1454 .se_width_idx = HISTC_CYCLES,
1457 /* --sort daddr_sym */
1458 int64_t
1459 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1461 uint64_t l = 0, r = 0;
1463 if (left->mem_info)
1464 l = mem_info__daddr(left->mem_info)->addr;
1465 if (right->mem_info)
1466 r = mem_info__daddr(right->mem_info)->addr;
1468 return (int64_t)(r - l);
1471 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
1472 size_t size, unsigned int width)
1474 uint64_t addr = 0;
1475 struct map_symbol *ms = NULL;
1477 if (he->mem_info) {
1478 addr = mem_info__daddr(he->mem_info)->addr;
1479 ms = &mem_info__daddr(he->mem_info)->ms;
1481 return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
1484 int64_t
1485 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
1487 uint64_t l = 0, r = 0;
1489 if (left->mem_info)
1490 l = mem_info__iaddr(left->mem_info)->addr;
1491 if (right->mem_info)
1492 r = mem_info__iaddr(right->mem_info)->addr;
1494 return (int64_t)(r - l);
1497 static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
1498 size_t size, unsigned int width)
1500 uint64_t addr = 0;
1501 struct map_symbol *ms = NULL;
1503 if (he->mem_info) {
1504 addr = mem_info__iaddr(he->mem_info)->addr;
1505 ms = &mem_info__iaddr(he->mem_info)->ms;
1507 return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
1510 static int64_t
1511 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1513 struct map *map_l = NULL;
1514 struct map *map_r = NULL;
1516 if (left->mem_info)
1517 map_l = mem_info__daddr(left->mem_info)->ms.map;
1518 if (right->mem_info)
1519 map_r = mem_info__daddr(right->mem_info)->ms.map;
1521 return _sort__dso_cmp(map_l, map_r);
1524 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
1525 size_t size, unsigned int width)
1527 struct map *map = NULL;
1529 if (he->mem_info)
1530 map = mem_info__daddr(he->mem_info)->ms.map;
1532 return _hist_entry__dso_snprintf(map, bf, size, width);
1535 static int64_t
1536 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
1538 union perf_mem_data_src data_src_l;
1539 union perf_mem_data_src data_src_r;
1541 if (left->mem_info)
1542 data_src_l = *mem_info__data_src(left->mem_info);
1543 else
1544 data_src_l.mem_lock = PERF_MEM_LOCK_NA;
1546 if (right->mem_info)
1547 data_src_r = *mem_info__data_src(right->mem_info);
1548 else
1549 data_src_r.mem_lock = PERF_MEM_LOCK_NA;
1551 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
1554 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
1555 size_t size, unsigned int width)
1557 char out[10];
1559 perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
1560 return repsep_snprintf(bf, size, "%.*s", width, out);
1563 static int64_t
1564 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
1566 union perf_mem_data_src data_src_l;
1567 union perf_mem_data_src data_src_r;
1569 if (left->mem_info)
1570 data_src_l = *mem_info__data_src(left->mem_info);
1571 else
1572 data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
1574 if (right->mem_info)
1575 data_src_r = *mem_info__data_src(right->mem_info);
1576 else
1577 data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
1579 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
1582 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
1583 size_t size, unsigned int width)
1585 char out[64];
1587 perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
1588 return repsep_snprintf(bf, size, "%-*s", width, out);
1591 static int64_t
1592 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
1594 union perf_mem_data_src data_src_l;
1595 union perf_mem_data_src data_src_r;
1597 if (left->mem_info)
1598 data_src_l = *mem_info__data_src(left->mem_info);
1599 else
1600 data_src_l.mem_lvl = PERF_MEM_LVL_NA;
1602 if (right->mem_info)
1603 data_src_r = *mem_info__data_src(right->mem_info);
1604 else
1605 data_src_r.mem_lvl = PERF_MEM_LVL_NA;
1607 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
1610 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
1611 size_t size, unsigned int width)
1613 char out[64];
1615 perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
1616 return repsep_snprintf(bf, size, "%-*s", width, out);
1619 static int64_t
1620 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
1622 union perf_mem_data_src data_src_l;
1623 union perf_mem_data_src data_src_r;
1625 if (left->mem_info)
1626 data_src_l = *mem_info__data_src(left->mem_info);
1627 else
1628 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
1630 if (right->mem_info)
1631 data_src_r = *mem_info__data_src(right->mem_info);
1632 else
1633 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
1635 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
1638 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
1639 size_t size, unsigned int width)
1641 char out[64];
1643 perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
1644 return repsep_snprintf(bf, size, "%-*s", width, out);
1647 int64_t
1648 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
1650 u64 l, r;
1651 struct map *l_map, *r_map;
1652 struct dso *l_dso, *r_dso;
1653 int rc;
1655 if (!left->mem_info) return -1;
1656 if (!right->mem_info) return 1;
1658 /* group event types together */
1659 if (left->cpumode > right->cpumode) return -1;
1660 if (left->cpumode < right->cpumode) return 1;
1662 l_map = mem_info__daddr(left->mem_info)->ms.map;
1663 r_map = mem_info__daddr(right->mem_info)->ms.map;
1665 /* if both are NULL, jump to sort on al_addr instead */
1666 if (!l_map && !r_map)
1667 goto addr;
1669 if (!l_map) return -1;
1670 if (!r_map) return 1;
1672 l_dso = map__dso(l_map);
1673 r_dso = map__dso(r_map);
1674 rc = dso__cmp_id(l_dso, r_dso);
1675 if (rc)
1676 return rc;
1677 /*
1678 * Addresses with no major/minor numbers are assumed to be
1679 * anonymous in userspace. Sort those on pid then address.
1680 *
1681 * The kernel and non-zero major/minor mapped areas are
1682 * assumed to be unity mapped. Sort those on address.
1683 */
1685 if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
1686 (!(map__flags(l_map) & MAP_SHARED)) && !dso__id(l_dso)->maj && !dso__id(l_dso)->min &&
1687 !dso__id(l_dso)->ino && !dso__id(l_dso)->ino_generation) {
1688 /* userspace anonymous */
1690 if (thread__pid(left->thread) > thread__pid(right->thread))
1691 return -1;
1692 if (thread__pid(left->thread) < thread__pid(right->thread))
1693 return 1;
1696 addr:
1697 /* al_addr does all the right addr - start + offset calculations */
1698 l = cl_address(mem_info__daddr(left->mem_info)->al_addr, chk_double_cl);
1699 r = cl_address(mem_info__daddr(right->mem_info)->al_addr, chk_double_cl);
1701 if (l > r) return -1;
1702 if (l < r) return 1;
1704 return 0;
1707 static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
1708 size_t size, unsigned int width)
1711 uint64_t addr = 0;
1712 struct map_symbol *ms = NULL;
1713 char level = he->level;
1715 if (he->mem_info) {
1716 struct map *map = mem_info__daddr(he->mem_info)->ms.map;
1717 struct dso *dso = map ? map__dso(map) : NULL;
1719 addr = cl_address(mem_info__daddr(he->mem_info)->al_addr, chk_double_cl);
1720 ms = &mem_info__daddr(he->mem_info)->ms;
1722 /* print [s] for shared data mmaps */
1723 if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
1724 map && !(map__prot(map) & PROT_EXEC) &&
1725 (map__flags(map) & MAP_SHARED) &&
1726 (dso__id(dso)->maj || dso__id(dso)->min || dso__id(dso)->ino ||
1727 dso__id(dso)->ino_generation))
1728 level = 's';
1729 else if (!map)
1730 level = 'X';
1732 return _hist_entry__sym_snprintf(ms, addr, level, bf, size, width);
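/*
 * Level character in this column: 's' marks a shared data mmap, 'X' an address
 * with no map; otherwise the hist entry's original level character is kept.
 */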
1735 struct sort_entry sort_mispredict = {
1736 .se_header = "Branch Mispredicted",
1737 .se_cmp = sort__mispredict_cmp,
1738 .se_snprintf = hist_entry__mispredict_snprintf,
1739 .se_width_idx = HISTC_MISPREDICT,
1742 static int64_t
1743 sort__weight_cmp(struct hist_entry *left, struct hist_entry *right)
1745 return left->weight - right->weight;
1748 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
1749 size_t size, unsigned int width)
1751 return repsep_snprintf(bf, size, "%-*llu", width, he->weight);
1754 struct sort_entry sort_local_weight = {
1755 .se_header = "Local Weight",
1756 .se_cmp = sort__weight_cmp,
1757 .se_snprintf = hist_entry__local_weight_snprintf,
1758 .se_width_idx = HISTC_LOCAL_WEIGHT,
1761 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
1762 size_t size, unsigned int width)
1764 return repsep_snprintf(bf, size, "%-*llu", width,
1765 he->weight * he->stat.nr_events);
1768 struct sort_entry sort_global_weight = {
1769 .se_header = "Weight",
1770 .se_cmp = sort__weight_cmp,
1771 .se_snprintf = hist_entry__global_weight_snprintf,
1772 .se_width_idx = HISTC_GLOBAL_WEIGHT,
1775 static int64_t
1776 sort__ins_lat_cmp(struct hist_entry *left, struct hist_entry *right)
1778 return left->ins_lat - right->ins_lat;
1781 static int hist_entry__local_ins_lat_snprintf(struct hist_entry *he, char *bf,
1782 size_t size, unsigned int width)
1784 return repsep_snprintf(bf, size, "%-*u", width, he->ins_lat);
1787 struct sort_entry sort_local_ins_lat = {
1788 .se_header = "Local INSTR Latency",
1789 .se_cmp = sort__ins_lat_cmp,
1790 .se_snprintf = hist_entry__local_ins_lat_snprintf,
1791 .se_width_idx = HISTC_LOCAL_INS_LAT,
1794 static int hist_entry__global_ins_lat_snprintf(struct hist_entry *he, char *bf,
1795 size_t size, unsigned int width)
1797 return repsep_snprintf(bf, size, "%-*u", width,
1798 he->ins_lat * he->stat.nr_events);
1801 struct sort_entry sort_global_ins_lat = {
1802 .se_header = "INSTR Latency",
1803 .se_cmp = sort__ins_lat_cmp,
1804 .se_snprintf = hist_entry__global_ins_lat_snprintf,
1805 .se_width_idx = HISTC_GLOBAL_INS_LAT,
1808 static int64_t
1809 sort__p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right)
1811 return left->p_stage_cyc - right->p_stage_cyc;
1814 static int hist_entry__global_p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
1815 size_t size, unsigned int width)
1817 return repsep_snprintf(bf, size, "%-*u", width,
1818 he->p_stage_cyc * he->stat.nr_events);
1822 static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
1823 size_t size, unsigned int width)
1825 return repsep_snprintf(bf, size, "%-*u", width, he->p_stage_cyc);
1828 struct sort_entry sort_local_p_stage_cyc = {
1829 .se_header = "Local Pipeline Stage Cycle",
1830 .se_cmp = sort__p_stage_cyc_cmp,
1831 .se_snprintf = hist_entry__p_stage_cyc_snprintf,
1832 .se_width_idx = HISTC_LOCAL_P_STAGE_CYC,
1835 struct sort_entry sort_global_p_stage_cyc = {
1836 .se_header = "Pipeline Stage Cycle",
1837 .se_cmp = sort__p_stage_cyc_cmp,
1838 .se_snprintf = hist_entry__global_p_stage_cyc_snprintf,
1839 .se_width_idx = HISTC_GLOBAL_P_STAGE_CYC,
1842 struct sort_entry sort_mem_daddr_sym = {
1843 .se_header = "Data Symbol",
1844 .se_cmp = sort__daddr_cmp,
1845 .se_snprintf = hist_entry__daddr_snprintf,
1846 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
1849 struct sort_entry sort_mem_iaddr_sym = {
1850 .se_header = "Code Symbol",
1851 .se_cmp = sort__iaddr_cmp,
1852 .se_snprintf = hist_entry__iaddr_snprintf,
1853 .se_width_idx = HISTC_MEM_IADDR_SYMBOL,
1856 struct sort_entry sort_mem_daddr_dso = {
1857 .se_header = "Data Object",
1858 .se_cmp = sort__dso_daddr_cmp,
1859 .se_snprintf = hist_entry__dso_daddr_snprintf,
1860 .se_width_idx = HISTC_MEM_DADDR_DSO,
1863 struct sort_entry sort_mem_locked = {
1864 .se_header = "Locked",
1865 .se_cmp = sort__locked_cmp,
1866 .se_snprintf = hist_entry__locked_snprintf,
1867 .se_width_idx = HISTC_MEM_LOCKED,
1870 struct sort_entry sort_mem_tlb = {
1871 .se_header = "TLB access",
1872 .se_cmp = sort__tlb_cmp,
1873 .se_snprintf = hist_entry__tlb_snprintf,
1874 .se_width_idx = HISTC_MEM_TLB,
1877 struct sort_entry sort_mem_lvl = {
1878 .se_header = "Memory access",
1879 .se_cmp = sort__lvl_cmp,
1880 .se_snprintf = hist_entry__lvl_snprintf,
1881 .se_width_idx = HISTC_MEM_LVL,
1884 struct sort_entry sort_mem_snoop = {
1885 .se_header = "Snoop",
1886 .se_cmp = sort__snoop_cmp,
1887 .se_snprintf = hist_entry__snoop_snprintf,
1888 .se_width_idx = HISTC_MEM_SNOOP,
1891 struct sort_entry sort_mem_dcacheline = {
1892 .se_header = "Data Cacheline",
1893 .se_cmp = sort__dcacheline_cmp,
1894 .se_snprintf = hist_entry__dcacheline_snprintf,
1895 .se_width_idx = HISTC_MEM_DCACHELINE,
1898 static int64_t
1899 sort__blocked_cmp(struct hist_entry *left, struct hist_entry *right)
1901 union perf_mem_data_src data_src_l;
1902 union perf_mem_data_src data_src_r;
1904 if (left->mem_info)
1905 data_src_l = *mem_info__data_src(left->mem_info);
1906 else
1907 data_src_l.mem_blk = PERF_MEM_BLK_NA;
1909 if (right->mem_info)
1910 data_src_r = *mem_info__data_src(right->mem_info);
1911 else
1912 data_src_r.mem_blk = PERF_MEM_BLK_NA;
1914 return (int64_t)(data_src_r.mem_blk - data_src_l.mem_blk);
1917 static int hist_entry__blocked_snprintf(struct hist_entry *he, char *bf,
1918 size_t size, unsigned int width)
1920 char out[16];
1922 perf_mem__blk_scnprintf(out, sizeof(out), he->mem_info);
1923 return repsep_snprintf(bf, size, "%.*s", width, out);
1926 struct sort_entry sort_mem_blocked = {
1927 .se_header = "Blocked",
1928 .se_cmp = sort__blocked_cmp,
1929 .se_snprintf = hist_entry__blocked_snprintf,
1930 .se_width_idx = HISTC_MEM_BLOCKED,
1933 static int64_t
1934 sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1936 uint64_t l = 0, r = 0;
1938 if (left->mem_info)
1939 l = mem_info__daddr(left->mem_info)->phys_addr;
1940 if (right->mem_info)
1941 r = mem_info__daddr(right->mem_info)->phys_addr;
1943 return (int64_t)(r - l);
1946 static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
1947 size_t size, unsigned int width)
1949 uint64_t addr = 0;
1950 size_t ret = 0;
1951 size_t len = BITS_PER_LONG / 4;
1953 addr = mem_info__daddr(he->mem_info)->phys_addr;
1955 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);
1957 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);
1959 ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");
1961 if (ret > width)
1962 bf[width] = '\0';
1964 return width;
1967 struct sort_entry sort_mem_phys_daddr = {
1968 .se_header = "Data Physical Address",
1969 .se_cmp = sort__phys_daddr_cmp,
1970 .se_snprintf = hist_entry__phys_daddr_snprintf,
1971 .se_width_idx = HISTC_MEM_PHYS_DADDR,
1974 static int64_t
1975 sort__data_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
1977 uint64_t l = 0, r = 0;
1979 if (left->mem_info)
1980 l = mem_info__daddr(left->mem_info)->data_page_size;
1981 if (right->mem_info)
1982 r = mem_info__daddr(right->mem_info)->data_page_size;
1984 return (int64_t)(r - l);
1987 static int hist_entry__data_page_size_snprintf(struct hist_entry *he, char *bf,
1988 size_t size, unsigned int width)
1990 char str[PAGE_SIZE_NAME_LEN];
1992 return repsep_snprintf(bf, size, "%-*s", width,
1993 get_page_size_name(mem_info__daddr(he->mem_info)->data_page_size, str));
1996 struct sort_entry sort_mem_data_page_size = {
1997 .se_header = "Data Page Size",
1998 .se_cmp = sort__data_page_size_cmp,
1999 .se_snprintf = hist_entry__data_page_size_snprintf,
2000 .se_width_idx = HISTC_MEM_DATA_PAGE_SIZE,
2003 static int64_t
2004 sort__code_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
2006 uint64_t l = left->code_page_size;
2007 uint64_t r = right->code_page_size;
2009 return (int64_t)(r - l);
2012 static int hist_entry__code_page_size_snprintf(struct hist_entry *he, char *bf,
2013 size_t size, unsigned int width)
2015 char str[PAGE_SIZE_NAME_LEN];
2017 return repsep_snprintf(bf, size, "%-*s", width,
2018 get_page_size_name(he->code_page_size, str));
2021 struct sort_entry sort_code_page_size = {
2022 .se_header = "Code Page Size",
2023 .se_cmp = sort__code_page_size_cmp,
2024 .se_snprintf = hist_entry__code_page_size_snprintf,
2025 .se_width_idx = HISTC_CODE_PAGE_SIZE,
2028 static int64_t
2029 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
2031 if (!left->branch_info || !right->branch_info)
2032 return cmp_null(left->branch_info, right->branch_info);
2034 return left->branch_info->flags.abort !=
2035 right->branch_info->flags.abort;
2038 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
2039 size_t size, unsigned int width)
2041 static const char *out = "N/A";
2043 if (he->branch_info) {
2044 if (he->branch_info->flags.abort)
2045 out = "A";
2046 else
2047 out = ".";
2050 return repsep_snprintf(bf, size, "%-*s", width, out);
2053 struct sort_entry sort_abort = {
2054 .se_header = "Transaction abort",
2055 .se_cmp = sort__abort_cmp,
2056 .se_snprintf = hist_entry__abort_snprintf,
2057 .se_width_idx = HISTC_ABORT,
2060 static int64_t
2061 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
2063 if (!left->branch_info || !right->branch_info)
2064 return cmp_null(left->branch_info, right->branch_info);
2066 return left->branch_info->flags.in_tx !=
2067 right->branch_info->flags.in_tx;
2070 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
2071 size_t size, unsigned int width)
2073 static const char *out = "N/A";
2075 if (he->branch_info) {
2076 if (he->branch_info->flags.in_tx)
2077 out = "T";
2078 else
2079 out = ".";
2082 return repsep_snprintf(bf, size, "%-*s", width, out);
2085 struct sort_entry sort_in_tx = {
2086 .se_header = "Branch in transaction",
2087 .se_cmp = sort__in_tx_cmp,
2088 .se_snprintf = hist_entry__in_tx_snprintf,
2089 .se_width_idx = HISTC_IN_TX,
2092 static int64_t
2093 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
2095 return left->transaction - right->transaction;
2098 static inline char *add_str(char *p, const char *str)
2100 strcpy(p, str);
2101 return p + strlen(str);
2104 static struct txbit {
2105 unsigned flag;
2106 const char *name;
2107 int skip_for_len;
2108 } txbits[] = {
2109 { PERF_TXN_ELISION, "EL ", 0 },
2110 { PERF_TXN_TRANSACTION, "TX ", 1 },
2111 { PERF_TXN_SYNC, "SYNC ", 1 },
2112 { PERF_TXN_ASYNC, "ASYNC ", 0 },
2113 { PERF_TXN_RETRY, "RETRY ", 0 },
2114 { PERF_TXN_CONFLICT, "CON ", 0 },
2115 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
2116 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
2117 { 0, NULL, 0 }
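/*
 * Column width for the Transaction header: the sum of the always-printed
 * flag names (skip_for_len == 0) plus room for the ":XX " abort code suffix.
 */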
2120 int hist_entry__transaction_len(void)
2122 int i;
2123 int len = 0;
2125 for (i = 0; txbits[i].name; i++) {
2126 if (!txbits[i].skip_for_len)
2127 len += strlen(txbits[i].name);
2129 len += 4; /* :XX<space> */
2130 return len;
2133 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
2134 size_t size, unsigned int width)
2136 u64 t = he->transaction;
2137 char buf[128];
2138 char *p = buf;
2139 int i;
2141 buf[0] = 0;
2142 for (i = 0; txbits[i].name; i++)
2143 if (txbits[i].flag & t)
2144 p = add_str(p, txbits[i].name);
2145 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
2146 p = add_str(p, "NEITHER ");
2147 if (t & PERF_TXN_ABORT_MASK) {
2148 sprintf(p, ":%" PRIx64,
2149 (t & PERF_TXN_ABORT_MASK) >>
2150 PERF_TXN_ABORT_SHIFT);
2151 p += strlen(p);
2154 return repsep_snprintf(bf, size, "%-*s", width, buf);
2157 struct sort_entry sort_transaction = {
2158 .se_header = "Transaction ",
2159 .se_cmp = sort__transaction_cmp,
2160 .se_snprintf = hist_entry__transaction_snprintf,
2161 .se_width_idx = HISTC_TRANSACTION,
2164 /* --sort symbol_size */
2166 static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
2168 int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
2169 int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;
2171 return size_l < size_r ? -1 :
2172 size_l == size_r ? 0 : 1;
2175 static int64_t
2176 sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
2178 return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
2181 static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
2182 size_t bf_size, unsigned int width)
2184 if (sym)
2185 return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));
2187 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
2190 static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
2191 size_t size, unsigned int width)
2193 return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
2196 struct sort_entry sort_sym_size = {
2197 .se_header = "Symbol size",
2198 .se_cmp = sort__sym_size_cmp,
2199 .se_snprintf = hist_entry__sym_size_snprintf,
2200 .se_width_idx = HISTC_SYM_SIZE,
2203 /* --sort dso_size */
2205 static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
2207 int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
2208 int64_t size_r = map_r != NULL ? map__size(map_r) : 0;
2210 return size_l < size_r ? -1 :
2211 size_l == size_r ? 0 : 1;
2214 static int64_t
2215 sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
2217 return _sort__dso_size_cmp(right->ms.map, left->ms.map);
2220 static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
2221 size_t bf_size, unsigned int width)
2223 if (map && map__dso(map))
2224 return repsep_snprintf(bf, bf_size, "%*d", width, map__size(map));
2226 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
2229 static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
2230 size_t size, unsigned int width)
2232 return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
2235 struct sort_entry sort_dso_size = {
2236 .se_header = "DSO size",
2237 .se_cmp = sort__dso_size_cmp,
2238 .se_snprintf = hist_entry__dso_size_snprintf,
2239 .se_width_idx = HISTC_DSO_SIZE,
2242 /* --sort addr */
2244 static int64_t
2245 sort__addr_cmp(struct hist_entry *left, struct hist_entry *right)
2247 u64 left_ip = left->ip;
2248 u64 right_ip = right->ip;
2249 struct map *left_map = left->ms.map;
2250 struct map *right_map = right->ms.map;
2252 if (left_map)
2253 left_ip = map__unmap_ip(left_map, left_ip);
2254 if (right_map)
2255 right_ip = map__unmap_ip(right_map, right_ip);
2257 return _sort__addr_cmp(left_ip, right_ip);
2260 static int hist_entry__addr_snprintf(struct hist_entry *he, char *bf,
2261 size_t size, unsigned int width)
2263 u64 ip = he->ip;
2264 struct map *map = he->ms.map;
2266 if (map)
2267 ip = map__unmap_ip(map, ip);
2269 return repsep_snprintf(bf, size, "%-#*llx", width, ip);
2272 struct sort_entry sort_addr = {
2273 .se_header = "Address",
2274 .se_cmp = sort__addr_cmp,
2275 .se_snprintf = hist_entry__addr_snprintf,
2276 .se_width_idx = HISTC_ADDR,
2279 /* --sort type */
2281 struct annotated_data_type unknown_type = {
2282 .self = {
2283 .type_name = (char *)"(unknown)",
2284 .children = LIST_HEAD_INIT(unknown_type.self.children),
2288 static int64_t
2289 sort__type_cmp(struct hist_entry *left, struct hist_entry *right)
2291 return sort__addr_cmp(left, right);
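/*
 * Lazily resolve the data type of this entry; when no type information is
 * available it falls back to the shared "(unknown)" placeholder.
 */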
2294 static void sort__type_init(struct hist_entry *he)
2296 if (he->mem_type)
2297 return;
2299 he->mem_type = hist_entry__get_data_type(he);
2300 if (he->mem_type == NULL) {
2301 he->mem_type = &unknown_type;
2302 he->mem_type_off = 0;
2306 static int64_t
2307 sort__type_collapse(struct hist_entry *left, struct hist_entry *right)
2309 struct annotated_data_type *left_type = left->mem_type;
2310 struct annotated_data_type *right_type = right->mem_type;
2312 if (!left_type) {
2313 sort__type_init(left);
2314 left_type = left->mem_type;
2317 if (!right_type) {
2318 sort__type_init(right);
2319 right_type = right->mem_type;
2322 return strcmp(left_type->self.type_name, right_type->self.type_name);
2325 static int64_t
2326 sort__type_sort(struct hist_entry *left, struct hist_entry *right)
2328 return sort__type_collapse(left, right);
2331 static int hist_entry__type_snprintf(struct hist_entry *he, char *bf,
2332 size_t size, unsigned int width)
2334 return repsep_snprintf(bf, size, "%-*s", width, he->mem_type->self.type_name);
2337 struct sort_entry sort_type = {
2338 .se_header = "Data Type",
2339 .se_cmp = sort__type_cmp,
2340 .se_collapse = sort__type_collapse,
2341 .se_sort = sort__type_sort,
2342 .se_init = sort__type_init,
2343 .se_snprintf = hist_entry__type_snprintf,
2344 .se_width_idx = HISTC_TYPE,
2347 /* --sort typeoff */
2349 static int64_t
2350 sort__typeoff_sort(struct hist_entry *left, struct hist_entry *right)
2352 struct annotated_data_type *left_type = left->mem_type;
2353 struct annotated_data_type *right_type = right->mem_type;
2354 int64_t ret;
2356 if (!left_type) {
2357 sort__type_init(left);
2358 left_type = left->mem_type;
2361 if (!right_type) {
2362 sort__type_init(right);
2363 right_type = right->mem_type;
2366 ret = strcmp(left_type->self.type_name, right_type->self.type_name);
2367 if (ret)
2368 return ret;
2369 return left->mem_type_off - right->mem_type_off;
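/*
 * Recursively build a "member.submember" path for the field that contains
 * 'offset' within the member 'm'; anonymous struct/union members contribute
 * no name of their own.
 */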
2372 static void fill_member_name(char *buf, size_t sz, struct annotated_member *m,
2373 int offset, bool first)
2375 struct annotated_member *child;
2377 if (list_empty(&m->children))
2378 return;
2380 list_for_each_entry(child, &m->children, node) {
2381 if (child->offset <= offset && offset < child->offset + child->size) {
2382 int len = 0;
2384 /* It can have anonymous struct/union members */
2385 if (child->var_name) {
2386 len = scnprintf(buf, sz, "%s%s",
2387 first ? "" : ".", child->var_name);
2388 first = false;
2391 fill_member_name(buf + len, sz - len, child, offset, first);
2392 return;
2397 static int hist_entry__typeoff_snprintf(struct hist_entry *he, char *bf,
2398 size_t size, unsigned int width __maybe_unused)
2400 struct annotated_data_type *he_type = he->mem_type;
2401 char buf[4096];
2403 buf[0] = '\0';
2404 if (list_empty(&he_type->self.children))
2405 snprintf(buf, sizeof(buf), "no field");
2406 else
2407 fill_member_name(buf, sizeof(buf), &he_type->self,
2408 he->mem_type_off, true);
2409 buf[4095] = '\0';
2411 return repsep_snprintf(bf, size, "%s +%#x (%s)", he_type->self.type_name,
2412 he->mem_type_off, buf);
2415 struct sort_entry sort_type_offset = {
2416 .se_header = "Data Type Offset",
2417 .se_cmp = sort__type_cmp,
2418 .se_collapse = sort__typeoff_sort,
2419 .se_sort = sort__typeoff_sort,
2420 .se_init = sort__type_init,
2421 .se_snprintf = hist_entry__typeoff_snprintf,
2422 .se_width_idx = HISTC_TYPE_OFFSET,
2425 /* --sort typecln */
2427 /* TODO: use the actual cacheline size of the system */
2428 #define TYPE_CACHELINE_SIZE 64
2430 static int64_t
2431 sort__typecln_sort(struct hist_entry *left, struct hist_entry *right)
2433 struct annotated_data_type *left_type = left->mem_type;
2434 struct annotated_data_type *right_type = right->mem_type;
2435 int64_t left_cln, right_cln;
2436 int64_t ret;
2438 if (!left_type) {
2439 sort__type_init(left);
2440 left_type = left->mem_type;
2443 if (!right_type) {
2444 sort__type_init(right);
2445 right_type = right->mem_type;
2448 ret = strcmp(left_type->self.type_name, right_type->self.type_name);
2449 if (ret)
2450 return ret;
2452 left_cln = left->mem_type_off / TYPE_CACHELINE_SIZE;
2453 right_cln = right->mem_type_off / TYPE_CACHELINE_SIZE;
2454 return left_cln - right_cln;
2457 static int hist_entry__typecln_snprintf(struct hist_entry *he, char *bf,
2458 size_t size, unsigned int width __maybe_unused)
2460 struct annotated_data_type *he_type = he->mem_type;
2462 return repsep_snprintf(bf, size, "%s: cache-line %d", he_type->self.type_name,
2463 he->mem_type_off / TYPE_CACHELINE_SIZE);
2466 struct sort_entry sort_type_cacheline = {
2467 .se_header = "Data Type Cacheline",
2468 .se_cmp = sort__type_cmp,
2469 .se_collapse = sort__typecln_sort,
2470 .se_sort = sort__typecln_sort,
2471 .se_init = sort__type_init,
2472 .se_snprintf = hist_entry__typecln_snprintf,
2473 .se_width_idx = HISTC_TYPE_CACHELINE,
2477 struct sort_dimension {
2478 const char *name;
2479 struct sort_entry *entry;
2480 int taken;
2483 int __weak arch_support_sort_key(const char *sort_key __maybe_unused)
2485 return 0;
2488 const char * __weak arch_perf_header_entry(const char *se_header)
2490 return se_header;
2493 static void sort_dimension_add_dynamic_header(struct sort_dimension *sd)
2495 sd->entry->se_header = arch_perf_header_entry(sd->entry->se_header);
2498 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
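/* Generic sort keys, indexed by their SORT_* value; --sort tokens are matched against these first. */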
2500 static struct sort_dimension common_sort_dimensions[] = {
2501 DIM(SORT_PID, "pid", sort_thread),
2502 DIM(SORT_COMM, "comm", sort_comm),
2503 DIM(SORT_DSO, "dso", sort_dso),
2504 DIM(SORT_SYM, "symbol", sort_sym),
2505 DIM(SORT_PARENT, "parent", sort_parent),
2506 DIM(SORT_CPU, "cpu", sort_cpu),
2507 DIM(SORT_SOCKET, "socket", sort_socket),
2508 DIM(SORT_SRCLINE, "srcline", sort_srcline),
2509 DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
2510 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
2511 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
2512 DIM(SORT_TRANSACTION, "transaction", sort_transaction),
2513 #ifdef HAVE_LIBTRACEEVENT
2514 DIM(SORT_TRACE, "trace", sort_trace),
2515 #endif
2516 DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
2517 DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
2518 DIM(SORT_CGROUP, "cgroup", sort_cgroup),
2519 DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
2520 DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
2521 DIM(SORT_TIME, "time", sort_time),
2522 DIM(SORT_CODE_PAGE_SIZE, "code_page_size", sort_code_page_size),
2523 DIM(SORT_LOCAL_INS_LAT, "local_ins_lat", sort_local_ins_lat),
2524 DIM(SORT_GLOBAL_INS_LAT, "ins_lat", sort_global_ins_lat),
2525 DIM(SORT_LOCAL_PIPELINE_STAGE_CYC, "local_p_stage_cyc", sort_local_p_stage_cyc),
2526 DIM(SORT_GLOBAL_PIPELINE_STAGE_CYC, "p_stage_cyc", sort_global_p_stage_cyc),
2527 DIM(SORT_ADDR, "addr", sort_addr),
2528 DIM(SORT_LOCAL_RETIRE_LAT, "local_retire_lat", sort_local_p_stage_cyc),
2529 DIM(SORT_GLOBAL_RETIRE_LAT, "retire_lat", sort_global_p_stage_cyc),
2530 DIM(SORT_SIMD, "simd", sort_simd),
2531 DIM(SORT_ANNOTATE_DATA_TYPE, "type", sort_type),
2532 DIM(SORT_ANNOTATE_DATA_TYPE_OFFSET, "typeoff", sort_type_offset),
2533 DIM(SORT_SYM_OFFSET, "symoff", sort_sym_offset),
2534 DIM(SORT_ANNOTATE_DATA_TYPE_CACHELINE, "typecln", sort_type_cacheline),
2537 #undef DIM
2539 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
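/* Branch stack sort keys; apart from the callchain_branch_* ones they require SORT_MODE__BRANCH. */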
2541 static struct sort_dimension bstack_sort_dimensions[] = {
2542 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
2543 DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
2544 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
2545 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
2546 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
2547 DIM(SORT_IN_TX, "in_tx", sort_in_tx),
2548 DIM(SORT_ABORT, "abort", sort_abort),
2549 DIM(SORT_CYCLES, "cycles", sort_cycles),
2550 DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
2551 DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
2552 DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc),
2553 DIM(SORT_ADDR_FROM, "addr_from", sort_addr_from),
2554 DIM(SORT_ADDR_TO, "addr_to", sort_addr_to),
2555 DIM(SORT_CALLCHAIN_BRANCH_PREDICTED,
2556 "callchain_branch_predicted",
2557 sort_callchain_branch_predicted),
2558 DIM(SORT_CALLCHAIN_BRANCH_ABORT,
2559 "callchain_branch_abort",
2560 sort_callchain_branch_abort),
2561 DIM(SORT_CALLCHAIN_BRANCH_CYCLES,
2562 "callchain_branch_cycles",
2563 sort_callchain_branch_cycles)
2566 #undef DIM
2568 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
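/* Memory access sort keys; only accepted when sort__mode == SORT_MODE__MEMORY. */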
2570 static struct sort_dimension memory_sort_dimensions[] = {
2571 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
2572 DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
2573 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
2574 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
2575 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
2576 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
2577 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
2578 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
2579 DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
2580 DIM(SORT_MEM_DATA_PAGE_SIZE, "data_page_size", sort_mem_data_page_size),
2581 DIM(SORT_MEM_BLOCKED, "blocked", sort_mem_blocked),
2584 #undef DIM
2586 struct hpp_dimension {
2587 const char *name;
2588 struct perf_hpp_fmt *fmt;
2589 int taken;
2592 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
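/* Output fields backed by a perf_hpp format (overhead, period, weights, ...) rather than a sort entry. */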
2594 static struct hpp_dimension hpp_sort_dimensions[] = {
2595 DIM(PERF_HPP__OVERHEAD, "overhead"),
2596 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
2597 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
2598 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
2599 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
2600 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
2601 DIM(PERF_HPP__SAMPLES, "sample"),
2602 DIM(PERF_HPP__PERIOD, "period"),
2603 DIM(PERF_HPP__WEIGHT1, "weight1"),
2604 DIM(PERF_HPP__WEIGHT2, "weight2"),
2605 DIM(PERF_HPP__WEIGHT3, "weight3"),
2606 /* aliases for weight_struct */
2607 DIM(PERF_HPP__WEIGHT2, "ins_lat"),
2608 DIM(PERF_HPP__WEIGHT3, "retire_lat"),
2609 DIM(PERF_HPP__WEIGHT3, "p_stage_cyc"),
2612 #undef DIM
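/* Adapter that exposes a struct sort_entry through the perf_hpp_fmt callback interface. */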
2614 struct hpp_sort_entry {
2615 struct perf_hpp_fmt hpp;
2616 struct sort_entry *se;
2619 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
2621 struct hpp_sort_entry *hse;
2623 if (!perf_hpp__is_sort_entry(fmt))
2624 return;
2626 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2627 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
2630 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2631 struct hists *hists, int line __maybe_unused,
2632 int *span __maybe_unused)
2634 struct hpp_sort_entry *hse;
2635 size_t len = fmt->user_len;
2637 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2639 if (!len)
2640 len = hists__col_len(hists, hse->se->se_width_idx);
2642 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
2645 static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
2646 struct perf_hpp *hpp __maybe_unused,
2647 struct hists *hists)
2649 struct hpp_sort_entry *hse;
2650 size_t len = fmt->user_len;
2652 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2654 if (!len)
2655 len = hists__col_len(hists, hse->se->se_width_idx);
2657 return len;
2660 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2661 struct hist_entry *he)
2663 struct hpp_sort_entry *hse;
2664 size_t len = fmt->user_len;
2666 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2668 if (!len)
2669 len = hists__col_len(he->hists, hse->se->se_width_idx);
2671 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
2674 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
2675 struct hist_entry *a, struct hist_entry *b)
2677 struct hpp_sort_entry *hse;
2679 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2680 return hse->se->se_cmp(a, b);
2683 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
2684 struct hist_entry *a, struct hist_entry *b)
2686 struct hpp_sort_entry *hse;
2687 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
2689 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2690 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
2691 return collapse_fn(a, b);
2694 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
2695 struct hist_entry *a, struct hist_entry *b)
2697 struct hpp_sort_entry *hse;
2698 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
2700 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2701 sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
2702 return sort_fn(a, b);
2705 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
2707 return format->header == __sort__hpp_header;
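/* Generate perf_hpp__is_<key>_entry() helpers that test which sort_entry a format wraps. */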
2710 #define MK_SORT_ENTRY_CHK(key) \
2711 bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \
2713 struct hpp_sort_entry *hse; \
2715 if (!perf_hpp__is_sort_entry(fmt)) \
2716 return false; \
2718 hse = container_of(fmt, struct hpp_sort_entry, hpp); \
2719 return hse->se == &sort_ ## key ; \
2722 #ifdef HAVE_LIBTRACEEVENT
2723 MK_SORT_ENTRY_CHK(trace)
2724 #else
2725 bool perf_hpp__is_trace_entry(struct perf_hpp_fmt *fmt __maybe_unused)
2727 return false;
2729 #endif
2730 MK_SORT_ENTRY_CHK(srcline)
2731 MK_SORT_ENTRY_CHK(srcfile)
2732 MK_SORT_ENTRY_CHK(thread)
2733 MK_SORT_ENTRY_CHK(comm)
2734 MK_SORT_ENTRY_CHK(dso)
2735 MK_SORT_ENTRY_CHK(sym)
2738 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
2740 struct hpp_sort_entry *hse_a;
2741 struct hpp_sort_entry *hse_b;
2743 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
2744 return false;
2746 hse_a = container_of(a, struct hpp_sort_entry, hpp);
2747 hse_b = container_of(b, struct hpp_sort_entry, hpp);
2749 return hse_a->se == hse_b->se;
2752 static void hse_free(struct perf_hpp_fmt *fmt)
2754 struct hpp_sort_entry *hse;
2756 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2757 free(hse);
2760 static void hse_init(struct perf_hpp_fmt *fmt, struct hist_entry *he)
2762 struct hpp_sort_entry *hse;
2764 if (!perf_hpp__is_sort_entry(fmt))
2765 return;
2767 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2769 if (hse->se->se_init)
2770 hse->se->se_init(he);
2773 static struct hpp_sort_entry *
2774 __sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
2776 struct hpp_sort_entry *hse;
2778 hse = malloc(sizeof(*hse));
2779 if (hse == NULL) {
2780 pr_err("Memory allocation failed\n");
2781 return NULL;
2784 hse->se = sd->entry;
2785 hse->hpp.name = sd->entry->se_header;
2786 hse->hpp.header = __sort__hpp_header;
2787 hse->hpp.width = __sort__hpp_width;
2788 hse->hpp.entry = __sort__hpp_entry;
2789 hse->hpp.color = NULL;
2791 hse->hpp.cmp = __sort__hpp_cmp;
2792 hse->hpp.collapse = __sort__hpp_collapse;
2793 hse->hpp.sort = __sort__hpp_sort;
2794 hse->hpp.equal = __sort__hpp_equal;
2795 hse->hpp.free = hse_free;
2796 hse->hpp.init = hse_init;
2798 INIT_LIST_HEAD(&hse->hpp.list);
2799 INIT_LIST_HEAD(&hse->hpp.sort_list);
2800 hse->hpp.elide = false;
2801 hse->hpp.len = 0;
2802 hse->hpp.user_len = 0;
2803 hse->hpp.level = level;
2805 return hse;
2808 static void hpp_free(struct perf_hpp_fmt *fmt)
2810 free(fmt);
2813 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
2814 int level)
2816 struct perf_hpp_fmt *fmt;
2818 fmt = memdup(hd->fmt, sizeof(*fmt));
2819 if (fmt) {
2820 INIT_LIST_HEAD(&fmt->list);
2821 INIT_LIST_HEAD(&fmt->sort_list);
2822 fmt->free = hpp_free;
2823 fmt->level = level;
2826 return fmt;
2829 int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
2831 struct perf_hpp_fmt *fmt;
2832 struct hpp_sort_entry *hse;
2833 int ret = -1;
2834 int r;
2836 perf_hpp_list__for_each_format(he->hpp_list, fmt) {
2837 if (!perf_hpp__is_sort_entry(fmt))
2838 continue;
2840 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2841 if (hse->se->se_filter == NULL)
2842 continue;
2845 * A hist entry is filtered if any of the sort keys in the hpp list
2846 * is applied. But it should skip non-matched filter types.
2848 r = hse->se->se_filter(he, type, arg);
2849 if (r >= 0) {
2850 if (ret < 0)
2851 ret = 0;
2852 ret |= r;
2856 return ret;
2859 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
2860 struct perf_hpp_list *list,
2861 int level)
2863 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
2865 if (hse == NULL)
2866 return -1;
2868 perf_hpp_list__register_sort_field(list, &hse->hpp);
2869 return 0;
2872 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
2873 struct perf_hpp_list *list)
2875 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);
2877 if (hse == NULL)
2878 return -1;
2880 perf_hpp_list__column_register(list, &hse->hpp);
2881 return 0;
2884 #ifndef HAVE_LIBTRACEEVENT
2885 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused)
2887 return false;
2889 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused,
2890 struct hists *hists __maybe_unused)
2892 return false;
2894 #else
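/*
 * A dynamic entry shows a single tracepoint format field as a sort key and
 * output column, parsed from the raw sample data with libtraceevent.
 */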
2895 struct hpp_dynamic_entry {
2896 struct perf_hpp_fmt hpp;
2897 struct evsel *evsel;
2898 struct tep_format_field *field;
2899 unsigned dynamic_len;
2900 bool raw_trace;
2903 static int hde_width(struct hpp_dynamic_entry *hde)
2905 if (!hde->hpp.len) {
2906 int len = hde->dynamic_len;
2907 int namelen = strlen(hde->field->name);
2908 int fieldlen = hde->field->size;
2910 if (namelen > len)
2911 len = namelen;
2913 if (!(hde->field->flags & TEP_FIELD_IS_STRING)) {
2914 /* length for printing hex numbers */
2915 fieldlen = hde->field->size * 2 + 2;
2917 if (fieldlen > len)
2918 len = fieldlen;
2920 hde->hpp.len = len;
2922 return hde->hpp.len;
2925 static void update_dynamic_len(struct hpp_dynamic_entry *hde,
2926 struct hist_entry *he)
2928 char *str, *pos;
2929 struct tep_format_field *field = hde->field;
2930 size_t namelen;
2931 bool last = false;
2933 if (hde->raw_trace)
2934 return;
2936 /* parse pretty print result and update max length */
2937 if (!he->trace_output)
2938 he->trace_output = get_trace_output(he);
2940 namelen = strlen(field->name);
2941 str = he->trace_output;
2943 while (str) {
2944 pos = strchr(str, ' ');
2945 if (pos == NULL) {
2946 last = true;
2947 pos = str + strlen(str);
2950 if (!strncmp(str, field->name, namelen)) {
2951 size_t len;
2953 str += namelen + 1;
2954 len = pos - str;
2956 if (len > hde->dynamic_len)
2957 hde->dynamic_len = len;
2958 break;
2961 if (last)
2962 str = NULL;
2963 else
2964 str = pos + 1;
2968 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2969 struct hists *hists __maybe_unused,
2970 int line __maybe_unused,
2971 int *span __maybe_unused)
2973 struct hpp_dynamic_entry *hde;
2974 size_t len = fmt->user_len;
2976 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2978 if (!len)
2979 len = hde_width(hde);
2981 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
2984 static int __sort__hde_width(struct perf_hpp_fmt *fmt,
2985 struct perf_hpp *hpp __maybe_unused,
2986 struct hists *hists __maybe_unused)
2988 struct hpp_dynamic_entry *hde;
2989 size_t len = fmt->user_len;
2991 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2993 if (!len)
2994 len = hde_width(hde);
2996 return len;
2999 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
3001 struct hpp_dynamic_entry *hde;
3003 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3005 return hists_to_evsel(hists) == hde->evsel;
3008 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
3009 struct hist_entry *he)
3011 struct hpp_dynamic_entry *hde;
3012 size_t len = fmt->user_len;
3013 char *str, *pos;
3014 struct tep_format_field *field;
3015 size_t namelen;
3016 bool last = false;
3017 int ret;
3019 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3021 if (!len)
3022 len = hde_width(hde);
3024 if (hde->raw_trace)
3025 goto raw_field;
3027 if (!he->trace_output)
3028 he->trace_output = get_trace_output(he);
3030 field = hde->field;
3031 namelen = strlen(field->name);
3032 str = he->trace_output;
3034 while (str) {
3035 pos = strchr(str, ' ');
3036 if (pos == NULL) {
3037 last = true;
3038 pos = str + strlen(str);
3041 if (!strncmp(str, field->name, namelen)) {
3042 str += namelen + 1;
3043 str = strndup(str, pos - str);
3045 if (str == NULL)
3046 return scnprintf(hpp->buf, hpp->size,
3047 "%*.*s", len, len, "ERROR");
3048 break;
3051 if (last)
3052 str = NULL;
3053 else
3054 str = pos + 1;
3057 if (str == NULL) {
3058 struct trace_seq seq;
3059 raw_field:
3060 trace_seq_init(&seq);
3061 tep_print_field(&seq, he->raw_data, hde->field);
3062 str = seq.buffer;
3065 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
3066 free(str);
3067 return ret;
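/*
 * Compare the raw bytes of the field; dynamic-size fields first decode
 * their (offset, size) pair from the sample data.
 */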
3070 static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
3071 struct hist_entry *a, struct hist_entry *b)
3073 struct hpp_dynamic_entry *hde;
3074 struct tep_format_field *field;
3075 unsigned offset, size;
3077 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3079 field = hde->field;
3080 if (field->flags & TEP_FIELD_IS_DYNAMIC) {
3081 unsigned long long dyn;
3083 tep_read_number_field(field, a->raw_data, &dyn);
3084 offset = dyn & 0xffff;
3085 size = (dyn >> 16) & 0xffff;
3086 if (tep_field_is_relative(field->flags))
3087 offset += field->offset + field->size;
3088 /* record max width for output */
3089 if (size > hde->dynamic_len)
3090 hde->dynamic_len = size;
3091 } else {
3092 offset = field->offset;
3093 size = field->size;
3096 return memcmp(a->raw_data + offset, b->raw_data + offset, size);
3099 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
3101 return fmt->cmp == __sort__hde_cmp;
3104 static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
3106 struct hpp_dynamic_entry *hde_a;
3107 struct hpp_dynamic_entry *hde_b;
3109 if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
3110 return false;
3112 hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
3113 hde_b = container_of(b, struct hpp_dynamic_entry, hpp);
3115 return hde_a->field == hde_b->field;
3118 static void hde_free(struct perf_hpp_fmt *fmt)
3120 struct hpp_dynamic_entry *hde;
3122 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3123 free(hde);
3126 static void __sort__hde_init(struct perf_hpp_fmt *fmt, struct hist_entry *he)
3128 struct hpp_dynamic_entry *hde;
3130 if (!perf_hpp__is_dynamic_entry(fmt))
3131 return;
3133 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3134 update_dynamic_len(hde, he);
3137 static struct hpp_dynamic_entry *
3138 __alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field,
3139 int level)
3141 struct hpp_dynamic_entry *hde;
3143 hde = malloc(sizeof(*hde));
3144 if (hde == NULL) {
3145 pr_debug("Memory allocation failed\n");
3146 return NULL;
3149 hde->evsel = evsel;
3150 hde->field = field;
3151 hde->dynamic_len = 0;
3153 hde->hpp.name = field->name;
3154 hde->hpp.header = __sort__hde_header;
3155 hde->hpp.width = __sort__hde_width;
3156 hde->hpp.entry = __sort__hde_entry;
3157 hde->hpp.color = NULL;
3159 hde->hpp.init = __sort__hde_init;
3160 hde->hpp.cmp = __sort__hde_cmp;
3161 hde->hpp.collapse = __sort__hde_cmp;
3162 hde->hpp.sort = __sort__hde_cmp;
3163 hde->hpp.equal = __sort__hde_equal;
3164 hde->hpp.free = hde_free;
3166 INIT_LIST_HEAD(&hde->hpp.list);
3167 INIT_LIST_HEAD(&hde->hpp.sort_list);
3168 hde->hpp.elide = false;
3169 hde->hpp.len = 0;
3170 hde->hpp.user_len = 0;
3171 hde->hpp.level = level;
3173 return hde;
3175 #endif /* HAVE_LIBTRACEEVENT */
3177 struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
3179 struct perf_hpp_fmt *new_fmt = NULL;
3181 if (perf_hpp__is_sort_entry(fmt)) {
3182 struct hpp_sort_entry *hse, *new_hse;
3184 hse = container_of(fmt, struct hpp_sort_entry, hpp);
3185 new_hse = memdup(hse, sizeof(*hse));
3186 if (new_hse)
3187 new_fmt = &new_hse->hpp;
3188 #ifdef HAVE_LIBTRACEEVENT
3189 } else if (perf_hpp__is_dynamic_entry(fmt)) {
3190 struct hpp_dynamic_entry *hde, *new_hde;
3192 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3193 new_hde = memdup(hde, sizeof(*hde));
3194 if (new_hde)
3195 new_fmt = &new_hde->hpp;
3196 #endif
3197 } else {
3198 new_fmt = memdup(fmt, sizeof(*fmt));
3201 INIT_LIST_HEAD(&new_fmt->list);
3202 INIT_LIST_HEAD(&new_fmt->sort_list);
3204 return new_fmt;
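/*
 * Split a "[<event>.]<field>[/<opt>]" token in place; the event and option
 * parts come back as NULL when they are not given.
 */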
3207 static int parse_field_name(char *str, char **event, char **field, char **opt)
3209 char *event_name, *field_name, *opt_name;
3211 event_name = str;
3212 field_name = strchr(str, '.');
3214 if (field_name) {
3215 *field_name++ = '\0';
3216 } else {
3217 event_name = NULL;
3218 field_name = str;
3221 opt_name = strchr(field_name, '/');
3222 if (opt_name)
3223 *opt_name++ = '\0';
3225 *event = event_name;
3226 *field = field_name;
3227 *opt = opt_name;
3229 return 0;
3232 /* Find the matching evsel using a given event name. The event name can be:
3233 * 1. '%' + event index (e.g. '%1' for first event)
3234 * 2. full event name (e.g. sched:sched_switch)
3235 * 3. partial event name (should not contain ':')
3237 static struct evsel *find_evsel(struct evlist *evlist, char *event_name)
3239 struct evsel *evsel = NULL;
3240 struct evsel *pos;
3241 bool full_name;
3243 /* case 1 */
3244 if (event_name[0] == '%') {
3245 int nr = strtol(event_name+1, NULL, 0);
3247 if (nr > evlist->core.nr_entries)
3248 return NULL;
3250 evsel = evlist__first(evlist);
3251 while (--nr > 0)
3252 evsel = evsel__next(evsel);
3254 return evsel;
3257 full_name = !!strchr(event_name, ':');
3258 evlist__for_each_entry(evlist, pos) {
3259 /* case 2 */
3260 if (full_name && evsel__name_is(pos, event_name))
3261 return pos;
3262 /* case 3 */
3263 if (!full_name && strstr(pos->name, event_name)) {
3264 if (evsel) {
3265 pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
3266 event_name, evsel->name, pos->name);
3267 return NULL;
3269 evsel = pos;
3273 return evsel;
3276 #ifdef HAVE_LIBTRACEEVENT
3277 static int __dynamic_dimension__add(struct evsel *evsel,
3278 struct tep_format_field *field,
3279 bool raw_trace, int level)
3281 struct hpp_dynamic_entry *hde;
3283 hde = __alloc_dynamic_entry(evsel, field, level);
3284 if (hde == NULL)
3285 return -ENOMEM;
3287 hde->raw_trace = raw_trace;
3289 perf_hpp__register_sort_field(&hde->hpp);
3290 return 0;
3293 static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level)
3295 int ret;
3296 struct tep_format_field *field;
3298 field = evsel->tp_format->format.fields;
3299 while (field) {
3300 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
3301 if (ret < 0)
3302 return ret;
3304 field = field->next;
3306 return 0;
3309 static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace,
3310 int level)
3312 int ret;
3313 struct evsel *evsel;
3315 evlist__for_each_entry(evlist, evsel) {
3316 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
3317 continue;
3319 ret = add_evsel_fields(evsel, raw_trace, level);
3320 if (ret < 0)
3321 return ret;
3323 return 0;
3326 static int add_all_matching_fields(struct evlist *evlist,
3327 char *field_name, bool raw_trace, int level)
3329 int ret = -ESRCH;
3330 struct evsel *evsel;
3331 struct tep_format_field *field;
3333 evlist__for_each_entry(evlist, evsel) {
3334 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
3335 continue;
3337 field = tep_find_any_field(evsel->tp_format, field_name);
3338 if (field == NULL)
3339 continue;
3341 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
3342 if (ret < 0)
3343 break;
3345 return ret;
3347 #endif /* HAVE_LIBTRACEEVENT */
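/*
 * Handle a --sort token naming tracepoint data: "<event>.<field>",
 * "<event>.*" (all fields of one event), a bare field name (all events that
 * have it) or "trace_fields" (every field of every tracepoint), optionally
 * suffixed with "/raw" to disable pretty printing.
 * Illustrative example: --sort sched:sched_switch.next_pid
 */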
3349 static int add_dynamic_entry(struct evlist *evlist, const char *tok,
3350 int level)
3352 char *str, *event_name, *field_name, *opt_name;
3353 struct evsel *evsel;
3354 bool raw_trace = symbol_conf.raw_trace;
3355 int ret = 0;
3357 if (evlist == NULL)
3358 return -ENOENT;
3360 str = strdup(tok);
3361 if (str == NULL)
3362 return -ENOMEM;
3364 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
3365 ret = -EINVAL;
3366 goto out;
3369 if (opt_name) {
3370 if (strcmp(opt_name, "raw")) {
3371 pr_debug("unsupported field option %s\n", opt_name);
3372 ret = -EINVAL;
3373 goto out;
3375 raw_trace = true;
3378 #ifdef HAVE_LIBTRACEEVENT
3379 if (!strcmp(field_name, "trace_fields")) {
3380 ret = add_all_dynamic_fields(evlist, raw_trace, level);
3381 goto out;
3384 if (event_name == NULL) {
3385 ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
3386 goto out;
3388 #else
3389 evlist__for_each_entry(evlist, evsel) {
3390 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
3391 pr_err("%s %s", ret ? "," : "This perf binary isn't linked with libtraceevent, can't process", evsel__name(evsel));
3392 ret = -ENOTSUP;
3396 if (ret) {
3397 pr_err("\n");
3398 goto out;
3400 #endif
3402 evsel = find_evsel(evlist, event_name);
3403 if (evsel == NULL) {
3404 pr_debug("Cannot find event: %s\n", event_name);
3405 ret = -ENOENT;
3406 goto out;
3409 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
3410 pr_debug("%s is not a tracepoint event\n", event_name);
3411 ret = -EINVAL;
3412 goto out;
3415 #ifdef HAVE_LIBTRACEEVENT
3416 if (!strcmp(field_name, "*")) {
3417 ret = add_evsel_fields(evsel, raw_trace, level);
3418 } else {
3419 struct tep_format_field *field = tep_find_any_field(evsel->tp_format, field_name);
3421 if (field == NULL) {
3422 pr_debug("Cannot find event field for %s.%s\n",
3423 event_name, field_name);
3424 ret = -ENOENT;
goto out; /* take the common exit path so 'str' isn't leaked */
3427 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
3429 #else
3430 (void)level;
3431 (void)raw_trace;
3432 #endif /* HAVE_LIBTRACEEVENT */
3434 out:
3435 free(str);
3436 return ret;
3439 static int __sort_dimension__add(struct sort_dimension *sd,
3440 struct perf_hpp_list *list,
3441 int level)
3443 if (sd->taken)
3444 return 0;
3446 if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
3447 return -1;
3449 if (sd->entry->se_collapse)
3450 list->need_collapse = 1;
3452 sd->taken = 1;
3454 return 0;
3457 static int __hpp_dimension__add(struct hpp_dimension *hd,
3458 struct perf_hpp_list *list,
3459 int level)
3461 struct perf_hpp_fmt *fmt;
3463 if (hd->taken)
3464 return 0;
3466 fmt = __hpp_dimension__alloc_hpp(hd, level);
3467 if (!fmt)
3468 return -1;
3470 hd->taken = 1;
3471 perf_hpp_list__register_sort_field(list, fmt);
3472 return 0;
3475 static int __sort_dimension__add_output(struct perf_hpp_list *list,
3476 struct sort_dimension *sd)
3478 if (sd->taken)
3479 return 0;
3481 if (__sort_dimension__add_hpp_output(sd, list) < 0)
3482 return -1;
3484 sd->taken = 1;
3485 return 0;
3488 static int __hpp_dimension__add_output(struct perf_hpp_list *list,
3489 struct hpp_dimension *hd)
3491 struct perf_hpp_fmt *fmt;
3493 if (hd->taken)
3494 return 0;
3496 fmt = __hpp_dimension__alloc_hpp(hd, 0);
3497 if (!fmt)
3498 return -1;
3500 hd->taken = 1;
3501 perf_hpp_list__column_register(list, fmt);
3502 return 0;
3505 int hpp_dimension__add_output(unsigned col)
3507 BUG_ON(col >= PERF_HPP__MAX_INDEX);
3508 return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
3511 int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
3512 struct evlist *evlist,
3513 int level)
3515 unsigned int i, j;
3518 * Check to see if there are any arch-specific
3519 * sort dimensions that are not applicable to the current
3520 * architecture. If so, skip that sort key since
3521 * we don't want to display it in the output fields.
3523 for (j = 0; j < ARRAY_SIZE(arch_specific_sort_keys); j++) {
3524 if (!strcmp(arch_specific_sort_keys[j], tok) &&
3525 !arch_support_sort_key(tok)) {
3526 return 0;
3530 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
3531 struct sort_dimension *sd = &common_sort_dimensions[i];
3533 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3534 continue;
3536 for (j = 0; j < ARRAY_SIZE(dynamic_headers); j++) {
3537 if (sd->name && !strcmp(dynamic_headers[j], sd->name))
3538 sort_dimension_add_dynamic_header(sd);
3541 if (sd->entry == &sort_parent && parent_pattern) {
3542 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
3543 if (ret) {
3544 char err[BUFSIZ];
3546 regerror(ret, &parent_regex, err, sizeof(err));
3547 pr_err("Invalid regex: %s\n%s", parent_pattern, err);
3548 return -EINVAL;
3550 list->parent = 1;
3551 } else if (sd->entry == &sort_sym) {
3552 list->sym = 1;
3554 * perf diff displays the performance difference between
3555 * two or more perf.data files. Those files could come
3556 * from different binaries, so we should not compare
3557 * their ips but the symbol names.
3559 if (sort__mode == SORT_MODE__DIFF)
3560 sd->entry->se_collapse = sort__sym_sort;
3562 } else if (sd->entry == &sort_dso) {
3563 list->dso = 1;
3564 } else if (sd->entry == &sort_socket) {
3565 list->socket = 1;
3566 } else if (sd->entry == &sort_thread) {
3567 list->thread = 1;
3568 } else if (sd->entry == &sort_comm) {
3569 list->comm = 1;
3570 } else if (sd->entry == &sort_type_offset) {
3571 symbol_conf.annotate_data_member = true;
3574 return __sort_dimension__add(sd, list, level);
3577 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
3578 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
3580 if (strncasecmp(tok, hd->name, strlen(tok)))
3581 continue;
3583 return __hpp_dimension__add(hd, list, level);
3586 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
3587 struct sort_dimension *sd = &bstack_sort_dimensions[i];
3589 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3590 continue;
3592 if ((sort__mode != SORT_MODE__BRANCH) &&
3593 strncasecmp(tok, "callchain_branch_predicted",
3594 strlen(tok)) &&
3595 strncasecmp(tok, "callchain_branch_abort",
3596 strlen(tok)) &&
3597 strncasecmp(tok, "callchain_branch_cycles",
3598 strlen(tok)))
3599 return -EINVAL;
3601 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
3602 list->sym = 1;
3604 __sort_dimension__add(sd, list, level);
3605 return 0;
3608 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
3609 struct sort_dimension *sd = &memory_sort_dimensions[i];
3611 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3612 continue;
3614 if (sort__mode != SORT_MODE__MEMORY)
3615 return -EINVAL;
3617 if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0)
3618 return -EINVAL;
3620 if (sd->entry == &sort_mem_daddr_sym)
3621 list->sym = 1;
3623 __sort_dimension__add(sd, list, level);
3624 return 0;
3627 if (!add_dynamic_entry(evlist, tok, level))
3628 return 0;
3630 return -ESRCH;
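/*
 * Parse the sort key string: keys inside '{}' share the same level while
 * every other key starts a new one (the levels drive the --hierarchy
 * grouping), e.g. "comm,{dso,symbol}" keeps dso and symbol together.
 */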
3633 static int setup_sort_list(struct perf_hpp_list *list, char *str,
3634 struct evlist *evlist)
3636 char *tmp, *tok;
3637 int ret = 0;
3638 int level = 0;
3639 int next_level = 1;
3640 bool in_group = false;
3642 do {
3643 tok = str;
3644 tmp = strpbrk(str, "{}, ");
3645 if (tmp) {
3646 if (in_group)
3647 next_level = level;
3648 else
3649 next_level = level + 1;
3651 if (*tmp == '{')
3652 in_group = true;
3653 else if (*tmp == '}')
3654 in_group = false;
3656 *tmp = '\0';
3657 str = tmp + 1;
3660 if (*tok) {
3661 ret = sort_dimension__add(list, tok, evlist, level);
3662 if (ret == -EINVAL) {
3663 if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
3664 ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
3665 else
3666 ui__error("Invalid --sort key: `%s'", tok);
3667 break;
3668 } else if (ret == -ESRCH) {
3669 ui__error("Unknown --sort key: `%s'", tok);
3670 break;
3674 level = next_level;
3675 } while (tmp);
3677 return ret;
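/*
 * Pick the default sort order for the current sort mode; an evlist made up
 * entirely of tracepoint events switches to SORT_MODE__TRACEPOINT.
 */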
3680 static const char *get_default_sort_order(struct evlist *evlist)
3682 const char *default_sort_orders[] = {
3683 default_sort_order,
3684 default_branch_sort_order,
3685 default_mem_sort_order,
3686 default_top_sort_order,
3687 default_diff_sort_order,
3688 default_tracepoint_sort_order,
3690 bool use_trace = true;
3691 struct evsel *evsel;
3693 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
3695 if (evlist == NULL || evlist__empty(evlist))
3696 goto out_no_evlist;
3698 evlist__for_each_entry(evlist, evsel) {
3699 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
3700 use_trace = false;
3701 break;
3705 if (use_trace) {
3706 sort__mode = SORT_MODE__TRACEPOINT;
3707 if (symbol_conf.raw_trace)
3708 return "trace_fields";
3710 out_no_evlist:
3711 return default_sort_orders[sort__mode];
3714 static int setup_sort_order(struct evlist *evlist)
3716 char *new_sort_order;
3719 * Append '+'-prefixed sort order to the default sort
3720 * order string.
3722 if (!sort_order || is_strict_order(sort_order))
3723 return 0;
3725 if (sort_order[1] == '\0') {
3726 ui__error("Invalid --sort key: `+'");
3727 return -EINVAL;
3731 * We allocate a new sort_order string, but we never free it,
3732 * because it is referenced throughout the rest of the code.
3734 if (asprintf(&new_sort_order, "%s,%s",
3735 get_default_sort_order(evlist), sort_order + 1) < 0) {
3736 pr_err("Not enough memory to set up --sort");
3737 return -ENOMEM;
3740 sort_order = new_sort_order;
3741 return 0;
3745 * Adds the 'pre,' prefix to 'str' if 'pre' is
3746 * not already part of 'str'.
3748 static char *prefix_if_not_in(const char *pre, char *str)
3750 char *n;
3752 if (!str || strstr(str, pre))
3753 return str;
3755 if (asprintf(&n, "%s,%s", pre, str) < 0)
3756 n = NULL;
3758 free(str);
3759 return n;
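/*
 * Make sure "overhead" (and "overhead_children" when cumulating callchains)
 * lead the sort keys; perf diff manages its own columns and is left alone.
 */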
3762 static char *setup_overhead(char *keys)
3764 if (sort__mode == SORT_MODE__DIFF)
3765 return keys;
3767 keys = prefix_if_not_in("overhead", keys);
3769 if (symbol_conf.cumulate_callchain)
3770 keys = prefix_if_not_in("overhead_children", keys);
3772 return keys;
3775 static int __setup_sorting(struct evlist *evlist)
3777 char *str;
3778 const char *sort_keys;
3779 int ret = 0;
3781 ret = setup_sort_order(evlist);
3782 if (ret)
3783 return ret;
3785 sort_keys = sort_order;
3786 if (sort_keys == NULL) {
3787 if (is_strict_order(field_order)) {
3789 * If user specified field order but no sort order,
3790 * we'll honor it and not add default sort orders.
3792 return 0;
3795 sort_keys = get_default_sort_order(evlist);
3798 str = strdup(sort_keys);
3799 if (str == NULL) {
3800 pr_err("Not enough memory to setup sort keys");
3801 return -ENOMEM;
3805 * Prepend overhead fields for backward compatibility.
3807 if (!is_strict_order(field_order)) {
3808 str = setup_overhead(str);
3809 if (str == NULL) {
3810 pr_err("Not enough memory to setup overhead keys");
3811 return -ENOMEM;
3815 ret = setup_sort_list(&perf_hpp_list, str, evlist);
3817 free(str);
3818 return ret;
3821 void perf_hpp__set_elide(int idx, bool elide)
3823 struct perf_hpp_fmt *fmt;
3824 struct hpp_sort_entry *hse;
3826 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3827 if (!perf_hpp__is_sort_entry(fmt))
3828 continue;
3830 hse = container_of(fmt, struct hpp_sort_entry, hpp);
3831 if (hse->se->se_width_idx == idx) {
3832 fmt->elide = elide;
3833 break;
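/*
 * A column is elided when its filter list holds exactly one entry, since
 * every row would show the same value; the value is printed once as a
 * "# <name>: <value>" header line instead.
 */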
3838 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
3840 if (list && strlist__nr_entries(list) == 1) {
3841 if (fp != NULL)
3842 fprintf(fp, "# %s: %s\n", list_name,
3843 strlist__entry(list, 0)->s);
3844 return true;
3846 return false;
3849 static bool get_elide(int idx, FILE *output)
3851 switch (idx) {
3852 case HISTC_SYMBOL:
3853 return __get_elide(symbol_conf.sym_list, "symbol", output);
3854 case HISTC_DSO:
3855 return __get_elide(symbol_conf.dso_list, "dso", output);
3856 case HISTC_COMM:
3857 return __get_elide(symbol_conf.comm_list, "comm", output);
3858 default:
3859 break;
3862 if (sort__mode != SORT_MODE__BRANCH)
3863 return false;
3865 switch (idx) {
3866 case HISTC_SYMBOL_FROM:
3867 return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
3868 case HISTC_SYMBOL_TO:
3869 return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
3870 case HISTC_DSO_FROM:
3871 return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
3872 case HISTC_DSO_TO:
3873 return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
3874 case HISTC_ADDR_FROM:
3875 return __get_elide(symbol_conf.sym_from_list, "addr_from", output);
3876 case HISTC_ADDR_TO:
3877 return __get_elide(symbol_conf.sym_to_list, "addr_to", output);
3878 default:
3879 break;
3882 return false;
3885 void sort__setup_elide(FILE *output)
3887 struct perf_hpp_fmt *fmt;
3888 struct hpp_sort_entry *hse;
3890 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3891 if (!perf_hpp__is_sort_entry(fmt))
3892 continue;
3894 hse = container_of(fmt, struct hpp_sort_entry, hpp);
3895 fmt->elide = get_elide(hse->se->se_width_idx, output);
3899 * It makes no sense to elide all of the sort entries.
3900 * Just revert them so they show up again.
3902 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3903 if (!perf_hpp__is_sort_entry(fmt))
3904 continue;
3906 if (!fmt->elide)
3907 return;
3910 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3911 if (!perf_hpp__is_sort_entry(fmt))
3912 continue;
3914 fmt->elide = false;
3918 int output_field_add(struct perf_hpp_list *list, const char *tok)
3920 unsigned int i;
3922 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
3923 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
3925 if (strncasecmp(tok, hd->name, strlen(tok)))
3926 continue;
3928 if (!strcasecmp(tok, "weight"))
3929 ui__warning("--fields weight shows the average value unlike in the --sort key.\n");
3931 return __hpp_dimension__add_output(list, hd);
3934 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
3935 struct sort_dimension *sd = &common_sort_dimensions[i];
3937 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3938 continue;
3940 return __sort_dimension__add_output(list, sd);
3943 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
3944 struct sort_dimension *sd = &bstack_sort_dimensions[i];
3946 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3947 continue;
3949 if (sort__mode != SORT_MODE__BRANCH)
3950 return -EINVAL;
3952 return __sort_dimension__add_output(list, sd);
3955 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
3956 struct sort_dimension *sd = &memory_sort_dimensions[i];
3958 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3959 continue;
3961 if (sort__mode != SORT_MODE__MEMORY)
3962 return -EINVAL;
3964 return __sort_dimension__add_output(list, sd);
3967 return -ESRCH;
3970 static int setup_output_list(struct perf_hpp_list *list, char *str)
3972 char *tmp, *tok;
3973 int ret = 0;
3975 for (tok = strtok_r(str, ", ", &tmp);
3976 tok; tok = strtok_r(NULL, ", ", &tmp)) {
3977 ret = output_field_add(list, tok);
3978 if (ret == -EINVAL) {
3979 ui__error("Invalid --fields key: `%s'", tok);
3980 break;
3981 } else if (ret == -ESRCH) {
3982 ui__error("Unknown --fields key: `%s'", tok);
3983 break;
3987 return ret;
3990 void reset_dimensions(void)
3992 unsigned int i;
3994 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
3995 common_sort_dimensions[i].taken = 0;
3997 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
3998 hpp_sort_dimensions[i].taken = 0;
4000 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
4001 bstack_sort_dimensions[i].taken = 0;
4003 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
4004 memory_sort_dimensions[i].taken = 0;
4007 bool is_strict_order(const char *order)
4009 return order && (*order != '+');
4012 static int __setup_output_field(void)
4014 char *str, *strp;
4015 int ret = -EINVAL;
4017 if (field_order == NULL)
4018 return 0;
4020 strp = str = strdup(field_order);
4021 if (str == NULL) {
4022 pr_err("Not enough memory to setup output fields");
4023 return -ENOMEM;
4026 if (!is_strict_order(field_order))
4027 strp++;
4029 if (!strlen(strp)) {
4030 ui__error("Invalid --fields key: `+'");
4031 goto out;
4034 ret = setup_output_list(&perf_hpp_list, strp);
4036 out:
4037 free(str);
4038 return ret;
4041 int setup_sorting(struct evlist *evlist)
4043 int err;
4045 err = __setup_sorting(evlist);
4046 if (err < 0)
4047 return err;
4049 if (parent_pattern != default_parent_pattern) {
4050 err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
4051 if (err < 0)
4052 return err;
4055 reset_dimensions();
4058 * perf diff doesn't use default hpp output fields.
4060 if (sort__mode != SORT_MODE__DIFF)
4061 perf_hpp__init();
4063 err = __setup_output_field();
4064 if (err < 0)
4065 return err;
4067 /* copy sort keys to output fields */
4068 perf_hpp__setup_output_field(&perf_hpp_list);
4069 /* and then copy output fields to sort keys */
4070 perf_hpp__append_sort_keys(&perf_hpp_list);
4072 /* setup hists-specific output fields */
4073 if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
4074 return -1;
4076 return 0;
4079 void reset_output_field(void)
4081 perf_hpp_list.need_collapse = 0;
4082 perf_hpp_list.parent = 0;
4083 perf_hpp_list.sym = 0;
4084 perf_hpp_list.dso = 0;
4086 field_order = NULL;
4087 sort_order = NULL;
4089 reset_dimensions();
4090 perf_hpp__reset_output_field(&perf_hpp_list);
4093 #define INDENT (3*8 + 1)
4095 static void add_key(struct strbuf *sb, const char *str, int *llen)
4097 if (!str)
4098 return;
4100 if (*llen >= 75) {
4101 strbuf_addstr(sb, "\n\t\t\t ");
4102 *llen = INDENT;
4104 strbuf_addf(sb, " %s", str);
4105 *llen += strlen(str) + 1;
4108 static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
4109 int *llen)
4111 int i;
4113 for (i = 0; i < n; i++)
4114 add_key(sb, s[i].name, llen);
4117 static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
4118 int *llen)
4120 int i;
4122 for (i = 0; i < n; i++)
4123 add_key(sb, s[i].name, llen);
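/*
 * Build the help text listing every sort key valid for 'mode', wrapping
 * lines at roughly 75 columns.
 */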
4126 char *sort_help(const char *prefix, enum sort_mode mode)
4128 struct strbuf sb;
4129 char *s;
4130 int len = strlen(prefix) + INDENT;
4132 strbuf_init(&sb, 300);
4133 strbuf_addstr(&sb, prefix);
4134 add_hpp_sort_string(&sb, hpp_sort_dimensions,
4135 ARRAY_SIZE(hpp_sort_dimensions), &len);
4136 add_sort_string(&sb, common_sort_dimensions,
4137 ARRAY_SIZE(common_sort_dimensions), &len);
4138 if (mode == SORT_MODE__NORMAL || mode == SORT_MODE__BRANCH)
4139 add_sort_string(&sb, bstack_sort_dimensions,
4140 ARRAY_SIZE(bstack_sort_dimensions), &len);
4141 if (mode == SORT_MODE__NORMAL || mode == SORT_MODE__MEMORY)
4142 add_sort_string(&sb, memory_sort_dimensions,
4143 ARRAY_SIZE(memory_sort_dimensions), &len);
4144 s = strbuf_detach(&sb, NULL);
4145 strbuf_release(&sb);
4146 return s;