#include <sys/mman.h>
#include "sort.h"
#include "hist.h"
#include "comm.h"
#include "symbol.h"
#include "evsel.h"
#include "evlist.h"
#include <traceevent/event-parse.h>
#include "mem-events.h"
regex_t		parent_regex;
const char	default_parent_pattern[] = "^sys_|^do_page_fault";
const char	*parent_pattern = default_parent_pattern;
const char	default_sort_order[] = "comm,dso,symbol";
const char	default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char	default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
const char	default_top_sort_order[] = "dso,symbol";
const char	default_diff_sort_order[] = "dso,symbol";
const char	default_tracepoint_sort_order[] = "trace";
const char	*sort_order;
const char	*field_order;
regex_t		ignore_callees_regex;
int		have_ignore_callees = 0;
int		sort__need_collapse = 0;
int		sort__has_parent = 0;
int		sort__has_sym = 0;
int		sort__has_dso = 0;
int		sort__has_socket = 0;
int		sort__has_thread = 0;
int		sort__has_comm = 0;
enum sort_mode	sort__mode = SORT_MODE__NORMAL;
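
/*
 * Illustrative examples (assumed, not part of this file's logic): when no
 * explicit --sort/-s key list is given, the strings above are used, e.g.
 *
 *	perf report            ->  "comm,dso,symbol"
 *	perf report -b         ->  "comm,dso_from,symbol_from,symbol_to,cycles"
 *	perf report --mem-mode ->  "local_weight,mem,sym,dso,symbol_daddr,..."
 *
 * get_default_sort_order() below picks the entry matching sort__mode.
 */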
/*
 * Replaces all occurrences of a char used with the:
 *
 * -t, --field-separator
 *
 * option, that uses a special separator character and doesn't pad with
 * spaces, replacing all occurrences of this separator in symbol names (and
 * other output) with a '.' character, so that it is the only non-valid
 * separator.
 */
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
{
	int n;
	va_list ap;

	va_start(ap, fmt);
	n = vsnprintf(bf, size, fmt, ap);
	if (symbol_conf.field_sep && n > 0) {
		char *sep = bf;

		while (1) {
			sep = strchr(sep, *symbol_conf.field_sep);
			if (sep == NULL)
				break;
			*sep = '.';
		}
	}
	va_end(ap);

	if (n >= (int)size)
		return size - 1;
	return n;
}
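
/*
 * Helper for the se_cmp routines below: order entries when the compared
 * field may be missing.  Returns 0 if both sides are NULL, -1 if only the
 * left one is, and 1 otherwise.
 */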
static int64_t cmp_null(const void *l, const void *r)
{
	if (!l && !r)
		return 0;
	else if (!l)
		return -1;
	else
		return 1;
}
/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->tid - left->thread->tid;
}

static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	const char *comm = thread__comm_str(he->thread);

	width = max(7U, width) - 6;
	return repsep_snprintf(bf, size, "%5d:%-*.*s", he->thread->tid,
			       width, width, comm ?: "");
}

static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct thread *th = arg;

	if (type != HIST_FILTER__THREAD)
		return -1;

	return th && he->thread != th;
}

struct sort_entry sort_thread = {
	.se_header	= " Pid:Command",
	.se_cmp		= sort__thread_cmp,
	.se_snprintf	= hist_entry__thread_snprintf,
	.se_filter	= hist_entry__thread_filter,
	.se_width_idx	= HISTC_THREAD,
};
/* --sort comm */

static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	/* Compare the addr that should be unique among comm */
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	/* Compare the addr that should be unique among comm */
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
}

struct sort_entry sort_comm = {
	.se_header	= "Command",
	.se_cmp		= sort__comm_cmp,
	.se_collapse	= sort__comm_collapse,
	.se_sort	= sort__comm_sort,
	.se_snprintf	= hist_entry__comm_snprintf,
	.se_filter	= hist_entry__thread_filter,
	.se_width_idx	= HISTC_COMM,
};
/* --sort dso */

static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
{
	struct dso *dso_l = map_l ? map_l->dso : NULL;
	struct dso *dso_r = map_r ? map_r->dso : NULL;
	const char *dso_name_l, *dso_name_r;

	if (!dso_l || !dso_r)
		return cmp_null(dso_r, dso_l);

	if (verbose) {
		dso_name_l = dso_l->long_name;
		dso_name_r = dso_r->long_name;
	} else {
		dso_name_l = dso_l->short_name;
		dso_name_r = dso_r->short_name;
	}

	return strcmp(dso_name_l, dso_name_r);
}

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_snprintf(struct map *map, char *bf,
				     size_t size, unsigned int width)
{
	if (map && map->dso) {
		const char *dso_name = !verbose ? map->dso->short_name :
			map->dso->long_name;
		return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
}

static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
}

static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->ms.map || he->ms.map->dso != dso);
}

struct sort_entry sort_dso = {
	.se_header	= "Shared Object",
	.se_cmp		= sort__dso_cmp,
	.se_snprintf	= hist_entry__dso_snprintf,
	.se_filter	= hist_entry__dso_filter,
	.se_width_idx	= HISTC_DSO,
};
/* --sort symbol */

static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
{
	return (int64_t)(right_ip - left_ip);
}

static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	if (sym_l == sym_r)
		return 0;

	if (sym_l->start != sym_r->start)
		return (int64_t)(sym_r->start - sym_l->start);

	return (int64_t)(sym_r->end - sym_l->end);
}

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	if (!left->ms.sym && !right->ms.sym)
		return _sort__addr_cmp(left->ip, right->ip);

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	if (!sort__has_dso) {
		ret = sort__dso_cmp(left, right);
		if (ret != 0)
			return ret;
	}

	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
}

static int64_t
sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->ms.sym || !right->ms.sym)
		return cmp_null(left->ms.sym, right->ms.sym);

	return strcmp(right->ms.sym->name, left->ms.sym->name);
}

static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
				     u64 ip, char level, char *bf, size_t size,
				     unsigned int width)
{
	size_t ret = 0;

	if (verbose) {
		char o = map ? dso__symtab_origin(map->dso) : '!';
		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
				       BITS_PER_LONG / 4 + 2, ip, o);
	}

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
	if (sym && map) {
		if (map->type == MAP__VARIABLE) {
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					ip - map->unmap_ip(map, sym->start));
		} else {
			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
					       width - ret,
					       sym->name);
		}
	} else {
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
	}

	return ret;
}

static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
					 he->level, bf, size, width);
}

static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
}

struct sort_entry sort_sym = {
	.se_header	= "Symbol",
	.se_cmp		= sort__sym_cmp,
	.se_sort	= sort__sym_sort,
	.se_snprintf	= hist_entry__sym_snprintf,
	.se_filter	= hist_entry__sym_filter,
	.se_width_idx	= HISTC_SYMBOL,
};
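
/*
 * The srcline/srcfile keys below resolve the source location lazily on
 * first comparison and cache the resulting string in the hist_entry
 * (he->srcline / he->srcfile), so the potentially expensive lookup is
 * done at most once per entry.
 */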
323 /* --sort srcline */
325 static char *hist_entry__get_srcline(struct hist_entry *he)
327 struct map *map = he->ms.map;
329 if (!map)
330 return SRCLINE_UNKNOWN;
332 return get_srcline(map->dso, map__rip_2objdump(map, he->ip),
333 he->ms.sym, true);
336 static int64_t
337 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
339 if (!left->srcline)
340 left->srcline = hist_entry__get_srcline(left);
341 if (!right->srcline)
342 right->srcline = hist_entry__get_srcline(right);
344 return strcmp(right->srcline, left->srcline);
347 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
348 size_t size, unsigned int width)
350 if (!he->srcline)
351 he->srcline = hist_entry__get_srcline(he);
353 return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
356 struct sort_entry sort_srcline = {
357 .se_header = "Source:Line",
358 .se_cmp = sort__srcline_cmp,
359 .se_snprintf = hist_entry__srcline_snprintf,
360 .se_width_idx = HISTC_SRCLINE,
363 /* --sort srcfile */
365 static char no_srcfile[1];
367 static char *hist_entry__get_srcfile(struct hist_entry *e)
369 char *sf, *p;
370 struct map *map = e->ms.map;
372 if (!map)
373 return no_srcfile;
375 sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
376 e->ms.sym, false, true);
377 if (!strcmp(sf, SRCLINE_UNKNOWN))
378 return no_srcfile;
379 p = strchr(sf, ':');
380 if (p && *sf) {
381 *p = 0;
382 return sf;
384 free(sf);
385 return no_srcfile;
388 static int64_t
389 sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
391 if (!left->srcfile)
392 left->srcfile = hist_entry__get_srcfile(left);
393 if (!right->srcfile)
394 right->srcfile = hist_entry__get_srcfile(right);
396 return strcmp(right->srcfile, left->srcfile);
399 static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
400 size_t size, unsigned int width)
402 if (!he->srcfile)
403 he->srcfile = hist_entry__get_srcfile(he);
405 return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
408 struct sort_entry sort_srcfile = {
409 .se_header = "Source File",
410 .se_cmp = sort__srcfile_cmp,
411 .se_snprintf = hist_entry__srcfile_snprintf,
412 .se_width_idx = HISTC_SRCFILE,
415 /* --sort parent */
417 static int64_t
418 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
420 struct symbol *sym_l = left->parent;
421 struct symbol *sym_r = right->parent;
423 if (!sym_l || !sym_r)
424 return cmp_null(sym_l, sym_r);
426 return strcmp(sym_r->name, sym_l->name);
429 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
430 size_t size, unsigned int width)
432 return repsep_snprintf(bf, size, "%-*.*s", width, width,
433 he->parent ? he->parent->name : "[other]");
436 struct sort_entry sort_parent = {
437 .se_header = "Parent symbol",
438 .se_cmp = sort__parent_cmp,
439 .se_snprintf = hist_entry__parent_snprintf,
440 .se_width_idx = HISTC_PARENT,
443 /* --sort cpu */
445 static int64_t
446 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
448 return right->cpu - left->cpu;
451 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
452 size_t size, unsigned int width)
454 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
457 struct sort_entry sort_cpu = {
458 .se_header = "CPU",
459 .se_cmp = sort__cpu_cmp,
460 .se_snprintf = hist_entry__cpu_snprintf,
461 .se_width_idx = HISTC_CPU,
464 /* --sort socket */
466 static int64_t
467 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
469 return right->socket - left->socket;
472 static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
473 size_t size, unsigned int width)
475 return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
478 static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
480 int sk = *(const int *)arg;
482 if (type != HIST_FILTER__SOCKET)
483 return -1;
485 return sk >= 0 && he->socket != sk;
488 struct sort_entry sort_socket = {
489 .se_header = "Socket",
490 .se_cmp = sort__socket_cmp,
491 .se_snprintf = hist_entry__socket_snprintf,
492 .se_filter = hist_entry__socket_filter,
493 .se_width_idx = HISTC_SOCKET,
496 /* --sort trace */
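
/*
 * The trace key compares the rendered tracepoint payload: the pretty-printed
 * event (pevent_event_info()) by default, or the raw field dump
 * (pevent_print_fields()) when symbol_conf.raw_trace is set.  Callers cache
 * the string in he->trace_output.
 */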
498 static char *get_trace_output(struct hist_entry *he)
500 struct trace_seq seq;
501 struct perf_evsel *evsel;
502 struct pevent_record rec = {
503 .data = he->raw_data,
504 .size = he->raw_size,
507 evsel = hists_to_evsel(he->hists);
509 trace_seq_init(&seq);
510 if (symbol_conf.raw_trace) {
511 pevent_print_fields(&seq, he->raw_data, he->raw_size,
512 evsel->tp_format);
513 } else {
514 pevent_event_info(&seq, evsel->tp_format, &rec);
516 return seq.buffer;
519 static int64_t
520 sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
522 struct perf_evsel *evsel;
524 evsel = hists_to_evsel(left->hists);
525 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
526 return 0;
528 if (left->trace_output == NULL)
529 left->trace_output = get_trace_output(left);
530 if (right->trace_output == NULL)
531 right->trace_output = get_trace_output(right);
533 return strcmp(right->trace_output, left->trace_output);
536 static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
537 size_t size, unsigned int width)
539 struct perf_evsel *evsel;
541 evsel = hists_to_evsel(he->hists);
542 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
543 return scnprintf(bf, size, "%-.*s", width, "N/A");
545 if (he->trace_output == NULL)
546 he->trace_output = get_trace_output(he);
547 return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
550 struct sort_entry sort_trace = {
551 .se_header = "Trace output",
552 .se_cmp = sort__trace_cmp,
553 .se_snprintf = hist_entry__trace_snprintf,
554 .se_width_idx = HISTC_TRACE,
557 /* sort keys for branch stacks */
559 static int64_t
560 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
562 if (!left->branch_info || !right->branch_info)
563 return cmp_null(left->branch_info, right->branch_info);
565 return _sort__dso_cmp(left->branch_info->from.map,
566 right->branch_info->from.map);
569 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
570 size_t size, unsigned int width)
572 if (he->branch_info)
573 return _hist_entry__dso_snprintf(he->branch_info->from.map,
574 bf, size, width);
575 else
576 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
579 static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
580 const void *arg)
582 const struct dso *dso = arg;
584 if (type != HIST_FILTER__DSO)
585 return -1;
587 return dso && (!he->branch_info || !he->branch_info->from.map ||
588 he->branch_info->from.map->dso != dso);
591 static int64_t
592 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
594 if (!left->branch_info || !right->branch_info)
595 return cmp_null(left->branch_info, right->branch_info);
597 return _sort__dso_cmp(left->branch_info->to.map,
598 right->branch_info->to.map);
601 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
602 size_t size, unsigned int width)
604 if (he->branch_info)
605 return _hist_entry__dso_snprintf(he->branch_info->to.map,
606 bf, size, width);
607 else
608 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
611 static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
612 const void *arg)
614 const struct dso *dso = arg;
616 if (type != HIST_FILTER__DSO)
617 return -1;
619 return dso && (!he->branch_info || !he->branch_info->to.map ||
620 he->branch_info->to.map->dso != dso);
static int64_t
sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *from_l, *from_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	from_l = &left->branch_info->from;
	from_r = &right->branch_info->from;

	if (!from_l->sym && !from_r->sym)
		return _sort__addr_cmp(from_l->addr, from_r->addr);

	return _sort__sym_cmp(from_l->sym, from_r->sym);
}
641 static int64_t
642 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
644 struct addr_map_symbol *to_l, *to_r;
646 if (!left->branch_info || !right->branch_info)
647 return cmp_null(left->branch_info, right->branch_info);
649 to_l = &left->branch_info->to;
650 to_r = &right->branch_info->to;
652 if (!to_l->sym && !to_r->sym)
653 return _sort__addr_cmp(to_l->addr, to_r->addr);
655 return _sort__sym_cmp(to_l->sym, to_r->sym);
658 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
659 size_t size, unsigned int width)
661 if (he->branch_info) {
662 struct addr_map_symbol *from = &he->branch_info->from;
664 return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
665 he->level, bf, size, width);
668 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
671 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
672 size_t size, unsigned int width)
674 if (he->branch_info) {
675 struct addr_map_symbol *to = &he->branch_info->to;
677 return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
678 he->level, bf, size, width);
681 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
684 static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
685 const void *arg)
687 const char *sym = arg;
689 if (type != HIST_FILTER__SYMBOL)
690 return -1;
692 return sym && !(he->branch_info && he->branch_info->from.sym &&
693 strstr(he->branch_info->from.sym->name, sym));
696 static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
697 const void *arg)
699 const char *sym = arg;
701 if (type != HIST_FILTER__SYMBOL)
702 return -1;
704 return sym && !(he->branch_info && he->branch_info->to.sym &&
705 strstr(he->branch_info->to.sym->name, sym));
708 struct sort_entry sort_dso_from = {
709 .se_header = "Source Shared Object",
710 .se_cmp = sort__dso_from_cmp,
711 .se_snprintf = hist_entry__dso_from_snprintf,
712 .se_filter = hist_entry__dso_from_filter,
713 .se_width_idx = HISTC_DSO_FROM,
716 struct sort_entry sort_dso_to = {
717 .se_header = "Target Shared Object",
718 .se_cmp = sort__dso_to_cmp,
719 .se_snprintf = hist_entry__dso_to_snprintf,
720 .se_filter = hist_entry__dso_to_filter,
721 .se_width_idx = HISTC_DSO_TO,
724 struct sort_entry sort_sym_from = {
725 .se_header = "Source Symbol",
726 .se_cmp = sort__sym_from_cmp,
727 .se_snprintf = hist_entry__sym_from_snprintf,
728 .se_filter = hist_entry__sym_from_filter,
729 .se_width_idx = HISTC_SYMBOL_FROM,
732 struct sort_entry sort_sym_to = {
733 .se_header = "Target Symbol",
734 .se_cmp = sort__sym_to_cmp,
735 .se_snprintf = hist_entry__sym_to_snprintf,
736 .se_filter = hist_entry__sym_to_filter,
737 .se_width_idx = HISTC_SYMBOL_TO,
740 static int64_t
741 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
743 unsigned char mp, p;
745 if (!left->branch_info || !right->branch_info)
746 return cmp_null(left->branch_info, right->branch_info);
748 mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
749 p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
750 return mp || p;
static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.predicted)
			out = "N";
		else if (he->branch_info->flags.mispred)
			out = "Y";
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
}
767 static int64_t
768 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
770 return left->branch_info->flags.cycles -
771 right->branch_info->flags.cycles;
774 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
775 size_t size, unsigned int width)
777 if (he->branch_info->flags.cycles == 0)
778 return repsep_snprintf(bf, size, "%-*s", width, "-");
779 return repsep_snprintf(bf, size, "%-*hd", width,
780 he->branch_info->flags.cycles);
783 struct sort_entry sort_cycles = {
784 .se_header = "Basic Block Cycles",
785 .se_cmp = sort__cycles_cmp,
786 .se_snprintf = hist_entry__cycles_snprintf,
787 .se_width_idx = HISTC_CYCLES,
790 /* --sort daddr_sym */
791 static int64_t
792 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
794 uint64_t l = 0, r = 0;
796 if (left->mem_info)
797 l = left->mem_info->daddr.addr;
798 if (right->mem_info)
799 r = right->mem_info->daddr.addr;
801 return (int64_t)(r - l);
804 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
805 size_t size, unsigned int width)
807 uint64_t addr = 0;
808 struct map *map = NULL;
809 struct symbol *sym = NULL;
811 if (he->mem_info) {
812 addr = he->mem_info->daddr.addr;
813 map = he->mem_info->daddr.map;
814 sym = he->mem_info->daddr.sym;
816 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
817 width);
820 static int64_t
821 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
823 uint64_t l = 0, r = 0;
825 if (left->mem_info)
826 l = left->mem_info->iaddr.addr;
827 if (right->mem_info)
828 r = right->mem_info->iaddr.addr;
830 return (int64_t)(r - l);
833 static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
834 size_t size, unsigned int width)
836 uint64_t addr = 0;
837 struct map *map = NULL;
838 struct symbol *sym = NULL;
840 if (he->mem_info) {
841 addr = he->mem_info->iaddr.addr;
842 map = he->mem_info->iaddr.map;
843 sym = he->mem_info->iaddr.sym;
845 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
846 width);
849 static int64_t
850 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
852 struct map *map_l = NULL;
853 struct map *map_r = NULL;
855 if (left->mem_info)
856 map_l = left->mem_info->daddr.map;
857 if (right->mem_info)
858 map_r = right->mem_info->daddr.map;
860 return _sort__dso_cmp(map_l, map_r);
863 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
864 size_t size, unsigned int width)
866 struct map *map = NULL;
868 if (he->mem_info)
869 map = he->mem_info->daddr.map;
871 return _hist_entry__dso_snprintf(map, bf, size, width);
874 static int64_t
875 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
877 union perf_mem_data_src data_src_l;
878 union perf_mem_data_src data_src_r;
880 if (left->mem_info)
881 data_src_l = left->mem_info->data_src;
882 else
883 data_src_l.mem_lock = PERF_MEM_LOCK_NA;
885 if (right->mem_info)
886 data_src_r = right->mem_info->data_src;
887 else
888 data_src_r.mem_lock = PERF_MEM_LOCK_NA;
890 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
893 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
894 size_t size, unsigned int width)
896 char out[10];
898 perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
899 return repsep_snprintf(bf, size, "%.*s", width, out);
902 static int64_t
903 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
905 union perf_mem_data_src data_src_l;
906 union perf_mem_data_src data_src_r;
908 if (left->mem_info)
909 data_src_l = left->mem_info->data_src;
910 else
911 data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
913 if (right->mem_info)
914 data_src_r = right->mem_info->data_src;
915 else
916 data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
918 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
921 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
922 size_t size, unsigned int width)
924 char out[64];
926 perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
927 return repsep_snprintf(bf, size, "%-*s", width, out);
930 static int64_t
931 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
933 union perf_mem_data_src data_src_l;
934 union perf_mem_data_src data_src_r;
936 if (left->mem_info)
937 data_src_l = left->mem_info->data_src;
938 else
939 data_src_l.mem_lvl = PERF_MEM_LVL_NA;
941 if (right->mem_info)
942 data_src_r = right->mem_info->data_src;
943 else
944 data_src_r.mem_lvl = PERF_MEM_LVL_NA;
946 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
949 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
950 size_t size, unsigned int width)
952 char out[64];
954 perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
955 return repsep_snprintf(bf, size, "%-*s", width, out);
958 static int64_t
959 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
961 union perf_mem_data_src data_src_l;
962 union perf_mem_data_src data_src_r;
964 if (left->mem_info)
965 data_src_l = left->mem_info->data_src;
966 else
967 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
969 if (right->mem_info)
970 data_src_r = right->mem_info->data_src;
971 else
972 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
974 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
977 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
978 size_t size, unsigned int width)
980 char out[64];
982 perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
983 return repsep_snprintf(bf, size, "%-*s", width, out);
986 static int64_t
987 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
989 u64 l, r;
990 struct map *l_map, *r_map;
992 if (!left->mem_info) return -1;
993 if (!right->mem_info) return 1;
995 /* group event types together */
996 if (left->cpumode > right->cpumode) return -1;
997 if (left->cpumode < right->cpumode) return 1;
999 l_map = left->mem_info->daddr.map;
1000 r_map = right->mem_info->daddr.map;
1002 /* if both are NULL, jump to sort on al_addr instead */
1003 if (!l_map && !r_map)
1004 goto addr;
1006 if (!l_map) return -1;
1007 if (!r_map) return 1;
1009 if (l_map->maj > r_map->maj) return -1;
1010 if (l_map->maj < r_map->maj) return 1;
1012 if (l_map->min > r_map->min) return -1;
1013 if (l_map->min < r_map->min) return 1;
1015 if (l_map->ino > r_map->ino) return -1;
1016 if (l_map->ino < r_map->ino) return 1;
1018 if (l_map->ino_generation > r_map->ino_generation) return -1;
1019 if (l_map->ino_generation < r_map->ino_generation) return 1;
	/*
	 * Addresses with no major/minor numbers are assumed to be
	 * anonymous in userspace.  Sort those on pid then address.
	 *
	 * The kernel and non-zero major/minor mapped areas are
	 * assumed to be unity mapped.  Sort those on address.
	 */
1029 if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
1030 (!(l_map->flags & MAP_SHARED)) &&
1031 !l_map->maj && !l_map->min && !l_map->ino &&
1032 !l_map->ino_generation) {
1033 /* userspace anonymous */
1035 if (left->thread->pid_ > right->thread->pid_) return -1;
1036 if (left->thread->pid_ < right->thread->pid_) return 1;
1039 addr:
1040 /* al_addr does all the right addr - start + offset calculations */
1041 l = cl_address(left->mem_info->daddr.al_addr);
1042 r = cl_address(right->mem_info->daddr.al_addr);
1044 if (l > r) return -1;
1045 if (l < r) return 1;
1047 return 0;
1050 static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
1051 size_t size, unsigned int width)
1054 uint64_t addr = 0;
1055 struct map *map = NULL;
1056 struct symbol *sym = NULL;
1057 char level = he->level;
1059 if (he->mem_info) {
1060 addr = cl_address(he->mem_info->daddr.al_addr);
1061 map = he->mem_info->daddr.map;
1062 sym = he->mem_info->daddr.sym;
1064 /* print [s] for shared data mmaps */
1065 if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
1066 map && (map->type == MAP__VARIABLE) &&
1067 (map->flags & MAP_SHARED) &&
1068 (map->maj || map->min || map->ino ||
1069 map->ino_generation))
1070 level = 's';
1071 else if (!map)
1072 level = 'X';
1074 return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
1075 width);
1078 struct sort_entry sort_mispredict = {
1079 .se_header = "Branch Mispredicted",
1080 .se_cmp = sort__mispredict_cmp,
1081 .se_snprintf = hist_entry__mispredict_snprintf,
1082 .se_width_idx = HISTC_MISPREDICT,
1085 static u64 he_weight(struct hist_entry *he)
1087 return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
1090 static int64_t
1091 sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1093 return he_weight(left) - he_weight(right);
1096 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
1097 size_t size, unsigned int width)
1099 return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
1102 struct sort_entry sort_local_weight = {
1103 .se_header = "Local Weight",
1104 .se_cmp = sort__local_weight_cmp,
1105 .se_snprintf = hist_entry__local_weight_snprintf,
1106 .se_width_idx = HISTC_LOCAL_WEIGHT,
1109 static int64_t
1110 sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1112 return left->stat.weight - right->stat.weight;
1115 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
1116 size_t size, unsigned int width)
1118 return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
1121 struct sort_entry sort_global_weight = {
1122 .se_header = "Weight",
1123 .se_cmp = sort__global_weight_cmp,
1124 .se_snprintf = hist_entry__global_weight_snprintf,
1125 .se_width_idx = HISTC_GLOBAL_WEIGHT,
1128 struct sort_entry sort_mem_daddr_sym = {
1129 .se_header = "Data Symbol",
1130 .se_cmp = sort__daddr_cmp,
1131 .se_snprintf = hist_entry__daddr_snprintf,
1132 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
1135 struct sort_entry sort_mem_iaddr_sym = {
1136 .se_header = "Code Symbol",
1137 .se_cmp = sort__iaddr_cmp,
1138 .se_snprintf = hist_entry__iaddr_snprintf,
1139 .se_width_idx = HISTC_MEM_IADDR_SYMBOL,
1142 struct sort_entry sort_mem_daddr_dso = {
1143 .se_header = "Data Object",
1144 .se_cmp = sort__dso_daddr_cmp,
1145 .se_snprintf = hist_entry__dso_daddr_snprintf,
1146 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
1149 struct sort_entry sort_mem_locked = {
1150 .se_header = "Locked",
1151 .se_cmp = sort__locked_cmp,
1152 .se_snprintf = hist_entry__locked_snprintf,
1153 .se_width_idx = HISTC_MEM_LOCKED,
1156 struct sort_entry sort_mem_tlb = {
1157 .se_header = "TLB access",
1158 .se_cmp = sort__tlb_cmp,
1159 .se_snprintf = hist_entry__tlb_snprintf,
1160 .se_width_idx = HISTC_MEM_TLB,
1163 struct sort_entry sort_mem_lvl = {
1164 .se_header = "Memory access",
1165 .se_cmp = sort__lvl_cmp,
1166 .se_snprintf = hist_entry__lvl_snprintf,
1167 .se_width_idx = HISTC_MEM_LVL,
1170 struct sort_entry sort_mem_snoop = {
1171 .se_header = "Snoop",
1172 .se_cmp = sort__snoop_cmp,
1173 .se_snprintf = hist_entry__snoop_snprintf,
1174 .se_width_idx = HISTC_MEM_SNOOP,
1177 struct sort_entry sort_mem_dcacheline = {
1178 .se_header = "Data Cacheline",
1179 .se_cmp = sort__dcacheline_cmp,
1180 .se_snprintf = hist_entry__dcacheline_snprintf,
1181 .se_width_idx = HISTC_MEM_DCACHELINE,
1184 static int64_t
1185 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
1187 if (!left->branch_info || !right->branch_info)
1188 return cmp_null(left->branch_info, right->branch_info);
1190 return left->branch_info->flags.abort !=
1191 right->branch_info->flags.abort;
1194 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
1195 size_t size, unsigned int width)
1197 static const char *out = "N/A";
1199 if (he->branch_info) {
1200 if (he->branch_info->flags.abort)
1201 out = "A";
1202 else
1203 out = ".";
1206 return repsep_snprintf(bf, size, "%-*s", width, out);
1209 struct sort_entry sort_abort = {
1210 .se_header = "Transaction abort",
1211 .se_cmp = sort__abort_cmp,
1212 .se_snprintf = hist_entry__abort_snprintf,
1213 .se_width_idx = HISTC_ABORT,
1216 static int64_t
1217 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
1219 if (!left->branch_info || !right->branch_info)
1220 return cmp_null(left->branch_info, right->branch_info);
1222 return left->branch_info->flags.in_tx !=
1223 right->branch_info->flags.in_tx;
1226 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
1227 size_t size, unsigned int width)
1229 static const char *out = "N/A";
1231 if (he->branch_info) {
1232 if (he->branch_info->flags.in_tx)
1233 out = "T";
1234 else
1235 out = ".";
1238 return repsep_snprintf(bf, size, "%-*s", width, out);
1241 struct sort_entry sort_in_tx = {
1242 .se_header = "Branch in transaction",
1243 .se_cmp = sort__in_tx_cmp,
1244 .se_snprintf = hist_entry__in_tx_snprintf,
1245 .se_width_idx = HISTC_IN_TX,
1248 static int64_t
1249 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
1251 return left->transaction - right->transaction;
1254 static inline char *add_str(char *p, const char *str)
1256 strcpy(p, str);
1257 return p + strlen(str);
1260 static struct txbit {
1261 unsigned flag;
1262 const char *name;
1263 int skip_for_len;
1264 } txbits[] = {
1265 { PERF_TXN_ELISION, "EL ", 0 },
1266 { PERF_TXN_TRANSACTION, "TX ", 1 },
1267 { PERF_TXN_SYNC, "SYNC ", 1 },
1268 { PERF_TXN_ASYNC, "ASYNC ", 0 },
1269 { PERF_TXN_RETRY, "RETRY ", 0 },
1270 { PERF_TXN_CONFLICT, "CON ", 0 },
1271 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
1272 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
1273 { 0, NULL, 0 }
1276 int hist_entry__transaction_len(void)
1278 int i;
1279 int len = 0;
1281 for (i = 0; txbits[i].name; i++) {
1282 if (!txbits[i].skip_for_len)
1283 len += strlen(txbits[i].name);
1285 len += 4; /* :XX<space> */
1286 return len;
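
/*
 * The rendered transaction string is the set of tokens from txbits[]
 * followed by the abort code, e.g. (illustrative) "EL TX SYNC :1" for an
 * elided, synchronous transaction whose abort code was 1.
 */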
1289 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
1290 size_t size, unsigned int width)
1292 u64 t = he->transaction;
1293 char buf[128];
1294 char *p = buf;
1295 int i;
1297 buf[0] = 0;
1298 for (i = 0; txbits[i].name; i++)
1299 if (txbits[i].flag & t)
1300 p = add_str(p, txbits[i].name);
1301 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
1302 p = add_str(p, "NEITHER ");
1303 if (t & PERF_TXN_ABORT_MASK) {
1304 sprintf(p, ":%" PRIx64,
1305 (t & PERF_TXN_ABORT_MASK) >>
1306 PERF_TXN_ABORT_SHIFT);
1307 p += strlen(p);
1310 return repsep_snprintf(bf, size, "%-*s", width, buf);
1313 struct sort_entry sort_transaction = {
1314 .se_header = "Transaction ",
1315 .se_cmp = sort__transaction_cmp,
1316 .se_snprintf = hist_entry__transaction_snprintf,
1317 .se_width_idx = HISTC_TRANSACTION,
1320 struct sort_dimension {
1321 const char *name;
1322 struct sort_entry *entry;
1323 int taken;
1326 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
1328 static struct sort_dimension common_sort_dimensions[] = {
1329 DIM(SORT_PID, "pid", sort_thread),
1330 DIM(SORT_COMM, "comm", sort_comm),
1331 DIM(SORT_DSO, "dso", sort_dso),
1332 DIM(SORT_SYM, "symbol", sort_sym),
1333 DIM(SORT_PARENT, "parent", sort_parent),
1334 DIM(SORT_CPU, "cpu", sort_cpu),
1335 DIM(SORT_SOCKET, "socket", sort_socket),
1336 DIM(SORT_SRCLINE, "srcline", sort_srcline),
1337 DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
1338 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
1339 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
1340 DIM(SORT_TRANSACTION, "transaction", sort_transaction),
1341 DIM(SORT_TRACE, "trace", sort_trace),
1344 #undef DIM
1346 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
1348 static struct sort_dimension bstack_sort_dimensions[] = {
1349 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
1350 DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
1351 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
1352 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
1353 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
1354 DIM(SORT_IN_TX, "in_tx", sort_in_tx),
1355 DIM(SORT_ABORT, "abort", sort_abort),
1356 DIM(SORT_CYCLES, "cycles", sort_cycles),
1359 #undef DIM
1361 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
1363 static struct sort_dimension memory_sort_dimensions[] = {
1364 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
1365 DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
1366 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
1367 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
1368 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
1369 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
1370 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
1371 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
1374 #undef DIM
1376 struct hpp_dimension {
1377 const char *name;
1378 struct perf_hpp_fmt *fmt;
1379 int taken;
1382 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
1384 static struct hpp_dimension hpp_sort_dimensions[] = {
1385 DIM(PERF_HPP__OVERHEAD, "overhead"),
1386 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
1387 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
1388 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
1389 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
1390 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
1391 DIM(PERF_HPP__SAMPLES, "sample"),
1392 DIM(PERF_HPP__PERIOD, "period"),
1395 #undef DIM
1397 struct hpp_sort_entry {
1398 struct perf_hpp_fmt hpp;
1399 struct sort_entry *se;
1402 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
1404 struct hpp_sort_entry *hse;
1406 if (!perf_hpp__is_sort_entry(fmt))
1407 return;
1409 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1410 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
1413 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1414 struct perf_evsel *evsel)
1416 struct hpp_sort_entry *hse;
1417 size_t len = fmt->user_len;
1419 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1421 if (!len)
1422 len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
1424 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
1427 static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
1428 struct perf_hpp *hpp __maybe_unused,
1429 struct perf_evsel *evsel)
1431 struct hpp_sort_entry *hse;
1432 size_t len = fmt->user_len;
1434 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1436 if (!len)
1437 len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
1439 return len;
1442 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1443 struct hist_entry *he)
1445 struct hpp_sort_entry *hse;
1446 size_t len = fmt->user_len;
1448 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1450 if (!len)
1451 len = hists__col_len(he->hists, hse->se->se_width_idx);
1453 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
1456 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
1457 struct hist_entry *a, struct hist_entry *b)
1459 struct hpp_sort_entry *hse;
1461 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1462 return hse->se->se_cmp(a, b);
1465 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
1466 struct hist_entry *a, struct hist_entry *b)
1468 struct hpp_sort_entry *hse;
1469 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
1471 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1472 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
1473 return collapse_fn(a, b);
1476 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
1477 struct hist_entry *a, struct hist_entry *b)
1479 struct hpp_sort_entry *hse;
1480 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
1482 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1483 sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
1484 return sort_fn(a, b);
1487 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
1489 return format->header == __sort__hpp_header;
1492 #define MK_SORT_ENTRY_CHK(key) \
1493 bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \
1495 struct hpp_sort_entry *hse; \
1497 if (!perf_hpp__is_sort_entry(fmt)) \
1498 return false; \
1500 hse = container_of(fmt, struct hpp_sort_entry, hpp); \
1501 return hse->se == &sort_ ## key ; \
1504 MK_SORT_ENTRY_CHK(trace)
1505 MK_SORT_ENTRY_CHK(srcline)
1506 MK_SORT_ENTRY_CHK(srcfile)
1507 MK_SORT_ENTRY_CHK(thread)
1508 MK_SORT_ENTRY_CHK(comm)
1509 MK_SORT_ENTRY_CHK(dso)
1510 MK_SORT_ENTRY_CHK(sym)
1513 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
1515 struct hpp_sort_entry *hse_a;
1516 struct hpp_sort_entry *hse_b;
1518 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
1519 return false;
1521 hse_a = container_of(a, struct hpp_sort_entry, hpp);
1522 hse_b = container_of(b, struct hpp_sort_entry, hpp);
1524 return hse_a->se == hse_b->se;
1527 static void hse_free(struct perf_hpp_fmt *fmt)
1529 struct hpp_sort_entry *hse;
1531 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1532 free(hse);
1535 static struct hpp_sort_entry *
1536 __sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
1538 struct hpp_sort_entry *hse;
1540 hse = malloc(sizeof(*hse));
1541 if (hse == NULL) {
1542 pr_err("Memory allocation failed\n");
1543 return NULL;
1546 hse->se = sd->entry;
1547 hse->hpp.name = sd->entry->se_header;
1548 hse->hpp.header = __sort__hpp_header;
1549 hse->hpp.width = __sort__hpp_width;
1550 hse->hpp.entry = __sort__hpp_entry;
1551 hse->hpp.color = NULL;
1553 hse->hpp.cmp = __sort__hpp_cmp;
1554 hse->hpp.collapse = __sort__hpp_collapse;
1555 hse->hpp.sort = __sort__hpp_sort;
1556 hse->hpp.equal = __sort__hpp_equal;
1557 hse->hpp.free = hse_free;
1559 INIT_LIST_HEAD(&hse->hpp.list);
1560 INIT_LIST_HEAD(&hse->hpp.sort_list);
1561 hse->hpp.elide = false;
1562 hse->hpp.len = 0;
1563 hse->hpp.user_len = 0;
1564 hse->hpp.level = level;
1566 return hse;
1569 static void hpp_free(struct perf_hpp_fmt *fmt)
1571 free(fmt);
1574 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
1575 int level)
1577 struct perf_hpp_fmt *fmt;
1579 fmt = memdup(hd->fmt, sizeof(*fmt));
1580 if (fmt) {
1581 INIT_LIST_HEAD(&fmt->list);
1582 INIT_LIST_HEAD(&fmt->sort_list);
1583 fmt->free = hpp_free;
1584 fmt->level = level;
1587 return fmt;
1590 int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
1592 struct perf_hpp_fmt *fmt;
1593 struct hpp_sort_entry *hse;
1594 int ret = -1;
1595 int r;
1597 perf_hpp_list__for_each_format(he->hpp_list, fmt) {
1598 if (!perf_hpp__is_sort_entry(fmt))
1599 continue;
1601 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1602 if (hse->se->se_filter == NULL)
1603 continue;
		/*
		 * hist entry is filtered if any of the sort keys in the hpp
		 * list is applied.  But it should skip non-matched filter
		 * types.
		 */
1609 r = hse->se->se_filter(he, type, arg);
1610 if (r >= 0) {
1611 if (ret < 0)
1612 ret = 0;
1613 ret |= r;
1617 return ret;
1620 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
1621 struct perf_hpp_list *list,
1622 int level)
1624 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
1626 if (hse == NULL)
1627 return -1;
1629 perf_hpp_list__register_sort_field(list, &hse->hpp);
1630 return 0;
1633 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
1634 struct perf_hpp_list *list)
1636 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);
1638 if (hse == NULL)
1639 return -1;
1641 perf_hpp_list__column_register(list, &hse->hpp);
1642 return 0;
1645 struct hpp_dynamic_entry {
1646 struct perf_hpp_fmt hpp;
1647 struct perf_evsel *evsel;
1648 struct format_field *field;
1649 unsigned dynamic_len;
1650 bool raw_trace;
1653 static int hde_width(struct hpp_dynamic_entry *hde)
1655 if (!hde->hpp.len) {
1656 int len = hde->dynamic_len;
1657 int namelen = strlen(hde->field->name);
1658 int fieldlen = hde->field->size;
1660 if (namelen > len)
1661 len = namelen;
1663 if (!(hde->field->flags & FIELD_IS_STRING)) {
1664 /* length for print hex numbers */
1665 fieldlen = hde->field->size * 2 + 2;
1667 if (fieldlen > len)
1668 len = fieldlen;
1670 hde->hpp.len = len;
1672 return hde->hpp.len;
1675 static void update_dynamic_len(struct hpp_dynamic_entry *hde,
1676 struct hist_entry *he)
1678 char *str, *pos;
1679 struct format_field *field = hde->field;
1680 size_t namelen;
1681 bool last = false;
1683 if (hde->raw_trace)
1684 return;
1686 /* parse pretty print result and update max length */
1687 if (!he->trace_output)
1688 he->trace_output = get_trace_output(he);
1690 namelen = strlen(field->name);
1691 str = he->trace_output;
1693 while (str) {
1694 pos = strchr(str, ' ');
1695 if (pos == NULL) {
1696 last = true;
1697 pos = str + strlen(str);
1700 if (!strncmp(str, field->name, namelen)) {
1701 size_t len;
1703 str += namelen + 1;
1704 len = pos - str;
1706 if (len > hde->dynamic_len)
1707 hde->dynamic_len = len;
1708 break;
1711 if (last)
1712 str = NULL;
1713 else
1714 str = pos + 1;
1718 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1719 struct perf_evsel *evsel __maybe_unused)
1721 struct hpp_dynamic_entry *hde;
1722 size_t len = fmt->user_len;
1724 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1726 if (!len)
1727 len = hde_width(hde);
1729 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
1732 static int __sort__hde_width(struct perf_hpp_fmt *fmt,
1733 struct perf_hpp *hpp __maybe_unused,
1734 struct perf_evsel *evsel __maybe_unused)
1736 struct hpp_dynamic_entry *hde;
1737 size_t len = fmt->user_len;
1739 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1741 if (!len)
1742 len = hde_width(hde);
1744 return len;
1747 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
1749 struct hpp_dynamic_entry *hde;
1751 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1753 return hists_to_evsel(hists) == hde->evsel;
1756 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1757 struct hist_entry *he)
1759 struct hpp_dynamic_entry *hde;
1760 size_t len = fmt->user_len;
1761 char *str, *pos;
1762 struct format_field *field;
1763 size_t namelen;
1764 bool last = false;
1765 int ret;
1767 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1769 if (!len)
1770 len = hde_width(hde);
1772 if (hde->raw_trace)
1773 goto raw_field;
1775 if (!he->trace_output)
1776 he->trace_output = get_trace_output(he);
1778 field = hde->field;
1779 namelen = strlen(field->name);
1780 str = he->trace_output;
1782 while (str) {
1783 pos = strchr(str, ' ');
1784 if (pos == NULL) {
1785 last = true;
1786 pos = str + strlen(str);
1789 if (!strncmp(str, field->name, namelen)) {
1790 str += namelen + 1;
1791 str = strndup(str, pos - str);
1793 if (str == NULL)
1794 return scnprintf(hpp->buf, hpp->size,
1795 "%*.*s", len, len, "ERROR");
1796 break;
1799 if (last)
1800 str = NULL;
1801 else
1802 str = pos + 1;
1805 if (str == NULL) {
1806 struct trace_seq seq;
1807 raw_field:
1808 trace_seq_init(&seq);
1809 pevent_print_field(&seq, he->raw_data, hde->field);
1810 str = seq.buffer;
1813 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
1814 free(str);
1815 return ret;
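
/*
 * Comparison for dynamic entries: FIELD_IS_DYNAMIC fields store their
 * payload location packed into the recorded value (offset in the low
 * 16 bits, size in the next 16); fixed fields use the offset/size from the
 * event format.  Either way the bytes are compared with memcmp().
 */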
1818 static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
1819 struct hist_entry *a, struct hist_entry *b)
1821 struct hpp_dynamic_entry *hde;
1822 struct format_field *field;
1823 unsigned offset, size;
1825 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1827 if (b == NULL) {
1828 update_dynamic_len(hde, a);
1829 return 0;
1832 field = hde->field;
1833 if (field->flags & FIELD_IS_DYNAMIC) {
1834 unsigned long long dyn;
1836 pevent_read_number_field(field, a->raw_data, &dyn);
1837 offset = dyn & 0xffff;
1838 size = (dyn >> 16) & 0xffff;
1840 /* record max width for output */
1841 if (size > hde->dynamic_len)
1842 hde->dynamic_len = size;
1843 } else {
1844 offset = field->offset;
1845 size = field->size;
1848 return memcmp(a->raw_data + offset, b->raw_data + offset, size);
1851 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
1853 return fmt->cmp == __sort__hde_cmp;
1856 static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
1858 struct hpp_dynamic_entry *hde_a;
1859 struct hpp_dynamic_entry *hde_b;
1861 if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
1862 return false;
1864 hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
1865 hde_b = container_of(b, struct hpp_dynamic_entry, hpp);
1867 return hde_a->field == hde_b->field;
1870 static void hde_free(struct perf_hpp_fmt *fmt)
1872 struct hpp_dynamic_entry *hde;
1874 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1875 free(hde);
1878 static struct hpp_dynamic_entry *
1879 __alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field,
1880 int level)
1882 struct hpp_dynamic_entry *hde;
1884 hde = malloc(sizeof(*hde));
1885 if (hde == NULL) {
1886 pr_debug("Memory allocation failed\n");
1887 return NULL;
1890 hde->evsel = evsel;
1891 hde->field = field;
1892 hde->dynamic_len = 0;
1894 hde->hpp.name = field->name;
1895 hde->hpp.header = __sort__hde_header;
1896 hde->hpp.width = __sort__hde_width;
1897 hde->hpp.entry = __sort__hde_entry;
1898 hde->hpp.color = NULL;
1900 hde->hpp.cmp = __sort__hde_cmp;
1901 hde->hpp.collapse = __sort__hde_cmp;
1902 hde->hpp.sort = __sort__hde_cmp;
1903 hde->hpp.equal = __sort__hde_equal;
1904 hde->hpp.free = hde_free;
1906 INIT_LIST_HEAD(&hde->hpp.list);
1907 INIT_LIST_HEAD(&hde->hpp.sort_list);
1908 hde->hpp.elide = false;
1909 hde->hpp.len = 0;
1910 hde->hpp.user_len = 0;
1911 hde->hpp.level = level;
1913 return hde;
1916 struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
1918 struct perf_hpp_fmt *new_fmt = NULL;
1920 if (perf_hpp__is_sort_entry(fmt)) {
1921 struct hpp_sort_entry *hse, *new_hse;
1923 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1924 new_hse = memdup(hse, sizeof(*hse));
1925 if (new_hse)
1926 new_fmt = &new_hse->hpp;
1927 } else if (perf_hpp__is_dynamic_entry(fmt)) {
1928 struct hpp_dynamic_entry *hde, *new_hde;
1930 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1931 new_hde = memdup(hde, sizeof(*hde));
1932 if (new_hde)
1933 new_fmt = &new_hde->hpp;
1934 } else {
1935 new_fmt = memdup(fmt, sizeof(*fmt));
1938 INIT_LIST_HEAD(&new_fmt->list);
1939 INIT_LIST_HEAD(&new_fmt->sort_list);
1941 return new_fmt;
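
/*
 * Dynamic sort keys have the form "[event.]field[/opt]", e.g.
 * (illustrative) "sched:sched_switch.prev_comm/raw" or plain
 * "trace_fields"; parse_field_name() splits the token in place and
 * returns NULL for the missing parts.
 */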
1944 static int parse_field_name(char *str, char **event, char **field, char **opt)
1946 char *event_name, *field_name, *opt_name;
1948 event_name = str;
1949 field_name = strchr(str, '.');
1951 if (field_name) {
1952 *field_name++ = '\0';
1953 } else {
1954 event_name = NULL;
1955 field_name = str;
1958 opt_name = strchr(field_name, '/');
1959 if (opt_name)
1960 *opt_name++ = '\0';
1962 *event = event_name;
1963 *field = field_name;
1964 *opt = opt_name;
1966 return 0;
/*
 * Find a matching evsel using a given event name.  The event name can be:
 *   1. '%' + event index (e.g. '%1' for first event)
 *   2. full event name (e.g. sched:sched_switch)
 *   3. partial event name (should not contain ':')
 */
1974 static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
1976 struct perf_evsel *evsel = NULL;
1977 struct perf_evsel *pos;
1978 bool full_name;
1980 /* case 1 */
1981 if (event_name[0] == '%') {
1982 int nr = strtol(event_name+1, NULL, 0);
1984 if (nr > evlist->nr_entries)
1985 return NULL;
1987 evsel = perf_evlist__first(evlist);
1988 while (--nr > 0)
1989 evsel = perf_evsel__next(evsel);
1991 return evsel;
1994 full_name = !!strchr(event_name, ':');
1995 evlist__for_each(evlist, pos) {
1996 /* case 2 */
1997 if (full_name && !strcmp(pos->name, event_name))
1998 return pos;
1999 /* case 3 */
2000 if (!full_name && strstr(pos->name, event_name)) {
2001 if (evsel) {
2002 pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
2003 event_name, evsel->name, pos->name);
2004 return NULL;
2006 evsel = pos;
2010 return evsel;
2013 static int __dynamic_dimension__add(struct perf_evsel *evsel,
2014 struct format_field *field,
2015 bool raw_trace, int level)
2017 struct hpp_dynamic_entry *hde;
2019 hde = __alloc_dynamic_entry(evsel, field, level);
2020 if (hde == NULL)
2021 return -ENOMEM;
2023 hde->raw_trace = raw_trace;
2025 perf_hpp__register_sort_field(&hde->hpp);
2026 return 0;
2029 static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace, int level)
2031 int ret;
2032 struct format_field *field;
2034 field = evsel->tp_format->format.fields;
2035 while (field) {
2036 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2037 if (ret < 0)
2038 return ret;
2040 field = field->next;
2042 return 0;
2045 static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace,
2046 int level)
2048 int ret;
2049 struct perf_evsel *evsel;
2051 evlist__for_each(evlist, evsel) {
2052 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
2053 continue;
2055 ret = add_evsel_fields(evsel, raw_trace, level);
2056 if (ret < 0)
2057 return ret;
2059 return 0;
2062 static int add_all_matching_fields(struct perf_evlist *evlist,
2063 char *field_name, bool raw_trace, int level)
2065 int ret = -ESRCH;
2066 struct perf_evsel *evsel;
2067 struct format_field *field;
2069 evlist__for_each(evlist, evsel) {
2070 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
2071 continue;
2073 field = pevent_find_any_field(evsel->tp_format, field_name);
2074 if (field == NULL)
2075 continue;
2077 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2078 if (ret < 0)
2079 break;
2081 return ret;
2084 static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok,
2085 int level)
2087 char *str, *event_name, *field_name, *opt_name;
2088 struct perf_evsel *evsel;
2089 struct format_field *field;
2090 bool raw_trace = symbol_conf.raw_trace;
2091 int ret = 0;
2093 if (evlist == NULL)
2094 return -ENOENT;
2096 str = strdup(tok);
2097 if (str == NULL)
2098 return -ENOMEM;
2100 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
2101 ret = -EINVAL;
2102 goto out;
2105 if (opt_name) {
2106 if (strcmp(opt_name, "raw")) {
2107 pr_debug("unsupported field option %s\n", opt_name);
2108 ret = -EINVAL;
2109 goto out;
2111 raw_trace = true;
2114 if (!strcmp(field_name, "trace_fields")) {
2115 ret = add_all_dynamic_fields(evlist, raw_trace, level);
2116 goto out;
2119 if (event_name == NULL) {
2120 ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
2121 goto out;
2124 evsel = find_evsel(evlist, event_name);
2125 if (evsel == NULL) {
2126 pr_debug("Cannot find event: %s\n", event_name);
2127 ret = -ENOENT;
2128 goto out;
2131 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2132 pr_debug("%s is not a tracepoint event\n", event_name);
2133 ret = -EINVAL;
2134 goto out;
2137 if (!strcmp(field_name, "*")) {
2138 ret = add_evsel_fields(evsel, raw_trace, level);
2139 } else {
		field = pevent_find_any_field(evsel->tp_format, field_name);
		if (field == NULL) {
			pr_debug("Cannot find event field for %s.%s\n",
				 event_name, field_name);
			ret = -ENOENT;
			goto out;
		}
2147 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2150 out:
2151 free(str);
2152 return ret;
2155 static int __sort_dimension__add(struct sort_dimension *sd,
2156 struct perf_hpp_list *list,
2157 int level)
2159 if (sd->taken)
2160 return 0;
2162 if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
2163 return -1;
2165 if (sd->entry->se_collapse)
2166 sort__need_collapse = 1;
2168 sd->taken = 1;
2170 return 0;
2173 static int __hpp_dimension__add(struct hpp_dimension *hd,
2174 struct perf_hpp_list *list,
2175 int level)
2177 struct perf_hpp_fmt *fmt;
2179 if (hd->taken)
2180 return 0;
2182 fmt = __hpp_dimension__alloc_hpp(hd, level);
2183 if (!fmt)
2184 return -1;
2186 hd->taken = 1;
2187 perf_hpp_list__register_sort_field(list, fmt);
2188 return 0;
2191 static int __sort_dimension__add_output(struct perf_hpp_list *list,
2192 struct sort_dimension *sd)
2194 if (sd->taken)
2195 return 0;
2197 if (__sort_dimension__add_hpp_output(sd, list) < 0)
2198 return -1;
2200 sd->taken = 1;
2201 return 0;
2204 static int __hpp_dimension__add_output(struct perf_hpp_list *list,
2205 struct hpp_dimension *hd)
2207 struct perf_hpp_fmt *fmt;
2209 if (hd->taken)
2210 return 0;
2212 fmt = __hpp_dimension__alloc_hpp(hd, 0);
2213 if (!fmt)
2214 return -1;
2216 hd->taken = 1;
2217 perf_hpp_list__column_register(list, fmt);
2218 return 0;
2221 int hpp_dimension__add_output(unsigned col)
2223 BUG_ON(col >= PERF_HPP__MAX_INDEX);
2224 return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
static int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
			       struct perf_evlist *evlist,
			       int level)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sd->entry == &sort_parent) {
			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
			if (ret) {
				char err[BUFSIZ];

				regerror(ret, &parent_regex, err, sizeof(err));
				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
				return -EINVAL;
			}
			sort__has_parent = 1;
		} else if (sd->entry == &sort_sym) {
			sort__has_sym = 1;
			/*
			 * perf diff displays the performance difference among
			 * two or more perf.data files.  Those files could come
			 * from different binaries, so we should not compare
			 * their ips but the names of their symbols.
			 */
			if (sort__mode == SORT_MODE__DIFF)
				sd->entry->se_collapse = sort__sym_sort;

		} else if (sd->entry == &sort_dso) {
			sort__has_dso = 1;
		} else if (sd->entry == &sort_socket) {
			sort__has_socket = 1;
		} else if (sd->entry == &sort_thread) {
			sort__has_thread = 1;
		} else if (sd->entry == &sort_comm) {
			sort__has_comm = 1;
		}

		return __sort_dimension__add(sd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add(hd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__BRANCH)
			return -EINVAL;

		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
			sort__has_sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__MEMORY)
			return -EINVAL;

		if (sd->entry == &sort_mem_daddr_sym)
			sort__has_sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	if (!add_dynamic_entry(evlist, tok, level))
		return 0;

	return -ESRCH;
}
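
/*
 * Split the --sort string into keys.  Braces group keys onto a single
 * hierarchy level: e.g. (illustrative) "{comm,dso},sym" keeps comm and dso
 * on one level and puts sym on the next, while a plain "comm,dso,sym" gives
 * every key its own level.
 */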
static int setup_sort_list(struct perf_hpp_list *list, char *str,
			   struct perf_evlist *evlist)
{
	char *tmp, *tok;
	int ret = 0;
	int level = 0;
	int next_level = 1;
	bool in_group = false;

	do {
		tok = str;
		tmp = strpbrk(str, "{}, ");
		if (tmp) {
			if (in_group)
				next_level = level;
			else
				next_level = level + 1;

			if (*tmp == '{')
				in_group = true;
			else if (*tmp == '}')
				in_group = false;

			*tmp = '\0';
			str = tmp + 1;
		}

		if (*tok) {
			ret = sort_dimension__add(list, tok, evlist, level);
			if (ret == -EINVAL) {
				error("Invalid --sort key: `%s'", tok);
				break;
			} else if (ret == -ESRCH) {
				error("Unknown --sort key: `%s'", tok);
				break;
			}
		}

		level = next_level;
	} while (tmp);

	return ret;
}
static const char *get_default_sort_order(struct perf_evlist *evlist)
{
	const char *default_sort_orders[] = {
		default_sort_order,
		default_branch_sort_order,
		default_mem_sort_order,
		default_top_sort_order,
		default_diff_sort_order,
		default_tracepoint_sort_order,
	};
	bool use_trace = true;
	struct perf_evsel *evsel;

	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));

	if (evlist == NULL)
		goto out_no_evlist;

	evlist__for_each(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
			use_trace = false;
			break;
		}
	}

	if (use_trace) {
		sort__mode = SORT_MODE__TRACEPOINT;
		if (symbol_conf.raw_trace)
			return "trace_fields";
	}
out_no_evlist:
	return default_sort_orders[sort__mode];
}
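
/*
 * A sort order starting with '+' extends the defaults rather than replacing
 * them: e.g. (illustrative) "-s +srcline" appends srcline to the default
 * sort keys of the current mode.
 */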
static int setup_sort_order(struct perf_evlist *evlist)
{
	char *new_sort_order;

	/*
	 * Append the '+'-prefixed sort order to the default sort
	 * order string.
	 */
	if (!sort_order || is_strict_order(sort_order))
		return 0;

	if (sort_order[1] == '\0') {
		error("Invalid --sort key: `+'");
		return -EINVAL;
	}

	/*
	 * We allocate a new sort_order string, but we never free it,
	 * because it may be referenced throughout the rest of the code.
	 */
	if (asprintf(&new_sort_order, "%s,%s",
		     get_default_sort_order(evlist), sort_order + 1) < 0) {
		error("Not enough memory to set up --sort");
		return -ENOMEM;
	}

	sort_order = new_sort_order;
	return 0;
}
/*
 * Adds the 'pre,' prefix to 'str' if 'pre' is
 * not already part of 'str'.
 */
static char *prefix_if_not_in(const char *pre, char *str)
{
	char *n;

	if (!str || strstr(str, pre))
		return str;

	if (asprintf(&n, "%s,%s", pre, str) < 0)
		return NULL;

	free(str);
	return n;
}
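
/*
 * Make sure the overhead column(s) lead the sort keys: e.g. (illustrative)
 * "comm,dso" becomes "overhead,comm,dso", and "overhead_children" is put in
 * front of that when callchain accumulation (--children) is enabled.  Note
 * that 'keys' must be heap-allocated, since prefix_if_not_in() frees the old
 * string when it builds a new one.
 */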
static char *setup_overhead(char *keys)
{
	keys = prefix_if_not_in("overhead", keys);

	if (symbol_conf.cumulate_callchain)
		keys = prefix_if_not_in("overhead_children", keys);

	return keys;
}
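
/*
 * Build the sort key list for perf_hpp_list: resolve a '+'-prefixed --sort
 * order against the defaults, fall back to the mode's default keys when no
 * sort order was given (unless a strict --fields order should be honored
 * as-is), prepend the overhead keys for backward compatibility, and finally
 * parse the resulting string.
 */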
static int __setup_sorting(struct perf_evlist *evlist)
{
	char *str;
	const char *sort_keys;
	int ret = 0;

	ret = setup_sort_order(evlist);
	if (ret)
		return ret;

	sort_keys = sort_order;
	if (sort_keys == NULL) {
		if (is_strict_order(field_order)) {
			/*
			 * If the user specified a field order but no sort
			 * order, we'll honor it and not add the default
			 * sort keys.
			 */
			return 0;
		}

		sort_keys = get_default_sort_order(evlist);
	}

	str = strdup(sort_keys);
	if (str == NULL) {
		error("Not enough memory to setup sort keys");
		return -ENOMEM;
	}

	/*
	 * Prepend overhead fields for backward compatibility.
	 */
	if (!is_strict_order(field_order)) {
		str = setup_overhead(str);
		if (str == NULL) {
			error("Not enough memory to setup overhead keys");
			return -ENOMEM;
		}
	}

	ret = setup_sort_list(&perf_hpp_list, str, evlist);

	free(str);
	return ret;
}
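
/*
 * "Eliding" hides a column whose value is forced by a filter: if, say, the
 * dso filter list contains exactly one entry, every output line would show
 * the same dso, so the column is dropped and the value is printed once in a
 * header comment instead (see __get_elide() below).
 */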
void perf_hpp__set_elide(int idx, bool elide)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_width_idx == idx) {
			fmt->elide = elide;
			break;
		}
	}
}

static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
{
	if (list && strlist__nr_entries(list) == 1) {
		if (fp != NULL)
			fprintf(fp, "# %s: %s\n", list_name,
				strlist__entry(list, 0)->s);
		return true;
	}
	return false;
}

static bool get_elide(int idx, FILE *output)
{
	switch (idx) {
	case HISTC_SYMBOL:
		return __get_elide(symbol_conf.sym_list, "symbol", output);
	case HISTC_DSO:
		return __get_elide(symbol_conf.dso_list, "dso", output);
	case HISTC_COMM:
		return __get_elide(symbol_conf.comm_list, "comm", output);
	default:
		break;
	}

	if (sort__mode != SORT_MODE__BRANCH)
		return false;

	switch (idx) {
	case HISTC_SYMBOL_FROM:
		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
	case HISTC_SYMBOL_TO:
		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
	case HISTC_DSO_FROM:
		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
	case HISTC_DSO_TO:
		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
	default:
		break;
	}

	return false;
}
void sort__setup_elide(FILE *output)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		fmt->elide = get_elide(hse->se->se_width_idx, output);
	}

	/*
	 * It makes no sense to elide all of the sort entries.
	 * Just revert them so they show up again.
	 */
	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		if (!fmt->elide)
			return;
	}

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		fmt->elide = false;
	}
}
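
/*
 * --fields keys are resolved against the same dimension tables as --sort
 * keys, but they are only registered as output columns and do not affect
 * how hist entries are sorted or collapsed.
 */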
static int output_field_add(struct perf_hpp_list *list, char *tok)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add_output(list, hd);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	return -ESRCH;
}
static int setup_output_list(struct perf_hpp_list *list, char *str)
{
	char *tmp, *tok;
	int ret = 0;

	for (tok = strtok_r(str, ", ", &tmp);
	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
		ret = output_field_add(list, tok);
		if (ret == -EINVAL) {
			error("Invalid --fields key: `%s'", tok);
			break;
		} else if (ret == -ESRCH) {
			error("Unknown --fields key: `%s'", tok);
			break;
		}
	}

	return ret;
}

static void reset_dimensions(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
		common_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
		hpp_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
		bstack_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
		memory_sort_dimensions[i].taken = 0;
}

bool is_strict_order(const char *order)
{
	return order && (*order != '+');
}
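
/*
 * A leading '+' in --fields roughly means "add these on top of the default
 * columns"; only the part after the '+' is parsed here, and the default
 * columns still show up because the sort keys are copied into the output
 * fields later, in setup_sorting().
 */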
static int __setup_output_field(void)
{
	char *str, *strp;
	int ret = -EINVAL;

	if (field_order == NULL)
		return 0;

	strp = str = strdup(field_order);
	if (str == NULL) {
		error("Not enough memory to setup output fields");
		return -ENOMEM;
	}

	if (!is_strict_order(field_order))
		strp++;

	if (!strlen(strp)) {
		error("Invalid --fields key: `+'");
		goto out;
	}

	ret = setup_output_list(&perf_hpp_list, strp);

out:
	free(str);
	return ret;
}
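
/*
 * Top-level entry point: set up the sort keys, add the implicit "parent"
 * key when a non-default --parent pattern is given, set up the output
 * fields, and then cross-copy the two lists so that every sort key is
 * printable and every printed column is sortable.
 *
 * Typical (illustrative) use from a perf tool, assuming 'session' already
 * exists and 'report_usage'/'options' are the tool's option tables:
 *
 *	if (setup_sorting(session->evlist) < 0)
 *		usage_with_options(report_usage, options);
 */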
int setup_sorting(struct perf_evlist *evlist)
{
	int err;

	err = __setup_sorting(evlist);
	if (err < 0)
		return err;

	if (parent_pattern != default_parent_pattern) {
		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
		if (err < 0)
			return err;
	}

	reset_dimensions();

	/*
	 * perf diff doesn't use default hpp output fields.
	 */
	if (sort__mode != SORT_MODE__DIFF)
		perf_hpp__init();

	err = __setup_output_field();
	if (err < 0)
		return err;

	/* copy sort keys to output fields */
	perf_hpp__setup_output_field(&perf_hpp_list);
	/* and then copy output fields to sort keys */
	perf_hpp__append_sort_keys(&perf_hpp_list);

	/* setup hists-specific output fields */
	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
		return -1;

	return 0;
}
void reset_output_field(void)
{
	sort__need_collapse = 0;
	sort__has_parent = 0;
	sort__has_sym = 0;
	sort__has_dso = 0;

	field_order = NULL;
	sort_order = NULL;

	reset_dimensions();
	perf_hpp__reset_output_field(&perf_hpp_list);
}