#include <traceevent/event-parse.h>
#include "mem-events.h"

const char	default_parent_pattern[] = "^sys_|^do_page_fault";
const char	*parent_pattern = default_parent_pattern;
const char	default_sort_order[] = "comm,dso,symbol";
const char	default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char	default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
const char	default_top_sort_order[] = "dso,symbol";
const char	default_diff_sort_order[] = "dso,symbol";
const char	default_tracepoint_sort_order[] = "trace";
const char	*sort_order;
const char	*field_order;
regex_t		ignore_callees_regex;
int		have_ignore_callees = 0;
int		sort__need_collapse = 0;
int		sort__has_parent = 0;
int		sort__has_sym = 0;
int		sort__has_dso = 0;
int		sort__has_socket = 0;
int		sort__has_thread = 0;
enum sort_mode	sort__mode = SORT_MODE__NORMAL;
/*
 * Replaces all occurrences of the character used with the:
 *
 * -t, --field-separator
 *
 * option, which selects a special separator character and does not pad with
 * spaces, replacing all occurrences of this separator in symbol names (and
 * other output) with a '.' character, so that it is the only invalid
 * separator.
 */
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
	n = vsnprintf(bf, size, fmt, ap);
	if (symbol_conf.field_sep && n > 0) {
		sep = strchr(sep, *symbol_conf.field_sep);
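/*
 * Illustrative note (not from the original source): with a field separator
 * such as "perf report -t,", repsep_snprintf() rewrites any ',' occurring
 * inside a formatted value, so a C++ symbol like "foo<int, long>::bar" (a
 * hypothetical name) would be printed as "foo<int. long>::bar", keeping the
 * separator unambiguous in the output.
 */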
static int64_t cmp_null(const void *l, const void *r)
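/*
 * The body of cmp_null() is elided in this excerpt.  A minimal sketch of
 * what such a NULL-aware comparison helper conventionally looks like (an
 * assumption, not a verbatim copy of this file):
 *
 *	if (!l && !r)
 *		return 0;
 *	else if (!l)
 *		return -1;
 *	else
 *		return 1;
 */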
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
	return right->thread->tid - left->thread->tid;

static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
	const char *comm = thread__comm_str(he->thread);
	width = max(7U, width) - 6;
	return repsep_snprintf(bf, size, "%5d:%-*.*s", he->thread->tid,
			       width, width, comm ?: "");

static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
	const struct thread *th = arg;
	if (type != HIST_FILTER__THREAD)
	return th && he->thread != th;

struct sort_entry sort_thread = {
	.se_header	= " Pid:Command",
	.se_cmp		= sort__thread_cmp,
	.se_snprintf	= hist_entry__thread_snprintf,
	.se_filter	= hist_entry__thread_filter,
	.se_width_idx	= HISTC_THREAD,

sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
	/* Compare the addr that should be unique among comm */
	return strcmp(comm__str(right->comm), comm__str(left->comm));

sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
	/* Compare the addr that should be unique among comm */
	return strcmp(comm__str(right->comm), comm__str(left->comm));

sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
	return strcmp(comm__str(right->comm), comm__str(left->comm));

static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));

struct sort_entry sort_comm = {
	.se_header	= "Command",
	.se_cmp		= sort__comm_cmp,
	.se_collapse	= sort__comm_collapse,
	.se_sort	= sort__comm_sort,
	.se_snprintf	= hist_entry__comm_snprintf,
	.se_filter	= hist_entry__thread_filter,
	.se_width_idx	= HISTC_COMM,

static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
	struct dso *dso_l = map_l ? map_l->dso : NULL;
	struct dso *dso_r = map_r ? map_r->dso : NULL;
	const char *dso_name_l, *dso_name_r;
	if (!dso_l || !dso_r)
		return cmp_null(dso_r, dso_l);
	dso_name_l = dso_l->long_name;
	dso_name_r = dso_r->long_name;
	dso_name_l = dso_l->short_name;
	dso_name_r = dso_r->short_name;
	return strcmp(dso_name_l, dso_name_r);

sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
	return _sort__dso_cmp(right->ms.map, left->ms.map);

static int _hist_entry__dso_snprintf(struct map *map, char *bf,
				     size_t size, unsigned int width)
	if (map && map->dso) {
		const char *dso_name = !verbose ? map->dso->short_name :
		return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
	return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");

static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);

static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
	const struct dso *dso = arg;
	if (type != HIST_FILTER__DSO)
	return dso && (!he->ms.map || he->ms.map->dso != dso);

struct sort_entry sort_dso = {
	.se_header	= "Shared Object",
	.se_cmp		= sort__dso_cmp,
	.se_snprintf	= hist_entry__dso_snprintf,
	.se_filter	= hist_entry__dso_filter,
	.se_width_idx	= HISTC_DSO,
static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
	return (int64_t)(right_ip - left_ip);

static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);
	if (sym_l->start != sym_r->start)
		return (int64_t)(sym_r->start - sym_l->start);
	return (int64_t)(sym_r->end - sym_l->end);

sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
	if (!left->ms.sym && !right->ms.sym)
		return _sort__addr_cmp(left->ip, right->ip);
	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	if (!sort__has_dso) {
		ret = sort__dso_cmp(left, right);
	return _sort__sym_cmp(left->ms.sym, right->ms.sym);

sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
	if (!left->ms.sym || !right->ms.sym)
		return cmp_null(left->ms.sym, right->ms.sym);
	return strcmp(right->ms.sym->name, left->ms.sym->name);

static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
				     u64 ip, char level, char *bf, size_t size,
		char o = map ? dso__symtab_origin(map->dso) : '!';
		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
				       BITS_PER_LONG / 4 + 2, ip, o);
	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
		if (map->type == MAP__VARIABLE) {
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					       ip - map->unmap_ip(map, sym->start));
			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",

static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
	return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
					 he->level, bf, size, width);

static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
	const char *sym = arg;
	if (type != HIST_FILTER__SYMBOL)
	return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));

struct sort_entry sort_sym = {
	.se_header	= "Symbol",
	.se_cmp		= sort__sym_cmp,
	.se_sort	= sort__sym_sort,
	.se_snprintf	= hist_entry__sym_snprintf,
	.se_filter	= hist_entry__sym_filter,
	.se_width_idx	= HISTC_SYMBOL,
static char *hist_entry__get_srcline(struct hist_entry *he)
	struct map *map = he->ms.map;
		return SRCLINE_UNKNOWN;
	return get_srcline(map->dso, map__rip_2objdump(map, he->ip),

sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
		left->srcline = hist_entry__get_srcline(left);
		right->srcline = hist_entry__get_srcline(right);
	return strcmp(right->srcline, left->srcline);

static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
		he->srcline = hist_entry__get_srcline(he);
	return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);

struct sort_entry sort_srcline = {
	.se_header	= "Source:Line",
	.se_cmp		= sort__srcline_cmp,
	.se_snprintf	= hist_entry__srcline_snprintf,
	.se_width_idx	= HISTC_SRCLINE,

static char no_srcfile[1];

static char *hist_entry__get_srcfile(struct hist_entry *e)
	struct map *map = e->ms.map;
	sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
			   e->ms.sym, false, true);
	if (!strcmp(sf, SRCLINE_UNKNOWN))

sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
		left->srcfile = hist_entry__get_srcfile(left);
		right->srcfile = hist_entry__get_srcfile(right);
	return strcmp(right->srcfile, left->srcfile);

static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
		he->srcfile = hist_entry__get_srcfile(he);
	return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);

struct sort_entry sort_srcfile = {
	.se_header	= "Source File",
	.se_cmp		= sort__srcfile_cmp,
	.se_snprintf	= hist_entry__srcfile_snprintf,
	.se_width_idx	= HISTC_SRCFILE,
sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
	struct symbol *sym_l = left->parent;
	struct symbol *sym_r = right->parent;
	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);
	return strcmp(sym_r->name, sym_l->name);

static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
	return repsep_snprintf(bf, size, "%-*.*s", width, width,
			       he->parent ? he->parent->name : "[other]");

struct sort_entry sort_parent = {
	.se_header	= "Parent symbol",
	.se_cmp		= sort__parent_cmp,
	.se_snprintf	= hist_entry__parent_snprintf,
	.se_width_idx	= HISTC_PARENT,

sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
	return right->cpu - left->cpu;

static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);

struct sort_entry sort_cpu = {
	.se_cmp		= sort__cpu_cmp,
	.se_snprintf	= hist_entry__cpu_snprintf,
	.se_width_idx	= HISTC_CPU,

sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
	return right->socket - left->socket;

static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
	return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);

static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
	int sk = *(const int *)arg;
	if (type != HIST_FILTER__SOCKET)
	return sk >= 0 && he->socket != sk;

struct sort_entry sort_socket = {
	.se_header	= "Socket",
	.se_cmp		= sort__socket_cmp,
	.se_snprintf	= hist_entry__socket_snprintf,
	.se_filter	= hist_entry__socket_filter,
	.se_width_idx	= HISTC_SOCKET,
static char *get_trace_output(struct hist_entry *he)
	struct trace_seq seq;
	struct perf_evsel *evsel;
	struct pevent_record rec = {
		.data = he->raw_data,
		.size = he->raw_size,
	evsel = hists_to_evsel(he->hists);
	trace_seq_init(&seq);
	if (symbol_conf.raw_trace) {
		pevent_print_fields(&seq, he->raw_data, he->raw_size,
		pevent_event_info(&seq, evsel->tp_format, &rec);

sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
	struct perf_evsel *evsel;
	evsel = hists_to_evsel(left->hists);
	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
	if (left->trace_output == NULL)
		left->trace_output = get_trace_output(left);
	if (right->trace_output == NULL)
		right->trace_output = get_trace_output(right);
	return strcmp(right->trace_output, left->trace_output);

static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
	struct perf_evsel *evsel;
	evsel = hists_to_evsel(he->hists);
	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
		return scnprintf(bf, size, "%-.*s", width, "N/A");
	if (he->trace_output == NULL)
		he->trace_output = get_trace_output(he);
	return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);

struct sort_entry sort_trace = {
	.se_header	= "Trace output",
	.se_cmp		= sort__trace_cmp,
	.se_snprintf	= hist_entry__trace_snprintf,
	.se_width_idx	= HISTC_TRACE,
/* sort keys for branch stacks */

sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);
	return _sort__dso_cmp(left->branch_info->from.map,
			      right->branch_info->from.map);

static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
		return _hist_entry__dso_snprintf(he->branch_info->from.map,
	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");

static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
	const struct dso *dso = arg;
	if (type != HIST_FILTER__DSO)
	return dso && (!he->branch_info || !he->branch_info->from.map ||
		       he->branch_info->from.map->dso != dso);

sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);
	return _sort__dso_cmp(left->branch_info->to.map,
			      right->branch_info->to.map);

static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
		return _hist_entry__dso_snprintf(he->branch_info->to.map,
	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");

static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
	const struct dso *dso = arg;
	if (type != HIST_FILTER__DSO)
	return dso && (!he->branch_info || !he->branch_info->to.map ||
		       he->branch_info->to.map->dso != dso);

sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
	struct addr_map_symbol *from_l = &left->branch_info->from;
	struct addr_map_symbol *from_r = &right->branch_info->from;
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);
	from_l = &left->branch_info->from;
	from_r = &right->branch_info->from;
	if (!from_l->sym && !from_r->sym)
		return _sort__addr_cmp(from_l->addr, from_r->addr);
	return _sort__sym_cmp(from_l->sym, from_r->sym);

sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
	struct addr_map_symbol *to_l, *to_r;
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);
	to_l = &left->branch_info->to;
	to_r = &right->branch_info->to;
	if (!to_l->sym && !to_r->sym)
		return _sort__addr_cmp(to_l->addr, to_r->addr);
	return _sort__sym_cmp(to_l->sym, to_r->sym);
static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
	if (he->branch_info) {
		struct addr_map_symbol *from = &he->branch_info->from;
		return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
						 he->level, bf, size, width);
	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");

static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
	if (he->branch_info) {
		struct addr_map_symbol *to = &he->branch_info->to;
		return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
						 he->level, bf, size, width);
	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");

static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
	const char *sym = arg;
	if (type != HIST_FILTER__SYMBOL)
	return sym && !(he->branch_info && he->branch_info->from.sym &&
			strstr(he->branch_info->from.sym->name, sym));

static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
	const char *sym = arg;
	if (type != HIST_FILTER__SYMBOL)
	return sym && !(he->branch_info && he->branch_info->to.sym &&
			strstr(he->branch_info->to.sym->name, sym));

struct sort_entry sort_dso_from = {
	.se_header	= "Source Shared Object",
	.se_cmp		= sort__dso_from_cmp,
	.se_snprintf	= hist_entry__dso_from_snprintf,
	.se_filter	= hist_entry__dso_from_filter,
	.se_width_idx	= HISTC_DSO_FROM,

struct sort_entry sort_dso_to = {
	.se_header	= "Target Shared Object",
	.se_cmp		= sort__dso_to_cmp,
	.se_snprintf	= hist_entry__dso_to_snprintf,
	.se_filter	= hist_entry__dso_to_filter,
	.se_width_idx	= HISTC_DSO_TO,

struct sort_entry sort_sym_from = {
	.se_header	= "Source Symbol",
	.se_cmp		= sort__sym_from_cmp,
	.se_snprintf	= hist_entry__sym_from_snprintf,
	.se_filter	= hist_entry__sym_from_filter,
	.se_width_idx	= HISTC_SYMBOL_FROM,

struct sort_entry sort_sym_to = {
	.se_header	= "Target Symbol",
	.se_cmp		= sort__sym_to_cmp,
	.se_snprintf	= hist_entry__sym_to_snprintf,
	.se_filter	= hist_entry__sym_to_filter,
	.se_width_idx	= HISTC_SYMBOL_TO,
sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);
	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
	p  = left->branch_info->flags.predicted != right->branch_info->flags.predicted;

static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	static const char *out = "N/A";
	if (he->branch_info) {
		if (he->branch_info->flags.predicted)
		else if (he->branch_info->flags.mispred)
	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);

sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
	return left->branch_info->flags.cycles -
		right->branch_info->flags.cycles;

static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
	if (he->branch_info->flags.cycles == 0)
		return repsep_snprintf(bf, size, "%-*s", width, "-");
	return repsep_snprintf(bf, size, "%-*hd", width,
			       he->branch_info->flags.cycles);

struct sort_entry sort_cycles = {
	.se_header	= "Basic Block Cycles",
	.se_cmp		= sort__cycles_cmp,
	.se_snprintf	= hist_entry__cycles_snprintf,
	.se_width_idx	= HISTC_CYCLES,
/* --sort daddr_sym */
sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
	uint64_t l = 0, r = 0;
		l = left->mem_info->daddr.addr;
		r = right->mem_info->daddr.addr;
	return (int64_t)(r - l);

static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
	struct map *map = NULL;
	struct symbol *sym = NULL;
		addr = he->mem_info->daddr.addr;
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;
	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,

sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
	uint64_t l = 0, r = 0;
		l = left->mem_info->iaddr.addr;
		r = right->mem_info->iaddr.addr;
	return (int64_t)(r - l);

static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
	struct map *map = NULL;
	struct symbol *sym = NULL;
		addr = he->mem_info->iaddr.addr;
		map = he->mem_info->iaddr.map;
		sym = he->mem_info->iaddr.sym;
	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,

sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
	struct map *map_l = NULL;
	struct map *map_r = NULL;
		map_l = left->mem_info->daddr.map;
		map_r = right->mem_info->daddr.map;
	return _sort__dso_cmp(map_l, map_r);

static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
	struct map *map = NULL;
		map = he->mem_info->daddr.map;
	return _hist_entry__dso_snprintf(map, bf, size, width);
sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;
		data_src_l = left->mem_info->data_src;
		data_src_l.mem_lock = PERF_MEM_LOCK_NA;
		data_src_r = right->mem_info->data_src;
		data_src_r.mem_lock = PERF_MEM_LOCK_NA;
	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);

static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
	perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%.*s", width, out);

sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;
		data_src_l = left->mem_info->data_src;
		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
		data_src_r = right->mem_info->data_src;
		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);

static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
	perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);

sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;
		data_src_l = left->mem_info->data_src;
		data_src_l.mem_lvl = PERF_MEM_LVL_NA;
		data_src_r = right->mem_info->data_src;
		data_src_r.mem_lvl = PERF_MEM_LVL_NA;
	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);

static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
	perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);

sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;
		data_src_l = left->mem_info->data_src;
		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
		data_src_r = right->mem_info->data_src;
		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);

static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
	perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
	struct map *l_map, *r_map;

	if (!left->mem_info)  return -1;
	if (!right->mem_info) return 1;

	/* group event types together */
	if (left->cpumode > right->cpumode) return -1;
	if (left->cpumode < right->cpumode) return 1;

	l_map = left->mem_info->daddr.map;
	r_map = right->mem_info->daddr.map;

	/* if both are NULL, jump to sort on al_addr instead */
	if (!l_map && !r_map)

	if (!l_map) return -1;
	if (!r_map) return 1;

	if (l_map->maj > r_map->maj) return -1;
	if (l_map->maj < r_map->maj) return 1;

	if (l_map->min > r_map->min) return -1;
	if (l_map->min < r_map->min) return 1;

	if (l_map->ino > r_map->ino) return -1;
	if (l_map->ino < r_map->ino) return 1;

	if (l_map->ino_generation > r_map->ino_generation) return -1;
	if (l_map->ino_generation < r_map->ino_generation) return 1;

	/*
	 * Addresses with no major/minor numbers are assumed to be
	 * anonymous in userspace.  Sort those on pid then address.
	 *
	 * The kernel and non-zero major/minor mapped areas are
	 * assumed to be unity mapped.  Sort those on address.
	 */
	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
	    (!(l_map->flags & MAP_SHARED)) &&
	    !l_map->maj && !l_map->min && !l_map->ino &&
	    !l_map->ino_generation) {
		/* userspace anonymous */
		if (left->thread->pid_ > right->thread->pid_) return -1;
		if (left->thread->pid_ < right->thread->pid_) return 1;

	/* al_addr does all the right addr - start + offset calculations */
	l = cl_address(left->mem_info->daddr.al_addr);
	r = cl_address(right->mem_info->daddr.al_addr);

	if (l > r) return -1;
	if (l < r) return 1;
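/*
 * Note (added for clarity, not part of the original file): cl_address() is
 * defined elsewhere in the perf sources; conceptually it masks an address
 * down to its cacheline, roughly:
 *
 *	static inline u64 cl_address(u64 address)
 *	{
 *		return address & ~(cacheline_size - 1);
 *	}
 *
 * so two accesses landing in the same cacheline compare as equal here.
 * The exact definition above is an assumption based on its usage.
 */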
static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
	struct map *map = NULL;
	struct symbol *sym = NULL;
	char level = he->level;
		addr = cl_address(he->mem_info->daddr.al_addr);
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;

		/* print [s] for shared data mmaps */
		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
		    map && (map->type == MAP__VARIABLE) &&
		    (map->flags & MAP_SHARED) &&
		    (map->maj || map->min || map->ino ||
		     map->ino_generation))
	return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,

struct sort_entry sort_mispredict = {
	.se_header	= "Branch Mispredicted",
	.se_cmp		= sort__mispredict_cmp,
	.se_snprintf	= hist_entry__mispredict_snprintf,
	.se_width_idx	= HISTC_MISPREDICT,
static u64 he_weight(struct hist_entry *he)
	return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;

sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
	return he_weight(left) - he_weight(right);

static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
	return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));

struct sort_entry sort_local_weight = {
	.se_header	= "Local Weight",
	.se_cmp		= sort__local_weight_cmp,
	.se_snprintf	= hist_entry__local_weight_snprintf,
	.se_width_idx	= HISTC_LOCAL_WEIGHT,

sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
	return left->stat.weight - right->stat.weight;

static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
					      size_t size, unsigned int width)
	return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);

struct sort_entry sort_global_weight = {
	.se_header	= "Weight",
	.se_cmp		= sort__global_weight_cmp,
	.se_snprintf	= hist_entry__global_weight_snprintf,
	.se_width_idx	= HISTC_GLOBAL_WEIGHT,

struct sort_entry sort_mem_daddr_sym = {
	.se_header	= "Data Symbol",
	.se_cmp		= sort__daddr_cmp,
	.se_snprintf	= hist_entry__daddr_snprintf,
	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,

struct sort_entry sort_mem_iaddr_sym = {
	.se_header	= "Code Symbol",
	.se_cmp		= sort__iaddr_cmp,
	.se_snprintf	= hist_entry__iaddr_snprintf,
	.se_width_idx	= HISTC_MEM_IADDR_SYMBOL,

struct sort_entry sort_mem_daddr_dso = {
	.se_header	= "Data Object",
	.se_cmp		= sort__dso_daddr_cmp,
	.se_snprintf	= hist_entry__dso_daddr_snprintf,
	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,

struct sort_entry sort_mem_locked = {
	.se_header	= "Locked",
	.se_cmp		= sort__locked_cmp,
	.se_snprintf	= hist_entry__locked_snprintf,
	.se_width_idx	= HISTC_MEM_LOCKED,

struct sort_entry sort_mem_tlb = {
	.se_header	= "TLB access",
	.se_cmp		= sort__tlb_cmp,
	.se_snprintf	= hist_entry__tlb_snprintf,
	.se_width_idx	= HISTC_MEM_TLB,

struct sort_entry sort_mem_lvl = {
	.se_header	= "Memory access",
	.se_cmp		= sort__lvl_cmp,
	.se_snprintf	= hist_entry__lvl_snprintf,
	.se_width_idx	= HISTC_MEM_LVL,

struct sort_entry sort_mem_snoop = {
	.se_header	= "Snoop",
	.se_cmp		= sort__snoop_cmp,
	.se_snprintf	= hist_entry__snoop_snprintf,
	.se_width_idx	= HISTC_MEM_SNOOP,

struct sort_entry sort_mem_dcacheline = {
	.se_header	= "Data Cacheline",
	.se_cmp		= sort__dcacheline_cmp,
	.se_snprintf	= hist_entry__dcacheline_snprintf,
	.se_width_idx	= HISTC_MEM_DCACHELINE,
sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);
	return left->branch_info->flags.abort !=
		right->branch_info->flags.abort;

static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
	static const char *out = "N/A";
	if (he->branch_info) {
		if (he->branch_info->flags.abort)
	return repsep_snprintf(bf, size, "%-*s", width, out);

struct sort_entry sort_abort = {
	.se_header	= "Transaction abort",
	.se_cmp		= sort__abort_cmp,
	.se_snprintf	= hist_entry__abort_snprintf,
	.se_width_idx	= HISTC_ABORT,

sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);
	return left->branch_info->flags.in_tx !=
		right->branch_info->flags.in_tx;

static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
	static const char *out = "N/A";
	if (he->branch_info) {
		if (he->branch_info->flags.in_tx)
	return repsep_snprintf(bf, size, "%-*s", width, out);

struct sort_entry sort_in_tx = {
	.se_header	= "Branch in transaction",
	.se_cmp		= sort__in_tx_cmp,
	.se_snprintf	= hist_entry__in_tx_snprintf,
	.se_width_idx	= HISTC_IN_TX,

sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
	return left->transaction - right->transaction;

static inline char *add_str(char *p, const char *str)
	return p + strlen(str);
static struct txbit {
	{ PERF_TXN_ELISION,		"EL ",		0 },
	{ PERF_TXN_TRANSACTION,		"TX ",		1 },
	{ PERF_TXN_SYNC,		"SYNC ",	1 },
	{ PERF_TXN_ASYNC,		"ASYNC ",	0 },
	{ PERF_TXN_RETRY,		"RETRY ",	0 },
	{ PERF_TXN_CONFLICT,		"CON ",		0 },
	{ PERF_TXN_CAPACITY_WRITE,	"CAP-WRITE ",	1 },
	{ PERF_TXN_CAPACITY_READ,	"CAP-READ ",	0 },

int hist_entry__transaction_len(void)
	for (i = 0; txbits[i].name; i++) {
		if (!txbits[i].skip_for_len)
			len += strlen(txbits[i].name);
	len += 4; /* :XX<space> */

static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
					    size_t size, unsigned int width)
	u64 t = he->transaction;

	for (i = 0; txbits[i].name; i++)
		if (txbits[i].flag & t)
			p = add_str(p, txbits[i].name);
	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
		p = add_str(p, "NEITHER ");
	if (t & PERF_TXN_ABORT_MASK) {
		sprintf(p, ":%" PRIx64,
			(t & PERF_TXN_ABORT_MASK) >>
			PERF_TXN_ABORT_SHIFT);
	return repsep_snprintf(bf, size, "%-*s", width, buf);
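/*
 * Worked example (illustrative, not from the original file): for a sample
 * whose transaction word has PERF_TXN_TRANSACTION and PERF_TXN_CONFLICT set
 * and an abort code of 4 under PERF_TXN_ABORT_MASK, the loop above builds
 * "TX CON " and the trailing sprintf() appends ":4", so the column shows
 * "TX CON :4".
 */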
struct sort_entry sort_transaction = {
	.se_header	= "Transaction ",
	.se_cmp		= sort__transaction_cmp,
	.se_snprintf	= hist_entry__transaction_snprintf,
	.se_width_idx	= HISTC_TRANSACTION,

struct sort_dimension {
	struct sort_entry	*entry;

#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }

static struct sort_dimension common_sort_dimensions[] = {
	DIM(SORT_PID, "pid", sort_thread),
	DIM(SORT_COMM, "comm", sort_comm),
	DIM(SORT_DSO, "dso", sort_dso),
	DIM(SORT_SYM, "symbol", sort_sym),
	DIM(SORT_PARENT, "parent", sort_parent),
	DIM(SORT_CPU, "cpu", sort_cpu),
	DIM(SORT_SOCKET, "socket", sort_socket),
	DIM(SORT_SRCLINE, "srcline", sort_srcline),
	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
	DIM(SORT_TRACE, "trace", sort_trace),
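/*
 * For reference (added note): with the DIM() macro defined above,
 * DIM(SORT_PID, "pid", sort_thread) expands to
 *
 *	[SORT_PID] = { .name = "pid", .entry = &(sort_thread) },
 *
 * i.e. each table is indexed directly by its sort-type enumerator; the
 * branch-stack and memory tables below rebase the index with
 * __SORT_BRANCH_STACK / __SORT_MEMORY_MODE.
 */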
#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }

static struct sort_dimension bstack_sort_dimensions[] = {
	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
	DIM(SORT_ABORT, "abort", sort_abort),
	DIM(SORT_CYCLES, "cycles", sort_cycles),

#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }

static struct sort_dimension memory_sort_dimensions[] = {
	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),

struct hpp_dimension {
	struct perf_hpp_fmt	*fmt;

#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }

static struct hpp_dimension hpp_sort_dimensions[] = {
	DIM(PERF_HPP__OVERHEAD, "overhead"),
	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
	DIM(PERF_HPP__SAMPLES, "sample"),
	DIM(PERF_HPP__PERIOD, "period"),
struct hpp_sort_entry {
	struct perf_hpp_fmt hpp;
	struct sort_entry *se;

void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
	struct hpp_sort_entry *hse;
	if (!perf_hpp__is_sort_entry(fmt))
	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));

static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct perf_evsel *evsel)
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;
	hse = container_of(fmt, struct hpp_sort_entry, hpp);
		len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);

static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct perf_evsel *evsel)
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;
	hse = container_of(fmt, struct hpp_sort_entry, hpp);
		len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);

static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;
	hse = container_of(fmt, struct hpp_sort_entry, hpp);
		len = hists__col_len(he->hists, hse->se->se_width_idx);
	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);

static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
	struct hpp_sort_entry *hse;
	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	return hse->se->se_cmp(a, b);

static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
				    struct hist_entry *a, struct hist_entry *b)
	struct hpp_sort_entry *hse;
	int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
	return collapse_fn(a, b);

static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
				struct hist_entry *a, struct hist_entry *b)
	struct hpp_sort_entry *hse;
	int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
	return sort_fn(a, b);

bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
	return format->header == __sort__hpp_header;
#define MK_SORT_ENTRY_CHK(key) \
bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \
	struct hpp_sort_entry *hse; \
	if (!perf_hpp__is_sort_entry(fmt)) \
	hse = container_of(fmt, struct hpp_sort_entry, hpp); \
	return hse->se == &sort_ ## key ; \

MK_SORT_ENTRY_CHK(trace)
MK_SORT_ENTRY_CHK(srcline)
MK_SORT_ENTRY_CHK(srcfile)
MK_SORT_ENTRY_CHK(thread)
MK_SORT_ENTRY_CHK(comm)
MK_SORT_ENTRY_CHK(dso)
MK_SORT_ENTRY_CHK(sym)
static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
	struct hpp_sort_entry *hse_a;
	struct hpp_sort_entry *hse_b;
	if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
	hse_a = container_of(a, struct hpp_sort_entry, hpp);
	hse_b = container_of(b, struct hpp_sort_entry, hpp);
	return hse_a->se == hse_b->se;

static void hse_free(struct perf_hpp_fmt *fmt)
	struct hpp_sort_entry *hse;
	hse = container_of(fmt, struct hpp_sort_entry, hpp);

static struct hpp_sort_entry *
__sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
	struct hpp_sort_entry *hse;
	hse = malloc(sizeof(*hse));
		pr_err("Memory allocation failed\n");
	hse->se = sd->entry;
	hse->hpp.name = sd->entry->se_header;
	hse->hpp.header = __sort__hpp_header;
	hse->hpp.width = __sort__hpp_width;
	hse->hpp.entry = __sort__hpp_entry;
	hse->hpp.color = NULL;
	hse->hpp.cmp = __sort__hpp_cmp;
	hse->hpp.collapse = __sort__hpp_collapse;
	hse->hpp.sort = __sort__hpp_sort;
	hse->hpp.equal = __sort__hpp_equal;
	hse->hpp.free = hse_free;
	INIT_LIST_HEAD(&hse->hpp.list);
	INIT_LIST_HEAD(&hse->hpp.sort_list);
	hse->hpp.elide = false;
	hse->hpp.user_len = 0;
	hse->hpp.level = level;

static void hpp_free(struct perf_hpp_fmt *fmt)

static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
	struct perf_hpp_fmt *fmt;
	fmt = memdup(hd->fmt, sizeof(*fmt));
		INIT_LIST_HEAD(&fmt->list);
		INIT_LIST_HEAD(&fmt->sort_list);
		fmt->free = hpp_free;

int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;
	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_filter == NULL)
		/*
		 * A hist entry is filtered if any sort key in the hpp list
		 * is applied.  But it should skip non-matched filter types.
		 */
		r = hse->se->se_filter(he, type, arg);

static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
					  struct perf_hpp_list *list,
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
	perf_hpp_list__register_sort_field(list, &hse->hpp);

static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
					    struct perf_hpp_list *list)
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);
	perf_hpp_list__column_register(list, &hse->hpp);

struct hpp_dynamic_entry {
	struct perf_hpp_fmt hpp;
	struct perf_evsel *evsel;
	struct format_field *field;
	unsigned dynamic_len;
static int hde_width(struct hpp_dynamic_entry *hde)
	if (!hde->hpp.len) {
		int len = hde->dynamic_len;
		int namelen = strlen(hde->field->name);
		int fieldlen = hde->field->size;

		if (!(hde->field->flags & FIELD_IS_STRING)) {
			/* length for print hex numbers */
			fieldlen = hde->field->size * 2 + 2;
	return hde->hpp.len;

static void update_dynamic_len(struct hpp_dynamic_entry *hde,
			       struct hist_entry *he)
	struct format_field *field = hde->field;

	/* parse pretty print result and update max length */
	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	namelen = strlen(field->name);
	str = he->trace_output;
		pos = strchr(str, ' ');
			pos = str + strlen(str);
		if (!strncmp(str, field->name, namelen)) {
			if (len > hde->dynamic_len)
				hde->dynamic_len = len;
static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct perf_evsel *evsel __maybe_unused)
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;
	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
		len = hde_width(hde);
	return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);

static int __sort__hde_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct perf_evsel *evsel __maybe_unused)
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;
	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
		len = hde_width(hde);

bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
	struct hpp_dynamic_entry *hde;
	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
	return hists_to_evsel(hists) == hde->evsel;

static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;
	struct format_field *field;
	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
		len = hde_width(hde);
	if (!he->trace_output)
		he->trace_output = get_trace_output(he);
	namelen = strlen(field->name);
	str = he->trace_output;
		pos = strchr(str, ' ');
			pos = str + strlen(str);
		if (!strncmp(str, field->name, namelen)) {
			str = strndup(str, pos - str);
				return scnprintf(hpp->buf, hpp->size,
						 "%*.*s", len, len, "ERROR");
		struct trace_seq seq;
		trace_seq_init(&seq);
		pevent_print_field(&seq, he->raw_data, hde->field);
	ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
	struct hpp_dynamic_entry *hde;
	struct format_field *field;
	unsigned offset, size;
	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
		update_dynamic_len(hde, a);
	if (field->flags & FIELD_IS_DYNAMIC) {
		unsigned long long dyn;
		pevent_read_number_field(field, a->raw_data, &dyn);
		offset = dyn & 0xffff;
		size = (dyn >> 16) & 0xffff;

		/* record max width for output */
		if (size > hde->dynamic_len)
			hde->dynamic_len = size;
		offset = field->offset;
	return memcmp(a->raw_data + offset, b->raw_data + offset, size);

bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
	return fmt->cmp == __sort__hde_cmp;

static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
	struct hpp_dynamic_entry *hde_a;
	struct hpp_dynamic_entry *hde_b;
	if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
	hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
	hde_b = container_of(b, struct hpp_dynamic_entry, hpp);
	return hde_a->field == hde_b->field;

static void hde_free(struct perf_hpp_fmt *fmt)
	struct hpp_dynamic_entry *hde;
	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
static struct hpp_dynamic_entry *
__alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field,
	struct hpp_dynamic_entry *hde;
	hde = malloc(sizeof(*hde));
		pr_debug("Memory allocation failed\n");
	hde->dynamic_len = 0;
	hde->hpp.name = field->name;
	hde->hpp.header = __sort__hde_header;
	hde->hpp.width = __sort__hde_width;
	hde->hpp.entry = __sort__hde_entry;
	hde->hpp.color = NULL;
	hde->hpp.cmp = __sort__hde_cmp;
	hde->hpp.collapse = __sort__hde_cmp;
	hde->hpp.sort = __sort__hde_cmp;
	hde->hpp.equal = __sort__hde_equal;
	hde->hpp.free = hde_free;
	INIT_LIST_HEAD(&hde->hpp.list);
	INIT_LIST_HEAD(&hde->hpp.sort_list);
	hde->hpp.elide = false;
	hde->hpp.user_len = 0;
	hde->hpp.level = level;

struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
	struct perf_hpp_fmt *new_fmt = NULL;
	if (perf_hpp__is_sort_entry(fmt)) {
		struct hpp_sort_entry *hse, *new_hse;
		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		new_hse = memdup(hse, sizeof(*hse));
			new_fmt = &new_hse->hpp;
	} else if (perf_hpp__is_dynamic_entry(fmt)) {
		struct hpp_dynamic_entry *hde, *new_hde;
		hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
		new_hde = memdup(hde, sizeof(*hde));
			new_fmt = &new_hde->hpp;
		new_fmt = memdup(fmt, sizeof(*fmt));
	INIT_LIST_HEAD(&new_fmt->list);
	INIT_LIST_HEAD(&new_fmt->sort_list);
static int parse_field_name(char *str, char **event, char **field, char **opt)
	char *event_name, *field_name, *opt_name;
	field_name = strchr(str, '.');
		*field_name++ = '\0';
	opt_name = strchr(field_name, '/');
	*event = event_name;
	*field = field_name;
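/*
 * Illustration (added note, example input is hypothetical): a sort token
 * such as "sched:sched_switch.next_comm/raw" is split here into the event
 * name "sched:sched_switch", the field name "next_comm" and the option
 * "raw"; a token without a '.' leaves the event name unset, so the field is
 * later matched against every tracepoint in the evlist.
 */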
/* Find the matching evsel using a given event name.  The event name can be:
 *   1. '%' + event index (e.g. '%1' for the first event)
 *   2. full event name (e.g. sched:sched_switch)
 *   3. partial event name (should not contain ':')
 */
static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
	struct perf_evsel *evsel = NULL;
	struct perf_evsel *pos;

	if (event_name[0] == '%') {
		int nr = strtol(event_name+1, NULL, 0);

		if (nr > evlist->nr_entries)

		evsel = perf_evlist__first(evlist);
			evsel = perf_evsel__next(evsel);

	full_name = !!strchr(event_name, ':');
	evlist__for_each(evlist, pos) {
		if (full_name && !strcmp(pos->name, event_name))
		if (!full_name && strstr(pos->name, event_name)) {
				pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
					 event_name, evsel->name, pos->name);
static int __dynamic_dimension__add(struct perf_evsel *evsel,
				    struct format_field *field,
				    bool raw_trace, int level)
	struct hpp_dynamic_entry *hde;
	hde = __alloc_dynamic_entry(evsel, field, level);
	hde->raw_trace = raw_trace;
	perf_hpp__register_sort_field(&hde->hpp);

static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace, int level)
	struct format_field *field;
	field = evsel->tp_format->format.fields;
		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		field = field->next;

static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace,
	struct perf_evsel *evsel;
	evlist__for_each(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
		ret = add_evsel_fields(evsel, raw_trace, level);

static int add_all_matching_fields(struct perf_evlist *evlist,
				   char *field_name, bool raw_trace, int level)
	struct perf_evsel *evsel;
	struct format_field *field;
	evlist__for_each(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
		field = pevent_find_any_field(evsel->tp_format, field_name);
		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);

static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok,
	char *str, *event_name, *field_name, *opt_name;
	struct perf_evsel *evsel;
	struct format_field *field;
	bool raw_trace = symbol_conf.raw_trace;

	if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {

		if (strcmp(opt_name, "raw")) {
			pr_debug("unsupported field option %s\n", opt_name);

	if (!strcmp(field_name, "trace_fields")) {
		ret = add_all_dynamic_fields(evlist, raw_trace, level);

	if (event_name == NULL) {
		ret = add_all_matching_fields(evlist, field_name, raw_trace, level);

	evsel = find_evsel(evlist, event_name);
	if (evsel == NULL) {
		pr_debug("Cannot find event: %s\n", event_name);

	if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
		pr_debug("%s is not a tracepoint event\n", event_name);

	if (!strcmp(field_name, "*")) {
		ret = add_evsel_fields(evsel, raw_trace, level);
		field = pevent_find_any_field(evsel->tp_format, field_name);
		if (field == NULL) {
			pr_debug("Cannot find event field for %s.%s\n",
				 event_name, field_name);
		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
static int __sort_dimension__add(struct sort_dimension *sd,
				 struct perf_hpp_list *list,
	if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
	if (sd->entry->se_collapse)
		sort__need_collapse = 1;

static int __hpp_dimension__add(struct hpp_dimension *hd,
				struct perf_hpp_list *list,
	struct perf_hpp_fmt *fmt;
	fmt = __hpp_dimension__alloc_hpp(hd, level);
	perf_hpp_list__register_sort_field(list, fmt);

static int __sort_dimension__add_output(struct perf_hpp_list *list,
					struct sort_dimension *sd)
	if (__sort_dimension__add_hpp_output(sd, list) < 0)

static int __hpp_dimension__add_output(struct perf_hpp_list *list,
				       struct hpp_dimension *hd)
	struct perf_hpp_fmt *fmt;
	fmt = __hpp_dimension__alloc_hpp(hd, 0);
	perf_hpp_list__column_register(list, fmt);

int hpp_dimension__add_output(unsigned col)
	BUG_ON(col >= PERF_HPP__MAX_INDEX);
	return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
static int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
			       struct perf_evlist *evlist __maybe_unused,
	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))

		if (sd->entry == &sort_parent) {
			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
				regerror(ret, &parent_regex, err, sizeof(err));
				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
			sort__has_parent = 1;
		} else if (sd->entry == &sort_sym) {
			/*
			 * perf diff displays the performance difference between
			 * two or more perf.data files.  Those files could come
			 * from different binaries, so we should not compare
			 * their ips, but the symbol names.
			 */
			if (sort__mode == SORT_MODE__DIFF)
				sd->entry->se_collapse = sort__sym_sort;
		} else if (sd->entry == &sort_dso) {
		} else if (sd->entry == &sort_socket) {
			sort__has_socket = 1;
		} else if (sd->entry == &sort_thread) {
			sort__has_thread = 1;

		return __sort_dimension__add(sd, list, level);

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))

		return __hpp_dimension__add(hd, list, level);

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))

		if (sort__mode != SORT_MODE__BRANCH)

		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)

		__sort_dimension__add(sd, list, level);

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))

		if (sort__mode != SORT_MODE__MEMORY)

		if (sd->entry == &sort_mem_daddr_sym)

		__sort_dimension__add(sd, list, level);

	if (!add_dynamic_entry(evlist, tok, level))
static int setup_sort_list(struct perf_hpp_list *list, char *str,
			   struct perf_evlist *evlist)
	bool in_group = false;

		tmp = strpbrk(str, "{}, ");
				next_level = level + 1;
			else if (*tmp == '}')

		ret = sort_dimension__add(list, tok, evlist, level);
		if (ret == -EINVAL) {
			error("Invalid --sort key: `%s'", tok);
		} else if (ret == -ESRCH) {
			error("Unknown --sort key: `%s'", tok);
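/*
 * Added note on the syntax handled above (illustrative, partly inferred):
 * keys are separated by ',' or ' ', and a brace group keeps several keys on
 * the same hierarchy level when hierarchy mode is enabled, so a hypothetical
 * invocation like
 *
 *	perf report --hierarchy -s '{comm,dso},sym'
 *
 * would treat comm and dso as one level and sym as the next, while a plain
 * "comm,dso,sym" advances the level for each key.
 */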
static const char *get_default_sort_order(struct perf_evlist *evlist)
	const char *default_sort_orders[] = {
		default_branch_sort_order,
		default_mem_sort_order,
		default_top_sort_order,
		default_diff_sort_order,
		default_tracepoint_sort_order,
	bool use_trace = true;
	struct perf_evsel *evsel;

	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));

	evlist__for_each(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {

	sort__mode = SORT_MODE__TRACEPOINT;
	if (symbol_conf.raw_trace)
		return "trace_fields";

	return default_sort_orders[sort__mode];

static int setup_sort_order(struct perf_evlist *evlist)
	char *new_sort_order;

	/*
	 * Append '+'-prefixed sort order to the default sort
	 */
	if (!sort_order || is_strict_order(sort_order))

	if (sort_order[1] == '\0') {
		error("Invalid --sort key: `+'");

	/*
	 * We allocate new sort_order string, but we never free it,
	 * because it's checked over the rest of the code.
	 */
	if (asprintf(&new_sort_order, "%s,%s",
		     get_default_sort_order(evlist), sort_order + 1) < 0) {
		error("Not enough memory to set up --sort");

	sort_order = new_sort_order;
/*
 * Adds a 'pre,' prefix to 'str' if 'pre' is
 * not already part of 'str'.
 */
static char *prefix_if_not_in(const char *pre, char *str)
	if (!str || strstr(str, pre))

	if (asprintf(&n, "%s,%s", pre, str) < 0)

static char *setup_overhead(char *keys)
	keys = prefix_if_not_in("overhead", keys);

	if (symbol_conf.cumulate_callchain)
		keys = prefix_if_not_in("overhead_children", keys);
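/*
 * Added example (illustrative): for a user-supplied sort string "dso,sym",
 * setup_overhead() yields "overhead,dso,sym", and with
 * symbol_conf.cumulate_callchain set (perf report --children) it becomes
 * "overhead_children,overhead,dso,sym", which is why the overhead columns
 * appear first unless a strict field order is given.
 */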
static int __setup_sorting(struct perf_evlist *evlist)
	const char *sort_keys;

	ret = setup_sort_order(evlist);

	sort_keys = sort_order;
	if (sort_keys == NULL) {
		if (is_strict_order(field_order)) {
			/*
			 * If user specified field order but no sort order,
			 * we'll honor it and not add default sort orders.
			 */

		sort_keys = get_default_sort_order(evlist);

	str = strdup(sort_keys);
		error("Not enough memory to setup sort keys");

	/*
	 * Prepend overhead fields for backward compatibility.
	 */
	if (!is_strict_order(field_order)) {
		str = setup_overhead(str);
			error("Not enough memory to setup overhead keys");

	ret = setup_sort_list(&perf_hpp_list, str, evlist);

void perf_hpp__set_elide(int idx, bool elide)
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_width_idx == idx) {

static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
	if (list && strlist__nr_entries(list) == 1) {
		fprintf(fp, "# %s: %s\n", list_name,
			strlist__entry(list, 0)->s);

static bool get_elide(int idx, FILE *output)
		return __get_elide(symbol_conf.sym_list, "symbol", output);
		return __get_elide(symbol_conf.dso_list, "dso", output);
		return __get_elide(symbol_conf.comm_list, "comm", output);

	if (sort__mode != SORT_MODE__BRANCH)

	case HISTC_SYMBOL_FROM:
		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
	case HISTC_SYMBOL_TO:
		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
	case HISTC_DSO_FROM:
		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);

void sort__setup_elide(FILE *output)
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		fmt->elide = get_elide(hse->se->se_width_idx, output);
	/*
	 * It makes no sense to elide all of the sort entries.
	 * Just revert them so they show up again.
	 */
	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))

static int output_field_add(struct perf_hpp_list *list, char *tok)
	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))

		return __sort_dimension__add_output(list, sd);

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))

		return __hpp_dimension__add_output(list, hd);

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))

		return __sort_dimension__add_output(list, sd);

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))

		return __sort_dimension__add_output(list, sd);

static int setup_output_list(struct perf_hpp_list *list, char *str)
	for (tok = strtok_r(str, ", ", &tmp);
	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
		ret = output_field_add(list, tok);
		if (ret == -EINVAL) {
			error("Invalid --fields key: `%s'", tok);
		} else if (ret == -ESRCH) {
			error("Unknown --fields key: `%s'", tok);

static void reset_dimensions(void)
	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
		common_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
		hpp_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
		bstack_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
		memory_sort_dimensions[i].taken = 0;

bool is_strict_order(const char *order)
	return order && (*order != '+');

static int __setup_output_field(void)
	if (field_order == NULL)

	strp = str = strdup(field_order);
		error("Not enough memory to setup output fields");

	if (!is_strict_order(field_order))

	if (!strlen(strp)) {
		error("Invalid --fields key: `+'");

	ret = setup_output_list(&perf_hpp_list, strp);

static void evlist__set_hists_nr_sort_keys(struct perf_evlist *evlist)
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		struct perf_hpp_fmt *fmt;
		struct hists *hists = evsel__hists(evsel);

		hists->nr_sort_keys = perf_hpp_list.nr_sort_keys;
		/*
		 * If dynamic entries were used, they might add multiple
		 * entries to each evsel for a single field name.  Set the
		 * actual number of sort keys for each hists.
		 */
		perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
			if (perf_hpp__is_dynamic_entry(fmt) &&
			    !perf_hpp__defined_dynamic_entry(fmt, hists))
				hists->nr_sort_keys--;

int setup_sorting(struct perf_evlist *evlist)
	err = __setup_sorting(evlist);

	if (parent_pattern != default_parent_pattern) {
		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);

	evlist__set_hists_nr_sort_keys(evlist);

	/*
	 * perf diff doesn't use default hpp output fields.
	 */
	if (sort__mode != SORT_MODE__DIFF)

	err = __setup_output_field();

	/* copy sort keys to output fields */
	perf_hpp__setup_output_field(&perf_hpp_list);
	/* and then copy output fields to sort keys */
	perf_hpp__append_sort_keys(&perf_hpp_list);

	/* setup hists-specific output fields */
	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)

void reset_output_field(void)
	sort__need_collapse = 0;
	sort__has_parent = 0;

	perf_hpp__reset_output_field(&perf_hpp_list);