#include <traceevent/event-parse.h>
#include "mem-events.h"

const char	default_parent_pattern[] = "^sys_|^do_page_fault";
const char	*parent_pattern = default_parent_pattern;
const char	default_sort_order[] = "comm,dso,symbol";
const char	default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char	default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
const char	default_top_sort_order[] = "dso,symbol";
const char	default_diff_sort_order[] = "dso,symbol";
const char	default_tracepoint_sort_order[] = "trace";
const char	*sort_order;
const char	*field_order;
regex_t		ignore_callees_regex;
int		have_ignore_callees = 0;
int		sort__need_collapse = 0;
int		sort__has_parent = 0;
int		sort__has_sym = 0;
int		sort__has_dso = 0;
int		sort__has_socket = 0;
int		sort__has_thread = 0;
int		sort__has_comm = 0;
enum sort_mode	sort__mode = SORT_MODE__NORMAL;
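
/*
 * Note: each default_*_sort_order string above is the sort key list used
 * when no -s/--sort option is given for the corresponding sort mode
 * (normal, branch stack, memory, top, diff, tracepoint); see
 * get_default_sort_order() further below, which indexes them by sort__mode.
 */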
/*
 * Replaces all occurrences of a char used with the:
 *
 * -t, --field-separator
 *
 * option, which uses a special separator character and doesn't pad with
 * spaces, replacing all occurrences of this separator in symbol names (and
 * other output) with a '.' character, so that it ends up being the only
 * non-valid separator.
 */
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
	n = vsnprintf(bf, size, fmt, ap);
	if (symbol_conf.field_sep && n > 0) {
		sep = strchr(sep, *symbol_conf.field_sep);
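	/*
	 * Illustrative example (not from the original source): with "-t ,"
	 * a symbol printed as "foo,bar" would collide with the field
	 * separator, so each occurrence of ',' found by the strchr() loop
	 * above is rewritten to '.' and the output stays parseable.
	 */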
static int64_t cmp_null(const void *l, const void *r)

sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
	return right->thread->tid - left->thread->tid;

static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
	const char *comm = thread__comm_str(he->thread);

	width = max(7U, width) - 6;
	return repsep_snprintf(bf, size, "%5d:%-*.*s", he->thread->tid,
			       width, width, comm ?: "");

static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
	const struct thread *th = arg;

	if (type != HIST_FILTER__THREAD)

	return th && he->thread != th;

struct sort_entry sort_thread = {
	.se_header	= " Pid:Command",
	.se_cmp		= sort__thread_cmp,
	.se_snprintf	= hist_entry__thread_snprintf,
	.se_filter	= hist_entry__thread_filter,
	.se_width_idx	= HISTC_THREAD,
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
	/* Compare the addr that should be unique among comm */
	return strcmp(comm__str(right->comm), comm__str(left->comm));

sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
	/* Compare the addr that should be unique among comm */
	return strcmp(comm__str(right->comm), comm__str(left->comm));

sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
	return strcmp(comm__str(right->comm), comm__str(left->comm));

static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));

struct sort_entry sort_comm = {
	.se_header	= "Command",
	.se_cmp		= sort__comm_cmp,
	.se_collapse	= sort__comm_collapse,
	.se_sort	= sort__comm_sort,
	.se_snprintf	= hist_entry__comm_snprintf,
	.se_filter	= hist_entry__thread_filter,
	.se_width_idx	= HISTC_COMM,

static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
	struct dso *dso_l = map_l ? map_l->dso : NULL;
	struct dso *dso_r = map_r ? map_r->dso : NULL;
	const char *dso_name_l, *dso_name_r;

	if (!dso_l || !dso_r)
		return cmp_null(dso_r, dso_l);

	dso_name_l = dso_l->long_name;
	dso_name_r = dso_r->long_name;

	dso_name_l = dso_l->short_name;
	dso_name_r = dso_r->short_name;

	return strcmp(dso_name_l, dso_name_r);

sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
	return _sort__dso_cmp(right->ms.map, left->ms.map);

static int _hist_entry__dso_snprintf(struct map *map, char *bf,
				     size_t size, unsigned int width)
	if (map && map->dso) {
		const char *dso_name = !verbose ? map->dso->short_name :

		return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");

static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);

static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)

	return dso && (!he->ms.map || he->ms.map->dso != dso);

struct sort_entry sort_dso = {
	.se_header	= "Shared Object",
	.se_cmp		= sort__dso_cmp,
	.se_snprintf	= hist_entry__dso_snprintf,
	.se_filter	= hist_entry__dso_filter,
	.se_width_idx	= HISTC_DSO,
static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
	return (int64_t)(right_ip - left_ip);

static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	if (sym_l->start != sym_r->start)
		return (int64_t)(sym_r->start - sym_l->start);

	return (int64_t)(sym_r->end - sym_l->end);

sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
	if (!left->ms.sym && !right->ms.sym)
		return _sort__addr_cmp(left->ip, right->ip);

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	if (!sort__has_dso) {
		ret = sort__dso_cmp(left, right);

	return _sort__sym_cmp(left->ms.sym, right->ms.sym);

sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
	if (!left->ms.sym || !right->ms.sym)
		return cmp_null(left->ms.sym, right->ms.sym);

	return strcmp(right->ms.sym->name, left->ms.sym->name);

static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
				     u64 ip, char level, char *bf, size_t size,
		char o = map ? dso__symtab_origin(map->dso) : '!';
		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
				       BITS_PER_LONG / 4 + 2, ip, o);

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
		if (map->type == MAP__VARIABLE) {
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					       ip - map->unmap_ip(map, sym->start));
			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",

static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
	return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
					 he->level, bf, size, width);

static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)

	return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));

struct sort_entry sort_sym = {
	.se_header	= "Symbol",
	.se_cmp		= sort__sym_cmp,
	.se_sort	= sort__sym_sort,
	.se_snprintf	= hist_entry__sym_snprintf,
	.se_filter	= hist_entry__sym_filter,
	.se_width_idx	= HISTC_SYMBOL,
static char *hist_entry__get_srcline(struct hist_entry *he)
	struct map *map = he->ms.map;

		return SRCLINE_UNKNOWN;

	return get_srcline(map->dso, map__rip_2objdump(map, he->ip),

sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
		left->srcline = hist_entry__get_srcline(left);
		right->srcline = hist_entry__get_srcline(right);

	return strcmp(right->srcline, left->srcline);

static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
		he->srcline = hist_entry__get_srcline(he);

	return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);

struct sort_entry sort_srcline = {
	.se_header	= "Source:Line",
	.se_cmp		= sort__srcline_cmp,
	.se_snprintf	= hist_entry__srcline_snprintf,
	.se_width_idx	= HISTC_SRCLINE,

static char no_srcfile[1];

static char *hist_entry__get_srcfile(struct hist_entry *e)
	struct map *map = e->ms.map;

	sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
			   e->ms.sym, false, true);
	if (!strcmp(sf, SRCLINE_UNKNOWN))

sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
		left->srcfile = hist_entry__get_srcfile(left);
		right->srcfile = hist_entry__get_srcfile(right);

	return strcmp(right->srcfile, left->srcfile);

static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
		he->srcfile = hist_entry__get_srcfile(he);

	return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);

struct sort_entry sort_srcfile = {
	.se_header	= "Source File",
	.se_cmp		= sort__srcfile_cmp,
	.se_snprintf	= hist_entry__srcfile_snprintf,
	.se_width_idx	= HISTC_SRCFILE,
sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
	struct symbol *sym_l = left->parent;
	struct symbol *sym_r = right->parent;

	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	return strcmp(sym_r->name, sym_l->name);

static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
	return repsep_snprintf(bf, size, "%-*.*s", width, width,
			       he->parent ? he->parent->name : "[other]");

struct sort_entry sort_parent = {
	.se_header	= "Parent symbol",
	.se_cmp		= sort__parent_cmp,
	.se_snprintf	= hist_entry__parent_snprintf,
	.se_width_idx	= HISTC_PARENT,

sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
	return right->cpu - left->cpu;

static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);

struct sort_entry sort_cpu = {
	.se_cmp		= sort__cpu_cmp,
	.se_snprintf	= hist_entry__cpu_snprintf,
	.se_width_idx	= HISTC_CPU,

sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
	return right->socket - left->socket;

static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
	return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);

static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
	int sk = *(const int *)arg;

	if (type != HIST_FILTER__SOCKET)

	return sk >= 0 && he->socket != sk;

struct sort_entry sort_socket = {
	.se_header	= "Socket",
	.se_cmp		= sort__socket_cmp,
	.se_snprintf	= hist_entry__socket_snprintf,
	.se_filter	= hist_entry__socket_filter,
	.se_width_idx	= HISTC_SOCKET,
static char *get_trace_output(struct hist_entry *he)
	struct trace_seq seq;
	struct perf_evsel *evsel;
	struct pevent_record rec = {
		.data = he->raw_data,
		.size = he->raw_size,

	evsel = hists_to_evsel(he->hists);

	trace_seq_init(&seq);
	if (symbol_conf.raw_trace) {
		pevent_print_fields(&seq, he->raw_data, he->raw_size,
		pevent_event_info(&seq, evsel->tp_format, &rec);

sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
	struct perf_evsel *evsel;

	evsel = hists_to_evsel(left->hists);
	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)

	if (left->trace_output == NULL)
		left->trace_output = get_trace_output(left);
	if (right->trace_output == NULL)
		right->trace_output = get_trace_output(right);

	return strcmp(right->trace_output, left->trace_output);

static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
	struct perf_evsel *evsel;

	evsel = hists_to_evsel(he->hists);
	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
		return scnprintf(bf, size, "%-.*s", width, "N/A");

	if (he->trace_output == NULL)
		he->trace_output = get_trace_output(he);
	return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);

struct sort_entry sort_trace = {
	.se_header	= "Trace output",
	.se_cmp		= sort__trace_cmp,
	.se_snprintf	= hist_entry__trace_snprintf,
	.se_width_idx	= HISTC_TRACE,
/* sort keys for branch stacks */

sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->from.map,
			      right->branch_info->from.map);

static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
		return _hist_entry__dso_snprintf(he->branch_info->from.map,

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");

static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)

	return dso && (!he->branch_info || !he->branch_info->from.map ||
		       he->branch_info->from.map->dso != dso);

sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->to.map,
			      right->branch_info->to.map);

static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
		return _hist_entry__dso_snprintf(he->branch_info->to.map,

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");

static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)

	return dso && (!he->branch_info || !he->branch_info->to.map ||
		       he->branch_info->to.map->dso != dso);

sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
	struct addr_map_symbol *from_l, *from_r;
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	from_l = &left->branch_info->from;
	from_r = &right->branch_info->from;

	if (!from_l->sym && !from_r->sym)
		return _sort__addr_cmp(from_l->addr, from_r->addr);

	return _sort__sym_cmp(from_l->sym, from_r->sym);

sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
	struct addr_map_symbol *to_l, *to_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	to_l = &left->branch_info->to;
	to_r = &right->branch_info->to;

	if (!to_l->sym && !to_r->sym)
		return _sort__addr_cmp(to_l->addr, to_r->addr);

	return _sort__sym_cmp(to_l->sym, to_r->sym);

static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
	if (he->branch_info) {
		struct addr_map_symbol *from = &he->branch_info->from;

		return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
						 he->level, bf, size, width);

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");

static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
	if (he->branch_info) {
		struct addr_map_symbol *to = &he->branch_info->to;

		return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
						 he->level, bf, size, width);

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");

static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)

	return sym && !(he->branch_info && he->branch_info->from.sym &&
			strstr(he->branch_info->from.sym->name, sym));

static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)

	return sym && !(he->branch_info && he->branch_info->to.sym &&
			strstr(he->branch_info->to.sym->name, sym));

struct sort_entry sort_dso_from = {
	.se_header	= "Source Shared Object",
	.se_cmp		= sort__dso_from_cmp,
	.se_snprintf	= hist_entry__dso_from_snprintf,
	.se_filter	= hist_entry__dso_from_filter,
	.se_width_idx	= HISTC_DSO_FROM,

struct sort_entry sort_dso_to = {
	.se_header	= "Target Shared Object",
	.se_cmp		= sort__dso_to_cmp,
	.se_snprintf	= hist_entry__dso_to_snprintf,
	.se_filter	= hist_entry__dso_to_filter,
	.se_width_idx	= HISTC_DSO_TO,

struct sort_entry sort_sym_from = {
	.se_header	= "Source Symbol",
	.se_cmp		= sort__sym_from_cmp,
	.se_snprintf	= hist_entry__sym_from_snprintf,
	.se_filter	= hist_entry__sym_from_filter,
	.se_width_idx	= HISTC_SYMBOL_FROM,

struct sort_entry sort_sym_to = {
	.se_header	= "Target Symbol",
	.se_cmp		= sort__sym_to_cmp,
	.se_snprintf	= hist_entry__sym_to_snprintf,
	.se_filter	= hist_entry__sym_to_filter,
	.se_width_idx	= HISTC_SYMBOL_TO,
sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
	p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;

static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.predicted)
		else if (he->branch_info->flags.mispred)

	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);

sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
	return left->branch_info->flags.cycles -
		right->branch_info->flags.cycles;

static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
	if (he->branch_info->flags.cycles == 0)
		return repsep_snprintf(bf, size, "%-*s", width, "-");
	return repsep_snprintf(bf, size, "%-*hd", width,
			       he->branch_info->flags.cycles);

struct sort_entry sort_cycles = {
	.se_header	= "Basic Block Cycles",
	.se_cmp		= sort__cycles_cmp,
	.se_snprintf	= hist_entry__cycles_snprintf,
	.se_width_idx	= HISTC_CYCLES,
/* --sort daddr_sym */

sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
	uint64_t l = 0, r = 0;

		l = left->mem_info->daddr.addr;
		r = right->mem_info->daddr.addr;

	return (int64_t)(r - l);

static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
	struct map *map = NULL;
	struct symbol *sym = NULL;

		addr = he->mem_info->daddr.addr;
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;

	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,

sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
	uint64_t l = 0, r = 0;

		l = left->mem_info->iaddr.addr;
		r = right->mem_info->iaddr.addr;

	return (int64_t)(r - l);

static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
	struct map *map = NULL;
	struct symbol *sym = NULL;

		addr = he->mem_info->iaddr.addr;
		map = he->mem_info->iaddr.map;
		sym = he->mem_info->iaddr.sym;

	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,

sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
	struct map *map_l = NULL;
	struct map *map_r = NULL;

		map_l = left->mem_info->daddr.map;
		map_r = right->mem_info->daddr.map;

	return _sort__dso_cmp(map_l, map_r);

static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
	struct map *map = NULL;

		map = he->mem_info->daddr.map;

	return _hist_entry__dso_snprintf(map, bf, size, width);
sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

		data_src_l = left->mem_info->data_src;
		data_src_l.mem_lock = PERF_MEM_LOCK_NA;

		data_src_r = right->mem_info->data_src;
		data_src_r.mem_lock = PERF_MEM_LOCK_NA;

	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);

static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
	perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%.*s", width, out);

sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

		data_src_l = left->mem_info->data_src;
		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;

		data_src_r = right->mem_info->data_src;
		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;

	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);

static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
	perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);

sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

		data_src_l = left->mem_info->data_src;
		data_src_l.mem_lvl = PERF_MEM_LVL_NA;

		data_src_r = right->mem_info->data_src;
		data_src_r.mem_lvl = PERF_MEM_LVL_NA;

	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);

static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
	perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);

sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

		data_src_l = left->mem_info->data_src;
		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;

		data_src_r = right->mem_info->data_src;
		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;

	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);

static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
	perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
	struct map *l_map, *r_map;

	if (!left->mem_info)  return -1;
	if (!right->mem_info) return 1;

	/* group event types together */
	if (left->cpumode > right->cpumode) return -1;
	if (left->cpumode < right->cpumode) return 1;

	l_map = left->mem_info->daddr.map;
	r_map = right->mem_info->daddr.map;

	/* if both are NULL, jump to sort on al_addr instead */
	if (!l_map && !r_map)

	if (!l_map) return -1;
	if (!r_map) return 1;

	if (l_map->maj > r_map->maj) return -1;
	if (l_map->maj < r_map->maj) return 1;

	if (l_map->min > r_map->min) return -1;
	if (l_map->min < r_map->min) return 1;

	if (l_map->ino > r_map->ino) return -1;
	if (l_map->ino < r_map->ino) return 1;

	if (l_map->ino_generation > r_map->ino_generation) return -1;
	if (l_map->ino_generation < r_map->ino_generation) return 1;

	/*
	 * Addresses with no major/minor numbers are assumed to be
	 * anonymous in userspace.  Sort those on pid then address.
	 *
	 * The kernel and non-zero major/minor mapped areas are
	 * assumed to be unity mapped.  Sort those on address.
	 */
	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
	    (!(l_map->flags & MAP_SHARED)) &&
	    !l_map->maj && !l_map->min && !l_map->ino &&
	    !l_map->ino_generation) {
		/* userspace anonymous */

		if (left->thread->pid_ > right->thread->pid_) return -1;
		if (left->thread->pid_ < right->thread->pid_) return 1;

	/* al_addr does all the right addr - start + offset calculations */
	l = cl_address(left->mem_info->daddr.al_addr);
	r = cl_address(right->mem_info->daddr.al_addr);

	if (l > r) return -1;
	if (l < r) return 1;
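
	/*
	 * cl_address() is expected to round an address down to its cache
	 * line (e.g. with 64-byte lines both 0x1234 and 0x123f map to
	 * 0x1200); the helper itself is defined outside this file, so treat
	 * the exact line size as configuration-dependent.
	 */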
static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
	struct map *map = NULL;
	struct symbol *sym = NULL;
	char level = he->level;

		addr = cl_address(he->mem_info->daddr.al_addr);
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;

		/* print [s] for shared data mmaps */
		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
		    map && (map->type == MAP__VARIABLE) &&
		    (map->flags & MAP_SHARED) &&
		    (map->maj || map->min || map->ino ||
		     map->ino_generation))

	return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,

struct sort_entry sort_mispredict = {
	.se_header	= "Branch Mispredicted",
	.se_cmp		= sort__mispredict_cmp,
	.se_snprintf	= hist_entry__mispredict_snprintf,
	.se_width_idx	= HISTC_MISPREDICT,
static u64 he_weight(struct hist_entry *he)
	return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
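
/*
 * he_weight() reports the average sample weight per event: as an
 * illustrative example, an entry that accumulated weight 300 over 3 events
 * has a local weight of 100, and 0 is returned when no events were
 * recorded, avoiding a division by zero.
 */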
sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
	return he_weight(left) - he_weight(right);

static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
	return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));

struct sort_entry sort_local_weight = {
	.se_header	= "Local Weight",
	.se_cmp		= sort__local_weight_cmp,
	.se_snprintf	= hist_entry__local_weight_snprintf,
	.se_width_idx	= HISTC_LOCAL_WEIGHT,

sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
	return left->stat.weight - right->stat.weight;

static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
					      size_t size, unsigned int width)
	return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);

struct sort_entry sort_global_weight = {
	.se_header	= "Weight",
	.se_cmp		= sort__global_weight_cmp,
	.se_snprintf	= hist_entry__global_weight_snprintf,
	.se_width_idx	= HISTC_GLOBAL_WEIGHT,

struct sort_entry sort_mem_daddr_sym = {
	.se_header	= "Data Symbol",
	.se_cmp		= sort__daddr_cmp,
	.se_snprintf	= hist_entry__daddr_snprintf,
	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,

struct sort_entry sort_mem_iaddr_sym = {
	.se_header	= "Code Symbol",
	.se_cmp		= sort__iaddr_cmp,
	.se_snprintf	= hist_entry__iaddr_snprintf,
	.se_width_idx	= HISTC_MEM_IADDR_SYMBOL,

struct sort_entry sort_mem_daddr_dso = {
	.se_header	= "Data Object",
	.se_cmp		= sort__dso_daddr_cmp,
	.se_snprintf	= hist_entry__dso_daddr_snprintf,
	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,

struct sort_entry sort_mem_locked = {
	.se_header	= "Locked",
	.se_cmp		= sort__locked_cmp,
	.se_snprintf	= hist_entry__locked_snprintf,
	.se_width_idx	= HISTC_MEM_LOCKED,

struct sort_entry sort_mem_tlb = {
	.se_header	= "TLB access",
	.se_cmp		= sort__tlb_cmp,
	.se_snprintf	= hist_entry__tlb_snprintf,
	.se_width_idx	= HISTC_MEM_TLB,

struct sort_entry sort_mem_lvl = {
	.se_header	= "Memory access",
	.se_cmp		= sort__lvl_cmp,
	.se_snprintf	= hist_entry__lvl_snprintf,
	.se_width_idx	= HISTC_MEM_LVL,

struct sort_entry sort_mem_snoop = {
	.se_header	= "Snoop",
	.se_cmp		= sort__snoop_cmp,
	.se_snprintf	= hist_entry__snoop_snprintf,
	.se_width_idx	= HISTC_MEM_SNOOP,

struct sort_entry sort_mem_dcacheline = {
	.se_header	= "Data Cacheline",
	.se_cmp		= sort__dcacheline_cmp,
	.se_snprintf	= hist_entry__dcacheline_snprintf,
	.se_width_idx	= HISTC_MEM_DCACHELINE,
sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.abort !=
		right->branch_info->flags.abort;

static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.abort)

	return repsep_snprintf(bf, size, "%-*s", width, out);

struct sort_entry sort_abort = {
	.se_header	= "Transaction abort",
	.se_cmp		= sort__abort_cmp,
	.se_snprintf	= hist_entry__abort_snprintf,
	.se_width_idx	= HISTC_ABORT,

sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.in_tx !=
		right->branch_info->flags.in_tx;

static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.in_tx)

	return repsep_snprintf(bf, size, "%-*s", width, out);

struct sort_entry sort_in_tx = {
	.se_header	= "Branch in transaction",
	.se_cmp		= sort__in_tx_cmp,
	.se_snprintf	= hist_entry__in_tx_snprintf,
	.se_width_idx	= HISTC_IN_TX,
sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
	return left->transaction - right->transaction;

static inline char *add_str(char *p, const char *str)
	return p + strlen(str);
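
/*
 * Each entry in the txbits[] table below pairs a PERF_TXN_* flag with the
 * text printed for it and a skip_for_len marker consulted by
 * hist_entry__transaction_len() when sizing the column; e.g. a sample with
 * TRANSACTION and CAPACITY-WRITE set would be rendered as "TX CAP-WRITE ".
 */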
static struct txbit {
	{ PERF_TXN_ELISION,		"EL ",		0 },
	{ PERF_TXN_TRANSACTION,		"TX ",		1 },
	{ PERF_TXN_SYNC,		"SYNC ",	1 },
	{ PERF_TXN_ASYNC,		"ASYNC ",	0 },
	{ PERF_TXN_RETRY,		"RETRY ",	0 },
	{ PERF_TXN_CONFLICT,		"CON ",		0 },
	{ PERF_TXN_CAPACITY_WRITE,	"CAP-WRITE ",	1 },
	{ PERF_TXN_CAPACITY_READ,	"CAP-READ ",	0 },

int hist_entry__transaction_len(void)
	for (i = 0; txbits[i].name; i++) {
		if (!txbits[i].skip_for_len)
			len += strlen(txbits[i].name);

	len += 4; /* :XX<space> */

static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
					    size_t size, unsigned int width)
	u64 t = he->transaction;

	for (i = 0; txbits[i].name; i++)
		if (txbits[i].flag & t)
			p = add_str(p, txbits[i].name);
	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
		p = add_str(p, "NEITHER ");
	if (t & PERF_TXN_ABORT_MASK) {
		sprintf(p, ":%" PRIx64,
			(t & PERF_TXN_ABORT_MASK) >>
			PERF_TXN_ABORT_SHIFT);

	return repsep_snprintf(bf, size, "%-*s", width, buf);

struct sort_entry sort_transaction = {
	.se_header	= "Transaction ",
	.se_cmp		= sort__transaction_cmp,
	.se_snprintf	= hist_entry__transaction_snprintf,
	.se_width_idx	= HISTC_TRANSACTION,
struct sort_dimension {
	struct sort_entry	*entry;

#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
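
/*
 * For illustration, DIM(SORT_PID, "pid", sort_thread) expands to
 * [SORT_PID] = { .name = "pid", .entry = &(sort_thread) }, i.e. the arrays
 * below are indexed by the SORT_* enum and map each --sort token to its
 * struct sort_entry.
 */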
static struct sort_dimension common_sort_dimensions[] = {
	DIM(SORT_PID, "pid", sort_thread),
	DIM(SORT_COMM, "comm", sort_comm),
	DIM(SORT_DSO, "dso", sort_dso),
	DIM(SORT_SYM, "symbol", sort_sym),
	DIM(SORT_PARENT, "parent", sort_parent),
	DIM(SORT_CPU, "cpu", sort_cpu),
	DIM(SORT_SOCKET, "socket", sort_socket),
	DIM(SORT_SRCLINE, "srcline", sort_srcline),
	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
	DIM(SORT_TRACE, "trace", sort_trace),

#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }

static struct sort_dimension bstack_sort_dimensions[] = {
	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
	DIM(SORT_ABORT, "abort", sort_abort),
	DIM(SORT_CYCLES, "cycles", sort_cycles),

#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }

static struct sort_dimension memory_sort_dimensions[] = {
	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),

struct hpp_dimension {
	struct perf_hpp_fmt	*fmt;

#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }

static struct hpp_dimension hpp_sort_dimensions[] = {
	DIM(PERF_HPP__OVERHEAD, "overhead"),
	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
	DIM(PERF_HPP__SAMPLES, "sample"),
	DIM(PERF_HPP__PERIOD, "period"),
struct hpp_sort_entry {
	struct perf_hpp_fmt hpp;
	struct sort_entry *se;

void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
	struct hpp_sort_entry *hse;

	if (!perf_hpp__is_sort_entry(fmt))

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));

static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct perf_evsel *evsel)
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

		len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);

	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);

static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct perf_evsel *evsel)
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

		len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);

static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

		len = hists__col_len(he->hists, hse->se->se_width_idx);

	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);

static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	return hse->se->se_cmp(a, b);

static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
				    struct hist_entry *a, struct hist_entry *b)
	struct hpp_sort_entry *hse;
	int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
	return collapse_fn(a, b);

static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
				struct hist_entry *a, struct hist_entry *b)
	struct hpp_sort_entry *hse;
	int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
	return sort_fn(a, b);

bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
	return format->header == __sort__hpp_header;

#define MK_SORT_ENTRY_CHK(key) \
bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \
	struct hpp_sort_entry *hse; \
	if (!perf_hpp__is_sort_entry(fmt)) \
	hse = container_of(fmt, struct hpp_sort_entry, hpp); \
	return hse->se == &sort_ ## key ; \
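
/*
 * For illustration, MK_SORT_ENTRY_CHK(sym) generates a
 * perf_hpp__is_sym_entry(fmt) helper that reports whether the given format
 * is the hpp wrapper around &sort_sym; the instantiations below provide one
 * such predicate per common sort entry.
 */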
MK_SORT_ENTRY_CHK(trace)
MK_SORT_ENTRY_CHK(srcline)
MK_SORT_ENTRY_CHK(srcfile)
MK_SORT_ENTRY_CHK(thread)
MK_SORT_ENTRY_CHK(comm)
MK_SORT_ENTRY_CHK(dso)
MK_SORT_ENTRY_CHK(sym)

static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
	struct hpp_sort_entry *hse_a;
	struct hpp_sort_entry *hse_b;

	if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))

	hse_a = container_of(a, struct hpp_sort_entry, hpp);
	hse_b = container_of(b, struct hpp_sort_entry, hpp);

	return hse_a->se == hse_b->se;

static void hse_free(struct perf_hpp_fmt *fmt)
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

static struct hpp_sort_entry *
__sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
	struct hpp_sort_entry *hse;

	hse = malloc(sizeof(*hse));
		pr_err("Memory allocation failed\n");

	hse->se = sd->entry;
	hse->hpp.name = sd->entry->se_header;
	hse->hpp.header = __sort__hpp_header;
	hse->hpp.width = __sort__hpp_width;
	hse->hpp.entry = __sort__hpp_entry;
	hse->hpp.color = NULL;

	hse->hpp.cmp = __sort__hpp_cmp;
	hse->hpp.collapse = __sort__hpp_collapse;
	hse->hpp.sort = __sort__hpp_sort;
	hse->hpp.equal = __sort__hpp_equal;
	hse->hpp.free = hse_free;

	INIT_LIST_HEAD(&hse->hpp.list);
	INIT_LIST_HEAD(&hse->hpp.sort_list);
	hse->hpp.elide = false;

	hse->hpp.user_len = 0;
	hse->hpp.level = level;

static void hpp_free(struct perf_hpp_fmt *fmt)

static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
	struct perf_hpp_fmt *fmt;

	fmt = memdup(hd->fmt, sizeof(*fmt));

	INIT_LIST_HEAD(&fmt->list);
	INIT_LIST_HEAD(&fmt->sort_list);
	fmt->free = hpp_free;

int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_filter == NULL)
		/*
		 * A hist entry is filtered if any of the sort keys in the
		 * hpp list is applied, but non-matching filter types should
		 * be skipped.
		 */
		r = hse->se->se_filter(he, type, arg);

static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
					  struct perf_hpp_list *list,
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);

	perf_hpp_list__register_sort_field(list, &hse->hpp);

static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
					    struct perf_hpp_list *list)
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);

	perf_hpp_list__column_register(list, &hse->hpp);
struct hpp_dynamic_entry {
	struct perf_hpp_fmt hpp;
	struct perf_evsel *evsel;
	struct format_field *field;
	unsigned dynamic_len;

static int hde_width(struct hpp_dynamic_entry *hde)
	if (!hde->hpp.len) {
		int len = hde->dynamic_len;
		int namelen = strlen(hde->field->name);
		int fieldlen = hde->field->size;

		if (!(hde->field->flags & FIELD_IS_STRING)) {
			/* length for print hex numbers */
			fieldlen = hde->field->size * 2 + 2;

	return hde->hpp.len;

static void update_dynamic_len(struct hpp_dynamic_entry *hde,
			       struct hist_entry *he)
	struct format_field *field = hde->field;

	/* parse pretty print result and update max length */
	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	namelen = strlen(field->name);
	str = he->trace_output;

		pos = strchr(str, ' ');
			pos = str + strlen(str);

		if (!strncmp(str, field->name, namelen)) {
			if (len > hde->dynamic_len)
				hde->dynamic_len = len;

static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct perf_evsel *evsel __maybe_unused)
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

		len = hde_width(hde);

	return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
static int __sort__hde_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct perf_evsel *evsel __maybe_unused)
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

		len = hde_width(hde);

bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	return hists_to_evsel(hists) == hde->evsel;

static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;
	struct format_field *field;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

		len = hde_width(hde);

	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	namelen = strlen(field->name);
	str = he->trace_output;

		pos = strchr(str, ' ');
			pos = str + strlen(str);

		if (!strncmp(str, field->name, namelen)) {
			str = strndup(str, pos - str);

				return scnprintf(hpp->buf, hpp->size,
						 "%*.*s", len, len, "ERROR");

		struct trace_seq seq;

		trace_seq_init(&seq);
		pevent_print_field(&seq, he->raw_data, hde->field);

	ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
	struct hpp_dynamic_entry *hde;
	struct format_field *field;
	unsigned offset, size;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	update_dynamic_len(hde, a);

	if (field->flags & FIELD_IS_DYNAMIC) {
		unsigned long long dyn;

		pevent_read_number_field(field, a->raw_data, &dyn);
		offset = dyn & 0xffff;
		size = (dyn >> 16) & 0xffff;

		/* record max width for output */
		if (size > hde->dynamic_len)
			hde->dynamic_len = size;

		offset = field->offset;

	return memcmp(a->raw_data + offset, b->raw_data + offset, size);

bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
	return fmt->cmp == __sort__hde_cmp;

static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
	struct hpp_dynamic_entry *hde_a;
	struct hpp_dynamic_entry *hde_b;

	if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))

	hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
	hde_b = container_of(b, struct hpp_dynamic_entry, hpp);

	return hde_a->field == hde_b->field;

static void hde_free(struct perf_hpp_fmt *fmt)
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
static struct hpp_dynamic_entry *
__alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field,
	struct hpp_dynamic_entry *hde;

	hde = malloc(sizeof(*hde));
		pr_debug("Memory allocation failed\n");

	hde->dynamic_len = 0;

	hde->hpp.name = field->name;
	hde->hpp.header = __sort__hde_header;
	hde->hpp.width = __sort__hde_width;
	hde->hpp.entry = __sort__hde_entry;
	hde->hpp.color = NULL;

	hde->hpp.cmp = __sort__hde_cmp;
	hde->hpp.collapse = __sort__hde_cmp;
	hde->hpp.sort = __sort__hde_cmp;
	hde->hpp.equal = __sort__hde_equal;
	hde->hpp.free = hde_free;

	INIT_LIST_HEAD(&hde->hpp.list);
	INIT_LIST_HEAD(&hde->hpp.sort_list);
	hde->hpp.elide = false;

	hde->hpp.user_len = 0;
	hde->hpp.level = level;

struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
	struct perf_hpp_fmt *new_fmt = NULL;

	if (perf_hpp__is_sort_entry(fmt)) {
		struct hpp_sort_entry *hse, *new_hse;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		new_hse = memdup(hse, sizeof(*hse));

			new_fmt = &new_hse->hpp;
	} else if (perf_hpp__is_dynamic_entry(fmt)) {
		struct hpp_dynamic_entry *hde, *new_hde;

		hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
		new_hde = memdup(hde, sizeof(*hde));

			new_fmt = &new_hde->hpp;

		new_fmt = memdup(fmt, sizeof(*fmt));

	INIT_LIST_HEAD(&new_fmt->list);
	INIT_LIST_HEAD(&new_fmt->sort_list);
static int parse_field_name(char *str, char **event, char **field, char **opt)
	char *event_name, *field_name, *opt_name;

	field_name = strchr(str, '.');
		*field_name++ = '\0';

	opt_name = strchr(field_name, '/');

	*event = event_name;
	*field = field_name;

/* find match evsel using a given event name. The event name can be:
 *  1. '%' + event index (e.g. '%1' for first event)
 *  2. full event name (e.g. sched:sched_switch)
 *  3. partial event name (should not contain ':')
 */
static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
	struct perf_evsel *evsel = NULL;
	struct perf_evsel *pos;

	if (event_name[0] == '%') {
		int nr = strtol(event_name+1, NULL, 0);

		if (nr > evlist->nr_entries)

		evsel = perf_evlist__first(evlist);
			evsel = perf_evsel__next(evsel);

	full_name = !!strchr(event_name, ':');
	evlist__for_each(evlist, pos) {
		if (full_name && !strcmp(pos->name, event_name))

		if (!full_name && strstr(pos->name, event_name)) {
				pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
					 event_name, evsel->name, pos->name);
static int __dynamic_dimension__add(struct perf_evsel *evsel,
				    struct format_field *field,
				    bool raw_trace, int level)
	struct hpp_dynamic_entry *hde;

	hde = __alloc_dynamic_entry(evsel, field, level);

	hde->raw_trace = raw_trace;

	perf_hpp__register_sort_field(&hde->hpp);

static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace, int level)
	struct format_field *field;

	field = evsel->tp_format->format.fields;
		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);

		field = field->next;

static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace,
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)

		ret = add_evsel_fields(evsel, raw_trace, level);

static int add_all_matching_fields(struct perf_evlist *evlist,
				   char *field_name, bool raw_trace, int level)
	struct perf_evsel *evsel;
	struct format_field *field;

	evlist__for_each(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)

		field = pevent_find_any_field(evsel->tp_format, field_name);

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);

static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok,
	char *str, *event_name, *field_name, *opt_name;
	struct perf_evsel *evsel;
	struct format_field *field;
	bool raw_trace = symbol_conf.raw_trace;

	if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {

		if (strcmp(opt_name, "raw")) {
			pr_debug("unsupported field option %s\n", opt_name);

	if (!strcmp(field_name, "trace_fields")) {
		ret = add_all_dynamic_fields(evlist, raw_trace, level);

	if (event_name == NULL) {
		ret = add_all_matching_fields(evlist, field_name, raw_trace, level);

	evsel = find_evsel(evlist, event_name);
	if (evsel == NULL) {
		pr_debug("Cannot find event: %s\n", event_name);

	if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
		pr_debug("%s is not a tracepoint event\n", event_name);

	if (!strcmp(field_name, "*")) {
		ret = add_evsel_fields(evsel, raw_trace, level);

		field = pevent_find_any_field(evsel->tp_format, field_name);
		if (field == NULL) {
			pr_debug("Cannot find event field for %s.%s\n",
				 event_name, field_name);

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
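
/*
 * Taken together with parse_field_name() and find_evsel() above, the
 * dynamic-entry path accepts --sort tokens of roughly these shapes
 * (illustrative examples, not an exhaustive list): "trace_fields" for every
 * field of every tracepoint, "sched:sched_switch.prev_comm" for one field
 * of one event, "<event>.*" for all fields of one event, a bare field name
 * with no event part to match any tracepoint carrying that field, and an
 * optional "/raw" suffix to force raw output for that field.
 */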
static int __sort_dimension__add(struct sort_dimension *sd,
				 struct perf_hpp_list *list,
	if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)

	if (sd->entry->se_collapse)
		sort__need_collapse = 1;

static int __hpp_dimension__add(struct hpp_dimension *hd,
				struct perf_hpp_list *list,
	struct perf_hpp_fmt *fmt;

	fmt = __hpp_dimension__alloc_hpp(hd, level);

	perf_hpp_list__register_sort_field(list, fmt);

static int __sort_dimension__add_output(struct perf_hpp_list *list,
					struct sort_dimension *sd)
	if (__sort_dimension__add_hpp_output(sd, list) < 0)

static int __hpp_dimension__add_output(struct perf_hpp_list *list,
				       struct hpp_dimension *hd)
	struct perf_hpp_fmt *fmt;

	fmt = __hpp_dimension__alloc_hpp(hd, 0);

	perf_hpp_list__column_register(list, fmt);

int hpp_dimension__add_output(unsigned col)
	BUG_ON(col >= PERF_HPP__MAX_INDEX);
	return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);

static int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
			       struct perf_evlist *evlist,
	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))

		if (sd->entry == &sort_parent) {
			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);

				regerror(ret, &parent_regex, err, sizeof(err));
				pr_err("Invalid regex: %s\n%s", parent_pattern, err);

			sort__has_parent = 1;
		} else if (sd->entry == &sort_sym) {
			/*
			 * perf diff displays the performance difference
			 * among two or more perf.data files. Those files
			 * could come from different binaries, so we should
			 * not compare their ips, but the names of their
			 * symbols.
			 */
			if (sort__mode == SORT_MODE__DIFF)
				sd->entry->se_collapse = sort__sym_sort;

		} else if (sd->entry == &sort_dso) {
		} else if (sd->entry == &sort_socket) {
			sort__has_socket = 1;
		} else if (sd->entry == &sort_thread) {
			sort__has_thread = 1;
		} else if (sd->entry == &sort_comm) {

		return __sort_dimension__add(sd, list, level);

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))

		return __hpp_dimension__add(hd, list, level);

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))

		if (sort__mode != SORT_MODE__BRANCH)

		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)

		__sort_dimension__add(sd, list, level);

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))

		if (sort__mode != SORT_MODE__MEMORY)

		if (sd->entry == &sort_mem_daddr_sym)

		__sort_dimension__add(sd, list, level);

	if (!add_dynamic_entry(evlist, tok, level))

static int setup_sort_list(struct perf_hpp_list *list, char *str,
			   struct perf_evlist *evlist)
	bool in_group = false;

		tmp = strpbrk(str, "{}, ");

				next_level = level + 1;

			else if (*tmp == '}')

		ret = sort_dimension__add(list, tok, evlist, level);
		if (ret == -EINVAL) {
			error("Invalid --sort key: `%s'", tok);
		} else if (ret == -ESRCH) {
			error("Unknown --sort key: `%s'", tok);
static const char *get_default_sort_order(struct perf_evlist *evlist)
	const char *default_sort_orders[] = {
		default_branch_sort_order,
		default_mem_sort_order,
		default_top_sort_order,
		default_diff_sort_order,
		default_tracepoint_sort_order,

	bool use_trace = true;
	struct perf_evsel *evsel;

	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));

	evlist__for_each(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {

	sort__mode = SORT_MODE__TRACEPOINT;
	if (symbol_conf.raw_trace)
		return "trace_fields";

	return default_sort_orders[sort__mode];

static int setup_sort_order(struct perf_evlist *evlist)
	char *new_sort_order;

	/*
	 * Append '+'-prefixed sort order to the default sort
	 */
	if (!sort_order || is_strict_order(sort_order))

	if (sort_order[1] == '\0') {
		error("Invalid --sort key: `+'");

	/*
	 * We allocate new sort_order string, but we never free it,
	 * because it's checked over the rest of the code.
	 */
	if (asprintf(&new_sort_order, "%s,%s",
		     get_default_sort_order(evlist), sort_order + 1) < 0) {
		error("Not enough memory to set up --sort");

	sort_order = new_sort_order;
/*
 * Adds a 'pre,' prefix into 'str' if 'pre' is
 * not already part of 'str'.
 */
static char *prefix_if_not_in(const char *pre, char *str)
	if (!str || strstr(str, pre))

	if (asprintf(&n, "%s,%s", pre, str) < 0)

static char *setup_overhead(char *keys)
	keys = prefix_if_not_in("overhead", keys);

	if (symbol_conf.cumulate_callchain)
		keys = prefix_if_not_in("overhead_children", keys);
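
/*
 * As an illustration of the two helpers above: with the default normal-mode
 * order "comm,dso,symbol", setup_overhead() would typically turn the key
 * string into "overhead,comm,dso,symbol" (additionally prefixing
 * "overhead_children" when children accumulation is enabled), while a user
 * option such as "--sort +cpu" is handled by setup_sort_order() appending
 * "cpu" to that default list rather than replacing it.
 */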
static int __setup_sorting(struct perf_evlist *evlist)
	const char *sort_keys;

	ret = setup_sort_order(evlist);

	sort_keys = sort_order;
	if (sort_keys == NULL) {
		if (is_strict_order(field_order)) {
			/*
			 * If user specified field order but no sort order,
			 * we'll honor it and not add default sort orders.
			 */

		sort_keys = get_default_sort_order(evlist);

	str = strdup(sort_keys);
		error("Not enough memory to setup sort keys");

	/*
	 * Prepend overhead fields for backward compatibility.
	 */
	if (!is_strict_order(field_order)) {
		str = setup_overhead(str);
			error("Not enough memory to setup overhead keys");

	ret = setup_sort_list(&perf_hpp_list, str, evlist);
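
/*
 * Rough flow of the sorting setup above: setup_sort_order() folds a "+key"
 * option into the defaults, get_default_sort_order() picks the per-mode
 * default when no keys were given, setup_overhead() prepends the overhead
 * column(s) unless a strict --fields order was requested, and
 * setup_sort_list() finally parses the resulting comma-separated string
 * into sort dimensions.
 */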
void perf_hpp__set_elide(int idx, bool elide)
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_width_idx == idx) {

static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
	if (list && strlist__nr_entries(list) == 1) {
		fprintf(fp, "# %s: %s\n", list_name,
			strlist__entry(list, 0)->s);

static bool get_elide(int idx, FILE *output)
		return __get_elide(symbol_conf.sym_list, "symbol", output);
		return __get_elide(symbol_conf.dso_list, "dso", output);
		return __get_elide(symbol_conf.comm_list, "comm", output);

	if (sort__mode != SORT_MODE__BRANCH)

	case HISTC_SYMBOL_FROM:
		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
	case HISTC_SYMBOL_TO:
		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
	case HISTC_DSO_FROM:
		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);

void sort__setup_elide(FILE *output)
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		fmt->elide = get_elide(hse->se->se_width_idx, output);
	/*
	 * It makes no sense to elide all of the sort entries,
	 * so just revert them so they show up again.
	 */
	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))

static int output_field_add(struct perf_hpp_list *list, char *tok)
	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))

		return __sort_dimension__add_output(list, sd);

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))

		return __hpp_dimension__add_output(list, hd);

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))

		return __sort_dimension__add_output(list, sd);

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))

		return __sort_dimension__add_output(list, sd);

static int setup_output_list(struct perf_hpp_list *list, char *str)
	for (tok = strtok_r(str, ", ", &tmp);
	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
		ret = output_field_add(list, tok);
		if (ret == -EINVAL) {
			error("Invalid --fields key: `%s'", tok);
		} else if (ret == -ESRCH) {
			error("Unknown --fields key: `%s'", tok);

static void reset_dimensions(void)
	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
		common_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
		hpp_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
		bstack_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
		memory_sort_dimensions[i].taken = 0;

bool is_strict_order(const char *order)
	return order && (*order != '+');

static int __setup_output_field(void)
	if (field_order == NULL)

	strp = str = strdup(field_order);
		error("Not enough memory to setup output fields");

	if (!is_strict_order(field_order))

	if (!strlen(strp)) {
		error("Invalid --fields key: `+'");

	ret = setup_output_list(&perf_hpp_list, strp);

int setup_sorting(struct perf_evlist *evlist)
	err = __setup_sorting(evlist);

	if (parent_pattern != default_parent_pattern) {
		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);

	/*
	 * perf diff doesn't use default hpp output fields.
	 */
	if (sort__mode != SORT_MODE__DIFF)

	err = __setup_output_field();

	/* copy sort keys to output fields */
	perf_hpp__setup_output_field(&perf_hpp_list);
	/* and then copy output fields to sort keys */
	perf_hpp__append_sort_keys(&perf_hpp_list);

	/* setup hists-specific output fields */
	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)

void reset_output_field(void)
	sort__need_collapse = 0;
	sort__has_parent = 0;

	perf_hpp__reset_output_field(&perf_hpp_list);