8 #include <traceevent/event-parse.h>
11 const char default_parent_pattern
[] = "^sys_|^do_page_fault";
12 const char *parent_pattern
= default_parent_pattern
;
13 const char default_sort_order
[] = "comm,dso,symbol";
14 const char default_branch_sort_order
[] = "comm,dso_from,symbol_from,symbol_to,cycles";
15 const char default_mem_sort_order
[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
16 const char default_top_sort_order
[] = "dso,symbol";
17 const char default_diff_sort_order
[] = "dso,symbol";
18 const char default_tracepoint_sort_order
[] = "trace";
19 const char *sort_order
;
20 const char *field_order
;
21 regex_t ignore_callees_regex
;
22 int have_ignore_callees
= 0;
23 int sort__need_collapse
= 0;
24 int sort__has_parent
= 0;
25 int sort__has_sym
= 0;
26 int sort__has_dso
= 0;
27 int sort__has_socket
= 0;
28 enum sort_mode sort__mode
= SORT_MODE__NORMAL
;
31 static int repsep_snprintf(char *bf
, size_t size
, const char *fmt
, ...)
37 n
= vsnprintf(bf
, size
, fmt
, ap
);
38 if (symbol_conf
.field_sep
&& n
> 0) {
42 sep
= strchr(sep
, *symbol_conf
.field_sep
);
55 static int64_t cmp_null(const void *l
, const void *r
)
68 sort__thread_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
70 return right
->thread
->tid
- left
->thread
->tid
;
73 static int hist_entry__thread_snprintf(struct hist_entry
*he
, char *bf
,
74 size_t size
, unsigned int width
)
76 const char *comm
= thread__comm_str(he
->thread
);
78 width
= max(7U, width
) - 6;
79 return repsep_snprintf(bf
, size
, "%5d:%-*.*s", he
->thread
->tid
,
80 width
, width
, comm
?: "");
83 struct sort_entry sort_thread
= {
84 .se_header
= " Pid:Command",
85 .se_cmp
= sort__thread_cmp
,
86 .se_snprintf
= hist_entry__thread_snprintf
,
87 .se_width_idx
= HISTC_THREAD
,
93 sort__comm_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
95 /* Compare the addr that should be unique among comm */
96 return strcmp(comm__str(right
->comm
), comm__str(left
->comm
));
100 sort__comm_collapse(struct hist_entry
*left
, struct hist_entry
*right
)
102 /* Compare the addr that should be unique among comm */
103 return strcmp(comm__str(right
->comm
), comm__str(left
->comm
));
107 sort__comm_sort(struct hist_entry
*left
, struct hist_entry
*right
)
109 return strcmp(comm__str(right
->comm
), comm__str(left
->comm
));
112 static int hist_entry__comm_snprintf(struct hist_entry
*he
, char *bf
,
113 size_t size
, unsigned int width
)
115 return repsep_snprintf(bf
, size
, "%-*.*s", width
, width
, comm__str(he
->comm
));
118 struct sort_entry sort_comm
= {
119 .se_header
= "Command",
120 .se_cmp
= sort__comm_cmp
,
121 .se_collapse
= sort__comm_collapse
,
122 .se_sort
= sort__comm_sort
,
123 .se_snprintf
= hist_entry__comm_snprintf
,
124 .se_width_idx
= HISTC_COMM
,
129 static int64_t _sort__dso_cmp(struct map
*map_l
, struct map
*map_r
)
131 struct dso
*dso_l
= map_l
? map_l
->dso
: NULL
;
132 struct dso
*dso_r
= map_r
? map_r
->dso
: NULL
;
133 const char *dso_name_l
, *dso_name_r
;
135 if (!dso_l
|| !dso_r
)
136 return cmp_null(dso_r
, dso_l
);
139 dso_name_l
= dso_l
->long_name
;
140 dso_name_r
= dso_r
->long_name
;
142 dso_name_l
= dso_l
->short_name
;
143 dso_name_r
= dso_r
->short_name
;
146 return strcmp(dso_name_l
, dso_name_r
);
150 sort__dso_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
152 return _sort__dso_cmp(right
->ms
.map
, left
->ms
.map
);
155 static int _hist_entry__dso_snprintf(struct map
*map
, char *bf
,
156 size_t size
, unsigned int width
)
158 if (map
&& map
->dso
) {
159 const char *dso_name
= !verbose
? map
->dso
->short_name
:
161 return repsep_snprintf(bf
, size
, "%-*.*s", width
, width
, dso_name
);
164 return repsep_snprintf(bf
, size
, "%-*.*s", width
, width
, "[unknown]");
167 static int hist_entry__dso_snprintf(struct hist_entry
*he
, char *bf
,
168 size_t size
, unsigned int width
)
170 return _hist_entry__dso_snprintf(he
->ms
.map
, bf
, size
, width
);
173 struct sort_entry sort_dso
= {
174 .se_header
= "Shared Object",
175 .se_cmp
= sort__dso_cmp
,
176 .se_snprintf
= hist_entry__dso_snprintf
,
177 .se_width_idx
= HISTC_DSO
,
182 static int64_t _sort__addr_cmp(u64 left_ip
, u64 right_ip
)
184 return (int64_t)(right_ip
- left_ip
);
187 static int64_t _sort__sym_cmp(struct symbol
*sym_l
, struct symbol
*sym_r
)
189 if (!sym_l
|| !sym_r
)
190 return cmp_null(sym_l
, sym_r
);
195 if (sym_l
->start
!= sym_r
->start
)
196 return (int64_t)(sym_r
->start
- sym_l
->start
);
198 return (int64_t)(sym_r
->end
- sym_l
->end
);
202 sort__sym_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
206 if (!left
->ms
.sym
&& !right
->ms
.sym
)
207 return _sort__addr_cmp(left
->ip
, right
->ip
);
210 * comparing symbol address alone is not enough since it's a
211 * relative address within a dso.
213 if (!sort__has_dso
) {
214 ret
= sort__dso_cmp(left
, right
);
219 return _sort__sym_cmp(left
->ms
.sym
, right
->ms
.sym
);
223 sort__sym_sort(struct hist_entry
*left
, struct hist_entry
*right
)
225 if (!left
->ms
.sym
|| !right
->ms
.sym
)
226 return cmp_null(left
->ms
.sym
, right
->ms
.sym
);
228 return strcmp(right
->ms
.sym
->name
, left
->ms
.sym
->name
);
231 static int _hist_entry__sym_snprintf(struct map
*map
, struct symbol
*sym
,
232 u64 ip
, char level
, char *bf
, size_t size
,
238 char o
= map
? dso__symtab_origin(map
->dso
) : '!';
239 ret
+= repsep_snprintf(bf
, size
, "%-#*llx %c ",
240 BITS_PER_LONG
/ 4 + 2, ip
, o
);
243 ret
+= repsep_snprintf(bf
+ ret
, size
- ret
, "[%c] ", level
);
245 if (map
->type
== MAP__VARIABLE
) {
246 ret
+= repsep_snprintf(bf
+ ret
, size
- ret
, "%s", sym
->name
);
247 ret
+= repsep_snprintf(bf
+ ret
, size
- ret
, "+0x%llx",
248 ip
- map
->unmap_ip(map
, sym
->start
));
249 ret
+= repsep_snprintf(bf
+ ret
, size
- ret
, "%-*s",
252 ret
+= repsep_snprintf(bf
+ ret
, size
- ret
, "%-*s",
257 size_t len
= BITS_PER_LONG
/ 4;
258 ret
+= repsep_snprintf(bf
+ ret
, size
- ret
, "%-#.*llx",
260 ret
+= repsep_snprintf(bf
+ ret
, size
- ret
, "%-*s",
270 static int hist_entry__sym_snprintf(struct hist_entry
*he
, char *bf
,
271 size_t size
, unsigned int width
)
273 return _hist_entry__sym_snprintf(he
->ms
.map
, he
->ms
.sym
, he
->ip
,
274 he
->level
, bf
, size
, width
);
277 struct sort_entry sort_sym
= {
278 .se_header
= "Symbol",
279 .se_cmp
= sort__sym_cmp
,
280 .se_sort
= sort__sym_sort
,
281 .se_snprintf
= hist_entry__sym_snprintf
,
282 .se_width_idx
= HISTC_SYMBOL
,
288 sort__srcline_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
290 if (!left
->srcline
) {
292 left
->srcline
= SRCLINE_UNKNOWN
;
294 struct map
*map
= left
->ms
.map
;
295 left
->srcline
= get_srcline(map
->dso
,
296 map__rip_2objdump(map
, left
->ip
),
300 if (!right
->srcline
) {
302 right
->srcline
= SRCLINE_UNKNOWN
;
304 struct map
*map
= right
->ms
.map
;
305 right
->srcline
= get_srcline(map
->dso
,
306 map__rip_2objdump(map
, right
->ip
),
307 right
->ms
.sym
, true);
310 return strcmp(right
->srcline
, left
->srcline
);
313 static int hist_entry__srcline_snprintf(struct hist_entry
*he
, char *bf
,
314 size_t size
, unsigned int width
)
316 return repsep_snprintf(bf
, size
, "%-*.*s", width
, width
, he
->srcline
);
319 struct sort_entry sort_srcline
= {
320 .se_header
= "Source:Line",
321 .se_cmp
= sort__srcline_cmp
,
322 .se_snprintf
= hist_entry__srcline_snprintf
,
323 .se_width_idx
= HISTC_SRCLINE
,
328 static char no_srcfile
[1];
330 static char *get_srcfile(struct hist_entry
*e
)
333 struct map
*map
= e
->ms
.map
;
335 sf
= __get_srcline(map
->dso
, map__rip_2objdump(map
, e
->ip
),
336 e
->ms
.sym
, false, true);
337 if (!strcmp(sf
, SRCLINE_UNKNOWN
))
349 sort__srcfile_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
351 if (!left
->srcfile
) {
353 left
->srcfile
= no_srcfile
;
355 left
->srcfile
= get_srcfile(left
);
357 if (!right
->srcfile
) {
359 right
->srcfile
= no_srcfile
;
361 right
->srcfile
= get_srcfile(right
);
363 return strcmp(right
->srcfile
, left
->srcfile
);
366 static int hist_entry__srcfile_snprintf(struct hist_entry
*he
, char *bf
,
367 size_t size
, unsigned int width
)
369 return repsep_snprintf(bf
, size
, "%-*.*s", width
, width
, he
->srcfile
);
372 struct sort_entry sort_srcfile
= {
373 .se_header
= "Source File",
374 .se_cmp
= sort__srcfile_cmp
,
375 .se_snprintf
= hist_entry__srcfile_snprintf
,
376 .se_width_idx
= HISTC_SRCFILE
,
382 sort__parent_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
384 struct symbol
*sym_l
= left
->parent
;
385 struct symbol
*sym_r
= right
->parent
;
387 if (!sym_l
|| !sym_r
)
388 return cmp_null(sym_l
, sym_r
);
390 return strcmp(sym_r
->name
, sym_l
->name
);
393 static int hist_entry__parent_snprintf(struct hist_entry
*he
, char *bf
,
394 size_t size
, unsigned int width
)
396 return repsep_snprintf(bf
, size
, "%-*.*s", width
, width
,
397 he
->parent
? he
->parent
->name
: "[other]");
400 struct sort_entry sort_parent
= {
401 .se_header
= "Parent symbol",
402 .se_cmp
= sort__parent_cmp
,
403 .se_snprintf
= hist_entry__parent_snprintf
,
404 .se_width_idx
= HISTC_PARENT
,
410 sort__cpu_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
412 return right
->cpu
- left
->cpu
;
415 static int hist_entry__cpu_snprintf(struct hist_entry
*he
, char *bf
,
416 size_t size
, unsigned int width
)
418 return repsep_snprintf(bf
, size
, "%*.*d", width
, width
, he
->cpu
);
421 struct sort_entry sort_cpu
= {
423 .se_cmp
= sort__cpu_cmp
,
424 .se_snprintf
= hist_entry__cpu_snprintf
,
425 .se_width_idx
= HISTC_CPU
,
431 sort__socket_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
433 return right
->socket
- left
->socket
;
436 static int hist_entry__socket_snprintf(struct hist_entry
*he
, char *bf
,
437 size_t size
, unsigned int width
)
439 return repsep_snprintf(bf
, size
, "%*.*d", width
, width
-3, he
->socket
);
442 struct sort_entry sort_socket
= {
443 .se_header
= "Socket",
444 .se_cmp
= sort__socket_cmp
,
445 .se_snprintf
= hist_entry__socket_snprintf
,
446 .se_width_idx
= HISTC_SOCKET
,
451 static char *get_trace_output(struct hist_entry
*he
)
453 struct trace_seq seq
;
454 struct perf_evsel
*evsel
;
455 struct pevent_record rec
= {
456 .data
= he
->raw_data
,
457 .size
= he
->raw_size
,
460 evsel
= hists_to_evsel(he
->hists
);
462 trace_seq_init(&seq
);
463 if (symbol_conf
.raw_trace
) {
464 pevent_print_fields(&seq
, he
->raw_data
, he
->raw_size
,
467 pevent_event_info(&seq
, evsel
->tp_format
, &rec
);
473 sort__trace_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
475 struct perf_evsel
*evsel
;
477 evsel
= hists_to_evsel(left
->hists
);
478 if (evsel
->attr
.type
!= PERF_TYPE_TRACEPOINT
)
481 if (left
->trace_output
== NULL
)
482 left
->trace_output
= get_trace_output(left
);
483 if (right
->trace_output
== NULL
)
484 right
->trace_output
= get_trace_output(right
);
486 hists__new_col_len(left
->hists
, HISTC_TRACE
, strlen(left
->trace_output
));
487 hists__new_col_len(right
->hists
, HISTC_TRACE
, strlen(right
->trace_output
));
489 return strcmp(right
->trace_output
, left
->trace_output
);
492 static int hist_entry__trace_snprintf(struct hist_entry
*he
, char *bf
,
493 size_t size
, unsigned int width
)
495 struct perf_evsel
*evsel
;
497 evsel
= hists_to_evsel(he
->hists
);
498 if (evsel
->attr
.type
!= PERF_TYPE_TRACEPOINT
)
499 return scnprintf(bf
, size
, "%-*.*s", width
, width
, "N/A");
501 if (he
->trace_output
== NULL
)
502 he
->trace_output
= get_trace_output(he
);
503 return repsep_snprintf(bf
, size
, "%-*.*s", width
, width
, he
->trace_output
);
506 struct sort_entry sort_trace
= {
507 .se_header
= "Trace output",
508 .se_cmp
= sort__trace_cmp
,
509 .se_snprintf
= hist_entry__trace_snprintf
,
510 .se_width_idx
= HISTC_TRACE
,
513 /* sort keys for branch stacks */
516 sort__dso_from_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
518 if (!left
->branch_info
|| !right
->branch_info
)
519 return cmp_null(left
->branch_info
, right
->branch_info
);
521 return _sort__dso_cmp(left
->branch_info
->from
.map
,
522 right
->branch_info
->from
.map
);
525 static int hist_entry__dso_from_snprintf(struct hist_entry
*he
, char *bf
,
526 size_t size
, unsigned int width
)
529 return _hist_entry__dso_snprintf(he
->branch_info
->from
.map
,
532 return repsep_snprintf(bf
, size
, "%-*.*s", width
, width
, "N/A");
536 sort__dso_to_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
538 if (!left
->branch_info
|| !right
->branch_info
)
539 return cmp_null(left
->branch_info
, right
->branch_info
);
541 return _sort__dso_cmp(left
->branch_info
->to
.map
,
542 right
->branch_info
->to
.map
);
545 static int hist_entry__dso_to_snprintf(struct hist_entry
*he
, char *bf
,
546 size_t size
, unsigned int width
)
549 return _hist_entry__dso_snprintf(he
->branch_info
->to
.map
,
552 return repsep_snprintf(bf
, size
, "%-*.*s", width
, width
, "N/A");
556 sort__sym_from_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
558 struct addr_map_symbol
*from_l
= &left
->branch_info
->from
;
559 struct addr_map_symbol
*from_r
= &right
->branch_info
->from
;
561 if (!left
->branch_info
|| !right
->branch_info
)
562 return cmp_null(left
->branch_info
, right
->branch_info
);
564 from_l
= &left
->branch_info
->from
;
565 from_r
= &right
->branch_info
->from
;
567 if (!from_l
->sym
&& !from_r
->sym
)
568 return _sort__addr_cmp(from_l
->addr
, from_r
->addr
);
570 return _sort__sym_cmp(from_l
->sym
, from_r
->sym
);
574 sort__sym_to_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
576 struct addr_map_symbol
*to_l
, *to_r
;
578 if (!left
->branch_info
|| !right
->branch_info
)
579 return cmp_null(left
->branch_info
, right
->branch_info
);
581 to_l
= &left
->branch_info
->to
;
582 to_r
= &right
->branch_info
->to
;
584 if (!to_l
->sym
&& !to_r
->sym
)
585 return _sort__addr_cmp(to_l
->addr
, to_r
->addr
);
587 return _sort__sym_cmp(to_l
->sym
, to_r
->sym
);
590 static int hist_entry__sym_from_snprintf(struct hist_entry
*he
, char *bf
,
591 size_t size
, unsigned int width
)
593 if (he
->branch_info
) {
594 struct addr_map_symbol
*from
= &he
->branch_info
->from
;
596 return _hist_entry__sym_snprintf(from
->map
, from
->sym
, from
->addr
,
597 he
->level
, bf
, size
, width
);
600 return repsep_snprintf(bf
, size
, "%-*.*s", width
, width
, "N/A");
603 static int hist_entry__sym_to_snprintf(struct hist_entry
*he
, char *bf
,
604 size_t size
, unsigned int width
)
606 if (he
->branch_info
) {
607 struct addr_map_symbol
*to
= &he
->branch_info
->to
;
609 return _hist_entry__sym_snprintf(to
->map
, to
->sym
, to
->addr
,
610 he
->level
, bf
, size
, width
);
613 return repsep_snprintf(bf
, size
, "%-*.*s", width
, width
, "N/A");
616 struct sort_entry sort_dso_from
= {
617 .se_header
= "Source Shared Object",
618 .se_cmp
= sort__dso_from_cmp
,
619 .se_snprintf
= hist_entry__dso_from_snprintf
,
620 .se_width_idx
= HISTC_DSO_FROM
,
623 struct sort_entry sort_dso_to
= {
624 .se_header
= "Target Shared Object",
625 .se_cmp
= sort__dso_to_cmp
,
626 .se_snprintf
= hist_entry__dso_to_snprintf
,
627 .se_width_idx
= HISTC_DSO_TO
,
630 struct sort_entry sort_sym_from
= {
631 .se_header
= "Source Symbol",
632 .se_cmp
= sort__sym_from_cmp
,
633 .se_snprintf
= hist_entry__sym_from_snprintf
,
634 .se_width_idx
= HISTC_SYMBOL_FROM
,
637 struct sort_entry sort_sym_to
= {
638 .se_header
= "Target Symbol",
639 .se_cmp
= sort__sym_to_cmp
,
640 .se_snprintf
= hist_entry__sym_to_snprintf
,
641 .se_width_idx
= HISTC_SYMBOL_TO
,
645 sort__mispredict_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
649 if (!left
->branch_info
|| !right
->branch_info
)
650 return cmp_null(left
->branch_info
, right
->branch_info
);
652 mp
= left
->branch_info
->flags
.mispred
!= right
->branch_info
->flags
.mispred
;
653 p
= left
->branch_info
->flags
.predicted
!= right
->branch_info
->flags
.predicted
;
657 static int hist_entry__mispredict_snprintf(struct hist_entry
*he
, char *bf
,
658 size_t size
, unsigned int width
){
659 static const char *out
= "N/A";
661 if (he
->branch_info
) {
662 if (he
->branch_info
->flags
.predicted
)
664 else if (he
->branch_info
->flags
.mispred
)
668 return repsep_snprintf(bf
, size
, "%-*.*s", width
, width
, out
);
672 sort__cycles_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
674 return left
->branch_info
->flags
.cycles
-
675 right
->branch_info
->flags
.cycles
;
678 static int hist_entry__cycles_snprintf(struct hist_entry
*he
, char *bf
,
679 size_t size
, unsigned int width
)
681 if (he
->branch_info
->flags
.cycles
== 0)
682 return repsep_snprintf(bf
, size
, "%-*s", width
, "-");
683 return repsep_snprintf(bf
, size
, "%-*hd", width
,
684 he
->branch_info
->flags
.cycles
);
687 struct sort_entry sort_cycles
= {
688 .se_header
= "Basic Block Cycles",
689 .se_cmp
= sort__cycles_cmp
,
690 .se_snprintf
= hist_entry__cycles_snprintf
,
691 .se_width_idx
= HISTC_CYCLES
,
694 /* --sort daddr_sym */
696 sort__daddr_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
698 uint64_t l
= 0, r
= 0;
701 l
= left
->mem_info
->daddr
.addr
;
703 r
= right
->mem_info
->daddr
.addr
;
705 return (int64_t)(r
- l
);
708 static int hist_entry__daddr_snprintf(struct hist_entry
*he
, char *bf
,
709 size_t size
, unsigned int width
)
712 struct map
*map
= NULL
;
713 struct symbol
*sym
= NULL
;
716 addr
= he
->mem_info
->daddr
.addr
;
717 map
= he
->mem_info
->daddr
.map
;
718 sym
= he
->mem_info
->daddr
.sym
;
720 return _hist_entry__sym_snprintf(map
, sym
, addr
, he
->level
, bf
, size
,
725 sort__iaddr_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
727 uint64_t l
= 0, r
= 0;
730 l
= left
->mem_info
->iaddr
.addr
;
732 r
= right
->mem_info
->iaddr
.addr
;
734 return (int64_t)(r
- l
);
737 static int hist_entry__iaddr_snprintf(struct hist_entry
*he
, char *bf
,
738 size_t size
, unsigned int width
)
741 struct map
*map
= NULL
;
742 struct symbol
*sym
= NULL
;
745 addr
= he
->mem_info
->iaddr
.addr
;
746 map
= he
->mem_info
->iaddr
.map
;
747 sym
= he
->mem_info
->iaddr
.sym
;
749 return _hist_entry__sym_snprintf(map
, sym
, addr
, he
->level
, bf
, size
,
754 sort__dso_daddr_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
756 struct map
*map_l
= NULL
;
757 struct map
*map_r
= NULL
;
760 map_l
= left
->mem_info
->daddr
.map
;
762 map_r
= right
->mem_info
->daddr
.map
;
764 return _sort__dso_cmp(map_l
, map_r
);
767 static int hist_entry__dso_daddr_snprintf(struct hist_entry
*he
, char *bf
,
768 size_t size
, unsigned int width
)
770 struct map
*map
= NULL
;
773 map
= he
->mem_info
->daddr
.map
;
775 return _hist_entry__dso_snprintf(map
, bf
, size
, width
);
779 sort__locked_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
781 union perf_mem_data_src data_src_l
;
782 union perf_mem_data_src data_src_r
;
785 data_src_l
= left
->mem_info
->data_src
;
787 data_src_l
.mem_lock
= PERF_MEM_LOCK_NA
;
790 data_src_r
= right
->mem_info
->data_src
;
792 data_src_r
.mem_lock
= PERF_MEM_LOCK_NA
;
794 return (int64_t)(data_src_r
.mem_lock
- data_src_l
.mem_lock
);
797 static int hist_entry__locked_snprintf(struct hist_entry
*he
, char *bf
,
798 size_t size
, unsigned int width
)
801 u64 mask
= PERF_MEM_LOCK_NA
;
804 mask
= he
->mem_info
->data_src
.mem_lock
;
806 if (mask
& PERF_MEM_LOCK_NA
)
808 else if (mask
& PERF_MEM_LOCK_LOCKED
)
813 return repsep_snprintf(bf
, size
, "%-*s", width
, out
);
817 sort__tlb_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
819 union perf_mem_data_src data_src_l
;
820 union perf_mem_data_src data_src_r
;
823 data_src_l
= left
->mem_info
->data_src
;
825 data_src_l
.mem_dtlb
= PERF_MEM_TLB_NA
;
828 data_src_r
= right
->mem_info
->data_src
;
830 data_src_r
.mem_dtlb
= PERF_MEM_TLB_NA
;
832 return (int64_t)(data_src_r
.mem_dtlb
- data_src_l
.mem_dtlb
);
835 static const char * const tlb_access
[] = {
844 #define NUM_TLB_ACCESS (sizeof(tlb_access)/sizeof(const char *))
846 static int hist_entry__tlb_snprintf(struct hist_entry
*he
, char *bf
,
847 size_t size
, unsigned int width
)
850 size_t sz
= sizeof(out
) - 1; /* -1 for null termination */
852 u64 m
= PERF_MEM_TLB_NA
;
858 m
= he
->mem_info
->data_src
.mem_dtlb
;
860 hit
= m
& PERF_MEM_TLB_HIT
;
861 miss
= m
& PERF_MEM_TLB_MISS
;
863 /* already taken care of */
864 m
&= ~(PERF_MEM_TLB_HIT
|PERF_MEM_TLB_MISS
);
866 for (i
= 0; m
&& i
< NUM_TLB_ACCESS
; i
++, m
>>= 1) {
873 strncat(out
, tlb_access
[i
], sz
- l
);
874 l
+= strlen(tlb_access
[i
]);
879 strncat(out
, " hit", sz
- l
);
881 strncat(out
, " miss", sz
- l
);
883 return repsep_snprintf(bf
, size
, "%-*s", width
, out
);
887 sort__lvl_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
889 union perf_mem_data_src data_src_l
;
890 union perf_mem_data_src data_src_r
;
893 data_src_l
= left
->mem_info
->data_src
;
895 data_src_l
.mem_lvl
= PERF_MEM_LVL_NA
;
898 data_src_r
= right
->mem_info
->data_src
;
900 data_src_r
.mem_lvl
= PERF_MEM_LVL_NA
;
902 return (int64_t)(data_src_r
.mem_lvl
- data_src_l
.mem_lvl
);
905 static const char * const mem_lvl
[] = {
914 "Remote RAM (1 hop)",
915 "Remote RAM (2 hops)",
916 "Remote Cache (1 hop)",
917 "Remote Cache (2 hops)",
921 #define NUM_MEM_LVL (sizeof(mem_lvl)/sizeof(const char *))
923 static int hist_entry__lvl_snprintf(struct hist_entry
*he
, char *bf
,
924 size_t size
, unsigned int width
)
927 size_t sz
= sizeof(out
) - 1; /* -1 for null termination */
929 u64 m
= PERF_MEM_LVL_NA
;
933 m
= he
->mem_info
->data_src
.mem_lvl
;
937 hit
= m
& PERF_MEM_LVL_HIT
;
938 miss
= m
& PERF_MEM_LVL_MISS
;
940 /* already taken care of */
941 m
&= ~(PERF_MEM_LVL_HIT
|PERF_MEM_LVL_MISS
);
943 for (i
= 0; m
&& i
< NUM_MEM_LVL
; i
++, m
>>= 1) {
950 strncat(out
, mem_lvl
[i
], sz
- l
);
951 l
+= strlen(mem_lvl
[i
]);
956 strncat(out
, " hit", sz
- l
);
958 strncat(out
, " miss", sz
- l
);
960 return repsep_snprintf(bf
, size
, "%-*s", width
, out
);
964 sort__snoop_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
966 union perf_mem_data_src data_src_l
;
967 union perf_mem_data_src data_src_r
;
970 data_src_l
= left
->mem_info
->data_src
;
972 data_src_l
.mem_snoop
= PERF_MEM_SNOOP_NA
;
975 data_src_r
= right
->mem_info
->data_src
;
977 data_src_r
.mem_snoop
= PERF_MEM_SNOOP_NA
;
979 return (int64_t)(data_src_r
.mem_snoop
- data_src_l
.mem_snoop
);
982 static const char * const snoop_access
[] = {
989 #define NUM_SNOOP_ACCESS (sizeof(snoop_access)/sizeof(const char *))
991 static int hist_entry__snoop_snprintf(struct hist_entry
*he
, char *bf
,
992 size_t size
, unsigned int width
)
995 size_t sz
= sizeof(out
) - 1; /* -1 for null termination */
997 u64 m
= PERF_MEM_SNOOP_NA
;
1002 m
= he
->mem_info
->data_src
.mem_snoop
;
1004 for (i
= 0; m
&& i
< NUM_SNOOP_ACCESS
; i
++, m
>>= 1) {
1008 strcat(out
, " or ");
1011 strncat(out
, snoop_access
[i
], sz
- l
);
1012 l
+= strlen(snoop_access
[i
]);
1018 return repsep_snprintf(bf
, size
, "%-*s", width
, out
);
1021 static inline u64
cl_address(u64 address
)
1023 /* return the cacheline of the address */
1024 return (address
& ~(cacheline_size
- 1));
1028 sort__dcacheline_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
1031 struct map
*l_map
, *r_map
;
1033 if (!left
->mem_info
) return -1;
1034 if (!right
->mem_info
) return 1;
1036 /* group event types together */
1037 if (left
->cpumode
> right
->cpumode
) return -1;
1038 if (left
->cpumode
< right
->cpumode
) return 1;
1040 l_map
= left
->mem_info
->daddr
.map
;
1041 r_map
= right
->mem_info
->daddr
.map
;
1043 /* if both are NULL, jump to sort on al_addr instead */
1044 if (!l_map
&& !r_map
)
1047 if (!l_map
) return -1;
1048 if (!r_map
) return 1;
1050 if (l_map
->maj
> r_map
->maj
) return -1;
1051 if (l_map
->maj
< r_map
->maj
) return 1;
1053 if (l_map
->min
> r_map
->min
) return -1;
1054 if (l_map
->min
< r_map
->min
) return 1;
1056 if (l_map
->ino
> r_map
->ino
) return -1;
1057 if (l_map
->ino
< r_map
->ino
) return 1;
1059 if (l_map
->ino_generation
> r_map
->ino_generation
) return -1;
1060 if (l_map
->ino_generation
< r_map
->ino_generation
) return 1;
1063 * Addresses with no major/minor numbers are assumed to be
1064 * anonymous in userspace. Sort those on pid then address.
1066 * The kernel and non-zero major/minor mapped areas are
1067 * assumed to be unity mapped. Sort those on address.
1070 if ((left
->cpumode
!= PERF_RECORD_MISC_KERNEL
) &&
1071 (!(l_map
->flags
& MAP_SHARED
)) &&
1072 !l_map
->maj
&& !l_map
->min
&& !l_map
->ino
&&
1073 !l_map
->ino_generation
) {
1074 /* userspace anonymous */
1076 if (left
->thread
->pid_
> right
->thread
->pid_
) return -1;
1077 if (left
->thread
->pid_
< right
->thread
->pid_
) return 1;
1081 /* al_addr does all the right addr - start + offset calculations */
1082 l
= cl_address(left
->mem_info
->daddr
.al_addr
);
1083 r
= cl_address(right
->mem_info
->daddr
.al_addr
);
1085 if (l
> r
) return -1;
1086 if (l
< r
) return 1;
1091 static int hist_entry__dcacheline_snprintf(struct hist_entry
*he
, char *bf
,
1092 size_t size
, unsigned int width
)
1096 struct map
*map
= NULL
;
1097 struct symbol
*sym
= NULL
;
1098 char level
= he
->level
;
1101 addr
= cl_address(he
->mem_info
->daddr
.al_addr
);
1102 map
= he
->mem_info
->daddr
.map
;
1103 sym
= he
->mem_info
->daddr
.sym
;
1105 /* print [s] for shared data mmaps */
1106 if ((he
->cpumode
!= PERF_RECORD_MISC_KERNEL
) &&
1107 map
&& (map
->type
== MAP__VARIABLE
) &&
1108 (map
->flags
& MAP_SHARED
) &&
1109 (map
->maj
|| map
->min
|| map
->ino
||
1110 map
->ino_generation
))
1115 return _hist_entry__sym_snprintf(map
, sym
, addr
, level
, bf
, size
,
1119 struct sort_entry sort_mispredict
= {
1120 .se_header
= "Branch Mispredicted",
1121 .se_cmp
= sort__mispredict_cmp
,
1122 .se_snprintf
= hist_entry__mispredict_snprintf
,
1123 .se_width_idx
= HISTC_MISPREDICT
,
1126 static u64
he_weight(struct hist_entry
*he
)
1128 return he
->stat
.nr_events
? he
->stat
.weight
/ he
->stat
.nr_events
: 0;
1132 sort__local_weight_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
1134 return he_weight(left
) - he_weight(right
);
1137 static int hist_entry__local_weight_snprintf(struct hist_entry
*he
, char *bf
,
1138 size_t size
, unsigned int width
)
1140 return repsep_snprintf(bf
, size
, "%-*llu", width
, he_weight(he
));
1143 struct sort_entry sort_local_weight
= {
1144 .se_header
= "Local Weight",
1145 .se_cmp
= sort__local_weight_cmp
,
1146 .se_snprintf
= hist_entry__local_weight_snprintf
,
1147 .se_width_idx
= HISTC_LOCAL_WEIGHT
,
1151 sort__global_weight_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
1153 return left
->stat
.weight
- right
->stat
.weight
;
1156 static int hist_entry__global_weight_snprintf(struct hist_entry
*he
, char *bf
,
1157 size_t size
, unsigned int width
)
1159 return repsep_snprintf(bf
, size
, "%-*llu", width
, he
->stat
.weight
);
1162 struct sort_entry sort_global_weight
= {
1163 .se_header
= "Weight",
1164 .se_cmp
= sort__global_weight_cmp
,
1165 .se_snprintf
= hist_entry__global_weight_snprintf
,
1166 .se_width_idx
= HISTC_GLOBAL_WEIGHT
,
1169 struct sort_entry sort_mem_daddr_sym
= {
1170 .se_header
= "Data Symbol",
1171 .se_cmp
= sort__daddr_cmp
,
1172 .se_snprintf
= hist_entry__daddr_snprintf
,
1173 .se_width_idx
= HISTC_MEM_DADDR_SYMBOL
,
1176 struct sort_entry sort_mem_iaddr_sym
= {
1177 .se_header
= "Code Symbol",
1178 .se_cmp
= sort__iaddr_cmp
,
1179 .se_snprintf
= hist_entry__iaddr_snprintf
,
1180 .se_width_idx
= HISTC_MEM_IADDR_SYMBOL
,
1183 struct sort_entry sort_mem_daddr_dso
= {
1184 .se_header
= "Data Object",
1185 .se_cmp
= sort__dso_daddr_cmp
,
1186 .se_snprintf
= hist_entry__dso_daddr_snprintf
,
1187 .se_width_idx
= HISTC_MEM_DADDR_SYMBOL
,
1190 struct sort_entry sort_mem_locked
= {
1191 .se_header
= "Locked",
1192 .se_cmp
= sort__locked_cmp
,
1193 .se_snprintf
= hist_entry__locked_snprintf
,
1194 .se_width_idx
= HISTC_MEM_LOCKED
,
1197 struct sort_entry sort_mem_tlb
= {
1198 .se_header
= "TLB access",
1199 .se_cmp
= sort__tlb_cmp
,
1200 .se_snprintf
= hist_entry__tlb_snprintf
,
1201 .se_width_idx
= HISTC_MEM_TLB
,
1204 struct sort_entry sort_mem_lvl
= {
1205 .se_header
= "Memory access",
1206 .se_cmp
= sort__lvl_cmp
,
1207 .se_snprintf
= hist_entry__lvl_snprintf
,
1208 .se_width_idx
= HISTC_MEM_LVL
,
1211 struct sort_entry sort_mem_snoop
= {
1212 .se_header
= "Snoop",
1213 .se_cmp
= sort__snoop_cmp
,
1214 .se_snprintf
= hist_entry__snoop_snprintf
,
1215 .se_width_idx
= HISTC_MEM_SNOOP
,
1218 struct sort_entry sort_mem_dcacheline
= {
1219 .se_header
= "Data Cacheline",
1220 .se_cmp
= sort__dcacheline_cmp
,
1221 .se_snprintf
= hist_entry__dcacheline_snprintf
,
1222 .se_width_idx
= HISTC_MEM_DCACHELINE
,
1226 sort__abort_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
1228 if (!left
->branch_info
|| !right
->branch_info
)
1229 return cmp_null(left
->branch_info
, right
->branch_info
);
1231 return left
->branch_info
->flags
.abort
!=
1232 right
->branch_info
->flags
.abort
;
1235 static int hist_entry__abort_snprintf(struct hist_entry
*he
, char *bf
,
1236 size_t size
, unsigned int width
)
1238 static const char *out
= "N/A";
1240 if (he
->branch_info
) {
1241 if (he
->branch_info
->flags
.abort
)
1247 return repsep_snprintf(bf
, size
, "%-*s", width
, out
);
1250 struct sort_entry sort_abort
= {
1251 .se_header
= "Transaction abort",
1252 .se_cmp
= sort__abort_cmp
,
1253 .se_snprintf
= hist_entry__abort_snprintf
,
1254 .se_width_idx
= HISTC_ABORT
,
1258 sort__in_tx_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
1260 if (!left
->branch_info
|| !right
->branch_info
)
1261 return cmp_null(left
->branch_info
, right
->branch_info
);
1263 return left
->branch_info
->flags
.in_tx
!=
1264 right
->branch_info
->flags
.in_tx
;
1267 static int hist_entry__in_tx_snprintf(struct hist_entry
*he
, char *bf
,
1268 size_t size
, unsigned int width
)
1270 static const char *out
= "N/A";
1272 if (he
->branch_info
) {
1273 if (he
->branch_info
->flags
.in_tx
)
1279 return repsep_snprintf(bf
, size
, "%-*s", width
, out
);
1282 struct sort_entry sort_in_tx
= {
1283 .se_header
= "Branch in transaction",
1284 .se_cmp
= sort__in_tx_cmp
,
1285 .se_snprintf
= hist_entry__in_tx_snprintf
,
1286 .se_width_idx
= HISTC_IN_TX
,
1290 sort__transaction_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
1292 return left
->transaction
- right
->transaction
;
1295 static inline char *add_str(char *p
, const char *str
)
1298 return p
+ strlen(str
);
1301 static struct txbit
{
1306 { PERF_TXN_ELISION
, "EL ", 0 },
1307 { PERF_TXN_TRANSACTION
, "TX ", 1 },
1308 { PERF_TXN_SYNC
, "SYNC ", 1 },
1309 { PERF_TXN_ASYNC
, "ASYNC ", 0 },
1310 { PERF_TXN_RETRY
, "RETRY ", 0 },
1311 { PERF_TXN_CONFLICT
, "CON ", 0 },
1312 { PERF_TXN_CAPACITY_WRITE
, "CAP-WRITE ", 1 },
1313 { PERF_TXN_CAPACITY_READ
, "CAP-READ ", 0 },
1317 int hist_entry__transaction_len(void)
1322 for (i
= 0; txbits
[i
].name
; i
++) {
1323 if (!txbits
[i
].skip_for_len
)
1324 len
+= strlen(txbits
[i
].name
);
1326 len
+= 4; /* :XX<space> */
1330 static int hist_entry__transaction_snprintf(struct hist_entry
*he
, char *bf
,
1331 size_t size
, unsigned int width
)
1333 u64 t
= he
->transaction
;
1339 for (i
= 0; txbits
[i
].name
; i
++)
1340 if (txbits
[i
].flag
& t
)
1341 p
= add_str(p
, txbits
[i
].name
);
1342 if (t
&& !(t
& (PERF_TXN_SYNC
|PERF_TXN_ASYNC
)))
1343 p
= add_str(p
, "NEITHER ");
1344 if (t
& PERF_TXN_ABORT_MASK
) {
1345 sprintf(p
, ":%" PRIx64
,
1346 (t
& PERF_TXN_ABORT_MASK
) >>
1347 PERF_TXN_ABORT_SHIFT
);
1351 return repsep_snprintf(bf
, size
, "%-*s", width
, buf
);
1354 struct sort_entry sort_transaction
= {
1355 .se_header
= "Transaction ",
1356 .se_cmp
= sort__transaction_cmp
,
1357 .se_snprintf
= hist_entry__transaction_snprintf
,
1358 .se_width_idx
= HISTC_TRANSACTION
,
1361 struct sort_dimension
{
1363 struct sort_entry
*entry
;
1367 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
1369 static struct sort_dimension common_sort_dimensions
[] = {
1370 DIM(SORT_PID
, "pid", sort_thread
),
1371 DIM(SORT_COMM
, "comm", sort_comm
),
1372 DIM(SORT_DSO
, "dso", sort_dso
),
1373 DIM(SORT_SYM
, "symbol", sort_sym
),
1374 DIM(SORT_PARENT
, "parent", sort_parent
),
1375 DIM(SORT_CPU
, "cpu", sort_cpu
),
1376 DIM(SORT_SOCKET
, "socket", sort_socket
),
1377 DIM(SORT_SRCLINE
, "srcline", sort_srcline
),
1378 DIM(SORT_SRCFILE
, "srcfile", sort_srcfile
),
1379 DIM(SORT_LOCAL_WEIGHT
, "local_weight", sort_local_weight
),
1380 DIM(SORT_GLOBAL_WEIGHT
, "weight", sort_global_weight
),
1381 DIM(SORT_TRANSACTION
, "transaction", sort_transaction
),
1382 DIM(SORT_TRACE
, "trace", sort_trace
),
1387 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
1389 static struct sort_dimension bstack_sort_dimensions
[] = {
1390 DIM(SORT_DSO_FROM
, "dso_from", sort_dso_from
),
1391 DIM(SORT_DSO_TO
, "dso_to", sort_dso_to
),
1392 DIM(SORT_SYM_FROM
, "symbol_from", sort_sym_from
),
1393 DIM(SORT_SYM_TO
, "symbol_to", sort_sym_to
),
1394 DIM(SORT_MISPREDICT
, "mispredict", sort_mispredict
),
1395 DIM(SORT_IN_TX
, "in_tx", sort_in_tx
),
1396 DIM(SORT_ABORT
, "abort", sort_abort
),
1397 DIM(SORT_CYCLES
, "cycles", sort_cycles
),
1402 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
1404 static struct sort_dimension memory_sort_dimensions
[] = {
1405 DIM(SORT_MEM_DADDR_SYMBOL
, "symbol_daddr", sort_mem_daddr_sym
),
1406 DIM(SORT_MEM_IADDR_SYMBOL
, "symbol_iaddr", sort_mem_iaddr_sym
),
1407 DIM(SORT_MEM_DADDR_DSO
, "dso_daddr", sort_mem_daddr_dso
),
1408 DIM(SORT_MEM_LOCKED
, "locked", sort_mem_locked
),
1409 DIM(SORT_MEM_TLB
, "tlb", sort_mem_tlb
),
1410 DIM(SORT_MEM_LVL
, "mem", sort_mem_lvl
),
1411 DIM(SORT_MEM_SNOOP
, "snoop", sort_mem_snoop
),
1412 DIM(SORT_MEM_DCACHELINE
, "dcacheline", sort_mem_dcacheline
),
1417 struct hpp_dimension
{
1419 struct perf_hpp_fmt
*fmt
;
1423 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
1425 static struct hpp_dimension hpp_sort_dimensions
[] = {
1426 DIM(PERF_HPP__OVERHEAD
, "overhead"),
1427 DIM(PERF_HPP__OVERHEAD_SYS
, "overhead_sys"),
1428 DIM(PERF_HPP__OVERHEAD_US
, "overhead_us"),
1429 DIM(PERF_HPP__OVERHEAD_GUEST_SYS
, "overhead_guest_sys"),
1430 DIM(PERF_HPP__OVERHEAD_GUEST_US
, "overhead_guest_us"),
1431 DIM(PERF_HPP__OVERHEAD_ACC
, "overhead_children"),
1432 DIM(PERF_HPP__SAMPLES
, "sample"),
1433 DIM(PERF_HPP__PERIOD
, "period"),
1438 struct hpp_sort_entry
{
1439 struct perf_hpp_fmt hpp
;
1440 struct sort_entry
*se
;
1443 bool perf_hpp__same_sort_entry(struct perf_hpp_fmt
*a
, struct perf_hpp_fmt
*b
)
1445 struct hpp_sort_entry
*hse_a
;
1446 struct hpp_sort_entry
*hse_b
;
1448 if (!perf_hpp__is_sort_entry(a
) || !perf_hpp__is_sort_entry(b
))
1451 hse_a
= container_of(a
, struct hpp_sort_entry
, hpp
);
1452 hse_b
= container_of(b
, struct hpp_sort_entry
, hpp
);
1454 return hse_a
->se
== hse_b
->se
;
1457 void perf_hpp__reset_sort_width(struct perf_hpp_fmt
*fmt
, struct hists
*hists
)
1459 struct hpp_sort_entry
*hse
;
1461 if (!perf_hpp__is_sort_entry(fmt
))
1464 hse
= container_of(fmt
, struct hpp_sort_entry
, hpp
);
1465 hists__new_col_len(hists
, hse
->se
->se_width_idx
, strlen(fmt
->name
));
1468 static int __sort__hpp_header(struct perf_hpp_fmt
*fmt
, struct perf_hpp
*hpp
,
1469 struct perf_evsel
*evsel
)
1471 struct hpp_sort_entry
*hse
;
1472 size_t len
= fmt
->user_len
;
1474 hse
= container_of(fmt
, struct hpp_sort_entry
, hpp
);
1477 len
= hists__col_len(evsel__hists(evsel
), hse
->se
->se_width_idx
);
1479 return scnprintf(hpp
->buf
, hpp
->size
, "%-*.*s", len
, len
, fmt
->name
);
1482 static int __sort__hpp_width(struct perf_hpp_fmt
*fmt
,
1483 struct perf_hpp
*hpp __maybe_unused
,
1484 struct perf_evsel
*evsel
)
1486 struct hpp_sort_entry
*hse
;
1487 size_t len
= fmt
->user_len
;
1489 hse
= container_of(fmt
, struct hpp_sort_entry
, hpp
);
1492 len
= hists__col_len(evsel__hists(evsel
), hse
->se
->se_width_idx
);
1497 static int __sort__hpp_entry(struct perf_hpp_fmt
*fmt
, struct perf_hpp
*hpp
,
1498 struct hist_entry
*he
)
1500 struct hpp_sort_entry
*hse
;
1501 size_t len
= fmt
->user_len
;
1503 hse
= container_of(fmt
, struct hpp_sort_entry
, hpp
);
1506 len
= hists__col_len(he
->hists
, hse
->se
->se_width_idx
);
1508 return hse
->se
->se_snprintf(he
, hpp
->buf
, hpp
->size
, len
);
1511 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt
*fmt
,
1512 struct hist_entry
*a
, struct hist_entry
*b
)
1514 struct hpp_sort_entry
*hse
;
1516 hse
= container_of(fmt
, struct hpp_sort_entry
, hpp
);
1517 return hse
->se
->se_cmp(a
, b
);
1520 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt
*fmt
,
1521 struct hist_entry
*a
, struct hist_entry
*b
)
1523 struct hpp_sort_entry
*hse
;
1524 int64_t (*collapse_fn
)(struct hist_entry
*, struct hist_entry
*);
1526 hse
= container_of(fmt
, struct hpp_sort_entry
, hpp
);
1527 collapse_fn
= hse
->se
->se_collapse
?: hse
->se
->se_cmp
;
1528 return collapse_fn(a
, b
);
1531 static int64_t __sort__hpp_sort(struct perf_hpp_fmt
*fmt
,
1532 struct hist_entry
*a
, struct hist_entry
*b
)
1534 struct hpp_sort_entry
*hse
;
1535 int64_t (*sort_fn
)(struct hist_entry
*, struct hist_entry
*);
1537 hse
= container_of(fmt
, struct hpp_sort_entry
, hpp
);
1538 sort_fn
= hse
->se
->se_sort
?: hse
->se
->se_cmp
;
1539 return sort_fn(a
, b
);
1542 static struct hpp_sort_entry
*
1543 __sort_dimension__alloc_hpp(struct sort_dimension
*sd
)
1545 struct hpp_sort_entry
*hse
;
1547 hse
= malloc(sizeof(*hse
));
1549 pr_err("Memory allocation failed\n");
1553 hse
->se
= sd
->entry
;
1554 hse
->hpp
.name
= sd
->entry
->se_header
;
1555 hse
->hpp
.header
= __sort__hpp_header
;
1556 hse
->hpp
.width
= __sort__hpp_width
;
1557 hse
->hpp
.entry
= __sort__hpp_entry
;
1558 hse
->hpp
.color
= NULL
;
1560 hse
->hpp
.cmp
= __sort__hpp_cmp
;
1561 hse
->hpp
.collapse
= __sort__hpp_collapse
;
1562 hse
->hpp
.sort
= __sort__hpp_sort
;
1564 INIT_LIST_HEAD(&hse
->hpp
.list
);
1565 INIT_LIST_HEAD(&hse
->hpp
.sort_list
);
1566 hse
->hpp
.elide
= false;
1568 hse
->hpp
.user_len
= 0;
1573 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt
*format
)
1575 return format
->header
== __sort__hpp_header
;
1578 static int __sort_dimension__add_hpp_sort(struct sort_dimension
*sd
)
1580 struct hpp_sort_entry
*hse
= __sort_dimension__alloc_hpp(sd
);
1585 perf_hpp__register_sort_field(&hse
->hpp
);
1589 static int __sort_dimension__add_hpp_output(struct sort_dimension
*sd
)
1591 struct hpp_sort_entry
*hse
= __sort_dimension__alloc_hpp(sd
);
1596 perf_hpp__column_register(&hse
->hpp
);
1600 struct hpp_dynamic_entry
{
1601 struct perf_hpp_fmt hpp
;
1602 struct perf_evsel
*evsel
;
1603 struct format_field
*field
;
1604 unsigned dynamic_len
;
1608 static int hde_width(struct hpp_dynamic_entry
*hde
)
1610 if (!hde
->hpp
.len
) {
1611 int len
= hde
->dynamic_len
;
1612 int namelen
= strlen(hde
->field
->name
);
1613 int fieldlen
= hde
->field
->size
;
1618 if (!(hde
->field
->flags
& FIELD_IS_STRING
)) {
1619 /* length for print hex numbers */
1620 fieldlen
= hde
->field
->size
* 2 + 2;
1627 return hde
->hpp
.len
;
1630 static void update_dynamic_len(struct hpp_dynamic_entry
*hde
,
1631 struct hist_entry
*he
)
1634 struct format_field
*field
= hde
->field
;
1641 /* parse pretty print result and update max length */
1642 if (!he
->trace_output
)
1643 he
->trace_output
= get_trace_output(he
);
1645 namelen
= strlen(field
->name
);
1646 str
= he
->trace_output
;
1649 pos
= strchr(str
, ' ');
1652 pos
= str
+ strlen(str
);
1655 if (!strncmp(str
, field
->name
, namelen
)) {
1661 if (len
> hde
->dynamic_len
)
1662 hde
->dynamic_len
= len
;
1673 static int __sort__hde_header(struct perf_hpp_fmt
*fmt
, struct perf_hpp
*hpp
,
1674 struct perf_evsel
*evsel __maybe_unused
)
1676 struct hpp_dynamic_entry
*hde
;
1677 size_t len
= fmt
->user_len
;
1679 hde
= container_of(fmt
, struct hpp_dynamic_entry
, hpp
);
1682 len
= hde_width(hde
);
1684 return scnprintf(hpp
->buf
, hpp
->size
, "%*.*s", len
, len
, hde
->field
->name
);
1687 static int __sort__hde_width(struct perf_hpp_fmt
*fmt
,
1688 struct perf_hpp
*hpp __maybe_unused
,
1689 struct perf_evsel
*evsel __maybe_unused
)
1691 struct hpp_dynamic_entry
*hde
;
1692 size_t len
= fmt
->user_len
;
1694 hde
= container_of(fmt
, struct hpp_dynamic_entry
, hpp
);
1697 len
= hde_width(hde
);
1702 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt
*fmt
, struct hists
*hists
)
1704 struct hpp_dynamic_entry
*hde
;
1706 hde
= container_of(fmt
, struct hpp_dynamic_entry
, hpp
);
1708 return hists_to_evsel(hists
) == hde
->evsel
;
1711 static int __sort__hde_entry(struct perf_hpp_fmt
*fmt
, struct perf_hpp
*hpp
,
1712 struct hist_entry
*he
)
1714 struct hpp_dynamic_entry
*hde
;
1715 size_t len
= fmt
->user_len
;
1717 struct format_field
*field
;
1722 hde
= container_of(fmt
, struct hpp_dynamic_entry
, hpp
);
1725 len
= hde_width(hde
);
1731 namelen
= strlen(field
->name
);
1732 str
= he
->trace_output
;
1735 pos
= strchr(str
, ' ');
1738 pos
= str
+ strlen(str
);
1741 if (!strncmp(str
, field
->name
, namelen
)) {
1743 str
= strndup(str
, pos
- str
);
1746 return scnprintf(hpp
->buf
, hpp
->size
,
1747 "%*.*s", len
, len
, "ERROR");
1758 struct trace_seq seq
;
1760 trace_seq_init(&seq
);
1761 pevent_print_field(&seq
, he
->raw_data
, hde
->field
);
1765 ret
= scnprintf(hpp
->buf
, hpp
->size
, "%*.*s", len
, len
, str
);
1770 static int64_t __sort__hde_cmp(struct perf_hpp_fmt
*fmt
,
1771 struct hist_entry
*a
, struct hist_entry
*b
)
1773 struct hpp_dynamic_entry
*hde
;
1774 struct format_field
*field
;
1775 unsigned offset
, size
;
1777 hde
= container_of(fmt
, struct hpp_dynamic_entry
, hpp
);
1780 if (field
->flags
& FIELD_IS_DYNAMIC
) {
1781 unsigned long long dyn
;
1783 pevent_read_number_field(field
, a
->raw_data
, &dyn
);
1784 offset
= dyn
& 0xffff;
1785 size
= (dyn
>> 16) & 0xffff;
1787 /* record max width for output */
1788 if (size
> hde
->dynamic_len
)
1789 hde
->dynamic_len
= size
;
1791 offset
= field
->offset
;
1794 update_dynamic_len(hde
, a
);
1795 update_dynamic_len(hde
, b
);
1798 return memcmp(a
->raw_data
+ offset
, b
->raw_data
+ offset
, size
);
1801 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt
*fmt
)
1803 return fmt
->cmp
== __sort__hde_cmp
;
1806 static struct hpp_dynamic_entry
*
1807 __alloc_dynamic_entry(struct perf_evsel
*evsel
, struct format_field
*field
)
1809 struct hpp_dynamic_entry
*hde
;
1811 hde
= malloc(sizeof(*hde
));
1813 pr_debug("Memory allocation failed\n");
1819 hde
->dynamic_len
= 0;
1821 hde
->hpp
.name
= field
->name
;
1822 hde
->hpp
.header
= __sort__hde_header
;
1823 hde
->hpp
.width
= __sort__hde_width
;
1824 hde
->hpp
.entry
= __sort__hde_entry
;
1825 hde
->hpp
.color
= NULL
;
1827 hde
->hpp
.cmp
= __sort__hde_cmp
;
1828 hde
->hpp
.collapse
= __sort__hde_cmp
;
1829 hde
->hpp
.sort
= __sort__hde_cmp
;
1831 INIT_LIST_HEAD(&hde
->hpp
.list
);
1832 INIT_LIST_HEAD(&hde
->hpp
.sort_list
);
1833 hde
->hpp
.elide
= false;
1835 hde
->hpp
.user_len
= 0;
/*
 * Split "event.field/opt" in place.  *event is NULL when no '.' is
 * present (field-only form); *opt is NULL when no '/' is present.
 * @str is modified (separators replaced by NULs).  Returns 0.
 */
static int parse_field_name(char *str, char **event, char **field, char **opt)
{
	char *event_name, *field_name, *opt_name;

	event_name = str;
	field_name = strchr(str, '.');

	if (field_name) {
		*field_name++ = '\0';
	} else {
		event_name = NULL;
		field_name = str;
	}

	opt_name = strchr(field_name, '/');
	if (opt_name)
		*opt_name++ = '\0';

	*event = event_name;
	*field = field_name;
	*opt   = opt_name;

	return 0;
}
1865 /* find match evsel using a given event name. The event name can be:
1866 * 1. '%' + event index (e.g. '%1' for first event)
1867 * 2. full event name (e.g. sched:sched_switch)
1868 * 3. partial event name (should not contain ':')
1870 static struct perf_evsel
*find_evsel(struct perf_evlist
*evlist
, char *event_name
)
1872 struct perf_evsel
*evsel
= NULL
;
1873 struct perf_evsel
*pos
;
1877 if (event_name
[0] == '%') {
1878 int nr
= strtol(event_name
+1, NULL
, 0);
1880 if (nr
> evlist
->nr_entries
)
1883 evsel
= perf_evlist__first(evlist
);
1885 evsel
= perf_evsel__next(evsel
);
1890 full_name
= !!strchr(event_name
, ':');
1891 evlist__for_each(evlist
, pos
) {
1893 if (full_name
&& !strcmp(pos
->name
, event_name
))
1896 if (!full_name
&& strstr(pos
->name
, event_name
)) {
1898 pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
1899 event_name
, evsel
->name
, pos
->name
);
1909 static int __dynamic_dimension__add(struct perf_evsel
*evsel
,
1910 struct format_field
*field
,
1913 struct hpp_dynamic_entry
*hde
;
1915 hde
= __alloc_dynamic_entry(evsel
, field
);
1919 hde
->raw_trace
= raw_trace
;
1921 perf_hpp__register_sort_field(&hde
->hpp
);
1925 static int add_evsel_fields(struct perf_evsel
*evsel
, bool raw_trace
)
1928 struct format_field
*field
;
1930 field
= evsel
->tp_format
->format
.fields
;
1932 ret
= __dynamic_dimension__add(evsel
, field
, raw_trace
);
1936 field
= field
->next
;
1941 static int add_all_dynamic_fields(struct perf_evlist
*evlist
, bool raw_trace
)
1944 struct perf_evsel
*evsel
;
1946 evlist__for_each(evlist
, evsel
) {
1947 if (evsel
->attr
.type
!= PERF_TYPE_TRACEPOINT
)
1950 ret
= add_evsel_fields(evsel
, raw_trace
);
1957 static int add_all_matching_fields(struct perf_evlist
*evlist
,
1958 char *field_name
, bool raw_trace
)
1961 struct perf_evsel
*evsel
;
1962 struct format_field
*field
;
1964 evlist__for_each(evlist
, evsel
) {
1965 if (evsel
->attr
.type
!= PERF_TYPE_TRACEPOINT
)
1968 field
= pevent_find_any_field(evsel
->tp_format
, field_name
);
1972 ret
= __dynamic_dimension__add(evsel
, field
, raw_trace
);
1979 static int add_dynamic_entry(struct perf_evlist
*evlist
, const char *tok
)
1981 char *str
, *event_name
, *field_name
, *opt_name
;
1982 struct perf_evsel
*evsel
;
1983 struct format_field
*field
;
1984 bool raw_trace
= symbol_conf
.raw_trace
;
1994 if (parse_field_name(str
, &event_name
, &field_name
, &opt_name
) < 0) {
2000 if (strcmp(opt_name
, "raw")) {
2001 pr_debug("unsupported field option %s\n", opt_name
);
2008 if (!strcmp(field_name
, "trace_fields")) {
2009 ret
= add_all_dynamic_fields(evlist
, raw_trace
);
2013 if (event_name
== NULL
) {
2014 ret
= add_all_matching_fields(evlist
, field_name
, raw_trace
);
2018 evsel
= find_evsel(evlist
, event_name
);
2019 if (evsel
== NULL
) {
2020 pr_debug("Cannot find event: %s\n", event_name
);
2025 if (evsel
->attr
.type
!= PERF_TYPE_TRACEPOINT
) {
2026 pr_debug("%s is not a tracepoint event\n", event_name
);
2031 if (!strcmp(field_name
, "*")) {
2032 ret
= add_evsel_fields(evsel
, raw_trace
);
2034 field
= pevent_find_any_field(evsel
->tp_format
, field_name
);
2035 if (field
== NULL
) {
2036 pr_debug("Cannot find event field for %s.%s\n",
2037 event_name
, field_name
);
2041 ret
= __dynamic_dimension__add(evsel
, field
, raw_trace
);
2049 static int __sort_dimension__add(struct sort_dimension
*sd
)
2054 if (__sort_dimension__add_hpp_sort(sd
) < 0)
2057 if (sd
->entry
->se_collapse
)
2058 sort__need_collapse
= 1;
2065 static int __hpp_dimension__add(struct hpp_dimension
*hd
)
2070 perf_hpp__register_sort_field(hd
->fmt
);
2075 static int __sort_dimension__add_output(struct sort_dimension
*sd
)
2080 if (__sort_dimension__add_hpp_output(sd
) < 0)
2087 static int __hpp_dimension__add_output(struct hpp_dimension
*hd
)
2092 perf_hpp__column_register(hd
->fmt
);
2097 int hpp_dimension__add_output(unsigned col
)
2099 BUG_ON(col
>= PERF_HPP__MAX_INDEX
);
2100 return __hpp_dimension__add_output(&hpp_sort_dimensions
[col
]);
2103 static int sort_dimension__add(const char *tok
,
2104 struct perf_evlist
*evlist __maybe_unused
)
2108 for (i
= 0; i
< ARRAY_SIZE(common_sort_dimensions
); i
++) {
2109 struct sort_dimension
*sd
= &common_sort_dimensions
[i
];
2111 if (strncasecmp(tok
, sd
->name
, strlen(tok
)))
2114 if (sd
->entry
== &sort_parent
) {
2115 int ret
= regcomp(&parent_regex
, parent_pattern
, REG_EXTENDED
);
2119 regerror(ret
, &parent_regex
, err
, sizeof(err
));
2120 pr_err("Invalid regex: %s\n%s", parent_pattern
, err
);
2123 sort__has_parent
= 1;
2124 } else if (sd
->entry
== &sort_sym
) {
2127 * perf diff displays the performance difference amongst
2128 * two or more perf.data files. Those files could come
2129 * from different binaries. So we should not compare
2130 * their ips, but the name of symbol.
2132 if (sort__mode
== SORT_MODE__DIFF
)
2133 sd
->entry
->se_collapse
= sort__sym_sort
;
2135 } else if (sd
->entry
== &sort_dso
) {
2137 } else if (sd
->entry
== &sort_socket
) {
2138 sort__has_socket
= 1;
2141 return __sort_dimension__add(sd
);
2144 for (i
= 0; i
< ARRAY_SIZE(hpp_sort_dimensions
); i
++) {
2145 struct hpp_dimension
*hd
= &hpp_sort_dimensions
[i
];
2147 if (strncasecmp(tok
, hd
->name
, strlen(tok
)))
2150 return __hpp_dimension__add(hd
);
2153 for (i
= 0; i
< ARRAY_SIZE(bstack_sort_dimensions
); i
++) {
2154 struct sort_dimension
*sd
= &bstack_sort_dimensions
[i
];
2156 if (strncasecmp(tok
, sd
->name
, strlen(tok
)))
2159 if (sort__mode
!= SORT_MODE__BRANCH
)
2162 if (sd
->entry
== &sort_sym_from
|| sd
->entry
== &sort_sym_to
)
2165 __sort_dimension__add(sd
);
2169 for (i
= 0; i
< ARRAY_SIZE(memory_sort_dimensions
); i
++) {
2170 struct sort_dimension
*sd
= &memory_sort_dimensions
[i
];
2172 if (strncasecmp(tok
, sd
->name
, strlen(tok
)))
2175 if (sort__mode
!= SORT_MODE__MEMORY
)
2178 if (sd
->entry
== &sort_mem_daddr_sym
)
2181 __sort_dimension__add(sd
);
2185 if (!add_dynamic_entry(evlist
, tok
))
2191 static const char *get_default_sort_order(struct perf_evlist
*evlist
)
2193 const char *default_sort_orders
[] = {
2195 default_branch_sort_order
,
2196 default_mem_sort_order
,
2197 default_top_sort_order
,
2198 default_diff_sort_order
,
2199 default_tracepoint_sort_order
,
2201 bool use_trace
= true;
2202 struct perf_evsel
*evsel
;
2204 BUG_ON(sort__mode
>= ARRAY_SIZE(default_sort_orders
));
2209 evlist__for_each(evlist
, evsel
) {
2210 if (evsel
->attr
.type
!= PERF_TYPE_TRACEPOINT
) {
2217 sort__mode
= SORT_MODE__TRACEPOINT
;
2218 if (symbol_conf
.raw_trace
)
2219 return "trace_fields";
2222 return default_sort_orders
[sort__mode
];
2225 static int setup_sort_order(struct perf_evlist
*evlist
)
2227 char *new_sort_order
;
2230 * Append '+'-prefixed sort order to the default sort
2233 if (!sort_order
|| is_strict_order(sort_order
))
2236 if (sort_order
[1] == '\0') {
2237 error("Invalid --sort key: `+'");
2242 * We allocate new sort_order string, but we never free it,
2243 * because it's checked over the rest of the code.
2245 if (asprintf(&new_sort_order
, "%s,%s",
2246 get_default_sort_order(evlist
), sort_order
+ 1) < 0) {
2247 error("Not enough memory to set up --sort");
2251 sort_order
= new_sort_order
;
/*
 * Adds 'pre,' prefix into 'str' if 'pre' is
 * not already part of 'str'.  Takes ownership of @str (frees it when a
 * new string is built); returns NULL only on allocation failure.
 */
static char *prefix_if_not_in(const char *pre, char *str)
{
	char *n;

	if (!str || strstr(str, pre))
		return str;

	if (asprintf(&n, "%s,%s", pre, str) < 0)
		n = NULL;

	free(str);
	return n;
}
2273 static char *setup_overhead(char *keys
)
2275 keys
= prefix_if_not_in("overhead", keys
);
2277 if (symbol_conf
.cumulate_callchain
)
2278 keys
= prefix_if_not_in("overhead_children", keys
);
2283 static int __setup_sorting(struct perf_evlist
*evlist
)
2285 char *tmp
, *tok
, *str
;
2286 const char *sort_keys
;
2289 ret
= setup_sort_order(evlist
);
2293 sort_keys
= sort_order
;
2294 if (sort_keys
== NULL
) {
2295 if (is_strict_order(field_order
)) {
2297 * If user specified field order but no sort order,
2298 * we'll honor it and not add default sort orders.
2303 sort_keys
= get_default_sort_order(evlist
);
2306 str
= strdup(sort_keys
);
2308 error("Not enough memory to setup sort keys");
2313 * Prepend overhead fields for backward compatibility.
2315 if (!is_strict_order(field_order
)) {
2316 str
= setup_overhead(str
);
2318 error("Not enough memory to setup overhead keys");
2323 for (tok
= strtok_r(str
, ", ", &tmp
);
2324 tok
; tok
= strtok_r(NULL
, ", ", &tmp
)) {
2325 ret
= sort_dimension__add(tok
, evlist
);
2326 if (ret
== -EINVAL
) {
2327 error("Invalid --sort key: `%s'", tok
);
2329 } else if (ret
== -ESRCH
) {
2330 error("Unknown --sort key: `%s'", tok
);
2339 void perf_hpp__set_elide(int idx
, bool elide
)
2341 struct perf_hpp_fmt
*fmt
;
2342 struct hpp_sort_entry
*hse
;
2344 perf_hpp__for_each_format(fmt
) {
2345 if (!perf_hpp__is_sort_entry(fmt
))
2348 hse
= container_of(fmt
, struct hpp_sort_entry
, hpp
);
2349 if (hse
->se
->se_width_idx
== idx
) {
2356 static bool __get_elide(struct strlist
*list
, const char *list_name
, FILE *fp
)
2358 if (list
&& strlist__nr_entries(list
) == 1) {
2360 fprintf(fp
, "# %s: %s\n", list_name
,
2361 strlist__entry(list
, 0)->s
);
2367 static bool get_elide(int idx
, FILE *output
)
2371 return __get_elide(symbol_conf
.sym_list
, "symbol", output
);
2373 return __get_elide(symbol_conf
.dso_list
, "dso", output
);
2375 return __get_elide(symbol_conf
.comm_list
, "comm", output
);
2380 if (sort__mode
!= SORT_MODE__BRANCH
)
2384 case HISTC_SYMBOL_FROM
:
2385 return __get_elide(symbol_conf
.sym_from_list
, "sym_from", output
);
2386 case HISTC_SYMBOL_TO
:
2387 return __get_elide(symbol_conf
.sym_to_list
, "sym_to", output
);
2388 case HISTC_DSO_FROM
:
2389 return __get_elide(symbol_conf
.dso_from_list
, "dso_from", output
);
2391 return __get_elide(symbol_conf
.dso_to_list
, "dso_to", output
);
2399 void sort__setup_elide(FILE *output
)
2401 struct perf_hpp_fmt
*fmt
;
2402 struct hpp_sort_entry
*hse
;
2404 perf_hpp__for_each_format(fmt
) {
2405 if (!perf_hpp__is_sort_entry(fmt
))
2408 hse
= container_of(fmt
, struct hpp_sort_entry
, hpp
);
2409 fmt
->elide
= get_elide(hse
->se
->se_width_idx
, output
);
2413 * It makes no sense to elide all of sort entries.
2414 * Just revert them to show up again.
2416 perf_hpp__for_each_format(fmt
) {
2417 if (!perf_hpp__is_sort_entry(fmt
))
2424 perf_hpp__for_each_format(fmt
) {
2425 if (!perf_hpp__is_sort_entry(fmt
))
2432 static int output_field_add(char *tok
)
2436 for (i
= 0; i
< ARRAY_SIZE(common_sort_dimensions
); i
++) {
2437 struct sort_dimension
*sd
= &common_sort_dimensions
[i
];
2439 if (strncasecmp(tok
, sd
->name
, strlen(tok
)))
2442 return __sort_dimension__add_output(sd
);
2445 for (i
= 0; i
< ARRAY_SIZE(hpp_sort_dimensions
); i
++) {
2446 struct hpp_dimension
*hd
= &hpp_sort_dimensions
[i
];
2448 if (strncasecmp(tok
, hd
->name
, strlen(tok
)))
2451 return __hpp_dimension__add_output(hd
);
2454 for (i
= 0; i
< ARRAY_SIZE(bstack_sort_dimensions
); i
++) {
2455 struct sort_dimension
*sd
= &bstack_sort_dimensions
[i
];
2457 if (strncasecmp(tok
, sd
->name
, strlen(tok
)))
2460 return __sort_dimension__add_output(sd
);
2463 for (i
= 0; i
< ARRAY_SIZE(memory_sort_dimensions
); i
++) {
2464 struct sort_dimension
*sd
= &memory_sort_dimensions
[i
];
2466 if (strncasecmp(tok
, sd
->name
, strlen(tok
)))
2469 return __sort_dimension__add_output(sd
);
2475 static void reset_dimensions(void)
2479 for (i
= 0; i
< ARRAY_SIZE(common_sort_dimensions
); i
++)
2480 common_sort_dimensions
[i
].taken
= 0;
2482 for (i
= 0; i
< ARRAY_SIZE(hpp_sort_dimensions
); i
++)
2483 hpp_sort_dimensions
[i
].taken
= 0;
2485 for (i
= 0; i
< ARRAY_SIZE(bstack_sort_dimensions
); i
++)
2486 bstack_sort_dimensions
[i
].taken
= 0;
2488 for (i
= 0; i
< ARRAY_SIZE(memory_sort_dimensions
); i
++)
2489 memory_sort_dimensions
[i
].taken
= 0;
/*
 * A "strict" order string replaces the defaults outright; a string
 * starting with '+' (or a NULL string) merely extends them.
 */
bool is_strict_order(const char *order)
{
	if (order == NULL)
		return false;

	return *order != '+';
}
2497 static int __setup_output_field(void)
2499 char *tmp
, *tok
, *str
, *strp
;
2502 if (field_order
== NULL
)
2505 strp
= str
= strdup(field_order
);
2507 error("Not enough memory to setup output fields");
2511 if (!is_strict_order(field_order
))
2514 if (!strlen(strp
)) {
2515 error("Invalid --fields key: `+'");
2519 for (tok
= strtok_r(strp
, ", ", &tmp
);
2520 tok
; tok
= strtok_r(NULL
, ", ", &tmp
)) {
2521 ret
= output_field_add(tok
);
2522 if (ret
== -EINVAL
) {
2523 error("Invalid --fields key: `%s'", tok
);
2525 } else if (ret
== -ESRCH
) {
2526 error("Unknown --fields key: `%s'", tok
);
2536 int setup_sorting(struct perf_evlist
*evlist
)
2540 err
= __setup_sorting(evlist
);
2544 if (parent_pattern
!= default_parent_pattern
) {
2545 err
= sort_dimension__add("parent", evlist
);
2553 * perf diff doesn't use default hpp output fields.
2555 if (sort__mode
!= SORT_MODE__DIFF
)
2558 err
= __setup_output_field();
2562 /* copy sort keys to output fields */
2563 perf_hpp__setup_output_field();
2564 /* and then copy output fields to sort keys */
2565 perf_hpp__append_sort_keys();
2570 void reset_output_field(void)
2572 sort__need_collapse
= 0;
2573 sort__has_parent
= 0;
2581 perf_hpp__reset_output_field();