static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
struct callchain_param callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.order	= ORDER_CALLEE
};
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}
void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}
bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}
void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
}
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}
static void hist_entry__add_cpumode_period(struct hist_entry *he,
					   unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he->stat.period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he->stat.period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he->stat.period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he->stat.period_guest_us += period;
		break;
	default:
		break;
	}
}
static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period		+= period;
	he_stat->weight		+= weight;
	he_stat->nr_events	+= 1;
}
static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
	dest->weight		+= src->weight;
}
static void hist_entry__decay(struct hist_entry *he)
{
	he->stat.period = (he->stat.period * 7) / 8;
	he->stat.nr_events = (he->stat.nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;

	if (prev_period == 0)
		return true;

	hist_entry__decay(he);

	if (!he->filtered)
		hists->stats.total_period -= prev_period - he->stat.period;

	return he->stat.period == 0;
}
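
/*
 * Editor's sketch of the decay arithmetic above: each pass scales the
 * period by 7/8 with integer division, e.g. 1024 -> 896 -> 784 -> 686 ->
 * ..., so any non-zero period eventually hits 0, at which point
 * hists__decay_entry() reports the entry as fully decayed and the caller
 * below may drop it from the tree.
 */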
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here in
		 * case some of it gets new samples; we'll eventually free it
		 * when the user stops browsing and it again gets fully
		 * decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			hist_entry__free(n);
			--hists->nr_entries;
		}
	}
}
/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
	struct hist_entry *he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			/*
			 * This branch info is (a part of) allocated from
			 * machine__resolve_bstack() and will be freed after
			 * adding new entries.  So we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (he->mem_info) {
			if (he->mem_info->iaddr.map)
				he->mem_info->iaddr.map->referenced = true;
			if (he->mem_info->daddr.map)
				he->mem_info->daddr.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
	}

	return he;
}
void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(hists, h);
		++hists->nr_entries;
		hists->stats.total_period += h->stat.period;
	}
}
static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}
static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al,
					 u64 period, u64 weight)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			he_stat__add_period(&he->stat, period, weight);

			/*
			 * This mem info was allocated from machine__resolve_mem
			 * and will not be used anymore.
			 */
			free(entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry);
	if (!he)
		return NULL;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	hist_entry__add_cpumode_period(he, al->cpumode, period);
	return he;
}
struct hist_entry *__hists__add_mem_entry(struct hists *self,
					  struct addr_location *al,
					  struct symbol *sym_parent,
					  u64 period, u64 weight)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.parent	= sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
	};

	return add_hist_entry(self, &entry, al, period, weight);
}
struct hist_entry *__hists__add_branch_entry(struct hists *self,
					     struct addr_location *al,
					     struct symbol *sym_parent,
					     struct branch_info *bi,
					     u64 period, u64 weight)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.parent	= sym_parent,
		.branch_info = bi,
		.filtered = symbol__parent_filter(sym_parent),
	};

	return add_hist_entry(self, &entry, al, period, weight);
}
struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period,
				      u64 weight)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.parent	= sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
	};

	return add_hist_entry(self, &entry, al, period, weight);
}
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}
int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}
void hist_entry__free(struct hist_entry *he)
{
	free(he->branch_info);
	free(he->mem_info);
	free(he);
}
/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}
void hists__collapse_resort(struct hists *hists)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
	}
}
/*
 * reverse the map, sort on period.
 */

static int period_cmp(u64 period_a, u64 period_b)
{
	if (period_a > period_b)
		return 1;
	if (period_a < period_b)
		return -1;
	return 0;
}
static int hist_entry__sort_on_period(struct hist_entry *a,
				      struct hist_entry *b)
{
	int ret;
	int i, nr_members;
	struct perf_evsel *evsel;
	struct hist_entry *pair;
	u64 *periods_a, *periods_b;

	ret = period_cmp(a->stat.period, b->stat.period);
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	nr_members = evsel->nr_members;
	if (nr_members <= 1)
		return ret;

	periods_a = zalloc(sizeof(*periods_a) * nr_members);
	periods_b = zalloc(sizeof(*periods_b) * nr_members);

	if (!periods_a || !periods_b)
		goto out;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		periods_a[perf_evsel__group_idx(evsel)] = pair->stat.period;
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		periods_b[perf_evsel__group_idx(evsel)] = pair->stat.period;
	}

	for (i = 1; i < nr_members; i++) {
		ret = period_cmp(periods_a[i], periods_b[i]);
		if (ret)
			break;
	}

out:
	free(periods_a);
	free(periods_b);

	return ret;
}
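
/*
 * Editor's note on the comparison above: with symbol_conf.event_group set,
 * two entries that tie on the leader event's period are further ordered by
 * the periods of the remaining group members (indexes 1..nr_members-1),
 * which are gathered from each entry's ->pairs list.
 */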
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort_on_period(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}
void hists__output_resort(struct hists *hists)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists->nr_entries = 0;
	hists->stats.total_period = 0;
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_nr_entries(hists, n);
	}
}
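
/*
 * Editor's sketch of the assumed call order (not part of the original
 * file): a consumer first merges duplicate entries, then rebuilds the
 * output tree, roughly:
 *
 *	hists__collapse_resort(hists);
 *	hists__output_resort(hists);
 *	hists__output_recalc_col_len(hists, max_rows);
 */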
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	++hists->nr_entries;
	if (h->ms.unfolded)
		hists->nr_entries += h->nr_rows;
	h->row_offset = 0;
	hists->stats.total_period += h->stat.period;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;

	hists__calc_col_len(hists, h);
}
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}
void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}
*hists
)
814 hists
->nr_entries
= hists
->stats
.total_period
= 0;
815 hists
->stats
.nr_events
[PERF_RECORD_SAMPLE
] = 0;
816 hists__reset_col_len(hists
);
818 for (nd
= rb_first(&hists
->entries
); nd
; nd
= rb_next(nd
)) {
819 struct hist_entry
*h
= rb_entry(nd
, struct hist_entry
, rb_node
);
821 if (hists__filter_entry_by_thread(hists
, h
))
824 hists__remove_entry_filter(hists
, h
, HIST_FILTER__THREAD
);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}
void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}
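
/*
 * Editor's sketch of assumed usage (not part of the original file): a
 * browser sets one of the filter fields and reruns the matching pass,
 * for example:
 *
 *	hists->symbol_filter_str = "malloc";
 *	hists__filter_by_symbol(hists);
 *
 * Entries that no longer match get HIST_FILTER__SYMBOL set in ->filtered;
 * entries that still match are re-accounted by hists__remove_entry_filter().
 */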
int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}
int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
	return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}
void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}
void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));

		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_nr_entries(hists, he);
	}
out:
	return he;
}
static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}
/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}
/*
 * Look for entries in the other hists that are not present in the leader; if
 * we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;

			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}
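
/*
 * Editor's sketch of assumed usage (not part of the original file): pairing
 * two histograms for a side-by-side comparison would look roughly like:
 *
 *	hists__match(leader, other);		// link entries present in both
 *	if (hists__link(leader, other) < 0)	// add dummies for the rest
 *		return -1;
 */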