#include "parse-events.h"
#include "hists_common.h"

struct sample {
	u32 pid;
	u64 ip;
	struct thread *thread;
	struct map *map;
	struct symbol *sym;
};
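
/*
 * The thread/map/sym fields above are filled in by add_hist_entries()
 * when the fake samples are resolved, and find_sample() later uses them
 * to check that matched/linked hist entries refer to the expected samples.
 */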

/* For the numbers, see hists_common.c */
static struct sample fake_common_samples[] = {
	/* perf [kernel] schedule() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
	/* perf [perf]   main() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
	/* perf [perf]   cmd_record() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_CMD_RECORD, },
	/* bash [bash]   xmalloc() */
	{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_XMALLOC, },
	/* bash [libc]   malloc() */
	{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_LIBC_MALLOC, },
};

static struct sample fake_samples[][5] = {
	{
		/* perf [perf]   run_command() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_RUN_COMMAND, },
		/* perf [libc]   malloc() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
		/* perf [kernel] page_fault() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
		/* perf [kernel] sys_perf_event_open() */
		{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN, },
		/* bash [libc]   free() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_LIBC_FREE, },
	},
	{
		/* perf [libc]   free() */
		{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_LIBC_FREE, },
		/* bash [libc]   malloc() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_LIBC_MALLOC, }, /* will be merged */
		/* bash [bash]   xfree() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_XFREE, },
		/* bash [libc]   realloc() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_LIBC_REALLOC, },
		/* bash [kernel] page_fault() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
	},
};
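
/*
 * fake_samples[0] goes to the first (leader) evsel and fake_samples[1] to
 * the second evsel; every evsel additionally gets all of the
 * fake_common_samples above.
 */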

static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
{
	struct perf_evsel *evsel;
	struct addr_location al;
	struct hist_entry *he;
	struct perf_sample sample = { .period = 1, };
	size_t i = 0, k;

	/*
	 * each evsel will have 10 samples - 5 common and 5 distinct.
	 * However the second evsel also has a collapsed entry for
	 * "bash [libc] malloc" so total 9 entries will be in the tree.
	 */
	evlist__for_each(evlist, evsel) {
		struct hists *hists = evsel__hists(evsel);

		for (k = 0; k < ARRAY_SIZE(fake_common_samples); k++) {
			const union perf_event event = {
				.header = {
					.misc = PERF_RECORD_MISC_USER,
				},
			};

			sample.pid = fake_common_samples[k].pid;
			sample.tid = fake_common_samples[k].pid;
			sample.ip = fake_common_samples[k].ip;
			if (perf_event__preprocess_sample(&event, machine, &al,
							  &sample) < 0)
				goto out;

			he = __hists__add_entry(hists, &al, NULL,
						NULL, NULL, 1, 1, 0, true);
			if (he == NULL) {
				addr_location__put(&al);
				goto out;
			}

			fake_common_samples[k].thread = al.thread;
			fake_common_samples[k].map = al.map;
			fake_common_samples[k].sym = al.sym;
		}
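
		/* Now add the five samples specific to this evsel. */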
		for (k = 0; k < ARRAY_SIZE(fake_samples[i]); k++) {
			const union perf_event event = {
				.header = {
					.misc = PERF_RECORD_MISC_USER,
				},
			};

			sample.pid = fake_samples[i][k].pid;
			sample.tid = fake_samples[i][k].pid;
			sample.ip = fake_samples[i][k].ip;
			if (perf_event__preprocess_sample(&event, machine, &al,
							  &sample) < 0)
				goto out;

			he = __hists__add_entry(hists, &al, NULL,
						NULL, NULL, 1, 1, 0, true);
			if (he == NULL) {
				addr_location__put(&al);
				goto out;
			}

			fake_samples[i][k].thread = al.thread;
			fake_samples[i][k].map = al.map;
			fake_samples[i][k].sym = al.sym;
		}
		i++;
	}

	return 0;

out:
	pr_debug("Not enough memory for adding a hist entry\n");
	return -1;
}
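
/* Return 1 if a recorded sample matches the given thread, map and symbol. */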
static int find_sample(struct sample *samples, size_t nr_samples,
		       struct thread *t, struct map *m, struct symbol *s)
{
	while (nr_samples--) {
		if (samples->thread == t && samples->map == m &&
		    samples->sym == s)
			return 1;
		samples++;
	}
	return 0;
}

static int __validate_match(struct hists *hists)
{
	size_t count = 0;
	struct rb_root *root;
	struct rb_node *node;

	/*
	 * Only entries from fake_common_samples should have a pair.
	 */
	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	node = rb_first(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(he)) {
			if (find_sample(fake_common_samples,
					ARRAY_SIZE(fake_common_samples),
					he->thread, he->ms.map, he->ms.sym)) {
				count++;
			} else {
				pr_debug("Can't find the matched entry\n");
				return -1;
			}
		}

		node = rb_next(node);
	}

	if (count != ARRAY_SIZE(fake_common_samples)) {
		pr_debug("Invalid count for matched entries: %zd of %zd\n",
			 count, ARRAY_SIZE(fake_common_samples));
		return -1;
	}

	return 0;
}

static int validate_match(struct hists *leader, struct hists *other)
{
	return __validate_match(leader) || __validate_match(other);
}
static int __validate_link(struct hists *hists, int idx)
{
	size_t count = 0;
	size_t count_pair = 0;
	size_t count_dummy = 0;
	struct rb_root *root;
	struct rb_node *node;

	/*
	 * Leader hists (idx = 0) will have dummy entries from other,
	 * and some entries will have no pair.  However every entry
	 * in other hists should have a (dummy) pair.
	 */
	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	node = rb_first(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(he)) {
			if (!find_sample(fake_common_samples,
					 ARRAY_SIZE(fake_common_samples),
					 he->thread, he->ms.map, he->ms.sym) &&
			    !find_sample(fake_samples[idx],
					 ARRAY_SIZE(fake_samples[idx]),
					 he->thread, he->ms.map, he->ms.sym)) {
				count_dummy++;
			}
			count_pair++;
		} else if (idx) {
			pr_debug("An entry from the other hists should have a pair\n");
			return -1;
		}

		count++;
		node = rb_next(node);
	}

	/*
	 * Note that we have an entry collapsed in the other (idx = 1) hists.
	 */
	if (idx == 0) {
		if (count_dummy != ARRAY_SIZE(fake_samples[1]) - 1) {
			pr_debug("Invalid count of dummy entries: %zd of %zd\n",
				 count_dummy, ARRAY_SIZE(fake_samples[1]) - 1);
			return -1;
		}
		if (count != count_pair + ARRAY_SIZE(fake_samples[0])) {
			pr_debug("Invalid count of total leader entries: %zd of %zd\n",
				 count, count_pair + ARRAY_SIZE(fake_samples[0]));
			return -1;
		}
	} else {
		if (count != count_pair) {
			pr_debug("Invalid count of total other entries: %zd of %zd\n",
				 count, count_pair);
			return -1;
		}
		if (count_dummy > 0) {
			pr_debug("Other hists should not have dummy entries: %zd\n",
				 count_dummy);
			return -1;
		}
	}

	return 0;
}

static int validate_link(struct hists *leader, struct hists *other)
{
	return __validate_link(leader, 0) || __validate_link(other, 1);
}
int test__hists_link(void)
{
	int err = -1;
	struct hists *hists, *first_hists;
	struct machines machines;
	struct machine *machine = NULL;
	struct perf_evsel *evsel, *first;
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist == NULL)
		return -ENOMEM;

	err = parse_events(evlist, "cpu-clock", NULL);
	if (err)
		goto out;
	err = parse_events(evlist, "task-clock", NULL);
	if (err)
		goto out;

	/* default sort order (comm,dso,sym) will be used */
	if (setup_sorting() < 0)
		goto out;

	machines__init(&machines);

	/* setup threads/dso/map/symbols also */
	machine = setup_fake_machine(&machines);
	if (!machine)
		goto out;

	if (verbose > 1)
		machine__fprintf(machine, stderr);

	/* process sample events */
	err = add_hist_entries(evlist, machine);
	if (err < 0)
		goto out;

	evlist__for_each(evlist, evsel) {
		hists = evsel__hists(evsel);
		hists__collapse_resort(hists, NULL);

		if (verbose > 2)
			print_hists_in(hists);
	}

	first = perf_evlist__first(evlist);
	evsel = perf_evlist__last(evlist);

	first_hists = evsel__hists(first);
	hists = evsel__hists(evsel);

	/* match common entries */
	hists__match(first_hists, hists);
	err = validate_match(first_hists, hists);
	if (err)
		goto out;

	/* link common and/or dummy entries */
	hists__link(first_hists, hists);
	err = validate_link(first_hists, hists);
	if (err)
		goto out;

out:
	/* tear down everything */
	perf_evlist__delete(evlist);
	reset_output_field();
	machines__exit(&machines);

	return err;
}