10 #include "parse-events.h"
11 #include "hists_common.h"
/*
 * NOTE(review): this fragment appears to be a pointer field of a local
 * "struct sample" whose opening declaration is not visible in this
 * chunk -- confirm against the full file before editing.
 */
16 struct thread
*thread
;
21 /* For the numbers, see hists_common.c */
/*
 * Samples fed identically to every evsel; after hists__match() each
 * entry recorded here should have found a pair in the other hists.
 * The thread/map/sym fields are filled in later by add_hist_entries()
 * from the resolved addr_location.
 * NOTE(review): the closing "};" of this initializer is not visible in
 * this chunk.
 */
22 static struct sample fake_common_samples
[] = {
23 /* perf [kernel] schedule() */
24 { .pid
= FAKE_PID_PERF1
, .ip
= FAKE_IP_KERNEL_SCHEDULE
, },
25 /* perf [perf] main() */
26 { .pid
= FAKE_PID_PERF2
, .ip
= FAKE_IP_PERF_MAIN
, },
27 /* perf [perf] cmd_record() */
28 { .pid
= FAKE_PID_PERF2
, .ip
= FAKE_IP_PERF_CMD_RECORD
, },
29 /* bash [bash] xmalloc() */
30 { .pid
= FAKE_PID_BASH
, .ip
= FAKE_IP_BASH_XMALLOC
, },
31 /* bash [libc] malloc() */
32 { .pid
= FAKE_PID_BASH
, .ip
= FAKE_IP_LIBC_MALLOC
, },
/*
 * Per-evsel distinct samples: fake_samples[i] holds 5 samples unique to
 * evsel i. Entry "bash [libc] malloc" in the second row duplicates one
 * of fake_common_samples and is expected to collapse into it (see the
 * "will be merged" marker below).
 * NOTE(review): the inner-array delimiters between the two rows and the
 * closing "};" are not visible in this chunk -- confirm row boundaries
 * against the full file.
 */
35 static struct sample fake_samples
[][5] = {
37 /* perf [perf] run_command() */
38 { .pid
= FAKE_PID_PERF1
, .ip
= FAKE_IP_PERF_RUN_COMMAND
, },
39 /* perf [libc] malloc() */
40 { .pid
= FAKE_PID_PERF1
, .ip
= FAKE_IP_LIBC_MALLOC
, },
41 /* perf [kernel] page_fault() */
42 { .pid
= FAKE_PID_PERF1
, .ip
= FAKE_IP_KERNEL_PAGE_FAULT
, },
43 /* perf [kernel] sys_perf_event_open() */
44 { .pid
= FAKE_PID_PERF2
, .ip
= FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN
, },
45 /* bash [libc] free() */
46 { .pid
= FAKE_PID_BASH
, .ip
= FAKE_IP_LIBC_FREE
, },
49 /* perf [libc] free() */
50 { .pid
= FAKE_PID_PERF2
, .ip
= FAKE_IP_LIBC_FREE
, },
51 /* bash [libc] malloc() */
52 { .pid
= FAKE_PID_BASH
, .ip
= FAKE_IP_LIBC_MALLOC
, }, /* will be merged */
53 /* bash [bash] xfee() */
54 { .pid
= FAKE_PID_BASH
, .ip
= FAKE_IP_BASH_XFREE
, },
55 /* bash [libc] realloc() */
56 { .pid
= FAKE_PID_BASH
, .ip
= FAKE_IP_LIBC_REALLOC
, },
57 /* bash [kernel] page_fault() */
58 { .pid
= FAKE_PID_BASH
, .ip
= FAKE_IP_KERNEL_PAGE_FAULT
, },
/*
 * Feed the fake samples into every evsel's hists: for each evsel, add
 * the 5 common samples plus that evsel's 5 distinct samples, resolving
 * each ip/pid to an addr_location first and recording the resolved
 * thread/map/sym back into the sample tables for later validation.
 * Returns 0 on success, an error on resolution/allocation failure.
 * NOTE(review): several statements of this function (loop variable
 * declarations for i/k, the error labels/returns, and the tail of the
 * event initializer) are not visible in this chunk -- the comments
 * below only describe what IS visible.
 */
62 static int add_hist_entries(struct perf_evlist
*evlist
, struct machine
*machine
)
64 struct perf_evsel
*evsel
;
65 struct addr_location al
;
66 struct hist_entry
*he
;
/* every fake sample contributes a fixed period of 1 */
67 struct perf_sample sample
= { .period
= 1, };
71 * each evsel will have 10 samples - 5 common and 5 distinct.
72 * However the second evsel also has a collapsed entry for
73 * "bash [libc] malloc" so total 9 entries will be in the tree.
75 evlist__for_each(evlist
, evsel
) {
76 struct hists
*hists
= evsel__hists(evsel
);
/* first pass: samples shared by all evsels */
78 for (k
= 0; k
< ARRAY_SIZE(fake_common_samples
); k
++) {
79 const union perf_event event
= {
81 .misc
= PERF_RECORD_MISC_USER
,
85 sample
.pid
= fake_common_samples
[k
].pid
;
/* tid set to pid -- presumably single-threaded fake tasks; confirm */
86 sample
.tid
= fake_common_samples
[k
].pid
;
87 sample
.ip
= fake_common_samples
[k
].ip
;
88 if (perf_event__preprocess_sample(&event
, machine
, &al
,
92 he
= __hists__add_entry(hists
, &al
, NULL
,
93 NULL
, NULL
, 1, 1, 0, true);
/* remember the resolved location so the validators can match it */
97 fake_common_samples
[k
].thread
= al
.thread
;
98 fake_common_samples
[k
].map
= al
.map
;
99 fake_common_samples
[k
].sym
= al
.sym
;
/* second pass: samples unique to this evsel (index i) */
102 for (k
= 0; k
< ARRAY_SIZE(fake_samples
[i
]); k
++) {
103 const union perf_event event
= {
105 .misc
= PERF_RECORD_MISC_USER
,
109 sample
.pid
= fake_samples
[i
][k
].pid
;
110 sample
.tid
= fake_samples
[i
][k
].pid
;
111 sample
.ip
= fake_samples
[i
][k
].ip
;
112 if (perf_event__preprocess_sample(&event
, machine
, &al
,
116 he
= __hists__add_entry(hists
, &al
, NULL
,
117 NULL
, NULL
, 1, 1, 0, true);
121 fake_samples
[i
][k
].thread
= al
.thread
;
122 fake_samples
[i
][k
].map
= al
.map
;
123 fake_samples
[i
][k
].sym
= al
.sym
;
/* reached when __hists__add_entry() returns NULL */
131 pr_debug("Not enough memory for adding a hist entry\n");
/*
 * Linear scan of a sample table for an entry whose resolved
 * thread/map (and, presumably, symbol -- the tail of the condition is
 * not visible in this chunk) equal the given t/m/s.
 * NOTE(review): the return statements of this function are also not
 * visible here; callers appear to treat the result as a boolean.
 */
135 static int find_sample(struct sample
*samples
, size_t nr_samples
,
136 struct thread
*t
, struct map
*m
, struct symbol
*s
)
138 while (nr_samples
--) {
139 if (samples
->thread
== t
&& samples
->map
== m
&&
/*
 * Walk one hists tree and check that every entry that acquired a pair
 * (via hists__match()) corresponds to one of fake_common_samples, and
 * that the number of paired entries equals the size of that table.
 * NOTE(review): the while-loop header, the else branch of the
 * root selection, the counter increments and the return statements are
 * not visible in this chunk.
 */
147 static int __validate_match(struct hists
*hists
)
150 struct rb_root
*root
;
151 struct rb_node
*node
;
154 * Only entries from fake_common_samples should have a pair.
/* pick the collapsed tree when the sort keys require collapsing */
156 if (sort__need_collapse
)
157 root
= &hists
->entries_collapsed
;
159 root
= hists
->entries_in
;
161 node
= rb_first(root
);
163 struct hist_entry
*he
;
165 he
= rb_entry(node
, struct hist_entry
, rb_node_in
);
167 if (hist_entry__has_pairs(he
)) {
168 if (find_sample(fake_common_samples
,
169 ARRAY_SIZE(fake_common_samples
),
170 he
->thread
, he
->ms
.map
, he
->ms
.sym
)) {
173 pr_debug("Can't find the matched entry\n");
178 node
= rb_next(node
);
181 if (count
!= ARRAY_SIZE(fake_common_samples
)) {
182 pr_debug("Invalid count for matched entries: %zd of %zd\n",
183 count
, ARRAY_SIZE(fake_common_samples
));
/*
 * Run the per-hists matching checks on both the leader and the other
 * hists. Returns non-zero (1) if either check fails, 0 when both pass.
 * Short-circuits: the other hists is only checked when the leader's
 * check succeeded.
 */
static int validate_match(struct hists *leader, struct hists *other)
{
	int failed = __validate_match(leader);

	if (!failed)
		failed = __validate_match(other);

	return failed != 0;
}
/*
 * Walk one hists tree after hists__link() and verify the pairing
 * invariants described in the comment below: the leader (idx == 0) may
 * contain dummy entries inserted from the other hists, while every
 * entry of the other hists (idx == 1) must have a pair. The expected
 * totals are cross-checked against the fake sample tables.
 * NOTE(review): the while-loop header, else branches, the
 * count/count_pair/count_dummy increments and the return statements
 * are not visible in this chunk.
 */
195 static int __validate_link(struct hists
*hists
, int idx
)
198 size_t count_pair
= 0;
199 size_t count_dummy
= 0;
200 struct rb_root
*root
;
201 struct rb_node
*node
;
204 * Leader hists (idx = 0) will have dummy entries from other,
205 * and some entries will have no pair. However every entry
206 * in other hists should have (dummy) pair.
/* pick the collapsed tree when the sort keys require collapsing */
208 if (sort__need_collapse
)
209 root
= &hists
->entries_collapsed
;
211 root
= hists
->entries_in
;
213 node
= rb_first(root
);
215 struct hist_entry
*he
;
217 he
= rb_entry(node
, struct hist_entry
, rb_node_in
);
219 if (hist_entry__has_pairs(he
)) {
/* a paired entry must come from either the common or this hists' own samples */
220 if (!find_sample(fake_common_samples
,
221 ARRAY_SIZE(fake_common_samples
),
222 he
->thread
, he
->ms
.map
, he
->ms
.sym
) &&
223 !find_sample(fake_samples
[idx
],
224 ARRAY_SIZE(fake_samples
[idx
]),
225 he
->thread
, he
->ms
.map
, he
->ms
.sym
)) {
230 pr_debug("A entry from the other hists should have pair\n");
235 node
= rb_next(node
);
239 * Note that we have a entry collapsed in the other (idx = 1) hists.
/* leader: expect one dummy per distinct other-sample, minus the collapsed one */
242 if (count_dummy
!= ARRAY_SIZE(fake_samples
[1]) - 1) {
243 pr_debug("Invalid count of dummy entries: %zd of %zd\n",
244 count_dummy
, ARRAY_SIZE(fake_samples
[1]) - 1);
247 if (count
!= count_pair
+ ARRAY_SIZE(fake_samples
[0])) {
248 pr_debug("Invalid count of total leader entries: %zd of %zd\n",
249 count
, count_pair
+ ARRAY_SIZE(fake_samples
[0]));
/* other hists: every entry must be paired and none may be dummy */
253 if (count
!= count_pair
) {
254 pr_debug("Invalid count of total other entries: %zd of %zd\n",
258 if (count_dummy
> 0) {
259 pr_debug("Other hists should not have dummy entries: %zd\n",
/*
 * Run the per-hists link checks on both hists: the leader is validated
 * with idx 0 (may hold dummy entries), the other with idx 1 (must be
 * fully paired). Returns non-zero (1) if either check fails, 0 when
 * both pass. Short-circuits: the other hists is only checked when the
 * leader's check succeeded.
 */
static int validate_link(struct hists *leader, struct hists *other)
{
	int failed = __validate_link(leader, 0);

	if (!failed)
		failed = __validate_link(other, 1);

	return failed != 0;
}
/*
 * Test entry point: build two evsels (cpu-clock, task-clock), populate
 * their hists from the fake samples, then exercise hists__match() and
 * hists__link() between the first and last evsel, validating the
 * resulting pairings. Tears down the evlist, sort setup and machines
 * on exit.
 * NOTE(review): many statements (opening brace, err checks/gotos, the
 * final return and closing brace) are not visible in this chunk, and
 * the function runs past the end of the visible source.
 */
273 int test__hists_link(void)
276 struct hists
*hists
, *first_hists
;
277 struct machines machines
;
278 struct machine
*machine
= NULL
;
279 struct perf_evsel
*evsel
, *first
;
280 struct perf_evlist
*evlist
= perf_evlist__new();
285 err
= parse_events(evlist
, "cpu-clock");
288 err
= parse_events(evlist
, "task-clock");
292 /* default sort order (comm,dso,sym) will be used */
293 if (setup_sorting() < 0)
296 machines__init(&machines
);
298 /* setup threads/dso/map/symbols also */
299 machine
= setup_fake_machine(&machines
);
304 machine__fprintf(machine
, stderr
);
306 /* process sample events */
307 err
= add_hist_entries(evlist
, machine
);
/* collapse each evsel's hists before matching/linking */
311 evlist__for_each(evlist
, evsel
) {
312 hists
= evsel__hists(evsel
);
313 hists__collapse_resort(hists
, NULL
);
316 print_hists_in(hists
);
319 first
= perf_evlist__first(evlist
);
320 evsel
= perf_evlist__last(evlist
);
322 first_hists
= evsel__hists(first
);
323 hists
= evsel__hists(evsel
);
325 /* match common entries */
326 hists__match(first_hists
, hists
);
327 err
= validate_match(first_hists
, hists
);
331 /* link common and/or dummy entries */
332 hists__link(first_hists
, hists
);
333 err
= validate_link(first_hists
, hists
);
340 /* tear down everything */
341 perf_evlist__delete(evlist
);
342 reset_output_field();
343 machines__exit(&machines
);