#include "parse-events.h"

static struct {
	u32 pid;
	u64 start;
	const char *filename;
} fake_mmap_info[] = {
	{ 100, 0x40000, "perf" },
	{ 100, 0x50000, "libc" },
	{ 100, 0xf0000, "[kernel]" },
	{ 200, 0x40000, "perf" },
	{ 200, 0x50000, "libc" },
	{ 200, 0xf0000, "[kernel]" },
	{ 300, 0x40000, "bash" },
	{ 300, 0x50000, "libc" },
	{ 300, 0xf0000, "[kernel]" },
};

struct fake_sym {
	u64 start;
	u64 length;
	const char *name;
};

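/* Fake symbols: each one is 100 bytes long at a fixed offset inside its DSO. */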
static struct fake_sym perf_syms[] = {
	{ 700, 100, "main" },
	{ 800, 100, "run_command" },
	{ 900, 100, "cmd_record" },
};

static struct fake_sym bash_syms[] = {
	{ 800, 100, "xmalloc" },
	{ 900, 100, "xfree" },
};

static struct fake_sym libc_syms[] = {
	{ 700, 100, "malloc" },
	{ 800, 100, "free" },
	{ 900, 100, "realloc" },
};

static struct fake_sym kernel_syms[] = {
	{ 700, 100, "schedule" },
	{ 800, 100, "page_fault" },
	{ 900, 100, "sys_perf_event_open" },
};

static struct {
	const char *dso_name;
	struct fake_sym *syms;
	size_t nr_syms;
} fake_symbols[] = {
	{ "perf", perf_syms, ARRAY_SIZE(perf_syms) },
	{ "bash", bash_syms, ARRAY_SIZE(bash_syms) },
	{ "libc", libc_syms, ARRAY_SIZE(libc_syms) },
	{ "[kernel]", kernel_syms, ARRAY_SIZE(kernel_syms) },
};

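/*
 * Create a fake host machine and populate it with the fake threads,
 * mmaps and symbols defined above, so that samples can be resolved
 * without reading a real perf.data file.
 */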
static struct machine *setup_fake_machine(struct machines *machines)
{
	struct machine *machine = machines__find(machines, HOST_KERNEL_ID);
	size_t i;

	if (machine == NULL) {
		pr_debug("Not enough memory for machine setup\n");
		return NULL;
	}

	for (i = 0; i < ARRAY_SIZE(fake_threads); i++) {
		struct thread *thread;

		thread = machine__findnew_thread(machine, fake_threads[i].pid,
						 fake_threads[i].pid);
		if (thread == NULL)
			goto out;

		thread__set_comm(thread, fake_threads[i].comm, 0);
	}

	for (i = 0; i < ARRAY_SIZE(fake_mmap_info); i++) {
		union perf_event fake_mmap_event = {
			.mmap = {
				.header = { .misc = PERF_RECORD_MISC_USER, },
				.pid = fake_mmap_info[i].pid,
				.tid = fake_mmap_info[i].pid,
				.start = fake_mmap_info[i].start,
				.len = 0x1000ULL,
			},
		};

		strcpy(fake_mmap_event.mmap.filename,
		       fake_mmap_info[i].filename);

		machine__process_mmap_event(machine, &fake_mmap_event, NULL);
	}

	for (i = 0; i < ARRAY_SIZE(fake_symbols); i++) {
		size_t k;
		struct dso *dso;

		dso = __dsos__findnew(&machine->user_dsos,
				      fake_symbols[i].dso_name);
		if (dso == NULL)
			goto out;

		/* emulate dso__load() */
		dso__set_loaded(dso, MAP__FUNCTION);

		for (k = 0; k < fake_symbols[i].nr_syms; k++) {
			struct symbol *sym;
			struct fake_sym *fsym = &fake_symbols[i].syms[k];

			sym = symbol__new(fsym->start, fsym->length,
					  STB_GLOBAL, fsym->name);
			if (sym == NULL)
				goto out;

			symbols__insert(&dso->symbols[MAP__FUNCTION], sym);
		}
	}

	return machine;

out:
	pr_debug("Not enough memory for machine setup\n");
	machine__delete_threads(machine);
	machine__delete(machine);
	return NULL;
}

struct sample {
	u32 pid;
	u64 ip;
	struct thread *thread;
	struct map *map;
	struct symbol *sym;
};

static struct sample fake_common_samples[] = {
	/* perf [kernel] schedule() */
	{ .pid = 100, .ip = 0xf0000 + 700, },
	/* perf [perf]   main() */
	{ .pid = 200, .ip = 0x40000 + 700, },
	/* perf [perf]   cmd_record() */
	{ .pid = 200, .ip = 0x40000 + 900, },
	/* bash [bash]   xmalloc() */
	{ .pid = 300, .ip = 0x40000 + 800, },
	/* bash [libc]   malloc() */
	{ .pid = 300, .ip = 0x50000 + 700, },
};

static struct sample fake_samples[][5] = {
	{
		/* perf [perf]   run_command() */
		{ .pid = 100, .ip = 0x40000 + 800, },
		/* perf [libc]   malloc() */
		{ .pid = 100, .ip = 0x50000 + 700, },
		/* perf [kernel] page_fault() */
		{ .pid = 100, .ip = 0xf0000 + 800, },
		/* perf [kernel] sys_perf_event_open() */
		{ .pid = 200, .ip = 0xf0000 + 900, },
		/* bash [libc]   free() */
		{ .pid = 300, .ip = 0x50000 + 800, },
	},
	{
		/* perf [libc]   free() */
		{ .pid = 200, .ip = 0x50000 + 800, },
		/* bash [libc]   malloc() */
		{ .pid = 300, .ip = 0x50000 + 700, }, /* will be merged */
		/* bash [bash]   xfree() */
		{ .pid = 300, .ip = 0x40000 + 900, },
		/* bash [libc]   realloc() */
		{ .pid = 300, .ip = 0x50000 + 900, },
		/* bash [kernel] page_fault() */
		{ .pid = 300, .ip = 0xf0000 + 800, },
	},
};

static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
{
	struct perf_evsel *evsel;
	struct addr_location al;
	struct hist_entry *he;
	struct perf_sample sample = { .cpu = 0, };
	size_t i = 0, k;

	/*
	 * Each evsel will have 10 samples: 5 common and 5 distinct.
	 * However, the second evsel also has a collapsed entry for
	 * "bash [libc] malloc", so 9 entries in total will be in its tree.
	 */
	evlist__for_each(evlist, evsel) {
		for (k = 0; k < ARRAY_SIZE(fake_common_samples); k++) {
			const union perf_event event = {
				.header = {
					.misc = PERF_RECORD_MISC_USER,
				},
			};

			sample.pid = fake_common_samples[k].pid;
			sample.ip = fake_common_samples[k].ip;
			if (perf_event__preprocess_sample(&event, machine, &al,
							  &sample) < 0)
				goto out;

			he = __hists__add_entry(&evsel->hists, &al, NULL,
						NULL, NULL, 1, 1, 0);
			if (he == NULL)
				goto out;

			fake_common_samples[k].thread = al.thread;
			fake_common_samples[k].map = al.map;
			fake_common_samples[k].sym = al.sym;
		}

		for (k = 0; k < ARRAY_SIZE(fake_samples[i]); k++) {
			const union perf_event event = {
				.header = {
					.misc = PERF_RECORD_MISC_USER,
				},
			};

			sample.pid = fake_samples[i][k].pid;
			sample.ip = fake_samples[i][k].ip;
			if (perf_event__preprocess_sample(&event, machine, &al,
							  &sample) < 0)
				goto out;

			he = __hists__add_entry(&evsel->hists, &al, NULL,
						NULL, NULL, 1, 1, 0);
			if (he == NULL)
				goto out;

			fake_samples[i][k].thread = al.thread;
			fake_samples[i][k].map = al.map;
			fake_samples[i][k].sym = al.sym;
		}

		i++;
	}

	return 0;

out:
	pr_debug("Not enough memory for adding a hist entry\n");
	return -1;
}

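/* Return 1 if any sample in the array resolved to the given thread, map and symbol. */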
static int find_sample(struct sample *samples, size_t nr_samples,
		       struct thread *t, struct map *m, struct symbol *s)
{
	while (nr_samples--) {
		if (samples->thread == t && samples->map == m &&
		    samples->sym == s)
			return 1;
		samples++;
	}
	return 0;
}

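/*
 * After hists__match(), exactly the entries that came from
 * fake_common_samples should have been paired.
 */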
static int __validate_match(struct hists *hists)
{
	size_t count = 0;
	struct rb_root *root;
	struct rb_node *node;

	/*
	 * Only entries from fake_common_samples should have a pair.
	 */
	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	node = rb_first(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(he)) {
			if (find_sample(fake_common_samples,
					ARRAY_SIZE(fake_common_samples),
					he->thread, he->ms.map, he->ms.sym)) {
				count++;
			} else {
				pr_debug("Can't find the matched entry\n");
				return -1;
			}
		}

		node = rb_next(node);
	}

	if (count != ARRAY_SIZE(fake_common_samples)) {
		pr_debug("Invalid count for matched entries: %zd of %zd\n",
			 count, ARRAY_SIZE(fake_common_samples));
		return -1;
	}

	return 0;
}

static int validate_match(struct hists *leader, struct hists *other)
{
	return __validate_match(leader) || __validate_match(other);
}

static int __validate_link(struct hists *hists, int idx)
{
	size_t count = 0;
	size_t count_pair = 0;
	size_t count_dummy = 0;
	struct rb_root *root;
	struct rb_node *node;

	/*
	 * Leader hists (idx = 0) will have dummy entries from the other,
	 * and some entries will have no pair.  However every entry
	 * in the other hists should have a (dummy) pair.
	 */
	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	node = rb_first(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(he)) {
			if (!find_sample(fake_common_samples,
					 ARRAY_SIZE(fake_common_samples),
					 he->thread, he->ms.map, he->ms.sym) &&
			    !find_sample(fake_samples[idx],
					 ARRAY_SIZE(fake_samples[idx]),
					 he->thread, he->ms.map, he->ms.sym)) {
				count_dummy++;
			}
			count_pair++;
		} else if (idx) {
			pr_debug("An entry from the other hists should have a pair\n");
			return -1;
		}

		count++;
		node = rb_next(node);
	}

	/*
	 * Note that we have an entry collapsed in the other (idx = 1) hists.
	 */
	if (idx == 0) {
		if (count_dummy != ARRAY_SIZE(fake_samples[1]) - 1) {
			pr_debug("Invalid count of dummy entries: %zd of %zd\n",
				 count_dummy, ARRAY_SIZE(fake_samples[1]) - 1);
			return -1;
		}
		if (count != count_pair + ARRAY_SIZE(fake_samples[0])) {
			pr_debug("Invalid count of total leader entries: %zd of %zd\n",
				 count, count_pair + ARRAY_SIZE(fake_samples[0]));
			return -1;
		}
	} else {
		if (count != count_pair) {
			pr_debug("Invalid count of total other entries: %zd of %zd\n",
				 count, count_pair);
			return -1;
		}
		if (count_dummy > 0) {
			pr_debug("Other hists should not have dummy entries: %zd\n",
				 count_dummy);
			return -1;
		}
	}

	return 0;
}

static int validate_link(struct hists *leader, struct hists *other)
{
	return __validate_link(leader, 0) || __validate_link(other, 1);
}

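/* Debug helper: dump each hist entry's comm, dso, symbol and period. */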
static void print_hists(struct hists *hists)
{
	int i = 0;
	struct rb_root *root;
	struct rb_node *node;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	pr_info("----- %s --------\n", __func__);
	node = rb_first(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		pr_info("%2d: entry: %-8s [%-8s] %20s: period = %"PRIu64"\n",
			i, thread__comm_str(he->thread),
			he->ms.map->dso->short_name,
			he->ms.sym->name, he->stat.period);

		i++;
		node = rb_next(node);
	}
}

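/*
 * The test body: create two evsels, feed both the fake samples, then
 * check that hists__match() and hists__link() produce the expected
 * matched, dummy and total entry counts.
 */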
int test__hists_link(void)
{
	int err = -1;
	struct machines machines;
	struct machine *machine = NULL;
	struct perf_evsel *evsel, *first;
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist == NULL)
		return -ENOMEM;

	err = parse_events(evlist, "cpu-clock");
	if (err)
		goto out;
	err = parse_events(evlist, "task-clock");
	if (err)
		goto out;

	/* default sort order (comm,dso,sym) will be used */
	if (setup_sorting() < 0)
		goto out;

	machines__init(&machines);

	/* setup threads/dso/map/symbols also */
	machine = setup_fake_machine(&machines);
	if (!machine)
		goto out;

	if (verbose > 1)
		machine__fprintf(machine, stderr);

	/* process sample events */
	err = add_hist_entries(evlist, machine);
	if (err < 0)
		goto out;

	evlist__for_each(evlist, evsel) {
		hists__collapse_resort(&evsel->hists, NULL);

		if (verbose > 2)
			print_hists(&evsel->hists);
	}

	first = perf_evlist__first(evlist);
	evsel = perf_evlist__last(evlist);

	/* match common entries */
	hists__match(&first->hists, &evsel->hists);
	err = validate_match(&first->hists, &evsel->hists);
	if (err)
		goto out;

	/* link common and/or dummy entries */
	hists__link(&first->hists, &evsel->hists);
	err = validate_link(&first->hists, &evsel->hists);

out:
	/* tear down everything */
	perf_evlist__delete(evlist);
	machines__exit(&machines);

	return err;
}