#include <linux/types.h>
#include "thread_map.h"
#include "symbol/kallsyms.h"
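
/*
 * Human-readable names for the PERF_RECORD_* types handled in this file,
 * indexed by event type; perf_event__name() below looks them up.
 */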
static const char *perf_event__names[] = {
        [PERF_RECORD_MMAP] = "MMAP",
        [PERF_RECORD_MMAP2] = "MMAP2",
        [PERF_RECORD_LOST] = "LOST",
        [PERF_RECORD_COMM] = "COMM",
        [PERF_RECORD_EXIT] = "EXIT",
        [PERF_RECORD_THROTTLE] = "THROTTLE",
        [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
        [PERF_RECORD_FORK] = "FORK",
        [PERF_RECORD_READ] = "READ",
        [PERF_RECORD_SAMPLE] = "SAMPLE",
        [PERF_RECORD_HEADER_ATTR] = "ATTR",
        [PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE",
        [PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
        [PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID",
        [PERF_RECORD_FINISHED_ROUND] = "FINISHED_ROUND",
};
const char *perf_event__name(unsigned int id)
        if (id >= ARRAY_SIZE(perf_event__names))
        if (!perf_event__names[id])
        return perf_event__names[id];
static struct perf_sample synth_sample = {
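
/*
 * Read /proc/<pid>/status and pick up the task's COMM string ("Name:")
 * and its thread group id ("Tgid:").
 */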
static pid_t perf_event__get_comm_tgid(pid_t pid, char *comm, size_t len)
        char filename[PATH_MAX];

        snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

        fp = fopen(filename, "r");
        if (fp == NULL) {
                pr_debug("couldn't open %s\n", filename);

        while (!comm[0] || (tgid < 0)) {
                if (fgets(bf, sizeof(bf), fp) == NULL) {
                        pr_warning("couldn't get COMM and pgid, malformed %s\n",
                                   filename);

                if (memcmp(bf, "Name:", 5) == 0) {
                        while (*name && isspace(*name))
                        size = strlen(name) - 1;
                        memcpy(comm, name, size);
                } else if (memcmp(bf, "Tgid:", 5) == 0) {
                        while (*tgids && isspace(*tgids))
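
/*
 * Fill in and deliver a synthetic PERF_RECORD_COMM event for @pid,
 * resolving the thread group id from /proc when running on the host.
 */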
static pid_t perf_event__synthesize_comm(struct perf_tool *tool,
                                         union perf_event *event, pid_t pid,
                                         perf_event__handler_t process,
                                         struct machine *machine)
        memset(&event->comm, 0, sizeof(event->comm));

        if (machine__is_host(machine))
                tgid = perf_event__get_comm_tgid(pid, event->comm.comm,
                                                 sizeof(event->comm.comm));

        event->comm.pid = tgid;
        event->comm.header.type = PERF_RECORD_COMM;

        size = strlen(event->comm.comm) + 1;
        size = PERF_ALIGN(size, sizeof(u64));
        memset(event->comm.comm + size, 0, machine->id_hdr_size);
        event->comm.header.size = (sizeof(event->comm) -
                                   (sizeof(event->comm.comm) - size) +
                                   machine->id_hdr_size);
        event->comm.tid = pid;

        if (process(tool, event, &synth_sample, machine) != 0)
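
/*
 * Deliver a synthetic PERF_RECORD_FORK that attaches thread @pid to its
 * thread group leader @tgid.
 */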
static int perf_event__synthesize_fork(struct perf_tool *tool,
                                       union perf_event *event, pid_t pid,
                                       pid_t tgid, perf_event__handler_t process,
                                       struct machine *machine)
        memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);

        /* this is really a clone event but we use fork to synthesize it */
        event->fork.ppid = tgid;
        event->fork.ptid = tgid;
        event->fork.pid = tgid;
        event->fork.tid = pid;
        event->fork.header.type = PERF_RECORD_FORK;

        event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);

        if (process(tool, event, &synth_sample, machine) != 0)
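
/*
 * Walk /proc/<pid>/maps and deliver one synthetic PERF_RECORD_MMAP2 per
 * mapping, translating the rwxp/s column into prot and flags bits.
 */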
int perf_event__synthesize_mmap_events(struct perf_tool *tool,
                                       union perf_event *event,
                                       pid_t pid, pid_t tgid,
                                       perf_event__handler_t process,
                                       struct machine *machine,
                                       bool mmap_data)
        char filename[PATH_MAX];

        if (machine__is_default_guest(machine))

        snprintf(filename, sizeof(filename), "%s/proc/%d/maps",
                 machine->root_dir, pid);

        fp = fopen(filename, "r");
        if (fp == NULL) {
                /*
                 * We raced with a task exiting - just return:
                 */
                pr_debug("couldn't open %s\n", filename);

        event->header.type = PERF_RECORD_MMAP2;

        while (1) {
                char execname[PATH_MAX];
                char anonstr[] = "//anon";

                if (fgets(bf, sizeof(bf), fp) == NULL)

                /* ensure null termination since stack will be reused. */
                strcpy(execname, "");

                /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
                n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n",
                           &event->mmap2.start, &event->mmap2.len, prot,
                           &event->mmap2.pgoff, &event->mmap2.maj,
                           &event->mmap2.min, &ino, execname);

                /*
                 * Anon maps don't have the execname.
                 */

                event->mmap2.ino = (u64)ino;

                /*
                 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
                 */
                if (machine__is_host(machine))
                        event->header.misc = PERF_RECORD_MISC_USER;
                else
                        event->header.misc = PERF_RECORD_MISC_GUEST_USER;

                /* map protection and flags bits */
                event->mmap2.prot = 0;
                event->mmap2.flags = 0;
                if (prot[0] == 'r')
                        event->mmap2.prot |= PROT_READ;
                if (prot[1] == 'w')
                        event->mmap2.prot |= PROT_WRITE;
                if (prot[2] == 'x')
                        event->mmap2.prot |= PROT_EXEC;

                if (prot[3] == 's')
                        event->mmap2.flags |= MAP_SHARED;
                else
                        event->mmap2.flags |= MAP_PRIVATE;

                if (prot[2] != 'x') {
                        if (!mmap_data || prot[0] != 'r')
                                continue;

                        event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;

                if (!strcmp(execname, ""))
                        strcpy(execname, anonstr);

                size = strlen(execname) + 1;
                memcpy(event->mmap2.filename, execname, size);
                size = PERF_ALIGN(size, sizeof(u64));
                event->mmap2.len -= event->mmap.start;
                event->mmap2.header.size = (sizeof(event->mmap2) -
                                            (sizeof(event->mmap2.filename) - size));
                memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
                event->mmap2.header.size += machine->id_hdr_size;
                event->mmap2.pid = tgid;
                event->mmap2.tid = pid;

                if (process(tool, event, &synth_sample, machine) != 0) {
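
/*
 * Deliver one synthetic PERF_RECORD_MMAP per kernel module, walking the
 * machine's MAP__FUNCTION maps and skipping the kernel map itself.
 */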
int perf_event__synthesize_modules(struct perf_tool *tool,
                                   perf_event__handler_t process,
                                   struct machine *machine)
        struct map_groups *kmaps = &machine->kmaps;
        union perf_event *event = zalloc((sizeof(event->mmap) +
                                          machine->id_hdr_size));
        if (event == NULL) {
                pr_debug("Not enough memory synthesizing mmap event "
                         "for kernel modules\n");

        event->header.type = PERF_RECORD_MMAP;

        /*
         * kernel uses 0 for user space maps, see kernel/perf_event.c
         */
        if (machine__is_host(machine))
                event->header.misc = PERF_RECORD_MISC_KERNEL;
        else
                event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

        for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
             nd; nd = rb_next(nd)) {
                struct map *pos = rb_entry(nd, struct map, rb_node);

                if (pos->dso->kernel)
                        continue;

                size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
                event->mmap.header.type = PERF_RECORD_MMAP;
                event->mmap.header.size = (sizeof(event->mmap) -
                                           (sizeof(event->mmap.filename) - size));
                memset(event->mmap.filename + size, 0, machine->id_hdr_size);
                event->mmap.header.size += machine->id_hdr_size;
                event->mmap.start = pos->start;
                event->mmap.len = pos->end - pos->start;
                event->mmap.pid = machine->pid;

                memcpy(event->mmap.filename, pos->dso->long_name,
                       pos->dso->long_name_len + 1);
                if (process(tool, event, &synth_sample, machine) != 0) {
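
/*
 * Synthesize COMM, MMAP and (for secondary threads) FORK events for one
 * pid, walking <root_dir>/proc/<pid>/task when a full dump is requested.
 */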
static int __event__synthesize_thread(union perf_event *comm_event,
                                      union perf_event *mmap_event,
                                      union perf_event *fork_event,
                                      pid_t pid, int full,
                                      perf_event__handler_t process,
                                      struct perf_tool *tool,
                                      struct machine *machine, bool mmap_data)
        char filename[PATH_MAX];
        struct dirent dirent, *next;

        /* special case: only send one comm event using passed in pid */
        if (!full) {
                tgid = perf_event__synthesize_comm(tool, comm_event, pid,
                                                   process, machine);

                return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
                                                          process, machine, mmap_data);

        if (machine__is_default_guest(machine))

        snprintf(filename, sizeof(filename), "%s/proc/%d/task",
                 machine->root_dir, pid);

        tasks = opendir(filename);
        if (tasks == NULL) {
                pr_debug("couldn't open %s\n", filename);

        while (!readdir_r(tasks, &dirent, &next) && next) {

                _pid = strtol(dirent.d_name, &end, 10);

                tgid = perf_event__synthesize_comm(tool, comm_event, _pid,
                                                   process, machine);

                if (_pid == pid) {
                        /* process the parent's maps too */
                        rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
                                                                process, machine, mmap_data);
                } else {
                        /* only fork the tid's map, to save time */
                        rc = perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
                                                         process, machine);
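
/*
 * Synthesize events for every thread in @threads, and also for a thread
 * group leader that is not itself in the map, so that the leader's
 * COMM/MMAP records are always emitted.
 */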
int perf_event__synthesize_thread_map(struct perf_tool *tool,
                                      struct thread_map *threads,
                                      perf_event__handler_t process,
                                      struct machine *machine,
                                      bool mmap_data)
        union perf_event *comm_event, *mmap_event, *fork_event;
        int err = -1, thread, j;

        comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
        if (comm_event == NULL)

        mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
        if (mmap_event == NULL)

        fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
        if (fork_event == NULL)

        for (thread = 0; thread < threads->nr; ++thread) {
                if (__event__synthesize_thread(comm_event, mmap_event,
                                               fork_event,
                                               threads->map[thread], 0,
                                               process, tool, machine,
                                               mmap_data)) {

                /*
                 * comm.pid is set to thread group id by
                 * perf_event__synthesize_comm
                 */
                if ((int) comm_event->comm.pid != threads->map[thread]) {
                        bool need_leader = true;

                        /* is thread group leader in thread_map? */
                        for (j = 0; j < threads->nr; ++j) {
                                if ((int) comm_event->comm.pid == threads->map[j]) {

                        /* if not, generate events for it */
                        if (need_leader &&
                            __event__synthesize_thread(comm_event, mmap_event,
                                                       fork_event,
                                                       comm_event->comm.pid, 0,
                                                       process, tool, machine,
                                                       mmap_data)) {
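
/*
 * Synthesize COMM/MMAP/FORK events for every task already running on the
 * machine by scanning <root_dir>/proc.  Roughly how a record-style tool
 * would call it at startup (illustrative sketch, not the exact upstream
 * call site; the handler and machine names are assumptions):
 *
 *      err = perf_event__synthesize_threads(tool, process_synthesized_event,
 *                                           &session->machines.host, false);
 */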
int perf_event__synthesize_threads(struct perf_tool *tool,
                                   perf_event__handler_t process,
                                   struct machine *machine, bool mmap_data)
        char proc_path[PATH_MAX];
        struct dirent dirent, *next;
        union perf_event *comm_event, *mmap_event, *fork_event;

        if (machine__is_default_guest(machine))

        comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
        if (comm_event == NULL)

        mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
        if (mmap_event == NULL)

        fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
        if (fork_event == NULL)

        snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
        proc = opendir(proc_path);

        while (!readdir_r(proc, &dirent, &next) && next) {
                pid_t pid = strtol(dirent.d_name, &end, 10);

                if (*end) /* only interested in proper numerical dirents */

                /*
                 * We may race with exiting thread, so don't stop just because
                 * one thread couldn't be synthesized.
                 */
                __event__synthesize_thread(comm_event, mmap_event, fork_event, pid,
                                           1, process, tool, machine, mmap_data);
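
/*
 * kallsyms helpers: find_symbol_cb() is the kallsyms__parse() callback
 * that matches the requested symbol name, and
 * kallsyms__get_function_start() drives the parse to return its address.
 */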
struct process_symbol_args {

static int find_symbol_cb(void *arg, const char *name, char type,
                          u64 start)
        struct process_symbol_args *args = arg;

        /*
         * Must be a function or at least an alias, as in PARISC64, where "_text" is
         * an 'A' to the same address as "_stext".
         */
        if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
              type == 'A') || strcmp(name, args->name))

u64 kallsyms__get_function_start(const char *kallsyms_filename,
                                 const char *symbol_name)
        struct process_symbol_args args = { .name = symbol_name, };

        if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
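
/*
 * Synthesize the PERF_RECORD_MMAP for the kernel text mapping itself: the
 * filename is "<mmap name><ref_reloc_sym name>" and pgoff records the
 * reference symbol's address from the vmlinux MAP__FUNCTION map.
 */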
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
                                       perf_event__handler_t process,
                                       struct machine *machine)
        const char *mmap_name;
        char name_buff[PATH_MAX];

        /*
         * We should get this from /sys/kernel/sections/.text, but till that is
         * available use this, and after it is use this as a fallback for older
         * kernels.
         */
        union perf_event *event = zalloc((sizeof(event->mmap) +
                                          machine->id_hdr_size));
        if (event == NULL) {
                pr_debug("Not enough memory synthesizing mmap event "
                         "for kernel modules\n");

        mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
        if (machine__is_host(machine)) {
                /*
                 * kernel uses PERF_RECORD_MISC_USER for user space maps,
                 * see kernel/perf_event.c __perf_event_mmap
                 */
                event->header.misc = PERF_RECORD_MISC_KERNEL;
        } else {
                event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

        map = machine->vmlinux_maps[MAP__FUNCTION];
        kmap = map__kmap(map);
        size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
                        "%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
        size = PERF_ALIGN(size, sizeof(u64));
        event->mmap.header.type = PERF_RECORD_MMAP;
        event->mmap.header.size = (sizeof(event->mmap) -
                                   (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
        event->mmap.pgoff = kmap->ref_reloc_sym->addr;
        event->mmap.start = map->start;
        event->mmap.len = map->end - event->mmap.start;
        event->mmap.pid = machine->pid;

        err = process(tool, event, &synth_sample, machine);
size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
        return fprintf(fp, ": %s:%d\n", event->comm.comm, event->comm.tid);

int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine)
        return machine__process_comm_event(machine, event, sample);

int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine)
        return machine__process_lost_event(machine, event, sample);

size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
        return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
                       event->mmap.pid, event->mmap.tid, event->mmap.start,
                       event->mmap.len, event->mmap.pgoff,
                       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
                       event->mmap.filename);

size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
        return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
                       " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n",
                       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
                       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
                       event->mmap2.min, event->mmap2.ino,
                       event->mmap2.ino_generation,
                       (event->mmap2.prot & PROT_READ) ? 'r' : '-',
                       (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
                       (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
                       (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
                       event->mmap2.filename);

int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine)
        return machine__process_mmap_event(machine, event, sample);

int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
                              union perf_event *event,
                              struct perf_sample *sample,
                              struct machine *machine)
        return machine__process_mmap2_event(machine, event, sample);

size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
        return fprintf(fp, "(%d:%d):(%d:%d)\n",
                       event->fork.pid, event->fork.tid,
                       event->fork.ppid, event->fork.ptid);

int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine)
        return machine__process_fork_event(machine, event, sample);

int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine)
        return machine__process_exit_event(machine, event, sample);
size_t perf_event__fprintf(union perf_event *event, FILE *fp)
        size_t ret = fprintf(fp, "PERF_RECORD_%s",
                             perf_event__name(event->header.type));

        switch (event->header.type) {
        case PERF_RECORD_COMM:
                ret += perf_event__fprintf_comm(event, fp);
                break;
        case PERF_RECORD_FORK:
        case PERF_RECORD_EXIT:
                ret += perf_event__fprintf_task(event, fp);
                break;
        case PERF_RECORD_MMAP:
                ret += perf_event__fprintf_mmap(event, fp);
                break;
        case PERF_RECORD_MMAP2:
                ret += perf_event__fprintf_mmap2(event, fp);
                break;

        ret += fprintf(fp, "\n");

int perf_event__process(struct perf_tool *tool __maybe_unused,
                        union perf_event *event,
                        struct perf_sample *sample,
                        struct machine *machine)
        return machine__process_event(machine, event, sample);
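
/*
 * Resolve @addr for @thread to a map of the requested type, selecting
 * kernel vs. user and host vs. guest map groups from @cpumode and setting
 * the HIST_FILTER__GUEST/HOST bits when that side is not being profiled.
 */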
void thread__find_addr_map(struct thread *thread,
                           struct machine *machine, u8 cpumode,
                           enum map_type type, u64 addr,
                           struct addr_location *al)
        struct map_groups *mg = thread->mg;
        bool load_map = false;

        al->machine = machine;

        al->cpumode = cpumode;

        if (machine == NULL) {

        if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
                mg = &machine->kmaps;
        } else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
        } else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
                mg = &machine->kmaps;
        } else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {

        if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
             cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
            !perf_guest)
                al->filtered |= (1 << HIST_FILTER__GUEST);
        if ((cpumode == PERF_RECORD_MISC_USER ||
             cpumode == PERF_RECORD_MISC_KERNEL) &&
            !perf_host)
                al->filtered |= (1 << HIST_FILTER__HOST);

        al->map = map_groups__find(mg, type, al->addr);
        if (al->map == NULL) {
                /*
                 * If this is outside of all known maps, and is a negative
                 * address, try to look it up in the kernel dso, as it might be
                 * a vsyscall or vdso (which executes in user-mode).
                 *
                 * XXX This is nasty, we should have a symbol list in the
                 * "[vdso]" dso, but for now lets use the old trick of looking
                 * in the whole kernel symbol list.
                 */
                if ((long long)al->addr < 0 &&
                    cpumode == PERF_RECORD_MISC_USER &&
                    machine && mg != &machine->kmaps) {
                        mg = &machine->kmaps;
        } else {
                /*
                 * Kernel maps might be changed when loading symbols so loading
                 * must be done prior to using kernel maps.
                 */
                if (load_map)
                        map__load(al->map, machine->symbol_filter);
                al->addr = al->map->map_ip(al->map, al->addr);
void thread__find_addr_location(struct thread *thread, struct machine *machine,
                                u8 cpumode, enum map_type type, u64 addr,
                                struct addr_location *al)
        thread__find_addr_map(thread, machine, cpumode, type, addr, al);

        if (al->map != NULL)
                al->sym = map__find_symbol(al->map, al->addr,
                                           machine->symbol_filter);
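
/*
 * Resolve a sample to an addr_location: find the thread, make sure the
 * kernel maps exist, look up the map and symbol for the sample address,
 * and apply the thread/dso/symbol filters from symbol_conf.
 */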
int perf_event__preprocess_sample(const union perf_event *event,
                                  struct machine *machine,
                                  struct addr_location *al,
                                  struct perf_sample *sample)
        u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
        struct thread *thread = machine__findnew_thread(machine, sample->pid,
                                                        sample->tid);

        dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
        /*
         * Have we already created the kernel maps for this machine?
         *
         * This should have happened earlier, when we processed the kernel MMAP
         * events, but for older perf.data files there was no such thing, so do
         * it now.
         */
        if (cpumode == PERF_RECORD_MISC_KERNEL &&
            machine->vmlinux_maps[MAP__FUNCTION] == NULL)
                machine__create_kernel_maps(machine);

        thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
                              sample->ip, al);
        dump_printf(" ...... dso: %s\n",
                    al->map ? al->map->dso->long_name :
                    al->level == 'H' ? "[hypervisor]" : "<not found>");

        if (thread__is_filtered(thread))
                al->filtered |= (1 << HIST_FILTER__THREAD);

        al->cpu = sample->cpu;

        if (al->map) {
                struct dso *dso = al->map->dso;

                if (symbol_conf.dso_list &&
                    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
                                                  dso->short_name) ||
                               (dso->short_name != dso->long_name &&
                                strlist__has_entry(symbol_conf.dso_list,
                                                   dso->long_name))))) {
                        al->filtered |= (1 << HIST_FILTER__DSO);

                al->sym = map__find_symbol(al->map, al->addr,
                                           machine->symbol_filter);

        if (symbol_conf.sym_list &&
            (!al->sym || !strlist__has_entry(symbol_conf.sym_list,
                                             al->sym->name)))
                al->filtered |= (1 << HIST_FILTER__SYMBOL);