#include <sys/resource.h>
char dso__symtab_origin(const struct dso *dso)
{
	static const char origin[] = {
		[DSO_BINARY_TYPE__KALLSYMS]			= 'k',
		[DSO_BINARY_TYPE__VMLINUX]			= 'v',
		[DSO_BINARY_TYPE__JAVA_JIT]			= 'j',
		[DSO_BINARY_TYPE__DEBUGLINK]			= 'l',
		[DSO_BINARY_TYPE__BUILD_ID_CACHE]		= 'B',
		[DSO_BINARY_TYPE__FEDORA_DEBUGINFO]		= 'f',
		[DSO_BINARY_TYPE__UBUNTU_DEBUGINFO]		= 'u',
		[DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO]	= 'o',
		[DSO_BINARY_TYPE__BUILDID_DEBUGINFO]		= 'b',
		[DSO_BINARY_TYPE__SYSTEM_PATH_DSO]		= 'd',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE]		= 'K',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP]	= 'm',
		[DSO_BINARY_TYPE__GUEST_KALLSYMS]		= 'g',
		[DSO_BINARY_TYPE__GUEST_KMODULE]		= 'G',
		[DSO_BINARY_TYPE__GUEST_KMODULE_COMP]		= 'M',
		[DSO_BINARY_TYPE__GUEST_VMLINUX]		= 'V',
	};

	if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
		return '!';
	return origin[dso->symtab_type];
}
int dso__read_binary_type_filename(const struct dso *dso,
				   enum dso_binary_type type,
				   char *root_dir, char *filename, size_t size)
{
	char build_id_hex[BUILD_ID_SIZE * 2 + 1];
	int ret = 0;
	size_t len;

	switch (type) {
	case DSO_BINARY_TYPE__DEBUGLINK: {
		char *debuglink;

		len = __symbol__join_symfs(filename, size, dso->long_name);
		debuglink = filename + len;
		while (debuglink != filename && *debuglink != '/')
			debuglink--;
		if (*debuglink == '/')
			debuglink++;

		ret = -1;
		if (!is_regular_file(filename))
			break;

		ret = filename__read_debuglink(filename, debuglink,
					       size - (debuglink - filename));
		break;
	}
	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
		/* skip the locally configured cache if a symfs is given */
		if (symbol_conf.symfs[0] ||
		    (dso__build_id_filename(dso, filename, size) == NULL))
			ret = -1;
		break;

	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s.debug", dso->long_name);
		break;

	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s", dso->long_name);
		break;

	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
	{
		const char *last_slash;
		size_t dir_size;

		last_slash = dso->long_name + dso->long_name_len;
		while (last_slash != dso->long_name && *last_slash != '/')
			last_slash--;

		len = __symbol__join_symfs(filename, size, "");
		dir_size = last_slash - dso->long_name + 2;
		if (dir_size > (size - len)) {
			ret = -1;
			break;
		}
		len += scnprintf(filename + len, dir_size, "%s", dso->long_name);
		len += scnprintf(filename + len, size - len, ".debug%s",
				 last_slash);
		break;
	}

	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
		if (!dso->has_build_id) {
			ret = -1;
			break;
		}

		build_id__sprintf(dso->build_id,
				  sizeof(dso->build_id),
				  build_id_hex);
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
		snprintf(filename + len, size - len, "%.2s/%s.debug",
			 build_id_hex, build_id_hex + 2);
		break;

	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
		path__join3(filename, size, symbol_conf.symfs,
			    root_dir, dso->long_name);
		break;

	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__KCORE:
	case DSO_BINARY_TYPE__GUEST_KCORE:
		snprintf(filename, size, "%s", dso->long_name);
		break;

	default:
	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__NOT_FOUND:
		ret = -1;
		break;
	}

	return ret;
}
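
/*
 * Minimal usage sketch (not part of the original file): resolve the
 * debuglink candidate path for a dso into a caller-provided buffer.
 * The empty root_dir is an assumption for a host (non-guest) dso.
 */
static int __maybe_unused dso__read_binary_type_filename_example(const struct dso *dso)
{
	char filename[PATH_MAX];

	return dso__read_binary_type_filename(dso, DSO_BINARY_TYPE__DEBUGLINK,
					      (char *)"", filename, sizeof(filename));
}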
static const struct {
	const char *fmt;
	int (*decompress)(const char *input, int output);
} compressions[] = {
#ifdef HAVE_ZLIB_SUPPORT
	{ "gz", gzip_decompress_to_file },
#endif
#ifdef HAVE_LZMA_SUPPORT
	{ "xz", lzma_decompress_to_file },
#endif
	{ NULL, NULL },
};

bool is_supported_compression(const char *ext)
{
	unsigned i;

	for (i = 0; compressions[i].fmt; i++) {
		if (!strcmp(ext, compressions[i].fmt))
			return true;
	}
	return false;
}
bool is_kernel_module(const char *pathname, int cpumode)
{
	struct kmod_path m;
	int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK;

	WARN_ONCE(mode != cpumode,
		  "Internal error: passing unmasked cpumode (%x) to is_kernel_module",
		  cpumode);

	switch (mode) {
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_HYPERVISOR:
	case PERF_RECORD_MISC_GUEST_USER:
		return false;
	/* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */
	default:
		if (kmod_path__parse(&m, pathname)) {
			pr_err("Failed to check whether %s is a kernel module or not. Assume it is.",
			       pathname);
			return true;
		}
	}

	return m.kmod;
}
bool decompress_to_file(const char *ext, const char *filename, int output_fd)
{
	unsigned i;

	for (i = 0; compressions[i].fmt; i++) {
		if (!strcmp(ext, compressions[i].fmt))
			return !compressions[i].decompress(filename,
							   output_fd);
	}
	return false;
}
bool dso__needs_decompress(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
}
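
/*
 * Minimal usage sketch (not part of the original file): decompress a
 * compressed kernel module into an already opened temporary fd before
 * reading symbols from it. The "gz" extension and tmp_fd are assumptions
 * for illustration.
 */
static bool __maybe_unused decompress_kmodule_example(struct dso *dso,
						      const char *path, int tmp_fd)
{
	if (!dso__needs_decompress(dso))
		return false;

	return decompress_to_file("gz", path, tmp_fd);
}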
/*
 * Parses kernel module specified in @path and updates
 * @m argument like:
 *
 *    @comp - true if @path contains supported compression suffix,
 *            false otherwise
 *    @kmod - true if @path contains '.ko' suffix in right position,
 *            false otherwise
 *    @name - if (@alloc_name && @kmod) is true, it contains strdup-ed base name
 *            of the kernel module without suffixes, otherwise strdup-ed
 *            base name of @path
 *    @ext  - if (@alloc_ext && @comp) is true, it contains strdup-ed string
 *            of the compression suffix
 *
 * Returns 0 if there's no strdup error, -ENOMEM otherwise.
 */
int __kmod_path__parse(struct kmod_path *m, const char *path,
		       bool alloc_name, bool alloc_ext)
{
	const char *name = strrchr(path, '/');
	const char *ext  = strrchr(path, '.');
	bool is_simple_name = false;

	memset(m, 0x0, sizeof(*m));
	name = name ? name + 1 : path;

	/*
	 * '.' is also a valid character for module name. For example:
	 * [aaa.bbb] is a valid module name. '[' should have higher
	 * priority than '.ko' suffix.
	 *
	 * The kernel names are from machine__mmap_name. Such
	 * name should belong to kernel itself, not kernel module.
	 */
	if (name[0] == '[') {
		is_simple_name = true;
		if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
		    (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
		    (strncmp(name, "[vdso]", 6) == 0) ||
		    (strncmp(name, "[vsyscall]", 10) == 0)) {
			m->kmod = false;
		} else
			m->kmod = true;
	}

	/* No extension, just return name. */
	if ((ext == NULL) || is_simple_name) {
		if (alloc_name) {
			m->name = strdup(name);
			return m->name ? 0 : -ENOMEM;
		}
		return 0;
	}

	if (is_supported_compression(ext + 1)) {
		m->comp = true;
		ext -= 3;
	}

	/* Check .ko extension only if there's enough name left. */
	if (ext > name)
		m->kmod = !strncmp(ext, ".ko", 3);

	if (alloc_name) {
		if (m->kmod) {
			if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
				return -ENOMEM;
		} else {
			if (asprintf(&m->name, "%s", name) == -1)
				return -ENOMEM;
		}

		strxfrchar(m->name, '-', '_');
	}

	if (alloc_ext && m->comp) {
		m->ext = strdup(ext + 4);
		if (!m->ext) {
			free((void *) m->name);
			return -ENOMEM;
		}
	}

	return 0;
}
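
/*
 * Minimal usage sketch (not part of the original file): parse a
 * hypothetical compressed module path with both strings allocated,
 * then release them. Expect kmod == true and comp == true here.
 */
static void __maybe_unused kmod_path__parse_example(void)
{
	struct kmod_path m;

	if (__kmod_path__parse(&m, "/lib/modules/4.4.0/kernel/fs/ext4/ext4.ko.gz",
			       true, true) == 0) {
		pr_debug("name %s kmod %d comp %d ext %s\n",
			 m.name, m.kmod, m.comp, m.ext ? m.ext : "");
		free(m.name);
		free(m.ext);
	}
}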
/*
 * Global list of open DSOs and the counter.
 */
static LIST_HEAD(dso__data_open);
static long dso__data_open_cnt;
static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;

static void dso__list_add(struct dso *dso)
{
	list_add_tail(&dso->data.open_entry, &dso__data_open);
	dso__data_open_cnt++;
}

static void dso__list_del(struct dso *dso)
{
	list_del(&dso->data.open_entry);
	WARN_ONCE(dso__data_open_cnt <= 0,
		  "DSO data fd counter out of bounds.");
	dso__data_open_cnt--;
}
static void close_first_dso(void);

static int do_open(char *name)
{
	int fd;
	char sbuf[STRERR_BUFSIZE];

	do {
		fd = open(name, O_RDONLY);
		if (fd >= 0)
			return fd;

		pr_debug("dso open failed: %s\n",
			 strerror_r(errno, sbuf, sizeof(sbuf)));
		if (!dso__data_open_cnt || errno != EMFILE)
			break;

		close_first_dso();
	} while (1);

	return -1;
}
static int __open_dso(struct dso *dso, struct machine *machine)
{
	int fd;
	char *root_dir = (char *)"";
	char *name = malloc(PATH_MAX);

	if (!name)
		return -ENOMEM;

	if (machine)
		root_dir = machine->root_dir;

	if (dso__read_binary_type_filename(dso, dso->binary_type,
					   root_dir, name, PATH_MAX)) {
		free(name);
		return -EINVAL;
	}

	fd = do_open(name);
	free(name);
	return fd;
}
static void check_data_close(void);

/**
 * open_dso - Open DSO data file
 * @dso: dso object
 *
 * Opens @dso's data file descriptor and updates the
 * list/count of open DSO objects.
 */
static int open_dso(struct dso *dso, struct machine *machine)
{
	int fd = __open_dso(dso, machine);

	if (fd >= 0) {
		dso__list_add(dso);
		/*
		 * Check if we crossed the allowed number
		 * of opened DSOs and close one if needed.
		 */
		check_data_close();
	}

	return fd;
}
static void close_data_fd(struct dso *dso)
{
	if (dso->data.fd >= 0) {
		close(dso->data.fd);
		dso->data.fd = -1;
		dso->data.file_size = 0;
		dso__list_del(dso);
	}
}
/**
 * close_dso - Close DSO data file
 * @dso: dso object
 *
 * Closes @dso's data file descriptor and updates the
 * list/count of open DSO objects.
 */
static void close_dso(struct dso *dso)
{
	close_data_fd(dso);
}
static void close_first_dso(void)
{
	struct dso *dso;

	dso = list_first_entry(&dso__data_open, struct dso, data.open_entry);
	close_dso(dso);
}
static rlim_t get_fd_limit(void)
{
	struct rlimit l;
	rlim_t limit = 0;

	/* Allow half of the current open fd limit. */
	if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
		if (l.rlim_cur == RLIM_INFINITY)
			limit = l.rlim_cur;
		else
			limit = l.rlim_cur / 2;
	} else {
		pr_err("failed to get fd limit\n");
		limit = 1;
	}

	return limit;
}
static bool may_cache_fd(void)
{
	static rlim_t limit;

	if (!limit)
		limit = get_fd_limit();

	if (limit == RLIM_INFINITY)
		return true;

	return limit > (rlim_t) dso__data_open_cnt;
}
/*
 * Check and close LRU dso if we crossed the allowed limit
 * for opened dso file descriptors. The limit is half
 * of the RLIMIT_NOFILE files opened.
 */
static void check_data_close(void)
{
	bool cache_fd = may_cache_fd();

	if (!cache_fd)
		close_first_dso();
}
/**
 * dso__data_close - Close DSO data file
 * @dso: dso object
 *
 * External interface to close @dso's data file descriptor.
 */
void dso__data_close(struct dso *dso)
{
	pthread_mutex_lock(&dso__data_open_lock);
	close_dso(dso);
	pthread_mutex_unlock(&dso__data_open_lock);
}
static void try_to_open_dso(struct dso *dso, struct machine *machine)
{
	enum dso_binary_type binary_type_data[] = {
		DSO_BINARY_TYPE__BUILD_ID_CACHE,
		DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
		DSO_BINARY_TYPE__NOT_FOUND,
	};
	int i = 0;

	if (dso->data.fd >= 0)
		return;

	if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
		dso->data.fd = open_dso(dso, machine);
		goto out;
	}

	do {
		dso->binary_type = binary_type_data[i++];

		dso->data.fd = open_dso(dso, machine);
		if (dso->data.fd >= 0)
			goto out;

	} while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
out:
	if (dso->data.fd >= 0)
		dso->data.status = DSO_DATA_STATUS_OK;
	else
		dso->data.status = DSO_DATA_STATUS_ERROR;
}
/**
 * dso__data_get_fd - Get dso's data file descriptor
 * @dso: dso object
 * @machine: machine object
 *
 * External interface to find dso's file, open it and
 * return its file descriptor. It should be paired with
 * dso__data_put_fd() if it returns a non-negative value.
 */
int dso__data_get_fd(struct dso *dso, struct machine *machine)
{
	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	if (pthread_mutex_lock(&dso__data_open_lock) < 0)
		return -1;

	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0)
		pthread_mutex_unlock(&dso__data_open_lock);

	return dso->data.fd;
}
void dso__data_put_fd(struct dso *dso __maybe_unused)
{
	pthread_mutex_unlock(&dso__data_open_lock);
}
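
/*
 * Minimal usage sketch (not part of the original file): the descriptor
 * returned by dso__data_get_fd() is only valid until the matching
 * dso__data_put_fd(), which drops dso__data_open_lock (see dso__type()
 * below for the in-tree user).
 */
static void __maybe_unused dso__data_get_fd_example(struct dso *dso,
						    struct machine *machine)
{
	int fd = dso__data_get_fd(dso, machine);

	if (fd >= 0) {
		/* read from fd here, e.g. via pread() */
		dso__data_put_fd(dso);
	}
}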
bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
{
	u32 flag = 1 << by;

	if (dso->data.status_seen & flag)
		return true;

	dso->data.status_seen |= flag;

	return false;
}
static void dso_cache__free(struct dso *dso)
{
	struct rb_root *root = &dso->data.cache;
	struct rb_node *next = rb_first(root);

	pthread_mutex_lock(&dso->lock);
	while (next) {
		struct dso_cache *cache;

		cache = rb_entry(next, struct dso_cache, rb_node);
		next = rb_next(&cache->rb_node);
		rb_erase(&cache->rb_node, root);
		free(cache);
	}
	pthread_mutex_unlock(&dso->lock);
}
static struct dso_cache *dso_cache__find(struct dso *dso, u64 offset)
{
	const struct rb_root *root = &dso->data.cache;
	struct rb_node * const *p = &root->rb_node;
	const struct rb_node *parent = NULL;
	struct dso_cache *cache;

	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			return cache;
	}

	return NULL;
}
static struct dso_cache *
dso_cache__insert(struct dso *dso, struct dso_cache *new)
{
	struct rb_root *root = &dso->data.cache;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct dso_cache *cache;
	u64 offset = new->offset;

	pthread_mutex_lock(&dso->lock);
	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			goto out;
	}

	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, root);

	cache = NULL;
out:
	pthread_mutex_unlock(&dso->lock);
	return cache;
}
static ssize_t
dso_cache__memcpy(struct dso_cache *cache, u64 offset,
		  u8 *data, u64 size)
{
	u64 cache_offset = offset - cache->offset;
	u64 cache_size   = min(cache->size - cache_offset, size);

	memcpy(data, cache->data + cache_offset, cache_size);
	return cache_size;
}
static ssize_t
dso_cache__read(struct dso *dso, struct machine *machine,
		u64 offset, u8 *data, ssize_t size)
{
	struct dso_cache *cache;
	struct dso_cache *old;
	ssize_t ret;

	do {
		u64 cache_offset;

		cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
		if (!cache)
			return -ENOMEM;

		pthread_mutex_lock(&dso__data_open_lock);

		/*
		 * dso->data.fd might be closed if other thread opened another
		 * file (dso) due to open file limit (RLIMIT_NOFILE).
		 */
		try_to_open_dso(dso, machine);

		if (dso->data.fd < 0) {
			ret = -errno;
			dso->data.status = DSO_DATA_STATUS_ERROR;
			break;
		}

		cache_offset = offset & DSO__DATA_CACHE_MASK;

		ret = pread(dso->data.fd, cache->data, DSO__DATA_CACHE_SIZE, cache_offset);
		if (ret <= 0)
			break;

		cache->offset = cache_offset;
		cache->size   = ret;
	} while (0);

	pthread_mutex_unlock(&dso__data_open_lock);

	if (ret > 0) {
		old = dso_cache__insert(dso, cache);
		if (old) {
			/* we lose the race */
			free(cache);
			cache = old;
		}

		ret = dso_cache__memcpy(cache, offset, data, size);
	}

	if (ret <= 0)
		free(cache);

	return ret;
}
static ssize_t dso_cache_read(struct dso *dso, struct machine *machine,
			      u64 offset, u8 *data, ssize_t size)
{
	struct dso_cache *cache;

	cache = dso_cache__find(dso, offset);
	if (cache)
		return dso_cache__memcpy(cache, offset, data, size);
	else
		return dso_cache__read(dso, machine, offset, data, size);
}
/*
 * Reads and caches dso data in DSO__DATA_CACHE_SIZE size chunks
 * in the rb_tree. Any read to already cached data is served
 * by the cached data.
 */
static ssize_t cached_read(struct dso *dso, struct machine *machine,
			   u64 offset, u8 *data, ssize_t size)
{
	ssize_t r = 0;
	u8 *p = data;

	do {
		ssize_t ret;

		ret = dso_cache_read(dso, machine, offset, p, size);
		if (ret < 0)
			return ret;

		/* Reached EOF, return what we have. */
		if (!ret)
			break;

		BUG_ON(ret > size);

		r      += ret;
		p      += ret;
		offset += ret;
		size   -= ret;

	} while (size);

	return r;
}
static int data_file_size(struct dso *dso, struct machine *machine)
{
	int ret = 0;
	struct stat st;
	char sbuf[STRERR_BUFSIZE];

	if (dso->data.file_size)
		return 0;

	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	pthread_mutex_lock(&dso__data_open_lock);

	/*
	 * dso->data.fd might be closed if other thread opened another
	 * file (dso) due to open file limit (RLIMIT_NOFILE).
	 */
	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0) {
		ret = -errno;
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}

	if (fstat(dso->data.fd, &st) < 0) {
		ret = -errno;
		pr_err("dso cache fstat failed: %s\n",
		       strerror_r(errno, sbuf, sizeof(sbuf)));
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}
	dso->data.file_size = st.st_size;

out:
	pthread_mutex_unlock(&dso__data_open_lock);
	return ret;
}
/**
 * dso__data_size - Return dso data size
 * @dso: dso object
 * @machine: machine object
 *
 * Return: dso data size
 */
off_t dso__data_size(struct dso *dso, struct machine *machine)
{
	if (data_file_size(dso, machine))
		return -1;

	/* For now just estimate dso data size is close to file size */
	return dso->data.file_size;
}
static ssize_t data_read_offset(struct dso *dso, struct machine *machine,
				u64 offset, u8 *data, ssize_t size)
{
	if (data_file_size(dso, machine))
		return -1;

	/* Check the offset sanity. */
	if (offset > dso->data.file_size)
		return -1;

	if (offset + size < offset)
		return -1;

	return cached_read(dso, machine, offset, data, size);
}
/**
 * dso__data_read_offset - Read data from dso file offset
 * @dso: dso object
 * @machine: machine object
 * @offset: file offset
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from a dso file offset. Opens the
 * dso data file and uses cached_read() to get the data.
 */
ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
			      u64 offset, u8 *data, ssize_t size)
{
	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	return data_read_offset(dso, machine, offset, data, size);
}
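
/*
 * Minimal usage sketch (not part of the original file): read the first
 * bytes of a dso through the offset based interface; the buffer size is
 * arbitrary.
 */
static ssize_t __maybe_unused dso__data_read_offset_example(struct dso *dso,
							    struct machine *machine)
{
	u8 buf[64];

	return dso__data_read_offset(dso, machine, 0, buf, sizeof(buf));
}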
/**
 * dso__data_read_addr - Read data from dso address
 * @dso: dso object
 * @machine: machine object
 * @addr: virtual memory address
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from a dso address.
 */
ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
			    struct machine *machine, u64 addr,
			    u8 *data, ssize_t size)
{
	u64 offset = map->map_ip(map, addr);
	return dso__data_read_offset(dso, machine, offset, data, size);
}
struct map *dso__new_map(const char *name)
{
	struct map *map = NULL;
	struct dso *dso = dso__new(name);

	if (dso)
		map = map__new2(0, dso, MAP__FUNCTION);

	return map;
}
struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
				    const char *short_name, int dso_type)
{
	/*
	 * The kernel dso could be created by build_id processing.
	 */
	struct dso *dso = machine__findnew_dso(machine, name);

	/*
	 * We need to run this in all cases, since during the build_id
	 * processing we had no idea this was the kernel dso.
	 */
	if (dso != NULL) {
		dso__set_short_name(dso, short_name, false);
		dso->kernel = dso_type;
	}

	return dso;
}
/*
 * Find a matching entry and/or link current entry to RB tree.
 * Either one of the dso or name parameter must be non-NULL or the
 * function will not work.
 */
static struct dso *__dso__findlink_by_longname(struct rb_root *root,
					       struct dso *dso, const char *name)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node  *parent = NULL;

	if (!name)
		name = dso->long_name;
	/*
	 * Find node with the matching name
	 */
	while (*p) {
		struct dso *this = rb_entry(*p, struct dso, rb_node);
		int rc = strcmp(name, this->long_name);

		parent = *p;
		if (rc == 0) {
			/*
			 * In case the new DSO is a duplicate of an existing
			 * one, print a one-time warning & put the new entry
			 * at the end of the list of duplicates.
			 */
			if (!dso || (dso == this))
				return this;	/* Found matching dso */
			/*
			 * The core kernel DSOs may have duplicated long names.
			 * In this case, the short name should be different.
			 * Compare the short names to differentiate the DSOs.
			 */
			rc = strcmp(dso->short_name, this->short_name);
			if (rc == 0) {
				pr_err("Duplicated dso name: %s\n", name);
				return NULL;
			}
		}
		if (rc < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	if (dso) {
		/* Add new node and rebalance tree */
		rb_link_node(&dso->rb_node, parent, p);
		rb_insert_color(&dso->rb_node, root);
		dso->root = root;
	}
	return NULL;
}
static inline struct dso *__dso__find_by_longname(struct rb_root *root,
						  const char *name)
{
	return __dso__findlink_by_longname(root, NULL, name);
}
void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
{
	struct rb_root *root = dso->root;

	if (name == NULL)
		return;

	if (dso->long_name_allocated)
		free((char *)dso->long_name);

	if (root) {
		rb_erase(&dso->rb_node, root);
		/*
		 * __dso__findlink_by_longname() isn't guaranteed to add it
		 * back, so a clean removal is required here.
		 */
		RB_CLEAR_NODE(&dso->rb_node);
		dso->root = NULL;
	}

	dso->long_name		 = name;
	dso->long_name_len	 = strlen(name);
	dso->long_name_allocated = name_allocated;

	if (root)
		__dso__findlink_by_longname(root, dso, NULL);
}
void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
{
	if (name == NULL)
		return;

	if (dso->short_name_allocated)
		free((char *)dso->short_name);

	dso->short_name		  = name;
	dso->short_name_len	  = strlen(name);
	dso->short_name_allocated = name_allocated;
}
static void dso__set_basename(struct dso *dso)
{
	/*
	 * basename() may modify the path buffer, so we must pass
	 * a copy.
	 */
	char *base, *lname = strdup(dso->long_name);

	if (!lname)
		return;

	/*
	 * basename() may return a pointer to internal
	 * storage which is reused in subsequent calls
	 * so copy the result.
	 */
	base = strdup(basename(lname));

	free(lname);

	if (!base)
		return;

	dso__set_short_name(dso, base, true);
}
int dso__name_len(const struct dso *dso)
{
	if (!dso)
		return strlen("[unknown]");
	if (verbose)
		return dso->long_name_len;

	return dso->short_name_len;
}
bool dso__loaded(const struct dso *dso, enum map_type type)
{
	return dso->loaded & (1 << type);
}

bool dso__sorted_by_name(const struct dso *dso, enum map_type type)
{
	return dso->sorted_by_name & (1 << type);
}

void dso__set_sorted_by_name(struct dso *dso, enum map_type type)
{
	dso->sorted_by_name |= (1 << type);
}
struct dso *dso__new(const char *name)
{
	struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);

	if (dso != NULL) {
		int i;
		strcpy(dso->name, name);
		dso__set_long_name(dso, dso->name, false);
		dso__set_short_name(dso, dso->name, false);
		for (i = 0; i < MAP__NR_TYPES; ++i)
			dso->symbols[i] = dso->symbol_names[i] = RB_ROOT;
		dso->data.cache = RB_ROOT;
		dso->data.fd = -1;
		dso->data.status = DSO_DATA_STATUS_UNKNOWN;
		dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->is_64_bit = (sizeof(void *) == 8);
		dso->loaded = 0;
		dso->rel = 0;
		dso->sorted_by_name = 0;
		dso->has_build_id = 0;
		dso->has_srcline = 1;
		dso->a2l_fails = 1;
		dso->kernel = DSO_TYPE_USER;
		dso->needs_swap = DSO_SWAP__UNSET;
		RB_CLEAR_NODE(&dso->rb_node);
		dso->root = NULL;
		INIT_LIST_HEAD(&dso->node);
		INIT_LIST_HEAD(&dso->data.open_entry);
		pthread_mutex_init(&dso->lock, NULL);
		atomic_set(&dso->refcnt, 1);
	}

	return dso;
}
void dso__delete(struct dso *dso)
{
	int i;

	if (!RB_EMPTY_NODE(&dso->rb_node))
		pr_err("DSO %s is still in rbtree when being deleted!\n",
		       dso->long_name);
	for (i = 0; i < MAP__NR_TYPES; ++i)
		symbols__delete(&dso->symbols[i]);

	if (dso->short_name_allocated) {
		zfree((char **)&dso->short_name);
		dso->short_name_allocated = false;
	}

	if (dso->long_name_allocated) {
		zfree((char **)&dso->long_name);
		dso->long_name_allocated = false;
	}

	dso__data_close(dso);
	auxtrace_cache__free(dso->auxtrace_cache);
	dso_cache__free(dso);
	dso__free_a2l(dso);
	zfree(&dso->symsrc_filename);
	pthread_mutex_destroy(&dso->lock);
	free(dso);
}
struct dso *dso__get(struct dso *dso)
{
	if (dso)
		atomic_inc(&dso->refcnt);
	return dso;
}
void dso__put(struct dso *dso)
{
	if (dso && atomic_dec_and_test(&dso->refcnt))
		dso__delete(dso);
}
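
/*
 * Minimal lifetime sketch (not part of the original file): dso__new()
 * hands back an object with refcnt == 1, dso__get()/dso__put() pair up,
 * and the final dso__put() ends up in dso__delete(). The path below is
 * hypothetical.
 */
static void __maybe_unused dso__refcnt_example(void)
{
	struct dso *dso = dso__new("/tmp/example.so");

	if (dso) {
		struct dso *ref = dso__get(dso);	/* refcnt: 2 */

		dso__put(ref);				/* refcnt: 1 */
		dso__put(dso);				/* refcnt: 0 -> dso__delete() */
	}
}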
void dso__set_build_id(struct dso *dso, void *build_id)
{
	memcpy(dso->build_id, build_id, sizeof(dso->build_id));
	dso->has_build_id = 1;
}
bool dso__build_id_equal(const struct dso *dso, u8 *build_id)
{
	return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0;
}
void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
{
	char path[PATH_MAX];

	if (machine__is_default_guest(machine))
		return;
	sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
	if (sysfs__read_build_id(path, dso->build_id,
				 sizeof(dso->build_id)) == 0)
		dso->has_build_id = true;
}
int dso__kernel_module_get_build_id(struct dso *dso,
				    const char *root_dir)
{
	char filename[PATH_MAX];
	/*
	 * kernel module short names are of the form "[module]" and
	 * we need just "module" here.
	 */
	const char *name = dso->short_name + 1;

	snprintf(filename, sizeof(filename),
		 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
		 root_dir, (int)strlen(name) - 1, name);

	if (sysfs__read_build_id(filename, dso->build_id,
				 sizeof(dso->build_id)) == 0)
		dso->has_build_id = true;

	return 0;
}
bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
{
	bool have_build_id = false;
	struct dso *pos;

	list_for_each_entry(pos, head, node) {
		if (with_hits && !pos->hit)
			continue;
		if (pos->has_build_id) {
			have_build_id = true;
			continue;
		}
		if (filename__read_build_id(pos->long_name, pos->build_id,
					    sizeof(pos->build_id)) > 0) {
			have_build_id	  = true;
			pos->has_build_id = true;
		}
	}

	return have_build_id;
}
void __dsos__add(struct dsos *dsos, struct dso *dso)
{
	list_add_tail(&dso->node, &dsos->head);
	__dso__findlink_by_longname(&dsos->root, dso, NULL);
	/*
	 * It is now in the linked list, grab a reference, then garbage collect
	 * this when needing memory, by looking at LRU dso instances in the
	 * list with atomic_read(&dso->refcnt) == 1, i.e. no references
	 * anywhere besides the one for the list, do, under a lock for the
	 * list: remove it from the list, then a dso__put(), that probably will
	 * be the last and will then call dso__delete(), end of life.
	 *
	 * That, or at the end of the 'struct machine' lifetime, when all
	 * 'struct dso' instances will be removed from the list, in
	 * dsos__exit(), if they have no other reference from some other data
	 * structure.
	 *
	 * E.g.: after processing a 'perf.data' file and storing references
	 * to objects instantiated while processing events, we will have
	 * references to the 'thread', 'map', 'dso' structs all from 'struct
	 * hist_entry' instances, but we may not need anything not referenced,
	 * so we might as well call machines__exit()/machines__delete() and
	 * garbage collect it.
	 */
	dso__get(dso);
}
void dsos__add(struct dsos *dsos, struct dso *dso)
{
	pthread_rwlock_wrlock(&dsos->lock);
	__dsos__add(dsos, dso);
	pthread_rwlock_unlock(&dsos->lock);
}
struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
{
	struct dso *pos;

	if (cmp_short) {
		list_for_each_entry(pos, &dsos->head, node)
			if (strcmp(pos->short_name, name) == 0)
				return pos;
		return NULL;
	}
	return __dso__find_by_longname(&dsos->root, name);
}
struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
{
	struct dso *dso;

	pthread_rwlock_rdlock(&dsos->lock);
	dso = __dsos__find(dsos, name, cmp_short);
	pthread_rwlock_unlock(&dsos->lock);
	return dso;
}
struct dso *__dsos__addnew(struct dsos *dsos, const char *name)
{
	struct dso *dso = dso__new(name);

	if (dso != NULL) {
		__dsos__add(dsos, dso);
		dso__set_basename(dso);
		/* Put dso here because __dsos__add already got it */
		dso__put(dso);
	}
	return dso;
}
struct dso *__dsos__findnew(struct dsos *dsos, const char *name)
{
	struct dso *dso = __dsos__find(dsos, name, false);

	return dso ? dso : __dsos__addnew(dsos, name);
}
struct dso *dsos__findnew(struct dsos *dsos, const char *name)
{
	struct dso *dso;

	pthread_rwlock_wrlock(&dsos->lock);
	dso = dso__get(__dsos__findnew(dsos, name));
	pthread_rwlock_unlock(&dsos->lock);
	return dso;
}
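
/*
 * Minimal usage sketch (not part of the original file): dsos__findnew()
 * takes dsos->lock itself and returns a reference owned by the caller,
 * so it has to be dropped with dso__put(). The library path is
 * hypothetical.
 */
static void __maybe_unused dsos__findnew_example(struct dsos *dsos)
{
	struct dso *dso = dsos__findnew(dsos, "/usr/lib64/libc-2.22.so");

	if (dso)
		dso__put(dso);
}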
size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
			       bool (skip)(struct dso *dso, int parm), int parm)
{
	struct dso *pos;
	size_t ret = 0;

	list_for_each_entry(pos, head, node) {
		if (skip && skip(pos, parm))
			continue;
		ret += dso__fprintf_buildid(pos, fp);
		ret += fprintf(fp, " %s\n", pos->long_name);
	}
	return ret;
}
size_t __dsos__fprintf(struct list_head *head, FILE *fp)
{
	struct dso *pos;
	size_t ret = 0;

	list_for_each_entry(pos, head, node) {
		int i;

		for (i = 0; i < MAP__NR_TYPES; ++i)
			ret += dso__fprintf(pos, i, fp);
	}

	return ret;
}
size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
{
	char sbuild_id[BUILD_ID_SIZE * 2 + 1];

	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
	return fprintf(fp, "%s", sbuild_id);
}
size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = fprintf(fp, "dso: %s (", dso->short_name);

	if (dso->short_name != dso->long_name)
		ret += fprintf(fp, "%s, ", dso->long_name);
	ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type],
		       dso__loaded(dso, type) ? "" : "NOT ");
	ret += dso__fprintf_buildid(dso, fp);
	ret += fprintf(fp, ")\n");
	for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) {
		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);

		ret += symbol__fprintf(pos, fp);
	}

	return ret;
}
enum dso_type dso__type(struct dso *dso, struct machine *machine)
{
	int fd;
	enum dso_type type = DSO__TYPE_UNKNOWN;

	fd = dso__data_get_fd(dso, machine);
	if (fd >= 0) {
		type = dso__type_fd(fd);
		dso__data_put_fd(dso);
	}

	return type;
}
int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
{
	int idx, errnum = dso->load_errno;
	/*
	 * This must have the same ordering as the enum dso_load_errno.
	 */
	static const char *dso_load__error_str[] = {
	"Internal tools/perf/ library error",
	"Invalid ELF file",
	"Can not read build id",
	"Mismatching build id",
	"Decompression failure",
	};

	BUG_ON(buflen == 0);

	if (errnum >= 0) {
		const char *err = strerror_r(errnum, buf, buflen);

		if (err != buf)
			scnprintf(buf, buflen, "%s", err);

		return 0;
	}

	if (errnum <  __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END)
		return -1;

	idx = errnum - __DSO_LOAD_ERRNO__START;
	scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);
	return 0;
}
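
/*
 * Minimal usage sketch (not part of the original file): turn a failed
 * dso load into a human readable message for debug output.
 */
static void __maybe_unused dso__strerror_load_example(struct dso *dso)
{
	char buf[BUFSIZ];

	if (dso__strerror_load(dso, buf, sizeof(buf)) == 0)
		pr_debug("dso load error: %s\n", buf);
}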