// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <sys/resource.h>
#ifdef HAVE_LIBBPF_SUPPORT
#include <bpf/libbpf.h>
#include "bpf-event.h"
#include "bpf-utils.h"
#endif
#include "namespaces.h"
#include "util.h" /* O_CLOEXEC for older systems */
#include "annotate-data.h"

static const char * const debuglink_paths[] = {
        "%.0s%s",
        "%s/%s",
        "%s/.debug/%s",
        "/usr/lib/debug%s/%s"
};

void dso__set_nsinfo(struct dso *dso, struct nsinfo *nsi)
{
        nsinfo__put(RC_CHK_ACCESS(dso)->nsinfo);
        RC_CHK_ACCESS(dso)->nsinfo = nsi;
}

char dso__symtab_origin(const struct dso *dso)
{
        static const char origin[] = {
                [DSO_BINARY_TYPE__KALLSYMS]                     = 'k',
                [DSO_BINARY_TYPE__VMLINUX]                      = 'v',
                [DSO_BINARY_TYPE__JAVA_JIT]                     = 'j',
                [DSO_BINARY_TYPE__DEBUGLINK]                    = 'l',
                [DSO_BINARY_TYPE__BUILD_ID_CACHE]               = 'B',
                [DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO]     = 'D',
                [DSO_BINARY_TYPE__FEDORA_DEBUGINFO]             = 'f',
                [DSO_BINARY_TYPE__UBUNTU_DEBUGINFO]             = 'u',
                [DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO]     = 'x',
                [DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO]       = 'o',
                [DSO_BINARY_TYPE__BUILDID_DEBUGINFO]            = 'b',
                [DSO_BINARY_TYPE__SYSTEM_PATH_DSO]              = 'd',
                [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE]          = 'K',
                [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP]     = 'm',
                [DSO_BINARY_TYPE__GUEST_KALLSYMS]               = 'g',
                [DSO_BINARY_TYPE__GUEST_KMODULE]                = 'G',
                [DSO_BINARY_TYPE__GUEST_KMODULE_COMP]           = 'M',
                [DSO_BINARY_TYPE__GUEST_VMLINUX]                = 'V',
        };

        if (dso == NULL || dso__symtab_type(dso) == DSO_BINARY_TYPE__NOT_FOUND)
                return '!';
        return origin[dso__symtab_type(dso)];
}

bool dso__is_object_file(const struct dso *dso)
{
        switch (dso__binary_type(dso)) {
        case DSO_BINARY_TYPE__KALLSYMS:
        case DSO_BINARY_TYPE__GUEST_KALLSYMS:
        case DSO_BINARY_TYPE__JAVA_JIT:
        case DSO_BINARY_TYPE__BPF_PROG_INFO:
        case DSO_BINARY_TYPE__BPF_IMAGE:
        case DSO_BINARY_TYPE__OOL:
                return false;
        case DSO_BINARY_TYPE__VMLINUX:
        case DSO_BINARY_TYPE__GUEST_VMLINUX:
        case DSO_BINARY_TYPE__DEBUGLINK:
        case DSO_BINARY_TYPE__BUILD_ID_CACHE:
        case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
        case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
        case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
        case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
        case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
        case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
        case DSO_BINARY_TYPE__GUEST_KMODULE:
        case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
        case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
        case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
        case DSO_BINARY_TYPE__KCORE:
        case DSO_BINARY_TYPE__GUEST_KCORE:
        case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
        case DSO_BINARY_TYPE__NOT_FOUND:
        default:
                return true;
        }
}

int dso__read_binary_type_filename(const struct dso *dso,
                                   enum dso_binary_type type,
                                   char *root_dir, char *filename, size_t size)
{
        char build_id_hex[SBUILD_ID_SIZE];
        int ret = 0;
        size_t len;

        switch (type) {
        case DSO_BINARY_TYPE__DEBUGLINK:
        {
                const char *last_slash;
                char dso_dir[PATH_MAX];
                char symfile[PATH_MAX];
                unsigned int i;

                len = __symbol__join_symfs(filename, size, dso__long_name(dso));
                last_slash = filename + len;
                while (last_slash != filename && *last_slash != '/')
                        last_slash--;

                strncpy(dso_dir, filename, last_slash - filename);
                dso_dir[last_slash - filename] = '\0';

                if (!is_regular_file(filename)) {
                        ret = -1;
                        break;
                }

                ret = filename__read_debuglink(filename, symfile, PATH_MAX);
                if (ret)
                        break;

                /* Check predefined locations where debug file might reside */
                ret = -1;
                for (i = 0; i < ARRAY_SIZE(debuglink_paths); i++) {
                        snprintf(filename, size,
                                 debuglink_paths[i], dso_dir, symfile);
                        if (is_regular_file(filename)) {
                                ret = 0;
                                break;
                        }
                }

                break;
        }
        case DSO_BINARY_TYPE__BUILD_ID_CACHE:
                if (dso__build_id_filename(dso, filename, size, false) == NULL)
                        ret = -1;
                break;

        case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
                if (dso__build_id_filename(dso, filename, size, true) == NULL)
                        ret = -1;
                break;

        case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
                len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
                snprintf(filename + len, size - len, "%s.debug", dso__long_name(dso));
                break;

        case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
                len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
                snprintf(filename + len, size - len, "%s", dso__long_name(dso));
                break;

        case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
                /*
                 * Ubuntu can mixup /usr/lib with /lib, putting debuginfo in
                 * /usr/lib/debug/lib when it is expected to be in
                 * /usr/lib/debug/usr/lib
                 */
                if (strlen(dso__long_name(dso)) < 9 ||
                    strncmp(dso__long_name(dso), "/usr/lib/", 9)) {
                        ret = -1;
                        break;
                }
                len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
                snprintf(filename + len, size - len, "%s", dso__long_name(dso) + 4);
                break;

        case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
        {
                const char *last_slash;
                size_t dir_size;

                last_slash = dso__long_name(dso) + dso__long_name_len(dso);
                while (last_slash != dso__long_name(dso) && *last_slash != '/')
                        last_slash--;

                len = __symbol__join_symfs(filename, size, "");
                dir_size = last_slash - dso__long_name(dso) + 2;
                if (dir_size > (size - len)) {
                        ret = -1;
                        break;
                }
                len += scnprintf(filename + len, dir_size, "%s", dso__long_name(dso));
                len += scnprintf(filename + len, size - len, ".debug%s",
                                 last_slash);
                break;
        }
        case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
                if (!dso__has_build_id(dso)) {
                        ret = -1;
                        break;
                }

                build_id__sprintf(dso__bid_const(dso), build_id_hex);
                len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
                snprintf(filename + len, size - len, "%.2s/%s.debug",
                         build_id_hex, build_id_hex + 2);
                break;

        case DSO_BINARY_TYPE__VMLINUX:
        case DSO_BINARY_TYPE__GUEST_VMLINUX:
        case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
                __symbol__join_symfs(filename, size, dso__long_name(dso));
                break;

        case DSO_BINARY_TYPE__GUEST_KMODULE:
        case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
                path__join3(filename, size, symbol_conf.symfs,
                            root_dir, dso__long_name(dso));
                break;

        case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
        case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
                __symbol__join_symfs(filename, size, dso__long_name(dso));
                break;

        case DSO_BINARY_TYPE__KCORE:
        case DSO_BINARY_TYPE__GUEST_KCORE:
                snprintf(filename, size, "%s", dso__long_name(dso));
                break;

        default:
        case DSO_BINARY_TYPE__KALLSYMS:
        case DSO_BINARY_TYPE__GUEST_KALLSYMS:
        case DSO_BINARY_TYPE__JAVA_JIT:
        case DSO_BINARY_TYPE__BPF_PROG_INFO:
        case DSO_BINARY_TYPE__BPF_IMAGE:
        case DSO_BINARY_TYPE__OOL:
        case DSO_BINARY_TYPE__NOT_FOUND:
                ret = -1;
                break;
        }

        return ret;
}

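/*
 * Illustrative sketch (not called from this file): a caller wanting the
 * Fedora-style debuginfo candidate for a dso could do something like the
 * following; "dso" and "path" are hypothetical locals.
 *
 *      char path[PATH_MAX];
 *
 *      if (!dso__read_binary_type_filename(dso, DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
 *                                          "", path, sizeof(path)))
 *              pr_debug("debuginfo candidate: %s\n", path);
 */
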
static const struct {
        const char *fmt;
        int (*decompress)(const char *input, int output);
        bool (*is_compressed)(const char *input);
} compressions[] = {
        [COMP_ID__NONE] = { .fmt = NULL, },
#ifdef HAVE_ZLIB_SUPPORT
        { "gz", gzip_decompress_to_file, gzip_is_compressed },
#endif
#ifdef HAVE_LZMA_SUPPORT
        { "xz", lzma_decompress_to_file, lzma_is_compressed },
#endif
        { NULL, NULL, NULL },
};

static int is_supported_compression(const char *ext)
{
        unsigned int i;

        for (i = 1; compressions[i].fmt; i++) {
                if (!strcmp(ext, compressions[i].fmt))
                        return i;
        }
        return COMP_ID__NONE;
}

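/*
 * Sketch of the lookup above: is_supported_compression("gz") or ("xz")
 * returns the index of the matching entry when the corresponding
 * HAVE_*_SUPPORT option was built in, while an unknown extension such as
 * "txt" falls through to COMP_ID__NONE.
 */
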
bool is_kernel_module(const char *pathname, int cpumode)
{
        struct kmod_path m;
        int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK;

        WARN_ONCE(mode != cpumode,
                  "Internal error: passing unmasked cpumode (%x) to is_kernel_module",
                  cpumode);

        switch (mode) {
        case PERF_RECORD_MISC_USER:
        case PERF_RECORD_MISC_HYPERVISOR:
        case PERF_RECORD_MISC_GUEST_USER:
                return false;
        /* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */
        default:
                break;
        }

        if (kmod_path__parse(&m, pathname)) {
                pr_err("Failed to check whether %s is a kernel module or not. Assume it is.",
                       pathname);
                return true;
        }

        return m.kmod;
}

bool dso__needs_decompress(struct dso *dso)
{
        return dso__symtab_type(dso) == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
                dso__symtab_type(dso) == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
}

int filename__decompress(const char *name, char *pathname,
                         size_t len, int comp, int *err)
{
        char tmpbuf[] = KMOD_DECOMP_NAME;
        int fd = -1;

        /*
         * We have a proper compression id for the DSO, and yet the file
         * behind 'name' can still be a plain uncompressed object.
         *
         * The reason lies in how we open DSO object files: we try all
         * possible 'debug' objects until we find the data.  So even if the
         * DSO is represented by a 'krava.xz' module, we can end up here
         * opening a '~/.debug/....23432432/debug' file which is not
         * compressed.
         *
         * To keep this transparent, we detect this and return the file
         * descriptor to the uncompressed file.
         */
        if (!compressions[comp].is_compressed(name))
                return open(name, O_RDONLY);

        fd = mkstemp(tmpbuf);
        if (fd < 0) {
                *err = errno;
                return -1;
        }

        if (compressions[comp].decompress(name, fd)) {
                *err = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
                close(fd);
                fd = -1;
        }

        if (!pathname || (fd < 0))
                unlink(tmpbuf);

        if (pathname && (fd >= 0))
                strlcpy(pathname, tmpbuf, len);

        return fd;
}

static int decompress_kmodule(struct dso *dso, const char *name,
                              char *pathname, size_t len)
{
        if (!dso__needs_decompress(dso))
                return -1;

        if (dso__comp(dso) == COMP_ID__NONE)
                return -1;

        return filename__decompress(name, pathname, len, dso__comp(dso), dso__load_errno(dso));
}

int dso__decompress_kmodule_fd(struct dso *dso, const char *name)
{
        return decompress_kmodule(dso, name, NULL, 0);
}

int dso__decompress_kmodule_path(struct dso *dso, const char *name,
                                 char *pathname, size_t len)
{
        int fd = decompress_kmodule(dso, name, pathname, len);

        close(fd);
        return fd >= 0 ? 0 : -1;
}

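/*
 * Usage sketch (assumes a dso whose symtab type is one of the
 * *_KMODULE_COMP types): decompress the module into a temporary file and
 * work on that path, mirroring what __open_dso() does further below.
 *
 *      char newpath[KMOD_DECOMP_LEN];
 *
 *      if (dso__decompress_kmodule_path(dso, name, newpath, sizeof(newpath)) == 0) {
 *              ... parse newpath ...
 *              unlink(newpath);
 *      }
 */
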
/*
 * Parses the kernel module specified in @path and updates
 * @m argument like:
 *
 *    @comp - true if @path contains a supported compression suffix,
 *            false otherwise
 *    @kmod - true if @path contains a '.ko' suffix in the right position,
 *            false otherwise
 *    @name - if (@alloc_name && @kmod) is true, it contains the strdup-ed base
 *            name of the kernel module without suffixes, otherwise the
 *            strdup-ed base name of @path
 *    @ext  - if (@alloc_ext && @comp) is true, it contains the strdup-ed
 *            compression suffix
 *
 * Returns 0 if there's no strdup error, -ENOMEM otherwise.
 */
int __kmod_path__parse(struct kmod_path *m, const char *path,
                       bool alloc_name)
{
        const char *name = strrchr(path, '/');
        const char *ext  = strrchr(path, '.');
        bool is_simple_name = false;

        memset(m, 0x0, sizeof(*m));
        name = name ? name + 1 : path;

        /*
         * '.' is also a valid character for module name. For example:
         * [aaa.bbb] is a valid module name. '[' should have higher
         * priority than '.ko' suffix.
         *
         * The kernel names are from machine__mmap_name. Such
         * name should belong to kernel itself, not kernel module.
         */
        if (name[0] == '[') {
                is_simple_name = true;
                if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
                    (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
                    (strncmp(name, "[vdso]", 6) == 0) ||
                    (strncmp(name, "[vdso32]", 8) == 0) ||
                    (strncmp(name, "[vdsox32]", 9) == 0) ||
                    (strncmp(name, "[vsyscall]", 10) == 0)) {
                        m->kmod = false;
                } else
                        m->kmod = true;
        }

        /* No extension, just return name. */
        if ((ext == NULL) || is_simple_name) {
                if (alloc_name) {
                        m->name = strdup(name);
                        return m->name ? 0 : -ENOMEM;
                }
                return 0;
        }

        m->comp = is_supported_compression(ext + 1);
        if (m->comp > COMP_ID__NONE)
                ext -= 3;

        /* Check .ko extension only if there's enough name left. */
        if (ext > name)
                m->kmod = !strncmp(ext, ".ko", 3);

        if (alloc_name) {
                if (m->kmod) {
                        if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
                                return -ENOMEM;
                } else {
                        if (asprintf(&m->name, "%s", name) == -1)
                                return -ENOMEM;
                }

                strreplace(m->name, '-', '_');
        }

        return 0;
}

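/*
 * Example of the parse result (a sketch; the module path is hypothetical):
 *
 *      struct kmod_path m;
 *
 *      if (kmod_path__parse(&m, "/lib/modules/6.9/kernel/fs/xfs/xfs.ko.xz") == 0)
 *              pr_debug("kmod=%d comp=%d\n", m.kmod, m.comp);
 *
 * Here m.kmod is true and m.comp identifies the xz entry in compressions[],
 * while a simple name such as "[vdso]" yields kmod == false.
 */
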
void dso__set_module_info(struct dso *dso, struct kmod_path *m,
                          struct machine *machine)
{
        if (machine__is_host(machine))
                dso__set_symtab_type(dso, DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE);
        else
                dso__set_symtab_type(dso, DSO_BINARY_TYPE__GUEST_KMODULE);

        /* _KMODULE_COMP should be next to _KMODULE */
        if (m->kmod && m->comp) {
                dso__set_symtab_type(dso, dso__symtab_type(dso) + 1);
                dso__set_comp(dso, m->comp);
        }

        dso__set_is_kmod(dso);
        dso__set_short_name(dso, strdup(m->name), true);
}

/*
 * Global list of open DSOs and the counter.
 */
static LIST_HEAD(dso__data_open);
static long dso__data_open_cnt;
static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;

static void dso__list_add(struct dso *dso)
{
        list_add_tail(&dso__data(dso)->open_entry, &dso__data_open);
#ifdef REFCNT_CHECKING
        dso__data(dso)->dso = dso__get(dso);
#endif
        /* Assume the dso is part of dsos, hence the optional reference count above. */
        assert(dso__dsos(dso));
        dso__data_open_cnt++;
}

static void dso__list_del(struct dso *dso)
{
        list_del_init(&dso__data(dso)->open_entry);
#ifdef REFCNT_CHECKING
        dso__put(dso__data(dso)->dso);
#endif
        WARN_ONCE(dso__data_open_cnt <= 0,
                  "DSO data fd counter out of bounds.");
        dso__data_open_cnt--;
}

static void close_first_dso(void);

static int do_open(char *name)
{
        int fd;
        char sbuf[STRERR_BUFSIZE];

        do {
                fd = open(name, O_RDONLY|O_CLOEXEC);
                if (fd >= 0)
                        return fd;

                pr_debug("dso open failed: %s\n",
                         str_error_r(errno, sbuf, sizeof(sbuf)));
                if (!dso__data_open_cnt || errno != EMFILE)
                        break;

                close_first_dso();
        } while (1);

        return -1;
}

char *dso__filename_with_chroot(const struct dso *dso, const char *filename)
{
        return filename_with_chroot(nsinfo__pid(dso__nsinfo_const(dso)), filename);
}

static int __open_dso(struct dso *dso, struct machine *machine)
{
        int fd = -EINVAL;
        char *root_dir = (char *)"";
        char *name = malloc(PATH_MAX);
        bool decomp = false;

        if (!name)
                return -ENOMEM;

        mutex_lock(dso__lock(dso));
        if (machine)
                root_dir = machine->root_dir;

        if (dso__read_binary_type_filename(dso, dso__binary_type(dso),
                                           root_dir, name, PATH_MAX))
                goto out;

        if (!is_regular_file(name)) {
                char *new_name;

                if (errno != ENOENT || dso__nsinfo(dso) == NULL)
                        goto out;

                new_name = dso__filename_with_chroot(dso, name);
                if (!new_name)
                        goto out;

                free(name);
                name = new_name;
        }

        if (dso__needs_decompress(dso)) {
                char newpath[KMOD_DECOMP_LEN];
                size_t len = sizeof(newpath);

                if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) {
                        fd = -(*dso__load_errno(dso));
                        goto out;
                }

                decomp = true;
                strcpy(name, newpath);
        }

        fd = do_open(name);

        if (decomp)
                unlink(name);

out:
        mutex_unlock(dso__lock(dso));
        free(name);
        return fd;
}

static void check_data_close(void);

/**
 * open_dso - Open DSO data file
 * @dso: dso object
 *
 * Open @dso's data file descriptor and update the
 * list/count of open DSO objects.
 */
static int open_dso(struct dso *dso, struct machine *machine)
{
        int fd;
        struct nscookie nsc;

        if (dso__binary_type(dso) != DSO_BINARY_TYPE__BUILD_ID_CACHE) {
                mutex_lock(dso__lock(dso));
                nsinfo__mountns_enter(dso__nsinfo(dso), &nsc);
                mutex_unlock(dso__lock(dso));
        }
        fd = __open_dso(dso, machine);
        if (dso__binary_type(dso) != DSO_BINARY_TYPE__BUILD_ID_CACHE)
                nsinfo__mountns_exit(&nsc);

        if (fd >= 0) {
                dso__list_add(dso);
                /*
                 * Check if we crossed the allowed number
                 * of opened DSOs and close one if needed.
                 */
                check_data_close();
        }

        return fd;
}

static void close_data_fd(struct dso *dso)
{
        if (dso__data(dso)->fd >= 0) {
                close(dso__data(dso)->fd);
                dso__data(dso)->fd = -1;
                dso__data(dso)->file_size = 0;
                dso__list_del(dso);
        }
}

/**
 * close_dso - Close DSO data file
 * @dso: dso object
 *
 * Close @dso's data file descriptor and update the
 * list/count of open DSO objects.
 */
static void close_dso(struct dso *dso)
{
        close_data_fd(dso);
}

static void close_first_dso(void)
{
        struct dso_data *dso_data;
        struct dso *dso;

        dso_data = list_first_entry(&dso__data_open, struct dso_data, open_entry);
#ifdef REFCNT_CHECKING
        dso = dso_data->dso;
#else
        dso = container_of(dso_data, struct dso, data);
#endif
        close_dso(dso);
}

static rlim_t get_fd_limit(void)
{
        struct rlimit l;
        rlim_t limit = 0;

        /* Allow half of the current open fd limit. */
        if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
                if (l.rlim_cur == RLIM_INFINITY)
                        limit = l.rlim_cur;
                else
                        limit = l.rlim_cur / 2;
        } else {
                pr_err("failed to get fd limit\n");
                limit = 1;
        }

        return limit;
}

static rlim_t fd_limit;

/*
 * Used only by tests/dso-data.c to reset the environment
 * for tests. I don't expect we should change this during
 * standard runtime.
 */
void reset_fd_limit(void)
{
        fd_limit = 0;
}

static bool may_cache_fd(void)
{
        if (!fd_limit)
                fd_limit = get_fd_limit();

        if (fd_limit == RLIM_INFINITY)
                return true;

        return fd_limit > (rlim_t) dso__data_open_cnt;
}

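/*
 * Worked example of the policy above: with a typical RLIMIT_NOFILE soft
 * limit of 1024, fd_limit becomes 512, so at most 512 dso data file
 * descriptors stay cached before check_data_close() starts closing the
 * oldest entry on dso__data_open.
 */
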
/*
 * Check and close LRU dso if we crossed the allowed limit
 * for opened dso file descriptors. The limit is half
 * of the RLIMIT_NOFILE files opened.
 */
static void check_data_close(void)
{
        bool cache_fd = may_cache_fd();

        if (!cache_fd)
                close_first_dso();
}

/**
 * dso__data_close - Close DSO data file
 * @dso: dso object
 *
 * External interface to close @dso's data file descriptor.
 */
void dso__data_close(struct dso *dso)
{
        pthread_mutex_lock(&dso__data_open_lock);
        close_dso(dso);
        pthread_mutex_unlock(&dso__data_open_lock);
}

static void try_to_open_dso(struct dso *dso, struct machine *machine)
{
        enum dso_binary_type binary_type_data[] = {
                DSO_BINARY_TYPE__BUILD_ID_CACHE,
                DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
                DSO_BINARY_TYPE__NOT_FOUND,
        };
        int i = 0;
        struct dso_data *dso_data = dso__data(dso);

        if (dso_data->fd >= 0)
                return;

        if (dso__binary_type(dso) != DSO_BINARY_TYPE__NOT_FOUND) {
                dso_data->fd = open_dso(dso, machine);
                goto out;
        }

        do {
                dso__set_binary_type(dso, binary_type_data[i++]);

                dso_data->fd = open_dso(dso, machine);
                if (dso_data->fd >= 0)
                        goto out;

        } while (dso__binary_type(dso) != DSO_BINARY_TYPE__NOT_FOUND);
out:
        if (dso_data->fd >= 0)
                dso_data->status = DSO_DATA_STATUS_OK;
        else
                dso_data->status = DSO_DATA_STATUS_ERROR;
}

/**
 * dso__data_get_fd - Get dso's data file descriptor
 * @dso: dso object
 * @machine: machine object
 *
 * External interface to find dso's file, open it and
 * return the file descriptor.  It should be paired with
 * dso__data_put_fd() if it returns a non-negative value.
 */
int dso__data_get_fd(struct dso *dso, struct machine *machine)
{
        if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR)
                return -1;

        if (pthread_mutex_lock(&dso__data_open_lock) < 0)
                return -1;

        try_to_open_dso(dso, machine);

        if (dso__data(dso)->fd < 0)
                pthread_mutex_unlock(&dso__data_open_lock);

        return dso__data(dso)->fd;
}

void dso__data_put_fd(struct dso *dso __maybe_unused)
{
        pthread_mutex_unlock(&dso__data_open_lock);
}

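/*
 * Typical pairing (sketch): a non-negative fd from dso__data_get_fd() must
 * be released with dso__data_put_fd(), which drops dso__data_open_lock;
 * dso__type() below follows exactly this pattern.
 *
 *      int fd = dso__data_get_fd(dso, machine);
 *
 *      if (fd >= 0) {
 *              ... read from fd ...
 *              dso__data_put_fd(dso);
 *      }
 */
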
bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
{
        u32 flag = 1 << by;

        if (dso__data(dso)->status_seen & flag)
                return false;

        dso__data(dso)->status_seen |= flag;

        return true;
}

#ifdef HAVE_LIBBPF_SUPPORT
static ssize_t bpf_read(struct dso *dso, u64 offset, char *data)
{
        struct bpf_prog_info_node *node;
        ssize_t size = DSO__DATA_CACHE_SIZE;
        struct dso_bpf_prog *dso_bpf_prog = dso__bpf_prog(dso);
        u64 len;
        u8 *buf;

        node = perf_env__find_bpf_prog_info(dso_bpf_prog->env, dso_bpf_prog->id);
        if (!node || !node->info_linear) {
                dso__data(dso)->status = DSO_DATA_STATUS_ERROR;
                return -1;
        }

        len = node->info_linear->info.jited_prog_len;
        buf = (u8 *)(uintptr_t)node->info_linear->info.jited_prog_insns;

        if (offset >= len)
                return -1;

        size = (ssize_t)min(len - offset, (u64)size);
        memcpy(data, buf + offset, size);
        return size;
}

static int bpf_size(struct dso *dso)
{
        struct bpf_prog_info_node *node;
        struct dso_bpf_prog *dso_bpf_prog = dso__bpf_prog(dso);

        node = perf_env__find_bpf_prog_info(dso_bpf_prog->env, dso_bpf_prog->id);
        if (!node || !node->info_linear) {
                dso__data(dso)->status = DSO_DATA_STATUS_ERROR;
                return -1;
        }

        dso__data(dso)->file_size = node->info_linear->info.jited_prog_len;
        return 0;
}
#endif // HAVE_LIBBPF_SUPPORT

static void dso_cache__free(struct dso *dso)
{
        struct rb_root *root = &dso__data(dso)->cache;
        struct rb_node *next = rb_first(root);

        mutex_lock(dso__lock(dso));
        while (next) {
                struct dso_cache *cache;

                cache = rb_entry(next, struct dso_cache, rb_node);
                next = rb_next(&cache->rb_node);
                rb_erase(&cache->rb_node, root);
                free(cache);
        }
        mutex_unlock(dso__lock(dso));
}

static struct dso_cache *__dso_cache__find(struct dso *dso, u64 offset)
{
        const struct rb_root *root = &dso__data(dso)->cache;
        struct rb_node * const *p = &root->rb_node;
        const struct rb_node *parent = NULL;
        struct dso_cache *cache;

        while (*p != NULL) {
                u64 end;

                parent = *p;
                cache = rb_entry(parent, struct dso_cache, rb_node);
                end = cache->offset + DSO__DATA_CACHE_SIZE;

                if (offset < cache->offset)
                        p = &(*p)->rb_left;
                else if (offset >= end)
                        p = &(*p)->rb_right;
                else
                        return cache;
        }

        return NULL;
}

*
903 dso_cache__insert(struct dso
*dso
, struct dso_cache
*new)
905 struct rb_root
*root
= &dso__data(dso
)->cache
;
906 struct rb_node
**p
= &root
->rb_node
;
907 struct rb_node
*parent
= NULL
;
908 struct dso_cache
*cache
;
909 u64 offset
= new->offset
;
911 mutex_lock(dso__lock(dso
));
916 cache
= rb_entry(parent
, struct dso_cache
, rb_node
);
917 end
= cache
->offset
+ DSO__DATA_CACHE_SIZE
;
919 if (offset
< cache
->offset
)
921 else if (offset
>= end
)
927 rb_link_node(&new->rb_node
, parent
, p
);
928 rb_insert_color(&new->rb_node
, root
);
932 mutex_unlock(dso__lock(dso
));
static ssize_t dso_cache__memcpy(struct dso_cache *cache, u64 offset, u8 *data,
                                 u64 size, bool out)
{
        u64 cache_offset = offset - cache->offset;
        u64 cache_size   = min(cache->size - cache_offset, size);

        if (out)
                memcpy(data, cache->data + cache_offset, cache_size);
        else
                memcpy(cache->data + cache_offset, data, cache_size);
        return cache_size;
}

static ssize_t file_read(struct dso *dso, struct machine *machine,
                         u64 offset, char *data)
{
        ssize_t ret = -1;

        pthread_mutex_lock(&dso__data_open_lock);

        /*
         * dso__data(dso)->fd might be closed if another thread opened another
         * file (dso) due to the open file limit (RLIMIT_NOFILE).
         */
        try_to_open_dso(dso, machine);

        if (dso__data(dso)->fd < 0) {
                dso__data(dso)->status = DSO_DATA_STATUS_ERROR;
                goto out;
        }

        ret = pread(dso__data(dso)->fd, data, DSO__DATA_CACHE_SIZE, offset);
out:
        pthread_mutex_unlock(&dso__data_open_lock);
        return ret;
}

static struct dso_cache *dso_cache__populate(struct dso *dso,
                                             struct machine *machine,
                                             u64 offset, ssize_t *ret)
{
        u64 cache_offset = offset & DSO__DATA_CACHE_MASK;
        struct dso_cache *cache;
        struct dso_cache *old;

        cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
        if (!cache) {
                *ret = -ENOMEM;
                return NULL;
        }
#ifdef HAVE_LIBBPF_SUPPORT
        if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_PROG_INFO)
                *ret = bpf_read(dso, cache_offset, cache->data);
        else
#endif
        if (dso__binary_type(dso) == DSO_BINARY_TYPE__OOL)
                *ret = DSO__DATA_CACHE_SIZE;
        else
                *ret = file_read(dso, machine, cache_offset, cache->data);

        if (*ret <= 0) {
                free(cache);
                return NULL;
        }

        cache->offset = cache_offset;
        cache->size   = *ret;

        old = dso_cache__insert(dso, cache);
        if (old) {
                /* we lose the race */
                free(cache);
                cache = old;
        }

        return cache;
}

static struct dso_cache *dso_cache__find(struct dso *dso,
                                         struct machine *machine,
                                         u64 offset,
                                         ssize_t *ret)
{
        struct dso_cache *cache = __dso_cache__find(dso, offset);

        return cache ? cache : dso_cache__populate(dso, machine, offset, ret);
}

static ssize_t dso_cache_io(struct dso *dso, struct machine *machine,
                            u64 offset, u8 *data, ssize_t size, bool out)
{
        struct dso_cache *cache;
        ssize_t ret = 0;

        cache = dso_cache__find(dso, machine, offset, &ret);
        if (!cache)
                return ret;

        return dso_cache__memcpy(cache, offset, data, size, out);
}

/*
 * Reads and caches dso data in DSO__DATA_CACHE_SIZE-sized chunks
 * in the rb_tree. Any read of already cached data is served
 * from the cache. Writes update the cache only, not the backing file.
 */
static ssize_t cached_io(struct dso *dso, struct machine *machine,
                         u64 offset, u8 *data, ssize_t size, bool out)
{
        ssize_t r = 0;
        u8 *p = data;

        do {
                ssize_t ret;

                ret = dso_cache_io(dso, machine, offset, p, size, out);
                if (ret < 0)
                        return ret;

                /* Reached EOF, return what we have. */
                if (!ret)
                        break;

                BUG_ON(ret > size);

                r      += ret;
                p      += ret;
                offset += ret;
                size   -= ret;

        } while (size);

        return r;
}

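/*
 * Chunking sketch for the cache above: an I/O at an arbitrary offset is
 * served from the DSO__DATA_CACHE_SIZE-aligned chunk that contains it,
 * i.e. the chunk starting at (offset & DSO__DATA_CACHE_MASK); requests
 * spanning a chunk boundary are satisfied by looping over chunks.
 */
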
static int file_size(struct dso *dso, struct machine *machine)
{
        int ret = 0;
        struct stat st;
        char sbuf[STRERR_BUFSIZE];

        pthread_mutex_lock(&dso__data_open_lock);

        /*
         * dso__data(dso)->fd might be closed if another thread opened another
         * file (dso) due to the open file limit (RLIMIT_NOFILE).
         */
        try_to_open_dso(dso, machine);

        if (dso__data(dso)->fd < 0) {
                ret = -errno;
                dso__data(dso)->status = DSO_DATA_STATUS_ERROR;
                goto out;
        }

        if (fstat(dso__data(dso)->fd, &st) < 0) {
                ret = -errno;
                pr_err("dso cache fstat failed: %s\n",
                       str_error_r(errno, sbuf, sizeof(sbuf)));
                dso__data(dso)->status = DSO_DATA_STATUS_ERROR;
                goto out;
        }
        dso__data(dso)->file_size = st.st_size;

out:
        pthread_mutex_unlock(&dso__data_open_lock);
        return ret;
}

int dso__data_file_size(struct dso *dso, struct machine *machine)
{
        if (dso__data(dso)->file_size)
                return 0;

        if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR)
                return -1;
#ifdef HAVE_LIBBPF_SUPPORT
        if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_PROG_INFO)
                return bpf_size(dso);
#endif
        return file_size(dso, machine);
}

/**
 * dso__data_size - Return dso data size
 * @dso: dso object
 * @machine: machine object
 *
 * Return: dso data size
 */
off_t dso__data_size(struct dso *dso, struct machine *machine)
{
        if (dso__data_file_size(dso, machine))
                return -1;

        /* For now just estimate dso data size as being close to file size */
        return dso__data(dso)->file_size;
}

static ssize_t data_read_write_offset(struct dso *dso, struct machine *machine,
                                      u64 offset, u8 *data, ssize_t size,
                                      bool out)
{
        if (dso__data_file_size(dso, machine))
                return -1;

        /* Check the offset sanity. */
        if (offset > dso__data(dso)->file_size)
                return -1;

        if (offset + size < offset)
                return -1;

        return cached_io(dso, machine, offset, data, size, out);
}

/**
 * dso__data_read_offset - Read data from dso file offset
 * @dso: dso object
 * @machine: machine object
 * @offset: file offset
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from a dso file offset. Opens the
 * dso data file and uses cached_io() to get the data.
 */
ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
                              u64 offset, u8 *data, ssize_t size)
{
        if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR)
                return -1;

        return data_read_write_offset(dso, machine, offset, data, size, true);
}

/**
 * dso__data_read_addr - Read data from dso address
 * @dso: dso object
 * @map: map object
 * @machine: machine object
 * @addr: virtual memory address
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from a dso address.
 */
ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
                            struct machine *machine, u64 addr,
                            u8 *data, ssize_t size)
{
        u64 offset = map__map_ip(map, addr);

        return dso__data_read_offset(dso, machine, offset, data, size);
}

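/*
 * Sketch of a typical read through the address-based interface; "buf",
 * "map", "machine" and the sampled address are hypothetical locals:
 *
 *      u8 buf[16];
 *
 *      if (dso__data_read_addr(dso, map, machine, sample_addr, buf, sizeof(buf)) > 0)
 *              ... decode the bytes found at sample_addr ...
 */
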
/**
 * dso__data_write_cache_offs - Write data to dso data cache at file offset
 * @dso: dso object
 * @machine: machine object
 * @offset: file offset
 * @data_in: buffer to write
 * @size: size of the @data_in buffer
 *
 * Write into the dso file data cache, but do not change the file itself.
 */
ssize_t dso__data_write_cache_offs(struct dso *dso, struct machine *machine,
                                   u64 offset, const u8 *data_in, ssize_t size)
{
        u8 *data = (u8 *)data_in; /* cast away const to use same fns for r/w */

        if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR)
                return -1;

        return data_read_write_offset(dso, machine, offset, data, size, false);
}

/**
 * dso__data_write_cache_addr - Write data to dso data cache at dso address
 * @dso: dso object
 * @map: map object
 * @machine: machine object
 * @addr: virtual memory address
 * @data: buffer to write
 * @size: size of the @data buffer
 *
 * External interface to write into the dso file data cache, but do not change
 * the file itself.
 */
ssize_t dso__data_write_cache_addr(struct dso *dso, struct map *map,
                                   struct machine *machine, u64 addr,
                                   const u8 *data, ssize_t size)
{
        u64 offset = map__map_ip(map, addr);

        return dso__data_write_cache_offs(dso, machine, offset, data, size);
}

struct map *dso__new_map(const char *name)
{
        struct map *map = NULL;
        struct dso *dso = dso__new(name);

        if (dso) {
                map = map__new2(0, dso);
                dso__put(dso);
        }

        return map;
}

struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
                                    const char *short_name, int dso_type)
{
        /*
         * The kernel dso could be created by build_id processing.
         */
        struct dso *dso = machine__findnew_dso(machine, name);

        /*
         * We need to run this in all cases, since during the build_id
         * processing we had no idea this was the kernel dso.
         */
        if (dso != NULL) {
                dso__set_short_name(dso, short_name, false);
                dso__set_kernel(dso, dso_type);
        }

        return dso;
}

static void dso__set_long_name_id(struct dso *dso, const char *name, bool name_allocated)
{
        struct dsos *dsos = dso__dsos(dso);

        if (name == NULL)
                return;

        if (dsos) {
                /*
                 * Renaming the dso non-atomically would break the sort
                 * order of the dsos, so take the write lock around it.
                 */
                down_write(&dsos->lock);
        }

        if (dso__long_name_allocated(dso))
                free((char *)dso__long_name(dso));

        RC_CHK_ACCESS(dso)->long_name = name;
        RC_CHK_ACCESS(dso)->long_name_len = strlen(name);
        dso__set_long_name_allocated(dso, name_allocated);

        if (dsos) {
                dsos->sorted = false;
                up_write(&dsos->lock);
        }
}

static int __dso_id__cmp(const struct dso_id *a, const struct dso_id *b)
{
        if (a->maj > b->maj) return -1;
        if (a->maj < b->maj) return 1;

        if (a->min > b->min) return -1;
        if (a->min < b->min) return 1;

        if (a->ino > b->ino) return -1;
        if (a->ino < b->ino) return 1;

        /*
         * Synthesized MMAP events have zero ino_generation, avoid comparing
         * them with MMAP events with actual ino_generation.
         *
         * I found it harmful because the mismatch resulted in a new
         * dso that did not have a build ID whereas the original dso did have a
         * build ID. The build ID was essential because the object was not found
         * otherwise. - Adrian
         */
        if (a->ino_generation && b->ino_generation) {
                if (a->ino_generation > b->ino_generation) return -1;
                if (a->ino_generation < b->ino_generation) return 1;
        }

        return 0;
}

bool dso_id__empty(const struct dso_id *id)
{
        if (!id)
                return true;

        return !id->maj && !id->min && !id->ino && !id->ino_generation;
}

void __dso__inject_id(struct dso *dso, const struct dso_id *id)
{
        struct dsos *dsos = dso__dsos(dso);
        struct dso_id *dso_id = dso__id(dso);

        /* dsos write lock held by caller. */

        dso_id->maj = id->maj;
        dso_id->min = id->min;
        dso_id->ino = id->ino;
        dso_id->ino_generation = id->ino_generation;

        if (dsos)
                dsos->sorted = false;
}

int dso_id__cmp(const struct dso_id *a, const struct dso_id *b)
{
        /*
         * The second argument is always dso->id, so it is zeroed if not set;
         * assume passing NULL for a means a zeroed id.
         */
        if (dso_id__empty(a) || dso_id__empty(b))
                return 0;

        return __dso_id__cmp(a, b);
}

int dso__cmp_id(struct dso *a, struct dso *b)
{
        return __dso_id__cmp(dso__id(a), dso__id(b));
}

void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
{
        dso__set_long_name_id(dso, name, name_allocated);
}

void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
{
        struct dsos *dsos = dso__dsos(dso);

        if (name == NULL)
                return;

        if (dsos) {
                /*
                 * Renaming the dso non-atomically would break the sort
                 * order of the dsos, so take the write lock around it.
                 */
                down_write(&dsos->lock);
        }
        if (dso__short_name_allocated(dso))
                free((char *)dso__short_name(dso));

        RC_CHK_ACCESS(dso)->short_name = name;
        RC_CHK_ACCESS(dso)->short_name_len = strlen(name);
        dso__set_short_name_allocated(dso, name_allocated);

        if (dsos) {
                dsos->sorted = false;
                up_write(&dsos->lock);
        }
}

int dso__name_len(const struct dso *dso)
{
        if (!dso)
                return strlen("[unknown]");
        if (verbose > 0)
                return dso__long_name_len(dso);

        return dso__short_name_len(dso);
}

bool dso__loaded(const struct dso *dso)
{
        return RC_CHK_ACCESS(dso)->loaded;
}

bool dso__sorted_by_name(const struct dso *dso)
{
        return RC_CHK_ACCESS(dso)->sorted_by_name;
}

void dso__set_sorted_by_name(struct dso *dso)
{
        RC_CHK_ACCESS(dso)->sorted_by_name = true;
}

struct dso *dso__new_id(const char *name, const struct dso_id *id)
{
        RC_STRUCT(dso) *dso = zalloc(sizeof(*dso) + strlen(name) + 1);
        struct dso *res;
        struct dso_data *data;

        if (!dso)
                return NULL;

        if (ADD_RC_CHK(res, dso)) {
                strcpy(dso->name, name);
                if (id)
                        dso->id = *id;
                dso__set_long_name_id(res, dso->name, false);
                dso__set_short_name(res, dso->name, false);
                dso->symbols = RB_ROOT_CACHED;
                dso->symbol_names = NULL;
                dso->symbol_names_len = 0;
                dso->inlined_nodes = RB_ROOT_CACHED;
                dso->srclines = RB_ROOT_CACHED;
                dso->data_types = RB_ROOT;
                dso->global_vars = RB_ROOT;
                dso->data.fd = -1;
                dso->data.status = DSO_DATA_STATUS_UNKNOWN;
                dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
                dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
                dso->is_64_bit = (sizeof(void *) == 8);
                dso->loaded = 0;
                dso->rel = 0;
                dso->sorted_by_name = 0;
                dso->has_build_id = 0;
                dso->has_srcline = 1;
                dso->a2l_fails = 1;
                dso->kernel = DSO_SPACE__USER;
                dso->is_kmod = 0;
                dso->needs_swap = DSO_SWAP__UNSET;
                dso->comp = COMP_ID__NONE;
                mutex_init(&dso->lock);
                refcount_set(&dso->refcnt, 1);
                data = &dso->data;
                data->cache = RB_ROOT;
                data->fd = -1;
                data->status = DSO_DATA_STATUS_UNKNOWN;
                INIT_LIST_HEAD(&data->open_entry);
#ifdef REFCNT_CHECKING
                data->dso = NULL; /* Set when on the open_entry list. */
#endif
        }
        return res;
}

struct dso *dso__new(const char *name)
{
        return dso__new_id(name, NULL);
}

void dso__delete(struct dso *dso)
{
        if (dso__dsos(dso))
                pr_err("DSO %s is still in rbtree when being deleted!\n", dso__long_name(dso));

        /* free inlines first, as they reference symbols */
        inlines__tree_delete(&RC_CHK_ACCESS(dso)->inlined_nodes);
        srcline__tree_delete(&RC_CHK_ACCESS(dso)->srclines);
        symbols__delete(&RC_CHK_ACCESS(dso)->symbols);
        RC_CHK_ACCESS(dso)->symbol_names_len = 0;
        zfree(&RC_CHK_ACCESS(dso)->symbol_names);
        annotated_data_type__tree_delete(dso__data_types(dso));
        global_var_type__tree_delete(dso__global_vars(dso));

        if (RC_CHK_ACCESS(dso)->short_name_allocated) {
                zfree((char **)&RC_CHK_ACCESS(dso)->short_name);
                RC_CHK_ACCESS(dso)->short_name_allocated = false;
        }

        if (RC_CHK_ACCESS(dso)->long_name_allocated) {
                zfree((char **)&RC_CHK_ACCESS(dso)->long_name);
                RC_CHK_ACCESS(dso)->long_name_allocated = false;
        }

        dso__data_close(dso);
        auxtrace_cache__free(RC_CHK_ACCESS(dso)->auxtrace_cache);
        dso_cache__free(dso);
        dso__free_a2l(dso);
        dso__free_symsrc_filename(dso);
        nsinfo__zput(RC_CHK_ACCESS(dso)->nsinfo);
        mutex_destroy(dso__lock(dso));
        RC_CHK_FREE(dso);
}

struct dso *dso__get(struct dso *dso)
{
        struct dso *result;

        if (RC_CHK_GET(result, dso))
                refcount_inc(&RC_CHK_ACCESS(dso)->refcnt);

        return result;
}

void dso__put(struct dso *dso)
{
        if (dso && refcount_dec_and_test(&RC_CHK_ACCESS(dso)->refcnt))
                dso__delete(dso);
        else
                RC_CHK_PUT(dso);
}

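/*
 * Lifetime sketch: dso__new() hands back a dso holding one reference;
 * additional users bracket their access with dso__get()/dso__put(), and the
 * final dso__put() ends up in dso__delete().  The path below is hypothetical.
 *
 *      struct dso *dso = dso__new("/usr/lib/hypothetical.so");
 *
 *      if (dso != NULL) {
 *              ... use dso ...
 *              dso__put(dso);
 *      }
 */
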
void dso__set_build_id(struct dso *dso, struct build_id *bid)
{
        RC_CHK_ACCESS(dso)->bid = *bid;
        RC_CHK_ACCESS(dso)->has_build_id = 1;
}

bool dso__build_id_equal(const struct dso *dso, struct build_id *bid)
{
        const struct build_id *dso_bid = dso__bid_const(dso);

        if (dso_bid->size > bid->size && dso_bid->size == BUILD_ID_SIZE) {
                /*
                 * For backward compatibility, allow a build-id with
                 * trailing zeros.
                 */
                return !memcmp(dso_bid->data, bid->data, bid->size) &&
                        !memchr_inv(&dso_bid->data[bid->size], 0,
                                    dso_bid->size - bid->size);
        }

        return dso_bid->size == bid->size &&
               memcmp(dso_bid->data, bid->data, dso_bid->size) == 0;
}

void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
{
        char path[PATH_MAX];

        if (machine__is_default_guest(machine))
                return;
        sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
        if (sysfs__read_build_id(path, dso__bid(dso)) == 0)
                dso__set_has_build_id(dso);
}

int dso__kernel_module_get_build_id(struct dso *dso,
                                    const char *root_dir)
{
        char filename[PATH_MAX];
        /*
         * kernel module short names are of the form "[module]" and
         * we need just "module" here.
         */
        const char *name = dso__short_name(dso) + 1;

        snprintf(filename, sizeof(filename),
                 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
                 root_dir, (int)strlen(name) - 1, name);

        if (sysfs__read_build_id(filename, dso__bid(dso)) == 0)
                dso__set_has_build_id(dso);

        return 0;
}

static size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
{
        char sbuild_id[SBUILD_ID_SIZE];

        build_id__sprintf(dso__bid(dso), sbuild_id);
        return fprintf(fp, "%s", sbuild_id);
}

size_t dso__fprintf(struct dso *dso, FILE *fp)
{
        struct rb_node *nd;
        size_t ret = fprintf(fp, "dso: %s (", dso__short_name(dso));

        if (dso__short_name(dso) != dso__long_name(dso))
                ret += fprintf(fp, "%s, ", dso__long_name(dso));
        ret += fprintf(fp, "%sloaded, ", dso__loaded(dso) ? "" : "NOT ");
        ret += dso__fprintf_buildid(dso, fp);
        ret += fprintf(fp, ")\n");
        for (nd = rb_first_cached(dso__symbols(dso)); nd; nd = rb_next(nd)) {
                struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
                ret += symbol__fprintf(pos, fp);
        }

        return ret;
}

enum dso_type dso__type(struct dso *dso, struct machine *machine)
{
        int fd;
        enum dso_type type = DSO__TYPE_UNKNOWN;

        fd = dso__data_get_fd(dso, machine);
        if (fd >= 0) {
                type = dso__type_fd(fd);
                dso__data_put_fd(dso);
        }

        return type;
}

int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
{
        int idx, errnum = *dso__load_errno(dso);
        /*
         * This must have the same ordering as the enum dso_load_errno.
         */
        static const char *dso_load__error_str[] = {
                "Internal tools/perf/ library error",
                "Invalid ELF file",
                "Can not read build id",
                "Mismatching build id",
                "Decompression failure",
        };

        BUG_ON(buflen == 0);

        if (errnum >= 0) {
                const char *err = str_error_r(errnum, buf, buflen);

                if (err != buf)
                        scnprintf(buf, buflen, "%s", err);

                return 0;
        }

        if (errnum < __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END)
                return -1;

        idx = errnum - __DSO_LOAD_ERRNO__START;
        scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);
        return 0;
}

bool perf_pid_map_tid(const char *dso_name, int *tid)
{
        return sscanf(dso_name, "/tmp/perf-%d.map", tid) == 1;
}

bool is_perf_pid_map_name(const char *dso_name)
{
        int tid;

        return perf_pid_map_tid(dso_name, &tid);
}

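/*
 * Example (sketch): "/tmp/perf-1234.map" matches the pattern above, so
 * perf_pid_map_tid() returns true with *tid set to 1234, and
 * is_perf_pid_map_name() reports it as a JIT map file name.
 */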