// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2018 Facebook */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <sys/utsname.h>
#include <sys/param.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/btf.h>
#include <gelf.h>
#include "btf.h"
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
#include "hashmap.h"

/* make sure libbpf doesn't use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64

#define BTF_MAX_NR_TYPES 0x7fffffffU
#define BTF_MAX_STR_OFFSET 0x7fffffffU
static struct btf_type btf_void;

struct btf {
	union {
		struct btf_header *hdr;
		void *data;
	};
	struct btf_type **types;
	const char *strings;
	void *nohdr_data;
	__u32 nr_types;
	__u32 types_size;
	__u32 data_size;
	int fd;
};
static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}
static int btf_add_type(struct btf *btf, struct btf_type *t)
{
	if (btf->types_size - btf->nr_types < 2) {
		struct btf_type **new_types;
		__u32 expand_by, new_size;

		if (btf->types_size == BTF_MAX_NR_TYPES)
			return -E2BIG;

		expand_by = max(btf->types_size >> 2, 16U);
		new_size = min(BTF_MAX_NR_TYPES, btf->types_size + expand_by);

		new_types = realloc(btf->types, sizeof(*new_types) * new_size);
		if (!new_types)
			return -ENOMEM;

		if (btf->nr_types == 0)
			new_types[0] = &btf_void;

		btf->types = new_types;
		btf->types_size = new_size;
	}

	btf->types[++(btf->nr_types)] = t;

	return 0;
}
static int btf_parse_hdr(struct btf *btf)
{
	const struct btf_header *hdr = btf->hdr;
	__u32 meta_left;

	if (btf->data_size < sizeof(struct btf_header)) {
		pr_debug("BTF header not found\n");
		return -EINVAL;
	}

	if (hdr->magic != BTF_MAGIC) {
		pr_debug("Invalid BTF magic:%x\n", hdr->magic);
		return -EINVAL;
	}

	if (hdr->version != BTF_VERSION) {
		pr_debug("Unsupported BTF version:%u\n", hdr->version);
		return -ENOTSUP;
	}

	if (hdr->flags) {
		pr_debug("Unsupported BTF flags:%x\n", hdr->flags);
		return -ENOTSUP;
	}

	meta_left = btf->data_size - sizeof(*hdr);
	if (!meta_left) {
		pr_debug("BTF has no data\n");
		return -EINVAL;
	}

	if (meta_left < hdr->type_off) {
		pr_debug("Invalid BTF type section offset:%u\n", hdr->type_off);
		return -EINVAL;
	}

	if (meta_left < hdr->str_off) {
		pr_debug("Invalid BTF string section offset:%u\n", hdr->str_off);
		return -EINVAL;
	}

	if (hdr->type_off >= hdr->str_off) {
		pr_debug("BTF type section offset >= string section offset. No type?\n");
		return -EINVAL;
	}

	if (hdr->type_off & 0x02) {
		pr_debug("BTF type section is not aligned to 4 bytes\n");
		return -EINVAL;
	}

	btf->nohdr_data = btf->hdr + 1;

	return 0;
}
static int btf_parse_str_sec(struct btf *btf)
{
	const struct btf_header *hdr = btf->hdr;
	const char *start = btf->nohdr_data + hdr->str_off;
	const char *end = start + btf->hdr->str_len;

	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET ||
	    start[0] || end[-1]) {
		pr_debug("Invalid BTF string section\n");
		return -EINVAL;
	}

	btf->strings = start;

	return 0;
}
static int btf_type_size(struct btf_type *t)
{
	int base_size = sizeof(struct btf_type);
	__u16 vlen = btf_vlen(t);

	switch (btf_kind(t)) {
	case BTF_KIND_FWD:
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
		return base_size;
	case BTF_KIND_INT:
		return base_size + sizeof(__u32);
	case BTF_KIND_ENUM:
		return base_size + vlen * sizeof(struct btf_enum);
	case BTF_KIND_ARRAY:
		return base_size + sizeof(struct btf_array);
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		return base_size + vlen * sizeof(struct btf_member);
	case BTF_KIND_FUNC_PROTO:
		return base_size + vlen * sizeof(struct btf_param);
	case BTF_KIND_VAR:
		return base_size + sizeof(struct btf_var);
	case BTF_KIND_DATASEC:
		return base_size + vlen * sizeof(struct btf_var_secinfo);
	default:
		pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
		return -EINVAL;
	}
}
static int btf_parse_type_sec(struct btf *btf)
{
	struct btf_header *hdr = btf->hdr;
	void *nohdr_data = btf->nohdr_data;
	void *next_type = nohdr_data + hdr->type_off;
	void *end_type = nohdr_data + hdr->str_off;

	while (next_type < end_type) {
		struct btf_type *t = next_type;
		int type_size, err;

		type_size = btf_type_size(t);
		if (type_size < 0)
			return type_size;
		next_type += type_size;
		err = btf_add_type(btf, t);
		if (err)
			return err;
	}

	return 0;
}
__u32 btf__get_nr_types(const struct btf *btf)
{
	return btf->nr_types;
}
const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
{
	if (type_id > btf->nr_types)
		return NULL;

	return btf->types[type_id];
}
static bool btf_type_is_void(const struct btf_type *t)
{
	return t == &btf_void || btf_is_fwd(t);
}

static bool btf_type_is_void_or_null(const struct btf_type *t)
{
	return !t || btf_type_is_void(t);
}
#define MAX_RESOLVE_DEPTH 32

__s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
{
	const struct btf_array *array;
	const struct btf_type *t;
	__u32 nelems = 1;
	__s64 size = -1;
	int i;

	t = btf__type_by_id(btf, type_id);
	for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t);
	     i++) {
		switch (btf_kind(t)) {
		case BTF_KIND_INT:
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
		case BTF_KIND_ENUM:
		case BTF_KIND_DATASEC:
			size = t->size;
			goto done;
		case BTF_KIND_PTR:
			size = sizeof(void *);
			goto done;
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_VOLATILE:
		case BTF_KIND_CONST:
		case BTF_KIND_RESTRICT:
		case BTF_KIND_VAR:
			type_id = t->type;
			break;
		case BTF_KIND_ARRAY:
			array = btf_array(t);
			if (nelems && array->nelems > UINT32_MAX / nelems)
				return -E2BIG;
			nelems *= array->nelems;
			type_id = array->type;
			break;
		default:
			return -EINVAL;
		}

		t = btf__type_by_id(btf, type_id);
	}

done:
	if (size < 0)
		return -EINVAL;
	if (nelems && size > UINT32_MAX / nelems)
		return -E2BIG;

	return nelems * size;
}
int btf__align_of(const struct btf *btf, __u32 id)
{
	const struct btf_type *t = btf__type_by_id(btf, id);
	__u16 kind = btf_kind(t);

	switch (kind) {
	case BTF_KIND_INT:
	case BTF_KIND_ENUM:
		return min(sizeof(void *), (size_t)t->size);
	case BTF_KIND_PTR:
		return sizeof(void *);
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
		return btf__align_of(btf, t->type);
	case BTF_KIND_ARRAY:
		return btf__align_of(btf, btf_array(t)->type);
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		const struct btf_member *m = btf_members(t);
		__u16 vlen = btf_vlen(t);
		int i, max_align = 1, align;

		for (i = 0; i < vlen; i++, m++) {
			align = btf__align_of(btf, m->type);
			if (align <= 0)
				return align;
			max_align = max(max_align, align);
		}

		return max_align;
	}
	default:
		pr_warn("unsupported BTF_KIND:%u\n", btf_kind(t));
		return 0;
	}
}
int btf__resolve_type(const struct btf *btf, __u32 type_id)
{
	const struct btf_type *t;
	int depth = 0;

	t = btf__type_by_id(btf, type_id);
	while (depth < MAX_RESOLVE_DEPTH &&
	       !btf_type_is_void_or_null(t) &&
	       (btf_is_mod(t) || btf_is_typedef(t) || btf_is_var(t))) {
		type_id = t->type;
		t = btf__type_by_id(btf, type_id);
		depth++;
	}

	if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t))
		return -EINVAL;

	return type_id;
}
__s32 btf__find_by_name(const struct btf *btf, const char *type_name)
{
	__u32 i;

	if (!strcmp(type_name, "void"))
		return 0;

	for (i = 1; i <= btf->nr_types; i++) {
		const struct btf_type *t = btf->types[i];
		const char *name = btf__name_by_offset(btf, t->name_off);

		if (name && !strcmp(type_name, name))
			return i;
	}

	return -ENOENT;
}
__s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
			     __u32 kind)
{
	__u32 i;

	if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
		return 0;

	for (i = 1; i <= btf->nr_types; i++) {
		const struct btf_type *t = btf->types[i];
		const char *name;

		if (btf_kind(t) != kind)
			continue;
		name = btf__name_by_offset(btf, t->name_off);
		if (name && !strcmp(type_name, name))
			return i;
	}

	return -ENOENT;
}
void btf__free(struct btf *btf)
{
	if (!btf)
		return;

	if (btf->fd != -1)
		close(btf->fd);

	free(btf->data);
	free(btf->types);
	free(btf);
}
struct btf *btf__new(__u8 *data, __u32 size)
{
	struct btf *btf;
	int err;

	btf = calloc(1, sizeof(struct btf));
	if (!btf)
		return ERR_PTR(-ENOMEM);

	btf->fd = -1;

	btf->data = malloc(size);
	if (!btf->data) {
		err = -ENOMEM;
		goto done;
	}

	memcpy(btf->data, data, size);
	btf->data_size = size;

	err = btf_parse_hdr(btf);
	if (err)
		goto done;

	err = btf_parse_str_sec(btf);
	if (err)
		goto done;

	err = btf_parse_type_sec(btf);

done:
	if (err) {
		btf__free(btf);
		return ERR_PTR(err);
	}

	return btf;
}
static bool btf_check_endianness(const GElf_Ehdr *ehdr)
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
	return ehdr->e_ident[EI_DATA] == ELFDATA2LSB;
#elif __BYTE_ORDER == __BIG_ENDIAN
	return ehdr->e_ident[EI_DATA] == ELFDATA2MSB;
#else
# error "Unrecognized __BYTE_ORDER"
#endif
}
struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
{
	Elf_Data *btf_data = NULL, *btf_ext_data = NULL;
	int err = 0, fd = -1, idx = 0;
	struct btf *btf = NULL;
	Elf_Scn *scn = NULL;
	Elf *elf = NULL;
	GElf_Ehdr ehdr;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warn("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		err = -errno;
		pr_warn("failed to open %s: %s\n", path, strerror(errno));
		return ERR_PTR(err);
	}

	err = -LIBBPF_ERRNO__FORMAT;

	elf = elf_begin(fd, ELF_C_READ, NULL);
	if (!elf) {
		pr_warn("failed to open %s as ELF file\n", path);
		goto done;
	}
	if (!gelf_getehdr(elf, &ehdr)) {
		pr_warn("failed to get EHDR from %s\n", path);
		goto done;
	}
	if (!btf_check_endianness(&ehdr)) {
		pr_warn("non-native ELF endianness is not supported\n");
		goto done;
	}
	if (!elf_rawdata(elf_getscn(elf, ehdr.e_shstrndx), NULL)) {
		pr_warn("failed to get e_shstrndx from %s\n", path);
		goto done;
	}

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		GElf_Shdr sh;
		char *name;

		idx++;
		if (gelf_getshdr(scn, &sh) != &sh) {
			pr_warn("failed to get section(%d) header from %s\n",
				idx, path);
			goto done;
		}
		name = elf_strptr(elf, ehdr.e_shstrndx, sh.sh_name);
		if (!name) {
			pr_warn("failed to get section(%d) name from %s\n",
				idx, path);
			goto done;
		}
		if (strcmp(name, BTF_ELF_SEC) == 0) {
			btf_data = elf_getdata(scn, 0);
			if (!btf_data) {
				pr_warn("failed to get section(%d, %s) data from %s\n",
					idx, name, path);
				goto done;
			}
			continue;
		} else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) {
			btf_ext_data = elf_getdata(scn, 0);
			if (!btf_ext_data) {
				pr_warn("failed to get section(%d, %s) data from %s\n",
					idx, name, path);
				goto done;
			}
			continue;
		}
	}

	err = 0;

	if (!btf_data) {
		err = -ENOENT;
		goto done;
	}
	btf = btf__new(btf_data->d_buf, btf_data->d_size);
	if (IS_ERR(btf))
		goto done;

	if (btf_ext && btf_ext_data) {
		*btf_ext = btf_ext__new(btf_ext_data->d_buf,
					btf_ext_data->d_size);
		if (IS_ERR(*btf_ext))
			goto done;
	} else if (btf_ext) {
		*btf_ext = NULL;
	}
done:
	if (elf)
		elf_end(elf);
	close(fd);

	if (err)
		return ERR_PTR(err);
	/*
	 * btf is always parsed before btf_ext, so no need to clean up
	 * btf_ext, if btf loading failed
	 */
	if (IS_ERR(btf))
		return btf;
	if (btf_ext && IS_ERR(*btf_ext)) {
		btf__free(btf);
		err = PTR_ERR(*btf_ext);
		return ERR_PTR(err);
	}
	return btf;
}
static int compare_vsi_off(const void *_a, const void *_b)
{
	const struct btf_var_secinfo *a = _a;
	const struct btf_var_secinfo *b = _b;

	return a->offset - b->offset;
}
static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
			     struct btf_type *t)
{
	__u32 size = 0, off = 0, i, vars = btf_vlen(t);
	const char *name = btf__name_by_offset(btf, t->name_off);
	const struct btf_type *t_var;
	struct btf_var_secinfo *vsi;
	const struct btf_var *var;
	int ret;

	if (!name) {
		pr_debug("No name found in string section for DATASEC kind.\n");
		return -ENOENT;
	}

	/* .extern datasec size and var offsets were set correctly during
	 * extern collection step, so just skip straight to sorting variables
	 */
	if (t->size)
		goto sort_vars;

	ret = bpf_object__section_size(obj, name, &size);
	if (ret || !size || (t->size && t->size != size)) {
		pr_debug("Invalid size for section %s: %u bytes\n", name, size);
		return -ENOENT;
	}

	t->size = size;

	for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
		t_var = btf__type_by_id(btf, vsi->type);
		var = btf_var(t_var);

		if (!btf_is_var(t_var)) {
			pr_debug("Non-VAR type seen in section %s\n", name);
			return -EINVAL;
		}

		if (var->linkage == BTF_VAR_STATIC)
			continue;

		name = btf__name_by_offset(btf, t_var->name_off);
		if (!name) {
			pr_debug("No name found in string section for VAR kind\n");
			return -ENOENT;
		}

		ret = bpf_object__variable_offset(obj, name, &off);
		if (ret) {
			pr_debug("No offset found in symbol table for VAR %s\n",
				 name);
			return -ENOENT;
		}

		vsi->offset = off;
	}

sort_vars:
	qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off);
	return 0;
}
int btf__finalize_data(struct bpf_object *obj, struct btf *btf)
{
	int err = 0;
	__u32 i;

	for (i = 1; i <= btf->nr_types; i++) {
		struct btf_type *t = btf->types[i];

		/* Loader needs to fix up some of the things compiler
		 * couldn't get its hands on while emitting BTF. This
		 * is section size and global variable offset. We use
		 * the info from the ELF itself for this purpose.
		 */
		if (btf_is_datasec(t)) {
			err = btf_fixup_datasec(obj, btf, t);
			if (err)
				break;
		}
	}

	return err;
}
int btf__load(struct btf *btf)
{
	__u32 log_buf_size = BPF_LOG_BUF_SIZE;
	char *log_buf = NULL;
	int err = 0;

	if (btf->fd >= 0)
		return -EEXIST;

	log_buf = malloc(log_buf_size);
	if (!log_buf)
		return -ENOMEM;

	*log_buf = 0;

	btf->fd = bpf_load_btf(btf->data, btf->data_size,
			       log_buf, log_buf_size, false);
	if (btf->fd < 0) {
		err = -errno;
		pr_warn("Error loading BTF: %s(%d)\n", strerror(errno), errno);
		if (*log_buf)
			pr_warn("%s\n", log_buf);
		goto done;
	}

done:
	free(log_buf);
	return err;
}
int btf__fd(const struct btf *btf)
{
	return btf->fd;
}
const void *btf__get_raw_data(const struct btf *btf, __u32 *size)
{
	*size = btf->data_size;
	return btf->data;
}
const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
{
	if (offset < btf->hdr->str_len)
		return &btf->strings[offset];
	else
		return NULL;
}
int btf__get_from_id(__u32 id, struct btf **btf)
{
	struct bpf_btf_info btf_info = { 0 };
	__u32 len = sizeof(btf_info);
	__u32 last_size;
	int btf_fd;
	void *ptr;
	int err;

	err = 0;
	*btf = NULL;
	btf_fd = bpf_btf_get_fd_by_id(id);
	if (btf_fd < 0)
		return 0;

	/* we won't know btf_size until we call bpf_obj_get_info_by_fd(). so
	 * let's start with a sane default - 4KiB here - and resize it only if
	 * bpf_obj_get_info_by_fd() needs a bigger buffer.
	 */
	btf_info.btf_size = 4096;
	last_size = btf_info.btf_size;
	ptr = malloc(last_size);
	if (!ptr) {
		err = -ENOMEM;
		goto exit_free;
	}

	memset(ptr, 0, last_size);
	btf_info.btf = ptr_to_u64(ptr);
	err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);

	if (!err && btf_info.btf_size > last_size) {
		void *temp_ptr;

		last_size = btf_info.btf_size;
		temp_ptr = realloc(ptr, last_size);
		if (!temp_ptr) {
			err = -ENOMEM;
			goto exit_free;
		}
		ptr = temp_ptr;
		memset(ptr, 0, last_size);
		btf_info.btf = ptr_to_u64(ptr);
		err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
	}

	if (err || btf_info.btf_size > last_size) {
		err = errno;
		goto exit_free;
	}

	*btf = btf__new((__u8 *)(long)btf_info.btf, btf_info.btf_size);
	if (IS_ERR(*btf)) {
		err = PTR_ERR(*btf);
		*btf = NULL;
	}

exit_free:
	close(btf_fd);
	free(ptr);

	return err;
}
int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
			 __u32 expected_key_size, __u32 expected_value_size,
			 __u32 *key_type_id, __u32 *value_type_id)
{
	const struct btf_type *container_type;
	const struct btf_member *key, *value;
	const size_t max_name = 256;
	char container_name[max_name];
	__s64 key_size, value_size;
	__s32 container_id;

	if (snprintf(container_name, max_name, "____btf_map_%s", map_name) ==
	    max_name) {
		pr_warn("map:%s length of '____btf_map_%s' is too long\n",
			map_name, map_name);
		return -EINVAL;
	}

	container_id = btf__find_by_name(btf, container_name);
	if (container_id < 0) {
		pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
			 map_name, container_name);
		return container_id;
	}

	container_type = btf__type_by_id(btf, container_id);
	if (!container_type) {
		pr_warn("map:%s cannot find BTF type for container_id:%u\n",
			map_name, container_id);
		return -EINVAL;
	}

	if (!btf_is_struct(container_type) || btf_vlen(container_type) < 2) {
		pr_warn("map:%s container_name:%s is an invalid container struct\n",
			map_name, container_name);
		return -EINVAL;
	}

	key = btf_members(container_type);
	value = key + 1;

	key_size = btf__resolve_size(btf, key->type);
	if (key_size < 0) {
		pr_warn("map:%s invalid BTF key_type_size\n", map_name);
		return key_size;
	}

	if (expected_key_size != key_size) {
		pr_warn("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
			map_name, (__u32)key_size, expected_key_size);
		return -EINVAL;
	}

	value_size = btf__resolve_size(btf, value->type);
	if (value_size < 0) {
		pr_warn("map:%s invalid BTF value_type_size\n", map_name);
		return value_size;
	}

	if (expected_value_size != value_size) {
		pr_warn("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
			map_name, (__u32)value_size, expected_value_size);
		return -EINVAL;
	}

	*key_type_id = key->type;
	*value_type_id = value->type;

	return 0;
}
struct btf_ext_sec_setup_param {
	__u32 off;
	__u32 len;
	__u32 min_rec_size;
	struct btf_ext_info *ext_info;
	const char *desc;
};
static int btf_ext_setup_info(struct btf_ext *btf_ext,
			      struct btf_ext_sec_setup_param *ext_sec)
{
	const struct btf_ext_info_sec *sinfo;
	struct btf_ext_info *ext_info;
	__u32 info_left, record_size;
	/* The start of the info sec (including the __u32 record_size). */
	void *info;

	if (ext_sec->len == 0)
		return 0;

	if (ext_sec->off & 0x03) {
		pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n",
			 ext_sec->desc);
		return -EINVAL;
	}

	info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off;
	info_left = ext_sec->len;

	if (btf_ext->data + btf_ext->data_size < info + ext_sec->len) {
		pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n",
			 ext_sec->desc, ext_sec->off, ext_sec->len);
		return -EINVAL;
	}

	/* At least a record size */
	if (info_left < sizeof(__u32)) {
		pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc);
		return -EINVAL;
	}

	/* The record size needs to meet the minimum standard */
	record_size = *(__u32 *)info;
	if (record_size < ext_sec->min_rec_size ||
	    record_size & 0x03) {
		pr_debug("%s section in .BTF.ext has invalid record size %u\n",
			 ext_sec->desc, record_size);
		return -EINVAL;
	}

	sinfo = info + sizeof(__u32);
	info_left -= sizeof(__u32);

	/* If no records, return failure now so .BTF.ext won't be used. */
	if (!info_left) {
		pr_debug("%s section in .BTF.ext has no records", ext_sec->desc);
		return -EINVAL;
	}

	while (info_left) {
		unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec);
		__u64 total_record_size;
		__u32 num_records;

		if (info_left < sec_hdrlen) {
			pr_debug("%s section header is not found in .BTF.ext\n",
				 ext_sec->desc);
			return -EINVAL;
		}

		num_records = sinfo->num_info;
		if (num_records == 0) {
			pr_debug("%s section has incorrect num_records in .BTF.ext\n",
				 ext_sec->desc);
			return -EINVAL;
		}

		total_record_size = sec_hdrlen +
				    (__u64)num_records * record_size;
		if (info_left < total_record_size) {
			pr_debug("%s section has incorrect num_records in .BTF.ext\n",
				 ext_sec->desc);
			return -EINVAL;
		}

		info_left -= total_record_size;
		sinfo = (void *)sinfo + total_record_size;
	}

	ext_info = ext_sec->ext_info;
	ext_info->len = ext_sec->len - sizeof(__u32);
	ext_info->rec_size = record_size;
	ext_info->info = info + sizeof(__u32);

	return 0;
}
static int btf_ext_setup_func_info(struct btf_ext *btf_ext)
{
	struct btf_ext_sec_setup_param param = {
		.off = btf_ext->hdr->func_info_off,
		.len = btf_ext->hdr->func_info_len,
		.min_rec_size = sizeof(struct bpf_func_info_min),
		.ext_info = &btf_ext->func_info,
		.desc = "func_info"
	};

	return btf_ext_setup_info(btf_ext, &param);
}

static int btf_ext_setup_line_info(struct btf_ext *btf_ext)
{
	struct btf_ext_sec_setup_param param = {
		.off = btf_ext->hdr->line_info_off,
		.len = btf_ext->hdr->line_info_len,
		.min_rec_size = sizeof(struct bpf_line_info_min),
		.ext_info = &btf_ext->line_info,
		.desc = "line_info",
	};

	return btf_ext_setup_info(btf_ext, &param);
}

static int btf_ext_setup_field_reloc(struct btf_ext *btf_ext)
{
	struct btf_ext_sec_setup_param param = {
		.off = btf_ext->hdr->field_reloc_off,
		.len = btf_ext->hdr->field_reloc_len,
		.min_rec_size = sizeof(struct bpf_field_reloc),
		.ext_info = &btf_ext->field_reloc_info,
		.desc = "field_reloc",
	};

	return btf_ext_setup_info(btf_ext, &param);
}
static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
{
	const struct btf_ext_header *hdr = (struct btf_ext_header *)data;

	if (data_size < offsetofend(struct btf_ext_header, hdr_len) ||
	    data_size < hdr->hdr_len) {
		pr_debug("BTF.ext header not found");
		return -EINVAL;
	}

	if (hdr->magic != BTF_MAGIC) {
		pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic);
		return -EINVAL;
	}

	if (hdr->version != BTF_VERSION) {
		pr_debug("Unsupported BTF.ext version:%u\n", hdr->version);
		return -ENOTSUP;
	}

	if (hdr->flags) {
		pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags);
		return -ENOTSUP;
	}

	if (data_size == hdr->hdr_len) {
		pr_debug("BTF.ext has no data\n");
		return -EINVAL;
	}

	return 0;
}
void btf_ext__free(struct btf_ext *btf_ext)
{
	if (!btf_ext)
		return;
	free(btf_ext->data);
	free(btf_ext);
}
struct btf_ext *btf_ext__new(__u8 *data, __u32 size)
{
	struct btf_ext *btf_ext;
	int err;

	err = btf_ext_parse_hdr(data, size);
	if (err)
		return ERR_PTR(err);

	btf_ext = calloc(1, sizeof(struct btf_ext));
	if (!btf_ext)
		return ERR_PTR(-ENOMEM);

	btf_ext->data_size = size;
	btf_ext->data = malloc(size);
	if (!btf_ext->data) {
		err = -ENOMEM;
		goto done;
	}
	memcpy(btf_ext->data, data, size);

	if (btf_ext->hdr->hdr_len <
	    offsetofend(struct btf_ext_header, line_info_len))
		goto done;
	err = btf_ext_setup_func_info(btf_ext);
	if (err)
		goto done;

	err = btf_ext_setup_line_info(btf_ext);
	if (err)
		goto done;

	if (btf_ext->hdr->hdr_len <
	    offsetofend(struct btf_ext_header, field_reloc_len))
		goto done;
	err = btf_ext_setup_field_reloc(btf_ext);
	if (err)
		goto done;

done:
	if (err) {
		btf_ext__free(btf_ext);
		return ERR_PTR(err);
	}

	return btf_ext;
}
const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size)
{
	*size = btf_ext->data_size;
	return btf_ext->data;
}
static int btf_ext_reloc_info(const struct btf *btf,
			      const struct btf_ext_info *ext_info,
			      const char *sec_name, __u32 insns_cnt,
			      void **info, __u32 *cnt)
{
	__u32 sec_hdrlen = sizeof(struct btf_ext_info_sec);
	__u32 i, record_size, existing_len, records_len;
	struct btf_ext_info_sec *sinfo;
	const char *info_sec_name;
	__u64 remain_len;
	void *data;

	record_size = ext_info->rec_size;
	sinfo = ext_info->info;
	remain_len = ext_info->len;
	while (remain_len > 0) {
		records_len = sinfo->num_info * record_size;
		info_sec_name = btf__name_by_offset(btf, sinfo->sec_name_off);
		if (strcmp(info_sec_name, sec_name)) {
			remain_len -= sec_hdrlen + records_len;
			sinfo = (void *)sinfo + sec_hdrlen + records_len;
			continue;
		}

		existing_len = (*cnt) * record_size;
		data = realloc(*info, existing_len + records_len);
		if (!data)
			return -ENOMEM;

		memcpy(data + existing_len, sinfo->data, records_len);
		/* adjust insn_off only, the rest of the data will be passed
		 * to the kernel.
		 */
		for (i = 0; i < sinfo->num_info; i++) {
			__u32 *insn_off;

			insn_off = data + existing_len + (i * record_size);
			*insn_off = *insn_off / sizeof(struct bpf_insn) +
				insns_cnt;
		}
		*info = data;
		*cnt += sinfo->num_info;
		return 0;
	}

	return -ENOENT;
}
int btf_ext__reloc_func_info(const struct btf *btf,
			     const struct btf_ext *btf_ext,
			     const char *sec_name, __u32 insns_cnt,
			     void **func_info, __u32 *cnt)
{
	return btf_ext_reloc_info(btf, &btf_ext->func_info, sec_name,
				  insns_cnt, func_info, cnt);
}

int btf_ext__reloc_line_info(const struct btf *btf,
			     const struct btf_ext *btf_ext,
			     const char *sec_name, __u32 insns_cnt,
			     void **line_info, __u32 *cnt)
{
	return btf_ext_reloc_info(btf, &btf_ext->line_info, sec_name,
				  insns_cnt, line_info, cnt);
}

__u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext)
{
	return btf_ext->func_info.rec_size;
}

__u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext)
{
	return btf_ext->line_info.rec_size;
}
struct btf_dedup;

static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
				       const struct btf_dedup_opts *opts);
static void btf_dedup_free(struct btf_dedup *d);

static int btf_dedup_strings(struct btf_dedup *d);
static int btf_dedup_prim_types(struct btf_dedup *d);
static int btf_dedup_struct_types(struct btf_dedup *d);
static int btf_dedup_ref_types(struct btf_dedup *d);
static int btf_dedup_compact_types(struct btf_dedup *d);
static int btf_dedup_remap_types(struct btf_dedup *d);
/*
 * Deduplicate BTF types and strings.
 *
 * BTF dedup algorithm takes as an input `struct btf` representing `.BTF` ELF
 * section with all BTF type descriptors and string data. It overwrites that
 * memory in-place with deduplicated types and strings without any loss of
 * information. If optional `struct btf_ext` representing '.BTF.ext' ELF section
 * is provided, all the strings referenced from .BTF.ext section are honored
 * and updated to point to the right offsets after deduplication.
 *
 * If function returns with error, type/string data might be garbled and should
 * be discarded.
 *
 * A more verbose and detailed description of both the problem btf_dedup is
 * solving and its solution can be found at:
 * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html
 *
 * Problem description and justification
 * =====================================
 *
 * BTF type information is typically emitted either as a result of conversion
 * from DWARF to BTF or directly by the compiler. In both cases, each
 * compilation unit contains information about a subset of all the types that
 * are used in an application. These subsets are frequently overlapping and
 * contain a lot of duplicated information when later concatenated together
 * into a single binary. This algorithm ensures that each unique type is
 * represented by a single BTF type descriptor, greatly reducing the resulting
 * size of BTF data.
 *
 * Compilation unit isolation and subsequent duplication of data is not the
 * only problem. The same type hierarchy (e.g., a struct and all the types that
 * the struct references) in different compilation units can be represented in
 * BTF to various degrees of completeness (or, rather, incompleteness) due to
 * struct/union forward declarations.
 *
 * Let's take a look at an example that we'll use to better understand the
 * problem (and solution). Suppose we have two compilation units, each using
 * the same `struct S`, but each of them having incomplete type information
 * about the struct's fields:
 *
 * // CU #1:
 * struct S;
 * struct A {
 *	int a;
 *	struct A *self;
 *	struct S *parent;
 * };
 * struct B;
 * struct S {
 *	struct A *a_ptr;
 *	struct B *b_ptr;
 * };
 *
 * // CU #2:
 * struct S;
 * struct A;
 * struct B {
 *	int b;
 *	struct B *self;
 *	struct S *parent;
 * };
 * struct S {
 *	struct A *a_ptr;
 *	struct B *b_ptr;
 * };
 *
 * In case of CU #1, BTF data will know only that `struct B` exists (but no
 * more), but will know the complete type information about `struct A`. While
 * for CU #2, it will know full type information about `struct B`, but will
 * only know about the forward declaration of `struct A` (in BTF terms, it will
 * have a `BTF_KIND_FWD` type descriptor with name `A`).
 *
 * This compilation unit isolation means that it's possible that there is no
 * single CU with complete type information describing structs `S`, `A`, and
 * `B`. Also, we might get tons of duplicated and redundant type information.
 *
 * An additional complication we need to keep in mind comes from the fact that
 * types, in general, can form graphs containing cycles, not just DAGs.
 *
 * While the algorithm does deduplication, it also merges and resolves type
 * information (unless disabled through `struct btf_dedup_opts`), whenever
 * possible. E.g., in the example above with two compilation units having
 * partial type information for structs `A` and `B`, the output of the
 * algorithm will emit a single copy of each BTF type that describes structs
 * `A`, `B`, and `S` (as well as type information for `int` and pointers), as
 * if they were defined in a single compilation unit as:
 *
 * struct A {
 *	int a;
 *	struct A *self;
 *	struct S *parent;
 * };
 * struct B {
 *	int b;
 *	struct B *self;
 *	struct S *parent;
 * };
 * struct S {
 *	struct A *a_ptr;
 *	struct B *b_ptr;
 * };
 *
 * Algorithm summary
 * =================
 *
 * The algorithm completes its work in 6 separate passes:
 *
 * 1. Strings deduplication.
 * 2. Primitive types deduplication (int, enum, fwd).
 * 3. Struct/union types deduplication.
 * 4. Reference types deduplication (pointers, typedefs, arrays, funcs, func
 *    protos, and const/volatile/restrict modifiers).
 * 5. Types compaction.
 * 6. Types remapping.
 *
 * The algorithm determines a canonical type descriptor, which is a single
 * representative type for each truly unique type. This canonical type is the
 * one that will go into the final deduplicated BTF type information. For
 * struct/unions, it is also the type that the algorithm will merge additional
 * type information into (while resolving FWDs), as it discovers it from data
 * in other CUs. Each input BTF type eventually gets either mapped to itself,
 * if that type is canonical, or to some other type, if that type is equivalent
 * and was chosen as the canonical representative. This mapping is stored in
 * the `btf_dedup->map` array. This map is also used to record the STRUCT/UNION
 * that a FWD type got resolved to.
 *
 * To facilitate fast discovery of canonical types, we also maintain a
 * canonical index (`btf_dedup->dedup_table`), which maps a type descriptor's
 * signature hash (i.e., hashed kind, name, size, fields, etc) into a list of
 * canonical types that match that signature. With a sufficiently good choice
 * of type signature hashing function, we can limit the number of canonical
 * types for each unique type signature to a very small number, allowing the
 * canonical type for any duplicated type to be found very quickly.
 *
 * Struct/union deduplication is the most critical part and the algorithm for
 * deduplicating structs/unions is described in greater detail in the comments
 * for the `btf_dedup_is_equiv` function.
 */
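/*
 * Illustrative usage sketch (not part of the original file; error handling is
 * elided and "prog.o" is a hypothetical ELF object with .BTF/.BTF.ext
 * sections):
 *
 *	struct btf_ext *btf_ext = NULL;
 *	struct btf *btf = btf__parse_elf("prog.o", &btf_ext);
 *
 *	if (!IS_ERR(btf) && btf__dedup(btf, btf_ext, NULL) == 0)
 *		err = btf__load(btf);
 */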
int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
	       const struct btf_dedup_opts *opts)
{
	struct btf_dedup *d = btf_dedup_new(btf, btf_ext, opts);
	int err;

	if (IS_ERR(d)) {
		pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d));
		return -EINVAL;
	}

	err = btf_dedup_strings(d);
	if (err < 0) {
		pr_debug("btf_dedup_strings failed:%d\n", err);
		goto done;
	}

	err = btf_dedup_prim_types(d);
	if (err < 0) {
		pr_debug("btf_dedup_prim_types failed:%d\n", err);
		goto done;
	}

	err = btf_dedup_struct_types(d);
	if (err < 0) {
		pr_debug("btf_dedup_struct_types failed:%d\n", err);
		goto done;
	}

	err = btf_dedup_ref_types(d);
	if (err < 0) {
		pr_debug("btf_dedup_ref_types failed:%d\n", err);
		goto done;
	}

	err = btf_dedup_compact_types(d);
	if (err < 0) {
		pr_debug("btf_dedup_compact_types failed:%d\n", err);
		goto done;
	}

	err = btf_dedup_remap_types(d);
	if (err < 0) {
		pr_debug("btf_dedup_remap_types failed:%d\n", err);
		goto done;
	}

done:
	btf_dedup_free(d);
	return err;
}
#define BTF_UNPROCESSED_ID ((__u32)-1)
#define BTF_IN_PROGRESS_ID ((__u32)-2)

struct btf_dedup {
	/* .BTF section to be deduped in-place */
	struct btf *btf;
	/*
	 * Optional .BTF.ext section. When provided, any strings referenced
	 * from it will be taken into account when deduping strings
	 */
	struct btf_ext *btf_ext;
	/*
	 * This is a map from any type's signature hash to a list of possible
	 * canonical representative type candidates. Hash collisions are
	 * ignored, so even types of various kinds can share same list of
	 * candidates, which is fine because we rely on subsequent
	 * btf_xxx_equal() checks to authoritatively verify type equality.
	 */
	struct hashmap *dedup_table;
	/* Canonical types map */
	__u32 *map;
	/* Hypothetical mapping, used during type graph equivalence checks */
	__u32 *hypot_map;
	__u32 *hypot_list;
	size_t hypot_cnt;
	size_t hypot_cap;
	/* Various option modifying behavior of algorithm */
	struct btf_dedup_opts opts;
};
struct btf_str_ptr {
	const char *str;
	__u32 new_off;
	bool used;
};

struct btf_str_ptrs {
	struct btf_str_ptr *ptrs;
	const char *data;
	__u32 cnt;
	__u32 cap;
};
static long hash_combine(long h, long value)
{
	return h * 31 + value;
}

#define for_each_dedup_cand(d, node, hash) \
	hashmap__for_each_key_entry(d->dedup_table, node, (void *)hash)
static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id)
{
	return hashmap__append(d->dedup_table,
			       (void *)hash, (void *)(long)type_id);
}

static int btf_dedup_hypot_map_add(struct btf_dedup *d,
				   __u32 from_id, __u32 to_id)
{
	if (d->hypot_cnt == d->hypot_cap) {
		__u32 *new_list;

		d->hypot_cap += max((size_t)16, d->hypot_cap / 2);
		new_list = realloc(d->hypot_list, sizeof(__u32) * d->hypot_cap);
		if (!new_list)
			return -ENOMEM;
		d->hypot_list = new_list;
	}
	d->hypot_list[d->hypot_cnt++] = from_id;
	d->hypot_map[from_id] = to_id;
	return 0;
}

static void btf_dedup_clear_hypot_map(struct btf_dedup *d)
{
	int i;

	for (i = 0; i < d->hypot_cnt; i++)
		d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID;
	d->hypot_cnt = 0;
}
static void btf_dedup_free(struct btf_dedup *d)
{
	hashmap__free(d->dedup_table);
	d->dedup_table = NULL;

	free(d->map);
	d->map = NULL;

	free(d->hypot_map);
	d->hypot_map = NULL;

	free(d->hypot_list);
	d->hypot_list = NULL;

	free(d);
}
static size_t btf_dedup_identity_hash_fn(const void *key, void *ctx)
{
	return (size_t)key;
}

static size_t btf_dedup_collision_hash_fn(const void *key, void *ctx)
{
	return 0;
}

static bool btf_dedup_equal_fn(const void *k1, const void *k2, void *ctx)
{
	return k1 == k2;
}
static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
				       const struct btf_dedup_opts *opts)
{
	struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
	hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn;
	int i, err = 0;

	if (!d)
		return ERR_PTR(-ENOMEM);

	d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds;
	/* dedup_table_size is now used only to force collisions in tests */
	if (opts && opts->dedup_table_size == 1)
		hash_fn = btf_dedup_collision_hash_fn;

	d->btf = btf;
	d->btf_ext = btf_ext;

	d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL);
	if (IS_ERR(d->dedup_table)) {
		err = PTR_ERR(d->dedup_table);
		d->dedup_table = NULL;
		goto done;
	}

	d->map = malloc(sizeof(__u32) * (1 + btf->nr_types));
	if (!d->map) {
		err = -ENOMEM;
		goto done;
	}
	/* special BTF "void" type is made canonical immediately */
	d->map[0] = 0;
	for (i = 1; i <= btf->nr_types; i++) {
		struct btf_type *t = d->btf->types[i];

		/* VAR and DATASEC are never deduped and are self-canonical */
		if (btf_is_var(t) || btf_is_datasec(t))
			d->map[i] = i;
		else
			d->map[i] = BTF_UNPROCESSED_ID;
	}

	d->hypot_map = malloc(sizeof(__u32) * (1 + btf->nr_types));
	if (!d->hypot_map) {
		err = -ENOMEM;
		goto done;
	}
	for (i = 0; i <= btf->nr_types; i++)
		d->hypot_map[i] = BTF_UNPROCESSED_ID;

done:
	if (err) {
		btf_dedup_free(d);
		return ERR_PTR(err);
	}

	return d;
}
typedef int (*str_off_fn_t)(__u32 *str_off_ptr, void *ctx);

/*
 * Iterate over all possible places in .BTF and .BTF.ext that can reference
 * a string and pass a pointer to it to a provided callback `fn`.
 */
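/*
 * For example, btf_dedup_strings() below drives both of its passes through
 * this single iterator, just with two different callbacks:
 *
 *	err = btf_for_each_str_off(d, btf_str_mark_as_used, &strs);
 *	err = btf_for_each_str_off(d, btf_str_remap_offset, &strs);
 */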
static int btf_for_each_str_off(struct btf_dedup *d, str_off_fn_t fn, void *ctx)
{
	void *line_data_cur, *line_data_end;
	int i, j, r, rec_size;
	struct btf_type *t;

	for (i = 1; i <= d->btf->nr_types; i++) {
		t = d->btf->types[i];
		r = fn(&t->name_off, ctx);
		if (r)
			return r;

		switch (btf_kind(t)) {
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION: {
			struct btf_member *m = btf_members(t);
			__u16 vlen = btf_vlen(t);

			for (j = 0; j < vlen; j++) {
				r = fn(&m->name_off, ctx);
				if (r)
					return r;
				m++;
			}
			break;
		}
		case BTF_KIND_ENUM: {
			struct btf_enum *m = btf_enum(t);
			__u16 vlen = btf_vlen(t);

			for (j = 0; j < vlen; j++) {
				r = fn(&m->name_off, ctx);
				if (r)
					return r;
				m++;
			}
			break;
		}
		case BTF_KIND_FUNC_PROTO: {
			struct btf_param *m = btf_params(t);
			__u16 vlen = btf_vlen(t);

			for (j = 0; j < vlen; j++) {
				r = fn(&m->name_off, ctx);
				if (r)
					return r;
				m++;
			}
			break;
		}
		default:
			break;
		}
	}

	if (!d->btf_ext)
		return 0;

	line_data_cur = d->btf_ext->line_info.info;
	line_data_end = d->btf_ext->line_info.info + d->btf_ext->line_info.len;
	rec_size = d->btf_ext->line_info.rec_size;

	while (line_data_cur < line_data_end) {
		struct btf_ext_info_sec *sec = line_data_cur;
		struct bpf_line_info_min *line_info;
		__u32 num_info = sec->num_info;

		r = fn(&sec->sec_name_off, ctx);
		if (r)
			return r;

		line_data_cur += sizeof(struct btf_ext_info_sec);
		for (i = 0; i < num_info; i++) {
			line_info = line_data_cur;
			r = fn(&line_info->file_name_off, ctx);
			if (r)
				return r;
			r = fn(&line_info->line_off, ctx);
			if (r)
				return r;
			line_data_cur += rec_size;
		}
	}

	return 0;
}
, const void *a2
)
1614 const struct btf_str_ptr
*p1
= a1
;
1615 const struct btf_str_ptr
*p2
= a2
;
1617 return strcmp(p1
->str
, p2
->str
);
1620 static int str_sort_by_offset(const void *a1
, const void *a2
)
1622 const struct btf_str_ptr
*p1
= a1
;
1623 const struct btf_str_ptr
*p2
= a2
;
1625 if (p1
->str
!= p2
->str
)
1626 return p1
->str
< p2
->str
? -1 : 1;
1630 static int btf_dedup_str_ptr_cmp(const void *str_ptr
, const void *pelem
)
1632 const struct btf_str_ptr
*p
= pelem
;
1634 if (str_ptr
!= p
->str
)
1635 return (const char *)str_ptr
< p
->str
? -1 : 1;
static int btf_str_mark_as_used(__u32 *str_off_ptr, void *ctx)
{
	struct btf_str_ptrs *strs;
	struct btf_str_ptr *s;

	if (*str_off_ptr == 0)
		return 0;

	strs = ctx;
	s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt,
		    sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp);
	if (!s)
		return -EINVAL;
	s->used = true;
	return 0;
}

static int btf_str_remap_offset(__u32 *str_off_ptr, void *ctx)
{
	struct btf_str_ptrs *strs;
	struct btf_str_ptr *s;

	if (*str_off_ptr == 0)
		return 0;

	strs = ctx;
	s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt,
		    sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp);
	if (!s)
		return -EINVAL;
	*str_off_ptr = s->new_off;
	return 0;
}
/*
 * Dedup strings and filter out those that are not referenced from either .BTF
 * or .BTF.ext (if provided) sections.
 *
 * This is done by building an index of all strings in BTF's string section,
 * then iterating over all entities that can reference strings (e.g., type
 * names, struct field names, .BTF.ext line info, etc) and marking
 * corresponding strings as used. After that all used strings are deduped and
 * compacted into a sequential blob of memory and new offsets are calculated.
 * Then all the string references are iterated again and rewritten using new
 * offsets.
 */
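/*
 * Illustrative example (not part of the original file): given the string
 * section
 *	"\0used\0unused\0used\0"
 * where both copies of "used" (offsets 1 and 13) are referenced and "unused"
 * is referenced nowhere, the pass compacts the section to "\0used\0" and
 * rewrites every reference to offset 1 or 13 to the single new offset 1.
 */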
static int btf_dedup_strings(struct btf_dedup *d)
{
	const struct btf_header *hdr = d->btf->hdr;
	char *start = (char *)d->btf->nohdr_data + hdr->str_off;
	char *end = start + d->btf->hdr->str_len;
	char *p = start, *tmp_strs = NULL;
	struct btf_str_ptrs strs = {
		.cnt = 0,
		.cap = 0,
		.ptrs = NULL,
		.data = start,
	};
	int i, j, err = 0, grp_idx;
	bool grp_used;

	/* build index of all strings */
	while (p < end) {
		if (strs.cnt + 1 > strs.cap) {
			struct btf_str_ptr *new_ptrs;

			strs.cap += max(strs.cnt / 2, 16U);
			new_ptrs = realloc(strs.ptrs,
					   sizeof(strs.ptrs[0]) * strs.cap);
			if (!new_ptrs) {
				err = -ENOMEM;
				goto done;
			}
			strs.ptrs = new_ptrs;
		}

		strs.ptrs[strs.cnt].str = p;
		strs.ptrs[strs.cnt].used = false;

		p += strlen(p) + 1;
		strs.cnt++;
	}

	/* temporary storage for deduplicated strings */
	tmp_strs = malloc(d->btf->hdr->str_len);
	if (!tmp_strs) {
		err = -ENOMEM;
		goto done;
	}

	/* mark all used strings */
	strs.ptrs[0].used = true;
	err = btf_for_each_str_off(d, btf_str_mark_as_used, &strs);
	if (err)
		goto done;

	/* sort strings by content, so that we can identify duplicates */
	qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_content);

	/*
	 * iterate groups of equal strings and if any instance in a group was
	 * referenced, emit single instance and remember new offset
	 */
	p = tmp_strs;
	grp_idx = 0;
	grp_used = strs.ptrs[0].used;
	/* iterate past end to avoid code duplication after loop */
	for (i = 1; i <= strs.cnt; i++) {
		/*
		 * when i == strs.cnt, we want to skip string comparison and go
		 * straight to handling last group of strings (otherwise we'd
		 * need to handle last group after the loop w/ duplicated code)
		 */
		if (i < strs.cnt &&
		    !strcmp(strs.ptrs[i].str, strs.ptrs[grp_idx].str)) {
			grp_used = grp_used || strs.ptrs[i].used;
			continue;
		}

		/*
		 * this check would have been required after the loop to handle
		 * last group of strings, but due to <= condition in a loop
		 * we avoid that duplication
		 */
		if (grp_used) {
			int new_off = p - tmp_strs;
			__u32 len = strlen(strs.ptrs[grp_idx].str);

			memmove(p, strs.ptrs[grp_idx].str, len + 1);
			for (j = grp_idx; j < i; j++)
				strs.ptrs[j].new_off = new_off;
			p += len + 1;
		}

		if (i < strs.cnt) {
			grp_idx = i;
			grp_used = strs.ptrs[i].used;
		}
	}

	/* replace original strings with deduped ones */
	d->btf->hdr->str_len = p - tmp_strs;
	memmove(start, tmp_strs, d->btf->hdr->str_len);
	end = start + d->btf->hdr->str_len;

	/* restore original order for further binary search lookups */
	qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_offset);

	/* remap string offsets */
	err = btf_for_each_str_off(d, btf_str_remap_offset, &strs);
	if (err)
		goto done;

	d->btf->hdr->str_len = end - start;

done:
	free(tmp_strs);
	free(strs.ptrs);
	return err;
}
static long btf_hash_common(struct btf_type *t)
{
	long h;

	h = hash_combine(0, t->name_off);
	h = hash_combine(h, t->info);
	h = hash_combine(h, t->size);
	return h;
}

static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
{
	return t1->name_off == t2->name_off &&
	       t1->info == t2->info &&
	       t1->size == t2->size;
}
/* Calculate type signature hash of INT. */
static long btf_hash_int(struct btf_type *t)
{
	__u32 info = *(__u32 *)(t + 1);
	long h;

	h = btf_hash_common(t);
	h = hash_combine(h, info);
	return h;
}

/* Check structural equality of two INTs. */
static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2)
{
	__u32 info1, info2;

	if (!btf_equal_common(t1, t2))
		return false;
	info1 = *(__u32 *)(t1 + 1);
	info2 = *(__u32 *)(t2 + 1);
	return info1 == info2;
}
/* Calculate type signature hash of ENUM. */
static long btf_hash_enum(struct btf_type *t)
{
	long h;

	/* don't hash vlen and enum members to support enum fwd resolving */
	h = hash_combine(0, t->name_off);
	h = hash_combine(h, t->info & ~0xffff);
	h = hash_combine(h, t->size);
	return h;
}

/* Check structural equality of two ENUMs. */
static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
{
	const struct btf_enum *m1, *m2;
	__u16 vlen;
	int i;

	if (!btf_equal_common(t1, t2))
		return false;

	vlen = btf_vlen(t1);
	m1 = btf_enum(t1);
	m2 = btf_enum(t2);
	for (i = 0; i < vlen; i++) {
		if (m1->name_off != m2->name_off || m1->val != m2->val)
			return false;
		m1++;
		m2++;
	}
	return true;
}
static inline bool btf_is_enum_fwd(struct btf_type *t)
{
	return btf_is_enum(t) && btf_vlen(t) == 0;
}

static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
{
	if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2))
		return btf_equal_enum(t1, t2);
	/* ignore vlen when comparing */
	return t1->name_off == t2->name_off &&
	       (t1->info & ~0xffff) == (t2->info & ~0xffff) &&
	       t1->size == t2->size;
}
/*
 * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
 * as referenced type IDs equivalence is established separately during type
 * graph equivalence check algorithm.
 */
static long btf_hash_struct(struct btf_type *t)
{
	const struct btf_member *member = btf_members(t);
	__u32 vlen = btf_vlen(t);
	long h = btf_hash_common(t);
	int i;

	for (i = 0; i < vlen; i++) {
		h = hash_combine(h, member->name_off);
		h = hash_combine(h, member->offset);
		/* no hashing of referenced type ID, it can be unresolved yet */
		member++;
	}
	return h;
}

/*
 * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced
 * type IDs. This check is performed during type graph equivalence check and
 * referenced types equivalence is checked separately.
 */
static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
{
	const struct btf_member *m1, *m2;
	__u16 vlen;
	int i;

	if (!btf_equal_common(t1, t2))
		return false;

	vlen = btf_vlen(t1);
	m1 = btf_members(t1);
	m2 = btf_members(t2);
	for (i = 0; i < vlen; i++) {
		if (m1->name_off != m2->name_off || m1->offset != m2->offset)
			return false;
		m1++;
		m2++;
	}
	return true;
}
/*
 * Calculate type signature hash of ARRAY, including referenced type IDs,
 * under assumption that they were already resolved to canonical type IDs and
 * are not going to change.
 */
static long btf_hash_array(struct btf_type *t)
{
	const struct btf_array *info = btf_array(t);
	long h = btf_hash_common(t);

	h = hash_combine(h, info->type);
	h = hash_combine(h, info->index_type);
	h = hash_combine(h, info->nelems);
	return h;
}

/*
 * Check exact equality of two ARRAYs, taking into account referenced
 * type IDs, under assumption that they were already resolved to canonical
 * type IDs and are not going to change.
 * This function is called during reference types deduplication to compare
 * ARRAY to potential canonical representative.
 */
static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2)
{
	const struct btf_array *info1, *info2;

	if (!btf_equal_common(t1, t2))
		return false;

	info1 = btf_array(t1);
	info2 = btf_array(t2);
	return info1->type == info2->type &&
	       info1->index_type == info2->index_type &&
	       info1->nelems == info2->nelems;
}

/*
 * Check structural compatibility of two ARRAYs, ignoring referenced type
 * IDs. This check is performed during type graph equivalence check and
 * referenced types equivalence is checked separately.
 */
static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2)
{
	if (!btf_equal_common(t1, t2))
		return false;

	return btf_array(t1)->nelems == btf_array(t2)->nelems;
}
/*
 * Calculate type signature hash of FUNC_PROTO, including referenced type IDs,
 * under assumption that they were already resolved to canonical type IDs and
 * are not going to change.
 */
static long btf_hash_fnproto(struct btf_type *t)
{
	const struct btf_param *member = btf_params(t);
	__u16 vlen = btf_vlen(t);
	long h = btf_hash_common(t);
	int i;

	for (i = 0; i < vlen; i++) {
		h = hash_combine(h, member->name_off);
		h = hash_combine(h, member->type);
		member++;
	}
	return h;
}

/*
 * Check exact equality of two FUNC_PROTOs, taking into account referenced
 * type IDs, under assumption that they were already resolved to canonical
 * type IDs and are not going to change.
 * This function is called during reference types deduplication to compare
 * FUNC_PROTO to potential canonical representative.
 */
static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
{
	const struct btf_param *m1, *m2;
	__u16 vlen;
	int i;

	if (!btf_equal_common(t1, t2))
		return false;

	vlen = btf_vlen(t1);
	m1 = btf_params(t1);
	m2 = btf_params(t2);
	for (i = 0; i < vlen; i++) {
		if (m1->name_off != m2->name_off || m1->type != m2->type)
			return false;
		m1++;
		m2++;
	}
	return true;
}

/*
 * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type
 * IDs. This check is performed during type graph equivalence check and
 * referenced types equivalence is checked separately.
 */
static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
{
	const struct btf_param *m1, *m2;
	__u16 vlen;
	int i;

	/* skip return type ID */
	if (t1->name_off != t2->name_off || t1->info != t2->info)
		return false;

	vlen = btf_vlen(t1);
	m1 = btf_params(t1);
	m2 = btf_params(t2);
	for (i = 0; i < vlen; i++) {
		if (m1->name_off != m2->name_off)
			return false;
		m1++;
		m2++;
	}
	return true;
}
/*
 * Deduplicate primitive types, which can't reference other types, by
 * calculating their type signature hash and comparing them with any possible
 * canonical candidate. If no canonical candidate matches, the type itself is
 * marked as canonical and is added into `btf_dedup->dedup_table` as another
 * candidate.
 */
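/*
 * Illustrative example (not part of the original file): if two CUs each emit
 * an identical BTF_KIND_INT "int" (same name_off after string dedup, same
 * size and encoding), both hash to the same btf_hash_int() value. The second
 * one finds the first via for_each_dedup_cand()/btf_equal_int(), so it is
 * mapped to the first's type ID and nothing new enters the dedup table.
 */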
static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
{
	struct btf_type *t = d->btf->types[type_id];
	struct hashmap_entry *hash_entry;
	struct btf_type *cand;
	/* if we don't find equivalent type, then we are canonical */
	__u32 new_id = type_id;
	__u32 cand_id;
	long h;

	switch (btf_kind(t)) {
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_ARRAY:
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
	case BTF_KIND_FUNC:
	case BTF_KIND_FUNC_PROTO:
	case BTF_KIND_VAR:
	case BTF_KIND_DATASEC:
		return 0;

	case BTF_KIND_INT:
		h = btf_hash_int(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = (__u32)(long)hash_entry->value;
			cand = d->btf->types[cand_id];
			if (btf_equal_int(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;

	case BTF_KIND_ENUM:
		h = btf_hash_enum(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = (__u32)(long)hash_entry->value;
			cand = d->btf->types[cand_id];
			if (btf_equal_enum(t, cand)) {
				new_id = cand_id;
				break;
			}
			if (d->opts.dont_resolve_fwds)
				continue;
			if (btf_compat_enum(t, cand)) {
				if (btf_is_enum_fwd(t)) {
					/* resolve fwd to full enum */
					new_id = cand_id;
					break;
				}
				/* resolve canonical enum fwd to full enum */
				d->map[cand_id] = type_id;
			}
		}
		break;

	case BTF_KIND_FWD:
		h = btf_hash_common(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = (__u32)(long)hash_entry->value;
			cand = d->btf->types[cand_id];
			if (btf_equal_common(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;

	default:
		return -EINVAL;
	}

	d->map[type_id] = new_id;
	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
		return -ENOMEM;

	return 0;
}
static int btf_dedup_prim_types(struct btf_dedup *d)
{
	int i, err;

	for (i = 1; i <= d->btf->nr_types; i++) {
		err = btf_dedup_prim_type(d, i);
		if (err)
			return err;
	}

	return 0;
}
/*
 * Check whether type is already mapped into canonical one (could be to itself).
 */
static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id)
{
	return d->map[type_id] <= BTF_MAX_NR_TYPES;
}
/*
 * Resolve type ID into its canonical type ID, if any; otherwise return original
 * type ID. If type is FWD and is resolved into STRUCT/UNION already, follow
 * STRUCT/UNION link and resolve it into canonical type ID as well.
 */
static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id)
{
	while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
		type_id = d->map[type_id];
	return type_id;
}
/*
 * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original
 * type ID.
 */
static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id)
{
	__u32 orig_type_id = type_id;

	if (!btf_is_fwd(d->btf->types[type_id]))
		return type_id;

	while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
		type_id = d->map[type_id];

	if (!btf_is_fwd(d->btf->types[type_id]))
		return type_id;

	return orig_type_id;
}
static inline __u16 btf_fwd_kind(struct btf_type *t)
{
	return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
}
/*
 * Check equivalence of BTF type graph formed by candidate struct/union (we'll
 * call it "candidate graph" in this description for brevity) to a type graph
 * formed by (potential) canonical struct/union ("canonical graph" for brevity
 * here, though keep in mind that not all types in canonical graph are
 * necessarily canonical representatives themselves, some of them might be
 * duplicates or their uniqueness might not have been established yet).
 * Returns:
 *  - >0, if type graphs are equivalent;
 *  -  0, if not equivalent;
 *  - <0, on error.
 *
 * The algorithm performs a side-by-side DFS traversal of both type graphs and
 * checks equivalence of BTF types at each step. If at any point BTF types in
 * candidate and canonical graphs are not compatible structurally, whole graphs
 * are incompatible. If types are structurally equivalent (i.e., all
 * information except referenced type IDs is exactly the same), a mapping from
 * `canon_id` to a `cand_id` is recorded in the hypothetical mapping
 * (`btf_dedup->hypot_map`). If a type references other types, then those
 * referenced types are checked for equivalence recursively.
 *
 * During DFS traversal, if we find that for the current `canon_id` type we
 * already have some mapping in the hypothetical map, we check for two possible
 * situations:
 *   - `canon_id` is mapped to exactly the same type as `cand_id`. This will
 *     happen when type graphs have cycles. In this case we assume those two
 *     types are equivalent.
 *   - `canon_id` is mapped to a different type. This is a contradiction in our
 *     hypothetical mapping, because the same graph in the canonical graph
 *     corresponds to two different types in the candidate graph, which for
 *     equivalent type graphs shouldn't happen. This condition terminates the
 *     equivalence check with a negative result.
 *
 * If the type graphs traversal exhausts types to check and finds no
 * contradiction, then the type graphs are equivalent.
 *
 * When checking types for equivalence, there is one special case: FWD types.
 * If FWD type resolution is allowed and one of the types (either from
 * canonical or candidate graph) is FWD and the other is STRUCT/UNION
 * (depending on FWD's kind flag) and their names match, the hypothetical
 * mapping is updated to point from FWD to STRUCT/UNION. If the graphs are
 * successfully determined to be equivalent, this mapping will be used to
 * record the FWD -> STRUCT/UNION mapping permanently.
 *
 * Technically, this could lead to incorrect FWD to STRUCT/UNION resolution,
 * if there are two identically named (or anonymous) structs/unions that are
 * compatible structurally, one of which has a FWD field, while the other is a
 * concrete STRUCT/UNION, but according to C sources they are different
 * structs/unions that are referencing different types with the same name.
 * This is extremely unlikely to happen, but the btf_dedup API allows disabling
 * FWD resolution if this logic is causing problems.
 *
 * Doing FWD resolution means that both candidate and/or canonical graphs can
 * consist of portions of the graph that come from multiple compilation units.
 * This is due to the fact that types within a single compilation unit are
 * always deduplicated and FWDs are already resolved, if the referenced
 * struct/union definition is available. So, if we had an unresolved FWD and
 * found the corresponding STRUCT/UNION, they will be from different
 * compilation units. This consequently means that when we "link" a FWD to the
 * corresponding STRUCT/UNION, the type graph will likely have at least two
 * different BTF types that describe the same type (e.g., most probably there
 * will be two different BTF types for the same 'int' primitive type) and could
 * even have "overlapping" parts of the type graph that describe the same
 * subset of types.
 *
 * This in turn means that our assumption that each type in the canonical graph
 * must correspond to exactly one type in the candidate graph might not hold
 * anymore and will make it harder to detect contradictions using the
 * hypothetical map. To handle this problem, we allow following the
 * FWD -> STRUCT/UNION resolution only in the canonical graph. FWDs in the
 * candidate graph are never resolved. To see why it's OK, let's check all the
 * possible situations w.r.t. FWDs in candidate and canonical graphs:
 *   - Both types in canonical and candidate graphs are FWDs. If they are
 *     structurally equivalent, then they can either be both resolved to the
 *     same STRUCT/UNION or not resolved at all. In both cases they are
 *     equivalent and there is no need to resolve the FWD on the candidate
 *     side.
 *   - Both types in canonical and candidate graphs are concrete STRUCT/UNION,
 *     so nothing to resolve as well, the algorithm will check equivalence
 *     anyway.
 *   - Type in canonical graph is FWD, while type in candidate is concrete
 *     STRUCT/UNION. In this case the candidate graph comes from a single
 *     compilation unit, so there is exactly one BTF type for each unique C
 *     type. After resolving the FWD into a STRUCT/UNION, there might be more
 *     than one BTF type in the canonical graph mapping to a single BTF type
 *     in the candidate graph, but because the hypothetical mapping maps from
 *     canonical to candidate types, it's alright, and we still maintain the
 *     property of having a single `canon_id` mapping to a single `cand_id`
 *     (there could be two different `canon_id` mapped to the same `cand_id`,
 *     but it's not contradictory).
 *   - Type in canonical graph is concrete STRUCT/UNION, while type in the
 *     candidate graph is FWD. In this case we are just going to check
 *     compatibility of STRUCT/UNION and the corresponding FWD, and if they
 *     are compatible, we'll assume that whatever STRUCT/UNION the FWD
 *     resolves to must be equivalent to the concrete STRUCT/UNION from the
 *     canonical graph. If the rest of the type graphs turn out equivalent,
 *     we'll re-resolve the FWD to the concrete STRUCT/UNION from the
 *     canonical graph.
 */
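/*
 * Illustrative example (not part of the original file): with canonical
 *	struct S { struct A *a; };	// `struct A` here is a FWD
 * and candidate
 *	struct S { struct A *a; };	// `struct A` here is concrete
 * the traversal first matches S shallowly, then, since names and fwd_kind
 * match, records canonical FWD `A` -> candidate's concrete `A` in the
 * hypothetical map and reports the graphs as equivalent.
 */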
static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
			      __u32 canon_id)
{
	struct btf_type *cand_type;
	struct btf_type *canon_type;
	__u32 hypot_type_id;
	__u16 cand_kind;
	__u16 canon_kind;
	int i, eq;

	/* if both resolve to the same canonical, they must be equivalent */
	if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id))
		return 1;

	canon_id = resolve_fwd_id(d, canon_id);

	hypot_type_id = d->hypot_map[canon_id];
	if (hypot_type_id <= BTF_MAX_NR_TYPES)
		return hypot_type_id == cand_id;

	if (btf_dedup_hypot_map_add(d, canon_id, cand_id))
		return -ENOMEM;

	cand_type = d->btf->types[cand_id];
	canon_type = d->btf->types[canon_id];
	cand_kind = btf_kind(cand_type);
	canon_kind = btf_kind(canon_type);

	if (cand_type->name_off != canon_type->name_off)
		return 0;

	/* FWD <--> STRUCT/UNION equivalence check, if enabled */
	if (!d->opts.dont_resolve_fwds
	    && (cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
	    && cand_kind != canon_kind) {
		__u16 real_kind;
		__u16 fwd_kind;

		if (cand_kind == BTF_KIND_FWD) {
			real_kind = canon_kind;
			fwd_kind = btf_fwd_kind(cand_type);
		} else {
			real_kind = cand_kind;
			fwd_kind = btf_fwd_kind(canon_type);
		}
		return fwd_kind == real_kind;
	}

	if (cand_kind != canon_kind)
		return 0;

	switch (cand_kind) {
	case BTF_KIND_INT:
		return btf_equal_int(cand_type, canon_type);

	case BTF_KIND_ENUM:
		if (d->opts.dont_resolve_fwds)
			return btf_equal_enum(cand_type, canon_type);
		else
			return btf_compat_enum(cand_type, canon_type);

	case BTF_KIND_FWD:
		return btf_equal_common(cand_type, canon_type);

	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
		if (cand_type->info != canon_type->info)
			return 0;
		return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);

	case BTF_KIND_ARRAY: {
		const struct btf_array *cand_arr, *canon_arr;

		if (!btf_compat_array(cand_type, canon_type))
			return 0;
		cand_arr = btf_array(cand_type);
		canon_arr = btf_array(canon_type);
		eq = btf_dedup_is_equiv(d,
			cand_arr->index_type, canon_arr->index_type);
		if (eq <= 0)
			return eq;
		return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type);
	}

	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		const struct btf_member *cand_m, *canon_m;
		__u16 vlen;

		if (!btf_shallow_equal_struct(cand_type, canon_type))
			return 0;
		vlen = btf_vlen(cand_type);
		cand_m = btf_members(cand_type);
		canon_m = btf_members(canon_type);
		for (i = 0; i < vlen; i++) {
			eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type);
			if (eq <= 0)
				return eq;
			cand_m++;
			canon_m++;
		}

		return 1;
	}

	case BTF_KIND_FUNC_PROTO: {
		const struct btf_param *cand_p, *canon_p;
		__u16 vlen;

		if (!btf_compat_fnproto(cand_type, canon_type))
			return 0;
		eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
		if (eq <= 0)
			return eq;
		vlen = btf_vlen(cand_type);
		cand_p = btf_params(cand_type);
		canon_p = btf_params(canon_type);
		for (i = 0; i < vlen; i++) {
			eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type);
			if (eq <= 0)
				return eq;
			cand_p++;
			canon_p++;
		}

		return 1;
	}

	default:
		return -EINVAL;
	}
	return 0;
}
/*
 * Use hypothetical mapping, produced by successful type graph equivalence
 * check, to augment existing struct/union canonical mapping, where possible.
 *
 * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record
 * FWD -> STRUCT/UNION correspondence as well. FWD resolution is bidirectional:
 * it doesn't matter if the FWD type was part of the canonical graph or the
 * candidate one, we are recording the mapping anyway. As opposed to the
 * carefulness required for struct/union correspondence mapping (described
 * below), for FWD resolution it's not important, as by the time the FWD type
 * (a reference type) is deduplicated, all structs/unions will already have
 * been deduped anyway.
 *
 * Recording STRUCT/UNION mapping is purely a performance optimization and is
 * not required for correctness. It needs to be done carefully to ensure that
 * a struct/union from the candidate's type graph is not mapped into a
 * corresponding struct/union from the canonical type graph that itself hasn't
 * been resolved into a canonical representative. The only guarantee we have is
 * that the canonical struct/union was determined as canonical and that won't
 * change. But any types referenced through that struct/union's fields could
 * have been not yet resolved, so in a case like that it's too early to
 * establish any kind of correspondence between structs/unions.
 *
 * No canonical correspondence is derived for primitive types (they are
 * already deduplicated completely anyway) or reference types (they rely on
 * stability of struct/union canonical relationship for equivalence checks).
 */
2463 static void btf_dedup_merge_hypot_map(struct btf_dedup
*d
)
2465 __u32 cand_type_id
, targ_type_id
;
2466 __u16 t_kind
, c_kind
;
2470 for (i
= 0; i
< d
->hypot_cnt
; i
++) {
2471 cand_type_id
= d
->hypot_list
[i
];
2472 targ_type_id
= d
->hypot_map
[cand_type_id
];
2473 t_id
= resolve_type_id(d
, targ_type_id
);
2474 c_id
= resolve_type_id(d
, cand_type_id
);
2475 t_kind
= btf_kind(d
->btf
->types
[t_id
]);
2476 c_kind
= btf_kind(d
->btf
->types
[c_id
]);
2478 * Resolve FWD into STRUCT/UNION.
2479 * It's ok to resolve FWD into STRUCT/UNION that's not yet
2480 * mapped to canonical representative (as opposed to
2481 * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because
2482 * eventually that struct is going to be mapped and all resolved
2483 * FWDs will automatically resolve to correct canonical
2484 * representative. This will happen before ref type deduping,
2485 * which critically depends on stability of these mapping. This
2486 * stability is not a requirement for STRUCT/UNION equivalence
2489 if (t_kind
!= BTF_KIND_FWD
&& c_kind
== BTF_KIND_FWD
)
2490 d
->map
[c_id
] = t_id
;
2491 else if (t_kind
== BTF_KIND_FWD
&& c_kind
!= BTF_KIND_FWD
)
2492 d
->map
[t_id
] = c_id
;
2494 if ((t_kind
== BTF_KIND_STRUCT
|| t_kind
== BTF_KIND_UNION
) &&
2495 c_kind
!= BTF_KIND_FWD
&&
2496 is_type_mapped(d
, c_id
) &&
2497 !is_type_mapped(d
, t_id
)) {
2499 * as a perf optimization, we can map struct/union
2500 * that's part of type graph we just verified for
2501 * equivalence. We can do that for struct/union that has
2502 * canonical representative only, though.
2504 d
->map
[t_id
] = c_id
;
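
/*
 * Continuing the sketch above (editorial example): after the successful
 * equivalence check, hypot_map pairs FWD s with STRUCT s and candidate
 * struct t with canonical struct t. Merging always records the
 * FWD -> STRUCT resolution, but records the STRUCT -> STRUCT mapping only
 * because canonical struct t already has a canonical representative
 * (itself); had it still been unresolved, that mapping would be skipped,
 * as explained in the comment above.
 */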

/*
 * Deduplicate struct/union types.
 *
 * For each struct/union type its type signature hash is calculated, taking
 * into account type's name, size, number, order and names of fields, but
 * ignoring type ID's referenced from fields, because they might not be deduped
 * completely until after reference types deduplication phase. This type hash
 * is used to iterate over all potential canonical types, sharing same hash.
 * For each canonical candidate we check whether type graphs that they form
 * (through referenced types in fields and so on) are equivalent using algorithm
 * implemented in `btf_dedup_is_equiv`. If such equivalence is found and
 * BTF_KIND_FWD resolution is allowed, then hypothetical mapping
 * (btf_dedup->hypot_map) produced by aforementioned type graph equivalence
 * algorithm is used to record FWD -> STRUCT/UNION mapping. It's also used to
 * potentially map other structs/unions to their canonical representatives,
 * if such relationship hasn't yet been established. This speeds up algorithm
 * by eliminating some of the duplicate work.
 *
 * If no matching canonical representative was found, struct/union is marked
 * as canonical for itself and is added into btf_dedup->dedup_table hash map
 * for further lookups.
 */
static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
{
	struct btf_type *cand_type, *t;
	struct hashmap_entry *hash_entry;
	/* if we don't find equivalent type, then we are canonical */
	__u32 new_id = type_id;
	__u16 kind;
	long h;

	/* already deduped or is in process of deduping (loop detected) */
	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
		return 0;

	t = d->btf->types[type_id];
	kind = btf_kind(t);

	if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
		return 0;

	h = btf_hash_struct(t);
	for_each_dedup_cand(d, hash_entry, h) {
		__u32 cand_id = (__u32)(long)hash_entry->value;
		int eq;

		/*
		 * Even though btf_dedup_is_equiv() checks for
		 * btf_shallow_equal_struct() internally when checking two
		 * structs (unions) for equivalence, we need to guard here
		 * from picking matching FWD type as a dedup candidate.
		 * This can happen due to hash collision. In such case just
		 * relying on btf_dedup_is_equiv() would lead to potentially
		 * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because
		 * FWD and compatible STRUCT/UNION are considered equivalent.
		 */
		cand_type = d->btf->types[cand_id];
		if (!btf_shallow_equal_struct(t, cand_type))
			continue;

		btf_dedup_clear_hypot_map(d);
		eq = btf_dedup_is_equiv(d, type_id, cand_id);
		if (eq < 0)
			return eq;
		if (!eq)
			continue;
		new_id = cand_id;
		btf_dedup_merge_hypot_map(d);
		break;
	}

	d->map[type_id] = new_id;
	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
		return -ENOMEM;

	return 0;
}

static int btf_dedup_struct_types(struct btf_dedup *d)
{
	int i, err;

	for (i = 1; i <= d->btf->nr_types; i++) {
		err = btf_dedup_struct_type(d, i);
		if (err)
			return err;
	}
	return 0;
}
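
/*
 * Editorial sketch of why field type IDs are excluded from the struct hash:
 * given two duplicated graphs
 *
 *   [1] struct list { [2] struct list *next; }   (CU #1)
 *   [3] struct list { [4] struct list *next; }   (CU #2)
 *
 * both structs must land in the same hash bucket even though their member
 * type IDs (2 vs 4) differ; actual equality of the graphs is then
 * established structurally by btf_dedup_is_equiv(), never by comparing IDs.
 */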

/*
 * Deduplicate reference type.
 *
 * Once all primitive and struct/union types got deduplicated, we can easily
 * deduplicate all other (reference) BTF types. This is done in two steps:
 *
 * 1. Resolve all referenced type IDs into their canonical type IDs. This
 * resolution can be done either immediately for primitive or struct/union types
 * (because they were deduped in previous two phases) or recursively for
 * reference types. Recursion will always terminate at either primitive or
 * struct/union type, at which point we can "unwind" chain of reference types
 * one by one. There is no danger of encountering cycles because in C type
 * system the only way to form type cycle is through struct/union, so any chain
 * of reference types, even those taking part in a type cycle, will inevitably
 * reach struct/union at some point.
 *
 * 2. Once all referenced type IDs are resolved into canonical ones, BTF type
 * becomes "stable", in the sense that no further deduplication will cause
 * any changes to it. With that, it's now possible to calculate type's signature
 * hash (this time taking into account referenced type IDs) and loop over all
 * potential canonical representatives. If no match was found, current type
 * will become canonical representative of itself and will be added into
 * btf_dedup->dedup_table as another possible canonical representative.
 */
static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
{
	struct hashmap_entry *hash_entry;
	__u32 new_id = type_id, cand_id;
	struct btf_type *t, *cand;
	/* if we don't find equivalent type, then we are representative type */
	int ref_type_id;
	long h;

	if (d->map[type_id] == BTF_IN_PROGRESS_ID)
		return -ELOOP;
	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
		return resolve_type_id(d, type_id);

	t = d->btf->types[type_id];
	d->map[type_id] = BTF_IN_PROGRESS_ID;

	switch (btf_kind(t)) {
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
		ref_type_id = btf_dedup_ref_type(d, t->type);
		if (ref_type_id < 0)
			return ref_type_id;
		t->type = ref_type_id;

		h = btf_hash_common(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = (__u32)(long)hash_entry->value;
			cand = d->btf->types[cand_id];
			if (btf_equal_common(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;

	case BTF_KIND_ARRAY: {
		struct btf_array *info = btf_array(t);

		ref_type_id = btf_dedup_ref_type(d, info->type);
		if (ref_type_id < 0)
			return ref_type_id;
		info->type = ref_type_id;

		ref_type_id = btf_dedup_ref_type(d, info->index_type);
		if (ref_type_id < 0)
			return ref_type_id;
		info->index_type = ref_type_id;

		h = btf_hash_array(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = (__u32)(long)hash_entry->value;
			cand = d->btf->types[cand_id];
			if (btf_equal_array(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;
	}

	case BTF_KIND_FUNC_PROTO: {
		struct btf_param *param;
		__u16 vlen;
		int i;

		ref_type_id = btf_dedup_ref_type(d, t->type);
		if (ref_type_id < 0)
			return ref_type_id;
		t->type = ref_type_id;

		vlen = btf_vlen(t);
		param = btf_params(t);
		for (i = 0; i < vlen; i++) {
			ref_type_id = btf_dedup_ref_type(d, param->type);
			if (ref_type_id < 0)
				return ref_type_id;
			param->type = ref_type_id;
			param++;
		}

		h = btf_hash_fnproto(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = (__u32)(long)hash_entry->value;
			cand = d->btf->types[cand_id];
			if (btf_equal_fnproto(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;
	}

	default:
		return -EINVAL;
	}

	d->map[type_id] = new_id;
	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
		return -ENOMEM;

	return new_id;
}

static int btf_dedup_ref_types(struct btf_dedup *d)
{
	int i, err;

	for (i = 1; i <= d->btf->nr_types; i++) {
		err = btf_dedup_ref_type(d, i);
		if (err < 0)
			return err;
	}
	/* we won't need d->dedup_table anymore */
	hashmap__free(d->dedup_table);
	d->dedup_table = NULL;
	return 0;
}
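
/*
 * Illustrative example (editorial sketch): for a reference chain
 *
 *   [10] PTR -> [11] CONST -> [12] struct s
 *
 * btf_dedup_ref_type() recurses through PTR and CONST, bottoms out at the
 * already-deduped struct (say, canonical ID 3), rewrites CONST to reference
 * ID 3, dedups the now-stable CONST against the hash table, then unwinds
 * and does the same for PTR. A cycle such as struct s { struct s *next; }
 * is harmless because recursion stops at the struct and never follows its
 * members.
 */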

/*
 * Compact types.
 *
 * After we established for each type its corresponding canonical representative
 * type, we now can eliminate types that are not canonical and leave only
 * canonical ones laid out sequentially in memory by copying them over
 * duplicates. During compaction btf_dedup->hypot_map array is reused to store
 * a map from original type ID to a new compacted type ID, which will be used
 * during next phase to "fix up" type IDs, referenced from struct/union and
 * reference types.
 */
static int btf_dedup_compact_types(struct btf_dedup *d)
{
	struct btf_type **new_types;
	__u32 next_type_id = 1;
	char *types_start, *p;
	int i, len;

	/* we are going to reuse hypot_map to store compaction remapping */
	d->hypot_map[0] = 0;
	for (i = 1; i <= d->btf->nr_types; i++)
		d->hypot_map[i] = BTF_UNPROCESSED_ID;

	types_start = d->btf->nohdr_data + d->btf->hdr->type_off;
	p = types_start;

	for (i = 1; i <= d->btf->nr_types; i++) {
		if (d->map[i] != i)
			continue;

		len = btf_type_size(d->btf->types[i]);
		if (len < 0)
			return len;

		memmove(p, d->btf->types[i], len);
		d->hypot_map[i] = next_type_id;
		d->btf->types[next_type_id] = (struct btf_type *)p;
		p += len;
		next_type_id++;
	}

	/* shrink struct btf's internal types index and update btf_header */
	d->btf->nr_types = next_type_id - 1;
	d->btf->types_size = d->btf->nr_types;
	d->btf->hdr->type_len = p - types_start;
	new_types = realloc(d->btf->types,
			    (1 + d->btf->nr_types) * sizeof(struct btf_type *));
	if (!new_types)
		return -ENOMEM;
	d->btf->types = new_types;

	/* make sure string section follows type information without gaps */
	d->btf->hdr->str_off = p - (char *)d->btf->nohdr_data;
	memmove(p, d->btf->strings, d->btf->hdr->str_len);
	d->btf->strings = p;
	p += d->btf->hdr->str_len;

	d->btf->data_size = p - (char *)d->btf->data;
	return 0;
}
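
/*
 * Editorial sketch of compaction: given canonical map
 *
 *   map: [1] -> 1, [2] -> 1, [3] -> 3, [4] -> 3
 *
 * only types 1 and 3 survive; they are moved to the front of the type
 * section and hypot_map records the renumbering used by the remap phase:
 *
 *   hypot_map: [1] -> 1, [3] -> 2, [2]/[4] -> BTF_UNPROCESSED_ID
 *
 * Duplicates are never looked up directly; btf_dedup_remap_type_id() below
 * first resolves them to their canonical ID.
 */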

/*
 * Figure out final (deduplicated and compacted) type ID for provided original
 * `type_id` by first resolving it into corresponding canonical type ID and
 * then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map,
 * which is populated during compaction phase.
 */
static int btf_dedup_remap_type_id(struct btf_dedup *d, __u32 type_id)
{
	__u32 resolved_type_id, new_type_id;

	resolved_type_id = resolve_type_id(d, type_id);
	new_type_id = d->hypot_map[resolved_type_id];
	if (new_type_id > BTF_MAX_NR_TYPES)
		return -EINVAL;

	return new_type_id;
}
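
/*
 * Continuing the sketch above: remapping original ID 4 first resolves it
 * through d->map to canonical ID 3, then hypot_map[3] yields the final
 * compacted ID 2. A resolved ID that was never assigned a compacted ID
 * indicates internal inconsistency, hence the -EINVAL above.
 */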

/*
 * Remap referenced type IDs into deduped type IDs.
 *
 * After BTF types are deduplicated and compacted, their final type IDs may
 * differ from original ones. The map from original to a corresponding
 * deduped type ID is stored in btf_dedup->hypot_map and is populated during
 * compaction phase. During remapping phase we are rewriting all type IDs
 * referenced from any BTF type (e.g., struct fields, func proto args, etc) to
 * their final deduped type IDs.
 */
static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id)
{
	struct btf_type *t = d->btf->types[type_id];
	int i, r;

	switch (btf_kind(t)) {
	case BTF_KIND_INT:
	case BTF_KIND_ENUM:
		break;

	case BTF_KIND_FWD:
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
	case BTF_KIND_VAR:
		r = btf_dedup_remap_type_id(d, t->type);
		if (r < 0)
			return r;
		t->type = r;
		break;

	case BTF_KIND_ARRAY: {
		struct btf_array *arr_info = btf_array(t);

		r = btf_dedup_remap_type_id(d, arr_info->type);
		if (r < 0)
			return r;
		arr_info->type = r;
		r = btf_dedup_remap_type_id(d, arr_info->index_type);
		if (r < 0)
			return r;
		arr_info->index_type = r;
		break;
	}

	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		struct btf_member *member = btf_members(t);
		__u16 vlen = btf_vlen(t);

		for (i = 0; i < vlen; i++) {
			r = btf_dedup_remap_type_id(d, member->type);
			if (r < 0)
				return r;
			member->type = r;
			member++;
		}
		break;
	}

	case BTF_KIND_FUNC_PROTO: {
		struct btf_param *param = btf_params(t);
		__u16 vlen = btf_vlen(t);

		r = btf_dedup_remap_type_id(d, t->type);
		if (r < 0)
			return r;
		t->type = r;

		for (i = 0; i < vlen; i++) {
			r = btf_dedup_remap_type_id(d, param->type);
			if (r < 0)
				return r;
			param->type = r;
			param++;
		}
		break;
	}

	case BTF_KIND_DATASEC: {
		struct btf_var_secinfo *var = btf_var_secinfos(t);
		__u16 vlen = btf_vlen(t);

		for (i = 0; i < vlen; i++) {
			r = btf_dedup_remap_type_id(d, var->type);
			if (r < 0)
				return r;
			var->type = r;
			var++;
		}
		break;
	}

	default:
		return -EINVAL;
	}

	return 0;
}

static int btf_dedup_remap_types(struct btf_dedup *d)
{
	int i, r;

	for (i = 1; i <= d->btf->nr_types; i++) {
		r = btf_dedup_remap_type(d, i);
		if (r < 0)
			return r;
	}
	return 0;
}
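
/*
 * Usage sketch (editorial example): btf__dedup() is the public entry point
 * defined earlier in this file; it drives the phases above in order, namely
 * string dedup, primitive dedup, struct/union dedup, reference dedup,
 * compaction, and finally this ID remapping pass. A caller might do:
 *
 *	btf = btf__parse_elf("prog.o", NULL);	(the path is hypothetical)
 *	err = btf__dedup(btf, NULL, NULL);	(returns 0 on success)
 */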

static struct btf *btf_load_raw(const char *path)
{
	struct btf *btf;
	size_t read_cnt;
	struct stat st;
	void *data;
	FILE *f;

	if (stat(path, &st))
		return ERR_PTR(-errno);

	data = malloc(st.st_size);
	if (!data)
		return ERR_PTR(-ENOMEM);

	f = fopen(path, "rb");
	if (!f) {
		btf = ERR_PTR(-errno);
		goto cleanup;
	}

	read_cnt = fread(data, 1, st.st_size, f);
	fclose(f);
	if (read_cnt < st.st_size) {
		btf = ERR_PTR(-EBADF);
		goto cleanup;
	}

	btf = btf__new(data, read_cnt);

cleanup:
	free(data);
	return btf;
}

/*
 * Probe few well-known locations for vmlinux kernel image and try to load BTF
 * data out of it to use for target BTF.
 */
struct btf *libbpf_find_kernel_btf(void)
{
	struct {
		const char *path_fmt;
		bool raw_btf;
	} locations[] = {
		/* try canonical vmlinux BTF through sysfs first */
		{ "/sys/kernel/btf/vmlinux", true /* raw BTF */ },
		/* fall back to trying to find vmlinux ELF on disk otherwise */
		{ "/boot/vmlinux-%1$s" },
		{ "/lib/modules/%1$s/vmlinux-%1$s" },
		{ "/lib/modules/%1$s/build/vmlinux" },
		{ "/usr/lib/modules/%1$s/kernel/vmlinux" },
		{ "/usr/lib/debug/boot/vmlinux-%1$s" },
		{ "/usr/lib/debug/boot/vmlinux-%1$s.debug" },
		{ "/usr/lib/debug/lib/modules/%1$s/vmlinux" },
	};
	char path[PATH_MAX + 1];
	struct utsname buf;
	struct btf *btf;
	int i;

	uname(&buf);

	for (i = 0; i < ARRAY_SIZE(locations); i++) {
		snprintf(path, PATH_MAX, locations[i].path_fmt, buf.release);

		if (access(path, R_OK))
			continue;

		if (locations[i].raw_btf)
			btf = btf_load_raw(path);
		else
			btf = btf__parse_elf(path, NULL);

		pr_debug("loading kernel BTF '%s': %ld\n",
			 path, IS_ERR(btf) ? PTR_ERR(btf) : 0);
		if (IS_ERR(btf))
			continue;

		return btf;
	}

	pr_warn("failed to find valid kernel BTF\n");
	return ERR_PTR(-ESRCH);
}
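
/*
 * Usage sketch (editorial example): a caller needing the running kernel's
 * types, e.g. for field relocations, might do:
 *
 *	struct btf *kern_btf = libbpf_find_kernel_btf();
 *
 *	if (IS_ERR(kern_btf))
 *		return PTR_ERR(kern_btf);
 *	id = btf__find_by_name(kern_btf, "task_struct");
 *	...
 *	btf__free(kern_btf);
 *
 * btf__find_by_name() and btf__free() are existing public btf.h APIs; the
 * flow shown is illustrative only.
 */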