// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2018 Facebook */

#include <endian.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <sys/utsname.h>
#include <sys/param.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/btf.h>
#include <gelf.h>
#include "btf.h"
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
#include "hashmap.h"

/* make sure libbpf doesn't use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64

#define BTF_MAX_NR_TYPES 0x7fffffffU
#define BTF_MAX_STR_OFFSET 0x7fffffffU
static struct btf_type btf_void;

struct btf {
	union {
		struct btf_header *hdr;
		void *data;
	};
	struct btf_type **types;
	const char *strings;
	void *nohdr_data;
	__u32 nr_types;
	__u32 types_size;
	__u32 data_size;
	int fd;
};
static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}
static int btf_add_type(struct btf *btf, struct btf_type *t)
{
	if (btf->types_size - btf->nr_types < 2) {
		struct btf_type **new_types;
		__u32 expand_by, new_size;

		if (btf->types_size == BTF_MAX_NR_TYPES)
			return -E2BIG;

		expand_by = max(btf->types_size >> 2, 16U);
		new_size = min(BTF_MAX_NR_TYPES, btf->types_size + expand_by);

		new_types = realloc(btf->types, sizeof(*new_types) * new_size);
		if (!new_types)
			return -ENOMEM;

		if (btf->nr_types == 0)
			new_types[0] = &btf_void;

		btf->types = new_types;
		btf->types_size = new_size;
	}

	btf->types[++(btf->nr_types)] = t;

	return 0;
}
static int btf_parse_hdr(struct btf *btf)
{
	const struct btf_header *hdr = btf->hdr;
	__u32 meta_left;

	if (btf->data_size < sizeof(struct btf_header)) {
		pr_debug("BTF header not found\n");
		return -EINVAL;
	}

	if (hdr->magic != BTF_MAGIC) {
		pr_debug("Invalid BTF magic:%x\n", hdr->magic);
		return -EINVAL;
	}

	if (hdr->version != BTF_VERSION) {
		pr_debug("Unsupported BTF version:%u\n", hdr->version);
		return -ENOTSUP;
	}

	if (hdr->flags) {
		pr_debug("Unsupported BTF flags:%x\n", hdr->flags);
		return -ENOTSUP;
	}

	meta_left = btf->data_size - sizeof(*hdr);
	if (!meta_left) {
		pr_debug("BTF has no data\n");
		return -EINVAL;
	}

	if (meta_left < hdr->type_off) {
		pr_debug("Invalid BTF type section offset:%u\n", hdr->type_off);
		return -EINVAL;
	}

	if (meta_left < hdr->str_off) {
		pr_debug("Invalid BTF string section offset:%u\n", hdr->str_off);
		return -EINVAL;
	}

	if (hdr->type_off >= hdr->str_off) {
		pr_debug("BTF type section offset >= string section offset. No type?\n");
		return -EINVAL;
	}

	if (hdr->type_off & 0x03) {
		pr_debug("BTF type section is not aligned to 4 bytes\n");
		return -EINVAL;
	}

	btf->nohdr_data = btf->hdr + 1;
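
	/*
	 * For reference, the raw BTF blob validated above is laid out as
	 * follows (offsets relative to the end of the header, per the
	 * checks in btf_parse_hdr()):
	 *
	 *	+---------------------+
	 *	| struct btf_header   |
	 *	+---------------------+ <- btf->nohdr_data
	 *	| type descriptors    | <- hdr->type_off (4-byte aligned)
	 *	+---------------------+
	 *	| string section      | <- hdr->str_off (validated in
	 *	+---------------------+    btf_parse_str_sec() below)
	 */

	return 0;
}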
static int btf_parse_str_sec(struct btf *btf)
{
	const struct btf_header *hdr = btf->hdr;
	const char *start = btf->nohdr_data + hdr->str_off;
	const char *end = start + btf->hdr->str_len;

	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET ||
	    start[0] || end[-1]) {
		pr_debug("Invalid BTF string section\n");
		return -EINVAL;
	}

	btf->strings = start;

	return 0;
}
static int btf_type_size(struct btf_type *t)
{
	int base_size = sizeof(struct btf_type);
	__u16 vlen = btf_vlen(t);

	switch (btf_kind(t)) {
	case BTF_KIND_FWD:
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
		return base_size;
	case BTF_KIND_INT:
		return base_size + sizeof(__u32);
	case BTF_KIND_ENUM:
		return base_size + vlen * sizeof(struct btf_enum);
	case BTF_KIND_ARRAY:
		return base_size + sizeof(struct btf_array);
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		return base_size + vlen * sizeof(struct btf_member);
	case BTF_KIND_FUNC_PROTO:
		return base_size + vlen * sizeof(struct btf_param);
	case BTF_KIND_VAR:
		return base_size + sizeof(struct btf_var);
	case BTF_KIND_DATASEC:
		return base_size + vlen * sizeof(struct btf_var_secinfo);
	default:
		pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
		return -EINVAL;
	}
}
static int btf_parse_type_sec(struct btf *btf)
{
	struct btf_header *hdr = btf->hdr;
	void *nohdr_data = btf->nohdr_data;
	void *next_type = nohdr_data + hdr->type_off;
	void *end_type = nohdr_data + hdr->str_off;

	while (next_type < end_type) {
		struct btf_type *t = next_type;
		int type_size, err;

		type_size = btf_type_size(t);
		if (type_size < 0)
			return type_size;
		next_type += type_size;
		err = btf_add_type(btf, t);
		if (err)
			return err;
	}

	return 0;
}
__u32 btf__get_nr_types(const struct btf *btf)
{
	return btf->nr_types;
}
const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
{
	if (type_id > btf->nr_types)
		return NULL;

	return btf->types[type_id];
}
static bool btf_type_is_void(const struct btf_type *t)
{
	return t == &btf_void || btf_is_fwd(t);
}

static bool btf_type_is_void_or_null(const struct btf_type *t)
{
	return !t || btf_type_is_void(t);
}
#define MAX_RESOLVE_DEPTH 32

__s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
{
	const struct btf_array *array;
	const struct btf_type *t;
	__u32 nelems = 1;
	__s64 size = -1;
	int i;

	t = btf__type_by_id(btf, type_id);
	for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t);
	     i++) {
		switch (btf_kind(t)) {
		case BTF_KIND_INT:
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
		case BTF_KIND_ENUM:
		case BTF_KIND_DATASEC:
			size = t->size;
			goto done;
		case BTF_KIND_PTR:
			size = sizeof(void *);
			goto done;
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_VOLATILE:
		case BTF_KIND_CONST:
		case BTF_KIND_RESTRICT:
		case BTF_KIND_VAR:
			type_id = t->type;
			break;
		case BTF_KIND_ARRAY:
			array = btf_array(t);
			if (nelems && array->nelems > UINT32_MAX / nelems)
				return -E2BIG;
			nelems *= array->nelems;
			type_id = array->type;
			break;
		default:
			return -EINVAL;
		}

		t = btf__type_by_id(btf, type_id);
	}

done:
	if (size < 0)
		return -EINVAL;
	if (nelems && size > UINT32_MAX / nelems)
		return -E2BIG;

	return nelems * size;
}
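
/*
 * Usage sketch (illustrative, not part of the original source): for BTF
 * describing `int arr[16]`, btf__resolve_size() walks ARRAY -> INT and
 * returns 16 * 4 = 64. A hypothetical caller:
 *
 *	__s64 sz = btf__resolve_size(btf, arr_type_id);
 *
 *	if (sz < 0)
 *		return sz;	// -EINVAL/-E2BIG, per the code above
 */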
int btf__align_of(const struct btf *btf, __u32 id)
{
	const struct btf_type *t = btf__type_by_id(btf, id);
	__u16 kind = btf_kind(t);

	switch (kind) {
	case BTF_KIND_INT:
	case BTF_KIND_ENUM:
		return min(sizeof(void *), (size_t)t->size);
	case BTF_KIND_PTR:
		return sizeof(void *);
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
		return btf__align_of(btf, t->type);
	case BTF_KIND_ARRAY:
		return btf__align_of(btf, btf_array(t)->type);
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		const struct btf_member *m = btf_members(t);
		__u16 vlen = btf_vlen(t);
		int i, max_align = 1, align;

		for (i = 0; i < vlen; i++, m++) {
			align = btf__align_of(btf, m->type);
			if (align <= 0)
				return align;
			max_align = max(max_align, align);
		}

		return max_align;
	}
	default:
		pr_warn("unsupported BTF_KIND:%u\n", btf_kind(t));
		return 0;
	}
}
int btf__resolve_type(const struct btf *btf, __u32 type_id)
{
	const struct btf_type *t;
	int depth = 0;

	t = btf__type_by_id(btf, type_id);
	while (depth < MAX_RESOLVE_DEPTH &&
	       !btf_type_is_void_or_null(t) &&
	       (btf_is_mod(t) || btf_is_typedef(t) || btf_is_var(t))) {
		type_id = t->type;
		t = btf__type_by_id(btf, type_id);
		depth++;
	}

	if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t))
		return -EINVAL;

	return type_id;
}
__s32 btf__find_by_name(const struct btf *btf, const char *type_name)
{
	__u32 i;

	if (!strcmp(type_name, "void"))
		return 0;

	for (i = 1; i <= btf->nr_types; i++) {
		const struct btf_type *t = btf->types[i];
		const char *name = btf__name_by_offset(btf, t->name_off);

		if (name && !strcmp(type_name, name))
			return i;
	}

	return -ENOENT;
}
__s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
			     __u32 kind)
{
	__u32 i;

	if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
		return 0;

	for (i = 1; i <= btf->nr_types; i++) {
		const struct btf_type *t = btf->types[i];
		const char *name;

		if (btf_kind(t) != kind)
			continue;
		name = btf__name_by_offset(btf, t->name_off);
		if (name && !strcmp(type_name, name))
			return i;
	}

	return -ENOENT;
}
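
/*
 * Usage sketch (illustrative only): look up a type ID by name and kind;
 * "task_struct" is just a hypothetical type name here:
 *
 *	__s32 id = btf__find_by_name_kind(btf, "task_struct", BTF_KIND_STRUCT);
 *
 *	if (id < 0)
 *		return id;	// -ENOENT when no such named type exists
 */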
void btf__free(struct btf *btf)
{
	if (IS_ERR_OR_NULL(btf))
		return;

	if (btf->fd >= 0)
		close(btf->fd);

	free(btf->data);
	free(btf->types);
	free(btf);
}
struct btf *btf__new(__u8 *data, __u32 size)
{
	struct btf *btf;
	int err;

	btf = calloc(1, sizeof(struct btf));
	if (!btf)
		return ERR_PTR(-ENOMEM);

	btf->fd = -1;

	btf->data = malloc(size);
	if (!btf->data) {
		err = -ENOMEM;
		goto done;
	}

	memcpy(btf->data, data, size);
	btf->data_size = size;

	err = btf_parse_hdr(btf);
	if (err)
		goto done;

	err = btf_parse_str_sec(btf);
	if (err)
		goto done;

	err = btf_parse_type_sec(btf);

done:
	if (err) {
		btf__free(btf);
		return ERR_PTR(err);
	}

	return btf;
}
static bool btf_check_endianness(const GElf_Ehdr *ehdr)
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
	return ehdr->e_ident[EI_DATA] == ELFDATA2LSB;
#elif __BYTE_ORDER == __BIG_ENDIAN
	return ehdr->e_ident[EI_DATA] == ELFDATA2MSB;
#else
# error "Unrecognized __BYTE_ORDER"
#endif
}
struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
{
	Elf_Data *btf_data = NULL, *btf_ext_data = NULL;
	int err = 0, fd = -1, idx = 0;
	struct btf *btf = NULL;
	Elf_Scn *scn = NULL;
	Elf *elf = NULL;
	GElf_Ehdr ehdr;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warn("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		err = -errno;
		pr_warn("failed to open %s: %s\n", path, strerror(errno));
		return ERR_PTR(err);
	}

	err = -LIBBPF_ERRNO__FORMAT;

	elf = elf_begin(fd, ELF_C_READ, NULL);
	if (!elf) {
		pr_warn("failed to open %s as ELF file\n", path);
		goto done;
	}
	if (!gelf_getehdr(elf, &ehdr)) {
		pr_warn("failed to get EHDR from %s\n", path);
		goto done;
	}
	if (!btf_check_endianness(&ehdr)) {
		pr_warn("non-native ELF endianness is not supported\n");
		goto done;
	}
	if (!elf_rawdata(elf_getscn(elf, ehdr.e_shstrndx), NULL)) {
		pr_warn("failed to get e_shstrndx from %s\n", path);
		goto done;
	}

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		GElf_Shdr sh;
		char *name;

		idx++;
		if (gelf_getshdr(scn, &sh) != &sh) {
			pr_warn("failed to get section(%d) header from %s\n",
				idx, path);
			goto done;
		}
		name = elf_strptr(elf, ehdr.e_shstrndx, sh.sh_name);
		if (!name) {
			pr_warn("failed to get section(%d) name from %s\n",
				idx, path);
			goto done;
		}
		if (strcmp(name, BTF_ELF_SEC) == 0) {
			btf_data = elf_getdata(scn, 0);
			if (!btf_data) {
				pr_warn("failed to get section(%d, %s) data from %s\n",
					idx, name, path);
				goto done;
			}
			continue;
		} else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) {
			btf_ext_data = elf_getdata(scn, 0);
			if (!btf_ext_data) {
				pr_warn("failed to get section(%d, %s) data from %s\n",
					idx, name, path);
				goto done;
			}
			continue;
		}
	}

	err = 0;

	if (!btf_data) {
		err = -ENOENT;
		goto done;
	}
	btf = btf__new(btf_data->d_buf, btf_data->d_size);
	if (IS_ERR(btf))
		goto done;

	if (btf_ext && btf_ext_data) {
		*btf_ext = btf_ext__new(btf_ext_data->d_buf,
					btf_ext_data->d_size);
		if (IS_ERR(*btf_ext))
			goto done;
	} else if (btf_ext) {
		*btf_ext = NULL;
	}
done:
	if (elf)
		elf_end(elf);
	close(fd);

	if (err)
		return ERR_PTR(err);
	/*
	 * btf is always parsed before btf_ext, so no need to clean up
	 * btf_ext, if btf loading failed
	 */
	if (IS_ERR(btf))
		return btf;
	if (btf_ext && IS_ERR(*btf_ext)) {
		btf__free(btf);
		err = PTR_ERR(*btf_ext);
		return ERR_PTR(err);
	}

	return btf;
}
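
/*
 * Usage sketch (illustrative only): pulling .BTF and .BTF.ext out of a
 * compiled BPF object file; "prog.o" is a hypothetical path:
 *
 *	struct btf_ext *btf_ext = NULL;
 *	struct btf *btf = btf__parse_elf("prog.o", &btf_ext);
 *
 *	if (IS_ERR(btf))
 *		return PTR_ERR(btf);
 *	// ... use btf and btf_ext, then:
 *	btf_ext__free(btf_ext);
 *	btf__free(btf);
 */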
static int compare_vsi_off(const void *_a, const void *_b)
{
	const struct btf_var_secinfo *a = _a;
	const struct btf_var_secinfo *b = _b;

	return a->offset - b->offset;
}
static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
			     struct btf_type *t)
{
	__u32 size = 0, off = 0, i, vars = btf_vlen(t);
	const char *name = btf__name_by_offset(btf, t->name_off);
	const struct btf_type *t_var;
	struct btf_var_secinfo *vsi;
	const struct btf_var *var;
	int ret;

	if (!name) {
		pr_debug("No name found in string section for DATASEC kind.\n");
		return -ENOENT;
	}

	/* .extern datasec size and var offsets were set correctly during
	 * extern collection step, so just skip straight to sorting variables
	 */
	if (t->size)
		goto sort_vars;

	ret = bpf_object__section_size(obj, name, &size);
	if (ret || !size || (t->size && t->size != size)) {
		pr_debug("Invalid size for section %s: %u bytes\n", name, size);
		return -ENOENT;
	}

	t->size = size;

	for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
		t_var = btf__type_by_id(btf, vsi->type);
		var = btf_var(t_var);

		if (!btf_is_var(t_var)) {
			pr_debug("Non-VAR type seen in section %s\n", name);
			return -EINVAL;
		}

		if (var->linkage == BTF_VAR_STATIC)
			continue;

		name = btf__name_by_offset(btf, t_var->name_off);
		if (!name) {
			pr_debug("No name found in string section for VAR kind\n");
			return -ENOENT;
		}

		ret = bpf_object__variable_offset(obj, name, &off);
		if (ret) {
			pr_debug("No offset found in symbol table for VAR %s\n",
				 name);
			return -ENOENT;
		}

		vsi->offset = off;
	}

sort_vars:
	qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off);
	return 0;
}
int btf__finalize_data(struct bpf_object *obj, struct btf *btf)
{
	int err = 0;
	__u32 i;

	for (i = 1; i <= btf->nr_types; i++) {
		struct btf_type *t = btf->types[i];

		/* Loader needs to fix up some of the things compiler
		 * couldn't get its hands on while emitting BTF. This
		 * is section size and global variable offset. We use
		 * the info from the ELF itself for this purpose.
		 */
		if (btf_is_datasec(t)) {
			err = btf_fixup_datasec(obj, btf, t);
			if (err)
				break;
		}
	}

	return err;
}
int btf__load(struct btf *btf)
{
	__u32 log_buf_size = 0;
	char *log_buf = NULL;
	int err = 0;

	if (btf->fd >= 0)
		return -EEXIST;

retry_load:
	if (log_buf_size) {
		log_buf = malloc(log_buf_size);
		if (!log_buf)
			return -ENOMEM;

		*log_buf = 0;
	}

	btf->fd = bpf_load_btf(btf->data, btf->data_size,
			       log_buf, log_buf_size, false);
	if (btf->fd < 0) {
		if (!log_buf || errno == ENOSPC) {
			log_buf_size = max((__u32)BPF_LOG_BUF_SIZE,
					   log_buf_size << 1);
			free(log_buf);
			goto retry_load;
		}

		err = -errno;
		pr_warn("Error loading BTF: %s(%d)\n", strerror(errno), errno);
		if (*log_buf)
			pr_warn("%s\n", log_buf);
		goto done;
	}

done:
	free(log_buf);
	return err;
}
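
/*
 * Usage sketch (illustrative only): once btf__load() succeeds, the kernel
 * holds a verified copy of the data and btf__fd() below returns the FD that
 * other BPF objects can reference; the attribute struct here is hypothetical:
 *
 *	if (btf__load(btf))
 *		return -1;
 *	create_attr.btf_fd = btf__fd(btf);	// hypothetical map-creation attr
 */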
int btf__fd(const struct btf *btf)
{
	return btf->fd;
}
const void *btf__get_raw_data(const struct btf *btf, __u32 *size)
{
	*size = btf->data_size;
	return btf->data;
}
const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
{
	if (offset < btf->hdr->str_len)
		return &btf->strings[offset];

	return NULL;
}
int btf__get_from_id(__u32 id, struct btf **btf)
{
	struct bpf_btf_info btf_info = { 0 };
	__u32 len = sizeof(btf_info);
	__u32 last_size;
	int btf_fd;
	void *ptr;
	int err;

	err = 0;
	*btf = NULL;
	btf_fd = bpf_btf_get_fd_by_id(id);
	if (btf_fd < 0)
		return 0;

	/* we won't know btf_size until we call bpf_obj_get_info_by_fd(). so
	 * let's start with a sane default - 4KiB here - and resize it only if
	 * bpf_obj_get_info_by_fd() needs a bigger buffer.
	 */
	btf_info.btf_size = 4096;
	last_size = btf_info.btf_size;
	ptr = malloc(last_size);
	if (!ptr) {
		err = -ENOMEM;
		goto exit_free;
	}

	memset(ptr, 0, last_size);
	btf_info.btf = ptr_to_u64(ptr);
	err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);

	if (!err && btf_info.btf_size > last_size) {
		void *temp_ptr;

		last_size = btf_info.btf_size;
		temp_ptr = realloc(ptr, last_size);
		if (!temp_ptr) {
			err = -ENOMEM;
			goto exit_free;
		}
		ptr = temp_ptr;
		memset(ptr, 0, last_size);
		btf_info.btf = ptr_to_u64(ptr);
		err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
	}

	if (err || btf_info.btf_size > last_size) {
		err = errno;
		goto exit_free;
	}

	*btf = btf__new((__u8 *)(long)btf_info.btf, btf_info.btf_size);
	if (IS_ERR(*btf)) {
		err = PTR_ERR(*btf);
		*btf = NULL;
	}

exit_free:
	close(btf_fd);
	free(ptr);

	return err;
}
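
/*
 * Usage sketch (illustrative only): fetch BTF of an already-loaded kernel
 * object by BTF object ID (e.g., from bpf_map_info::btf_id). Note that the
 * function can return 0 with *btf still NULL when the FD lookup fails:
 *
 *	struct btf *btf = NULL;
 *	int err = btf__get_from_id(btf_id, &btf);
 *
 *	if (err || !btf)
 *		return err ? err : -ENOENT;
 */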
int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
			 __u32 expected_key_size, __u32 expected_value_size,
			 __u32 *key_type_id, __u32 *value_type_id)
{
	const struct btf_type *container_type;
	const struct btf_member *key, *value;
	const size_t max_name = 256;
	char container_name[max_name];
	__s64 key_size, value_size;
	__s32 container_id;

	if (snprintf(container_name, max_name, "____btf_map_%s", map_name) ==
	    max_name) {
		pr_warn("map:%s length of '____btf_map_%s' is too long\n",
			map_name, map_name);
		return -EINVAL;
	}

	container_id = btf__find_by_name(btf, container_name);
	if (container_id < 0) {
		pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
			 map_name, container_name);
		return container_id;
	}

	container_type = btf__type_by_id(btf, container_id);
	if (!container_type) {
		pr_warn("map:%s cannot find BTF type for container_id:%u\n",
			map_name, container_id);
		return -EINVAL;
	}

	if (!btf_is_struct(container_type) || btf_vlen(container_type) < 2) {
		pr_warn("map:%s container_name:%s is an invalid container struct\n",
			map_name, container_name);
		return -EINVAL;
	}

	key = btf_members(container_type);
	value = key + 1;

	key_size = btf__resolve_size(btf, key->type);
	if (key_size < 0) {
		pr_warn("map:%s invalid BTF key_type_size\n", map_name);
		return key_size;
	}

	if (expected_key_size != key_size) {
		pr_warn("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
			map_name, (__u32)key_size, expected_key_size);
		return -EINVAL;
	}

	value_size = btf__resolve_size(btf, value->type);
	if (value_size < 0) {
		pr_warn("map:%s invalid BTF value_type_size\n", map_name);
		return value_size;
	}

	if (expected_value_size != value_size) {
		pr_warn("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
			map_name, (__u32)value_size, expected_value_size);
		return -EINVAL;
	}

	*key_type_id = key->type;
	*value_type_id = value->type;

	return 0;
}
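
/*
 * For context (illustrative only): the "____btf_map_<name>" container looked
 * up above is the struct emitted by the BPF_ANNOTATE_KV_PAIR() macro, roughly
 * of this shape ("my_map" and the member types are hypothetical):
 *
 *	struct ____btf_map_my_map {
 *		__u32 key;		// member #0 carries the key type
 *		struct my_value value;	// member #1 carries the value type
 *	};
 *
 * which is why the code above insists on a struct with at least two members.
 */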
struct btf_ext_sec_setup_param {
	__u32 off;
	__u32 len;
	__u32 min_rec_size;
	struct btf_ext_info *ext_info;
	const char *desc;
};
static int btf_ext_setup_info(struct btf_ext *btf_ext,
			      struct btf_ext_sec_setup_param *ext_sec)
{
	const struct btf_ext_info_sec *sinfo;
	struct btf_ext_info *ext_info;
	__u32 info_left, record_size;
	/* The start of the info sec (including the __u32 record_size). */
	void *info;

	if (ext_sec->len == 0)
		return 0;

	if (ext_sec->off & 0x03) {
		pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n",
			 ext_sec->desc);
		return -EINVAL;
	}

	info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off;
	info_left = ext_sec->len;

	if (btf_ext->data + btf_ext->data_size < info + ext_sec->len) {
		pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n",
			 ext_sec->desc, ext_sec->off, ext_sec->len);
		return -EINVAL;
	}

	/* At least a record size */
	if (info_left < sizeof(__u32)) {
		pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc);
		return -EINVAL;
	}

	/* The record size needs to meet the minimum standard */
	record_size = *(__u32 *)info;
	if (record_size < ext_sec->min_rec_size ||
	    record_size & 0x03) {
		pr_debug("%s section in .BTF.ext has invalid record size %u\n",
			 ext_sec->desc, record_size);
		return -EINVAL;
	}

	sinfo = info + sizeof(__u32);
	info_left -= sizeof(__u32);

	/* If no records, return failure now so .BTF.ext won't be used. */
	if (!info_left) {
		pr_debug("%s section in .BTF.ext has no records", ext_sec->desc);
		return -EINVAL;
	}

	while (info_left) {
		unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec);
		__u64 total_record_size;
		__u32 num_records;

		if (info_left < sec_hdrlen) {
			pr_debug("%s section header is not found in .BTF.ext\n",
				 ext_sec->desc);
			return -EINVAL;
		}

		num_records = sinfo->num_info;
		if (num_records == 0) {
			pr_debug("%s section has incorrect num_records in .BTF.ext\n",
				 ext_sec->desc);
			return -EINVAL;
		}

		total_record_size = sec_hdrlen +
				    (__u64)num_records * record_size;
		if (info_left < total_record_size) {
			pr_debug("%s section has incorrect num_records in .BTF.ext\n",
				 ext_sec->desc);
			return -EINVAL;
		}

		info_left -= total_record_size;
		sinfo = (void *)sinfo + total_record_size;
	}

	ext_info = ext_sec->ext_info;
	ext_info->len = ext_sec->len - sizeof(__u32);
	ext_info->rec_size = record_size;
	ext_info->info = info + sizeof(__u32);

	return 0;
}
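
/*
 * For reference, the info section layout implied by the checks above: a
 * single record size followed by a sequence of per-ELF-section blocks:
 *
 *	__u32 record_size;
 *	struct btf_ext_info_sec {	// repeated until the section ends
 *		__u32 sec_name_off;
 *		__u32 num_info;
 *		__u8  data[];		// num_info records, record_size each
 *	};
 */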
static int btf_ext_setup_func_info(struct btf_ext *btf_ext)
{
	struct btf_ext_sec_setup_param param = {
		.off = btf_ext->hdr->func_info_off,
		.len = btf_ext->hdr->func_info_len,
		.min_rec_size = sizeof(struct bpf_func_info_min),
		.ext_info = &btf_ext->func_info,
		.desc = "func_info"
	};

	return btf_ext_setup_info(btf_ext, &param);
}

static int btf_ext_setup_line_info(struct btf_ext *btf_ext)
{
	struct btf_ext_sec_setup_param param = {
		.off = btf_ext->hdr->line_info_off,
		.len = btf_ext->hdr->line_info_len,
		.min_rec_size = sizeof(struct bpf_line_info_min),
		.ext_info = &btf_ext->line_info,
		.desc = "line_info",
	};

	return btf_ext_setup_info(btf_ext, &param);
}

static int btf_ext_setup_field_reloc(struct btf_ext *btf_ext)
{
	struct btf_ext_sec_setup_param param = {
		.off = btf_ext->hdr->field_reloc_off,
		.len = btf_ext->hdr->field_reloc_len,
		.min_rec_size = sizeof(struct bpf_field_reloc),
		.ext_info = &btf_ext->field_reloc_info,
		.desc = "field_reloc",
	};

	return btf_ext_setup_info(btf_ext, &param);
}
static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
{
	const struct btf_ext_header *hdr = (struct btf_ext_header *)data;

	if (data_size < offsetofend(struct btf_ext_header, hdr_len) ||
	    data_size < hdr->hdr_len) {
		pr_debug("BTF.ext header not found");
		return -EINVAL;
	}

	if (hdr->magic != BTF_MAGIC) {
		pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic);
		return -EINVAL;
	}

	if (hdr->version != BTF_VERSION) {
		pr_debug("Unsupported BTF.ext version:%u\n", hdr->version);
		return -ENOTSUP;
	}

	if (hdr->flags) {
		pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags);
		return -ENOTSUP;
	}

	if (data_size == hdr->hdr_len) {
		pr_debug("BTF.ext has no data\n");
		return -EINVAL;
	}

	return 0;
}
void btf_ext__free(struct btf_ext *btf_ext)
{
	if (IS_ERR_OR_NULL(btf_ext))
		return;
	free(btf_ext->data);
	free(btf_ext);
}
struct btf_ext *btf_ext__new(__u8 *data, __u32 size)
{
	struct btf_ext *btf_ext;
	int err;

	err = btf_ext_parse_hdr(data, size);
	if (err)
		return ERR_PTR(err);

	btf_ext = calloc(1, sizeof(struct btf_ext));
	if (!btf_ext)
		return ERR_PTR(-ENOMEM);

	btf_ext->data_size = size;
	btf_ext->data = malloc(size);
	if (!btf_ext->data) {
		err = -ENOMEM;
		goto done;
	}
	memcpy(btf_ext->data, data, size);

	if (btf_ext->hdr->hdr_len <
	    offsetofend(struct btf_ext_header, line_info_len))
		goto done;
	err = btf_ext_setup_func_info(btf_ext);
	if (err)
		goto done;

	err = btf_ext_setup_line_info(btf_ext);
	if (err)
		goto done;

	if (btf_ext->hdr->hdr_len <
	    offsetofend(struct btf_ext_header, field_reloc_len))
		goto done;
	err = btf_ext_setup_field_reloc(btf_ext);
	if (err)
		goto done;

done:
	if (err) {
		btf_ext__free(btf_ext);
		return ERR_PTR(err);
	}

	return btf_ext;
}
const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size)
{
	*size = btf_ext->data_size;
	return btf_ext->data;
}
static int btf_ext_reloc_info(const struct btf *btf,
			      const struct btf_ext_info *ext_info,
			      const char *sec_name, __u32 insns_cnt,
			      void **info, __u32 *cnt)
{
	__u32 sec_hdrlen = sizeof(struct btf_ext_info_sec);
	__u32 i, record_size, existing_len, records_len;
	struct btf_ext_info_sec *sinfo;
	const char *info_sec_name;
	__u64 remain_len;
	void *data;

	record_size = ext_info->rec_size;
	sinfo = ext_info->info;
	remain_len = ext_info->len;
	while (remain_len > 0) {
		records_len = sinfo->num_info * record_size;
		info_sec_name = btf__name_by_offset(btf, sinfo->sec_name_off);
		if (strcmp(info_sec_name, sec_name)) {
			remain_len -= sec_hdrlen + records_len;
			sinfo = (void *)sinfo + sec_hdrlen + records_len;
			continue;
		}

		existing_len = (*cnt) * record_size;
		data = realloc(*info, existing_len + records_len);
		if (!data)
			return -ENOMEM;

		memcpy(data + existing_len, sinfo->data, records_len);
		/* adjust insn_off only, the rest data will be passed
		 * to the kernel.
		 */
		for (i = 0; i < sinfo->num_info; i++) {
			__u32 *insn_off;

			insn_off = data + existing_len + (i * record_size);
			*insn_off = *insn_off / sizeof(struct bpf_insn) +
				insns_cnt;
		}
		*info = data;
		*cnt += sinfo->num_info;
		return 0;
	}

	return -ENOENT;
}
int btf_ext__reloc_func_info(const struct btf *btf,
			     const struct btf_ext *btf_ext,
			     const char *sec_name, __u32 insns_cnt,
			     void **func_info, __u32 *cnt)
{
	return btf_ext_reloc_info(btf, &btf_ext->func_info, sec_name,
				  insns_cnt, func_info, cnt);
}

int btf_ext__reloc_line_info(const struct btf *btf,
			     const struct btf_ext *btf_ext,
			     const char *sec_name, __u32 insns_cnt,
			     void **line_info, __u32 *cnt)
{
	return btf_ext_reloc_info(btf, &btf_ext->line_info, sec_name,
				  insns_cnt, line_info, cnt);
}
__u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext)
{
	return btf_ext->func_info.rec_size;
}

__u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext)
{
	return btf_ext->line_info.rec_size;
}
struct btf_dedup;

static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
				       const struct btf_dedup_opts *opts);
static void btf_dedup_free(struct btf_dedup *d);
static int btf_dedup_strings(struct btf_dedup *d);
static int btf_dedup_prim_types(struct btf_dedup *d);
static int btf_dedup_struct_types(struct btf_dedup *d);
static int btf_dedup_ref_types(struct btf_dedup *d);
static int btf_dedup_compact_types(struct btf_dedup *d);
static int btf_dedup_remap_types(struct btf_dedup *d);
/*
 * Deduplicate BTF types and strings.
 *
 * BTF dedup algorithm takes as an input `struct btf` representing `.BTF` ELF
 * section with all BTF type descriptors and string data. It overwrites that
 * memory in-place with deduplicated types and strings without any loss of
 * information. If optional `struct btf_ext` representing '.BTF.ext' ELF section
 * is provided, all the strings referenced from .BTF.ext section are honored
 * and updated to point to the right offsets after deduplication.
 *
 * If function returns with error, type/string data might be garbled and should
 * be discarded.
 *
 * More verbose and detailed description of both the problem btf_dedup is
 * solving and the solution can be found at:
 * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html
 *
 * Problem description and justification
 * =====================================
 *
 * BTF type information is typically emitted either as a result of conversion
 * from DWARF to BTF or directly by compiler. In both cases, each compilation
 * unit contains information about a subset of all the types that are used
 * in an application. These subsets are frequently overlapping and contain a lot
 * of duplicated information when later concatenated together into a single
 * binary. This algorithm ensures that each unique type is represented by a
 * single BTF type descriptor, greatly reducing the resulting size of BTF data.
 *
 * Compilation unit isolation and subsequent duplication of data is not the only
 * problem. The same type hierarchy (e.g., a struct and all the types that the
 * struct references) in different compilation units can be represented in BTF
 * to various degrees of completeness (or, rather, incompleteness) due to
 * struct/union forward declarations.
 *
 * Let's take a look at an example that we'll use to better understand the
 * problem (and solution). Suppose we have two compilation units, each using
 * same `struct S`, but each of them having incomplete type information about
 * struct's fields:
 *
 * // CU #1:
 * struct S;
 * struct A {
 *	int a;
 *	struct A* self;
 *	struct S* parent;
 * };
 * struct B;
 * struct S {
 *	struct A* a_ptr;
 *	struct B* b_ptr;
 * };
 *
 * // CU #2:
 * struct S;
 * struct A;
 * struct B {
 *	int b;
 *	struct B* self;
 *	struct S* parent;
 * };
 * struct S {
 *	struct A* a_ptr;
 *	struct B* b_ptr;
 * };
 *
 * In case of CU #1, BTF data will know only that `struct B` exist (but no
 * more), but will know the complete type information about `struct A`. While
 * for CU #2, it will know full type information about `struct B`, but will
 * only know about forward declaration of `struct A` (in BTF terms, it will
 * have `BTF_KIND_FWD` type descriptor with name `A`).
 *
 * This compilation unit isolation means that it's possible that there is no
 * single CU with complete type information describing structs `S`, `A`, and
 * `B`. Also, we might get tons of duplicated and redundant type information.
 *
 * Additional complication we need to keep in mind comes from the fact that
 * types, in general, can form graphs containing cycles, not just DAGs.
 *
 * While algorithm does deduplication, it also merges and resolves type
 * information (unless disabled through `struct btf_dedup_opts`), whenever
 * possible. E.g., in the example above with two compilation units having
 * partial type information for structs `A` and `B`, the output of algorithm
 * will emit a single copy of each BTF type that describes structs `A`, `B`,
 * and `S` (as well as type information for `int` and pointers), as if they
 * were defined in a single compilation unit as:
 *
 * struct A {
 *	int a;
 *	struct A* self;
 *	struct S* parent;
 * };
 * struct B {
 *	int b;
 *	struct B* self;
 *	struct S* parent;
 * };
 * struct S {
 *	struct A* a_ptr;
 *	struct B* b_ptr;
 * };
 *
 * Algorithm completes its work in 6 separate passes:
 *
 * 1. Strings deduplication.
 * 2. Primitive types deduplication (int, enum, fwd).
 * 3. Struct/union types deduplication.
 * 4. Reference types deduplication (pointers, typedefs, arrays, funcs, func
 *    protos, and const/volatile/restrict modifiers).
 * 5. Types compaction.
 * 6. Types remapping.
 *
 * Algorithm determines canonical type descriptor, which is a single
 * representative type for each truly unique type. This canonical type is the
 * one that will go into final deduplicated BTF type information. For
 * struct/unions, it is also the type that algorithm will merge additional type
 * information into (while resolving FWDs), as it discovers it from data in
 * other CUs. Each input BTF type eventually gets either mapped to itself, if
 * that type is canonical, or to some other type, if that type is equivalent
 * and was chosen as canonical representative. This mapping is stored in
 * `btf_dedup->map` array. This map is also used to record STRUCT/UNION that
 * FWD type got resolved to.
 *
 * To facilitate fast discovery of canonical types, we also maintain canonical
 * index (`btf_dedup->dedup_table`), which maps type descriptor's signature hash
 * (i.e., hashed kind, name, size, fields, etc) into a list of canonical types
 * that match that signature. With sufficiently good choice of type signature
 * hashing function, we can limit number of canonical types for each unique type
 * signature to a very small number, allowing to find canonical type for any
 * duplicated type very quickly.
 *
 * Struct/union deduplication is the most critical part and algorithm for
 * deduplicating structs/unions is described in greater details in comments for
 * `btf_dedup_is_equiv` function.
 */
int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
	       const struct btf_dedup_opts *opts)
{
	struct btf_dedup *d = btf_dedup_new(btf, btf_ext, opts);
	int err;

	if (IS_ERR(d)) {
		pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d));
		return -EINVAL;
	}

	err = btf_dedup_strings(d);
	if (err < 0) {
		pr_debug("btf_dedup_strings failed:%d\n", err);
		goto done;
	}
	err = btf_dedup_prim_types(d);
	if (err < 0) {
		pr_debug("btf_dedup_prim_types failed:%d\n", err);
		goto done;
	}
	err = btf_dedup_struct_types(d);
	if (err < 0) {
		pr_debug("btf_dedup_struct_types failed:%d\n", err);
		goto done;
	}
	err = btf_dedup_ref_types(d);
	if (err < 0) {
		pr_debug("btf_dedup_ref_types failed:%d\n", err);
		goto done;
	}
	err = btf_dedup_compact_types(d);
	if (err < 0) {
		pr_debug("btf_dedup_compact_types failed:%d\n", err);
		goto done;
	}
	err = btf_dedup_remap_types(d);
	if (err < 0) {
		pr_debug("btf_dedup_remap_types failed:%d\n", err);
		goto done;
	}

done:
	btf_dedup_free(d);
	return err;
}
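
/*
 * Usage sketch (illustrative only): dedup BTF right after parsing it from
 * an ELF, with default options:
 *
 *	int err = btf__dedup(btf, btf_ext, NULL);
 *
 *	if (err)
 *		return err;	// data may be garbled on error, discard it
 */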
#define BTF_UNPROCESSED_ID ((__u32)-1)
#define BTF_IN_PROGRESS_ID ((__u32)-2)

struct btf_dedup {
	/* .BTF section to be deduped in-place */
	struct btf *btf;
	/*
	 * Optional .BTF.ext section. When provided, any strings referenced
	 * from it will be taken into account when deduping strings
	 */
	struct btf_ext *btf_ext;
	/*
	 * This is a map from any type's signature hash to a list of possible
	 * canonical representative type candidates. Hash collisions are
	 * ignored, so even types of various kinds can share same list of
	 * candidates, which is fine because we rely on subsequent
	 * btf_xxx_equal() checks to authoritatively verify type equality.
	 */
	struct hashmap *dedup_table;
	/* Canonical types map */
	__u32 *map;
	/* Hypothetical mapping, used during type graph equivalence checks */
	__u32 *hypot_map;
	__u32 *hypot_list;
	size_t hypot_cnt;
	size_t hypot_cap;
	/* Various option modifying behavior of algorithm */
	struct btf_dedup_opts opts;
};
struct btf_str_ptr {
	const char *str;
	__u32 new_off;
	bool used;
};

struct btf_str_ptrs {
	struct btf_str_ptr *ptrs;
	const char *data;
	__u32 cnt;
	__u32 cap;
};
static long hash_combine(long h, long value)
{
	return h * 31 + value;
}
#define for_each_dedup_cand(d, node, hash) \
	hashmap__for_each_key_entry(d->dedup_table, node, (void *)hash)

static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id)
{
	return hashmap__append(d->dedup_table,
			       (void *)hash, (void *)(long)type_id);
}
static int btf_dedup_hypot_map_add(struct btf_dedup *d,
				   __u32 from_id, __u32 to_id)
{
	if (d->hypot_cnt == d->hypot_cap) {
		__u32 *new_list;

		d->hypot_cap += max((size_t)16, d->hypot_cap / 2);
		new_list = realloc(d->hypot_list, sizeof(__u32) * d->hypot_cap);
		if (!new_list)
			return -ENOMEM;
		d->hypot_list = new_list;
	}
	d->hypot_list[d->hypot_cnt++] = from_id;
	d->hypot_map[from_id] = to_id;
	return 0;
}
static void btf_dedup_clear_hypot_map(struct btf_dedup *d)
{
	int i;

	for (i = 0; i < d->hypot_cnt; i++)
		d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID;
	d->hypot_cnt = 0;
}
static void btf_dedup_free(struct btf_dedup *d)
{
	hashmap__free(d->dedup_table);
	d->dedup_table = NULL;

	free(d->map);
	d->map = NULL;

	free(d->hypot_map);
	d->hypot_map = NULL;

	free(d->hypot_list);
	d->hypot_list = NULL;

	free(d);
}
static size_t btf_dedup_identity_hash_fn(const void *key, void *ctx)
{
	return (size_t)key;
}

static size_t btf_dedup_collision_hash_fn(const void *key, void *ctx)
{
	return 0;
}

static bool btf_dedup_equal_fn(const void *k1, const void *k2, void *ctx)
{
	return k1 == k2;
}
static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
				       const struct btf_dedup_opts *opts)
{
	struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
	hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn;
	int i, err = 0;

	if (!d)
		return ERR_PTR(-ENOMEM);

	d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds;
	/* dedup_table_size is now used only to force collisions in tests */
	if (opts && opts->dedup_table_size == 1)
		hash_fn = btf_dedup_collision_hash_fn;

	d->btf = btf;
	d->btf_ext = btf_ext;

	d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL);
	if (IS_ERR(d->dedup_table)) {
		err = PTR_ERR(d->dedup_table);
		d->dedup_table = NULL;
		goto done;
	}

	d->map = malloc(sizeof(__u32) * (1 + btf->nr_types));
	if (!d->map) {
		err = -ENOMEM;
		goto done;
	}
	/* special BTF "void" type is made canonical immediately */
	d->map[0] = 0;
	for (i = 1; i <= btf->nr_types; i++) {
		struct btf_type *t = d->btf->types[i];

		/* VAR and DATASEC are never deduped and are self-canonical */
		if (btf_is_var(t) || btf_is_datasec(t))
			d->map[i] = i;
		else
			d->map[i] = BTF_UNPROCESSED_ID;
	}

	d->hypot_map = malloc(sizeof(__u32) * (1 + btf->nr_types));
	if (!d->hypot_map) {
		err = -ENOMEM;
		goto done;
	}
	for (i = 0; i <= btf->nr_types; i++)
		d->hypot_map[i] = BTF_UNPROCESSED_ID;

done:
	if (err) {
		btf_dedup_free(d);
		return ERR_PTR(err);
	}

	return d;
}
typedef int (*str_off_fn_t)(__u32 *str_off_ptr, void *ctx);
/*
 * Iterate over all possible places in .BTF and .BTF.ext that can reference
 * a string and pass a pointer to it to the provided callback `fn`.
 */
static int btf_for_each_str_off(struct btf_dedup *d, str_off_fn_t fn, void *ctx)
{
	void *line_data_cur, *line_data_end;
	int i, j, r, rec_size;
	struct btf_type *t;

	for (i = 1; i <= d->btf->nr_types; i++) {
		t = d->btf->types[i];
		r = fn(&t->name_off, ctx);
		if (r)
			return r;

		switch (btf_kind(t)) {
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION: {
			struct btf_member *m = btf_members(t);
			__u16 vlen = btf_vlen(t);

			for (j = 0; j < vlen; j++) {
				r = fn(&m->name_off, ctx);
				if (r)
					return r;
				m++;
			}
			break;
		}
		case BTF_KIND_ENUM: {
			struct btf_enum *m = btf_enum(t);
			__u16 vlen = btf_vlen(t);

			for (j = 0; j < vlen; j++) {
				r = fn(&m->name_off, ctx);
				if (r)
					return r;
				m++;
			}
			break;
		}
		case BTF_KIND_FUNC_PROTO: {
			struct btf_param *m = btf_params(t);
			__u16 vlen = btf_vlen(t);

			for (j = 0; j < vlen; j++) {
				r = fn(&m->name_off, ctx);
				if (r)
					return r;
				m++;
			}
			break;
		}
		default:
			break;
		}
	}

	if (!d->btf_ext)
		return 0;

	line_data_cur = d->btf_ext->line_info.info;
	line_data_end = d->btf_ext->line_info.info + d->btf_ext->line_info.len;
	rec_size = d->btf_ext->line_info.rec_size;

	while (line_data_cur < line_data_end) {
		struct btf_ext_info_sec *sec = line_data_cur;
		struct bpf_line_info_min *line_info;
		__u32 num_info = sec->num_info;

		r = fn(&sec->sec_name_off, ctx);
		if (r)
			return r;

		line_data_cur += sizeof(struct btf_ext_info_sec);
		for (i = 0; i < num_info; i++) {
			line_info = line_data_cur;
			r = fn(&line_info->file_name_off, ctx);
			if (r)
				return r;
			r = fn(&line_info->line_off, ctx);
			if (r)
				return r;
			line_data_cur += rec_size;
		}
	}

	return 0;
}
static int str_sort_by_content(const void *a1, const void *a2)
{
	const struct btf_str_ptr *p1 = a1;
	const struct btf_str_ptr *p2 = a2;

	return strcmp(p1->str, p2->str);
}
static int str_sort_by_offset(const void *a1, const void *a2)
{
	const struct btf_str_ptr *p1 = a1;
	const struct btf_str_ptr *p2 = a2;

	if (p1->str != p2->str)
		return p1->str < p2->str ? -1 : 1;
	return 0;
}
static int btf_dedup_str_ptr_cmp(const void *str_ptr, const void *pelem)
{
	const struct btf_str_ptr *p = pelem;

	if (str_ptr != p->str)
		return (const char *)str_ptr < p->str ? -1 : 1;
	return 0;
}
static int btf_str_mark_as_used(__u32 *str_off_ptr, void *ctx)
{
	struct btf_str_ptrs *strs;
	struct btf_str_ptr *s;

	if (*str_off_ptr == 0)
		return 0;

	strs = ctx;
	s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt,
		    sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp);
	if (!s)
		return -EINVAL;
	s->used = true;
	return 0;
}
static int btf_str_remap_offset(__u32 *str_off_ptr, void *ctx)
{
	struct btf_str_ptrs *strs;
	struct btf_str_ptr *s;

	if (*str_off_ptr == 0)
		return 0;

	strs = ctx;
	s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt,
		    sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp);
	if (!s)
		return -EINVAL;
	*str_off_ptr = s->new_off;
	return 0;
}
/*
 * Dedup strings and filter out those that are not referenced from either .BTF
 * or .BTF.ext (if provided) sections.
 *
 * This is done by building index of all strings in BTF's string section,
 * then iterating over all entities that can reference strings (e.g., type
 * names, struct field names, .BTF.ext line info, etc) and marking corresponding
 * strings as used. After that all used strings are deduped and compacted into
 * sequential blob of memory and new offsets are calculated. Then all the string
 * references are iterated again and rewritten using new offsets.
 */
static int btf_dedup_strings(struct btf_dedup *d)
{
	const struct btf_header *hdr = d->btf->hdr;
	char *start = (char *)d->btf->nohdr_data + hdr->str_off;
	char *end = start + d->btf->hdr->str_len;
	char *p = start, *tmp_strs = NULL;
	struct btf_str_ptrs strs = {
		.cnt = 0,
		.cap = 0,
		.ptrs = NULL,
		.data = start,
	};
	int i, j, err = 0, grp_idx;
	bool grp_used;

	/* build index of all strings */
	while (p < end) {
		if (strs.cnt + 1 > strs.cap) {
			struct btf_str_ptr *new_ptrs;

			strs.cap += max(strs.cnt / 2, 16U);
			new_ptrs = realloc(strs.ptrs,
					   sizeof(strs.ptrs[0]) * strs.cap);
			if (!new_ptrs) {
				err = -ENOMEM;
				goto done;
			}
			strs.ptrs = new_ptrs;
		}

		strs.ptrs[strs.cnt].str = p;
		strs.ptrs[strs.cnt].used = false;

		p += strlen(p) + 1;
		strs.cnt++;
	}

	/* temporary storage for deduplicated strings */
	tmp_strs = malloc(d->btf->hdr->str_len);
	if (!tmp_strs) {
		err = -ENOMEM;
		goto done;
	}

	/* mark all used strings */
	strs.ptrs[0].used = true;
	err = btf_for_each_str_off(d, btf_str_mark_as_used, &strs);
	if (err)
		goto done;

	/* sort strings by content, so that we can identify duplicates */
	qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_content);

	/*
	 * iterate groups of equal strings and if any instance in a group was
	 * referenced, emit single instance and remember new offset
	 */
	p = tmp_strs;
	grp_idx = 0;
	grp_used = strs.ptrs[0].used;
	/* iterate past end to avoid code duplication after loop */
	for (i = 1; i <= strs.cnt; i++) {
		/*
		 * when i == strs.cnt, we want to skip string comparison and go
		 * straight to handling last group of strings (otherwise we'd
		 * need to handle last group after the loop w/ duplicated code)
		 */
		if (i < strs.cnt &&
		    !strcmp(strs.ptrs[i].str, strs.ptrs[grp_idx].str)) {
			grp_used = grp_used || strs.ptrs[i].used;
			continue;
		}

		/*
		 * this check would have been required after the loop to handle
		 * last group of strings, but due to <= condition in a loop
		 * we avoid that duplication
		 */
		if (grp_used) {
			int new_off = p - tmp_strs;
			__u32 len = strlen(strs.ptrs[grp_idx].str);

			memmove(p, strs.ptrs[grp_idx].str, len + 1);
			for (j = grp_idx; j < i; j++)
				strs.ptrs[j].new_off = new_off;
			p += len + 1;
		}

		if (i < strs.cnt) {
			grp_idx = i;
			grp_used = strs.ptrs[i].used;
		}
	}

	/* replace original strings with deduped ones */
	d->btf->hdr->str_len = p - tmp_strs;
	memmove(start, tmp_strs, d->btf->hdr->str_len);
	end = start + d->btf->hdr->str_len;

	/* restore original order for further binary search lookups */
	qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_offset);

	/* remap string offsets */
	err = btf_for_each_str_off(d, btf_str_remap_offset, &strs);
	if (err)
		goto done;

	d->btf->hdr->str_len = end - start;

done:
	free(tmp_strs);
	free(strs.ptrs);
	return err;
}
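
/*
 * Worked example (illustrative only): for a string section
 * "\0int\0foo\0int\0" where only the two "int" occurrences are referenced,
 * the pass above emits "\0int\0", remaps both references to offset 1, and
 * drops the unused "foo" entirely.
 */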
static long btf_hash_common(struct btf_type *t)
{
	long h;

	h = hash_combine(0, t->name_off);
	h = hash_combine(h, t->info);
	h = hash_combine(h, t->size);
	return h;
}
static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
{
	return t1->name_off == t2->name_off &&
	       t1->info == t2->info &&
	       t1->size == t2->size;
}
/* Calculate type signature hash of INT. */
static long btf_hash_int(struct btf_type *t)
{
	__u32 info = *(__u32 *)(t + 1);
	long h;

	h = btf_hash_common(t);
	h = hash_combine(h, info);
	return h;
}

/* Check structural equality of two INTs. */
static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2)
{
	__u32 info1, info2;

	if (!btf_equal_common(t1, t2))
		return false;
	info1 = *(__u32 *)(t1 + 1);
	info2 = *(__u32 *)(t2 + 1);
	return info1 == info2;
}
/* Calculate type signature hash of ENUM. */
static long btf_hash_enum(struct btf_type *t)
{
	long h;

	/* don't hash vlen and enum members to support enum fwd resolving */
	h = hash_combine(0, t->name_off);
	h = hash_combine(h, t->info & ~0xffff);
	h = hash_combine(h, t->size);
	return h;
}

/* Check structural equality of two ENUMs. */
static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
{
	const struct btf_enum *m1, *m2;
	__u16 vlen;
	int i;

	if (!btf_equal_common(t1, t2))
		return false;

	vlen = btf_vlen(t1);
	m1 = btf_enum(t1);
	m2 = btf_enum(t2);
	for (i = 0; i < vlen; i++) {
		if (m1->name_off != m2->name_off || m1->val != m2->val)
			return false;
		m1++;
		m2++;
	}
	return true;
}
static inline bool btf_is_enum_fwd(struct btf_type *t)
{
	return btf_is_enum(t) && btf_vlen(t) == 0;
}

static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
{
	if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2))
		return btf_equal_enum(t1, t2);
	/* ignore vlen when comparing */
	return t1->name_off == t2->name_off &&
	       (t1->info & ~0xffff) == (t2->info & ~0xffff) &&
	       t1->size == t2->size;
}
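
/*
 * Example (illustrative only): a forward-declared enum is emitted as an ENUM
 * with vlen == 0 (see btf_is_enum_fwd() above). btf_compat_enum() lets such
 * a fwd match its full definition from another CU, e.g. `enum E { V = 1 };`,
 * by ignoring vlen and members and comparing only name, info and size.
 */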
/*
 * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
 * as referenced type IDs equivalence is established separately during type
 * graph equivalence check algorithm.
 */
static long btf_hash_struct(struct btf_type *t)
{
	const struct btf_member *member = btf_members(t);
	__u32 vlen = btf_vlen(t);
	long h = btf_hash_common(t);
	int i;

	for (i = 0; i < vlen; i++) {
		h = hash_combine(h, member->name_off);
		h = hash_combine(h, member->offset);
		/* no hashing of referenced type ID, it can be unresolved yet */
		member++;
	}
	return h;
}
/*
 * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced
 * type IDs. This check is performed during type graph equivalence check and
 * referenced types equivalence is checked separately.
 */
static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
{
	const struct btf_member *m1, *m2;
	__u16 vlen;
	int i;

	if (!btf_equal_common(t1, t2))
		return false;

	vlen = btf_vlen(t1);
	m1 = btf_members(t1);
	m2 = btf_members(t2);
	for (i = 0; i < vlen; i++) {
		if (m1->name_off != m2->name_off || m1->offset != m2->offset)
			return false;
		m1++;
		m2++;
	}
	return true;
}
/*
 * Calculate type signature hash of ARRAY, including referenced type IDs,
 * under assumption that they were already resolved to canonical type IDs and
 * are not going to change.
 */
static long btf_hash_array(struct btf_type *t)
{
	const struct btf_array *info = btf_array(t);
	long h = btf_hash_common(t);

	h = hash_combine(h, info->type);
	h = hash_combine(h, info->index_type);
	h = hash_combine(h, info->nelems);
	return h;
}
/*
 * Check exact equality of two ARRAYs, taking into account referenced
 * type IDs, under assumption that they were already resolved to canonical
 * type IDs and are not going to change.
 * This function is called during reference types deduplication to compare
 * ARRAY to potential canonical representative.
 */
static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2)
{
	const struct btf_array *info1, *info2;

	if (!btf_equal_common(t1, t2))
		return false;

	info1 = btf_array(t1);
	info2 = btf_array(t2);
	return info1->type == info2->type &&
	       info1->index_type == info2->index_type &&
	       info1->nelems == info2->nelems;
}
/*
 * Check structural compatibility of two ARRAYs, ignoring referenced type
 * IDs. This check is performed during type graph equivalence check and
 * referenced types equivalence is checked separately.
 */
static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2)
{
	if (!btf_equal_common(t1, t2))
		return false;

	return btf_array(t1)->nelems == btf_array(t2)->nelems;
}
/*
 * Calculate type signature hash of FUNC_PROTO, including referenced type IDs,
 * under assumption that they were already resolved to canonical type IDs and
 * are not going to change.
 */
static long btf_hash_fnproto(struct btf_type *t)
{
	const struct btf_param *member = btf_params(t);
	__u16 vlen = btf_vlen(t);
	long h = btf_hash_common(t);
	int i;

	for (i = 0; i < vlen; i++) {
		h = hash_combine(h, member->name_off);
		h = hash_combine(h, member->type);
		member++;
	}
	return h;
}
/*
 * Check exact equality of two FUNC_PROTOs, taking into account referenced
 * type IDs, under assumption that they were already resolved to canonical
 * type IDs and are not going to change.
 * This function is called during reference types deduplication to compare
 * FUNC_PROTO to potential canonical representative.
 */
static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
{
	const struct btf_param *m1, *m2;
	__u16 vlen;
	int i;

	if (!btf_equal_common(t1, t2))
		return false;

	vlen = btf_vlen(t1);
	m1 = btf_params(t1);
	m2 = btf_params(t2);
	for (i = 0; i < vlen; i++) {
		if (m1->name_off != m2->name_off || m1->type != m2->type)
			return false;
		m1++;
		m2++;
	}
	return true;
}
/*
 * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type
 * IDs. This check is performed during type graph equivalence check and
 * referenced types equivalence is checked separately.
 */
static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
{
	const struct btf_param *m1, *m2;
	__u16 vlen;
	int i;

	/* skip return type ID */
	if (t1->name_off != t2->name_off || t1->info != t2->info)
		return false;

	vlen = btf_vlen(t1);
	m1 = btf_params(t1);
	m2 = btf_params(t2);
	for (i = 0; i < vlen; i++) {
		if (m1->name_off != m2->name_off)
			return false;
		m1++;
		m2++;
	}
	return true;
}
/*
 * Deduplicate primitive types, that can't reference other types, by calculating
 * their type signature hash and comparing them with any possible canonical
 * candidate. If no canonical candidate matches, type itself is marked as
 * canonical and is added into `btf_dedup->dedup_table` as another candidate.
 */
static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
{
	struct btf_type *t = d->btf->types[type_id];
	struct hashmap_entry *hash_entry;
	struct btf_type *cand;
	/* if we don't find equivalent type, then we are canonical */
	__u32 new_id = type_id;
	__u32 cand_id;
	long h;

	switch (btf_kind(t)) {
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_ARRAY:
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
	case BTF_KIND_FUNC:
	case BTF_KIND_FUNC_PROTO:
	case BTF_KIND_VAR:
	case BTF_KIND_DATASEC:
		return 0;

	case BTF_KIND_INT:
		h = btf_hash_int(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = (__u32)(long)hash_entry->value;
			cand = d->btf->types[cand_id];
			if (btf_equal_int(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;

	case BTF_KIND_ENUM:
		h = btf_hash_enum(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = (__u32)(long)hash_entry->value;
			cand = d->btf->types[cand_id];
			if (btf_equal_enum(t, cand)) {
				new_id = cand_id;
				break;
			}
			if (d->opts.dont_resolve_fwds)
				continue;
			if (btf_compat_enum(t, cand)) {
				if (btf_is_enum_fwd(t)) {
					/* resolve fwd to full enum */
					new_id = cand_id;
					break;
				}
				/* resolve canonical enum fwd to full enum */
				d->map[cand_id] = type_id;
			}
		}
		break;

	case BTF_KIND_FWD:
		h = btf_hash_common(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = (__u32)(long)hash_entry->value;
			cand = d->btf->types[cand_id];
			if (btf_equal_common(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;

	default:
		return -EINVAL;
	}

	d->map[type_id] = new_id;
	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
		return -ENOMEM;

	return 0;
}
static int btf_dedup_prim_types(struct btf_dedup *d)
{
	int i, err;

	for (i = 1; i <= d->btf->nr_types; i++) {
		err = btf_dedup_prim_type(d, i);
		if (err)
			return err;
	}
	return 0;
}
/*
 * Check whether type is already mapped into canonical one (could be to itself).
 */
static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id)
{
	return d->map[type_id] <= BTF_MAX_NR_TYPES;
}
/*
 * Resolve type ID into its canonical type ID, if any; otherwise return original
 * type ID. If type is FWD and is resolved into STRUCT/UNION already, follow
 * STRUCT/UNION link and resolve it into canonical type ID as well.
 */
static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id)
{
	while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
		type_id = d->map[type_id];
	return type_id;
}
/*
 * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original
 * type ID.
 */
static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id)
{
	__u32 orig_type_id = type_id;

	if (!btf_is_fwd(d->btf->types[type_id]))
		return type_id;

	while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
		type_id = d->map[type_id];

	if (!btf_is_fwd(d->btf->types[type_id]))
		return type_id;

	return orig_type_id;
}
static inline __u16 btf_fwd_kind(struct btf_type *t)
{
	return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
}
/*
 * Check equivalence of BTF type graph formed by candidate struct/union (we'll
 * call it "candidate graph" in this description for brevity) to a type graph
 * formed by (potential) canonical struct/union ("canonical graph" for brevity
 * here, though keep in mind that not all types in canonical graph are
 * necessarily canonical representatives themselves, some of them might be
 * duplicates or its uniqueness might not have been established yet).
 * Returns:
 *  - >0, if type graphs are equivalent;
 *  -  0, if not equivalent;
 *  - <0, on error.
 *
 * Algorithm performs side-by-side DFS traversal of both type graphs and checks
 * equivalence of BTF types at each step. If at any point BTF types in candidate
 * and canonical graphs are not compatible structurally, whole graphs are
 * incompatible. If types are structurally equivalent (i.e., all information
 * except referenced type IDs is exactly the same), a mapping from `canon_id` to
 * a `cand_id` is recorded in hypothetical mapping (`btf_dedup->hypot_map`).
 * If a type references other types, then those referenced types are checked
 * for equivalence recursively.
 *
 * During DFS traversal, if we find that for current `canon_id` type we
 * already have some mapping in hypothetical map, we check for two possible
 * situations:
 *   - `canon_id` is mapped to exactly the same type as `cand_id`. This will
 *     happen when type graphs have cycles. In this case we assume those two
 *     types are equivalent.
 *   - `canon_id` is mapped to a different type. This is a contradiction in our
 *     hypothetical mapping, because same graph in canonical graph corresponds
 *     to two different types in candidate graph, which for equivalent type
 *     graphs shouldn't happen. This condition terminates equivalence check
 *     with negative result.
 *
 * If type graphs traversal exhausts types to check and finds no contradiction,
 * then type graphs are equivalent.
 *
 * When checking types for equivalence, there is one special case: FWD types.
 * If FWD type resolution is allowed and one of the types (either from canonical
 * or candidate graph) is FWD and other is STRUCT/UNION (depending on FWD's kind
 * flag) and their names match, hypothetical mapping is updated to point from
 * FWD to STRUCT/UNION. If graphs will be determined as equivalent successfully,
 * this mapping will be used to record FWD -> STRUCT/UNION mapping permanently.
 *
 * Technically, this could lead to incorrect FWD to STRUCT/UNION resolution,
 * if there are two exactly named (or anonymous) structs/unions that are
 * compatible structurally, one of which has FWD field, while other is concrete
 * STRUCT/UNION, but according to C sources they are different structs/unions
 * that are referencing different types with the same name. This is extremely
 * unlikely to happen, but btf_dedup API allows to disable FWD resolution if
 * this logic is causing problems.
 *
 * Doing FWD resolution means that both candidate and/or canonical graphs can
 * consist of portions of the graph that come from multiple compilation units.
 * This is due to the fact that types within single compilation unit are always
 * deduplicated and FWDs are already resolved, if referenced struct/union
 * definition is available. So, if we had unresolved FWD and found corresponding
 * STRUCT/UNION, they will be from different compilation units. This
 * consequently means that when we "link" FWD to corresponding STRUCT/UNION,
 * type graph will likely have at least two different BTF types that describe
 * same type (e.g., most probably there will be two different BTF types for the
 * same 'int' primitive type) and could even have "overlapping" parts of type
 * graph that describe same subset of types.
 *
 * This in turn means that our assumption that each type in canonical graph
 * must correspond to exactly one type in candidate graph might not hold
 * anymore and will make it harder to detect contradictions using hypothetical
 * map. To handle this problem, we allow to follow FWD -> STRUCT/UNION
 * resolution only in canonical graph. FWDs in candidate graphs are never
 * resolved. To see why it's OK, let's check all possible situations w.r.t. FWDs
 * in candidate and canonical graphs:
 *   - Both types in canonical and candidate graphs are FWDs. If they are
 *     structurally equivalent, then they can either be both resolved to the
 *     same STRUCT/UNION or not resolved at all. In both cases they are
 *     equivalent and there is no need to resolve FWD on candidate side.
 *   - Both types in canonical and candidate graphs are concrete STRUCT/UNION,
 *     so nothing to resolve as well, algorithm will check equivalence anyway.
 *   - Type in canonical graph is FWD, while type in candidate is concrete
 *     STRUCT/UNION. In this case candidate graph comes from single compilation
 *     unit, so there is exactly one BTF type for each unique C type. After
 *     resolving FWD into STRUCT/UNION, there might be more than one BTF type
 *     in canonical graph mapping to single BTF type in candidate graph, but
 *     because hypothetical mapping maps from canonical to candidate types, it's
 *     alright, and we still maintain the property of having single `canon_id`
 *     mapping to single `cand_id` (there could be two different `canon_id`
 *     mapped to the same `cand_id`, but it's not contradictory).
 *   - Type in canonical graph is concrete STRUCT/UNION, while type in candidate
 *     graph is FWD. In this case we are just going to check compatibility of
 *     STRUCT/UNION and corresponding FWD, and if they are compatible, we'll
 *     assume that whatever STRUCT/UNION FWD resolves to must be equivalent to
 *     a concrete STRUCT/UNION from canonical graph. If the rest of type graphs
 *     turn out equivalent, we'll re-resolve FWD to concrete STRUCT/UNION from
 *     canonical graph.
 */
static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
			      __u32 canon_id)
{
	struct btf_type *cand_type;
	struct btf_type *canon_type;
	__u32 hypot_type_id;
	__u16 cand_kind;
	__u16 canon_kind;
	int i, eq;

	/* if both resolve to the same canonical, they must be equivalent */
	if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id))
		return 1;

	canon_id = resolve_fwd_id(d, canon_id);

	hypot_type_id = d->hypot_map[canon_id];
	if (hypot_type_id <= BTF_MAX_NR_TYPES)
		return hypot_type_id == cand_id;

	if (btf_dedup_hypot_map_add(d, canon_id, cand_id))
		return -ENOMEM;

	cand_type = d->btf->types[cand_id];
	canon_type = d->btf->types[canon_id];
	cand_kind = btf_kind(cand_type);
	canon_kind = btf_kind(canon_type);

	if (cand_type->name_off != canon_type->name_off)
		return 0;

	/* FWD <--> STRUCT/UNION equivalence check, if enabled */
	if (!d->opts.dont_resolve_fwds
	    && (cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
	    && cand_kind != canon_kind) {
		__u16 real_kind;
		__u16 fwd_kind;

		if (cand_kind == BTF_KIND_FWD) {
			real_kind = canon_kind;
			fwd_kind = btf_fwd_kind(cand_type);
		} else {
			real_kind = cand_kind;
			fwd_kind = btf_fwd_kind(canon_type);
		}
		return fwd_kind == real_kind;
	}

	if (cand_kind != canon_kind)
		return 0;

	switch (cand_kind) {
	case BTF_KIND_INT:
		return btf_equal_int(cand_type, canon_type);

	case BTF_KIND_ENUM:
		if (d->opts.dont_resolve_fwds)
			return btf_equal_enum(cand_type, canon_type);
		else
			return btf_compat_enum(cand_type, canon_type);

	case BTF_KIND_FWD:
		return btf_equal_common(cand_type, canon_type);

	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
		if (cand_type->info != canon_type->info)
			return 0;
		return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);

	case BTF_KIND_ARRAY: {
		const struct btf_array *cand_arr, *canon_arr;

		if (!btf_compat_array(cand_type, canon_type))
			return 0;
		cand_arr = btf_array(cand_type);
		canon_arr = btf_array(canon_type);
		eq = btf_dedup_is_equiv(d,
			cand_arr->index_type, canon_arr->index_type);
		if (eq <= 0)
			return eq;
		return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type);
	}

	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		const struct btf_member *cand_m, *canon_m;
		__u16 vlen;

		if (!btf_shallow_equal_struct(cand_type, canon_type))
			return 0;
		vlen = btf_vlen(cand_type);
		cand_m = btf_members(cand_type);
		canon_m = btf_members(canon_type);
		for (i = 0; i < vlen; i++) {
			eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type);
			if (eq <= 0)
				return eq;
			cand_m++;
			canon_m++;
		}

		return 1;
	}

	case BTF_KIND_FUNC_PROTO: {
		const struct btf_param *cand_p, *canon_p;
		__u16 vlen;

		if (!btf_compat_fnproto(cand_type, canon_type))
			return 0;
		eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
		if (eq <= 0)
			return eq;
		vlen = btf_vlen(cand_type);
		cand_p = btf_params(cand_type);
		canon_p = btf_params(canon_type);
		for (i = 0; i < vlen; i++) {
			eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type);
			if (eq <= 0)
				return eq;
			cand_p++;
			canon_p++;
		}
		return 1;
	}

	default:
		return -EINVAL;
	}
	return 0;
}
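
/*
 * Example (illustrative only): with `struct S;` (FWD, kflag 0) in one graph
 * and a full `struct S { ... };` in the other, btf_fwd_kind() yields
 * BTF_KIND_STRUCT for the FWD; it matches the concrete kind and the names
 * match, so the check above deems the pair equivalent and the FWD is later
 * resolved to the full struct by btf_dedup_merge_hypot_map() below.
 */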
/*
 * Use the hypothetical mapping, produced by a successful type graph
 * equivalence check, to augment the existing struct/union canonical mapping,
 * where possible.
 *
 * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record
 * FWD -> STRUCT/UNION correspondence. FWD resolution is bidirectional: it
 * doesn't matter whether the FWD type was part of the canonical graph or the
 * candidate one, we record the mapping either way. Unlike the struct/union
 * correspondence mapping (described below), no extra care is needed here: by
 * the time a FWD type (a reference type) is deduplicated, all structs/unions
 * will have been deduped already.
 *
 * Recording the STRUCT/UNION mapping is purely a performance optimization and
 * is not required for correctness. It needs to be done carefully to ensure
 * that a struct/union from the candidate's type graph is not mapped into a
 * corresponding struct/union from the canonical type graph that itself hasn't
 * been resolved into a canonical representative. The only guarantee we have
 * is that the canonical struct/union was determined as canonical and that
 * won't change. But any types referenced through that struct/union's fields
 * might not be resolved yet, in which case it's too early to establish any
 * correspondence between the structs/unions.
 *
 * No canonical correspondence is derived for primitive types (they are
 * already fully deduplicated) or reference types (they rely on the stability
 * of the struct/union canonical relationship for equivalence checks).
 */
static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
{
	__u32 cand_type_id, targ_type_id;
	__u16 t_kind, c_kind;
	__u32 t_id, c_id;
	int i;

	for (i = 0; i < d->hypot_cnt; i++) {
		cand_type_id = d->hypot_list[i];
		targ_type_id = d->hypot_map[cand_type_id];
		t_id = resolve_type_id(d, targ_type_id);
		c_id = resolve_type_id(d, cand_type_id);
		t_kind = btf_kind(d->btf->types[t_id]);
		c_kind = btf_kind(d->btf->types[c_id]);
		/*
		 * Resolve FWD into STRUCT/UNION.
		 * It's ok to resolve FWD into a STRUCT/UNION that's not yet
		 * mapped to a canonical representative (as opposed to the
		 * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because
		 * eventually that struct is going to be mapped and all resolved
		 * FWDs will automatically resolve to the correct canonical
		 * representative. This will happen before ref type deduping,
		 * which critically depends on the stability of these mappings.
		 * This stability is not a requirement for STRUCT/UNION
		 * equivalence checks, though.
		 */
		if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD)
			d->map[c_id] = t_id;
		else if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD)
			d->map[t_id] = c_id;

		if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) &&
		    c_kind != BTF_KIND_FWD &&
		    is_type_mapped(d, c_id) &&
		    !is_type_mapped(d, t_id)) {
			/*
			 * As a perf optimization, we can map a struct/union
			 * that's part of the type graph we just verified for
			 * equivalence. We can only do that for a struct/union
			 * that already has a canonical representative, though.
			 */
			d->map[t_id] = c_id;
		}
	}
}
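
/*
 * Illustrative example (not from the original source): suppose the
 * equivalence check hypothetically paired type #2 with type #5, i.e.
 * hypot_list contains 2 and hypot_map[2] == 5. If #2 resolves to a FWD and
 * #5 to a STRUCT, the FWD is resolved via d->map[2] = 5; in the mirrored
 * case (#2 is the STRUCT, #5 the FWD) it is d->map[5] = 2. A STRUCT/UNION
 * pair, by contrast, is only recorded when the canonical side is already
 * mapped, per the perf optimization above.
 */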
/*
 * Deduplicate struct/union types.
 *
 * For each struct/union type its type signature hash is calculated, taking
 * into account the type's name, size, and the number, order, and names of
 * its fields, but ignoring type IDs referenced from fields, because they
 * might not be deduped completely until after the reference type
 * deduplication phase. This hash is used to iterate over all potential
 * canonical types sharing the same hash. For each canonical candidate we
 * check whether the type graphs they form (through types referenced from
 * fields and so on) are equivalent, using the algorithm implemented in
 * `btf_dedup_is_equiv`. If such equivalence is found and BTF_KIND_FWD
 * resolution is allowed, the hypothetical mapping (btf_dedup->hypot_map)
 * produced by the type graph equivalence algorithm is used to record the
 * FWD -> STRUCT/UNION mapping. It's also used to potentially map other
 * structs/unions to their canonical representatives, if such a relationship
 * hasn't been established yet. This speeds up the algorithm by eliminating
 * some of the duplicate work.
 *
 * If no matching canonical representative is found, the struct/union is
 * marked as canonical for itself and is added into the
 * btf_dedup->dedup_table hash map for further lookups.
 */
static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
{
	struct btf_type *cand_type, *t;
	struct hashmap_entry *hash_entry;
	/* if we don't find equivalent type, then we are canonical */
	__u32 new_id = type_id;
	__u16 kind;
	long h;

	/* already deduped or is in process of deduping (loop detected) */
	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
		return 0;

	t = d->btf->types[type_id];
	kind = btf_kind(t);

	if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
		return 0;

	h = btf_hash_struct(t);
	for_each_dedup_cand(d, hash_entry, h) {
		__u32 cand_id = (__u32)(long)hash_entry->value;
		int eq;

		/*
		 * Even though btf_dedup_is_equiv() checks for
		 * btf_shallow_equal_struct() internally when checking two
		 * structs (unions) for equivalence, we need to guard here
		 * against picking a matching FWD type as a dedup candidate.
		 * This can happen due to a hash collision. In such a case,
		 * relying on btf_dedup_is_equiv() alone could create a loop
		 * (FWD -> STRUCT and STRUCT -> FWD), because a FWD and a
		 * compatible STRUCT/UNION are considered equivalent.
		 */
		cand_type = d->btf->types[cand_id];
		if (!btf_shallow_equal_struct(t, cand_type))
			continue;

		btf_dedup_clear_hypot_map(d);
		eq = btf_dedup_is_equiv(d, type_id, cand_id);
		if (eq < 0)
			return eq;
		if (!eq)
			continue;
		new_id = cand_id;
		btf_dedup_merge_hypot_map(d);
		break;
	}

	d->map[type_id] = new_id;
	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
		return -ENOMEM;

	return 0;
}
static int btf_dedup_struct_types(struct btf_dedup *d)
{
	int i, err;

	for (i = 1; i <= d->btf->nr_types; i++) {
		err = btf_dedup_struct_type(d, i);
		if (err)
			return err;
	}
	return 0;
}
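
/*
 * Illustrative example (not from the original source): if two CUs both
 * define
 *
 *	struct pkt { void *data; int len; };
 *
 * both copies hash identically via btf_hash_struct() (name, size, and field
 * count/names/offsets, but not field type IDs). The later copy is then
 * verified against the earlier one with btf_dedup_is_equiv() and, on
 * success, d->map points it at the earlier copy's type ID, so only one
 * canonical "pkt" survives compaction.
 */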
/*
 * Deduplicate reference types.
 *
 * Once all primitive and struct/union types are deduplicated, we can easily
 * deduplicate all other (reference) BTF types. This is done in two steps:
 *
 * 1. Resolve all referenced type IDs into their canonical type IDs. This
 * resolution can be done either immediately for primitive or struct/union
 * types (because they were deduped in the previous two phases) or recursively
 * for reference types. Recursion always terminates at either a primitive or
 * a struct/union type, at which point we can "unwind" the chain of reference
 * types one by one. There is no danger of encountering cycles because in the
 * C type system the only way to form a type cycle is through struct/union, so
 * any chain of reference types, even those taking part in a type cycle, will
 * inevitably reach a struct/union at some point.
 *
 * 2. Once all referenced type IDs are resolved into canonical ones, the BTF
 * type becomes "stable", in the sense that no further deduplication will
 * cause any changes to it. With that, it's now possible to calculate the
 * type's signature hash (this time taking referenced type IDs into account)
 * and loop over all potential canonical representatives. If no match is
 * found, the current type becomes the canonical representative of itself and
 * is added into btf_dedup->dedup_table as another possible canonical
 * representative.
 */
static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
{
	struct hashmap_entry *hash_entry;
	__u32 new_id = type_id, cand_id;
	struct btf_type *t, *cand;
	/* if we don't find equivalent type, then we are representative type */
	int ref_type_id;
	long h;

	if (d->map[type_id] == BTF_IN_PROGRESS_ID)
		return -ELOOP;
	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
		return resolve_type_id(d, type_id);

	t = d->btf->types[type_id];
	d->map[type_id] = BTF_IN_PROGRESS_ID;

	switch (btf_kind(t)) {
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
		ref_type_id = btf_dedup_ref_type(d, t->type);
		if (ref_type_id < 0)
			return ref_type_id;
		t->type = ref_type_id;

		h = btf_hash_common(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = (__u32)(long)hash_entry->value;
			cand = d->btf->types[cand_id];
			if (btf_equal_common(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;

	case BTF_KIND_ARRAY: {
		struct btf_array *info = btf_array(t);

		ref_type_id = btf_dedup_ref_type(d, info->type);
		if (ref_type_id < 0)
			return ref_type_id;
		info->type = ref_type_id;

		ref_type_id = btf_dedup_ref_type(d, info->index_type);
		if (ref_type_id < 0)
			return ref_type_id;
		info->index_type = ref_type_id;

		h = btf_hash_array(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = (__u32)(long)hash_entry->value;
			cand = d->btf->types[cand_id];
			if (btf_equal_array(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;
	}

	case BTF_KIND_FUNC_PROTO: {
		struct btf_param *param;
		__u16 vlen;
		int i;

		ref_type_id = btf_dedup_ref_type(d, t->type);
		if (ref_type_id < 0)
			return ref_type_id;
		t->type = ref_type_id;

		vlen = btf_vlen(t);
		param = btf_params(t);
		for (i = 0; i < vlen; i++) {
			ref_type_id = btf_dedup_ref_type(d, param->type);
			if (ref_type_id < 0)
				return ref_type_id;
			param->type = ref_type_id;
			param++;
		}

		h = btf_hash_fnproto(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = (__u32)(long)hash_entry->value;
			cand = d->btf->types[cand_id];
			if (btf_equal_fnproto(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;
	}

	default:
		return -EINVAL;
	}

	d->map[type_id] = new_id;
	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
		return -ENOMEM;

	return new_id;
}
static int btf_dedup_ref_types(struct btf_dedup *d)
{
	int i, err;

	for (i = 1; i <= d->btf->nr_types; i++) {
		err = btf_dedup_ref_type(d, i);
		if (err < 0)
			return err;
	}
	/* we won't need d->dedup_table anymore */
	hashmap__free(d->dedup_table);
	d->dedup_table = NULL;
	return 0;
}
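
/*
 * Illustrative example (not from the original source): given two duplicated
 * chains
 *
 *	CONST -> PTR -> struct pkt	(IDs 10 -> 11 -> 12)
 *	CONST -> PTR -> struct pkt	(IDs 20 -> 21 -> 22)
 *
 * where struct dedup already set map[22] = 12, recursing from type 20 first
 * rewrites 21's referenced type to 12, making 21 hash- and
 * btf_equal_common()-equal to 11, so map[21] = 11; that in turn makes 20
 * equal to 10, so map[20] = 10. The whole chain collapses bottom-up.
 */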
/*
 * Compact types.
 *
 * After we have established for each type its corresponding canonical
 * representative type, we can eliminate types that are not canonical and
 * leave only canonical ones laid out sequentially in memory, by copying them
 * over duplicates. During compaction the btf_dedup->hypot_map array is
 * reused to store a map from original type ID to a new compacted type ID,
 * which will be used during the next phase to "fix up" type IDs referenced
 * from struct/union and reference types.
 */
static int btf_dedup_compact_types(struct btf_dedup *d)
{
	struct btf_type **new_types;
	__u32 next_type_id = 1;
	char *types_start, *p;
	int i, len;

	/* we are going to reuse hypot_map to store compaction remapping */
	d->hypot_map[0] = 0;
	for (i = 1; i <= d->btf->nr_types; i++)
		d->hypot_map[i] = BTF_UNPROCESSED_ID;

	types_start = d->btf->nohdr_data + d->btf->hdr->type_off;
	p = types_start;

	for (i = 1; i <= d->btf->nr_types; i++) {
		if (d->map[i] != i)
			continue;

		len = btf_type_size(d->btf->types[i]);
		if (len < 0)
			return len;

		memmove(p, d->btf->types[i], len);
		d->hypot_map[i] = next_type_id;
		d->btf->types[next_type_id] = (struct btf_type *)p;
		p += len;
		next_type_id++;
	}

	/* shrink struct btf's internal types index and update btf_header */
	d->btf->nr_types = next_type_id - 1;
	d->btf->types_size = d->btf->nr_types;
	d->btf->hdr->type_len = p - types_start;
	new_types = realloc(d->btf->types,
			    (1 + d->btf->nr_types) * sizeof(struct btf_type *));
	if (!new_types)
		return -ENOMEM;
	d->btf->types = new_types;

	/* make sure string section follows type information without gaps */
	d->btf->hdr->str_off = p - (char *)d->btf->nohdr_data;
	memmove(p, d->btf->strings, d->btf->hdr->str_len);
	d->btf->strings = p;
	p += d->btf->hdr->str_len;

	d->btf->data_size = p - (char *)d->btf->data;
	return 0;
}
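
/*
 * Illustrative example (not from the original source): with four types where
 * map[1] = 1, map[2] = 2, map[3] = 1, and map[4] = 4, only types 1, 2, and 4
 * are canonical (map[i] == i). Compaction memmove()s them back to back and
 * records hypot_map[1] = 1, hypot_map[2] = 2, and hypot_map[4] = 3
 * (hypot_map[3] stays BTF_UNPROCESSED_ID), shrinking nr_types to 3 and
 * pulling the string section up so it starts right after the last surviving
 * type.
 */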
/*
 * Figure out final (deduplicated and compacted) type ID for provided original
 * `type_id` by first resolving it into corresponding canonical type ID and
 * then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map,
 * which is populated during compaction phase.
 */
static int btf_dedup_remap_type_id(struct btf_dedup *d, __u32 type_id)
{
	__u32 resolved_type_id, new_type_id;

	resolved_type_id = resolve_type_id(d, type_id);
	new_type_id = d->hypot_map[resolved_type_id];
	if (new_type_id > BTF_MAX_NR_TYPES)
		return -EINVAL;
	return new_type_id;
}
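
/*
 * Continuing the illustrative example above (not from the original source):
 * remapping original type ID 3 first resolves it to its canonical ID 1
 * (map[3] = 1), and hypot_map[1] == 1 then yields final compacted ID 1. For
 * original ID 4, resolution is the identity (map[4] = 4) and hypot_map[4]
 * yields final ID 3.
 */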
/*
 * Remap referenced type IDs into deduped type IDs.
 *
 * After BTF types are deduplicated and compacted, their final type IDs may
 * differ from original ones. The map from original to a corresponding
 * deduped type ID is stored in btf_dedup->hypot_map and is populated during
 * compaction phase. During remapping phase we are rewriting all type IDs
 * referenced from any BTF type (e.g., struct fields, func proto args, etc) to
 * their final deduped type IDs.
 */
static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id)
{
	struct btf_type *t = d->btf->types[type_id];
	int i, r;

	switch (btf_kind(t)) {
	case BTF_KIND_INT:
	case BTF_KIND_ENUM:
		break;

	case BTF_KIND_FWD:
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
	case BTF_KIND_VAR:
		r = btf_dedup_remap_type_id(d, t->type);
		if (r < 0)
			return r;
		t->type = r;
		break;

	case BTF_KIND_ARRAY: {
		struct btf_array *arr_info = btf_array(t);

		r = btf_dedup_remap_type_id(d, arr_info->type);
		if (r < 0)
			return r;
		arr_info->type = r;
		r = btf_dedup_remap_type_id(d, arr_info->index_type);
		if (r < 0)
			return r;
		arr_info->index_type = r;
		break;
	}

	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		struct btf_member *member = btf_members(t);
		__u16 vlen = btf_vlen(t);

		for (i = 0; i < vlen; i++) {
			r = btf_dedup_remap_type_id(d, member->type);
			if (r < 0)
				return r;
			member->type = r;
			member++;
		}
		break;
	}

	case BTF_KIND_FUNC_PROTO: {
		struct btf_param *param = btf_params(t);
		__u16 vlen = btf_vlen(t);

		r = btf_dedup_remap_type_id(d, t->type);
		if (r < 0)
			return r;
		t->type = r;

		for (i = 0; i < vlen; i++) {
			r = btf_dedup_remap_type_id(d, param->type);
			if (r < 0)
				return r;
			param->type = r;
			param++;
		}
		break;
	}

	case BTF_KIND_DATASEC: {
		struct btf_var_secinfo *var = btf_var_secinfos(t);
		__u16 vlen = btf_vlen(t);

		for (i = 0; i < vlen; i++) {
			r = btf_dedup_remap_type_id(d, var->type);
			if (r < 0)
				return r;
			var->type = r;
			var++;
		}
		break;
	}

	default:
		return -EINVAL;
	}

	return 0;
}
static int btf_dedup_remap_types(struct btf_dedup *d)
{
	int i, r;

	for (i = 1; i <= d->btf->nr_types; i++) {
		r = btf_dedup_remap_type(d, i);
		if (r < 0)
			return r;
	}
	return 0;
}
static struct btf *btf_load_raw(const char *path)
{
	struct btf *btf;
	size_t read_cnt;
	struct stat st;
	void *data;
	FILE *f;

	if (stat(path, &st))
		return ERR_PTR(-errno);

	data = malloc(st.st_size);
	if (!data)
		return ERR_PTR(-ENOMEM);

	f = fopen(path, "rb");
	if (!f) {
		btf = ERR_PTR(-errno);
		goto cleanup;
	}

	read_cnt = fread(data, 1, st.st_size, f);
	fclose(f);
	if (read_cnt < st.st_size) {
		btf = ERR_PTR(-EBADF);
		goto cleanup;
	}

	btf = btf__new(data, read_cnt);

cleanup:
	free(data);
	return btf;
}
/*
 * Probe a few well-known locations for the vmlinux kernel image and try to
 * load BTF data out of it to use as the target BTF.
 */
struct btf *libbpf_find_kernel_btf(void)
{
	struct {
		const char *path_fmt;
		bool raw_btf;
	} locations[] = {
		/* try canonical vmlinux BTF through sysfs first */
		{ "/sys/kernel/btf/vmlinux", true /* raw BTF */ },
		/* fall back to trying to find vmlinux ELF on disk otherwise */
		{ "/boot/vmlinux-%1$s" },
		{ "/lib/modules/%1$s/vmlinux-%1$s" },
		{ "/lib/modules/%1$s/build/vmlinux" },
		{ "/usr/lib/modules/%1$s/kernel/vmlinux" },
		{ "/usr/lib/debug/boot/vmlinux-%1$s" },
		{ "/usr/lib/debug/boot/vmlinux-%1$s.debug" },
		{ "/usr/lib/debug/lib/modules/%1$s/vmlinux" },
	};
	char path[PATH_MAX + 1];
	struct utsname buf;
	struct btf *btf;
	int i;

	uname(&buf);

	for (i = 0; i < ARRAY_SIZE(locations); i++) {
		snprintf(path, PATH_MAX, locations[i].path_fmt, buf.release);

		if (access(path, R_OK))
			continue;

		if (locations[i].raw_btf)
			btf = btf_load_raw(path);
		else
			btf = btf__parse_elf(path, NULL);

		pr_debug("loading kernel BTF '%s': %ld\n",
			 path, IS_ERR(btf) ? PTR_ERR(btf) : 0);
		if (IS_ERR(btf))
			continue;

		return btf;
	}

	pr_warn("failed to find valid kernel BTF\n");
	return ERR_PTR(-ESRCH);
}
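
/*
 * Typical usage sketch from an application's perspective (illustrative only,
 * assuming a libbpf version exposing this API):
 *
 *	struct btf *kern_btf = libbpf_find_kernel_btf();
 *
 *	if (IS_ERR(kern_btf))
 *		return PTR_ERR(kern_btf);	// no readable kernel BTF found
 *	// ... use kern_btf for kernel type lookups, CO-RE relocations ...
 *	btf__free(kern_btf);
 */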