1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2 /* Copyright (c) 2018 Facebook */
4 #include <endian.h>
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <string.h>
8 #include <fcntl.h>
9 #include <unistd.h>
10 #include <errno.h>
11 #include <sys/utsname.h>
12 #include <sys/param.h>
13 #include <sys/stat.h>
14 #include <linux/kernel.h>
15 #include <linux/err.h>
16 #include <linux/btf.h>
17 #include <gelf.h>
18 #include "btf.h"
19 #include "bpf.h"
20 #include "libbpf.h"
21 #include "libbpf_internal.h"
22 #include "hashmap.h"
24 /* make sure libbpf doesn't use kernel-only integer typedefs */
25 #pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
27 #define BTF_MAX_NR_TYPES 0x7fffffffU
28 #define BTF_MAX_STR_OFFSET 0x7fffffffU
30 static struct btf_type btf_void;
32 struct btf {
33 union {
34 struct btf_header *hdr;
35 void *data;
37 struct btf_type **types;
38 const char *strings;
39 void *nohdr_data;
40 __u32 nr_types;
41 __u32 types_size;
42 __u32 data_size;
43 int fd;
46 static inline __u64 ptr_to_u64(const void *ptr)
48 return (__u64) (unsigned long) ptr;
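/* Append a type descriptor to btf->types[], growing the array as needed.
 * Index 0 is reserved for the special "void" type; real types start at 1.
 */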
51 static int btf_add_type(struct btf *btf, struct btf_type *t)
53 if (btf->types_size - btf->nr_types < 2) {
54 struct btf_type **new_types;
55 __u32 expand_by, new_size;
57 if (btf->types_size == BTF_MAX_NR_TYPES)
58 return -E2BIG;
60 expand_by = max(btf->types_size >> 2, 16U);
61 new_size = min(BTF_MAX_NR_TYPES, btf->types_size + expand_by);
63 new_types = realloc(btf->types, sizeof(*new_types) * new_size);
64 if (!new_types)
65 return -ENOMEM;
67 if (btf->nr_types == 0)
68 new_types[0] = &btf_void;
70 btf->types = new_types;
71 btf->types_size = new_size;
74 btf->types[++(btf->nr_types)] = t;
76 return 0;
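/* Validate the raw BTF header (magic, version, flags) and the type/string
 * section offsets before any further parsing.
 */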
79 static int btf_parse_hdr(struct btf *btf)
81 const struct btf_header *hdr = btf->hdr;
82 __u32 meta_left;
84 if (btf->data_size < sizeof(struct btf_header)) {
85 pr_debug("BTF header not found\n");
86 return -EINVAL;
89 if (hdr->magic != BTF_MAGIC) {
90 pr_debug("Invalid BTF magic:%x\n", hdr->magic);
91 return -EINVAL;
94 if (hdr->version != BTF_VERSION) {
95 pr_debug("Unsupported BTF version:%u\n", hdr->version);
96 return -ENOTSUP;
99 if (hdr->flags) {
100 pr_debug("Unsupported BTF flags:%x\n", hdr->flags);
101 return -ENOTSUP;
104 meta_left = btf->data_size - sizeof(*hdr);
105 if (!meta_left) {
106 pr_debug("BTF has no data\n");
107 return -EINVAL;
110 if (meta_left < hdr->type_off) {
111 pr_debug("Invalid BTF type section offset:%u\n", hdr->type_off);
112 return -EINVAL;
115 if (meta_left < hdr->str_off) {
116 pr_debug("Invalid BTF string section offset:%u\n", hdr->str_off);
117 return -EINVAL;
120 if (hdr->type_off >= hdr->str_off) {
121 pr_debug("BTF type section offset >= string section offset. No type?\n");
122 return -EINVAL;
125 if (hdr->type_off & 0x02) {
126 pr_debug("BTF type section is not aligned to 4 bytes\n");
127 return -EINVAL;
130 btf->nohdr_data = btf->hdr + 1;
132 return 0;
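/* Sanity-check the string section: it must be non-empty, start and end with
 * a NUL byte, and stay within BTF_MAX_STR_OFFSET.
 */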
135 static int btf_parse_str_sec(struct btf *btf)
137 const struct btf_header *hdr = btf->hdr;
138 const char *start = btf->nohdr_data + hdr->str_off;
139 const char *end = start + btf->hdr->str_len;
141 if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET ||
142 start[0] || end[-1]) {
143 pr_debug("Invalid BTF string section\n");
144 return -EINVAL;
147 btf->strings = start;
149 return 0;
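/* Size of a single type descriptor, including its kind-specific trailing
 * data (e.g. struct members, enum values, function parameters).
 */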
152 static int btf_type_size(struct btf_type *t)
154 int base_size = sizeof(struct btf_type);
155 __u16 vlen = btf_vlen(t);
157 switch (btf_kind(t)) {
158 case BTF_KIND_FWD:
159 case BTF_KIND_CONST:
160 case BTF_KIND_VOLATILE:
161 case BTF_KIND_RESTRICT:
162 case BTF_KIND_PTR:
163 case BTF_KIND_TYPEDEF:
164 case BTF_KIND_FUNC:
165 return base_size;
166 case BTF_KIND_INT:
167 return base_size + sizeof(__u32);
168 case BTF_KIND_ENUM:
169 return base_size + vlen * sizeof(struct btf_enum);
170 case BTF_KIND_ARRAY:
171 return base_size + sizeof(struct btf_array);
172 case BTF_KIND_STRUCT:
173 case BTF_KIND_UNION:
174 return base_size + vlen * sizeof(struct btf_member);
175 case BTF_KIND_FUNC_PROTO:
176 return base_size + vlen * sizeof(struct btf_param);
177 case BTF_KIND_VAR:
178 return base_size + sizeof(struct btf_var);
179 case BTF_KIND_DATASEC:
180 return base_size + vlen * sizeof(struct btf_var_secinfo);
181 default:
182 pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
183 return -EINVAL;
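/* Walk the type section and register every type descriptor via btf_add_type(). */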
187 static int btf_parse_type_sec(struct btf *btf)
189 struct btf_header *hdr = btf->hdr;
190 void *nohdr_data = btf->nohdr_data;
191 void *next_type = nohdr_data + hdr->type_off;
192 void *end_type = nohdr_data + hdr->str_off;
194 while (next_type < end_type) {
195 struct btf_type *t = next_type;
196 int type_size;
197 int err;
199 type_size = btf_type_size(t);
200 if (type_size < 0)
201 return type_size;
202 next_type += type_size;
203 err = btf_add_type(btf, t);
204 if (err)
205 return err;
208 return 0;
211 __u32 btf__get_nr_types(const struct btf *btf)
213 return btf->nr_types;
216 const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
218 if (type_id > btf->nr_types)
219 return NULL;
221 return btf->types[type_id];
224 static bool btf_type_is_void(const struct btf_type *t)
226 return t == &btf_void || btf_is_fwd(t);
229 static bool btf_type_is_void_or_null(const struct btf_type *t)
231 return !t || btf_type_is_void(t);
234 #define MAX_RESOLVE_DEPTH 32
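/* Resolve the byte size of a type, following typedefs/modifiers and
 * multiplying out array dimensions, bounded by MAX_RESOLVE_DEPTH.
 */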
236 __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
238 const struct btf_array *array;
239 const struct btf_type *t;
240 __u32 nelems = 1;
241 __s64 size = -1;
242 int i;
244 t = btf__type_by_id(btf, type_id);
245 for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t);
246 i++) {
247 switch (btf_kind(t)) {
248 case BTF_KIND_INT:
249 case BTF_KIND_STRUCT:
250 case BTF_KIND_UNION:
251 case BTF_KIND_ENUM:
252 case BTF_KIND_DATASEC:
253 size = t->size;
254 goto done;
255 case BTF_KIND_PTR:
256 size = sizeof(void *);
257 goto done;
258 case BTF_KIND_TYPEDEF:
259 case BTF_KIND_VOLATILE:
260 case BTF_KIND_CONST:
261 case BTF_KIND_RESTRICT:
262 case BTF_KIND_VAR:
263 type_id = t->type;
264 break;
265 case BTF_KIND_ARRAY:
266 array = btf_array(t);
267 if (nelems && array->nelems > UINT32_MAX / nelems)
268 return -E2BIG;
269 nelems *= array->nelems;
270 type_id = array->type;
271 break;
272 default:
273 return -EINVAL;
276 t = btf__type_by_id(btf, type_id);
279 done:
280 if (size < 0)
281 return -EINVAL;
282 if (nelems && size > UINT32_MAX / nelems)
283 return -E2BIG;
285 return nelems * size;
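/* Natural alignment of a type: ints/enums use min(size, pointer size),
 * pointers use pointer size, structs/unions take the maximum member alignment.
 */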
288 int btf__align_of(const struct btf *btf, __u32 id)
290 const struct btf_type *t = btf__type_by_id(btf, id);
291 __u16 kind = btf_kind(t);
293 switch (kind) {
294 case BTF_KIND_INT:
295 case BTF_KIND_ENUM:
296 return min(sizeof(void *), (size_t)t->size);
297 case BTF_KIND_PTR:
298 return sizeof(void *);
299 case BTF_KIND_TYPEDEF:
300 case BTF_KIND_VOLATILE:
301 case BTF_KIND_CONST:
302 case BTF_KIND_RESTRICT:
303 return btf__align_of(btf, t->type);
304 case BTF_KIND_ARRAY:
305 return btf__align_of(btf, btf_array(t)->type);
306 case BTF_KIND_STRUCT:
307 case BTF_KIND_UNION: {
308 const struct btf_member *m = btf_members(t);
309 __u16 vlen = btf_vlen(t);
310 int i, max_align = 1, align;
312 for (i = 0; i < vlen; i++, m++) {
313 align = btf__align_of(btf, m->type);
314 if (align <= 0)
315 return align;
316 max_align = max(max_align, align);
319 return max_align;
321 default:
322 pr_warn("unsupported BTF_KIND:%u\n", btf_kind(t));
323 return 0;
327 int btf__resolve_type(const struct btf *btf, __u32 type_id)
329 const struct btf_type *t;
330 int depth = 0;
332 t = btf__type_by_id(btf, type_id);
333 while (depth < MAX_RESOLVE_DEPTH &&
334 !btf_type_is_void_or_null(t) &&
335 (btf_is_mod(t) || btf_is_typedef(t) || btf_is_var(t))) {
336 type_id = t->type;
337 t = btf__type_by_id(btf, type_id);
338 depth++;
341 if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t))
342 return -EINVAL;
344 return type_id;
347 __s32 btf__find_by_name(const struct btf *btf, const char *type_name)
349 __u32 i;
351 if (!strcmp(type_name, "void"))
352 return 0;
354 for (i = 1; i <= btf->nr_types; i++) {
355 const struct btf_type *t = btf->types[i];
356 const char *name = btf__name_by_offset(btf, t->name_off);
358 if (name && !strcmp(type_name, name))
359 return i;
362 return -ENOENT;
365 __s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
366 __u32 kind)
368 __u32 i;
370 if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
371 return 0;
373 for (i = 1; i <= btf->nr_types; i++) {
374 const struct btf_type *t = btf->types[i];
375 const char *name;
377 if (btf_kind(t) != kind)
378 continue;
379 name = btf__name_by_offset(btf, t->name_off);
380 if (name && !strcmp(type_name, name))
381 return i;
384 return -ENOENT;
387 void btf__free(struct btf *btf)
389 if (!btf)
390 return;
392 if (btf->fd != -1)
393 close(btf->fd);
395 free(btf->data);
396 free(btf->types);
397 free(btf);
400 struct btf *btf__new(__u8 *data, __u32 size)
402 struct btf *btf;
403 int err;
405 btf = calloc(1, sizeof(struct btf));
406 if (!btf)
407 return ERR_PTR(-ENOMEM);
409 btf->fd = -1;
411 btf->data = malloc(size);
412 if (!btf->data) {
413 err = -ENOMEM;
414 goto done;
417 memcpy(btf->data, data, size);
418 btf->data_size = size;
420 err = btf_parse_hdr(btf);
421 if (err)
422 goto done;
424 err = btf_parse_str_sec(btf);
425 if (err)
426 goto done;
428 err = btf_parse_type_sec(btf);
430 done:
431 if (err) {
432 btf__free(btf);
433 return ERR_PTR(err);
436 return btf;
439 static bool btf_check_endianness(const GElf_Ehdr *ehdr)
441 #if __BYTE_ORDER == __LITTLE_ENDIAN
442 return ehdr->e_ident[EI_DATA] == ELFDATA2LSB;
443 #elif __BYTE_ORDER == __BIG_ENDIAN
444 return ehdr->e_ident[EI_DATA] == ELFDATA2MSB;
445 #else
446 # error "Unrecognized __BYTE_ORDER__"
447 #endif
450 struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
452 Elf_Data *btf_data = NULL, *btf_ext_data = NULL;
453 int err = 0, fd = -1, idx = 0;
454 struct btf *btf = NULL;
455 Elf_Scn *scn = NULL;
456 Elf *elf = NULL;
457 GElf_Ehdr ehdr;
459 if (elf_version(EV_CURRENT) == EV_NONE) {
460 pr_warn("failed to init libelf for %s\n", path);
461 return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
464 fd = open(path, O_RDONLY);
465 if (fd < 0) {
466 err = -errno;
467 pr_warn("failed to open %s: %s\n", path, strerror(errno));
468 return ERR_PTR(err);
471 err = -LIBBPF_ERRNO__FORMAT;
473 elf = elf_begin(fd, ELF_C_READ, NULL);
474 if (!elf) {
475 pr_warn("failed to open %s as ELF file\n", path);
476 goto done;
478 if (!gelf_getehdr(elf, &ehdr)) {
479 pr_warn("failed to get EHDR from %s\n", path);
480 goto done;
482 if (!btf_check_endianness(&ehdr)) {
483 pr_warn("non-native ELF endianness is not supported\n");
484 goto done;
486 if (!elf_rawdata(elf_getscn(elf, ehdr.e_shstrndx), NULL)) {
487 pr_warn("failed to get e_shstrndx from %s\n", path);
488 goto done;
491 while ((scn = elf_nextscn(elf, scn)) != NULL) {
492 GElf_Shdr sh;
493 char *name;
495 idx++;
496 if (gelf_getshdr(scn, &sh) != &sh) {
497 pr_warn("failed to get section(%d) header from %s\n",
498 idx, path);
499 goto done;
501 name = elf_strptr(elf, ehdr.e_shstrndx, sh.sh_name);
502 if (!name) {
503 pr_warn("failed to get section(%d) name from %s\n",
504 idx, path);
505 goto done;
507 if (strcmp(name, BTF_ELF_SEC) == 0) {
508 btf_data = elf_getdata(scn, 0);
509 if (!btf_data) {
510 pr_warn("failed to get section(%d, %s) data from %s\n",
511 idx, name, path);
512 goto done;
514 continue;
515 } else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) {
516 btf_ext_data = elf_getdata(scn, 0);
517 if (!btf_ext_data) {
518 pr_warn("failed to get section(%d, %s) data from %s\n",
519 idx, name, path);
520 goto done;
522 continue;
526 err = 0;
528 if (!btf_data) {
529 err = -ENOENT;
530 goto done;
532 btf = btf__new(btf_data->d_buf, btf_data->d_size);
533 if (IS_ERR(btf))
534 goto done;
536 if (btf_ext && btf_ext_data) {
537 *btf_ext = btf_ext__new(btf_ext_data->d_buf,
538 btf_ext_data->d_size);
539 if (IS_ERR(*btf_ext))
540 goto done;
541 } else if (btf_ext) {
542 *btf_ext = NULL;
544 done:
545 if (elf)
546 elf_end(elf);
547 close(fd);
549 if (err)
550 return ERR_PTR(err);
552 * btf is always parsed before btf_ext, so no need to clean up
553 * btf_ext, if btf loading failed
555 if (IS_ERR(btf))
556 return btf;
557 if (btf_ext && IS_ERR(*btf_ext)) {
558 btf__free(btf);
559 err = PTR_ERR(*btf_ext);
560 return ERR_PTR(err);
562 return btf;
565 static int compare_vsi_off(const void *_a, const void *_b)
567 const struct btf_var_secinfo *a = _a;
568 const struct btf_var_secinfo *b = _b;
570 return a->offset - b->offset;
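/* Fix up a DATASEC type: take the section size and per-variable offsets from
 * the ELF object (the compiler can't know them), then sort vars by offset.
 */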
573 static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
574 struct btf_type *t)
576 __u32 size = 0, off = 0, i, vars = btf_vlen(t);
577 const char *name = btf__name_by_offset(btf, t->name_off);
578 const struct btf_type *t_var;
579 struct btf_var_secinfo *vsi;
580 const struct btf_var *var;
581 int ret;
583 if (!name) {
584 pr_debug("No name found in string section for DATASEC kind.\n");
585 return -ENOENT;
588 /* .extern datasec size and var offsets were set correctly during
589 * extern collection step, so just skip straight to sorting variables
591 if (t->size)
592 goto sort_vars;
594 ret = bpf_object__section_size(obj, name, &size);
595 if (ret || !size || (t->size && t->size != size)) {
596 pr_debug("Invalid size for section %s: %u bytes\n", name, size);
597 return -ENOENT;
600 t->size = size;
602 for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
603 t_var = btf__type_by_id(btf, vsi->type);
604 var = btf_var(t_var);
606 if (!btf_is_var(t_var)) {
607 pr_debug("Non-VAR type seen in section %s\n", name);
608 return -EINVAL;
611 if (var->linkage == BTF_VAR_STATIC)
612 continue;
614 name = btf__name_by_offset(btf, t_var->name_off);
615 if (!name) {
616 pr_debug("No name found in string section for VAR kind\n");
617 return -ENOENT;
620 ret = bpf_object__variable_offset(obj, name, &off);
621 if (ret) {
622 pr_debug("No offset found in symbol table for VAR %s\n",
623 name);
624 return -ENOENT;
627 vsi->offset = off;
630 sort_vars:
631 qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off);
632 return 0;
635 int btf__finalize_data(struct bpf_object *obj, struct btf *btf)
637 int err = 0;
638 __u32 i;
640 for (i = 1; i <= btf->nr_types; i++) {
641 struct btf_type *t = btf->types[i];
643 /* Loader needs to fix up some of the things compiler
644 * couldn't get its hands on while emitting BTF. This
645 * is section size and global variable offset. We use
646 * the info from the ELF itself for this purpose.
648 if (btf_is_datasec(t)) {
649 err = btf_fixup_datasec(obj, btf, t);
650 if (err)
651 break;
655 return err;
658 int btf__load(struct btf *btf)
660 __u32 log_buf_size = 0;
661 char *log_buf = NULL;
662 int err = 0;
664 if (btf->fd >= 0)
665 return -EEXIST;
667 retry_load:
668 if (log_buf_size) {
669 log_buf = malloc(log_buf_size);
670 if (!log_buf)
671 return -ENOMEM;
673 *log_buf = 0;
676 btf->fd = bpf_load_btf(btf->data, btf->data_size,
677 log_buf, log_buf_size, false);
678 if (btf->fd < 0) {
679 if (!log_buf || errno == ENOSPC) {
680 log_buf_size = max((__u32)BPF_LOG_BUF_SIZE,
681 log_buf_size << 1);
682 free(log_buf);
683 goto retry_load;
686 err = -errno;
687 pr_warn("Error loading BTF: %s(%d)\n", strerror(errno), errno);
688 if (*log_buf)
689 pr_warn("%s\n", log_buf);
690 goto done;
693 done:
694 free(log_buf);
695 return err;
698 int btf__fd(const struct btf *btf)
700 return btf->fd;
703 const void *btf__get_raw_data(const struct btf *btf, __u32 *size)
705 *size = btf->data_size;
706 return btf->data;
709 const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
711 if (offset < btf->hdr->str_len)
712 return &btf->strings[offset];
713 else
714 return NULL;
717 int btf__get_from_id(__u32 id, struct btf **btf)
719 struct bpf_btf_info btf_info = { 0 };
720 __u32 len = sizeof(btf_info);
721 __u32 last_size;
722 int btf_fd;
723 void *ptr;
724 int err;
726 err = 0;
727 *btf = NULL;
728 btf_fd = bpf_btf_get_fd_by_id(id);
729 if (btf_fd < 0)
730 return 0;
732 /* we won't know btf_size until we call bpf_obj_get_info_by_fd(). so
733 * let's start with a sane default - 4KiB here - and resize it only if
734 * bpf_obj_get_info_by_fd() needs a bigger buffer.
736 btf_info.btf_size = 4096;
737 last_size = btf_info.btf_size;
738 ptr = malloc(last_size);
739 if (!ptr) {
740 err = -ENOMEM;
741 goto exit_free;
744 memset(ptr, 0, last_size);
745 btf_info.btf = ptr_to_u64(ptr);
746 err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
748 if (!err && btf_info.btf_size > last_size) {
749 void *temp_ptr;
751 last_size = btf_info.btf_size;
752 temp_ptr = realloc(ptr, last_size);
753 if (!temp_ptr) {
754 err = -ENOMEM;
755 goto exit_free;
757 ptr = temp_ptr;
758 memset(ptr, 0, last_size);
759 btf_info.btf = ptr_to_u64(ptr);
760 err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
763 if (err || btf_info.btf_size > last_size) {
764 err = errno;
765 goto exit_free;
768 *btf = btf__new((__u8 *)(long)btf_info.btf, btf_info.btf_size);
769 if (IS_ERR(*btf)) {
770 err = PTR_ERR(*btf);
771 *btf = NULL;
774 exit_free:
775 close(btf_fd);
776 free(ptr);
778 return err;
781 int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
782 __u32 expected_key_size, __u32 expected_value_size,
783 __u32 *key_type_id, __u32 *value_type_id)
785 const struct btf_type *container_type;
786 const struct btf_member *key, *value;
787 const size_t max_name = 256;
788 char container_name[max_name];
789 __s64 key_size, value_size;
790 __s32 container_id;
792 if (snprintf(container_name, max_name, "____btf_map_%s", map_name) ==
793 max_name) {
794 pr_warn("map:%s length of '____btf_map_%s' is too long\n",
795 map_name, map_name);
796 return -EINVAL;
799 container_id = btf__find_by_name(btf, container_name);
800 if (container_id < 0) {
801 pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
802 map_name, container_name);
803 return container_id;
806 container_type = btf__type_by_id(btf, container_id);
807 if (!container_type) {
808 pr_warn("map:%s cannot find BTF type for container_id:%u\n",
809 map_name, container_id);
810 return -EINVAL;
813 if (!btf_is_struct(container_type) || btf_vlen(container_type) < 2) {
814 pr_warn("map:%s container_name:%s is an invalid container struct\n",
815 map_name, container_name);
816 return -EINVAL;
819 key = btf_members(container_type);
820 value = key + 1;
822 key_size = btf__resolve_size(btf, key->type);
823 if (key_size < 0) {
824 pr_warn("map:%s invalid BTF key_type_size\n", map_name);
825 return key_size;
828 if (expected_key_size != key_size) {
829 pr_warn("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
830 map_name, (__u32)key_size, expected_key_size);
831 return -EINVAL;
834 value_size = btf__resolve_size(btf, value->type);
835 if (value_size < 0) {
836 pr_warn("map:%s invalid BTF value_type_size\n", map_name);
837 return value_size;
840 if (expected_value_size != value_size) {
841 pr_warn("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
842 map_name, (__u32)value_size, expected_value_size);
843 return -EINVAL;
846 *key_type_id = key->type;
847 *value_type_id = value->type;
849 return 0;
852 struct btf_ext_sec_setup_param {
853 __u32 off;
854 __u32 len;
855 __u32 min_rec_size;
856 struct btf_ext_info *ext_info;
857 const char *desc;
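/* Parse and validate one .BTF.ext sub-section (func_info, line_info or
 * field_reloc): check alignment, record size and per-section record counts,
 * then remember the parsed view in ext_sec->ext_info.
 */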
860 static int btf_ext_setup_info(struct btf_ext *btf_ext,
861 struct btf_ext_sec_setup_param *ext_sec)
863 const struct btf_ext_info_sec *sinfo;
864 struct btf_ext_info *ext_info;
865 __u32 info_left, record_size;
866 /* The start of the info sec (including the __u32 record_size). */
867 void *info;
869 if (ext_sec->len == 0)
870 return 0;
872 if (ext_sec->off & 0x03) {
873 pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n",
874 ext_sec->desc);
875 return -EINVAL;
878 info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off;
879 info_left = ext_sec->len;
881 if (btf_ext->data + btf_ext->data_size < info + ext_sec->len) {
882 pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n",
883 ext_sec->desc, ext_sec->off, ext_sec->len);
884 return -EINVAL;
887 /* At least a record size */
888 if (info_left < sizeof(__u32)) {
889 pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc);
890 return -EINVAL;
893 /* The record size needs to meet the minimum standard */
894 record_size = *(__u32 *)info;
895 if (record_size < ext_sec->min_rec_size ||
896 record_size & 0x03) {
897 pr_debug("%s section in .BTF.ext has invalid record size %u\n",
898 ext_sec->desc, record_size);
899 return -EINVAL;
902 sinfo = info + sizeof(__u32);
903 info_left -= sizeof(__u32);
905 /* If no records, return failure now so .BTF.ext won't be used. */
906 if (!info_left) {
907 pr_debug("%s section in .BTF.ext has no records", ext_sec->desc);
908 return -EINVAL;
911 while (info_left) {
912 unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec);
913 __u64 total_record_size;
914 __u32 num_records;
916 if (info_left < sec_hdrlen) {
917 pr_debug("%s section header is not found in .BTF.ext\n",
918 ext_sec->desc);
919 return -EINVAL;
922 num_records = sinfo->num_info;
923 if (num_records == 0) {
924 pr_debug("%s section has incorrect num_records in .BTF.ext\n",
925 ext_sec->desc);
926 return -EINVAL;
929 total_record_size = sec_hdrlen +
930 (__u64)num_records * record_size;
931 if (info_left < total_record_size) {
932 pr_debug("%s section has incorrect num_records in .BTF.ext\n",
933 ext_sec->desc);
934 return -EINVAL;
937 info_left -= total_record_size;
938 sinfo = (void *)sinfo + total_record_size;
941 ext_info = ext_sec->ext_info;
942 ext_info->len = ext_sec->len - sizeof(__u32);
943 ext_info->rec_size = record_size;
944 ext_info->info = info + sizeof(__u32);
946 return 0;
949 static int btf_ext_setup_func_info(struct btf_ext *btf_ext)
951 struct btf_ext_sec_setup_param param = {
952 .off = btf_ext->hdr->func_info_off,
953 .len = btf_ext->hdr->func_info_len,
954 .min_rec_size = sizeof(struct bpf_func_info_min),
955 .ext_info = &btf_ext->func_info,
956 .desc = "func_info"
959 return btf_ext_setup_info(btf_ext, &param);
962 static int btf_ext_setup_line_info(struct btf_ext *btf_ext)
964 struct btf_ext_sec_setup_param param = {
965 .off = btf_ext->hdr->line_info_off,
966 .len = btf_ext->hdr->line_info_len,
967 .min_rec_size = sizeof(struct bpf_line_info_min),
968 .ext_info = &btf_ext->line_info,
969 .desc = "line_info",
972 return btf_ext_setup_info(btf_ext, &param);
975 static int btf_ext_setup_field_reloc(struct btf_ext *btf_ext)
977 struct btf_ext_sec_setup_param param = {
978 .off = btf_ext->hdr->field_reloc_off,
979 .len = btf_ext->hdr->field_reloc_len,
980 .min_rec_size = sizeof(struct bpf_field_reloc),
981 .ext_info = &btf_ext->field_reloc_info,
982 .desc = "field_reloc",
985 return btf_ext_setup_info(btf_ext, &param);
988 static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
990 const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
992 if (data_size < offsetofend(struct btf_ext_header, hdr_len) ||
993 data_size < hdr->hdr_len) {
994 pr_debug("BTF.ext header not found");
995 return -EINVAL;
998 if (hdr->magic != BTF_MAGIC) {
999 pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic);
1000 return -EINVAL;
1003 if (hdr->version != BTF_VERSION) {
1004 pr_debug("Unsupported BTF.ext version:%u\n", hdr->version);
1005 return -ENOTSUP;
1008 if (hdr->flags) {
1009 pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags);
1010 return -ENOTSUP;
1013 if (data_size == hdr->hdr_len) {
1014 pr_debug("BTF.ext has no data\n");
1015 return -EINVAL;
1018 return 0;
1021 void btf_ext__free(struct btf_ext *btf_ext)
1023 if (!btf_ext)
1024 return;
1025 free(btf_ext->data);
1026 free(btf_ext);
1029 struct btf_ext *btf_ext__new(__u8 *data, __u32 size)
1031 struct btf_ext *btf_ext;
1032 int err;
1034 err = btf_ext_parse_hdr(data, size);
1035 if (err)
1036 return ERR_PTR(err);
1038 btf_ext = calloc(1, sizeof(struct btf_ext));
1039 if (!btf_ext)
1040 return ERR_PTR(-ENOMEM);
1042 btf_ext->data_size = size;
1043 btf_ext->data = malloc(size);
1044 if (!btf_ext->data) {
1045 err = -ENOMEM;
1046 goto done;
1048 memcpy(btf_ext->data, data, size);
1050 if (btf_ext->hdr->hdr_len <
1051 offsetofend(struct btf_ext_header, line_info_len))
1052 goto done;
1053 err = btf_ext_setup_func_info(btf_ext);
1054 if (err)
1055 goto done;
1057 err = btf_ext_setup_line_info(btf_ext);
1058 if (err)
1059 goto done;
1061 if (btf_ext->hdr->hdr_len <
1062 offsetofend(struct btf_ext_header, field_reloc_len))
1063 goto done;
1064 err = btf_ext_setup_field_reloc(btf_ext);
1065 if (err)
1066 goto done;
1068 done:
1069 if (err) {
1070 btf_ext__free(btf_ext);
1071 return ERR_PTR(err);
1074 return btf_ext;
1077 const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size)
1079 *size = btf_ext->data_size;
1080 return btf_ext->data;
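/* Copy the .BTF.ext records for a given ELF section into *info, converting
 * each record's insn_off from a byte offset into an instruction index and
 * shifting it by insns_cnt.
 */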
1083 static int btf_ext_reloc_info(const struct btf *btf,
1084 const struct btf_ext_info *ext_info,
1085 const char *sec_name, __u32 insns_cnt,
1086 void **info, __u32 *cnt)
1088 __u32 sec_hdrlen = sizeof(struct btf_ext_info_sec);
1089 __u32 i, record_size, existing_len, records_len;
1090 struct btf_ext_info_sec *sinfo;
1091 const char *info_sec_name;
1092 __u64 remain_len;
1093 void *data;
1095 record_size = ext_info->rec_size;
1096 sinfo = ext_info->info;
1097 remain_len = ext_info->len;
1098 while (remain_len > 0) {
1099 records_len = sinfo->num_info * record_size;
1100 info_sec_name = btf__name_by_offset(btf, sinfo->sec_name_off);
1101 if (strcmp(info_sec_name, sec_name)) {
1102 remain_len -= sec_hdrlen + records_len;
1103 sinfo = (void *)sinfo + sec_hdrlen + records_len;
1104 continue;
1107 existing_len = (*cnt) * record_size;
1108 data = realloc(*info, existing_len + records_len);
1109 if (!data)
1110 return -ENOMEM;
1112 memcpy(data + existing_len, sinfo->data, records_len);
1113 /* adjust insn_off only, the rest of the data will be passed
1114 * to the kernel.
1116 for (i = 0; i < sinfo->num_info; i++) {
1117 __u32 *insn_off;
1119 insn_off = data + existing_len + (i * record_size);
1120 *insn_off = *insn_off / sizeof(struct bpf_insn) +
1121 insns_cnt;
1123 *info = data;
1124 *cnt += sinfo->num_info;
1125 return 0;
1128 return -ENOENT;
1131 int btf_ext__reloc_func_info(const struct btf *btf,
1132 const struct btf_ext *btf_ext,
1133 const char *sec_name, __u32 insns_cnt,
1134 void **func_info, __u32 *cnt)
1136 return btf_ext_reloc_info(btf, &btf_ext->func_info, sec_name,
1137 insns_cnt, func_info, cnt);
1140 int btf_ext__reloc_line_info(const struct btf *btf,
1141 const struct btf_ext *btf_ext,
1142 const char *sec_name, __u32 insns_cnt,
1143 void **line_info, __u32 *cnt)
1145 return btf_ext_reloc_info(btf, &btf_ext->line_info, sec_name,
1146 insns_cnt, line_info, cnt);
1149 __u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext)
1151 return btf_ext->func_info.rec_size;
1154 __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext)
1156 return btf_ext->line_info.rec_size;
1159 struct btf_dedup;
1161 static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
1162 const struct btf_dedup_opts *opts);
1163 static void btf_dedup_free(struct btf_dedup *d);
1164 static int btf_dedup_strings(struct btf_dedup *d);
1165 static int btf_dedup_prim_types(struct btf_dedup *d);
1166 static int btf_dedup_struct_types(struct btf_dedup *d);
1167 static int btf_dedup_ref_types(struct btf_dedup *d);
1168 static int btf_dedup_compact_types(struct btf_dedup *d);
1169 static int btf_dedup_remap_types(struct btf_dedup *d);
1172 * Deduplicate BTF types and strings.
1174 * BTF dedup algorithm takes as an input `struct btf` representing `.BTF` ELF
1175 * section with all BTF type descriptors and string data. It overwrites that
1176 * memory in-place with deduplicated types and strings without any loss of
1177 * information. If optional `struct btf_ext` representing '.BTF.ext' ELF section
1178 * is provided, all the strings referenced from .BTF.ext section are honored
1179 * and updated to point to the right offsets after deduplication.
1181 * If function returns with error, type/string data might be garbled and should
1182 * be discarded.
1184 * A more verbose and detailed description of both the problem btf_dedup is
1185 * solving and its solution can be found at:
1186 * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html
1188 * Problem description and justification
1189 * =====================================
1191 * BTF type information is typically emitted either as a result of conversion
1192 * from DWARF to BTF or directly by compiler. In both cases, each compilation
1193 * unit contains information about a subset of all the types that are used
1194 * in an application. These subsets are frequently overlapping and contain a lot
1195 * of duplicated information when later concatenated together into a single
1196 * binary. This algorithm ensures that each unique type is represented by a
1197 * single BTF type descriptor, greatly reducing the resulting size of BTF data.
1199 * Compilation unit isolation and subsequent duplication of data is not the only
1200 * problem. The same type hierarchy (e.g., struct and all the type that struct
1201 * references) in different compilation units can be represented in BTF to
1202 * various degrees of completeness (or, rather, incompleteness) due to
1203 * struct/union forward declarations.
1205 * Let's take a look at an example, that we'll use to better understand the
1206 * problem (and solution). Suppose we have two compilation units, each using
1207 * same `struct S`, but each of them having incomplete type information about
1208 * struct's fields:
1210 * // CU #1:
1211 * struct S;
1212 * struct A {
1213 * int a;
1214 * struct A* self;
1215 * struct S* parent;
1216 * };
1217 * struct B;
1218 * struct S {
1219 * struct A* a_ptr;
1220 * struct B* b_ptr;
1221 * };
1223 * // CU #2:
1224 * struct S;
1225 * struct A;
1226 * struct B {
1227 * int b;
1228 * struct B* self;
1229 * struct S* parent;
1230 * };
1231 * struct S {
1232 * struct A* a_ptr;
1233 * struct B* b_ptr;
1234 * };
1236 * In case of CU #1, BTF data will know only that `struct B` exists (but no
1237 * more), but will know the complete type information about `struct A`. While
1238 * for CU #2, it will know full type information about `struct B`, but will
1239 * only know about forward declaration of `struct A` (in BTF terms, it will
1240 * have `BTF_KIND_FWD` type descriptor with name `A`).
1242 * This compilation unit isolation means that it's possible that there is no
1243 * single CU with complete type information describing structs `S`, `A`, and
1244 * `B`. Also, we might get tons of duplicated and redundant type information.
1246 * Additional complication we need to keep in mind comes from the fact that
1247 * types, in general, can form graphs containing cycles, not just DAGs.
1249 * While the algorithm performs deduplication, it also merges and resolves type
1250 * information (unless disabled through `struct btf_dedup_opts`), whenever possible.
1251 * E.g., in the example above with two compilation units having partial type
1252 * information for structs `A` and `B`, the output of algorithm will emit
1253 * a single copy of each BTF type that describes structs `A`, `B`, and `S`
1254 * (as well as type information for `int` and pointers), as if they were defined
1255 * in a single compilation unit as:
1257 * struct A {
1258 * int a;
1259 * struct A* self;
1260 * struct S* parent;
1261 * };
1262 * struct B {
1263 * int b;
1264 * struct B* self;
1265 * struct S* parent;
1266 * };
1267 * struct S {
1268 * struct A* a_ptr;
1269 * struct B* b_ptr;
1270 * };
1272 * Algorithm summary
1273 * =================
1275 * Algorithm completes its work in 6 separate passes:
1277 * 1. Strings deduplication.
1278 * 2. Primitive types deduplication (int, enum, fwd).
1279 * 3. Struct/union types deduplication.
1280 * 4. Reference types deduplication (pointers, typedefs, arrays, funcs, func
1281 * protos, and const/volatile/restrict modifiers).
1282 * 5. Types compaction.
1283 * 6. Types remapping.
1285 * Algorithm determines canonical type descriptor, which is a single
1286 * representative type for each truly unique type. This canonical type is the
1287 * one that will go into final deduplicated BTF type information. For
1288 * struct/unions, it is also the type that algorithm will merge additional type
1289 * information into (while resolving FWDs), as it discovers it from data in
1290 * other CUs. Each input BTF type eventually gets either mapped to itself, if
1291 * that type is canonical, or to some other type, if that type is equivalent
1292 * and was chosen as canonical representative. This mapping is stored in
1293 * `btf_dedup->map` array. This map is also used to record STRUCT/UNION that
1294 * FWD type got resolved to.
1296 * To facilitate fast discovery of canonical types, we also maintain canonical
1297 * index (`btf_dedup->dedup_table`), which maps type descriptor's signature hash
1298 * (i.e., hashed kind, name, size, fields, etc) into a list of canonical types
1299 * that match that signature. With sufficiently good choice of type signature
1300 * hashing function, we can limit number of canonical types for each unique type
1301 * signature to a very small number, allowing to find canonical type for any
1302 * duplicated type very quickly.
1304 * Struct/union deduplication is the most critical part, and the algorithm for
1305 * deduplicating structs/unions is described in greater detail in comments for
1306 * the `btf_dedup_is_equiv` function.
1308 int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
1309 const struct btf_dedup_opts *opts)
1311 struct btf_dedup *d = btf_dedup_new(btf, btf_ext, opts);
1312 int err;
1314 if (IS_ERR(d)) {
1315 pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d));
1316 return -EINVAL;
1319 err = btf_dedup_strings(d);
1320 if (err < 0) {
1321 pr_debug("btf_dedup_strings failed:%d\n", err);
1322 goto done;
1324 err = btf_dedup_prim_types(d);
1325 if (err < 0) {
1326 pr_debug("btf_dedup_prim_types failed:%d\n", err);
1327 goto done;
1329 err = btf_dedup_struct_types(d);
1330 if (err < 0) {
1331 pr_debug("btf_dedup_struct_types failed:%d\n", err);
1332 goto done;
1334 err = btf_dedup_ref_types(d);
1335 if (err < 0) {
1336 pr_debug("btf_dedup_ref_types failed:%d\n", err);
1337 goto done;
1339 err = btf_dedup_compact_types(d);
1340 if (err < 0) {
1341 pr_debug("btf_dedup_compact_types failed:%d\n", err);
1342 goto done;
1344 err = btf_dedup_remap_types(d);
1345 if (err < 0) {
1346 pr_debug("btf_dedup_remap_types failed:%d\n", err);
1347 goto done;
1350 done:
1351 btf_dedup_free(d);
1352 return err;
1355 #define BTF_UNPROCESSED_ID ((__u32)-1)
1356 #define BTF_IN_PROGRESS_ID ((__u32)-2)
1358 struct btf_dedup {
1359 /* .BTF section to be deduped in-place */
1360 struct btf *btf;
1362 * Optional .BTF.ext section. When provided, any strings referenced
1363 * from it will be taken into account when deduping strings
1365 struct btf_ext *btf_ext;
1367 * This is a map from any type's signature hash to a list of possible
1368 * canonical representative type candidates. Hash collisions are
1369 * ignored, so even types of various kinds can share same list of
1370 * candidates, which is fine because we rely on subsequent
1371 * btf_xxx_equal() checks to authoritatively verify type equality.
1373 struct hashmap *dedup_table;
1374 /* Canonical types map */
1375 __u32 *map;
1376 /* Hypothetical mapping, used during type graph equivalence checks */
1377 __u32 *hypot_map;
1378 __u32 *hypot_list;
1379 size_t hypot_cnt;
1380 size_t hypot_cap;
1381 /* Various options modifying behavior of the algorithm */
1382 struct btf_dedup_opts opts;
1385 struct btf_str_ptr {
1386 const char *str;
1387 __u32 new_off;
1388 bool used;
1391 struct btf_str_ptrs {
1392 struct btf_str_ptr *ptrs;
1393 const char *data;
1394 __u32 cnt;
1395 __u32 cap;
1398 static long hash_combine(long h, long value)
1400 return h * 31 + value;
1403 #define for_each_dedup_cand(d, node, hash) \
1404 hashmap__for_each_key_entry(d->dedup_table, node, (void *)hash)
1406 static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id)
1408 return hashmap__append(d->dedup_table,
1409 (void *)hash, (void *)(long)type_id);
1412 static int btf_dedup_hypot_map_add(struct btf_dedup *d,
1413 __u32 from_id, __u32 to_id)
1415 if (d->hypot_cnt == d->hypot_cap) {
1416 __u32 *new_list;
1418 d->hypot_cap += max((size_t)16, d->hypot_cap / 2);
1419 new_list = realloc(d->hypot_list, sizeof(__u32) * d->hypot_cap);
1420 if (!new_list)
1421 return -ENOMEM;
1422 d->hypot_list = new_list;
1424 d->hypot_list[d->hypot_cnt++] = from_id;
1425 d->hypot_map[from_id] = to_id;
1426 return 0;
1429 static void btf_dedup_clear_hypot_map(struct btf_dedup *d)
1431 int i;
1433 for (i = 0; i < d->hypot_cnt; i++)
1434 d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID;
1435 d->hypot_cnt = 0;
1438 static void btf_dedup_free(struct btf_dedup *d)
1440 hashmap__free(d->dedup_table);
1441 d->dedup_table = NULL;
1443 free(d->map);
1444 d->map = NULL;
1446 free(d->hypot_map);
1447 d->hypot_map = NULL;
1449 free(d->hypot_list);
1450 d->hypot_list = NULL;
1452 free(d);
1455 static size_t btf_dedup_identity_hash_fn(const void *key, void *ctx)
1457 return (size_t)key;
1460 static size_t btf_dedup_collision_hash_fn(const void *key, void *ctx)
1462 return 0;
1465 static bool btf_dedup_equal_fn(const void *k1, const void *k2, void *ctx)
1467 return k1 == k2;
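/* Allocate dedup state: the candidate hash table plus the canonical (map) and
 * hypothetical (hypot_map) type ID maps, one slot per type plus one for "void".
 */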
1470 static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
1471 const struct btf_dedup_opts *opts)
1473 struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
1474 hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn;
1475 int i, err = 0;
1477 if (!d)
1478 return ERR_PTR(-ENOMEM);
1480 d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds;
1481 /* dedup_table_size is now used only to force collisions in tests */
1482 if (opts && opts->dedup_table_size == 1)
1483 hash_fn = btf_dedup_collision_hash_fn;
1485 d->btf = btf;
1486 d->btf_ext = btf_ext;
1488 d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL);
1489 if (IS_ERR(d->dedup_table)) {
1490 err = PTR_ERR(d->dedup_table);
1491 d->dedup_table = NULL;
1492 goto done;
1495 d->map = malloc(sizeof(__u32) * (1 + btf->nr_types));
1496 if (!d->map) {
1497 err = -ENOMEM;
1498 goto done;
1500 /* special BTF "void" type is made canonical immediately */
1501 d->map[0] = 0;
1502 for (i = 1; i <= btf->nr_types; i++) {
1503 struct btf_type *t = d->btf->types[i];
1505 /* VAR and DATASEC are never deduped and are self-canonical */
1506 if (btf_is_var(t) || btf_is_datasec(t))
1507 d->map[i] = i;
1508 else
1509 d->map[i] = BTF_UNPROCESSED_ID;
1512 d->hypot_map = malloc(sizeof(__u32) * (1 + btf->nr_types));
1513 if (!d->hypot_map) {
1514 err = -ENOMEM;
1515 goto done;
1517 for (i = 0; i <= btf->nr_types; i++)
1518 d->hypot_map[i] = BTF_UNPROCESSED_ID;
1520 done:
1521 if (err) {
1522 btf_dedup_free(d);
1523 return ERR_PTR(err);
1526 return d;
1529 typedef int (*str_off_fn_t)(__u32 *str_off_ptr, void *ctx);
1532 * Iterate over all possible places in .BTF and .BTF.ext that can reference
1533 * string and pass pointer to it to a provided callback `fn`.
1535 static int btf_for_each_str_off(struct btf_dedup *d, str_off_fn_t fn, void *ctx)
1537 void *line_data_cur, *line_data_end;
1538 int i, j, r, rec_size;
1539 struct btf_type *t;
1541 for (i = 1; i <= d->btf->nr_types; i++) {
1542 t = d->btf->types[i];
1543 r = fn(&t->name_off, ctx);
1544 if (r)
1545 return r;
1547 switch (btf_kind(t)) {
1548 case BTF_KIND_STRUCT:
1549 case BTF_KIND_UNION: {
1550 struct btf_member *m = btf_members(t);
1551 __u16 vlen = btf_vlen(t);
1553 for (j = 0; j < vlen; j++) {
1554 r = fn(&m->name_off, ctx);
1555 if (r)
1556 return r;
1557 m++;
1559 break;
1561 case BTF_KIND_ENUM: {
1562 struct btf_enum *m = btf_enum(t);
1563 __u16 vlen = btf_vlen(t);
1565 for (j = 0; j < vlen; j++) {
1566 r = fn(&m->name_off, ctx);
1567 if (r)
1568 return r;
1569 m++;
1571 break;
1573 case BTF_KIND_FUNC_PROTO: {
1574 struct btf_param *m = btf_params(t);
1575 __u16 vlen = btf_vlen(t);
1577 for (j = 0; j < vlen; j++) {
1578 r = fn(&m->name_off, ctx);
1579 if (r)
1580 return r;
1581 m++;
1583 break;
1585 default:
1586 break;
1590 if (!d->btf_ext)
1591 return 0;
1593 line_data_cur = d->btf_ext->line_info.info;
1594 line_data_end = d->btf_ext->line_info.info + d->btf_ext->line_info.len;
1595 rec_size = d->btf_ext->line_info.rec_size;
1597 while (line_data_cur < line_data_end) {
1598 struct btf_ext_info_sec *sec = line_data_cur;
1599 struct bpf_line_info_min *line_info;
1600 __u32 num_info = sec->num_info;
1602 r = fn(&sec->sec_name_off, ctx);
1603 if (r)
1604 return r;
1606 line_data_cur += sizeof(struct btf_ext_info_sec);
1607 for (i = 0; i < num_info; i++) {
1608 line_info = line_data_cur;
1609 r = fn(&line_info->file_name_off, ctx);
1610 if (r)
1611 return r;
1612 r = fn(&line_info->line_off, ctx);
1613 if (r)
1614 return r;
1615 line_data_cur += rec_size;
1619 return 0;
1622 static int str_sort_by_content(const void *a1, const void *a2)
1624 const struct btf_str_ptr *p1 = a1;
1625 const struct btf_str_ptr *p2 = a2;
1627 return strcmp(p1->str, p2->str);
1630 static int str_sort_by_offset(const void *a1, const void *a2)
1632 const struct btf_str_ptr *p1 = a1;
1633 const struct btf_str_ptr *p2 = a2;
1635 if (p1->str != p2->str)
1636 return p1->str < p2->str ? -1 : 1;
1637 return 0;
1640 static int btf_dedup_str_ptr_cmp(const void *str_ptr, const void *pelem)
1642 const struct btf_str_ptr *p = pelem;
1644 if (str_ptr != p->str)
1645 return (const char *)str_ptr < p->str ? -1 : 1;
1646 return 0;
1649 static int btf_str_mark_as_used(__u32 *str_off_ptr, void *ctx)
1651 struct btf_str_ptrs *strs;
1652 struct btf_str_ptr *s;
1654 if (*str_off_ptr == 0)
1655 return 0;
1657 strs = ctx;
1658 s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt,
1659 sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp);
1660 if (!s)
1661 return -EINVAL;
1662 s->used = true;
1663 return 0;
1666 static int btf_str_remap_offset(__u32 *str_off_ptr, void *ctx)
1668 struct btf_str_ptrs *strs;
1669 struct btf_str_ptr *s;
1671 if (*str_off_ptr == 0)
1672 return 0;
1674 strs = ctx;
1675 s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt,
1676 sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp);
1677 if (!s)
1678 return -EINVAL;
1679 *str_off_ptr = s->new_off;
1680 return 0;
1684 * Dedup strings and filter out those that are not referenced from either .BTF
1685 * or .BTF.ext (if provided) sections.
1687 * This is done by building index of all strings in BTF's string section,
1688 * then iterating over all entities that can reference strings (e.g., type
1689 * names, struct field names, .BTF.ext line info, etc) and marking corresponding
1690 * strings as used. After that all used strings are deduped and compacted into
1691 * sequential blob of memory and new offsets are calculated. Then all the string
1692 * references are iterated again and rewritten using new offsets.
1694 static int btf_dedup_strings(struct btf_dedup *d)
1696 const struct btf_header *hdr = d->btf->hdr;
1697 char *start = (char *)d->btf->nohdr_data + hdr->str_off;
1698 char *end = start + d->btf->hdr->str_len;
1699 char *p = start, *tmp_strs = NULL;
1700 struct btf_str_ptrs strs = {
1701 .cnt = 0,
1702 .cap = 0,
1703 .ptrs = NULL,
1704 .data = start,
1706 int i, j, err = 0, grp_idx;
1707 bool grp_used;
1709 /* build index of all strings */
1710 while (p < end) {
1711 if (strs.cnt + 1 > strs.cap) {
1712 struct btf_str_ptr *new_ptrs;
1714 strs.cap += max(strs.cnt / 2, 16U);
1715 new_ptrs = realloc(strs.ptrs,
1716 sizeof(strs.ptrs[0]) * strs.cap);
1717 if (!new_ptrs) {
1718 err = -ENOMEM;
1719 goto done;
1721 strs.ptrs = new_ptrs;
1724 strs.ptrs[strs.cnt].str = p;
1725 strs.ptrs[strs.cnt].used = false;
1727 p += strlen(p) + 1;
1728 strs.cnt++;
1731 /* temporary storage for deduplicated strings */
1732 tmp_strs = malloc(d->btf->hdr->str_len);
1733 if (!tmp_strs) {
1734 err = -ENOMEM;
1735 goto done;
1738 /* mark all used strings */
1739 strs.ptrs[0].used = true;
1740 err = btf_for_each_str_off(d, btf_str_mark_as_used, &strs);
1741 if (err)
1742 goto done;
1744 /* sort strings by content, so that we can identify duplicates */
1745 qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_content);
1748 * iterate groups of equal strings and if any instance in a group was
1749 * referenced, emit single instance and remember new offset
1751 p = tmp_strs;
1752 grp_idx = 0;
1753 grp_used = strs.ptrs[0].used;
1754 /* iterate past end to avoid code duplication after loop */
1755 for (i = 1; i <= strs.cnt; i++) {
1757 * when i == strs.cnt, we want to skip string comparison and go
1758 * straight to handling last group of strings (otherwise we'd
1759 * need to handle last group after the loop w/ duplicated code)
1761 if (i < strs.cnt &&
1762 !strcmp(strs.ptrs[i].str, strs.ptrs[grp_idx].str)) {
1763 grp_used = grp_used || strs.ptrs[i].used;
1764 continue;
1768 * this check would have been required after the loop to handle
1769 * last group of strings, but due to <= condition in a loop
1770 * we avoid that duplication
1772 if (grp_used) {
1773 int new_off = p - tmp_strs;
1774 __u32 len = strlen(strs.ptrs[grp_idx].str);
1776 memmove(p, strs.ptrs[grp_idx].str, len + 1);
1777 for (j = grp_idx; j < i; j++)
1778 strs.ptrs[j].new_off = new_off;
1779 p += len + 1;
1782 if (i < strs.cnt) {
1783 grp_idx = i;
1784 grp_used = strs.ptrs[i].used;
1788 /* replace original strings with deduped ones */
1789 d->btf->hdr->str_len = p - tmp_strs;
1790 memmove(start, tmp_strs, d->btf->hdr->str_len);
1791 end = start + d->btf->hdr->str_len;
1793 /* restore original order for further binary search lookups */
1794 qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_offset);
1796 /* remap string offsets */
1797 err = btf_for_each_str_off(d, btf_str_remap_offset, &strs);
1798 if (err)
1799 goto done;
1801 d->btf->hdr->str_len = end - start;
1803 done:
1804 free(tmp_strs);
1805 free(strs.ptrs);
1806 return err;
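/* Hash the fields common to all BTF types (name, info, size); kind-specific
 * hash functions below build on top of this.
 */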
1809 static long btf_hash_common(struct btf_type *t)
1811 long h;
1813 h = hash_combine(0, t->name_off);
1814 h = hash_combine(h, t->info);
1815 h = hash_combine(h, t->size);
1816 return h;
1819 static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
1821 return t1->name_off == t2->name_off &&
1822 t1->info == t2->info &&
1823 t1->size == t2->size;
1826 /* Calculate type signature hash of INT. */
1827 static long btf_hash_int(struct btf_type *t)
1829 __u32 info = *(__u32 *)(t + 1);
1830 long h;
1832 h = btf_hash_common(t);
1833 h = hash_combine(h, info);
1834 return h;
1837 /* Check structural equality of two INTs. */
1838 static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2)
1840 __u32 info1, info2;
1842 if (!btf_equal_common(t1, t2))
1843 return false;
1844 info1 = *(__u32 *)(t1 + 1);
1845 info2 = *(__u32 *)(t2 + 1);
1846 return info1 == info2;
1849 /* Calculate type signature hash of ENUM. */
1850 static long btf_hash_enum(struct btf_type *t)
1852 long h;
1854 /* don't hash vlen and enum members to support enum fwd resolving */
1855 h = hash_combine(0, t->name_off);
1856 h = hash_combine(h, t->info & ~0xffff);
1857 h = hash_combine(h, t->size);
1858 return h;
1861 /* Check structural equality of two ENUMs. */
1862 static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
1864 const struct btf_enum *m1, *m2;
1865 __u16 vlen;
1866 int i;
1868 if (!btf_equal_common(t1, t2))
1869 return false;
1871 vlen = btf_vlen(t1);
1872 m1 = btf_enum(t1);
1873 m2 = btf_enum(t2);
1874 for (i = 0; i < vlen; i++) {
1875 if (m1->name_off != m2->name_off || m1->val != m2->val)
1876 return false;
1877 m1++;
1878 m2++;
1880 return true;
1883 static inline bool btf_is_enum_fwd(struct btf_type *t)
1885 return btf_is_enum(t) && btf_vlen(t) == 0;
1888 static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
1890 if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2))
1891 return btf_equal_enum(t1, t2);
1892 /* ignore vlen when comparing */
1893 return t1->name_off == t2->name_off &&
1894 (t1->info & ~0xffff) == (t2->info & ~0xffff) &&
1895 t1->size == t2->size;
1899 * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
1900 * as referenced type IDs equivalence is established separately during type
1901 * graph equivalence check algorithm.
1903 static long btf_hash_struct(struct btf_type *t)
1905 const struct btf_member *member = btf_members(t);
1906 __u32 vlen = btf_vlen(t);
1907 long h = btf_hash_common(t);
1908 int i;
1910 for (i = 0; i < vlen; i++) {
1911 h = hash_combine(h, member->name_off);
1912 h = hash_combine(h, member->offset);
1913 /* no hashing of referenced type ID, it can be unresolved yet */
1914 member++;
1916 return h;
1920 * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced type
1921 * IDs. This check is performed during type graph equivalence check and
1922 * referenced types equivalence is checked separately.
1924 static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
1926 const struct btf_member *m1, *m2;
1927 __u16 vlen;
1928 int i;
1930 if (!btf_equal_common(t1, t2))
1931 return false;
1933 vlen = btf_vlen(t1);
1934 m1 = btf_members(t1);
1935 m2 = btf_members(t2);
1936 for (i = 0; i < vlen; i++) {
1937 if (m1->name_off != m2->name_off || m1->offset != m2->offset)
1938 return false;
1939 m1++;
1940 m2++;
1942 return true;
1946 * Calculate type signature hash of ARRAY, including referenced type IDs,
1947 * under assumption that they were already resolved to canonical type IDs and
1948 * are not going to change.
1950 static long btf_hash_array(struct btf_type *t)
1952 const struct btf_array *info = btf_array(t);
1953 long h = btf_hash_common(t);
1955 h = hash_combine(h, info->type);
1956 h = hash_combine(h, info->index_type);
1957 h = hash_combine(h, info->nelems);
1958 return h;
1962 * Check exact equality of two ARRAYs, taking into account referenced
1963 * type IDs, under assumption that they were already resolved to canonical
1964 * type IDs and are not going to change.
1965 * This function is called during reference types deduplication to compare
1966 * ARRAY to potential canonical representative.
1968 static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2)
1970 const struct btf_array *info1, *info2;
1972 if (!btf_equal_common(t1, t2))
1973 return false;
1975 info1 = btf_array(t1);
1976 info2 = btf_array(t2);
1977 return info1->type == info2->type &&
1978 info1->index_type == info2->index_type &&
1979 info1->nelems == info2->nelems;
1983 * Check structural compatibility of two ARRAYs, ignoring referenced type
1984 * IDs. This check is performed during type graph equivalence check and
1985 * referenced types equivalence is checked separately.
1987 static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2)
1989 if (!btf_equal_common(t1, t2))
1990 return false;
1992 return btf_array(t1)->nelems == btf_array(t2)->nelems;
1996 * Calculate type signature hash of FUNC_PROTO, including referenced type IDs,
1997 * under assumption that they were already resolved to canonical type IDs and
1998 * are not going to change.
2000 static long btf_hash_fnproto(struct btf_type *t)
2002 const struct btf_param *member = btf_params(t);
2003 __u16 vlen = btf_vlen(t);
2004 long h = btf_hash_common(t);
2005 int i;
2007 for (i = 0; i < vlen; i++) {
2008 h = hash_combine(h, member->name_off);
2009 h = hash_combine(h, member->type);
2010 member++;
2012 return h;
2016 * Check exact equality of two FUNC_PROTOs, taking into account referenced
2017 * type IDs, under assumption that they were already resolved to canonical
2018 * type IDs and are not going to change.
2019 * This function is called during reference types deduplication to compare
2020 * FUNC_PROTO to potential canonical representative.
2022 static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
2024 const struct btf_param *m1, *m2;
2025 __u16 vlen;
2026 int i;
2028 if (!btf_equal_common(t1, t2))
2029 return false;
2031 vlen = btf_vlen(t1);
2032 m1 = btf_params(t1);
2033 m2 = btf_params(t2);
2034 for (i = 0; i < vlen; i++) {
2035 if (m1->name_off != m2->name_off || m1->type != m2->type)
2036 return false;
2037 m1++;
2038 m2++;
2040 return true;
2044 * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type
2045 * IDs. This check is performed during type graph equivalence check and
2046 * referenced types equivalence is checked separately.
2048 static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
2050 const struct btf_param *m1, *m2;
2051 __u16 vlen;
2052 int i;
2054 /* skip return type ID */
2055 if (t1->name_off != t2->name_off || t1->info != t2->info)
2056 return false;
2058 vlen = btf_vlen(t1);
2059 m1 = btf_params(t1);
2060 m2 = btf_params(t2);
2061 for (i = 0; i < vlen; i++) {
2062 if (m1->name_off != m2->name_off)
2063 return false;
2064 m1++;
2065 m2++;
2067 return true;
2071 * Deduplicate primitive types that can't reference other types by calculating
2072 * their type signature hash and comparing them with any possible canonical
2073 * candidate. If no canonical candidate matches, type itself is marked as
2074 * canonical and is added into `btf_dedup->dedup_table` as another candidate.
2076 static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
2078 struct btf_type *t = d->btf->types[type_id];
2079 struct hashmap_entry *hash_entry;
2080 struct btf_type *cand;
2081 /* if we don't find equivalent type, then we are canonical */
2082 __u32 new_id = type_id;
2083 __u32 cand_id;
2084 long h;
2086 switch (btf_kind(t)) {
2087 case BTF_KIND_CONST:
2088 case BTF_KIND_VOLATILE:
2089 case BTF_KIND_RESTRICT:
2090 case BTF_KIND_PTR:
2091 case BTF_KIND_TYPEDEF:
2092 case BTF_KIND_ARRAY:
2093 case BTF_KIND_STRUCT:
2094 case BTF_KIND_UNION:
2095 case BTF_KIND_FUNC:
2096 case BTF_KIND_FUNC_PROTO:
2097 case BTF_KIND_VAR:
2098 case BTF_KIND_DATASEC:
2099 return 0;
2101 case BTF_KIND_INT:
2102 h = btf_hash_int(t);
2103 for_each_dedup_cand(d, hash_entry, h) {
2104 cand_id = (__u32)(long)hash_entry->value;
2105 cand = d->btf->types[cand_id];
2106 if (btf_equal_int(t, cand)) {
2107 new_id = cand_id;
2108 break;
2111 break;
2113 case BTF_KIND_ENUM:
2114 h = btf_hash_enum(t);
2115 for_each_dedup_cand(d, hash_entry, h) {
2116 cand_id = (__u32)(long)hash_entry->value;
2117 cand = d->btf->types[cand_id];
2118 if (btf_equal_enum(t, cand)) {
2119 new_id = cand_id;
2120 break;
2122 if (d->opts.dont_resolve_fwds)
2123 continue;
2124 if (btf_compat_enum(t, cand)) {
2125 if (btf_is_enum_fwd(t)) {
2126 /* resolve fwd to full enum */
2127 new_id = cand_id;
2128 break;
2130 /* resolve canonical enum fwd to full enum */
2131 d->map[cand_id] = type_id;
2134 break;
2136 case BTF_KIND_FWD:
2137 h = btf_hash_common(t);
2138 for_each_dedup_cand(d, hash_entry, h) {
2139 cand_id = (__u32)(long)hash_entry->value;
2140 cand = d->btf->types[cand_id];
2141 if (btf_equal_common(t, cand)) {
2142 new_id = cand_id;
2143 break;
2146 break;
2148 default:
2149 return -EINVAL;
2152 d->map[type_id] = new_id;
2153 if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
2154 return -ENOMEM;
2156 return 0;
2159 static int btf_dedup_prim_types(struct btf_dedup *d)
2161 int i, err;
2163 for (i = 1; i <= d->btf->nr_types; i++) {
2164 err = btf_dedup_prim_type(d, i);
2165 if (err)
2166 return err;
2168 return 0;
2172 * Check whether type is already mapped into canonical one (could be to itself).
2174 static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id)
2176 return d->map[type_id] <= BTF_MAX_NR_TYPES;
2180 * Resolve type ID into its canonical type ID, if any; otherwise return original
2181 * type ID. If type is FWD and is resolved into STRUCT/UNION already, follow
2182 * STRUCT/UNION link and resolve it into canonical type ID as well.
2184 static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id)
2186 while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
2187 type_id = d->map[type_id];
2188 return type_id;
2192 * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original
2193 * type ID.
2195 static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id)
2197 __u32 orig_type_id = type_id;
2199 if (!btf_is_fwd(d->btf->types[type_id]))
2200 return type_id;
2202 while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
2203 type_id = d->map[type_id];
2205 if (!btf_is_fwd(d->btf->types[type_id]))
2206 return type_id;
2208 return orig_type_id;
2212 static inline __u16 btf_fwd_kind(struct btf_type *t)
2214 return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
2218 * Check equivalence of BTF type graph formed by candidate struct/union (we'll
2219 * call it "candidate graph" in this description for brevity) to a type graph
2220 * formed by (potential) canonical struct/union ("canonical graph" for brevity
2221 * here, though keep in mind that not all types in canonical graph are
2222 * necessarily canonical representatives themselves, some of them might be
2223 * duplicates or their uniqueness might not have been established yet).
2224 * Returns:
2225 * - >0, if type graphs are equivalent;
2226 * - 0, if not equivalent;
2227 * - <0, on error.
2229 * The algorithm performs a side-by-side DFS traversal of both type graphs and checks
2230 * equivalence of BTF types at each step. If at any point BTF types in the candidate
2231 * and canonical graphs are not structurally compatible, the whole graphs are
2232 * incompatible. If types are structurally equivalent (i.e., all information
2233 * except referenced type IDs is exactly the same), a mapping from `canon_id` to
2234 * a `cand_id` is recorded in the hypothetical mapping (`btf_dedup->hypot_map`).
2235 * If a type references other types, then those referenced types are checked
2236 * for equivalence recursively.
2238 * During the DFS traversal, if we find that for the current `canon_id` type we
2239 * already have some mapping in the hypothetical map, we check for two possible
2240 * situations:
2241 * - `canon_id` is mapped to exactly the same type as `cand_id`. This will
2242 * happen when type graphs have cycles. In this case we assume those two
2243 * types are equivalent.
2244 * - `canon_id` is mapped to a different type. This is a contradiction in our
2245 * hypothetical mapping, because the same type in the canonical graph would
2246 * correspond to two different types in the candidate graph, which shouldn't
2247 * happen for equivalent type graphs. This condition terminates the equivalence
2248 * check with a negative result.
2250 * If the type graph traversal exhausts the types to check and finds no contradiction,
2251 * then the type graphs are equivalent.
2253 * When checking types for equivalence, there is one special case: FWD types.
2254 * If FWD type resolution is allowed and one of the types (either from canonical
2255 * or candidate graph) is a FWD and the other is a STRUCT/UNION (depending on the FWD's kind
2256 * flag) and their names match, the hypothetical mapping is updated to point from
2257 * FWD to STRUCT/UNION. If the graphs are then determined to be equivalent,
2258 * this mapping will be used to record the FWD -> STRUCT/UNION mapping permanently.
2260 * Technically, this could lead to incorrect FWD to STRUCT/UNION resolution,
2261 * if there are two identically named (or anonymous) structs/unions that are
2262 * structurally compatible, one of which has a FWD field while the other is a concrete
2263 * STRUCT/UNION, but according to the C sources they are different structs/unions
2264 * that reference different types with the same name. This is extremely
2265 * unlikely to happen, but the btf_dedup API allows disabling FWD resolution if
2266 * this logic is causing problems.
2268 * Doing FWD resolution means that both the candidate and the canonical graphs can
2269 * consist of portions that come from multiple compilation units.
2270 * This is due to the fact that types within a single compilation unit are always
2271 * deduplicated and FWDs are already resolved, if the referenced struct/union
2272 * definition is available. So, if we had an unresolved FWD and found a corresponding
2273 * STRUCT/UNION, they will be from different compilation units. This
2274 * consequently means that when we "link" a FWD to the corresponding STRUCT/UNION,
2275 * the type graph will likely have at least two different BTF types that describe the
2276 * same type (e.g., most probably there will be two different BTF types for the
2277 * same 'int' primitive type) and could even have "overlapping" parts of the type
2278 * graph that describe the same subset of types.
2280 * This in turn means that our assumption that each type in the canonical graph
2281 * must correspond to exactly one type in the candidate graph might not hold
2282 * anymore, making it harder to detect contradictions using the hypothetical
2283 * map. To handle this problem, we allow following FWD -> STRUCT/UNION
2284 * resolution only in the canonical graph. FWDs in candidate graphs are never
2285 * resolved. To see why that is OK, let's check all possible situations w.r.t. FWDs
2286 * that can occur:
2287 * - Both types in canonical and candidate graphs are FWDs. If they are
2288 * structurally equivalent, then they can either be both resolved to the
2289 * same STRUCT/UNION or not resolved at all. In both cases they are
2290 * equivalent and there is no need to resolve FWD on candidate side.
2291 * - Both types in canonical and candidate graphs are concrete STRUCT/UNION,
2292 * so there is nothing to resolve either; the algorithm will check equivalence anyway.
2293 * - Type in canonical graph is FWD, while type in candidate is concrete
2294 * STRUCT/UNION. In this case the candidate graph comes from a single compilation
2295 * unit, so there is exactly one BTF type for each unique C type. After
2296 * resolving the FWD into a STRUCT/UNION, there might be more than one BTF type
2297 * in the canonical graph mapping to a single BTF type in the candidate graph, but
2298 * because hypothetical mapping maps from canonical to candidate types, it's
2299 * alright, and we still maintain the property of having single `canon_id`
2300 * mapping to single `cand_id` (there could be two different `canon_id`
2301 * mapped to the same `cand_id`, but it's not contradictory).
2302 * - Type in canonical graph is concrete STRUCT/UNION, while type in candidate
2303 * graph is FWD. In this case we are just going to check compatibility of
2304 * STRUCT/UNION and corresponding FWD, and if they are compatible, we'll
2305 * assume that whatever STRUCT/UNION FWD resolves to must be equivalent to
2306 * a concrete STRUCT/UNION from canonical graph. If the rest of type graphs
2307 * turn out equivalent, we'll re-resolve FWD to concrete STRUCT/UNION from
2308 * canonical graph.
2310 static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
2311 __u32 canon_id)
2313 struct btf_type *cand_type;
2314 struct btf_type *canon_type;
2315 __u32 hypot_type_id;
2316 __u16 cand_kind;
2317 __u16 canon_kind;
2318 int i, eq;
2320 /* if both resolve to the same canonical, they must be equivalent */
2321 if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id))
2322 return 1;
2324 canon_id = resolve_fwd_id(d, canon_id);
2326 hypot_type_id = d->hypot_map[canon_id];
2327 if (hypot_type_id <= BTF_MAX_NR_TYPES)
2328 return hypot_type_id == cand_id;
2330 if (btf_dedup_hypot_map_add(d, canon_id, cand_id))
2331 return -ENOMEM;
2333 cand_type = d->btf->types[cand_id];
2334 canon_type = d->btf->types[canon_id];
2335 cand_kind = btf_kind(cand_type);
2336 canon_kind = btf_kind(canon_type);
2338 if (cand_type->name_off != canon_type->name_off)
2339 return 0;
2341 /* FWD <--> STRUCT/UNION equivalence check, if enabled */
2342 if (!d->opts.dont_resolve_fwds
2343 && (cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
2344 && cand_kind != canon_kind) {
2345 __u16 real_kind;
2346 __u16 fwd_kind;
2348 if (cand_kind == BTF_KIND_FWD) {
2349 real_kind = canon_kind;
2350 fwd_kind = btf_fwd_kind(cand_type);
2351 } else {
2352 real_kind = cand_kind;
2353 fwd_kind = btf_fwd_kind(canon_type);
2355 return fwd_kind == real_kind;
2358 if (cand_kind != canon_kind)
2359 return 0;
2361 switch (cand_kind) {
2362 case BTF_KIND_INT:
2363 return btf_equal_int(cand_type, canon_type);
2365 case BTF_KIND_ENUM:
2366 if (d->opts.dont_resolve_fwds)
2367 return btf_equal_enum(cand_type, canon_type);
2368 else
2369 return btf_compat_enum(cand_type, canon_type);
2371 case BTF_KIND_FWD:
2372 return btf_equal_common(cand_type, canon_type);
2374 case BTF_KIND_CONST:
2375 case BTF_KIND_VOLATILE:
2376 case BTF_KIND_RESTRICT:
2377 case BTF_KIND_PTR:
2378 case BTF_KIND_TYPEDEF:
2379 case BTF_KIND_FUNC:
2380 if (cand_type->info != canon_type->info)
2381 return 0;
2382 return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
2384 case BTF_KIND_ARRAY: {
2385 const struct btf_array *cand_arr, *canon_arr;
2387 if (!btf_compat_array(cand_type, canon_type))
2388 return 0;
2389 cand_arr = btf_array(cand_type);
2390 canon_arr = btf_array(canon_type);
2391 eq = btf_dedup_is_equiv(d,
2392 cand_arr->index_type, canon_arr->index_type);
2393 if (eq <= 0)
2394 return eq;
2395 return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type);
2398 case BTF_KIND_STRUCT:
2399 case BTF_KIND_UNION: {
2400 const struct btf_member *cand_m, *canon_m;
2401 __u16 vlen;
2403 if (!btf_shallow_equal_struct(cand_type, canon_type))
2404 return 0;
2405 vlen = btf_vlen(cand_type);
2406 cand_m = btf_members(cand_type);
2407 canon_m = btf_members(canon_type);
2408 for (i = 0; i < vlen; i++) {
2409 eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type);
2410 if (eq <= 0)
2411 return eq;
2412 cand_m++;
2413 canon_m++;
2416 return 1;
2419 case BTF_KIND_FUNC_PROTO: {
2420 const struct btf_param *cand_p, *canon_p;
2421 __u16 vlen;
2423 if (!btf_compat_fnproto(cand_type, canon_type))
2424 return 0;
2425 eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
2426 if (eq <= 0)
2427 return eq;
2428 vlen = btf_vlen(cand_type);
2429 cand_p = btf_params(cand_type);
2430 canon_p = btf_params(canon_type);
2431 for (i = 0; i < vlen; i++) {
2432 eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type);
2433 if (eq <= 0)
2434 return eq;
2435 cand_p++;
2436 canon_p++;
2438 return 1;
2441 default:
2442 return -EINVAL;
2444 return 0;
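/*
 * Editorial example of the cycle handling described above: two
 * compilation units may both define
 *
 *	struct list_head { struct list_head *next, *prev; };
 *
 * Checking the candidate struct against the canonical one first records
 * canon_id -> cand_id in hypot_map, then recurses through the PTR
 * members back to the same pair of structs. On that second visit
 * hypot_map already holds the identical mapping, so the check returns 1
 * instead of recursing forever; had the mapping pointed at a different
 * type, that contradiction would have produced a 0 result.
 */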
2448 * Use hypothetical mapping, produced by successful type graph equivalence
2449 * check, to augment existing struct/union canonical mapping, where possible.
2451 * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record
2452 * FWD -> STRUCT/UNION correspondence as well. FWD resolution is bidirectional:
2453 * it doesn't matter if the FWD type was part of the canonical graph or the candidate one;
2454 * we record the mapping anyway. As opposed to the care required
2455 * for struct/union correspondence mapping (described below), for FWD resolution
2456 * it's not important, as by the time that FWD type (a reference type) is
2457 * deduplicated, all structs/unions will already be deduped anyway.
2459 * Recording STRUCT/UNION mapping is purely a performance optimization and is
2460 * not required for correctness. It needs to be done carefully to ensure that
2461 * struct/union from candidate's type graph is not mapped into corresponding
2462 * struct/union from canonical type graph that itself hasn't been resolved into
2463 * canonical representative. The only guarantee we have is that canonical
2464 * struct/union was determined as canonical and that won't change. But any
2465 * types referenced through that struct/union's fields might not have been
2466 * resolved yet, so in such a case it's too early to establish any kind of
2467 * correspondence between structs/unions.
2469 * No canonical correspondence is derived for primitive types (they are already
2470 * completely deduplicated anyway) or reference types (they rely on
2471 * stability of struct/union canonical relationship for equivalence checks).
2473 static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
2475 __u32 cand_type_id, targ_type_id;
2476 __u16 t_kind, c_kind;
2477 __u32 t_id, c_id;
2478 int i;
2480 for (i = 0; i < d->hypot_cnt; i++) {
2481 cand_type_id = d->hypot_list[i];
2482 targ_type_id = d->hypot_map[cand_type_id];
2483 t_id = resolve_type_id(d, targ_type_id);
2484 c_id = resolve_type_id(d, cand_type_id);
2485 t_kind = btf_kind(d->btf->types[t_id]);
2486 c_kind = btf_kind(d->btf->types[c_id]);
2488 * Resolve FWD into STRUCT/UNION.
2489 * It's ok to resolve FWD into STRUCT/UNION that's not yet
2490 * mapped to canonical representative (as opposed to
2491 * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because
2492 * eventually that struct is going to be mapped and all resolved
2493 * FWDs will automatically resolve to correct canonical
2494 * representative. This will happen before ref type deduping,
2495 * which critically depends on the stability of these mappings. This
2496 * stability is not a requirement for STRUCT/UNION equivalence
2497 * checks, though.
2499 if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD)
2500 d->map[c_id] = t_id;
2501 else if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD)
2502 d->map[t_id] = c_id;
2504 if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) &&
2505 c_kind != BTF_KIND_FWD &&
2506 is_type_mapped(d, c_id) &&
2507 !is_type_mapped(d, t_id)) {
2509 * as a perf optimization, we can map struct/union
2510 * that's part of type graph we just verified for
2511 * equivalence. We can do that for struct/union that has
2512 * canonical representative only, though.
2514 d->map[t_id] = c_id;
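/*
 * Editorial example: if a FWD "struct foo" on the canonical-graph side
 * was hypothetically matched to a concrete "struct foo" on the
 * candidate-graph side, one of the two FWD branches above commits that
 * resolution to d->map unconditionally. A STRUCT <-> STRUCT pair from
 * hypot_map, by contrast, is committed only when the canonical-graph
 * side already has a canonical representative and the candidate-graph
 * side does not, in which case the latter is mapped to the former;
 * this mapping is merely the performance shortcut described above.
 */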
2520 * Deduplicate struct/union types.
2522 * For each struct/union type its type signature hash is calculated, taking
2523 * into account the type's name, size, and the number, order and names of its fields, but
2524 * ignoring the type IDs referenced from fields, because they might not be deduped
2525 * completely until after the reference type deduplication phase. This type hash
2526 * is used to iterate over all potential canonical types sharing the same hash.
2527 * For each canonical candidate we check whether type graphs that they form
2528 * (through referenced types in fields and so on) are equivalent, using the algorithm
2529 * implemented in `btf_dedup_is_equiv`. If such equivalence is found and
2530 * BTF_KIND_FWD resolution is allowed, then hypothetical mapping
2531 * (btf_dedup->hypot_map) produced by aforementioned type graph equivalence
2532 * algorithm is used to record FWD -> STRUCT/UNION mapping. It's also used to
2533 * potentially map other structs/unions to their canonical representatives,
2534 * if such a relationship hasn't yet been established. This speeds up the algorithm
2535 * by eliminating some of the duplicate work.
2537 * If no matching canonical representative is found, the struct/union is marked
2538 * as canonical for itself and is added into the btf_dedup->dedup_table hash map
2539 * for further lookups.
2541 static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
2543 struct btf_type *cand_type, *t;
2544 struct hashmap_entry *hash_entry;
2545 /* if we don't find equivalent type, then we are canonical */
2546 __u32 new_id = type_id;
2547 __u16 kind;
2548 long h;
2550 /* already deduped or is in process of deduping (loop detected) */
2551 if (d->map[type_id] <= BTF_MAX_NR_TYPES)
2552 return 0;
2554 t = d->btf->types[type_id];
2555 kind = btf_kind(t);
2557 if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
2558 return 0;
2560 h = btf_hash_struct(t);
2561 for_each_dedup_cand(d, hash_entry, h) {
2562 __u32 cand_id = (__u32)(long)hash_entry->value;
2563 int eq;
2566 * Even though btf_dedup_is_equiv() checks for
2567 * btf_shallow_equal_struct() internally when checking two
2568 * structs (unions) for equivalence, we need to guard here
2569 * against picking a matching FWD type as a dedup candidate.
2570 * This can happen due to a hash collision. In such a case just
2571 * relying on btf_dedup_is_equiv() would lead to potentially
2572 * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because
2573 * FWD and compatible STRUCT/UNION are considered equivalent.
2575 cand_type = d->btf->types[cand_id];
2576 if (!btf_shallow_equal_struct(t, cand_type))
2577 continue;
2579 btf_dedup_clear_hypot_map(d);
2580 eq = btf_dedup_is_equiv(d, type_id, cand_id);
2581 if (eq < 0)
2582 return eq;
2583 if (!eq)
2584 continue;
2585 new_id = cand_id;
2586 btf_dedup_merge_hypot_map(d);
2587 break;
2590 d->map[type_id] = new_id;
2591 if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
2592 return -ENOMEM;
2594 return 0;
2597 static int btf_dedup_struct_types(struct btf_dedup *d)
2599 int i, err;
2601 for (i = 1; i <= d->btf->nr_types; i++) {
2602 err = btf_dedup_struct_type(d, i);
2603 if (err)
2604 return err;
2606 return 0;
2610 * Deduplicate reference type.
2612 * Once all primitive and struct/union types are deduplicated, we can easily
2613 * deduplicate all other (reference) BTF types. This is done in two steps:
2615 * 1. Resolve all referenced type IDs into their canonical type IDs. This
2616 * resolution can be done either immediately for primitive or struct/union types
2617 * (because they were deduped in previous two phases) or recursively for
2618 * reference types. Recursion will always terminate at either primitive or
2619 * struct/union type, at which point we can "unwind" chain of reference types
2620 * one by one. There is no danger of encountering cycles because in the C type
2621 * system the only way to form a type cycle is through a struct/union, so any chain
2622 * of reference types, even those taking part in a type cycle, will inevitably
2623 * reach a struct/union at some point.
2625 * 2. Once all referenced type IDs are resolved into canonical ones, the BTF type
2626 * becomes "stable", in the sense that no further deduplication will cause
2627 * any changes to it. With that, it's now possible to calculate the type's signature
2628 * hash (this time taking into account referenced type IDs) and loop over all
2629 * potential canonical representatives. If no match is found, the current type
2630 * will become the canonical representative of itself and will be added into
2631 * btf_dedup->dedup_table as another possible canonical representative.
2633 static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
2635 struct hashmap_entry *hash_entry;
2636 __u32 new_id = type_id, cand_id;
2637 struct btf_type *t, *cand;
2638 /* if we don't find equivalent type, then we are representative type */
2639 int ref_type_id;
2640 long h;
2642 if (d->map[type_id] == BTF_IN_PROGRESS_ID)
2643 return -ELOOP;
2644 if (d->map[type_id] <= BTF_MAX_NR_TYPES)
2645 return resolve_type_id(d, type_id);
2647 t = d->btf->types[type_id];
2648 d->map[type_id] = BTF_IN_PROGRESS_ID;
2650 switch (btf_kind(t)) {
2651 case BTF_KIND_CONST:
2652 case BTF_KIND_VOLATILE:
2653 case BTF_KIND_RESTRICT:
2654 case BTF_KIND_PTR:
2655 case BTF_KIND_TYPEDEF:
2656 case BTF_KIND_FUNC:
2657 ref_type_id = btf_dedup_ref_type(d, t->type);
2658 if (ref_type_id < 0)
2659 return ref_type_id;
2660 t->type = ref_type_id;
2662 h = btf_hash_common(t);
2663 for_each_dedup_cand(d, hash_entry, h) {
2664 cand_id = (__u32)(long)hash_entry->value;
2665 cand = d->btf->types[cand_id];
2666 if (btf_equal_common(t, cand)) {
2667 new_id = cand_id;
2668 break;
2671 break;
2673 case BTF_KIND_ARRAY: {
2674 struct btf_array *info = btf_array(t);
2676 ref_type_id = btf_dedup_ref_type(d, info->type);
2677 if (ref_type_id < 0)
2678 return ref_type_id;
2679 info->type = ref_type_id;
2681 ref_type_id = btf_dedup_ref_type(d, info->index_type);
2682 if (ref_type_id < 0)
2683 return ref_type_id;
2684 info->index_type = ref_type_id;
2686 h = btf_hash_array(t);
2687 for_each_dedup_cand(d, hash_entry, h) {
2688 cand_id = (__u32)(long)hash_entry->value;
2689 cand = d->btf->types[cand_id];
2690 if (btf_equal_array(t, cand)) {
2691 new_id = cand_id;
2692 break;
2695 break;
2698 case BTF_KIND_FUNC_PROTO: {
2699 struct btf_param *param;
2700 __u16 vlen;
2701 int i;
2703 ref_type_id = btf_dedup_ref_type(d, t->type);
2704 if (ref_type_id < 0)
2705 return ref_type_id;
2706 t->type = ref_type_id;
2708 vlen = btf_vlen(t);
2709 param = btf_params(t);
2710 for (i = 0; i < vlen; i++) {
2711 ref_type_id = btf_dedup_ref_type(d, param->type);
2712 if (ref_type_id < 0)
2713 return ref_type_id;
2714 param->type = ref_type_id;
2715 param++;
2718 h = btf_hash_fnproto(t);
2719 for_each_dedup_cand(d, hash_entry, h) {
2720 cand_id = (__u32)(long)hash_entry->value;
2721 cand = d->btf->types[cand_id];
2722 if (btf_equal_fnproto(t, cand)) {
2723 new_id = cand_id;
2724 break;
2727 break;
2730 default:
2731 return -EINVAL;
2734 d->map[type_id] = new_id;
2735 if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
2736 return -ENOMEM;
2738 return new_id;
2741 static int btf_dedup_ref_types(struct btf_dedup *d)
2743 int i, err;
2745 for (i = 1; i <= d->btf->nr_types; i++) {
2746 err = btf_dedup_ref_type(d, i);
2747 if (err < 0)
2748 return err;
2750 /* we won't need d->dedup_table anymore */
2751 hashmap__free(d->dedup_table);
2752 d->dedup_table = NULL;
2753 return 0;
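/*
 * Editorial example: deduplicating a "const int *" chain (PTR -> CONST
 * -> INT) proceeds by recursion. btf_dedup_ref_type() on the PTR first
 * dedups the CONST, which resolves the INT already deduped in the
 * primitive phase; with its referenced ID now canonical, the CONST is
 * hashed and matched against candidates (or becomes canonical itself),
 * and the PTR is handled the same way on unwind. The BTF_IN_PROGRESS_ID
 * marker guards against reference-type cycles, which well-formed C type
 * graphs cannot produce, since any cycle must pass through a
 * struct/union (see the comment above).
 */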
2757 * Compact types.
2759 * After we have established for each type its corresponding canonical representative
2760 * type, we can now eliminate types that are not canonical and leave only
2761 * canonical ones laid out sequentially in memory by copying them over
2762 * duplicates. During compaction the btf_dedup->hypot_map array is reused to store
2763 * a map from original type ID to a new compacted type ID, which will be used
2764 * during the next phase to "fix up" type IDs referenced from struct/union and
2765 * reference types.
2767 static int btf_dedup_compact_types(struct btf_dedup *d)
2769 struct btf_type **new_types;
2770 __u32 next_type_id = 1;
2771 char *types_start, *p;
2772 int i, len;
2774 /* we are going to reuse hypot_map to store compaction remapping */
2775 d->hypot_map[0] = 0;
2776 for (i = 1; i <= d->btf->nr_types; i++)
2777 d->hypot_map[i] = BTF_UNPROCESSED_ID;
2779 types_start = d->btf->nohdr_data + d->btf->hdr->type_off;
2780 p = types_start;
2782 for (i = 1; i <= d->btf->nr_types; i++) {
2783 if (d->map[i] != i)
2784 continue;
2786 len = btf_type_size(d->btf->types[i]);
2787 if (len < 0)
2788 return len;
2790 memmove(p, d->btf->types[i], len);
2791 d->hypot_map[i] = next_type_id;
2792 d->btf->types[next_type_id] = (struct btf_type *)p;
2793 p += len;
2794 next_type_id++;
2797 /* shrink struct btf's internal types index and update btf_header */
2798 d->btf->nr_types = next_type_id - 1;
2799 d->btf->types_size = d->btf->nr_types;
2800 d->btf->hdr->type_len = p - types_start;
2801 new_types = realloc(d->btf->types,
2802 (1 + d->btf->nr_types) * sizeof(struct btf_type *));
2803 if (!new_types)
2804 return -ENOMEM;
2805 d->btf->types = new_types;
2807 /* make sure string section follows type information without gaps */
2808 d->btf->hdr->str_off = p - (char *)d->btf->nohdr_data;
2809 memmove(p, d->btf->strings, d->btf->hdr->str_len);
2810 d->btf->strings = p;
2811 p += d->btf->hdr->str_len;
2813 d->btf->data_size = p - (char *)d->btf->data;
2814 return 0;
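/*
 * Editorial example: if types 1..5 dedup to canonical IDs {1, 1, 3, 3, 5},
 * only types 1, 3 and 5 survive compaction and get new IDs 1, 2 and 3
 * recorded in hypot_map; their raw data is then packed back to back,
 * the string section is moved to follow it immediately, and
 * hdr->type_len, hdr->str_off and data_size shrink accordingly.
 */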
2818 * Figure out final (deduplicated and compacted) type ID for provided original
2819 * `type_id` by first resolving it into corresponding canonical type ID and
2820 * then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map,
2821 * which is populated during compaction phase.
2823 static int btf_dedup_remap_type_id(struct btf_dedup *d, __u32 type_id)
2825 __u32 resolved_type_id, new_type_id;
2827 resolved_type_id = resolve_type_id(d, type_id);
2828 new_type_id = d->hypot_map[resolved_type_id];
2829 if (new_type_id > BTF_MAX_NR_TYPES)
2830 return -EINVAL;
2831 return new_type_id;
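/*
 * Editorial example: with d->map containing 7 -> 3 and hypot_map (which
 * now holds the compaction remapping) containing 3 -> 2, a reference to
 * type 7 is rewritten to 2: it is first resolved to its canonical ID 3
 * and then mapped to the compacted ID 2 assigned in
 * btf_dedup_compact_types().
 */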
2835 * Remap referenced type IDs into deduped type IDs.
2837 * After BTF types are deduplicated and compacted, their final type IDs may
2838 * differ from original ones. The map from original to a corresponding
2839 * deduped type ID is stored in btf_dedup->hypot_map and is populated during
2840 * compaction phase. During the remapping phase we rewrite all type IDs
2841 * referenced from any BTF type (e.g., struct fields, func proto args, etc.) to
2842 * their final deduped type IDs.
2844 static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id)
2846 struct btf_type *t = d->btf->types[type_id];
2847 int i, r;
2849 switch (btf_kind(t)) {
2850 case BTF_KIND_INT:
2851 case BTF_KIND_ENUM:
2852 break;
2854 case BTF_KIND_FWD:
2855 case BTF_KIND_CONST:
2856 case BTF_KIND_VOLATILE:
2857 case BTF_KIND_RESTRICT:
2858 case BTF_KIND_PTR:
2859 case BTF_KIND_TYPEDEF:
2860 case BTF_KIND_FUNC:
2861 case BTF_KIND_VAR:
2862 r = btf_dedup_remap_type_id(d, t->type);
2863 if (r < 0)
2864 return r;
2865 t->type = r;
2866 break;
2868 case BTF_KIND_ARRAY: {
2869 struct btf_array *arr_info = btf_array(t);
2871 r = btf_dedup_remap_type_id(d, arr_info->type);
2872 if (r < 0)
2873 return r;
2874 arr_info->type = r;
2875 r = btf_dedup_remap_type_id(d, arr_info->index_type);
2876 if (r < 0)
2877 return r;
2878 arr_info->index_type = r;
2879 break;
2882 case BTF_KIND_STRUCT:
2883 case BTF_KIND_UNION: {
2884 struct btf_member *member = btf_members(t);
2885 __u16 vlen = btf_vlen(t);
2887 for (i = 0; i < vlen; i++) {
2888 r = btf_dedup_remap_type_id(d, member->type);
2889 if (r < 0)
2890 return r;
2891 member->type = r;
2892 member++;
2894 break;
2897 case BTF_KIND_FUNC_PROTO: {
2898 struct btf_param *param = btf_params(t);
2899 __u16 vlen = btf_vlen(t);
2901 r = btf_dedup_remap_type_id(d, t->type);
2902 if (r < 0)
2903 return r;
2904 t->type = r;
2906 for (i = 0; i < vlen; i++) {
2907 r = btf_dedup_remap_type_id(d, param->type);
2908 if (r < 0)
2909 return r;
2910 param->type = r;
2911 param++;
2913 break;
2916 case BTF_KIND_DATASEC: {
2917 struct btf_var_secinfo *var = btf_var_secinfos(t);
2918 __u16 vlen = btf_vlen(t);
2920 for (i = 0; i < vlen; i++) {
2921 r = btf_dedup_remap_type_id(d, var->type);
2922 if (r < 0)
2923 return r;
2924 var->type = r;
2925 var++;
2927 break;
2930 default:
2931 return -EINVAL;
2934 return 0;
2937 static int btf_dedup_remap_types(struct btf_dedup *d)
2939 int i, r;
2941 for (i = 1; i <= d->btf->nr_types; i++) {
2942 r = btf_dedup_remap_type(d, i);
2943 if (r < 0)
2944 return r;
2946 return 0;
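/*
 * Editorial note: the helpers above implement the later deduplication
 * phases in the order they appear in this file: btf_dedup_prim_types(),
 * btf_dedup_struct_types(), btf_dedup_ref_types(),
 * btf_dedup_compact_types() and finally btf_dedup_remap_types(). Each
 * phase relies on the results of the previous one, which is why, for
 * example, reference types are only deduped once all struct/union
 * canonical mappings are stable, and remapping only happens after
 * compaction has populated hypot_map with final IDs.
 */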
2949 static struct btf *btf_load_raw(const char *path)
2951 struct btf *btf;
2952 size_t read_cnt;
2953 struct stat st;
2954 void *data;
2955 FILE *f;
2957 if (stat(path, &st))
2958 return ERR_PTR(-errno);
2960 data = malloc(st.st_size);
2961 if (!data)
2962 return ERR_PTR(-ENOMEM);
2964 f = fopen(path, "rb");
2965 if (!f) {
2966 btf = ERR_PTR(-errno);
2967 goto cleanup;
2970 read_cnt = fread(data, 1, st.st_size, f);
2971 fclose(f);
2972 if (read_cnt < st.st_size) {
2973 btf = ERR_PTR(-EBADF);
2974 goto cleanup;
2977 btf = btf__new(data, read_cnt);
2979 cleanup:
2980 free(data);
2981 return btf;
2985 * Probe a few well-known locations for the vmlinux kernel image and try to load BTF
2986 * data out of it to use as the target BTF.
2988 struct btf *libbpf_find_kernel_btf(void)
2990 struct {
2991 const char *path_fmt;
2992 bool raw_btf;
2993 } locations[] = {
2994 /* try canonical vmlinux BTF through sysfs first */
2995 { "/sys/kernel/btf/vmlinux", true /* raw BTF */ },
2996 /* fall back to trying to find vmlinux ELF on disk otherwise */
2997 { "/boot/vmlinux-%1$s" },
2998 { "/lib/modules/%1$s/vmlinux-%1$s" },
2999 { "/lib/modules/%1$s/build/vmlinux" },
3000 { "/usr/lib/modules/%1$s/kernel/vmlinux" },
3001 { "/usr/lib/debug/boot/vmlinux-%1$s" },
3002 { "/usr/lib/debug/boot/vmlinux-%1$s.debug" },
3003 { "/usr/lib/debug/lib/modules/%1$s/vmlinux" },
3005 char path[PATH_MAX + 1];
3006 struct utsname buf;
3007 struct btf *btf;
3008 int i;
3010 uname(&buf);
3012 for (i = 0; i < ARRAY_SIZE(locations); i++) {
3013 snprintf(path, PATH_MAX, locations[i].path_fmt, buf.release);
3015 if (access(path, R_OK))
3016 continue;
3018 if (locations[i].raw_btf)
3019 btf = btf_load_raw(path);
3020 else
3021 btf = btf__parse_elf(path, NULL);
3023 pr_debug("loading kernel BTF '%s': %ld\n",
3024 path, IS_ERR(btf) ? PTR_ERR(btf) : 0);
3025 if (IS_ERR(btf))
3026 continue;
3028 return btf;
3031 pr_warn("failed to find valid kernel BTF\n");
3032 return ERR_PTR(-ESRCH);
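/*
 * Editorial usage sketch (assumes the btf__free() destructor declared in
 * btf.h; the caller owns the returned object):
 *
 *	struct btf *kern_btf = libbpf_find_kernel_btf();
 *
 *	if (IS_ERR(kern_btf))
 *		return PTR_ERR(kern_btf);
 *	... use kern_btf as the target BTF ...
 *	btf__free(kern_btf);
 *
 * The probe order above means /sys/kernel/btf/vmlinux is preferred
 * whenever the running kernel exposes its BTF there.
 */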