/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Facebook */

#include <uapi/linux/btf.h>
#include <uapi/linux/types.h>
#include <linux/seq_file.h>
#include <linux/compiler.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/sort.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
/* BTF (BPF Type Format) is the metadata format which describes
 * the data types of BPF programs/maps. Hence, it basically focuses
 * on the C programming language, which modern BPF primarily uses.
 *
 * The BTF data is stored under the ".BTF" ELF section.
 *
 * Each 'struct btf_type' object describes a C data type.
 * Depending on the type it is describing, a 'struct btf_type'
 * object may be followed by more data. F.e. to describe an array,
 * 'struct btf_type' is followed by 'struct btf_array'.
 *
 * 'struct btf_type' and any extra data following it are
 * 4 bytes aligned.
 *
 * The BTF type section contains a list of 'struct btf_type' objects.
 * Each one describes a C type. Recall from the above section
 * that a 'struct btf_type' object could be immediately followed by extra
 * data in order to describe some particular C types.
 *
 * Each btf_type object is identified by a type_id. The type_id
 * is implicitly implied by the location of the btf_type object in
 * the BTF type section. The first one has type_id 1. The second
 * one has type_id 2...etc. Hence, an earlier btf_type has
 * a smaller type_id.
 *
 * A btf_type object may refer to another btf_type object by using
 * type_id (i.e. the "type" in the "struct btf_type").
 *
 * NOTE that we cannot assume any reference-order.
 * A btf_type object can refer to an earlier btf_type object
 * but it can also refer to a later btf_type object.
 *
 * For example, to describe "const void *", a btf_type
 * object describing "const" may refer to another btf_type
 * object describing "void *". This type-reference is done
 * by specifying type_id:
 *
 * [1] CONST (anon) type_id=2
 * [2] PTR (anon) type_id=0
 *
 * The above is the btf_verifier debug log:
 *   - Each line starting with "[?]" is a btf_type object
 *   - [?] is the type_id of the btf_type object.
 *   - CONST/PTR is the BTF_KIND_XXX
 *   - "(anon)" is the name of the type. It just
 *     happens that CONST and PTR have no name.
 *   - type_id=XXX is the 'u32 type' in btf_type
 *
 * NOTE: "void" has type_id 0
 *
 * The BTF string section contains the names used by the type section.
 * Each string is referred to by an "offset" from the beginning of the
 * string section.
 *
 * Each string is '\0' terminated.
 *
 * The first character in the string section must be '\0',
 * which is used to mean 'anonymous'. Some btf_type objects may not
 * have a name.
 *
 * To verify BTF data, two passes are needed.
 *
 * The first pass is to collect all btf_type objects into
 * an array: "btf->types".
 *
 * Depending on the C type that a btf_type is describing,
 * a btf_type may be followed by extra data. We don't know
 * how many btf_types there are, and more importantly we don't
 * know where each btf_type is located in the type section.
 *
 * Without knowing the location of each type_id, most verifications
 * cannot be done. e.g. an earlier btf_type may refer to a later
 * btf_type (recall the "const void *" above), so we cannot
 * check this type-reference in the first pass.
 *
 * The first pass still does some verifications (e.g.
 * checking that the name is a valid offset into the string section).
 *
 * The main focus of the second pass is to resolve a btf_type that is
 * referring to another type.
 *
 * We have to ensure the referring type:
 * 1) does exist in the BTF (i.e. in btf->types[]), and
 * 2) does not cause a loop.
 *
 * btf_type_needs_resolve() decides if a btf_type needs
 * to be resolved.
 *
 * The needs_resolve type implements the "resolve()" ops which
 * essentially does a DFS and detects backedges.
 *
 * During resolve (or DFS), different C types have different
 * "RESOLVED" conditions.
 *
 * When resolving a BTF_KIND_STRUCT, we need to resolve all its
 * members because a member is always referring to another
 * type. A struct's member can be treated as "RESOLVED" if
 * it is referring to a BTF_KIND_PTR. Otherwise, a valid
 * self-referencing C struct (e.g. a struct containing a pointer
 * to itself) would be rejected.
 *
 * When resolving a BTF_KIND_PTR, it needs to keep resolving if
 * it is referring to another BTF_KIND_PTR. Otherwise, we cannot
 * detect a pointer loop, e.g.:
 * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
 *                        ^                                         |
 *                        +-----------------------------------------+
 */
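/* A tiny illustrative example of the layout described above (this block is
 * not from the original source; the ids and sizes are made up): the C
 * declaration "const int *p" could be encoded as three btf_type objects,
 * shown here in btf_verifier debug-log form:
 *
 *	[1] INT int size=4 bits_offset=0 nr_bits=32 encoding=SIGNED
 *	[2] CONST (anon) type_id=1
 *	[3] PTR (anon) type_id=2
 *
 * PTR refers to CONST, CONST refers to INT, and "void" would be type_id 0
 * as noted above. The actual type_ids depend purely on where the objects
 * sit in the type section.
 */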
#define BITS_PER_U128 (sizeof(u64) * BITS_PER_BYTE * 2)
#define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
#define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
#define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
#define BITS_ROUNDUP_BYTES(bits) \
	(BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
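/* Worked example for the bit helpers above (illustrative, not part of the
 * original source): for a value spanning 12 bits,
 *
 *	BITS_PER_BYTE_MASKED(12)  == (12 & 7)  == 4	4 bits spill past a byte
 *	BITS_ROUNDDOWN_BYTES(12)  == (12 >> 3) == 1	1 whole byte is covered
 *	BITS_ROUNDUP_BYTES(12)    == 1 + !!4   == 2	2 bytes are needed
 */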
#define BTF_INFO_MASK 0x8f00ffff
#define BTF_INT_MASK 0x0fffffff
#define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
#define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)

/* 16MB for 64k structs (each with 16 members) and
 * a few MB of space for the string section.
 * The hard limit is S32_MAX.
 */
#define BTF_MAX_SIZE (16 * 1024 * 1024)
#define for_each_member(i, struct_type, member)			\
	for (i = 0, member = btf_type_member(struct_type);		\
	     i < btf_type_vlen(struct_type);				\
	     i++, member++)

#define for_each_member_from(i, from, struct_type, member)		\
	for (i = from, member = btf_type_member(struct_type) + from;	\
	     i < btf_type_vlen(struct_type);				\
	     i++, member++)

#define for_each_vsi(i, struct_type, member)				\
	for (i = 0, member = btf_type_var_secinfo(struct_type);	\
	     i < btf_type_vlen(struct_type);				\
	     i++, member++)

#define for_each_vsi_from(i, from, struct_type, member)				\
	for (i = from, member = btf_type_var_secinfo(struct_type) + from;	\
	     i < btf_type_vlen(struct_type);					\
	     i++, member++)
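/* Minimal usage sketch for the iterators above (illustrative only, not from
 * the original source): given a STRUCT/UNION btf_type 't',
 *
 *	u16 i;
 *	const struct btf_member *member;
 *
 *	for_each_member(i, t, member) {
 *		... inspect member->name_off, member->type, member->offset ...
 *	}
 *
 * for_each_vsi() walks the btf_var_secinfo entries of a DATASEC type in the
 * same way.
 */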
static DEFINE_IDR(btf_idr);
static DEFINE_SPINLOCK(btf_idr_lock);

struct btf {
	void *data;
	struct btf_type **types;
	u32 *resolved_ids;
	u32 *resolved_sizes;
	const char *strings;
	void *nohdr_data;
	struct btf_header hdr;
	u32 nr_types;
	u32 types_size;
	u32 data_size;
	refcount_t refcnt;
	u32 id;
	struct rcu_head rcu;
};

enum verifier_phase {
	CHECK_META,
	CHECK_TYPE,
};

struct resolve_vertex {
	const struct btf_type *t;
	u32 type_id;
	u16 next_member;
};

enum visit_state {
	NOT_VISITED,
	VISITED,
	RESOLVED,
};

enum resolve_mode {
	RESOLVE_TBD,	/* To Be Determined */
	RESOLVE_PTR,	/* Resolving for Pointer */
	RESOLVE_STRUCT_OR_ARRAY,	/* Resolving for struct/union
					 * or array
					 */
};

#define MAX_RESOLVE_DEPTH 32

struct btf_sec_info {
	u32 off;
	u32 len;
};

struct btf_verifier_env {
	struct btf *btf;
	u8 *visit_states;
	struct resolve_vertex stack[MAX_RESOLVE_DEPTH];
	struct bpf_verifier_log log;
	u32 log_type_id;
	u32 top_stack;
	enum verifier_phase phase;
	enum resolve_mode resolve_mode;
};
static const char * const btf_kind_str[NR_BTF_KINDS] = {
	[BTF_KIND_UNKN]		= "UNKNOWN",
	[BTF_KIND_INT]		= "INT",
	[BTF_KIND_PTR]		= "PTR",
	[BTF_KIND_ARRAY]	= "ARRAY",
	[BTF_KIND_STRUCT]	= "STRUCT",
	[BTF_KIND_UNION]	= "UNION",
	[BTF_KIND_ENUM]		= "ENUM",
	[BTF_KIND_FWD]		= "FWD",
	[BTF_KIND_TYPEDEF]	= "TYPEDEF",
	[BTF_KIND_VOLATILE]	= "VOLATILE",
	[BTF_KIND_CONST]	= "CONST",
	[BTF_KIND_RESTRICT]	= "RESTRICT",
	[BTF_KIND_FUNC]		= "FUNC",
	[BTF_KIND_FUNC_PROTO]	= "FUNC_PROTO",
	[BTF_KIND_VAR]		= "VAR",
	[BTF_KIND_DATASEC]	= "DATASEC",
};
struct btf_kind_operations {
	s32 (*check_meta)(struct btf_verifier_env *env,
			  const struct btf_type *t,
			  u32 meta_left);
	int (*resolve)(struct btf_verifier_env *env,
		       const struct resolve_vertex *v);
	int (*check_member)(struct btf_verifier_env *env,
			    const struct btf_type *struct_type,
			    const struct btf_member *member,
			    const struct btf_type *member_type);
	int (*check_kflag_member)(struct btf_verifier_env *env,
				  const struct btf_type *struct_type,
				  const struct btf_member *member,
				  const struct btf_type *member_type);
	void (*log_details)(struct btf_verifier_env *env,
			    const struct btf_type *t);
	void (*seq_show)(const struct btf *btf, const struct btf_type *t,
			 u32 type_id, void *data, u8 bits_offsets,
			 struct seq_file *m);
};

static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
static struct btf_type btf_void;
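/* Illustrative note (not from the original source): every BTF_KIND_* has one
 * btf_kind_operations entry in kind_ops[], and the verifier dispatches
 * through it, roughly:
 *
 *	const struct btf_kind_operations *ops = btf_type_ops(t);
 *	s32 meta_size = ops->check_meta(env, t, meta_left);
 *
 * so supporting a new kind amounts to providing these callbacks.
 */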
static int btf_resolve(struct btf_verifier_env *env,
		       const struct btf_type *t, u32 type_id);

static bool btf_type_is_modifier(const struct btf_type *t)
{
	/* Some of them are not strictly C modifiers,
	 * but they are grouped into the same bucket here because:
	 *   A type (t) that refers to another
	 *   type through t->type AND its size cannot
	 *   be determined without following the t->type.
	 *
	 * ptr does not fall into this bucket
	 * because its size is always sizeof(void *).
	 */
	switch (BTF_INFO_KIND(t->info)) {
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
		return true;
	}

	return false;
}
bool btf_type_is_void(const struct btf_type *t)
{
	return t == &btf_void;
}

static bool btf_type_is_fwd(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
}

static bool btf_type_is_func(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC;
}

static bool btf_type_is_func_proto(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC_PROTO;
}

static bool btf_type_nosize(const struct btf_type *t)
{
	return btf_type_is_void(t) || btf_type_is_fwd(t) ||
	       btf_type_is_func(t) || btf_type_is_func_proto(t);
}

static bool btf_type_nosize_or_null(const struct btf_type *t)
{
	return !t || btf_type_nosize(t);
}

/* union is only a special case of struct:
 * all its offsetof(member) == 0
 */
static bool btf_type_is_struct(const struct btf_type *t)
{
	u8 kind = BTF_INFO_KIND(t->info);

	return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION;
}
static bool __btf_type_is_struct(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT;
}

static bool btf_type_is_array(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY;
}

static bool btf_type_is_ptr(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_PTR;
}

static bool btf_type_is_int(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_INT;
}

static bool btf_type_is_var(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_VAR;
}

static bool btf_type_is_datasec(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
}

/* Types that act only as a source, not sink or intermediate
 * type when resolving.
 */
static bool btf_type_is_resolve_source_only(const struct btf_type *t)
{
	return btf_type_is_var(t) ||
	       btf_type_is_datasec(t);
}
/* What types need to be resolved?
 *
 * btf_type_is_modifier() is an obvious one.
 *
 * btf_type_is_struct() because its member refers to
 * another type (through member->type).
 *
 * btf_type_is_var() because the variable refers to
 * another type. btf_type_is_datasec() holds multiple
 * btf_type_is_var() types that need resolving.
 *
 * btf_type_is_array() because its element (array->type)
 * refers to another type. An array can be thought of as a
 * special case of struct where the same member type is
 * repeated array->nelems times.
 */
static bool btf_type_needs_resolve(const struct btf_type *t)
{
	return btf_type_is_modifier(t) ||
	       btf_type_is_ptr(t) ||
	       btf_type_is_struct(t) ||
	       btf_type_is_array(t) ||
	       btf_type_is_var(t) ||
	       btf_type_is_datasec(t);
}
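/* Worked example for the predicates above (illustrative only): for
 * "typedef const int *foo_t", the TYPEDEF and CONST are modifiers and the
 * PTR clearly needs resolving, while the INT at the end of the chain is a
 * sink (btf_type_needs_resolve() returns false for it), so the resolver's
 * DFS terminates there.
 */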
/* t->size can be used */
static bool btf_type_has_size(const struct btf_type *t)
{
	switch (BTF_INFO_KIND(t->info)) {
	case BTF_KIND_INT:
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
	case BTF_KIND_ENUM:
	case BTF_KIND_DATASEC:
		return true;
	}

	return false;
}
450 static const char *btf_int_encoding_str(u8 encoding
)
454 else if (encoding
== BTF_INT_SIGNED
)
456 else if (encoding
== BTF_INT_CHAR
)
458 else if (encoding
== BTF_INT_BOOL
)
464 static u16
btf_type_vlen(const struct btf_type
*t
)
466 return BTF_INFO_VLEN(t
->info
);
469 static bool btf_type_kflag(const struct btf_type
*t
)
471 return BTF_INFO_KFLAG(t
->info
);
474 static u32
btf_member_bit_offset(const struct btf_type
*struct_type
,
475 const struct btf_member
*member
)
477 return btf_type_kflag(struct_type
) ? BTF_MEMBER_BIT_OFFSET(member
->offset
)
481 static u32
btf_member_bitfield_size(const struct btf_type
*struct_type
,
482 const struct btf_member
*member
)
484 return btf_type_kflag(struct_type
) ? BTF_MEMBER_BITFIELD_SIZE(member
->offset
)
488 static u32
btf_type_int(const struct btf_type
*t
)
490 return *(u32
*)(t
+ 1);
493 static const struct btf_array
*btf_type_array(const struct btf_type
*t
)
495 return (const struct btf_array
*)(t
+ 1);
498 static const struct btf_member
*btf_type_member(const struct btf_type
*t
)
500 return (const struct btf_member
*)(t
+ 1);
503 static const struct btf_enum
*btf_type_enum(const struct btf_type
*t
)
505 return (const struct btf_enum
*)(t
+ 1);
508 static const struct btf_var
*btf_type_var(const struct btf_type
*t
)
510 return (const struct btf_var
*)(t
+ 1);
513 static const struct btf_var_secinfo
*btf_type_var_secinfo(const struct btf_type
*t
)
515 return (const struct btf_var_secinfo
*)(t
+ 1);
518 static const struct btf_kind_operations
*btf_type_ops(const struct btf_type
*t
)
520 return kind_ops
[BTF_INFO_KIND(t
->info
)];
523 static bool btf_name_offset_valid(const struct btf
*btf
, u32 offset
)
525 return BTF_STR_OFFSET_VALID(offset
) &&
526 offset
< btf
->hdr
.str_len
;
529 static bool __btf_name_char_ok(char c
, bool first
, bool dot_ok
)
531 if ((first
? !isalpha(c
) :
534 ((c
== '.' && !dot_ok
) ||
540 static bool __btf_name_valid(const struct btf
*btf
, u32 offset
, bool dot_ok
)
542 /* offset must be valid */
543 const char *src
= &btf
->strings
[offset
];
544 const char *src_limit
;
546 if (!__btf_name_char_ok(*src
, true, dot_ok
))
549 /* set a limit on identifier length */
550 src_limit
= src
+ KSYM_NAME_LEN
;
552 while (*src
&& src
< src_limit
) {
553 if (!__btf_name_char_ok(*src
, false, dot_ok
))
/* Only a C-style identifier is permitted. This can be relaxed if
 * necessary.
 */
564 static bool btf_name_valid_identifier(const struct btf
*btf
, u32 offset
)
566 return __btf_name_valid(btf
, offset
, false);
569 static bool btf_name_valid_section(const struct btf
*btf
, u32 offset
)
571 return __btf_name_valid(btf
, offset
, true);
574 static const char *__btf_name_by_offset(const struct btf
*btf
, u32 offset
)
578 else if (offset
< btf
->hdr
.str_len
)
579 return &btf
->strings
[offset
];
581 return "(invalid-name-offset)";
584 const char *btf_name_by_offset(const struct btf
*btf
, u32 offset
)
586 if (offset
< btf
->hdr
.str_len
)
587 return &btf
->strings
[offset
];
592 const struct btf_type
*btf_type_by_id(const struct btf
*btf
, u32 type_id
)
594 if (type_id
> btf
->nr_types
)
597 return btf
->types
[type_id
];
/*
 * A regular int is not a bitfield and it must be either
 * u8/u16/u32/u64 or __int128.
 */
604 static bool btf_type_int_is_regular(const struct btf_type
*t
)
606 u8 nr_bits
, nr_bytes
;
609 int_data
= btf_type_int(t
);
610 nr_bits
= BTF_INT_BITS(int_data
);
611 nr_bytes
= BITS_ROUNDUP_BYTES(nr_bits
);
612 if (BITS_PER_BYTE_MASKED(nr_bits
) ||
613 BTF_INT_OFFSET(int_data
) ||
614 (nr_bytes
!= sizeof(u8
) && nr_bytes
!= sizeof(u16
) &&
615 nr_bytes
!= sizeof(u32
) && nr_bytes
!= sizeof(u64
) &&
616 nr_bytes
!= (2 * sizeof(u64
)))) {
624 * Check that given struct member is a regular int with expected
627 bool btf_member_is_reg_int(const struct btf
*btf
, const struct btf_type
*s
,
628 const struct btf_member
*m
,
629 u32 expected_offset
, u32 expected_size
)
631 const struct btf_type
*t
;
636 t
= btf_type_id_size(btf
, &id
, NULL
);
637 if (!t
|| !btf_type_is_int(t
))
640 int_data
= btf_type_int(t
);
641 nr_bits
= BTF_INT_BITS(int_data
);
642 if (btf_type_kflag(s
)) {
643 u32 bitfield_size
= BTF_MEMBER_BITFIELD_SIZE(m
->offset
);
644 u32 bit_offset
= BTF_MEMBER_BIT_OFFSET(m
->offset
);
/* If kflag is set, the int should be a regular int and
 * the bit offset should be at a byte boundary.
 */
649 return !bitfield_size
&&
650 BITS_ROUNDUP_BYTES(bit_offset
) == expected_offset
&&
651 BITS_ROUNDUP_BYTES(nr_bits
) == expected_size
;
654 if (BTF_INT_OFFSET(int_data
) ||
655 BITS_PER_BYTE_MASKED(m
->offset
) ||
656 BITS_ROUNDUP_BYTES(m
->offset
) != expected_offset
||
657 BITS_PER_BYTE_MASKED(nr_bits
) ||
658 BITS_ROUNDUP_BYTES(nr_bits
) != expected_size
)
664 __printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log
*log
,
665 const char *fmt
, ...)
670 bpf_verifier_vlog(log
, fmt
, args
);
674 __printf(2, 3) static void btf_verifier_log(struct btf_verifier_env
*env
,
675 const char *fmt
, ...)
677 struct bpf_verifier_log
*log
= &env
->log
;
680 if (!bpf_verifier_log_needed(log
))
684 bpf_verifier_vlog(log
, fmt
, args
);
688 __printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env
*env
,
689 const struct btf_type
*t
,
691 const char *fmt
, ...)
693 struct bpf_verifier_log
*log
= &env
->log
;
694 u8 kind
= BTF_INFO_KIND(t
->info
);
695 struct btf
*btf
= env
->btf
;
698 if (!bpf_verifier_log_needed(log
))
701 __btf_verifier_log(log
, "[%u] %s %s%s",
704 __btf_name_by_offset(btf
, t
->name_off
),
705 log_details
? " " : "");
708 btf_type_ops(t
)->log_details(env
, t
);
711 __btf_verifier_log(log
, " ");
713 bpf_verifier_vlog(log
, fmt
, args
);
717 __btf_verifier_log(log
, "\n");
720 #define btf_verifier_log_type(env, t, ...) \
721 __btf_verifier_log_type((env), (t), true, __VA_ARGS__)
722 #define btf_verifier_log_basic(env, t, ...) \
723 __btf_verifier_log_type((env), (t), false, __VA_ARGS__)
726 static void btf_verifier_log_member(struct btf_verifier_env
*env
,
727 const struct btf_type
*struct_type
,
728 const struct btf_member
*member
,
729 const char *fmt
, ...)
731 struct bpf_verifier_log
*log
= &env
->log
;
732 struct btf
*btf
= env
->btf
;
735 if (!bpf_verifier_log_needed(log
))
/* The CHECK_META phase already did a btf dump.
 *
 * If the member is logged again, it must have hit an error in
 * parsing this member. It is useful to print out which
 * struct this member belongs to.
 */
744 if (env
->phase
!= CHECK_META
)
745 btf_verifier_log_type(env
, struct_type
, NULL
);
747 if (btf_type_kflag(struct_type
))
748 __btf_verifier_log(log
,
749 "\t%s type_id=%u bitfield_size=%u bits_offset=%u",
750 __btf_name_by_offset(btf
, member
->name_off
),
752 BTF_MEMBER_BITFIELD_SIZE(member
->offset
),
753 BTF_MEMBER_BIT_OFFSET(member
->offset
));
755 __btf_verifier_log(log
, "\t%s type_id=%u bits_offset=%u",
756 __btf_name_by_offset(btf
, member
->name_off
),
757 member
->type
, member
->offset
);
760 __btf_verifier_log(log
, " ");
762 bpf_verifier_vlog(log
, fmt
, args
);
766 __btf_verifier_log(log
, "\n");
770 static void btf_verifier_log_vsi(struct btf_verifier_env
*env
,
771 const struct btf_type
*datasec_type
,
772 const struct btf_var_secinfo
*vsi
,
773 const char *fmt
, ...)
775 struct bpf_verifier_log
*log
= &env
->log
;
778 if (!bpf_verifier_log_needed(log
))
780 if (env
->phase
!= CHECK_META
)
781 btf_verifier_log_type(env
, datasec_type
, NULL
);
783 __btf_verifier_log(log
, "\t type_id=%u offset=%u size=%u",
784 vsi
->type
, vsi
->offset
, vsi
->size
);
786 __btf_verifier_log(log
, " ");
788 bpf_verifier_vlog(log
, fmt
, args
);
792 __btf_verifier_log(log
, "\n");
795 static void btf_verifier_log_hdr(struct btf_verifier_env
*env
,
798 struct bpf_verifier_log
*log
= &env
->log
;
799 const struct btf
*btf
= env
->btf
;
800 const struct btf_header
*hdr
;
802 if (!bpf_verifier_log_needed(log
))
806 __btf_verifier_log(log
, "magic: 0x%x\n", hdr
->magic
);
807 __btf_verifier_log(log
, "version: %u\n", hdr
->version
);
808 __btf_verifier_log(log
, "flags: 0x%x\n", hdr
->flags
);
809 __btf_verifier_log(log
, "hdr_len: %u\n", hdr
->hdr_len
);
810 __btf_verifier_log(log
, "type_off: %u\n", hdr
->type_off
);
811 __btf_verifier_log(log
, "type_len: %u\n", hdr
->type_len
);
812 __btf_verifier_log(log
, "str_off: %u\n", hdr
->str_off
);
813 __btf_verifier_log(log
, "str_len: %u\n", hdr
->str_len
);
814 __btf_verifier_log(log
, "btf_total_size: %u\n", btf_data_size
);
817 static int btf_add_type(struct btf_verifier_env
*env
, struct btf_type
*t
)
819 struct btf
*btf
= env
->btf
;
821 /* < 2 because +1 for btf_void which is always in btf->types[0].
822 * btf_void is not accounted in btf->nr_types because btf_void
823 * does not come from the BTF file.
825 if (btf
->types_size
- btf
->nr_types
< 2) {
826 /* Expand 'types' array */
828 struct btf_type
**new_types
;
829 u32 expand_by
, new_size
;
831 if (btf
->types_size
== BTF_MAX_TYPE
) {
832 btf_verifier_log(env
, "Exceeded max num of types");
836 expand_by
= max_t(u32
, btf
->types_size
>> 2, 16);
837 new_size
= min_t(u32
, BTF_MAX_TYPE
,
838 btf
->types_size
+ expand_by
);
840 new_types
= kvcalloc(new_size
, sizeof(*new_types
),
841 GFP_KERNEL
| __GFP_NOWARN
);
845 if (btf
->nr_types
== 0)
846 new_types
[0] = &btf_void
;
848 memcpy(new_types
, btf
->types
,
849 sizeof(*btf
->types
) * (btf
->nr_types
+ 1));
852 btf
->types
= new_types
;
853 btf
->types_size
= new_size
;
856 btf
->types
[++(btf
->nr_types
)] = t
;
861 static int btf_alloc_id(struct btf
*btf
)
865 idr_preload(GFP_KERNEL
);
866 spin_lock_bh(&btf_idr_lock
);
867 id
= idr_alloc_cyclic(&btf_idr
, btf
, 1, INT_MAX
, GFP_ATOMIC
);
870 spin_unlock_bh(&btf_idr_lock
);
873 if (WARN_ON_ONCE(!id
))
876 return id
> 0 ? 0 : id
;
879 static void btf_free_id(struct btf
*btf
)
/*
 * In map-in-map, calling map_delete_elem() on the outer
 * map will call bpf_map_put on the inner map.
 * It will then eventually call btf_free_id()
 * on the inner map. Some of the map_delete_elem()
 * implementations may have irqs disabled, so
 * we need to use the _irqsave() version instead
 * of the _bh() version.
 */
892 spin_lock_irqsave(&btf_idr_lock
, flags
);
893 idr_remove(&btf_idr
, btf
->id
);
894 spin_unlock_irqrestore(&btf_idr_lock
, flags
);
897 static void btf_free(struct btf
*btf
)
900 kvfree(btf
->resolved_sizes
);
901 kvfree(btf
->resolved_ids
);
906 static void btf_free_rcu(struct rcu_head
*rcu
)
908 struct btf
*btf
= container_of(rcu
, struct btf
, rcu
);
913 void btf_put(struct btf
*btf
)
915 if (btf
&& refcount_dec_and_test(&btf
->refcnt
)) {
917 call_rcu(&btf
->rcu
, btf_free_rcu
);
921 static int env_resolve_init(struct btf_verifier_env
*env
)
923 struct btf
*btf
= env
->btf
;
924 u32 nr_types
= btf
->nr_types
;
925 u32
*resolved_sizes
= NULL
;
926 u32
*resolved_ids
= NULL
;
927 u8
*visit_states
= NULL
;
929 /* +1 for btf_void */
930 resolved_sizes
= kvcalloc(nr_types
+ 1, sizeof(*resolved_sizes
),
931 GFP_KERNEL
| __GFP_NOWARN
);
935 resolved_ids
= kvcalloc(nr_types
+ 1, sizeof(*resolved_ids
),
936 GFP_KERNEL
| __GFP_NOWARN
);
940 visit_states
= kvcalloc(nr_types
+ 1, sizeof(*visit_states
),
941 GFP_KERNEL
| __GFP_NOWARN
);
945 btf
->resolved_sizes
= resolved_sizes
;
946 btf
->resolved_ids
= resolved_ids
;
947 env
->visit_states
= visit_states
;
952 kvfree(resolved_sizes
);
953 kvfree(resolved_ids
);
954 kvfree(visit_states
);
958 static void btf_verifier_env_free(struct btf_verifier_env
*env
)
960 kvfree(env
->visit_states
);
964 static bool env_type_is_resolve_sink(const struct btf_verifier_env
*env
,
965 const struct btf_type
*next_type
)
967 switch (env
->resolve_mode
) {
969 /* int, enum or void is a sink */
970 return !btf_type_needs_resolve(next_type
);
972 /* int, enum, void, struct, array, func or func_proto is a sink
975 return !btf_type_is_modifier(next_type
) &&
976 !btf_type_is_ptr(next_type
);
977 case RESOLVE_STRUCT_OR_ARRAY
:
978 /* int, enum, void, ptr, func or func_proto is a sink
979 * for struct and array
981 return !btf_type_is_modifier(next_type
) &&
982 !btf_type_is_array(next_type
) &&
983 !btf_type_is_struct(next_type
);
989 static bool env_type_is_resolved(const struct btf_verifier_env
*env
,
992 return env
->visit_states
[type_id
] == RESOLVED
;
995 static int env_stack_push(struct btf_verifier_env
*env
,
996 const struct btf_type
*t
, u32 type_id
)
998 struct resolve_vertex
*v
;
1000 if (env
->top_stack
== MAX_RESOLVE_DEPTH
)
1003 if (env
->visit_states
[type_id
] != NOT_VISITED
)
1006 env
->visit_states
[type_id
] = VISITED
;
1008 v
= &env
->stack
[env
->top_stack
++];
1010 v
->type_id
= type_id
;
1013 if (env
->resolve_mode
== RESOLVE_TBD
) {
1014 if (btf_type_is_ptr(t
))
1015 env
->resolve_mode
= RESOLVE_PTR
;
1016 else if (btf_type_is_struct(t
) || btf_type_is_array(t
))
1017 env
->resolve_mode
= RESOLVE_STRUCT_OR_ARRAY
;
1023 static void env_stack_set_next_member(struct btf_verifier_env
*env
,
1026 env
->stack
[env
->top_stack
- 1].next_member
= next_member
;
1029 static void env_stack_pop_resolved(struct btf_verifier_env
*env
,
1030 u32 resolved_type_id
,
1033 u32 type_id
= env
->stack
[--(env
->top_stack
)].type_id
;
1034 struct btf
*btf
= env
->btf
;
1036 btf
->resolved_sizes
[type_id
] = resolved_size
;
1037 btf
->resolved_ids
[type_id
] = resolved_type_id
;
1038 env
->visit_states
[type_id
] = RESOLVED
;
1041 static const struct resolve_vertex
*env_stack_peak(struct btf_verifier_env
*env
)
1043 return env
->top_stack
? &env
->stack
[env
->top_stack
- 1] : NULL
;
1046 /* The input param "type_id" must point to a needs_resolve type */
1047 static const struct btf_type
*btf_type_id_resolve(const struct btf
*btf
,
1050 *type_id
= btf
->resolved_ids
[*type_id
];
1051 return btf_type_by_id(btf
, *type_id
);
1054 const struct btf_type
*btf_type_id_size(const struct btf
*btf
,
1055 u32
*type_id
, u32
*ret_size
)
1057 const struct btf_type
*size_type
;
1058 u32 size_type_id
= *type_id
;
1061 size_type
= btf_type_by_id(btf
, size_type_id
);
1062 if (btf_type_nosize_or_null(size_type
))
1065 if (btf_type_has_size(size_type
)) {
1066 size
= size_type
->size
;
1067 } else if (btf_type_is_array(size_type
)) {
1068 size
= btf
->resolved_sizes
[size_type_id
];
1069 } else if (btf_type_is_ptr(size_type
)) {
1070 size
= sizeof(void *);
1072 if (WARN_ON_ONCE(!btf_type_is_modifier(size_type
) &&
1073 !btf_type_is_var(size_type
)))
1076 size
= btf
->resolved_sizes
[size_type_id
];
1077 size_type_id
= btf
->resolved_ids
[size_type_id
];
1078 size_type
= btf_type_by_id(btf
, size_type_id
);
1079 if (btf_type_nosize_or_null(size_type
))
1083 *type_id
= size_type_id
;
1090 static int btf_df_check_member(struct btf_verifier_env
*env
,
1091 const struct btf_type
*struct_type
,
1092 const struct btf_member
*member
,
1093 const struct btf_type
*member_type
)
1095 btf_verifier_log_basic(env
, struct_type
,
1096 "Unsupported check_member");
1100 static int btf_df_check_kflag_member(struct btf_verifier_env
*env
,
1101 const struct btf_type
*struct_type
,
1102 const struct btf_member
*member
,
1103 const struct btf_type
*member_type
)
1105 btf_verifier_log_basic(env
, struct_type
,
1106 "Unsupported check_kflag_member");
/* Used for ptr, array and struct/union type members.
 * int, enum and modifier types have their own specific callback functions.
 */
1113 static int btf_generic_check_kflag_member(struct btf_verifier_env
*env
,
1114 const struct btf_type
*struct_type
,
1115 const struct btf_member
*member
,
1116 const struct btf_type
*member_type
)
1118 if (BTF_MEMBER_BITFIELD_SIZE(member
->offset
)) {
1119 btf_verifier_log_member(env
, struct_type
, member
,
1120 "Invalid member bitfield_size");
/* The bitfield size is 0, so member->offset represents the bit offset only.
 * It is safe to call the non-kflag check_member variants.
 */
1127 return btf_type_ops(member_type
)->check_member(env
, struct_type
,
1132 static int btf_df_resolve(struct btf_verifier_env
*env
,
1133 const struct resolve_vertex
*v
)
1135 btf_verifier_log_basic(env
, v
->t
, "Unsupported resolve");
1139 static void btf_df_seq_show(const struct btf
*btf
, const struct btf_type
*t
,
1140 u32 type_id
, void *data
, u8 bits_offsets
,
1143 seq_printf(m
, "<unsupported kind:%u>", BTF_INFO_KIND(t
->info
));
1146 static int btf_int_check_member(struct btf_verifier_env
*env
,
1147 const struct btf_type
*struct_type
,
1148 const struct btf_member
*member
,
1149 const struct btf_type
*member_type
)
1151 u32 int_data
= btf_type_int(member_type
);
1152 u32 struct_bits_off
= member
->offset
;
1153 u32 struct_size
= struct_type
->size
;
1157 if (U32_MAX
- struct_bits_off
< BTF_INT_OFFSET(int_data
)) {
1158 btf_verifier_log_member(env
, struct_type
, member
,
1159 "bits_offset exceeds U32_MAX");
1163 struct_bits_off
+= BTF_INT_OFFSET(int_data
);
1164 bytes_offset
= BITS_ROUNDDOWN_BYTES(struct_bits_off
);
1165 nr_copy_bits
= BTF_INT_BITS(int_data
) +
1166 BITS_PER_BYTE_MASKED(struct_bits_off
);
1168 if (nr_copy_bits
> BITS_PER_U128
) {
1169 btf_verifier_log_member(env
, struct_type
, member
,
1170 "nr_copy_bits exceeds 128");
1174 if (struct_size
< bytes_offset
||
1175 struct_size
- bytes_offset
< BITS_ROUNDUP_BYTES(nr_copy_bits
)) {
1176 btf_verifier_log_member(env
, struct_type
, member
,
1177 "Member exceeds struct_size");
1184 static int btf_int_check_kflag_member(struct btf_verifier_env
*env
,
1185 const struct btf_type
*struct_type
,
1186 const struct btf_member
*member
,
1187 const struct btf_type
*member_type
)
1189 u32 struct_bits_off
, nr_bits
, nr_int_data_bits
, bytes_offset
;
1190 u32 int_data
= btf_type_int(member_type
);
1191 u32 struct_size
= struct_type
->size
;
1194 /* a regular int type is required for the kflag int member */
1195 if (!btf_type_int_is_regular(member_type
)) {
1196 btf_verifier_log_member(env
, struct_type
, member
,
1197 "Invalid member base type");
1201 /* check sanity of bitfield size */
1202 nr_bits
= BTF_MEMBER_BITFIELD_SIZE(member
->offset
);
1203 struct_bits_off
= BTF_MEMBER_BIT_OFFSET(member
->offset
);
1204 nr_int_data_bits
= BTF_INT_BITS(int_data
);
1206 /* Not a bitfield member, member offset must be at byte
1209 if (BITS_PER_BYTE_MASKED(struct_bits_off
)) {
1210 btf_verifier_log_member(env
, struct_type
, member
,
1211 "Invalid member offset");
1215 nr_bits
= nr_int_data_bits
;
1216 } else if (nr_bits
> nr_int_data_bits
) {
1217 btf_verifier_log_member(env
, struct_type
, member
,
1218 "Invalid member bitfield_size");
1222 bytes_offset
= BITS_ROUNDDOWN_BYTES(struct_bits_off
);
1223 nr_copy_bits
= nr_bits
+ BITS_PER_BYTE_MASKED(struct_bits_off
);
1224 if (nr_copy_bits
> BITS_PER_U128
) {
1225 btf_verifier_log_member(env
, struct_type
, member
,
1226 "nr_copy_bits exceeds 128");
1230 if (struct_size
< bytes_offset
||
1231 struct_size
- bytes_offset
< BITS_ROUNDUP_BYTES(nr_copy_bits
)) {
1232 btf_verifier_log_member(env
, struct_type
, member
,
1233 "Member exceeds struct_size");
1240 static s32
btf_int_check_meta(struct btf_verifier_env
*env
,
1241 const struct btf_type
*t
,
1244 u32 int_data
, nr_bits
, meta_needed
= sizeof(int_data
);
1247 if (meta_left
< meta_needed
) {
1248 btf_verifier_log_basic(env
, t
,
1249 "meta_left:%u meta_needed:%u",
1250 meta_left
, meta_needed
);
1254 if (btf_type_vlen(t
)) {
1255 btf_verifier_log_type(env
, t
, "vlen != 0");
1259 if (btf_type_kflag(t
)) {
1260 btf_verifier_log_type(env
, t
, "Invalid btf_info kind_flag");
1264 int_data
= btf_type_int(t
);
1265 if (int_data
& ~BTF_INT_MASK
) {
1266 btf_verifier_log_basic(env
, t
, "Invalid int_data:%x",
1271 nr_bits
= BTF_INT_BITS(int_data
) + BTF_INT_OFFSET(int_data
);
1273 if (nr_bits
> BITS_PER_U128
) {
1274 btf_verifier_log_type(env
, t
, "nr_bits exceeds %zu",
1279 if (BITS_ROUNDUP_BYTES(nr_bits
) > t
->size
) {
1280 btf_verifier_log_type(env
, t
, "nr_bits exceeds type_size");
/*
 * Only one of the encoding bits is allowed, and it
 * should be sufficient for pretty-printing purposes (i.e. decoding).
 * Multiple bits can be allowed later if this is found
 * to be insufficient.
 */
1290 encoding
= BTF_INT_ENCODING(int_data
);
1292 encoding
!= BTF_INT_SIGNED
&&
1293 encoding
!= BTF_INT_CHAR
&&
1294 encoding
!= BTF_INT_BOOL
) {
1295 btf_verifier_log_type(env
, t
, "Unsupported encoding");
1299 btf_verifier_log_type(env
, t
, NULL
);
1304 static void btf_int_log(struct btf_verifier_env
*env
,
1305 const struct btf_type
*t
)
1307 int int_data
= btf_type_int(t
);
1309 btf_verifier_log(env
,
1310 "size=%u bits_offset=%u nr_bits=%u encoding=%s",
1311 t
->size
, BTF_INT_OFFSET(int_data
),
1312 BTF_INT_BITS(int_data
),
1313 btf_int_encoding_str(BTF_INT_ENCODING(int_data
)));
1316 static void btf_int128_print(struct seq_file
*m
, void *data
)
/* data points to a __int128 number.
 *
 * int128_num = *(__int128 *)data;
 * The formulas below show what upper_num and lower_num represent:
 * upper_num = int128_num >> 64;
 * lower_num = int128_num & 0xffffffffFFFFFFFFULL;
 */
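/* Illustrative example (not from the original source): for a 128-bit value
 * whose upper 64 bits are 0xab and lower 64 bits are 0xcd, upper_num == 0xab
 * and lower_num == 0xcd, and the code below emits "0xab00000000000000cd".
 */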
1325 u64 upper_num
, lower_num
;
1327 #ifdef __BIG_ENDIAN_BITFIELD
1328 upper_num
= *(u64
*)data
;
1329 lower_num
= *(u64
*)(data
+ 8);
1331 upper_num
= *(u64
*)(data
+ 8);
1332 lower_num
= *(u64
*)data
;
1335 seq_printf(m
, "0x%llx", lower_num
);
1337 seq_printf(m
, "0x%llx%016llx", upper_num
, lower_num
);
1340 static void btf_int128_shift(u64
*print_num
, u16 left_shift_bits
,
1341 u16 right_shift_bits
)
1343 u64 upper_num
, lower_num
;
1345 #ifdef __BIG_ENDIAN_BITFIELD
1346 upper_num
= print_num
[0];
1347 lower_num
= print_num
[1];
1349 upper_num
= print_num
[1];
1350 lower_num
= print_num
[0];
1353 /* shake out un-needed bits by shift/or operations */
1354 if (left_shift_bits
>= 64) {
1355 upper_num
= lower_num
<< (left_shift_bits
- 64);
1358 upper_num
= (upper_num
<< left_shift_bits
) |
1359 (lower_num
>> (64 - left_shift_bits
));
1360 lower_num
= lower_num
<< left_shift_bits
;
1363 if (right_shift_bits
>= 64) {
1364 lower_num
= upper_num
>> (right_shift_bits
- 64);
1367 lower_num
= (lower_num
>> right_shift_bits
) |
1368 (upper_num
<< (64 - right_shift_bits
));
1369 upper_num
= upper_num
>> right_shift_bits
;
1372 #ifdef __BIG_ENDIAN_BITFIELD
1373 print_num
[0] = upper_num
;
1374 print_num
[1] = lower_num
;
1376 print_num
[0] = lower_num
;
1377 print_num
[1] = upper_num
;
1381 static void btf_bitfield_seq_show(void *data
, u8 bits_offset
,
1382 u8 nr_bits
, struct seq_file
*m
)
1384 u16 left_shift_bits
, right_shift_bits
;
1387 u64 print_num
[2] = {};
1389 nr_copy_bits
= nr_bits
+ bits_offset
;
1390 nr_copy_bytes
= BITS_ROUNDUP_BYTES(nr_copy_bits
);
1392 memcpy(print_num
, data
, nr_copy_bytes
);
1394 #ifdef __BIG_ENDIAN_BITFIELD
1395 left_shift_bits
= bits_offset
;
1397 left_shift_bits
= BITS_PER_U128
- nr_copy_bits
;
1399 right_shift_bits
= BITS_PER_U128
- nr_bits
;
1401 btf_int128_shift(print_num
, left_shift_bits
, right_shift_bits
);
1402 btf_int128_print(m
, print_num
);
1406 static void btf_int_bits_seq_show(const struct btf
*btf
,
1407 const struct btf_type
*t
,
1408 void *data
, u8 bits_offset
,
1411 u32 int_data
= btf_type_int(t
);
1412 u8 nr_bits
= BTF_INT_BITS(int_data
);
1413 u8 total_bits_offset
;
1416 * bits_offset is at most 7.
1417 * BTF_INT_OFFSET() cannot exceed 128 bits.
1419 total_bits_offset
= bits_offset
+ BTF_INT_OFFSET(int_data
);
1420 data
+= BITS_ROUNDDOWN_BYTES(total_bits_offset
);
1421 bits_offset
= BITS_PER_BYTE_MASKED(total_bits_offset
);
1422 btf_bitfield_seq_show(data
, bits_offset
, nr_bits
, m
);
1425 static void btf_int_seq_show(const struct btf
*btf
, const struct btf_type
*t
,
1426 u32 type_id
, void *data
, u8 bits_offset
,
1429 u32 int_data
= btf_type_int(t
);
1430 u8 encoding
= BTF_INT_ENCODING(int_data
);
1431 bool sign
= encoding
& BTF_INT_SIGNED
;
1432 u8 nr_bits
= BTF_INT_BITS(int_data
);
1434 if (bits_offset
|| BTF_INT_OFFSET(int_data
) ||
1435 BITS_PER_BYTE_MASKED(nr_bits
)) {
1436 btf_int_bits_seq_show(btf
, t
, data
, bits_offset
, m
);
1442 btf_int128_print(m
, data
);
1446 seq_printf(m
, "%lld", *(s64
*)data
);
1448 seq_printf(m
, "%llu", *(u64
*)data
);
1452 seq_printf(m
, "%d", *(s32
*)data
);
1454 seq_printf(m
, "%u", *(u32
*)data
);
1458 seq_printf(m
, "%d", *(s16
*)data
);
1460 seq_printf(m
, "%u", *(u16
*)data
);
1464 seq_printf(m
, "%d", *(s8
*)data
);
1466 seq_printf(m
, "%u", *(u8
*)data
);
1469 btf_int_bits_seq_show(btf
, t
, data
, bits_offset
, m
);
1473 static const struct btf_kind_operations int_ops
= {
1474 .check_meta
= btf_int_check_meta
,
1475 .resolve
= btf_df_resolve
,
1476 .check_member
= btf_int_check_member
,
1477 .check_kflag_member
= btf_int_check_kflag_member
,
1478 .log_details
= btf_int_log
,
1479 .seq_show
= btf_int_seq_show
,
1482 static int btf_modifier_check_member(struct btf_verifier_env
*env
,
1483 const struct btf_type
*struct_type
,
1484 const struct btf_member
*member
,
1485 const struct btf_type
*member_type
)
1487 const struct btf_type
*resolved_type
;
1488 u32 resolved_type_id
= member
->type
;
1489 struct btf_member resolved_member
;
1490 struct btf
*btf
= env
->btf
;
1492 resolved_type
= btf_type_id_size(btf
, &resolved_type_id
, NULL
);
1493 if (!resolved_type
) {
1494 btf_verifier_log_member(env
, struct_type
, member
,
1499 resolved_member
= *member
;
1500 resolved_member
.type
= resolved_type_id
;
1502 return btf_type_ops(resolved_type
)->check_member(env
, struct_type
,
1507 static int btf_modifier_check_kflag_member(struct btf_verifier_env
*env
,
1508 const struct btf_type
*struct_type
,
1509 const struct btf_member
*member
,
1510 const struct btf_type
*member_type
)
1512 const struct btf_type
*resolved_type
;
1513 u32 resolved_type_id
= member
->type
;
1514 struct btf_member resolved_member
;
1515 struct btf
*btf
= env
->btf
;
1517 resolved_type
= btf_type_id_size(btf
, &resolved_type_id
, NULL
);
1518 if (!resolved_type
) {
1519 btf_verifier_log_member(env
, struct_type
, member
,
1524 resolved_member
= *member
;
1525 resolved_member
.type
= resolved_type_id
;
1527 return btf_type_ops(resolved_type
)->check_kflag_member(env
, struct_type
,
1532 static int btf_ptr_check_member(struct btf_verifier_env
*env
,
1533 const struct btf_type
*struct_type
,
1534 const struct btf_member
*member
,
1535 const struct btf_type
*member_type
)
1537 u32 struct_size
, struct_bits_off
, bytes_offset
;
1539 struct_size
= struct_type
->size
;
1540 struct_bits_off
= member
->offset
;
1541 bytes_offset
= BITS_ROUNDDOWN_BYTES(struct_bits_off
);
1543 if (BITS_PER_BYTE_MASKED(struct_bits_off
)) {
1544 btf_verifier_log_member(env
, struct_type
, member
,
1545 "Member is not byte aligned");
1549 if (struct_size
- bytes_offset
< sizeof(void *)) {
1550 btf_verifier_log_member(env
, struct_type
, member
,
1551 "Member exceeds struct_size");
1558 static int btf_ref_type_check_meta(struct btf_verifier_env
*env
,
1559 const struct btf_type
*t
,
1562 if (btf_type_vlen(t
)) {
1563 btf_verifier_log_type(env
, t
, "vlen != 0");
1567 if (btf_type_kflag(t
)) {
1568 btf_verifier_log_type(env
, t
, "Invalid btf_info kind_flag");
1572 if (!BTF_TYPE_ID_VALID(t
->type
)) {
1573 btf_verifier_log_type(env
, t
, "Invalid type_id");
1577 /* typedef type must have a valid name, and other ref types,
1578 * volatile, const, restrict, should have a null name.
1580 if (BTF_INFO_KIND(t
->info
) == BTF_KIND_TYPEDEF
) {
1582 !btf_name_valid_identifier(env
->btf
, t
->name_off
)) {
1583 btf_verifier_log_type(env
, t
, "Invalid name");
1588 btf_verifier_log_type(env
, t
, "Invalid name");
1593 btf_verifier_log_type(env
, t
, NULL
);
1598 static int btf_modifier_resolve(struct btf_verifier_env
*env
,
1599 const struct resolve_vertex
*v
)
1601 const struct btf_type
*t
= v
->t
;
1602 const struct btf_type
*next_type
;
1603 u32 next_type_id
= t
->type
;
1604 struct btf
*btf
= env
->btf
;
1605 u32 next_type_size
= 0;
1607 next_type
= btf_type_by_id(btf
, next_type_id
);
1608 if (!next_type
|| btf_type_is_resolve_source_only(next_type
)) {
1609 btf_verifier_log_type(env
, v
->t
, "Invalid type_id");
1613 if (!env_type_is_resolve_sink(env
, next_type
) &&
1614 !env_type_is_resolved(env
, next_type_id
))
1615 return env_stack_push(env
, next_type
, next_type_id
);
/* Figure out the resolved next_type_id with size.
 * They will be stored in the current modifier's
 * resolved_ids and resolved_sizes such that it can
 * save us a few type-followings when we use it later (e.g. in
 * pretty print).
 */
1623 if (!btf_type_id_size(btf
, &next_type_id
, &next_type_size
)) {
1624 if (env_type_is_resolved(env
, next_type_id
))
1625 next_type
= btf_type_id_resolve(btf
, &next_type_id
);
1627 /* "typedef void new_void", "const void"...etc */
1628 if (!btf_type_is_void(next_type
) &&
1629 !btf_type_is_fwd(next_type
) &&
1630 !btf_type_is_func_proto(next_type
)) {
1631 btf_verifier_log_type(env
, v
->t
, "Invalid type_id");
1636 env_stack_pop_resolved(env
, next_type_id
, next_type_size
);
1641 static int btf_var_resolve(struct btf_verifier_env
*env
,
1642 const struct resolve_vertex
*v
)
1644 const struct btf_type
*next_type
;
1645 const struct btf_type
*t
= v
->t
;
1646 u32 next_type_id
= t
->type
;
1647 struct btf
*btf
= env
->btf
;
1650 next_type
= btf_type_by_id(btf
, next_type_id
);
1651 if (!next_type
|| btf_type_is_resolve_source_only(next_type
)) {
1652 btf_verifier_log_type(env
, v
->t
, "Invalid type_id");
1656 if (!env_type_is_resolve_sink(env
, next_type
) &&
1657 !env_type_is_resolved(env
, next_type_id
))
1658 return env_stack_push(env
, next_type
, next_type_id
);
1660 if (btf_type_is_modifier(next_type
)) {
1661 const struct btf_type
*resolved_type
;
1662 u32 resolved_type_id
;
1664 resolved_type_id
= next_type_id
;
1665 resolved_type
= btf_type_id_resolve(btf
, &resolved_type_id
);
1667 if (btf_type_is_ptr(resolved_type
) &&
1668 !env_type_is_resolve_sink(env
, resolved_type
) &&
1669 !env_type_is_resolved(env
, resolved_type_id
))
1670 return env_stack_push(env
, resolved_type
,
/* We must resolve to something concrete at this point, no
 * forward types or similar that would resolve to size of
 * zero is allowed.
 */
1678 if (!btf_type_id_size(btf
, &next_type_id
, &next_type_size
)) {
1679 btf_verifier_log_type(env
, v
->t
, "Invalid type_id");
1683 env_stack_pop_resolved(env
, next_type_id
, next_type_size
);
1688 static int btf_ptr_resolve(struct btf_verifier_env
*env
,
1689 const struct resolve_vertex
*v
)
1691 const struct btf_type
*next_type
;
1692 const struct btf_type
*t
= v
->t
;
1693 u32 next_type_id
= t
->type
;
1694 struct btf
*btf
= env
->btf
;
1696 next_type
= btf_type_by_id(btf
, next_type_id
);
1697 if (!next_type
|| btf_type_is_resolve_source_only(next_type
)) {
1698 btf_verifier_log_type(env
, v
->t
, "Invalid type_id");
1702 if (!env_type_is_resolve_sink(env
, next_type
) &&
1703 !env_type_is_resolved(env
, next_type_id
))
1704 return env_stack_push(env
, next_type
, next_type_id
);
/* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY,
 * the modifier may have stopped resolving when it was resolved
 * to a ptr (last-resolved-ptr).
 *
 * We now need to continue from the last-resolved-ptr to
 * ensure the last-resolved-ptr is not referring back to
 * the current ptr (t).
 */
1714 if (btf_type_is_modifier(next_type
)) {
1715 const struct btf_type
*resolved_type
;
1716 u32 resolved_type_id
;
1718 resolved_type_id
= next_type_id
;
1719 resolved_type
= btf_type_id_resolve(btf
, &resolved_type_id
);
1721 if (btf_type_is_ptr(resolved_type
) &&
1722 !env_type_is_resolve_sink(env
, resolved_type
) &&
1723 !env_type_is_resolved(env
, resolved_type_id
))
1724 return env_stack_push(env
, resolved_type
,
1728 if (!btf_type_id_size(btf
, &next_type_id
, NULL
)) {
1729 if (env_type_is_resolved(env
, next_type_id
))
1730 next_type
= btf_type_id_resolve(btf
, &next_type_id
);
1732 if (!btf_type_is_void(next_type
) &&
1733 !btf_type_is_fwd(next_type
) &&
1734 !btf_type_is_func_proto(next_type
)) {
1735 btf_verifier_log_type(env
, v
->t
, "Invalid type_id");
1740 env_stack_pop_resolved(env
, next_type_id
, 0);
1745 static void btf_modifier_seq_show(const struct btf
*btf
,
1746 const struct btf_type
*t
,
1747 u32 type_id
, void *data
,
1748 u8 bits_offset
, struct seq_file
*m
)
1750 t
= btf_type_id_resolve(btf
, &type_id
);
1752 btf_type_ops(t
)->seq_show(btf
, t
, type_id
, data
, bits_offset
, m
);
1755 static void btf_var_seq_show(const struct btf
*btf
, const struct btf_type
*t
,
1756 u32 type_id
, void *data
, u8 bits_offset
,
1759 t
= btf_type_id_resolve(btf
, &type_id
);
1761 btf_type_ops(t
)->seq_show(btf
, t
, type_id
, data
, bits_offset
, m
);
1764 static void btf_ptr_seq_show(const struct btf
*btf
, const struct btf_type
*t
,
1765 u32 type_id
, void *data
, u8 bits_offset
,
1768 /* It is a hashed value */
1769 seq_printf(m
, "%p", *(void **)data
);
1772 static void btf_ref_type_log(struct btf_verifier_env
*env
,
1773 const struct btf_type
*t
)
1775 btf_verifier_log(env
, "type_id=%u", t
->type
);
1778 static struct btf_kind_operations modifier_ops
= {
1779 .check_meta
= btf_ref_type_check_meta
,
1780 .resolve
= btf_modifier_resolve
,
1781 .check_member
= btf_modifier_check_member
,
1782 .check_kflag_member
= btf_modifier_check_kflag_member
,
1783 .log_details
= btf_ref_type_log
,
1784 .seq_show
= btf_modifier_seq_show
,
1787 static struct btf_kind_operations ptr_ops
= {
1788 .check_meta
= btf_ref_type_check_meta
,
1789 .resolve
= btf_ptr_resolve
,
1790 .check_member
= btf_ptr_check_member
,
1791 .check_kflag_member
= btf_generic_check_kflag_member
,
1792 .log_details
= btf_ref_type_log
,
1793 .seq_show
= btf_ptr_seq_show
,
1796 static s32
btf_fwd_check_meta(struct btf_verifier_env
*env
,
1797 const struct btf_type
*t
,
1800 if (btf_type_vlen(t
)) {
1801 btf_verifier_log_type(env
, t
, "vlen != 0");
1806 btf_verifier_log_type(env
, t
, "type != 0");
1810 /* fwd type must have a valid name */
1812 !btf_name_valid_identifier(env
->btf
, t
->name_off
)) {
1813 btf_verifier_log_type(env
, t
, "Invalid name");
1817 btf_verifier_log_type(env
, t
, NULL
);
1822 static void btf_fwd_type_log(struct btf_verifier_env
*env
,
1823 const struct btf_type
*t
)
1825 btf_verifier_log(env
, "%s", btf_type_kflag(t
) ? "union" : "struct");
1828 static struct btf_kind_operations fwd_ops
= {
1829 .check_meta
= btf_fwd_check_meta
,
1830 .resolve
= btf_df_resolve
,
1831 .check_member
= btf_df_check_member
,
1832 .check_kflag_member
= btf_df_check_kflag_member
,
1833 .log_details
= btf_fwd_type_log
,
1834 .seq_show
= btf_df_seq_show
,
1837 static int btf_array_check_member(struct btf_verifier_env
*env
,
1838 const struct btf_type
*struct_type
,
1839 const struct btf_member
*member
,
1840 const struct btf_type
*member_type
)
1842 u32 struct_bits_off
= member
->offset
;
1843 u32 struct_size
, bytes_offset
;
1844 u32 array_type_id
, array_size
;
1845 struct btf
*btf
= env
->btf
;
1847 if (BITS_PER_BYTE_MASKED(struct_bits_off
)) {
1848 btf_verifier_log_member(env
, struct_type
, member
,
1849 "Member is not byte aligned");
1853 array_type_id
= member
->type
;
1854 btf_type_id_size(btf
, &array_type_id
, &array_size
);
1855 struct_size
= struct_type
->size
;
1856 bytes_offset
= BITS_ROUNDDOWN_BYTES(struct_bits_off
);
1857 if (struct_size
- bytes_offset
< array_size
) {
1858 btf_verifier_log_member(env
, struct_type
, member
,
1859 "Member exceeds struct_size");
1866 static s32
btf_array_check_meta(struct btf_verifier_env
*env
,
1867 const struct btf_type
*t
,
1870 const struct btf_array
*array
= btf_type_array(t
);
1871 u32 meta_needed
= sizeof(*array
);
1873 if (meta_left
< meta_needed
) {
1874 btf_verifier_log_basic(env
, t
,
1875 "meta_left:%u meta_needed:%u",
1876 meta_left
, meta_needed
);
1880 /* array type should not have a name */
1882 btf_verifier_log_type(env
, t
, "Invalid name");
1886 if (btf_type_vlen(t
)) {
1887 btf_verifier_log_type(env
, t
, "vlen != 0");
1891 if (btf_type_kflag(t
)) {
1892 btf_verifier_log_type(env
, t
, "Invalid btf_info kind_flag");
1897 btf_verifier_log_type(env
, t
, "size != 0");
1901 /* Array elem type and index type cannot be in type void,
1902 * so !array->type and !array->index_type are not allowed.
1904 if (!array
->type
|| !BTF_TYPE_ID_VALID(array
->type
)) {
1905 btf_verifier_log_type(env
, t
, "Invalid elem");
1909 if (!array
->index_type
|| !BTF_TYPE_ID_VALID(array
->index_type
)) {
1910 btf_verifier_log_type(env
, t
, "Invalid index");
1914 btf_verifier_log_type(env
, t
, NULL
);
1919 static int btf_array_resolve(struct btf_verifier_env
*env
,
1920 const struct resolve_vertex
*v
)
1922 const struct btf_array
*array
= btf_type_array(v
->t
);
1923 const struct btf_type
*elem_type
, *index_type
;
1924 u32 elem_type_id
, index_type_id
;
1925 struct btf
*btf
= env
->btf
;
1928 /* Check array->index_type */
1929 index_type_id
= array
->index_type
;
1930 index_type
= btf_type_by_id(btf
, index_type_id
);
1931 if (btf_type_is_resolve_source_only(index_type
) ||
1932 btf_type_nosize_or_null(index_type
)) {
1933 btf_verifier_log_type(env
, v
->t
, "Invalid index");
1937 if (!env_type_is_resolve_sink(env
, index_type
) &&
1938 !env_type_is_resolved(env
, index_type_id
))
1939 return env_stack_push(env
, index_type
, index_type_id
);
1941 index_type
= btf_type_id_size(btf
, &index_type_id
, NULL
);
1942 if (!index_type
|| !btf_type_is_int(index_type
) ||
1943 !btf_type_int_is_regular(index_type
)) {
1944 btf_verifier_log_type(env
, v
->t
, "Invalid index");
1948 /* Check array->type */
1949 elem_type_id
= array
->type
;
1950 elem_type
= btf_type_by_id(btf
, elem_type_id
);
1951 if (btf_type_is_resolve_source_only(elem_type
) ||
1952 btf_type_nosize_or_null(elem_type
)) {
1953 btf_verifier_log_type(env
, v
->t
,
1958 if (!env_type_is_resolve_sink(env
, elem_type
) &&
1959 !env_type_is_resolved(env
, elem_type_id
))
1960 return env_stack_push(env
, elem_type
, elem_type_id
);
1962 elem_type
= btf_type_id_size(btf
, &elem_type_id
, &elem_size
);
1964 btf_verifier_log_type(env
, v
->t
, "Invalid elem");
1968 if (btf_type_is_int(elem_type
) && !btf_type_int_is_regular(elem_type
)) {
1969 btf_verifier_log_type(env
, v
->t
, "Invalid array of int");
1973 if (array
->nelems
&& elem_size
> U32_MAX
/ array
->nelems
) {
1974 btf_verifier_log_type(env
, v
->t
,
1975 "Array size overflows U32_MAX");
1979 env_stack_pop_resolved(env
, elem_type_id
, elem_size
* array
->nelems
);
1984 static void btf_array_log(struct btf_verifier_env
*env
,
1985 const struct btf_type
*t
)
1987 const struct btf_array
*array
= btf_type_array(t
);
1989 btf_verifier_log(env
, "type_id=%u index_type_id=%u nr_elems=%u",
1990 array
->type
, array
->index_type
, array
->nelems
);
1993 static void btf_array_seq_show(const struct btf
*btf
, const struct btf_type
*t
,
1994 u32 type_id
, void *data
, u8 bits_offset
,
1997 const struct btf_array
*array
= btf_type_array(t
);
1998 const struct btf_kind_operations
*elem_ops
;
1999 const struct btf_type
*elem_type
;
2000 u32 i
, elem_size
, elem_type_id
;
2002 elem_type_id
= array
->type
;
2003 elem_type
= btf_type_id_size(btf
, &elem_type_id
, &elem_size
);
2004 elem_ops
= btf_type_ops(elem_type
);
2006 for (i
= 0; i
< array
->nelems
; i
++) {
2010 elem_ops
->seq_show(btf
, elem_type
, elem_type_id
, data
,
2017 static struct btf_kind_operations array_ops
= {
2018 .check_meta
= btf_array_check_meta
,
2019 .resolve
= btf_array_resolve
,
2020 .check_member
= btf_array_check_member
,
2021 .check_kflag_member
= btf_generic_check_kflag_member
,
2022 .log_details
= btf_array_log
,
2023 .seq_show
= btf_array_seq_show
,
2026 static int btf_struct_check_member(struct btf_verifier_env
*env
,
2027 const struct btf_type
*struct_type
,
2028 const struct btf_member
*member
,
2029 const struct btf_type
*member_type
)
2031 u32 struct_bits_off
= member
->offset
;
2032 u32 struct_size
, bytes_offset
;
2034 if (BITS_PER_BYTE_MASKED(struct_bits_off
)) {
2035 btf_verifier_log_member(env
, struct_type
, member
,
2036 "Member is not byte aligned");
2040 struct_size
= struct_type
->size
;
2041 bytes_offset
= BITS_ROUNDDOWN_BYTES(struct_bits_off
);
2042 if (struct_size
- bytes_offset
< member_type
->size
) {
2043 btf_verifier_log_member(env
, struct_type
, member
,
2044 "Member exceeds struct_size");
2051 static s32
btf_struct_check_meta(struct btf_verifier_env
*env
,
2052 const struct btf_type
*t
,
2055 bool is_union
= BTF_INFO_KIND(t
->info
) == BTF_KIND_UNION
;
2056 const struct btf_member
*member
;
2057 u32 meta_needed
, last_offset
;
2058 struct btf
*btf
= env
->btf
;
2059 u32 struct_size
= t
->size
;
2063 meta_needed
= btf_type_vlen(t
) * sizeof(*member
);
2064 if (meta_left
< meta_needed
) {
2065 btf_verifier_log_basic(env
, t
,
2066 "meta_left:%u meta_needed:%u",
2067 meta_left
, meta_needed
);
2071 /* struct type either no name or a valid one */
2073 !btf_name_valid_identifier(env
->btf
, t
->name_off
)) {
2074 btf_verifier_log_type(env
, t
, "Invalid name");
2078 btf_verifier_log_type(env
, t
, NULL
);
2081 for_each_member(i
, t
, member
) {
2082 if (!btf_name_offset_valid(btf
, member
->name_off
)) {
2083 btf_verifier_log_member(env
, t
, member
,
2084 "Invalid member name_offset:%u",
2089 /* struct member either no name or a valid one */
2090 if (member
->name_off
&&
2091 !btf_name_valid_identifier(btf
, member
->name_off
)) {
2092 btf_verifier_log_member(env
, t
, member
, "Invalid name");
2095 /* A member cannot be in type void */
2096 if (!member
->type
|| !BTF_TYPE_ID_VALID(member
->type
)) {
2097 btf_verifier_log_member(env
, t
, member
,
2102 offset
= btf_member_bit_offset(t
, member
);
2103 if (is_union
&& offset
) {
2104 btf_verifier_log_member(env
, t
, member
,
2105 "Invalid member bits_offset");
2110 * ">" instead of ">=" because the last member could be
2113 if (last_offset
> offset
) {
2114 btf_verifier_log_member(env
, t
, member
,
2115 "Invalid member bits_offset");
2119 if (BITS_ROUNDUP_BYTES(offset
) > struct_size
) {
2120 btf_verifier_log_member(env
, t
, member
,
2121 "Member bits_offset exceeds its struct size");
2125 btf_verifier_log_member(env
, t
, member
, NULL
);
2126 last_offset
= offset
;
2132 static int btf_struct_resolve(struct btf_verifier_env
*env
,
2133 const struct resolve_vertex
*v
)
2135 const struct btf_member
*member
;
/* Before continuing to resolve the next_member,
 * ensure the last member has indeed been resolved to a
 * type with size info.
 */
2143 if (v
->next_member
) {
2144 const struct btf_type
*last_member_type
;
2145 const struct btf_member
*last_member
;
2146 u16 last_member_type_id
;
2148 last_member
= btf_type_member(v
->t
) + v
->next_member
- 1;
2149 last_member_type_id
= last_member
->type
;
2150 if (WARN_ON_ONCE(!env_type_is_resolved(env
,
2151 last_member_type_id
)))
2154 last_member_type
= btf_type_by_id(env
->btf
,
2155 last_member_type_id
);
2156 if (btf_type_kflag(v
->t
))
2157 err
= btf_type_ops(last_member_type
)->check_kflag_member(env
, v
->t
,
2161 err
= btf_type_ops(last_member_type
)->check_member(env
, v
->t
,
2168 for_each_member_from(i
, v
->next_member
, v
->t
, member
) {
2169 u32 member_type_id
= member
->type
;
2170 const struct btf_type
*member_type
= btf_type_by_id(env
->btf
,
2173 if (btf_type_is_resolve_source_only(member_type
) ||
2174 btf_type_nosize_or_null(member_type
)) {
2175 btf_verifier_log_member(env
, v
->t
, member
,
2180 if (!env_type_is_resolve_sink(env
, member_type
) &&
2181 !env_type_is_resolved(env
, member_type_id
)) {
2182 env_stack_set_next_member(env
, i
+ 1);
2183 return env_stack_push(env
, member_type
, member_type_id
);
2186 if (btf_type_kflag(v
->t
))
2187 err
= btf_type_ops(member_type
)->check_kflag_member(env
, v
->t
,
2191 err
= btf_type_ops(member_type
)->check_member(env
, v
->t
,
2198 env_stack_pop_resolved(env
, 0, 0);
2203 static void btf_struct_log(struct btf_verifier_env
*env
,
2204 const struct btf_type
*t
)
2206 btf_verifier_log(env
, "size=%u vlen=%u", t
->size
, btf_type_vlen(t
));
2209 /* find 'struct bpf_spin_lock' in map value.
2210 * return >= 0 offset if found
2211 * and < 0 in case of error
2213 int btf_find_spin_lock(const struct btf
*btf
, const struct btf_type
*t
)
2215 const struct btf_member
*member
;
2216 u32 i
, off
= -ENOENT
;
2218 if (!__btf_type_is_struct(t
))
2221 for_each_member(i
, t
, member
) {
2222 const struct btf_type
*member_type
= btf_type_by_id(btf
,
2224 if (!__btf_type_is_struct(member_type
))
2226 if (member_type
->size
!= sizeof(struct bpf_spin_lock
))
2228 if (strcmp(__btf_name_by_offset(btf
, member_type
->name_off
),
2232 /* only one 'struct bpf_spin_lock' is allowed */
2234 off
= btf_member_bit_offset(t
, member
);
2236 /* valid C code cannot generate such BTF */
2239 if (off
% __alignof__(struct bpf_spin_lock
))
2240 /* valid struct bpf_spin_lock will be 4 byte aligned */
static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t,
				u32 type_id, void *data, u8 bits_offset,
				struct seq_file *m)
{
	const char *seq = BTF_INFO_KIND(t->info) == BTF_KIND_UNION ? "|" : ",";
	const struct btf_member *member;
	u32 i;

	seq_puts(m, "{");
	for_each_member(i, t, member) {
		const struct btf_type *member_type = btf_type_by_id(btf,
								member->type);
		const struct btf_kind_operations *ops;
		u32 member_offset, bitfield_size;
		u32 bytes_offset;
		u8 bits8_offset;

		if (i)
			seq_puts(m, seq);

		member_offset = btf_member_bit_offset(t, member);
		bitfield_size = btf_member_bitfield_size(t, member);
		bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
		bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
		if (bitfield_size) {
			btf_bitfield_seq_show(data + bytes_offset, bits8_offset,
					      bitfield_size, m);
		} else {
			ops = btf_type_ops(member_type);
			ops->seq_show(btf, member_type, member->type,
				      data + bytes_offset, bits8_offset, m);
		}
	}
	seq_puts(m, "}");
}

static struct btf_kind_operations struct_ops = {
	.check_meta = btf_struct_check_meta,
	.resolve = btf_struct_resolve,
	.check_member = btf_struct_check_member,
	.check_kflag_member = btf_generic_check_kflag_member,
	.log_details = btf_struct_log,
	.seq_show = btf_struct_seq_show,
};

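/* An enum member of a struct must be byte aligned and must have
 * sizeof(int) bytes available from its offset to the end of the
 * struct.
 */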
static int btf_enum_check_member(struct btf_verifier_env *env,
				 const struct btf_type *struct_type,
				 const struct btf_member *member,
				 const struct btf_type *member_type)
{
	u32 struct_bits_off = member->offset;
	u32 struct_size, bytes_offset;

	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member is not byte aligned");
		return -EINVAL;
	}

	struct_size = struct_type->size;
	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
	if (struct_size - bytes_offset < sizeof(int)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member exceeds struct_size");
		return -EINVAL;
	}

	return 0;
}

static int btf_enum_check_kflag_member(struct btf_verifier_env *env,
				       const struct btf_type *struct_type,
				       const struct btf_member *member,
				       const struct btf_type *member_type)
{
	u32 struct_bits_off, nr_bits, bytes_end, struct_size;
	u32 int_bitsize = sizeof(int) * BITS_PER_BYTE;

	struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
	nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);

	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member is not byte aligned");
		return -EINVAL;
	}

	if (!nr_bits) {
		nr_bits = int_bitsize;
	} else if (nr_bits > int_bitsize) {
		btf_verifier_log_member(env, struct_type, member,
					"Invalid member bitfield_size");
		return -EINVAL;
	}

	struct_size = struct_type->size;
	bytes_end = BITS_ROUNDUP_BYTES(struct_bits_off + nr_bits);
	if (struct_size < bytes_end) {
		btf_verifier_log_member(env, struct_type, member,
					"Member exceeds struct_size");
		return -EINVAL;
	}

	return 0;
}

static s32 btf_enum_check_meta(struct btf_verifier_env *env,
			       const struct btf_type *t,
			       u32 meta_left)
{
	const struct btf_enum *enums = btf_type_enum(t);
	struct btf *btf = env->btf;
	u16 i, nr_enums;
	u32 meta_needed;

	nr_enums = btf_type_vlen(t);
	meta_needed = nr_enums * sizeof(*enums);

	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	if (t->size != sizeof(int)) {
		btf_verifier_log_type(env, t, "Expected size:%zu",
				      sizeof(int));
		return -EINVAL;
	}

	/* enum type either no name or a valid one */
	if (t->name_off &&
	    !btf_name_valid_identifier(env->btf, t->name_off)) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	for (i = 0; i < nr_enums; i++) {
		if (!btf_name_offset_valid(btf, enums[i].name_off)) {
			btf_verifier_log(env, "\tInvalid name_offset:%u",
					 enums[i].name_off);
			return -EINVAL;
		}

		/* enum member must have a valid name */
		if (!enums[i].name_off ||
		    !btf_name_valid_identifier(btf, enums[i].name_off)) {
			btf_verifier_log_type(env, t, "Invalid name");
			return -EINVAL;
		}

		btf_verifier_log(env, "\t%s val=%d\n",
				 __btf_name_by_offset(btf, enums[i].name_off),
				 enums[i].val);
	}

	return meta_needed;
}

static void btf_enum_log(struct btf_verifier_env *env,
			 const struct btf_type *t)
{
	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
}

static void btf_enum_seq_show(const struct btf *btf, const struct btf_type *t,
			      u32 type_id, void *data, u8 bits_offset,
			      struct seq_file *m)
{
	const struct btf_enum *enums = btf_type_enum(t);
	u32 i, nr_enums = btf_type_vlen(t);
	int v = *(int *)data;

	for (i = 0; i < nr_enums; i++) {
		if (v == enums[i].val) {
			seq_printf(m, "%s",
				   __btf_name_by_offset(btf,
							enums[i].name_off));
			return;
		}
	}

	seq_printf(m, "%d", v);
}

static struct btf_kind_operations enum_ops = {
	.check_meta = btf_enum_check_meta,
	.resolve = btf_df_resolve,
	.check_member = btf_enum_check_member,
	.check_kflag_member = btf_enum_check_kflag_member,
	.log_details = btf_enum_log,
	.seq_show = btf_enum_seq_show,
};

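/* A FUNC_PROTO is followed by btf_type_vlen(t) 'struct btf_param'
 * entries and is always anonymous, so only the meta data size, the
 * (absent) name and the kind_flag are checked here.  The return and
 * argument types are checked later in btf_func_proto_check().
 */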
static s32 btf_func_proto_check_meta(struct btf_verifier_env *env,
				     const struct btf_type *t,
				     u32 meta_left)
{
	u32 meta_needed = btf_type_vlen(t) * sizeof(struct btf_param);

	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	if (t->name_off) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	return meta_needed;
}

static void btf_func_proto_log(struct btf_verifier_env *env,
			       const struct btf_type *t)
{
	const struct btf_param *args = (const struct btf_param *)(t + 1);
	u16 nr_args = btf_type_vlen(t), i;

	btf_verifier_log(env, "return=%u args=(", t->type);
	if (!nr_args) {
		btf_verifier_log(env, "void");
		goto done;
	}

	if (nr_args == 1 && !args[0].type) {
		/* Only one vararg */
		btf_verifier_log(env, "vararg");
		goto done;
	}

	btf_verifier_log(env, "%u %s", args[0].type,
			 __btf_name_by_offset(env->btf,
					      args[0].name_off));
	for (i = 1; i < nr_args - 1; i++)
		btf_verifier_log(env, ", %u %s", args[i].type,
				 __btf_name_by_offset(env->btf,
						      args[i].name_off));

	if (nr_args > 1) {
		const struct btf_param *last_arg = &args[nr_args - 1];

		if (last_arg->type)
			btf_verifier_log(env, ", %u %s", last_arg->type,
					 __btf_name_by_offset(env->btf,
							      last_arg->name_off));
		else
			btf_verifier_log(env, ", vararg");
	}

done:
	btf_verifier_log(env, ")");
}

static struct btf_kind_operations func_proto_ops = {
	.check_meta = btf_func_proto_check_meta,
	.resolve = btf_df_resolve,
	/*
	 * BTF_KIND_FUNC_PROTO cannot be directly referred to by
	 * a struct's member.
	 *
	 * It should be a function pointer instead.
	 * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO)
	 *
	 * Hence, there is no btf_func_check_member().
	 */
	.check_member = btf_df_check_member,
	.check_kflag_member = btf_df_check_kflag_member,
	.log_details = btf_func_proto_log,
	.seq_show = btf_df_seq_show,
};

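/* A FUNC must have a valid C identifier as its name and carries no
 * extra meta data; its vlen and kind_flag must be zero.  The referred
 * FUNC_PROTO is checked later in btf_func_check().
 */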
static s32 btf_func_check_meta(struct btf_verifier_env *env,
			       const struct btf_type *t,
			       u32 meta_left)
{
	if (!t->name_off ||
	    !btf_name_valid_identifier(env->btf, t->name_off)) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	if (btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen != 0");
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	return 0;
}

static struct btf_kind_operations func_ops = {
	.check_meta = btf_func_check_meta,
	.resolve = btf_df_resolve,
	.check_member = btf_df_check_member,
	.check_kflag_member = btf_df_check_kflag_member,
	.log_details = btf_ref_type_log,
	.seq_show = btf_df_seq_show,
};

static s32 btf_var_check_meta(struct btf_verifier_env *env,
			      const struct btf_type *t,
			      u32 meta_left)
{
	const struct btf_var *var;
	u32 meta_needed = sizeof(*var);

	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	if (btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen != 0");
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	if (!t->name_off ||
	    !__btf_name_valid(env->btf, t->name_off, true)) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	/* A var cannot be in type void */
	if (!t->type || !BTF_TYPE_ID_VALID(t->type)) {
		btf_verifier_log_type(env, t, "Invalid type_id");
		return -EINVAL;
	}

	var = btf_type_var(t);
	if (var->linkage != BTF_VAR_STATIC &&
	    var->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
		btf_verifier_log_type(env, t, "Linkage not supported");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	return meta_needed;
}

static void btf_var_log(struct btf_verifier_env *env, const struct btf_type *t)
{
	const struct btf_var *var = btf_type_var(t);

	btf_verifier_log(env, "type_id=%u linkage=%u", t->type, var->linkage);
}

static const struct btf_kind_operations var_ops = {
	.check_meta = btf_var_check_meta,
	.resolve = btf_var_resolve,
	.check_member = btf_df_check_member,
	.check_kflag_member = btf_df_check_kflag_member,
	.log_details = btf_var_log,
	.seq_show = btf_var_seq_show,
};

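/* A DATASEC is followed by btf_type_vlen(t) 'struct btf_var_secinfo'
 * entries.  Their offsets must be increasing, must not overlap and
 * must stay within the section size recorded in t->size.
 */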
static s32 btf_datasec_check_meta(struct btf_verifier_env *env,
				  const struct btf_type *t,
				  u32 meta_left)
{
	const struct btf_var_secinfo *vsi;
	u64 last_vsi_end_off = 0, sum = 0;
	u32 i, meta_needed;

	meta_needed = btf_type_vlen(t) * sizeof(*vsi);
	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	if (!btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen == 0");
		return -EINVAL;
	}

	if (!t->size) {
		btf_verifier_log_type(env, t, "size == 0");
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	if (!t->name_off ||
	    !btf_name_valid_section(env->btf, t->name_off)) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	for_each_vsi(i, t, vsi) {
		/* A var cannot be in type void */
		if (!vsi->type || !BTF_TYPE_ID_VALID(vsi->type)) {
			btf_verifier_log_vsi(env, t, vsi,
					     "Invalid type_id");
			return -EINVAL;
		}

		if (vsi->offset < last_vsi_end_off || vsi->offset >= t->size) {
			btf_verifier_log_vsi(env, t, vsi,
					     "Invalid offset");
			return -EINVAL;
		}

		if (!vsi->size || vsi->size > t->size) {
			btf_verifier_log_vsi(env, t, vsi,
					     "Invalid size");
			return -EINVAL;
		}

		last_vsi_end_off = vsi->offset + vsi->size;
		if (last_vsi_end_off > t->size) {
			btf_verifier_log_vsi(env, t, vsi,
					     "Invalid offset+size");
			return -EINVAL;
		}

		btf_verifier_log_vsi(env, t, vsi, NULL);
		sum += vsi->size;
	}

	if (t->size < sum) {
		btf_verifier_log_type(env, t, "Invalid btf_info size");
		return -EINVAL;
	}

	return meta_needed;
}

static int btf_datasec_resolve(struct btf_verifier_env *env,
			       const struct resolve_vertex *v)
{
	const struct btf_var_secinfo *vsi;
	struct btf *btf = env->btf;
	u16 i;

	for_each_vsi_from(i, v->next_member, v->t, vsi) {
		u32 var_type_id = vsi->type, type_id, type_size = 0;
		const struct btf_type *var_type = btf_type_by_id(env->btf,
								 var_type_id);
		if (!var_type || !btf_type_is_var(var_type)) {
			btf_verifier_log_vsi(env, v->t, vsi,
					     "Not a VAR kind member");
			return -EINVAL;
		}

		if (!env_type_is_resolve_sink(env, var_type) &&
		    !env_type_is_resolved(env, var_type_id)) {
			env_stack_set_next_member(env, i + 1);
			return env_stack_push(env, var_type, var_type_id);
		}

		type_id = var_type->type;
		if (!btf_type_id_size(btf, &type_id, &type_size)) {
			btf_verifier_log_vsi(env, v->t, vsi, "Invalid type");
			return -EINVAL;
		}

		if (vsi->size < type_size) {
			btf_verifier_log_vsi(env, v->t, vsi, "Invalid size");
			return -EINVAL;
		}
	}

	env_stack_pop_resolved(env, 0, 0);
	return 0;
}

static void btf_datasec_log(struct btf_verifier_env *env,
			    const struct btf_type *t)
{
	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
}

static void btf_datasec_seq_show(const struct btf *btf,
				 const struct btf_type *t, u32 type_id,
				 void *data, u8 bits_offset,
				 struct seq_file *m)
{
	const struct btf_var_secinfo *vsi;
	const struct btf_type *var;
	u32 i;

	seq_printf(m, "section (\"%s\") = {", __btf_name_by_offset(btf, t->name_off));
	for_each_vsi(i, t, vsi) {
		var = btf_type_by_id(btf, vsi->type);
		if (i)
			seq_puts(m, ",");
		btf_type_ops(var)->seq_show(btf, var, vsi->type,
					    data + vsi->offset, bits_offset, m);
	}
	seq_puts(m, "}");
}

static const struct btf_kind_operations datasec_ops = {
	.check_meta = btf_datasec_check_meta,
	.resolve = btf_datasec_resolve,
	.check_member = btf_df_check_member,
	.check_kflag_member = btf_df_check_kflag_member,
	.log_details = btf_datasec_log,
	.seq_show = btf_datasec_seq_show,
};

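/* Second-pass check of a FUNC_PROTO: the return type (if not void) and
 * every argument type must resolve to a type with a size.  Only the
 * last argument may have type_id 0, which marks a vararg and must then
 * be unnamed.
 */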
static int btf_func_proto_check(struct btf_verifier_env *env,
				const struct btf_type *t)
{
	const struct btf_type *ret_type;
	const struct btf_param *args;
	const struct btf *btf;
	u16 nr_args, i;
	int err;

	btf = env->btf;
	args = (const struct btf_param *)(t + 1);
	nr_args = btf_type_vlen(t);

	/* Check func return type which could be "void" (t->type == 0) */
	if (t->type) {
		u32 ret_type_id = t->type;

		ret_type = btf_type_by_id(btf, ret_type_id);
		if (!ret_type) {
			btf_verifier_log_type(env, t, "Invalid return type");
			return -EINVAL;
		}

		if (btf_type_needs_resolve(ret_type) &&
		    !env_type_is_resolved(env, ret_type_id)) {
			err = btf_resolve(env, ret_type, ret_type_id);
			if (err)
				return err;
		}

		/* Ensure the return type is a type that has a size */
		if (!btf_type_id_size(btf, &ret_type_id, NULL)) {
			btf_verifier_log_type(env, t, "Invalid return type");
			return -EINVAL;
		}
	}

	if (!nr_args)
		return 0;

	/* Last func arg type_id could be 0 if it is a vararg */
	if (!args[nr_args - 1].type) {
		if (args[nr_args - 1].name_off) {
			btf_verifier_log_type(env, t, "Invalid arg#%u",
					      nr_args);
			return -EINVAL;
		}
		nr_args--;
	}

	err = 0;
	for (i = 0; i < nr_args; i++) {
		const struct btf_type *arg_type;
		u32 arg_type_id;

		arg_type_id = args[i].type;
		arg_type = btf_type_by_id(btf, arg_type_id);
		if (!arg_type) {
			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
			err = -EINVAL;
			break;
		}

		if (args[i].name_off &&
		    (!btf_name_offset_valid(btf, args[i].name_off) ||
		     !btf_name_valid_identifier(btf, args[i].name_off))) {
			btf_verifier_log_type(env, t,
					      "Invalid arg#%u", i + 1);
			err = -EINVAL;
			break;
		}

		if (btf_type_needs_resolve(arg_type) &&
		    !env_type_is_resolved(env, arg_type_id)) {
			err = btf_resolve(env, arg_type, arg_type_id);
			if (err)
				break;
		}

		if (!btf_type_id_size(btf, &arg_type_id, NULL)) {
			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
			err = -EINVAL;
			break;
		}
	}

	return err;
}

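/* Second-pass check of a FUNC: it must refer to a FUNC_PROTO, and
 * every non-vararg argument of that prototype must carry a name.
 */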
static int btf_func_check(struct btf_verifier_env *env,
			  const struct btf_type *t)
{
	const struct btf_type *proto_type;
	const struct btf_param *args;
	const struct btf *btf;
	u16 nr_args, i;

	btf = env->btf;
	proto_type = btf_type_by_id(btf, t->type);

	if (!proto_type || !btf_type_is_func_proto(proto_type)) {
		btf_verifier_log_type(env, t, "Invalid type_id");
		return -EINVAL;
	}

	args = (const struct btf_param *)(proto_type + 1);
	nr_args = btf_type_vlen(proto_type);
	for (i = 0; i < nr_args; i++) {
		if (!args[i].name_off && args[i].type) {
			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
	[BTF_KIND_INT] = &int_ops,
	[BTF_KIND_PTR] = &ptr_ops,
	[BTF_KIND_ARRAY] = &array_ops,
	[BTF_KIND_STRUCT] = &struct_ops,
	[BTF_KIND_UNION] = &struct_ops,
	[BTF_KIND_ENUM] = &enum_ops,
	[BTF_KIND_FWD] = &fwd_ops,
	[BTF_KIND_TYPEDEF] = &modifier_ops,
	[BTF_KIND_VOLATILE] = &modifier_ops,
	[BTF_KIND_CONST] = &modifier_ops,
	[BTF_KIND_RESTRICT] = &modifier_ops,
	[BTF_KIND_FUNC] = &func_ops,
	[BTF_KIND_FUNC_PROTO] = &func_proto_ops,
	[BTF_KIND_VAR] = &var_ops,
	[BTF_KIND_DATASEC] = &datasec_ops,
};

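/* Common meta data checks shared by all kinds (info bits, kind range
 * and name offset) before dispatching to the kind specific
 * check_meta().  Returns the total meta data size consumed by this
 * btf_type on success.
 */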
static s32 btf_check_meta(struct btf_verifier_env *env,
			  const struct btf_type *t,
			  u32 meta_left)
{
	u32 saved_meta_left = meta_left;
	s32 var_meta_size;

	if (meta_left < sizeof(*t)) {
		btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu",
				 env->log_type_id, meta_left, sizeof(*t));
		return -EINVAL;
	}
	meta_left -= sizeof(*t);

	if (t->info & ~BTF_INFO_MASK) {
		btf_verifier_log(env, "[%u] Invalid btf_info:%x",
				 env->log_type_id, t->info);
		return -EINVAL;
	}

	if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX ||
	    BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) {
		btf_verifier_log(env, "[%u] Invalid kind:%u",
				 env->log_type_id, BTF_INFO_KIND(t->info));
		return -EINVAL;
	}

	if (!btf_name_offset_valid(env->btf, t->name_off)) {
		btf_verifier_log(env, "[%u] Invalid name_offset:%u",
				 env->log_type_id, t->name_off);
		return -EINVAL;
	}

	var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left);
	if (var_meta_size < 0)
		return var_meta_size;

	meta_left -= var_meta_size;

	return saved_meta_left - meta_left;
}

static int btf_check_all_metas(struct btf_verifier_env *env)
{
	struct btf *btf = env->btf;
	struct btf_header *hdr;
	void *cur, *end;

	hdr = &btf->hdr;
	cur = btf->nohdr_data + hdr->type_off;
	end = cur + hdr->type_len;

	env->log_type_id = 1;
	while (cur < end) {
		struct btf_type *t = cur;
		s32 meta_size;

		meta_size = btf_check_meta(env, t, end - cur);
		if (meta_size < 0)
			return meta_size;

		btf_add_type(env, t);
		cur += meta_size;
		env->log_type_id++;
	}

	return 0;
}

static bool btf_resolve_valid(struct btf_verifier_env *env,
			      const struct btf_type *t,
			      u32 type_id)
{
	struct btf *btf = env->btf;

	if (!env_type_is_resolved(env, type_id))
		return false;

	if (btf_type_is_struct(t) || btf_type_is_datasec(t))
		return !btf->resolved_ids[type_id] &&
		       !btf->resolved_sizes[type_id];

	if (btf_type_is_modifier(t) || btf_type_is_ptr(t) ||
	    btf_type_is_var(t)) {
		t = btf_type_id_resolve(btf, &type_id);
		return t &&
		       !btf_type_is_modifier(t) &&
		       !btf_type_is_var(t) &&
		       !btf_type_is_datasec(t);
	}

	if (btf_type_is_array(t)) {
		const struct btf_array *array = btf_type_array(t);
		const struct btf_type *elem_type;
		u32 elem_type_id = array->type;
		u32 elem_size;

		elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
		return elem_type && !btf_type_is_modifier(elem_type) &&
			(array->nelems * elem_size ==
			 btf->resolved_sizes[type_id]);
	}

	return false;
}

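/* Iteratively resolve one type by running the per-kind resolve()
 * callbacks against the env stack.  -E2BIG means the reference chain
 * is deeper than the stack allows and -EEXIST means a reference loop
 * was detected.
 */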
static int btf_resolve(struct btf_verifier_env *env,
		       const struct btf_type *t, u32 type_id)
{
	u32 save_log_type_id = env->log_type_id;
	const struct resolve_vertex *v;
	int err = 0;

	env->resolve_mode = RESOLVE_TBD;
	env_stack_push(env, t, type_id);
	while (!err && (v = env_stack_peak(env))) {
		env->log_type_id = v->type_id;
		err = btf_type_ops(v->t)->resolve(env, v);
	}

	env->log_type_id = type_id;
	if (err == -E2BIG) {
		btf_verifier_log_type(env, t,
				      "Exceeded max resolving depth:%u",
				      MAX_RESOLVE_DEPTH);
	} else if (err == -EEXIST) {
		btf_verifier_log_type(env, t, "Loop detected");
	}

	/* Final sanity check */
	if (!err && !btf_resolve_valid(env, t, type_id)) {
		btf_verifier_log_type(env, t, "Invalid resolve state");
		err = -EINVAL;
	}

	env->log_type_id = save_log_type_id;
	return err;
}

static int btf_check_all_types(struct btf_verifier_env *env)
{
	struct btf *btf = env->btf;
	u32 type_id;
	int err;

	err = env_resolve_init(env);
	if (err)
		return err;

	env->phase++;
	for (type_id = 1; type_id <= btf->nr_types; type_id++) {
		const struct btf_type *t = btf_type_by_id(btf, type_id);

		env->log_type_id = type_id;
		if (btf_type_needs_resolve(t) &&
		    !env_type_is_resolved(env, type_id)) {
			err = btf_resolve(env, t, type_id);
			if (err)
				return err;
		}

		if (btf_type_is_func_proto(t)) {
			err = btf_func_proto_check(env, t);
			if (err)
				return err;
		}

		if (btf_type_is_func(t)) {
			err = btf_func_check(env, t);
			if (err)
				return err;
		}
	}

	return 0;
}

static int btf_parse_type_sec(struct btf_verifier_env *env)
{
	const struct btf_header *hdr = &env->btf->hdr;
	int err;

	/* Type section must align to 4 bytes */
	if (hdr->type_off & (sizeof(u32) - 1)) {
		btf_verifier_log(env, "Unaligned type_off");
		return -EINVAL;
	}

	if (!hdr->type_len) {
		btf_verifier_log(env, "No type found");
		return -EINVAL;
	}

	err = btf_check_all_metas(env);
	if (err)
		return err;

	return btf_check_all_types(env);
}

static int btf_parse_str_sec(struct btf_verifier_env *env)
{
	const struct btf_header *hdr;
	struct btf *btf = env->btf;
	const char *start, *end;

	hdr = &btf->hdr;
	start = btf->nohdr_data + hdr->str_off;
	end = start + hdr->str_len;

	if (end != btf->data + btf->data_size) {
		btf_verifier_log(env, "String section is not at the end");
		return -EINVAL;
	}

	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET ||
	    start[0] || end[-1]) {
		btf_verifier_log(env, "Invalid string section");
		return -EINVAL;
	}

	btf->strings = start;

	return 0;
}

static const size_t btf_sec_info_offset[] = {
	offsetof(struct btf_header, type_off),
	offsetof(struct btf_header, str_off),
};

static int btf_sec_info_cmp(const void *a, const void *b)
{
	const struct btf_sec_info *x = a;
	const struct btf_sec_info *y = b;

	return (int)(x->off - y->off) ? : (int)(x->len - y->len);
}

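/* The type and string sections must cover the area after the header
 * exactly: sorted by offset they may neither leave a gap, overlap each
 * other, nor extend past the end of the supplied data.
 */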
static int btf_check_sec_info(struct btf_verifier_env *env,
			      u32 btf_data_size)
{
	struct btf_sec_info secs[ARRAY_SIZE(btf_sec_info_offset)];
	u32 total, expected_total, i;
	const struct btf_header *hdr;
	const struct btf *btf;

	btf = env->btf;
	hdr = &btf->hdr;

	/* Populate the secs from hdr */
	for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++)
		secs[i] = *(struct btf_sec_info *)((void *)hdr +
						   btf_sec_info_offset[i]);

	sort(secs, ARRAY_SIZE(btf_sec_info_offset),
	     sizeof(struct btf_sec_info), btf_sec_info_cmp, NULL);

	/* Check for gaps and overlap among sections */
	total = 0;
	expected_total = btf_data_size - hdr->hdr_len;
	for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) {
		if (expected_total < secs[i].off) {
			btf_verifier_log(env, "Invalid section offset");
			return -EINVAL;
		}
		if (total < secs[i].off) {
			/* gap */
			btf_verifier_log(env, "Unsupported section found");
			return -EINVAL;
		}
		if (total > secs[i].off) {
			btf_verifier_log(env, "Section overlap found");
			return -EINVAL;
		}
		if (expected_total - total < secs[i].len) {
			btf_verifier_log(env,
					 "Total section length too long");
			return -EINVAL;
		}
		total += secs[i].len;
	}

	/* There is data other than hdr and known sections */
	if (expected_total != total) {
		btf_verifier_log(env, "Unsupported section found");
		return -EINVAL;
	}

	return 0;
}

static int btf_parse_hdr(struct btf_verifier_env *env)
{
	u32 hdr_len, hdr_copy, btf_data_size;
	const struct btf_header *hdr;
	struct btf *btf;
	int err;

	btf = env->btf;
	btf_data_size = btf->data_size;

	if (btf_data_size <
	    offsetof(struct btf_header, hdr_len) + sizeof(hdr->hdr_len)) {
		btf_verifier_log(env, "hdr_len not found");
		return -EINVAL;
	}

	hdr = btf->data;
	hdr_len = hdr->hdr_len;
	if (btf_data_size < hdr_len) {
		btf_verifier_log(env, "btf_header not found");
		return -EINVAL;
	}

	/* Ensure the unsupported header fields are zero */
	if (hdr_len > sizeof(btf->hdr)) {
		u8 *expected_zero = btf->data + sizeof(btf->hdr);
		u8 *end = btf->data + hdr_len;

		for (; expected_zero < end; expected_zero++) {
			if (*expected_zero) {
				btf_verifier_log(env, "Unsupported btf_header");
				return -E2BIG;
			}
		}
	}

	hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr));
	memcpy(&btf->hdr, btf->data, hdr_copy);

	hdr = &btf->hdr;

	btf_verifier_log_hdr(env, btf_data_size);

	if (hdr->magic != BTF_MAGIC) {
		btf_verifier_log(env, "Invalid magic");
		return -EINVAL;
	}

	if (hdr->version != BTF_VERSION) {
		btf_verifier_log(env, "Unsupported version");
		return -ENOTSUPP;
	}

	if (hdr->flags) {
		btf_verifier_log(env, "Unsupported flags");
		return -ENOTSUPP;
	}

	if (btf_data_size == hdr->hdr_len) {
		btf_verifier_log(env, "No data");
		return -EINVAL;
	}

	err = btf_check_sec_info(env, btf_data_size);
	if (err)
		return err;

	return 0;
}

static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size,
			     u32 log_level, char __user *log_ubuf, u32 log_size)
{
	struct btf_verifier_env *env = NULL;
	struct bpf_verifier_log *log;
	struct btf *btf = NULL;
	u8 *data;
	int err;

	if (btf_data_size > BTF_MAX_SIZE)
		return ERR_PTR(-E2BIG);

	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
	if (!env)
		return ERR_PTR(-ENOMEM);

	log = &env->log;
	if (log_level || log_ubuf || log_size) {
		/* user requested verbose verifier output
		 * and supplied buffer to store the verification trace
		 */
		log->level = log_level;
		log->ubuf = log_ubuf;
		log->len_total = log_size;

		/* log attributes have to be sane */
		if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
		    !log->level || !log->ubuf) {
			err = -EINVAL;
			goto errout;
		}
	}

	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
	if (!btf) {
		err = -ENOMEM;
		goto errout;
	}
	env->btf = btf;

	data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN);
	if (!data) {
		err = -ENOMEM;
		goto errout;
	}

	btf->data = data;
	btf->data_size = btf_data_size;

	if (copy_from_user(data, btf_data, btf_data_size)) {
		err = -EFAULT;
		goto errout;
	}

	err = btf_parse_hdr(env);
	if (err)
		goto errout;

	btf->nohdr_data = btf->data + btf->hdr.hdr_len;

	err = btf_parse_str_sec(env);
	if (err)
		goto errout;

	err = btf_parse_type_sec(env);
	if (err)
		goto errout;

	if (log->level && bpf_verifier_log_full(log)) {
		err = -ENOSPC;
		goto errout;
	}

	btf_verifier_env_free(env);
	refcount_set(&btf->refcnt, 1);
	return btf;

errout:
	btf_verifier_env_free(env);
	if (btf)
		btf_free(btf);
	return ERR_PTR(err);
}

void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
		       struct seq_file *m)
{
	const struct btf_type *t = btf_type_by_id(btf, type_id);

	btf_type_ops(t)->seq_show(btf, t, type_id, obj, 0, m);
}

static int btf_release(struct inode *inode, struct file *filp)
{
	btf_put(filp->private_data);
	return 0;
}

const struct file_operations btf_fops = {
	.release	= btf_release,
};

static int __btf_new_fd(struct btf *btf)
{
	return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC);
}

int btf_new_fd(const union bpf_attr *attr)
{
	struct btf *btf;
	int ret;

	btf = btf_parse(u64_to_user_ptr(attr->btf),
			attr->btf_size, attr->btf_log_level,
			u64_to_user_ptr(attr->btf_log_buf),
			attr->btf_log_size);
	if (IS_ERR(btf))
		return PTR_ERR(btf);

	ret = btf_alloc_id(btf);
	if (ret) {
		btf_free(btf);
		return ret;
	}

	/*
	 * The BTF ID is published to the userspace.
	 * All BTF free must go through call_rcu() from
	 * now on (i.e. free by calling btf_put()).
	 */
	ret = __btf_new_fd(btf);
	if (ret < 0)
		btf_put(btf);

	return ret;
}

struct btf *btf_get_by_fd(int fd)
{
	struct btf *btf;
	struct fd f;

	f = fdget(fd);

	if (!f.file)
		return ERR_PTR(-EBADF);

	if (f.file->f_op != &btf_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	btf = f.file->private_data;
	refcount_inc(&btf->refcnt);
	fdput(f);

	return btf;
}

int btf_get_info_by_fd(const struct btf *btf,
		       const union bpf_attr *attr,
		       union bpf_attr __user *uattr)
{
	struct bpf_btf_info __user *uinfo;
	struct bpf_btf_info info = {};
	u32 info_copy, btf_copy;
	void __user *ubtf;
	u32 uinfo_len;

	uinfo = u64_to_user_ptr(attr->info.info);
	uinfo_len = attr->info.info_len;

	info_copy = min_t(u32, uinfo_len, sizeof(info));
	if (copy_from_user(&info, uinfo, info_copy))
		return -EFAULT;

	info.id = btf->id;
	ubtf = u64_to_user_ptr(info.btf);
	btf_copy = min_t(u32, btf->data_size, info.btf_size);
	if (copy_to_user(ubtf, btf->data, btf_copy))
		return -EFAULT;
	info.btf_size = btf->data_size;

	if (copy_to_user(uinfo, &info, info_copy) ||
	    put_user(info_copy, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

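/* Look up a BTF object by its ID.  The idr lookup and the
 * refcount_inc_not_zero() run under rcu_read_lock() so a concurrent
 * btf_put() cannot free the object between the two.
 */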
int btf_get_fd_by_id(u32 id)
{
	struct btf *btf;
	int fd;

	rcu_read_lock();
	btf = idr_find(&btf_idr, id);
	if (!btf || !refcount_inc_not_zero(&btf->refcnt))
		btf = ERR_PTR(-ENOENT);
	rcu_read_unlock();

	if (IS_ERR(btf))
		return PTR_ERR(btf);

	fd = __btf_new_fd(btf);
	if (fd < 0)
		btf_put(btf);

	return fd;
}

u32 btf_id(const struct btf *btf)
{
	return btf->id;
}