1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Copyright (c) 2018 Facebook */
4 #include <uapi/linux/btf.h>
5 #include <uapi/linux/bpf.h>
6 #include <uapi/linux/bpf_perf_event.h>
7 #include <uapi/linux/types.h>
8 #include <linux/seq_file.h>
9 #include <linux/compiler.h>
10 #include <linux/ctype.h>
11 #include <linux/errno.h>
12 #include <linux/slab.h>
13 #include <linux/anon_inodes.h>
14 #include <linux/file.h>
15 #include <linux/uaccess.h>
16 #include <linux/kernel.h>
17 #include <linux/idr.h>
18 #include <linux/sort.h>
19 #include <linux/bpf_verifier.h>
20 #include <linux/btf.h>
21 #include <linux/skmsg.h>
22 #include <linux/perf_event.h>
23 #include <net/sock.h>
/* BTF (BPF Type Format) is the metadata format which describes
 * the data types of BPF program/map. Hence, it basically focuses
 * on the C programming language which the modern BPF is primarily
 * using.
 *
 * ELF Section:
 * ~~~~~~~~~~~
 * The BTF data is stored under the ".BTF" ELF section
 *
 * struct btf_type:
 * ~~~~~~~~~~~~~~~
 * Each 'struct btf_type' object describes a C data type.
 * Depending on the type it is describing, a 'struct btf_type'
 * object may be followed by more data.  F.e.
 * To describe an array, 'struct btf_type' is followed by
 * 'struct btf_array'.
 *
 * 'struct btf_type' and any extra data following it are
 * 4 bytes aligned.
 *
 * Type section:
 * ~~~~~~~~~~~~~
 * The BTF type section contains a list of 'struct btf_type' objects.
 * Each one describes a C type.  Recall from the above section
 * that a 'struct btf_type' object could be immediately followed by extra
 * data in order to describe some particular C types.
 *
 * type_id:
 * ~~~~~~~
 * Each btf_type object is identified by a type_id.  The type_id
 * is implicitly implied by the location of the btf_type object in
 * the BTF type section.  The first one has type_id 1.  The second
 * one has type_id 2...etc.  Hence, an earlier btf_type has
 * a smaller type_id.
 *
 * A btf_type object may refer to another btf_type object by using
 * type_id (i.e. the "type" in the "struct btf_type").
 *
 * NOTE that we cannot assume any reference-order.
 * A btf_type object can refer to an earlier btf_type object
 * but it can also refer to a later btf_type object.
 *
 * For example, to describe "const void *".  A btf_type
 * object describing "const" may refer to another btf_type
 * object describing "void *".  This type-reference is done
 * by specifying type_id:
 *
 * [1] CONST (anon) type_id=2
 * [2] PTR (anon) type_id=0
 *
 * The above is the btf_verifier debug log:
 *   - Each line started with "[?]" is a btf_type object
 *   - [?] is the type_id of the btf_type object.
 *   - CONST/PTR is the BTF_KIND_XXX
 *   - "(anon)" is the name of the type.  It just
 *     happens that CONST and PTR have no name.
 *   - type_id=XXX is the 'u32 type' in btf_type
 *
 * NOTE: "void" has type_id 0
 *
 * String section:
 * ~~~~~~~~~~~~~~
 * The BTF string section contains the names used by the type section.
 * Each string is referred by an "offset" from the beginning of the
 * string section.
 *
 * Each string is '\0' terminated.
 *
 * The first character in the string section must be '\0'
 * which is used to mean 'anonymous'.  Some btf_type may not
 * have a name.
 */
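/* Illustrative sketch (added for clarity, not part of the original file):
 * assuming the field encoding from <uapi/linux/btf.h>, the "const void *"
 * example above could be laid out in the type section as:
 *
 *	[1] const:   .name_off = 0 (anon)
 *	             .info     = BTF_KIND_CONST << 24  (vlen = 0, kind_flag = 0)
 *	             .type     = 2   (refers to the PTR entry below)
 *	[2] void *:  .name_off = 0 (anon)
 *	             .info     = BTF_KIND_PTR << 24
 *	             .type     = 0   ("void" is always type_id 0)
 *
 * Neither kind carries extra data, so each entry occupies exactly
 * sizeof(struct btf_type) bytes of the type section.
 */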
/* BTF verification:
 *
 * To verify BTF data, two passes are needed.
 *
 * Pass #1
 * ~~~~~~~
 * The first pass is to collect all btf_type objects to
 * an array: "btf->types".
 *
 * Depending on the C type that a btf_type is describing,
 * a btf_type may be followed by extra data.  We don't know
 * how many btf_types there are, and more importantly we don't
 * know where each btf_type is located in the type section.
 *
 * Without knowing the location of each type_id, most verifications
 * cannot be done.  e.g. an earlier btf_type may refer to a later
 * btf_type (recall the "const void *" above), so we cannot
 * check this type-reference in the first pass.
 *
 * In the first pass, it still does some verifications (e.g.
 * checking the name is a valid offset to the string section).
 *
 * Pass #2
 * ~~~~~~~
 * The main focus is to resolve a btf_type that is referring
 * to another type.
 *
 * We have to ensure the referring type:
 * 1) does exist in the BTF (i.e. in btf->types[])
 * 2) does not cause a loop:
 *	struct A {
 *		struct B b;
 *	};
 *
 *	struct B {
 *		struct A a;
 *	};
 *
 * btf_type_needs_resolve() decides if a btf_type needs
 * to be resolved.
 *
 * The needs_resolve type implements the "resolve()" ops which
 * essentially does a DFS and detects backedges.
 *
 * During resolve (or DFS), different C types have different
 * "RESOLVED" conditions.
 *
 * When resolving a BTF_KIND_STRUCT, we need to resolve all its
 * members because a member is always referring to another
 * type.  A struct's member can be treated as "RESOLVED" if
 * it is referring to a BTF_KIND_PTR.  Otherwise, the
 * following valid C struct would be rejected:
 *
 *	struct A {
 *		int m;
 *		struct A *a;
 *	};
 *
 * When resolving a BTF_KIND_PTR, it needs to keep resolving if
 * it is referring to another BTF_KIND_PTR.  Otherwise, we cannot
 * detect a pointer loop, e.g.:
 * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
 *                        ^                                         |
 *                        +-----------------------------------------+
 */
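/* Illustrative sketch (added for clarity, not part of the original file):
 * the struct A <-> struct B loop above would appear in the type section
 * roughly as
 *
 *	[1] STRUCT A  vlen=1, member "b" type_id=2
 *	[2] STRUCT B  vlen=1, member "a" type_id=1
 *
 * Resolving [1] pushes [2] onto the resolve stack (marking it VISITED);
 * [2]'s member then refers back to [1], which is VISITED but not yet
 * RESOLVED, so env_stack_push() fails with -EEXIST and the BTF is
 * rejected.  Had the member been "struct A *a;", the PTR would act as a
 * sink and the member would be treated as RESOLVED instead.
 */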
165 #define BITS_PER_U128 (sizeof(u64) * BITS_PER_BYTE * 2)
166 #define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
167 #define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
168 #define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
169 #define BITS_ROUNDUP_BYTES(bits) \
170 (BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
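/* Worked example (added for clarity): for bits = 12,
 *	BITS_PER_BYTE_MASKED(12) = 12 & 7  = 4   (4 bits past a byte boundary)
 *	BITS_ROUNDDOWN_BYTES(12) = 12 >> 3 = 1   (whole bytes)
 *	BITS_ROUNDUP_BYTES(12)   = 1 + !!4 = 2   (bytes needed to hold 12 bits)
 */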
172 #define BTF_INFO_MASK 0x8f00ffff
173 #define BTF_INT_MASK 0x0fffffff
174 #define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
175 #define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)
/* 16MB for 64k structs, each with 16 members, and
 * a few MB of space for the string section.
 * The hard limit is S32_MAX.
 */
#define BTF_MAX_SIZE (16 * 1024 * 1024)
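/* Rough arithmetic behind the 16MB figure (added for clarity, assuming
 * sizeof(struct btf_type) == 12 and sizeof(struct btf_member) == 12 from
 * the uapi header):
 *	64k structs * (12 + 16 * 12) bytes ~= 12.75MB of type section,
 * which leaves roughly 3MB of the 16MB budget for the string section.
 */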
183 #define for_each_member(i, struct_type, member) \
184 for (i = 0, member = btf_type_member(struct_type); \
185 i < btf_type_vlen(struct_type); \
186 i++, member++)
188 #define for_each_member_from(i, from, struct_type, member) \
189 for (i = from, member = btf_type_member(struct_type) + from; \
190 i < btf_type_vlen(struct_type); \
191 i++, member++)
193 #define for_each_vsi(i, struct_type, member) \
194 for (i = 0, member = btf_type_var_secinfo(struct_type); \
195 i < btf_type_vlen(struct_type); \
196 i++, member++)
198 #define for_each_vsi_from(i, from, struct_type, member) \
199 for (i = from, member = btf_type_var_secinfo(struct_type) + from; \
200 i < btf_type_vlen(struct_type); \
201 i++, member++)
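/* Usage sketch (added for clarity, not part of the original file): the
 * btf_member/btf_var_secinfo entries immediately follow their
 * STRUCT/UNION/DATASEC btf_type header, so e.g.
 *
 *	for_each_member(i, struct_type, member)
 *		pr_debug("%u: type_id=%u\n", i, member->type);
 *
 * walks all btf_type_vlen(struct_type) members of a struct or union.
 */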
203 DEFINE_IDR(btf_idr);
204 DEFINE_SPINLOCK(btf_idr_lock);
206 struct btf {
207 void *data;
208 struct btf_type **types;
209 u32 *resolved_ids;
210 u32 *resolved_sizes;
211 const char *strings;
212 void *nohdr_data;
213 struct btf_header hdr;
214 u32 nr_types;
215 u32 types_size;
216 u32 data_size;
217 refcount_t refcnt;
218 u32 id;
219 struct rcu_head rcu;
222 enum verifier_phase {
223 CHECK_META,
224 CHECK_TYPE,
227 struct resolve_vertex {
228 const struct btf_type *t;
229 u32 type_id;
230 u16 next_member;
233 enum visit_state {
234 NOT_VISITED,
235 VISITED,
236 RESOLVED,
239 enum resolve_mode {
240 RESOLVE_TBD, /* To Be Determined */
241 RESOLVE_PTR, /* Resolving for Pointer */
242 RESOLVE_STRUCT_OR_ARRAY, /* Resolving for struct/union
243 * or array
247 #define MAX_RESOLVE_DEPTH 32
249 struct btf_sec_info {
250 u32 off;
251 u32 len;
254 struct btf_verifier_env {
255 struct btf *btf;
256 u8 *visit_states;
257 struct resolve_vertex stack[MAX_RESOLVE_DEPTH];
258 struct bpf_verifier_log log;
259 u32 log_type_id;
260 u32 top_stack;
261 enum verifier_phase phase;
262 enum resolve_mode resolve_mode;
265 static const char * const btf_kind_str[NR_BTF_KINDS] = {
266 [BTF_KIND_UNKN] = "UNKNOWN",
267 [BTF_KIND_INT] = "INT",
268 [BTF_KIND_PTR] = "PTR",
269 [BTF_KIND_ARRAY] = "ARRAY",
270 [BTF_KIND_STRUCT] = "STRUCT",
271 [BTF_KIND_UNION] = "UNION",
272 [BTF_KIND_ENUM] = "ENUM",
273 [BTF_KIND_FWD] = "FWD",
274 [BTF_KIND_TYPEDEF] = "TYPEDEF",
275 [BTF_KIND_VOLATILE] = "VOLATILE",
276 [BTF_KIND_CONST] = "CONST",
277 [BTF_KIND_RESTRICT] = "RESTRICT",
278 [BTF_KIND_FUNC] = "FUNC",
279 [BTF_KIND_FUNC_PROTO] = "FUNC_PROTO",
280 [BTF_KIND_VAR] = "VAR",
281 [BTF_KIND_DATASEC] = "DATASEC",
284 struct btf_kind_operations {
285 s32 (*check_meta)(struct btf_verifier_env *env,
286 const struct btf_type *t,
287 u32 meta_left);
288 int (*resolve)(struct btf_verifier_env *env,
289 const struct resolve_vertex *v);
290 int (*check_member)(struct btf_verifier_env *env,
291 const struct btf_type *struct_type,
292 const struct btf_member *member,
293 const struct btf_type *member_type);
294 int (*check_kflag_member)(struct btf_verifier_env *env,
295 const struct btf_type *struct_type,
296 const struct btf_member *member,
297 const struct btf_type *member_type);
298 void (*log_details)(struct btf_verifier_env *env,
299 const struct btf_type *t);
300 void (*seq_show)(const struct btf *btf, const struct btf_type *t,
301 u32 type_id, void *data, u8 bits_offsets,
302 struct seq_file *m);
305 static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
306 static struct btf_type btf_void;
308 static int btf_resolve(struct btf_verifier_env *env,
309 const struct btf_type *t, u32 type_id);
311 static bool btf_type_is_modifier(const struct btf_type *t)
/* Some of them are not strictly C modifiers
 * but they are grouped into the same bucket
 * for BTF's concern:
 *   A type (t) that refers to another
 *   type through t->type AND its size cannot
 *   be determined without following the t->type.
 *
 * ptr does not fall into this bucket
 * because its size is always sizeof(void *).
 */
323 switch (BTF_INFO_KIND(t->info)) {
324 case BTF_KIND_TYPEDEF:
325 case BTF_KIND_VOLATILE:
326 case BTF_KIND_CONST:
327 case BTF_KIND_RESTRICT:
328 return true;
331 return false;
334 bool btf_type_is_void(const struct btf_type *t)
336 return t == &btf_void;
339 static bool btf_type_is_fwd(const struct btf_type *t)
341 return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
344 static bool btf_type_nosize(const struct btf_type *t)
346 return btf_type_is_void(t) || btf_type_is_fwd(t) ||
347 btf_type_is_func(t) || btf_type_is_func_proto(t);
350 static bool btf_type_nosize_or_null(const struct btf_type *t)
352 return !t || btf_type_nosize(t);
/* A union is only a special case of a struct:
 * all of its members are at offsetof(member) == 0.
 */
358 static bool btf_type_is_struct(const struct btf_type *t)
360 u8 kind = BTF_INFO_KIND(t->info);
362 return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION;
365 static bool __btf_type_is_struct(const struct btf_type *t)
367 return BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT;
370 static bool btf_type_is_array(const struct btf_type *t)
372 return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY;
375 static bool btf_type_is_var(const struct btf_type *t)
377 return BTF_INFO_KIND(t->info) == BTF_KIND_VAR;
380 static bool btf_type_is_datasec(const struct btf_type *t)
382 return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
/* Types that act only as a source, not as a sink or intermediate
 * type when resolving.
 */
388 static bool btf_type_is_resolve_source_only(const struct btf_type *t)
390 return btf_type_is_var(t) ||
391 btf_type_is_datasec(t);
/* What types need to be resolved?
 *
 * btf_type_is_modifier() is an obvious one.
 *
 * btf_type_is_struct() because its member refers to
 * another type (through member->type).
 *
 * btf_type_is_var() because the variable refers to
 * another type.  btf_type_is_datasec() holds multiple
 * btf_type_is_var() types that need resolving.
 *
 * btf_type_is_array() because its element (array->type)
 * refers to another type.  An array can be thought of as a
 * special case of a struct whose member type is simply
 * repeated array->nelems times.
 */
410 static bool btf_type_needs_resolve(const struct btf_type *t)
412 return btf_type_is_modifier(t) ||
413 btf_type_is_ptr(t) ||
414 btf_type_is_struct(t) ||
415 btf_type_is_array(t) ||
416 btf_type_is_var(t) ||
417 btf_type_is_datasec(t);
420 /* t->size can be used */
421 static bool btf_type_has_size(const struct btf_type *t)
423 switch (BTF_INFO_KIND(t->info)) {
424 case BTF_KIND_INT:
425 case BTF_KIND_STRUCT:
426 case BTF_KIND_UNION:
427 case BTF_KIND_ENUM:
428 case BTF_KIND_DATASEC:
429 return true;
432 return false;
435 static const char *btf_int_encoding_str(u8 encoding)
437 if (encoding == 0)
438 return "(none)";
439 else if (encoding == BTF_INT_SIGNED)
440 return "SIGNED";
441 else if (encoding == BTF_INT_CHAR)
442 return "CHAR";
443 else if (encoding == BTF_INT_BOOL)
444 return "BOOL";
445 else
446 return "UNKN";
449 static u16 btf_type_vlen(const struct btf_type *t)
451 return BTF_INFO_VLEN(t->info);
454 static bool btf_type_kflag(const struct btf_type *t)
456 return BTF_INFO_KFLAG(t->info);
459 static u32 btf_member_bit_offset(const struct btf_type *struct_type,
460 const struct btf_member *member)
462 return btf_type_kflag(struct_type) ? BTF_MEMBER_BIT_OFFSET(member->offset)
463 : member->offset;
466 static u32 btf_member_bitfield_size(const struct btf_type *struct_type,
467 const struct btf_member *member)
469 return btf_type_kflag(struct_type) ? BTF_MEMBER_BITFIELD_SIZE(member->offset)
470 : 0;
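/* Worked example (added for clarity): with the struct's kind_flag set,
 * member->offset packs two values, per <uapi/linux/btf.h>:
 *	bitfield size = member->offset >> 24
 *	bit offset    = member->offset & 0xffffff
 * e.g. offset = 0x05000020 describes a 5-bit bitfield starting at bit 32.
 * With kind_flag clear, member->offset is the plain bit offset and a
 * bitfield's width comes from the INT member type itself.
 */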
473 static u32 btf_type_int(const struct btf_type *t)
475 return *(u32 *)(t + 1);
478 static const struct btf_array *btf_type_array(const struct btf_type *t)
480 return (const struct btf_array *)(t + 1);
483 static const struct btf_member *btf_type_member(const struct btf_type *t)
485 return (const struct btf_member *)(t + 1);
488 static const struct btf_enum *btf_type_enum(const struct btf_type *t)
490 return (const struct btf_enum *)(t + 1);
493 static const struct btf_var *btf_type_var(const struct btf_type *t)
495 return (const struct btf_var *)(t + 1);
498 static const struct btf_var_secinfo *btf_type_var_secinfo(const struct btf_type *t)
500 return (const struct btf_var_secinfo *)(t + 1);
503 static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
505 return kind_ops[BTF_INFO_KIND(t->info)];
508 static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
510 return BTF_STR_OFFSET_VALID(offset) &&
511 offset < btf->hdr.str_len;
514 static bool __btf_name_char_ok(char c, bool first, bool dot_ok)
516 if ((first ? !isalpha(c) :
517 !isalnum(c)) &&
518 c != '_' &&
519 ((c == '.' && !dot_ok) ||
520 c != '.'))
521 return false;
522 return true;
525 static bool __btf_name_valid(const struct btf *btf, u32 offset, bool dot_ok)
527 /* offset must be valid */
528 const char *src = &btf->strings[offset];
529 const char *src_limit;
531 if (!__btf_name_char_ok(*src, true, dot_ok))
532 return false;
534 /* set a limit on identifier length */
535 src_limit = src + KSYM_NAME_LEN;
536 src++;
537 while (*src && src < src_limit) {
538 if (!__btf_name_char_ok(*src, false, dot_ok))
539 return false;
540 src++;
543 return !*src;
546 /* Only C-style identifier is permitted. This can be relaxed if
547 * necessary.
549 static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
551 return __btf_name_valid(btf, offset, false);
554 static bool btf_name_valid_section(const struct btf *btf, u32 offset)
556 return __btf_name_valid(btf, offset, true);
559 static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
561 if (!offset)
562 return "(anon)";
563 else if (offset < btf->hdr.str_len)
564 return &btf->strings[offset];
565 else
566 return "(invalid-name-offset)";
569 const char *btf_name_by_offset(const struct btf *btf, u32 offset)
571 if (offset < btf->hdr.str_len)
572 return &btf->strings[offset];
574 return NULL;
577 const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
579 if (type_id > btf->nr_types)
580 return NULL;
582 return btf->types[type_id];
/*
 * Regular int is not a bit field and it must be either
 * u8/u16/u32/u64 or __int128.
 */
589 static bool btf_type_int_is_regular(const struct btf_type *t)
591 u8 nr_bits, nr_bytes;
592 u32 int_data;
594 int_data = btf_type_int(t);
595 nr_bits = BTF_INT_BITS(int_data);
596 nr_bytes = BITS_ROUNDUP_BYTES(nr_bits);
597 if (BITS_PER_BYTE_MASKED(nr_bits) ||
598 BTF_INT_OFFSET(int_data) ||
599 (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) &&
600 nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64) &&
601 nr_bytes != (2 * sizeof(u64)))) {
602 return false;
605 return true;
/*
 * Check that given struct member is a regular int with expected
 * offset and size.
 */
612 bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
613 const struct btf_member *m,
614 u32 expected_offset, u32 expected_size)
616 const struct btf_type *t;
617 u32 id, int_data;
618 u8 nr_bits;
620 id = m->type;
621 t = btf_type_id_size(btf, &id, NULL);
622 if (!t || !btf_type_is_int(t))
623 return false;
625 int_data = btf_type_int(t);
626 nr_bits = BTF_INT_BITS(int_data);
627 if (btf_type_kflag(s)) {
628 u32 bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset);
629 u32 bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset);
/* if kflag set, int should be a regular int and
 * bit offset should be at byte boundary.
 */
634 return !bitfield_size &&
635 BITS_ROUNDUP_BYTES(bit_offset) == expected_offset &&
636 BITS_ROUNDUP_BYTES(nr_bits) == expected_size;
639 if (BTF_INT_OFFSET(int_data) ||
640 BITS_PER_BYTE_MASKED(m->offset) ||
641 BITS_ROUNDUP_BYTES(m->offset) != expected_offset ||
642 BITS_PER_BYTE_MASKED(nr_bits) ||
643 BITS_ROUNDUP_BYTES(nr_bits) != expected_size)
644 return false;
646 return true;
649 __printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
650 const char *fmt, ...)
652 va_list args;
654 va_start(args, fmt);
655 bpf_verifier_vlog(log, fmt, args);
656 va_end(args);
659 __printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env,
660 const char *fmt, ...)
662 struct bpf_verifier_log *log = &env->log;
663 va_list args;
665 if (!bpf_verifier_log_needed(log))
666 return;
668 va_start(args, fmt);
669 bpf_verifier_vlog(log, fmt, args);
670 va_end(args);
673 __printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
674 const struct btf_type *t,
675 bool log_details,
676 const char *fmt, ...)
678 struct bpf_verifier_log *log = &env->log;
679 u8 kind = BTF_INFO_KIND(t->info);
680 struct btf *btf = env->btf;
681 va_list args;
683 if (!bpf_verifier_log_needed(log))
684 return;
/* btf verifier prints all types it is processing via
 * btf_verifier_log_type(..., fmt = NULL).
 * Skip those prints for in-kernel BTF verification.
 */
690 if (log->level == BPF_LOG_KERNEL && !fmt)
691 return;
693 __btf_verifier_log(log, "[%u] %s %s%s",
694 env->log_type_id,
695 btf_kind_str[kind],
696 __btf_name_by_offset(btf, t->name_off),
697 log_details ? " " : "");
699 if (log_details)
700 btf_type_ops(t)->log_details(env, t);
702 if (fmt && *fmt) {
703 __btf_verifier_log(log, " ");
704 va_start(args, fmt);
705 bpf_verifier_vlog(log, fmt, args);
706 va_end(args);
709 __btf_verifier_log(log, "\n");
712 #define btf_verifier_log_type(env, t, ...) \
713 __btf_verifier_log_type((env), (t), true, __VA_ARGS__)
714 #define btf_verifier_log_basic(env, t, ...) \
715 __btf_verifier_log_type((env), (t), false, __VA_ARGS__)
717 __printf(4, 5)
718 static void btf_verifier_log_member(struct btf_verifier_env *env,
719 const struct btf_type *struct_type,
720 const struct btf_member *member,
721 const char *fmt, ...)
723 struct bpf_verifier_log *log = &env->log;
724 struct btf *btf = env->btf;
725 va_list args;
727 if (!bpf_verifier_log_needed(log))
728 return;
730 if (log->level == BPF_LOG_KERNEL && !fmt)
731 return;
/* The CHECK_META phase already did a btf dump.
 *
 * If member is logged again, it must hit an error in
 * parsing this member.  It is useful to print out which
 * struct this member belongs to.
 */
738 if (env->phase != CHECK_META)
739 btf_verifier_log_type(env, struct_type, NULL);
741 if (btf_type_kflag(struct_type))
742 __btf_verifier_log(log,
743 "\t%s type_id=%u bitfield_size=%u bits_offset=%u",
744 __btf_name_by_offset(btf, member->name_off),
745 member->type,
746 BTF_MEMBER_BITFIELD_SIZE(member->offset),
747 BTF_MEMBER_BIT_OFFSET(member->offset));
748 else
749 __btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
750 __btf_name_by_offset(btf, member->name_off),
751 member->type, member->offset);
753 if (fmt && *fmt) {
754 __btf_verifier_log(log, " ");
755 va_start(args, fmt);
756 bpf_verifier_vlog(log, fmt, args);
757 va_end(args);
760 __btf_verifier_log(log, "\n");
763 __printf(4, 5)
764 static void btf_verifier_log_vsi(struct btf_verifier_env *env,
765 const struct btf_type *datasec_type,
766 const struct btf_var_secinfo *vsi,
767 const char *fmt, ...)
769 struct bpf_verifier_log *log = &env->log;
770 va_list args;
772 if (!bpf_verifier_log_needed(log))
773 return;
774 if (log->level == BPF_LOG_KERNEL && !fmt)
775 return;
776 if (env->phase != CHECK_META)
777 btf_verifier_log_type(env, datasec_type, NULL);
779 __btf_verifier_log(log, "\t type_id=%u offset=%u size=%u",
780 vsi->type, vsi->offset, vsi->size);
781 if (fmt && *fmt) {
782 __btf_verifier_log(log, " ");
783 va_start(args, fmt);
784 bpf_verifier_vlog(log, fmt, args);
785 va_end(args);
788 __btf_verifier_log(log, "\n");
791 static void btf_verifier_log_hdr(struct btf_verifier_env *env,
792 u32 btf_data_size)
794 struct bpf_verifier_log *log = &env->log;
795 const struct btf *btf = env->btf;
796 const struct btf_header *hdr;
798 if (!bpf_verifier_log_needed(log))
799 return;
801 if (log->level == BPF_LOG_KERNEL)
802 return;
803 hdr = &btf->hdr;
804 __btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
805 __btf_verifier_log(log, "version: %u\n", hdr->version);
806 __btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
807 __btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len);
808 __btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
809 __btf_verifier_log(log, "type_len: %u\n", hdr->type_len);
810 __btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
811 __btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
812 __btf_verifier_log(log, "btf_total_size: %u\n", btf_data_size);
815 static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
817 struct btf *btf = env->btf;
/* < 2 because +1 for btf_void which is always in btf->types[0].
 * btf_void is not accounted in btf->nr_types because btf_void
 * does not come from the BTF file.
 */
823 if (btf->types_size - btf->nr_types < 2) {
824 /* Expand 'types' array */
826 struct btf_type **new_types;
827 u32 expand_by, new_size;
829 if (btf->types_size == BTF_MAX_TYPE) {
830 btf_verifier_log(env, "Exceeded max num of types");
831 return -E2BIG;
834 expand_by = max_t(u32, btf->types_size >> 2, 16);
835 new_size = min_t(u32, BTF_MAX_TYPE,
836 btf->types_size + expand_by);
838 new_types = kvcalloc(new_size, sizeof(*new_types),
839 GFP_KERNEL | __GFP_NOWARN);
840 if (!new_types)
841 return -ENOMEM;
843 if (btf->nr_types == 0)
844 new_types[0] = &btf_void;
845 else
846 memcpy(new_types, btf->types,
847 sizeof(*btf->types) * (btf->nr_types + 1));
849 kvfree(btf->types);
850 btf->types = new_types;
851 btf->types_size = new_size;
854 btf->types[++(btf->nr_types)] = t;
856 return 0;
859 static int btf_alloc_id(struct btf *btf)
861 int id;
863 idr_preload(GFP_KERNEL);
864 spin_lock_bh(&btf_idr_lock);
865 id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC);
866 if (id > 0)
867 btf->id = id;
868 spin_unlock_bh(&btf_idr_lock);
869 idr_preload_end();
871 if (WARN_ON_ONCE(!id))
872 return -ENOSPC;
874 return id > 0 ? 0 : id;
877 static void btf_free_id(struct btf *btf)
879 unsigned long flags;
/*
 * In map-in-map, calling map_delete_elem() on the outer
 * map will call bpf_map_put on the inner map.
 * It will then eventually call btf_free_id()
 * on the inner map.  Some of the map_delete_elem()
 * implementations may have irqs disabled, so
 * we need to use the _irqsave() version instead
 * of the _bh() version.
 */
890 spin_lock_irqsave(&btf_idr_lock, flags);
891 idr_remove(&btf_idr, btf->id);
892 spin_unlock_irqrestore(&btf_idr_lock, flags);
895 static void btf_free(struct btf *btf)
897 kvfree(btf->types);
898 kvfree(btf->resolved_sizes);
899 kvfree(btf->resolved_ids);
900 kvfree(btf->data);
901 kfree(btf);
904 static void btf_free_rcu(struct rcu_head *rcu)
906 struct btf *btf = container_of(rcu, struct btf, rcu);
908 btf_free(btf);
911 void btf_put(struct btf *btf)
913 if (btf && refcount_dec_and_test(&btf->refcnt)) {
914 btf_free_id(btf);
915 call_rcu(&btf->rcu, btf_free_rcu);
919 static int env_resolve_init(struct btf_verifier_env *env)
921 struct btf *btf = env->btf;
922 u32 nr_types = btf->nr_types;
923 u32 *resolved_sizes = NULL;
924 u32 *resolved_ids = NULL;
925 u8 *visit_states = NULL;
927 /* +1 for btf_void */
928 resolved_sizes = kvcalloc(nr_types + 1, sizeof(*resolved_sizes),
929 GFP_KERNEL | __GFP_NOWARN);
930 if (!resolved_sizes)
931 goto nomem;
933 resolved_ids = kvcalloc(nr_types + 1, sizeof(*resolved_ids),
934 GFP_KERNEL | __GFP_NOWARN);
935 if (!resolved_ids)
936 goto nomem;
938 visit_states = kvcalloc(nr_types + 1, sizeof(*visit_states),
939 GFP_KERNEL | __GFP_NOWARN);
940 if (!visit_states)
941 goto nomem;
943 btf->resolved_sizes = resolved_sizes;
944 btf->resolved_ids = resolved_ids;
945 env->visit_states = visit_states;
947 return 0;
949 nomem:
950 kvfree(resolved_sizes);
951 kvfree(resolved_ids);
952 kvfree(visit_states);
953 return -ENOMEM;
956 static void btf_verifier_env_free(struct btf_verifier_env *env)
958 kvfree(env->visit_states);
959 kfree(env);
962 static bool env_type_is_resolve_sink(const struct btf_verifier_env *env,
963 const struct btf_type *next_type)
965 switch (env->resolve_mode) {
966 case RESOLVE_TBD:
967 /* int, enum or void is a sink */
968 return !btf_type_needs_resolve(next_type);
969 case RESOLVE_PTR:
/* int, enum, void, struct, array, func or func_proto is a sink
 * for ptr
 */
973 return !btf_type_is_modifier(next_type) &&
974 !btf_type_is_ptr(next_type);
975 case RESOLVE_STRUCT_OR_ARRAY:
/* int, enum, void, ptr, func or func_proto is a sink
 * for struct and array
 */
979 return !btf_type_is_modifier(next_type) &&
980 !btf_type_is_array(next_type) &&
981 !btf_type_is_struct(next_type);
982 default:
983 BUG();
987 static bool env_type_is_resolved(const struct btf_verifier_env *env,
988 u32 type_id)
990 return env->visit_states[type_id] == RESOLVED;
993 static int env_stack_push(struct btf_verifier_env *env,
994 const struct btf_type *t, u32 type_id)
996 struct resolve_vertex *v;
998 if (env->top_stack == MAX_RESOLVE_DEPTH)
999 return -E2BIG;
1001 if (env->visit_states[type_id] != NOT_VISITED)
1002 return -EEXIST;
1004 env->visit_states[type_id] = VISITED;
1006 v = &env->stack[env->top_stack++];
1007 v->t = t;
1008 v->type_id = type_id;
1009 v->next_member = 0;
1011 if (env->resolve_mode == RESOLVE_TBD) {
1012 if (btf_type_is_ptr(t))
1013 env->resolve_mode = RESOLVE_PTR;
1014 else if (btf_type_is_struct(t) || btf_type_is_array(t))
1015 env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY;
1018 return 0;
1021 static void env_stack_set_next_member(struct btf_verifier_env *env,
1022 u16 next_member)
1024 env->stack[env->top_stack - 1].next_member = next_member;
1027 static void env_stack_pop_resolved(struct btf_verifier_env *env,
1028 u32 resolved_type_id,
1029 u32 resolved_size)
1031 u32 type_id = env->stack[--(env->top_stack)].type_id;
1032 struct btf *btf = env->btf;
1034 btf->resolved_sizes[type_id] = resolved_size;
1035 btf->resolved_ids[type_id] = resolved_type_id;
1036 env->visit_states[type_id] = RESOLVED;
1039 static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env)
1041 return env->top_stack ? &env->stack[env->top_stack - 1] : NULL;
/* Resolve the size of a passed-in "type"
 *
 * type: is an array (e.g. u32 array[x][y])
 * return type: type "u32[x][y]", i.e. BTF_KIND_ARRAY,
 * *type_size: (x * y * sizeof(u32)).  Hence, *type_size always
 *             corresponds to the return type.
 * *elem_type: u32
 * *total_nelems: (x * y).  Hence, individual elem size is
 *                (*type_size / *total_nelems)
 *
 * type: is not an array (e.g. const struct X)
 * return type: type "struct X"
 * *type_size: sizeof(struct X)
 * *elem_type: same as return type ("struct X")
 * *total_nelems: 1
 */
1060 static const struct btf_type *
1061 btf_resolve_size(const struct btf *btf, const struct btf_type *type,
1062 u32 *type_size, const struct btf_type **elem_type,
1063 u32 *total_nelems)
1065 const struct btf_type *array_type = NULL;
1066 const struct btf_array *array;
1067 u32 i, size, nelems = 1;
1069 for (i = 0; i < MAX_RESOLVE_DEPTH; i++) {
1070 switch (BTF_INFO_KIND(type->info)) {
1071 /* type->size can be used */
1072 case BTF_KIND_INT:
1073 case BTF_KIND_STRUCT:
1074 case BTF_KIND_UNION:
1075 case BTF_KIND_ENUM:
1076 size = type->size;
1077 goto resolved;
1079 case BTF_KIND_PTR:
1080 size = sizeof(void *);
1081 goto resolved;
1083 /* Modifiers */
1084 case BTF_KIND_TYPEDEF:
1085 case BTF_KIND_VOLATILE:
1086 case BTF_KIND_CONST:
1087 case BTF_KIND_RESTRICT:
1088 type = btf_type_by_id(btf, type->type);
1089 break;
1091 case BTF_KIND_ARRAY:
1092 if (!array_type)
1093 array_type = type;
1094 array = btf_type_array(type);
1095 if (nelems && array->nelems > U32_MAX / nelems)
1096 return ERR_PTR(-EINVAL);
1097 nelems *= array->nelems;
1098 type = btf_type_by_id(btf, array->type);
1099 break;
1101 /* type without size */
1102 default:
1103 return ERR_PTR(-EINVAL);
1107 return ERR_PTR(-EINVAL);
1109 resolved:
1110 if (nelems && size > U32_MAX / nelems)
1111 return ERR_PTR(-EINVAL);
1113 *type_size = nelems * size;
1114 *total_nelems = nelems;
1115 *elem_type = type;
1117 return array_type ? : type;
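/* Usage sketch (added for clarity, not part of the original file): for a
 * btf_type "t" describing "u32 a[3][4]",
 *
 *	const struct btf_type *elem_type, *ret;
 *	u32 type_size, total_nelems;
 *
 *	ret = btf_resolve_size(btf, t, &type_size, &elem_type,
 *			       &total_nelems);
 *
 * returns the outer ARRAY type with type_size == 48 (3 * 4 * sizeof(u32)),
 * total_nelems == 12 and elem_type pointing at the u32 INT type.
 */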
1120 /* The input param "type_id" must point to a needs_resolve type */
1121 static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
1122 u32 *type_id)
1124 *type_id = btf->resolved_ids[*type_id];
1125 return btf_type_by_id(btf, *type_id);
1128 const struct btf_type *btf_type_id_size(const struct btf *btf,
1129 u32 *type_id, u32 *ret_size)
1131 const struct btf_type *size_type;
1132 u32 size_type_id = *type_id;
1133 u32 size = 0;
1135 size_type = btf_type_by_id(btf, size_type_id);
1136 if (btf_type_nosize_or_null(size_type))
1137 return NULL;
1139 if (btf_type_has_size(size_type)) {
1140 size = size_type->size;
1141 } else if (btf_type_is_array(size_type)) {
1142 size = btf->resolved_sizes[size_type_id];
1143 } else if (btf_type_is_ptr(size_type)) {
1144 size = sizeof(void *);
1145 } else {
1146 if (WARN_ON_ONCE(!btf_type_is_modifier(size_type) &&
1147 !btf_type_is_var(size_type)))
1148 return NULL;
1150 size_type_id = btf->resolved_ids[size_type_id];
1151 size_type = btf_type_by_id(btf, size_type_id);
1152 if (btf_type_nosize_or_null(size_type))
1153 return NULL;
1154 else if (btf_type_has_size(size_type))
1155 size = size_type->size;
1156 else if (btf_type_is_array(size_type))
1157 size = btf->resolved_sizes[size_type_id];
1158 else if (btf_type_is_ptr(size_type))
1159 size = sizeof(void *);
1160 else
1161 return NULL;
1164 *type_id = size_type_id;
1165 if (ret_size)
1166 *ret_size = size;
1168 return size_type;
1171 static int btf_df_check_member(struct btf_verifier_env *env,
1172 const struct btf_type *struct_type,
1173 const struct btf_member *member,
1174 const struct btf_type *member_type)
1176 btf_verifier_log_basic(env, struct_type,
1177 "Unsupported check_member");
1178 return -EINVAL;
1181 static int btf_df_check_kflag_member(struct btf_verifier_env *env,
1182 const struct btf_type *struct_type,
1183 const struct btf_member *member,
1184 const struct btf_type *member_type)
1186 btf_verifier_log_basic(env, struct_type,
1187 "Unsupported check_kflag_member");
1188 return -EINVAL;
/* Used for ptr, array and struct/union type members.
 * int, enum and modifier types have their specific callback functions.
 */
1194 static int btf_generic_check_kflag_member(struct btf_verifier_env *env,
1195 const struct btf_type *struct_type,
1196 const struct btf_member *member,
1197 const struct btf_type *member_type)
1199 if (BTF_MEMBER_BITFIELD_SIZE(member->offset)) {
1200 btf_verifier_log_member(env, struct_type, member,
1201 "Invalid member bitfield_size");
1202 return -EINVAL;
/* bitfield size is 0, so member->offset represents bit offset only.
 * It is safe to call non kflag check_member variants.
 */
1208 return btf_type_ops(member_type)->check_member(env, struct_type,
1209 member,
1210 member_type);
1213 static int btf_df_resolve(struct btf_verifier_env *env,
1214 const struct resolve_vertex *v)
1216 btf_verifier_log_basic(env, v->t, "Unsupported resolve");
1217 return -EINVAL;
1220 static void btf_df_seq_show(const struct btf *btf, const struct btf_type *t,
1221 u32 type_id, void *data, u8 bits_offsets,
1222 struct seq_file *m)
1224 seq_printf(m, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
1227 static int btf_int_check_member(struct btf_verifier_env *env,
1228 const struct btf_type *struct_type,
1229 const struct btf_member *member,
1230 const struct btf_type *member_type)
1232 u32 int_data = btf_type_int(member_type);
1233 u32 struct_bits_off = member->offset;
1234 u32 struct_size = struct_type->size;
1235 u32 nr_copy_bits;
1236 u32 bytes_offset;
1238 if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) {
1239 btf_verifier_log_member(env, struct_type, member,
1240 "bits_offset exceeds U32_MAX");
1241 return -EINVAL;
1244 struct_bits_off += BTF_INT_OFFSET(int_data);
1245 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1246 nr_copy_bits = BTF_INT_BITS(int_data) +
1247 BITS_PER_BYTE_MASKED(struct_bits_off);
1249 if (nr_copy_bits > BITS_PER_U128) {
1250 btf_verifier_log_member(env, struct_type, member,
1251 "nr_copy_bits exceeds 128");
1252 return -EINVAL;
1255 if (struct_size < bytes_offset ||
1256 struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
1257 btf_verifier_log_member(env, struct_type, member,
1258 "Member exceeds struct_size");
1259 return -EINVAL;
1262 return 0;
1265 static int btf_int_check_kflag_member(struct btf_verifier_env *env,
1266 const struct btf_type *struct_type,
1267 const struct btf_member *member,
1268 const struct btf_type *member_type)
1270 u32 struct_bits_off, nr_bits, nr_int_data_bits, bytes_offset;
1271 u32 int_data = btf_type_int(member_type);
1272 u32 struct_size = struct_type->size;
1273 u32 nr_copy_bits;
1275 /* a regular int type is required for the kflag int member */
1276 if (!btf_type_int_is_regular(member_type)) {
1277 btf_verifier_log_member(env, struct_type, member,
1278 "Invalid member base type");
1279 return -EINVAL;
1282 /* check sanity of bitfield size */
1283 nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
1284 struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
1285 nr_int_data_bits = BTF_INT_BITS(int_data);
1286 if (!nr_bits) {
/* Not a bitfield member, member offset must be at byte
 * boundary.
 */
1290 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1291 btf_verifier_log_member(env, struct_type, member,
1292 "Invalid member offset");
1293 return -EINVAL;
1296 nr_bits = nr_int_data_bits;
1297 } else if (nr_bits > nr_int_data_bits) {
1298 btf_verifier_log_member(env, struct_type, member,
1299 "Invalid member bitfield_size");
1300 return -EINVAL;
1303 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1304 nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off);
1305 if (nr_copy_bits > BITS_PER_U128) {
1306 btf_verifier_log_member(env, struct_type, member,
1307 "nr_copy_bits exceeds 128");
1308 return -EINVAL;
1311 if (struct_size < bytes_offset ||
1312 struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
1313 btf_verifier_log_member(env, struct_type, member,
1314 "Member exceeds struct_size");
1315 return -EINVAL;
1318 return 0;
1321 static s32 btf_int_check_meta(struct btf_verifier_env *env,
1322 const struct btf_type *t,
1323 u32 meta_left)
1325 u32 int_data, nr_bits, meta_needed = sizeof(int_data);
1326 u16 encoding;
1328 if (meta_left < meta_needed) {
1329 btf_verifier_log_basic(env, t,
1330 "meta_left:%u meta_needed:%u",
1331 meta_left, meta_needed);
1332 return -EINVAL;
1335 if (btf_type_vlen(t)) {
1336 btf_verifier_log_type(env, t, "vlen != 0");
1337 return -EINVAL;
1340 if (btf_type_kflag(t)) {
1341 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
1342 return -EINVAL;
1345 int_data = btf_type_int(t);
1346 if (int_data & ~BTF_INT_MASK) {
1347 btf_verifier_log_basic(env, t, "Invalid int_data:%x",
1348 int_data);
1349 return -EINVAL;
1352 nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data);
1354 if (nr_bits > BITS_PER_U128) {
1355 btf_verifier_log_type(env, t, "nr_bits exceeds %zu",
1356 BITS_PER_U128);
1357 return -EINVAL;
1360 if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) {
1361 btf_verifier_log_type(env, t, "nr_bits exceeds type_size");
1362 return -EINVAL;
/*
 * Only one of the encoding bits is allowed and it
 * should be sufficient for the pretty print purpose (i.e. decoding).
 * Multiple bits can be allowed later if it is found
 * to be insufficient.
 */
1371 encoding = BTF_INT_ENCODING(int_data);
1372 if (encoding &&
1373 encoding != BTF_INT_SIGNED &&
1374 encoding != BTF_INT_CHAR &&
1375 encoding != BTF_INT_BOOL) {
1376 btf_verifier_log_type(env, t, "Unsupported encoding");
1377 return -ENOTSUPP;
1380 btf_verifier_log_type(env, t, NULL);
1382 return meta_needed;
1385 static void btf_int_log(struct btf_verifier_env *env,
1386 const struct btf_type *t)
1388 int int_data = btf_type_int(t);
1390 btf_verifier_log(env,
1391 "size=%u bits_offset=%u nr_bits=%u encoding=%s",
1392 t->size, BTF_INT_OFFSET(int_data),
1393 BTF_INT_BITS(int_data),
1394 btf_int_encoding_str(BTF_INT_ENCODING(int_data)));
1397 static void btf_int128_print(struct seq_file *m, void *data)
/* data points to a __int128 number.
 * Suppose
 *	int128_num = *(__int128 *)data;
 * The below formulas show what upper_num and lower_num represent:
 *	upper_num = int128_num >> 64;
 *	lower_num = int128_num & 0xffffffffFFFFFFFFULL;
 */
1406 u64 upper_num, lower_num;
1408 #ifdef __BIG_ENDIAN_BITFIELD
1409 upper_num = *(u64 *)data;
1410 lower_num = *(u64 *)(data + 8);
1411 #else
1412 upper_num = *(u64 *)(data + 8);
1413 lower_num = *(u64 *)data;
1414 #endif
1415 if (upper_num == 0)
1416 seq_printf(m, "0x%llx", lower_num);
1417 else
1418 seq_printf(m, "0x%llx%016llx", upper_num, lower_num);
1421 static void btf_int128_shift(u64 *print_num, u16 left_shift_bits,
1422 u16 right_shift_bits)
1424 u64 upper_num, lower_num;
1426 #ifdef __BIG_ENDIAN_BITFIELD
1427 upper_num = print_num[0];
1428 lower_num = print_num[1];
1429 #else
1430 upper_num = print_num[1];
1431 lower_num = print_num[0];
1432 #endif
1434 /* shake out un-needed bits by shift/or operations */
1435 if (left_shift_bits >= 64) {
1436 upper_num = lower_num << (left_shift_bits - 64);
1437 lower_num = 0;
1438 } else {
1439 upper_num = (upper_num << left_shift_bits) |
1440 (lower_num >> (64 - left_shift_bits));
1441 lower_num = lower_num << left_shift_bits;
1444 if (right_shift_bits >= 64) {
1445 lower_num = upper_num >> (right_shift_bits - 64);
1446 upper_num = 0;
1447 } else {
1448 lower_num = (lower_num >> right_shift_bits) |
1449 (upper_num << (64 - right_shift_bits));
1450 upper_num = upper_num >> right_shift_bits;
1453 #ifdef __BIG_ENDIAN_BITFIELD
1454 print_num[0] = upper_num;
1455 print_num[1] = lower_num;
1456 #else
1457 print_num[0] = lower_num;
1458 print_num[1] = upper_num;
1459 #endif
1462 static void btf_bitfield_seq_show(void *data, u8 bits_offset,
1463 u8 nr_bits, struct seq_file *m)
1465 u16 left_shift_bits, right_shift_bits;
1466 u8 nr_copy_bytes;
1467 u8 nr_copy_bits;
1468 u64 print_num[2] = {};
1470 nr_copy_bits = nr_bits + bits_offset;
1471 nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
1473 memcpy(print_num, data, nr_copy_bytes);
1475 #ifdef __BIG_ENDIAN_BITFIELD
1476 left_shift_bits = bits_offset;
1477 #else
1478 left_shift_bits = BITS_PER_U128 - nr_copy_bits;
1479 #endif
1480 right_shift_bits = BITS_PER_U128 - nr_bits;
1482 btf_int128_shift(print_num, left_shift_bits, right_shift_bits);
1483 btf_int128_print(m, print_num);
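/* Worked example (added for clarity): on a little-endian kernel, printing
 * a 3-bit bitfield that starts at bit 2 of the copied bytes gives
 *	nr_copy_bits     = 3 + 2             = 5
 *	left_shift_bits  = BITS_PER_U128 - 5 = 123
 *	right_shift_bits = BITS_PER_U128 - 3 = 125
 * so btf_int128_shift() first discards everything above the field and
 * then shifts the remaining 3 bits down to bit 0 for btf_int128_print().
 */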
1487 static void btf_int_bits_seq_show(const struct btf *btf,
1488 const struct btf_type *t,
1489 void *data, u8 bits_offset,
1490 struct seq_file *m)
1492 u32 int_data = btf_type_int(t);
1493 u8 nr_bits = BTF_INT_BITS(int_data);
1494 u8 total_bits_offset;
/*
 * bits_offset is at most 7.
 * BTF_INT_OFFSET() cannot exceed 128 bits.
 */
1500 total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
1501 data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
1502 bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
1503 btf_bitfield_seq_show(data, bits_offset, nr_bits, m);
1506 static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
1507 u32 type_id, void *data, u8 bits_offset,
1508 struct seq_file *m)
1510 u32 int_data = btf_type_int(t);
1511 u8 encoding = BTF_INT_ENCODING(int_data);
1512 bool sign = encoding & BTF_INT_SIGNED;
1513 u8 nr_bits = BTF_INT_BITS(int_data);
1515 if (bits_offset || BTF_INT_OFFSET(int_data) ||
1516 BITS_PER_BYTE_MASKED(nr_bits)) {
1517 btf_int_bits_seq_show(btf, t, data, bits_offset, m);
1518 return;
1521 switch (nr_bits) {
1522 case 128:
1523 btf_int128_print(m, data);
1524 break;
1525 case 64:
1526 if (sign)
1527 seq_printf(m, "%lld", *(s64 *)data);
1528 else
1529 seq_printf(m, "%llu", *(u64 *)data);
1530 break;
1531 case 32:
1532 if (sign)
1533 seq_printf(m, "%d", *(s32 *)data);
1534 else
1535 seq_printf(m, "%u", *(u32 *)data);
1536 break;
1537 case 16:
1538 if (sign)
1539 seq_printf(m, "%d", *(s16 *)data);
1540 else
1541 seq_printf(m, "%u", *(u16 *)data);
1542 break;
1543 case 8:
1544 if (sign)
1545 seq_printf(m, "%d", *(s8 *)data);
1546 else
1547 seq_printf(m, "%u", *(u8 *)data);
1548 break;
1549 default:
1550 btf_int_bits_seq_show(btf, t, data, bits_offset, m);
1554 static const struct btf_kind_operations int_ops = {
1555 .check_meta = btf_int_check_meta,
1556 .resolve = btf_df_resolve,
1557 .check_member = btf_int_check_member,
1558 .check_kflag_member = btf_int_check_kflag_member,
1559 .log_details = btf_int_log,
1560 .seq_show = btf_int_seq_show,
1563 static int btf_modifier_check_member(struct btf_verifier_env *env,
1564 const struct btf_type *struct_type,
1565 const struct btf_member *member,
1566 const struct btf_type *member_type)
1568 const struct btf_type *resolved_type;
1569 u32 resolved_type_id = member->type;
1570 struct btf_member resolved_member;
1571 struct btf *btf = env->btf;
1573 resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
1574 if (!resolved_type) {
1575 btf_verifier_log_member(env, struct_type, member,
1576 "Invalid member");
1577 return -EINVAL;
1580 resolved_member = *member;
1581 resolved_member.type = resolved_type_id;
1583 return btf_type_ops(resolved_type)->check_member(env, struct_type,
1584 &resolved_member,
1585 resolved_type);
1588 static int btf_modifier_check_kflag_member(struct btf_verifier_env *env,
1589 const struct btf_type *struct_type,
1590 const struct btf_member *member,
1591 const struct btf_type *member_type)
1593 const struct btf_type *resolved_type;
1594 u32 resolved_type_id = member->type;
1595 struct btf_member resolved_member;
1596 struct btf *btf = env->btf;
1598 resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
1599 if (!resolved_type) {
1600 btf_verifier_log_member(env, struct_type, member,
1601 "Invalid member");
1602 return -EINVAL;
1605 resolved_member = *member;
1606 resolved_member.type = resolved_type_id;
1608 return btf_type_ops(resolved_type)->check_kflag_member(env, struct_type,
1609 &resolved_member,
1610 resolved_type);
1613 static int btf_ptr_check_member(struct btf_verifier_env *env,
1614 const struct btf_type *struct_type,
1615 const struct btf_member *member,
1616 const struct btf_type *member_type)
1618 u32 struct_size, struct_bits_off, bytes_offset;
1620 struct_size = struct_type->size;
1621 struct_bits_off = member->offset;
1622 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1624 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1625 btf_verifier_log_member(env, struct_type, member,
1626 "Member is not byte aligned");
1627 return -EINVAL;
1630 if (struct_size - bytes_offset < sizeof(void *)) {
1631 btf_verifier_log_member(env, struct_type, member,
1632 "Member exceeds struct_size");
1633 return -EINVAL;
1636 return 0;
1639 static int btf_ref_type_check_meta(struct btf_verifier_env *env,
1640 const struct btf_type *t,
1641 u32 meta_left)
1643 if (btf_type_vlen(t)) {
1644 btf_verifier_log_type(env, t, "vlen != 0");
1645 return -EINVAL;
1648 if (btf_type_kflag(t)) {
1649 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
1650 return -EINVAL;
1653 if (!BTF_TYPE_ID_VALID(t->type)) {
1654 btf_verifier_log_type(env, t, "Invalid type_id");
1655 return -EINVAL;
/* typedef type must have a valid name, and other ref types,
 * volatile, const, restrict, should have a null name.
 */
1661 if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) {
1662 if (!t->name_off ||
1663 !btf_name_valid_identifier(env->btf, t->name_off)) {
1664 btf_verifier_log_type(env, t, "Invalid name");
1665 return -EINVAL;
1667 } else {
1668 if (t->name_off) {
1669 btf_verifier_log_type(env, t, "Invalid name");
1670 return -EINVAL;
1674 btf_verifier_log_type(env, t, NULL);
1676 return 0;
1679 static int btf_modifier_resolve(struct btf_verifier_env *env,
1680 const struct resolve_vertex *v)
1682 const struct btf_type *t = v->t;
1683 const struct btf_type *next_type;
1684 u32 next_type_id = t->type;
1685 struct btf *btf = env->btf;
1687 next_type = btf_type_by_id(btf, next_type_id);
1688 if (!next_type || btf_type_is_resolve_source_only(next_type)) {
1689 btf_verifier_log_type(env, v->t, "Invalid type_id");
1690 return -EINVAL;
1693 if (!env_type_is_resolve_sink(env, next_type) &&
1694 !env_type_is_resolved(env, next_type_id))
1695 return env_stack_push(env, next_type, next_type_id);
/* Figure out the resolved next_type_id with size.
 * They will be stored in the current modifier's
 * resolved_ids and resolved_sizes such that it can
 * save us some type-following later when we use it
 * (e.g. in pretty print).
 */
1703 if (!btf_type_id_size(btf, &next_type_id, NULL)) {
1704 if (env_type_is_resolved(env, next_type_id))
1705 next_type = btf_type_id_resolve(btf, &next_type_id);
1707 /* "typedef void new_void", "const void"...etc */
1708 if (!btf_type_is_void(next_type) &&
1709 !btf_type_is_fwd(next_type) &&
1710 !btf_type_is_func_proto(next_type)) {
1711 btf_verifier_log_type(env, v->t, "Invalid type_id");
1712 return -EINVAL;
1716 env_stack_pop_resolved(env, next_type_id, 0);
1718 return 0;
1721 static int btf_var_resolve(struct btf_verifier_env *env,
1722 const struct resolve_vertex *v)
1724 const struct btf_type *next_type;
1725 const struct btf_type *t = v->t;
1726 u32 next_type_id = t->type;
1727 struct btf *btf = env->btf;
1729 next_type = btf_type_by_id(btf, next_type_id);
1730 if (!next_type || btf_type_is_resolve_source_only(next_type)) {
1731 btf_verifier_log_type(env, v->t, "Invalid type_id");
1732 return -EINVAL;
1735 if (!env_type_is_resolve_sink(env, next_type) &&
1736 !env_type_is_resolved(env, next_type_id))
1737 return env_stack_push(env, next_type, next_type_id);
1739 if (btf_type_is_modifier(next_type)) {
1740 const struct btf_type *resolved_type;
1741 u32 resolved_type_id;
1743 resolved_type_id = next_type_id;
1744 resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
1746 if (btf_type_is_ptr(resolved_type) &&
1747 !env_type_is_resolve_sink(env, resolved_type) &&
1748 !env_type_is_resolved(env, resolved_type_id))
1749 return env_stack_push(env, resolved_type,
1750 resolved_type_id);
/* We must resolve to something concrete at this point; no
 * forward types or similar that would resolve to a size of
 * zero are allowed.
 */
1757 if (!btf_type_id_size(btf, &next_type_id, NULL)) {
1758 btf_verifier_log_type(env, v->t, "Invalid type_id");
1759 return -EINVAL;
1762 env_stack_pop_resolved(env, next_type_id, 0);
1764 return 0;
1767 static int btf_ptr_resolve(struct btf_verifier_env *env,
1768 const struct resolve_vertex *v)
1770 const struct btf_type *next_type;
1771 const struct btf_type *t = v->t;
1772 u32 next_type_id = t->type;
1773 struct btf *btf = env->btf;
1775 next_type = btf_type_by_id(btf, next_type_id);
1776 if (!next_type || btf_type_is_resolve_source_only(next_type)) {
1777 btf_verifier_log_type(env, v->t, "Invalid type_id");
1778 return -EINVAL;
1781 if (!env_type_is_resolve_sink(env, next_type) &&
1782 !env_type_is_resolved(env, next_type_id))
1783 return env_stack_push(env, next_type, next_type_id);
/* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY,
 * the modifier may have stopped resolving when it was resolved
 * to a ptr (last-resolved-ptr).
 *
 * We now need to continue from the last-resolved-ptr to
 * ensure the last-resolved-ptr will not refer back to
 * the current ptr (t).
 */
1793 if (btf_type_is_modifier(next_type)) {
1794 const struct btf_type *resolved_type;
1795 u32 resolved_type_id;
1797 resolved_type_id = next_type_id;
1798 resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
1800 if (btf_type_is_ptr(resolved_type) &&
1801 !env_type_is_resolve_sink(env, resolved_type) &&
1802 !env_type_is_resolved(env, resolved_type_id))
1803 return env_stack_push(env, resolved_type,
1804 resolved_type_id);
1807 if (!btf_type_id_size(btf, &next_type_id, NULL)) {
1808 if (env_type_is_resolved(env, next_type_id))
1809 next_type = btf_type_id_resolve(btf, &next_type_id);
1811 if (!btf_type_is_void(next_type) &&
1812 !btf_type_is_fwd(next_type) &&
1813 !btf_type_is_func_proto(next_type)) {
1814 btf_verifier_log_type(env, v->t, "Invalid type_id");
1815 return -EINVAL;
1819 env_stack_pop_resolved(env, next_type_id, 0);
1821 return 0;
1824 static void btf_modifier_seq_show(const struct btf *btf,
1825 const struct btf_type *t,
1826 u32 type_id, void *data,
1827 u8 bits_offset, struct seq_file *m)
1829 t = btf_type_id_resolve(btf, &type_id);
1831 btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m);
1834 static void btf_var_seq_show(const struct btf *btf, const struct btf_type *t,
1835 u32 type_id, void *data, u8 bits_offset,
1836 struct seq_file *m)
1838 t = btf_type_id_resolve(btf, &type_id);
1840 btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m);
1843 static void btf_ptr_seq_show(const struct btf *btf, const struct btf_type *t,
1844 u32 type_id, void *data, u8 bits_offset,
1845 struct seq_file *m)
1847 /* It is a hashed value */
1848 seq_printf(m, "%p", *(void **)data);
1851 static void btf_ref_type_log(struct btf_verifier_env *env,
1852 const struct btf_type *t)
1854 btf_verifier_log(env, "type_id=%u", t->type);
1857 static struct btf_kind_operations modifier_ops = {
1858 .check_meta = btf_ref_type_check_meta,
1859 .resolve = btf_modifier_resolve,
1860 .check_member = btf_modifier_check_member,
1861 .check_kflag_member = btf_modifier_check_kflag_member,
1862 .log_details = btf_ref_type_log,
1863 .seq_show = btf_modifier_seq_show,
1866 static struct btf_kind_operations ptr_ops = {
1867 .check_meta = btf_ref_type_check_meta,
1868 .resolve = btf_ptr_resolve,
1869 .check_member = btf_ptr_check_member,
1870 .check_kflag_member = btf_generic_check_kflag_member,
1871 .log_details = btf_ref_type_log,
1872 .seq_show = btf_ptr_seq_show,
1875 static s32 btf_fwd_check_meta(struct btf_verifier_env *env,
1876 const struct btf_type *t,
1877 u32 meta_left)
1879 if (btf_type_vlen(t)) {
1880 btf_verifier_log_type(env, t, "vlen != 0");
1881 return -EINVAL;
1884 if (t->type) {
1885 btf_verifier_log_type(env, t, "type != 0");
1886 return -EINVAL;
1889 /* fwd type must have a valid name */
1890 if (!t->name_off ||
1891 !btf_name_valid_identifier(env->btf, t->name_off)) {
1892 btf_verifier_log_type(env, t, "Invalid name");
1893 return -EINVAL;
1896 btf_verifier_log_type(env, t, NULL);
1898 return 0;
1901 static void btf_fwd_type_log(struct btf_verifier_env *env,
1902 const struct btf_type *t)
1904 btf_verifier_log(env, "%s", btf_type_kflag(t) ? "union" : "struct");
1907 static struct btf_kind_operations fwd_ops = {
1908 .check_meta = btf_fwd_check_meta,
1909 .resolve = btf_df_resolve,
1910 .check_member = btf_df_check_member,
1911 .check_kflag_member = btf_df_check_kflag_member,
1912 .log_details = btf_fwd_type_log,
1913 .seq_show = btf_df_seq_show,
1916 static int btf_array_check_member(struct btf_verifier_env *env,
1917 const struct btf_type *struct_type,
1918 const struct btf_member *member,
1919 const struct btf_type *member_type)
1921 u32 struct_bits_off = member->offset;
1922 u32 struct_size, bytes_offset;
1923 u32 array_type_id, array_size;
1924 struct btf *btf = env->btf;
1926 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1927 btf_verifier_log_member(env, struct_type, member,
1928 "Member is not byte aligned");
1929 return -EINVAL;
1932 array_type_id = member->type;
1933 btf_type_id_size(btf, &array_type_id, &array_size);
1934 struct_size = struct_type->size;
1935 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1936 if (struct_size - bytes_offset < array_size) {
1937 btf_verifier_log_member(env, struct_type, member,
1938 "Member exceeds struct_size");
1939 return -EINVAL;
1942 return 0;
1945 static s32 btf_array_check_meta(struct btf_verifier_env *env,
1946 const struct btf_type *t,
1947 u32 meta_left)
1949 const struct btf_array *array = btf_type_array(t);
1950 u32 meta_needed = sizeof(*array);
1952 if (meta_left < meta_needed) {
1953 btf_verifier_log_basic(env, t,
1954 "meta_left:%u meta_needed:%u",
1955 meta_left, meta_needed);
1956 return -EINVAL;
1959 /* array type should not have a name */
1960 if (t->name_off) {
1961 btf_verifier_log_type(env, t, "Invalid name");
1962 return -EINVAL;
1965 if (btf_type_vlen(t)) {
1966 btf_verifier_log_type(env, t, "vlen != 0");
1967 return -EINVAL;
1970 if (btf_type_kflag(t)) {
1971 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
1972 return -EINVAL;
1975 if (t->size) {
1976 btf_verifier_log_type(env, t, "size != 0");
1977 return -EINVAL;
/* Array elem type and index type cannot be in type void,
 * so !array->type and !array->index_type are not allowed.
 */
1983 if (!array->type || !BTF_TYPE_ID_VALID(array->type)) {
1984 btf_verifier_log_type(env, t, "Invalid elem");
1985 return -EINVAL;
1988 if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) {
1989 btf_verifier_log_type(env, t, "Invalid index");
1990 return -EINVAL;
1993 btf_verifier_log_type(env, t, NULL);
1995 return meta_needed;
1998 static int btf_array_resolve(struct btf_verifier_env *env,
1999 const struct resolve_vertex *v)
2001 const struct btf_array *array = btf_type_array(v->t);
2002 const struct btf_type *elem_type, *index_type;
2003 u32 elem_type_id, index_type_id;
2004 struct btf *btf = env->btf;
2005 u32 elem_size;
2007 /* Check array->index_type */
2008 index_type_id = array->index_type;
2009 index_type = btf_type_by_id(btf, index_type_id);
2010 if (btf_type_nosize_or_null(index_type) ||
2011 btf_type_is_resolve_source_only(index_type)) {
2012 btf_verifier_log_type(env, v->t, "Invalid index");
2013 return -EINVAL;
2016 if (!env_type_is_resolve_sink(env, index_type) &&
2017 !env_type_is_resolved(env, index_type_id))
2018 return env_stack_push(env, index_type, index_type_id);
2020 index_type = btf_type_id_size(btf, &index_type_id, NULL);
2021 if (!index_type || !btf_type_is_int(index_type) ||
2022 !btf_type_int_is_regular(index_type)) {
2023 btf_verifier_log_type(env, v->t, "Invalid index");
2024 return -EINVAL;
2027 /* Check array->type */
2028 elem_type_id = array->type;
2029 elem_type = btf_type_by_id(btf, elem_type_id);
2030 if (btf_type_nosize_or_null(elem_type) ||
2031 btf_type_is_resolve_source_only(elem_type)) {
2032 btf_verifier_log_type(env, v->t,
2033 "Invalid elem");
2034 return -EINVAL;
2037 if (!env_type_is_resolve_sink(env, elem_type) &&
2038 !env_type_is_resolved(env, elem_type_id))
2039 return env_stack_push(env, elem_type, elem_type_id);
2041 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
2042 if (!elem_type) {
2043 btf_verifier_log_type(env, v->t, "Invalid elem");
2044 return -EINVAL;
2047 if (btf_type_is_int(elem_type) && !btf_type_int_is_regular(elem_type)) {
2048 btf_verifier_log_type(env, v->t, "Invalid array of int");
2049 return -EINVAL;
2052 if (array->nelems && elem_size > U32_MAX / array->nelems) {
2053 btf_verifier_log_type(env, v->t,
2054 "Array size overflows U32_MAX");
2055 return -EINVAL;
2058 env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems);
2060 return 0;
2063 static void btf_array_log(struct btf_verifier_env *env,
2064 const struct btf_type *t)
2066 const struct btf_array *array = btf_type_array(t);
2068 btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u",
2069 array->type, array->index_type, array->nelems);
2072 static void btf_array_seq_show(const struct btf *btf, const struct btf_type *t,
2073 u32 type_id, void *data, u8 bits_offset,
2074 struct seq_file *m)
2076 const struct btf_array *array = btf_type_array(t);
2077 const struct btf_kind_operations *elem_ops;
2078 const struct btf_type *elem_type;
2079 u32 i, elem_size, elem_type_id;
2081 elem_type_id = array->type;
2082 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
2083 elem_ops = btf_type_ops(elem_type);
2084 seq_puts(m, "[");
2085 for (i = 0; i < array->nelems; i++) {
2086 if (i)
2087 seq_puts(m, ",");
2089 elem_ops->seq_show(btf, elem_type, elem_type_id, data,
2090 bits_offset, m);
2091 data += elem_size;
2093 seq_puts(m, "]");
2096 static struct btf_kind_operations array_ops = {
2097 .check_meta = btf_array_check_meta,
2098 .resolve = btf_array_resolve,
2099 .check_member = btf_array_check_member,
2100 .check_kflag_member = btf_generic_check_kflag_member,
2101 .log_details = btf_array_log,
2102 .seq_show = btf_array_seq_show,
2105 static int btf_struct_check_member(struct btf_verifier_env *env,
2106 const struct btf_type *struct_type,
2107 const struct btf_member *member,
2108 const struct btf_type *member_type)
2110 u32 struct_bits_off = member->offset;
2111 u32 struct_size, bytes_offset;
2113 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2114 btf_verifier_log_member(env, struct_type, member,
2115 "Member is not byte aligned");
2116 return -EINVAL;
2119 struct_size = struct_type->size;
2120 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2121 if (struct_size - bytes_offset < member_type->size) {
2122 btf_verifier_log_member(env, struct_type, member,
2123 "Member exceeds struct_size");
2124 return -EINVAL;
2127 return 0;
2130 static s32 btf_struct_check_meta(struct btf_verifier_env *env,
2131 const struct btf_type *t,
2132 u32 meta_left)
2134 bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
2135 const struct btf_member *member;
2136 u32 meta_needed, last_offset;
2137 struct btf *btf = env->btf;
2138 u32 struct_size = t->size;
2139 u32 offset;
2140 u16 i;
2142 meta_needed = btf_type_vlen(t) * sizeof(*member);
2143 if (meta_left < meta_needed) {
2144 btf_verifier_log_basic(env, t,
2145 "meta_left:%u meta_needed:%u",
2146 meta_left, meta_needed);
2147 return -EINVAL;
2150 /* struct type has either no name or a valid one */
2151 if (t->name_off &&
2152 !btf_name_valid_identifier(env->btf, t->name_off)) {
2153 btf_verifier_log_type(env, t, "Invalid name");
2154 return -EINVAL;
2157 btf_verifier_log_type(env, t, NULL);
2159 last_offset = 0;
2160 for_each_member(i, t, member) {
2161 if (!btf_name_offset_valid(btf, member->name_off)) {
2162 btf_verifier_log_member(env, t, member,
2163 "Invalid member name_offset:%u",
2164 member->name_off);
2165 return -EINVAL;
2168 /* struct member has either no name or a valid one */
2169 if (member->name_off &&
2170 !btf_name_valid_identifier(btf, member->name_off)) {
2171 btf_verifier_log_member(env, t, member, "Invalid name");
2172 return -EINVAL;
2174 /* A member cannot be in type void */
2175 if (!member->type || !BTF_TYPE_ID_VALID(member->type)) {
2176 btf_verifier_log_member(env, t, member,
2177 "Invalid type_id");
2178 return -EINVAL;
2181 offset = btf_member_bit_offset(t, member);
2182 if (is_union && offset) {
2183 btf_verifier_log_member(env, t, member,
2184 "Invalid member bits_offset");
2185 return -EINVAL;
2189 * ">" instead of ">=" because the last member could be
2190 * "char a[0];"
2192 if (last_offset > offset) {
2193 btf_verifier_log_member(env, t, member,
2194 "Invalid member bits_offset");
2195 return -EINVAL;
2198 if (BITS_ROUNDUP_BYTES(offset) > struct_size) {
2199 btf_verifier_log_member(env, t, member,
2200 "Member bits_offset exceeds its struct size");
2201 return -EINVAL;
2204 btf_verifier_log_member(env, t, member, NULL);
2205 last_offset = offset;
2208 return meta_needed;
2211 static int btf_struct_resolve(struct btf_verifier_env *env,
2212 const struct resolve_vertex *v)
2214 const struct btf_member *member;
2215 int err;
2216 u16 i;
2218 /* Before continuing to resolve the next_member,
2219 * ensure the last member is indeed resolved to a
2220 * type with size info.
2222 if (v->next_member) {
2223 const struct btf_type *last_member_type;
2224 const struct btf_member *last_member;
2225 u16 last_member_type_id;
2227 last_member = btf_type_member(v->t) + v->next_member - 1;
2228 last_member_type_id = last_member->type;
2229 if (WARN_ON_ONCE(!env_type_is_resolved(env,
2230 last_member_type_id)))
2231 return -EINVAL;
2233 last_member_type = btf_type_by_id(env->btf,
2234 last_member_type_id);
2235 if (btf_type_kflag(v->t))
2236 err = btf_type_ops(last_member_type)->check_kflag_member(env, v->t,
2237 last_member,
2238 last_member_type);
2239 else
2240 err = btf_type_ops(last_member_type)->check_member(env, v->t,
2241 last_member,
2242 last_member_type);
2243 if (err)
2244 return err;
2247 for_each_member_from(i, v->next_member, v->t, member) {
2248 u32 member_type_id = member->type;
2249 const struct btf_type *member_type = btf_type_by_id(env->btf,
2250 member_type_id);
2252 if (btf_type_nosize_or_null(member_type) ||
2253 btf_type_is_resolve_source_only(member_type)) {
2254 btf_verifier_log_member(env, v->t, member,
2255 "Invalid member");
2256 return -EINVAL;
2259 if (!env_type_is_resolve_sink(env, member_type) &&
2260 !env_type_is_resolved(env, member_type_id)) {
2261 env_stack_set_next_member(env, i + 1);
2262 return env_stack_push(env, member_type, member_type_id);
2265 if (btf_type_kflag(v->t))
2266 err = btf_type_ops(member_type)->check_kflag_member(env, v->t,
2267 member,
2268 member_type);
2269 else
2270 err = btf_type_ops(member_type)->check_member(env, v->t,
2271 member,
2272 member_type);
2273 if (err)
2274 return err;
2277 env_stack_pop_resolved(env, 0, 0);
2279 return 0;
2282 static void btf_struct_log(struct btf_verifier_env *env,
2283 const struct btf_type *t)
2285 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
2288 /* find 'struct bpf_spin_lock' in map value.
2289 * return >= 0 offset if found
2290 * and < 0 in case of error
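 *
 * For example, given a hypothetical map value layout such as
 *
 *	struct map_value {
 *		struct bpf_spin_lock lock;
 *		long counter;
 *	};
 *
 * this returns offsetof(struct map_value, lock), i.e. 0 here.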
2292 int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t)
2294 const struct btf_member *member;
2295 u32 i, off = -ENOENT;
2297 if (!__btf_type_is_struct(t))
2298 return -EINVAL;
2300 for_each_member(i, t, member) {
2301 const struct btf_type *member_type = btf_type_by_id(btf,
2302 member->type);
2303 if (!__btf_type_is_struct(member_type))
2304 continue;
2305 if (member_type->size != sizeof(struct bpf_spin_lock))
2306 continue;
2307 if (strcmp(__btf_name_by_offset(btf, member_type->name_off),
2308 "bpf_spin_lock"))
2309 continue;
2310 if (off != -ENOENT)
2311 /* only one 'struct bpf_spin_lock' is allowed */
2312 return -E2BIG;
2313 off = btf_member_bit_offset(t, member);
2314 if (off % 8)
2315 /* valid C code cannot generate such BTF */
2316 return -EINVAL;
2317 off /= 8;
2318 if (off % __alignof__(struct bpf_spin_lock))
2319 /* valid struct bpf_spin_lock will be 4 byte aligned */
2320 return -EINVAL;
2322 return off;
2325 static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t,
2326 u32 type_id, void *data, u8 bits_offset,
2327 struct seq_file *m)
2329 const char *seq = BTF_INFO_KIND(t->info) == BTF_KIND_UNION ? "|" : ",";
2330 const struct btf_member *member;
2331 u32 i;
2333 seq_puts(m, "{");
2334 for_each_member(i, t, member) {
2335 const struct btf_type *member_type = btf_type_by_id(btf,
2336 member->type);
2337 const struct btf_kind_operations *ops;
2338 u32 member_offset, bitfield_size;
2339 u32 bytes_offset;
2340 u8 bits8_offset;
2342 if (i)
2343 seq_puts(m, seq);
2345 member_offset = btf_member_bit_offset(t, member);
2346 bitfield_size = btf_member_bitfield_size(t, member);
2347 bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
2348 bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
2349 if (bitfield_size) {
2350 btf_bitfield_seq_show(data + bytes_offset, bits8_offset,
2351 bitfield_size, m);
2352 } else {
2353 ops = btf_type_ops(member_type);
2354 ops->seq_show(btf, member_type, member->type,
2355 data + bytes_offset, bits8_offset, m);
2358 seq_puts(m, "}");
2361 static struct btf_kind_operations struct_ops = {
2362 .check_meta = btf_struct_check_meta,
2363 .resolve = btf_struct_resolve,
2364 .check_member = btf_struct_check_member,
2365 .check_kflag_member = btf_generic_check_kflag_member,
2366 .log_details = btf_struct_log,
2367 .seq_show = btf_struct_seq_show,
2370 static int btf_enum_check_member(struct btf_verifier_env *env,
2371 const struct btf_type *struct_type,
2372 const struct btf_member *member,
2373 const struct btf_type *member_type)
2375 u32 struct_bits_off = member->offset;
2376 u32 struct_size, bytes_offset;
2378 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2379 btf_verifier_log_member(env, struct_type, member,
2380 "Member is not byte aligned");
2381 return -EINVAL;
2384 struct_size = struct_type->size;
2385 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2386 if (struct_size - bytes_offset < sizeof(int)) {
2387 btf_verifier_log_member(env, struct_type, member,
2388 "Member exceeds struct_size");
2389 return -EINVAL;
2392 return 0;
2395 static int btf_enum_check_kflag_member(struct btf_verifier_env *env,
2396 const struct btf_type *struct_type,
2397 const struct btf_member *member,
2398 const struct btf_type *member_type)
2400 u32 struct_bits_off, nr_bits, bytes_end, struct_size;
2401 u32 int_bitsize = sizeof(int) * BITS_PER_BYTE;
2403 struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
2404 nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
2405 if (!nr_bits) {
2406 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2407 btf_verifier_log_member(env, struct_type, member,
2408 "Member is not byte aligned");
2409 return -EINVAL;
2412 nr_bits = int_bitsize;
2413 } else if (nr_bits > int_bitsize) {
2414 btf_verifier_log_member(env, struct_type, member,
2415 "Invalid member bitfield_size");
2416 return -EINVAL;
2419 struct_size = struct_type->size;
2420 bytes_end = BITS_ROUNDUP_BYTES(struct_bits_off + nr_bits);
2421 if (struct_size < bytes_end) {
2422 btf_verifier_log_member(env, struct_type, member,
2423 "Member exceeds struct_size");
2424 return -EINVAL;
2427 return 0;
2430 static s32 btf_enum_check_meta(struct btf_verifier_env *env,
2431 const struct btf_type *t,
2432 u32 meta_left)
2434 const struct btf_enum *enums = btf_type_enum(t);
2435 struct btf *btf = env->btf;
2436 u16 i, nr_enums;
2437 u32 meta_needed;
2439 nr_enums = btf_type_vlen(t);
2440 meta_needed = nr_enums * sizeof(*enums);
2442 if (meta_left < meta_needed) {
2443 btf_verifier_log_basic(env, t,
2444 "meta_left:%u meta_needed:%u",
2445 meta_left, meta_needed);
2446 return -EINVAL;
2449 if (btf_type_kflag(t)) {
2450 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2451 return -EINVAL;
2454 if (t->size > 8 || !is_power_of_2(t->size)) {
2455 btf_verifier_log_type(env, t, "Unexpected size");
2456 return -EINVAL;
2459 /* enum type has either no name or a valid one */
2460 if (t->name_off &&
2461 !btf_name_valid_identifier(env->btf, t->name_off)) {
2462 btf_verifier_log_type(env, t, "Invalid name");
2463 return -EINVAL;
2466 btf_verifier_log_type(env, t, NULL);
2468 for (i = 0; i < nr_enums; i++) {
2469 if (!btf_name_offset_valid(btf, enums[i].name_off)) {
2470 btf_verifier_log(env, "\tInvalid name_offset:%u",
2471 enums[i].name_off);
2472 return -EINVAL;
2475 /* enum member must have a valid name */
2476 if (!enums[i].name_off ||
2477 !btf_name_valid_identifier(btf, enums[i].name_off)) {
2478 btf_verifier_log_type(env, t, "Invalid name");
2479 return -EINVAL;
2482 if (env->log.level == BPF_LOG_KERNEL)
2483 continue;
2484 btf_verifier_log(env, "\t%s val=%d\n",
2485 __btf_name_by_offset(btf, enums[i].name_off),
2486 enums[i].val);
2489 return meta_needed;
2492 static void btf_enum_log(struct btf_verifier_env *env,
2493 const struct btf_type *t)
2495 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
2498 static void btf_enum_seq_show(const struct btf *btf, const struct btf_type *t,
2499 u32 type_id, void *data, u8 bits_offset,
2500 struct seq_file *m)
2502 const struct btf_enum *enums = btf_type_enum(t);
2503 u32 i, nr_enums = btf_type_vlen(t);
2504 int v = *(int *)data;
2506 for (i = 0; i < nr_enums; i++) {
2507 if (v == enums[i].val) {
2508 seq_printf(m, "%s",
2509 __btf_name_by_offset(btf,
2510 enums[i].name_off));
2511 return;
2515 seq_printf(m, "%d", v);
2518 static struct btf_kind_operations enum_ops = {
2519 .check_meta = btf_enum_check_meta,
2520 .resolve = btf_df_resolve,
2521 .check_member = btf_enum_check_member,
2522 .check_kflag_member = btf_enum_check_kflag_member,
2523 .log_details = btf_enum_log,
2524 .seq_show = btf_enum_seq_show,
2527 static s32 btf_func_proto_check_meta(struct btf_verifier_env *env,
2528 const struct btf_type *t,
2529 u32 meta_left)
2531 u32 meta_needed = btf_type_vlen(t) * sizeof(struct btf_param);
2533 if (meta_left < meta_needed) {
2534 btf_verifier_log_basic(env, t,
2535 "meta_left:%u meta_needed:%u",
2536 meta_left, meta_needed);
2537 return -EINVAL;
2540 if (t->name_off) {
2541 btf_verifier_log_type(env, t, "Invalid name");
2542 return -EINVAL;
2545 if (btf_type_kflag(t)) {
2546 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2547 return -EINVAL;
2550 btf_verifier_log_type(env, t, NULL);
2552 return meta_needed;
2555 static void btf_func_proto_log(struct btf_verifier_env *env,
2556 const struct btf_type *t)
2558 const struct btf_param *args = (const struct btf_param *)(t + 1);
2559 u16 nr_args = btf_type_vlen(t), i;
2561 btf_verifier_log(env, "return=%u args=(", t->type);
2562 if (!nr_args) {
2563 btf_verifier_log(env, "void");
2564 goto done;
2567 if (nr_args == 1 && !args[0].type) {
2568 /* Only one vararg */
2569 btf_verifier_log(env, "vararg");
2570 goto done;
2573 btf_verifier_log(env, "%u %s", args[0].type,
2574 __btf_name_by_offset(env->btf,
2575 args[0].name_off));
2576 for (i = 1; i < nr_args - 1; i++)
2577 btf_verifier_log(env, ", %u %s", args[i].type,
2578 __btf_name_by_offset(env->btf,
2579 args[i].name_off));
2581 if (nr_args > 1) {
2582 const struct btf_param *last_arg = &args[nr_args - 1];
2584 if (last_arg->type)
2585 btf_verifier_log(env, ", %u %s", last_arg->type,
2586 __btf_name_by_offset(env->btf,
2587 last_arg->name_off));
2588 else
2589 btf_verifier_log(env, ", vararg");
2592 done:
2593 btf_verifier_log(env, ")");
2596 static struct btf_kind_operations func_proto_ops = {
2597 .check_meta = btf_func_proto_check_meta,
2598 .resolve = btf_df_resolve,
2600 * BTF_KIND_FUNC_PROTO cannot be directly referred by
2601 * a struct's member.
2603 * It should be a function pointer instead.
2604 * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO)
2606 * Hence, there is no btf_func_check_member().
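 *
 * For example (an illustrative sketch, not taken from real BTF), a
 * member declared as
 *
 *	struct ops {
 *		int (*handler)(int arg);
 *	};
 *
 * is encoded as 'handler' -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO,
 * never as a FUNC_PROTO member directly.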
2608 .check_member = btf_df_check_member,
2609 .check_kflag_member = btf_df_check_kflag_member,
2610 .log_details = btf_func_proto_log,
2611 .seq_show = btf_df_seq_show,
2614 static s32 btf_func_check_meta(struct btf_verifier_env *env,
2615 const struct btf_type *t,
2616 u32 meta_left)
2618 if (!t->name_off ||
2619 !btf_name_valid_identifier(env->btf, t->name_off)) {
2620 btf_verifier_log_type(env, t, "Invalid name");
2621 return -EINVAL;
2624 if (btf_type_vlen(t)) {
2625 btf_verifier_log_type(env, t, "vlen != 0");
2626 return -EINVAL;
2629 if (btf_type_kflag(t)) {
2630 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2631 return -EINVAL;
2634 btf_verifier_log_type(env, t, NULL);
2636 return 0;
2639 static struct btf_kind_operations func_ops = {
2640 .check_meta = btf_func_check_meta,
2641 .resolve = btf_df_resolve,
2642 .check_member = btf_df_check_member,
2643 .check_kflag_member = btf_df_check_kflag_member,
2644 .log_details = btf_ref_type_log,
2645 .seq_show = btf_df_seq_show,
2648 static s32 btf_var_check_meta(struct btf_verifier_env *env,
2649 const struct btf_type *t,
2650 u32 meta_left)
2652 const struct btf_var *var;
2653 u32 meta_needed = sizeof(*var);
2655 if (meta_left < meta_needed) {
2656 btf_verifier_log_basic(env, t,
2657 "meta_left:%u meta_needed:%u",
2658 meta_left, meta_needed);
2659 return -EINVAL;
2662 if (btf_type_vlen(t)) {
2663 btf_verifier_log_type(env, t, "vlen != 0");
2664 return -EINVAL;
2667 if (btf_type_kflag(t)) {
2668 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2669 return -EINVAL;
2672 if (!t->name_off ||
2673 !__btf_name_valid(env->btf, t->name_off, true)) {
2674 btf_verifier_log_type(env, t, "Invalid name");
2675 return -EINVAL;
2678 /* A var cannot be in type void */
2679 if (!t->type || !BTF_TYPE_ID_VALID(t->type)) {
2680 btf_verifier_log_type(env, t, "Invalid type_id");
2681 return -EINVAL;
2684 var = btf_type_var(t);
2685 if (var->linkage != BTF_VAR_STATIC &&
2686 var->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
2687 btf_verifier_log_type(env, t, "Linkage not supported");
2688 return -EINVAL;
2691 btf_verifier_log_type(env, t, NULL);
2693 return meta_needed;
2696 static void btf_var_log(struct btf_verifier_env *env, const struct btf_type *t)
2698 const struct btf_var *var = btf_type_var(t);
2700 btf_verifier_log(env, "type_id=%u linkage=%u", t->type, var->linkage);
2703 static const struct btf_kind_operations var_ops = {
2704 .check_meta = btf_var_check_meta,
2705 .resolve = btf_var_resolve,
2706 .check_member = btf_df_check_member,
2707 .check_kflag_member = btf_df_check_kflag_member,
2708 .log_details = btf_var_log,
2709 .seq_show = btf_var_seq_show,
2712 static s32 btf_datasec_check_meta(struct btf_verifier_env *env,
2713 const struct btf_type *t,
2714 u32 meta_left)
2716 const struct btf_var_secinfo *vsi;
2717 u64 last_vsi_end_off = 0, sum = 0;
2718 u32 i, meta_needed;
2720 meta_needed = btf_type_vlen(t) * sizeof(*vsi);
2721 if (meta_left < meta_needed) {
2722 btf_verifier_log_basic(env, t,
2723 "meta_left:%u meta_needed:%u",
2724 meta_left, meta_needed);
2725 return -EINVAL;
2728 if (!btf_type_vlen(t)) {
2729 btf_verifier_log_type(env, t, "vlen == 0");
2730 return -EINVAL;
2733 if (!t->size) {
2734 btf_verifier_log_type(env, t, "size == 0");
2735 return -EINVAL;
2738 if (btf_type_kflag(t)) {
2739 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2740 return -EINVAL;
2743 if (!t->name_off ||
2744 !btf_name_valid_section(env->btf, t->name_off)) {
2745 btf_verifier_log_type(env, t, "Invalid name");
2746 return -EINVAL;
2749 btf_verifier_log_type(env, t, NULL);
2751 for_each_vsi(i, t, vsi) {
2752 /* A var cannot be in type void */
2753 if (!vsi->type || !BTF_TYPE_ID_VALID(vsi->type)) {
2754 btf_verifier_log_vsi(env, t, vsi,
2755 "Invalid type_id");
2756 return -EINVAL;
2759 if (vsi->offset < last_vsi_end_off || vsi->offset >= t->size) {
2760 btf_verifier_log_vsi(env, t, vsi,
2761 "Invalid offset");
2762 return -EINVAL;
2765 if (!vsi->size || vsi->size > t->size) {
2766 btf_verifier_log_vsi(env, t, vsi,
2767 "Invalid size");
2768 return -EINVAL;
2771 last_vsi_end_off = vsi->offset + vsi->size;
2772 if (last_vsi_end_off > t->size) {
2773 btf_verifier_log_vsi(env, t, vsi,
2774 "Invalid offset+size");
2775 return -EINVAL;
2778 btf_verifier_log_vsi(env, t, vsi, NULL);
2779 sum += vsi->size;
2782 if (t->size < sum) {
2783 btf_verifier_log_type(env, t, "Invalid btf_info size");
2784 return -EINVAL;
2787 return meta_needed;
2790 static int btf_datasec_resolve(struct btf_verifier_env *env,
2791 const struct resolve_vertex *v)
2793 const struct btf_var_secinfo *vsi;
2794 struct btf *btf = env->btf;
2795 u16 i;
2797 for_each_vsi_from(i, v->next_member, v->t, vsi) {
2798 u32 var_type_id = vsi->type, type_id, type_size = 0;
2799 const struct btf_type *var_type = btf_type_by_id(env->btf,
2800 var_type_id);
2801 if (!var_type || !btf_type_is_var(var_type)) {
2802 btf_verifier_log_vsi(env, v->t, vsi,
2803 "Not a VAR kind member");
2804 return -EINVAL;
2807 if (!env_type_is_resolve_sink(env, var_type) &&
2808 !env_type_is_resolved(env, var_type_id)) {
2809 env_stack_set_next_member(env, i + 1);
2810 return env_stack_push(env, var_type, var_type_id);
2813 type_id = var_type->type;
2814 if (!btf_type_id_size(btf, &type_id, &type_size)) {
2815 btf_verifier_log_vsi(env, v->t, vsi, "Invalid type");
2816 return -EINVAL;
2819 if (vsi->size < type_size) {
2820 btf_verifier_log_vsi(env, v->t, vsi, "Invalid size");
2821 return -EINVAL;
2825 env_stack_pop_resolved(env, 0, 0);
2826 return 0;
2829 static void btf_datasec_log(struct btf_verifier_env *env,
2830 const struct btf_type *t)
2832 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
2835 static void btf_datasec_seq_show(const struct btf *btf,
2836 const struct btf_type *t, u32 type_id,
2837 void *data, u8 bits_offset,
2838 struct seq_file *m)
2840 const struct btf_var_secinfo *vsi;
2841 const struct btf_type *var;
2842 u32 i;
2844 seq_printf(m, "section (\"%s\") = {", __btf_name_by_offset(btf, t->name_off));
2845 for_each_vsi(i, t, vsi) {
2846 var = btf_type_by_id(btf, vsi->type);
2847 if (i)
2848 seq_puts(m, ",");
2849 btf_type_ops(var)->seq_show(btf, var, vsi->type,
2850 data + vsi->offset, bits_offset, m);
2852 seq_puts(m, "}");
2855 static const struct btf_kind_operations datasec_ops = {
2856 .check_meta = btf_datasec_check_meta,
2857 .resolve = btf_datasec_resolve,
2858 .check_member = btf_df_check_member,
2859 .check_kflag_member = btf_df_check_kflag_member,
2860 .log_details = btf_datasec_log,
2861 .seq_show = btf_datasec_seq_show,
2864 static int btf_func_proto_check(struct btf_verifier_env *env,
2865 const struct btf_type *t)
2867 const struct btf_type *ret_type;
2868 const struct btf_param *args;
2869 const struct btf *btf;
2870 u16 nr_args, i;
2871 int err;
2873 btf = env->btf;
2874 args = (const struct btf_param *)(t + 1);
2875 nr_args = btf_type_vlen(t);
2877 /* Check func return type which could be "void" (t->type == 0) */
2878 if (t->type) {
2879 u32 ret_type_id = t->type;
2881 ret_type = btf_type_by_id(btf, ret_type_id);
2882 if (!ret_type) {
2883 btf_verifier_log_type(env, t, "Invalid return type");
2884 return -EINVAL;
2887 if (btf_type_needs_resolve(ret_type) &&
2888 !env_type_is_resolved(env, ret_type_id)) {
2889 err = btf_resolve(env, ret_type, ret_type_id);
2890 if (err)
2891 return err;
2894 /* Ensure the return type is a type that has a size */
2895 if (!btf_type_id_size(btf, &ret_type_id, NULL)) {
2896 btf_verifier_log_type(env, t, "Invalid return type");
2897 return -EINVAL;
2901 if (!nr_args)
2902 return 0;
2904 /* Last func arg type_id could be 0 if it is a vararg */
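/* (For example, an illustrative 'int f(const char *fmt, ...)' proto is
 * encoded with a final btf_param whose type and name_off are both 0.)
 */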
2905 if (!args[nr_args - 1].type) {
2906 if (args[nr_args - 1].name_off) {
2907 btf_verifier_log_type(env, t, "Invalid arg#%u",
2908 nr_args);
2909 return -EINVAL;
2911 nr_args--;
2914 err = 0;
2915 for (i = 0; i < nr_args; i++) {
2916 const struct btf_type *arg_type;
2917 u32 arg_type_id;
2919 arg_type_id = args[i].type;
2920 arg_type = btf_type_by_id(btf, arg_type_id);
2921 if (!arg_type) {
2922 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
2923 err = -EINVAL;
2924 break;
2927 if (args[i].name_off &&
2928 (!btf_name_offset_valid(btf, args[i].name_off) ||
2929 !btf_name_valid_identifier(btf, args[i].name_off))) {
2930 btf_verifier_log_type(env, t,
2931 "Invalid arg#%u", i + 1);
2932 err = -EINVAL;
2933 break;
2936 if (btf_type_needs_resolve(arg_type) &&
2937 !env_type_is_resolved(env, arg_type_id)) {
2938 err = btf_resolve(env, arg_type, arg_type_id);
2939 if (err)
2940 break;
2943 if (!btf_type_id_size(btf, &arg_type_id, NULL)) {
2944 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
2945 err = -EINVAL;
2946 break;
2950 return err;
2953 static int btf_func_check(struct btf_verifier_env *env,
2954 const struct btf_type *t)
2956 const struct btf_type *proto_type;
2957 const struct btf_param *args;
2958 const struct btf *btf;
2959 u16 nr_args, i;
2961 btf = env->btf;
2962 proto_type = btf_type_by_id(btf, t->type);
2964 if (!proto_type || !btf_type_is_func_proto(proto_type)) {
2965 btf_verifier_log_type(env, t, "Invalid type_id");
2966 return -EINVAL;
2969 args = (const struct btf_param *)(proto_type + 1);
2970 nr_args = btf_type_vlen(proto_type);
2971 for (i = 0; i < nr_args; i++) {
2972 if (!args[i].name_off && args[i].type) {
2973 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
2974 return -EINVAL;
2978 return 0;
2981 static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
2982 [BTF_KIND_INT] = &int_ops,
2983 [BTF_KIND_PTR] = &ptr_ops,
2984 [BTF_KIND_ARRAY] = &array_ops,
2985 [BTF_KIND_STRUCT] = &struct_ops,
2986 [BTF_KIND_UNION] = &struct_ops,
2987 [BTF_KIND_ENUM] = &enum_ops,
2988 [BTF_KIND_FWD] = &fwd_ops,
2989 [BTF_KIND_TYPEDEF] = &modifier_ops,
2990 [BTF_KIND_VOLATILE] = &modifier_ops,
2991 [BTF_KIND_CONST] = &modifier_ops,
2992 [BTF_KIND_RESTRICT] = &modifier_ops,
2993 [BTF_KIND_FUNC] = &func_ops,
2994 [BTF_KIND_FUNC_PROTO] = &func_proto_ops,
2995 [BTF_KIND_VAR] = &var_ops,
2996 [BTF_KIND_DATASEC] = &datasec_ops,
2999 static s32 btf_check_meta(struct btf_verifier_env *env,
3000 const struct btf_type *t,
3001 u32 meta_left)
3003 u32 saved_meta_left = meta_left;
3004 s32 var_meta_size;
3006 if (meta_left < sizeof(*t)) {
3007 btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu",
3008 env->log_type_id, meta_left, sizeof(*t));
3009 return -EINVAL;
3011 meta_left -= sizeof(*t);
3013 if (t->info & ~BTF_INFO_MASK) {
3014 btf_verifier_log(env, "[%u] Invalid btf_info:%x",
3015 env->log_type_id, t->info);
3016 return -EINVAL;
3019 if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX ||
3020 BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) {
3021 btf_verifier_log(env, "[%u] Invalid kind:%u",
3022 env->log_type_id, BTF_INFO_KIND(t->info));
3023 return -EINVAL;
3026 if (!btf_name_offset_valid(env->btf, t->name_off)) {
3027 btf_verifier_log(env, "[%u] Invalid name_offset:%u",
3028 env->log_type_id, t->name_off);
3029 return -EINVAL;
3032 var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left);
3033 if (var_meta_size < 0)
3034 return var_meta_size;
3036 meta_left -= var_meta_size;
3038 return saved_meta_left - meta_left;
3041 static int btf_check_all_metas(struct btf_verifier_env *env)
3043 struct btf *btf = env->btf;
3044 struct btf_header *hdr;
3045 void *cur, *end;
3047 hdr = &btf->hdr;
3048 cur = btf->nohdr_data + hdr->type_off;
3049 end = cur + hdr->type_len;
3051 env->log_type_id = 1;
3052 while (cur < end) {
3053 struct btf_type *t = cur;
3054 s32 meta_size;
3056 meta_size = btf_check_meta(env, t, end - cur);
3057 if (meta_size < 0)
3058 return meta_size;
3060 btf_add_type(env, t);
3061 cur += meta_size;
3062 env->log_type_id++;
3065 return 0;
3068 static bool btf_resolve_valid(struct btf_verifier_env *env,
3069 const struct btf_type *t,
3070 u32 type_id)
3072 struct btf *btf = env->btf;
3074 if (!env_type_is_resolved(env, type_id))
3075 return false;
3077 if (btf_type_is_struct(t) || btf_type_is_datasec(t))
3078 return !btf->resolved_ids[type_id] &&
3079 !btf->resolved_sizes[type_id];
3081 if (btf_type_is_modifier(t) || btf_type_is_ptr(t) ||
3082 btf_type_is_var(t)) {
3083 t = btf_type_id_resolve(btf, &type_id);
3084 return t &&
3085 !btf_type_is_modifier(t) &&
3086 !btf_type_is_var(t) &&
3087 !btf_type_is_datasec(t);
3090 if (btf_type_is_array(t)) {
3091 const struct btf_array *array = btf_type_array(t);
3092 const struct btf_type *elem_type;
3093 u32 elem_type_id = array->type;
3094 u32 elem_size;
3096 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
3097 return elem_type && !btf_type_is_modifier(elem_type) &&
3098 (array->nelems * elem_size ==
3099 btf->resolved_sizes[type_id]);
3102 return false;
3105 static int btf_resolve(struct btf_verifier_env *env,
3106 const struct btf_type *t, u32 type_id)
3108 u32 save_log_type_id = env->log_type_id;
3109 const struct resolve_vertex *v;
3110 int err = 0;
3112 env->resolve_mode = RESOLVE_TBD;
3113 env_stack_push(env, t, type_id);
3114 while (!err && (v = env_stack_peak(env))) {
3115 env->log_type_id = v->type_id;
3116 err = btf_type_ops(v->t)->resolve(env, v);
3119 env->log_type_id = type_id;
3120 if (err == -E2BIG) {
3121 btf_verifier_log_type(env, t,
3122 "Exceeded max resolving depth:%u",
3123 MAX_RESOLVE_DEPTH);
3124 } else if (err == -EEXIST) {
3125 btf_verifier_log_type(env, t, "Loop detected");
3128 /* Final sanity check */
3129 if (!err && !btf_resolve_valid(env, t, type_id)) {
3130 btf_verifier_log_type(env, t, "Invalid resolve state");
3131 err = -EINVAL;
3134 env->log_type_id = save_log_type_id;
3135 return err;
3138 static int btf_check_all_types(struct btf_verifier_env *env)
3140 struct btf *btf = env->btf;
3141 u32 type_id;
3142 int err;
3144 err = env_resolve_init(env);
3145 if (err)
3146 return err;
3148 env->phase++;
3149 for (type_id = 1; type_id <= btf->nr_types; type_id++) {
3150 const struct btf_type *t = btf_type_by_id(btf, type_id);
3152 env->log_type_id = type_id;
3153 if (btf_type_needs_resolve(t) &&
3154 !env_type_is_resolved(env, type_id)) {
3155 err = btf_resolve(env, t, type_id);
3156 if (err)
3157 return err;
3160 if (btf_type_is_func_proto(t)) {
3161 err = btf_func_proto_check(env, t);
3162 if (err)
3163 return err;
3166 if (btf_type_is_func(t)) {
3167 err = btf_func_check(env, t);
3168 if (err)
3169 return err;
3173 return 0;
3176 static int btf_parse_type_sec(struct btf_verifier_env *env)
3178 const struct btf_header *hdr = &env->btf->hdr;
3179 int err;
3181 /* Type section must align to 4 bytes */
3182 if (hdr->type_off & (sizeof(u32) - 1)) {
3183 btf_verifier_log(env, "Unaligned type_off");
3184 return -EINVAL;
3187 if (!hdr->type_len) {
3188 btf_verifier_log(env, "No type found");
3189 return -EINVAL;
3192 err = btf_check_all_metas(env);
3193 if (err)
3194 return err;
3196 return btf_check_all_types(env);
3199 static int btf_parse_str_sec(struct btf_verifier_env *env)
3201 const struct btf_header *hdr;
3202 struct btf *btf = env->btf;
3203 const char *start, *end;
3205 hdr = &btf->hdr;
3206 start = btf->nohdr_data + hdr->str_off;
3207 end = start + hdr->str_len;
3209 if (end != btf->data + btf->data_size) {
3210 btf_verifier_log(env, "String section is not at the end");
3211 return -EINVAL;
3214 if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET ||
3215 start[0] || end[-1]) {
3216 btf_verifier_log(env, "Invalid string section");
3217 return -EINVAL;
3220 btf->strings = start;
3222 return 0;
3225 static const size_t btf_sec_info_offset[] = {
3226 offsetof(struct btf_header, type_off),
3227 offsetof(struct btf_header, str_off),
3230 static int btf_sec_info_cmp(const void *a, const void *b)
3232 const struct btf_sec_info *x = a;
3233 const struct btf_sec_info *y = b;
3235 return (int)(x->off - y->off) ? : (int)(x->len - y->len);
3238 static int btf_check_sec_info(struct btf_verifier_env *env,
3239 u32 btf_data_size)
3241 struct btf_sec_info secs[ARRAY_SIZE(btf_sec_info_offset)];
3242 u32 total, expected_total, i;
3243 const struct btf_header *hdr;
3244 const struct btf *btf;
3246 btf = env->btf;
3247 hdr = &btf->hdr;
3249 /* Populate the secs from hdr */
3250 for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++)
3251 secs[i] = *(struct btf_sec_info *)((void *)hdr +
3252 btf_sec_info_offset[i]);
3254 sort(secs, ARRAY_SIZE(btf_sec_info_offset),
3255 sizeof(struct btf_sec_info), btf_sec_info_cmp, NULL);
3257 /* Check for gaps and overlap among sections */
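/* For example (illustrative numbers only): with hdr_len == 24,
 * type_off == 0, type_len == 100, str_off == 100, str_len == 50 and
 * btf_data_size == 174, the two sections tile expected_total == 150
 * bytes exactly: no gap, no overlap, nothing left over.
 */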
3258 total = 0;
3259 expected_total = btf_data_size - hdr->hdr_len;
3260 for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) {
3261 if (expected_total < secs[i].off) {
3262 btf_verifier_log(env, "Invalid section offset");
3263 return -EINVAL;
3265 if (total < secs[i].off) {
3266 /* gap */
3267 btf_verifier_log(env, "Unsupported section found");
3268 return -EINVAL;
3270 if (total > secs[i].off) {
3271 btf_verifier_log(env, "Section overlap found");
3272 return -EINVAL;
3274 if (expected_total - total < secs[i].len) {
3275 btf_verifier_log(env,
3276 "Total section length too long");
3277 return -EINVAL;
3279 total += secs[i].len;
3282 /* There is data other than hdr and known sections */
3283 if (expected_total != total) {
3284 btf_verifier_log(env, "Unsupported section found");
3285 return -EINVAL;
3288 return 0;
3291 static int btf_parse_hdr(struct btf_verifier_env *env)
3293 u32 hdr_len, hdr_copy, btf_data_size;
3294 const struct btf_header *hdr;
3295 struct btf *btf;
3296 int err;
3298 btf = env->btf;
3299 btf_data_size = btf->data_size;
3301 if (btf_data_size <
3302 offsetof(struct btf_header, hdr_len) + sizeof(hdr->hdr_len)) {
3303 btf_verifier_log(env, "hdr_len not found");
3304 return -EINVAL;
3307 hdr = btf->data;
3308 hdr_len = hdr->hdr_len;
3309 if (btf_data_size < hdr_len) {
3310 btf_verifier_log(env, "btf_header not found");
3311 return -EINVAL;
3314 /* Ensure the unsupported header fields are zero */
3315 if (hdr_len > sizeof(btf->hdr)) {
3316 u8 *expected_zero = btf->data + sizeof(btf->hdr);
3317 u8 *end = btf->data + hdr_len;
3319 for (; expected_zero < end; expected_zero++) {
3320 if (*expected_zero) {
3321 btf_verifier_log(env, "Unsupported btf_header");
3322 return -E2BIG;
3327 hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr));
3328 memcpy(&btf->hdr, btf->data, hdr_copy);
3330 hdr = &btf->hdr;
3332 btf_verifier_log_hdr(env, btf_data_size);
3334 if (hdr->magic != BTF_MAGIC) {
3335 btf_verifier_log(env, "Invalid magic");
3336 return -EINVAL;
3339 if (hdr->version != BTF_VERSION) {
3340 btf_verifier_log(env, "Unsupported version");
3341 return -ENOTSUPP;
3344 if (hdr->flags) {
3345 btf_verifier_log(env, "Unsupported flags");
3346 return -ENOTSUPP;
3349 if (btf_data_size == hdr->hdr_len) {
3350 btf_verifier_log(env, "No data");
3351 return -EINVAL;
3354 err = btf_check_sec_info(env, btf_data_size);
3355 if (err)
3356 return err;
3358 return 0;
3361 static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size,
3362 u32 log_level, char __user *log_ubuf, u32 log_size)
3364 struct btf_verifier_env *env = NULL;
3365 struct bpf_verifier_log *log;
3366 struct btf *btf = NULL;
3367 u8 *data;
3368 int err;
3370 if (btf_data_size > BTF_MAX_SIZE)
3371 return ERR_PTR(-E2BIG);
3373 env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
3374 if (!env)
3375 return ERR_PTR(-ENOMEM);
3377 log = &env->log;
3378 if (log_level || log_ubuf || log_size) {
3379 /* user requested verbose verifier output
3380 * and supplied buffer to store the verification trace
3382 log->level = log_level;
3383 log->ubuf = log_ubuf;
3384 log->len_total = log_size;
3386 /* log attributes have to be sane */
3387 if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
3388 !log->level || !log->ubuf) {
3389 err = -EINVAL;
3390 goto errout;
3394 btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
3395 if (!btf) {
3396 err = -ENOMEM;
3397 goto errout;
3399 env->btf = btf;
3401 data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN);
3402 if (!data) {
3403 err = -ENOMEM;
3404 goto errout;
3407 btf->data = data;
3408 btf->data_size = btf_data_size;
3410 if (copy_from_user(data, btf_data, btf_data_size)) {
3411 err = -EFAULT;
3412 goto errout;
3415 err = btf_parse_hdr(env);
3416 if (err)
3417 goto errout;
3419 btf->nohdr_data = btf->data + btf->hdr.hdr_len;
3421 err = btf_parse_str_sec(env);
3422 if (err)
3423 goto errout;
3425 err = btf_parse_type_sec(env);
3426 if (err)
3427 goto errout;
3429 if (log->level && bpf_verifier_log_full(log)) {
3430 err = -ENOSPC;
3431 goto errout;
3434 btf_verifier_env_free(env);
3435 refcount_set(&btf->refcnt, 1);
3436 return btf;
3438 errout:
3439 btf_verifier_env_free(env);
3440 if (btf)
3441 btf_free(btf);
3442 return ERR_PTR(err);
3445 extern char __weak _binary__btf_vmlinux_bin_start[];
3446 extern char __weak _binary__btf_vmlinux_bin_end[];
3447 extern struct btf *btf_vmlinux;
3449 #define BPF_MAP_TYPE(_id, _ops)
3450 static union {
3451 struct bpf_ctx_convert {
3452 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
3453 prog_ctx_type _id##_prog; \
3454 kern_ctx_type _id##_kern;
3455 #include <linux/bpf_types.h>
3456 #undef BPF_PROG_TYPE
3457 } *__t;
3458 /* 't' is written once under lock. Read many times. */
3459 const struct btf_type *t;
3460 } bpf_ctx_convert;
3461 enum {
3462 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
3463 __ctx_convert##_id,
3464 #include <linux/bpf_types.h>
3465 #undef BPF_PROG_TYPE
3467 static u8 bpf_ctx_convert_map[] = {
3468 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
3469 [_id] = __ctx_convert##_id,
3470 #include <linux/bpf_types.h>
3471 #undef BPF_PROG_TYPE
3473 #undef BPF_MAP_TYPE
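/* For one illustrative entry (assuming bpf_types.h maps
 * BPF_PROG_TYPE_SOCKET_FILTER to 'struct __sk_buff' / 'struct sk_buff'),
 * the union above roughly expands to a pair of members:
 *
 *	struct __sk_buff BPF_PROG_TYPE_SOCKET_FILTER_prog;
 *	struct sk_buff BPF_PROG_TYPE_SOCKET_FILTER_kern;
 *
 * which is why btf_get_prog_ctx_type() below steps through the
 * members two at a time.
 */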
3475 static const struct btf_member *
3476 btf_get_prog_ctx_type(struct bpf_verifier_log *log, struct btf *btf,
3477 const struct btf_type *t, enum bpf_prog_type prog_type)
3479 const struct btf_type *conv_struct;
3480 const struct btf_type *ctx_struct;
3481 const struct btf_member *ctx_type;
3482 const char *tname, *ctx_tname;
3484 conv_struct = bpf_ctx_convert.t;
3485 if (!conv_struct) {
3486 bpf_log(log, "btf_vmlinux is malformed\n");
3487 return NULL;
3489 t = btf_type_by_id(btf, t->type);
3490 while (btf_type_is_modifier(t))
3491 t = btf_type_by_id(btf, t->type);
3492 if (!btf_type_is_struct(t)) {
3493 /* Only pointer to struct is supported for now.
3494 * That means that BPF_PROG_TYPE_TRACEPOINT with BTF
3495 * is not supported yet.
3496 * BPF_PROG_TYPE_RAW_TRACEPOINT is fine.
3498 bpf_log(log, "BPF program ctx type is not a struct\n");
3499 return NULL;
3501 tname = btf_name_by_offset(btf, t->name_off);
3502 if (!tname) {
3503 bpf_log(log, "BPF program ctx struct doesn't have a name\n");
3504 return NULL;
3506 /* prog_type is valid bpf program type. No need for bounds check. */
3507 ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2;
3508 /* ctx_struct is a pointer to prog_ctx_type in vmlinux.
3509 * Like 'struct __sk_buff'
3511 ctx_struct = btf_type_by_id(btf_vmlinux, ctx_type->type);
3512 if (!ctx_struct)
3513 /* should not happen */
3514 return NULL;
3515 ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_struct->name_off);
3516 if (!ctx_tname) {
3517 /* should not happen */
3518 bpf_log(log, "Please fix kernel include/linux/bpf_types.h\n");
3519 return NULL;
3521 /* only compare that prog's ctx type name is the same as
3522 * kernel expects. No need to compare field by field.
3523 * It's ok for bpf prog to do:
3524 * struct __sk_buff {};
3525 * int socket_filter_bpf_prog(struct __sk_buff *skb)
3526 * { // no fields of skb are ever used }
3528 if (strcmp(ctx_tname, tname))
3529 return NULL;
3530 return ctx_type;
3533 static int btf_translate_to_vmlinux(struct bpf_verifier_log *log,
3534 struct btf *btf,
3535 const struct btf_type *t,
3536 enum bpf_prog_type prog_type)
3538 const struct btf_member *prog_ctx_type, *kern_ctx_type;
3540 prog_ctx_type = btf_get_prog_ctx_type(log, btf, t, prog_type);
3541 if (!prog_ctx_type)
3542 return -ENOENT;
3543 kern_ctx_type = prog_ctx_type + 1;
3544 return kern_ctx_type->type;
3547 struct btf *btf_parse_vmlinux(void)
3549 struct btf_verifier_env *env = NULL;
3550 struct bpf_verifier_log *log;
3551 struct btf *btf = NULL;
3552 int err, i;
3554 env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
3555 if (!env)
3556 return ERR_PTR(-ENOMEM);
3558 log = &env->log;
3559 log->level = BPF_LOG_KERNEL;
3561 btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
3562 if (!btf) {
3563 err = -ENOMEM;
3564 goto errout;
3566 env->btf = btf;
3568 btf->data = _binary__btf_vmlinux_bin_start;
3569 btf->data_size = _binary__btf_vmlinux_bin_end -
3570 _binary__btf_vmlinux_bin_start;
3572 err = btf_parse_hdr(env);
3573 if (err)
3574 goto errout;
3576 btf->nohdr_data = btf->data + btf->hdr.hdr_len;
3578 err = btf_parse_str_sec(env);
3579 if (err)
3580 goto errout;
3582 err = btf_check_all_metas(env);
3583 if (err)
3584 goto errout;
3586 /* find struct bpf_ctx_convert for type checking later */
3587 for (i = 1; i <= btf->nr_types; i++) {
3588 const struct btf_type *t;
3589 const char *tname;
3591 t = btf_type_by_id(btf, i);
3592 if (!__btf_type_is_struct(t))
3593 continue;
3594 tname = __btf_name_by_offset(btf, t->name_off);
3595 if (!strcmp(tname, "bpf_ctx_convert")) {
3596 /* btf_parse_vmlinux() runs under bpf_verifier_lock */
3597 bpf_ctx_convert.t = t;
3598 break;
3601 if (i > btf->nr_types) {
3602 err = -ENOENT;
3603 goto errout;
3606 btf_verifier_env_free(env);
3607 refcount_set(&btf->refcnt, 1);
3608 return btf;
3610 errout:
3611 btf_verifier_env_free(env);
3612 if (btf) {
3613 kvfree(btf->types);
3614 kfree(btf);
3616 return ERR_PTR(err);
3619 struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog)
3621 struct bpf_prog *tgt_prog = prog->aux->linked_prog;
3623 if (tgt_prog) {
3624 return tgt_prog->aux->btf;
3625 } else {
3626 return btf_vmlinux;
3630 bool btf_ctx_access(int off, int size, enum bpf_access_type type,
3631 const struct bpf_prog *prog,
3632 struct bpf_insn_access_aux *info)
3634 const struct btf_type *t = prog->aux->attach_func_proto;
3635 struct bpf_prog *tgt_prog = prog->aux->linked_prog;
3636 struct btf *btf = bpf_prog_get_target_btf(prog);
3637 const char *tname = prog->aux->attach_func_name;
3638 struct bpf_verifier_log *log = info->log;
3639 const struct btf_param *args;
3640 u32 nr_args, arg;
3641 int ret;
3643 if (off % 8) {
3644 bpf_log(log, "func '%s' offset %d is not multiple of 8\n",
3645 tname, off);
3646 return false;
3648 arg = off / 8;
3649 args = (const struct btf_param *)(t + 1);
3650 /* if (t == NULL) Fall back to default BPF prog with 5 u64 arguments */
3651 nr_args = t ? btf_type_vlen(t) : 5;
3652 if (prog->aux->attach_btf_trace) {
3653 /* skip first 'void *__data' argument in btf_trace_##name typedef */
3654 args++;
3655 nr_args--;
3658 if (prog->expected_attach_type == BPF_TRACE_FEXIT &&
3659 arg == nr_args) {
3660 if (!t)
3661 /* Default prog with 5 args. 6th arg is retval. */
3662 return true;
3663 /* function return type */
3664 t = btf_type_by_id(btf, t->type);
3665 } else if (arg >= nr_args) {
3666 bpf_log(log, "func '%s' doesn't have %d-th argument\n",
3667 tname, arg + 1);
3668 return false;
3669 } else {
3670 if (!t)
3671 /* Default prog with 5 args */
3672 return true;
3673 t = btf_type_by_id(btf, args[arg].type);
3675 /* skip modifiers */
3676 while (btf_type_is_modifier(t))
3677 t = btf_type_by_id(btf, t->type);
3678 if (btf_type_is_int(t))
3679 /* accessing a scalar */
3680 return true;
3681 if (!btf_type_is_ptr(t)) {
3682 bpf_log(log,
3683 "func '%s' arg%d '%s' has type %s. Only pointer access is allowed\n",
3684 tname, arg,
3685 __btf_name_by_offset(btf, t->name_off),
3686 btf_kind_str[BTF_INFO_KIND(t->info)]);
3687 return false;
3689 if (t->type == 0)
3690 /* This is a pointer to void.
3691 * It is the same as scalar from the verifier safety pov.
3692 * No further pointer walking is allowed.
3694 return true;
3696 /* this is a pointer to another type */
3697 info->reg_type = PTR_TO_BTF_ID;
3698 info->btf_id = t->type;
3700 if (tgt_prog) {
3701 ret = btf_translate_to_vmlinux(log, btf, t, tgt_prog->type);
3702 if (ret > 0) {
3703 info->btf_id = ret;
3704 return true;
3705 } else {
3706 return false;
3709 t = btf_type_by_id(btf, t->type);
3710 /* skip modifiers */
3711 while (btf_type_is_modifier(t))
3712 t = btf_type_by_id(btf, t->type);
3713 if (!btf_type_is_struct(t)) {
3714 bpf_log(log,
3715 "func '%s' arg%d type %s is not a struct\n",
3716 tname, arg, btf_kind_str[BTF_INFO_KIND(t->info)]);
3717 return false;
3719 bpf_log(log, "func '%s' arg%d has btf_id %d type %s '%s'\n",
3720 tname, arg, info->btf_id, btf_kind_str[BTF_INFO_KIND(t->info)],
3721 __btf_name_by_offset(btf, t->name_off));
3722 return true;
3725 int btf_struct_access(struct bpf_verifier_log *log,
3726 const struct btf_type *t, int off, int size,
3727 enum bpf_access_type atype,
3728 u32 *next_btf_id)
3730 u32 i, moff, mtrue_end, msize = 0, total_nelems = 0;
3731 const struct btf_type *mtype, *elem_type = NULL;
3732 const struct btf_member *member;
3733 const char *tname, *mname;
3735 again:
3736 tname = __btf_name_by_offset(btf_vmlinux, t->name_off);
3737 if (!btf_type_is_struct(t)) {
3738 bpf_log(log, "Type '%s' is not a struct", tname);
3739 return -EINVAL;
3742 for_each_member(i, t, member) {
3743 if (btf_member_bitfield_size(t, member))
3744 /* bitfields are not supported yet */
3745 continue;
3747 /* offset of the field in bytes */
3748 moff = btf_member_bit_offset(t, member) / 8;
3749 if (off + size <= moff)
3750 /* won't find anything, field is already too far */
3751 break;
3752 /* In case "off" is pointing into a hole of the struct */
3753 if (off < moff)
3754 continue;
3756 /* type of the field */
3757 mtype = btf_type_by_id(btf_vmlinux, member->type);
3758 mname = __btf_name_by_offset(btf_vmlinux, member->name_off);
3760 mtype = btf_resolve_size(btf_vmlinux, mtype, &msize,
3761 &elem_type, &total_nelems);
3762 if (IS_ERR(mtype)) {
3763 bpf_log(log, "field %s doesn't have size\n", mname);
3764 return -EFAULT;
3767 mtrue_end = moff + msize;
3768 if (off >= mtrue_end)
3769 /* no overlap with member, keep iterating */
3770 continue;
3772 if (btf_type_is_array(mtype)) {
3773 u32 elem_idx;
3775 /* btf_resolve_size() above helps to
3776 * linearize a multi-dimensional array.
3778 * The logic here is treating an array
3779 * in a struct as the following way:
3781 * struct outer {
3782 * struct inner array[2][2];
3783 * };
3785 * looks like:
3787 * struct outer {
3788 * struct inner array_elem0;
3789 * struct inner array_elem1;
3790 * struct inner array_elem2;
3791 * struct inner array_elem3;
3792 * };
3794 * When accessing outer->array[1][0], it moves
3795 * moff to "array_elem2", set mtype to
3796 * "struct inner", and msize also becomes
3797 * sizeof(struct inner). Then most of the
3798 * remaining logic will fall through without
3799 * caring whether the current member is an array
3800 * or not.
3802 * Unlike mtype/msize/moff, mtrue_end does not
3803 * change. The naming difference ("_true") indicates
3804 * that it does not always correspond to
3805 * the current mtype/msize/moff.
3806 * It is the true end of the current
3807 * member (i.e. array in this case). That
3808 * will allow an int array to be accessed like
3809 * a scratch space,
3810 * i.e. allow access beyond the size of
3811 * the array's element as long as it is
3812 * within the mtrue_end boundary.
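 *
 * As an illustrative, purely hypothetical example: if
 * sizeof(struct inner) == 8, btf_resolve_size() reports
 * msize == 32 and total_nelems == 4 for the 2x2 array above,
 * so for off == 16 (with moff == 0) the code below computes
 * msize = 32 / 4 = 8, elem_idx = (16 - 0) / 8 = 2 and
 * moff = 0 + 2 * 8 = 16, i.e. "array_elem2".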
3815 /* skip empty array */
3816 if (moff == mtrue_end)
3817 continue;
3819 msize /= total_nelems;
3820 elem_idx = (off - moff) / msize;
3821 moff += elem_idx * msize;
3822 mtype = elem_type;
3825 /* the 'off' we're looking for is either equal to start
3826 * of this field or inside of this struct
3828 if (btf_type_is_struct(mtype)) {
3829 /* our field must be inside that union or struct */
3830 t = mtype;
3832 /* adjust offset we're looking for */
3833 off -= moff;
3834 goto again;
3837 if (btf_type_is_ptr(mtype)) {
3838 const struct btf_type *stype;
3840 if (msize != size || off != moff) {
3841 bpf_log(log,
3842 "cannot access ptr member %s with moff %u in struct %s with off %u size %u\n",
3843 mname, moff, tname, off, size);
3844 return -EACCES;
3847 stype = btf_type_by_id(btf_vmlinux, mtype->type);
3848 /* skip modifiers */
3849 while (btf_type_is_modifier(stype))
3850 stype = btf_type_by_id(btf_vmlinux, stype->type);
3851 if (btf_type_is_struct(stype)) {
3852 *next_btf_id = mtype->type;
3853 return PTR_TO_BTF_ID;
3857 /* Allow more flexible access within an int as long as
3858 * it is within mtrue_end.
3859 * Since mtrue_end could be the end of an array,
3860 * that also allows using an array of int as a scratch
3861 * space. e.g. skb->cb[].
3863 if (off + size > mtrue_end) {
3864 bpf_log(log,
3865 "access beyond the end of member %s (mend:%u) in struct %s with off %u size %u\n",
3866 mname, mtrue_end, tname, off, size);
3867 return -EACCES;
3870 return SCALAR_VALUE;
3872 bpf_log(log, "struct %s doesn't have field at offset %d\n", tname, off);
3873 return -EINVAL;
3876 static int __btf_resolve_helper_id(struct bpf_verifier_log *log, void *fn,
3877 int arg)
3879 char fnname[KSYM_SYMBOL_LEN + 4] = "btf_";
3880 const struct btf_param *args;
3881 const struct btf_type *t;
3882 const char *tname, *sym;
3883 u32 btf_id, i;
3885 if (IS_ERR(btf_vmlinux)) {
3886 bpf_log(log, "btf_vmlinux is malformed\n");
3887 return -EINVAL;
3890 sym = kallsyms_lookup((long)fn, NULL, NULL, NULL, fnname + 4);
3891 if (!sym) {
3892 bpf_log(log, "kernel doesn't have kallsyms\n");
3893 return -EFAULT;
3896 for (i = 1; i <= btf_vmlinux->nr_types; i++) {
3897 t = btf_type_by_id(btf_vmlinux, i);
3898 if (BTF_INFO_KIND(t->info) != BTF_KIND_TYPEDEF)
3899 continue;
3900 tname = __btf_name_by_offset(btf_vmlinux, t->name_off);
3901 if (!strcmp(tname, fnname))
3902 break;
3904 if (i > btf_vmlinux->nr_types) {
3905 bpf_log(log, "helper %s type is not found\n", fnname);
3906 return -ENOENT;
3909 t = btf_type_by_id(btf_vmlinux, t->type);
3910 if (!btf_type_is_ptr(t))
3911 return -EFAULT;
3912 t = btf_type_by_id(btf_vmlinux, t->type);
3913 if (!btf_type_is_func_proto(t))
3914 return -EFAULT;
3916 args = (const struct btf_param *)(t + 1);
3917 if (arg >= btf_type_vlen(t)) {
3918 bpf_log(log, "bpf helper %s doesn't have %d-th argument\n",
3919 fnname, arg);
3920 return -EINVAL;
3923 t = btf_type_by_id(btf_vmlinux, args[arg].type);
3924 if (!btf_type_is_ptr(t) || !t->type) {
3925 /* anything but the pointer to struct is a helper config bug */
3926 bpf_log(log, "ARG_PTR_TO_BTF is misconfigured\n");
3927 return -EFAULT;
3929 btf_id = t->type;
3930 t = btf_type_by_id(btf_vmlinux, t->type);
3931 /* skip modifiers */
3932 while (btf_type_is_modifier(t)) {
3933 btf_id = t->type;
3934 t = btf_type_by_id(btf_vmlinux, t->type);
3936 if (!btf_type_is_struct(t)) {
3937 bpf_log(log, "ARG_PTR_TO_BTF is not a struct\n");
3938 return -EFAULT;
3940 bpf_log(log, "helper %s arg%d has btf_id %d struct %s\n", fnname + 4,
3941 arg, btf_id, __btf_name_by_offset(btf_vmlinux, t->name_off));
3942 return btf_id;
3945 int btf_resolve_helper_id(struct bpf_verifier_log *log,
3946 const struct bpf_func_proto *fn, int arg)
3948 int *btf_id = &fn->btf_id[arg];
3949 int ret;
3951 if (fn->arg_type[arg] != ARG_PTR_TO_BTF_ID)
3952 return -EINVAL;
3954 ret = READ_ONCE(*btf_id);
3955 if (ret)
3956 return ret;
3957 /* ok to race the search. The result is the same */
3958 ret = __btf_resolve_helper_id(log, fn->func, arg);
3959 if (!ret) {
3960 /* Function argument cannot be type 'void' */
3961 bpf_log(log, "BTF resolution bug\n");
3962 return -EFAULT;
3964 WRITE_ONCE(*btf_id, ret);
3965 return ret;
3968 static int __get_type_size(struct btf *btf, u32 btf_id,
3969 const struct btf_type **bad_type)
3971 const struct btf_type *t;
3973 if (!btf_id)
3974 /* void */
3975 return 0;
3976 t = btf_type_by_id(btf, btf_id);
3977 while (t && btf_type_is_modifier(t))
3978 t = btf_type_by_id(btf, t->type);
3979 if (!t)
3980 return -EINVAL;
3981 if (btf_type_is_ptr(t))
3982 /* kernel size of pointer. Not BPF's size of pointer */
3983 return sizeof(void *);
3984 if (btf_type_is_int(t) || btf_type_is_enum(t))
3985 return t->size;
3986 *bad_type = t;
3987 return -EINVAL;
3990 int btf_distill_func_proto(struct bpf_verifier_log *log,
3991 struct btf *btf,
3992 const struct btf_type *func,
3993 const char *tname,
3994 struct btf_func_model *m)
3996 const struct btf_param *args;
3997 const struct btf_type *t;
3998 u32 i, nargs;
3999 int ret;
4001 if (!func) {
4002 /* BTF function prototype doesn't match the verifier types.
4003 * Fall back to 5 u64 args.
4005 for (i = 0; i < 5; i++)
4006 m->arg_size[i] = 8;
4007 m->ret_size = 8;
4008 m->nr_args = 5;
4009 return 0;
4011 args = (const struct btf_param *)(func + 1);
4012 nargs = btf_type_vlen(func);
4013 if (nargs >= MAX_BPF_FUNC_ARGS) {
4014 bpf_log(log,
4015 "The function %s has %d arguments. Too many.\n",
4016 tname, nargs);
4017 return -EINVAL;
4019 ret = __get_type_size(btf, func->type, &t);
4020 if (ret < 0) {
4021 bpf_log(log,
4022 "The function %s return type %s is unsupported.\n",
4023 tname, btf_kind_str[BTF_INFO_KIND(t->info)]);
4024 return -EINVAL;
4026 m->ret_size = ret;
4028 for (i = 0; i < nargs; i++) {
4029 ret = __get_type_size(btf, args[i].type, &t);
4030 if (ret < 0) {
4031 bpf_log(log,
4032 "The function %s arg%d type %s is unsupported.\n",
4033 tname, i, btf_kind_str[BTF_INFO_KIND(t->info)]);
4034 return -EINVAL;
4036 m->arg_size[i] = ret;
4038 m->nr_args = nargs;
4039 return 0;
4042 int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog)
4044 struct bpf_verifier_state *st = env->cur_state;
4045 struct bpf_func_state *func = st->frame[st->curframe];
4046 struct bpf_reg_state *reg = func->regs;
4047 struct bpf_verifier_log *log = &env->log;
4048 struct bpf_prog *prog = env->prog;
4049 struct btf *btf = prog->aux->btf;
4050 const struct btf_param *args;
4051 const struct btf_type *t;
4052 u32 i, nargs, btf_id;
4053 const char *tname;
4055 if (!prog->aux->func_info)
4056 return 0;
4058 btf_id = prog->aux->func_info[subprog].type_id;
4059 if (!btf_id)
4060 return 0;
4062 if (prog->aux->func_info_aux[subprog].unreliable)
4063 return 0;
4065 t = btf_type_by_id(btf, btf_id);
4066 if (!t || !btf_type_is_func(t)) {
4067 bpf_log(log, "BTF of subprog %d doesn't point to KIND_FUNC\n",
4068 subprog);
4069 return -EINVAL;
4071 tname = btf_name_by_offset(btf, t->name_off);
4073 t = btf_type_by_id(btf, t->type);
4074 if (!t || !btf_type_is_func_proto(t)) {
4075 bpf_log(log, "Invalid type of func %s\n", tname);
4076 return -EINVAL;
4078 args = (const struct btf_param *)(t + 1);
4079 nargs = btf_type_vlen(t);
4080 if (nargs > 5) {
4081 bpf_log(log, "Function %s has %d > 5 args\n", tname, nargs);
4082 goto out;
4084 /* check that BTF function arguments match actual types that the
4085 * verifier sees.
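 *
 * For example (illustrative only): for a subprog declared as
 * 'static int f(struct __sk_buff *ctx, long x)', R1 must be a
 * pointer (and, if it is PTR_TO_CTX, its BTF type must match the
 * prog's ctx type), while R2 must be a scalar.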
4087 for (i = 0; i < nargs; i++) {
4088 t = btf_type_by_id(btf, args[i].type);
4089 while (btf_type_is_modifier(t))
4090 t = btf_type_by_id(btf, t->type);
4091 if (btf_type_is_int(t) || btf_type_is_enum(t)) {
4092 if (reg[i + 1].type == SCALAR_VALUE)
4093 continue;
4094 bpf_log(log, "R%d is not a scalar\n", i + 1);
4095 goto out;
4097 if (btf_type_is_ptr(t)) {
4098 if (reg[i + 1].type == SCALAR_VALUE) {
4099 bpf_log(log, "R%d is not a pointer\n", i + 1);
4100 goto out;
4102 /* If program is passing PTR_TO_CTX into subprogram
4103 * check that BTF type matches.
4105 if (reg[i + 1].type == PTR_TO_CTX &&
4106 !btf_get_prog_ctx_type(log, btf, t, prog->type))
4107 goto out;
4108 /* All other pointers are ok */
4109 continue;
4111 bpf_log(log, "Unrecognized argument type %s\n",
4112 btf_kind_str[BTF_INFO_KIND(t->info)]);
4113 goto out;
4115 return 0;
4116 out:
4117 /* LLVM optimizations can remove arguments from static functions. */
4118 bpf_log(log,
4119 "Type info disagrees with actual arguments due to compiler optimizations\n");
4120 prog->aux->func_info_aux[subprog].unreliable = true;
4121 return 0;
4124 void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
4125 struct seq_file *m)
4127 const struct btf_type *t = btf_type_by_id(btf, type_id);
4129 btf_type_ops(t)->seq_show(btf, t, type_id, obj, 0, m);
4132 #ifdef CONFIG_PROC_FS
4133 static void bpf_btf_show_fdinfo(struct seq_file *m, struct file *filp)
4135 const struct btf *btf = filp->private_data;
4137 seq_printf(m, "btf_id:\t%u\n", btf->id);
4139 #endif
4141 static int btf_release(struct inode *inode, struct file *filp)
4143 btf_put(filp->private_data);
4144 return 0;
4147 const struct file_operations btf_fops = {
4148 #ifdef CONFIG_PROC_FS
4149 .show_fdinfo = bpf_btf_show_fdinfo,
4150 #endif
4151 .release = btf_release,
4154 static int __btf_new_fd(struct btf *btf)
4156 return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC);
4159 int btf_new_fd(const union bpf_attr *attr)
4161 struct btf *btf;
4162 int ret;
4164 btf = btf_parse(u64_to_user_ptr(attr->btf),
4165 attr->btf_size, attr->btf_log_level,
4166 u64_to_user_ptr(attr->btf_log_buf),
4167 attr->btf_log_size);
4168 if (IS_ERR(btf))
4169 return PTR_ERR(btf);
4171 ret = btf_alloc_id(btf);
4172 if (ret) {
4173 btf_free(btf);
4174 return ret;
4178 * The BTF ID is published to the userspace.
4179 * All BTF free must go through call_rcu() from
4180 * now on (i.e. free by calling btf_put()).
4183 ret = __btf_new_fd(btf);
4184 if (ret < 0)
4185 btf_put(btf);
4187 return ret;
4190 struct btf *btf_get_by_fd(int fd)
4192 struct btf *btf;
4193 struct fd f;
4195 f = fdget(fd);
4197 if (!f.file)
4198 return ERR_PTR(-EBADF);
4200 if (f.file->f_op != &btf_fops) {
4201 fdput(f);
4202 return ERR_PTR(-EINVAL);
4205 btf = f.file->private_data;
4206 refcount_inc(&btf->refcnt);
4207 fdput(f);
4209 return btf;
4212 int btf_get_info_by_fd(const struct btf *btf,
4213 const union bpf_attr *attr,
4214 union bpf_attr __user *uattr)
4216 struct bpf_btf_info __user *uinfo;
4217 struct bpf_btf_info info = {};
4218 u32 info_copy, btf_copy;
4219 void __user *ubtf;
4220 u32 uinfo_len;
4222 uinfo = u64_to_user_ptr(attr->info.info);
4223 uinfo_len = attr->info.info_len;
4225 info_copy = min_t(u32, uinfo_len, sizeof(info));
4226 if (copy_from_user(&info, uinfo, info_copy))
4227 return -EFAULT;
4229 info.id = btf->id;
4230 ubtf = u64_to_user_ptr(info.btf);
4231 btf_copy = min_t(u32, btf->data_size, info.btf_size);
4232 if (copy_to_user(ubtf, btf->data, btf_copy))
4233 return -EFAULT;
4234 info.btf_size = btf->data_size;
4236 if (copy_to_user(uinfo, &info, info_copy) ||
4237 put_user(info_copy, &uattr->info.info_len))
4238 return -EFAULT;
4240 return 0;
4243 int btf_get_fd_by_id(u32 id)
4245 struct btf *btf;
4246 int fd;
4248 rcu_read_lock();
4249 btf = idr_find(&btf_idr, id);
4250 if (!btf || !refcount_inc_not_zero(&btf->refcnt))
4251 btf = ERR_PTR(-ENOENT);
4252 rcu_read_unlock();
4254 if (IS_ERR(btf))
4255 return PTR_ERR(btf);
4257 fd = __btf_new_fd(btf);
4258 if (fd < 0)
4259 btf_put(btf);
4261 return fd;
4264 u32 btf_id(const struct btf *btf)
4266 return btf->id;